gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008, Stephen Hansen
# Copyright (c) 2009, Robert Corsaro
# Copyright (c) 2010, Steffen Hoffmann
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# TODO: pick format based on subscription. For now users will use the same
# format for all announcements, but in the future we can make this more
# flexible, since it's in the subscription table.
import Queue
import random
import re
import smtplib
import sys
import threading
import time
from email.Charset import Charset, QP, BASE64
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import formatdate, formataddr
# Python >= 2.5 ships the lowercase `email.header` module; older versions
# used `email.Header`.  Catch only ImportError so genuine errors inside the
# email package are not silently swallowed (the original used a bare except).
try:
    from email.header import Header
except ImportError:
    from email.Header import Header
from subprocess import Popen, PIPE
from trac.config import BoolOption, ExtensionOption, IntOption, Option, \
OrderedExtensionsOption
from trac.core import *
from trac.util import get_pkginfo, md5
from trac.util.compat import set, sorted
from trac.util.datefmt import to_timestamp
from trac.util.text import CRLF, to_unicode
from announcer.api import AnnouncementSystem
from announcer.api import IAnnouncementAddressResolver
from announcer.api import IAnnouncementDistributor
from announcer.api import IAnnouncementFormatter
from announcer.api import IAnnouncementPreferenceProvider
from announcer.api import IAnnouncementProducer
from announcer.api import _
from announcer.util.mail import set_header
from announcer.util.mail_crypto import CryptoTxt
class IEmailSender(Interface):
    """Extension point interface for components that allow sending e-mail."""

    def send(self, from_addr, recipients, message):
        """Send message to recipients.

        `from_addr` is the envelope sender address, `recipients` a list of
        destination addresses and `message` the fully rendered RFC 2822
        message text.
        """
class IAnnouncementEmailDecorator(Interface):
    """Extension point interface for components that modify outgoing
    announcement email messages (e.g. add headers) before delivery."""

    def decorate_message(event, message, decorators):
        """Manipulate the message before it is sent on it's way.  The callee
        should call the next decorator by popping decorators and calling the
        popped decorator.  If decorators is empty, don't worry about it.
        """
class EmailDistributor(Component):
    """Distributes announcement events to recipients as (optionally
    signed/encrypted) MIME email messages."""

    implements(IAnnouncementDistributor)

    formatters = ExtensionPoint(IAnnouncementFormatter)
    # Make ordered
    decorators = ExtensionPoint(IAnnouncementEmailDecorator)

    resolvers = OrderedExtensionsOption('announcer', 'email_address_resolvers',
        IAnnouncementAddressResolver, 'SpecifiedEmailResolver, '
        'SessionEmailResolver, DefaultDomainEmailResolver',
        """Comma seperated list of email resolver components in the order
        they will be called.  If an email address is resolved, the remaining
        resolvers will not be called.
        """)

    email_sender = ExtensionOption('announcer', 'email_sender',
        IEmailSender, 'SmtpEmailSender',
        """Name of the component implementing `IEmailSender`.

        This component is used by the announcer system to send emails.
        Currently, `SmtpEmailSender` and `SendmailEmailSender` are provided.
        """)

    enabled = BoolOption('announcer', 'email_enabled', 'true',
        """Enable email notification.""")

    email_from = Option('announcer', 'email_from', 'trac@localhost',
        """Sender address to use in notification emails.""")

    from_name = Option('announcer', 'email_from_name', '',
        """Sender name to use in notification emails.""")

    replyto = Option('announcer', 'email_replyto', 'trac@localhost',
        """Reply-To address to use in notification emails.""")

    mime_encoding = Option('announcer', 'mime_encoding', 'base64',
        """Specifies the MIME encoding scheme for emails.

        Valid options are 'base64' for Base64 encoding, 'qp' for
        Quoted-Printable, and 'none' for no encoding. Note that the no encoding
        means that non-ASCII characters in text are going to cause problems
        with notifications.
        """)

    use_public_cc = BoolOption('announcer', 'use_public_cc', 'false',
        """Recipients can see email addresses of other CC'ed recipients.

        If this option is disabled (the default), recipients are put on BCC
        """)

    # used in email decorators, but not here
    subject_prefix = Option('announcer', 'email_subject_prefix',
        '__default__',
        """Text to prepend to subject line of notification emails.

        If the setting is not defined, then the [$project_name] prefix.
        If no prefix is desired, then specifying an empty option
        will disable it.
        """)

    to = Option('announcer', 'email_to', 'undisclosed-recipients: ;',
        'Default To: field')

    use_threaded_delivery = BoolOption('announcer', 'use_threaded_delivery',
        'false',
        """Do message delivery in a separate thread.

        Enabling this will improve responsiveness for requests that end up
        with an announcement being sent over email. It requires building
        Python with threading support enabled-- which is usually the case.
        To test, start Python and type 'import threading' to see
        if it raises an error.
        """)

    default_email_format = Option('announcer', 'default_email_format',
        'text/plain',
        """The default mime type of the email notifications.

        This can be overridden on a per user basis through the announcer
        preferences panel.
        """)

    rcpt_allow_regexp = Option('announcer', 'rcpt_allow_regexp', '',
        """A whitelist pattern to match any address to before adding to
        recipients list.
        """)

    rcpt_local_regexp = Option('announcer', 'rcpt_local_regexp', '',
        """A whitelist pattern to match any address, that should be
        considered local.

        This will be evaluated only if msg encryption is set too.
        Recipients with matching email addresses will continue to
        receive unencrypted email messages.
        """)

    crypto = Option('announcer', 'email_crypto', '',
        """Enable cryptographically operation on email msg body.

        Empty string, the default for unset, disables all crypto operations.
        Valid values are:
            sign          sign msg body with given privkey
            encrypt       encrypt msg body with pubkeys of all recipients
            sign,encrypt  sign, than encrypt msg body
        """)

    # get GnuPG configuration options
    gpg_binary = Option('announcer', 'gpg_binary', 'gpg',
        """GnuPG binary name, allows for full path too.

        Value 'gpg' is same default as in python-gnupg itself.
        For usual installations location of the gpg binary is auto-detected.
        """)

    gpg_home = Option('announcer', 'gpg_home', '',
        """Directory containing keyring files.

        In case of wrong configuration missing keyring files without content
        will be created in the configured location, provided necessary
        write permssion is granted for the corresponding parent directory.
        """)

    private_key = Option('announcer', 'gpg_signing_key', None,
        """Keyid of private key (last 8 chars or more) used for signing.

        If unset, a private key will be selected from keyring automagicly.
        The password must be available i.e. provided by running gpg-agent
        or empty (bad security). On failing to unlock the private key,
        msg body will get emptied.
        """)

    def __init__(self):
        # Queue is created lazily in get_delivery_queue() so that the
        # delivery thread is only started when threaded delivery is used.
        self.delivery_queue = None
        self._init_pref_encoding()

    def get_delivery_queue(self):
        """Return the shared delivery queue, creating it and starting the
        background DeliveryThread on first use."""
        if not self.delivery_queue:
            self.delivery_queue = Queue.Queue()
            thread = DeliveryThread(self.delivery_queue, self.send)
            thread.start()
        return self.delivery_queue

    # IAnnouncementDistributor
    def transports(self):
        yield "email"

    def formats(self, transport, realm):
        "Find valid formats for transport and realm"
        formats = {}
        for f in self.formatters:
            for style in f.styles(transport, realm):
                formats[style] = f
        self.log.debug(
                "EmailDistributor has found the following formats capable "
                "of handling '%s' of '%s': %s"%(transport, realm,
                    ', '.join(formats.keys())))
        if not formats:
            self.log.error("EmailDistributor is unable to continue " \
                    "without supporting formatters.")
        return formats

    def distribute(self, transport, recipients, event):
        """Resolve addresses, group recipients by preferred format and
        dispatch one message per format (plus encrypted variants)."""
        found = False
        for supported_transport in self.transports():
            if supported_transport == transport:
                found = True
        if not self.enabled or not found:
            self.log.debug("EmailDistributer email_enabled set to false")
            return
        fmtdict = self.formats(transport, event.realm)
        if not fmtdict:
            self.log.error(
                    "EmailDistributer No formats found for %s %s"%(
                        transport, event.realm))
            return
        msgdict = {}
        msgdict_encrypt = {}
        msg_pubkey_ids = []
        # compile pattern before use for better performance
        RCPT_ALLOW_RE = re.compile(self.rcpt_allow_regexp)
        RCPT_LOCAL_RE = re.compile(self.rcpt_local_regexp)

        if self.crypto != '':
            self.log.debug("EmailDistributor attempts crypto operation.")
            self.enigma = CryptoTxt(self.gpg_binary, self.gpg_home)

        for name, authed, addr in recipients:
            fmt = name and \
                self._get_preferred_format(event.realm, name, authed) or \
                self._get_default_format()
            if fmt not in fmtdict:
                self.log.debug(("EmailDistributer format %s not available " +
                    "for %s %s, looking for an alternative")%(
                        fmt, transport, event.realm))
                # If the fmt is not available for this realm, then try to find
                # an alternative
                oldfmt = fmt
                fmt = None
                for f in fmtdict.values():
                    fmt = f.alternative_style_for(
                            transport, event.realm, oldfmt)
                    if fmt:
                        break
                if not fmt:
                    # FIX: this log line referenced an undefined name `k`,
                    # raising NameError instead of logging; use `oldfmt`.
                    self.log.error(
                        "EmailDistributer was unable to find a formatter " +
                        "for format %s"%oldfmt
                    )
                    continue
            rslvr = None
            if name and not addr:
                # figure out what the addr should be if it's not defined
                for rslvr in self.resolvers:
                    addr = rslvr.get_address_for_name(name, authed)
                    if addr:
                        break
            if addr:
                self.log.debug("EmailDistributor found the " \
                        "address '%s' for '%s (%s)' via: %s"%(
                        addr, name, authed and \
                        'authenticated' or 'not authenticated',
                        rslvr.__class__.__name__))

                # ok, we found an addr, add the message
                # but wait, check for allowed rcpt first, if set
                if RCPT_ALLOW_RE.search(addr) is not None:
                    # check for local recipients now
                    local_match = RCPT_LOCAL_RE.search(addr)
                    if self.crypto in ['encrypt', 'sign,encrypt'] and \
                            local_match is None:
                        # search available public keys for matching UID
                        pubkey_ids = self.enigma.get_pubkey_ids(addr)
                        if len(pubkey_ids) > 0:
                            msgdict_encrypt.setdefault(fmt, set()).add((name,
                                    authed, addr))
                            msg_pubkey_ids[len(msg_pubkey_ids):] = pubkey_ids
                            self.log.debug("EmailDistributor got pubkeys " \
                                    "for %s: %s" % (addr, pubkey_ids))
                        else:
                            self.log.debug("EmailDistributor dropped %s " \
                                    "after missing pubkey with corresponding " \
                                    "address %s in any UID" % (name, addr))
                    else:
                        msgdict.setdefault(fmt, set()).add((name, authed,
                                addr))
                        if local_match is not None:
                            self.log.debug("EmailDistributor expected " \
                                    "local delivery for %s to: %s" % (name, addr))
                else:
                    self.log.debug("EmailDistributor dropped %s for " \
                            "not matching allowed recipient pattern %s" % \
                            (addr, self.rcpt_allow_regexp))
            else:
                self.log.debug("EmailDistributor was unable to find an " \
                        "address for: %s (%s)"%(name, authed and \
                        'authenticated' or 'not authenticated'))
        for k, v in msgdict.items():
            if not v or not fmtdict.get(k):
                continue
            # FIX: log the format actually being sent (`k`), not the stale
            # loop variable `fmt` left over from the recipients loop.
            self.log.debug(
                    "EmailDistributor is sending event as '%s' to: %s"%(
                        k, ', '.join(x[2] for x in v)))
            self._do_send(transport, event, k, v, fmtdict[k])
        for k, v in msgdict_encrypt.items():
            if not v or not fmtdict.get(k):
                continue
            self.log.debug(
                    "EmailDistributor is sending encrypted info on event " \
                    "as '%s' to: %s"%(k, ', '.join(x[2] for x in v)))
            self._do_send(transport, event, k, v, fmtdict[k], msg_pubkey_ids)

    def _get_default_format(self):
        return self.default_email_format

    def _get_preferred_format(self, realm, sid, authenticated):
        """Look up the user's preferred email format for `realm` from the
        session_attribute table; fall back to the configured default."""
        if authenticated is None:
            authenticated = 0
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute("""
            SELECT value
              FROM session_attribute
             WHERE sid=%s
               AND authenticated=%s
               AND name=%s
        """, (sid, int(authenticated), 'announcer_email_format_%s' % realm))
        result = cursor.fetchone()
        if result:
            chosen = result[0]
            self.log.debug("EmailDistributor determined the preferred format" \
                    " for '%s (%s)' is: %s"%(sid, authenticated and \
                    'authenticated' or 'not authenticated', chosen))
            return chosen
        else:
            return self._get_default_format()

    def _init_pref_encoding(self):
        """Configure the MIME charset/transfer-encoding from the
        `mime_encoding` option.  Raises TracError on unknown values."""
        self._charset = Charset()
        self._charset.input_charset = 'utf-8'
        pref = self.mime_encoding.lower()
        if pref == 'base64':
            self._charset.header_encoding = BASE64
            self._charset.body_encoding = BASE64
            self._charset.output_charset = 'utf-8'
            self._charset.input_codec = 'utf-8'
            self._charset.output_codec = 'utf-8'
        elif pref in ['qp', 'quoted-printable']:
            self._charset.header_encoding = QP
            self._charset.body_encoding = QP
            self._charset.output_charset = 'utf-8'
            self._charset.input_codec = 'utf-8'
            self._charset.output_codec = 'utf-8'
        elif pref == 'none':
            self._charset.header_encoding = None
            self._charset.body_encoding = None
            self._charset.input_codec = None
            self._charset.output_charset = 'ascii'
        else:
            raise TracError(_('Invalid email encoding setting: %s'%pref))

    def _message_id(self, realm):
        """Generate a predictable, but sufficiently unique message ID."""
        modtime = time.time()
        rand = random.randint(0,32000)
        s = '%s.%d.%d.%s' % (self.env.project_url,
                          modtime, rand,
                          realm.encode('ascii', 'ignore'))
        dig = md5(s).hexdigest()
        host = self.email_from[self.email_from.find('@') + 1:]
        msgid = '<%03d.%s@%s>' % (len(s), dig, host)
        return msgid

    def _filter_recipients(self, rcpt):
        # hook for subclasses; identity by default
        return rcpt

    def _do_send(self, transport, event, format, recipients, formatter,
            pubkey_ids=None):
        """Render `event` with `formatter`, wrap it in a MIME message,
        optionally sign/encrypt the body, and deliver to `recipients`."""
        # FIX: use None instead of a shared mutable default list.
        if pubkey_ids is None:
            pubkey_ids = []
        output = formatter.format(transport, event.realm, format, event)

        # DEVEL: force message body plaintext style for crypto operations
        if self.crypto != '' and pubkey_ids != []:
            if self.crypto == 'sign':
                output = self.enigma.sign(output, self.private_key)
            elif self.crypto == 'encrypt':
                output = self.enigma.encrypt(output, pubkey_ids)
            elif self.crypto == 'sign,encrypt':
                output = self.enigma.sign_encrypt(output, pubkey_ids,
                                                  self.private_key)
            self.log.debug(output)
            self.log.debug(_("EmailDistributor crypto operaton successful."))
            alternate_output = None
        else:
            alternate_style = formatter.alternative_style_for(
                    transport,
                    event.realm,
                    format
            )
            if alternate_style:
                alternate_output = formatter.format(
                        transport,
                        event.realm,
                        alternate_style,
                        event
                )
            else:
                alternate_output = None

        # sanity check
        if not self._charset.body_encoding:
            try:
                dummy = output.encode('ascii')
            # FIX: encoding unicode text raises UnicodeEncodeError, which the
            # former `except UnicodeDecodeError` clause never caught; catch
            # the common base class instead.
            except UnicodeError:
                raise TracError(_("Ticket contains non-ASCII chars. " \
                        "Please change encoding setting"))

        rootMessage = MIMEMultipart("related")
        # TODO: is this good? (from jabber branch)
        #rootMessage.set_charset(self._charset)

        headers = dict()
        headers['Message-ID'] = self._message_id(event.realm)
        headers['Date'] = formatdate()
        from_header = formataddr((
            self.from_name or self.env.project_name,
            self.email_from
        ))
        headers['From'] = from_header
        headers['To'] = '"%s"'%(self.to)
        if self.use_public_cc:
            headers['Cc'] = ', '.join([x[2] for x in recipients if x])
        headers['Reply-To'] = self.replyto
        for k, v in headers.iteritems():
            set_header(rootMessage, k, v)

        rootMessage.preamble = 'This is a multi-part message in MIME format.'
        if alternate_output:
            parentMessage = MIMEMultipart('alternative')
            rootMessage.attach(parentMessage)
            alt_msg_format = 'html' in alternate_style and 'html' or 'plain'
            msgText = MIMEText(alternate_output, alt_msg_format)
            parentMessage.attach(msgText)
        else:
            parentMessage = rootMessage

        msg_format = 'html' in format and 'html' or 'plain'
        msgText = MIMEText(output, msg_format)
        del msgText['Content-Transfer-Encoding']
        msgText.set_charset(self._charset)
        parentMessage.attach(msgText)

        decorators = self._get_decorators()
        if len(decorators) > 0:
            decorator = decorators.pop()
            decorator.decorate_message(event, rootMessage, decorators)

        recip_adds = [x[2] for x in recipients if x]
        # Append any to, cc or bccs added to the recipient list
        for field in ('To', 'Cc', 'Bcc'):
            if rootMessage[field] and \
                    len(str(rootMessage[field]).split(',')) > 0:
                for addy in str(rootMessage[field]).split(','):
                    self._add_recipient(recip_adds, addy)

        # replace with localized bcc hint
        # FIX: headers['To'] was wrapped in quotes above, so the former
        # comparison against the unquoted string could never match; test the
        # raw option value instead.
        if self.to == 'undisclosed-recipients: ;':
            set_header(rootMessage, 'To', _('undisclosed-recipients: ;'))
        self.log.debug("Content of recip_adds: %s" %(recip_adds))

        package = (from_header, recip_adds, rootMessage.as_string())
        if len(recip_adds) > 0:
            start = time.time()
            if self.use_threaded_delivery:
                self.get_delivery_queue().put(package)
            else:
                self.send(*package)
            stop = time.time()
            self.log.debug("EmailDistributor took %s seconds to send."\
                    %(round(stop-start,2)))

    def send(self, from_addr, recipients, message):
        """Send message to recipients via e-mail."""
        # Ensure the message complies with RFC2822: use CRLF line endings
        message = CRLF.join(re.split("\r?\n", message))
        self.email_sender.send(from_addr, recipients, message)

    def _get_decorators(self):
        # copy so decorate_message() may pop() without mutating the
        # extension point list
        return self.decorators[:]

    def _add_recipient(self, recipients, addy):
        # skip the quoted placeholder injected for the default To: header
        if addy.strip() != '"undisclosed-recipients: ;"':
            recipients.append(addy)
class SmtpEmailSender(Component):
    """E-mail sender connecting to an SMTP server."""

    implements(IEmailSender)

    server = Option('smtp', 'server', 'localhost',
        """SMTP server hostname to use for email notifications.""")

    timeout = IntOption('smtp', 'timeout', 10,
        """SMTP server connection timeout. (requires python-2.6)""")

    port = IntOption('smtp', 'port', 25,
        """SMTP server port to use for email notification.""")

    user = Option('smtp', 'user', '',
        """Username for SMTP server.""")

    password = Option('smtp', 'password', '',
        """Password for SMTP server.""")

    use_tls = BoolOption('smtp', 'use_tls', 'false',
        """Use SSL/TLS to send notifications over SMTP.""")

    use_ssl = BoolOption('smtp', 'use_ssl', 'false',
        """Use ssl for smtp connection.""")

    debuglevel = IntOption('smtp', 'debuglevel', 0,
        """Set to 1 for useful smtp debugging on stdout.""")

    def send(self, from_addr, recipients, message):
        """Deliver `message` to `recipients` through the configured SMTP
        server, optionally over SSL or with STARTTLS and authentication."""
        # use defaults to make sure connect() is called in the constructor
        smtpclass = smtplib.SMTP
        if self.use_ssl:
            smtpclass = smtplib.SMTP_SSL
        args = {
            'host': self.server,
            'port': self.port
        }
        # timeout isn't supported until python 2.6
        # FIX: the former check `vparts[0] >= 2 and vparts[1] >= 6` wrongly
        # rejected Python 3.0-3.5; compare the version tuple directly.
        if sys.version_info >= (2, 6):
            args['timeout'] = self.timeout
        smtp = smtpclass(**args)
        smtp.set_debuglevel(self.debuglevel)
        if self.use_tls:
            smtp.ehlo()
            # membership test instead of the deprecated dict.has_key()
            if 'starttls' not in smtp.esmtp_features:
                raise TracError(_("TLS enabled but server does not support " \
                        "TLS"))
            smtp.starttls()
            smtp.ehlo()
        if self.user:
            smtp.login(
                self.user.encode('utf-8'),
                self.password.encode('utf-8')
            )
        smtp.sendmail(from_addr, recipients, message)
        if self.use_tls or self.use_ssl:
            # avoid false failure detection when the server closes
            # the SMTP connection with TLS/SSL enabled
            import socket
            try:
                smtp.quit()
            except socket.sslerror:
                pass
        else:
            smtp.quit()
class SendmailEmailSender(Component):
    """E-mail sender using a locally-installed sendmail program."""

    implements(IEmailSender)

    sendmail_path = Option('sendmail', 'sendmail_path', 'sendmail',
        """Path to the sendmail executable.

        The sendmail program must accept the `-i` and `-f` options.
        """)

    def send(self, from_addr, recipients, message):
        """Pipe `message` to the sendmail binary, with `from_addr` as the
        envelope sender and `recipients` appended to the command line.

        Delivery failures are raised (non-zero exit or stderr output);
        failure to launch sendmail at all is only logged.
        """
        self.log.info("Sending notification through sendmail at %s to %s"
                      % (self.sendmail_path, recipients))
        # -i: don't treat a lone "." line as end of input
        # -f: set the envelope sender address
        cmdline = [self.sendmail_path, "-i", "-f", from_addr]
        cmdline.extend(recipients)
        self.log.debug("Sendmail command line: %s" % ' '.join(cmdline))
        try:
            child = Popen(cmdline, bufsize=-1, stdin=PIPE, stdout=PIPE,
                          stderr=PIPE)
            (out, err) = child.communicate(message)
            # any stderr output is treated as failure, not just exit status
            if child.returncode or err:
                raise Exception("Sendmail failed with (%s, %s), command: '%s'"
                                % (child.returncode, err.strip(), cmdline))
        except OSError, e:
            self.log.error("Failed to run sendmail[%s] with error %s"%\
                    (self.sendmail_path, e))
class DeliveryThread(threading.Thread):
    """Daemon thread that drains a queue of outgoing messages.

    Each queued item is a (from_addr, recipients, message) triple, which is
    passed straight to the supplied sender callable.
    """

    def __init__(self, queue, sender):
        threading.Thread.__init__(self)
        self._sender = sender
        self._queue = queue
        # daemonize so the thread never blocks interpreter shutdown
        self.setDaemon(True)

    def run(self):
        # Block on the queue forever; the daemon flag ends us with the process.
        while True:
            envelope_from, rcpt_list, payload = self._queue.get()
            self._sender(envelope_from, rcpt_list, payload)
| |
#!/usr/bin/env python
"""
Copyright (c) 2015-2016 Roberto Christopher Salgado Bjerre.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
import re
from time import strftime
from xml.etree.ElementTree import parse, ParseError
from sys import stdout as sys_stdout
from subprocess import Popen, PIPE
from optparse import OptionParser
from getpass import getpass
from random import sample
from os import listdir, makedirs
from os.path import isfile, join, dirname, exists
from urllib2 import build_opener, install_opener, ProxyHandler
from urllib2 import HTTPCookieProcessor, HTTPHandler, HTTPSHandler, quote
from lib.website import Website
from lib.common import colorize as color, COOKIE_HANDLER as cookie_handler
from lib.settings import BW
from lib.settings import ASK, PLUS, INFO, TEST, WARN, ERROR
from lib.logger import Logger
NAME = "credmap"
VERSION = "v0.1"
URL = "https://github.com/lightos/credmap/"
# Maximum length of left option column in help listing
MAX_HELP_OPTION_LENGTH = 20
# Character used for progress rotator
ROTATOR_CHARS = "|/-\\"
BANNER_PASSWORDS = ("123456", "HUNTER2", "LOVE",
"SECRET", "ABC123", "GOD", "SEX")
BANNER = """ . .IIIII .II
I%sIIII. I II . II..IIIIIIIIIIIIIIIIIIII
. .IIIIII II IIIIII%sIIIII I.
.IIIII.III I IIIIIIIIIIIIIIIIIIIIIII
.II%sII II .IIIII IIIIIIIIIIII. I
IIIIII IIII I II%sIIIIIII I
.II IIIIIIIIIIIII IIIIIIIII
I. .III%sIIII I II I
.IIII IIIIIIIIIIII . I
IIIII. IIIIII . I.
II%sIII IIIII ..I II .
IIIIII IIII... IIII
IIII III. I II%sII
III I I III
II I .
I """
# Location of the folder containing the websites to test
SITES_DIR = "websites"
# Location of the folder where results will be written to
OUTPUT_DIR = "output"
# Location of file containing user agents
USER_AGENTS_FILE = "agents.txt"
# Location of Git repository
GIT_REPOSITORY = "https://github.com/lightos/credmap.git"
EXAMPLES = """
Examples:
./credmap.py --username janedoe --email janedoe@email.com
./credmap.py -u johndoe -e johndoe@email.com --exclude "github.com, live.com"
./credmap.py -u johndoe -p abc123 -vvv --only "linkedin.com, facebook.com"
./credmap.py -e janedoe@example.com --verbose --proxy "https://127.0.0.1:8080"
./credmap.py --load creds.txt --format "e.u.p"
./credmap.py -l creds.txt -f "u|e:p"
./credmap.py -l creds.txt
./credmap.py --list
"""
class AttribDict(dict):
    """Dictionary whose entries can also be read and written as attributes.

    Reading a missing attribute yields None rather than AttributeError.
    The two multiple_params slots are pre-seeded (as items) to None.
    """

    def __init__(self, *args, **kwargs):
        self.multiple_params = None
        self.multiple_params_url = None
        dict.__init__(self, *args, **kwargs)

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails; fall back to items.
        return self.get(name)

    def __setattr__(self, name, value):
        # Route every attribute assignment into the underlying dict.
        return self.__setitem__(name, value)
def get_revision():
    """
    Returns abbreviated commit hash number as retrieved with:
    "git rev-parse --short HEAD".
    """
    retval = None
    filepath = None
    _ = dirname(__file__)

    # Walk up the directory tree until a .git/HEAD file is found
    # (or the filesystem root is reached).
    while True:
        filepath = join(_, ".git", "HEAD")
        if exists(filepath):
            break
        else:
            filepath = None
            if _ == dirname(_):
                # dirname() is a fixed point => we are at the root
                break
            else:
                _ = dirname(_)

    # Follow "ref: ..." indirections until raw hash content is found.
    while True:
        if filepath and isfile(filepath):
            with open(filepath, "r") as file_:
                content = file_.read()
                filepath = None
                if content.startswith("ref: "):
                    # .strip() drops the trailing newline from the ref path
                    filepath = join(
                        _, ".git", content.replace("ref: ", "")
                    ).strip()
                else:
                    # NOTE(review): pattern matches only the first 32 hex
                    # chars of the 40-char SHA-1 — sufficient, since only
                    # the first 7 are returned below.
                    match = re.match(r"(?i)[0-9a-f]{32}", content)
                    retval = match.group(0) if match else None
                    break
        else:
            break

    if not retval:
        # Fall back to asking git itself.
        process = Popen("git rev-parse --verify HEAD", shell=True,
                        stdout=PIPE, stderr=PIPE)
        stdout, _ = process.communicate()
        match = re.search(r"(?i)[0-9a-f]{32}", stdout or "")
        retval = match.group(0) if match else None

    return retval[:7] if retval else None
def check_revision(version):
    """
    Adapts the default version string and banner to
    use the revision number.
    """
    rev = get_revision()
    return "%s-%s" % (version, rev) if rev else version
def update():
    """
    Updates the program via git pull.
    """
    print("%s Checking for updates..." % INFO)
    process = Popen("git pull %s HEAD" % GIT_REPOSITORY, shell=True,
                    stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    success = not process.returncode

    if success:
        # git pull succeeded; report whether anything actually changed
        updated = "Already" not in stdout
        process = Popen("git rev-parse --verify HEAD", shell=True,
                        stdout=PIPE, stderr=PIPE)
        stdout, _ = process.communicate()
        revision = (stdout[:7] if stdout and
                    re.search(r"(?i)[0-9a-f]{32}", stdout) else "-")
        print("%s the latest revision '%s'." %
              ("%s Already at" % INFO if not updated else
               "%s Updated to" % PLUS, revision))
    else:
        print("%s Problem occurred while updating program.\n" % WARN)

        # Detect the "local changes would be overwritten by merge" failure
        # and offer to hard-reset the working copy before retrying.
        _ = re.search(r"(?P<error>error:[^:]*files\swould\sbe\soverwritten"
                      r"\sby\smerge:(?:\n\t[^\n]+)*)", stderr)
        if _:
            def question():
                """Asks question until a valid answer of y or n is provided."""
                print("\n%s Would you like to overwrite your changes and set "
                      "your local copy to the latest commit?" % ASK)
                sys_stdout.write("%s ALL of your local changes will be deleted"
                                 " [Y/n]: " % WARN)
                _ = raw_input()

                if not _:
                    _ = "y"  # default answer is yes

                if _.lower() == "n":
                    exit()
                elif _.lower() == "y":
                    return
                else:
                    print("%s Did not understand your answer! Try again." %
                          ERROR)
                    question()

            print("%s" % _.group("error"))

            question()

            # untracked files need `git clean`; tracked ones `git reset`
            if "untracked" in stderr:
                cmd = "git clean -df"
            else:
                cmd = "git reset --hard"

            process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
            stdout, _ = process.communicate()

            if "HEAD is now at" in stdout:
                print("\n%s Local copy reset to current git branch." % INFO)
                print("%s Attemping to run update again..." % INFO)
            else:
                print("%s Unable to reset local copy to current git branch." %
                      WARN)
                exit()

            # retry the pull after the reset
            update()
        else:
            print("%s Please make sure that you have "
                  "a 'git' package installed." % INFO)
            print(stderr)
def optional_arg(arg_default):
    """
    Add support to optparse for optional argument values
    """
    def func(option, opt_str, value, parser):
        """Callback: consume the next token as the option's value unless it
        looks like another option, in which case use the default."""
        rargs = parser.rargs
        if rargs and not rargs[0].startswith('-'):
            chosen = rargs.pop(0)
        else:
            chosen = arg_default
        setattr(parser.values, option.dest, chosen)
    return func
def parse_args():
    """
    Parses the command line arguments.
    """
    # Override epilog formatting so the EXAMPLES block is printed verbatim
    OptionParser.format_epilog = lambda self, formatter: self.epilog

    parser = OptionParser(usage="usage: %prog --email EMAIL | --user USER "
                          "| --load LIST [options]",
                          epilog=EXAMPLES)

    parser.add_option("-v", "--verbose", action="count", dest="verbose",
                      help="display extra output information")

    parser.add_option("-u", "--username", dest="username",
                      help="set the username to test with")

    parser.add_option("-p", "--password", dest="password",
                      help="set the password to test with")

    parser.add_option("-e", "--email", dest="email",
                      help="set an email to test with")

    parser.add_option("-l", "--load", dest="load_file",
                      help="load list of credentials in format USER:PASSWORD")

    parser.add_option("-f", "--format", dest="cred_format",
                      help="format to use when reading from file (e.g. u|e:p)")

    parser.add_option("-x", "--exclude", dest="exclude",
                      help="exclude sites from testing")

    parser.add_option("-o", "--only", dest="only",
                      help="test only listed sites")

    parser.add_option("-s", "--safe-urls", dest="safe_urls",
                      action="store_true",
                      help="only test sites that use HTTPS")

    parser.add_option("-i", "--ignore-proxy", dest="ignore_proxy",
                      action="store_true",
                      help="ignore system default HTTP proxy")

    # --proxy takes an OPTIONAL value via the optional_arg() callback
    parser.add_option("--proxy", dest="proxy", action="callback",
                      callback=optional_arg("1"),
                      help="set proxy (e.g. \"socks5://192.168.1.2:9050\")")

    parser.add_option("--list", action="store_true", dest="list",
                      help="list available sites to test with")

    parser.add_option("--update", dest="update", action="store_true",
                      help="update from the official git repository")

    # Pre-compute option strings, then neuter the hook so our rewritten,
    # shortened strings below are not overwritten during help formatting.
    parser.formatter.store_option_strings(parser)
    parser.formatter.store_option_strings = lambda _: None

    # Compact "  -x ARG, --long=ARG" into "-x/--long=ARG" and truncate
    # anything wider than MAX_HELP_OPTION_LENGTH for tidy help output.
    for option, value in parser.formatter.option_strings.items():
        value = re.sub(r"\A(-\w+) (\w+), (--[\w-]+=(\2))\Z", r"\g<1>/\g<3>",
                       value)
        value = value.replace(", ", '/')
        if len(value) > MAX_HELP_OPTION_LENGTH:
            value = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH -
                                   parser.formatter.indent_increment)) % value
        parser.formatter.option_strings[option] = value

    args = parser.parse_args()[0]

    # at least one action-selecting argument is mandatory
    if not any((args.username, args.email, args.update,
                args.list, args.load_file)):
        parser.error("Required argument is missing. Use '-h' for help.")

    return args
def list_sites(extension=False):
    """
    List available sites for testing found in the websites folder.
    Read folder containing each website's XML files.
    """
    names = []
    for entry in listdir(SITES_DIR):
        if not isfile(join(SITES_DIR, entry)):
            continue
        names.append(entry if extension else entry.replace(".xml", ""))
    return names
def populate_site(site, args):
    """
    Parse sites in XML files and return objects.
    """
    try:
        xml_tree = parse("%s/%s.xml" % (SITES_DIR, site)).getroot()
    except ParseError as parse_error:
        print("%s parsing XML file \"%s\". Skipping..." % (ERROR,
                                                           color(site, BW)))
        if args.verbose:
            print("%s: %s" % (ERROR, parse_error.message))
        print()
        return

    site_properties = AttribDict()

    # Flatten the XML elements into site_properties.
    for _ in xml_tree:
        if _.tag == "multiple_params":
            # marker element; the actual params are collected below
            site_properties.multiple_params = True
            site_properties.multiple_params_url = _.attrib["value"]
            continue
        if _.tag in ("custom_search", "time_parameter", "valid_http_status",
                     "invalid_http_status", "custom_response_header"):
            # these elements keep their whole attrib dict
            site_properties[_.tag] = _.attrib
            continue
        if "value" in _.attrib:
            site_properties[_.tag] = _.attrib["value"]
        if "type" in _.attrib:
            site_properties["%s_type" % _.tag] = _.attrib["type"]

    if site_properties.multiple_params:
        # replace the True marker with the list of per-request param dicts
        site_properties.multiple_params = []
        for _ in xml_tree.getiterator('param'):
            params = {}
            for k, val in _.attrib.items():
                if val:
                    params[k] = val
            if params:
                site_properties.multiple_params.append(params)

    # the login URL must at least carry a scheme
    match = re.match(r"(?P<type>[^:]+)://[^.]+(\.\w+)*",
                     site_properties.login_url, re.I)
    if not match:
        print("%s unable to read URL for login in XML file for \"%s\". "
              "Skipping site...\n" % (ERROR, color(site_properties.name, BW)))
        return

    if args.safe_urls and match.group("type").upper() != "HTTPS":
        if args.verbose:
            print("%s URL uses an unsafe transportation mechanism: \"%s\". "
                  "Skipping site...\n" % (WARN, match.group("type").upper()))
        return

    if(not site_properties.login_parameter or
       not site_properties.password_parameter):
        print("%s current XML file is missing parameter(s) for login. "
              "Skipping site...\n" % ERROR)
        return

    return site_properties
def main():
    """
    Initializes and executes the program.
    """
    # Result buckets filled by the nested login() helper below.
    login_sucessful = []
    login_failed = []
    login_skipped = []
    version = check_revision(VERSION)
    print("%s\n\n%s %s (%s)\n" % (
        BANNER % tuple([color(_) for _ in BANNER_PASSWORDS]),
        NAME, version, URL))
    args = parse_args()
    # --- one-shot modes: self-update and site listing ---
    if args.update:
        update()
        exit()
    sites = list_sites()
    if args.list:
        for _ in sites:
            print("- %s" % _)
        exit()
    # Prompt for a password when neither one nor a credentials file was given.
    if not args.password and not args.load_file:
        args.password = getpass("%s Please enter password:" % INFO)
        print()
    # --- proxy configuration (HTTP/HTTPS handler or global SOCKS proxy) ---
    if args.ignore_proxy:
        proxy_handler = ProxyHandler({})
    elif args.proxy:
        match = re.search(r"(?P<type>[^:]+)://(?P<address>[^:]+)"
                          r":(?P<port>\d+)", args.proxy, re.I)
        if match:
            if match.group("type").upper() in ("HTTP", "HTTPS"):
                proxy_host = "%s:%s" % (match.group("address"),
                                        match.group("port"))
                proxy_handler = ProxyHandler({"http": proxy_host,
                                              "https": proxy_host})
            else:
                # SOCKS proxies are installed process-wide via the socks
                # module, so no urllib handler is needed.
                from thirdparty.socks import socks
                if match.group("type").upper() == "SOCKS4":
                    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4,
                                          match.group("address"),
                                          int(match.group("port")), True)
                elif match.group("type").upper() == "SOCKS5":
                    socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5,
                                          match.group("address"),
                                          int(match.group("port")), True)
                proxy_handler = None
        else:
            proxy_handler = ProxyHandler()
    else:
        proxy_handler = None
    # Install a global opener carrying the shared cookie jar (and proxy).
    opener = build_opener(HTTPHandler(), HTTPSHandler(),
                          HTTPCookieProcessor(cookie_handler))
    if proxy_handler:
        opener.add_handler(proxy_handler)
    install_opener(opener)
    # Pick one random user agent for the whole run.
    with open(USER_AGENTS_FILE, 'r') as ua_file:
        args.user_agent = sample(ua_file.readlines(), 1)[0].strip()
    # Narrow the site list per --only / --exclude.
    if args.only:
        sites = [site for site in sites if site in args.only]
    elif args.exclude:
        sites = [site for site in sites if site not in args.exclude]
    print("%s Loaded %d %s to test." %
          (INFO, len(sites), "site" if len(sites) == 1 else "sites"))
    # Validate the credentials file before starting any network work.
    if args.load_file:
        if not isfile(args.load_file):
            print("%s could not find the file \"%s\"" %
                  (WARN, color(args.load_file)))
            exit()
        _ = sum(1 for line in open(args.load_file, "r"))
        if _ < 1:
            print("%s the file \"%s\" doesn't contain any valid credentials." %
                  (WARN, color(args.load_file)))
            exit()
        print("%s Loaded %d credential%s from \"%s\".\n" %
              (INFO, _, "s" if _ != 1 else "", color(args.load_file)))
    print("%s Starting tests at: \"%s\"\n" % (INFO, color(strftime("%X"), BW)))
    if not exists(OUTPUT_DIR):
        makedirs(OUTPUT_DIR)
    log = Logger("%s/credmap" % OUTPUT_DIR)
    log.open()

    def get_targets():
        """
        Retrieve and yield list of sites (targets) for testing.
        """
        for site in sites:
            _ = populate_site(site, args)
            if not _:
                continue
            target = Website(_, {"verbose": args.verbose})
            if not target.user_agent:
                target.user_agent = args.user_agent
            yield target

    def login():
        """
        Verify credentials for login and check if login was successful.

        Reads `target` and `credentials` from the enclosing scope and
        appends the outcome to login_sucessful/login_failed/login_skipped.
        """
        # Skip targets requiring a username/e-mail that was not supplied.
        if(target.username_or_email == "email" and not
           credentials["email"] or
           target.username_or_email == "username" and not
           credentials["username"]):
            if args.verbose:
                print("%s Skipping %s\"%s\" since "
                      "no \"%s\" was specified.\n" %
                      (INFO, "[%s:%s] on " %
                       (credentials["username"] or
                        credentials["email"], credentials["password"]) if
                       args.load_file else "", color(target.name),
                       color(target.username_or_email, BW)))
            login_skipped.append(target.name)
            return
        print("%s Testing %s\"%s\"..." %
              (TEST, "[%s:%s] on " % (credentials["username"] or
                                      credentials["email"],
                                      credentials["password"]) if
               args.load_file else "", color(target.name, BW)))
        # Fresh cookie jar per attempt so sessions don't leak across sites.
        cookie_handler.clear()
        if target.perform_login(credentials, cookie_handler):
            log.write(">>> %s - %s:%s\n" %
                      (target.name, credentials["username"] or
                       credentials["email"], credentials["password"]))
            login_sucessful.append("%s%s" %
                                   (target.name, " [%s:%s]" %
                                    (credentials["username"] or
                                     credentials["email"],
                                     credentials["password"]) if
                                    args.load_file else ""))
        else:
            login_failed.append(target.name)

    # --- iterate credentials (file or a single pair) over all targets ---
    if args.load_file:
        if args.cred_format:
            # --format like "u:p" or "e|p|x": characters 1 and 3 are the
            # field separators; fields are u(sername)/e(mail)/p(assword).
            separators = [re.escape(args.cred_format[1]),
                          re.escape(args.cred_format[3]) if
                          len(args.cred_format) > 3 else "\n"]
            cred_format = re.match(r"(u|e|p)[^upe](u|e|p)(?:[^upe](u|e|p))?",
                                   args.cred_format)
            if not cred_format:
                print("%s Could not parse --format: \"%s\""
                      % (ERROR, color(args.cred_format, BW)))
                exit()
            cred_format = [v.replace("e", "email")
                           .replace("u", "username")
                           .replace("p", "password")
                           for v in cred_format.groups() if v is not None]
        with open(args.load_file, "r") as load_list:
            for user in load_list:
                if args.cred_format:
                    match = re.match(r"([^{0}]+){0}([^{1}]+)(?:{1}([^\n]+))?"
                                     .format(separators[0], separators[1]),
                                     user)
                    credentials = dict(zip(cred_format, match.groups()))
                    credentials["password"] = quote(
                        credentials["password"])
                    if("email" in credentials and
                       not re.match(r"^[A-Za-z0-9._%+-]+@(?:[A-Z"
                                    r"a-z0-9-]+\.)+[A-Za-z]{2,12}$",
                                    credentials["email"])):
                        print("%s Specified e-mail \"%s\" does not appear "
                              "to be correct. Skipping...\n" % (WARN, color(
                                  credentials["email"], BW)))
                        continue
                    if "email" not in credentials:
                        credentials["email"] = None
                    elif "username" not in credentials:
                        credentials["username"] = None
                else:
                    # Default "user:pass" format; an e-mail-shaped first
                    # field is stored as the e-mail instead of the username.
                    user = user.rstrip().split(":", 1)
                    if not user[0]:
                        if args.verbose:
                            print("%s Could not parse credentials: \"%s\"\n" %
                                  (WARN, color(user, BW)))
                        continue
                    match = re.match(r"^[A-Za-z0-9._%+-]+@(?:[A-Z"
                                     r"a-z0-9-]+\.)+[A-Za-z]{2,12}$", user[0])
                    credentials = {"email": user[0] if match else None,
                                   "username": None if match else user[0],
                                   "password": quote(user[1])}
                for target in get_targets():
                    login()
    else:
        credentials = {"username": args.username, "email": args.email,
                       "password": quote(args.password)}
        for target in get_targets():
            login()
    log.close()
    # --- summary ---
    if not args.verbose:
        print()
    if len(login_sucessful) > 0 or len(login_failed) > 0:
        _ = "%s/%s" % (color(len(login_sucessful), BW),
                       color(len(login_sucessful) + len(login_failed), BW))
        sign = PLUS if len(login_sucessful) > (len(login_failed) +
                                               len(login_skipped)) else INFO
        print("%s Succesfully logged in%s." %
              (sign, " with %s credentials on the list." % _ if args.load_file
               else "to %s websites." % _),)
        print("%s An overall success rate of %s.\n" %
              (sign, color("%%%s" % (100 * len(login_sucessful) /
                                     (len(login_sucessful) +
                                      len(login_failed))), BW)))
    if len(login_sucessful) > 0:
        print("%s The provided credentials worked on the following website%s: "
              "%s\n" % (PLUS, "s" if len(login_sucessful) != 1 else "",
                        ", ".join(login_sucessful)))
    print("%s Finished tests at: \"%s\"\n" % (INFO, color(strftime("%X"), BW)))
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit gracefully on Ctrl-C instead of printing a traceback.
        print("\n%s Ctrl-C pressed." % INFO)
| |
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import plugins.EdkPlugins.basemodel.ini as ini
import re, os
from plugins.EdkPlugins.basemodel.message import *
class DECFile(ini.BaseINIFile):
    """Model of an EDK2 package declaration (DEC) file."""

    def GetSectionInstance(self, parent, name, isCombined=False):
        """Factory hook: DEC files are made of DECSection objects."""
        return DECSection(parent, name, isCombined)

    def GetComponents(self):
        """Return all [Components] sections."""
        return self.GetSectionByName('Components')

    def GetPackageRootPath(self):
        """Directory containing this DEC file (the package root)."""
        return os.path.dirname(self.GetFilename()).strip()

    def GetBaseName(self):
        """Value of the PACKAGE_NAME define."""
        return self.GetDefine("PACKAGE_NAME").strip()

    def GetVersion(self):
        """Value of the PACKAGE_VERSION define."""
        return self.GetDefine("PACKAGE_VERSION").strip()

    def GetSectionObjectsByName(self, name, arch=None):
        """Collect entry objects from every section named *name* whose
        architecture qualifier matches *arch* (None matches everything)."""
        collected = []
        for section in self.GetSectionByName(name):
            # skip unmatched architecture content
            if not section.IsArchMatch(arch):
                continue
            collected.extend(section.GetObjects())
        return collected
class DECSection(ini.BaseINISection):
    """One section ([Defines], [Includes], [Pcd*], ...) of a DEC file."""

    def GetSectionINIObject(self, parent):
        """Choose the object class that models entries of this section."""
        kind = self.GetType().lower()
        if 'defines' in kind:
            return DECDefineSectionObject(self)
        if 'includes' in kind:
            return DECIncludeObject(self)
        if 'pcd' in kind:
            return DECPcdObject(self)
        if kind == 'libraryclasses':
            return DECLibraryClassObject(self)
        if kind == 'guids':
            return DECGuidObject(self)
        if kind == 'ppis':
            return DECPpiObject(self)
        if kind == 'protocols':
            return DECProtocolObject(self)
        return DECSectionObject(self)

    def GetType(self):
        """Section name up to the first '.' (e.g. 'Guids' from 'Guids.IA32')."""
        return self._name.split('.')[0].strip()

    def GetArch(self):
        """Architecture qualifier of the section, or 'common' if absent."""
        parts = self._name.split('.')
        return 'common' if len(parts) == 1 else parts[1]

    def IsArchMatch(self, arch):
        """True when this section applies to builds for *arch*."""
        if arch is None or self.GetArch() == 'common':
            return True
        return self.GetArch().lower() == arch.lower()
class DECSectionObject(ini.BaseINISectionObject):
    """Base class for DEC entry objects; delegates arch to the owning section."""
    def GetArch(self):
        """Return the architecture qualifier of the containing section."""
        return self.GetParent().GetArch()
class DECDefineSectionObject(DECSectionObject):
    """A 'KEY = VALUE' entry from a [Defines] section."""

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._key = None    # define name, e.g. PACKAGE_NAME
        self._value = None  # text to the right of the '='

    def Parse(self):
        """Parse the single 'KEY = VALUE' line backing this object.

        Returns True on success; reports an error and returns False when
        the line contains no '='.
        """
        assert (self._start == self._end), 'The object in define section must be in single line'
        line = self.GetLineByOffset(self._start).strip()
        line = line.split('#')[0]  # drop any trailing comment
        # Split on the first '=' only, so values may themselves contain '='
        # (the original split('=') rejected such lines).
        arr = line.split('=', 1)
        if len(arr) != 2:
            ErrorMsg('Invalid define section object',
                     self.GetFilename(),
                     self.GetParent().GetName()
                     )
            return False
        self._key = arr[0].strip()
        self._value = arr[1].strip()
        return True

    def GetKey(self):
        """Return the define's name."""
        return self._key

    def GetValue(self):
        """Return the define's value."""
        return self._value
class DECGuidObject(DECSectionObject):
    """A 'Name = GUID' entry from a [Guids] section."""

    # Registry of every parsed GUID object, keyed by C name.
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._name = None

    def Parse(self):
        """Read 'Name = GUID' from the backing line and register self."""
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        fields = line.split('=')
        self._name = fields[0].strip()
        self._guid = fields[1].strip()
        DECGuidObject._objs.setdefault(self._name, []).append(self)
        return True

    def GetName(self):
        """Return the GUID's C name."""
        return self._name

    def GetGuid(self):
        """Return the GUID value string."""
        return self._guid

    def Destroy(self):
        """Unregister self, dropping the registry entry when it empties."""
        registry = DECGuidObject._objs
        registry[self._name].remove(self)
        if not registry[self._name]:
            del registry[self._name]

    @staticmethod
    def GetObjectDict():
        """Return the name -> objects registry."""
        return DECGuidObject._objs
class DECPpiObject(DECSectionObject):
    """A 'Name = GUID' entry from a [Ppis] section."""

    # Registry of every parsed PPI object, keyed by C name.
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._name = None

    def Parse(self):
        """Read 'Name = GUID' from the backing line and register self."""
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        fields = line.split('=')
        self._name = fields[0].strip()
        self._guid = fields[1].strip()
        DECPpiObject._objs.setdefault(self._name, []).append(self)
        return True

    def GetName(self):
        """Return the PPI's C name."""
        return self._name

    def GetGuid(self):
        """Return the PPI's GUID value string."""
        return self._guid

    def Destroy(self):
        """Unregister self, dropping the registry entry when it empties."""
        registry = DECPpiObject._objs
        registry[self._name].remove(self)
        if not registry[self._name]:
            del registry[self._name]

    @staticmethod
    def GetObjectDict():
        """Return the name -> objects registry."""
        return DECPpiObject._objs
class DECProtocolObject(DECSectionObject):
    """A 'Name = GUID' entry from a [Protocols] section."""

    # Registry of every parsed protocol object, keyed by C name.
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._name = None

    def Parse(self):
        """Read 'Name = GUID' from the backing line and register self."""
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        fields = line.split('=')
        self._name = fields[0].strip()
        self._guid = fields[1].strip()
        DECProtocolObject._objs.setdefault(self._name, []).append(self)
        return True

    def GetName(self):
        """Return the protocol's C name."""
        return self._name

    def GetGuid(self):
        """Return the protocol's GUID value string."""
        return self._guid

    def Destroy(self):
        """Unregister self, dropping the registry entry when it empties."""
        registry = DECProtocolObject._objs
        registry[self._name].remove(self)
        if not registry[self._name]:
            del registry[self._name]

    @staticmethod
    def GetObjectDict():
        """Return the name -> objects registry."""
        return DECProtocolObject._objs
class DECLibraryClassObject(DECSectionObject):
    """A 'ClassName|HeaderFile' entry from a [LibraryClasses] section."""

    # Registry of every parsed library class object, keyed by class name.
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self.mClassName = None
        self.mHeaderFile = None

    def Parse(self):
        """Read 'ClassName|HeaderFile' from the backing line and register self."""
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        self.mClassName, self.mHeaderFile = line.split('|')
        DECLibraryClassObject._objs.setdefault(self.mClassName, []).append(self)
        return True

    def GetClassName(self):
        """Return the library class name."""
        return self.mClassName

    def GetName(self):
        """Alias for GetClassName (uniform object interface)."""
        return self.mClassName

    def GetHeaderFile(self):
        """Return the header file path for this library class."""
        return self.mHeaderFile

    def Destroy(self):
        """Unregister self, dropping the registry entry when it empties."""
        registry = DECLibraryClassObject._objs
        registry[self.mClassName].remove(self)
        if not registry[self.mClassName]:
            del registry[self.mClassName]

    @staticmethod
    def GetObjectDict():
        """Return the class-name -> objects registry."""
        return DECLibraryClassObject._objs
class DECIncludeObject(DECSectionObject):
    """An include-path entry from an [Includes] section."""

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)

    def GetPath(self):
        """Return the include path, with comment and whitespace removed."""
        line = self.GetLineByOffset(self._start)
        return line.split('#')[0].strip()
class DECPcdObject(DECSectionObject):
    """A 'Name|Default|DataType|Token' entry from a [Pcd*] section."""

    # Registry of every parsed PCD object, keyed by full PCD name.
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self.mPcdName = None
        self.mPcdDefaultValue = None
        self.mPcdDataType = None
        self.mPcdToken = None

    def Parse(self):
        """Read the four '|'-separated PCD fields and register self."""
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        (self.mPcdName, self.mPcdDefaultValue, self.mPcdDataType, self.mPcdToken) = line.split('|')
        DECPcdObject._objs.setdefault(self.mPcdName, []).append(self)
        return True

    def Destroy(self):
        """Unregister self, dropping the registry entry when it empties."""
        registry = DECPcdObject._objs
        registry[self.mPcdName].remove(self)
        if not registry[self.mPcdName]:
            del registry[self.mPcdName]

    def GetPcdType(self):
        """PCD access type, taken from the owning section's name."""
        return self.GetParent().GetType()

    def GetPcdName(self):
        """Full PCD name, 'TokenSpaceGuid.PcdName'."""
        return self.mPcdName

    def GetPcdValue(self):
        """Default value string."""
        return self.mPcdDefaultValue

    def GetPcdDataType(self):
        """Declared data type string."""
        return self.mPcdDataType

    def GetPcdToken(self):
        """Token number string."""
        return self.mPcdToken

    def GetName(self):
        """Short name: the part after the token-space GUID."""
        return self.GetPcdName().split('.')[1]

    @staticmethod
    def GetObjectDict():
        """Return the PCD-name -> objects registry."""
        return DECPcdObject._objs
| |
from ggame import App, RectangleAsset, ImageAsset, SoundAsset, CircleAsset
from ggame import LineStyle, Color, Sprite, Sound
from random import randint
# Playfield dimensions in pixels (a 40 x 30 grid of 20-px cells).
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
# Palette used by the sprite classes below.
blue=Color(0x87cefa, 1)
purple=Color(0x7b68ee, 1)
line=LineStyle(0,blue)
black = Color(0, 1)
# Black background covering the whole playfield.
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, line, black)
bg = Sprite(bg_asset, (0,0))
# Rounds to the nearest multiple of 20 (2 * round-to-nearest-10 of x/2).
# NOTE(review): rond, length and a appear unused elsewhere in this file.
rond=lambda x: 2*(round(x/2,-1))
length=1
# Head position (x, y), frame counter z, and the list of body cells snk.
x=20
y=20
z=3
snk=[(20,20)]
go= False   # True while the snake is moving
# Direction code (shadows the builtin `dir`); step() applies:
# 0 -> x-20, 1 -> x+20, 2 -> y+20, 3 -> y-20.
dir=0
a=4
class tail(Sprite):
    """One purple 20x20 square: a segment of the snake's body."""

    asset = RectangleAsset(20, 20, line, purple)

    def __init__(self, position):
        super().__init__(tail.asset, position)
class ntail(Sprite):
    """A black 20x20 square used to erase a previously drawn cell."""

    asset = RectangleAsset(20, 20, line, black)

    def __init__(self, position):
        super().__init__(ntail.asset, position)
class dots(Sprite):
    """A blue 20x20 square: the food dot the snake eats to grow."""

    asset = RectangleAsset(20, 20, line, blue)

    def __init__(self, position):
        super().__init__(dots.asset, position)
# Place the first food dot on a random grid cell and draw the snake's head.
dot=[(20*randint(0,39), 20*randint(0,29))]
for (h,k) in dot:
    dots((h,k))
tail((x,y))
def playagain(event):
    """Reset the game to its initial state (bound to the 'r' key)."""
    global dot, snk, x,y,z,go,dir
    # Erase the food and every body segment by drawing black cells over them.
    for (x,y) in dot:
        ntail((x,y))
    for (x,y) in snk:
        ntail((x,y))
    # Restore the initial head position, frame counter, body and direction.
    x=20
    y=20
    z=3
    snk=[(20,20)]
    go= False
    dir=0
    # Drop a fresh food dot and redraw the head.
    dot=[(20*randint(0,39), 20*randint(0,29))]
    for (h,k) in dot:
        dots((h,k))
    tail((20,20))
def leftKey(event):
    """Turn the snake left (dir=0) unless it is currently moving right."""
    global dir, go
    if dir!=1 or len(snk)==1:
        dir=0
        go=True
        # NOTE(review): the original assigned a local `z = 3` here, which had
        # no effect because `z` is not in the `global` statement; the dead
        # assignment was removed. If the intent was to reset the global frame
        # counter, add `z` to the global list instead — confirm with author.
def rightKey(event):
    """Turn the snake right (dir=1) unless it is currently moving left."""
    global dir, go
    if dir!=0 or len(snk)==1:
        dir=1
        go=True
        # NOTE(review): dead local `z = 3` removed (no `global z`); see the
        # matching note on leftKey — confirm intended counter reset.
def downKey(event):
    """Turn the snake down (dir=2) unless it is currently moving up."""
    global dir, go
    if dir!=3 or len(snk)==1:
        dir=2
        go=True
        # NOTE(review): dead local `z = 3` removed (no `global z`); see the
        # matching note on leftKey — confirm intended counter reset.
def upKey(event):
    """Turn the snake up (dir=3) unless it is currently moving down."""
    global dir, go
    if dir!=2 or len(snk)==1:
        dir=3
        go=True
        # NOTE(review): dead local `z = 3` removed (no `global z`); see the
        # matching note on leftKey — confirm intended counter reset.
def spaceKey(event):
    """Space-key handler: stops the snake's movement."""
    global go
    if go:
        go=False
    if len(snk)==20:
        a=3          # NOTE(review): local; shadows module-level `a` and
        print(a)     # has no lasting effect — confirm intent with author.
    # NOTE(review): this re-clears `go` whenever the head is on screen,
    # making the first branch redundant in practice.
    if x<SCREEN_WIDTH and x>=0 and y<SCREEN_HEIGHT and y>0:
        go=False
def step():
    """Advance the game one frame (called repeatedly by the ggame loop).

    Every 4th frame while `go` is set, moves the head one cell in the
    current direction, then handles self-collision, eating the dot,
    leaving the playfield, or a normal move (erase tail, draw new head).
    The four direction branches are identical apart from the movement.
    """
    global x,y,go,dir,z, dot, snk
    if go:
        z=z+1
        # Only move on every 4th frame; z is reset to 0 after a move.
        if z==4:
            if dir==0:
                # Move left one cell.
                x=x-20
                for (h,k) in dot:
                    # Self-collision: new head position matches a body cell.
                    for (c,d) in snk:
                        if x==c and y==d:
                            print("you lose. Press r to play again.")
                            x=5000000000
                            go=False
                    if x==h and y==k:
                        # Ate the dot: grow by one cell and respawn the dot.
                        snk.append((x,y))
                        tail((x,y))
                        dot[0]=(20*randint(0,39), 20*randint(0,29))
                        for (z,q) in dot:
                            dots((z,q))
                    elif (x+20)>SCREEN_WIDTH or (y+20)>SCREEN_HEIGHT or x<0 or y<0:
                        # Ran off the playfield.
                        print("you lose. Press r to play again.")
                        go=0
                        x=5000000000
                        go=False
                    else:
                        # Normal move: erase the oldest cell, draw the head.
                        ntail(snk[0])
                        tail((x,y))
                        snk.append((x,y))
                        snk.remove(snk[0])
                        for (z,q) in dot:
                            dots((z,q))
            if dir ==1:
                # Move right one cell.
                x=x+20
                for (h,k) in dot:
                    for (c,d) in snk:
                        if x==c and y==d:
                            print("you lose. Press r to play again.")
                            x=5000000000
                            go=False
                    if x==h and y==k:
                        snk.append((x,y))
                        tail((x,y))
                        dot[0]=(20*randint(0,39), 20*randint(0,29))
                        for (z,q) in dot:
                            dots((z,q))
                    elif (x+20)>SCREEN_WIDTH or (y+20)>SCREEN_HEIGHT or x<0 or y<0:
                        print("you lose. Press r to play again.")
                        go=0
                        x=5000000000
                        go=False
                    else:
                        ntail(snk[0])
                        tail((x,y))
                        snk.append((x,y))
                        snk.remove(snk[0])
                        for (z,q) in dot:
                            dots((z,q))
            if dir ==2:
                # Move down one cell (+y).
                y=y+20
                for (h,k) in dot:
                    for (c,d) in snk:
                        if x==c and y==d:
                            print("you lose. Press r to play again.")
                            x=5000000000
                            go=False
                    if x==h and y==k:
                        snk.append((x,y))
                        tail((x,y))
                        dot[0]=(20*randint(0,39), 20*randint(0,29))
                        for (z,q) in dot:
                            dots((z,q))
                    elif (x+20)>SCREEN_WIDTH or (y+20)>SCREEN_HEIGHT or x<0 or y<0:
                        print("you lose. Press r to play again.")
                        x=5000000000
                        go=False
                    else:
                        ntail(snk[0])
                        tail((x,y))
                        snk.append((x,y))
                        #snk.append(tail((x,y)))
                        snk.remove(snk[0])
                        for (z,q) in dot:
                            dots((z,q))
            if dir ==3:
                # Move up one cell (-y).
                y=y-20
                for (h,k) in dot:
                    for (c,d) in snk:
                        if x==c and y==d:
                            print("you lose. Press r to play again.")
                            x=5000000000
                            go=False
                    if x==h and y==k:
                        snk.append((x,y))
                        tail((x,y))
                        dot[0]=(20*randint(0,39), 20*randint(0,29))
                        for (z,q) in dot:
                            dots((z,q))
                    elif (x+20)>SCREEN_WIDTH or (y+20)>SCREEN_HEIGHT or x<0 or y<0:
                        print("you lose. Press r to play again.")
                        x=5000000000
                        go=False
                    else:
                        ntail(snk[0])
                        tail((x,y))
                        snk.append((x,y))
                        snk.remove(snk[0])
                        for (z,q) in dot:
                            dots((z,q))
            # NOTE(review): the (z,q) loops above rebind the global frame
            # counter z; this reset restores the move cadence.
            z=0
# Build the application window, start the per-frame step() loop, and wire
# the keyboard controls (arrows steer, space pauses, 'r' restarts).
myapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)
myapp.run(step)
myapp.listenKeyEvent('keydown', 'left arrow', leftKey)
myapp.listenKeyEvent('keydown', 'up arrow', upKey)
myapp.listenKeyEvent('keydown', 'down arrow', downKey)
myapp.listenKeyEvent('keydown', 'right arrow', rightKey)
myapp.listenKeyEvent('keyup', 'space', spaceKey)
myapp.listenKeyEvent('keyup', 'r', playagain)
| |
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Creates a new DAQ device class. This class assumes that there is a
DAQ connected and mapped as Dev1. It assumes a specific wiring topology on the DAQ (it is not
meant to be a generic DAQ interface). The following diagram shows the wiring for one DaqDevice
port::
Port 0
========
| A0+ <--- Vr -------------------------|
| |
| A0- <--- GND -------------------// |
| |
| A1+ <--- V+ ------------|-------V+ |
| r | |
| A1- <--- Vr --/\/\/\----| |
| | |
| | |
| |--------------------------|
========
:number_of_ports: The number of ports connected on the DAQ. Each port requires 2 DAQ Channels
one for the source voltage and one for the Voltage drop over the
resistor r (V+ - Vr) allows us to detect the current.
:resistor_value: The resistance of r. Typically a few milliOhm
:downsample: The number of samples combined to create one Power point. If set to one
each sample corresponds to one reported power point.
:sampling_rate: The rate at which DAQ takes a sample from each channel.
"""
# pylint: disable=F0401,E1101,W0621
import os
import sys
import csv
import time
import threading
from Queue import Queue, Empty
import numpy
from PyDAQmx import Task
from PyDAQmx.DAQmxFunctions import DAQmxGetSysDevNames
from PyDAQmx.DAQmxTypes import int32, byref, create_string_buffer
from PyDAQmx.DAQmxConstants import (DAQmx_Val_Diff, DAQmx_Val_Volts, DAQmx_Val_GroupByScanNumber, DAQmx_Val_Auto,
DAQmx_Val_Acquired_Into_Buffer, DAQmx_Val_Rising, DAQmx_Val_ContSamps)
from daqpower import log
def list_available_devices():
    """Returns the list of DAQ devices visible to the driver."""
    bufsize = 2048  # ample even for the most pathological of situations
    name_buffer = create_string_buffer('\000' * bufsize)
    DAQmxGetSysDevNames(name_buffer, bufsize)
    return name_buffer.value.split(',')
class ReadSamplesTask(Task):
    # NI-DAQmx task that continuously acquires two voltage channels per port
    # (source voltage and resistor drop) and forwards each raw buffer to
    # `consumer.write()` (an AsyncWriter subclass).

    def __init__(self, config, consumer):
        Task.__init__(self)
        self.config = config
        self.consumer = consumer
        # Room for about one second of data across all channels
        # (2 channels per port per scan).
        self.sample_buffer_size = (self.config.sampling_rate + 1) * self.config.number_of_ports * 2
        self.samples_read = int32()
        self.remainder = []
        # create voltage channels
        for i in xrange(0, 2 * self.config.number_of_ports, 2):
            # Even channel: port source voltage, differential, +/- v_range.
            self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i]),
                                     '', DAQmx_Val_Diff,
                                     -config.v_range, config.v_range,
                                     DAQmx_Val_Volts, None)
            # Odd channel: drop over the shunt resistor, +/- dv_range.
            self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i + 1]),
                                     '', DAQmx_Val_Diff,
                                     -config.dv_range, config.dv_range,
                                     DAQmx_Val_Volts, None)
        # configure sampling rate (continuous acquisition)
        self.CfgSampClkTiming('',
                              self.config.sampling_rate,
                              DAQmx_Val_Rising,
                              DAQmx_Val_ContSamps,
                              self.config.sampling_rate)
        # register callbacks; EveryNCallback fires every sampling_rate // 2
        # samples, i.e. roughly twice per second.
        self.AutoRegisterEveryNSamplesEvent(DAQmx_Val_Acquired_Into_Buffer, self.config.sampling_rate // 2, 0)
        self.AutoRegisterDoneEvent(0)

    def EveryNCallback(self):
        # Pull everything currently buffered and hand it to the consumer
        # together with the number of samples actually read.
        samples_buffer = numpy.zeros((self.sample_buffer_size,), dtype=numpy.float64)
        self.ReadAnalogF64(DAQmx_Val_Auto, 0.0, DAQmx_Val_GroupByScanNumber, samples_buffer,
                           self.sample_buffer_size, byref(self.samples_read), None)
        self.consumer.write((samples_buffer, self.samples_read.value))

    def DoneCallback(self, status):  # pylint: disable=W0613,R0201
        return 0  # The function should return an integer
class AsyncWriter(threading.Thread):
    """Daemon thread that drains a queue and hands each item to do_write().

    Producers call write(); the run loop consumes items until stop() is
    called and the queue has fully drained.
    """

    def __init__(self, wait_period=1):
        super(AsyncWriter, self).__init__()
        self.daemon = True
        self.wait_period = wait_period  # seconds to block on an empty queue
        self.running = threading.Event()
        self._stop_signal = threading.Event()
        self._queue = Queue()

    def write(self, stuff):
        """Enqueue *stuff* for the writer thread; rejects writes after stop()."""
        if self._stop_signal.is_set():
            # Fixed message typo ("Attempting to writer to" in the original).
            raise IOError('Attempting to write to {} after it has been closed.'.format(self.__class__.__name__))
        self._queue.put(stuff)

    def do_write(self, stuff):
        """Subclasses implement the actual output operation."""
        raise NotImplementedError()

    def run(self):
        self.running.set()
        while True:
            # Drain any remaining items before honouring the stop signal.
            if self._stop_signal.is_set() and self._queue.empty():
                break
            try:
                self.do_write(self._queue.get(block=True, timeout=self.wait_period))
            except Empty:
                pass  # timed out on an empty queue; re-check the stop signal

    self_running_cleanup = None  # (placeholder removed)  # noqa

    def stop(self):
        """Signal the run loop to exit once the queue is drained."""
        self._stop_signal.set()

    def wait(self):
        """Block until the run loop has finished."""
        while self.running.is_set():
            time.sleep(self.wait_period)
class PortWriter(object):
    # Writes (power, voltage) rows for a single port to a CSV file.

    def __init__(self, path):
        self.path = path
        # Buffering of 0 = unbuffered, so samples hit disk immediately.
        # NOTE(review): unbuffered text mode is Python 2 only; Python 3
        # rejects buffering=0 for text files.
        self.fh = open(path, 'w', 0)
        self.writer = csv.writer(self.fh)
        # Header row written once on creation.
        self.writer.writerow(['power', 'voltage'])

    def write(self, row):
        # row is a [power, voltage] pair produced by SampleProcessor.
        self.writer.writerow(row)

    def close(self):
        self.fh.close()

    def __del__(self):
        self.close()
class SamplePorcessorError(Exception):
    # Raised for configuration/label errors in SampleProcessor.
    # NOTE(review): the class name misspells "Processor"; kept as-is for
    # backward compatibility with existing except clauses.
    pass
class SampleProcessor(AsyncWriter):
    # Converts raw (V, DV) sample buffers into per-port (power, voltage)
    # rows and writes them through one PortWriter per labelled port.

    def __init__(self, resistor_values, output_directory, labels):
        super(SampleProcessor, self).__init__()
        self.resistor_values = resistor_values
        self.output_directory = output_directory
        self.labels = labels
        self.number_of_ports = len(resistor_values)
        # One label per port is required; anything else is a config error.
        if len(self.labels) != self.number_of_ports:
            message = 'Number of labels ({}) does not match number of ports ({}).'
            raise SamplePorcessorError(message.format(len(self.labels), self.number_of_ports))
        self.port_writers = []

    def do_write(self, sample_tuple):
        # sample_tuple is (samples, number_of_samples) from ReadSamplesTask.
        # Samples are interleaved per scan: two channels per port — source
        # voltage V, then the drop DV over the shunt resistor; power is
        # P = V * (DV / R).
        samples, number_of_samples = sample_tuple
        for i in xrange(0, number_of_samples * self.number_of_ports * 2, self.number_of_ports * 2):
            for j in xrange(self.number_of_ports):
                V = float(samples[i + 2 * j])
                DV = float(samples[i + 2 * j + 1])
                P = V * (DV / self.resistor_values[j])
                self.port_writers[j].write([P, V])

    def start(self):
        # Open one CSV writer per label, then start the writer thread.
        for label in self.labels:
            port_file = self.get_port_file_path(label)
            writer = PortWriter(port_file)
            self.port_writers.append(writer)
        super(SampleProcessor, self).start()

    def stop(self):
        # Stop the writer thread, wait for it to drain, close all files.
        super(SampleProcessor, self).stop()
        self.wait()
        for writer in self.port_writers:
            writer.close()

    def get_port_file_path(self, port_id):
        # Map a known port label to its CSV output path.
        if port_id in self.labels:
            return os.path.join(self.output_directory, port_id + '.csv')
        else:
            raise SamplePorcessorError('Invalid port ID: {}'.format(port_id))

    def __del__(self):
        self.stop()
class DaqRunner(object):
    # Owns one acquisition pipeline: a ReadSamplesTask producing raw sample
    # buffers and a SampleProcessor consuming them into per-port CSV files.

    @property
    def number_of_ports(self):
        """Number of measured ports, as configured."""
        return self.config.number_of_ports

    def __init__(self, config, output_directory):
        self.config = config
        self.processor = SampleProcessor(config.resistor_values, output_directory, config.labels)
        self.task = ReadSamplesTask(config, self.processor)
        self.is_running = False

    def start(self):
        # The consumer thread starts before acquisition so no buffer is lost.
        log.debug('Starting sample processor.')
        self.processor.start()
        log.debug('Starting DAQ Task.')
        self.task.StartTask()
        self.is_running = True
        log.debug('Runner started.')

    def stop(self):
        # Stop acquisition first, then drain and close the consumer.
        self.is_running = False
        log.debug('Stopping DAQ Task.')
        self.task.StopTask()
        log.debug('Stopping sample processor.')
        self.processor.stop()
        log.debug('Runner stopped.')

    def get_port_file_path(self, port_id):
        """Return the CSV output path for *port_id* (delegates to processor)."""
        return self.processor.get_port_file_path(port_id)
if __name__ == '__main__':
from collections import namedtuple
DeviceConfig = namedtuple('DeviceConfig', ['device_id', 'channel_map', 'resistor_values',
'v_range', 'dv_range', 'sampling_rate',
'number_of_ports', 'labels'])
channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
resistor_values = [0.005]
labels = ['PORT_0']
dev_config = DeviceConfig('Dev1', channel_map, resistor_values, 2.5, 0.2, 10000, len(resistor_values), labels)
if not len(sys.argv) == 3:
print 'Usage: {} OUTDIR DURATION'.format(os.path.basename(__file__))
sys.exit(1)
output_directory = sys.argv[1]
duration = float(sys.argv[2])
print "Avialable devices:", list_availabe_devices()
runner = DaqRunner(dev_config, output_directory)
runner.start()
time.sleep(duration)
runner.stop()
| |
"""
Functions:
num_headers Guess the number of headers in a matrix.
"""
# Data Types.  Bit flags describing the lexical type of a cell's contents.
CHAR, INT, FLOAT, EMPTY = 1, 2, 4, 8
# Semantic Types.  Bit flags describing a cell's candidate role in the
# matrix; a cell may carry several flags until the rules narrow them down.
HEAD, SAMPLE, ANNOT, VALUE, BLANK = 1, 2, 4, 8, 16
def _rule_no_first_row_annots(matrix, num_rows, num_cols, datatype, semtype):
# No ANNOT in the first row.
changed = False
for j in range(num_cols):
if semtype[0][j] & ANNOT:
semtype[0][j] ^= ANNOT
changed = True
return changed
def _rule_first_row_sample(matrix, num_rows, num_cols, datatype, semtype):
# SAMPLE can only be in the first row.
changed = False
for i in range(1, num_rows):
for j in range(num_cols):
if semtype[i][j] & SAMPLE:
semtype[i][j] ^= SAMPLE
changed = True
return changed
def _rule_first_row_col_head(matrix, num_rows, num_cols, datatype, semtype):
# HEAD can only be in the first row or column.
changed = False
for i in range(1, num_rows):
for j in range(1, num_cols):
if semtype[i][j] & HEAD:
semtype[i][j] ^= HEAD
changed = True
return changed
## # RULE 3: If the majority of the potential values in the matrix
## # are floating points, then all values must be floating
## # points.
## # This won't work. E.g. if you use an algorithm to zero-fill
## # missing values.
## value_type_is = INT | FLOAT | EMPTY
## int_values = float_values = 0
## for i in range(num_rows):
## for j in range(num_cols):
## if not (semtype[i][j] & VALUE):
## continue
## if datatype[i][j] == INT:
## int_values += 1
## elif datatype[i][j] == FLOAT:
## float_values += 1
## total = int_values + float_values
## if float_values >= total/2.0:
## # Values must be FLOAT or EMPTY.
## value_type_is = FLOAT | EMPTY
## for i in range(num_rows):
## for j in range(num_cols):
## if not (semtype[i][j] & VALUE):
## continue
## if value_type_is | datatype[i][j] != value_type_is:
## semtype[i][j] ^= VALUE
def _rule_no_values_then_is_head(
matrix, num_rows, num_cols, datatype, semtype):
# If there are no VALUES in a column, then the first row, from
# this down to the first column, must all be HEAD.
changed = False
for j in range(num_cols):
any_values = False
for i in range(num_rows):
if semtype[i][j] & VALUE:
any_values = True
break
if any_values:
continue
for jj in range(j + 1):
assert semtype[0][jj] & HEAD, "Missing header."
if semtype[0][jj] != HEAD:
semtype[0][jj] = HEAD
changed = True
return changed
def _rule_no_broken_values(matrix, num_rows, num_cols, datatype, semtype):
# In each row or column, the VALUEs can only appear at the end.
changed = False
for i in range(num_rows):
in_value = True
for j in range(num_cols - 1, -1, -1):
if in_value and not (semtype[i][j] & VALUE):
in_value = False
elif not in_value and (semtype[i][j] & VALUE):
semtype[i][j] ^= VALUE
changed = True
for j in range(num_cols):
in_value = True
for i in range(num_rows - 1, -1, -1):
if in_value and not (semtype[i][j] & VALUE):
in_value = False
elif not in_value and (semtype[i][j] & VALUE):
semtype[i][j] ^= VALUE
changed = True
return changed
def _rule_no_broken_head1(matrix, num_rows, num_cols, datatype, semtype):
# In each row, the header must start from column 0. There can't
# be a cell with no HEAD followed by one with HEAD. Same with
# columns.
changed = False
for i in range(num_rows):
in_header = True
for j in range(num_cols):
if in_header and not (semtype[i][j] & HEAD):
in_header = False
elif not in_header and (semtype[i][j] & HEAD):
semtype[i][j] ^= HEAD
changed = True
for j in range(num_cols):
in_header = True
for i in range(num_rows):
if in_header and not (semtype[i][j] & HEAD):
in_header = False
elif not in_header and (semtype[i][j] & HEAD):
semtype[i][j] ^= HEAD
changed = True
return changed
def _rule_no_broken_head2(matrix, num_rows, num_cols, datatype, semtype):
# If a cell is a HEAD, then all cells preceeding can only be HEAD.
changed = False
in_header = False
for j in range(num_cols - 1, -1, -1):
if semtype[0][j] == HEAD:
in_header = True
elif in_header and semtype[0][j] != HEAD:
semtype[0][j] = HEAD
changed = True
in_header = False
for i in range(num_rows - 1, -1, -1):
if semtype[i][0] == HEAD:
in_header = True
elif in_header and semtype[i][0] != HEAD:
semtype[i][0] = HEAD
changed = True
return changed
## # RULE 5: Label BLANK cells if there is a potential HEAD in the
## # above it, a potential HEAD to its left, and no FLOATs
## # anywhere to the right of it or below it.
## # This doesn't work:
## # <HEAD> <HEAD> <HEAD> <SAMPLE>
## # <HEAD> <BLANK> <BLANK> <VALUE>
## # <ANNOT> <ANNOT> <ANNOT> <VALUE>
## # If the <ANNOT> are numbers (e.g. GWEIGHT), then won't detect.
## could_be_BLANK = {} # (i, j) -> 1
## for i in range(1, num_rows):
## for j in range(1, num_rows):
## if not (semtype[i][0] & HEAD) or not (semtype[0][j] & HEAD):
## continue
## any_floats = False
## for ii in range(i+1, num_rows):
## if datatype[ii][j] == FLOAT:
## any_floats = True
## break
## if any_floats:
## continue
## for jj in range(j+1, num_cols):
## if datatype[i][jj] == FLOAT:
## any_floats = True
## break
## if any_floats:
## continue
## could_be_BLANK[(i, j)] = 1
## # Start with (1, 1) as BLANK. Then add one row and column at a
## # time, making sure everything I added is blank.
## # X X X X
## # X X
## # X X
## max_row = max_col = 1
## just_added_row = False
## while True:
## if not just_added_row:
## new_row, new_col = max_row+1, max_col
## just_added_row = True
## else:
## new_row, new_col = max_row, max_col+1
## just_added_row = False
## all_blank = True
## for i in range(1, new_row+1):
## for j in range(1, new_col+1):
## if (i, j) not in could_be_BLANK:
## all_blank = False
## # If everything is BLANK, then accept the new rows and columns
## # and try the next one.
## if all_blank:
## max_row, max_col = new_row, new_col
## just_added_row = False
## # If not everything is blank, and we just added a column, then
## # we've already tried everything, and there's no more blanks.
## elif not just_added_row:
## break
## if (max_row, max_col) not in could_be_BLANK:
## max_row = max_col = 0
## for i in range(1, max_row+1):
## for j in range(1, max_col+1):
## semtype[i][j] = BLANK
## for i in range(1, max_row+1):
## semtype[i][0] = HEAD
## for j in range(1, max_col+1):
## semtype[0][j] = HEAD
## for i in range(1, max_row+1):
## for j in range(max_col+1, num_cols):
## semtype[i][j] = ANNOT
## for j in range(1, max_col+1):
## for i in range(max_row+1, num_rows):
## semtype[i][j] = ANNOT
def _rule_no_broken_blank(matrix, num_rows, num_cols, datatype, semtype):
    # RULE: a BLANK cell must sit inside a solid rectangle of BLANKs
    # anchored at (1, 1), and must have HEAD cells in its first row
    # and first column.  Strip the BLANK flag from any violator.
    changed = False
    # Collect candidates up front so the flag-stripping below scans a
    # stable list.
    candidates = [(r, c)
                  for r in range(num_rows)
                  for c in range(num_cols)
                  if semtype[r][c] & BLANK]
    for r, c in candidates:
        # Everything strictly inside the rectangle above/left of this
        # cell (excluding row 0 and column 0) must still be BLANK.
        rect_ok = all(
            semtype[rr][cc] & BLANK
            for rr in range(1, r) for cc in range(1, c))
        heads_ok = semtype[r][0] & HEAD and semtype[0][c] & HEAD
        if not rect_ok or not heads_ok:
            semtype[r][c] ^= BLANK
            changed = True
    return changed
def _rule_known_headers(matrix, num_rows, num_cols, datatype, semtype):
    # RULE: cells in the first row or first column whose text matches a
    # well-known header label are forced to HEAD.  Cell (0, 0) is
    # skipped because PCL files allow an arbitrary name there.
    KNOWN_COL_HEADERS = [
        "GID", "NA", "ID", "NAME", "LOCUSLINK",
        "GWEIGHT", "GORDER", "GCLUSTER"]
    KNOWN_ROW_HEADERS = ["GID", "AID", "EWEIGHT", "EORDER", "ACLUSTER"]
    changed = False
    if not (num_rows and num_cols):
        return changed
    if not semtype[0][0] & HEAD:
        return changed
    # Walk the first row while the cells can still be headers.
    first_row = [cell.upper() for cell in matrix[0]]
    for col in range(1, num_cols):
        if not semtype[0][col] & HEAD:
            break
        if first_row[col] in KNOWN_COL_HEADERS and semtype[0][col] != HEAD:
            semtype[0][col] = HEAD
            changed = True
    # Walk the first column the same way.
    first_col = [row[0].upper() for row in matrix]
    for row_i in range(1, num_rows):
        if not semtype[row_i][0] & HEAD:
            break
        if first_col[row_i] in KNOWN_ROW_HEADERS and semtype[row_i][0] != HEAD:
            semtype[row_i][0] = HEAD
            changed = True
    return changed
def _rule_no_values_by_head(
        matrix, num_rows, num_cols, datatype, semtype):
    # RULE: there are no VALUEs below a definite column HEAD, nor to
    # the right of a definite row HEAD.
    changed = False

    def _strip_value(r, c):
        # Drop the VALUE bit from one cell; report whether it was set.
        if semtype[r][c] & VALUE:
            semtype[r][c] ^= VALUE
            return True
        return False

    # Leading columns whose top cell is exactly HEAD.
    col = 0
    while col < num_cols and semtype[0][col] == HEAD:
        for row in range(num_rows):
            changed = _strip_value(row, col) or changed
        col += 1
    # Leading rows whose first cell is exactly HEAD.
    row = 0
    while row < num_rows and semtype[row][0] == HEAD:
        for col in range(num_cols):
            changed = _strip_value(row, col) or changed
        row += 1
    return changed
def _rule_head_around_blank(matrix, num_rows, num_cols, datatype, semtype):
    # RULE: a cell with a definite HEAD above it and a definite HEAD to
    # its left must itself be BLANK.
    changed = False
    for r in range(1, num_rows):
        for c in range(1, num_cols):
            st = semtype[r][c]
            # Only consider cells that could be BLANK but are not yet
            # resolved to exactly BLANK.
            if not st & BLANK or st == BLANK:
                continue
            if semtype[r][0] == HEAD and semtype[0][c] == HEAD:
                semtype[r][c] = BLANK
                changed = True
    return changed
def _rule_no_head_around_no_blank(
        matrix, num_rows, num_cols, datatype, semtype):
    # RULE: a non-BLANK cell cannot have definite HEADs both above and
    # to its left; drop the HEAD possibility from whichever side is
    # still ambiguous.
    changed = False
    for r in range(1, num_rows):
        for c in range(1, num_cols):
            if semtype[r][c] & BLANK:
                continue
            left, top = semtype[r][0], semtype[0][c]
            # Both definitely HEAD would contradict this cell not
            # being BLANK.
            assert left != HEAD or top != HEAD, "Ambiguous annotation."
            if left == HEAD and top & HEAD:
                semtype[0][c] ^= HEAD
                changed = True
            elif top == HEAD and left & HEAD:
                semtype[r][0] ^= HEAD
                changed = True
    return changed
def _rule_first_values_are_int(
        matrix, num_rows, num_cols, datatype, semtype):
    # RULE: If the first columns of VALUEs are INT or EMPTY, and the
    # remaining are FLOAT or EMPTY, then the first columns should
    # be relabeled as ANNOT.  (e.g. Gene IDs).
    #
    # <HEAD1>     <HEAD2>     <HEAD3>     <SAMPLE1>
    # <ANNOT/STR> <ANNOT/STR> <ANNOT/INT> <VALUE/FLOAT>  Last ANNOT is INT.
    # <ANNOT/STR> <ANNOT/INT> <ANNOT/INT> <VALUE/FLOAT>  All ANNOTs are INTs.
    # <ANNOT/INT> <ANNOT/STR> <ANNOT/INT> <VALUE/FLOAT>
    # Find first column that contains a non-EMPTY VALUE.
    col = None
    for j in range(num_cols):
        for i in range(num_rows):
            if semtype[i][j] & VALUE and not (datatype[i][j] & EMPTY):
                col = j
                break
        if col is not None:
            break
    else:
        return False
    if col + 1 >= num_cols:  # only 1 column of values.
        return False
    # Classify each value column: FLOAT dominates INT, INT dominates
    # EMPTY.
    types = [None] * num_cols
    for j in range(col, num_cols):
        x = [datatype[i][j] for i in range(num_rows)
             if semtype[i][j] & VALUE]
        dt = EMPTY
        if FLOAT in x:
            dt = FLOAT
        elif INT in x:  # only INT if there are no FLOATs.
            dt = INT
        types[j] = dt
    # Count the run of leading INT columns, then the FLOAT columns
    # that follow.
    j = col
    while j < num_cols and types[j] == INT:
        j += 1
    num_INT = j - col
    num_FLOAT = 0
    for j in range(j, num_cols):
        if types[j] == FLOAT:
            num_FLOAT += 1
    if not num_INT or not num_FLOAT:
        return False
    if col + num_INT + num_FLOAT != num_cols:  # some VALUEs are not FLOAT.
        return False
    # Relabel the leading INT columns: drop their VALUE possibility.
    # BUGFIX: `changed` was previously only assigned inside the loop
    # body, so `return changed` could raise NameError when no cell was
    # actually relabeled.
    changed = False
    for i in range(num_rows):
        for j in range(col, col + num_INT):
            if semtype[i][j] & VALUE:
                semtype[i][j] ^= VALUE
                changed = True
    return changed
def _rule_scale_factor_no_value(
        matrix, num_rows, num_cols, datatype, semtype):
    # RULE: If the column header is SCALE_FACTOR, the row contains
    # ANNOT and not VALUE.
    #
    # <HEAD>        <HEAD>  <HEAD>  <SAMPLE> <SAMPLE>
    # DESCRIPTION   <BLANK> <BLANK> <ANNOT>  <ANNOT>
    # SCALE_FACTOR  <BLANK> <BLANK> <ANNOT>  <ANNOT>
    #
    # res_format generates the DESCRIPTION and SCALE_FACTOR column
    # annotations.  Unfortunately, SCALE_FACTOR contains all numbers,
    # so it can be interpreted as gene expression values.  Make sure
    # this is interpreted as an annotation.
    if num_cols < 2:
        return False
    # Look for SCALE_FACTOR (or SCALE FACTOR, SCALE_FACTORS, etc.) in
    # the first column.
    sf_row = None
    for i in range(num_rows):
        x = matrix[i][0].upper().strip()
        if x.startswith("SCALE") and x.endswith("FACTOR"):
            sf_row = i
            break
    if sf_row is None:
        return False
    # Find the first non-BLANK cell after the header.
    col = None
    for j in range(1, num_cols):
        if semtype[sf_row][j] != BLANK:
            col = j
            break
    # BUGFIX: if every remaining cell in the row is BLANK, there is
    # nothing to relabel; previously col stayed None and the range()
    # calls below raised TypeError.
    if col is None:
        return False
    # Make sure all the cells can be ANNOTs.
    for j in range(col, num_cols):
        if not (semtype[sf_row][j] & ANNOT):
            return False
    # Make sure none of the cells can be VALUEs.
    changed = False
    for j in range(col, num_cols):
        if semtype[sf_row][j] & VALUE:
            changed = True
            semtype[sf_row][j] ^= VALUE
    return changed
# Memo for the most recent num_headers() computation.
NUM_HEADERS_CACHE = None  # tuple of (matrix, (nrow, ncol))
def num_headers(matrix):
    """Return (# row headers, # col headers)."""
    # _num_headers_h is expensive, so cache the result for the most
    # recently seen matrix.  Note: the cache key is compared by value
    # (==), not identity, so a structurally equal matrix hits.
    global NUM_HEADERS_CACHE
    if NUM_HEADERS_CACHE and matrix != NUM_HEADERS_CACHE[0]:
        NUM_HEADERS_CACHE = None
    if NUM_HEADERS_CACHE is None:
        x = _num_headers_h(matrix)
        NUM_HEADERS_CACHE = (matrix, x)
    x1, x2 = NUM_HEADERS_CACHE
    assert matrix == x1
    return x2
def _print_matrix_debug(matrix, start_row, nrows, ncols):
    # Debugging helper: print up to nrows rows of matrix starting at
    # start_row, truncating each row to the first ncols columns.
    # (Python 2 print statement.)
    end_row = min(start_row+nrows, len(matrix))
    for i in range(start_row, end_row):
        print i, matrix[i][:ncols]
def _num_headers_h(matrix):
    """Heuristically infer (hrows, hcols) for matrix.

    Types every cell (CHAR/INT/FLOAT/EMPTY), iteratively applies the
    constraint RULES below to narrow each cell's possible semantic
    types (HEAD/SAMPLE/ANNOT/VALUE/BLANK, stored as a bitmask), and
    then grows the largest bottom-right rectangle of VALUE cells; the
    rectangle's top-left corner gives the header counts.
    """
    # Try to find the number of rows and columns that contain header
    # information.
    # CASE 1: No headers. All <VALUES>
    # CASE 2: 1 row header, 1 column header.
    # <HEAD> <SAMPLE1> <SAMPLE2> [...]
    # <ANNOT> <VALUE> <VALUE>
    # CASE 3: 1 row header, n column headers.
    # <HEAD1> <HEAD2> <HEAD3> <SAMPLE1> <SAMPLE2> <SAMPLE3>
    # <ANNOT> <ANNOT> <ANNOT> <VALUE> <VALUE> <VALUE>
    # CASE 4: n row headers, 1 column headers.
    # <HEAD1> <SAMPLE1> <SAMPLE2> <SAMPLE3>
    # <HEAD4> <ANNOT> <ANNOT> <ANNOT>
    # <HEAD5> <ANNOT> <ANNOT> <ANNOT>
    # <ANNOT> <VALUE> <VALUE> <VALUE>
    # CASE 5: n row headers, n column headers.
    # <HEAD1> <HEAD2> <HEAD3> <SAMPLE1> <SAMPLE2> <SAMPLE3>
    # <HEAD4> <BLANK> <BLANK> <ANNOT> <ANNOT> <ANNOT>
    # <HEAD5> <BLANK> <BLANK> <ANNOT> <ANNOT> <ANNOT>
    # <ANNOT> <ANNOT> <ANNOT> <VALUE> <VALUE> <VALUE>
    #
    # Possible (semantic type x data type) combinations:
    # 1 2 4 8
    # 1 HEAD CHAR INT FLOAT
    # 2 SAMPLE CHAR INT FLOAT
    # 4 ANNOT CHAR INT FLOAT EMPTY
    # 8 VALUE INT FLOAT EMPTY
    # 16 BLANK EMPTY
    #
    # Challenges:
    # - It's hard to distinguish between ANNOT, VALUE, and BLANK when
    # they are EMPTY.
    # - It's hard to distinguish between HEADs and SAMPLEs.
    #
    # RULE: No ANNOT in the first row.
    # RULE: SAMPLE can only be in the first row.
    # RULE: HEAD can only be in the first row or column.
    # RULE: If there are no VALUES in a column, then the first row,
    # from this down to the first column, must all be HEAD.
    # RULE: In each row, the header must start from column 0. There
    # can't be a cell with no HEAD followed by one with HEAD.
    # RULE: If a cell is a HEAD, then all cells preceeding can only be
    # HEAD.
    # RULE: BLANKs can only be preceeded by BLANKs from (1, 1).
    # BLANKs must have headers in the first row and column.
    # RULE: In each row or column, the VALUEs can only appear from
    # the end.
    # RULE: If the first row or column (except for (0, 0), because
    # PCL files allow different names) match known headers,
    # then set them to HEAD.
    # RULE: There are no VALUEs under column HEAD or to the right of
    # row HEAD.
    # RULE: If a cell has a HEAD on top and left, that cell must be
    # BLANK.
    # RULE: If a cell is not blank, then it cannot have a HEAD on the
    # top and left.
    # RULE: If the first columns of VALUEs are INT or EMPTY, and the
    # remaining are FLOAT or EMPTY, then the first columns should
    # be relabeled as ANNOT. (e.g. Gene IDs).
    # RULE: If the column header is SCALE_FACTOR, the row contains
    # ANNOT and not VALUE.
    RULES = [
        _rule_no_first_row_annots,
        _rule_first_row_sample,
        _rule_first_row_col_head,
        _rule_no_values_then_is_head,
        _rule_no_broken_head1,
        _rule_no_broken_head2,
        _rule_no_broken_blank,
        _rule_no_broken_values,
        _rule_known_headers,
        _rule_no_values_by_head,
        _rule_head_around_blank,
        _rule_no_head_around_no_blank,
        _rule_first_values_are_int,
        _rule_scale_factor_no_value,
        ]
    if not matrix:
        return 0, 0
    num_rows, num_cols = len(matrix), len(matrix[0])
    # Make sure each row contains the same number of columns.
    for row in matrix:
        assert len(row) == num_cols, "matrix row length mismatch"
    # This is REALLY SLOW for big matrices. Optimize by assuming a
    # maximum number of header rows. Just look at the first rows for
    # the header.
    MAX_HEADER_ROWS = 100
    # 50 rows might not be sufficient for affymetrix arrays. U133Av2
    # has 62 AFFX genes that may or may not have annotations.
    #MAX_HEADER_ROWS = 50
    matrix = matrix[:MAX_HEADER_ROWS]
    num_rows = len(matrix)
    # Figure out the data type for each cell in the matrix.
    #CHAR, INT, FLOAT, EMPTY = 1, 2, 4, 8
    datatype = [[None] * num_cols for i in range(num_rows)]
    for i in range(num_rows):
        for j in range(num_cols):
            x = matrix[i][j]
            if x.strip() == "":
                dt = EMPTY
            elif x.strip().lower() == "null":
                dt = EMPTY
            elif _is_int(x):
                dt = INT
            elif _is_float(x):
                dt = FLOAT
            else:
                dt = CHAR
            datatype[i][j] = dt
    # Make an initial guess at the semantic types of each cell.
    #HEAD, SAMPLE, ANNOT, VALUE, BLANK = 1, 2, 4, 8, 16
    semtype = [[0] * num_cols for i in range(num_rows)]
    for i in range(num_rows):
        for j in range(num_cols):
            x = datatype[i][j]
            if x == CHAR:
                st = HEAD | SAMPLE | ANNOT
                # "NA" and "-" are missing-value markers, so a CHAR
                # cell containing them may still be a VALUE.
                if matrix[i][j].upper() in ["NA", "-"]:
                    st = st | VALUE
            elif x == INT:
                st = HEAD | SAMPLE | ANNOT | VALUE
            elif x == FLOAT:
                st = HEAD | SAMPLE | ANNOT | VALUE
            elif x == EMPTY:
                st = ANNOT | VALUE | BLANK
                if i == 0:
                    st = st | HEAD
            else:
                raise AssertionError
            semtype[i][j] = st
    # Apply the rules to guess the right types of each cell of the
    # matrix.  Iterate until a fixed point (no rule changes anything).
    iteration = 0
    changed = True
    while changed:
        #_print_matrix_debug(semtype, 0, 5, 8)
        iteration += 1
        changed = False
        for rule_fn in RULES:
            c = rule_fn(matrix, num_rows, num_cols, datatype, semtype)
            changed = changed or c
    # Look for the VALUEs. Start looking at the bottom right of the
    # MATRIX, and add one column and row at a time.
    first_row, first_col = num_rows - 1, num_cols - 1
    just_added_row = False
    while True:
        # Alternate between trying to grow upward (a row) and
        # leftward (a column).
        if not just_added_row:
            new_row, new_col = first_row - 1, first_col
            just_added_row = True
        else:
            new_row, new_col = first_row, first_col - 1
            just_added_row = False
        # Make sure the rows and cols are in bounds.
        if new_row < 0 or new_col < 0:
            if just_added_row:
                continue
            break
        all_values = True
        for i in range(new_row, num_rows):
            for j in range(new_col, num_cols):
                if not semtype[i][j] & VALUE:
                    all_values = False
        # If everything is a VALUE, then accept the new rows and
        # columns and try the next one.
        if all_values:
            first_row, first_col = new_row, new_col
            just_added_row = False
        # If not everything is a value, and we just added a column,
        # then we've already tried everything, and there's no more
        # values.
        elif not just_added_row:
            break
    if not semtype[first_row][first_col] & VALUE:
        # There are no values.
        first_row, first_col = num_rows, num_cols
    hrows, hcols = first_row, first_col
    #print "DEBUG", hrows, hcols
    #_print_matrix_debug(datatype, 0, 10, 8)
    #_print_matrix_debug(semtype, 0, 10, 8)
    #import sys; sys.exit(0)
    # Don't allow this. It makes it too complicated to have to keep
    # track of matrices with and without signal values.
    ## If this is a matrix that only contains annotations, then there
    ## can only be one header row. (Because there are no headers for
    ## annotations).
    #if hcols == num_cols:
    # hrows = 1
    #assert hcols <= num_cols
    assert hcols < num_cols, \
        "It looks like there are annotations at the end of the matrix."
    assert hrows < MAX_HEADER_ROWS, "Too many header rows."
    return hrows, hcols
def _all_numeric(vec):
    # True iff every element of vec parses as a number.
    return all(_is_numeric(n) for n in vec)
def _is_numeric(n):
# empty strings are not numeric.
if n == "":
return False
try:
float(n)
except ValueError, x:
return False
return True
def _is_int(n):
try:
int(n)
except ValueError, x:
return False
return True
def _is_float(n):
try:
float(n)
except ValueError, x:
return False
return True
def _is_float_not_int(n):
    """Return True if string n parses as a float but not as an int."""
    if _is_int(n):
        return False
    try:
        float(n)
    # Version-neutral except clause; the bound exception was unused.
    except ValueError:
        return False
    return True
| |
#!/usr/bin/env python
from __future__ import print_function
__docformat__ = 'restructuredtext en'
import difflib
import operator
import os
import string
from subprocess import Popen, PIPE
import sys
import tabnanny
import tokenize
try:
import argparse
except ImportError:
raise ImportError(
"check_whitespace.py need Python module argparse introduced in"
" Python 2.7. It is available in pypi for compatibility."
" You can install it with this command 'pip install argparse'")
import reindent
from six import StringIO
# Marker file (relative to the repo root) recording the changeset for
# which the next whitespace check should be skipped (see -s option).
SKIP_WHITESPACE_CHECK_FILENAME = ".hg/skip_whitespace_check"
def get_parse_error(code):
    """
    Checks code for ambiguous tabs or other basic parsing issues.

    :param code: a string containing a file's worth of Python code
    :returns: a string describing the first parse problem encountered,
        or None if the code is ok
    """
    # tabnanny is (very frustratingly) written only to be used as a
    # script; driving it that way would require temp files and
    # subprocesses, so reach into its non-public token processor.
    reader = StringIO(code).readline
    try:
        tabnanny.process_tokens(tokenize.generate_tokens(reader))
    except tokenize.TokenError as err:
        return "Could not parse code: %s" % err
    except IndentationError as err:
        return "Indentation error: %s" % err
    except tabnanny.NannyNag as err:
        return "Ambiguous tab at line %d; line is '%s'." % (
            err.get_lineno(), err.get_line())
    return None
def clean_diff_line_for_python_bug_2142(diff_line):
    # Work around http://bugs.python.org/issue2142: append the "no
    # newline" marker for diff lines missing their trailing newline.
    if diff_line.endswith("\n"):
        return diff_line
    return diff_line + "\n\\ No newline at end of file\n"
def get_correct_indentation_diff(code, filename):
    """
    Generate a diff to make code correctly indented.

    :param code: a string containing a file's worth of Python code
    :param filename: the filename being considered (used in diff generation only)
    :returns: a unified diff to make code correctly indented, or
              None if code is already correctedly indented
    """
    source_buffer = StringIO(code)
    reindented_buffer = StringIO()
    fixer = reindent.Reindenter(source_buffer)
    fixer.run()
    fixer.write(reindented_buffer)
    reindented = reindented_buffer.getvalue()
    reindented_buffer.close()
    if code == reindented:
        return None
    diff_generator = difflib.unified_diff(
        code.splitlines(True), reindented.splitlines(True),
        fromfile=filename, tofile=filename + " (reindented)")
    # work around http://bugs.python.org/issue2142
    cleaned = map(clean_diff_line_for_python_bug_2142, diff_generator)
    return "".join(cleaned)
def is_merge():
    # Mercurial sets HG_PARENT2 (non-empty) only for merge commits.
    return bool(os.environ.get("HG_PARENT2"))
def parent_commit():
    # First parent of the commit being checked, as exported by
    # Mercurial; None when not running under a hook.
    return os.environ.get("HG_PARENT1")
class MercurialRuntimeError(Exception):
    """Raised when an invoked hg command writes anything to stderr."""
    pass
def run_mercurial_command(hg_command):
    """Run an hg command (given as a single string) and return its stdout.

    Raises MercurialRuntimeError if hg reports anything on stderr;
    exits the process if the hg executable cannot be launched.
    """
    hg_executable = os.environ.get("HG", "hg")
    argv = [hg_executable] + hg_command.split()
    # If you install your own mercurial version in your home,
    # hg_executable does not always have execution permission; run it
    # through the interpreter in that case.
    if not os.access(hg_executable, os.X_OK):
        argv.insert(0, sys.executable)
    try:
        hg_subprocess = Popen(argv, stdout=PIPE, stderr=PIPE)
    except OSError as e:
        print("Can't find the hg executable!", file=sys.stderr)
        print(e)
        sys.exit(1)
    hg_out, hg_err = hg_subprocess.communicate()
    if hg_err:
        raise MercurialRuntimeError(hg_err)
    return hg_out
def parse_stdout_filelist(hg_out_filelist):
    # Split hg's quoted, whitespace-separated template output into a
    # clean list of names, dropping empty entries.
    stripped = (name.strip(string.whitespace + "'")
                for name in hg_out_filelist.split())
    return [name for name in stripped if name]
def changed_files():
    """Names of files modified by the tip changeset."""
    template_output = run_mercurial_command("tip --template '{file_mods}'")
    return parse_stdout_filelist(template_output)
def added_files():
    """Names of files added by the tip changeset."""
    template_output = run_mercurial_command("tip --template '{file_adds}'")
    return parse_stdout_filelist(template_output)
def is_python_file(filename):
    # The hook only inspects Python sources, identified by extension.
    python_extension = ".py"
    return filename.endswith(python_extension)
def get_file_contents(filename, revision="tip"):
    """Return filename's contents at the given revision via `hg cat`."""
    command = "cat -r %s %s" % (revision, filename)
    return run_mercurial_command(command)
def save_commit_message(filename):
    """Write the tip changeset's commit message to filename."""
    commit_message = run_mercurial_command("tip --template '{desc}'")
    # Context manager ensures the file is closed even on write errors
    # (the previous open/write/close leaked on exceptions).
    with open(filename, "w") as save_file:
        save_file.write(commit_message)
def save_diffs(diffs, filename):
    """Join the collected diffs with blank lines and write to filename."""
    diff = "\n\n".join(diffs)
    # Context manager ensures the file is closed even on write errors.
    with open(filename, "w") as diff_file:
        diff_file.write(diff)
def should_skip_commit():
    """Return True if a previous failure flagged this commit to be skipped.

    The marker file is only honored when it names our parent commit.
    """
    if not os.path.exists(SKIP_WHITESPACE_CHECK_FILENAME):
        return False
    # Context manager ensures the file is closed even if read() fails.
    with open(SKIP_WHITESPACE_CHECK_FILENAME, "r") as whitespace_check_file:
        whitespace_check_changeset = whitespace_check_file.read()
    return whitespace_check_changeset == parent_commit()
def save_skip_next_commit():
    """Record the current parent commit so the next check run is skipped."""
    # Context manager ensures the file is closed even on write errors.
    with open(SKIP_WHITESPACE_CHECK_FILENAME, "w") as whitespace_check_file:
        whitespace_check_file.write(parent_commit())
def main(argv=None):
    """Check added/changed Python files for parse and indentation issues.

    Returns a nonzero exit status (blocking the commit) when problems
    are found, honoring the -n/-i/-p/-s options.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description="Pretxncommit hook for Mercurial to check for whitespace issues")
    parser.add_argument("-n", "--no-indentation",
                        action="store_const",
                        default=False,
                        const=True,
                        help="don't check indentation, just basic parsing"
                        )
    parser.add_argument("-i", "--incremental",
                        action="store_const",
                        default=False,
                        const=True,
                        help="only block on newly introduced indentation problems; ignore all others"
                        )
    parser.add_argument("-p", "--incremental-with-patch",
                        action="store_const",
                        default=False,
                        const=True,
                        help="only block on newly introduced indentation problems; propose a patch for all others"
                        )
    parser.add_argument("-s", "--skip-after-failure",
                        action="store_const",
                        default=False,
                        const=True,
                        help="when this pre-commit hook fails, don't run it on the next commit; "
                        "this lets you check in your changes and then check in "
                        "any necessary whitespace changes in the subsequent commit"
                        )
    args = parser.parse_args(argv)
    # -i and -s are incompatible; if you skip checking, you end up with a not-correctly-indented
    # file, which -i then causes you to ignore!
    if args.skip_after_failure and args.incremental:
        print("*** check whitespace hook misconfigured! -i and -s are incompatible.", file=sys.stderr)
        return 1
    if is_merge():
        # don't inspect merges: (a) they're complex and (b) they don't really introduce new code
        return 0
    if args.skip_after_failure and should_skip_commit():
        # we're set up to skip this one, so skip it, but
        # first, make sure we don't skip the next one as well :)
        os.remove(SKIP_WHITESPACE_CHECK_FILENAME)
        return 0
    block_commit = False
    diffs = []
    added_filenames = added_files()
    changed_filenames = changed_files()
    # Inspect every Python file touched by the tip changeset.
    for filename in filter(is_python_file, added_filenames + changed_filenames):
        code = get_file_contents(filename)
        parse_error = get_parse_error(code)
        if parse_error is not None:
            print("*** %s has parse error: %s" % (filename, parse_error), file=sys.stderr)
            block_commit = True
        else:
            # parsing succeeded, it is safe to check indentation
            if not args.no_indentation:
                was_clean = None  # unknown
                # only calculate was_clean if it will matter to us
                if args.incremental or args.incremental_with_patch:
                    if filename in changed_filenames:
                        old_file_contents = get_file_contents(filename, revision=parent_commit())
                        was_clean = get_correct_indentation_diff(old_file_contents, "") is None
                    else:
                        was_clean = True  # by default -- it was newly added and thus had no prior problems
                check_indentation = was_clean or not args.incremental
                if check_indentation:
                    indentation_diff = get_correct_indentation_diff(code, filename)
                    if indentation_diff is not None:
                        # In -p mode, pre-existing problems produce a
                        # patch but do not block the commit.
                        if was_clean or not args.incremental_with_patch:
                            block_commit = True
                        diffs.append(indentation_diff)
                        print("%s is not correctly indented" % filename, file=sys.stderr)
    if len(diffs) > 0:
        diffs_filename = ".hg/indentation_fixes.patch"
        save_diffs(diffs, diffs_filename)
        print("*** To fix all indentation issues, run: cd `hg root` && patch -p0 < %s" % diffs_filename, file=sys.stderr)
    if block_commit:
        save_filename = ".hg/commit_message.saved"
        save_commit_message(save_filename)
        print("*** Commit message saved to %s" % save_filename, file=sys.stderr)
        if args.skip_after_failure:
            save_skip_next_commit()
            print("*** Next commit attempt will not be checked. To change this, rm %s" % SKIP_WHITESPACE_CHECK_FILENAME, file=sys.stderr)
    return int(block_commit)
# Script entry point: exit status is nonzero when the commit is blocked.
if __name__ == '__main__':
    sys.exit(main())
| |
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Matches existing metadata with canonical information to identify
releases and tracks.
"""
from __future__ import division
import logging
import re
from munkres import Munkres
from unidecode import unidecode
from beets import plugins
from beets import config
from beets.util import levenshtein, plurality
from beets.util.enumeration import enum
from beets.autotag import hooks
# Distance parameters, all loaded from the beets configuration.
# Text distance weights: proportions on the normalized intuitive edit
# distance.
ARTIST_WEIGHT = config['match']['weight']['artist'].as_number()
ALBUM_WEIGHT = config['match']['weight']['album'].as_number()
# The weight of the entire distance calculated for a given track.
TRACK_WEIGHT = config['match']['weight']['track'].as_number()
# The weight of a missing track.
MISSING_WEIGHT = config['match']['weight']['missing'].as_number()
# The weight of an extra (unmatched) track.
UNMATCHED_WEIGHT = config['match']['weight']['unmatched'].as_number()
# These distances are components of the track distance (that is, they
# compete against each other but not ARTIST_WEIGHT and ALBUM_WEIGHT;
# the overall TRACK_WEIGHT does that).
TRACK_TITLE_WEIGHT = config['match']['weight']['track_title'].as_number()
# Used instead of a global artist penalty for various-artist matches.
TRACK_ARTIST_WEIGHT = config['match']['weight']['track_artist'].as_number()
# Added when the indices of tracks don't match.
TRACK_INDEX_WEIGHT = config['match']['weight']['track_index'].as_number()
# Track length weights: no penalty before GRACE, maximum (WEIGHT)
# penalty at GRACE+MAX discrepancy.
TRACK_LENGTH_GRACE = config['match']['weight']['track_length_grace'].as_number()
TRACK_LENGTH_MAX = config['match']['weight']['track_length_max'].as_number()
TRACK_LENGTH_WEIGHT = config['match']['weight']['track_length'].as_number()
# MusicBrainz track ID matches.
TRACK_ID_WEIGHT = config['match']['weight']['track_id'].as_number()
# Parameters for string distance function.
# Words that can be moved to the end of a string using a comma.
SD_END_WORDS = ['the', 'a', 'an']
# Reduced weights for certain portions of the string.
SD_PATTERNS = [
    (r'^the ', 0.1),
    (r'[\[\(]?(ep|single)[\]\)]?', 0.0),
    (r'[\[\(]?(featuring|feat|ft)[\. :].+', 0.1),
    (r'\(.*?\)', 0.3),
    (r'\[.*?\]', 0.3),
    (r'(, )?(pt\.|part) .+', 0.2),
]
# Replacements to use before testing distance.
SD_REPLACE = [
    (r'&', 'and'),
]
# Recommendation enumeration.
recommendation = enum('none', 'low', 'medium', 'strong', name='recommendation')
# Artist signals that indicate "various artists". These are used at the
# album level to determine whether a given release is likely a VA
# release and also on the track level to remove the penalty for
# differing artists.
VA_ARTISTS = (u'', u'various artists', u'various', u'va', u'unknown')
# Global logger.
log = logging.getLogger('beets')
# Primary matching functionality.
def _string_dist_basic(str1, str2):
    """Basic edit distance between two strings, ignoring
    non-alphanumeric characters and case. Comparisons are based on a
    transliteration/lowering to ASCII characters. Normalized by string
    length.
    """
    # Transliterate to ASCII and keep only lowercase alphanumerics.
    a = re.sub(r'[^a-z0-9]', '', unidecode(str1).lower())
    b = re.sub(r'[^a-z0-9]', '', unidecode(str2).lower())
    if not (a or b):
        # Two effectively-empty strings are identical.
        return 0.0
    return levenshtein(a, b) / float(max(len(a), len(b)))
def string_dist(str1, str2):
    """Gives an "intuitive" edit distance between two strings. This is
    an edit distance, normalized by the string length, with a number of
    tweaks that reflect intuition about text.
    """
    str1 = str1.lower()
    str2 = str2.lower()
    # Don't penalize strings that move certain words to the end. For
    # example, "the something" should be considered equal to
    # "something, the".  The slice strips ", word" (comma, space, word).
    for word in SD_END_WORDS:
        if str1.endswith(', %s' % word):
            str1 = '%s %s' % (word, str1[:-len(word)-2])
        if str2.endswith(', %s' % word):
            str2 = '%s %s' % (word, str2[:-len(word)-2])
    # Perform a couple of basic normalizing substitutions.
    for pat, repl in SD_REPLACE:
        str1 = re.sub(pat, repl, str1)
        str2 = re.sub(pat, repl, str2)
    # Change the weight for certain string portions matched by a set
    # of regular expressions. We gradually change the strings and build
    # up penalties associated with parts of the string that were
    # deleted.
    base_dist = _string_dist_basic(str1, str2)
    penalty = 0.0
    for pat, weight in SD_PATTERNS:
        # Get strings that drop the pattern.
        case_str1 = re.sub(pat, '', str1)
        case_str2 = re.sub(pat, '', str2)
        if case_str1 != str1 or case_str2 != str2:
            # If the pattern was present (i.e., it is deleted in the
            # current case), recalculate the distances for the
            # modified strings.
            case_dist = _string_dist_basic(case_str1, case_str2)
            case_delta = max(0.0, base_dist - case_dist)
            if case_delta == 0.0:
                continue
            # Shift our baseline strings down (to avoid rematching the
            # same part of the string) and add a scaled distance
            # amount to the penalties.
            str1 = case_str1
            str2 = case_str2
            base_dist = case_dist
            penalty += weight * case_delta
    dist = base_dist + penalty
    return dist
def current_metadata(items):
    """Returns the most likely artist and album for a set of Items.
    Each is determined by tag reflected by the plurality of the Items.
    """
    likelies = {}
    consensus = {}
    for field in ('artist', 'album', 'albumartist'):
        observed = [getattr(item, field) for item in items if item]
        likelies[field], freq = plurality(observed)
        # Consensus means every (truthy) item agreed on this field.
        consensus[field] = (freq == len(observed))
    # Prefer a unanimous, non-empty albumartist over the track artists.
    if consensus['albumartist'] and likelies['albumartist']:
        artist = likelies['albumartist']
    else:
        artist = likelies['artist']
    return artist, likelies['album'], consensus['artist']
def assign_items(items, tracks):
    """Given a list of Items and a list of TrackInfo objects, find the
    best mapping between them. Returns a mapping from Items to TrackInfo
    objects, a set of extra Items, and a set of extra TrackInfo
    objects. These "extra" objects occur when there is an unequal number
    of objects of the two types.
    """
    # Construct the cost matrix: one row per item, one column per
    # track.  (The previous loop used enumerate() but never used the
    # index; a comprehension is clearer.)
    costs = [[track_distance(item, track) for track in tracks]
             for item in items]
    # Find a minimum-cost bipartite matching.
    matching = Munkres().compute(costs)
    # Produce the output matching.
    mapping = dict((items[i], tracks[j]) for (i, j) in matching)
    extra_items = list(set(items) - set(mapping.keys()))
    extra_items.sort(key=lambda i: (i.disc, i.track, i.title))
    extra_tracks = list(set(tracks) - set(mapping.values()))
    extra_tracks.sort(key=lambda t: (t.index, t.title))
    return mapping, extra_items, extra_tracks
def track_index_changed(item, track_info):
    """Returns True if the item and track info index is different. Tolerates
    per disc and per release numbering.
    """
    # Accept either the within-disc index or the overall release index.
    acceptable = (track_info.medium_index, track_info.index)
    return item.track not in acceptable
def track_distance(item, track_info, incl_artist=False):
    """Determines the significance of a track metadata change. Returns a
    float in [0.0,1.0]. `incl_artist` indicates that a distance
    component should be included for the track artist (i.e., for
    various-artist releases).
    """
    # Distance and normalization accumulators; the result is
    # dist / dist_max, so components only count when their weight is
    # added to dist_max.
    dist, dist_max = 0.0, 0.0
    # Check track length.
    # If there's no length to check, apply no penalty.
    if track_info.length:
        # No penalty within GRACE seconds; linear up to GRACE+MAX.
        diff = abs(item.length - track_info.length)
        diff = max(diff - TRACK_LENGTH_GRACE, 0.0)
        diff = min(diff, TRACK_LENGTH_MAX)
        dist += (diff / TRACK_LENGTH_MAX) * TRACK_LENGTH_WEIGHT
        dist_max += TRACK_LENGTH_WEIGHT
    # Track title.
    dist += string_dist(item.title, track_info.title) * TRACK_TITLE_WEIGHT
    dist_max += TRACK_TITLE_WEIGHT
    # Track artist, if included.
    # Attention: MB DB does not have artist info for all compilations,
    # so only check artist distance if there is actually an artist in
    # the MB track data.
    if incl_artist and track_info.artist and \
            item.artist.lower() not in VA_ARTISTS:
        dist += string_dist(item.artist, track_info.artist) * \
            TRACK_ARTIST_WEIGHT
        dist_max += TRACK_ARTIST_WEIGHT
    # Track index.
    if track_info.index and item.track:
        if track_index_changed(item, track_info):
            dist += TRACK_INDEX_WEIGHT
        dist_max += TRACK_INDEX_WEIGHT
    # MusicBrainz track ID.
    if item.mb_trackid:
        if item.mb_trackid != track_info.track_id:
            dist += TRACK_ID_WEIGHT
        dist_max += TRACK_ID_WEIGHT
    # Plugin distances.
    plugin_d, plugin_dm = plugins.track_distance(item, track_info)
    dist += plugin_d
    dist_max += plugin_dm
    return dist / dist_max
def distance(items, album_info, mapping):
    """Determines how "significant" an album metadata change would be.
    Returns a float in [0.0,1.0]. `album_info` is an AlbumInfo object
    reflecting the album to be compared. `items` is a sequence of all
    Item objects that will be matched (order is not important).
    `mapping` is a dictionary mapping Items to TrackInfo objects; the
    keys are a subset of `items` and the values are a subset of
    `album_info.tracks`.
    """
    cur_artist, cur_album, _ = current_metadata(items)
    cur_artist = cur_artist or u''
    cur_album = cur_album or u''
    # These accumulate the possible distance components. The final
    # distance will be dist/dist_max.
    dist = 0.0
    dist_max = 0.0
    # Artist/album metadata.  Skip the artist penalty for VA releases.
    if not album_info.va:
        dist += string_dist(cur_artist, album_info.artist) * ARTIST_WEIGHT
        dist_max += ARTIST_WEIGHT
    dist += string_dist(cur_album, album_info.album) * ALBUM_WEIGHT
    dist_max += ALBUM_WEIGHT
    # Matched track distances.
    # NOTE: iteritems() is Python 2; change to items() if porting.
    for item, track in mapping.iteritems():
        dist += track_distance(item, track, album_info.va) * TRACK_WEIGHT
        dist_max += TRACK_WEIGHT
    # Extra and unmatched tracks.
    for track in set(album_info.tracks) - set(mapping.values()):
        dist += MISSING_WEIGHT
        dist_max += MISSING_WEIGHT
    for item in set(items) - set(mapping.keys()):
        dist += UNMATCHED_WEIGHT
        dist_max += UNMATCHED_WEIGHT
    # Plugin distances.
    plugin_d, plugin_dm = plugins.album_distance(items, album_info, mapping)
    dist += plugin_d
    dist_max += plugin_dm
    # Normalize distance, avoiding divide-by-zero.
    if dist_max == 0.0:
        return 0.0
    else:
        return dist / dist_max
def match_by_id(items):
    """If the items are tagged with a MusicBrainz album ID, returns an
    AlbumInfo object for the corresponding album. Otherwise, returns
    None.
    """
    # Collect the MB album IDs attached to the items, ignoring items
    # that have none.
    albumids = [item.mb_albumid for item in items if item.mb_albumid]
    if not albumids:
        log.debug('No album IDs found.')
        return None

    # Look the album up only when every tagged item agrees on the ID.
    # (All elements of `albumids` are non-empty strings, so a set
    # comparison is equivalent to -- and much clearer than -- the
    # previous reduce()-based consensus trick.)
    if len(set(albumids)) == 1:
        albumid = albumids[0]
        log.debug('Searching for discovered album ID: ' + albumid)
        return hooks._album_for_id(albumid)
    else:
        log.debug('No album ID consensus.')
        return None
def _recommendation(results):
    """Given a sorted list of AlbumMatch or TrackMatch objects, return a
    recommendation based on the results' distances.

    If the recommendation is higher than the configured maximum for
    certain situations, the recommendation will be downgraded to the
    configured maximum.

    `results` must be sorted by ascending distance (best match first);
    the thresholds below are read from the `match` section of config.
    """
    if not results:
        # No candidates: no recommendation.
        return recommendation.none

    # Basic distance thresholding: the best candidate's distance picks
    # the starting recommendation level.
    min_dist = results[0].distance
    if min_dist < config['match']['strong_rec_thresh'].as_number():
        # Strong recommendation level.
        rec = recommendation.strong
    elif min_dist <= config['match']['medium_rec_thresh'].as_number():
        # Medium recommendation level.
        rec = recommendation.medium
    elif len(results) == 1:
        # Only a single candidate.
        rec = recommendation.low
    elif results[1].distance - min_dist >= \
            config['match']['rec_gap_thresh'].as_number():
        # Gap between first two candidates is large.
        rec = recommendation.low
    else:
        # No conclusion.
        rec = recommendation.none

    # "Downgrades" in certain configured situations. Only album matches
    # carry the extra data (mapping, extra items/tracks) these checks
    # need, so track matches keep the threshold-based level.
    if isinstance(results[0], hooks.AlbumMatch):
        # Load the configured recommendation maxima (each one caps the
        # level when the corresponding trigger applies).
        max_rec = {}
        for trigger in 'non_mb_source', 'partial', 'tracklength', 'tracknumber':
            max_rec[trigger] = \
                config['match']['max_rec'][trigger].as_choice({
                    'strong': recommendation.strong,
                    'medium': recommendation.medium,
                    'low': recommendation.low,
                    'none': recommendation.none,
                })
        # Non-MusicBrainz source.
        if rec > max_rec['non_mb_source'] and \
                results[0].info.data_source != 'MusicBrainz':
            rec = max_rec['non_mb_source']
        # Partial match: some items or album tracks were left unmatched.
        if rec > max_rec['partial'] and \
                (results[0].extra_items or results[0].extra_tracks):
            rec = max_rec['partial']
        # Check track number and duration for each matched item.
        for item, track_info in results[0].mapping.items():
            # Track length differs beyond the allowed grace interval.
            if rec > max_rec['tracklength'] and \
                    item.length and track_info.length and \
                    abs(item.length - track_info.length) > TRACK_LENGTH_GRACE:
                rec = max_rec['tracklength']
            # Track number differs.
            if rec > max_rec['tracknumber'] and \
                    track_index_changed(item, track_info):
                rec = max_rec['tracknumber']
    return rec
def _add_candidate(items, results, info):
    """Given a candidate AlbumInfo object, attempt to add the candidate
    to the output dictionary of AlbumMatch objects. This involves
    checking the track count, ordering the items, checking for
    duplicates, and calculating the distance.
    """
    log.debug('Candidate: %s - %s' % (info.artist, info.album))

    # Skip albums we have already scored.
    if info.album_id in results:
        log.debug('Duplicate.')
        return

    # Pair up items with track info and measure the overall change.
    pairing, leftover_items, leftover_tracks = assign_items(items, info.tracks)
    change = distance(items, info, pairing)
    log.debug('Success. Distance: %f' % change)

    results[info.album_id] = hooks.AlbumMatch(change, info, pairing,
                                              leftover_items,
                                              leftover_tracks)
def tag_album(items, search_artist=None, search_album=None,
              search_id=None):
    """Bundles together the functionality used to infer tags for a
    set of items comprised by an album. Returns everything relevant:
        - The current artist.
        - The current album.
        - A list of AlbumMatch objects. The candidates are sorted by
          distance (i.e., best match first).
        - A recommendation.
    If search_artist and search_album or search_id are provided, then
    they are used as search terms in place of the current metadata.
    """
    # Get current metadata.
    cur_artist, cur_album, artist_consensus = current_metadata(items)
    log.debug('Tagging %s - %s' % (cur_artist, cur_album))

    # The output result (distance, AlbumInfo) tuples (keyed by MB album
    # ID).
    candidates = {}

    # Try to find album indicated by MusicBrainz IDs.
    id_info = match_by_id(items)
    if id_info:
        _add_candidate(items, candidates, id_info)
        rec = _recommendation(candidates.values())
        log.debug('Album ID match recommendation is ' + str(rec))
        if candidates and not config['import']['timid']:
            # If we have a very good MBID match, return immediately.
            # Otherwise, this match will compete against metadata-based
            # matches.
            if rec == recommendation.strong:
                log.debug('ID match.')
                return cur_artist, cur_album, candidates.values(), rec

    # Search terms.
    if not (search_artist and search_album):
        # No explicit search terms -- use current metadata.
        search_artist, search_album = cur_artist, cur_album
    log.debug(u'Search terms: %s - %s' % (search_artist, search_album))

    # Is this album likely to be a "various artist" release?
    va_likely = ((not artist_consensus) or
                 (search_artist.lower() in VA_ARTISTS) or
                 any(item.comp for item in items))
    log.debug(u'Album might be VA: %s' % str(va_likely))

    # Get the results from the data sources.
    if search_id:
        log.debug('Searching for album ID: ' + search_id)
        # _album_for_id returns a single AlbumInfo or None (see its use
        # in match_by_id above), not a sequence -- wrap it in a list so
        # the len() and iteration below work.
        id_cand = hooks._album_for_id(search_id)
        search_cands = [id_cand] if id_cand else []
    else:
        search_cands = hooks._album_candidates(items, search_artist,
                                               search_album, va_likely)
    log.debug(u'Evaluating %i candidates.' % len(search_cands))
    for info in search_cands:
        _add_candidate(items, candidates, info)

    # Sort and get the recommendation. MusicBrainz results win ties.
    candidates = sorted(candidates.itervalues(),
        key=lambda i: (i.distance, i.info.data_source != 'MusicBrainz'))
    rec = _recommendation(candidates)
    return cur_artist, cur_album, candidates, rec
def tag_item(item, search_artist=None, search_title=None,
             search_id=None):
    """Attempts to find metadata for a single track. Returns a
    `(candidates, recommendation)` pair where `candidates` is a list of
    TrackMatch objects. `search_artist` and `search_title` may be used
    to override the current metadata for the purposes of the MusicBrainz
    title; likewise `search_id`.
    """
    # Holds candidates found so far: keys are MBIDs; values are
    # (distance, TrackInfo) pairs.
    candidates = {}

    # First, try matching by MusicBrainz ID.
    trackid = search_id or item.mb_trackid
    if trackid:
        log.debug('Searching for track ID: ' + trackid)
        track_info = hooks._track_for_id(trackid)
        if track_info:
            dist = track_distance(item, track_info, incl_artist=True)
            candidates[track_info.track_id] = \
                    hooks.TrackMatch(dist, track_info)
            # If this is a good match, then don't keep searching.
            rec = _recommendation(candidates.values())
            if rec == recommendation.strong and not config['import']['timid']:
                log.debug('Track ID match.')
                return candidates.values(), rec

    # If we're searching by ID, don't proceed.
    # (`rec` is only read when `candidates` is non-empty, in which case
    # it was assigned in the ID-lookup branch above.)
    if search_id is not None:
        if candidates:
            return candidates.values(), rec
        else:
            return [], recommendation.none

    # Search terms.
    if not (search_artist and search_title):
        search_artist, search_title = item.artist, item.title
    log.debug(u'Item search terms: %s - %s' % (search_artist, search_title))

    # Get and evaluate candidate metadata.
    for track_info in hooks._item_candidates(item, search_artist, search_title):
        dist = track_distance(item, track_info, incl_artist=True)
        candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info)

    # Sort by distance and return with recommendation.
    log.debug('Found %i candidates.' % len(candidates))
    candidates = sorted(candidates.itervalues())
    rec = _recommendation(candidates)
    return candidates, rec
| |
'''
This module contais some common routines used by other samples.
'''
import numpy as np
import cv2
import os
from contextlib import contextmanager
import itertools as it
# File extensions the samples treat as loadable images.
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
    """A minimal attribute container: ``Bunch(a=1).a == 1``."""
    def __init__(self, **kw):
        for name, value in kw.items():
            setattr(self, name, value)

    def __str__(self):
        # Render as the underlying attribute dictionary.
        return str(self.__dict__)
def splitfn(fn):
    """Split a path into (directory, basename-without-extension, extension)."""
    directory, basename = os.path.split(fn)
    stem, ext = os.path.splitext(basename)
    return directory, stem, ext
def anorm2(a):
    """Squared Euclidean norm along the last axis of *a*."""
    return (a * a).sum(axis=-1)
def anorm(a):
    """Euclidean norm along the last axis of *a*."""
    squared = (a * a).sum(-1)
    return np.sqrt(squared)
def homotrans(H, x, y):
    """Apply the 3x3 homography *H* to the point(s) (x, y).

    Returns the transformed coordinates after perspective division.
    Works elementwise when x and y are arrays.
    """
    row0, row1, row2 = H[0], H[1], H[2]
    w = row2[0]*x + row2[1]*y + row2[2]
    u = row0[0]*x + row0[1]*y + row0[2]
    v = row1[0]*x + row1[1]*y + row1[2]
    return u/w, v/w
def to_rect(a):
    """Coerce *a* to a 2x2 float64 rectangle [[x0, y0], [x1, y1]].

    A two-element input is interpreted as a size anchored at the
    origin, i.e. (w, h) -> [[0, 0], [w, h]].
    """
    flat = np.ravel(a)
    if len(flat) == 2:
        flat = (0, 0, flat[0], flat[1])
    return np.array(flat, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
    """Build the 3x3 affine matrix that maps rectangle *src* onto *dst*.

    Both arguments accept anything to_rect() accepts.
    """
    src = to_rect(src)
    dst = to_rect(dst)
    # Per-axis scale, then the translation that aligns the corners.
    scale = (dst[1] - dst[0]) / (src[1] - src[0])
    shift = dst[0] - src[0] * scale
    return np.float64([[scale[0], 0,        shift[0]],
                       [0,        scale[1], shift[1]],
                       [0,        0,        1]])
def lookat(eye, target, up = (0, 0, 1)):
    """Camera pose looking from *eye* toward *target*.

    Returns (R, tvec): a 3x3 rotation whose rows are the camera's
    right, down and forward axes, and the translation -R.dot(eye).
    """
    fwd = np.asarray(target, np.float64) - eye
    fwd /= np.sqrt((fwd * fwd).sum(-1))
    right = np.cross(fwd, up)
    right /= np.sqrt((right * right).sum(-1))
    down = np.cross(fwd, right)
    R = np.float64([right, down, fwd])
    tvec = -np.dot(R, eye)
    return R, tvec
def mtx2rvec(R):
    """Convert a 3x3 rotation matrix to a Rodrigues rotation vector
    (axis scaled by the rotation angle).
    """
    # SVD of R - I: vt[0] spans the plane of rotation together with
    # vt[1]; the null direction is the rotation axis.
    w, u, vt = cv2.SVDecomp(R - np.eye(3))
    p = vt[0] + u[:,0]*w[0]    # same as np.dot(R, vt[0])
    # Cosine/sine of the rotation angle via projections in the plane.
    c = np.dot(vt[0], p)
    s = np.dot(vt[1], p)
    axis = np.cross(vt[0], vt[1])
    return axis * np.arctan2(s, c)
def draw_str(dst, target, s):
    """Draw string *s* on image *dst* at pixel position *target* = (x, y).

    Renders a dark drop shadow one pixel down-right first, then the
    white text on top, so the label stays readable on any background.
    """
    # Explicit unpacking replaces the Python-2-only tuple parameter
    # ``(x, y)`` (removed by PEP 3113); callers are unaffected.
    x, y = target
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
class Sketcher:
    """Interactive free-hand drawing helper for an OpenCV window.

    Shows ``dests[0]`` in the window and, while the left mouse button
    is held, draws line segments into every image in ``dests`` in
    lockstep, using the per-image colors returned by ``colors_func()``.
    """
    def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None              # last mouse position during a drag
        self.windowname = windowname
        self.dests = dests               # images that receive the strokes
        self.colors_func = colors_func   # () -> iterable of colors, one per dest
        self.dirty = False               # True once anything has been drawn
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)

    def show(self):
        # Refresh the window with the first destination image.
        cv2.imshow(self.windowname, self.dests[0])

    def on_mouse(self, event, x, y, flags, param):
        """Mouse callback: extend the stroke while the left button is held."""
        pt = (x, y)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
            # Connect the previous point to the current one in every image.
            for dst, color in zip(self.dests, self.colors_func()):
                cv2.line(dst, self.prev_pt, pt, color, 5)
            self.dirty = True
            self.prev_pt = pt
            self.show()
        else:
            # Button released (or never pressed): end the stroke.
            self.prev_pt = None
# Palette data from matplotlib/_cm.py. Each channel is a sequence of
# (x, y_left, y_right) segment points on [0, 1], interpolated between
# in make_cmap() below.
_jet_data =   {'red':   ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
                         (1, 0.5, 0.5)),
               'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
                         (0.91,0,0), (1, 0, 0)),
               'blue':  ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
                         (1, 0, 0))}

# Named palettes available to make_cmap().
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
    """Build an n-entry BGR uint8 color palette from cmap_data[name].

    Returns an (n, 3) uint8 array; channel order is BGR to match
    OpenCV image conventions.
    """
    data = cmap_data[name]
    xs = np.linspace(0.0, 1.0, n)
    eps = 1e-6
    channels = []
    for ch_name in ['blue', 'green', 'red']:
        xp = []
        yp = []
        # Duplicate each segment point at x and x+eps so np.interp
        # reproduces the left/right values of a piecewise definition.
        for x, y_left, y_right in data[ch_name]:
            xp.extend((x, x + eps))
            yp.extend((y_left, y_right))
        channels.append(np.interp(xs, xp, yp))
    return np.uint8(np.array(channels).T * 255)
def nothing(*arg, **kw):
    """No-op callback placeholder; accepts and ignores any arguments."""
    return None
def clock():
    """Current time in seconds, derived from OpenCV's tick counter."""
    return cv2.getTickCount() / cv2.getTickFrequency()
@contextmanager
def Timer(msg):
    """Context manager printing the wall-clock time of its body.

    ``with Timer('work'): ...`` prints ``work ... 12.34 ms``.
    """
    print msg, '...',
    start = clock()
    try:
        yield
    finally:
        # Report elapsed time even when the body raises.
        print "%.2f ms" % ((clock()-start)*1000)
class StatValue:
    """Exponentially smoothed scalar statistic.

    The first update() seeds the value; each later one blends the old
    value with the new sample using the smoothing coefficient.
    """
    def __init__(self, smooth_coef = 0.5):
        self.value = None              # None until the first sample
        self.smooth_coef = smooth_coef # weight kept on the old value

    def update(self, v):
        """Fold sample *v* into the smoothed value."""
        if self.value is None:
            self.value = v
            return
        keep = self.smooth_coef
        self.value = keep * self.value + (1.0 - keep) * v
class RectSelector:
    """Mouse-driven axis-aligned rectangle selection for an OpenCV window.

    While the left button is dragged, tracks the rectangle spanned by
    the press point and the current point; on release, the final
    (x0, y0, x1, y1) rectangle is passed to ``callback``.
    """
    def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None   # press position while a drag is active
        self.drag_rect = None    # current (x0, y0, x1, y1), or None

    def onmouse(self, event, x, y, flags, param):
        """Mouse callback implementing the press/drag/release state machine."""
        x, y = np.int16([x, y]) # BUG: int16 silently wraps for coordinates > 32767
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        if self.drag_start:
            if flags & cv2.EVENT_FLAG_LBUTTON:
                # Normalize so (x0, y0) is the top-left corner.
                xo, yo = self.drag_start
                x0, y0 = np.minimum([xo, yo], [x, y])
                x1, y1 = np.maximum([xo, yo], [x, y])
                self.drag_rect = None
                # Only keep a rectangle with non-zero area.
                if x1-x0 > 0 and y1-y0 > 0:
                    self.drag_rect = (x0, y0, x1, y1)
            else:
                # Button released: finish the drag and report it.
                rect = self.drag_rect
                self.drag_start = None
                self.drag_rect = None
                if rect:
                    self.callback(rect)

    def draw(self, vis):
        """Draw the in-progress rectangle onto *vis*; True if one was drawn."""
        if not self.drag_rect:
            return False
        x0, y0, x1, y1 = self.drag_rect
        cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
        return True

    @property
    def dragging(self):
        # True while a non-degenerate rectangle is being dragged.
        return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
    '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
    # Replicating the same iterator n times makes izip_longest pull
    # successive chunks of n items; the last chunk is padded with
    # fillvalue. (izip_longest is the Python 2 name of zip_longest.)
    args = [iter(iterable)] * n
    return it.izip_longest(fillvalue=fillvalue, *args)
def mosaic(w, imgs):
    '''Make a grid from images.

    w    -- number of grid columns
    imgs -- images (must have same size and format)
    '''
    imgs = iter(imgs)
    img0 = imgs.next()               # Python 2 iterator protocol (next(imgs) in py3)
    pad = np.zeros_like(img0)        # black filler for an incomplete last row
    imgs = it.chain([img0], imgs)    # put the peeked first image back
    rows = grouper(w, imgs, pad)
    # Stack each row horizontally, then all rows vertically.
    return np.vstack(map(np.hstack, rows))
def getsize(img):
    """Return (width, height) of an image array."""
    height, width = img.shape[:2]
    return width, height
def mdot(*args):
    """Chained matrix product: mdot(A, B, C) == dot(dot(A, B), C)."""
    # Relies on the ``reduce`` builtin (Python 2; functools.reduce in py3).
    return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
    """Mark each keypoint's position on *vis* with a small circle."""
    for keypoint in keypoints:
        px, py = keypoint.pt
        # cv2 drawing needs integer pixel coordinates.
        cv2.circle(vis, (int(px), int(py)), 2, color)
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
# The following keys are used in the segment dictionaries passed via
# the driver API. These are defined separately from similar keys in
# neutron.extensions.providernet so that drivers don't need to change
# if/when providernet moves to the core API.
#
# Keys of a network segment dictionary (see module comment above).
ID = 'id'
NETWORK_TYPE = 'network_type'
PHYSICAL_NETWORK = 'physical_network'
SEGMENTATION_ID = 'segmentation_id'
MTU = 'mtu'

# The following keys are used in the binding level dictionaries
# available via the binding_levels and original_binding_levels
# PortContext properties.
BOUND_DRIVER = 'bound_driver'    # name of the driver that bound the level
BOUND_SEGMENT = 'bound_segment'  # segment dictionary bound at the level
@six.add_metaclass(abc.ABCMeta)
class TypeDriver(object):
    """Define stable abstract interface for ML2 type drivers.

    ML2 type drivers each support a specific network_type for provider
    and/or tenant network segments. Type drivers must implement this
    abstract interface, which defines the API by which the plugin uses
    the driver to manage the persistent type-specific resource
    allocation state associated with network segments of that type.

    Network segments are represented by segment dictionaries using the
    NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined
    above, corresponding to the provider attributes. Future revisions
    of the TypeDriver API may add additional segment dictionary
    keys. Attributes not applicable for a particular network_type may
    either be excluded or stored as None.
    """

    @abc.abstractmethod
    def get_type(self):
        """Get driver's network type.

        :returns: network_type value handled by this driver
        """
        pass

    @abc.abstractmethod
    def initialize(self):
        """Perform driver initialization.

        Called after all drivers have been loaded and the database has
        been initialized. No abstract methods defined below will be
        called prior to this method being called.
        """
        pass

    @abc.abstractmethod
    def is_partial_segment(self, segment):
        """Return True if segment is a partially specified segment.

        :param segment: segment dictionary
        :returns: boolean
        """

    @abc.abstractmethod
    def validate_provider_segment(self, segment):
        """Validate attributes of a provider network segment.

        :param segment: segment dictionary using keys defined above
        :raises: neutron.common.exceptions.InvalidInput if invalid

        Called outside transaction context to validate the provider
        attributes for a provider network segment. Raise InvalidInput
        if:

        - any required attribute is missing
        - any prohibited or unrecognized attribute is present
        - any attribute value is not valid

        The network_type attribute is present in segment, but
        need not be validated.
        """
        pass

    @abc.abstractmethod
    def reserve_provider_segment(self, session, segment):
        """Reserve resource associated with a provider network segment.

        :param session: database session
        :param segment: segment dictionary
        :returns: segment dictionary

        Called inside transaction context on session to reserve the
        type-specific resource for a provider network segment. The
        segment dictionary passed in was returned by a previous
        validate_provider_segment() call.
        """
        pass

    @abc.abstractmethod
    def allocate_tenant_segment(self, session):
        """Allocate resource for a new tenant network segment.

        :param session: database session
        :returns: segment dictionary using keys defined above

        Called inside transaction context on session to allocate a new
        tenant network, typically from a type-specific resource
        pool. If successful, return a segment dictionary describing
        the segment. If tenant network segment cannot be allocated
        (i.e. tenant networks not supported or resource pool is
        exhausted), return None.
        """
        pass

    @abc.abstractmethod
    def release_segment(self, session, segment):
        """Release network segment.

        :param session: database session
        :param segment: segment dictionary using keys defined above

        Called inside transaction context on session to release a
        tenant or provider network's type-specific resource. Runtime
        errors are not expected, but raising an exception will result
        in rollback of the transaction.
        """
        pass

    @abc.abstractmethod
    def get_mtu(self, physical):
        """Get driver's network MTU.

        :param physical: presumably the physical_network value of the
            segment -- confirm against the plugin's callers
        :returns: mtu, the maximum transmission unit

        Returns the mtu for the network based on the config values and
        the network type.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class NetworkContext(object):
    """Context passed to MechanismDrivers for changes to network resources.

    A NetworkContext instance wraps a network resource. It provides
    helper methods for accessing other relevant information. Results
    from expensive operations are cached so that other
    MechanismDrivers can freely access the same information.
    """

    @abc.abstractproperty
    def current(self):
        """Return the network in its current configuration.

        Return the network, as defined by NeutronPluginBaseV2.
        create_network and all extensions in the ml2 plugin, with
        all its properties 'current' at the time the context was
        established.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the network in its original configuration.

        Return the network, with all its properties set to their
        original values prior to a call to update_network. Method is
        only valid within calls to update_network_precommit and
        update_network_postcommit.
        """
        pass

    @abc.abstractproperty
    def network_segments(self):
        """Return the segments associated with this network resource.

        Each segment is a dictionary using the keys defined at the top
        of this module.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class SubnetContext(object):
    """Context passed to MechanismDrivers for changes to subnet resources.

    A SubnetContext instance wraps a subnet resource. It provides
    helper methods for accessing other relevant information. Results
    from expensive operations are cached so that other
    MechanismDrivers can freely access the same information.
    """

    @abc.abstractproperty
    def current(self):
        """Return the subnet in its current configuration.

        Return the subnet, as defined by NeutronPluginBaseV2.
        create_subnet and all extensions in the ml2 plugin, with
        all its properties 'current' at the time the context was
        established.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the subnet in its original configuration.

        Return the subnet, with all its properties set to their
        original values prior to a call to update_subnet. Method is
        only valid within calls to update_subnet_precommit and
        update_subnet_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class PortContext(object):
    """Context passed to MechanismDrivers for changes to port resources.

    A PortContext instance wraps a port resource. It provides helper
    methods for accessing other relevant information. Results from
    expensive operations are cached so that other MechanismDrivers can
    freely access the same information.
    """

    @abc.abstractproperty
    def current(self):
        """Return the port in its current configuration.

        Return the port, as defined by NeutronPluginBaseV2.
        create_port and all extensions in the ml2 plugin, with
        all its properties 'current' at the time the context was
        established.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the port in its original configuration.

        Return the port, with all its properties set to their
        original values prior to a call to update_port. Method is
        only valid within calls to update_port_precommit and
        update_port_postcommit.
        """
        pass

    @abc.abstractproperty
    def status(self):
        """Return the status of the current port."""
        pass

    @abc.abstractproperty
    def original_status(self):
        """Return the status of the original port.

        The method is only valid within calls to update_port_precommit and
        update_port_postcommit.
        """
        pass

    @abc.abstractproperty
    def network(self):
        """Return the NetworkContext associated with this port."""
        pass

    @abc.abstractproperty
    def binding_levels(self):
        """Return dictionaries describing the current binding levels.

        This property returns a list of dictionaries describing each
        binding level if the port is bound or partially bound, or None
        if the port is unbound. Each returned dictionary contains the
        name of the bound driver under the BOUND_DRIVER key, and the
        bound segment dictionary under the BOUND_SEGMENT key.

        The first entry (index 0) describes the top-level binding,
        which always involves one of the port's network's static
        segments. In the case of a hierarchical binding, subsequent
        entries describe the lower-level bindings in descending order,
        which may involve dynamic segments. Adjacent levels where
        different drivers bind the same static or dynamic segment are
        possible. The last entry (index -1) describes the bottom-level
        binding that supplied the port's binding:vif_type and
        binding:vif_details attribute values.

        Within calls to MechanismDriver.bind_port, descriptions of the
        levels above the level currently being bound are returned.
        """
        pass

    @abc.abstractproperty
    def original_binding_levels(self):
        """Return dictionaries describing the original binding levels.

        This property returns a list of dictionaries describing each
        original binding level if the port was previously bound, or
        None if the port was unbound. The content is as described for
        the binding_levels property.

        This property is only valid within calls to
        update_port_precommit and update_port_postcommit. It returns
        None otherwise.
        """
        pass

    @abc.abstractproperty
    def top_bound_segment(self):
        """Return the current top-level bound segment dictionary.

        This property returns the current top-level bound segment
        dictionary, or None if the port is unbound. For a bound port,
        top_bound_segment is equivalent to
        binding_levels[0][BOUND_SEGMENT], and returns one of the
        port's network's static segments.
        """
        pass

    @abc.abstractproperty
    def original_top_bound_segment(self):
        """Return the original top-level bound segment dictionary.

        This property returns the original top-level bound segment
        dictionary, or None if the port was previously unbound. For a
        previously bound port, original_top_bound_segment is
        equivalent to original_binding_levels[0][BOUND_SEGMENT], and
        returns one of the port's network's static segments.

        This property is only valid within calls to
        update_port_precommit and update_port_postcommit. It returns
        None otherwise.
        """
        pass

    @abc.abstractproperty
    def bottom_bound_segment(self):
        """Return the current bottom-level bound segment dictionary.

        This property returns the current bottom-level bound segment
        dictionary, or None if the port is unbound. For a bound port,
        bottom_bound_segment is equivalent to
        binding_levels[-1][BOUND_SEGMENT], and returns the segment
        whose binding supplied the port's binding:vif_type and
        binding:vif_details attribute values.
        """
        pass

    @abc.abstractproperty
    def original_bottom_bound_segment(self):
        """Return the original bottom-level bound segment dictionary.

        This property returns the original bottom-level bound segment
        dictionary, or None if the port was previously unbound. For a
        previously bound port, original_bottom_bound_segment is
        equivalent to original_binding_levels[-1][BOUND_SEGMENT], and
        returns the segment whose binding supplied the port's previous
        binding:vif_type and binding:vif_details attribute values.

        This property is only valid within calls to
        update_port_precommit and update_port_postcommit. It returns
        None otherwise.
        """
        pass

    @abc.abstractproperty
    def host(self):
        """Return the host with which the port is associated.

        In the context of a host-specific operation on a distributed
        port, the host property indicates the host for which the port
        operation is being performed. Otherwise, it is the same value
        as current['binding:host_id'].
        """
        pass

    @abc.abstractproperty
    def original_host(self):
        """Return the original host with which the port was associated.

        In the context of a host-specific operation on a distributed
        port, the original_host property indicates the host for which
        the port operation is being performed. Otherwise, it is the
        same value as original['binding:host_id'].

        This property is only valid within calls to
        update_port_precommit and update_port_postcommit. It returns
        None otherwise.
        """
        pass

    @abc.abstractproperty
    def vif_type(self):
        """Return the vif_type indicating the binding state of the port.

        In the context of a host-specific operation on a distributed
        port, the vif_type property indicates the binding state for
        the host for which the port operation is being
        performed. Otherwise, it is the same value as
        current['binding:vif_type'].
        """
        pass

    @abc.abstractproperty
    def original_vif_type(self):
        """Return the original vif_type of the port.

        In the context of a host-specific operation on a distributed
        port, the original_vif_type property indicates original
        binding state for the host for which the port operation is
        being performed. Otherwise, it is the same value as
        original['binding:vif_type'].

        This property is only valid within calls to
        update_port_precommit and update_port_postcommit. It returns
        None otherwise.
        """
        pass

    @abc.abstractproperty
    def vif_details(self):
        """Return the vif_details describing the binding of the port.

        In the context of a host-specific operation on a distributed
        port, the vif_details property describes the binding for the
        host for which the port operation is being
        performed. Otherwise, it is the same value as
        current['binding:vif_details'].
        """
        pass

    @abc.abstractproperty
    def original_vif_details(self):
        """Return the original vif_details of the port.

        In the context of a host-specific operation on a distributed
        port, the original_vif_details property describes the original
        binding for the host for which the port operation is being
        performed. Otherwise, it is the same value as
        original['binding:vif_details'].

        This property is only valid within calls to
        update_port_precommit and update_port_postcommit. It returns
        None otherwise.
        """
        pass

    @abc.abstractproperty
    def segments_to_bind(self):
        """Return the list of segments with which to bind the port.

        This property returns the list of segment dictionaries with
        which the mechanism driver may bind the port. When
        establishing a top-level binding, these will be the port's
        network's static segments. For each subsequent level, these
        will be the segments passed to continue_binding by the
        mechanism driver that bound the level above.

        This property is only valid within calls to
        MechanismDriver.bind_port. It returns None otherwise.
        """
        pass

    @abc.abstractmethod
    def host_agents(self, agent_type):
        """Get agents of the specified type on port's host.

        :param agent_type: Agent type identifier
        :returns: List of agents_db.Agent records
        """
        pass

    @abc.abstractmethod
    def set_binding(self, segment_id, vif_type, vif_details,
                    status=None):
        """Set the bottom-level binding for the port.

        :param segment_id: Network segment bound for the port.
        :param vif_type: The VIF type for the bound port.
        :param vif_details: Dictionary with details for VIF driver.
        :param status: Port status to set if not None.

        This method is called by MechanismDriver.bind_port to indicate
        success and specify binding details to use for port. The
        segment_id must identify an item in the current value of the
        segments_to_bind property.
        """
        pass

    @abc.abstractmethod
    def continue_binding(self, segment_id, next_segments_to_bind):
        """Continue binding the port with different segments.

        :param segment_id: Network segment partially bound for the port.
        :param next_segments_to_bind: Segments to continue binding with.

        This method is called by MechanismDriver.bind_port to indicate
        it was able to partially bind the port, but that one or more
        additional mechanism drivers are required to complete the
        binding. The segment_id must identify an item in the current
        value of the segments_to_bind property. The list of segments
        IDs passed as next_segments_to_bind identify dynamic (or
        static) segments of the port's network that will be used to
        populate segments_to_bind for the next lower level of a
        hierarchical binding.
        """
        pass

    @abc.abstractmethod
    def allocate_dynamic_segment(self, segment):
        """Allocate a dynamic segment.

        :param segment: A partially or fully specified segment dictionary

        Called by the MechanismDriver.bind_port, create_port or update_port
        to dynamically allocate a segment for the port using the partial
        segment specified. The segment dictionary can be a fully or partially
        specified segment. At a minimum it needs the network_type populated to
        call on the appropriate type driver.
        """
        pass

    @abc.abstractmethod
    def release_dynamic_segment(self, segment_id):
        """Release an allocated dynamic segment.

        :param segment_id: UUID of the dynamic network segment.

        Called by the MechanismDriver.delete_port or update_port to release
        the dynamic segment allocated for this port.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class MechanismDriver(object):
"""Define stable abstract interface for ML2 mechanism drivers.
A mechanism driver is called on the creation, update, and deletion
of networks and ports. For every event, there are two methods that
get called - one within the database transaction (method suffix of
_precommit), one right afterwards (method suffix of _postcommit).
Exceptions raised by methods called inside the transaction can
rollback, but should not make any blocking calls (for example,
REST requests to an outside controller). Methods called after
transaction commits can make blocking external calls, though these
will block the entire process. Exceptions raised in calls after
the transaction commits may cause the associated resource to be
deleted.
Because rollback outside of the transaction is not done in the
update network/port case, all data validation must be done within
methods that are part of the database transaction.
"""
@abc.abstractmethod
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
pass
def create_network_precommit(self, context):
"""Allocate resources for a new network.
:param context: NetworkContext instance describing the new
network.
Create a new network, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_network_postcommit(self, context):
"""Create a network.
:param context: NetworkContext instance describing the new
network.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
"""
pass
def update_network_precommit(self, context):
"""Update resources of a network.
:param context: NetworkContext instance describing the new
state of the network, as well as the original state prior
to the update_network call.
Update values of a network, updating the associated resources
in the database. Called inside transaction context on session.
Raising an exception will result in rollback of the
transaction.
update_network_precommit is called for all changes to the
network state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def update_network_postcommit(self, context):
    """Apply an update of a network.

    :param context: NetworkContext instance holding both the new
        state of the network and its state before the update_network
        call.

    Runs after the transaction commits; blocking is allowed but
    stalls the whole process. Raising an exception causes the
    resource to be deleted. Invoked for every change to network
    state -- drivers must ignore states or transitions they do not
    know or care about.
    """
def delete_network_precommit(self, context):
    """Release database resources previously allocated for a network.

    :param context: NetworkContext instance describing the network
        state just before deletion.

    Runs inside the transaction on the session. Runtime errors are
    not expected, but raising an exception rolls the transaction
    back.
    """
def delete_network_postcommit(self, context):
    """Apply the deletion of a network.

    :param context: NetworkContext instance describing the network
        state just before deletion.

    Runs after the transaction commits; blocking is allowed but
    stalls the whole process. Runtime errors are unexpected and do
    not stop the resource from being deleted.
    """
def create_subnet_precommit(self, context):
    """Allocate database resources for a subnet being created.

    :param context: SubnetContext instance describing the new
        subnet.

    Runs inside the database transaction on the session and must not
    block. Raising an exception rolls back the current transaction.
    """
def create_subnet_postcommit(self, context):
    """Apply the creation of a subnet.

    :param context: SubnetContext instance describing the new
        subnet.

    Runs after the transaction commits. Blocking is allowed but
    stalls the whole process, so keep work short. Raising an
    exception causes the resource to be deleted.
    """
def update_subnet_precommit(self, context):
    """Update database resources of a subnet.

    :param context: SubnetContext instance holding both the new
        state of the subnet and its state before the update_subnet
        call.

    Runs inside the transaction on the session; raising an exception
    rolls the transaction back. Invoked for every change to subnet
    state -- drivers must ignore states or transitions they do not
    know or care about.
    """
def update_subnet_postcommit(self, context):
    """Apply an update of a subnet.

    :param context: SubnetContext instance holding both the new
        state of the subnet and its state before the update_subnet
        call.

    Runs after the transaction commits; blocking is allowed but
    stalls the whole process. Raising an exception causes the
    resource to be deleted. Invoked for every change to subnet
    state -- drivers must ignore states or transitions they do not
    know or care about.
    """
def delete_subnet_precommit(self, context):
    """Release database resources previously allocated for a subnet.

    :param context: SubnetContext instance describing the subnet
        state just before deletion.

    Runs inside the transaction on the session. Runtime errors are
    not expected, but raising an exception rolls the transaction
    back.
    """
def delete_subnet_postcommit(self, context):
    """Apply the deletion of a subnet.

    :param context: SubnetContext instance describing the subnet
        state just before deletion.

    Runs after the transaction commits; blocking is allowed but
    stalls the whole process. Runtime errors are unexpected and do
    not stop the resource from being deleted.
    """
def create_port_precommit(self, context):
    """Allocate database resources for a port being created.

    :param context: PortContext instance describing the port.

    Runs inside the database transaction on the session and must not
    block. Raising an exception rolls back the current transaction.
    """
def create_port_postcommit(self, context):
    """Apply the creation of a port.

    :param context: PortContext instance describing the port.

    Runs after the transaction completes; blocking is allowed but
    stalls the whole process. Raising an exception results in the
    deletion of the resource.
    """
def update_port_precommit(self, context):
    """Complete a port update inside the database transaction.

    :param context: PortContext instance holding both the new state
        of the port and its state before the update_port call.

    Runs inside the transaction on the session; raising an exception
    rolls the transaction back. Invoked for every change to port
    state -- drivers must ignore states or transitions they do not
    know or care about.
    """
def update_port_postcommit(self, context):
    """Apply an update of a port.

    :param context: PortContext instance holding both the new state
        of the port and its state before the update_port call.

    Runs after the transaction completes; blocking is allowed but
    stalls the whole process. Raising an exception results in the
    deletion of the resource. Invoked for every change to port
    state -- drivers must ignore states or transitions they do not
    know or care about.
    """
def delete_port_precommit(self, context):
    """Release database resources previously allocated for a port.

    :param context: PortContext instance describing the port state
        just before deletion.

    Runs inside the transaction on the session. Runtime errors are
    not expected, but raising an exception rolls the transaction
    back.
    """
def delete_port_postcommit(self, context):
    """Apply the deletion of a port.

    :param context: PortContext instance describing the port state
        just before deletion.

    Runs after the transaction completes; blocking is allowed but
    stalls the whole process. Runtime errors are unexpected and do
    not stop the resource from being deleted.
    """
def bind_port(self, context):
    """Attempt to bind a port.

    :param context: PortContext instance describing the port.

    Invoked outside any transaction to try to establish a port
    binding using this mechanism driver. Bindings may exist at each
    of multiple levels of a hierarchical network and are built from
    the top level downward. At each level the driver decides whether
    it can bind to any segment in context.segments_to_bind, based on
    context.host, relevant port/network attributes, and its own
    knowledge of the network topology. At the top level,
    context.segments_to_bind holds the static segments of the port's
    network; at each lower level it holds the static or dynamic
    segments supplied by the driver that bound at the level above.

    A driver that completes the binding to some segment in
    context.segments_to_bind must call context.set_binding with the
    binding details; one that can only partially bind must call
    context.continue_binding with the segments to bind at the next
    lower level.

    If the binding results are committed after bind_port returns,
    all mechanism drivers observe them as update_port_precommit and
    update_port_postcommit calls. If another thread or process
    concurrently binds or updates the port, the results are not
    committed and those calls never happen. Because results can be
    discarded rather than committed, avoid persistent state changes
    in bind_port, or ensure such changes are eventually cleaned up.
    """
def check_vlan_transparency(self, context):
    """Report whether the network supports VLAN transparency.

    :param context: NetworkContext instance describing the network.
    """
@six.add_metaclass(abc.ABCMeta)
class ExtensionDriver(object):
    """Stable abstract interface for ML2 extension drivers.

    An extension driver adds attributes to the core resources
    implemented by the ML2 plugin. The process_* methods validate and
    persist extended attribute values supplied through the API for
    create and update operations, while the extend_*_dict methods copy
    extended attribute values into the resource dictionaries returned
    from API operations.
    """

    @abc.abstractmethod
    def initialize(self):
        """Perform driver initialization.

        Invoked once all drivers are loaded and the database is ready;
        none of the abstract methods below is called before this one
        completes.
        """

    @property
    def extension_alias(self):
        """Alias of the core API extension supported by this driver.

        Leave undeclared when API extension handling is left to a
        service plugin and only core resource extension and updates
        are needed here.
        """

    def process_create_network(self, plugin_context, data, result):
        """Validate and persist extended attributes of a new network.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming network data
        :param result: network dictionary to extend

        Runs inside the transaction on plugin_context.session.
        Extended attribute values must also be added to result.
        """

    def process_create_subnet(self, plugin_context, data, result):
        """Validate and persist extended attributes of a new subnet.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming subnet data
        :param result: subnet dictionary to extend

        Runs inside the transaction on plugin_context.session.
        Extended attribute values must also be added to result.
        """

    def process_create_port(self, plugin_context, data, result):
        """Validate and persist extended attributes of a new port.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming port data
        :param result: port dictionary to extend

        Runs inside the transaction on plugin_context.session.
        Extended attribute values must also be added to result.
        """

    def process_update_network(self, plugin_context, data, result):
        """Validate and update extended attributes of a network.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming network data
        :param result: network dictionary to extend

        Runs inside the transaction on plugin_context.session.
        Extended attribute values, whether updated or not, must also
        be added to result.
        """

    def process_update_subnet(self, plugin_context, data, result):
        """Validate and update extended attributes of a subnet.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming subnet data
        :param result: subnet dictionary to extend

        Runs inside the transaction on plugin_context.session.
        Extended attribute values, whether updated or not, must also
        be added to result.
        """

    def process_update_port(self, plugin_context, data, result):
        """Validate and update extended attributes of a port.

        :param plugin_context: plugin request context
        :param data: dictionary of incoming port data
        :param result: port dictionary to extend

        Runs inside the transaction on plugin_context.session.
        Extended attribute values, whether updated or not, must also
        be added to result.
        """

    def extend_network_dict(self, session, base_model, result):
        """Copy extended attributes into a network dictionary.

        :param session: database session
        :param base_model: network model data
        :param result: network dictionary to extend

        Runs inside the transaction on session; the extended dict is
        used for mechanism driver calls and/or returned as the result
        of a network operation.
        """

    def extend_subnet_dict(self, session, base_model, result):
        """Copy extended attributes into a subnet dictionary.

        :param session: database session
        :param base_model: subnet model data
        :param result: subnet dictionary to extend

        Runs inside the transaction on session; the extended dict is
        used for mechanism driver calls and/or returned as the result
        of a subnet operation.
        """

    def extend_port_dict(self, session, base_model, result):
        """Copy extended attributes into a port dictionary.

        :param session: database session
        :param base_model: port model data
        :param result: port dictionary to extend

        Runs inside the transaction on session; the extended dict is
        used for mechanism driver calls and/or returned as the result
        of a port operation.
        """
| |
import logging
from botocore.exceptions import ClientError
from spacel.provision import clean_name, bool_param
from spacel.provision.app.db.base import BaseDbTemplateDecorator
from spacel.provision.app.db.rds_alarm import RdsAlarmTriggerFactory
logger = logging.getLogger('spacel.provision.rds.factory')

# Defaults applied when a database entry omits "port"/"version".
POSTGRES_PORT = 5432
POSTGRES_VERSION = '9.5.2'
MYSQL_PORT = 3306
MYSQL_VERSION = '5.7.10'

# Engine name -> default engine version.
DEFAULT_VERSIONS = {
    'mysql': MYSQL_VERSION,
    'postgres': POSTGRES_VERSION
}

# Engine name -> default TCP port.
DEFAULT_PORTS = {
    'mysql': MYSQL_PORT,
    'postgres': POSTGRES_PORT
}
class RdsFactory(BaseDbTemplateDecorator):
    """Adds RDS database resources to a CloudFormation template.

    For each entry in ``app_region.databases`` this creates (or, for a
    database hosted in another region, references) an RDS instance,
    its security group, IAM statements for ``DescribeDBInstances``,
    and splices connection details into the launch-config user data.
    """

    def __init__(self, clients, ingress, passwords):
        # clients: provider of regional AWS clients (used to query
        #   CloudFormation in other regions for global databases).
        # passwords: password store used to generate/look up and
        #   replicate database passwords.
        super(RdsFactory, self).__init__(ingress)
        self._clients = clients
        self._passwords = passwords
        self._alarms = RdsAlarmTriggerFactory()

    def add_rds(self, app_region, template):
        """Add RDS instances and supporting resources to a template.

        :param app_region: app/region descriptor; ``databases`` drives
            what is created.
        :param template: CloudFormation template dict to mutate.
        :return: dict mapping password parameter name to a function
            yielding the plaintext password, or None when the app has
            no databases.
        """
        if not app_region.databases:
            logger.debug('No databases specified.')
            return
        params = template['Parameters']
        resources = template['Resources']
        app = app_region.app
        app_name = app.name
        orbit_name = app.orbit.name
        # Databases are announced to instances as JSON fragments
        # spliced into the user data just after the '"databases":{'
        # marker.
        user_data = self._lc_user_data(resources)
        db_intro = user_data.index('"databases":{') + 1
        added_databases = 0
        secret_params = {}
        iam_statements = []
        for name, db_params in app_region.databases.items():
            password_label = 'rds:%s' % name
            rds_resource = 'Db%s' % clean_name(name)
            db_global = db_params.get('global')
            if db_global and db_global != app_region.region:
                # If connecting to a global DB, query for stored password:
                encrypted, _ = \
                    self._passwords.get_password(app_region, password_label,
                                                 generate=False)
                if not encrypted:
                    continue
                # Resolve the physical instance id from the hosting
                # region's stack; skip if not deployed there (yet).
                rds_id = self._rds_id(app, db_global, rds_resource)
                if not rds_id:
                    continue
                # Let instances describe the remote database:
                iam_statements.append({
                    'Effect': 'Allow',
                    'Action': 'rds:DescribeDBInstances',
                    'Resource': {'Fn::Join': ['', [
                        'arn:aws:rds:%s:' % db_global,
                        {'Ref': 'AWS::AccountId'},
                        ':db:%s' % rds_id
                    ]]}
                })
                # Inserts at a fixed index land in reverse order: read
                # these bottom-up for the emitted JSON fragment; note
                # the trailing comma.
                user_data.insert(db_intro, ',')
                user_data.insert(db_intro, ',"region": "%s"}' % db_global)
                user_data.insert(db_intro,
                                 '","password": %s' % encrypted.json())
                user_data.insert(db_intro, rds_id)
                user_data.insert(db_intro, '"%s":{"name":"' % name)
                added_databases += 1
                continue
            db_type = db_params.get('type', 'postgres')
            db_version = db_params.get('version', DEFAULT_VERSIONS.get(db_type))
            if not db_version:
                logger.warning('Database "%s" has invalid "version".', name)
                continue
            db_port = db_params.get('port', DEFAULT_PORTS.get(db_type))
            if not db_port:
                logger.warning('Database "%s" has invalid "port".', name)
                continue
            instance_type = self._instance_type(db_params)
            multi_az = bool_param(db_params, 'multi_az', False)
            encrypted = bool_param(db_params, 'encrypted', False)
            storage_size = db_params.get('size', '5')
            storage_type = db_params.get('storage_type', 'gp2')
            storage_iops = db_params.get('iops', None)
            db_username = db_params.get('username', name)
            # A globally-hosted DB must be reachable from other
            # regions, so it is forced public.
            public = (db_global == app_region.region
                      or bool_param(db_params, 'public', False))
            db_subnet_group = '%sRdsSubnetGroup' % (public and 'Public'
                                                    or 'Private')
            rds_desc = '%s for %s in %s' % (name, app_name, orbit_name)
            logger.debug('Creating database "%s".', name)
            # Create a parameter for the database password:
            password_param = '%sPassword' % rds_resource
            params[password_param] = {
                'Type': 'String',
                'Description': 'Password for database %s' % name,
                'NoEcho': True
            }
            # Security group for database: only the app's SG may
            # connect on the database port.
            rds_sg_resource = '%sSg' % rds_resource
            resources[rds_sg_resource] = {
                'Type': 'AWS::EC2::SecurityGroup',
                'Properties': {
                    'GroupDescription': rds_desc,
                    'VpcId': {'Ref': 'VpcId'},
                    'SecurityGroupIngress': [{
                        'IpProtocol': 'tcp',
                        'FromPort': db_port,
                        'ToPort': db_port,
                        'SourceSecurityGroupId': {'Ref': 'Sg'}
                    }]
                }
            }
            rds_params = {
                'AllocatedStorage': storage_size,
                'AllowMajorVersionUpgrade': False,
                'AutoMinorVersionUpgrade': True,
                'DBInstanceClass': instance_type,
                'DBName': name,
                'DBSubnetGroupName': {'Ref': db_subnet_group},
                'Engine': db_type,
                'EngineVersion': db_version,
                'MasterUsername': db_username,
                'MasterUserPassword': {'Ref': password_param},
                'MultiAZ': multi_az,
                'Port': db_port,
                'PubliclyAccessible': public,
                'StorageEncrypted': encrypted,
                'StorageType': storage_type,
                'VPCSecurityGroups': [{'Ref': rds_sg_resource}]
            }
            if storage_iops:
                rds_params['Iops'] = storage_iops
                # Provisioned IOPS require the io1 storage type.
                if storage_type != 'io1':
                    logger.warning('Overriding "storage_type" of "%s": ' +
                                   '"iops" requires io1.', name)
                    rds_params['StorageType'] = 'io1'
            # Workaround for the instance_type default not supporting crypt
            # Other t2's fail, but at least that's the user's fault.
            if encrypted and instance_type == 'db.t2.micro':
                logger.warning('Overriding "instance_type" of "%s": ' +
                               '"encrypted" requires t2.large".', name)
                rds_params['DBInstanceClass'] = 'db.t2.large'
            resources[rds_resource] = {
                'Type': 'AWS::RDS::DBInstance',
                'Properties': rds_params
            }
            # Note: "encrypted" is reused here for the encrypted
            # password object, shadowing the earlier boolean.
            encrypted, plaintext_func = \
                self._passwords.get_password(app_region, password_label)
            # If hosting a global DB, store the password in each region:
            if db_global:
                region_clients = []
                for other_region, other_app_region in app.regions.items():
                    if app_region.region == other_region:
                        continue
                    self._passwords.set_password(other_app_region,
                                                 password_label, plaintext_func)
                    region_clients.append(other_region)
                # Inject other regions into 'clients' list
                db_clients = db_params.get('clients')
                if db_clients is None:
                    db_params['clients'] = region_clients
                else:
                    db_clients += region_clients
            iam_statements.append({
                'Effect': 'Allow',
                'Action': 'rds:DescribeDBInstances',
                'Resource': {'Fn::Join': ['', [
                    'arn:aws:rds:%s:' % app_region.region,
                    {'Ref': 'AWS::AccountId'},
                    ':db:',
                    {'Ref': rds_resource},
                ]]}
            })
            # Inject a labeled reference to this cache replication group:
            # Read this backwards, and note the trailing comma.
            user_data.insert(db_intro, ',')
            user_data.insert(db_intro, ',"region": "%s"}' % app_region.region)
            user_data.insert(db_intro, '","password": %s' % encrypted.json())
            user_data.insert(db_intro, {'Ref': rds_resource})
            user_data.insert(db_intro, '"%s":{"name":"' % name)
            added_databases += 1
            self._add_client_resources(resources, app_region, db_port,
                                       db_params, rds_sg_resource)
            secret_params[password_param] = plaintext_func
            rds_alarms = db_params.get('alarms', {})
            self._alarms.add_rds_alarms(app_region, resources, rds_alarms,
                                        rds_resource)
        if iam_statements:
            resources['RdsPolicy'] = {
                'DependsOn': 'Role',
                'Type': 'AWS::IAM::Policy',
                'Properties': {
                    'PolicyName': 'DescribeRdsDatabases',
                    'Roles': [{'Ref': 'Role'}],
                    'PolicyDocument': {
                        'Statement': iam_statements
                    }
                }
            }
        if added_databases:
            # Each database contributed 5 fragments; remove the final
            # fragment (the last ',' inserted) to drop the trailing
            # comma.
            del user_data[db_intro + (5 * added_databases) - 1]
        return secret_params

    def _rds_id(self, app, region, rds_resource):
        """Look up the physical RDS instance id of an app's stack in
        another region, or None when the stack doesn't exist there.
        """
        app_name = app.name
        orbit_name = app.orbit.name
        cloudformation = self._clients.cloudformation(region)
        stack_name = '%s-%s' % (orbit_name, app_name)
        try:
            resource = cloudformation.describe_stack_resource(
                StackName=stack_name,
                LogicalResourceId=rds_resource)
            return (resource['StackResourceDetail']
                    ['PhysicalResourceId'])
        except ClientError as e:
            e_message = e.response['Error'].get('Message', '')
            # Missing stack is expected when the app isn't deployed
            # in that region; anything else is re-raised.
            if 'does not exist' in e_message:
                logger.debug('App %s not found in %s in %s.', app_name,
                             orbit_name, region)
                return None
            raise e

    @staticmethod
    def _instance_type(params):
        """Return the configured instance type, ensuring the 'db.'
        prefix RDS requires.
        """
        instance_type = params.get('instance_type', 'db.t2.micro')
        if not instance_type.startswith('db.'):
            instance_type = 'db.' + instance_type
        return instance_type
| |
#!python
# Configure -- python version
#
# this script builds Visual Studio files
import sys
import os
import os.path
import re
import subprocess
# files to configure
filelist = ["config.h",
            "softhsm2.sln",
            "convarch\\convarch.vcxproj.filters",
            "convarch\\convarch.vcxproj",
            "cryptotest\\cryptotest.vcxproj",
            "datamgrtest\\datamgrtest.vcxproj",
            "dump\\dump.vcxproj",
            "handlemgrtest\\handlemgrtest.vcxproj",
            "keyconv\\keyconv.vcxproj.filters",
            "keyconv\\keyconv.vcxproj",
            "objstoretest\\objstoretest.vcxproj",
            "p11test\\p11test.vcxproj",
            "sessionmgrtest\\sessionmgrtest.vcxproj",
            "slotmgrtest\\slotmgrtest.vcxproj",
            "softhsm2\\softhsm2.vcxproj",
            "util\\util.vcxproj.filters",
            "util\\util.vcxproj"]

# test files (a "test<name>" probe program is compiled for each)
testlist = ["botan",
            "ecc",
            "gnump",
            "gost",
            "ossl",
            "osslv",
            "rfc3394",
            "rfc5649"]

# variables to expand in the configured files
varvals = {}
varnames = ["CUINCPATH",
            "CULIBPATH",
            "DEBUGDLLPATH",
            "DEBUGINCPATH",
            "DEBUGLIBPATH",
            "DLLPATH",
            "EXTRALIBS",
            "INCLUDEPATH",
            "LIBNAME",
            "LIBPATH",
            "PLATFORM",
            "PLATFORMDIR"]

# conditions to stack
condvals = {}
condnames = ["BOTAN",
             "ECC",
             "GOST",
             "NONPAGE",
             "OPENSSL",
             "RFC3394",
             "RFC5649",
             "TESTS"]

# enable-xxx/disable-xxx arguments
enablelist = ["64bit",
              "debug",
              "ecc",
              "gost",
              "keep",
              "non-paged-memory",
              "verbose"]

# with-xxx/without-xxx arguments
withlist = ["botan",
            "cppunit",
            "crypto-backend",
            "debug-botan",
            "debug-openssl",
            "openssl"]

# general commands
commandlist = ["help", "clean"]  # verbose, keep

# usage
# NOTE: this is the Python port of the configure script, so the help
# refers to Configure.py (the original text said Configure.pl).
usage = ["Usage: python Configure.py help",
         "       python Configure.py options*",
         "       python Configure.py clean"]

# help (typo fixes: "speficy" -> "specify")
myhelp = ["'python Configure.py' configures SoftHSMv2 build files.\n"] +\
    usage + [\
    "\nGeneral Commands:",
    "  help                     print this help",
    "  clean                    clean up generated files",
    "  <none>                   print a summary of the configuration",
    "\nOptional Features:",
    "  enable-verbose           print messages [default=no]",
    "  enable-keep              keep test files after config [default=no]",
    "  enable-64bit             enable 64-bit compiling [default=no]",
    "  enable-debug             enable build of Debug config [default=yes]",
    "  enable-ecc               enable support for ECC [default=yes]",
    "  enable-gost              enable support for GOST [default=yes]",
    "  enable-non-paged-memory  enable non-paged memory [default=yes]",
    "\nOptional Packages:",
    "  with-crypto-backend      select the crypto backend [openssl|botan]",
    "  with-botan=PATH          specify prefix of path of Botan (Release)",
    "  with-debug-botan=PATH    specify prefix of path of Botan (Debug)",
    "  with-openssl=PATH        specify prefix of path of OpenSSL (Release)",
    "  with-debug-openssl=PATH  specify prefix of path of OpenSSL (Debug)",
    "  with-cppunit=PATH        specify prefix of path of CppUnit"]

# variables for parsing; mutated by parseargs()/myenable()/mywith()
verbose = False
configargs = None
want_help = False
want_clean = False
want_unknown = False
unknown_value = None
enable_keep = False
enable_debug = True
enable_ecc = True
enable_gost = True
enable_non_paged = True
platform = 32
crypto_backend = "openssl"
botan_path = "..\\..\\btn"
debug_botan_path = None
openssl_path = "..\\..\\ssl"
debug_openssl_path = None
want_tests = True
cppunit_path = "..\\..\\cu"
def parseargs(args):
    """Parse command-line arguments into the module-level config.

    NOTE: match order is significant -- 'with-x=y' must be tried
    before the bare 'with-x' pattern, or the value would be lost.
    """
    global verbose
    global enable_keep
    global want_help
    global want_clean
    global want_unknown
    global unknown_value
    global debug_botan_path
    global debug_openssl_path
    for arg in args:
        # Bare flags: not recorded in configargs.
        if arg.lower() == "verbose":
            verbose = True
            continue
        if arg.lower() == "keep":
            enable_keep = True
            continue
        if arg.lower() == "help":
            want_help = True
            continue
        # disable-<feature>
        di = re.match(r'disable-(.*)', arg, re.I)
        if di:
            appargs(arg)
            myenable(di.group(1), False)
            continue
        # enable-<feature>
        en = re.match(r'enable-(.*)', arg, re.I)
        if en:
            appargs(arg)
            myenable(en.group(1), True)
            continue
        # without-<package>
        wo = re.match(r'without-(.*)', arg, re.I)
        if wo:
            appargs(arg)
            mywith(wo.group(1), False)
            continue
        # with-<package>=<value>; '=no' is treated as 'without'.
        wv = re.match(r'with-(.*)=(.*)', arg, re.I)
        if wv:
            appargs(arg)
            if wv.group(2).lower() == "no":
                mywith(wv.group(1), False)
                continue
            mywith(wv.group(1), True, wv.group(2))
            continue
        # bare with-<package>
        wi = re.match(r'with-(.*)', arg, re.I)
        if wi:
            appargs(arg)
            mywith(wi.group(1), True)
            continue
        if arg.lower() == "clean":
            want_clean = True
            continue
        # Anything else stops parsing and is reported as unknown.
        want_unknown = True
        unknown_value = arg
        break
    # debug
    # Default Debug library paths are derived from the Release paths.
    if enable_debug:
        if debug_botan_path is None:
            debug_botan_path = botan_path + "_d"
        if debug_openssl_path is None:
            debug_openssl_path = openssl_path + "_d"
def appargs(arg):
    """Record a seen argument in the global configargs string."""
    global configargs
    # Backslashes, spaces and double quotes are backslash-escaped.
    escaped = "".join(
        ("\\" + ch) if ch in ("\\", " ", "\"") else ch for ch in arg)
    if configargs:
        configargs += " " + escaped
    else:
        configargs = escaped
def myenable(key, val):
    """Apply an enable-<key>/disable-<key> option to the globals."""
    global platform
    global enable_debug
    global enable_ecc
    global enable_gost
    global enable_non_paged
    global enable_keep
    global verbose
    global want_unknown
    global unknown_value
    lowered = key.lower()
    if lowered == "64bit":
        if val:
            platform = 64
    elif lowered == "debug":
        if not val:
            enable_debug = False
    elif lowered == "ecc":
        if not val:
            enable_ecc = False
    elif lowered == "gost":
        if not val:
            enable_gost = False
    elif lowered == "non-paged-memory":
        if not val:
            enable_non_paged = False
    elif lowered == "keep":
        if val:
            enable_keep = True
    elif lowered == "verbose":
        if val:
            verbose = True
    else:
        # Unknown feature flag: remember it for error reporting.
        want_unknown = True
        if val:
            unknown_value = "enable-" + key
        else:
            unknown_value = "disable-" + key
def mywith(key, val, detail=None):
    """Apply a with-<key>/without-<key> option to the globals."""
    global crypto_backend
    global botan_path
    global debug_botan_path
    global openssl_path
    global debug_openssl_path
    global want_tests
    global cppunit_path
    global want_unknown
    global unknown_value
    lowered = key.lower()
    if lowered == "crypto-backend":
        if val and (detail.lower() == "openssl"):
            crypto_backend = "openssl"
        elif val and (detail.lower() == "botan"):
            crypto_backend = "botan"
        else:
            want_unknown = True
            unknown_value = "with-crypto-backend=" + detail
        return
    if lowered == "botan":
        if not val:
            want_unknown = True
            unknown_value = "without-botan doesn't make sense"
        elif detail.lower() != "yes":
            botan_path = detail
        return
    if lowered == "debug-botan":
        if not val:
            want_unknown = True
            unknown_value = "without-debug-botan doesn't make sense"
        elif detail.lower() != "yes":
            debug_botan_path = detail
        return
    if lowered == "openssl":
        if not val:
            want_unknown = True
            unknown_value = "without-openssl doesn't make sense"
        elif detail.lower() != "yes":
            openssl_path = detail
        return
    if lowered == "debug-openssl":
        if not val:
            want_unknown = True
            unknown_value = "without-debug-openssl doesn't make sense"
        elif detail.lower() != "yes":
            debug_openssl_path = detail
        return
    if lowered == "cppunit":
        if not val:
            # without-cppunit disables the test projects entirely.
            want_tests = False
        elif detail.lower() != "yes":
            cppunit_path = detail
        return
    # Unknown package: remember it for error reporting.
    want_unknown = True
    if val:
        unknown_value = "with-" + key
    else:
        unknown_value = "without-" + key
def dohelp():
    """Print the help text and exit with status 1."""
    for line in myhelp:
        print line
    sys.exit(1)
def docleantest():
    """Remove generated probe sources, objects, executables and DLLs."""
    for basename in testlist:
        # Each probe may leave up to four artifacts behind; delete
        # whichever of them exist.
        for extension in (".c", ".cpp", ".obj", ".exe"):
            filename = "test" + basename + extension
            if os.path.isfile(filename):
                os.unlink(filename)
    # DLLs copied next to the probes for running them:
    for dll in ("botan.dll", "libeay32.dll"):
        if os.path.isfile(dll):
            os.unlink(dll)
def doclean():
    """Remove all generated files, then exit successfully."""
    docleantest()
    for filename in (f for f in filelist if os.path.isfile(f)):
        os.unlink(filename)
    sys.exit(0)
def dounknown():
    """Report an unparsable argument on stderr and exit with status 1."""
    print >> sys.stderr, "can't parse " + unknown_value + ""
    sys.exit(1)
def doconfig():
"""config itself"""
global botan_path
global debug_botan_path
global openssl_path
global debug_openssl_path
global cppunit_path
# configure the platform
if platform == 32:
varvals["PLATFORM"] = "Win32"
else:
varvals["PLATFORM"] = "x64"
varvals["PLATFORMDIR"] = "x64\\"
# configure ECC and GOST
if enable_ecc:
condvals["ECC"] = True
if enable_gost:
condvals["GOST"] = True
# configure the crypto
if crypto_backend == "botan":
condvals["BOTAN"] = True
varvals["LIBNAME"] = "botan.lib"
botan_path = os.path.abspath(botan_path)
botan_dll = os.path.join(botan_path, "botan.dll")
varvals["DLLPATH"] = botan_dll
botan_inc = os.path.join(botan_path, "include")
if not os.path.exists(os.path.join(botan_inc, "botan\\init.h")):
print >> sys.stderr, "can't find Botan includes"
sys.exit(1)
varvals["INCLUDEPATH"] = botan_inc
if not os.path.exists(os.path.join(botan_path, "botan.lib")):
print >> sys.stderr, "can't find Botan library"
sys.exit(1)
varvals["LIBPATH"] = botan_path
if enable_debug:
debug_botan_path = os.path.abspath(debug_botan_path)
varvals["DEBUGDLLPATH"] = \
os.path.join(debug_botan_path, "botan.dll")
debug_botan_inc = os.path.join(debug_botan_path, "include")
if not os.path.exists(os.path.join(debug_botan_inc,
"botan\\init.h")):
print >> sys.stderr, "can't find debug Botan includes"
sys.exit(1)
varvals["DEBUGINCPATH"] = debug_botan_inc
if not os.path.exists(os.path.join(debug_botan_path, "botan.lib")):
print >> sys.stderr, "can't find debug Botan library"
sys.exit(1)
varvals["DEBUGLIBPATH"] = debug_botan_path
else:
varvals["DEBUGDLLPATH"] = varvals["DLLPATH"]
varvals["DEBUGINCPATH"] = varvals["INCLUDEPATH"]
varvals["DEBUGLIBPATH"] = varvals["LIBPATH"]
# Botan version
if verbose:
print "checking Botan version"
botan_version_minor = 0
system_libs = []
if os.path.exists(botan_dll):
subprocess.call(["copy", botan_dll, "."], shell=True)
else:
system_libs = ["user32.lib", "advapi32.lib"]
inc = botan_inc
lib = os.path.join(botan_path, "botan.lib")
testfile = open("testbotan.cpp", "w")
print >>testfile, '\
#include <botan/init.h>\n\
#include <botan/version.h>\n\
int main() {\n\
using namespace Botan;\n\
LibraryInitializer::initialize();\n\
#if BOTAN_VERSION_CODE < BOTAN_VERSION_CODE_FOR(1,10,0)\n\
return 1;\n\
#endif\n\
#if BOTAN_VERSION_CODE > BOTAN_VERSION_CODE_FOR(1,11,0)\n\
return 2;\n\
#endif\n\
return 0;\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testbotan.cpp", lib]
command.extend(system_libs)
subprocess.check_output(command, stderr=subprocess.STDOUT)
if not os.path.exists(".\\testbotan.exe"):
print >> sys.stderr, "can't create .\\testbotan.exe"
sys.exit(1)
ret = subprocess.call(".\\testbotan.exe")
if ret == 1:
print >> sys.stderr, "Botan version too old"
sys.exit(1)
if ret == 2:
botan_version_minor = 11
print >> sys.stderr, "Botan version 11 not yet supported"
sys.exit(1)
if ret != 0:
print >> sys.stderr, "Botan test failed"
sys.exit(1)
else:
botan_version_minor = 10
# Botan ECC support
if enable_ecc:
if verbose:
print "checking Botan ECC support"
testfile = open("testecc.cpp", "w")
print >>testfile, '\
#include <botan/init.h>\n\
#include <botan/ec_group.h>\n\
#include <botan/oids.h>\n\
int main() {\n\
Botan::LibraryInitializer::initialize();\n\
const std::string name("secp256r1");\n\
const Botan::OID oid(Botan::OIDS::lookup(name));\n\
const Botan::EC_Group ecg(oid);\n\
try {\n\
const Botan::SecureVector<Botan::byte> der =\n\
ecg.DER_encode(Botan::EC_DOMPAR_ENC_OID);\n\
} catch(...) {\n\
return 1;\n\
}\n\
return 0;\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testecc.cpp", lib]
command.extend(system_libs)
subprocess.check_output(command, stderr=subprocess.STDOUT)
if not os.path.exists(".\\testecc.exe"):
print >> sys.stderr, "can't create .\\testecc.exe"
sys.exit(1)
if subprocess.call(".\\testecc.exe") != 0:
print >> sys.stderr, \
"can't find P256: upgrade to Botan >= 1.10.6"
sys.exit(1)
# Botan GOST support
if enable_gost:
if verbose:
print "checking Botan GOST support"
testfile = open("testgost.cpp", "w")
print >>testfile, '\
#include <botan/init.h>\n\
#include <botan/gost_3410.h>\n\
#include <botan/oids.h>\n\
int main() {\n\
Botan::LibraryInitializer::initialize();\n\
const std::string name("gost_256A");\n\
const Botan::OID oid(Botan::OIDS::lookup(name));\n\
const Botan::EC_Group ecg(oid);\n\
try {\n\
const Botan::SecureVector<Botan::byte> der =\n\
ecg.DER_encode(Botan::EC_DOMPAR_ENC_OID);\n\
} catch(...) {\n\
return 1;\n\
}\n\
return 0;\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testgost.cpp", lib]
command.extend(system_libs)
subprocess.check_output(command, stderr=subprocess.STDOUT)
if not os.path.exists(".\\testgost.exe"):
print >> sys.stderr, "can't create .\\testgost.exe"
sys.exit(1)
if subprocess.call(".\\testgost.exe") != 0:
print >> sys.stderr, \
"can't find GOST: upgrade to Botan >= 1.10.6"
sys.exit(1)
# no check for Botan RFC3394 support
condvals["RFC3394"] = True
# Botan RFC5649 support
if verbose:
print "checking Botan RFC5649 support"
testfile = open("testrfc5649.cpp", "w")
print >>testfile, '\
#include <botan/botan.h>\n\
#include <botan/rfc3394.h>\n\
int main() {\n\
using namespace Botan;\n\
SecureVector<byte> key(10);\n\
SymmetricKey kek("AABB");\n\
Algorithm_Factory& af = global_state().algorithm_factory();\n\
SecureVector<byte> x = rfc5649_keywrap(key, kek, af);\n\
return 1;\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testrfc5649.cpp", lib]
command.extend(system_libs)
subprocess.call(command)
if not os.path.exists(".\\testrfc5649.exe"):
if verbose:
print "Found AES key wrap with pad"
condvals["RFC5649"] = True
else:
if verbose:
print "can't compile Botan AES key wrap with pad"
# Botan GNU MP support
if botan_version_minor == 10:
if verbose:
print "checking Botan GNU MP support"
testfile = open("testgnump.cpp", "w")
print >>testfile, '\
#include <botan/build.h>\n\
int main() {\n\
#ifndef BOTAN_HAS_ENGINE_GNU_MP\n\
#error "No GNU MP support";\n\
#endif\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testgnump.cpp", lib]
command.extend(system_libs)
subprocess.call(command)
if not os.path.exists(".\\testgnump.exe"):
if verbose:
print "Botan GNU MP is supported"
else:
if verbose:
print "Botan GNU MP is not supported"
else:
condvals["OPENSSL"] = True
varvals["LIBNAME"] = "libeay32.lib"
varvals["EXTRALIBS"] = "crypt32.lib;"
openssl_path = os.path.abspath(openssl_path)
openssl_dll = os.path.join(openssl_path, "bin\\libeay32.dll")
varvals["DLLPATH"] = openssl_dll
openssl_inc = os.path.join(openssl_path, "include")
if not os.path.exists(os.path.join(openssl_inc, "openssl\\ssl.h")):
print >> sys.stderr, "can't find OpenSSL headers"
sys.exit(1)
varvals["INCLUDEPATH"] = openssl_inc
openssl_lib = os.path.join(openssl_path, "lib")
if not os.path.exists(os.path.join(openssl_lib, "libeay32.lib")):
print >> sys.stderr, "can't find OpenSSL library"
sys.exit(1)
varvals["LIBPATH"] = openssl_lib
if enable_debug:
debug_openssl_path = os.path.abspath(debug_openssl_path)
varvals["DEBUGDLLPATH"] = \
os.path.join(debug_openssl_path, "bin\\libeay32.dll")
debug_openssl_inc = os.path.join(debug_openssl_path, "include")
if not os.path.exists(os.path.join(debug_openssl_inc,
"openssl\\ssl.h")):
print >> sys.stderr, "can't find debug OpenSSL headers"
sys.exit(1)
varvals["DEBUGINCPATH"] = debug_openssl_inc
debug_openssl_lib = os.path.join(debug_openssl_path, "lib")
if not os.path.exists(os.path.join(debug_openssl_lib,
"libeay32.lib")):
print >> sys.stderr, "can't find debug OpenSSL library"
sys.exit(1)
varvals["DEBUGLIBPATH"] = debug_openssl_lib
else:
varvals["DEBUGDLLPATH"] = varvals["DLLPATH"]
varvals["DEBUGINCPATH"] = varvals["INCLUDEPATH"]
varvals["DEBUGLIBPATH"] = varvals["LIBPATH"]
# OpenSSL support
if verbose:
print "checking OpenSSL"
system_libs = []
if os.path.exists(openssl_dll):
subprocess.call(["copy", openssl_dll, "."], shell=True)
else:
system_libs = ["user32.lib", "advapi32.lib", "gdi32.lib", "crypt32.lib"]
inc = openssl_inc
lib = os.path.join(openssl_lib, "libeay32.lib")
testfile = open("testossl.c", "w")
print >>testfile, '\
#include <openssl/err.h>\n\
int main() {\n\
ERR_clear_error();\n\
return 0;\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testossl.c", lib]
command.extend(system_libs)
subprocess.check_output(command, stderr=subprocess.STDOUT)
if not os.path.exists(".\\testossl.exe"):
print >> sys.stderr, "can't create .\\testossl.exe"
sys.exit(1)
if subprocess.call(".\\testossl.exe") != 0:
print >> sys.stderr, "OpenSSL test failed"
sys.exit(1)
# OpenSSL version
if verbose:
print "checking OpenSSL version"
testfile = open("testosslv.c", "w")
print >>testfile, '\
#include <openssl/ssl.h>\n\
#include <openssl/opensslv.h>\n\
int main() {\n\
#ifndef OPENSSL_VERSION_NUMBER\n\
return -1;\n\
#endif\n\
#if OPENSSL_VERSION_NUMBER >= 0x010000000L\n\
return 0;\n\
#else\n\
return 1;\n\
#endif\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testosslv.c", lib]
command.extend(system_libs)
subprocess.check_output(command, stderr=subprocess.STDOUT)
if not os.path.exists(".\\testosslv.exe"):
print >> sys.stderr, "can't create .\\testosslv.exe"
sys.exit(1)
if subprocess.call(".\\testosslv.exe") != 0:
print >> sys.stderr, \
"OpenSLL version too old (1.0.0 or later required)"
sys.exit(1)
# OpenSSL ECC support
if enable_ecc:
if verbose:
print "checking OpenSSL ECC support"
testfile = open("testecc.c", "w")
print >>testfile, '\
#include <openssl/ecdsa.h>\n\
#include <openssl/objects.h>\n\
int main() {\n\
EC_KEY *ec256, *ec384;\n\
ec256 = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);\n\
ec384 = EC_KEY_new_by_curve_name(NID_secp384r1);\n\
if (ec256 == NULL || ec384 == NULL)\n\
return 1;\n\
return 0;\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testecc.c", lib]
command.extend(system_libs)
subprocess.check_output(command, stderr=subprocess.STDOUT)
if not os.path.exists(".\\testecc.exe"):
print >> sys.stderr, "can't create .\\testecc.exe"
sys.exit(1)
if subprocess.call(".\\testecc.exe") != 0:
print >> sys.stderr, "can't find P256 or P384: no ECC support"
sys.exit(1)
# OpenSSL GOST support
if enable_gost:
if verbose:
print "checking OpenSSL GOST support"
testfile = open("testgost.c", "w")
print >>testfile, '\
#include <openssl/conf.h>\n\
#include <openssl/engine.h>\n\
int main() {\n\
ENGINE *e;\n\
EC_KEY *ek;\n\
ek = NULL;\n\
OPENSSL_config(NULL);\n\
e = ENGINE_by_id("gost");\n\
if (e == NULL)\n\
return 1;\n\
if (ENGINE_init(e) <= 0)\n\
return 1;\n\
return 0;\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testgost.c", lib]
command.extend(system_libs)
subprocess.check_output(command, stderr=subprocess.STDOUT)
if not os.path.exists(".\\testgost.exe"):
print >> sys.stderr, "can't create .\\testgost.exe"
sys.exit(1)
if subprocess.call(".\\testgost.exe") != 0:
print >> sys.stderr, "can't find GOST: no GOST support"
sys.exit(1)
# OpenSSL EVP interface for AES key wrapping (aka RFC 3394)
if verbose:
print "checking OpenSSL EVP interface for AES key wrapping"
testfile = open("testrfc3394.c", "w")
print >>testfile, '\
#include <openssl/evp.h>\n\
int main() {\n\
EVP_aes_128_wrap();\n\
return 1;\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testrfc3394.c", lib]
command.extend(system_libs)
subprocess.call(command)
if os.path.exists(".\\testrfc3394.exe"):
if verbose:
print "RFC 3394 is supported"
condvals["RFC3394"] = True
else:
if verbose:
print "can't compile OpenSSL RFC 3394"
# OpenSSL EVP interface for AES key wrap with pad (aka RFC 5649)
if verbose:
print "checking OpenSSL EVP interface for AES key wrapping with pad"
testfile = open("testrfc5649.c", "w")
print >>testfile, '\
#include <openssl/evp.h>\n\
int main() {\n\
EVP_aes_128_wrap_pad();\n\
return 1;\n\
}'
testfile.close()
command = ["cl", "/nologo", "/MD", "/I", inc, "testrfc5649.c", lib]
command.extend(system_libs)
subprocess.call(command)
if os.path.exists(".\\testrfc5649.exe"):
if verbose:
print "RFC 5649 is supported"
condvals["RFC5649"] = True
else:
if verbose:
print "can't compile OpenSSL RFC 5649"
# configure CppUnit
if want_tests:
condvals["TESTS"] = True
cppunit_path = os.path.abspath(cppunit_path)
cppunit_inc = os.path.join(cppunit_path, "include")
if not os.path.exists(os.path.join(cppunit_inc, "cppunit\\Test.h")):
print >> sys.stderr, "can't find CppUnit headers"
sys.exit(1)
varvals["CUINCPATH"] = cppunit_inc
cppunit_lib = os.path.join(cppunit_path, "lib")
if not os.path.exists(os.path.join(cppunit_lib, "cppunit.lib")):
cppunit_lib = cppunit_path
if not os.path.exists(os.path.join(cppunit_lib, "cppunit.lib")):
print >> sys.stderr, "can't find CppUnit library"
sys.exit(1)
if enable_debug:
if not os.path.exists(os.path.join(cppunit_lib, "cppunitd.lib")):
print >> sys.stderr, "can't find debug CppUnit library"
sys.exit(1)
varvals["CULIBPATH"] = cppunit_lib
# misc
if enable_non_paged:
condvals["NONPAGE"] = True
def kw(path):
    """Quote a path for use in generated files.

    Returns the path wrapped in double quotes when it contains a space,
    otherwise returns it unchanged.
    """
    # A plain substring test replaces the needless regex search.
    if " " in path:
        return '"' + path + '"'
    return path
def setupfile(filename):
    """Generate ``filename`` from the template ``filename + ".in"``.

    The template supports nested conditional blocks driven by the global
    ``condvals`` mapping::

        @IF NAME ... [@ELSE NAME ...] @END NAME

    (the name after @ELSE/@END must match the open @IF), and inline
    ``@NAME@`` markers expanded from the global ``varvals`` mapping
    (values are quoted with kw() when they contain spaces; names not in
    ``varnames`` raise SyntaxError).
    """
    cond = "@@@"       # sentinel: name of the innermost open @IF (none yet)
    conds = []         # stack of enclosing condition names
    passing = True     # whether lines are currently copied to the output
    passes = []        # stack of 'passing' states of enclosing blocks
    filein = open(filename + ".in", "r")
    fileout = open(filename, "w")
    for line in filein:
        line = line.rstrip("\r\n")
        cif = re.match(r'@IF (.*)', line)
        if cif:
            # Open a new conditional block; remember the enclosing state.
            conds.append(cond)
            passes.append(passing)
            cond = cif.group(1)
            if condvals.get(cond):
                # do nothing
                pass
            else:
                passing = False
            continue
        celse = re.match(r'@ELSE (.*)', line)
        if celse:
            if cond != celse.group(1):
                raise SyntaxError("@ELSE " + celse.group(1) +
                                  " mismatch in " + filename)
            # Flip the branch, but stay suppressed when the enclosing
            # block is itself not passing.
            if condvals.get(cond):
                passing = False
            else:
                if len(passes) > 0:
                    passing = passes[-1]
                else:
                    passing = True
            continue
        cend = re.match(r'@END (.*)', line)
        if cend:
            if cond != cend.group(1):
                raise SyntaxError("@END " + cend.group(1) +
                                  " mismatch in " + filename)
            # Close the block and restore the enclosing state.
            cond = conds.pop()
            if len(passes) > 0:
                passing = passes.pop()
            else:
                passing = True
            continue
        if not passing:
            continue
        # Expand every @NAME@ marker on the line, left to right.
        while True:
            vm = re.match(r'([^@]*)@([^@ ]*)@(.*)', line)
            if vm:
                if vm.group(2) in varnames:
                    if varvals.get(vm.group(2)):
                        val = kw(varvals[vm.group(2)])
                    else:
                        val = ""
                    line = vm.group(1) + val + vm.group(3)
                    continue
                else:
                    raise SyntaxError("unknown control @" + vm.group(2) +
                                      "@ in " + filename)
            break
        print >>fileout, line
    if verbose:
        print "Setting up " + filename
    filein.close()
    fileout.close()
def main(args):
"""run it"""
# no arguments -> usage
if len(args) <= 1:
for line in usage:
print line
sys.exit(1)
parseargs(args[1:])
if want_help:
dohelp()
if want_clean:
doclean()
if want_unknown:
dounknown()
# status before config
if verbose:
if enable_keep:
print "keep: enabled"
else:
print "keep: disabled"
if platform == 64:
print "64bit: enabled"
else:
print "64bit: disabled"
if enable_debug:
print "debug: enabled"
else:
print "debug: disabled"
if enable_ecc:
print "ecc: enabled"
else:
print "ecc: disabled"
if enable_gost:
print "gost: enabled"
else:
print "gost: disabled"
if enable_non_paged:
print "non-paged-memory: enabled"
else:
print "non-paged-memory: disabled"
print "crypto-backend: " + crypto_backend
if crypto_backend == "botan":
print "botan-path: " + botan_path
if enable_debug:
print "debug-botan-path: " + debug_botan_path
else:
print "openssl-path: " + openssl_path
if enable_debug:
print "debug-openssl-path: " + debug_openssl_path
if want_tests:
print "cppunit-path: " + cppunit_path
doconfig()
# status after config
if verbose:
print "Configuration Status"
print "\tconditions:"
for name in condnames:
if condvals.get(name):
print "\t\t" + name + " is true"
else:
print "\t\t" + name + " is false"
print "\tsubstitutions:"
for name in varnames:
if varvals.get(name):
print "\t\t" + name + '-> "' + varvals[name] + '"'
print
for filename in filelist:
setupfile(filename)
# clean test file
if not enable_keep:
cleantest()
print "Configured."
sys.exit(0)
# Run the configuration only when executed as a script, so importing
# this file for inspection has no side effects.
if __name__ == "__main__":
    main(sys.argv)
# Notes: Unix configure.ac options
# --enable-64bit supported
# --enable-ecc supported
# --enable-gost supported
# --enable-non-paged-memory supported
# --enable-visibility (enforced by DLLs)
# --with-crypto-backend supported
# --with-botan supported (Release and Debug)
# --with-openssl supported (Release and Debug)
# --with-migrate (useless as SoftHSMv1 is not supported)
# --with-objectstore-backend-db (TODO)
# --with-sqlite3 (useless until objectstore backend can be chosen)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VirtualMachinesOperations(object):
"""VirtualMachinesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-05-15".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-05-15"
self.config = config
    def list(
            self, resource_group_name, lab_name, expand=None, filter=None, top=None, orderby=None, custom_headers=None, raw=False, **operation_config):
        """List virtual machines in a given lab.

        Returns a lazily-evaluated paged collection; each page is fetched
        on demand via the ``internal_paging`` callback below.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param lab_name: The name of the lab.
        :type lab_name: str
        :param expand: Specify the $expand query. Example:
         'properties($expand=artifacts,computeVm,networkInterface,applicableSchedule)'
        :type expand: str
        :param filter: The filter to apply to the operation.
        :type filter: str
        :param top: The maximum number of resources to return from the
         operation.
        :type top: int
        :param orderby: The ordering expression for the results, using OData
         notation.
        :type orderby: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`LabVirtualMachinePaged
         <azure.mgmt.devtestlabs.models.LabVirtualMachinePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL and OData query from the
            # outer arguments; later pages: follow the server-supplied
            # next_link verbatim (it already embeds the query string).
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'labName': self._serialize.url("lab_name", lab_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response: wrap the pager so items are deserialized
        # page by page as the caller iterates.
        deserialized = models.LabVirtualMachinePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.LabVirtualMachinePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
def get(
self, resource_group_name, lab_name, name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Get virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param name: The name of the virtual machine.
:type name: str
:param expand: Specify the $expand query. Example:
'properties($expand=artifacts,computeVm,networkInterface,applicableSchedule)'
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`LabVirtualMachine
<azure.mgmt.devtestlabs.models.LabVirtualMachine>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'name': self._serialize.url("name", name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LabVirtualMachine', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def create_or_update(
            self, resource_group_name, lab_name, name, lab_virtual_machine, custom_headers=None, raw=False, **operation_config):
        """Create or replace an existing Virtual machine. This operation can take
        a while to complete.

        Long-running PUT: returns an AzureOperationPoller that drives the
        send/poll/deserialize closures defined below.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param lab_name: The name of the lab.
        :type lab_name: str
        :param name: The name of the virtual machine.
        :type name: str
        :param lab_virtual_machine: A virtual machine.
        :type lab_virtual_machine: :class:`LabVirtualMachine
         <azure.mgmt.devtestlabs.models.LabVirtualMachine>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`LabVirtualMachine
         <azure.mgmt.devtestlabs.models.LabVirtualMachine>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'labName': self._serialize.url("lab_name", lab_name, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(lab_virtual_machine, 'LabVirtualMachine')
        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the operation status URL provided by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # Validate the final response and deserialize the VM on
            # either success code (200 updated, 201 created).
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('LabVirtualMachine', response)
            if response.status_code == 201:
                deserialized = self._deserialize('LabVirtualMachine', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        if raw:
            # raw mode: send once and return the first response, no polling.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def delete(
            self, resource_group_name, lab_name, name, custom_headers=None, raw=False, **operation_config):
        """Delete virtual machine. This operation can take a while to complete.

        Long-running DELETE: returns an AzureOperationPoller driving the
        send/poll/validate closures defined below; the result is None.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param lab_name: The name of the lab.
        :type lab_name: str
        :param name: The name of the virtual machine.
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'labName': self._serialize.url("lab_name", lab_name, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        def long_running_send():
            # Initial DELETE that starts the long-running operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the operation status URL provided by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # Validate the final response; the operation produces no body
            # (202 accepted, 204 no content), so only raw mode returns.
            if response.status_code not in [202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        if raw:
            # raw mode: send once and return the first response, no polling.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def update(
self, resource_group_name, lab_name, name, lab_virtual_machine, custom_headers=None, raw=False, **operation_config):
"""Modify properties of virtual machines.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param name: The name of the virtual machine.
:type name: str
:param lab_virtual_machine: A virtual machine.
:type lab_virtual_machine: :class:`LabVirtualMachineFragment
<azure.mgmt.devtestlabs.models.LabVirtualMachineFragment>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`LabVirtualMachine
<azure.mgmt.devtestlabs.models.LabVirtualMachine>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'name': self._serialize.url("name", name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(lab_virtual_machine, 'LabVirtualMachineFragment')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LabVirtualMachine', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def add_data_disk(
            self, resource_group_name, lab_name, name, data_disk_properties, custom_headers=None, raw=False, **operation_config):
        """Attach a new or existing data disk to virtual machine. This operation
        can take a while to complete.

        Long-running POST to the VM's addDataDisk action: returns an
        AzureOperationPoller driving the closures defined below; the
        result is None.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param lab_name: The name of the lab.
        :type lab_name: str
        :param name: The name of the virtual machine.
        :type name: str
        :param data_disk_properties: Request body for adding a new or existing
         data disk to a virtual machine.
        :type data_disk_properties: :class:`DataDiskProperties
         <azure.mgmt.devtestlabs.models.DataDiskProperties>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/addDataDisk'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'labName': self._serialize.url("lab_name", lab_name, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(data_disk_properties, 'DataDiskProperties')
        # Construct and send request
        def long_running_send():
            # Initial POST that starts the long-running operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the operation status URL provided by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # Validate the final response; the operation produces no body,
            # so only raw mode returns a value.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        if raw:
            # raw mode: send once and return the first response, no polling.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def apply_artifacts(
            self, resource_group_name, lab_name, name, artifacts=None, custom_headers=None, raw=False, **operation_config):
        """Apply artifacts to virtual machine. This operation can take a while to
        complete.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param lab_name: The name of the lab.
        :type lab_name: str
        :param name: The name of the virtual machine.
        :type name: str
        :param artifacts: The list of artifacts to apply.
        :type artifacts: list of :class:`ArtifactInstallProperties
         <azure.mgmt.devtestlabs.models.ArtifactInstallProperties>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Wrap the bare artifact list into the request-body model the API expects.
        apply_artifacts_request = models.ApplyArtifactsRequest(artifacts=artifacts)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/applyArtifacts'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'labName': self._serialize.url("lab_name", lab_name, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Unique id lets the service correlate this request in its logs.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(apply_artifacts_request, 'ApplyArtifactsRequest')

        # Construct and send request
        def long_running_send():
            # Initial POST that starts the long-running operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the Azure-provided status link until the operation finishes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200/202 are the only expected terminal codes; anything else is an error.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw mode: fire the request once, skip polling, hand back the raw response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def claim(
            self, resource_group_name, lab_name, name, custom_headers=None, raw=False, **operation_config):
        """Take ownership of an existing virtual machine. This operation can take
        a while to complete.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param lab_name: The name of the lab.
        :type lab_name: str
        :param name: The name of the virtual machine.
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/claim'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'labName': self._serialize.url("lab_name", lab_name, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Unique id lets the service correlate this request in its logs.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request.  Claim takes no request body, so the
        # initial POST is sent without body content.
        def long_running_send():
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the Azure-provided status link until the operation finishes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200/202 are the only expected terminal codes; anything else is an error.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw mode: fire the request once, skip polling, hand back the raw response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def detach_data_disk(
            self, resource_group_name, lab_name, name, existing_lab_disk_id=None, custom_headers=None, raw=False, **operation_config):
        """Detach the specified disk from the virtual machine. This operation can
        take a while to complete.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param lab_name: The name of the lab.
        :type lab_name: str
        :param name: The name of the virtual machine.
        :type name: str
        :param existing_lab_disk_id: Specifies the disk resource ID to detach
         from virtual machine.
        :type existing_lab_disk_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Wrap the disk id into the request-body model the API expects.
        detach_data_disk_properties = models.DetachDataDiskProperties(existing_lab_disk_id=existing_lab_disk_id)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/detachDataDisk'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'labName': self._serialize.url("lab_name", lab_name, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Unique id lets the service correlate this request in its logs.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(detach_data_disk_properties, 'DetachDataDiskProperties')

        # Construct and send request
        def long_running_send():
            # Initial POST that starts the long-running operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the Azure-provided status link until the operation finishes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200/202 are the only expected terminal codes; anything else is an error.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw mode: fire the request once, skip polling, hand back the raw response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def list_applicable_schedules(
self, resource_group_name, lab_name, name, custom_headers=None, raw=False, **operation_config):
"""Lists all applicable schedules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param name: The name of the virtual machine.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ApplicableSchedule
<azure.mgmt.devtestlabs.models.ApplicableSchedule>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/listApplicableSchedules'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'name': self._serialize.url("name", name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicableSchedule', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def start(
            self, resource_group_name, lab_name, name, custom_headers=None, raw=False, **operation_config):
        """Start a virtual machine. This operation can take a while to complete.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param lab_name: The name of the lab.
        :type lab_name: str
        :param name: The name of the virtual machine.
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/start'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'labName': self._serialize.url("lab_name", lab_name, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Unique id lets the service correlate this request in its logs.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request.  Start takes no request body.
        def long_running_send():
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the Azure-provided status link until the operation finishes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200/202 are the only expected terminal codes; anything else is an error.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw mode: fire the request once, skip polling, hand back the raw response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def stop(
            self, resource_group_name, lab_name, name, custom_headers=None, raw=False, **operation_config):
        """Stop a virtual machine. This operation can take a while to complete.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param lab_name: The name of the lab.
        :type lab_name: str
        :param name: The name of the virtual machine.
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/stop'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'labName': self._serialize.url("lab_name", lab_name, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Unique id lets the service correlate this request in its logs.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request.  Stop takes no request body.
        def long_running_send():
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the Azure-provided status link until the operation finishes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200/202 are the only expected terminal codes; anything else is an error.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw mode: fire the request once, skip polling, hand back the raw response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import mox
from oslo_serialization import jsonutils as json
from oslo_utils import timeutils
import six
from heat.common import exception
from heat.common import identifier
from heat.common import template_format
from heat.engine.clients.os import heat_plugin
from heat.engine.clients.os import swift as swift_plugin
from heat.engine import environment
from heat.engine.resources.openstack.heat import wait_condition_handle as h_wch
from heat.engine import stack as parser
from heat.engine import template as tmpl
from heat.objects import resource as resource_objects
from heat.tests import common
from heat.tests import utils
test_template_heat_waitcondition = '''
heat_template_version: 2013-05-23
resources:
wait_condition:
type: OS::Heat::WaitCondition
properties:
handle: {get_resource: wait_handle}
timeout: 5
wait_handle:
type: OS::Heat::WaitConditionHandle
'''
test_template_heat_waitcondition_count = '''
heat_template_version: 2013-05-23
resources:
wait_condition:
type: OS::Heat::WaitCondition
properties:
handle: {get_resource: wait_handle}
count: 3
timeout: 5
wait_handle:
type: OS::Heat::WaitConditionHandle
'''
test_template_heat_waithandle_token = '''
heat_template_version: 2013-05-23
resources:
wait_handle:
type: OS::Heat::WaitConditionHandle
'''
test_template_heat_waithandle_heat = '''
heat_template_version: 2013-05-23
resources:
wait_handle:
type: OS::Heat::WaitConditionHandle
properties:
signal_transport: HEAT_SIGNAL
'''
test_template_heat_waithandle_swift = '''
heat_template_version: 2013-05-23
resources:
wait_handle:
type: OS::Heat::WaitConditionHandle
properties:
signal_transport: TEMP_URL_SIGNAL
'''
test_template_heat_waithandle_zaqar = '''
heat_template_version: 2013-05-23
resources:
wait_handle:
type: OS::Heat::WaitConditionHandle
properties:
signal_transport: ZAQAR_SIGNAL
'''
test_template_heat_waithandle_none = '''
heat_template_version: 2013-05-23
resources:
wait_handle:
type: OS::Heat::WaitConditionHandle
properties:
signal_transport: NO_SIGNAL
'''
test_template_update_waithandle = '''
heat_template_version: 2013-05-23
resources:
update_wait_handle:
type: OS::Heat::UpdateWaitConditionHandle
'''
test_template_bad_waithandle = '''
heat_template_version: 2013-05-23
resources:
wait_condition:
type: OS::Heat::WaitCondition
properties:
handle: {get_resource: wait_handle}
timeout: 5
wait_handle:
type: OS::Heat::RandomString
'''
class HeatWaitConditionTest(common.HeatTestCase):
    """Tests for OS::Heat::WaitCondition and its handle resources.

    Uses mox (``self.m``) to stub out the handle's ``identifier`` and
    ``get_status`` methods so no real signalling backend is needed.
    """

    def setUp(self):
        super(HeatWaitConditionTest, self).setUp()
        self.tenant_id = 'test_tenant'

    def create_stack(self, stack_id=None,
                     template=test_template_heat_waitcondition_count,
                     params={},
                     stub=True, stub_status=True):
        # NOTE(review): `params={}` is a mutable default argument; safe only
        # because it is never mutated here -- confirm before changing.
        """Parse *template*, build and store a stack, and stub the handle.

        :param stack_id: fixed stack UUID; a random one is used when None.
        :param stub: stub HeatWaitConditionHandle.identifier when True.
        :param stub_status: stub HeatWaitConditionHandle.get_status when True.
        """
        temp = template_format.parse(template)
        template = tmpl.Template(temp,
                                 env=environment.Environment(params))
        ctx = utils.dummy_context(tenant_id=self.tenant_id)
        stack = parser.Stack(ctx, 'test_stack', template,
                             disable_rollback=True)

        # Stub out the stack ID so we have a known value
        if stack_id is None:
            stack_id = str(uuid.uuid4())
        self.stack_id = stack_id
        with utils.UUIDStub(self.stack_id):
            stack.store()

        if stub:
            id = identifier.ResourceIdentifier('test_tenant', stack.name,
                                               stack.id, '', 'wait_handle')
            self.m.StubOutWithMock(h_wch.HeatWaitConditionHandle,
                                   'identifier')
            h_wch.HeatWaitConditionHandle.identifier(
            ).MultipleTimes().AndReturn(id)

        if stub_status:
            self.m.StubOutWithMock(h_wch.HeatWaitConditionHandle,
                                   'get_status')

        return stack

    def test_post_complete_to_handle(self):
        """Condition completes once all three SUCCESS signals arrive."""
        self.stack = self.create_stack()
        # Successive polls see the signal list grow to the required count of 3.
        h_wch.HeatWaitConditionHandle.get_status().AndReturn(['SUCCESS'])
        h_wch.HeatWaitConditionHandle.get_status().AndReturn(['SUCCESS',
                                                              'SUCCESS'])
        h_wch.HeatWaitConditionHandle.get_status().AndReturn(['SUCCESS',
                                                              'SUCCESS',
                                                              'SUCCESS'])
        self.m.ReplayAll()
        self.stack.create()

        rsrc = self.stack['wait_condition']
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE),
                         rsrc.state)

        r = resource_objects.Resource.get_by_name_and_stack(
            None, 'wait_handle', self.stack.id)
        self.assertEqual('wait_handle', r.name)
        self.m.VerifyAll()

    def test_post_failed_to_handle(self):
        """A single FAILURE signal fails the whole wait condition."""
        self.stack = self.create_stack()
        h_wch.HeatWaitConditionHandle.get_status().AndReturn(['SUCCESS'])
        h_wch.HeatWaitConditionHandle.get_status().AndReturn(['SUCCESS',
                                                              'SUCCESS'])
        h_wch.HeatWaitConditionHandle.get_status().AndReturn(['SUCCESS',
                                                              'SUCCESS',
                                                              'FAILURE'])
        self.m.ReplayAll()
        self.stack.create()

        rsrc = self.stack['wait_condition']
        self.assertEqual((rsrc.CREATE, rsrc.FAILED),
                         rsrc.state)
        reason = rsrc.status_reason
        self.assertTrue(reason.startswith('WaitConditionFailure:'))

        r = resource_objects.Resource.get_by_name_and_stack(
            None, 'wait_handle', self.stack.id)
        self.assertEqual('wait_handle', r.name)
        self.m.VerifyAll()

    def test_bad_wait_handle(self):
        """Referencing a non-handle resource fails create with ValueError."""
        self.stack = self.create_stack(
            template=test_template_bad_waithandle)
        self.m.ReplayAll()
        self.stack.create()
        rsrc = self.stack['wait_condition']
        self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
        reason = rsrc.status_reason
        self.assertEqual(reason, 'ValueError: resources.wait_condition: '
                                 'wait_handle is not a valid wait condition '
                                 'handle.')

    def test_timeout(self):
        """Condition fails with WaitConditionTimeout when no signals arrive."""
        self.stack = self.create_stack()

        # Avoid the stack create exercising the timeout code at the same time
        self.m.StubOutWithMock(self.stack, 'timeout_secs')
        self.stack.timeout_secs().MultipleTimes().AndReturn(None)

        now = timeutils.utcnow()
        # Fake clock jumps well past the 5 second template timeout.
        periods = [0, 0.001, 0.1, 4.1, 5.1]
        periods.extend(range(10, 100, 5))
        fake_clock = [now + datetime.timedelta(0, t) for t in periods]
        timeutils.set_time_override(fake_clock)
        self.addCleanup(timeutils.clear_time_override)

        h_wch.HeatWaitConditionHandle.get_status(
        ).MultipleTimes().AndReturn([])

        self.m.ReplayAll()

        self.stack.create()

        rsrc = self.stack['wait_condition']

        self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
        reason = rsrc.status_reason
        self.assertTrue(reason.startswith('WaitConditionTimeout:'))
        self.m.VerifyAll()

    def _create_heat_wc_and_handle(self):
        """Create a stack with a count-1 condition; return (condition, handle)."""
        self.stack = self.create_stack(
            template=test_template_heat_waitcondition)
        h_wch.HeatWaitConditionHandle.get_status().AndReturn(['SUCCESS'])

        self.m.ReplayAll()
        self.stack.create()

        rsrc = self.stack['wait_condition']
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)

        wc_att = rsrc.FnGetAtt('data')
        self.assertEqual(six.text_type({}), wc_att)

        handle = self.stack['wait_handle']
        self.assertEqual((handle.CREATE, handle.COMPLETE), handle.state)
        return (rsrc, handle)

    def test_data(self):
        """Signals carrying explicit ids accumulate in the 'data' attribute."""
        rsrc, handle = self._create_heat_wc_and_handle()

        test_metadata = {'data': 'foo', 'reason': 'bar',
                         'status': 'SUCCESS', 'id': '123'}
        ret = handle.handle_signal(details=test_metadata)
        wc_att = rsrc.FnGetAtt('data')
        self.assertEqual('{"123": "foo"}', wc_att)
        self.assertEqual('status:SUCCESS reason:bar', ret)

        test_metadata = {'data': 'dog', 'reason': 'cat',
                         'status': 'SUCCESS', 'id': '456'}
        ret = handle.handle_signal(details=test_metadata)
        wc_att = rsrc.FnGetAtt('data')
        self.assertEqual(json.loads(u'{"123": "foo", "456": "dog"}'),
                         json.loads(wc_att))
        self.assertEqual('status:SUCCESS reason:cat', ret)
        self.m.VerifyAll()

    def test_data_noid(self):
        """Signals without an id get sequential ids ('1', '2', ...)."""
        rsrc, handle = self._create_heat_wc_and_handle()

        test_metadata = {'data': 'foo', 'reason': 'bar',
                         'status': 'SUCCESS'}
        ret = handle.handle_signal(details=test_metadata)
        wc_att = rsrc.FnGetAtt('data')
        self.assertEqual('{"1": "foo"}', wc_att)
        self.assertEqual('status:SUCCESS reason:bar', ret)

        test_metadata = {'data': 'dog', 'reason': 'cat',
                         'status': 'SUCCESS'}
        ret = handle.handle_signal(details=test_metadata)
        wc_att = rsrc.FnGetAtt('data')
        self.assertEqual(json.loads(u'{"1": "foo", "2": "dog"}'),
                         json.loads(wc_att))
        self.assertEqual('status:SUCCESS reason:cat', ret)
        self.m.VerifyAll()

    def test_data_nodata(self):
        """Signals with no details at all are recorded with null data."""
        rsrc, handle = self._create_heat_wc_and_handle()

        ret = handle.handle_signal()
        expected = 'status:SUCCESS reason:Signal 1 received'
        self.assertEqual(expected, ret)
        wc_att = rsrc.FnGetAtt('data')
        self.assertEqual('{"1": null}', wc_att)

        handle.handle_signal()
        wc_att = rsrc.FnGetAtt('data')
        self.assertEqual(json.loads(u'{"1": null, "2": null}'),
                         json.loads(wc_att))
        self.m.VerifyAll()

    def test_data_partial_complete(self):
        """Status-only signals are accepted and recorded with null data."""
        rsrc, handle = self._create_heat_wc_and_handle()

        test_metadata = {'status': 'SUCCESS'}
        ret = handle.handle_signal(details=test_metadata)
        expected = 'status:SUCCESS reason:Signal 1 received'
        self.assertEqual(expected, ret)

        wc_att = rsrc.FnGetAtt('data')
        self.assertEqual('{"1": null}', wc_att)

        test_metadata = {'status': 'SUCCESS'}
        ret = handle.handle_signal(details=test_metadata)
        expected = 'status:SUCCESS reason:Signal 2 received'
        self.assertEqual(expected, ret)

        wc_att = rsrc.FnGetAtt('data')
        self.assertEqual(json.loads(u'{"1": null, "2": null}'),
                         json.loads(wc_att))
        self.m.VerifyAll()

    def _create_heat_handle(self,
                            template=test_template_heat_waithandle_token):
        """Create a stack containing only a handle; return the handle resource."""
        self.stack = self.create_stack(template=template, stub_status=False)

        self.m.ReplayAll()
        self.stack.create()

        handle = self.stack['wait_handle']
        self.assertEqual((handle.CREATE, handle.COMPLETE), handle.state)
        self.assertIsNotNone(handle.password)
        self.assertEqual(handle.resource_id, handle.data().get('user_id'))
        return handle

    def test_get_status_none_complete(self):
        """A detail-less signal yields SUCCESS status and expected metadata."""
        handle = self._create_heat_handle()

        ret = handle.handle_signal()
        expected = 'status:SUCCESS reason:Signal 1 received'
        self.assertEqual(expected, ret)
        self.assertEqual(['SUCCESS'], handle.get_status())
        md_expected = {'1': {'data': None, 'reason': 'Signal 1 received',
                             'status': 'SUCCESS'}}
        self.assertEqual(md_expected, handle.metadata_get())
        self.m.VerifyAll()

    def test_get_status_partial_complete(self):
        """A status-only SUCCESS signal is reflected by get_status()."""
        handle = self._create_heat_handle()
        test_metadata = {'status': 'SUCCESS'}
        ret = handle.handle_signal(details=test_metadata)
        expected = 'status:SUCCESS reason:Signal 1 received'
        self.assertEqual(expected, ret)
        self.assertEqual(['SUCCESS'], handle.get_status())
        md_expected = {'1': {'data': None, 'reason': 'Signal 1 received',
                             'status': 'SUCCESS'}}
        self.assertEqual(md_expected, handle.metadata_get())
        self.m.VerifyAll()

    def test_get_status_failure(self):
        """A FAILURE signal is reflected by get_status() and the metadata."""
        handle = self._create_heat_handle()
        test_metadata = {'status': 'FAILURE'}
        ret = handle.handle_signal(details=test_metadata)
        expected = 'status:FAILURE reason:Signal 1 received'
        self.assertEqual(expected, ret)
        self.assertEqual(['FAILURE'], handle.get_status())
        md_expected = {'1': {'data': None, 'reason': 'Signal 1 received',
                             'status': 'FAILURE'}}
        self.assertEqual(md_expected, handle.metadata_get())
        self.m.VerifyAll()

    def test_getatt_token(self):
        """Token transport exposes the stubbed keystone token attribute."""
        handle = self._create_heat_handle()
        self.assertEqual('adomainusertoken', handle.FnGetAtt('token'))
        self.m.VerifyAll()

    def test_getatt_endpoint(self):
        """The endpoint attribute is the heat signal URL for the handle."""
        self.m.StubOutWithMock(heat_plugin.HeatClientPlugin, 'get_heat_url')
        heat_plugin.HeatClientPlugin.get_heat_url().AndReturn(
            'foo/%s' % self.tenant_id)
        self.m.ReplayAll()

        handle = self._create_heat_handle()
        expected = ('foo/aprojectid/stacks/test_stack/%s/resources/'
                    'wait_handle/signal'
                    % self.stack_id)
        self.assertEqual(expected, handle.FnGetAtt('endpoint'))
        self.m.VerifyAll()

    def test_getatt_curl_cli(self):
        """The curl_cli attribute is a ready-to-run signalling command."""
        self.m.StubOutWithMock(heat_plugin.HeatClientPlugin, 'get_heat_url')
        heat_plugin.HeatClientPlugin.get_heat_url().AndReturn(
            'foo/%s' % self.tenant_id)
        self.m.ReplayAll()

        handle = self._create_heat_handle()
        expected = ("curl -i -X POST -H 'X-Auth-Token: adomainusertoken' "
                    "-H 'Content-Type: application/json' "
                    "-H 'Accept: application/json' "
                    "foo/aprojectid/stacks/test_stack/%s/resources/wait_handle"
                    "/signal" % self.stack_id)
        self.assertEqual(expected, handle.FnGetAtt('curl_cli'))
        self.m.VerifyAll()

    def test_getatt_signal_heat(self):
        """HEAT_SIGNAL transport exposes credentials via the signal attribute."""
        handle = self._create_heat_handle(
            template=test_template_heat_waithandle_heat)
        self.assertIsNone(handle.FnGetAtt('token'))
        self.assertIsNone(handle.FnGetAtt('endpoint'))
        self.assertIsNone(handle.FnGetAtt('curl_cli'))
        signal = json.loads(handle.FnGetAtt('signal'))
        self.assertIn('alarm_url', signal)
        self.assertIn('username', signal)
        self.assertIn('password', signal)
        self.assertIn('auth_url', signal)
        self.assertIn('project_id', signal)
        self.assertIn('domain_id', signal)

    def test_getatt_signal_swift(self):
        """TEMP_URL_SIGNAL transport exposes a Swift TempURL as alarm_url."""
        self.m.StubOutWithMock(swift_plugin.SwiftClientPlugin, 'get_temp_url')
        self.m.StubOutWithMock(swift_plugin.SwiftClientPlugin, 'client')

        # Minimal fake swift client: container/object creation is a no-op.
        class mock_swift(object):
            @staticmethod
            def put_container(container, **kwargs):
                pass

            @staticmethod
            def put_object(container, object, contents, **kwargs):
                pass

        swift_plugin.SwiftClientPlugin.client().AndReturn(mock_swift)
        swift_plugin.SwiftClientPlugin.client().AndReturn(mock_swift)
        swift_plugin.SwiftClientPlugin.client().AndReturn(mock_swift)
        swift_plugin.SwiftClientPlugin.get_temp_url(mox.IgnoreArg(),
                                                    mox.IgnoreArg(),
                                                    mox.IgnoreArg()
                                                    ).AndReturn('foo')
        self.m.ReplayAll()

        handle = self._create_heat_handle(
            template=test_template_heat_waithandle_swift)
        self.assertIsNone(handle.FnGetAtt('token'))
        self.assertIsNone(handle.FnGetAtt('endpoint'))
        self.assertIsNone(handle.FnGetAtt('curl_cli'))
        signal = json.loads(handle.FnGetAtt('signal'))
        self.assertIn('alarm_url', signal)

    def test_getatt_signal_zaqar(self):
        """ZAQAR_SIGNAL transport exposes queue id and credentials."""
        handle = self._create_heat_handle(
            template=test_template_heat_waithandle_zaqar)
        self.assertIsNone(handle.FnGetAtt('token'))
        self.assertIsNone(handle.FnGetAtt('endpoint'))
        self.assertIsNone(handle.FnGetAtt('curl_cli'))
        signal = json.loads(handle.FnGetAtt('signal'))
        self.assertIn('queue_id', signal)
        self.assertIn('username', signal)
        self.assertIn('password', signal)
        self.assertIn('auth_url', signal)
        self.assertIn('project_id', signal)
        self.assertIn('domain_id', signal)

    def test_getatt_signal_none(self):
        """NO_SIGNAL transport exposes no attributes and an empty signal."""
        handle = self._create_heat_handle(
            template=test_template_heat_waithandle_none)
        self.assertIsNone(handle.FnGetAtt('token'))
        self.assertIsNone(handle.FnGetAtt('endpoint'))
        self.assertIsNone(handle.FnGetAtt('curl_cli'))
        self.assertEqual('{}', handle.FnGetAtt('signal'))

    def test_create_update_updatehandle(self):
        """UpdateWaitConditionHandle creates fine but is replaced on update."""
        self.stack = self.create_stack(
            template=test_template_update_waithandle, stub_status=False)
        self.m.ReplayAll()
        self.stack.create()
        handle = self.stack['update_wait_handle']
        self.assertEqual((handle.CREATE, handle.COMPLETE), handle.state)
        self.assertRaises(
            exception.UpdateReplace, handle.update, None, None)
| |
# Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from collections import defaultdict
from bisect import bisect_left
from threading import RLock
from whoosh.compat import iteritems, zip_
from whoosh.fields import UnknownFieldError
from whoosh.matching import ListMatcher, NullMatcher
from whoosh.reading import IndexReader, TermInfo, TermNotFound
from whoosh.writing import IndexWriter
from whoosh.util import synchronized
class RamIndex(IndexReader, IndexWriter):
    """An all-in-memory index that acts as both a reader and a writer.

    Data layout:

    * ``invindex`` -- ``{fieldname: {text: [(docnum, weight, valuestring)]}}``
    * ``indexfreqs`` -- ``{(fieldname, text): total term frequency}``
    * ``fieldlengths`` -- ``{(docnum, fieldname): field length}``
    * ``termstats`` -- ``{(fieldname, text): [min_length, max_length,
      max_weight, max_wol]}``
    * ``vectors`` -- ``{(docnum, fieldname): [(text, weight, valuestring)]}``

    Deletions are recorded in ``self.deleted`` and filtered out lazily by
    the reading methods; documents are never physically removed.
    """

    def __init__(self, schema):
        self.schema = schema
        # Next document number to assign.
        self.docnum = 0
        # Guards the methods wrapped with @synchronized.
        self._sync_lock = RLock()
        self.is_closed = False
        self.clear()

    @synchronized
    def clear(self):
        """Reset the index to an empty state."""
        self.invindex = {}
        self.indexfreqs = defaultdict(int)
        self.storedfields = []
        self.fieldlengths = defaultdict(int)
        self.termstats = {}
        self.vectors = {}
        self.deleted = set()
        # Rough estimate of the memory used by the index, in bytes.
        self.usage = 0

    @synchronized
    def __contains__(self, term):
        # ``term`` is a (fieldname, text) tuple.
        try:
            self.invindex[term[0]][term[1]]
            return True
        except KeyError:
            return False

    def close(self):
        # Nothing to release; all state lives in memory.
        pass

    @synchronized
    def has_deletions(self):
        return bool(self.deleted)

    @synchronized
    def is_deleted(self, docnum):
        return docnum in self.deleted

    @synchronized
    def delete_document(self, docnum, delete=True):
        """Mark a document deleted (or undeleted, with delete=False)."""
        if delete:
            self.deleted.add(docnum)
        else:
            self.deleted.remove(docnum)

    @synchronized
    def stored_fields(self, docnum):
        return self.storedfields[docnum]

    @synchronized
    def all_stored_fields(self):
        """Return a generator of stored fields for non-deleted documents."""
        deleted = self.deleted
        return (sf for i, sf in enumerate(self.storedfields)
                if i not in deleted)

    def _test_field(self, fieldname):
        # Raise TermNotFound for unknown or unindexed fields.
        if fieldname not in self.schema:
            raise TermNotFound("No field %r" % fieldname)
        if self.schema[fieldname].format is None:
            raise TermNotFound("Field %r is not indexed" % fieldname)

    @synchronized
    def field_length(self, fieldname):
        """Return the total number of terms in ``fieldname`` across all
        documents, or 0 for unscorable fields.
        """
        self._test_field(fieldname)
        # _test_field guarantees fieldname is in the schema here.
        if not self.schema[fieldname].scorable:
            return 0
        return sum(l for docnum_fieldname, l in iteritems(self.fieldlengths)
                   if docnum_fieldname[1] == fieldname)

    @synchronized
    def max_field_length(self, fieldname):
        """Return the largest per-document length of ``fieldname``.

        NOTE(review): like the original, this raises ValueError if no
        document has a length recorded for the field.
        """
        self._test_field(fieldname)
        if not self.schema[fieldname].scorable:
            return 0
        return max(l for docnum_fieldname, l in iteritems(self.fieldlengths)
                   if docnum_fieldname[1] == fieldname)

    @synchronized
    def min_field_length(self, fieldname):
        """Return the smallest per-document length of ``fieldname``.

        NOTE(review): like the original, this raises ValueError if no
        document has a length recorded for the field.
        """
        self._test_field(fieldname)
        if not self.schema[fieldname].scorable:
            return 0
        return min(l for docnum_fieldname, l in iteritems(self.fieldlengths)
                   if docnum_fieldname[1] == fieldname)

    def doc_field_length(self, docnum, fieldname, default=0):
        self._test_field(fieldname)
        return self.fieldlengths.get((docnum, fieldname), default)

    def has_vector(self, docnum, fieldname):
        return (docnum, fieldname) in self.vectors

    @synchronized
    def vector(self, docnum, fieldname):
        """Return a ListMatcher over the stored term vector for a document
        field.

        :raises TermNotFound: if the field is not in the schema.
        :raises Exception: if the field does not store vectors.
        """
        if fieldname not in self.schema:
            raise TermNotFound("No field %r" % fieldname)
        # Fix: the original assigned vformat twice; once is enough.
        vformat = self.schema[fieldname].vector
        if not vformat:
            raise Exception("No vectors are stored for field %r" % fieldname)
        ids, weights, values = zip_(*self.vectors[docnum, fieldname])
        return ListMatcher(ids, weights, values, format=vformat)

    def frequency(self, fieldname, text):
        self._test_field(fieldname)
        return self.indexfreqs.get((fieldname, text), 0)

    def doc_frequency(self, fieldname, text):
        self._test_field(fieldname)
        try:
            return len(self.invindex[fieldname][text])
        except KeyError:
            return 0

    def term_info(self, fieldname, text):
        w = self.frequency(fieldname, text)
        df = self.doc_frequency(fieldname, text)
        ml, xl, xw, _ = self.termstats[fieldname, text]
        # Postings are appended in increasing docnum order, so the first
        # and last entries give the min and max document ids.
        plist = self.invindex[fieldname][text]
        mid = plist[0][0]
        xid = plist[-1][0]
        return TermInfo(w, df, ml, xl, xw, mid, xid)

    def all_terms(self):
        """Yield (fieldname, text) pairs in sorted order."""
        invindex = self.invindex
        for fieldname in sorted(invindex):
            for text in sorted(invindex[fieldname]):
                yield (fieldname, text)

    @synchronized
    def first_id(self, fieldname, text):
        # Override to not construct a posting reader, just pull the first
        # non-deleted docnum out of the list directly
        self._test_field(fieldname)
        try:
            plist = self.invindex[fieldname][text]
        except KeyError:
            raise TermNotFound((fieldname, text))
        else:
            deleted = self.deleted
            for posting in plist:
                docnum = posting[0]
                if docnum not in deleted:
                    return docnum

    @synchronized
    def postings(self, fieldname, text, scorer=None):
        """Return a matcher over the postings of (fieldname, text),
        excluding deleted documents.
        """
        self._test_field(fieldname)
        try:
            terminfo = self.term_info(fieldname, text)
        except KeyError:
            raise TermNotFound((fieldname, text))
        format = self.schema[fieldname].format
        postings = self.invindex[fieldname][text]
        excludeset = self.deleted
        if excludeset:
            postings = [x for x in postings if x[0] not in excludeset]
            if not postings:
                return NullMatcher()
        ids, weights, values = zip_(*postings)
        return ListMatcher(ids, weights, values, format=format, scorer=scorer,
                           term=(fieldname, text), terminfo=terminfo)

    def reader(self):
        # The index acts as its own reader.
        return self

    def searcher(self, **kwargs):
        from whoosh.searching import Searcher
        return Searcher(self.reader(), **kwargs)

    def writer(self, **kwargs):
        # The index acts as its own writer.
        return self

    def doc_count_all(self):
        return len(self.storedfields)

    def doc_count(self):
        return len(self.storedfields) - len(self.deleted)

    @synchronized
    def update_document(self, **fields):
        super(RamIndex, self).update_document(**fields)

    @synchronized
    def add_document(self, **fields):
        """Index a single document.

        Keyword arguments map field names to values; keys starting with
        an underscore (e.g. ``_stored_<name>``, boost keys) are treated
        as directives rather than fields.
        """
        schema = self.schema
        invindex = self.invindex
        indexfreqs = self.indexfreqs
        fieldlengths = self.fieldlengths
        termstats = self.termstats
        docboost = self._doc_boost(fields)
        usage = 0

        fieldnames = [name for name in sorted(fields.keys())
                      if not name.startswith("_")]
        for name in fieldnames:
            if name not in schema:
                raise UnknownFieldError("There is no field named %r" % name)
            if name not in invindex:
                invindex[name] = {}

        storedvalues = {}
        for name in fieldnames:
            field = schema[name]
            value = fields.get(name)
            if value:
                fielddict = invindex[name]
                # If the field is indexed, add the words in the value to
                # the index
                if field.indexed:
                    fieldboost = self._field_boost(fields, name, docboost)
                    # Count of all terms in the value
                    count = 0
                    # Count of UNIQUE terms in the value
                    unique = 0
                    words = []
                    for w, freq, weight, valuestring in field.index(value):
                        weight *= fieldboost
                        words.append((w, weight))
                        if w not in fielddict:
                            fielddict[w] = []
                        fielddict[w].append((self.docnum, weight, valuestring))
                        indexfreqs[name, w] += freq
                        count += freq
                        unique += 1
                        # NOTE(review): assumes valuestring is not None --
                        # confirm against the field format.
                        usage += 44 + len(valuestring)
                    # Fix: the original computed ``wol = weight / count``
                    # here, before the loop below; that statement was dead
                    # (recomputed per term) and raised NameError /
                    # ZeroDivisionError when the value produced no terms.
                    # Record per-term stats for each term in this document:
                    # [min_length, max_length, max_weight, max_wol]
                    for w, weight in words:
                        ts = termstats.get((name, w))
                        wol = weight / count
                        if ts is None:
                            termstats[name, w] = [count, count, weight, wol]
                        else:
                            if count < ts[0]:
                                ts[0] = count
                            if count > ts[1]:
                                ts[1] = count
                            if weight > ts[2]:
                                ts[2] = weight
                            if wol > ts[3]:
                                ts[3] = wol

                if field.scorable:
                    fieldlengths[self.docnum, name] = count
                    usage += 36

                vformat = field.vector
                if vformat:
                    wvs = vformat.word_values(value, field.analyzer,
                                              mode="index")
                    vlist = sorted((w, weight, valuestring)
                                   for w, _, weight, valuestring in wvs)
                    self.vectors[self.docnum, name] = vlist
                    usage += 28
                    for x in vlist:
                        usage += 44 + len(x[2])

                if field.stored:
                    # Allow an explicit "_stored_<name>" value to override
                    # what is stored for the field.
                    storedname = "_stored_" + name
                    if storedname in fields:
                        stored_value = fields[storedname]
                    else:
                        stored_value = value
                    storedvalues[name] = stored_value
                    usage += 28 + len(name)  # + len(stored_value)

        self.storedfields.append(storedvalues)
        self.usage += usage
        self.docnum += 1

    def commit(self):
        # Nothing to flush; changes are visible immediately.
        pass
| |
# Copyright 2012 Nicira Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
import ConfigParser
import logging
import os
import sys
import NvpApiClient
import nvplib
from quantum.common import exceptions as exception
from quantum.plugins.nicira.nicira_nvp_plugin.api_client.client_eventlet \
import (
DEFAULT_CONCURRENT_CONNECTIONS,
DEFAULT_FAILOVER_TIME,
)
from quantum.plugins.nicira.nicira_nvp_plugin.api_client.request_eventlet \
import (
DEFAULT_REQUEST_TIMEOUT,
DEFAULT_HTTP_TIMEOUT,
DEFAULT_RETRIES,
DEFAULT_REDIRECTS,
)
# Module-level logger shared by the plugin entry points below.
LOG = logging.getLogger("QuantumPlugin")
# Name of the plugin configuration file, and the fallback directories that
# are searched (in order) when it is not found beside the plugin.
CONFIG_FILE = "nvp.ini"
CONFIG_FILE_PATHS = []
if os.environ.get('QUANTUM_HOME', None):
    CONFIG_FILE_PATHS.append('%s/etc' % os.environ['QUANTUM_HOME'])
CONFIG_FILE_PATHS.append("/etc/quantum/plugins/nicira")
# Options read from the [NVP] section for the old style single-controller
# configuration; the order mirrors add_controller()'s positional arguments.
CONFIG_KEYS = ["DEFAULT_TZ_UUID", "NVP_CONTROLLER_IP", "PORT", "USER",
               "PASSWORD"]
def initConfig(cfile=None):
    """Locate and read the plugin configuration file.

    :param cfile: explicit path to the config file; when None, the
        current directory is tried first, then the plugin directory
        tree is searched via find_config().
    :returns: a populated ConfigParser instance.
    :raises Exception: if no configuration file could be found.
    """
    config = ConfigParser.ConfigParser()
    if cfile is None:
        if os.path.exists(CONFIG_FILE):
            cfile = CONFIG_FILE
        else:
            here = os.path.abspath(os.path.dirname(__file__))
            cfile = find_config(here)
    if cfile is None:
        raise Exception("Configuration file \"%s\" doesn't exist" % (cfile))
    LOG.info("Using configuration file: %s" % cfile)
    config.read(cfile)
    LOG.debug("Config: %s" % config)
    return config
def find_config(basepath):
    """Search for the config file under basepath, then in the well-known
    alternate locations.

    :param basepath: directory tree to walk (symlinks followed).
    :returns: the full path of the first match, or None.
    """
    LOG.info("Looking for %s in %s" % (CONFIG_FILE, basepath))
    for root, _dirs, files in os.walk(basepath, followlinks=True):
        if CONFIG_FILE in files:
            return os.path.join(root, CONFIG_FILE)
    for alternate_path in CONFIG_FILE_PATHS:
        candidate = os.path.join(alternate_path, CONFIG_FILE)
        if os.path.exists(candidate):
            return candidate
    return None
def parse_config(config):
    """Backwards compatible parsing.

    :param config: ConfigParser object initialized with nvp.ini.
    :returns: A tuple consisting of a control cluster object and a
        plugin_config variable.
    raises: In general, system exceptions are not caught but are propagated
        up to the user. Config parsing is still very lightweight.
        At some point, error handling needs to be significantly
        enhanced to provide user friendly error messages, clean program
        exits, rather than exceptions propagated to the user.
    """
    # Plugin-level settings fall back to the client library defaults when
    # the options are absent from the config file.
    try:
        failover_time = config.get('NVP', 'failover_time')
    except ConfigParser.NoOptionError:
        failover_time = str(DEFAULT_FAILOVER_TIME)
    try:
        concurrent_connections = config.get('NVP', 'concurrent_connections')
    except ConfigParser.NoOptionError:
        concurrent_connections = str(DEFAULT_CONCURRENT_CONNECTIONS)
    plugin_config = {
        'failover_time': failover_time,
        'concurrent_connections': concurrent_connections,
    }
    LOG.info('parse_config(): plugin_config == "%s"' % plugin_config)

    cluster = NVPCluster('cluster1')
    # Prefer the new style NVP_CONTROLLER_CONNECTIONS list; fall back to
    # the old style single-controller keys when it is not defined.
    try:
        defined_connections = config.get('NVP', 'NVP_CONTROLLER_CONNECTIONS')
        for conn_key in defined_connections.split():
            args = [config.get('NVP', 'DEFAULT_TZ_UUID')]
            args.extend(config.get('NVP', conn_key).split(':'))
            try:
                cluster.add_controller(*args)
            except Exception as e:
                LOG.fatal('Invalid connection parameters: %s' % str(e))
                sys.exit(1)
        return cluster, plugin_config
    except Exception as e:
        LOG.info('No new style connections defined: %s' % e)

    # Old style controller specification.
    args = [config.get('NVP', k) for k in CONFIG_KEYS]
    try:
        cluster.add_controller(*args)
    except Exception:
        LOG.fatal('Invalid connection parameters.')
        sys.exit(1)
    return cluster, plugin_config
class NVPCluster(object):
    """Encapsulates controller connection and api_client.

    Initialized within parse_config().
    Accessed within the NvpPlugin class.

    Each element in the self.controllers list is a dictionary that
    contains the following keys:
        ip, port, user, password, default_tz_uuid

    There may be some redundancy here, but that has been done to provide
    future flexibility.
    """

    def __init__(self, name):
        self._name = name
        self.controllers = []
        self.api_client = None

    def __repr__(self):
        parts = ['{ "NVPCluster": [']
        parts.append('{ "name" : "%s" }' % self.name)
        parts.append(',')
        for controller in self.controllers:
            parts.append(str(controller))
            parts.append(',')
        parts.append('] }')
        return ''.join(parts)

    def add_controller(self, default_tz_uuid, ip, port, user, password,
                       request_timeout=DEFAULT_REQUEST_TIMEOUT,
                       http_timeout=DEFAULT_HTTP_TIMEOUT,
                       retries=DEFAULT_RETRIES, redirects=DEFAULT_REDIRECTS):
        """Add a new set of controller parameters.

        :param ip: IP address of controller.
        :param port: port controller is listening on.
        :param user: user name.
        :param password: user password.
        :param request_timeout: timeout for an entire API request.
        :param http_timeout: timeout for a connect to a controller.
        :param retries: maximum number of request retries.
        :param redirects: maximum number of server redirect responses to
            follow.
        :param default_tz_uuid: default transport zone uuid.
        """
        controller = {
            'ip': ip,
            'port': port,
            'user': user,
            'password': password,
            'default_tz_uuid': default_tz_uuid,
            # The timeout/retry settings are always normalized to int.
            'request_timeout': int(request_timeout),
            'http_timeout': int(http_timeout),
            'retries': int(retries),
            'redirects': int(redirects),
        }
        self.controllers.append(controller)

    def get_controller(self, idx):
        return self.controllers[idx]

    def _primary(self, key):
        # The scalar properties below all report values from the first
        # controller in the list.
        return self.controllers[0][key]

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, val=None):
        self._name = val

    @property
    def host(self):
        return self._primary('ip')

    @property
    def port(self):
        return self._primary('port')

    @property
    def user(self):
        return self._primary('user')

    @property
    def password(self):
        return self._primary('password')

    @property
    def request_timeout(self):
        return self._primary('request_timeout')

    @property
    def http_timeout(self):
        return self._primary('http_timeout')

    @property
    def retries(self):
        return self._primary('retries')

    @property
    def redirects(self):
        return self._primary('redirects')

    @property
    def default_tz_uuid(self):
        return self._primary('default_tz_uuid')
class NvpPlugin(object):
    """
    NvpPlugin is a Quantum plugin that provides L2 Virtual Network
    functionality using NVP.
    """
    supported_extension_aliases = ["portstats"]

    def __init__(self, configfile=None, loglevel=None, cli=False):
        if loglevel:
            logging.basicConfig(level=loglevel)
            nvplib.LOG.setLevel(loglevel)
            NvpApiClient.LOG.setLevel(loglevel)
        config = initConfig(configfile)
        self.controller, self.plugin_config = parse_config(config)
        c = self.controller
        # Every controller in the cluster is offered as an API provider.
        api_providers = [(x['ip'], x['port'], True) for x in c.controllers]
        c.api_client = NvpApiClient.NVPApiHelper(
            api_providers, c.user, c.password,
            request_timeout=c.request_timeout, http_timeout=c.http_timeout,
            retries=c.retries, redirects=c.redirects,
            failover_time=int(self.plugin_config['failover_time']),
            concurrent_connections=int(
                self.plugin_config['concurrent_connections']))
        c.api_client.login()
        # For testing..
        self.api_client = self.controller.api_client

    def get_all_networks(self, tenant_id, **kwargs):
        """
        Returns a dictionary containing all <network_uuid, network_name> for
        the specified tenant.

        :returns: a list of mapping sequences with the following signature:
                     [{'net-id': uuid that uniquely identifies
                                 the particular quantum network,
                       'net-name': a human-readable name associated
                                   with network referenced by net-id
                      },
                      ....
                      {'net-id': uuid that uniquely identifies the
                                 particular quantum network,
                       'net-name': a human-readable name associated
                                   with network referenced by net-id
                      }
                   ]
        :raises: None
        """
        networks = nvplib.get_all_networks(self.controller, tenant_id, [])
        LOG.debug("get_all_networks() completed for tenant %s: %s" %
                  (tenant_id, networks))
        return networks

    def create_network(self, tenant_id, net_name, **kwargs):
        """
        Creates a new Virtual Network, and assigns it a symbolic name.

        :returns: a sequence of mappings with the following signature:
                    {'net-id': uuid that uniquely identifies the
                               particular quantum network,
                     'net-name': a human-readable name associated
                                 with network referenced by net-id
                    }
        :raises:
        """
        kwargs["controller"] = self.controller
        return nvplib.create_network(tenant_id, net_name, **kwargs)

    def create_custom_network(self, tenant_id, net_name, transport_zone,
                              controller):
        return self.create_network(tenant_id, net_name,
                                   network_type="custom",
                                   transport_zone=transport_zone,
                                   controller=controller)

    def delete_network(self, tenant_id, netw_id):
        """
        Deletes the network with the specified network identifier
        belonging to the specified tenant.

        :returns: a sequence of mappings with the following signature:
                    {'net-id': uuid that uniquely identifies the
                               particular quantum network
                    }
        :raises: exception.NetworkInUse
        :raises: exception.NetworkNotFound
        """
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        nvplib.delete_network(self.controller, netw_id)
        LOG.debug("delete_network() completed for tenant: %s" % tenant_id)
        return {'net-id': netw_id}

    def get_network_details(self, tenant_id, netw_id):
        """
        Retrieves a list of all the remote vifs that
        are attached to the network.

        :returns: a sequence of mappings with the following signature:
                    {'net-id': uuid that uniquely identifies the
                               particular quantum network
                     'net-name': a human-readable name associated
                                 with network referenced by net-id
                     'net-ifaces': ['vif1_on_network_uuid',
                                    'vif2_on_network_uuid',...,'vifn_uuid']
                    }
        :raises: exception.NetworkNotFound
        :raises: exception.QuantumException
        """
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        result = None
        remote_vifs = []
        switch = netw_id
        lports = nvplib.query_ports(self.controller, switch,
                                    relations="LogicalPortAttachment")
        for port in lports:
            relation = port["_relations"]
            vic = relation["LogicalPortAttachment"]
            if "vif_uuid" in vic:
                remote_vifs.append(vic["vif_uuid"])
            if not result:
                result = nvplib.get_network(self.controller, switch)
        if not result:
            # Fix: a network with no logical ports previously left
            # ``result`` as None and crashed on the lookup below; fetch
            # the switch details anyway.
            result = nvplib.get_network(self.controller, switch)
        d = {
            "net-id": netw_id,
            "net-ifaces": remote_vifs,
            "net-name": result["display_name"],
            "net-op-status": "UP",
        }
        LOG.debug("get_network_details() completed for tenant %s: %s" %
                  (tenant_id, d))
        return d

    def update_network(self, tenant_id, netw_id, **kwargs):
        """
        Updates the properties of a particular Virtual Network.

        :returns: a sequence of mappings representing the new network
                  attributes, with the following signature:
                    {'net-id': uuid that uniquely identifies the
                               particular quantum network
                     'net-name': the new human-readable name
                                 associated with network referenced by net-id
                    }
        :raises: exception.NetworkNotFound
        """
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        result = nvplib.update_network(self.controller, netw_id, **kwargs)
        LOG.debug("update_network() completed for tenant: %s" % tenant_id)
        return {
            'net-id': netw_id,
            'net-name': result["display_name"],
            'net-op-status': "UP",
        }

    def get_all_ports(self, tenant_id, netw_id, **kwargs):
        """
        Retrieves all port identifiers belonging to the
        specified Virtual Network.

        :returns: a list of mapping sequences with the following signature:
                     [{'port-id': uuid representing a particular port
                                  on the specified quantum network
                      },
                      ....
                      {'port-id': uuid representing a particular port
                                  on the specified quantum network
                      }
                   ]
        :raises: exception.NetworkNotFound
        """
        ids = []
        filters = kwargs.get("filter_opts") or {}
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        LOG.debug("Getting logical ports on lswitch: %s" % netw_id)
        lports = nvplib.query_ports(self.controller, netw_id, fields="uuid",
                                    filters=filters)
        for port in lports:
            ids.append({"port-id": port["uuid"]})

        # Delete from the filter so that Quantum doesn't attempt to filter on
        # this too
        if filters and "attachment" in filters:
            del filters["attachment"]

        LOG.debug("get_all_ports() completed for tenant: %s" % tenant_id)
        LOG.debug("returning port listing:")
        LOG.debug(ids)
        return ids

    def create_port(self, tenant_id, netw_id, port_init_state=None, **params):
        """
        Creates a port on the specified Virtual Network.

        :returns: a mapping sequence with the following signature:
                    {'port-id': uuid representing the created port
                                on specified quantum network
                    }
        :raises: exception.NetworkNotFound
        :raises: exception.StateInvalid
        """
        # Fix: the tenant check was previously performed twice in a row;
        # one round trip to the controller is sufficient.
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        params["controller"] = self.controller
        result = nvplib.create_port(tenant_id, netw_id, port_init_state,
                                    **params)
        d = {
            "port-id": result["uuid"],
            "port-op-status": result["port-op-status"],
        }
        LOG.debug("create_port() completed for tenant %s: %s" % (tenant_id, d))
        return d

    def update_port(self, tenant_id, netw_id, portw_id, **params):
        """
        Updates the properties of a specific port on the
        specified Virtual Network.

        :returns: a mapping sequence with the following signature:
                    {'port-id': uuid representing the
                                updated port on specified quantum network
                     'port-state': update port state (UP or DOWN)
                    }
        :raises: exception.StateInvalid
        :raises: exception.PortNotFound
        """
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        LOG.debug("Update port request: %s" % (params))
        params["controller"] = self.controller
        result = nvplib.update_port(netw_id, portw_id, **params)
        LOG.debug("update_port() completed for tenant: %s" % tenant_id)
        port = {
            'port-id': portw_id,
            'port-state': result["admin_status_enabled"],
            'port-op-status': result["port-op-status"],
        }
        LOG.debug("returning updated port %s: " % port)
        return port

    def delete_port(self, tenant_id, netw_id, portw_id):
        """
        Deletes a port on a specified Virtual Network,
        if the port contains a remote interface attachment,
        the remote interface is first un-plugged and then the port
        is deleted.

        :returns: a mapping sequence with the following signature:
                    {'port-id': uuid representing the deleted port
                                on specified quantum network
                    }
        :raises: exception.PortInUse
        :raises: exception.PortNotFound
        :raises: exception.NetworkNotFound
        """
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        nvplib.delete_port(self.controller, netw_id, portw_id)
        LOG.debug("delete_port() completed for tenant: %s" % tenant_id)
        return {"port-id": portw_id}

    def get_port_details(self, tenant_id, netw_id, portw_id):
        """
        This method allows the user to retrieve a remote interface
        that is attached to this particular port.

        :returns: a mapping sequence with the following signature:
                    {'port-id': uuid representing the port on
                                specified quantum network
                     'net-id': uuid representing the particular
                               quantum network
                     'attachment': uuid of the virtual interface
                                   bound to the port, None otherwise
                    }
        :raises: exception.PortNotFound
        :raises: exception.NetworkNotFound
        """
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        port = nvplib.get_port(self.controller, netw_id, portw_id,
                               "LogicalPortAttachment")
        state = "ACTIVE" if port["admin_status_enabled"] else "DOWN"
        op_status = nvplib.get_port_status(self.controller, netw_id, portw_id)

        relation = port["_relations"]
        attach_type = relation["LogicalPortAttachment"]["type"]
        vif_uuid = "None"
        if attach_type == "VifAttachment":
            vif_uuid = relation["LogicalPortAttachment"]["vif_uuid"]

        d = {
            "port-id": portw_id, "attachment": vif_uuid,
            "net-id": netw_id, "port-state": state,
            "port-op-status": op_status,
        }
        LOG.debug("Port details for tenant %s: %s" % (tenant_id, d))
        return d

    def plug_interface(self, tenant_id, netw_id, portw_id,
                       remote_interface_id):
        """
        Attaches a remote interface to the specified port on the
        specified Virtual Network.

        :returns: None
        :raises: exception.NetworkNotFound
        :raises: exception.PortNotFound
        :raises: exception.AlreadyAttached
                    (? should the network automatically unplug/replug)
        """
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        result = nvplib.plug_interface(self.controller, netw_id,
                                       portw_id, "VifAttachment",
                                       attachment=remote_interface_id)
        LOG.debug("plug_interface() completed for %s: %s" %
                  (tenant_id, result))

    def unplug_interface(self, tenant_id, netw_id, portw_id):
        """
        Detaches a remote interface from the specified port on the
        specified Virtual Network.

        :returns: None
        :raises: exception.NetworkNotFound
        :raises: exception.PortNotFound
        """
        if not nvplib.check_tenant(self.controller, netw_id, tenant_id):
            raise exception.NetworkNotFound(net_id=netw_id)
        result = nvplib.unplug_interface(self.controller, netw_id, portw_id)
        LOG.debug("unplug_interface() completed for tenant %s: %s" %
                  (tenant_id, result))

    def get_port_stats(self, tenant_id, network_id, port_id):
        """
        Returns port statistics for a given port.

        {
          "rx_packets": 0,
          "rx_bytes": 0,
          "tx_errors": 0,
          "rx_errors": 0,
          "tx_bytes": 0,
          "tx_packets": 0
        }

        :returns: dict() of stats
        :raises: exception.NetworkNotFound
        :raises: exception.PortNotFound
        """
        if not nvplib.check_tenant(self.controller, network_id, tenant_id):
            raise exception.NetworkNotFound(net_id=network_id)
        return nvplib.get_port_stats(self.controller, network_id, port_id)
| |
''' Classes for read / write of matlab (TM) 5 files
The matfile specification last found here:
http://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf
(as of December 5 2008)
'''
from __future__ import division, print_function, absolute_import
'''
=================================
Note on functions and mat files
=================================
The document above does not give any hints as to the storage of matlab
function handles, or anonymous function handles. I had therefore to
guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
``mxOPAQUE_CLASS`` by looking at example mat files.
``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to
contain a struct matrix with a set pattern of fields. For anonymous
functions, a sub-field of one of these fields seems to contain the
well-named ``mxOPAQUE_CLASS``. This seems to contain:
* array flags as for any matlab matrix
* 3 int8 strings
* a matrix
It seems that, whenever the mat file contains a ``mxOPAQUE_CLASS``
instance, there is also an un-named matrix (name == '') at the end of
the mat file. I'll call this the ``__function_workspace__`` matrix.
When I saved two anonymous functions in a mat file, or appended another
anonymous function to the mat file, there was still only one
``__function_workspace__`` un-named matrix at the end, but larger than
that for a mat file with a single anonymous function, suggesting that
the workspaces for the two functions had been merged.
The ``__function_workspace__`` matrix appears to be of double class
(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in
the format of a mini .mat file, without the first 124 bytes of the file
header (the description and the subsystem_offset), but with the version
U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,
presumably for 8 byte padding, and then a series of ``miMATRIX``
entries, as in a standard mat file. The ``miMATRIX`` entries appear to
be series of un-named (name == '') matrices, and may also contain arrays
of this same mini-mat format.
I guess that:
* saving an anonymous function back to a mat file will need the
associated ``__function_workspace__`` matrix saved as well for the
anonymous function to work correctly.
* appending to a mat file that has a ``__function_workspace__`` would
involve first pulling off this workspace, appending, checking whether
there were any more anonymous functions appended, and then somehow
merging the relevant workspaces, and saving at the end of the mat
file.
The mat files I was playing with are in ``tests/data``:
* sqr.mat
* parabola.mat
* some_functions.mat
See ``tests/test_mio.py:test_mio_funcs.py`` for a debugging
script I was working with.
'''
# Small fragments of current code adapted from matfile.py by Heiko
# Henkelmann
import os
import time
import sys
import zlib
from io import BytesIO
import warnings
import numpy as np
from numpy.compat import asbytes, asstr
import scipy.sparse
from scipy._lib.six import string_types
from .byteordercodes import native_code, swapped_code
from .miobase import (MatFileReader, docfiller, matdims, read_dtype,
arr_to_chars, arr_dtype_number, MatWriteError,
MatReadError, MatReadWarning)
# Reader object for matlab 5 format variables
from .mio5_utils import VarReader5
# Constants and helper objects
from .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,
NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,
miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,
mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,
mxDOUBLE_CLASS, mclass_info)
from .streams import ZlibInputStream
class MatFile5Reader(MatFileReader):
    ''' Reader for Mat 5 mat files

    Adds the following attribute to base class

    uint16_codec - char codec to use for uint16 char arrays
        (defaults to system default codec)

    Uses variable reader that has the following standard interface (see
    abstract class in ``miobase``)::

        __init__(self, file_reader)
        read_header(self)
        array_from_header(self)

    and added interface::

        set_stream(self, stream)
        read_full_tag(self)
    '''
    @docfiller
    def __init__(self,
                 mat_stream,
                 byte_order=None,
                 mat_dtype=False,
                 squeeze_me=False,
                 chars_as_strings=True,
                 matlab_compatible=False,
                 struct_as_record=True,
                 verify_compressed_data_integrity=True,
                 uint16_codec=None
                 ):
        '''Initializer for matlab 5 file format reader

        %(matstream_arg)s
        %(load_args)s
        %(struct_arg)s
        uint16_codec : {None, string}
            Set codec to use for uint16 char arrays (e.g. 'utf-8').
            Use system default codec if None
        '''
        super(MatFile5Reader, self).__init__(
            mat_stream,
            byte_order,
            mat_dtype,
            squeeze_me,
            chars_as_strings,
            matlab_compatible,
            struct_as_record,
            verify_compressed_data_integrity
            )
        # Set uint16 codec; fall back to the interpreter default encoding
        # when the caller passes None (or an empty string).
        if not uint16_codec:
            uint16_codec = sys.getdefaultencoding()
        self.uint16_codec = uint16_codec
        # placeholders for readers - see initialize_read method
        self._file_reader = None
        self._matrix_reader = None

    def guess_byte_order(self):
        ''' Guess byte order.
        Sets stream pointer to 0 '''
        # Bytes 126-127 of the level-5 MAT-file header hold the endian
        # indicator: b'IM' means the file was written little-endian,
        # anything else (i.e. b'MI') is treated as big-endian.
        self.mat_stream.seek(126)
        mi = self.mat_stream.read(2)
        self.mat_stream.seek(0)
        return mi == b'IM' and '<' or '>'

    def read_file_header(self):
        ''' Read in mat 5 file header '''
        hdict = {}
        hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
        hdr = read_dtype(self.mat_stream, hdr_dtype)
        # Strip padding (spaces, tabs, newlines, NULs) from the free-text
        # description field.
        hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
        # Version is stored as a 16-bit value: high byte major, low byte minor.
        v_major = hdr['version'] >> 8
        v_minor = hdr['version'] & 0xFF
        hdict['__version__'] = '%d.%d' % (v_major, v_minor)
        return hdict

    def initialize_read(self):
        ''' Run when beginning read of variables

        Sets up readers from parameters in `self`
        '''
        # reader for top level stream. We need this extra top-level
        # reader because we use the matrix_reader object to contain
        # compressed matrices (so they have their own stream)
        self._file_reader = VarReader5(self)
        # reader for matrix streams
        self._matrix_reader = VarReader5(self)

    def read_var_header(self):
        ''' Read header, return header, next position

        Header has to define at least .name and .is_global

        Parameters
        ----------
        None

        Returns
        -------
        header : object
           object that can be passed to self.read_var_array, and that
           has attributes .name and .is_global
        next_position : int
           position in stream of next variable
        '''
        mdtype, byte_count = self._file_reader.read_full_tag()
        if not byte_count > 0:
            raise ValueError("Did not read any bytes")
        # The tag's byte count tells us where the *next* top-level element
        # starts, regardless of how we read this one.
        next_pos = self.mat_stream.tell() + byte_count
        if mdtype == miCOMPRESSED:
            # Make new stream from compressed data, and read the real
            # (decompressed) tag from it.
            stream = ZlibInputStream(self.mat_stream, byte_count)
            self._matrix_reader.set_stream(stream)
            check_stream_limit = self.verify_compressed_data_integrity
            mdtype, byte_count = self._matrix_reader.read_full_tag()
        else:
            check_stream_limit = False
            self._matrix_reader.set_stream(self.mat_stream)
        if not mdtype == miMATRIX:
            raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
        header = self._matrix_reader.read_header(check_stream_limit)
        return header, next_pos

    def read_var_array(self, header, process=True):
        ''' Read array, given `header`

        Parameters
        ----------
        header : header object
           object with fields defining variable header
        process : {True, False} bool, optional
           If True, apply recursive post-processing during loading of
           array.

        Returns
        -------
        arr : array
           array with post-processing applied or not according to
           `process`.
        '''
        return self._matrix_reader.array_from_header(header, process)

    def get_variables(self, variable_names=None):
        ''' get variables from stream as dictionary

        variable_names - optional list of variable names to get

        If variable_names is None, then get all variables in file
        '''
        if isinstance(variable_names, string_types):
            variable_names = [variable_names]
        elif variable_names is not None:
            # Copy so we can destructively remove names as we find them.
            variable_names = list(variable_names)
        self.mat_stream.seek(0)
        # Here we pass all the parameters in self to the reading objects
        self.initialize_read()
        mdict = self.read_file_header()
        mdict['__globals__'] = []
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = asstr(hdr.name)
            if name in mdict:
                warnings.warn('Duplicate variable name "%s" in stream'
                              ' - replacing previous with new\n'
                              'Consider mio5.varmats_from_mat to split '
                              'file into single variable files' % name,
                              MatReadWarning, stacklevel=2)
            if name == '':
                # can only be a matlab 7 function workspace
                name = '__function_workspace__'
                # We want to keep this raw because mat_dtype processing
                # will break the format (uint8 as mxDOUBLE_CLASS)
                process = False
            else:
                process = True
            if variable_names is not None and name not in variable_names:
                # Not requested - skip straight to the next variable.
                self.mat_stream.seek(next_position)
                continue
            try:
                res = self.read_var_array(hdr, process)
            except MatReadError as err:
                # Unreadable variables become a warning plus an error
                # string in the result dict, rather than aborting the load.
                warnings.warn(
                    'Unreadable variable "%s", because "%s"' %
                    (name, err),
                    Warning, stacklevel=2)
                res = "Read error: %s" % err
            self.mat_stream.seek(next_position)
            mdict[name] = res
            if hdr.is_global:
                mdict['__globals__'].append(name)
            if variable_names is not None:
                variable_names.remove(name)
                if len(variable_names) == 0:
                    # All requested variables found; stop early.
                    break
        return mdict

    def list_variables(self):
        ''' list variables from stream

        Returns a list of (name, shape, class-info) tuples without
        reading the variable contents.
        '''
        self.mat_stream.seek(0)
        # Here we pass all the parameters in self to the reading objects
        self.initialize_read()
        self.read_file_header()
        vars = []
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = asstr(hdr.name)
            if name == '':
                # can only be a matlab 7 function workspace
                name = '__function_workspace__'
            shape = self._matrix_reader.shape_from_header(hdr)
            if hdr.is_logical:
                info = 'logical'
            else:
                info = mclass_info.get(hdr.mclass, 'unknown')
            vars.append((name, shape, info))
            # Skip the data itself - we only wanted the header.
            self.mat_stream.seek(next_position)
        return vars
def varmats_from_mat(file_obj):
    """ Pull variables out of mat 5 file as a sequence of mat file objects

    This can be useful with a difficult mat file, containing unreadable
    variables. This routine pulls the variables out in raw form and puts them,
    unread, back into a file stream for saving or reading. Another use is the
    pathological case where there is more than one variable of the same name in
    the file; this routine returns the duplicates, whereas the standard reader
    will overwrite duplicates in the returned dictionary.

    The file pointer in `file_obj` will be undefined. File pointers for the
    returned file-like objects are set at 0.

    Parameters
    ----------
    file_obj : file-like
        file object containing mat file

    Returns
    -------
    named_mats : list
        list contains tuples of (name, BytesIO) where BytesIO is a file-like
        object containing mat file contents as for a single variable. The
        BytesIO contains a string with the original header and a single var. If
        ``var_file_obj`` is an individual BytesIO instance, then save as a mat
        file with something like ``open('test.mat',
        'wb').write(var_file_obj.read())``

    Examples
    --------
    >>> import scipy.io

    BytesIO is from the ``io`` module in python 3, and is ``cStringIO`` for
    python < 3.

    >>> mat_fileobj = BytesIO()
    >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})
    >>> varmats = varmats_from_mat(mat_fileobj)
    >>> sorted([name for name, str_obj in varmats])
    ['a', 'b']
    """
    rdr = MatFile5Reader(file_obj)
    file_obj.seek(0)
    # Raw read of top-level file header; this raw byte string is prepended
    # to every per-variable output stream so each one is a valid mat file.
    hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize
    raw_hdr = file_obj.read(hdr_len)
    # Initialize variable reading
    file_obj.seek(0)
    rdr.initialize_read()
    mdict = rdr.read_file_header()
    next_position = file_obj.tell()
    named_mats = []
    while not rdr.end_of_stream():
        start_position = next_position
        hdr, next_position = rdr.read_var_header()
        name = asstr(hdr.name)
        # Read raw variable string: re-seek to where this top-level element
        # began and copy the bytes verbatim, without parsing the contents.
        file_obj.seek(start_position)
        byte_count = next_position - start_position
        var_str = file_obj.read(byte_count)
        # write to stringio object: original header followed by the single
        # raw variable, rewound to 0 for the caller.
        out_obj = BytesIO()
        out_obj.write(raw_hdr)
        out_obj.write(var_str)
        out_obj.seek(0)
        named_mats.append((name, out_obj))
    return named_mats
class EmptyStructMarker(object):
    """Sentinel class signalling that an empty matlab struct must be written.

    Returned (as the class itself, never an instance) by ``to_writeable``
    when the input is equivalent to an empty mapping.
    """
def to_writeable(source):
    ''' Convert input object ``source`` to something we can write

    Parameters
    ----------
    source : object

    Returns
    -------
    arr : None or ndarray or EmptyStructMarker
        If `source` cannot be converted to something we can write to a matfile,
        return None.  If `source` is equivalent to an empty dictionary, return
        ``EmptyStructMarker``.  Otherwise return `source` converted to an
        ndarray with contents for writing to matfile.
    '''
    if isinstance(source, np.ndarray):
        return source
    if source is None:
        return None
    # Objects that implement mappings
    is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and
                  hasattr(source, 'items'))
    # Objects that don't implement mappings, but do have dicts
    if not is_mapping and hasattr(source, '__dict__'):
        source = dict((key, value) for key, value in source.__dict__.items()
                      if not key.startswith('_'))
        is_mapping = True
    if is_mapping:
        dtype = []
        values = []
        for field, value in source.items():
            # Keep only keys that are legal matlab field names: strings that
            # are non-empty and do not start with '_' or a digit.  The
            # explicit ``field`` truthiness test guards against an
            # IndexError from ``field[0]`` on an empty-string key.
            if (isinstance(field, string_types) and field and
                    field[0] not in '_0123456789'):
                dtype.append((field, object))
                values.append(value)
        if dtype:
            # One-element record array; each field holds one value as object.
            return np.array([tuple(values)], dtype)
        else:
            return EmptyStructMarker
    # Next try and convert to an array
    narr = np.asanyarray(source)
    if narr.dtype.type in (object, np.object_) and \
       narr.shape == () and narr == source:
        # No interesting conversion possible
        return None
    return narr
# Native byte ordered dtypes for convenience for writers
NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']  # whole-file header record
NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']  # full data-element tag
NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']  # tag with embedded data (<= 4 bytes)
NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']  # matrix array-flags sub-element
class VarWriter5(object):
    ''' Generic matlab matrix writing class

    Writes a single variable (and, recursively, its contents) to the
    file stream in mat 5 binary format.
    '''
    # Template for the 8-byte miMATRIX tag written at the start of every
    # matrix; the byte count is patched in later by update_matrix_tag.
    mat_tag = np.zeros((), NDT_TAG_FULL)
    mat_tag['mdtype'] = miMATRIX

    def __init__(self, file_writer):
        # Copy the relevant settings from the owning MatFile5Writer.
        self.file_stream = file_writer.file_stream
        self.unicode_strings = file_writer.unicode_strings
        self.long_field_names = file_writer.long_field_names
        self.oned_as = file_writer.oned_as
        # These are used for top level writes, and unset after
        self._var_name = None
        self._var_is_global = False

    def write_bytes(self, arr):
        # Raw write of array memory, Fortran (column-major) order as
        # matlab expects.
        self.file_stream.write(arr.tostring(order='F'))

    def write_string(self, s):
        # Raw write of a byte string.
        self.file_stream.write(s)

    def write_element(self, arr, mdtype=None):
        ''' write tag and data '''
        if mdtype is None:
            # Derive the matlab data type from the numpy dtype string.
            mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
        # Array needs to be in native byte order
        if arr.dtype.byteorder == swapped_code:
            arr = arr.byteswap().newbyteorder()
        byte_count = arr.size*arr.itemsize
        # Elements of 4 bytes or fewer use the compact "small data" tag
        # with the data embedded in the tag itself.
        if byte_count <= 4:
            self.write_smalldata_element(arr, mdtype, byte_count)
        else:
            self.write_regular_element(arr, mdtype, byte_count)

    def write_smalldata_element(self, arr, mdtype, byte_count):
        # write tag with embedded data
        tag = np.zeros((), NDT_TAG_SMALL)
        # byte count lives in the upper 16 bits, mdtype in the lower 16.
        tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
        # if arr.tostring is < 4, the element will be zero-padded as needed.
        tag['data'] = arr.tostring(order='F')
        self.write_bytes(tag)

    def write_regular_element(self, arr, mdtype, byte_count):
        # write tag, data
        tag = np.zeros((), NDT_TAG_FULL)
        tag['mdtype'] = mdtype
        tag['byte_count'] = byte_count
        self.write_bytes(tag)
        self.write_bytes(arr)
        # pad to next 64-bit boundary
        bc_mod_8 = byte_count % 8
        if bc_mod_8:
            self.file_stream.write(b'\x00' * (8-bc_mod_8))

    def write_header(self,
                     shape,
                     mclass,
                     is_complex=False,
                     is_logical=False,
                     nzmax=0):
        ''' Write header for given data options

        shape : sequence
           array shape
        mclass - mat5 matrix class
        is_complex - True if matrix is complex
        is_logical - True if matrix is logical
        nzmax - max non zero elements for sparse arrays

        We get the name and the global flag from the object, and reset
        them to defaults after we've used them
        '''
        # get name and is_global from one-shot object store
        name = self._var_name
        is_global = self._var_is_global
        # initialize the top-level matrix tag, store position
        self._mat_tag_pos = self.file_stream.tell()
        self.write_bytes(self.mat_tag)
        # write array flags (complex, global, logical, class, nzmax)
        af = np.zeros((), NDT_ARRAY_FLAGS)
        af['data_type'] = miUINT32
        af['byte_count'] = 8
        # Pack the three boolean flags into bits 3, 2 and 1 respectively.
        flags = is_complex << 3 | is_global << 2 | is_logical << 1
        af['flags_class'] = mclass | flags << 8
        af['nzmax'] = nzmax
        self.write_bytes(af)
        # shape
        self.write_element(np.array(shape, dtype='i4'))
        # write name
        name = np.asarray(name)
        if name == '':  # empty string zero-terminated
            self.write_smalldata_element(name, miINT8, 0)
        else:
            self.write_element(name, miINT8)
        # reset the one-shot store to defaults
        self._var_name = ''
        self._var_is_global = False

    def update_matrix_tag(self, start_pos):
        # Go back and patch the miMATRIX tag written at `start_pos` with
        # the now-known byte count, then restore the stream position.
        curr_pos = self.file_stream.tell()
        self.file_stream.seek(start_pos)
        # 8 is the size of the tag itself, which is not counted.
        byte_count = curr_pos - start_pos - 8
        if byte_count >= 2**32:
            raise MatWriteError("Matrix too large to save with Matlab "
                                "5 format")
        self.mat_tag['byte_count'] = byte_count
        self.write_bytes(self.mat_tag)
        self.file_stream.seek(curr_pos)

    def write_top(self, arr, name, is_global):
        """ Write variable at top level of mat file

        Parameters
        ----------
        arr : array_like
            array-like object to create writer for
        name : str, optional
            name as it will appear in matlab workspace
            default is empty string
        is_global : {False, True}, optional
            whether variable will be global on load into matlab
        """
        # these are set before the top-level header write, and unset at
        # the end of the same write, because they do not apply for lower levels
        self._var_is_global = is_global
        self._var_name = name
        # write the header and data
        self.write(arr)

    def write(self, arr):
        ''' Write `arr` to stream at top and sub levels

        Parameters
        ----------
        arr : array_like
            array-like object to create writer for
        '''
        # store position, so we can update the matrix tag
        mat_tag_pos = self.file_stream.tell()
        # First check if these are sparse
        if scipy.sparse.issparse(arr):
            self.write_sparse(arr)
            self.update_matrix_tag(mat_tag_pos)
            return
        # Try to convert things that aren't arrays
        narr = to_writeable(arr)
        if narr is None:
            raise TypeError('Could not convert %s (type %s) to array'
                            % (arr, type(arr)))
        # Dispatch on the converted value's type / dtype.
        if isinstance(narr, MatlabObject):
            self.write_object(narr)
        elif isinstance(narr, MatlabFunction):
            raise MatWriteError('Cannot write matlab functions')
        elif narr is EmptyStructMarker:  # empty struct array
            self.write_empty_struct()
        elif narr.dtype.fields:  # struct array
            self.write_struct(narr)
        elif narr.dtype.hasobject:  # cell array
            self.write_cells(narr)
        elif narr.dtype.kind in ('U', 'S'):
            if self.unicode_strings:
                codec = 'UTF8'
            else:
                codec = 'ascii'
            self.write_char(narr, codec)
        else:
            self.write_numeric(narr)
        self.update_matrix_tag(mat_tag_pos)

    def write_numeric(self, arr):
        imagf = arr.dtype.kind == 'c'
        logif = arr.dtype.kind == 'b'
        try:
            mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
        except KeyError:
            # No matching matlab type, probably complex256 / float128 / float96
            # Cast data to complex128 / float64.
            if imagf:
                arr = arr.astype('c128')
            elif logif:
                arr = arr.astype('i1')  # Should only contain 0/1
            else:
                arr = arr.astype('f8')
            mclass = mxDOUBLE_CLASS
        self.write_header(matdims(arr, self.oned_as),
                          mclass,
                          is_complex=imagf,
                          is_logical=logif)
        if imagf:
            # Complex data is stored as separate real and imaginary parts.
            self.write_element(arr.real)
            self.write_element(arr.imag)
        else:
            self.write_element(arr)

    def write_char(self, arr, codec='ascii'):
        ''' Write string array `arr` with given `codec`
        '''
        if arr.size == 0 or np.all(arr == ''):
            # This an empty string array or a string array containing
            # only empty strings. Matlab cannot distinguish between a
            # string array that is empty, and a string array containing
            # only empty strings, because it stores strings as arrays of
            # char. There is no way of having an array of char that is
            # not empty, but contains an empty string. We have to
            # special-case the array-with-empty-strings because even
            # empty strings have zero padding, which would otherwise
            # appear in matlab as a string with a space.
            shape = (0,) * np.max([arr.ndim, 2])
            self.write_header(shape, mxCHAR_CLASS)
            self.write_smalldata_element(arr, miUTF8, 0)
            return
        # non-empty string.
        #
        # Convert to char array
        arr = arr_to_chars(arr)
        # We have to write the shape directly, because we are going
        # recode the characters, and the resulting stream of chars
        # may have a different length
        shape = arr.shape
        self.write_header(shape, mxCHAR_CLASS)
        if arr.dtype.kind == 'U' and arr.size:
            # Make one long string from all the characters.  We need to
            # transpose here, because we're flattening the array, before
            # we write the bytes.  The bytes have to be written in
            # Fortran order.
            n_chars = np.product(shape)
            st_arr = np.ndarray(shape=(),
                                dtype=arr_dtype_number(arr, n_chars),
                                buffer=arr.T.copy())  # Fortran order
            # Recode with codec to give byte string
            st = st_arr.item().encode(codec)
            # Reconstruct as one-dimensional byte array
            arr = np.ndarray(shape=(len(st),),
                             dtype='S1',
                             buffer=st)
        self.write_element(arr, mdtype=miUTF8)

    def write_sparse(self, arr):
        ''' Sparse matrices are 2D
        '''
        A = arr.tocsc()  # convert to sparse CSC format
        A.sort_indices()  # MATLAB expects sorted row indices
        is_complex = (A.dtype.kind == 'c')
        is_logical = (A.dtype.kind == 'b')
        nz = A.nnz
        self.write_header(matdims(arr, self.oned_as),
                          mxSPARSE_CLASS,
                          is_complex=is_complex,
                          is_logical=is_logical,
                          # matlab won't load file with 0 nzmax
                          nzmax=1 if nz == 0 else nz)
        self.write_element(A.indices.astype('i4'))
        self.write_element(A.indptr.astype('i4'))
        self.write_element(A.data.real)
        if is_complex:
            self.write_element(A.data.imag)

    def write_cells(self, arr):
        self.write_header(matdims(arr, self.oned_as),
                          mxCELL_CLASS)
        # loop over data, column major; each cell is written as a full
        # nested matrix via the recursive write().
        A = np.atleast_2d(arr).flatten('F')
        for el in A:
            self.write(el)

    def write_empty_struct(self):
        self.write_header((1, 1), mxSTRUCT_CLASS)
        # max field name length set to 1 in an example matlab struct
        self.write_element(np.array(1, dtype=np.int32))
        # Field names element is empty
        self.write_element(np.array([], dtype=np.int8))

    def write_struct(self, arr):
        self.write_header(matdims(arr, self.oned_as),
                          mxSTRUCT_CLASS)
        self._write_items(arr)

    def _write_items(self, arr):
        # write fieldnames
        fieldnames = [f[0] for f in arr.dtype.descr]
        # +1 leaves room for the terminating NUL in each name slot.
        length = max([len(fieldname) for fieldname in fieldnames])+1
        max_length = (self.long_field_names and 64) or 32
        if length > max_length:
            raise ValueError("Field names are restricted to %d characters" %
                             (max_length-1))
        self.write_element(np.array([length], dtype='i4'))
        self.write_element(
            np.array(fieldnames, dtype='S%d' % (length)),
            mdtype=miINT8)
        # Write each record's fields in column-major order.
        A = np.atleast_2d(arr).flatten('F')
        for el in A:
            for f in fieldnames:
                self.write(el[f])

    def write_object(self, arr):
        '''Same as writing structs, except different mx class, and extra
        classname element after header
        '''
        self.write_header(matdims(arr, self.oned_as),
                          mxOBJECT_CLASS)
        self.write_element(np.array(arr.classname, dtype='S'),
                           mdtype=miINT8)
        self._write_items(arr)
class MatFile5Writer(object):
    ''' Class for writing mat5 files '''

    @docfiller
    def __init__(self, file_stream,
                 do_compression=False,
                 unicode_strings=False,
                 global_vars=None,
                 long_field_names=False,
                 oned_as='row'):
        ''' Initialize writer for matlab 5 format files

        Parameters
        ----------
        %(do_compression)s
        %(unicode_strings)s
        global_vars : None or sequence of strings, optional
            Names of variables to be marked as global for matlab
        %(long_fields)s
        %(oned_as)s
        '''
        self.file_stream = file_stream
        self.do_compression = do_compression
        self.unicode_strings = unicode_strings
        if global_vars:
            self.global_vars = global_vars
        else:
            self.global_vars = []
        self.long_field_names = long_field_names
        self.oned_as = oned_as
        # Created lazily in put_variables.
        self._matrix_writer = None

    def write_file_header(self):
        # write header
        hdr = np.zeros((), NDT_FILE_HDR)
        hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \
            % (os.name,time.asctime())
        hdr['version'] = 0x0100
        # Endian indicator: the uint16 0x4d49 ('M', 'I') read back as a
        # 2-byte string reflects the native byte order of this machine.
        hdr['endian_test'] = np.ndarray(shape=(),
                                        dtype='S2',
                                        buffer=np.uint16(0x4d49))
        self.file_stream.write(hdr.tostring())

    def put_variables(self, mdict, write_header=None):
        ''' Write variables in `mdict` to stream

        Parameters
        ----------
        mdict : mapping
           mapping with method ``items`` returns name, contents pairs where
           ``name`` which will appear in the matlab workspace in file load, and
           ``contents`` is something writeable to a matlab file, such as a numpy
           array.
        write_header : {None, True, False}, optional
           If True, then write the matlab file header before writing the
           variables. If None (the default) then write the file header
           if we are at position 0 in the stream. By setting False
           here, and setting the stream position to the end of the file,
           you can append variables to a matlab file
        '''
        # write header if requested, or None and start of file
        if write_header is None:
            write_header = self.file_stream.tell() == 0
        if write_header:
            self.write_file_header()
        self._matrix_writer = VarWriter5(self)
        for name, var in mdict.items():
            # Skip private names like '__globals__'.
            # NOTE(review): an empty-string name would raise IndexError
            # here - presumably callers never pass one; verify.
            if name[0] == '_':
                continue
            is_global = name in self.global_vars
            if self.do_compression:
                # Write the variable into a scratch stream, then emit a
                # miCOMPRESSED element containing the zlib-compressed bytes.
                stream = BytesIO()
                self._matrix_writer.file_stream = stream
                self._matrix_writer.write_top(var, asbytes(name), is_global)
                out_str = zlib.compress(stream.getvalue())
                tag = np.empty((), NDT_TAG_FULL)
                tag['mdtype'] = miCOMPRESSED
                tag['byte_count'] = len(out_str)
                self.file_stream.write(tag.tostring())
                self.file_stream.write(out_str)
            else:  # not compressing
                self._matrix_writer.write_top(var, asbytes(name), is_global)
| |
""" A class for building histograms incrementally. """
import numpy as np
from collections import defaultdict
class RHist(object):
    """
    A class for calculating histograms where the bin size
    and location is set by rounding the input (i.e. use <decimals>) but
    where the number and range of bins is determined by the data.

    As a result, you need only know in advance the approximate scale
    your data will take, i.e. the precision you're interested in.

    There are a few methods that return useful statistics.

    <name> is a unique identifier for this histogram.
    <decimals> is an integer specifying the number of decimal places.
    Negative numbers behave as expected.
    """

    def __init__(self, name, decimals=1):
        # Number of decimal places used when rounding samples into bins.
        self.decimals = decimals
        # Identifier; used as the plot label.
        self.name = name
        # Bin location -> count.  defaultdict(int) starts absent bins at 0.
        self.h = defaultdict(int)
        # Cached normalized histogram (pmf); rebuilt lazily by norm().
        self.h_norm = None

    def add(self, x):
        """ Add <x>, a data point, to the histogram """
        self.h[np.round(x, self.decimals)] += 1
        # Invalidate the cached pmf so later statistics see this sample.
        # (Previously the stale cache was reused, giving wrong results.)
        self.h_norm = None

    def norm(self):
        """
        Calculate the normalized histogram (i.e. a probability
        mass function).
        """
        from copy import deepcopy
        # Borrowed from the implementation discussed in
        # Think Stats Probability and Statistics for Programmers
        # By Allen B. Downey, p 16.
        # http://shop.oreilly.com/product/0636920020745.do
        self.h_norm = deepcopy(self.h)
        weight = 1. / self.n()
        for k in self.h_norm:
            self.h_norm[k] *= weight

    def mean(self):
        """ Estimate and return the mean. """
        if self.h_norm is None:
            self.norm()
        # mean = sum_i(p_i * x_i)
        return sum(p * x for x, p in self.h_norm.items())

    def median(self):
        """ Estimate and return the median (of the bin locations).

        Raises ValueError if the histogram is empty.
        """
        values = sorted(self.h.keys())
        nvalues = len(values)
        if nvalues == 0:
            raise ValueError("median() of an empty histogram")
        # Use floor division: the original '/' indices were floats on
        # Python 3 and off by one on both the even and odd branches.
        mid = nvalues // 2
        if nvalues % 2 == 0:
            # Even count: average the two middle bin locations.
            return (values[mid - 1] + values[mid]) / 2.0
        # Odd count: the single middle bin location.
        return values[mid]

    def var(self):
        """ Estimate and return the variance. """
        # Borrowed from the implementation discussed in
        # Think Stats Probability and Statistics for Programmers
        # By Allen B. Downey, p 16.
        # http://shop.oreilly.com/product/0636920020745.do
        if self.h_norm is None:
            self.norm()
        mean = self.mean()
        # var = sum_i(p_i * (x_i - mean)**2)
        return sum(p * (x - mean) ** 2 for x, p in self.h_norm.items())

    def n(self):
        """ Count and return the total number of samples. """
        # Builtin sum: np.sum on a dict view does not reduce it on
        # Python 3 (it becomes a 0-d object array).
        return sum(self.h.values())

    def stdev(self):
        """ Estimate and return the standard deviation. """
        # NOTE(review): kept as in the original - sqrt(var / (n - 1)) -
        # although the usual sample sd is sqrt(var * n / (n - 1)); callers
        # may depend on this value, so only flagging.
        var = self.var()
        n = self.n()
        return np.sqrt(var / (n - 1))

    def se(self):
        """ Estimate and return the standard error. """
        return self.stdev() / np.sqrt(self.n())

    def above(self, criterion):
        """ Estimate and return the percent area of the histogram at
        or above the <criterion>. """
        # Sum counts in bins at or above criterion, normalized by total n.
        values = [value for key, value in self.h.items() if key >= criterion]
        return np.sum(values) / float(self.n())

    def overlap(self, Rhist):
        """ Calculates the percent overlap between this histogram and
        <Rhist>, another histogram instance.

        Note: percent overlap is calculated by finding the difference
        in absolute counts for all overlapping bins, summing these,
        then normalizing by the total counts for both distributions
        (all bins). """
        n1 = self.n()  # Get total counts
        n2 = Rhist.n()
        # Tabulate the diffs for each overlapping bin.  Use .get rather
        # than indexing: indexing a defaultdict inserts empty bins into
        # the *other* histogram as a side effect.  A missing bin
        # contributes 0, exactly as before.
        diffs = []
        for key, val1 in self.h.items():
            val1 = float(val1)
            val2 = float(Rhist.h.get(key, 0))
            diffs.append(max(val1, val2) - np.abs(val1 - val2))
        # Sum, then normalize by total count.
        return np.sum(diffs) / (n1 + n2)

    def fitPDF(self, family):
        """ Fit a probability density function (of type <family>) """
        # TODO...
        raise NotImplementedError()

    def plot(self, fig=None, color='black', norm=False):
        """
        Plot the histogram.

        If provided current data is added to <fig>, a matplotlib plot
        identifier.

        <norm> indicates whether the raw counts or normalized values
        should be plotted.
        """
        import matplotlib.pyplot as plt
        plt.ion()  # Interactive plots -- go.
        if norm is True:
            if self.h_norm is None:
                self.norm()
            xs, ys = zip(*sorted(self.h_norm.items()))
        else:
            xs, ys = zip(*sorted(self.h.items()))
        if fig is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        else:
            ax = fig.axes[0]
        # Find the min width for bars; with a single bin there are no
        # gaps to measure (min() of an empty list raised ValueError), so
        # fall back to the bin resolution implied by <decimals>.
        if len(xs) > 1:
            width = min(xs[ii + 1] - xs[ii] for ii in range(len(xs) - 1))
        else:
            width = 10.0 ** -self.decimals
        ax.bar(xs, ys,
               width=width,
               alpha=0.4,
               color=color, edgecolor=color,
               align='center',
               label=self.name)
        plt.show()
        return fig
| |
# -*- coding: utf8 -*-
"""
Tests for pika.adapters.blocking_connection.BlockingConnection
"""
# Disable pylint warnings concerning access to protected member
# pylint: disable=W0212
# Disable pylint messages concerning missing docstring
# pylint: disable=C0111
# Disable pylint messages concerning invalid method names
# pylint: disable=C0103
# Disable pylint messages concerning "method could be a function"
# pylint: disable=R0201
import socket
try:
from unittest import mock # pylint: disable=E0611
from unittest.mock import patch # pylint: disable=E0611
except ImportError:
import mock
from mock import patch
try:
import unittest2 as unittest
except ImportError:
import unittest
import pika
from pika.adapters import blocking_connection
import pika.channel
from pika.exceptions import AMQPConnectionError, ChannelClosed
class BlockingConnectionMockTemplate(blocking_connection.BlockingConnection):
    """Spec template handed to mock patches needing a BlockingConnection."""
class SelectConnectionTemplate(blocking_connection.SelectConnection):
    """SelectConnection spec template exposing the attributes the tests set."""
    is_closed = is_closing = is_open = None
    outbound_buffer = _channels = ioloop = None
class BlockingConnectionTests(unittest.TestCase):
"""TODO: test properties"""
    @patch.object(blocking_connection, 'SelectConnection',
                  spec_set=SelectConnectionTemplate)
    def test_constructor(self, select_connection_class_mock):
        """Constructor must wire its callbacks into SelectConnection and
        activate the ioloop poller exactly once."""
        # Suppress the connection-setup I/O loop during construction.
        with mock.patch.object(blocking_connection.BlockingConnection,
                               '_process_io_for_connection_setup'):
            connection = blocking_connection.BlockingConnection('params')
        # Parameters pass through; all callbacks are adapter-supplied.
        select_connection_class_mock.assert_called_once_with(
            parameters='params',
            on_open_callback=mock.ANY,
            on_open_error_callback=mock.ANY,
            on_close_callback=mock.ANY,
            stop_ioloop_on_close=mock.ANY)
        self.assertEqual(connection._impl.ioloop.activate_poller.call_count, 1)
    @patch.object(blocking_connection, 'SelectConnection',
                  spec_set=SelectConnectionTemplate)
    def test_process_io_for_connection_setup(self, select_connection_class_mock):
        """_process_io_for_connection_setup completes without error when the
        'opened' result is already set."""
        # Suppress the setup I/O loop while constructing the adapter.
        with mock.patch.object(blocking_connection.BlockingConnection,
                               '_process_io_for_connection_setup'):
            connection = blocking_connection.BlockingConnection('params')
        # Pre-set the success result so the real call returns immediately.
        connection._opened_result.set_value_once(
            select_connection_class_mock.return_value)
        with mock.patch.object(
                blocking_connection.BlockingConnection,
                '_flush_output',
                spec_set=blocking_connection.BlockingConnection._flush_output):
            connection._process_io_for_connection_setup()
    @patch.object(blocking_connection, 'SelectConnection',
                  spec_set=SelectConnectionTemplate)
    def test_process_io_for_connection_setup_fails_with_open_error(
            self, select_connection_class_mock):
        """If the 'open error' result is set, connection setup must re-raise
        that same AMQPConnectionError instance."""
        with mock.patch.object(blocking_connection.BlockingConnection,
                               '_process_io_for_connection_setup'):
            connection = blocking_connection.BlockingConnection('params')
        exc_value = pika.exceptions.AMQPConnectionError('failed')
        # Pre-set the failure result before invoking the real setup call.
        connection._open_error_result.set_value_once(
            select_connection_class_mock.return_value, exc_value)
        with mock.patch.object(
                blocking_connection.BlockingConnection,
                '_flush_output',
                spec_set=blocking_connection.BlockingConnection._flush_output):
            with self.assertRaises(pika.exceptions.AMQPConnectionError) as cm:
                connection._process_io_for_connection_setup()
            # The exact exception object set above must propagate.
            self.assertEqual(cm.exception, exc_value)
    @patch.object(blocking_connection, 'SelectConnection',
                  spec_set=SelectConnectionTemplate,
                  is_closed=False, outbound_buffer=[])
    def test_flush_output(self, select_connection_class_mock):
        """_flush_output returns normally on an open connection with an
        empty outbound buffer."""
        with mock.patch.object(blocking_connection.BlockingConnection,
                               '_process_io_for_connection_setup'):
            connection = blocking_connection.BlockingConnection('params')
        connection._opened_result.set_value_once(
            select_connection_class_mock.return_value)
        # Waiters: first reports "not done", second reports "done".
        connection._flush_output(lambda: False, lambda: True)
    @patch.object(blocking_connection, 'SelectConnection',
                  spec_set=SelectConnectionTemplate,
                  is_closed=False, outbound_buffer=[])
    def test_flush_output_user_initiated_close(self,
                                               select_connection_class_mock):
        """A close initiated by the user must not raise from _flush_output,
        and the poller is activated/deactivated once each."""
        with mock.patch.object(blocking_connection.BlockingConnection,
                               '_process_io_for_connection_setup'):
            connection = blocking_connection.BlockingConnection('params')
        # Simulate a close that the user asked for (code 200 'success').
        connection._user_initiated_close = True
        connection._closed_result.set_value_once(
            select_connection_class_mock.return_value,
            200, 'success')
        connection._flush_output(lambda: False, lambda: True)
        # One activation at construction, one deactivation on close.
        self.assertEqual(connection._impl.ioloop.activate_poller.call_count,
                         1)
        self.assertEqual(connection._impl.ioloop.deactivate_poller.call_count,
                         1)
    @patch.object(blocking_connection, 'SelectConnection',
                  spec_set=SelectConnectionTemplate,
                  is_closed=False, outbound_buffer=[])
    def test_flush_output_server_initiated_error_close(
            self,
            select_connection_class_mock):
        """A server-initiated close with an error code must surface as
        ConnectionClosed carrying that code and text."""
        with mock.patch.object(blocking_connection.BlockingConnection,
                               '_process_io_for_connection_setup'):
            connection = blocking_connection.BlockingConnection('params')
        # Close was NOT requested by the user; server reports 404.
        connection._user_initiated_close = False
        connection._closed_result.set_value_once(
            select_connection_class_mock.return_value, 404, 'not found')
        with self.assertRaises(pika.exceptions.ConnectionClosed) as cm:
            connection._flush_output(lambda: False, lambda: True)
        self.assertSequenceEqual(cm.exception.args, (404, 'not found'))
        # Poller lifecycle still runs exactly once each way.
        self.assertEqual(connection._impl.ioloop.activate_poller.call_count,
                         1)
        self.assertEqual(connection._impl.ioloop.deactivate_poller.call_count,
                         1)
@patch.object(blocking_connection, 'SelectConnection',
              spec_set=SelectConnectionTemplate,
              is_closed=False, outbound_buffer=[])
def test_flush_output_server_initiated_no_error_close(
        self,
        select_connection_class_mock):
    # Even a "successful" (200) close raises ConnectionClosed when the user
    # did not initiate it — the caller must learn the connection is gone.
    with mock.patch.object(blocking_connection.BlockingConnection,
                           '_process_io_for_connection_setup'):
        connection = blocking_connection.BlockingConnection('params')
        connection._user_initiated_close = False
        connection._closed_result.set_value_once(
            select_connection_class_mock.return_value,
            200, 'ok')

    with self.assertRaises(pika.exceptions.ConnectionClosed) as cm:
        connection._flush_output(lambda: False, lambda: True)

    self.assertSequenceEqual(cm.exception.args, (200, 'ok'))

    self.assertEqual(connection._impl.ioloop.activate_poller.call_count,
                     1)
    self.assertEqual(connection._impl.ioloop.deactivate_poller.call_count,
                     1)
@patch.object(blocking_connection, 'SelectConnection',
              spec_set=SelectConnectionTemplate)
def test_close(self, select_connection_class_mock):
    # close() must first close every open channel (via each impl channel's
    # cookie, i.e. the BlockingChannel wrapper), then close the underlying
    # connection impl, forwarding the same (code, text) arguments to both.
    select_connection_class_mock.return_value.is_closed = False
    with mock.patch.object(blocking_connection.BlockingConnection,
                           '_process_io_for_connection_setup'):
        connection = blocking_connection.BlockingConnection('params')

    impl_channel_mock = mock.Mock()
    connection._impl._channels = {1: impl_channel_mock}

    with mock.patch.object(
            blocking_connection.BlockingConnection,
            '_flush_output',
            spec_set=blocking_connection.BlockingConnection._flush_output):
        # Pre-signal "closed" so close() does not block waiting for I/O.
        connection._closed_result.signal_once()
        connection.close(200, 'text')

    impl_channel_mock._get_cookie.return_value.close.assert_called_once_with(
        200, 'text')
    select_connection_class_mock.return_value.close.assert_called_once_with(
        200, 'text')
@patch.object(blocking_connection, 'SelectConnection',
              spec_set=SelectConnectionTemplate)
def test_close_with_channel_closed_exception(self,
                                             select_connection_class_mock):
    # A ChannelClosed raised while closing one channel must not prevent the
    # remaining channels, nor the connection itself, from being closed.
    select_connection_class_mock.return_value.is_closed = False
    with mock.patch.object(blocking_connection.BlockingConnection,
                           '_process_io_for_connection_setup'):
        connection = blocking_connection.BlockingConnection('params')

    # First channel's close() raises; second channel closes normally.
    channel1_mock = mock.Mock(
        is_open=True,
        close=mock.Mock(side_effect=ChannelClosed,
                        spec_set=pika.channel.Channel.close),
        spec_set=blocking_connection.BlockingChannel)

    channel2_mock = mock.Mock(
        is_open=True,
        spec_set=blocking_connection.BlockingChannel)

    # Impl-level channels hand back the BlockingChannel wrappers as cookies.
    connection._impl._channels = {
        1: mock.Mock(
            _get_cookie=mock.Mock(
                return_value=channel1_mock,
                spec_set=pika.channel.Channel._get_cookie),
            spec_set=pika.channel.Channel),
        2: mock.Mock(
            _get_cookie=mock.Mock(
                return_value=channel2_mock,
                spec_set=pika.channel.Channel._get_cookie),
            spec_set=pika.channel.Channel)
    }

    with mock.patch.object(
            blocking_connection.BlockingConnection,
            '_flush_output',
            spec_set=blocking_connection.BlockingConnection._flush_output):
        connection._closed_result.signal_once()
        connection.close(200, 'text')

    # Both channels were asked to close despite channel 1's exception.
    channel1_mock.close.assert_called_once_with(200, 'text')
    channel2_mock.close.assert_called_once_with(200, 'text')
    select_connection_class_mock.return_value.close.assert_called_once_with(
        200, 'text')
@patch.object(blocking_connection, 'SelectConnection',
              spec_set=SelectConnectionTemplate)
@patch.object(blocking_connection, 'BlockingChannel',
              spec_set=blocking_connection.BlockingChannel)
def test_channel(self, blocking_channel_class_mock,  # pylint: disable=W0613
                 select_connection_class_mock):  # pylint: disable=W0613
    # channel() should complete without error once connection setup and
    # output flushing are both stubbed out.
    setup_patch = mock.patch.object(blocking_connection.BlockingConnection,
                                    '_process_io_for_connection_setup')
    with setup_patch:
        conn = blocking_connection.BlockingConnection('params')

    flush_patch = mock.patch.object(
        blocking_connection.BlockingConnection,
        '_flush_output',
        spec_set=blocking_connection.BlockingConnection._flush_output)
    with flush_patch:
        conn.channel()
@patch.object(blocking_connection, 'SelectConnection',
              spec_set=SelectConnectionTemplate)
def test_sleep(self, select_connection_class_mock):  # pylint: disable=W0613
    # sleep() should return without error when output flushing is stubbed.
    setup_patch = mock.patch.object(blocking_connection.BlockingConnection,
                                    '_process_io_for_connection_setup')
    with setup_patch:
        conn = blocking_connection.BlockingConnection('params')

    flush_patch = mock.patch.object(
        blocking_connection.BlockingConnection,
        '_flush_output',
        spec_set=blocking_connection.BlockingConnection._flush_output)
    with flush_patch:
        conn.sleep(0.00001)
def test_connection_attempts_with_timeout(self):
    # Every socket.connect() attempt times out immediately, so the
    # constructor must retry exactly `connection_attempts` times and then
    # raise AMQPConnectionError.
    # for whatever conn_attempt we try:
    for conn_attempt in (1, 2, 5):
        mock_sock_obj = mock.Mock(
            spec_set=socket.socket,
            connect=mock.Mock(side_effect=socket.timeout))

        # NOTE Use short retry_delay to not wait uselessly during the retry
        # process, but not too low to avoid timer_id collision on systems
        # with poor timer resolution (e.g., Windows)
        params = pika.ConnectionParameters(connection_attempts=conn_attempt,
                                           retry_delay=0.01)

        with self.assertRaises(AMQPConnectionError) as ctx:
            with mock.patch(
                    'pika.SelectConnection._create_tcp_connection_socket',
                    return_value=mock_sock_obj) as create_sock_mock:
                # Pin address resolution to a single known endpoint so the
                # expected connect() arguments are deterministic.
                with mock.patch('pika.SelectConnection._getaddrinfo',
                                return_value=[(socket.AF_INET,
                                               socket.SOCK_STREAM,
                                               socket.IPPROTO_TCP,
                                               '',
                                               ('127.0.0.1', 5672))]):
                    pika.BlockingConnection(parameters=params)

        # as any attempt will timeout (directly),
        # at the end there must be exactly that count of socket.connect()
        # method calls:
        self.assertEqual(conn_attempt,
                         create_sock_mock.return_value.connect.call_count)

        # and each must be with the following arguments (always the same):
        create_sock_mock.return_value.connect.assert_has_calls(
            conn_attempt *
            [mock.call(('127.0.0.1', 5672))])

        # and the raised error must then look like:
        self.assertEqual('Connection to 127.0.0.1:5672 failed: timeout',
                         str(ctx.exception))
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import json
from rest_framework.response import Response
from rest_framework import decorators, status
from rest_framework.permissions import (AllowAny,
IsAuthenticated,
IsAuthenticatedOrReadOnly)
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.conf import settings
from django.db import connection
from django.contrib.gis.geos import GEOSGeometry
from celery import chain
from urlparse import urljoin
from apps.core.models import Job
from apps.core.tasks import save_job_error, save_job_result
from apps.modeling import tasks
from apps.modeling.models import Project, Scenario
from apps.modeling.serializers import (ProjectSerializer,
ProjectUpdateSerializer,
ScenarioSerializer)
@decorators.api_view(['GET', 'POST'])
@decorators.permission_classes((IsAuthenticated, ))
def projects(request):
    """List the logged-in user's projects (with embedded scenarios) on GET;
    create a new project owned by the logged-in user on POST."""
    if request.method == 'GET':
        owned = Project.objects.filter(user=request.user)
        return Response(ProjectSerializer(owned, many=True).data)
    elif request.method == 'POST':
        serializer = ProjectUpdateSerializer(data=request.data,
                                             context={"request": request})
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
@decorators.api_view(['DELETE', 'GET', 'PUT'])
@decorators.permission_classes((IsAuthenticatedOrReadOnly, ))
def project(request, proj_id):
    """Retrieve, update or delete a single project."""
    proj = get_object_or_404(Project, id=proj_id)
    is_owner = proj.user.id == request.user.id

    if request.method == 'GET':
        # Private projects are hidden (404) from everyone but their owner.
        if not is_owner and proj.is_private:
            return Response(status=status.HTTP_404_NOT_FOUND)
        return Response(ProjectSerializer(proj).data)

    # PUT/DELETE: only the owner may modify; others get 404.
    if not is_owner:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'PUT':
        serializer = ProjectUpdateSerializer(proj, data=request.data,
                                             context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    # DELETE (the only remaining allowed method)
    proj.delete()
    return Response(status=status.HTTP_204_NO_CONTENT)
@decorators.api_view(['POST'])
@decorators.permission_classes((IsAuthenticated, ))
def scenarios(request):
    """Create a scenario on a project the authenticated user owns."""
    if request.method == 'POST':
        serializer = ScenarioSerializer(data=request.data,
                                        context={"request": request})

        # 404 unless the target project exists and belongs to the caller.
        get_object_or_404(Project,
                          id=serializer.initial_data.get('project'),
                          user=request.user)

        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
@decorators.api_view(['DELETE', 'GET', 'PUT'])
@decorators.permission_classes((IsAuthenticatedOrReadOnly, ))
def scenario(request, scen_id):
    """Retrieve, update or delete a single scenario."""
    scen = get_object_or_404(Scenario, id=scen_id)
    is_owner = scen.project.user.id == request.user.id

    if request.method == 'GET':
        # Scenarios of private projects are hidden from non-owners.
        if not is_owner and scen.project.is_private:
            return Response(status=status.HTTP_404_NOT_FOUND)
        return Response(ScenarioSerializer(scen).data)

    # PUT/DELETE: only the project owner may modify; others get 404.
    if not is_owner:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'PUT':
        serializer = ScenarioSerializer(scen, data=request.data,
                                        context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    # DELETE (the only remaining allowed method)
    scen.delete()
    return Response(status=status.HTTP_204_NO_CONTENT)
@decorators.api_view(['POST'])
@decorators.permission_classes((AllowAny, ))
def start_analyze(request, format=None):
    """Kick off an async analyze job and immediately return its id."""
    # Anonymous submissions are allowed; the job is tied to a user only
    # when one is logged in.
    user = request.user if request.user.is_authenticated() else None
    created = now()

    # NOTE(review): the raw POST QueryDict is passed straight through as
    # the area of interest — presumably run_analyze expects that shape;
    # confirm against the task implementation.
    area_of_interest = request.POST

    job = Job.objects.create(created_at=created, result='', error='',
                             traceback='', user=user, status='started')

    task_list = _initiate_analyze_job_chain(area_of_interest, job.id)

    # Persist the celery chain id so clients can poll it via get_job().
    job.uuid = task_list.id
    job.save()

    return Response(
        {
            'job': task_list.id,
            'status': 'started',
        }
    )
@decorators.api_view(['GET'])
@decorators.permission_classes((AllowAny, ))
def get_job(request, job_uuid, format=None):
    """Return the status and result of one async job owned by the caller."""
    # Get the user so that logged in users can only see jobs that they
    # started.  Anonymous jobs (user=None) are only visible to anonymous
    # callers.
    # TODO consider if we should have some sort of session id check to ensure
    # you can only view your own jobs.
    user = request.user if request.user.is_authenticated() else None
    job = get_object_or_404(Job, uuid=job_uuid, user=user)

    # TODO Should we return the error? Might leak info about the internal
    # workings that we don't want exposed.
    return Response(
        {
            'job_uuid': job.uuid,
            'status': job.status,
            'result': job.result,
            'error': job.error,
            'started': job.created_at,
            'finished': job.delivered_at,
        }
    )
def _initiate_analyze_job_chain(area_of_interest, job_id):
    """Queue the analyze task chained with result persistence.

    Failures anywhere in the chain are routed to save_job_error.
    Returns the celery AsyncResult for the chain.
    """
    workflow = chain(tasks.run_analyze.s(area_of_interest),
                     save_job_result.s(job_id, area_of_interest))
    return workflow.apply_async(link_error=save_job_error.s(job_id))
@decorators.api_view(['POST'])
@decorators.permission_classes((AllowAny, ))
def start_tr55(request, format=None):
    """Start an async TR-55 model run.

    Requires exactly one 'precipitation' entry (with a 'value') among the
    posted model modifications; otherwise responds 400.
    """
    user = request.user if request.user.is_authenticated() else None
    created = now()
    model_input = json.loads(request.POST['model_input'])

    if 'modifications' in model_input:
        # Python 2 filter() returns a list here, so len() below is valid.
        precips = filter(lambda mod: mod['name'] == 'precipitation',
                         model_input['modifications'])

        if len(precips) == 1 and ('value' in precips[0]):
            # Promote the single precipitation value to a top-level input.
            model_input['precip'] = precips[0]['value']

            job = Job.objects.create(created_at=created, result='', error='',
                                     traceback='', user=user, status='started')

            task_list = _initiate_tr55_job_chain(model_input, job.id)

            # Persist the celery chain id so clients can poll get_job().
            job.uuid = task_list.id
            job.save()

            return Response({
                'job': task_list.id,
                'status': 'started',
            })

    # Reached when modifications are absent or precipitation is not unique.
    return Response({'error': 'Missing single precipitation modification.'},
                    status=status.HTTP_400_BAD_REQUEST)
def _initiate_tr55_job_chain(model_input, job_id):
    """Queue the geotrellis service call, the TR-55 run, and result
    persistence as one celery chain; failures go to save_job_error.

    Returns the celery AsyncResult for the chain.
    """
    workflow = chain(tasks.make_gt_service_call_task.s(model_input),
                     tasks.run_tr55.s(model_input),
                     save_job_result.s(job_id, model_input))
    return workflow.apply_async(link_error=save_job_error.s(job_id))
@decorators.api_view(['GET'])
@decorators.permission_classes((AllowAny, ))
def boundary_layers(request, table_id=None, obj_id=None):
    """With no args, list the configured boundary tile layers; with a
    table_id and obj_id, return that boundary's geometry as GeoJSON.

    Responds 400 for unknown tables, non-numeric ids, or missing rows.
    """
    if not table_id and not obj_id:
        # Protocol-relative tile endpoint base, e.g. //tiler.example.com
        tiler_base = '//%s' % settings.TILER_HOST
        tiler_postfix = '/{z}/{x}/{y}'

        def augment(index, dictionary):
            # Shape a settings entry into the client-facing layer descriptor.
            return {
                'display': dictionary['display'],
                'tableId': index,
                'endpoint': urljoin(tiler_base, index + tiler_postfix),
            }

        layers = [augment(i, d)
                  for i, d in settings.BOUNDARY_LAYERS.items()]
        return Response(layers)
    elif table_id in settings.BOUNDARY_LAYERS and obj_id:
        table_name = settings.BOUNDARY_LAYERS[table_id]['table_name']

        # BUG FIX: a non-numeric obj_id used to raise ValueError and
        # surface as HTTP 500; treat it as a bad request instead.
        try:
            obj_key = int(obj_id)
        except ValueError:
            return Response(status=status.HTTP_400_BAD_REQUEST)

        # table_name comes from server-side settings (never user input);
        # the id is passed as a query parameter.
        cursor = connection.cursor()
        try:
            cursor.execute('SELECT geom FROM ' + table_name +
                           ' WHERE id = %s', [obj_key])
            row = cursor.fetchone()
        finally:
            # BUG FIX: the cursor was previously never closed.
            cursor.close()

        if row:
            geojson = json.loads(GEOSGeometry(row[0]).geojson)
            return Response(geojson)
        return Response(status=status.HTTP_400_BAD_REQUEST)
    else:
        return Response(status=status.HTTP_400_BAD_REQUEST)
| |
#!/usr/bin/env python
import sys
import os
import re
import numpy
from PIL import Image
from StringIO import StringIO
# you need to install this library yourself
# recent versions handle bigtiff too...
import tifffile
"""
Extract pyramidal TIFF files with JPEG tiled storage into a tree of
separate JPEG files into DZI compliant directory that is usable
by openseadragon. One per channel.
usage: extract2dzi_rgb.py pyramid-file-dir dest-dir
The pyramid-file must be a multi-page TIFF with each page having an
image scaled by 1/2 from the previous page. All pages must be tiled
with the same tile size, and tiles must be stored using the new-style
JPEG compression format, i.e. TIFF compression == 7.
The lowest resolution page must have 4 or fewer tiles. If it has
more than 1, this script will leave space for the user to decide whether
final lowest zoom tile 0/0_0.jpg that is 1/2 scaled version of the image
represented by that last page should be generated or not.
File directory generated
dest-dir(DZC)
color_type_1
ImageProperties.xml
0
0_0.jpg
1
0_0.jpg
1_0.jpg
...
color_type_2
ImageProperties.xml
0
0_0.jpg
1
0_0.jpg
1_0.jpg
...
Since the tiled tiff kept padded tiles and openseadragon expected its
jpeg files to be cropped but not padded, the border tiles are cropped
and the width and height of image uses the actual image dimension
"""
# Parse CLI args: argv[1] = source pyramid directory, argv[2] = destination.
try:
    srcloc = sys.argv[1]
    outloc = sys.argv[2]

    # Source must exist and be a directory; otherwise print usage and quit.
    if not os.path.exists(srcloc) or not os.path.isdir(srcloc):
        sys.stderr.write('Pyramid directory must be given and exist')
        sys.stderr.write('\nusage: extract2dzi_rgb.py pyramid-file-directory dest-dir\n\n')
        sys.exit(1)

    # Destination tree is created on demand.
    if not os.path.exists(outloc):
        os.makedirs(outloc)
except:
    # Bare except (Python 2): also catches the sys.exit above; usage is
    # printed and the original exception/exit is re-raised.
    sys.stderr.write('\nusage: extract2dzi_rgb.py pyramid-file-directory dest-dir\n\n')
    raise
## 20140403-R26-Tdt-JJG-0-38-000-DAPI-Z3.tif
## 20140403-R26-Tdt-JJG-0-38-000-FITC-Z3.tif
## 20140403-R26-Tdt-JJG-0-38-000-Rhodamine-Z3.tif
## iterate through the files,
## if valid tiff file, then change the outdir to
## outdir/DAPI/.xml,0,1..
## essentialy like calling extract2dzi.py filename outdir/color
# Module-level state shared by the helpers below; (re)set per input file
# by processOne() and consumed by dump_tile()/load_tile().
infile=None        # open file handle for the TIFF currently being processed
txsize=0           # tile width of the current page
tysize=0           # tile height of the current page
pxsize=0           # image width of the current page
pysize=0           # image height of the current page
zoomno=0           # zoom level currently being emitted
total_tiles=0      # running tile count for the current file
outdirloc=0        # output directory for the current channel

# Output path templates, keyed by channel output dir / zoom / tile position.
topdir_template = '%(outdir)s'
dir_template = topdir_template +'/%(zoomno)d'
tile_template = dir_template + '/%(tcolno)d_%(trowno)d.jpg'
image_template = '%(outdir)s/ImageProperties.xml'
################# helper functions ###################
# http://www.w3.org/Graphics/JPEG/jfif3.pdf
def jpeg_assemble(jpeg_tables_bytes, jpeg_bytes):
    """Splice the shared JPEG tables into a tile's JPEG stream.

    The tables are inserted immediately after the 2-byte SOI marker so the
    tile becomes a standalone, decodable JPEG.
    """
    soi, remainder = jpeg_bytes[:2], jpeg_bytes[2:]
    return soi + jpeg_tables_bytes + remainder
def load_tile(tile_offset, tile_length):
    """Read one raw JPEG tile (without shared tables) from the open TIFF."""
    # Relies on module-global `infile`, opened by processOne().
    infile.seek(tile_offset)
    return infile.read(tile_length)
def dump_tile(tileno, trow, trows, tcol, tcols, jpeg_tables_bytes, tile_offset, tile_length):
    """Output one tile. Note this manages global state for tile grouping in subdirs.

    Writes the tile as <outdir>/<zoom>/<col>_<row>.jpg and returns that
    path.  Border tiles are cropped from the padded TIFF tile down to the
    true image extent (pxsize/pysize come from module globals).
    """
    global zoomno
    global total_tiles

    cropIt = False
    if (trow+1 == trows) or (tcol+1 == tcols) :
        #this is a border tile, crop it if need to
        if tcol+1 == tcols :
            cpxsize= (pxsize-(txsize * tcol))
        else:
            cpxsize=txsize
        if trow+1 == trows :
            cpysize= (pysize-(tysize * trow))
        else:
            cpysize=tysize
        cropIt = True

    total_tiles += 1

    topdir = topdir_template % dict(
        outdir = outdirloc
        )
    if not os.path.exists(topdir):
        # Python 2 octal literal; 0755 = rwxr-xr-x
        os.makedirs(topdir, mode=0755)

    dirname = dir_template % dict(
        outdir = outdirloc,
        zoomno = zoomno
        )
    if not os.path.exists(dirname):
        # create tile group dir on demand
        os.makedirs(dirname, mode=0755)

    outname = tile_template % dict(
        outdir = outdirloc,
        zoomno = zoomno,
        tcolno = tcol,
        trowno = trow
        )

    # Re-attach the shared JPEG tables to make a standalone JPEG stream.
    data= jpeg_assemble(jpeg_tables_bytes, load_tile(tile_offset, tile_length));

    image = Image.open(StringIO(data))
    if cropIt :
        image = image.crop((0,0, cpxsize, cpysize))
    image.save(outname, 'JPEG')

    return outname
def get_page_info(page):
    """Extract dimensions, tile-grid size and shared JPEG tables for a page.

    Returns (pxsize, pysize, txsize, tysize, tcols, trows,
    jpeg_tables_bytes) for one tiled TIFF page.
    """
    pxsize = page.tags.image_width.value
    pysize = page.tags.image_length.value

    # get common JPEG tables to insert into all tiles
    # ffd8 ffdb .... ffd9
    if hasattr(page.tags, 'jpeg_tables'):
        # trim off start-image/end-image byte markers at prefix and suffix
        jpeg_tables_bytes = bytes(bytearray(page.tags.jpeg_tables.value))[2:-2]
    else:
        # no common tables to insert?
        jpeg_tables_bytes = bytes(bytearray([]))

    # this page has multiple JPEG tiles
    txsize = page.tags.tile_width.value
    tysize = page.tags.tile_length.value
    # Ceil-divide (Python 2 integer division) to count partial edge tiles.
    tcols = pxsize / txsize + (pxsize % txsize > 0)
    trows = pysize / tysize + (pysize % tysize > 0)

    return pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes
def processOne(fname, outdirloc, color) :
    """Convert one pyramidal TIFF into a tree of JPEG tiles plus an
    ImageProperties.xml descriptor under `outdirloc`.

    Pages are processed from lowest to highest resolution; each tiled page
    becomes one zoom level.  Module globals are (re)set per call and
    consumed by dump_tile()/load_tile().
    """
    global infile
    global txsize
    global tysize
    global pxsize
    global pysize
    global zoomno
    global total_tiles

    infile=open(fname, 'rb')
    t=fname.rsplit('/',1);
    tiff = tifffile.TiffFile(fname)
    pages = list(tiff)
    outinfo = []
    # we need to go from lowest to highest zoom level
    pages.reverse()

    # skip pages that aren't tiled... thumbnails?!
    outpages = [ page for page in pages if hasattr(page.tags, 'tile_offsets') ]

    # Normalize single-tile pages where tifffile reports a scalar
    # offset/byte-count instead of a list.
    if type(outpages[0].tags.tile_offsets.value) is int:
        outpages[0].tags.tile_offsets.value=[outpages[0].tags.tile_offsets.value]
        outpages[0].tags.tile_byte_counts.value=[outpages[0].tags.tile_byte_counts.value]

    zoomno = 0
    lowest_level = 0
    total_tiles = 0

    # remember values for debugging sanity checks
    prev_page = None
    tile_width = None
    # BUG FIX: this was initialized under the name `tile_length`, but the
    # sanity check below reads `tile_height`; initialize the name actually
    # used so the intent is explicit.
    tile_height = None
    reduce_ratio = 2 #default

    for page in outpages:
        # panic if these change from reverse-engineered samples
        assert page.tags.fill_order.value == 1
        assert page.tags.orientation.value == 1
        assert page.tags.compression.value == 7  # new-style JPEG

        if prev_page is not None:
            # scale factor between consecutive pyramid levels
            reduce_ratio = (page.tags.image_width.value / prev_page.tags.image_width.value)

        pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes = get_page_info(page)

        for tileno in range(0, len(page.tags.tile_offsets.value)):
            # figure position of tile within tile array (Python 2 int division)
            trow = tileno / tcols
            tcol = tileno % tcols

            assert trow >= 0 and trow < trows
            assert tcol >= 0 and tcol < tcols

            outname = dump_tile(
                tileno,
                trow, trows,
                tcol, tcols,
                jpeg_tables_bytes,
                page.tags.tile_offsets.value[tileno],
                page.tags.tile_byte_counts.value[tileno])

            if tile_width is not None:
                # all pages must share one tile geometry
                assert tile_width == txsize
                assert tile_height == tysize
            else:
                tile_width = txsize
                tile_height = tysize

        outinfo.append(
            dict(
                tile_width= txsize,
                tile_length= tysize,
                image_width_orig= pxsize,
                image_length_orig= pysize,
                image_width_padded= tcols * txsize,
                image_length_padded= trows * tysize,
                image_level = zoomno,
                total_tile_count= total_tiles,
                color_type=color,
                level_scale = reduce_ratio
                )
            )

        # each page is next higher zoom level
        zoomno += 1
        prev_page = page

    infile.close()

    # The last (highest-resolution) page describes the whole image.
    imageinfo=outinfo[-1]
    imageinfo['image_lowest_level']=lowest_level
    imageinfo['data_location']=outdirloc;

    image_descriptor = """\
<?xml version="1.0" encoding="UTF-8"?>
<IMAGE_PROPERTIES
width="%(image_width_orig)d"
height="%(image_length_orig)d"
numTiles="%(total_tile_count)d"
numImages="1"
version="2.0"
meterScaleInPixels="402738.62263391056"
tileWidth="%(tile_width)d"
tileHeight="%(tile_length)d"
levelScale="%(level_scale)d"
minLevel="%(image_lowest_level)d"
maxLevel="%(image_level)d"
channelName="%(color_type)s"
data="%(data_location)s"
/>
""" % imageinfo

    iname= image_template % dict(outdir = outdirloc)
    f = open('%s' % iname, 'w')
    f.write(image_descriptor)
    # BUG FIX: `f.close` was referenced but never CALLED, so the descriptor
    # file was only flushed/closed at interpreter exit.
    f.close()
###############################################
# Per-run collections (populated by getTiffFiles / the main body below).
tiff_files = []
tiff_outpages = []
tiff_tifffile = []
tiff_infile = []
tiff_maxval = []

# Known channel names grouped by the RGB component they map to; input
# filenames embed one of these names.
redColors = ['Rhodamine', 'RFP', 'Alexa Fluor 555', 'Alexa Fluor 594', 'tdTomato', 'Alexa Fluor 633', 'Alexa Fluor 647']
greenColors = ['FITC', 'Alexa 488', 'EGFP', 'Alexa Fluor 488']
blueColors = ['DAPI']
tiff_colors = [redColors, greenColors, blueColors]
def getFileColor(file):
    """Return the channel color embedded in a tiff filename.

    Exits the process with an error message when no known color matches.
    """
    for palette in tiff_colors:
        for color in palette:
            if re.match('.*[-]%s([-]Z[0-9]+)*[.]tif' % color, file):
                return color
    sys.stderr.write('Unknown color for file "%s" \n' % file)
    sys.exit(1)
def checkFileColors(files):
    """Exit with an error if any filename lacks a recognized -<color>-Z1 suffix."""
    for file in files:
        matched = any(
            re.match('.*[-]%s[-]Z1[.]tif' % color, file)
            for colors in tiff_colors
            for color in colors)
        if not matched:
            sys.stderr.write('Unknown color for file "%s" \n' % file)
            sys.exit(1)
def colorFile(files, colors, pattern):
    """Pick one file per color whose name matches -<color><pattern>.

    A color contributes a file only when its match is unique; returns the
    collected list, or None when no color matched uniquely.
    """
    matches = []
    for color in colors:
        candidates = [f for f in files
                      if re.match('.*[-]%s%s' % (color, pattern), f)]
        if len(candidates) == 1:
            matches.append(candidates[0])
    return matches if matches else None
def getTiffFiles(dname):
    """Populate global `tiff_files` with one representative tiff per channel.

    When Z-stacked files (-Z<n>.tif) are present, the middle stack is
    chosen; otherwise any .tif in the directory is considered.
    """
    global tiff_files
    files = os.listdir(dname)
    z1 = [f for f in files if re.match('.*[-]Z1[.]tif', f)]
    if len(z1) > 0:
        checkFileColors(z1)
        # Python 2 integer division: stacks per channel, then middle stack
        # (rounded up for odd counts).
        stacks = len(files) / len(z1)
        stackNo = stacks / 2
        if stackNo * 2 < stacks:
            stackNo += 1
        stackPattern = '[-]Z%d[.]tif' % stackNo
    else:
        stackPattern = '[.]tif'
    for colors in tiff_colors:
        colorFiles = colorFile(files, colors, stackPattern)
        if colorFiles:
            for file in colorFiles:
                tiff_files.append(file)
    if len(tiff_files) == 0:
        # No per-color match: fall back to everything matching the pattern.
        tiff_files = [ '%s' % (f) for f in files if re.match('.*%s' % stackPattern, f) ]
####### Main body ######
# Collect the per-channel tiff files, then extract each one into its own
# color-named DZI output directory.
try:
    getTiffFiles(srcloc)
except SystemExit:
    raise

if len(tiff_files) == 0:
    # Python 2 print statement (this script targets Python 2).
    print 'Nothing to do'
    sys.exit()

for fidx in range(0, len(tiff_files)):
    fname = tiff_files[fidx]
    color = getFileColor(fname)
    outdirloc='%s/%s' %(outloc, color)
    if not os.path.exists(outdirloc):
        os.makedirs(outdirloc)
    newfname="%s/%s" %(srcloc, fname)
    processOne(newfname, outdirloc, color);
| |
from __future__ import division, print_function, absolute_import
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, product, greater, array,
all, where, isscalar, asarray, inf, abs,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd
from scipy._lib._util import _asarray_validated, _lazywhere
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=None, factor=100, diag=None):
    """
    Find the roots of a function.

    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.

    Parameters
    ----------
    func : callable ``f(x, *args)``
        A function that takes at least one (possibly vector) argument.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple, optional
        Any extra arguments to `func`.
    fprime : callable(x), optional
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool, optional
        If True, return optional outputs.
    col_deriv : bool, optional
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float, optional
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple, optional
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float, optional
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as a scale factors for the
        variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys:

        ``nfev``
            number of function calls
        ``njev``
            number of Jacobian calls
        ``fvec``
            function evaluated at the output
        ``fjac``
            the orthogonal matrix, q, produced by the QR
            factorization of the final approximate Jacobian
            matrix, stored column wise
        ``r``
            upper triangular matrix produced by QR factorization
            of the same matrix
        ``qtf``
            the vector ``(transpose(q) * fvec)``

    ier : int
        An integer flag.  Set to 1 if a solution was found, otherwise refer
        to `mesg` for more information.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
    functions. See the 'hybr' `method` in particular.

    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.

    """
    # Delegate to the shared hybrd/hybrj driver; `epsfcn` maps onto its
    # `eps` option.
    options = {'col_deriv': col_deriv,
               'xtol': xtol,
               'maxfev': maxfev,
               'band': band,
               'eps': epsfcn,
               'factor': factor,
               'diag': diag}

    res = _root_hybr(func, x0, args, jac=fprime, **options)
    if full_output:
        # Re-shape the OptimizeResult into the legacy 4-tuple return.
        x = res['x']
        info = dict((k, res.get(k))
                    for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
        info['fvec'] = res['fun']
        return x, info, res['status'], res['message']
    else:
        # Mirror MINPACK's convention: status 1 is success, 2-5 are
        # soft failures (warn), anything else is a hard error.
        status = res['status']
        msg = res['message']
        if status == 0:
            raise TypeError(msg)
        elif status == 1:
            pass
        elif status in [2, 3, 4, 5]:
            warnings.warn(msg, RuntimeWarning)
        else:
            raise TypeError(msg)
        return res['x']
def _root_hybr(func, x0, args=(), jac=None,
               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
               factor=100, diag=None, **unknown_options):
    """
    Find the roots of a multivariate function using MINPACK's hybrd and
    hybrj routines (modified Powell method).

    Options
    -------
    col_deriv : bool
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    eps : float
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `eps` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the
        variables.

    """
    _check_unknown_options(unknown_options)
    epsfcn = eps

    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    # Probe the function once to learn its output shape and dtype.
    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    Dfun = jac
    if Dfun is None:
        # No Jacobian supplied: hybrd with finite differences; negative
        # band values tell MINPACK the Jacobian is dense.
        if band is None:
            ml, mu = -10, -10
        else:
            ml, mu = band[:2]
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
                                 ml, mu, epsfcn, factor, diag)
    else:
        # Analytic Jacobian: validate its shape, then use hybrj.
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, 1,
                                 col_deriv, xtol, maxfev, factor, diag)

    x, status = retval[0], retval[-1]

    errors = {0: "Improper input parameters were entered.",
              1: "The solution converged.",
              2: "The number of calls to function has "
                  "reached maxfev = %d." % maxfev,
              3: "xtol=%f is too small, no further improvement "
                  "in the approximate\n  solution "
                  "is possible." % xtol,
              4: "The iteration is not making good progress, as measured "
                  "by the \n  improvement from the last five "
                  "Jacobian evaluations.",
              5: "The iteration is not making good progress, "
                  "as measured by the \n  improvement from the last "
                  "ten iterations.",
              'unknown': "An error occurred."}

    info = retval[1]
    info['fun'] = info.pop('fvec')
    sol = OptimizeResult(x=x, success=(status == 1), status=status)
    sol.update(info)
    # BUG FIX: on an unknown status the fallback message was previously
    # written into `info` AFTER sol.update(info), so `sol` ended up with
    # no 'message' key at all.  Write the message into `sol` directly.
    sol['message'] = errors.get(status, errors['unknown'])

    return sol
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers. It must not return NaNs or
        fitting might fail.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        non-zero to return all optional outputs.
    col_deriv : bool, optional
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided
        then the default `maxfev` is 100*(N+1) where N is the number of elements
        in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as a scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution. None if a
        singular matrix encountered (indicates very flat curvature in
        some direction). This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the key s:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).
    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

        func(params) = ydata - f(xdata, params)

    so that the objective function is ::

        min   sum((ydata - f(xdata, params))**2, axis=0)
        params
    """
    x0 = asarray(x0).flatten()
    n = len(x0)
    # The MINPACK wrappers expect extra arguments as a tuple.
    if not isinstance(args, tuple):
        args = (args,)
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    # Least squares needs at least as many residuals (M) as parameters (N).
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        # Default forward-difference step scale: machine eps of the residual
        # dtype.
        epsfcn = finfo(dtype).eps
    if Dfun is None:
        if maxfev == 0:
            maxfev = 200*(n + 1)
        # lmdif estimates the Jacobian by forward differences.
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        # Validate the user Jacobian's shape; it is transposed internally
        # when col_deriv is set.
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)
    # Map each MINPACK info code to (message, exception-class-or-None).
    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n is possible.""" % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}
    info = retval[-1]  # The FORTRAN return value
    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            # Soft failures: warn rather than raise when the caller did not
            # request full output.
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                # info code not present in the table above
                raise errors['unknown'][1](errors['unknown'][0])
    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            # Rebuild cov_x ~ inv(J^T J) from the QR factors returned by
            # MINPACK: 'fjac' holds R stored column-wise and 'ipvt' the
            # column permutation.
            # NOTE(review): numpy.dual is deprecated in recent NumPy; `inv`
            # is also available from numpy.linalg -- confirm before porting.
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except (LinAlgError, ValueError):
                # Singular Jacobian at the solution: leave cov_x as None.
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
def _wrap_func(func, xdata, ydata, weights):
if weights is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
else:
def func_wrapped(params):
return weights * (func(xdata, *params) - ydata)
return func_wrapped
def _wrap_jac(jac, xdata, weights):
if weights is None:
def jac_wrapped(params):
return jac(xdata, *params)
else:
def jac_wrapped(params):
return weights[:, np.newaxis] * np.asarray(jac(xdata, *params))
return jac_wrapped
def _initialize_feasible(lb, ub):
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``

    Parameters
    ----------
    f : callable
        The model function, f(x, ...). It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : An M-length sequence or an (k,M)-shaped array
        for functions with k predictors.
        The independent variable where the data is measured.
    ydata : M-length sequence
        The dependent data --- nominally f(xdata, ...)
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the parameters. If None, then the initial
        values will all be 1 (if the number of parameters for the function
        can be determined using introspection, otherwise a ValueError
        is raised).
    sigma : None or M-length sequence, optional
        If not None, the uncertainties in the ydata array. These are used as
        weights in the least-squares problem
        i.e. minimising ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )``
        If None, the uncertainties are assumed to be 1.
    absolute_sigma : bool, optional
        If False, `sigma` denotes relative weights of the data points.
        The returned covariance matrix `pcov` is based on *estimated*
        errors in the data, and is not affected by the overall
        magnitude of the values in `sigma`. Only the relative
        magnitudes of the `sigma` values matter.

        If True, `sigma` describes one standard deviation errors of
        the input data points. The estimated covariance in `pcov` is
        based on these values.
    check_finite : bool, optional
        If True, check that the input arrays do not contain nans of infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain nans. Default is True.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each element of the tuple must be either an array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters.) Use ``np.inf`` with an
        appropriate sign to disable bounds on all or some parameters.

        .. versionadded:: 0.17
    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization. See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables, use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure. It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared error
        of ``f(xdata, *popt) - ydata`` is minimized
    pcov : 2d array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have a full rank, then
        'lm' method returns a matrix filled with ``np.inf``, on the other hand
        'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
        the covariance matrix.

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.
    RuntimeError
        if the least-squares minimization fails.
    OptimizeWarning
        if covariance of the parameters can not be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import curve_fit
    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> ydata = y + 0.2 * np.random.normal(size=len(xdata))

    >>> popt, pcov = curve_fit(func, xdata, ydata)

    Constrain the optimization to the region of ``0 < a < 3``, ``0 < b < 2``
    and ``0 < c < 1``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 2., 1.]))
    """
    if p0 is None:
        # determine number of parameters by inspecting the function
        from scipy._lib._util import getargspec_no_self as _getargspec
        args, varargs, varkw, defaults = _getargspec(f)
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size
    # Broadcast scalar bounds to arrays of length n.
    lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = _initialize_feasible(lb, ub)
    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        # 'lm' cannot handle bounds, so default to 'trf' when bounds are set.
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'
    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")
    # NaNs can not be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata)
    else:
        ydata = np.asarray(ydata)
    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata)
        else:
            xdata = np.asarray(xdata)
    # sigma enters as per-point residual weights 1/sigma.
    weights = 1.0 / asarray(sigma) if sigma is not None else None
    func = _wrap_func(f, xdata, ydata, weights)
    if callable(jac):
        jac = _wrap_jac(jac, xdata, weights)
    elif jac is None and method != 'lm':
        jac = '2-point'
    if method == 'lm':
        # Remove full_output from kwargs, otherwise we're passing it in twice.
        return_full = kwargs.pop('full_output', False)
        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError("Optimal parameters not found: " + errmsg)
    else:
        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
        if 'max_nfev' not in kwargs:
            kwargs['max_nfev'] = kwargs.pop('maxfev', None)
        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)
        if not res.success:
            raise RuntimeError("Optimal parameters not found: " + res.message)
        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x
        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        # full_output is only supported on the 'lm' code path.
        return_full = False
    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(inf)
        warn_cov = True
    elif not absolute_sigma:
        if ydata.size > p0.size:
            # Scale by the residual variance when sigma holds only relative
            # weights (reduced chi-square scaling).
            s_sq = cost / (ydata.size - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(inf)
            warn_cov = True
    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)
    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.

    Uses MINPACK's ``chkder`` twice: first to generate a perturbed point
    ``xp`` near `x0`, then to compare the user-supplied Jacobian `Dfcn`
    against the change in `fcn` between the two points.

    Returns
    -------
    good : int
        Nonzero only when every per-component measure in `err` exceeds 0.5
        (i.e. the gradient appears consistent with the function).
    err : ndarray
        Per-component correctness measures produced by ``chkder``.
    """
    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x, *args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x, *args))
    fjac = fjac.reshape((m, n))
    if col_deriv == 0:
        # chkder expects the Jacobian stored with derivatives down columns.
        fjac = transpose(fjac)
    xp = zeros((n,), float)
    err = zeros((m,), float)
    fvecp = None
    # Mode 1: compute the perturbed point xp.
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
    fvecp = atleast_1d(fcn(xp, *args))
    fvecp = fvecp.reshape((m,))
    # Mode 2: fill err with the gradient-consistency measures.
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
    good = (product(greater(err, 0.5), axis=0))
    return (good, err)
def _del2(p0, p1, d):
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
    """Iterate `func` from `x0` until the relative change drops below `xtol`.

    When `use_accel` is true, each step applies Steffensen's method with
    Aitken's ``Del^2`` extrapolation; otherwise plain iteration is used.
    Raises RuntimeError after `maxiter` steps without convergence.
    """
    current = x0
    for _ in range(maxiter):
        step1 = func(current, *args)
        if not use_accel:
            candidate = step1
        else:
            step2 = func(step1, *args)
            denom = step2 - 2.0 * step1 + current
            # Where the denominator vanishes the extrapolation is undefined;
            # fall back to the plain second iterate there.
            candidate = _lazywhere(denom != 0, (current, step1, denom),
                                   f=_del2, fillvalue=step2)
        relerr = _lazywhere(current != 0, (candidate, current),
                            f=_relerr, fillvalue=candidate)
        if np.all(np.abs(relerr) < xtol):
            return candidate
        current = candidate
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter,
                                                                   candidate)
    raise RuntimeError(msg)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
    """
    Find a fixed point of the function.

    Given a function of one or more variables and a starting point, find a
    fixed-point of the function: i.e. where ``func(x0) == x0``.

    Parameters
    ----------
    func : function
        Function to evaluate.
    x0 : array_like
        Fixed point of function.
    args : tuple, optional
        Extra arguments to `func`.
    xtol : float, optional
        Convergence tolerance, defaults to 1e-08.
    maxiter : int, optional
        Maximum number of iterations, defaults to 500.
    method : {"del2", "iteration"}, optional
        Method of finding the fixed-point, defaults to "del2"
        which uses Steffensen's Method with Aitken's ``Del^2``
        convergence acceleration [1]_. The "iteration" method simply iterates
        the function until convergence is detected, without attempting to
        accelerate the convergence.

    References
    ----------
    .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80

    Examples
    --------
    >>> from scipy import optimize
    >>> def func(x, c1, c2):
    ...    return np.sqrt(c1/(x+c2))
    >>> c1 = np.array([10,12.])
    >>> c2 = np.array([3, 5.])
    >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
    array([ 1.4920333 ,  1.37228132])
    """
    # Map each supported method onto its acceleration flag; an unknown
    # method raises KeyError, matching the historical behavior.
    accel_for = {'del2': True, 'iteration': False}
    use_accel = accel_for[method]
    start = _asarray_validated(x0, as_inexact=True)
    return _fixed_point_helper(func, start, args, xtol, maxiter, use_accel)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.volumes.volume_types \
import forms as volume_types_forms
from openstack_dashboard.dashboards.admin.volumes.volumes \
import forms as volumes_forms
class CreateVolumeTypeView(forms.ModalFormView):
    """Modal form view for creating a new volume type."""

    form_class = volumes_forms.CreateVolumeType
    template_name = 'admin/volumes/volume_types/create_volume_type.html'
    # URL pattern name; resolved at request time in get_success_url.
    success_url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("Create a Volume Type")

    def get_success_url(self):
        # Reverse lazily so the URLconf is fully loaded when resolving.
        return reverse(self.success_url)
class VolumeTypeEncryptionDetailView(generic.TemplateView):
    """Read-only detail page for a volume type's encryption settings."""

    template_name = ("admin/volumes/volume_types"
                     "/volume_encryption_type_detail.html")
    page_title = _("Volume Type Encryption Details")

    def get_context_data(self, **kwargs):
        """Add the encryption record to the template context."""
        context = super(VolumeTypeEncryptionDetailView, self).\
            get_context_data(**kwargs)
        context["volume_type_encryption"] = self.get_data()
        return context

    @memoized.memoized_method
    def get_data(self):
        """Fetch the encryption type for the volume type in the URL kwargs.

        Returns None (after `exceptions.handle` schedules a redirect to the
        volumes index) when any part of the lookup fails.
        """
        try:
            volume_type_id = self.kwargs['volume_type_id']
            self._volume_type_encryption = api.cinder.\
                volume_encryption_type_get(self.request, volume_type_id)
            # Look up the human-readable name of this volume type so the
            # template can display it alongside the encryption record.
            volume_type_list = api.cinder.volume_type_list(self.request)
            for volume_type in volume_type_list:
                if volume_type.id == volume_type_id:
                    self.name = volume_type.name
            # NOTE(review): if no volume type matched above, self.name may
            # be unset here and the next line raises AttributeError, which
            # the except below swallows -- confirm this is intended.
            self._volume_type_encryption.name = self.name
        except Exception:
            redirect = reverse('horizon:admin:volumes:index')
            exceptions.handle(self.request,
                              _('Unable to retrieve volume type encryption'
                                ' details.'),
                              redirect=redirect)
            return None
        return self._volume_type_encryption
class CreateVolumeTypeEncryptionView(forms.ModalFormView):
    """Modal form view for adding encryption to an existing volume type."""

    form_class = volume_types_forms.CreateVolumeTypeEncryption
    template_name = ("admin/volumes/volume_types/"
                     "create_volume_type_encryption.html")
    success_url = reverse_lazy('horizon:admin:volumes:index')
    page_title = _("Create an Encrypted Volume Type")

    @memoized.memoized_method
    def get_name(self):
        """Return the display name of the volume type in the URL kwargs."""
        try:
            volume_type_list = api.cinder.volume_type_list(self.request)
            for volume_type in volume_type_list:
                if volume_type.id == self.kwargs['volume_type_id']:
                    self.name = volume_type.name
        except Exception:
            msg = _('Unable to retrieve volume type name.')
            url = reverse('horizon:admin:volumes:index')
            exceptions.handle(self.request, msg, redirect=url)
        # NOTE(review): self.name is only assigned when a matching type was
        # found; with no match this raises AttributeError -- confirm.
        return self.name

    def get_context_data(self, **kwargs):
        """Expose the volume type id to the template."""
        context = super(CreateVolumeTypeEncryptionView, self).\
            get_context_data(**kwargs)
        context['volume_type_id'] = self.kwargs['volume_type_id']
        return context

    def get_initial(self):
        """Seed the form with the type's name and id."""
        name = self.get_name()
        return {'name': name,
                'volume_type_id': self.kwargs['volume_type_id']}
class CreateQosSpecView(forms.ModalFormView):
    """Modal form view for creating a new QoS spec."""

    form_class = volumes_forms.CreateQosSpec
    template_name = 'admin/volumes/volume_types/create_qos_spec.html'
    # URL pattern name; resolved at request time in get_success_url.
    success_url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("Create a QoS Spec")

    def get_success_url(self):
        # Reverse lazily so the URLconf is fully loaded when resolving.
        return reverse(self.success_url)
class EditQosSpecConsumerView(forms.ModalFormView):
    """Modal form view for changing the consumer of a QoS spec."""

    form_class = volume_types_forms.EditQosSpecConsumer
    template_name = 'admin/volumes/volume_types/edit_qos_spec_consumer.html'
    # URL pattern name; resolved at request time in get_success_url.
    success_url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("Edit QoS Spec Consumer")

    def get_success_url(self):
        return reverse(self.success_url)

    def get_context_data(self, **kwargs):
        """Expose the QoS spec id to the template."""
        context = super(EditQosSpecConsumerView, self).\
            get_context_data(**kwargs)
        context['qos_spec_id'] = self.kwargs["qos_spec_id"]
        return context

    @memoized.memoized_method
    def get_object(self, *args, **kwargs):
        """Fetch the QoS spec named in the URL kwargs (memoized)."""
        qos_spec_id = self.kwargs['qos_spec_id']
        try:
            self._object = api.cinder.qos_spec_get(self.request, qos_spec_id)
        except Exception:
            msg = _('Unable to retrieve QoS Spec details.')
            exceptions.handle(self.request, msg)
        # NOTE(review): if the API call failed and exceptions.handle did not
        # re-raise, self._object is unset here and this line raises
        # AttributeError -- confirm whether that is intended.
        return self._object

    def get_initial(self):
        """Seed the form with the spec id and the fetched spec object."""
        qos_spec = self.get_object()
        qos_spec_id = self.kwargs['qos_spec_id']
        return {'qos_spec_id': qos_spec_id,
                'qos_spec': qos_spec}
class ManageQosSpecAssociationView(forms.ModalFormView):
    """Modal form view to (dis)associate a QoS spec with a volume type."""

    form_class = volume_types_forms.ManageQosSpecAssociation
    template_name = 'admin/volumes/volume_types/associate_qos_spec.html'
    # URL pattern name; resolved at request time in get_success_url.
    success_url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("Associate QoS Spec with Volume Type")

    def get_success_url(self):
        return reverse(self.success_url)

    def get_context_data(self, **kwargs):
        """Expose the volume type id to the template."""
        context = super(ManageQosSpecAssociationView, self).\
            get_context_data(**kwargs)
        context['type_id'] = self.kwargs["type_id"]
        return context

    @memoized.memoized_method
    def get_object(self, *args, **kwargs):
        """Fetch the volume type named in the URL kwargs (memoized)."""
        type_id = self.kwargs['type_id']
        try:
            self._object = api.cinder.volume_type_get(self.request, type_id)
        except Exception:
            msg = _('Unable to retrieve volume type details.')
            exceptions.handle(self.request, msg)
        # NOTE(review): self._object is unset when the lookup above failed
        # and exceptions.handle did not re-raise -- confirm.
        return self._object

    @memoized.memoized_method
    def get_qos_specs(self, *args, **kwargs):
        """Return all QoS specs, or None when the listing fails."""
        try:
            return api.cinder.qos_spec_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve QoS Specs.'))

    def find_current_qos_spec_association(self, vol_type_id):
        """Return the QoS spec associated with `vol_type_id`, if any.

        Returns None when there is no association or the lookup fails.
        """
        qos_specs = self.get_qos_specs()
        if qos_specs:
            try:
                # find out which QOS Spec is currently associated with this
                # volume type, if any
                # NOTE - volume type can only have ONE QOS Spec association
                for qos_spec in qos_specs:
                    type_ids = \
                        api.cinder.qos_spec_get_associations(self.request,
                                                             qos_spec.id)
                    for vtype in type_ids:
                        if vtype.id == vol_type_id:
                            return qos_spec
            except Exception:
                exceptions.handle(
                    self.request,
                    _('Unable to retrieve QoS Spec association.'))
        return None

    def get_initial(self):
        """Seed the form with the type, current association and choices."""
        volume_type = self.get_object()
        vol_type_id = self.kwargs['type_id']
        cur_qos_spec_id = None
        cur_qos_spec_name = None
        qos_spec = self.find_current_qos_spec_association(vol_type_id)
        if qos_spec:
            cur_qos_spec_id = qos_spec.id
            cur_qos_spec_name = qos_spec.name
        return {'type_id': vol_type_id,
                'name': getattr(volume_type, 'name', None),
                'cur_qos_spec_id': cur_qos_spec_id,
                'cur_qos_spec_name': cur_qos_spec_name,
                'qos_specs': self.get_qos_specs()}
| |
"""Support for Traccar device tracking."""
from datetime import datetime, timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_SSL, CONF_VERIFY_SSL,
CONF_PASSWORD, CONF_USERNAME, ATTR_BATTERY_LEVEL,
CONF_SCAN_INTERVAL, CONF_MONITORED_CONDITIONS,
CONF_EVENT)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)

# Extra state attributes attached to each tracked device (see
# TraccarScanner.import_device_data).
ATTR_ADDRESS = 'address'
ATTR_CATEGORY = 'category'
ATTR_GEOFENCE = 'geofence'
ATTR_MOTION = 'motion'
ATTR_SPEED = 'speed'
ATTR_TRACKER = 'tracker'
ATTR_TRACCAR_ID = 'traccar_id'
ATTR_STATUS = 'status'

# Traccar event types that can be forwarded onto the Home Assistant bus.
EVENT_DEVICE_MOVING = 'device_moving'
EVENT_COMMAND_RESULT = 'command_result'
EVENT_DEVICE_FUEL_DROP = 'device_fuel_drop'
EVENT_GEOFENCE_ENTER = 'geofence_enter'
EVENT_DEVICE_OFFLINE = 'device_offline'
EVENT_DRIVER_CHANGED = 'driver_changed'
EVENT_GEOFENCE_EXIT = 'geofence_exit'
EVENT_DEVICE_OVERSPEED = 'device_overspeed'
EVENT_DEVICE_ONLINE = 'device_online'
EVENT_DEVICE_STOPPED = 'device_stopped'
EVENT_MAINTENANCE = 'maintenance'
EVENT_ALARM = 'alarm'
EVENT_TEXT_MESSAGE = 'text_message'
EVENT_DEVICE_UNKNOWN = 'device_unknown'
EVENT_IGNITION_OFF = 'ignition_off'
EVENT_IGNITION_ON = 'ignition_on'
EVENT_ALL_EVENTS = 'all_events'

# How often to poll the Traccar server unless overridden in the config.
DEFAULT_SCAN_INTERVAL = timedelta(seconds=30)
SCAN_INTERVAL = DEFAULT_SCAN_INTERVAL
# Platform configuration: server credentials are required; extra monitored
# attributes and event subscriptions default to empty lists.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_PORT, default=8082): cv.port,
    vol.Optional(CONF_SSL, default=False): cv.boolean,
    vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
    vol.Optional(CONF_MONITORED_CONDITIONS,
                 default=[]): vol.All(cv.ensure_list, [cv.string]),
    # Only known Traccar event types may be subscribed to.
    vol.Optional(CONF_EVENT,
                 default=[]): vol.All(cv.ensure_list,
                                      [vol.Any(EVENT_DEVICE_MOVING,
                                               EVENT_COMMAND_RESULT,
                                               EVENT_DEVICE_FUEL_DROP,
                                               EVENT_GEOFENCE_ENTER,
                                               EVENT_DEVICE_OFFLINE,
                                               EVENT_DRIVER_CHANGED,
                                               EVENT_GEOFENCE_EXIT,
                                               EVENT_DEVICE_OVERSPEED,
                                               EVENT_DEVICE_ONLINE,
                                               EVENT_DEVICE_STOPPED,
                                               EVENT_MAINTENANCE,
                                               EVENT_ALARM,
                                               EVENT_TEXT_MESSAGE,
                                               EVENT_DEVICE_UNKNOWN,
                                               EVENT_IGNITION_OFF,
                                               EVENT_IGNITION_ON,
                                               EVENT_ALL_EVENTS)]),
})
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
    """Validate the configuration and return a Traccar scanner."""
    from pytraccar.api import API

    session = async_get_clientsession(hass, config[CONF_VERIFY_SSL])
    api = API(
        hass.loop,
        session,
        config[CONF_USERNAME],
        config[CONF_PASSWORD],
        config[CONF_HOST],
        config[CONF_PORT],
        config[CONF_SSL],
    )
    interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
    scanner = TraccarScanner(
        api,
        hass,
        async_see,
        interval,
        config[CONF_MONITORED_CONDITIONS],
        config[CONF_EVENT],
    )
    return await scanner.async_init()
class TraccarScanner:
    """Define an object to retrieve Traccar data."""

    def __init__(self, api, hass, async_see, scan_interval,
                 custom_attributes,
                 event_types):
        """Initialize."""
        from stringcase import camelcase
        # Traccar reports event types in camelCase; keep a mapping back to
        # the snake_case names used in the config so fired events match it.
        self._event_types = {camelcase(evt): evt for evt in event_types}
        self._custom_attributes = custom_attributes
        self._scan_interval = scan_interval
        self._async_see = async_see
        self._api = api
        # Tracks whether the last poll successfully reached the server.
        self.connected = False
        self._hass = hass

    async def async_init(self):
        """Further initialize connection to Traccar."""
        await self._api.test_connection()
        if self._api.connected and not self._api.authenticated:
            _LOGGER.error("Authentication for Traccar failed")
            return False
        # Prime device data once, then poll on a fixed interval.
        await self._async_update()
        async_track_time_interval(self._hass,
                                  self._async_update,
                                  self._scan_interval)
        return True

    async def _async_update(self, now=None):
        """Update info from Traccar."""
        if not self.connected:
            # Retry the connection before asking for data again.
            _LOGGER.debug('Testing connection to Traccar')
            await self._api.test_connection()
            self.connected = self._api.connected
            if self.connected:
                _LOGGER.info("Connection to Traccar restored")
            else:
                return
        _LOGGER.debug('Updating device data')
        await self._api.get_device_info(self._custom_attributes)
        # Device import and event import run as separate tasks.
        self._hass.async_create_task(self.import_device_data())
        if self._event_types:
            self._hass.async_create_task(self.import_events())
        self.connected = self._api.connected

    async def import_device_data(self):
        """Import device data from Traccar."""
        for device_unique_id in self._api.device_info:
            device_info = self._api.device_info[device_unique_id]
            device = None
            attr = {}
            attr[ATTR_TRACKER] = 'traccar'
            # Attach only the attributes the server actually reported.
            if device_info.get('address') is not None:
                attr[ATTR_ADDRESS] = device_info['address']
            if device_info.get('geofence') is not None:
                attr[ATTR_GEOFENCE] = device_info['geofence']
            if device_info.get('category') is not None:
                attr[ATTR_CATEGORY] = device_info['category']
            if device_info.get('speed') is not None:
                attr[ATTR_SPEED] = device_info['speed']
            if device_info.get('battery') is not None:
                attr[ATTR_BATTERY_LEVEL] = device_info['battery']
            if device_info.get('motion') is not None:
                attr[ATTR_MOTION] = device_info['motion']
            if device_info.get('traccar_id') is not None:
                attr[ATTR_TRACCAR_ID] = device_info['traccar_id']
                # Resolve the raw device record to pick up its status.
                for dev in self._api.devices:
                    if dev['id'] == device_info['traccar_id']:
                        device = dev
                        break
            if device is not None and device.get('status') is not None:
                attr[ATTR_STATUS] = device['status']
            for custom_attr in self._custom_attributes:
                if device_info.get(custom_attr) is not None:
                    attr[custom_attr] = device_info[custom_attr]
            await self._async_see(
                dev_id=slugify(device_info['device_id']),
                gps=(device_info.get('latitude'),
                     device_info.get('longitude')),
                gps_accuracy=(device_info.get('accuracy')),
                attributes=attr)

    async def import_events(self):
        """Import events from Traccar."""
        device_ids = [device['id'] for device in self._api.devices]
        end_interval = datetime.utcnow()
        # Query only the window covered since the previous poll.
        start_interval = end_interval - self._scan_interval
        events = await self._api.get_events(
            device_ids=device_ids,
            from_time=start_interval,
            to_time=end_interval,
            event_types=self._event_types.keys())
        if events is not None:
            for event in events:
                device_name = next((
                    dev.get('name') for dev in self._api.devices
                    if dev.get('id') == event['deviceId']), None)
                self._hass.bus.async_fire(
                    'traccar_' + self._event_types.get(event["type"]), {
                        'device_traccar_id': event['deviceId'],
                        'device_name': device_name,
                        'type': event['type'],
                        'serverTime': event['serverTime'],
                        'attributes': event['attributes']
                    })
| |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A protocol for implementing high performance mixture evolutions."""
from typing import Any, cast, Iterable, Optional, Tuple, TypeVar, Union
import numpy as np
from typing_extensions import Protocol
from cirq import linalg
from cirq._doc import doc_private
from cirq.protocols.apply_unitary_protocol import (
apply_unitary,
ApplyUnitaryArgs,
)
from cirq.protocols.mixture_protocol import mixture
from cirq.protocols import qid_shape_protocol
from cirq.type_workarounds import NotImplementedType
# This is a special indicator value used by the apply_mixture method
# to determine whether or not the caller provided a 'default' argument. It must
# be of type np.ndarray to ensure the method has the correct type signature in
# that case. It is checked for using `is`, so it won't have a false positive if
# the user provides a different np.array([]) value.
# Sentinel default; always compared with `is` (see comment above).
RaiseTypeErrorIfNotProvided: np.ndarray = np.array([])
# Type variable for the caller-supplied default return value.
TDefault = TypeVar('TDefault')
class ApplyMixtureArgs:
    """Arguments for performing a mixture of unitaries.

    The receiving object is expected to mutate `target_tensor` so that it
    contains the state (state vector or density matrix) after applying the
    mixture, then return `target_tensor`. Alternatively, if workspace is
    required, the receiving object can overwrite `out_buffer` with the
    results and return `out_buffer`. Or, if the receiving object is
    attempting to be simple instead of fast, it can create an entirely new
    array and return that.

    Attributes:
        target_tensor: The input tensor that needs to be left (and
            potentially right) multiplied and summed, representing the
            effect of the mixture. The tensor will have the shape
            (2, 2, 2, ..., 2). It can correspond to a state vector or a
            density matrix.
        out_buffer: Pre-allocated workspace with the same shape and dtype as
            the target tensor. If buffers are used, the result should end up
            in this buffer. It is the responsibility of calling code to
            notice if the result is this buffer.
        auxiliary_buffer0: Pre-allocated workspace with the same shape and
            dtype as the target tensor.
        auxiliary_buffer1: Pre-allocated workspace with the same shape and
            dtype as the target tensor.
        left_axes: Which axes to multiply the left action of the mixture
            upon.
        right_axes: Which axes to multiply the right action of the mixture
            upon. If provided we will assume `target_tensor` is a density
            matrix, otherwise it will be assumed `target_tensor` is a state
            vector.
    """

    def __init__(
        self,
        target_tensor: np.ndarray,
        out_buffer: np.ndarray,
        auxiliary_buffer0: np.ndarray,
        auxiliary_buffer1: np.ndarray,
        left_axes: Iterable[int],
        right_axes: Optional[Iterable[int]] = None,
    ):
        """Args for apply mixture.

        Args:
            target_tensor: State tensor (state vector or density matrix)
                that the mixture acts on; shape (2, 2, ..., 2).
            out_buffer: Workspace with the same shape and dtype as
                `target_tensor`; results may be written here instead of in
                place.
            auxiliary_buffer0: Scratch buffer, same shape and dtype as
                `target_tensor`.
            auxiliary_buffer1: Scratch buffer, same shape and dtype as
                `target_tensor`.
            left_axes: Axes the mixture acts on from the left.
            right_axes: Axes the mixture acts on from the right; supplying
                this marks `target_tensor` as a density matrix rather than
                a state vector.
        """
        self.target_tensor = target_tensor
        self.out_buffer = out_buffer
        self.auxiliary_buffer0 = auxiliary_buffer0
        self.auxiliary_buffer1 = auxiliary_buffer1
        # Materialize axes as tuples so they survive being iterated more
        # than once even when a one-shot iterator was passed in.
        self.left_axes = tuple(left_axes)
        self.right_axes = None if right_axes is None else tuple(right_axes)
class SupportsApplyMixture(Protocol):
    """An object that can efficiently implement a mixture."""

    @doc_private
    def _apply_mixture_(
        self, args: ApplyMixtureArgs
    ) -> Union[np.ndarray, None, NotImplementedType]:
        """Efficiently applies a mixture.

        This method is given both the target tensor and workspace of the same
        shape and dtype. The method then either performs inline modifications
        of the target tensor and returns it, or writes its output into a
        workspace tensor and returns that. This signature makes it possible
        to write specialized simulation methods that run without performing
        large allocations, significantly increasing simulation performance.

        Args:
            args: A `cirq.ApplyMixtureArgs` object with the
                `args.target_tensor` to operate on, the `args.out_buffer`,
                `args.auxiliary_buffer0` and `args.auxiliary_buffer1`
                workspaces, and the `args.left_axes` (plus, for density
                matrices, `args.right_axes`) of the tensor to target with
                the mixture. Note that this method is permitted (and in fact
                expected) to mutate `args.target_tensor` and the workspace
                buffers.

        Returns:
            If the receiving object is not able to apply its mixture effect,
            None or NotImplemented should be returned.

            If the receiving object is able to work inline, it should
            directly mutate `args.target_tensor` and then return
            `args.target_tensor`. The caller will understand this to mean
            that the result is in `args.target_tensor`.

            If the receiving object is unable to work inline, it can write
            its output over `args.out_buffer` and then return
            `args.out_buffer`. The caller will understand this to mean that
            the result is in `args.out_buffer` (and so what was
            `args.out_buffer` will become `args.target_tensor` in the next
            call, and vice versa).

            The receiving object is also permitted to allocate a new
            numpy.ndarray and return that as its result.
        """
def apply_mixture(
    val: Any, args: ApplyMixtureArgs, *, default: TDefault = RaiseTypeErrorIfNotProvided
) -> Union[np.ndarray, TDefault]:
    """High performance evolution under a mixture of unitaries evolution.

    The strategies below are attempted in order, each falling through to the
    next on failure:

    A. `val._apply_mixture_(args)` — the specialized protocol method. A numpy
       array result is forwarded to the caller; a missing method, `None`, or
       `NotImplemented` falls through to step B.
    B. `cirq.apply_unitary` built from `args`. A numpy array is forwarded to
       the caller; `None` falls through to step C.
    C. `val._mixture_()`. A decomposition of `(probability, unitary)` tuples
       is applied term by term: raw matrices via targeted multiplication,
       operations via `cirq.apply_unitary`.
    D. Raise a TypeError, or return `default` when one was supplied.

    Args:
        val: The value with a mixture to apply to the target.
        args: A mutable `cirq.ApplyMixtureArgs` object describing the target
            tensor, available workspace, and left and right axes to operate
            on. The attributes of this object will be mutated as part of
            computing the result.
        default: What should be returned if `val` doesn't have a mixture. If
            not specified, a TypeError is raised instead of returning a
            default value.

    Returns:
        The evolved state: either `args.target_tensor` mutated in place,
        `args.out_buffer` (the caller is responsible for checking which one
        was returned and swapping buffers before the next call), or a freshly
        allocated array. If no strategy applies and `default` was given, the
        default is returned and `target_tensor` is not mutated. It is a
        contract violation for a result to be either auxiliary buffer.

    Raises:
        TypeError: `val` doesn't have a mixture and `default` wasn't
            specified.
        ValueError: Different left and right shapes of `args.target_tensor`
            selected by `left_axes` and `right_axes` or `qid_shape(val)`
            doesn't equal the left and right shapes.
        AssertionError: `_apply_mixture_` returned an auxiliary buffer.
    """
    # Validate shapes and detect state-vector vs density-matrix mode.
    val, args, is_density_matrix = _validate_input(val, args)

    # STEP A: prefer the specialized `_apply_mixture_` protocol method.
    strat = getattr(val, '_apply_mixture_', None)
    if strat is not None:
        result = strat(args)
        if result is not None and result is not NotImplemented:

            def err_str(buf_num_str):
                return (
                    "Object of type '{}' returned a result object equal to "
                    "auxiliary_buffer{}. This type violates the contract "
                    "that appears in apply_mixture's documentation.".format(type(val), buf_num_str)
                )

            assert result is not args.auxiliary_buffer0, err_str('0')
            assert result is not args.auxiliary_buffer1, err_str('1')
            return result

    # STEP B: treat `val` as a plain unitary if possible.
    unitary_result = _apply_unitary_strat(val, args, is_density_matrix)
    if unitary_result is not None:
        return unitary_result

    # STEP C: fall back to the object's `_mixture_` decomposition.
    prob_mix = mixture(val, None)
    if prob_mix is not None:
        return _mixture_strat(prob_mix, args, is_density_matrix)

    # STEP D: no strategy worked; use the caller's default or raise.
    if default is not RaiseTypeErrorIfNotProvided:
        return default
    raise TypeError(
        "object of type '{}' has no _apply_mixture_, _apply_unitary_, "
        "_unitary_, or _mixture_ methods (or they returned None or "
        "NotImplemented).".format(type(val))
    )
def _validate_input(val: Any, args: 'ApplyMixtureArgs') -> Tuple[Any, 'ApplyMixtureArgs', bool]:
    """Validate args input and determine if we are operating on a
    density matrix or a state vector.
    """

    def _selected_shape(axes):
        # Shape of `target_tensor` restricted to the given axes.
        return tuple(args.target_tensor.shape[axis] for axis in axes)

    left_shape = _selected_shape(args.left_axes)
    val_qid_shape = qid_shape_protocol.qid_shape(val, (2,) * len(args.left_axes))
    if val_qid_shape != left_shape:
        raise ValueError(
            'Invalid mixture qid shape is not equal to the '
            'selected left and right shape of target_tensor. '
            'Got {!r} but expected {!r}.'.format(val_qid_shape, left_shape)
        )

    # Having right axes is what marks `target_tensor` as a density matrix.
    is_density_matrix = args.right_axes is not None
    if is_density_matrix:
        right_shape = _selected_shape(args.right_axes)
        if left_shape != right_shape:
            raise ValueError(
                'Invalid target_tensor shape or selected axes. '
                'The selected left and right shape of '
                'target_tensor are not equal. Got {!r} and {!r}.'.format(left_shape, right_shape)
            )
    return val, args, is_density_matrix
def _apply_unitary_strat(
    val: Any, args: 'ApplyMixtureArgs', is_density_matrix: bool
) -> Optional[np.ndarray]:
    """Attempt to use `apply_unitary` and return the result.

    If `val` does not support `apply_unitary` returns None.
    """
    left_args = ApplyUnitaryArgs(
        target_tensor=args.target_tensor,
        available_buffer=args.auxiliary_buffer0,
        axes=args.left_axes,
    )
    left_result = apply_unitary(val, left_args, None)
    if left_result is None:
        return None

    # For a state vector, left multiplication is the whole operation.
    if not is_density_matrix:
        return left_result

    # cast is ok, is_density_matrix being true tells us right_axes isn't None.
    # The right action of U^dagger is implemented by conjugating the state,
    # applying U on the right axes, and conjugating back.
    right_args = ApplyUnitaryArgs(
        target_tensor=np.conjugate(left_result),
        available_buffer=args.auxiliary_buffer0,
        axes=cast(Tuple[int], args.right_axes),
    )
    # No default is passed: the successful left application above proves
    # `val` supports apply_unitary, so this call cannot need a fallback.
    right_result = apply_unitary(val, right_args)
    np.conjugate(right_result, out=right_result)
    return right_result
def _apply_unitary_from_matrix_strat(
    val: np.ndarray, args: 'ApplyMixtureArgs', is_density_matrix: bool
) -> Optional[np.ndarray]:
    """Used to enact mixture tuples that are given as (probability, np.ndarray)

    The matrix `val` is reshaped into a tensor acting on the qids selected by
    `args.left_axes` and left-multiplied onto the state. For a density matrix
    the conjugate is additionally applied on `args.right_axes`. Note the
    Optional return type is kept for symmetry with the other strategies;
    this function always returns a buffer.
    """
    qid_shape = tuple(args.target_tensor.shape[i] for i in args.left_axes)
    # Reshape the flat matrix into a tensor whose index structure matches
    # the selected qid axes (output indices followed by input indices).
    matrix_tensor = np.reshape(val.astype(args.target_tensor.dtype), qid_shape * 2)
    linalg.targeted_left_multiply(
        matrix_tensor, args.target_tensor, args.left_axes, out=args.auxiliary_buffer0
    )
    # State vector: left multiplication is the whole operation.
    if not is_density_matrix:
        return args.auxiliary_buffer0
    # No need to transpose as we are acting on the tensor
    # representation of matrix, so transpose is done for us.
    linalg.targeted_left_multiply(
        np.conjugate(matrix_tensor),
        args.auxiliary_buffer0,
        cast(Tuple[int], args.right_axes),
        out=args.target_tensor,
    )
    return args.target_tensor
def _mixture_strat(
    val: Any, args: 'ApplyMixtureArgs', is_density_matrix: bool
) -> Optional[np.ndarray]:
    """Attempt to use unitary matrices in _mixture_ and return the result.

    `val` is an iterable of `(probability, op-or-matrix)` tuples; the
    probability-weighted results are accumulated into `args.out_buffer`,
    which is returned.
    """
    # Accumulator for the weighted sum of per-term results.
    args.out_buffer[:] = 0
    # Keep a pristine copy of the input state: every term of the mixture
    # must be applied to the original state, not to the previous term's
    # output.
    np.copyto(dst=args.auxiliary_buffer1, src=args.target_tensor)
    for prob, op in val:
        np.copyto(dst=args.target_tensor, src=args.auxiliary_buffer1)
        right_result = _apply_unitary_strat(op, args, is_density_matrix)
        if right_result is None:
            # `op` did not support apply_unitary, so it is a raw matrix.
            right_result = _apply_unitary_from_matrix_strat(op, args, is_density_matrix)
        args.out_buffer += prob * right_result
    return args.out_buffer
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from glanceclient.common import exceptions as glance_exception
from horizon import api
from horizon import test
from keystoneclient import exceptions as keystone_exceptions
from mox import IgnoreArg, IsA
# Resolved once at import time; the redirect-on-error tests below expect to
# land back on the images index.
IMAGES_INDEX_URL = reverse('horizon:nova:images_and_snapshots:index')
class ImageViewTests(test.TestCase):
    """Tests for the images_and_snapshots image views (launch and detail).

    Every nova/glance call is stubbed with mox: each test records the exact
    expected API calls, switches mox to replay mode, then drives the view
    through the Django test client.
    """

    def test_launch_get(self):
        # GET of the launch form should populate flavor and keypair choices
        # from the (stubbed) nova API.
        image = self.images.first()
        quota_usages = self.quota_usages.first()
        self.mox.StubOutWithMock(api, 'image_get')
        self.mox.StubOutWithMock(api, 'tenant_quota_usages')
        # Two flavor_list calls, however, flavor_list is now memoized.
        # NOTE(review): stubbing the same attribute twice looks redundant —
        # recording two calls needs only one StubOutWithMock; confirm before
        # removing.
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'keypair_list')
        self.mox.StubOutWithMock(api, 'security_group_list')
        api.image_get(IsA(http.HttpRequest), image.id).AndReturn(image)
        api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(quota_usages)
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors.list())
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors.list())
        api.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs.list())
        api.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        self.mox.ReplayAll()

        url = reverse('horizon:nova:images_and_snapshots:images:launch',
                      args=[image.id])
        res = self.client.get(url)
        form = res.context['form']
        self.assertTemplateUsed(res,
                                'nova/images_and_snapshots/images/launch.html')
        self.assertEqual(res.context['image'].name, image.name)
        self.assertIn(self.flavors.first().name,
                      form.fields['flavor'].choices[0][1])
        self.assertEqual(self.keypairs.first().name,
                         form.fields['keypair'].choices[1][0])

    def test_launch_post(self):
        # POST with a volume selected should forward the expected
        # block_device_mapping to server_create and redirect on success.
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        USER_DATA = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        block_device_mapping = {device_name: u"%s::0" % volume_choice}
        self.mox.StubOutWithMock(api, 'image_get')
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'keypair_list')
        self.mox.StubOutWithMock(api, 'security_group_list')
        self.mox.StubOutWithMock(api, 'server_create')
        self.mox.StubOutWithMock(api, 'volume_list')
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors.list())
        api.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs.list())
        api.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.image_get(IsA(http.HttpRequest), image.id).AndReturn(image)
        api.volume_list(IsA(http.HttpRequest)).AndReturn(self.volumes.list())
        api.server_create(IsA(http.HttpRequest),
                          server.name,
                          image.id,
                          flavor.id,
                          keypair.name,
                          USER_DATA,
                          [sec_group.name],
                          block_device_mapping,
                          instance_count=IsA(int))
        self.mox.ReplayAll()

        form_data = {'method': 'LaunchForm',
                     'flavor': flavor.id,
                     'image_id': image.id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'user_data': USER_DATA,
                     'tenant_id': self.tenants.first().id,
                     'security_groups': sec_group.name,
                     'volume': volume_choice,
                     'device_name': device_name,
                     'count': 1}
        url = reverse('horizon:nova:images_and_snapshots:images:launch',
                      args=[image.id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(
            res, reverse('horizon:nova:instances_and_volumes:index'))

    def test_launch_flavorlist_error(self):
        # A flavor_list failure must not break the launch form rendering.
        image = self.images.first()
        self.mox.StubOutWithMock(api, 'image_get')
        self.mox.StubOutWithMock(api, 'tenant_quota_usages')
        # NOTE(review): duplicate StubOutWithMock — see note in
        # test_launch_get.
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'keypair_list')
        self.mox.StubOutWithMock(api, 'security_group_list')
        api.image_get(IsA(http.HttpRequest),
                      image.id).AndReturn(image)
        api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(
            self.quota_usages.first())
        exc = keystone_exceptions.ClientException('Failed.')
        api.flavor_list(IsA(http.HttpRequest)).AndRaise(exc)
        api.flavor_list(IsA(http.HttpRequest)).AndRaise(exc)
        api.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs.list())
        api.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        self.mox.ReplayAll()

        url = reverse('horizon:nova:images_and_snapshots:images:launch',
                      args=[image.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res,
                                'nova/images_and_snapshots/images/launch.html')

    def test_launch_keypairlist_error(self):
        # A keypair_list failure should leave only the blank keypair choice.
        image = self.images.first()
        self.mox.StubOutWithMock(api, 'image_get')
        self.mox.StubOutWithMock(api, 'tenant_quota_usages')
        # NOTE(review): duplicate StubOutWithMock — see note in
        # test_launch_get.
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'keypair_list')
        self.mox.StubOutWithMock(api, 'security_group_list')
        api.image_get(IsA(http.HttpRequest), image.id).AndReturn(image)
        api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(
            self.quota_usages.first())
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors.list())
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors.list())
        exception = keystone_exceptions.ClientException('Failed.')
        api.keypair_list(IsA(http.HttpRequest)).AndRaise(exception)
        api.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        self.mox.ReplayAll()

        url = reverse('horizon:nova:images_and_snapshots:images:launch',
                      args=[image.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res,
                                'nova/images_and_snapshots/images/launch.html')
        self.assertEqual(len(res.context['form'].fields['keypair'].choices), 1)

    def test_launch_form_keystone_exception(self):
        # A server_create failure should redirect back to the images index.
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        USER_DATA = 'userData'
        self.mox.StubOutWithMock(api, 'image_get')
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'keypair_list')
        self.mox.StubOutWithMock(api, 'security_group_list')
        self.mox.StubOutWithMock(api, 'server_create')
        self.mox.StubOutWithMock(api, 'volume_list')
        api.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.keypair_list(IgnoreArg()).AndReturn(self.keypairs.list())
        api.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.image_get(IgnoreArg(), image.id).AndReturn(image)
        api.volume_list(IgnoreArg()).AndReturn(self.volumes.list())
        exc = keystone_exceptions.ClientException('Failed')
        api.server_create(IsA(http.HttpRequest),
                          server.name,
                          image.id,
                          flavor.id,
                          keypair.name,
                          USER_DATA,
                          [sec_group.name],
                          None,
                          instance_count=IsA(int)).AndRaise(exc)
        self.mox.ReplayAll()

        form_data = {'method': 'LaunchForm',
                     'flavor': flavor.id,
                     'image_id': image.id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'tenant_id': self.tenant.id,
                     'user_data': USER_DATA,
                     'count': 1,
                     'security_groups': sec_group.name}
        url = reverse('horizon:nova:images_and_snapshots:images:launch',
                      args=[image.id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, IMAGES_INDEX_URL)

    def test_launch_form_instance_count_error(self):
        # An instance count of 0 is invalid; no server_create is recorded,
        # the form must come back with exactly one error.
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        USER_DATA = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        self.mox.StubOutWithMock(api, 'image_get')
        # NOTE(review): duplicate StubOutWithMock — see note in
        # test_launch_get.
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'flavor_list')
        self.mox.StubOutWithMock(api, 'keypair_list')
        self.mox.StubOutWithMock(api, 'security_group_list')
        self.mox.StubOutWithMock(api, 'volume_list')
        self.mox.StubOutWithMock(api, 'volume_snapshot_list')
        self.mox.StubOutWithMock(api, 'tenant_quota_usages')
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors.list())
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors.list())
        api.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs.list())
        api.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.image_get(IsA(http.HttpRequest), image.id).AndReturn(image)
        api.volume_list(IsA(http.HttpRequest)).AndReturn(self.volumes.list())
        api.volume_snapshot_list(IsA(http.HttpRequest)).AndReturn([])
        api.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(self.quota_usages.first())
        self.mox.ReplayAll()

        form_data = {'method': 'LaunchForm',
                     'flavor': flavor.id,
                     'image_id': image.id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'user_data': USER_DATA,
                     'tenant_id': self.tenants.first().id,
                     'security_groups': sec_group.name,
                     'volume': volume_choice,
                     'device_name': device_name,
                     'count': 0}
        url = reverse('horizon:nova:images_and_snapshots:images:launch',
                      args=[image.id])
        res = self.client.post(url, form_data)
        self.assertFormErrors(res, count=1)

    def test_image_detail_get(self):
        # Detail view renders the image fetched from glance.
        image = self.images.first()
        self.mox.StubOutWithMock(api.glance, 'image_get')
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(self.images.first())
        self.mox.ReplayAll()

        res = self.client.get(
            reverse('horizon:nova:images_and_snapshots:images:detail',
                    args=[image.id]))
        self.assertTemplateUsed(res,
                                'nova/images_and_snapshots/images/detail.html')
        self.assertEqual(res.context['image'].name, image.name)

    def test_image_detail_get_with_exception(self):
        # A glance failure on the detail view redirects to the index.
        image = self.images.first()
        self.mox.StubOutWithMock(api.glance, 'image_get')
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndRaise(glance_exception.ClientException('Error'))
        self.mox.ReplayAll()

        url = reverse('horizon:nova:images_and_snapshots:images:detail',
                      args=[image.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, IMAGES_INDEX_URL)
| |
import json
import uuid
import logging
import jwe
import jwt
import waffle
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import AuthenticationFailed
from api.base.authentication import drf
from api.base import exceptions, settings
from framework import sentry
from framework.auth import get_or_create_user
from osf import features
from osf.models import Institution
from website.mails import send_mail, WELCOME_OSF4I
from website.settings import OSF_SUPPORT_EMAIL, DOMAIN
# Module-level logger for institution SSO diagnostics.
logger = logging.getLogger(__name__)
# This map defines how to find the secondary institution IdP which uses the shared SSO of a primary
# IdP. Each map entry has the following format.
#
# '<ID of the primary institution A>': {
# 'criteria': 'attribute',
# 'attribute': '<the attribute name for identifying secondary institutions>',
# 'institutions': {
# '<attribute value for identifying institution A1>': '<ID of secondary institution A1>',
# '<attribute value for identifying institution A2>': '<ID of secondary institution A2>',
# ...
# },
# ...
# }
#
# Currently, the only active criteria is "attribute", which the primary institution IdP releases to
# OSF for us to identify the secondary institution. Another option is "emailDomain". For example:
#
# '<ID of the primary institution B>': {
# 'criteria': 'emailDomain',
# 'institutions': {
# '<the email domain for identifying institution B1>': '<ID of secondary institution B1>',
# '<the email domain for identifying institution B2>': '<ID of secondary institution B2>',
# ...
# }
# ...
# }
#
# Per the format described in the comment block above: 'brown' is currently
# the only primary institution sharing its SSO; its IdP releases the
# 'isMemberOf' attribute, whose value 'thepolicylab' selects the secondary
# institution of the same ID.
INSTITUTION_SHARED_SSO_MAP = {
    'brown': {
        'criteria': 'attribute',
        'attribute': 'isMemberOf',
        'institutions': {
            'thepolicylab': 'thepolicylab',
        },
    },
}
class InstitutionAuthentication(BaseAuthentication):
    """A dedicated authentication class for view ``InstitutionAuth``.

    The ``InstitutionAuth`` view and the ``InstitutionAuthentication`` class are only and should
    only be used by OSF CAS for institution login. Changing this class and related tests may break
    the institution login feature. Please check with @longzeC / @mattF / @brianG before making any
    changes.
    """

    # CAS posts the encrypted JWT payload as plain text rather than JSON.
    media_type = 'text/plain'

    def authenticate(self, request):
        """
        Handle CAS institution authentication request.

        The JWT `data` payload is expected in the following structure:
        {
            "provider": {
                "idp": "",
                "id": "",
                "user": {
                    "username": "",
                    "fullname": "",
                    "familyName": "",
                    "givenName": "",
                    "middleNames": "",
                    "suffix": "",
                }
            }
        }

        The user dict may additionally carry "department" and, for shared-SSO
        primaries, the attribute named in INSTITUTION_SHARED_SSO_MAP.

        :param request: the POST request
        :return: user, None if authentication succeed
        :raises: AuthenticationFailed if authentication fails
        """
        # Verify / decrypt / decode the payload: the request body is a JWE
        # envelope wrapping an HS256-signed JWT.
        try:
            payload = jwt.decode(
                jwe.decrypt(request.body, settings.JWE_SECRET),
                settings.JWT_SECRET,
                options={'verify_exp': False},
                algorithm='HS256',
            )
        except (jwt.InvalidTokenError, TypeError, jwe.exceptions.MalformedData):
            raise AuthenticationFailed

        # Load institution and user data
        data = json.loads(payload['data'])
        provider = data['provider']
        institution = Institution.load(provider['id'])
        if not institution:
            message = 'Institution SSO Error: invalid institution ID [{}]'.format(provider['id'])
            logger.error(message)
            sentry.log_message(message)
            raise AuthenticationFailed(message)
        username = provider['user'].get('username')
        fullname = provider['user'].get('fullname')
        given_name = provider['user'].get('givenName')
        family_name = provider['user'].get('familyName')
        middle_names = provider['user'].get('middleNames')
        suffix = provider['user'].get('suffix')
        department = provider['user'].get('department')

        # Check secondary institutions which uses the SSO of primary ones
        secondary_institution = None
        if provider['id'] in INSTITUTION_SHARED_SSO_MAP:
            switch_map = INSTITUTION_SHARED_SSO_MAP[provider['id']]
            criteria_type = switch_map.get('criteria')
            if criteria_type == 'attribute':
                attribute_name = switch_map.get('attribute')
                attribute_value = provider['user'].get(attribute_name)
                if attribute_value:
                    secondary_institution_id = switch_map.get(
                        'institutions',
                        {},
                    ).get(attribute_value)
                    logger.info('Institution SSO: primary=[{}], secondary=[{}], '
                                'username=[{}]'.format(provider['id'], secondary_institution_id, username))
                    secondary_institution = Institution.load(secondary_institution_id)
                    if not secondary_institution:
                        # Log errors and inform Sentry but do not raise an exception if OSF fails
                        # to load the secondary institution from database
                        message = 'Institution SSO Error: invalid secondary institution [{}]; ' \
                                  'primary=[{}], username=[{}]'.format(attribute_value, provider['id'], username)
                        logger.error(message)
                        sentry.log_message(message)
                else:
                    # SSO from primary institution only
                    logger.info('Institution SSO: primary=[{}], secondary=[None], '
                                'username=[{}]'.format(provider['id'], username))
            else:
                # Unknown criteria type: log and continue with the primary
                # institution only.
                message = 'Institution SSO Error: invalid criteria [{}]; ' \
                          'primary=[{}], username=[{}]'.format(criteria_type, provider['id'], username)
                logger.error(message)
                sentry.log_message(message)

        # Use given name and family name to build full name if it is not provided
        if given_name and family_name and not fullname:
            fullname = given_name + ' ' + family_name

        # Non-empty full name is required. Fail the auth and inform sentry if not provided.
        if not fullname:
            message = 'Institution SSO Error: missing fullname ' \
                      'for user [{}] from institution [{}]'.format(username, provider['id'])
            logger.error(message)
            sentry.log_message(message)
            raise AuthenticationFailed(message)

        # Get an existing user or create a new one. If a new user is created, the user object is
        # confirmed but not registered, which is temporarily of an inactive status. If an existing
        # user is found, it is also possible that the user is inactive (e.g. unclaimed, disabled,
        # unconfirmed, etc.).
        user, created = get_or_create_user(fullname, username, reset_password=False)

        # Existing but inactive users need to be either "activated" or failed the auth
        activation_required = False
        new_password_required = False
        if not created:
            try:
                drf.check_user(user)
                logger.info('Institution SSO: active user [{}]'.format(username))
            except exceptions.UnclaimedAccountError:
                # Unclaimed user (i.e. a user that has been added as an unregistered contributor)
                user.unclaimed_records = {}
                activation_required = True
                # Unclaimed users have an unusable password when being added as an unregistered
                # contributor. Thus a random usable password must be assigned during activation.
                new_password_required = True
                logger.warning('Institution SSO: unclaimed contributor [{}]'.format(username))
            except exceptions.UnconfirmedAccountError:
                if user.has_usable_password():
                    # Unconfirmed user from default username / password signup
                    user.email_verifications = {}
                    activation_required = True
                    # Unconfirmed users already have a usable password set by the creator during
                    # sign-up. However, it must be overwritten by a new random one so the creator
                    # (if he is not the real person) can not access the account after activation.
                    new_password_required = True
                    logger.warning('Institution SSO: unconfirmed user [{}]'.format(username))
                else:
                    # Login take-over has not been implemented for unconfirmed user created via
                    # external IdP login (ORCiD).
                    message = 'Institution SSO Error: SSO is not eligible for an unconfirmed account [{}] ' \
                              'created via IdP login'.format(username)
                    sentry.log_message(message)
                    logger.error(message)
                    return None, None
            except exceptions.DeactivatedAccountError:
                # Deactivated user: login is not allowed for deactivated users
                message = 'Institution SSO Error: SSO is not eligible for a deactivated account: [{}]'.format(username)
                sentry.log_message(message)
                logger.error(message)
                return None, None
            except exceptions.MergedAccountError:
                # Merged user: this shouldn't happen since merged users do not have an email
                message = 'Institution SSO Error: SSO is not eligible for a merged account: [{}]'.format(username)
                sentry.log_message(message)
                logger.error(message)
                return None, None
            except exceptions.InvalidAccountError:
                # Other invalid status: this shouldn't happen unless the user happens to be in a
                # temporary state. Such state requires more updates before the user can be saved
                # to the database. (e.g. `get_or_create_user()` creates a temporary-state user.)
                message = 'Institution SSO Error: SSO is not eligible for an inactive account [{}] ' \
                          'with an unknown or invalid status'.format(username)
                sentry.log_message(message)
                logger.error(message)
                return None, None
        else:
            logger.info('Institution SSO: new user [{}]'.format(username))

        # The `department` field is updated each login when it was changed.
        user_guid = user.guids.first()._id
        if department:
            if user.department != department:
                user.department = department
                user.save()
            logger.info('Institution SSO: user w/ dept: user=[{}], email=[{}], inst=[{}], '
                        'dept=[{}]'.format(user_guid, username, institution._id, department))
        else:
            logger.info('Institution SSO: user w/o dept: user=[{}], email=[{}], '
                        'inst=[{}]'.format(user_guid, username, institution._id))

        # Both created and activated accounts need to be updated and registered
        if created or activation_required:
            if given_name:
                user.given_name = given_name
            if family_name:
                user.family_name = family_name
            if middle_names:
                user.middle_names = middle_names
            if suffix:
                user.suffix = suffix
            # Users claimed or confirmed via institution SSO should have their full name updated
            if activation_required:
                user.fullname = fullname
            user.update_date_last_login()

            # Register and save user
            password = str(uuid.uuid4()) if new_password_required else None
            user.register(username, password=password)
            user.save()

            # Send confirmation email for all three: created, confirmed and claimed
            send_mail(
                to_addr=user.username,
                mail=WELCOME_OSF4I,
                user=user,
                domain=DOMAIN,
                osf_support_email=OSF_SUPPORT_EMAIL,
                storage_flag_is_active=waffle.flag_is_active(request, features.STORAGE_I18N),
            )

        # Affiliate the user to the primary institution if not previously affiliated
        if not user.is_affiliated_with_institution(institution):
            user.affiliated_institutions.add(institution)
            user.save()

        # Affiliate the user to the secondary institution if not previously affiliated
        if secondary_institution and not user.is_affiliated_with_institution(secondary_institution):
            user.affiliated_institutions.add(secondary_institution)
            user.save()

        return user, None
| |
# Copyright 2016,2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import os.path
import threading
from oslo_concurrency import processutils
from oslo_log import log
import six
import nova.conf
from nova import exception
from nova.i18n import _LE, _LW
from nova import utils
CONF = nova.conf.CONF
LOG = log.getLogger(__name__)
class _HostMountStateManager(object):
    """A global manager of filesystem mounts.
    _HostMountStateManager manages a _HostMountState object for the current
    compute node. Primarily it creates one on host_up(), destroys it on
    host_down(), and returns it via get_state().
    _HostMountStateManager manages concurrency itself. Independent callers do
    not need to consider interactions between multiple _HostMountStateManager
    calls when designing their own locking.
    _HostMountStateManager is a singleton, and must only be accessed via:
      mount.get_manager()
    """
    def __init__(self):
        self._reset_state()
    def _reset_state(self):
        """Reset state of global _HostMountStateManager.
        Should only be called by __init__ and tests.
        """
        # The current _HostMountState, or None while the host is down.
        self.state = None
        # Number of callers currently inside get_state()'s yield.
        self.use_count = 0
        # Guards both state and use_count
        self.cond = threading.Condition()
        # Incremented each time we initialise a new mount state. Aids
        # debugging.
        self.generation = 0
    @contextlib.contextmanager
    def get_state(self):
        """Return the current mount state.
        _HostMountStateManager will not permit a new state object to be
        created while any previous state object is still in use.
        get_state will raise HypervisorUnavailable if the libvirt connection is
        currently down.
        :rtype: _HostMountState
        """
        # We hold the instance lock here so that if a _HostMountState is
        # currently initialising we'll wait for it to complete rather than
        # fail.
        with self.cond:
            state = self.state
            if state is None:
                raise exception.HypervisorUnavailable(host=CONF.host)
            # Count this caller in while still holding cond, so host_up()
            # cannot replace the state before we've registered our use.
            self.use_count += 1
        try:
            LOG.debug('Got _HostMountState generation %(gen)i',
                      {'gen': state.generation})
            yield state
        finally:
            # Drop our reference and wake host_up(), which may be waiting
            # for use_count to reach zero.
            with self.cond:
                self.use_count -= 1
                self.cond.notify_all()
    def host_up(self, host):
        """Initialise a new _HostMountState when the libvirt connection comes
        up.
        host_up will destroy and re-initialise the current state if one
        already exists, but this is considered an error.
        host_up will block before creating a new state until all operations
        using a previous state have completed.
        :param host: A connected libvirt Host object
        """
        with self.cond:
            if self.state is not None:
                LOG.warning(_LW("host_up called, but we think host is "
                                "already up"))
                self._host_down()
            # Wait until all operations using a previous state generation are
            # complete before initialising a new one. Note that self.state is
            # already None, set either by initialisation or by host_down. This
            # means the current state will not be returned to any new callers,
            # and use_count will eventually reach zero.
            # We do this to avoid a race between _HostMountState initialisation
            # and an on-going mount/unmount operation
            while self.use_count != 0:
                self.cond.wait()
            # Another thread might have initialised state while we were
            # wait()ing
            if self.state is None:
                LOG.debug('Initialising _HostMountState generation %(gen)i',
                          {'gen': self.generation})
                self.state = _HostMountState(host, self.generation)
                self.generation += 1
    def host_down(self):
        """Destroy the current _HostMountState when the libvirt connection
        goes down.
        """
        with self.cond:
            if self.state is None:
                LOG.warning(_LW("host_down called, but we don't think host "
                                "is up"))
                return
            self._host_down()
    def _host_down(self):
        # Caller must hold self.cond.
        LOG.debug('Destroying MountManager generation %(gen)i',
                  {'gen': self.state.generation})
        self.state = None
class _HostMountState(object):
    """A data structure recording all managed mountpoints and the
    attachments in use for each one. _HostMountState ensures that the compute
    node only attempts to mount a single mountpoint in use by multiple
    attachments once, and that it is not unmounted until it is no longer in use
    by any attachments.
    Callers should not create a _HostMountState directly, but should obtain
    it via:
      with mount.get_manager().get_state() as state:
        state.mount(...)
    On creation _HostMountState inspects the compute host directly to discover
    all current mountpoints and the attachments on them. After creation it
    expects to have exclusive control of these mountpoints until it is
    destroyed.
    _HostMountState manages concurrency itself. Independent callers do not need
    to consider interactions between multiple _HostMountState calls when
    designing their own locking.
    """
    class _MountPoint(object):
        """A single mountpoint, and the set of attachments in use on it."""
        def __init__(self):
            # A guard for operations on this mountpoint
            # N.B. Care is required using this lock, as it will be deleted
            # if the containing _MountPoint is deleted.
            self.lock = threading.Lock()
            # The set of attachments on this mountpoint.
            self.attachments = set()
        def add_attachment(self, vol_name, instance_uuid):
            self.attachments.add((vol_name, instance_uuid))
        def remove_attachment(self, vol_name, instance_uuid):
            # Raises KeyError if the attachment was never recorded; the
            # caller (umount) handles and logs that case.
            self.attachments.remove((vol_name, instance_uuid))
        def in_use(self):
            return len(self.attachments) > 0
    def __init__(self, host, generation):
        """Initialise a _HostMountState by inspecting the current compute
        host for mountpoints and the attachments in use on them.
        :param host: A connected libvirt Host object
        :param generation: An integer indicating the generation of this
                           _HostMountState object. This is 0 for the first
                           _HostMountState created, and incremented for each
                           created subsequently. It is used in log messages to
                           aid debugging.
        """
        self.generation = generation
        # Maps mountpoint path -> _MountPoint; defaultdict so lookup
        # atomically creates an empty record (see _get_locked).
        self.mountpoints = collections.defaultdict(self._MountPoint)
        # Iterate over all guests on the connected libvirt
        for guest in host.list_guests(only_running=False):
            for disk in guest.get_all_disks():
                # All remote filesystem volumes are files
                if disk.source_type != 'file':
                    continue
                # NOTE(mdbooth): We're assuming that the mountpoint is our
                # immediate parent, which is currently true for all
                # volume drivers. We deliberately don't do anything clever
                # here, because we don't want to, e.g.:
                # * Add mountpoints for non-volume disks
                # * Get it wrong when a non-running domain references a
                #   volume which isn't mounted because the host just rebooted.
                # and this is good enough. We could probably do better here
                # with more thought.
                mountpoint = os.path.dirname(disk.source_path)
                if not os.path.ismount(mountpoint):
                    continue
                name = os.path.basename(disk.source_path)
                mount = self.mountpoints[mountpoint]
                mount.add_attachment(name, guest.uuid)
                LOG.debug('Discovered volume %(vol)s in use for existing '
                          'mountpoint %(mountpoint)s',
                          {'vol': name, 'mountpoint': mountpoint})
    @contextlib.contextmanager
    def _get_locked(self, mountpoint):
        """Get a locked mountpoint object
        :param mountpoint: The path of the mountpoint whose object we should
                           return.
        :rtype: _HostMountState._MountPoint
        """
        # This dance is because we delete locks. We need to be sure that the
        # lock we hold does not belong to an object which has been deleted.
        # We do this by checking that mountpoint still refers to this object
        # when we hold the lock. This is safe because:
        # * we only delete an object from mountpoints whilst holding its lock
        # * mountpoints is a defaultdict which will atomically create a new
        #   object on access
        while True:
            mount = self.mountpoints[mountpoint]
            with mount.lock:
                # Re-check identity: if another thread deleted (and the
                # defaultdict recreated) the record, loop and lock the new one.
                if self.mountpoints[mountpoint] is mount:
                    yield mount
                    break
    def mount(self, fstype, export, vol_name, mountpoint, instance, options):
        """Ensure a mountpoint is available for an attachment, mounting it
        if necessary.
        If this is the first attachment on this mountpoint, we will mount it
        with:
          mount -t <fstype> <options> <export> <mountpoint>
        :param fstype: The filesystem type to be passed to mount command.
        :param export: The type-specific identifier of the filesystem to be
                       mounted. e.g. for nfs 'host.example.com:/mountpoint'.
        :param vol_name: The name of the volume on the remote filesystem.
        :param mountpoint: The directory where the filesystem will be
                           mounted on the local compute host.
        :param instance: The instance the volume will be attached to.
        :param options: An arbitrary list of additional arguments to be
                        passed to the mount command immediate before export
                        and mountpoint.
        """
        # NOTE(mdbooth): mount() may currently be called multiple times for a
        # single attachment. Any operation which calls
        # LibvirtDriver._hard_reboot will re-attach volumes which are probably
        # already attached, resulting in multiple mount calls.
        LOG.debug('_HostMountState.mount(fstype=%(fstype)s, '
                  'export=%(export)s, vol_name=%(vol_name)s, %(mountpoint)s, '
                  'options=%(options)s) generation %(gen)s',
                  {'fstype': fstype, 'export': export, 'vol_name': vol_name,
                   'mountpoint': mountpoint, 'options': options,
                   'gen': self.generation})
        with self._get_locked(mountpoint) as mount:
            if not os.path.ismount(mountpoint):
                LOG.debug('Mounting %(mountpoint)s generation %(gen)s',
                          {'mountpoint': mountpoint, 'gen': self.generation})
                utils.execute('mkdir', '-p', mountpoint)
                mount_cmd = ['mount', '-t', fstype]
                if options is not None:
                    mount_cmd.extend(options)
                mount_cmd.extend([export, mountpoint])
                try:
                    utils.execute(*mount_cmd, run_as_root=True)
                except Exception:
                    # Check to see if mountpoint is mounted despite the error
                    # eg it was already mounted
                    if os.path.ismount(mountpoint):
                        # We're not going to raise the exception because we're
                        # in the desired state anyway. However, this is still
                        # unusual so we'll log it.
                        # NOTE(review): "mountpount" below is a typo in the
                        # log message; left as-is to avoid changing runtime
                        # output in a documentation-only change.
                        LOG.exception(_LE('Error mounting %(fstype)s export '
                                          '%(export)s on %(mountpoint)s. '
                                          'Continuing because mountpount is '
                                          'mounted despite this.'),
                                      {'fstype': fstype, 'export': export,
                                       'mountpoint': mountpoint})
                    else:
                        # If the mount failed there's no reason for us to keep
                        # a record of it. It will be created again if the
                        # caller retries.
                        # Delete while holding lock
                        del self.mountpoints[mountpoint]
                        raise
            mount.add_attachment(vol_name, instance.uuid)
        LOG.debug('_HostMountState.mount() for %(mountpoint)s '
                  'generation %(gen)s completed successfully',
                  {'mountpoint': mountpoint, 'gen': self.generation})
    def umount(self, vol_name, mountpoint, instance):
        """Mark an attachment as no longer in use, and unmount its mountpoint
        if necessary.
        :param vol_name: The name of the volume on the remote filesystem.
        :param mountpoint: The directory where the filesystem is be
                           mounted on the local compute host.
        :param instance: The instance the volume was attached to.
        """
        LOG.debug('_HostMountState.umount(vol_name=%(vol_name)s, '
                  'mountpoint=%(mountpoint)s) generation %(gen)s',
                  {'vol_name': vol_name, 'mountpoint': mountpoint,
                   'gen': self.generation})
        with self._get_locked(mountpoint) as mount:
            try:
                mount.remove_attachment(vol_name, instance.uuid)
            except KeyError:
                LOG.warning(_LW("Request to remove attachment "
                                "(%(vol_name)s, %(instance)s) from "
                                "%(mountpoint)s, but we don't think it's in "
                                "use."),
                            {'vol_name': vol_name, 'instance': instance.uuid,
                             'mountpoint': mountpoint})
            if not mount.in_use():
                mounted = os.path.ismount(mountpoint)
                if mounted:
                    mounted = self._real_umount(mountpoint)
                # Delete our record entirely if it's unmounted
                if not mounted:
                    del self.mountpoints[mountpoint]
        LOG.debug('_HostMountState.umount() for %(mountpoint)s '
                  'generation %(gen)s completed successfully',
                  {'mountpoint': mountpoint, 'gen': self.generation})
    def _real_umount(self, mountpoint):
        # Unmount and delete a mountpoint.
        # Return mount state after umount (i.e. True means still mounted)
        LOG.debug('Unmounting %(mountpoint)s generation %(gen)s',
                  {'mountpoint': mountpoint, 'gen': self.generation})
        try:
            utils.execute('umount', mountpoint, run_as_root=True,
                          attempts=3, delay_on_retry=True)
        except processutils.ProcessExecutionError as ex:
            LOG.error(_LE("Couldn't unmount %(mountpoint)s: %(reason)s"),
                      {'mountpoint': mountpoint, 'reason': six.text_type(ex)})
        # Report the actual post-umount state rather than assuming success.
        if not os.path.ismount(mountpoint):
            try:
                utils.execute('rmdir', mountpoint)
            except processutils.ProcessExecutionError as ex:
                LOG.error(_LE("Couldn't remove directory %(mountpoint)s: "
                              "%(reason)s"),
                          {'mountpoint': mountpoint,
                           'reason': six.text_type(ex)})
            return False
        return True
# Module-level singleton; access only via get_manager().
__manager__ = _HostMountStateManager()
def get_manager():
    """Return the _HostMountStateManager singleton.
    :rtype: _HostMountStateManager
    """
    return __manager__
def mount(fstype, export, vol_name, mountpoint, instance, options=None):
    """Module-level convenience wrapper: mount a volume's filesystem via the
    _HostMountStateManager singleton's current _HostMountState.
    """
    with __manager__.get_state() as state:
        state.mount(fstype, export, vol_name, mountpoint, instance, options)
def umount(vol_name, mountpoint, instance):
    """Module-level convenience wrapper: release an attachment (unmounting if
    unused) via the _HostMountStateManager singleton's current state.
    """
    with __manager__.get_state() as state:
        state.umount(vol_name, mountpoint, instance)
| |
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local daemon's JSON-RPC port, using HTTP basic auth when
# credentials are configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:8332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
# Dispatch on the command name given as argv[1]. Each clause prompts for any
# arguments on stdin and prints the JSON-RPC result; the blanket excepts give
# the script its deliberate best-effort behaviour (Python 2 syntax).
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Carboncoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        # Retry without the optional args if the server rejects them.
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Carboncoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Continuation of the command dispatch: wallet, listing and admin commands.
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"
elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"
elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"
elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
    try:
        pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
        # Unlock the wallet for 60 seconds.
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
    try:
        pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
        pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
    print
else:
    print "Command not found or not supported"
| |
# Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import re
import importlib
from datetime import datetime
from firewoes.lib.orm import metadata
from firehose.model import Analysis
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship, backref
from sqlalchemy import (Table, Column, ForeignKey, UniqueConstraint,
Integer, String, DateTime, Boolean)
from debile.master.utils import config
from debile.master.arches import (get_preferred_affinity, get_source_arches)
Base = declarative_base(metadata=metadata)
def _debilize(self):
def getthing(obj, name):
if obj is None:
return None
if "." in name:
local, remote = name.split(".", 1)
foo = getattr(obj, local)
return getthing(foo, remote)
if name == "__str__":
return str(obj)
if name == "__debilize__":
return _debilize(obj)
if name == "__list__":
return [_debilize(x) for x in obj]
return getattr(obj, name)
if self is None:
return None
ret = {}
for attribute, path in self._debile_objs.items():
ret[attribute] = getthing(self, path)
return ret
class Person(Base):
    """A human account known to debile (an uploader, or the maintainer of a
    builder or group), uniquely identified by email address.
    """
    __tablename__ = 'people'
    __table_args__ = (UniqueConstraint('email'),)
    # Attribute -> path map consumed by _debilize for API serialisation.
    _debile_objs = {
        "id": "id",
        "name": "name",
        "email": "email",
        "pgp": "pgp",
        "ssl": "ssl",
    }
    debilize = _debilize
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    email = Column(String(255), nullable=False)
    # 40-char hex strings; presumably PGP key and SSL cert fingerprints —
    # TODO confirm against the callers that populate these.
    pgp = Column(String(40), nullable=True, default=None)
    ssl = Column(String(40), nullable=True, default=None)
    def __str__(self):
        return "%s <%s>" % (self.name, self.email)
    def __repr__(self):
        return "<Person: %s (%s)>" % (self.email, self.id)
class Builder(Base):
    """A build machine registered with the master, with a unique name and a
    responsible maintainer.
    """
    __table_args__ = (UniqueConstraint('name'),)
    __tablename__ = 'builders'
    # Attribute -> path map consumed by _debilize for API serialisation.
    _debile_objs = {
        "id": "id",
        "name": "name",
        "last_ping": "last_ping",
        "maintainer_name": "maintainer.name",
        "maintainer_email": "maintainer.email",
        "pgp": "pgp",
        "ssl": "ssl",
    }
    debilize = _debilize
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    # Timestamp of the builder's most recent check-in with the master.
    last_ping = Column(DateTime, nullable=False)
    maintainer_id = Column(Integer, ForeignKey('people.id', ondelete="RESTRICT"), nullable=False)
    maintainer = relationship("Person", foreign_keys=[maintainer_id])
    # 40-char hex fingerprints identifying the builder (PGP / SSL).
    pgp = Column(String(40), nullable=True, default=None)
    ssl = Column(String(40), nullable=True, default=None)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "<Builder: %s (%s)>" % (self.name, self.id)
class Suite(Base):
    """A distribution suite name (e.g. a release codename), unique by name."""
    __tablename__ = 'suites'
    __table_args__ = (UniqueConstraint('name'),)
    _debile_objs = {
        "id": "id",
        "name": "name",
    }
    debilize = _debilize
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "<Suite: %s (%s)>" % (self.name, self.id)
class Component(Base):
    """An archive component name (e.g. main/contrib), unique by name."""
    __tablename__ = 'components'
    __table_args__ = (UniqueConstraint('name'),)
    _debile_objs = {
        "id": "id",
        "name": "name",
    }
    debilize = _debilize
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "<Component: %s (%s)>" % (self.name, self.id)
class Arch(Base):
    """A build architecture name, unique by name."""
    __tablename__ = 'arches'
    __table_args__ = (UniqueConstraint('name'),)
    _debile_objs = {
        "id": "id",
        "name": "name",
    }
    debilize = _debilize
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "<Arch: %s (%s)>" % (self.name, self.id)
class Check(Base):
    """A QA check that can be run against packages; the boolean flags select
    which kinds of target it applies to (see GroupSuite.get_*_checks).
    """
    __tablename__ = 'checks'
    __table_args__ = (UniqueConstraint('name'),)
    _debile_objs = {
        "id": "id",
        "name": "name",
        "source": "source",
        "binary": "binary",
        "build": "build",
    }
    debilize = _debilize
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    # Applies to source packages.
    source = Column(Boolean, nullable=False)
    # Applies to binary packages.
    binary = Column(Boolean, nullable=False)
    # Is a build step (build checks are selected separately from
    # source/binary checks in GroupSuite).
    build = Column(Boolean, nullable=False)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "<Check: %s (%s)>" % (self.name, self.id)
class Group(Base):
    """A repository group: a named collection of suites with its own apt
    repository, optional build queue, and files area.
    """
    __tablename__ = 'groups'
    __table_args__ = (UniqueConstraint('name'),)
    # Attribute -> path map consumed by _debilize for API serialisation.
    # Note the path/url entries resolve through the properties below, so
    # serialising a Group reads the repo configuration.
    _debile_objs = {
        "id": "id",
        "name": "name",
        "maintainer_name": "maintainer.name",
        "maintainer_email": "maintainer.email",
        "repo_path": "repo_path",
        "repo_url": "repo_url",
        "buildq_path": "buildq_path",
        "buildq_url": "buildq_url",
        "files_path": "files_path",
        "files_url": "files_url",
    }
    debilize = _debilize
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    maintainer_id = Column(Integer, ForeignKey('people.id', ondelete="RESTRICT"), nullable=False)
    maintainer = relationship("Person", foreign_keys=[maintainer_id])
    def get_repo_info(self):
        """Resolve this group's repo/buildq/files paths and URLs.

        If the "repo" config section names a ``custom_resolver``, import and
        call it with (group, conf) and return its result. Otherwise format
        the configured path/url templates with this group's name and id.

        :returns: dict with (at least) repo_path, repo_url, files_path,
                  files_url; buildq_path/buildq_url when configured.
        :raises ValueError: if the repo configuration is missing or
                            incomplete.
        """
        conf = config.get("repo", None)
        # Previously a missing "repo" section crashed with AttributeError on
        # conf.get(); raise the same explicit error as for missing keys.
        if conf is None:
            raise ValueError("No configured repo info. Set in master.yaml")
        custom_resolver = conf.get("custom_resolver", None)
        if custom_resolver:
            module, func = custom_resolver.rsplit(".", 1)
            m = importlib.import_module(module)
            return getattr(m, func)(self, conf)
        # These four template entries are mandatory; buildq_* are added
        # afterwards and so are only required once the defaults are in use.
        entries = ["repo_path", "repo_url", "files_path", "files_url"]
        for entry in entries:
            if conf.get(entry) is None:
                raise ValueError("No configured repo info. Set in master.yaml")
        entries.extend(["buildq_path", "buildq_url"])
        return {x: conf[x].format(
            name=self.name,
            id=self.id,
        ) for x in entries}
    @property
    def repo_path(self):
        # Filesystem root of the group's apt repository.
        return self.get_repo_info()['repo_path']
    @property
    def repo_url(self):
        return self.get_repo_info()['repo_url']
    @property
    def buildq_path(self):
        # .get(): a custom resolver may legitimately omit the build queue.
        return self.get_repo_info().get('buildq_path')
    @property
    def buildq_url(self):
        return self.get_repo_info().get('buildq_url')
    @property
    def files_path(self):
        return self.get_repo_info()['files_path']
    @property
    def files_url(self):
        return self.get_repo_info()['files_url']
    def __str__(self):
        return self.name
    def __repr__(self):
        return "<Group: %s (%s)>" % (self.name, self.id)
# Many-to-many association tables linking a GroupSuite to the components,
# arches and checks it covers. Rows cascade away with their GroupSuite, but
# the referenced component/arch/check rows are protected (RESTRICT).
group_suite_component_association = (
    Table('group_suite_component_association', Base.metadata,
          Column('group_suite_id', Integer, ForeignKey('group_suites.id', ondelete="CASCADE"), nullable=False),
          Column('component_id', Integer, ForeignKey('components.id', ondelete="RESTRICT"), nullable=False)))
group_suite_arch_association = (
    Table('group_suite_arch_association', Base.metadata,
          Column('group_suite_id', Integer, ForeignKey('group_suites.id', ondelete="CASCADE"), nullable=False),
          Column('arch_id', Integer, ForeignKey('arches.id', ondelete="RESTRICT"), nullable=False)))
group_suite_check_association = (
    Table('group_suite_check_association', Base.metadata,
          Column('group_suite_id', Integer, ForeignKey('group_suites.id', ondelete="CASCADE"), nullable=False),
          Column('check_id', Integer, ForeignKey('checks.id', ondelete="RESTRICT"), nullable=False)))
class GroupSuite(Base):
    """The pairing of a Group with a Suite, carrying the components, arches
    and checks enabled for that pair.
    """
    __tablename__ = 'group_suites'
    _debile_objs = {
        "id": "id",
        "group": "group.name",
        "suite": "suite.name",
    }
    debilize = _debilize
    id = Column(Integer, primary_key=True)
    group_id = Column(Integer, ForeignKey('groups.id', ondelete="RESTRICT"), nullable=False)
    group = relationship("Group", foreign_keys=[group_id],
                         backref=backref("group_suites", passive_deletes=True))
    suite_id = Column(Integer, ForeignKey('suites.id', ondelete="RESTRICT"), nullable=False)
    suite = relationship("Suite", foreign_keys=[suite_id])
    components = relationship("Component", secondary=group_suite_component_association)
    arches = relationship("Arch", secondary=group_suite_arch_association)
    checks = relationship("Check", secondary=group_suite_check_association)
    # NOTE(review): the `== True` / `== False` comparisons below are
    # unidiomatic but preserved: unlike `not x.build`, they exclude checks
    # whose flags are still None (e.g. unflushed rows) — confirm before
    # simplifying.
    def get_source_checks(self):
        # Checks that run against source packages (excluding build steps).
        return [x for x in self.checks
                if x.source == True and x.build == False]
    def get_binary_checks(self):
        # Checks that run against binary packages (excluding build steps).
        return [x for x in self.checks
                if x.binary == True and x.build == False]
    def get_build_checks(self):
        # Checks that constitute the build itself.
        return [x for x in self.checks if x.build == True]
    def __str__(self):
        return "%s/%s" % (self.group, self.suite)
    def __repr__(self):
        return "<GroupSuite: %s/%s (%s)>" % (self.group, self.suite, self.id)
# Many-to-many association table recording which arches a Source should be
# built for; rows cascade away with their Source.
source_arch_association = (
    Table('source_arch_association', Base.metadata,
          Column('source_id', Integer, ForeignKey('sources.id', ondelete="CASCADE"), nullable=False),
          Column('arch_id', Integer, ForeignKey('arches.id', ondelete="RESTRICT"), nullable=False)))
class Source(Base):
    """A source package upload, attached to a GroupSuite, with its .dsc
    locatable in either the group's build queue or its main repository.
    """
    __tablename__ = 'sources'
    # Attribute -> path map consumed by _debilize for API serialisation.
    _debile_objs = {
        "id": "id",
        "name": "name",
        "version": "version",
        "group": "group.name",
        "suite": "suite.name",
        "component": "component.name",
        "affinity": "affinity.name",
        "uploader": "uploader.__debilize__",
        "uploaded_at": "uploaded_at",
        "directory": "directory",
        "dsc_filename": "dsc_filename",
        "dsc_path": "dsc_path",
        "dsc_url": "dsc_url",
        "group_id": "group.id",
        "maintainers": "maintainers.__list__",
    }
    def debilize(self):
        # Serialise as usual, plus an embedded serialisation of the group.
        obj = _debilize(self)
        obj['group_obj'] = _debilize(self.group)
        return obj
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    version = Column(String(255), nullable=False)
    group_suite_id = Column(Integer, ForeignKey('group_suites.id', ondelete="RESTRICT"), nullable=False)
    group_suite = relationship("GroupSuite", foreign_keys=[group_suite_id])
    @hybrid_property
    def group(self):
        return self.group_suite.group
    @hybrid_property
    def suite(self):
        return self.group_suite.suite
    component_id = Column(Integer, ForeignKey('components.id', ondelete="RESTRICT"), nullable=False)
    component = relationship("Component", foreign_keys=[component_id])
    arches = relationship("Arch", secondary=source_arch_association)
    # Preferred architecture for arch-independent jobs.
    affinity_id = Column(Integer, ForeignKey('arches.id', ondelete="RESTRICT"), nullable=False)
    affinity = relationship("Arch", foreign_keys=[affinity_id])
    uploader_id = Column(Integer, ForeignKey('people.id', ondelete="RESTRICT"), nullable=False)
    uploader = relationship("Person", foreign_keys=[uploader_id])
    uploaded_at = Column(DateTime, nullable=False)
    # Pool-relative directory and .dsc filename of the upload.
    directory = Column(String(255), nullable=False)
    dsc_filename = Column(String(255), nullable=False)
    def _get_dsc_path(self, pool_root):
        """Return (path, exists) for this source's .dsc under *pool_root*.

        :param pool_root: filesystem root of a pool, or a falsy value when
                          no such pool is configured.
        """
        if not pool_root:
            # Fixed: previously returned a bare False, which raised
            # TypeError in callers that unpack two values (e.g. when the
            # group has no build queue configured).
            return None, False
        # Fixed: the template previously contained a literal instead of the
        # {filename} placeholder, so the passed filename= kwarg was silently
        # ignored by str.format and the path never named the .dsc file.
        fname = "{root}/{directory}/{filename}".format(
            root=pool_root,
            directory=self.directory,
            filename=self.dsc_filename,
        )
        return fname, os.path.exists(fname)
    @property
    def dsc_path(self):
        # Prefer the build queue copy; fall back to the main repository.
        fname, found = self._get_dsc_path(self.group.buildq_path)
        if not found:
            fname, _ = self._get_dsc_path(self.group.repo_path)
        return fname
    @property
    def dsc_url(self):
        """Public URL of the .dsc, preferring the build queue when the file
        is present there on disk.
        """
        _, found = self._get_dsc_path(self.group.buildq_path)
        repo_url = self.group.buildq_url if found else self.group.repo_url
        # Same {filename} placeholder fix as in _get_dsc_path.
        return "{root}/{directory}/{filename}".format(
            root=repo_url,
            directory=self.directory,
            filename=self.dsc_filename,
        )
    def __str__(self):
        return "%s (%s)" % (self.name, self.version)
    def __repr__(self):
        return "<Source: %s/%s (%s)>" % (self.name, self.version, self.id)
class Maintainer(Base):
    """A maintainer/uploader entry recorded for a source package."""

    __tablename__ = 'maintainers'

    # attribute -> dotted lookup path used by the generic _debilize serializer
    _debile_objs = {
        "id": "id",
        "name": "name",
        "email": "email",
        "comaintainer": "comaintainer",
        "original_maintainer": "original_maintainer",
    }
    debilize = _debilize

    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    email = Column(String(255), nullable=False)
    # True for entries parsed from the Uploaders: field (see create_source).
    comaintainer = Column(Boolean, nullable=False, default=False)
    # True for entries parsed from XSBC-Original-Maintainer (see create_source).
    original_maintainer = Column(Boolean, nullable=False, default=False)
    source_id = Column(Integer, ForeignKey('sources.id', ondelete="CASCADE"), nullable=False)
    source = relationship("Source", foreign_keys=[source_id],
                          backref=backref('maintainers', passive_deletes=True,
                                          cascade="save-update, merge, delete"))

    def __str__(self):
        # RFC 822-style "Name <email>" rendering.
        return "%s <%s>" % (self.name, self.email)

    def __repr__(self):
        return "<Maintainer: %s (%s)>" % (self.email, self.id)
class Binary(Base):
    """A binary build of a Source for one architecture."""

    __tablename__ = 'binaries'

    # attribute -> dotted lookup path used by the generic _debilize serializer
    _debile_objs = {
        "id": "id",
        "name": "name",
        "version": "version",
        "group": "group.name",
        "suite": "suite.name",
        "component": "component.name",
        "arch": "arch.name",
        "builder": "build_job.builder.__debilize__",
        "uploaded_at": "uploaded_at",
        "group_id": "group.id",
        "source_id": "source.id",
        "debs": "debs.__list__",
    }

    def debilize(self):
        """Serialize, embedding the full group and source objects."""
        obj = _debilize(self)
        obj['group_obj'] = _debilize(self.group)
        obj['source_obj'] = _debilize(self.source)
        return obj

    id = Column(Integer, primary_key=True)

    # Name and version mirror the source package.
    @hybrid_property
    def name(self):
        return self.source.name

    @hybrid_property
    def version(self):
        return self.source.version

    arch_id = Column(Integer, ForeignKey('arches.id', ondelete="RESTRICT"), nullable=False)
    arch = relationship("Arch", foreign_keys=[arch_id])
    source_id = Column(Integer, ForeignKey('sources.id', ondelete="CASCADE"), nullable=False)
    source = relationship("Source", foreign_keys=[source_id],
                          backref=backref("binaries", order_by=id, passive_deletes=True,
                                          cascade="save-update, merge, delete"))
    # Build job that produced this binary. use_alter defers constraint
    # creation: jobs reference binaries and binaries reference jobs.
    build_job_id = Column(Integer, ForeignKey('jobs.id', ondelete="SET NULL",
                                              name='fk_build_job_id', use_alter=True),
                          nullable=True, default=None)
    build_job = relationship("Job", foreign_keys=[build_job_id],
                             backref=backref("built_binaries", order_by=id, passive_deletes=True))

    # Convenience accessors delegating to the source.
    @hybrid_property
    def group_suite(self):
        return self.source.group_suite

    @hybrid_property
    def group(self):
        return self.source.group

    @hybrid_property
    def suite(self):
        return self.source.suite

    @hybrid_property
    def component(self):
        return self.source.component

    uploaded_at = Column(DateTime, nullable=False)

    def __str__(self):
        return "%s (%s)" % (self.name, self.version)

    def __repr__(self):
        return "<Binary: %s/%s (%s)>" % (self.name, self.version, self.id)
class Deb(Base):
    """A single .deb file produced by a binary build.

    Knows how to locate itself on disk (repository pool or build-queue
    pool) and how to construct its public URL.
    """

    __tablename__ = 'debs'

    # attribute -> dotted lookup path used by the generic _debilize serializer
    _debile_objs = {
        "id": "id",
        "directory": "directory",
        "filename": "filename",
        "path": "path",
        "url": "url",
    }
    debilize = _debilize

    id = Column(Integer, primary_key=True)
    directory = Column(String(255), nullable=False)  # pool directory
    filename = Column(String(255), nullable=False)   # .deb file name
    binary_id = Column(Integer, ForeignKey('binaries.id', ondelete="CASCADE"), nullable=False)
    binary = relationship("Binary", foreign_keys=[binary_id],
                          backref=backref("debs", passive_deletes=True,
                                          cascade="save-update, merge, delete"))

    # Convenience accessors delegating to the owning binary.
    @hybrid_property
    def group_suite(self):
        return self.binary.group_suite

    @hybrid_property
    def group(self):
        return self.binary.group

    @hybrid_property
    def suite(self):
        return self.binary.suite

    @hybrid_property
    def component(self):
        return self.binary.component

    @hybrid_property
    def arch(self):
        return self.binary.arch

    def _get_deb_path(self, pool_root):
        """Return ``(path, exists)`` for this .deb under *pool_root*.

        ``(None, False)`` when no pool root is configured.
        """
        if not pool_root:
            return None, False
        # Fix: the format string had lost its {filename} placeholder even
        # though the keyword was being supplied.
        fname = "{root}/{directory}/{filename}".format(
            root=pool_root,
            directory=self.directory,
            filename=self.filename,
        )
        if not os.path.exists(fname):
            return fname, False
        return fname, True

    @property
    def path(self):
        """Filesystem path, preferring the repository pool.

        NOTE(review): Source.dsc_path probes the pools in the opposite
        order (build queue first) -- confirm the asymmetry is intended.
        """
        fname, found = self._get_deb_path(self.group.repo_path)
        if not found:
            fname, _ = self._get_deb_path(self.group.buildq_path)
        return fname

    @property
    def url(self):
        """Public URL, preferring the build queue when the file is there."""
        _, found = self._get_deb_path(self.group.buildq_path)
        if found:
            repo_url = self.group.buildq_url
        else:
            repo_url = self.group.repo_url
        # Fix: restore the {filename} placeholder (kwarg was supplied but
        # unused by the old format string).
        return "{root}/{directory}/{filename}".format(
            root=repo_url,
            directory=self.directory,
            filename=self.filename,
        )
# Many-to-Many relationship
# Self-referential association table: links a blocked Job to the Jobs that
# must complete before it may run (see Job.depedencies / Job.blocking).
job_dependencies = (
    Table('job_dependencies', Base.metadata,
          Column('blocked_job_id', Integer, ForeignKey('jobs.id', ondelete="CASCADE"), nullable=False),
          Column('blocking_job_id', Integer, ForeignKey('jobs.id', ondelete="CASCADE"), nullable=False)))
class Job(Base):
    """A unit of work (a build or a QA check) for one Source on one arch."""

    __tablename__ = 'jobs'

    # attribute -> dotted lookup path used by the generic _debilize serializer
    _debile_objs = {
        "id": "id",
        "source": "source.__str__",
        "name": "name",
        "check": "check.name",
        "group": "group.name",
        "suite": "suite.name",
        "component": "component.name",
        "arch": "arch.name",
        "builder": "builder.__debilize__",
        "assigned_at": "assigned_at",
        "finished_at": "finished_at",
        "failed": "failed",
        "group_id": "group.id",
        "source_id": "source.id",
        "binary_id": "binary.id",
        "do_indep": "do_indep",
    }

    def debilize(self):
        """Serialize, embedding the related group/source/binary objects."""
        obj = _debilize(self)
        obj['group_obj'] = _debilize(self.group)
        obj['source_obj'] = _debilize(self.source)
        obj['binary_obj'] = _debilize(self.binary)
        return obj

    id = Column(Integer, primary_key=True)

    @hybrid_property
    def name(self):
        # e.g. "build [amd64]"
        return self.check.name + " [" + self.arch.name + "]"

    # This is a hack for Tanglu, so we can use dose for depwait calculations
    # instead using the as-of-now unimplemented debile depwait support.
    dose_report = Column(String(255), nullable=True, default=None)

    check_id = Column(Integer, ForeignKey('checks.id', ondelete="RESTRICT"), nullable=False)
    check = relationship("Check", foreign_keys=[check_id])

    # Convenience accessors delegating to the source.
    @hybrid_property
    def group_suite(self):
        return self.source.group_suite

    @hybrid_property
    def group(self):
        return self.source.group

    @hybrid_property
    def suite(self):
        return self.source.suite

    @hybrid_property
    def component(self):
        return self.source.component

    arch_id = Column(Integer, ForeignKey('arches.id', ondelete="RESTRICT"), nullable=False)
    arch = relationship("Arch", foreign_keys=[arch_id])
    source_id = Column(Integer, ForeignKey('sources.id', ondelete="CASCADE"), nullable=False)
    source = relationship("Source", foreign_keys=[source_id],
                          backref=backref("jobs", order_by=id, passive_deletes=True,
                                          cascade="save-update, merge, delete"))
    # For binary (QA) jobs: the binary being examined (set by new_binary()).
    binary_id = Column(Integer, ForeignKey('binaries.id', ondelete="CASCADE"),
                       nullable=True, default=None)
    binary = relationship("Binary", foreign_keys=[binary_id],
                          backref=backref("jobs", order_by=id, passive_deletes=True,
                                          cascade="save-update, merge, delete"))
    builder_id = Column(Integer, ForeignKey('builders.id', ondelete="RESTRICT"),
                        nullable=True, default=None)
    builder = relationship("Builder", foreign_keys=[builder_id])

    # Todo: Rename to something better.
    # Also pre-loaded by create_jobs() to bias scheduling priority.
    assigned_count = Column(Integer, nullable=False, default=0)
    assigned_at = Column(DateTime, nullable=True, default=None)
    finished_at = Column(DateTime, nullable=True, default=None)
    failed = Column(Boolean, nullable=True, default=None)

    # NOTE: "depedencies" is a misspelling of "dependencies", kept as-is
    # because it is referenced elsewhere in this module (create_jobs).
    depedencies = relationship(
        "Job", secondary=job_dependencies, passive_deletes=True,
        cascade="save-update, merge, delete",
        backref=backref('blocking', passive_deletes=True,
                        cascade="save-update, merge, delete"),
        primaryjoin=(id == job_dependencies.c.blocking_job_id),
        secondaryjoin=(id == job_dependencies.c.blocked_job_id),
    )

    @property
    def do_indep(self):
        # True when this build job should also build arch-independent
        # packages: it is a build check, runs on the source's affinity
        # arch, and no arch:all binary exists yet.
        return (self.check.build and
                self.arch == self.source.affinity and
                not any(x.arch.name == "all" for x in self.source.binaries))

    # Called when the .changes for a build job is processed
    def new_binary(self, arch=None):
        """Record the Binary produced by this build job and unblock
        any jobs that were waiting on this build."""
        if not self.check.build:
            raise ValueError("add_binary() is for build jobs only!")
        arch = arch or self.arch
        if not arch.name in [self.arch.name, "all"]:
            raise ValueError("add_binary() called with invalid arch!")
        binary = Binary(build_job=self, source=self.source, arch=arch,
                        uploaded_at=datetime.utcnow())
        # Attach the new binary to the already-created QA jobs for it.
        for job in self.source.jobs:
            if (job.check.binary and job.source == self.source and job.arch == arch):
                job.binary = binary
        self.dose_report = None
        # The build produced output, so release every blocked job.
        for job in list(self.blocking):
            job.depedencies.remove(self)
        return binary

    # Called when a .dud for any job is processed
    def new_result(self, fire, failed):
        """Record a Result (with firehose analysis) for this job."""
        result = Result(job=self, uploaded_at=datetime.utcnow())
        result.firehose = fire
        result.failed = failed
        self.failed = failed
        # Only delete the dependency if the job was successful, and
        # not if it is a build job (that is handled by add_binary()).
        if not result.failed and not self.check.build:
            for job in list(self.blocking):
                job.depedencies.remove(self)
        return result

    def __str__(self):
        return "%s %s" % (self.source, self.name)

    def __repr__(self):
        return "<Job: %s %s (%s)>" % (self.source, self.name, self.id)
class Result(Base):
    """The outcome of a Job, linked to its firehose analysis."""

    __tablename__ = 'results'

    # attribute -> dotted lookup path used by the generic _debilize serializer
    _debile_objs = {
        "id": "id",
        "job": "job.__str__",
        "group": "group.name",
        "suite": "suite.name",
        "component": "component.name",
        "arch": "arch.name",
        "uploaded_at": "uploaded_at",
        "failed": "failed",
        "directory": "directory",
        "path": "path",
        "url": "url",
        "group_id": "group.id",
        "source_id": "source.id",
        "binary_id": "binary.id",
        "job_id": "job.id",
    }

    def debilize(self):
        """Serialize, embedding the related group/source/binary/job."""
        obj = _debilize(self)
        obj['group_obj'] = _debilize(self.group)
        obj['source_obj'] = _debilize(self.source)
        obj['binary_obj'] = _debilize(self.binary)
        obj['job_obj'] = _debilize(self.job)
        return obj

    id = Column(Integer, primary_key=True)
    job_id = Column(Integer, ForeignKey('jobs.id', ondelete="CASCADE"), nullable=False)
    job = relationship("Job", foreign_keys=[job_id],
                       backref=backref("results", order_by=id, passive_deletes=True,
                                       cascade="save-update, merge, delete"))

    # Convenience accessors delegating to the job.
    @hybrid_property
    def source(self):
        return self.job.source

    @hybrid_property
    def binary(self):
        return self.job.binary

    @hybrid_property
    def group_suite(self):
        return self.job.group_suite

    @hybrid_property
    def group(self):
        return self.job.group

    @hybrid_property
    def suite(self):
        return self.job.suite

    @hybrid_property
    def component(self):
        return self.job.component

    @hybrid_property
    def arch(self):
        return self.job.arch

    firehose_id = Column(String, ForeignKey('analysis.id', ondelete="RESTRICT"), nullable=False)
    firehose = relationship(Analysis, single_parent=True)
    uploaded_at = Column(DateTime, nullable=False)
    failed = Column(Boolean, nullable=False)

    @property
    def directory(self):
        # Stable layout: <source>_<version>/<check>_<arch>/<result id>
        return "{source}_{version}/{check}_{arch}/{id}".format(
            source=self.source.name,
            version=self.source.version,
            check=self.job.check.name,
            arch=self.job.arch.name,
            id=self.id
        )

    @property
    def path(self):
        # Filesystem location under the group's files tree.
        return "{root}/{directory}".format(
            root=self.group.files_path,
            directory=self.directory,
        )

    @property
    def url(self):
        # Public URL under the group's files URL.
        return "{root}/{directory}".format(
            root=self.group.files_url,
            directory=self.directory,
        )
def create_source(dsc, group_suite, component, uploader,
                  affinity_preference, valid_affinities):
    """Create a Source row (with maintainers) from a parsed .dsc.

    Args:
        dsc: Mapping of .dsc control fields (Source, Version,
            Architecture, Maintainer, Uploaders, ...).
        group_suite: GroupSuite the source belongs to.
        component: Component the source belongs to.
        uploader: Person who uploaded the source.
        affinity_preference: Ordered arch preference used to choose the
            affinity architecture.
        valid_affinities: Space-separated arch names acceptable as the
            affinity.

    Returns:
        A new (unsaved) Source with arches, affinity and maintainers set.
    """
    source = Source(
        name=dsc['Source'],
        version=dsc['Version'],
        group_suite=group_suite,
        component=component,
        uploader=uploader,
        uploaded_at=datetime.utcnow()
    )
    source.arches = get_source_arches(dsc['Architecture'].split(),
                                      group_suite.arches)
    # Sources building arch-dependent packages should build any
    # arch-independent packages on an architecture it is building
    # arch-dependent packages on.
    source.affinity = get_preferred_affinity(
        affinity_preference,
        valid_affinities.split(),
        [x for x in source.arches if x.name not in ["source", "all"]] or
        [x for x in source.group_suite.arches if x.name not in ["source", "all"]]
    )

    # Fix: the old pattern was a non-raw string containing "\<" and "\>",
    # which are invalid escape sequences (DeprecationWarning on modern
    # Pythons); '<'/'>' need no escaping in a regex.
    maintainer_re = re.compile(r"(?P<name>.*) <(?P<email>.*)>")

    source.maintainers.append(Maintainer(
        comaintainer=False,
        original_maintainer=False,
        **maintainer_re.match(dsc.get('Maintainer')).groupdict()
    ))
    if dsc.get('XSBC-Original-Maintainer', None):
        source.maintainers.append(Maintainer(
            comaintainer=False,
            original_maintainer=True,
            **maintainer_re.match(dsc.get('XSBC-Original-Maintainer')).groupdict()
        ))
    # Uploaders is comma-separated; the regex keeps quoted strings (which
    # may themselves contain commas) intact.
    whos = re.findall(r'(?:[^,"]|"(?:\\.|[^"])*")+', dsc.get("Uploaders", ""))
    for who in [x.strip() for x in whos if x.strip() != ""]:
        source.maintainers.append(Maintainer(
            comaintainer=True,
            original_maintainer=False,
            **maintainer_re.match(who).groupdict()
        ))
    return source
def create_jobs(source, dose_report=None):
    """
    Create jobs for Source `source`, using the an architecture matching
    `valid_affinities` for any arch "all" jobs.
    """
    # Locate the two pseudo-arches; both must exist in the suite.
    arch_source = None
    arch_all = None
    for arch in source.group_suite.arches:
        if arch.name == "source":
            arch_source = arch
        if arch.name == "all":
            arch_all = arch
    if not arch_source or not arch_all:
        raise ValueError("Missing arch:all or arch:source in the group_suite.")

    builds = {}     # arch -> build Job created below
    binaries = {}   # arch -> Binary that already exists
    for binary in source.binaries:
        binaries[binary.arch] = binary

    # Source-level checks run on the pseudo arch "source".
    for check in source.group_suite.get_source_checks():
        j = Job(check=check, arch=arch_source,
                source=source, binary=None)
        source.jobs.append(j)

    # Decide where arch:all packages get built, if they are needed at all.
    arch_indep = None
    if arch_all in source.arches and arch_all not in binaries:
        # We need to build arch:all packages
        if source.affinity in source.arches and source.affinity not in binaries:
            # We can build them together with the arch:affinity packages
            arch_indep = source.affinity
        else:
            # We need to build them separately
            arch_indep = arch_all

    # Build jobs, one per missing arch (skipping arch:all when it is
    # built alongside the affinity arch instead).
    for check in source.group_suite.get_build_checks():
        for arch in source.arches:
            if arch == arch_all and arch_indep != arch_all:
                continue
            if arch not in binaries:
                j = Job(check=check, arch=arch,
                        source=source, binary=None,
                        dose_report=dose_report)
                builds[arch] = j
                source.jobs.append(j)

    # Binary (QA) jobs depend on the build job(s) producing their packages.
    for check in source.group_suite.get_binary_checks():
        for arch in source.arches:
            deps = []
            if arch in builds:
                deps.append(builds[arch])
            if arch_indep and arch_indep in builds and arch != arch_indep:
                deps.append(builds[arch_indep])
            binary = binaries.get(arch, None)
            j = Job(check=check, arch=arch,
                    source=source, binary=binary)
            source.jobs.append(j)
            for dep in deps:
                j.depedencies.append(dep)

    for job in source.jobs:
        # Fake the assigned_count to prioritize build jobs and production suites slightly.
        job.assigned_count = ((4 if job.source.suite.name in ["staging", "sid", "experimental"] else 0) +
                              (8 if not job.check.build else 0))
| |
# Copyright 2015-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
Reporting Tools
---------------
Provides reporting tools against cloud-custodian's json output records.
For each policy execution custodian stores structured output
in json format of the records a policy has filtered to
in an s3 bucket.
These represent the records matching the policy filters
that the policy will apply actions to.
The reporting mechanism here simply fetches those records
over a given time interval and constructs a resource type
specific report on them.
CLI Usage
=========
.. code-block:: bash
$ custodian report -s s3://cloud-custodian-xyz/policies \\
-p ec2-tag-compliance-terminate -v > terminated.csv
"""
from concurrent.futures import as_completed
import csv
from datetime import datetime
import gzip
import io
import json
import jmespath
import logging
import os
from tabulate import tabulate
from botocore.compat import OrderedDict
from dateutil.parser import parse as date_parse
from c7n.executor import ThreadPoolExecutor
from c7n.utils import local_session, dumps
log = logging.getLogger('custodian.reports')
def report(policies, start_date, options, output_fh, raw_output_fh=None):
    """Format a policy's extant records into a report.

    Fetches the matched-resource records for each policy (from S3 or the
    local filesystem, depending on the policy's output configuration),
    tags each record with its policy name and region, and renders them as
    CSV, JSON, or a tabulate-supported grid to *output_fh*.

    Args:
        policies: Policies to report on (assumed to share a resource type).
        start_date: Earliest execution date to include (S3 outputs only).
        options: CLI options (format, field, no_default_fields, ...).
        output_fh: Stream the formatted report is written to.
        raw_output_fh: Optional stream for a raw JSON dump of the records.
    """
    regions = {p.options.region for p in policies}
    policy_names = {p.name for p in policies}
    formatter = Formatter(
        policies[0].resource_manager.resource_type,
        extra_fields=options.field,
        include_default_fields=not options.no_default_fields,
        # Region/Policy columns only make sense when they vary.
        include_region=len(regions) > 1,
        include_policy=len(policy_names) > 1
    )

    records = []
    for policy in policies:
        # initialize policy execution context for output access
        policy.ctx.initialize()
        if policy.ctx.output.type == 's3':
            policy_records = record_set(
                policy.session_factory,
                policy.ctx.output.config['netloc'],
                policy.ctx.output.config['path'].strip('/'),
                start_date)
        else:
            policy_records = fs_record_set(policy.ctx.log_dir, policy.name)

        log.debug("Found %d records for region %s", len(policy_records), policy.options.region)

        for record in policy_records:
            record['policy'] = policy.name
            record['region'] = policy.options.region

        records += policy_records

    rows = formatter.to_csv(records)
    if options.format == 'csv':
        # Fix: the headers were previously passed as csv.writer's
        # *dialect* argument, where they were silently ignored.
        writer = csv.writer(output_fh)
        writer.writerow(formatter.headers())
        writer.writerows(rows)
    elif options.format == 'json':
        print(dumps(records, indent=2))
    else:
        # We special case CSV, and for other formats we pass to tabulate
        print(tabulate(rows, formatter.headers(), tablefmt=options.format))

    if raw_output_fh is not None:
        dumps(records, raw_output_fh, indent=2)
def _get_values(record, field_list, tag_map):
tag_prefix = 'tag:'
list_prefix = 'list:'
count_prefix = 'count:'
vals = []
for field in field_list:
if field.startswith(tag_prefix):
tag_field = field.replace(tag_prefix, '', 1)
value = tag_map.get(tag_field, '')
elif field.startswith(list_prefix):
list_field = field.replace(list_prefix, '', 1)
value = jmespath.search(list_field, record)
if value is None:
value = ''
else:
value = ', '.join([str(v) for v in value])
elif field.startswith(count_prefix):
count_field = field.replace(count_prefix, '', 1)
value = jmespath.search(count_field, record)
if value is None:
value = ''
else:
value = str(len(value))
else:
value = jmespath.search(field, record)
if value is None:
value = ''
if not isinstance(value, str):
value = str(value)
vals.append(value)
return vals
class Formatter:
    """Flattens resource records into tabular rows.

    The column set is: the resource type's default report fields
    (optionally), plus any "Header=jmespath" extra fields, plus
    Region/Policy columns when reporting spans several of either.
    """

    def __init__(self, resource_type, extra_fields=(), include_default_fields=True,
                 include_region=False, include_policy=False, fields=()):
        # Lookup default fields for resource type.
        model = resource_type
        self._id_field = model.id
        self._date_field = getattr(model, 'date', None)

        fields = OrderedDict(fields)
        mfields = getattr(model, 'default_report_fields', None)
        if not mfields:
            # Fall back to the model's id/name/date attributes.
            mfields = [model.id]
            if model.name != model.id:
                mfields.append(model.name)
            if getattr(model, 'date', None):
                mfields.append(model.date)
        if include_default_fields:
            # Header text and lookup expression are identical for defaults.
            fields.update(OrderedDict(zip(mfields, mfields)))

        for index, field in enumerate(extra_fields):
            # TODO this type coercion should be done at cli input, not here
            h, cexpr = field.split('=', 1)
            fields[h] = cexpr

        # Add these at the end so that they are the last fields
        if include_default_fields:
            if include_region:
                fields['Region'] = 'region'
            if include_policy:
                fields['Policy'] = 'policy'

        self.fields = fields

    def headers(self):
        # Column headers, in insertion order.
        return self.fields.keys()

    def extract_csv(self, record):
        # Expose tags to _get_values as a simple key -> value map.
        tag_map = {t['Key']: t['Value'] for t in record.get('Tags', ())}
        return _get_values(record, self.fields.values(), tag_map)

    def uniq_by_id(self, records):
        """Only the first record for each id"""
        uniq = []
        keys = set()
        for rec in records:
            rec_id = rec[self._id_field]
            if rec_id not in keys:
                uniq.append(rec)
                keys.add(rec_id)
        return uniq

    def to_csv(self, records, reverse=True, unique=True):
        """Render records as a list of row lists (newest first by default).

        Note: sorts *records* in place when a date field is available.
        """
        if not records:
            return []

        # Sort before unique to get the first/latest record
        date_sort = ('CustodianDate' in records[0] and 'CustodianDate' or
                     self._date_field)
        if date_sort:
            records.sort(
                key=lambda r: r[date_sort], reverse=reverse)

        if unique:
            uniq = self.uniq_by_id(records)
        else:
            uniq = records
        log.debug("Uniqued from %d to %d" % (len(records), len(uniq)))
        rows = list(map(self.extract_csv, uniq))
        return rows
def fs_record_set(output_path, policy_name):
    """Load records from a local (filesystem) policy output directory.

    Reads ``resources.json`` under *output_path* and stamps every record
    with a ``CustodianDate`` taken from the file's change time, mirroring
    what the S3 path does so records sort consistently.

    Returns an empty list when no output file exists.
    """
    record_path = os.path.join(output_path, 'resources.json')

    if not os.path.exists(record_path):
        return []

    mdate = datetime.fromtimestamp(
        os.stat(record_path).st_ctime)

    with open(record_path) as fh:
        records = json.load(fh)
    # Plain loop instead of the old side-effect-only list comprehension.
    for record in records:
        record['CustodianDate'] = mdate
    return records
def record_set(session_factory, bucket, key_prefix, start_date, specify_hour=False):
    """Retrieve all s3 records for the given policy output url

    From the given start date.
    """
    s3 = local_session(session_factory).client('s3')

    records = []
    key_count = 0

    # Keys are laid out as <prefix>/YYYY/mm/dd/HH/resources.json.gz, so a
    # lexicographic StartAfter marker skips everything before start_date.
    date = start_date.strftime('%Y/%m/%d')
    if specify_hour:
        date += "/{}".format(start_date.hour)
    else:
        date += "/00"

    marker = "{}/{}/resources.json.gz".format(key_prefix.strip("/"), date)

    p = s3.get_paginator('list_objects_v2').paginate(
        Bucket=bucket,
        Prefix=key_prefix.strip('/') + '/',
        StartAfter=marker,
    )

    # Fetch the matching archives concurrently.
    with ThreadPoolExecutor(max_workers=20) as w:
        for key_set in p:
            if 'Contents' not in key_set:
                continue
            keys = [k for k in key_set['Contents']
                    if k['Key'].endswith('resources.json.gz')]
            key_count += len(keys)
            futures = map(lambda k: w.submit(
                get_records, bucket, k, session_factory), keys)

            for f in as_completed(futures):
                records.extend(f.result())

    log.info("Fetched %d records across %d files" % (
        len(records), key_count))
    return records
def get_records(bucket, key, session_factory):
    """Download one gzipped resources.json from S3, stamping each record
    with the CustodianDate parsed from its key path."""
    # we're doing a lot of this in memory, worst case
    # though we're talking about a 10k objects, else
    # we should spool to temp files

    # key ends with 'YYYY/mm/dd/HH/resources.json.gz'
    # so take the date parts only
    date_str = '-'.join(key['Key'].rsplit('/', 5)[-5:-1])
    custodian_date = date_parse(date_str)
    s3 = local_session(session_factory).client('s3')
    result = s3.get_object(Bucket=bucket, Key=key['Key'])
    blob = io.BytesIO(result['Body'].read())

    records = json.load(gzip.GzipFile(fileobj=blob))
    log.debug("bucket: %s key: %s records: %d",
              bucket, key['Key'], len(records))
    for r in records:
        r['CustodianDate'] = custodian_date
    return records
| |
""" MPF plugin which enables the Backbox Control Protocol (BCP) v1.0alpha"""
# bcp.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# The Backbox Control Protocol was conceived and developed by:
# Quinn Capen
# Kevin Kelm
# Gabe Knuth
# Brian Madden
# Mike ORourke
# Documentation and more info at http://missionpinball.com/mpf
import logging
import socket
import threading
import sys
import traceback
import urllib
import urlparse
from Queue import Queue
import copy
from mpf.system.player import Player
from mpf.system.utility_functions import Util
from mpf.devices.shot import Shot
from mpf.system.light_controller import ExternalShow
import version
def decode_command_string(bcp_string):
    """Decodes a BCP command string into separate command and parameter parts.

    Args:
        bcp_string: The incoming UTF-8, URL encoded BCP command string.

    Returns:
        A tuple of the command string and a dictionary of kwarg pairs.

    Example:
        Input: trigger?name=hello&foo=Foo%20Bar
        Output: ('trigger', {'name': 'hello', 'foo': 'Foo Bar'})

    Note that BCP commands and parameter names are not case-sensitive and will
    be converted to lowercase. Parameter values are case sensitive, and case
    will be preserved.
    """
    # Python 2 urlparse: the path is the command, the query holds params.
    bcp_command = urlparse.urlsplit(bcp_string)
    try:
        kwargs = urlparse.parse_qs(bcp_command.query)
    except AttributeError:
        # Presumably guards Pythons where urlparse lacks parse_qs --
        # fall back to "no parameters".
        kwargs = dict()

    # parse_qs yields lists of values; only the first value is kept.
    return (bcp_command.path.lower(),
            dict((k.lower(), urllib.unquote(v[0]))
                 for k, v in kwargs.iteritems()))
def encode_command_string(bcp_command, **kwargs):
    """Encodes a BCP command and kwargs into a valid BCP command string.

    Args:
        bcp_command: String of the BCP command name.
        **kwargs: Optional pair(s) of kwargs which will be appended to the
            command.

    Returns:
        A string.

    Example:
        Input: encode_command_string('trigger', {'name': 'hello', 'foo': 'Bar'})
        Output: trigger?name=hello&foo=Bar

    Note that BCP commands and parameter names are not case-sensitive and will
    be converted to lowercase. Parameter values are case sensitive, and case
    will be preserved.
    """
    kwarg_string = ''

    try:
        # URL-encode each pair; keys are lowercased, values stringified.
        for k, v in kwargs.iteritems():
            kwarg_string += (urllib.quote(k.lower(), '') + '=' +
                             urllib.quote(str(v), '') + '&')
        # Drop the trailing '&'.
        kwarg_string = kwarg_string[:-1]
    except (TypeError, AttributeError):
        pass

    # Python 2: assemble "command?k=v&..." and return it as unicode.
    return unicode(urlparse.urlunparse((None, None, bcp_command.lower(), None,
                                        kwarg_string, None)), 'utf-8')
class BCP(object):
"""The parent class for the BCP client.
This class can support connections with multiple remote hosts at the same
time using multiple instances of the BCPClientSocket class.
Args:
machine: A reference to the main MPF machine object.
The following BCP commands are currently implemented:
ball_start?player_num=x&ball=x
ball_end
config?volume=0.5
error
get
goodbye
hello?version=xxx&controller_name=xxx&controller_version=xxx
mode_start?name=xxx&priority=xxx
mode_stop?name=xxx
player_added?player_num=x
player_score?value=x&prev_value=x&change=x&player_num=x
player_turn_start?player_num=x
player_variable?name=x&value=x&prev_value=x&change=x&player_num=x
set
shot?name=x
switch?name=x&state=x
timer
trigger?name=xxx
"""
active_connections = 0
def __init__(self, machine):
    # Bail out entirely when no BCP connections are configured; the
    # plugin then registers nothing.
    if ('bcp' not in machine.config or
            'connections' not in machine.config['bcp']):
        return

    self.log = logging.getLogger('BCP')
    self.machine = machine

    self.config = machine.config['bcp']
    self.receive_queue = Queue()
    self.bcp_events = dict()
    self.connection_config = self.config['connections']
    self.bcp_clients = list()

    # Dispatch table: incoming BCP command name -> handler method.
    self.bcp_receive_commands = {'error': self.bcp_receive_error,
                                 'switch': self.bcp_receive_switch,
                                 'trigger': self.bcp_receive_trigger,
                                 'get': self.bcp_receive_get,
                                 'set': self.bcp_receive_set,
                                 'reset_complete':
                                     self.bcp_receive_reset_complete,
                                 'external_show_start':
                                     self.external_show_start,
                                 'external_show_stop':
                                     self.external_show_stop,
                                 'external_show_frame':
                                     self.external_show_frame,
                                 }

    self.dmd = None
    self.filter_player_events = True
    self.filter_machine_vars = True
    self.filter_shots = True
    self.send_player_vars = False
    self.send_machine_vars = False
    self.mpfmc_trigger_events = set()
    self.track_volumes = dict()
    self.volume_control_enabled = False

    self.external_shows = dict()
    self.external_show_queue = Queue()
    self.light_controller_connected = False

    # Add the following to the set of events that already have mpf mc
    # triggers since these are all posted on the mc side already
    self.mpfmc_trigger_events.add('timer_tick')
    self.mpfmc_trigger_events.add('ball_started')
    self.mpfmc_trigger_events.add('ball_ended')
    self.mpfmc_trigger_events.add('player_add_success')

    # Physical DMD support is optional; missing config keys mean no DMD.
    try:
        if self.machine.config['dmd']['physical']:
            self._setup_dmd()
    except KeyError:
        pass

    try:
        self.bcp_events = self.config['event_map']
        self.process_bcp_events()
    except KeyError:
        pass

    try:
        self._setup_track_volumes(self.machine.config['volume'])
    except KeyError:
        self.log.warning("No 'Volume:' section in config file")

    # Player variable streaming ('__all__' disables filtering).
    if ('player_variables' in self.config and
            self.config['player_variables']):

        self.send_player_vars = True

        self.config['player_variables'] = (
            Util.string_to_list(self.config['player_variables']))

        if '__all__' in self.config['player_variables']:
            self.filter_player_events = False

        self._setup_player_monitor()

    # Machine variable streaming ('__all__' disables filtering).
    if ('machine_variables' in self.config and
            self.config['machine_variables']):

        self.send_machine_vars = True

        self.config['machine_variables'] = (
            Util.string_to_list(self.config['machine_variables']))

        if '__all__' in self.config['machine_variables']:
            self.filter_machine_vars = False

        self._setup_machine_var_monitor()

    # Shot monitoring ('__all__' disables filtering).
    if ('shots' in self.config and
            self.config['shots']):

        self.config['shots'] = (
            Util.string_to_list(self.config['shots']))

        if '__all__' in self.config['shots']:
            self.filter_shots = False

        self._setup_shot_monitor()

    self.register_mpfmc_trigger_events(self.machine.config)

    try:
        self.register_triggers(self.machine.config['triggers'])
    except KeyError:
        pass

    # Wire up machine events and mode lifecycle hooks.
    self.machine.events.add_handler('init_phase_2',
                                    self._setup_bcp_connections)
    self.machine.events.add_handler('timer_tick', self.get_bcp_messages)
    self.machine.events.add_handler('player_add_success',
                                    self.bcp_player_added)
    self.machine.events.add_handler('machine_reset_phase_1',
                                    self.bcp_reset)
    self.machine.events.add_handler('increase_volume', self.increase_volume)
    self.machine.events.add_handler('decrease_volume', self.decrease_volume)
    self.machine.events.add_handler('enable_volume_keys',
                                    self.enable_volume_keys)
    self.machine.events.add_handler('disable_volume_keys',
                                    self.disable_volume_keys)

    self.machine.mode_controller.register_start_method(self.bcp_mode_start, 'mode')
    self.machine.mode_controller.register_start_method(self.register_triggers,
                                                       'triggers')
    self.machine.mode_controller.register_load_method(
        self.register_mpfmc_trigger_events)
def __repr__(self):
    # Fixed label identifying the plugin in debug output.
    return '<BCP Module>'
def _setup_dmd(self):
    """Configure the physical DMD via the platform named in the config."""
    dmd_platform = self.machine.default_platform
    if self.machine.physical_hw:
        if self.machine.config['hardware']['dmd'] != 'default':
            # A dedicated DMD hardware platform overrides the default.
            dmd_platform = (self.machine.hardware_platforms
                            [self.machine.config['hardware']['dmd']])

    self.dmd = dmd_platform.configure_dmd()
def _setup_bcp_connections(self):
    """Open a client socket for each configured BCP connection, then push
    the current machine variables to the remote end(s)."""
    for name, settings in self.connection_config.iteritems():
        if 'host' not in settings:
            # NOTE(review): 'break' abandons ALL remaining connections
            # when one entry lacks a host; 'continue' may be intended.
            break

        self.bcp_clients.append(BCPClientSocket(self.machine, name,
                                                settings,
                                                self.receive_queue))

    # todo should this be here?
    self._send_machine_vars()
def _send_machine_vars(self):
    """Push the current value of every machine variable to all clients."""
    for var_name, settings in self.machine.machine_vars.iteritems():
        self.send(bcp_command='machine_variable',
                  name=var_name,
                  value=settings['value'])
def remove_bcp_connection(self, bcp_client):
    """Removes a BCP connection to a remote BCP host.

    Args:
        bcp_client: A reference to the BCPClientSocket instance you want to
            remove.
    """
    try:
        # Fix: this previously removed `self` (the BCP plugin itself)
        # from the client list, which always failed; the passed client
        # is what must be removed.
        self.bcp_clients.remove(bcp_client)
    except ValueError:
        # Client was never registered (or already removed); nothing to do.
        pass
def _setup_player_monitor(self):
    """Enable the global player-variable monitor and register our callback."""
    Player.monitor_enabled = True
    self.machine.register_monitor('player', self._player_var_change)

    # Since we have a player monitor setup, we need to add whatever events
    # it will send to our ignored list. Otherwise
    # register_mpfmc_trigger_events() will register for them too and they'll
    # be sent twice
    self.mpfmc_trigger_events.add('player_score')

    # figure out which player events are being sent already and add them to
    # the list so we don't send them again
    if self.filter_player_events:
        for event in self.config['player_variables']:
            self.mpfmc_trigger_events.add('player_' + event.lower())
def _setup_machine_var_monitor(self):
    """Enable the machine-variable monitor and register our callback."""
    self.machine.machine_var_monitor = True
    self.machine.register_monitor('machine_vars', self._machine_var_change)

    # Variables already streamed here must not also get trigger events.
    if self.filter_machine_vars:
        for event in self.config['machine_variables']:
            self.mpfmc_trigger_events.add('machine_var_' + event.lower())
def _setup_shot_monitor(self):
    """Enable Shot monitoring so _shot() is called on shot state changes."""
    Shot.monitor_enabled = True
    self.machine.register_monitor('shots', self._shot)
def _player_var_change(self, name, value, prev_value, change, player_num):
    """Relay a player-variable change over BCP.

    'score' always goes out as the dedicated player_score command; other
    variables are sent only when streaming is enabled and the variable is
    not filtered out.
    """
    if name == 'score':
        self.send('player_score', value=value, prev_value=prev_value,
                  change=change, player_num=player_num)

    elif self.send_player_vars and (
            not self.filter_player_events or
            name in self.config['player_variables']):
        self.send(bcp_command='player_variable',
                  name=name,
                  value=value,
                  prev_value=prev_value,
                  change=change,
                  player_num=player_num)
def _machine_var_change(self, name, value, prev_value, change):
if self.send_machine_vars and (
not self.filter_machine_vars or
name in self.config['machine_variables']):
self.send(bcp_command='machine_variable',
name=name,
value=value,
prev_value=prev_value,
change=change)
def _shot(self, name, profile, state):
if self.filter_shots and name not in self.config['shots']:
return
self.send(bcp_command='shot', name=name, profile=profile, state=state)
def process_bcp_events(self):
    """Processes the BCP Events from the config."""
    # config is localized to BCPEvents
    # Each entry maps an MPF event to a BCP command, optionally with
    # parameter substitutions handled in _bcp_event_callback.
    for event, settings in self.bcp_events.iteritems():
        if 'params' in settings:
            self.machine.events.add_handler(event, self._bcp_event_callback,
                                            command=settings['command'],
                                            params=settings['params'])
        else:
            self.machine.events.add_handler(event, self._bcp_event_callback,
                                            command=settings['command'])
def _bcp_event_callback(self, command, params=None, **kwargs):
    """Send a configured BCP command, substituting %-placeholders.

    ``%var_name%`` is replaced with the current player variable of that
    name; a single ``%name`` is replaced with the matching event kwarg.
    """
    if params:
        # Deep copy so substitutions never mutate the registered config.
        params = copy.deepcopy(params)

        for param, value in params.iteritems():

            # Are there any text variables to replace on the fly?
            # todo should this go here?
            if '%' in value:

                # first check for player vars (%var_name%)
                if self.machine.game and self.machine.game.player:
                    for name, val in self.machine.game.player:
                        if '%' + name + '%' in value:
                            value = value.replace('%' + name + '%',
                                                  str(val))

                # now check for single % which means event kwargs
                for name, val in kwargs.iteritems():
                    if '%' + name in value:
                        params[param] = value.replace('%' + name, str(val))

        self.send(command, **params)
    else:
        self.send(command)
def register_mpfmc_trigger_events(self, config, **kwargs):
"""Scans an MPF config file and creates trigger events for the config
settings that need them.
Args:
config: An MPF config dictionary (can be the machine-wide or a mode-
specific one).
**kwargs: Not used. Included to catch any additional kwargs that may
be associted with this method being registered as an event
handler.
"""
self.log.debug("Registering Trigger Events")
try:
for event in config['show_player'].keys():
self.create_trigger_event(event)
except KeyError:
pass
try:
for event in config['slide_player'].keys():
self.create_trigger_event(event)
except KeyError:
pass
try:
for event in config['event_player'].keys():
self.create_trigger_event(event)
except KeyError:
pass
try:
for k, v in config['sound_player'].iteritems():
if 'start_events' in v:
for event in Util.string_to_list(v['start_events']):
self.create_trigger_event(event)
if 'stop_events' in v:
for event in Util.string_to_list(v['stop_events']):
self.create_trigger_event(event)
except KeyError:
pass
def create_trigger_event(self, event):
"""Registers a BCP trigger based on an MPF event.
Args:
event: String name of the event you're registering this trigger for.
The BCP trigger will be registered with the same name as the MPF event.
For example, if you pass the event "foo_event", the BCP command that
will be sent when that event is posted will be trigger?name=foo_event.
"""
if event not in self.mpfmc_trigger_events:
self.machine.events.add_handler(event,
handler=self.send_trigger,
name=event)
self.mpfmc_trigger_events.add(event)
    def register_triggers(self, config, priority=0, mode=None):
        """Sets up trigger events based on a 'triggers:' section of a config
        dictionary.

        Args:
            config: A python config dictionary.
            priority: (not used) Included since this method is called as part of
                a mode start which passed this parameter.
            mode: (not used) Included since this method is called as part of
                a mode start which passed this parameter.

        Returns:
            A (remove_handlers_by_keys, handler_key_list) tuple so the caller
            can unregister these handlers later (e.g. on mode stop).
        """
        # config is localized to 'Trigger'

        event_list = list()

        for event, settings in config.iteritems():
            params = dict()
            try:
                # Deep copy so the handler kwargs don't share state with the
                # registered config.
                params = copy.deepcopy(settings['params'])
            except KeyError:
                pass

            try:
                event_list.append(self.machine.events.add_handler(
                    event, handler=self.send, bcp_command='trigger',
                    name=settings['bcp_name'], **params))
            except KeyError:
                # 'bcp_name' missing; skip this trigger but keep processing
                # the rest.
                self.log.warning("Could not create trigger event for '%s'. "
                                 "Settings: %s",
                                 event, settings)

        return self.machine.events.remove_handlers_by_keys, event_list
def send_trigger(self, name, **kwargs):
# Since player variables are sent automatically, if we get a trigger
# for an event that starts with "player_", we need to only send it here
# if there's *not* a player variable with that name, since if there is
# a player variable then the player variable handler will send it.
if name.startswith('player_'):
try:
if self.machine.game.player.is_player_var(name.lstrip('player_')):
return
except AttributeError:
pass
self.send(bcp_command='trigger', name=name, **kwargs)
def send(self, bcp_command, callback=None, **kwargs):
"""Sends a BCP message.
Args:
bcp_command: String name of the BCP command that will be sent.
callback: An optional callback method that will be called as soon as
the BCP command is sent.
**kwargs: Optional kwarg pairs that will be sent as parameters along
with the BCP command.
Example:
If you call this method like this:
send('trigger', ball=1, string'hello')
The BCP command that will be sent will be this:
trigger?ball=1&string=hello
"""
bcp_string = encode_command_string(bcp_command, **kwargs)
for client in self.bcp_clients:
client.send(bcp_string)
if callback:
callback()
def get_bcp_messages(self):
"""Retrieves and processes new BCP messages from the receiving queue.
"""
while not self.receive_queue.empty():
cmd, kwargs = self.receive_queue.get(False)
self.log.debug("Processing command: %s %s", cmd, kwargs)
# todo convert to try. Haven't done it yet though because I couldn't
# figure out how to make it not swallow exceptions and it was
# getting annoying to troubleshoot
if cmd in self.bcp_receive_commands:
self.bcp_receive_commands[cmd](**kwargs)
else:
self.log.warning("Received invalid BCP command: %s", cmd)
self.send('error', message='invalid command',
command=cmd)
def shutdown(self):
"""Prepares the BCP clients for MPF shutdown."""
for client in self.bcp_clients:
client.stop()
def bcp_receive_error(self, **kwargs):
"""A remote BCP host has sent a BCP error message, indicating that a
command from MPF was not recognized.
This method only posts a warning to the log. It doesn't do anything else
at this point.
"""
self.log.warning('Received Error command from host with parameters: %s',
kwargs)
def bcp_receive_get(self, names, **kwargs):
"""Processes an incoming BCP 'get' command by posting an event
'bcp_get_<name>'. It's up to an event handler to register for that
event and to send the response BCP 'set' command.
"""
for name in Util.string_to_list(names):
self.machine.events.post('bcp_get_{}'.format(name))
    def bcp_receive_set(self, **kwargs):
        """Processes an incoming BCP 'set' command by posting an event
        'bcp_set_<name>' with a parameter value=<value>. It's up to an event
        handler to register for that event and to do something with it.

        Note that BCP set commands can contain multiple key/value pairs, and
        this method will post one event for each pair.
        """
        # One event per key/value pair received.
        for k, v in kwargs.iteritems():
            self.machine.events.post('bcp_set_{}'.format(k), value=v)
def bcp_receive_reset_complete(self, **kwargs):
self.machine.bcp_reset_complete()
def bcp_mode_start(self, config, priority, mode, **kwargs):
"""Sends BCP 'mode_start' to the connected BCP hosts and schedules
automatic sending of 'mode_stop' when the mode stops.
"""
self.send('mode_start', name=mode.name, priority=priority)
return self.bcp_mode_stop, mode.name
def bcp_mode_stop(self, name, **kwargs):
"""Sends BCP 'mode_stop' to the connected BCP hosts."""
self.send('mode_stop', name=name)
def bcp_reset(self):
"""Sends the 'reset' command to the remote BCP host."""
self.send('reset')
def bcp_receive_switch(self, name, state, **kwargs):
"""Processes an incoming switch state change request from a remote BCP
host.
Args:
name: String name of the switch to set.
state: Integer representing the state this switch will be set to.
1 = active, 0 = inactive, -1 means this switch will be flipped
from whatever its current state is to the opposite state.
"""
state = int(state)
if state == -1:
if self.machine.switch_controller.is_active(name):
state = 0
else:
state = 1
self.machine.switch_controller.process_switch(name=name,
state=state,
logical=True)
def bcp_receive_dmd_frame(self, data):
"""Called when the BCP client receives a new DMD frame from the remote
BCP host. This method forwards the frame to the physical DMD.
"""
self.dmd.update(data)
def bcp_player_added(self, player, num):
"""Sends BCP 'player_added' to the connected BCP hosts."""
self.send('player_added', player_num=num)
def bcp_trigger(self, name, **kwargs):
"""Sends BCP 'trigger' to the connected BCP hosts."""
self.send('trigger', name=name, **kwargs)
def bcp_receive_trigger(self, name=None, **kwargs):
"""Processes an incoming trigger command from a remote BCP host.
"""
if not name:
return
if 'callback' in kwargs:
self.machine.events.post(event=name,
callback=self.bcp_trigger,
name=kwargs.pop('callback'),
**kwargs)
else:
self.machine.events.post(event=name, **kwargs)
def enable_bcp_switch(self, name):
"""Enables sending BCP switch commands when this switch changes state.
Args:
name: string name of the switch
"""
self.machine.switch_controller.add_switch_handler(switch_name=name,
callback=self._switch_sender_callback, state=1, return_info=True)
self.machine.switch_controller.add_switch_handler(switch_name=name,
callback=self._switch_sender_callback, state=0, return_info=True)
def enable_bcp_switches(self, tag):
"""Enables sending BCP switch commands when a switch with a certain tag
changes state.
Args:
tag: string name of the tag for the switches you want to start
sending
"""
for switch in self.machine.switches.items_tagged(tag):
self.enable_bcp_switch(switch.name)
def disable_bcp_switch(self, name):
"""Disables sending BCP switch commands when this switch changes state.
Args:
name: string name of the switch
"""
self.machine.switch_controller.remove_switch_handler(switch_name=name,
callback=self._switch_sender_callback, state=1)
self.machine.switch_controller.remove_switch_handler(switch_name=name,
callback=self._switch_sender_callback, state=0)
def disable_bcp_switches(self, tag):
"""Disables sending BCP switch commands when a switch with a certain tag
changes state.
Args:
tag: string name of the tag for the switches you want to stop
sending
"""
for switch in self.machine.switches.items_tagged(tag):
self.disable_bcp_switch(switch)
def _switch_sender_callback(self, switch_name, state, ms):
self.send('switch', name=switch_name, state=state)
    def _setup_track_volumes(self, config):
        # config is localized to 'Volume'
        # Seed the local per-track volume cache from the configured levels.
        for k, v in config['tracks'].iteritems():
            self.track_volumes[k] = v
def increase_volume(self, track='master', **kwargs):
"""Sends a command to the remote BCP host to increase the volume of a
track by 1 unit.
Args:
track: The string name of the track you want to increase the volume
on. Default is 'master'.
**kwargs: Ignored. Included in case this method is used as a
callback for an event which has other kwargs.
The max value of the volume for a track is set in the Volume: Steps:
entry in the config file. If this increase causes the volume to go above
the max value, the increase is ignored.
"""
try:
self.track_volumes[track] += 1
self.set_volume(self.track_volumes[track], track)
except KeyError:
self.log.warning('Received volume increase request for unknown '
'track "%s"', track)
def decrease_volume(self, track='master', **kwargs):
"""Sends a command to the remote BCP host to decrease the volume of a
track by 1 unit.
Args:
track: The string name of the track you want to decrease the volume
on. Default is 'master'.
**kwargs: Ignored. Included in case this method is used as a
callback for an event which has other kwargs.
If this decrease causes the volume to go below zero, the decrease is
ignored.
"""
try:
self.track_volumes[track] -= 1
self.set_volume(self.track_volumes[track], track)
except KeyError:
self.log.warning('Received volume decrease request for unknown '
'track "%s"', track)
def enable_volume_keys(self, up_tag='volume_up', down_tag='volume_down'):
"""Enables switch handlers to change the master system volume based on
switch tags.
Args:
up_tag: String of a switch tag name that will be used to set which
switch(es), when activated, increase the volume.
down_tag: String of a switch tag name that will be used to set which
switch(es), when activated, decrease the volume.
"""
if self.volume_control_enabled:
return
for switch in self.machine.switches.items_tagged(up_tag):
self.machine.switch_controller.add_switch_handler(switch.name,
self.increase_volume)
for switch in self.machine.switches.items_tagged(down_tag):
self.machine.switch_controller.add_switch_handler(switch.name,
self.decrease_volume)
self.volume_control_enabled = True
def disable_volume_keys(self, up_tag='volume_up', down_tag='volume_down'):
"""Disables switch handlers so that the switches no longer affect the
master system volume.
Args:
up_tag: String of a switch tag name of the switches that will no
longer be used to increase the volume.
down_tag: String of a switch tag name of the switches that will no
longer be used to decrease the volume.
"""
for switch in self.machine.switches.items_tagged(up_tag):
self.machine.switch_controller.remove_switch_handler(switch.name,
self.increase_volume)
for switch in self.machine.switches.items_tagged(down_tag):
self.machine.switch_controller.remove_switch_handler(switch.name,
self.decrease_volume)
self.volume_control_enabled = False
def set_volume(self, volume, track='master', **kwargs):
"""Sends a command to the remote BCP host to set the volume of a track
to the value specified.
Args:
volume: Int of the volume level. Valid range is 0 to the "steps"
configuration in your config file. Values outside this range are
ignored.
track: The string name of the track you want to set the volume on.
Default is 'master'.
**kwargs: Ignored. Included in case this method is used as a
callback for an event which has other kwargs.
"""
try:
volume = int(volume)
except ValueError:
self.log.warning("Received invalid volume setting: '%s'", volume)
return
try:
if volume > self.machine.config['volume']['steps']:
volume = self.machine.config['volume']['steps']
elif volume < 0:
volume = 0
self.track_volumes[track] = volume
volume_float = round(volume/float(self.machine.config['volume']
['steps']), 2)
kwargs = {'volume_' + track: volume_float}
self.send('config', **kwargs)
except KeyError:
self.log.warning('Received volume for unknown track "%s"', track)
def connect_to_light_controller(self):
# Called by worker thread
self.machine.light_controller.register_tick_handler(
self.update_external_shows)
self.light_controller_connected = True
def disconnect_from_light_controller(self):
# Called by worker thread
self.machine.light_controller.deregister_tick_handler(
self.update_external_shows)
self.light_controller_connected = False
def external_show_start(self, name, priority=0, blend=True, leds=None,
lights=None, flashers=None, gis=None):
# Called by worker thread
if not self.light_controller_connected:
self.connect_to_light_controller()
self.external_shows['name'] = ExternalShow(self.machine,
self.external_show_queue,
name, priority, blend, leds,
lights, flashers, gis)
def external_show_stop(self, name):
# Called by worker thread
try:
self.external_shows[name].stop()
del self.external_shows[name]
except KeyError:
pass
if not self.external_shows:
self.disconnect_from_light_controller()
def external_show_frame(self, name, led_data=None, light_data=None,
flasher_data=None, gi_data=None):
# Called by worker thread
if name not in self.external_shows:
return
if led_data:
self.external_shows[name].update_leds(led_data)
if light_data:
self.external_shows[name].update_lights(light_data)
if flasher_data:
self.external_shows[name].update_gis(flasher_data)
if gi_data:
self.external_shows[name].update_flashers(gi_data)
def update_external_shows(self):
# Called by the main thread
while not self.external_show_queue.empty():
update_meth, args = self.external_show_queue.get(False)
update_meth(*args)
class BCPClientSocket(object):
    """Parent class for a BCP client socket. (There can be multiple of these to
    connect to multiple BCP media controllers simultaneously.)

    Args:
        machine: The main MachineController object.
        name: String name this client.
        config: A dictionary containing the configuration for this client.
        receive_queue: The shared Queue() object that holds incoming BCP
            messages.
    """

    def __init__(self, machine, name, config, receive_queue):

        self.log = logging.getLogger('BCPClientSocket.' + name)
        self.log.debug('Setting up BCP Client...')

        self.machine = machine
        self.name = name
        self.receive_queue = receive_queue

        # Validate/default this client's connection settings (host, port,
        # connection_attempts, require_connection).
        self.config = self.machine.config_processor.process_config2(
            'bcp:connections', config, 'bcp:connections')

        self.sending_queue = Queue()
        self.receive_thread = None
        self.sending_thread = None
        self.socket = None
        self.connection_attempts = 0
        self.attempt_socket_connection = True
        # Whether stop() should transmit a 'goodbye' before closing.
        # NOTE(review): this instance attribute shadows the send_goodbye()
        # method defined at the bottom of this class, which makes that
        # method uncallable on instances — confirm which is intended.
        self.send_goodbye = True

        # BCP commands this socket handles itself; everything else is put
        # on the shared receive queue for the main thread.
        self.bcp_commands = {'hello': self.receive_hello,
                             'goodbye': self.receive_goodbye,
                            }

        self.setup_client_socket()

    def setup_client_socket(self):
        """Sets up the client socket."""

        self.connection_attempts += 1
        # -1 means retry forever; otherwise give up after the configured
        # number of attempts.
        if (self.config['connection_attempts'] == -1 or
                self.connection_attempts < self.config['connection_attempts']):

            self.log.debug("Attempting socket connection. Attempt: %s, Max: %s",
                           self.connection_attempts,
                           self.config['connection_attempts'])

            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            try:
                self.socket.connect((self.config['host'], self.config['port']))
                self.log.info("Connected to remote BCP host %s:%s",
                              self.config['host'], self.config['port'])

                BCP.active_connections += 1
                self.connection_attempts = 0

            # Python 2 except syntax; 'v' is the socket error instance.
            except socket.error, v:
                self.socket = None
                self.log.warning("Failed to connect to remote BCP host %s:%s. "
                                 "Error: %s", self.config['host'],
                                 self.config['port'], v)

                if self.config['require_connection']:
                    # A mandatory connection failed: signal MPF to shut down.
                    self.log.critical("BCP connection 'require_connection' "
                                      "setting is True. Unable to continue.")
                    self.machine.done = True

            if self.create_socket_threads():
                self.send_hello()

        else:
            self.attempt_socket_connection = False
            self.log.debug("Max socket connection attempts reached. Giving up")

    def create_socket_threads(self):
        """Creates and starts the sending and receiving threads for the BCP
        socket.

        Returns:
            True if the socket exists and the threads were started. False if
            not.
        """

        if self.socket:

            # Daemon threads so they don't keep the interpreter alive on
            # shutdown; both loops exit when self.socket becomes falsy.
            self.receive_thread = threading.Thread(target=self.receive_loop)
            self.receive_thread.daemon = True
            self.receive_thread.start()

            self.sending_thread = threading.Thread(target=self.sending_loop)
            self.sending_thread.daemon = True
            self.sending_thread.start()

            return True

        else:
            return False

    def stop(self):
        """Stops and shuts down the socket client."""
        self.log.info("Stopping socket client")

        if self.socket:
            if self.send_goodbye:
                self.send('goodbye')

            self.socket.close()
            BCP.active_connections -= 1
            self.socket = None  # Socket threads will exit on this

    def send(self, message):
        """Sends a message to the BCP host.

        Args:
            message: String of the message to send.
        """

        # Lazily reconnect if the socket dropped and retries are allowed.
        if not self.socket and self.attempt_socket_connection:
            self.setup_client_socket()

        self.sending_queue.put(message)

    def receive_loop(self):
        """Receive loop which reads incoming data, assembles commands, and puts
        them onto the receive queue.

        This method is run as a thread.
        """
        socket_bytes = ''

        # Two variants of the loop: the DMD-aware one needs to pull fixed-
        # size binary frames out of the stream before splitting on '\n'.
        if 'dmd' in self.machine.config:

            # Physical DMD is configured, so expect binary dmd_frame data.
            bytes_per_pixel = 1

            try:
                if self.machine.config['dmd']['type'] == 'color':
                    bytes_per_pixel = 3
            except KeyError:
                pass

            dmd_byte_length = (self.machine.config['dmd']['width'] *
                               self.machine.config['dmd']['height'] *
                               bytes_per_pixel)

            self.log.debug("DMD frame byte length: %s*%s*%s = %s",
                           self.machine.config['dmd']['width'],
                           self.machine.config['dmd']['height'],
                           bytes_per_pixel, dmd_byte_length)
            try:
                while self.socket:
                    # NOTE(review): get_from_socket() returns None on error,
                    # which makes this concatenation raise; the broad except
                    # below then reports it via the crash queue — confirm
                    # that is the intended failure path.
                    socket_bytes += self.get_from_socket()

                    if socket_bytes:

                        while socket_bytes.startswith('dmd_frame'):
                            # trim the `dmd_frame?` so we have just the data
                            socket_bytes = socket_bytes[10:]

                            while len(socket_bytes) < dmd_byte_length:
                                # If we don't have the full data, loop until
                                # we have it.
                                socket_bytes += self.get_from_socket()

                            # trim the dmd bytes for the dmd data
                            dmd_data = socket_bytes[:dmd_byte_length]

                            # Save the rest. This is +1 over the last step
                            # since we need to skip the \n separator
                            socket_bytes = socket_bytes[dmd_byte_length+1:]
                            self.machine.bcp.dmd.update(dmd_data)

                        if '\n' in socket_bytes:
                            message, socket_bytes = socket_bytes.split('\n', 1)

                            self.log.debug('Received "%s"', message)

                            cmd, kwargs = decode_command_string(message)

                            if cmd in self.bcp_commands:
                                self.bcp_commands[cmd](**kwargs)
                            else:
                                self.receive_queue.put((cmd, kwargs))

            except Exception:
                # Forward the full traceback to the main thread, which
                # monitors the crash queue.
                exc_type, exc_value, exc_traceback = sys.exc_info()
                lines = traceback.format_exception(exc_type, exc_value,
                                                   exc_traceback)
                msg = ''.join(line for line in lines)
                self.machine.crash_queue.put(msg)

        else:  # no DMD, so just split the text stream on newlines

            try:
                while self.socket:
                    socket_bytes += self.get_from_socket()

                    if socket_bytes and '\n' in socket_bytes:
                        message, socket_bytes = socket_bytes.split('\n', 1)

                        self.log.debug('Received "%s"', message)

                        cmd, kwargs = decode_command_string(message)

                        if cmd in self.bcp_commands:
                            self.bcp_commands[cmd](**kwargs)
                        else:
                            self.receive_queue.put((cmd, kwargs))

            except Exception:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                lines = traceback.format_exception(exc_type, exc_value,
                                                   exc_traceback)
                msg = ''.join(line for line in lines)
                self.machine.crash_queue.put(msg)

    def get_from_socket(self, num_bytes=8192):
        """Reads and returns whatever data is sitting in the receiving socket.

        Args:
            num_bytes: Int of the max number of bytes to read.

        Returns:
            The data in raw string format, or None if the read failed (in
            which case the socket is also cleared).
        """
        try:
            socket_bytes = self.socket.recv(num_bytes)
        except:
            # Any failure (including self.socket being None already) tears
            # down the connection.
            self.socket = None
            socket_bytes = None
        return socket_bytes

    def sending_loop(self):
        """Sending loop which transmits data from the sending queue to the
        remote socket.

        This method is run as a thread.
        """
        try:
            while self.socket:
                # Blocks until a message is queued by send().
                message = self.sending_queue.get()

                try:
                    self.log.debug('Sending "%s"', message)
                    self.socket.sendall(message + '\n')
                except (IOError, AttributeError):
                    # MPF is probably in the process of shutting down
                    pass

        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            msg = ''.join(line for line in lines)
            self.machine.crash_queue.put(msg)

    def receive_hello(self, **kwargs):
        """Processes incoming BCP 'hello' command."""
        self.log.debug('Received BCP Hello from host with kwargs: %s', kwargs)

    def receive_goodbye(self):
        """Processes incoming BCP 'goodbye' command: closes this connection
        without sending 'goodbye' back, and shuts MPF down if this
        connection was required."""
        self.send_goodbye = False
        self.stop()
        self.machine.bcp.remove_bcp_connection(self)

        if self.config['require_connection']:
            self.machine.bcp.shutdown()
            self.machine.done = True

    def send_hello(self):
        """Sends BCP 'hello' command with MPF's BCP and controller versions."""
        self.send(encode_command_string('hello',
                                        version=version.__bcp_version__,
                                        controller_name='Mission Pinball Framework',
                                        controller_version=version.__version__))

    def send_goodbye(self):
        """Sends BCP 'goodbye' command.

        NOTE(review): shadowed by the boolean instance attribute of the same
        name set in __init__, so this method is unreachable on instances —
        confirm whether it should be removed or renamed.
        """
        self.send('goodbye')
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from resource_management.libraries.functions import conf_select
import os
import tarfile
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, validate_security_config_properties, get_params_from_filesystem, \
FILE_TYPE_XML
from resource_management.core.resources.system import File, Execute, Directory, Link
import sys
#if OSCheck.is_windows_family():
# from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
import upgrade
from knox import knox, update_knox_logfolder_permissions
from knox_ldap import ldap
from setup_ranger_knox import setup_ranger_knox
class KnoxGateway(Script):
  """Ambari service script controlling the Knox Gateway component:
  install/configure/start/stop/status plus rolling-upgrade and
  security-status hooks."""

  def get_component_name(self):
    # Stack component name used by the conf-select/stack-select tooling.
    return "knox-server"

  def install(self, env):
    """Installs the Knox packages and removes the stock sandbox topology."""
    self.install_packages(env)
    import params
    env.set_params(params)

    File(format('{knox_conf_dir}/topologies/sandbox.xml'),
         action = "delete",
    )

  def configure(self, env, upgrade_type=None):
    """Writes the Knox gateway and demo-LDAP configuration files."""
    import params
    env.set_params(params)
    knox()
    ldap()

#@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
#class KnoxGatewayWindows(KnoxGateway):
#  def start(self, env):
#    import params
#    env.set_params(params)
#    self.configure(env)
#    # setup_ranger_knox(env)
#    Service(params.knox_gateway_win_service_name, action="start")
#
#  def stop(self, env):
#    import params
#    env.set_params(params)
#    Service(params.knox_gateway_win_service_name, action="stop")
#
#  def status(self, env):
#    import status_params
#    env.set_params(status_params)
#    check_windows_service_status(status_params.knox_gateway_win_service_name)
#
#  def startdemoldap(self, env):
#    import params
#    env.set_params(params)
#    self.configureldap(env)
#    Service(params.knox_ldap_win_service_name, action="start")
#
#  def stopdemoldap(self, env):
#    import params
#    env.set_params(params)
#    Service(params.knox_ldap_win_service_name, action="stop")

#@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
#class KnoxGatewayDefault(KnoxGateway):
#  def get_component_name(self):
#    return {"HDP": "knox-server"}

  def pre_upgrade_restart(self, env, upgrade_type=None):
    """Rolling-upgrade hook: backs up conf/data, repoints the stack
    symlinks to the new version, then restores the backed-up content."""
    import params
    env.set_params(params)

    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
      absolute_backup_dir = None
      if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
        Logger.info("Backing up directories. Initial conf folder: %s" % os.path.realpath(params.knox_conf_dir))
        # This will backup the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
        absolute_backup_dir = upgrade.backup_data()
      # conf-select will change the symlink to the conf folder.
      conf_select.select(params.stack_name, "knox", params.version)
      # hdp_select.select("knox-server", params.version)
      stack_select.select("knox-server", params.version)

      # Extract the tar of the old conf folder into the new conf directory
      if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
        conf_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
        data_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_DATA_ARCHIVE)
        if os.path.exists(conf_tar_source_path):
          extract_dir = os.path.realpath(params.knox_conf_dir)
          conf_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
          Logger.info("Copying %s into %s file." % (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
          Execute(('cp', conf_tar_source_path, conf_tar_dest_path),
                  sudo = True,
          )
          # NOTE(review): tar_archive is not imported by name in this file;
          # presumably it comes from the resource_management star import —
          # confirm, otherwise this raises NameError at upgrade time.
          tar_archive.untar_archive(conf_tar_source_path, extract_dir)
          File(conf_tar_dest_path,
               action = "delete",
          )

          extract_dir = os.path.realpath(params.knox_data_dir+"-"+params.version+"/security")
          if not os.path.exists(extract_dir):
            Directory(extract_dir,
                      owner = params.knox_user,
                      group = params.knox_group,
                      create_parents = True
            )
          data_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_DATA_ARCHIVE)
          Logger.info("Copying %s into %s file." % (upgrade.BACKUP_DATA_ARCHIVE, data_tar_dest_path + "/security"))
          Execute(('cp', data_tar_source_path, data_tar_dest_path ),
                  sudo = True,
          )
          tar_archive.untar_archive(data_tar_source_path, extract_dir)
          File(data_tar_dest_path,
               action = "delete",
          )

      # Re-seed the topology templates from the new version's conf.dist.
      populate_topology_template = format('{sudo} cp /usr/iop/{version}/etc/knox/conf.dist/topologies/* {knox_conf_dir}/topologies')
      Logger.info("Prepare to populate topologies template via command: {0}".format(populate_topology_template))
      Execute(populate_topology_template)

  def start(self, env, upgrade_type=None):
    """Configures Knox, regenerates topologies, and starts the gateway
    daemon (skipped when its pid is already alive)."""
    import params
    env.set_params(params)
    self.configure(env)
    daemon_cmd = format('{knox_bin} start')
    populate_topology = format('cd {knox_conf_dir}/topologies/; {sudo} ambari-python-wrap ./generate_template.py ' + params.HAServers + ' ; {sudo} chmod 640 *.xml; {sudo} chown knox:knox *.xml')
    no_op_test = format('ls {knox_pid_file} >/dev/null 2>&1 && ps -p `cat {knox_pid_file}` >/dev/null 2>&1')
    setup_ranger_knox(upgrade_type=upgrade_type)
    # Used to setup symlink, needed to update the knox managed symlink, in case of custom locations
    if os.path.islink(params.knox_managed_pid_symlink):
      Link(params.knox_managed_pid_symlink,
           to = params.knox_pid_dir,
      )
    if os.path.islink(params.knox_managed_logs_symlink):
      Link(params.knox_managed_logs_symlink,
           to = params.knox_logs_dir,
      )
    update_knox_logfolder_permissions()
    Execute(populate_topology)
    Execute(daemon_cmd,
            user=params.knox_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=no_op_test
    )

  def stop(self, env, upgrade_type=None):
    """Stops the gateway daemon and removes its pid file."""
    import params
    env.set_params(params)
    self.configure(env)
    daemon_cmd = format('{knox_bin} stop')
    update_knox_logfolder_permissions()
    Execute(daemon_cmd,
            environment={'JAVA_HOME': params.java_home},
            user=params.knox_user,
    )
    File(params.knox_pid_file,
         action="delete",
    )

  def status(self, env):
    """Reports component status from the gateway pid file."""
    import status_params
    env.set_params(status_params)
    check_process_status(status_params.knox_pid_file)

  def configureldap(self, env):
    """Writes the demo-LDAP configuration only."""
    import params
    env.set_params(params)
    ldap()

  def startdemoldap(self, env):
    """Starts the bundled demo LDAP server (skipped if already running)."""
    import params
    env.set_params(params)
    self.configureldap(env)
    daemon_cmd = format('{ldap_bin} start')
    no_op_test = format('ls {ldap_pid_file} >/dev/null 2>&1 && ps -p `cat {ldap_pid_file}` >/dev/null 2>&1')
    Execute(daemon_cmd,
            user=params.knox_user,
            environment={'JAVA_HOME': params.java_home},
            not_if=no_op_test
    )

  def stopdemoldap(self, env):
    """Stops the bundled demo LDAP server and removes its pid file."""
    import params
    env.set_params(params)
    self.configureldap(env)
    daemon_cmd = format('{ldap_bin} stop')
    Execute(daemon_cmd,
            environment={'JAVA_HOME': params.java_home},
            user=params.knox_user,
    )
    Execute (format("rm -f {ldap_pid_file}"))

  def security_status(self, env):
    """Validates the Kerberos configuration and reports securityState
    (SECURED_KERBEROS / UNSECURED / ERROR) via structured output."""
    import status_params
    env.set_params(status_params)

    if status_params.security_enabled:
      # Expected security properties: a keytab+principal for the JAAS
      # login, and kerberos enforcement enabled in gateway-site.
      expectations = {}
      expectations.update(build_expectations(
        'krb5JAASLogin',
        None,
        ['keytab', 'principal'],
        None
      ))
      expectations.update(build_expectations(
        'gateway-site',
        {
          "gateway.hadoop.kerberos.secured" : "true"
        },
        None,
        None
      ))

      security_params = {
        "krb5JAASLogin":
          {
            'keytab': status_params.knox_keytab_path,
            'principal': status_params.knox_principal_name
          }
      }
      security_params.update(get_params_from_filesystem(status_params.knox_conf_dir,
        {"gateway-site.xml" : FILE_TYPE_XML}))

      result_issues = validate_security_config_properties(security_params, expectations)
      if not result_issues:  # If all validations passed successfully
        try:
          # Double check the dict before calling execute
          if ( 'krb5JAASLogin' not in security_params
               or 'keytab' not in security_params['krb5JAASLogin']
               or 'principal' not in security_params['krb5JAASLogin']):
            self.put_structured_out({"securityState": "UNSECURED"})
            self.put_structured_out({"securityIssuesFound": "Keytab file and principal are not set."})
            return

          # Confirm a kinit with the configured keytab/principal works.
          cached_kinit_executor(status_params.kinit_path_local,
                                status_params.knox_user,
                                security_params['krb5JAASLogin']['keytab'],
                                security_params['krb5JAASLogin']['principal'],
                                status_params.hostname,
                                status_params.temp_dir)
          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
        except Exception as e:
          self.put_structured_out({"securityState": "ERROR"})
          self.put_structured_out({"securityStateErrorInfo": str(e)})
      else:
        issues = []
        for cf in result_issues:
          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
        self.put_structured_out({"securityState": "UNSECURED"})
    else:
      self.put_structured_out({"securityState": "UNSECURED"})
# Script entry point: hand control to Ambari's Script.execute() dispatcher,
# which invokes the method named by the requested command.
if __name__ == "__main__":
  KnoxGateway().execute()
| |
""" These classes are to help deal with ranged coordinate data and the
things associated with those coordinates. """
import sys, re
from collections import namedtuple
class RangeGeneric(object):
    """A generic range object using a 1-indexed start and a 1-indexed,
    inclusive end.

    Slicing is permitted and returns a new RangeGeneric, but keep in mind
    the slicing is 1-indexed as well for this object.

    These basic range classes deliberately avoid an options helper class
    to stay lightweight: they can carry a payload but nothing fancier.

    :param start: 1-indexed first position
    :param end: 1-indexed last position (inclusive)
    :param payload: optional arbitrary object carried along with the range
    :type start: int
    :type end: int
    """
    def __init__(self, start, end, payload=None):
        self.start = start
        self.end = end
        # Bug fix: the payload argument was previously discarded
        # (self.payload was unconditionally set to None).
        self.payload = payload
        # Offset applied to start when deriving new ranges; subclasses with
        # a 0-indexed external representation (e.g. Bed) override this.
        self._start_offset = 0

    @property
    def length(self):
        """Number of positions covered (start and end both inclusive)."""
        return self.end - self.start + 1

    def copy(self):
        """Return a shallow copy; the payload is shared, not deep-copied."""
        return type(self)(self.start + self._start_offset, self.end, self.payload)

    def set_payload(self, inpay):
        """Set the payload.

        :param inpay: payload input
        """
        self.payload = inpay

    def __iter__(self):
        """Iterate over every 1-indexed position in the range."""
        for i in range(self.start, self.end + 1):
            yield i

    def __getitem__(self, key):
        """1-indexed slicing; returns the intersection with the slice,
        or None when the slice does not touch this range.

        :raises TypeError: when key is not a slice
        :raises ValueError: when the slice has a step
        """
        # Bug fix: isinstance is checked before key.step so an integer key
        # raises TypeError instead of AttributeError.
        if not isinstance(key, slice):
            raise TypeError('RangeGeneric indices must be slices')
        if key.step:
            raise ValueError('a step should not be used when slicing a sequence range')
        if key.start > self.end:
            return None
        if key.stop < self.start:
            return None
        # Bug fix: previously passed self.dir (which RangeGeneric does not
        # have) as a fourth constructor argument, raising on every slice.
        return RangeGeneric(max(key.start, self.start),
                            min(key.stop, self.end),
                            self.payload)

    def __setitem__(self, key, value):
        # Item assignment is intentionally a no-op.
        # Bug fix: __setitem__ previously lacked the value parameter, so any
        # assignment raised TypeError instead of being ignored.
        return

    def __delitem__(self, key):
        # Item deletion is intentionally a no-op.
        return

    def __str__(self):
        # Bug fix: previously referenced self.dir, which only the
        # GenomicRange subclass defines, raising AttributeError here.
        return str(self.start) + '-' + str(self.end) + ' ' + str(self.payload)
class GenomicRange(RangeGeneric):
    """A basic class for keeping genomic range data. It is 1-indexed for
    both start and end.

    It can carry directional (strand) information, but that information is
    not required for equality, adjacency, overlap, etc.

    :param chr: chromosome name
    :param start: 1-indexed starting base
    :param end: 1-indexed ending base
    :param payload: optional payload
    :param dir: optional direction/strand ('+' or '-')
    :type chr: char
    :type start: int
    :type end: int
    :type payload: Object
    :type dir: char
    """
    def __init__(self, chr, start, end, payload=None, dir=None):
        super(GenomicRange, self).__init__(start, end, payload=payload)
        self.chr = chr
        self.dir = dir

    def __getitem__(self, key):
        """1-indexed slicing; returns the intersection with the slice, or
        None when the slice does not touch this range.

        :raises TypeError: when key is not a slice
        :raises ValueError: when the slice has a step
        """
        # Bug fix: isinstance is checked before key.step so an integer key
        # raises TypeError instead of AttributeError.
        if not isinstance(key, slice):
            raise TypeError('GenomicRange indices must be slices')
        if key.step:
            raise ValueError('a step should not be used when slicing a sequence range')
        if key.start > self.end:
            return None
        if key.stop < self.start:
            return None
        return GenomicRange(self.chr,
                            max(key.start, self.start),
                            min(key.stop, self.end),
                            self.payload,
                            self.dir)

    def __str__(self):
        return self.get_range_string() + ' ' + str(self.payload) + ' ' + str(self.dir)

    def copy(self):
        """Create a new copy of self. Does not deep-copy the payload.

        :return: copied range
        :rtype: GenomicRange
        """
        return type(self)(self.chr,
                          self.start + self._start_offset,
                          self.end,
                          self.payload,
                          self.dir)

    @property
    def range(self):
        """A bare GenomicRange over the same span, without payload or
        direction. For compatibility with range-based tools that need a
        plain range; similar to copy() but always a GenomicRange with no
        options (the payload may otherwise get used downstream).

        :return: this range, stripped of payload and direction
        :rtype: GenomicRange
        """
        return GenomicRange(self.chr, self.start, self.end)

    def get_bed_array(self):
        """Return a basic BED representation of this range.

        :return: [chr, start (0-indexed), end (1-indexed)], plus the
                 direction when one is set
        :rtype: list
        """
        arr = [self.chr, self.start - 1, self.end]
        if self.dir:
            arr.append(self.dir)
        return arr

    @property
    def direction(self):
        """The direction or strand +/- (or None if not set).

        :rtype: char
        """
        return self.dir

    def set_direction(self, dir):
        """Set the direction.

        :param dir: direction + or -
        :type dir: char
        """
        self.dir = dir

    def equals(self, gr):
        """Check for equality. Does not consider direction.

        Note: this class previously defined equals() twice with identical
        semantics; the duplicate has been removed.

        :param gr: another genomic range
        :type gr: GenomicRange
        :return: True if chromosome, start and end all match
        :rtype: bool
        """
        return (self.chr == gr.chr
                and self.start == gr.start
                and self.end == gr.end)

    def get_range_string(self):
        """The range string representation, similar to the default input
        for the UCSC genome browser.

        :return: representation by string like chr2:801-900
        :rtype: string
        """
        return self.chr + ":" + str(self.start) + "-" + str(self.end)

    def get_bed_coordinates(self):
        """Same as get_bed_array but never includes direction.
        These are 0-indexed start, 1-indexed stop coordinates.

        :return: bed array [chr, start-1, end]
        """
        return [self.chr, self.start - 1, self.end]

    def get_genomic_coordinates(self):
        """The 1-indexed coordinates in list form.

        :return: list of coords [chr, start (1-indexed), end (1-indexed)]
        :rtype: list
        """
        return [self.chr, self.start, self.end]

    def adjacent(self, rng2, use_direction=False):
        """Test for adjacency (the ranges abut with no gap and no overlap).

        :param rng2: range to compare against
        :param use_direction: when True the strands must also match
                              (default False)
        :type rng2: GenomicRange
        :type use_direction: bool
        :return: True when adjacent
        :rtype: bool
        """
        if self.chr != rng2.chr:
            return False
        # Bug fix: use_direction was referenced in the body but never
        # declared as a parameter, so every call raised NameError.
        if use_direction and self.direction != rng2.direction:
            return False
        if self.end == rng2.start - 1:
            return True
        if self.start - 1 == rng2.end:
            return True
        return False

    def overlaps(self, in_genomic_range, padding=0):
        """Do the ranges overlap?

        :param in_genomic_range: range to compare to
        :param padding: extend the other range by this many bases on both
                        ends before testing (default 0)
        :type in_genomic_range: GenomicRange
        :type padding: int
        :return: True if they overlap
        :rtype: bool
        """
        if padding > 0:
            in_genomic_range = GenomicRange(in_genomic_range.chr,
                                            max([1, in_genomic_range.start - padding]),
                                            in_genomic_range.end + padding)
        if self.chr != in_genomic_range.chr:
            return False
        # Two inclusive ranges on the same chromosome overlap unless one
        # ends strictly before the other begins. This replaces the long,
        # partially redundant case analysis of the original (whose final
        # "unprogrammed error" branch was unreachable).
        if self.end < in_genomic_range.start:
            return False
        if in_genomic_range.end < self.start:
            return False
        return True

    def overlap_size(self, in_genomic_range):
        """The size of the overlap.

        :param in_genomic_range: the range to intersect
        :type in_genomic_range: GenomicRange
        :return: count of overlapping bases; 0 when disjoint or on
                 different chromosomes
        :rtype: int
        """
        if self.chr != in_genomic_range.chr:
            return 0
        # Intersection length of two inclusive ranges, clamped at zero;
        # equivalent to the original's exhaustive case analysis.
        return max(0, min(self.end, in_genomic_range.end)
                   - max(self.start, in_genomic_range.start) + 1)

    def merge(self, range2):
        """Merge with another range to make a longer range spanning both.
        Keeps the payload/direction of self (not range2).

        :param range2:
        :type range2: GenomicRange
        :return: bigger range covering both, or None when on different
                 chromosomes
        :rtype: GenomicRange
        """
        if self.chr != range2.chr:
            return None
        return type(self)(self.chr,
                          min(self.start, range2.start) + self._start_offset,
                          max(self.end, range2.end),
                          self.payload,
                          self.dir)

    def intersect(self, range2):
        """Return the chunk the two ranges share as a range.
        Payload/direction are passed to the result from this object.

        :param range2:
        :type range2: GenomicRange
        :return: range with the intersecting segment, or None if not
                 overlapping
        :rtype: GenomicRange
        """
        if not self.overlaps(range2):
            return None
        return type(self)(self.chr,
                          max(self.start, range2.start) + self._start_offset,
                          min(self.end, range2.end),
                          self.payload,
                          self.dir)

    def cmp(self, range2, overlap_size=0):
        """The comparator for ranges.

        * return 1 if greater than range2
        * return -1 if less than range2
        * return 0 if overlapped

        :param range2:
        :param overlap_size: allow some padding for an 'equal' comparison
                             (default 0)
        :type range2: GenomicRange
        :type overlap_size: int
        """
        if self.overlaps(range2, padding=overlap_size):
            return 0
        if self.chr < range2.chr:
            return -1
        elif self.chr > range2.chr:
            return 1
        elif self.end < range2.start:
            return -1
        elif self.start > range2.end:
            return 1
        # Unreachable for well-formed ranges: same-chromosome,
        # non-overlapping ranges satisfy one of the cases above.
        sys.stderr.write("ERROR: cmp function unexpected state\n")
        sys.exit()
        return 0

    def subtract(self, range2):
        """Remove range2 from this range; keep options from self.

        :param range2:
        :type range2: GenomicRange
        :return: list of remaining GenomicRanges (possibly empty)
        :rtype: GenomicRange[]
        """
        outranges = []
        if self.chr != range2.chr or not self.overlaps(range2):
            outranges.append(self.copy())
            return outranges
        if range2.start <= self.start and range2.end >= self.end:
            return outranges  # fully covered: delete all
        # _start_offset makes the constructor calls below work for either
        # 0- or 1-indexed start conventions (see Bed).
        if range2.start > self.start:  # keep the left side
            outranges.append(type(self)(self.chr,
                                        self.start + self._start_offset,
                                        range2.start - 1,
                                        self.payload,
                                        self.dir))
        if range2.end < self.end:  # keep the right side
            outranges.append(type(self)(self.chr,
                                        range2.end + 1 + self._start_offset,
                                        self.end,
                                        self.payload,
                                        self.dir))
        return outranges

    def distance(self, rng):
        """The distance between two ranges.

        :param rng: another range
        :type rng: GenomicRange
        :returns: bases separating, 0 if overlapped or adjacent, -1 if on
                  different chromosomes
        :rtype: int
        """
        if self.chr != rng.chr:
            return -1
        c = self.cmp(rng)
        if c == 0:
            return 0
        if c < 0:
            return rng.start - self.end - 1
        return self.start - rng.end - 1
def GenomicRangeFromString(range_string, payload=None, dir=None):
    """Constructor for a GenomicRange object that takes a UCSC-style
    string such as 'chr1:100-200'.

    Exits the program (after writing to stderr) when the string does not
    match the chr:start-end pattern, preserving the original best-effort
    behaviour.

    :param range_string: 'chr:start-end' with 1-indexed coordinates
    :param payload: optional payload passed through to the range
    :param dir: optional direction/strand passed through to the range
    :return: the parsed range
    :rtype: GenomicRange
    """
    # Raw string fixes the invalid '\d' escape-sequence warning raised by
    # modern Python for non-raw regex literals.
    m = re.match(r'^(.+):(\d+)-(\d+)$', range_string)
    if not m:
        sys.stderr.write("ERROR bad genomic range string\n" + range_string + "\n")
        sys.exit()
    chr = m.group(1)
    start = int(m.group(2))
    end = int(m.group(3))
    return GenomicRange(chr, start, end, payload, dir)
class Bed(GenomicRange):
    """BED-style range: chromosome, 0-indexed start, 1-indexed end.

    A child of GenomicRange that converts the 0-based start of a BED
    record to the 1-based internal representation on construction, and
    shifts it back on output.

    :param chrom: chromosome name
    :param start0: 0-indexed start
    :param finish: 1-indexed end
    :param payload: optional payload
    :param dir: optional direction/strand
    :type chrom: string
    :type start0: int
    :type finish: int
    """
    def __init__(self, chrom, start0, finish, payload=None, dir=None):
        # Internally everything is 1-indexed, so shift the BED start up.
        super(Bed, self).__init__(chrom, start0 + 1, finish, payload, dir)
        # Outputs/derived ranges shift start back down so they stay 0-based.
        self._start_offset = -1

    def __str__(self):
        fields = (self.chr, self.start - 1, self.end, self.payload, self.dir)
        return "\t".join(str(f) for f in fields)
| |
# coding: utf-8
from unittest import skip
from uuid import UUID
import arrow
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.test import TestCase
from django.utils import timezone
from certificate_engine.types import CertificateTypes
from x509_pki.models import Certificate, DistinguishedName
from x509_pki.tests.factories import CertificateFactory, DistinguishedNameFactory, UserFactory
class ModelDistinguishedNameTest(TestCase):
    """Tests for the DistinguishedName model: rendering of the DN strings
    and immutability of saved records."""

    def test_distinguished_name_to_dn(self):
        # A fully populated DN should render both the comma-separated `dn`
        # string and the slash-separated openssl-style `subj` string, and
        # expose every individual field plus the slugified common name.
        dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        self.assertEqual(
            dn.dn,
            "CN=test.bounca.org, O=Repleo, OU=IT Department, L=Amsterdam, ST=Noord-Holland, "
            "EMAIL=info@repleo.nl, C=NL",
        )
        self.assertEqual(
            dn.subj,
            "/CN=test.bounca.org/O=Repleo/OU=IT Department/L=Amsterdam/ST=Noord-Holland"
            "/emailAddress=info@repleo.nl/C=NL",
        )
        self.assertEqual(dn.countryName, "NL")
        self.assertEqual(dn.stateOrProvinceName, "Noord-Holland")
        self.assertEqual(dn.localityName, "Amsterdam")
        self.assertEqual(dn.organizationName, "Repleo")
        self.assertEqual(dn.organizationalUnitName, "IT Department")
        self.assertEqual(dn.emailAddress, "info@repleo.nl")
        self.assertEqual(dn.commonName, "test.bounca.org")
        self.assertEqual(dn.subjectAltNames, ["demo.bounca.org"])
        self.assertEqual(dn.slug_commonName, "testbouncaorg")

    def test_distinguished_name_update_not_allowed(self):
        # Saving a modification to an already-stored DistinguishedName must
        # raise: DN records are immutable once persisted.
        dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        dn = DistinguishedName.objects.get(id=dn.id)
        dn.commonName = "www.bounca.org"
        with self.assertRaises(ValidationError) as c:
            dn.save()
        self.assertEqual(c.exception.message, "Not allowed to update a DistinguishedName record")

    def test_distinguished_name_validation_in_future_not_allowed(self):
        # NOTE(review): this body is byte-for-byte identical to
        # test_distinguished_name_update_not_allowed and never exercises any
        # "validation in future" rule suggested by the method name — confirm
        # the intended scenario and fix the fixture/assertions accordingly.
        dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        dn = DistinguishedName.objects.get(id=dn.id)
        dn.commonName = "www.bounca.org"
        with self.assertRaises(ValidationError) as c:
            dn.save()
        self.assertEqual(c.exception.message, "Not allowed to update a DistinguishedName record")
class ModelCertificateTest(TestCase):
    """Tests for the Certificate model: generation of root, intermediate,
    server, client and OCSP certificates, chain/DN validation rules, and
    revocation bookkeeping on delete."""

    @classmethod
    def setUpTestData(cls):
        # Shared fixture: a 10-year root CA with a 5-year intermediate CA
        # beneath it, both owned by the same user.
        cls.root_dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="ca.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        cls.user = UserFactory()
        cls.ca = Certificate()
        cls.ca.type = CertificateTypes.ROOT
        cls.ca.name = "repleo root ca"
        cls.ca.dn = cls.root_dn
        cls.ca.expires_at = arrow.get(timezone.now()).shift(years=+10).date()
        cls.ca.revoked_at = None
        cls.ca.owner = cls.user
        cls.ca.save()
        cls.ca.refresh_from_db()
        cls.ca = Certificate.objects.get(pk=cls.ca.pk)
        cls.int_dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="int.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        cls.int = Certificate(parent=cls.ca)
        cls.int.type = CertificateTypes.INTERMEDIATE
        cls.int.name = "repleo int ca"
        cls.int.dn = cls.int_dn
        cls.int.crl_distribution_url = "https://ca.demo.repleo.nl/crl/test.crl.pem"
        cls.int.ocsp_distribution_host = "https://ca.demo.repleo.nl/ocsp"
        cls.int.expires_at = arrow.get(timezone.now()).shift(years=+5).date()
        cls.int.revoked_at = None
        cls.int.owner = cls.user
        cls.int.save()
        cls.int.refresh_from_db()

    def test_generate_root_certificate(self):
        # Saving a ROOT certificate should populate DN string, serial,
        # validity window, and leave it unrevoked/unexpired.
        dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test bounca org",
            subjectAltNames=["demo.bounca.org"],
        )
        cert = Certificate()
        cert.type = CertificateTypes.ROOT
        cert.name = "repleo root ca1"
        cert.dn = dn
        cert.expires_at = arrow.get(timezone.now()).shift(years=+10).date()
        cert.revoked_at = None
        cert.owner = self.user
        cert.save()
        cert.refresh_from_db()
        self.assertEqual(
            cert.dn.dn,
            "CN=test bounca org, O=Repleo, OU=IT Department, "
            "L=Amsterdam, ST=Noord-Holland, EMAIL=info@repleo.nl, C=NL",
        )
        self.assertEqual(cert.type, CertificateTypes.ROOT)
        self.assertEqual(cert.name, "repleo root ca1")
        self.assertEqual(cert.created_at, arrow.get(cert.expires_at).shift(years=-10).date())
        self.assertEqual(cert.expires_at, arrow.get(cert.created_at).shift(years=+10).date())
        self.assertIsNone(cert.revoked_at)
        self.assertEqual(cert.owner, self.user)
        # revoked_uuid is the all-zero UUID sentinel while not revoked.
        self.assertEqual(cert.revoked_uuid, UUID(int=0))
        self.assertNotEqual(cert.serial, 0)
        self.assertIsNone(cert.slug_revoked_at)
        self.assertFalse(cert.revoked)
        self.assertFalse(cert.expired)
        self.assertEqual(cert.slug_name, "repleo-root-ca1")

    def test_generate_intermediate_certificate(self):
        # An INTERMEDIATE under the root fixture should also get a CRL
        # store and keep its CRL/OCSP distribution endpoints.
        dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        cert = Certificate(parent=self.ca)
        cert.type = CertificateTypes.INTERMEDIATE
        cert.name = "repleo int ca1"
        cert.dn = dn
        cert.crl_distribution_url = "https://ca.demo.repleo.nl/crl/test.crl.pem"
        cert.ocsp_distribution_host = "https://ca.demo.repleo.nl/ocsp"
        cert.expires_at = arrow.get(timezone.now()).shift(years=+5).date()
        cert.revoked_at = None
        cert.owner = self.user
        cert.save()
        cert.refresh_from_db()
        self.assertEqual(
            cert.dn.dn,
            "CN=test.bounca.org, O=Repleo, OU=IT Department, "
            "L=Amsterdam, ST=Noord-Holland, EMAIL=info@repleo.nl, C=NL",
        )
        self.assertEqual(cert.type, CertificateTypes.INTERMEDIATE)
        self.assertEqual(cert.name, "repleo int ca1")
        self.assertEqual(cert.crl_distribution_url, "https://ca.demo.repleo.nl/crl/test.crl.pem")
        self.assertEqual(cert.ocsp_distribution_host, "https://ca.demo.repleo.nl/ocsp")
        self.assertEqual(cert.created_at, arrow.get(cert.expires_at).shift(years=-5).date())
        self.assertEqual(cert.expires_at, arrow.get(cert.created_at).shift(years=+5).date())
        self.assertIsNone(cert.revoked_at)
        self.assertEqual(cert.owner, self.user)
        self.assertEqual(cert.revoked_uuid, UUID(int=0))
        self.assertNotEqual(cert.serial, 0)
        self.assertIsNone(cert.slug_revoked_at)
        self.assertFalse(cert.revoked)
        self.assertFalse(cert.expired)
        self.assertIsNotNone(cert.crlstore.crl)

    def test_generate_server_certificate(self):
        # A SERVER_CERT leaf has no crlstore; deleting it records
        # revocation (revoked_at/slug/uuid) instead of removing the row.
        dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="www.repleo.nl",
            subjectAltNames=["repleo.nl"],
        )
        cert = Certificate(parent=self.int, dn=dn)
        cert.type = CertificateTypes.SERVER_CERT
        cert.name = "www.repleo.nl"
        cert.dn = dn
        cert.expires_at = arrow.get(timezone.now()).shift(years=+1).date()
        cert.revoked_at = None
        cert.owner = self.user
        cert.save()
        cert.refresh_from_db()
        self.assertEqual(
            cert.dn.dn,
            "CN=www.repleo.nl, O=Repleo, OU=IT Department, "
            "L=Amsterdam, ST=Noord-Holland, EMAIL=info@repleo.nl, C=NL",
        )
        self.assertEqual(cert.type, CertificateTypes.SERVER_CERT)
        self.assertEqual(cert.name, "www.repleo.nl")
        self.assertEqual(cert.created_at, arrow.get(cert.expires_at).shift(years=-1).date())
        self.assertEqual(cert.expires_at, arrow.get(cert.created_at).shift(years=+1).date())
        self.assertIsNone(cert.revoked_at)
        self.assertEqual(cert.owner, self.user)
        self.assertEqual(cert.revoked_uuid, UUID(int=0))
        self.assertNotEqual(cert.serial, 0)
        self.assertIsNone(cert.slug_revoked_at)
        self.assertFalse(cert.revoked)
        self.assertFalse(cert.expired)
        with self.assertRaises(ObjectDoesNotExist) as c:
            cert.crlstore
        self.assertEqual(str(c.exception), "Certificate has no crlstore.")
        cert.delete()
        cert.refresh_from_db()
        self.assertIsNotNone(cert.revoked_at)
        self.assertIsNotNone(cert.slug_revoked_at)
        self.assertNotEqual(cert.revoked_uuid, UUID(int=0))

    def test_generate_client_certificate(self):
        # Same shape as the server-cert test but for a CLIENT_CERT with an
        # e-mail common name and no subjectAltNames.
        dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="info@bounca.org",
        )
        cert = Certificate(parent=self.int, dn=dn)
        cert.type = CertificateTypes.CLIENT_CERT
        cert.name = "info@bounca.org"
        cert.dn = dn
        cert.expires_at = arrow.get(timezone.now()).shift(years=+1).date()
        cert.revoked_at = None
        cert.owner = self.user
        cert.save()
        cert.refresh_from_db()
        self.assertEqual(
            cert.dn.dn,
            "CN=info@bounca.org, O=Repleo, OU=IT Department, "
            "L=Amsterdam, ST=Noord-Holland, EMAIL=info@repleo.nl, C=NL",
        )
        self.assertEqual(cert.type, CertificateTypes.CLIENT_CERT)
        self.assertEqual(cert.name, "info@bounca.org")
        self.assertEqual(cert.created_at, arrow.get(cert.expires_at).shift(years=-1).date())
        self.assertEqual(cert.expires_at, arrow.get(cert.created_at).shift(years=+1).date())
        self.assertIsNone(cert.revoked_at)
        self.assertEqual(cert.owner, self.user)
        self.assertEqual(cert.revoked_uuid, UUID(int=0))
        self.assertNotEqual(cert.serial, 0)
        self.assertIsNone(cert.slug_revoked_at)
        self.assertFalse(cert.revoked)
        self.assertFalse(cert.expired)
        with self.assertRaises(ObjectDoesNotExist) as c:
            cert.crlstore
        self.assertEqual(str(c.exception), "Certificate has no crlstore.")
        cert.delete()
        cert.refresh_from_db()
        self.assertIsNotNone(cert.revoked_at)
        self.assertIsNotNone(cert.slug_revoked_at)
        self.assertNotEqual(cert.revoked_uuid, UUID(int=0))

    @skip("TODO check if values are valid")
    def test_generate_ocsp_certificate(self):
        # Skipped: the expected CN/name values below don't match the
        # fixture's commonName ("ca.demo.repleo.nl"), see the TODO.
        dn = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="ca.demo.repleo.nl",
        )
        cert = Certificate(parent=self.int, dn=dn)
        cert.type = CertificateTypes.OCSP
        cert.name = "ca.demo.repleo.nl"
        cert.dn = dn
        cert.expires_at = arrow.get(timezone.now()).shift(years=+1).date()
        cert.revoked_at = None
        cert.owner = self.user
        cert.save()
        cert.refresh_from_db()
        self.assertEqual(
            cert.dn.dn,
            "CN=https://ca.demo.repleo.nl/ocsp, O=Repleo, OU=IT Department, "
            "L=Amsterdam, ST=Noord-Holland, EMAIL=info@repleo.nl, C=NL",
        )
        self.assertEqual(cert.type, CertificateTypes.OCSP)
        self.assertEqual(cert.name, "https://ca.demo.repleo.nl/ocsp")
        self.assertEqual(cert.created_at, arrow.get(cert.expires_at).shift(years=-1).date())
        self.assertEqual(cert.expires_at, arrow.get(cert.created_at).shift(years=+1).date())
        self.assertIsNone(cert.revoked_at)
        self.assertEqual(cert.owner, self.user)
        self.assertEqual(cert.revoked_uuid, UUID(int=0))
        self.assertNotEqual(cert.serial, 0)
        self.assertIsNone(cert.slug_revoked_at)
        self.assertFalse(cert.revoked)
        self.assertFalse(cert.expired)
        with self.assertRaises(ValidationError) as c:
            cert.generate_crl()
        self.assertEqual(c.exception.message, "CRL File can only be generated for Intermediate Certificates")

    def test_days_valid(self):
        # days_valid should be computed consistently before and after save.
        # NOTE(review): the literal 3652 assumes a fixed number of leap days
        # in the 10-year window starting today — this assertion may be
        # date-dependent; confirm against the days_valid implementation.
        dn_ca = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        cert = CertificateFactory(dn=dn_ca, type=CertificateTypes.ROOT)
        cert.expires_at = arrow.get(timezone.now()).shift(years=+10).date()
        self.assertEqual(cert.days_valid, 3652)
        cert.save()
        cert.refresh_from_db()
        self.assertEqual(cert.days_valid, 3652)

    def test_set_name_to_common_name(self):
        # An empty name should default to the DN's commonName on save.
        dn_ca = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        cert = CertificateFactory(name="", dn=dn_ca, type=CertificateTypes.ROOT)
        cert.save()
        cert.refresh_from_db()
        self.assertEqual(cert.name, cert.dn.commonName)
        self.assertEqual(cert.slug_name, "testbouncaorg")

    def test_generate_root_certificate_unique_violate_name(self):
        # Two root certificates with the same name must be rejected.
        cert = CertificateFactory()
        cert.type = CertificateTypes.ROOT
        cert.name = "repleo root ca 1"
        cert.save()
        cert = CertificateFactory()
        cert.type = CertificateTypes.ROOT
        cert.name = "repleo root ca 1"
        with self.assertRaises(ValidationError):
            cert.save()

    def test_generate_root_certificate_unique_violate_dn(self):
        # Two root certificates sharing the same DN must be rejected even
        # with distinct names.
        dn_ca = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        cert = CertificateFactory()
        cert.dn = dn_ca
        cert.type = CertificateTypes.ROOT
        cert.name = "repleo root ca 1"
        cert.save()
        cert = CertificateFactory()
        cert.dn = dn_ca
        cert.type = CertificateTypes.ROOT
        cert.name = "repleo root ca 2"
        with self.assertRaises(ValidationError):
            cert.save()

    def test_parent_not_allowed_for_root_certificate(self):
        # A root CA must not itself have a parent certificate.
        ca = CertificateFactory(type=CertificateTypes.ROOT)
        ca.save()
        cert = CertificateFactory(type=CertificateTypes.ROOT, parent=ca)
        cert.type = CertificateTypes.ROOT
        cert.name = "repleo root ca 1"
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(c.exception.message, "Not allowed to have a parent certificate for a Root CA certificate")

    def test_parent_intermediate_has_no_root_parent(self):
        # A non-root certificate without a parent must be rejected.
        cert = CertificateFactory(type=CertificateTypes.INTERMEDIATE)
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(c.exception.message, "Non Root certificate should have a parent")

    def test_client_cert_parent_no_intermediate_parent(self):
        # Client certs may only be issued by an intermediate CA, not a root.
        ca = CertificateFactory(type=CertificateTypes.ROOT)
        ca.save()
        cert = CertificateFactory(
            type=CertificateTypes.CLIENT_CERT, parent=ca, crl_distribution_url=None, ocsp_distribution_host=None
        )
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(c.exception.message, "Client certificate can only be generated for intermediate CA parent")

    def test_server_cert_parent_no_intermediate_parent(self):
        # Server certs may only be issued by an intermediate CA, not a root.
        ca = CertificateFactory(type=CertificateTypes.ROOT)
        ca.save()
        cert = CertificateFactory(
            type=CertificateTypes.SERVER_CERT, parent=ca, crl_distribution_url=None, ocsp_distribution_host=None
        )
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(c.exception.message, "Server certificate can only be generated for intermediate CA parent")

    def test_ocsp_cert_parent_no_intermediate_parent(self):
        # OCSP certs may only be issued by an intermediate CA, not a root.
        ca = CertificateFactory(type=CertificateTypes.ROOT)
        ca.save()
        cert = CertificateFactory(
            type=CertificateTypes.OCSP, parent=ca, crl_distribution_url=None, ocsp_distribution_host=None
        )
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(c.exception.message, "OCSP certificate can only be generated for intermediate CA parent")

    def test_ocsp_cert_parent_is_not_intermediate_parent(self):
        # NOTE(review): this body is identical to
        # test_ocsp_cert_parent_no_intermediate_parent — likely one of the
        # two was meant to cover a different parent type; confirm intent.
        ca = CertificateFactory(type=CertificateTypes.ROOT)
        ca.save()
        cert = CertificateFactory(
            type=CertificateTypes.OCSP, parent=ca, crl_distribution_url=None, ocsp_distribution_host=None
        )
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(c.exception.message, "OCSP certificate can only be generated for intermediate CA parent")

    def test_intermediate_dn_country_difference(self):
        # Strict policy: intermediate country must match the root's.
        dn_ca = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        dn_im = DistinguishedNameFactory(
            countryName="IT",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        ca = CertificateFactory(type=CertificateTypes.ROOT, dn=dn_ca)
        ca.save()
        cert = CertificateFactory(type=CertificateTypes.INTERMEDIATE, parent=ca, dn=dn_im)
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(
            c.exception.message, "Country name of Intermediate CA and Root CA should match (policy strict)"
        )

    def test_intermediate_dn_state_difference(self):
        # Strict policy: intermediate state/province must match the root's.
        dn_ca = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        dn_im = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Zuid-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        ca = CertificateFactory(type=CertificateTypes.ROOT, dn=dn_ca)
        ca.save()
        cert = CertificateFactory(type=CertificateTypes.INTERMEDIATE, parent=ca, dn=dn_im)
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(
            c.exception.message, "State Or Province Name of Intermediate CA and Root CA should match (policy strict)"
        )

    def test_intermediate_dn_organization_difference(self):
        # Strict policy: intermediate organization must match the root's.
        dn_ca = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        dn_im = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="BJA Electronics",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        ca = CertificateFactory(type=CertificateTypes.ROOT, dn=dn_ca)
        ca.save()
        cert = CertificateFactory(type=CertificateTypes.INTERMEDIATE, parent=ca, dn=dn_im)
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(
            c.exception.message, "Organization Name of Intermediate CA and Root CA should match (policy strict)"
        )

    def test_child_expire_date_exceeds_parent_expire_date(self):
        # A child certificate may not outlive its parent CA.
        dn_ca = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        dn_im = DistinguishedNameFactory(
            countryName="NL",
            stateOrProvinceName="Noord-Holland",
            localityName="Amsterdam",
            organizationName="Repleo",
            organizationalUnitName="IT Department",
            emailAddress="info@repleo.nl",
            commonName="test.bounca.org",
            subjectAltNames=["demo.bounca.org"],
        )
        ca = CertificateFactory(type=CertificateTypes.ROOT, dn=dn_ca)
        ca.expires_at = arrow.get(timezone.now()).shift(years=+10).date()
        ca.save()
        cert = CertificateFactory(type=CertificateTypes.INTERMEDIATE, parent=ca, dn=dn_im)
        cert.expires_at = arrow.get(timezone.now()).shift(years=+20).date()
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(
            c.exception.message,
            "Child Certificate (expire date: {}) should not "
            "expire later than parent CA (expire date: {})".format(cert.expires_at, ca.expires_at),
        )

    def test_passphrase_out_not_matching(self):
        # Mismatching passphrase/confirmation must be rejected on save.
        cert = CertificateFactory(type=CertificateTypes.ROOT)
        cert.passphrase_out = "test"
        cert.passphrase_out_confirmation = "test2"
        with self.assertRaises(ValidationError) as c:
            cert.save()
        self.assertEqual(c.exception.message, "The two passphrase fields didn't match.")
| |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from future.builtins import zip
from unittest import TestCase, main
from tempfile import mkstemp
from os import close, remove
from os.path import join
from collections import Iterable
from copy import deepcopy
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_frame_equal
from qiita_core.util import qiita_test_checker
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_db.exceptions import (QiitaDBUnknownIDError,
QiitaDBNotImplementedError,
QiitaDBDuplicateHeaderError,
QiitaDBExecutionError,
QiitaDBColumnError,
QiitaDBWarning,
QiitaDBError,
QiitaDBDuplicateSamplesError)
from qiita_db.study import Study
from qiita_db.data import RawData, ProcessedData
from qiita_db.util import exists_table, get_mountpoint, get_count
from qiita_db.metadata_template.prep_template import PrepTemplate, PrepSample
from qiita_db.metadata_template.sample_template import SampleTemplate, Sample
from qiita_db.metadata_template.constants import (FALSE_VALUES, TRUE_VALUES,
NA_VALUES)
from qiita_db.metadata_template import (PREP_TEMPLATE_COLUMNS,
PREP_TEMPLATE_COLUMNS_TARGET_GENE)
class BaseTestPrepSample(TestCase):
    """Shared fixture for the PrepSample test cases."""

    def setUp(self):
        """Instantiate the sample under test and its expected categories."""
        self.prep_template = PrepTemplate(1)
        self.sample_id = '1.SKB8.640193'
        self.tester = PrepSample(self.sample_id, self.prep_template)
        # Metadata headers expected for every sample of prep template 1.
        self.exp_categories = set([
            'center_name', 'center_project_name', 'emp_status', 'barcode',
            'library_construction_protocol', 'primer', 'target_subfragment',
            'target_gene', 'run_center', 'run_prefix', 'run_date',
            'experiment_center', 'experiment_design_description',
            'experiment_title', 'platform', 'samp_size', 'sequencing_meth',
            'illumina_technology', 'sample_center', 'pcr_primers',
            'study_center'])
class TestPrepSampleReadOnly(BaseTestPrepSample):
    """Read-only behaviour of PrepSample; no database writes are performed."""

    def test_init_unknown_error(self):
        """Init errors if the PrepSample id is not found in the template"""
        with self.assertRaises(QiitaDBUnknownIDError):
            PrepSample('Not_a_Sample', self.prep_template)

    def test_init_wrong_template(self):
        """Raises an error if using a SampleTemplate instead of PrepTemplate"""
        with self.assertRaises(IncompetentQiitaDeveloperError):
            PrepSample('1.SKB8.640193', SampleTemplate(1))

    def test_init(self):
        """Init correctly initializes the PrepSample object"""
        sample = PrepSample(self.sample_id, self.prep_template)
        # Check that the internal id have been correctly set
        self.assertEqual(sample._id, '1.SKB8.640193')
        # Check that the internal template have been correctly set
        self.assertEqual(sample._md_template, self.prep_template)
        # Check that the internal dynamic table name have been correctly set
        self.assertEqual(sample._dynamic_table, "prep_1")

    def test_eq_true(self):
        """Equality correctly returns true"""
        other = PrepSample(self.sample_id, self.prep_template)
        self.assertTrue(self.tester == other)

    def test_eq_false_type(self):
        """Equality returns false if types are not equal"""
        other = Sample(self.sample_id, SampleTemplate(1))
        self.assertFalse(self.tester == other)

    def test_eq_false_id(self):
        """Equality returns false if ids are different"""
        other = PrepSample('1.SKD8.640184', self.prep_template)
        self.assertFalse(self.tester == other)

    def test_exists_true(self):
        """Exists returns true if the PrepSample exists"""
        self.assertTrue(PrepSample.exists(self.sample_id, self.prep_template))

    def test_exists_false(self):
        """Exists returns false if the PrepSample does not exists"""
        self.assertFalse(PrepSample.exists('Not_a_Sample', self.prep_template))

    def test_get_categories(self):
        """Correctly returns the set of category headers"""
        obs = self.tester._get_categories()
        self.assertEqual(obs, self.exp_categories)

    def test_len(self):
        """Len returns the correct number of categories"""
        self.assertEqual(len(self.tester), 21)

    def test_getitem_required(self):
        """Get item returns the correct metadata value from the required table
        """
        self.assertEqual(self.tester['center_name'], 'ANL')
        self.assertTrue(self.tester['center_project_name'] is None)

    def test_getitem_dynamic(self):
        """Get item returns the correct metadata value from the dynamic table
        """
        self.assertEqual(self.tester['pcr_primers'],
                         'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT')
        self.assertEqual(self.tester['barcode'], 'AGCGCTCACATC')

    def test_getitem_id_column(self):
        """Get item returns the correct metadata value from the changed column
        """
        self.assertEqual(self.tester['emp_status'], 'EMP')

    def test_getitem_error(self):
        """Get item raises an error if category does not exists"""
        with self.assertRaises(KeyError):
            self.tester['Not_a_Category']

    def test_iter(self):
        """iter returns an iterator over the category headers"""
        obs = self.tester.__iter__()
        self.assertTrue(isinstance(obs, Iterable))
        self.assertEqual(set(obs), self.exp_categories)

    def test_contains_true(self):
        """contains returns true if the category header exists"""
        # Both capitalizations are accepted, i.e. lookup is
        # case-insensitive for category headers.
        self.assertTrue('Barcode' in self.tester)
        self.assertTrue('barcode' in self.tester)

    def test_contains_false(self):
        """contains returns false if the category header does not exists"""
        self.assertFalse('Not_a_Category' in self.tester)

    def test_keys(self):
        """keys returns an iterator over the metadata headers"""
        obs = self.tester.keys()
        self.assertTrue(isinstance(obs, Iterable))
        self.assertEqual(set(obs), self.exp_categories)

    def test_values(self):
        """values returns an iterator over the values"""
        obs = self.tester.values()
        self.assertTrue(isinstance(obs, Iterable))
        # Expected metadata values for sample 1.SKB8.640193; compared as a
        # set because the iteration order is not guaranteed.
        exp = {'ANL', None, None, None, 'EMP', 'AGCGCTCACATC',
               'This analysis was done as in Caporaso et al 2011 Genome '
               'research. The PCR primers (F515/R806) were developed against '
               'the V4 region of the 16S rRNA (both bacteria and archaea), '
               'which we determined would yield optimal community clustering '
               'with reads of this length using a procedure similar to that of'
               ' ref. 15. [For reference, this primer pair amplifies the '
               'region 533_786 in the Escherichia coli strain 83972 sequence '
               '(greengenes accession no. prokMSA_id:470367).] The reverse PCR'
               ' primer is barcoded with a 12-base error-correcting Golay code'
               ' to facilitate multiplexing of up to 1,500 samples per lane, '
               'and both PCR primers contain sequencer adapter regions.',
               'GTGCCAGCMGCCGCGGTAA', 'V4', '16S rRNA', 'ANL',
               's_G1_L001_sequences', '8/1/12', 'ANL',
               'micro biome of soil and rhizosphere of cannabis plants from '
               'CA', 'Cannabis Soil Microbiome', 'Illumina', '.25,g',
               'Sequencing by synthesis', 'MiSeq', 'ANL',
               'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 'CCME'}
        self.assertEqual(set(obs), exp)

    def test_items(self):
        """items returns an iterator over the (key, value) tuples"""
        obs = self.tester.items()
        self.assertTrue(isinstance(obs, Iterable))
        exp = {('center_name', 'ANL'), ('center_project_name', None),
               ('emp_status', 'EMP'), ('barcode', 'AGCGCTCACATC'),
               ('library_construction_protocol',
                'This analysis was done as in Caporaso et al 2011 Genome '
                'research. The PCR primers (F515/R806) were developed against '
                'the V4 region of the 16S rRNA (both bacteria and archaea), '
                'which we determined would yield optimal community clustering '
                'with reads of this length using a procedure similar to that '
                'of ref. 15. [For reference, this primer pair amplifies the '
                'region 533_786 in the Escherichia coli strain 83972 sequence '
                '(greengenes accession no. prokMSA_id:470367).] The reverse '
                'PCR primer is barcoded with a 12-base error-correcting Golay '
                'code to facilitate multiplexing of up to 1,500 samples per '
                'lane, and both PCR primers contain sequencer adapter '
                'regions.'), ('primer', 'GTGCCAGCMGCCGCGGTAA'),
               ('target_subfragment', 'V4'), ('target_gene', '16S rRNA'),
               ('run_center', 'ANL'), ('run_prefix', 's_G1_L001_sequences'),
               ('run_date', '8/1/12'), ('experiment_center', 'ANL'),
               ('experiment_design_description',
                'micro biome of soil and rhizosphere of cannabis plants '
                'from CA'), ('experiment_title', 'Cannabis Soil Microbiome'),
               ('platform', 'Illumina'), ('samp_size', '.25,g'),
               ('sequencing_meth', 'Sequencing by synthesis'),
               ('illumina_technology', 'MiSeq'), ('sample_center', 'ANL'),
               ('pcr_primers',
                'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT'),
               ('study_center', 'CCME')}
        self.assertEqual(set(obs), exp)

    def test_get(self):
        """get returns the correct sample object"""
        self.assertEqual(self.tester.get('barcode'), 'AGCGCTCACATC')

    def test_get_none(self):
        """get returns none if the sample id is not present"""
        self.assertTrue(self.tester.get('Not_a_Category') is None)

    def test_columns_restrictions(self):
        """that it returns SAMPLE_TEMPLATE_COLUMNS"""
        # Prep templates restrict both the common prep columns and the
        # target-gene specific ones.
        exp = deepcopy(PREP_TEMPLATE_COLUMNS)
        exp.update(PREP_TEMPLATE_COLUMNS_TARGET_GENE)
        self.assertEqual(self.prep_template.columns_restrictions, exp)

    def test_can_be_updated(self):
        """test if the template can be updated"""
        # you can't update restricted colums in a pt with data
        self.assertFalse(self.prep_template.can_be_updated({'barcode'}))
        # but you can if not restricted
        self.assertTrue(self.prep_template.can_be_updated({'center_name'}))

    def test_can_be_extended(self):
        """test if the template can be extended"""
        # You can always add columns
        obs_bool, obs_msg = self.prep_template.can_be_extended([], ["NEW_COL"])
        self.assertTrue(obs_bool)
        self.assertEqual(obs_msg, "")
        # You can't add samples if there are preprocessed data generated
        obs_bool, obs_msg = self.prep_template.can_be_extended(
            ["NEW_SAMPLE"], [])
        self.assertFalse(obs_bool)
        self.assertEqual(obs_msg,
                         "Preprocessed data have already been generated (%s). "
                         "No new samples can be added to the prep template."
                         % ', '.join(
                             map(str, self.prep_template.preprocessed_data)))
@qiita_test_checker()
class TestPrepSampleReadWrite(BaseTestPrepSample):
    """Tests the PrepSample class"""

    def test_setitem(self):
        # Unknown columns are rejected with QiitaDBColumnError.
        with self.assertRaises(QiitaDBColumnError):
            self.tester['column that does not exist'] = 0.3
        # A known column can be overwritten and the new value read back.
        self.assertEqual(self.tester['center_name'], 'ANL')
        self.tester['center_name'] = "FOO"
        self.assertEqual(self.tester['center_name'], "FOO")

    def test_delitem(self):
        """delitem raises an error (currently not allowed)"""
        with self.assertRaises(QiitaDBNotImplementedError):
            del self.tester['pcr_primers']
class BaseTestPrepTemplate(TestCase):
    """Shared fixture data for the PrepTemplate test cases."""

    def _set_up(self):
        # Three-sample metadata keyed by the raw (un-prefixed) sample ids.
        self.metadata_dict = {
            'SKB8.640193': {'center_name': 'ANL',
                            'center_project_name': 'Test Project',
                            'ebi_submission_accession': None,
                            'EMP_status': 'EMP',
                            'str_column': 'Value for sample 1',
                            'primer': 'GTGCCAGCMGCCGCGGTAA',
                            'barcode': 'GTCCGCAAGTTA',
                            'run_prefix': "s_G1_L001_sequences",
                            'platform': 'ILLUMINA',
                            'library_construction_protocol': 'AAAA',
                            'experiment_design_description': 'BBBB'},
            'SKD8.640184': {'center_name': 'ANL',
                            'center_project_name': 'Test Project',
                            'ebi_submission_accession': None,
                            'EMP_status': 'EMP',
                            'str_column': 'Value for sample 2',
                            'primer': 'GTGCCAGCMGCCGCGGTAA',
                            'barcode': 'CGTAGAGCTCTC',
                            'run_prefix': "s_G1_L001_sequences",
                            'platform': 'ILLUMINA',
                            'library_construction_protocol': 'AAAA',
                            'experiment_design_description': 'BBBB'},
            'SKB7.640196': {'center_name': 'ANL',
                            'center_project_name': 'Test Project',
                            'ebi_submission_accession': None,
                            'EMP_status': 'EMP',
                            'str_column': 'Value for sample 3',
                            'primer': 'GTGCCAGCMGCCGCGGTAA',
                            'barcode': 'CCTCTGAGAGCT',
                            'run_prefix': "s_G1_L002_sequences",
                            'platform': 'ILLUMINA',
                            'library_construction_protocol': 'AAAA',
                            'experiment_design_description': 'BBBB'}
            }
        self.metadata = pd.DataFrame.from_dict(self.metadata_dict,
                                               orient='index')

        # The same metadata, but with the sample ids already carrying the
        # study-id prefix ("1.").
        metadata_prefixed_dict = {
            '1.SKB8.640193': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'EMP_status': 'EMP',
                              'str_column': 'Value for sample 1',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'GTCCGCAAGTTA',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'},
            '1.SKD8.640184': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'EMP_status': 'EMP',
                              'str_column': 'Value for sample 2',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'CGTAGAGCTCTC',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'},
            '1.SKB7.640196': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'EMP_status': 'EMP',
                              'str_column': 'Value for sample 3',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'CCTCTGAGAGCT',
                              'run_prefix': "s_G1_L002_sequences",
                              'platform': 'ILLUMINA',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'}
            }
        self.metadata_prefixed = pd.DataFrame.from_dict(metadata_prefixed_dict,
                                                        orient='index')

        self.test_study = Study(1)
        self.data_type = "18S"
        self.data_type_id = 2

        # Prep template 1 and the full set of prefixed sample ids it holds.
        self.tester = PrepTemplate(1)
        self.exp_sample_ids = {
            '1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195', '1.SKB4.640189',
            '1.SKB5.640181', '1.SKB6.640176', '1.SKB7.640196', '1.SKB8.640193',
            '1.SKB9.640200', '1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',
            '1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190', '1.SKD7.640191',
            '1.SKD8.640184', '1.SKD9.640182', '1.SKM1.640183', '1.SKM2.640199',
            '1.SKM3.640197', '1.SKM4.640180', '1.SKM5.640177', '1.SKM6.640187',
            '1.SKM7.640188', '1.SKM8.640201', '1.SKM9.640192'}
        # Files created during a test; removed in tearDown.
        self._clean_up_files = []

    def tearDown(self):
        # Remove any files a test registered for cleanup.
        for f in self._clean_up_files:
            remove(f)
class TestPrepTemplateReadOnly(BaseTestPrepTemplate):
    """Read-only behaviour of PrepTemplate; no database writes performed."""

    def setUp(self):
        self._set_up()

    def test_study_id(self):
        """Ensure that the correct study ID is returned"""
        self.assertEqual(self.tester.study_id, 1)

    def test_init_unknown_error(self):
        """Init raises an error if the id is not known"""
        with self.assertRaises(QiitaDBUnknownIDError):
            PrepTemplate(2)

    def test_init(self):
        """Init successfully instantiates the object"""
        st = PrepTemplate(1)
        # BUGFIX: was assertTrue(st.id, 1), which passed the expected value
        # as the failure *message* and therefore passed for any truthy id.
        self.assertEqual(st.id, 1)

    def test_table_name(self):
        """Table name return the correct string"""
        obs = PrepTemplate._table_name(1)
        self.assertEqual(obs, "prep_1")

    def test_exists_true(self):
        """Exists returns true when the PrepTemplate already exists"""
        self.assertTrue(PrepTemplate.exists(1))

    def test_exists_false(self):
        """Exists returns false when the PrepTemplate does not exists"""
        self.assertFalse(PrepTemplate.exists(2))

    def test_get_sample_ids(self):
        """get_sample_ids returns the correct set of sample ids"""
        obs = self.tester._get_sample_ids()
        self.assertEqual(obs, self.exp_sample_ids)

    def test_len(self):
        """Len returns the correct number of sample ids"""
        self.assertEqual(len(self.tester), 27)

    def test_getitem(self):
        """Get item returns the correct sample object"""
        obs = self.tester['1.SKM7.640188']
        exp = PrepSample('1.SKM7.640188', self.tester)
        self.assertEqual(obs, exp)

    def test_getitem_error(self):
        """Get item raises an error if key does not exists"""
        with self.assertRaises(KeyError):
            self.tester['Not_a_Sample']

    def test_iter(self):
        """iter returns an iterator over the sample ids"""
        obs = self.tester.__iter__()
        self.assertTrue(isinstance(obs, Iterable))
        self.assertEqual(set(obs), self.exp_sample_ids)

    def test_contains_true(self):
        """contains returns true if the sample id exists"""
        self.assertTrue('1.SKM7.640188' in self.tester)

    def test_contains_false(self):
        """contains returns false if the sample id does not exists"""
        self.assertFalse('Not_a_Sample' in self.tester)

    def test_keys(self):
        """keys returns an iterator over the sample ids"""
        obs = self.tester.keys()
        self.assertTrue(isinstance(obs, Iterable))
        self.assertEqual(set(obs), self.exp_sample_ids)

    def test_values(self):
        """values returns an iterator over the values"""
        obs = self.tester.values()
        self.assertTrue(isinstance(obs, Iterable))
        exp = {PrepSample('1.SKB1.640202', self.tester),
               PrepSample('1.SKB2.640194', self.tester),
               PrepSample('1.SKB3.640195', self.tester),
               PrepSample('1.SKB4.640189', self.tester),
               PrepSample('1.SKB5.640181', self.tester),
               PrepSample('1.SKB6.640176', self.tester),
               PrepSample('1.SKB7.640196', self.tester),
               PrepSample('1.SKB8.640193', self.tester),
               PrepSample('1.SKB9.640200', self.tester),
               PrepSample('1.SKD1.640179', self.tester),
               PrepSample('1.SKD2.640178', self.tester),
               PrepSample('1.SKD3.640198', self.tester),
               PrepSample('1.SKD4.640185', self.tester),
               PrepSample('1.SKD5.640186', self.tester),
               PrepSample('1.SKD6.640190', self.tester),
               PrepSample('1.SKD7.640191', self.tester),
               PrepSample('1.SKD8.640184', self.tester),
               PrepSample('1.SKD9.640182', self.tester),
               PrepSample('1.SKM1.640183', self.tester),
               PrepSample('1.SKM2.640199', self.tester),
               PrepSample('1.SKM3.640197', self.tester),
               PrepSample('1.SKM4.640180', self.tester),
               PrepSample('1.SKM5.640177', self.tester),
               PrepSample('1.SKM6.640187', self.tester),
               PrepSample('1.SKM7.640188', self.tester),
               PrepSample('1.SKM8.640201', self.tester),
               PrepSample('1.SKM9.640192', self.tester)}
        # Creating a list and looping over it since unittest does not call
        # the __eq__ function on the objects
        for o, e in zip(sorted(list(obs), key=lambda x: x.id),
                        sorted(exp, key=lambda x: x.id)):
            self.assertEqual(o, e)

    def test_items(self):
        """items returns an iterator over the (key, value) tuples"""
        obs = self.tester.items()
        self.assertTrue(isinstance(obs, Iterable))
        exp = [('1.SKB1.640202', PrepSample('1.SKB1.640202', self.tester)),
               ('1.SKB2.640194', PrepSample('1.SKB2.640194', self.tester)),
               ('1.SKB3.640195', PrepSample('1.SKB3.640195', self.tester)),
               ('1.SKB4.640189', PrepSample('1.SKB4.640189', self.tester)),
               ('1.SKB5.640181', PrepSample('1.SKB5.640181', self.tester)),
               ('1.SKB6.640176', PrepSample('1.SKB6.640176', self.tester)),
               ('1.SKB7.640196', PrepSample('1.SKB7.640196', self.tester)),
               ('1.SKB8.640193', PrepSample('1.SKB8.640193', self.tester)),
               ('1.SKB9.640200', PrepSample('1.SKB9.640200', self.tester)),
               ('1.SKD1.640179', PrepSample('1.SKD1.640179', self.tester)),
               ('1.SKD2.640178', PrepSample('1.SKD2.640178', self.tester)),
               ('1.SKD3.640198', PrepSample('1.SKD3.640198', self.tester)),
               ('1.SKD4.640185', PrepSample('1.SKD4.640185', self.tester)),
               ('1.SKD5.640186', PrepSample('1.SKD5.640186', self.tester)),
               ('1.SKD6.640190', PrepSample('1.SKD6.640190', self.tester)),
               ('1.SKD7.640191', PrepSample('1.SKD7.640191', self.tester)),
               ('1.SKD8.640184', PrepSample('1.SKD8.640184', self.tester)),
               ('1.SKD9.640182', PrepSample('1.SKD9.640182', self.tester)),
               ('1.SKM1.640183', PrepSample('1.SKM1.640183', self.tester)),
               ('1.SKM2.640199', PrepSample('1.SKM2.640199', self.tester)),
               ('1.SKM3.640197', PrepSample('1.SKM3.640197', self.tester)),
               ('1.SKM4.640180', PrepSample('1.SKM4.640180', self.tester)),
               ('1.SKM5.640177', PrepSample('1.SKM5.640177', self.tester)),
               ('1.SKM6.640187', PrepSample('1.SKM6.640187', self.tester)),
               ('1.SKM7.640188', PrepSample('1.SKM7.640188', self.tester)),
               ('1.SKM8.640201', PrepSample('1.SKM8.640201', self.tester)),
               ('1.SKM9.640192', PrepSample('1.SKM9.640192', self.tester))]
        # Creating a list and looping over it since unittest does not call
        # the __eq__ function on the objects
        for o, e in zip(sorted(list(obs)), sorted(exp)):
            self.assertEqual(o, e)

    def test_get(self):
        """get returns the correct PrepSample object"""
        obs = self.tester.get('1.SKM7.640188')
        exp = PrepSample('1.SKM7.640188', self.tester)
        self.assertEqual(obs, exp)

    def test_get_none(self):
        """get returns none if the sample id is not present"""
        self.assertTrue(self.tester.get('Not_a_Sample') is None)

    def test_data_type(self):
        """data_type returns the string with the data_type"""
        # BUGFIX: was assertTrue(..., "18S"), which passed the expected value
        # as the failure *message* and therefore never compared anything.
        self.assertEqual(self.tester.data_type(), "18S")

    def test_data_type_id(self):
        """data_type returns the int with the data_type_id"""
        # BUGFIX: same assertTrue-as-comparison misuse as test_data_type.
        self.assertEqual(self.tester.data_type(ret_id=True), 2)

    def test_preprocessed_data(self):
        """Returns the preprocessed data list generated from this template"""
        self.assertEqual(self.tester.preprocessed_data, [1, 2])

    def test_investigation_type(self):
        """investigation_type works correctly"""
        self.assertEqual(self.tester.investigation_type, "Metagenomics")

    def test_to_dataframe(self):
        obs = self.tester.to_dataframe()
        # We don't test the specific values as this would blow up the size
        # of this file as the amount of lines would go to ~1000
        # 27 samples
        self.assertEqual(len(obs), 27)
        self.assertEqual(set(obs.index), {
            u'1.SKB1.640202', u'1.SKB2.640194', u'1.SKB3.640195',
            u'1.SKB4.640189', u'1.SKB5.640181', u'1.SKB6.640176',
            u'1.SKB7.640196', u'1.SKB8.640193', u'1.SKB9.640200',
            u'1.SKD1.640179', u'1.SKD2.640178', u'1.SKD3.640198',
            u'1.SKD4.640185', u'1.SKD5.640186', u'1.SKD6.640190',
            u'1.SKD7.640191', u'1.SKD8.640184', u'1.SKD9.640182',
            u'1.SKM1.640183', u'1.SKM2.640199', u'1.SKM3.640197',
            u'1.SKM4.640180', u'1.SKM5.640177', u'1.SKM6.640187',
            u'1.SKM7.640188', u'1.SKM8.640201', u'1.SKM9.640192'})
        self.assertEqual(set(obs.columns), {
            u'center_name', u'center_project_name',
            u'emp_status', u'barcode',
            u'library_construction_protocol', u'primer',
            u'target_subfragment', u'target_gene', u'run_center',
            u'run_prefix', u'run_date', u'experiment_center',
            u'experiment_design_description', u'experiment_title', u'platform',
            u'samp_size', u'sequencing_meth', u'illumina_technology',
            u'sample_center', u'pcr_primers', u'study_center'})

    def test_clean_validate_template_error_bad_chars(self):
        """Raises an error if there are invalid characters in the sample names
        """
        self.metadata.index = ['o()xxxx[{::::::::>', 'sample.1', 'sample.3']
        with self.assertRaises(QiitaDBColumnError):
            PrepTemplate._clean_validate_template(self.metadata, 2,
                                                  PREP_TEMPLATE_COLUMNS)

    def test_clean_validate_template_error_duplicate_cols(self):
        """Raises an error if there are duplicated columns in the template"""
        # STR_COLUMN only differs from the existing str_column by case.
        self.metadata['STR_COLUMN'] = pd.Series(['', '', ''],
                                                index=self.metadata.index)
        with self.assertRaises(QiitaDBDuplicateHeaderError):
            PrepTemplate._clean_validate_template(self.metadata, 2,
                                                  PREP_TEMPLATE_COLUMNS)

    def test_clean_validate_template_error_duplicate_samples(self):
        """Raises an error if there are duplicated samples in the templates"""
        self.metadata.index = ['sample.1', 'sample.1', 'sample.3']
        with self.assertRaises(QiitaDBDuplicateSamplesError):
            PrepTemplate._clean_validate_template(self.metadata, 2,
                                                  PREP_TEMPLATE_COLUMNS)

    def test_clean_validate_template_warning_missing(self):
        """Raises an error if the template is missing a required column"""
        metadata_dict = {
            'SKB8.640193': {'center_name': 'ANL',
                            'center_project_name': 'Test Project',
                            'ebi_submission_accession': None,
                            'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
                            'barcodesequence': 'GTCCGCAAGTTA',
                            'run_prefix': "s_G1_L001_sequences",
                            'platform': 'ILLUMINA',
                            'library_construction_protocol': 'AAAA',
                            'experiment_design_description': 'BBBB'}
            }
        metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
        # A QiitaDBWarning is emitted but the cleaned frame is still returned.
        obs = npt.assert_warns(
            QiitaDBWarning, PrepTemplate._clean_validate_template, metadata, 2,
            PREP_TEMPLATE_COLUMNS)
        # The expected frame has the study prefix "2." added to the sample id.
        metadata_dict = {
            '2.SKB8.640193': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
                              'barcodesequence': 'GTCCGCAAGTTA',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'}
            }
        exp = pd.DataFrame.from_dict(metadata_dict, orient='index')
        # Sort both frames so the comparison ignores row/column order.
        obs.sort_index(axis=0, inplace=True)
        obs.sort_index(axis=1, inplace=True)
        exp.sort_index(axis=0, inplace=True)
        exp.sort_index(axis=1, inplace=True)
        assert_frame_equal(obs, exp)

    def test_clean_validate_template(self):
        obs = PrepTemplate._clean_validate_template(self.metadata, 2,
                                                    PREP_TEMPLATE_COLUMNS)
        # Sample ids get the study prefix and EMP_status is lower-cased to
        # emp_status.
        metadata_dict = {
            '2.SKB8.640193': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'emp_status': 'EMP',
                              'str_column': 'Value for sample 1',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'GTCCGCAAGTTA',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'},
            '2.SKD8.640184': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'emp_status': 'EMP',
                              'str_column': 'Value for sample 2',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'CGTAGAGCTCTC',
                              'run_prefix': "s_G1_L001_sequences",
                              'platform': 'ILLUMINA',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'},
            '2.SKB7.640196': {'center_name': 'ANL',
                              'center_project_name': 'Test Project',
                              'ebi_submission_accession': None,
                              'emp_status': 'EMP',
                              'str_column': 'Value for sample 3',
                              'primer': 'GTGCCAGCMGCCGCGGTAA',
                              'barcode': 'CCTCTGAGAGCT',
                              'run_prefix': "s_G1_L002_sequences",
                              'platform': 'ILLUMINA',
                              'library_construction_protocol': 'AAAA',
                              'experiment_design_description': 'BBBB'}
            }
        exp = pd.DataFrame.from_dict(metadata_dict, orient='index')
        # Sort both frames so the comparison ignores row/column order.
        obs.sort_index(axis=0, inplace=True)
        obs.sort_index(axis=1, inplace=True)
        exp.sort_index(axis=0, inplace=True)
        exp.sort_index(axis=1, inplace=True)
        assert_frame_equal(obs, exp)
@qiita_test_checker()
class TestPrepTemplateReadWrite(BaseTestPrepTemplate):
"""Tests the PrepTemplate class"""
def setUp(self):
    """Build the fixture metadata and reset the cleanup list."""
    self._set_up()
    self._clean_up_files = []
def test_create_duplicate_header(self):
    """Create raises an error when duplicate headers are present"""
    # STR_COLUMN differs from the existing str_column only by case.
    self.metadata['STR_COLUMN'] = pd.Series(['', '', ''],
                                            index=self.metadata.index)
    with self.assertRaises(QiitaDBDuplicateHeaderError):
        PrepTemplate.create(self.metadata, self.test_study, self.data_type)
def test_create_bad_sample_names(self):
    """Create raises an error when sample names contain invalid characters."""
    # set a horrible list of sample names
    self.metadata.index = ['o()xxxx[{::::::::>', 'sample.1', 'sample.3']
    with self.assertRaises(QiitaDBColumnError):
        PrepTemplate.create(self.metadata, self.test_study, self.data_type)
def test_create_unknown_sample_names(self):
    """Create fails when a sample is absent from the sample template."""
    # set two real and one fake sample name
    self.metadata_dict['NOTREAL'] = self.metadata_dict['SKB7.640196']
    del self.metadata_dict['SKB7.640196']
    self.metadata = pd.DataFrame.from_dict(self.metadata_dict,
                                           orient='index')
    # Test error raised and correct error given
    with self.assertRaises(QiitaDBExecutionError) as err:
        PrepTemplate.create(self.metadata, self.test_study, self.data_type)
    self.assertEqual(
        str(err.exception),
        'Samples found in prep template but not sample template: 1.NOTREAL'
    )
def test_create_shorter_prep_template(self):
    """A prep template covering only a subset of the samples is valid."""
    # remove one sample so not all samples in the prep template
    del self.metadata_dict['SKB7.640196']
    self.metadata = pd.DataFrame.from_dict(self.metadata_dict,
                                           orient='index')
    pt = PrepTemplate.create(self.metadata, self.test_study,
                             self.data_type)
    # make sure the two samples were added correctly
    self.assertEqual(pt.id, 2)
    obs = self.conn_handler.execute_fetchall(
        "SELECT sample_id FROM qiita.prep_2")
    exp = [['1.SKB8.640193'], ['1.SKD8.640184']]
    self.assertEqual(obs, exp)
def test_create_error_cleanup(self):
    """Create does not modify the database if an error happens"""
    # The 'group' column mixes ints and a string across samples, which
    # makes Create fail with a ValueError mid-way — presumably during
    # column type inference (TODO confirm the exact failure point).
    metadata_dict = {
        'SKB8.640193': {'center_name': 'ANL',
                        'center_project_name': 'Test Project',
                        'ebi_submission_accession': None,
                        'EMP_status': 'EMP',
                        'group': 2,
                        'primer': 'GTGCCAGCMGCCGCGGTAA',
                        'barcode': 'GTCCGCAAGTTA',
                        'run_prefix': "s_G1_L001_sequences",
                        'platform': 'ILLUMINA',
                        'library_construction_protocol': 'AAAA',
                        'experiment_design_description': 'BBBB'},
        'SKD8.640184': {'center_name': 'ANL',
                        'center_project_name': 'Test Project',
                        'ebi_submission_accession': None,
                        'EMP_status': 'EMP',
                        'group': 1,
                        'primer': 'GTGCCAGCMGCCGCGGTAA',
                        'barcode': 'CGTAGAGCTCTC',
                        'run_prefix': "s_G1_L001_sequences",
                        'platform': 'ILLUMINA',
                        'library_construction_protocol': 'AAAA',
                        'experiment_design_description': 'BBBB'},
        'SKB7.640196': {'center_name': 'ANL',
                        'center_project_name': 'Test Project',
                        'ebi_submission_accession': None,
                        'EMP_status': 'EMP',
                        'group': 'Value for sample 3',
                        'primer': 'GTGCCAGCMGCCGCGGTAA',
                        'barcode': 'CCTCTGAGAGCT',
                        'run_prefix': "s_G1_L002_sequences",
                        'platform': 'ILLUMINA',
                        'library_construction_protocol': 'AAAA',
                        'experiment_design_description': 'BBBB'}
        }
    metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
    exp_id = get_count("qiita.prep_template") + 1

    with self.assertRaises(ValueError):
        PrepTemplate.create(metadata, self.test_study, self.data_type)

    # After the failed creation, no row referencing the would-be new
    # template id may exist in any of the related tables.
    for table in ("prep_template", "prep_template_sample", "prep_columns",
                  "study_prep_template"):
        sql = ("SELECT EXISTS(SELECT * FROM qiita.{0} "
               "WHERE prep_template_id=%s)").format(table)
        self.assertFalse(
            self.conn_handler.execute_fetchone(sql, (exp_id,))[0])

    # ... and the dynamic per-template table must not have been created.
    self.assertFalse(exists_table("prep_%d" % exp_id))
def _common_creation_checks(self, new_id, pt, fp_count):
    """Assert that a freshly created prep template is fully persisted.

    Parameters
    ----------
    new_id : int
        The id the new prep template is expected to have received.
    pt : PrepTemplate
        The object returned by PrepTemplate.create.
    fp_count : int
        Number of rows in qiita.filepath *before* the creation; two new
        filepaths (prep + QIIME mapping) are expected on top of it.
    """
    # The returned object has the correct id
    self.assertEqual(pt.id, new_id)

    # The row in the prep template table has been created
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_template WHERE prep_template_id=%s",
        (new_id,))
    # prep_template_id, data_type_id, raw_data_id, preprocessing_status,
    # investigation_type
    self.assertEqual(obs, [[new_id, 2, None, 'not_preprocessed', None]])

    # The prep template has been linked to the study
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.study_prep_template "
        "WHERE prep_template_id=%s", (new_id,))
    self.assertEqual(obs, [[self.test_study.id, new_id]])

    # The relevant rows to prep_template_sample have been added.
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_template_sample "
        "WHERE prep_template_id=%s", (new_id,))
    # prep_template_id, sample_id, center_name,
    # center_project_name, emp_status_id
    exp = [[new_id, '1.SKB8.640193'],
           [new_id, '1.SKD8.640184'],
           [new_id, '1.SKB7.640196']]
    self.assertItemsEqual(obs, exp)

    # The relevant rows have been added to the prep_columns table
    # BUGFIX: this query hard-coded prep_template_id=2 while the rest of
    # the checks parameterize on new_id; it only worked because the new
    # template happened to get id 2 in a fresh test database.
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_columns WHERE prep_template_id=%s",
        (new_id,))
    # prep_template_id, column_name, column_type
    exp = [[new_id, 'str_column', 'varchar'],
           [new_id, 'ebi_submission_accession', 'varchar'],
           [new_id, 'run_prefix', 'varchar'],
           [new_id, 'barcode', 'varchar'],
           [new_id, 'primer', 'varchar'],
           [new_id, 'platform', 'varchar'],
           [new_id, 'experiment_design_description', 'varchar'],
           [new_id, 'library_construction_protocol', 'varchar'],
           [new_id, 'center_name', 'varchar'],
           [new_id, 'center_project_name', 'varchar'],
           [new_id, 'emp_status', 'varchar']]
    self.assertItemsEqual(obs, exp)

    # The new table exists
    self.assertTrue(exists_table("prep_%s" % new_id))

    # The new table hosts the correct values
    obs = [dict(o) for o in self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_%s" % new_id)]
    exp = [{'sample_id': '1.SKB7.640196',
            'barcode': 'CCTCTGAGAGCT',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L002_sequences',
            'str_column': 'Value for sample 3',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP'},
           {'sample_id': '1.SKB8.640193',
            'barcode': 'GTCCGCAAGTTA',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L001_sequences',
            'str_column': 'Value for sample 1',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP'},
           {'sample_id': '1.SKD8.640184',
            'barcode': 'CGTAGAGCTCTC',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L001_sequences',
            'str_column': 'Value for sample 2',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP'}]
    self.assertItemsEqual(obs, exp)

    # prep and qiime files have been created
    filepaths = pt.get_filepaths()
    self.assertEqual(len(filepaths), 2)
    self.assertEqual(filepaths[0][0], fp_count + 2)
    self.assertEqual(filepaths[1][0], fp_count + 1)
def test_create(self):
    """Creates a new PrepTemplate"""
    # Snapshot the counters before creation so the shared checks can
    # verify the new rows.
    n_filepaths = get_count('qiita.filepath')
    expected_id = get_count('qiita.prep_template') + 1
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type)
    self._common_creation_checks(expected_id, template, n_filepaths)
def test_create_already_prefixed_samples(self):
    """Creates a new PrepTemplate from metadata with prefixed sample ids"""
    file_count = get_count('qiita.filepath')
    expected_id = get_count('qiita.prep_template') + 1
    # Creation must succeed but emit a QiitaDBWarning for the prefixes.
    template = npt.assert_warns(QiitaDBWarning, PrepTemplate.create,
                                self.metadata_prefixed, self.test_study,
                                self.data_type)
    self._common_creation_checks(expected_id, template, file_count)
def test_generate_files(self):
    before = get_count("qiita.filepath")
    self.tester.generate_files()
    after = get_count("qiita.filepath")
    # Only the filepath count is verified here (+2: prep file and qiime
    # mapping file); the file contents are tested elsewhere.
    self.assertEqual(after, before + 2)
def test_create_qiime_mapping_file(self):
    pt = PrepTemplate(1)
    # locate the 'templates' mountpoint to build the expected file path
    _id, mountpoint = get_mountpoint('templates')[0]
    obs_fp = pt.create_qiime_mapping_file()
    exp_fp = join(mountpoint, '1_prep_1_qiime_19700101-000000.txt')
    # Both files are parsed with the same base options; the expected file
    # additionally normalizes NA/boolean spellings.
    read_kwargs = dict(sep='\t', infer_datetime_format=True,
                       parse_dates=True, index_col=False, comment='\t')
    obs = pd.read_csv(obs_fp, **read_kwargs)
    exp = pd.read_csv(exp_fp, na_values=NA_VALUES, true_values=TRUE_VALUES,
                      false_values=FALSE_VALUES, **read_kwargs)
    assert_frame_equal(obs, exp)
def test_create_data_type_id(self):
    """Creates a new PrepTemplate passing the data_type_id"""
    file_count = get_count('qiita.filepath')
    expected_id = get_count('qiita.prep_template') + 1
    # Same as test_create, but using the numeric id instead of the name.
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type_id)
    self._common_creation_checks(expected_id, template, file_count)
def test_create_warning(self):
    """Warns if a required columns is missing for a given functionality

    Creates the template without 'barcode' and verifies every DB side
    effect of the creation (template row, study link, sample rows,
    column registry, dynamic table and generated files).
    """
    fp_count = get_count("qiita.filepath")
    new_id = get_count('qiita.prep_template') + 1
    # Dropping a required-for-preprocessing column must trigger a warning
    del self.metadata['barcode']
    pt = npt.assert_warns(QiitaDBWarning, PrepTemplate.create,
                          self.metadata, self.test_study, self.data_type)
    # The returned object has the correct id
    self.assertEqual(pt.id, new_id)
    # The row in the prep template table has been created
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_template WHERE prep_template_id=%s",
        (new_id,))
    # prep_template_id, data_type_id, raw_data_id, preprocessing_status,
    # investigation_type
    self.assertEqual(obs, [[new_id, 2, None, 'not_preprocessed', None]])
    # The prep template has been linked to the study
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.study_prep_template "
        "WHERE prep_template_id=%s", (new_id,))
    self.assertEqual(obs, [[self.test_study.id, new_id]])
    # The relevant rows to prep_template_sample have been added.
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_template_sample "
        "WHERE prep_template_id=%s", (new_id,))
    # prep_template_id, sample_id, center_name,
    # center_project_name, emp_status_id
    exp = [[new_id, '1.SKB8.640193'],
           [new_id, '1.SKD8.640184'],
           [new_id, '1.SKB7.640196']]
    self.assertItemsEqual(obs, exp)
    # The relevant rows have been added to the prep_columns table
    # BUG FIX: this query previously hard-coded "prep_template_id=2";
    # use the parameterized new_id like every other query in this test.
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_columns WHERE prep_template_id=%s",
        (new_id,))
    # prep_template_id, column_name, column_type
    exp = [[new_id, 'str_column', 'varchar'],
           [new_id, 'ebi_submission_accession', 'varchar'],
           [new_id, 'run_prefix', 'varchar'],
           [new_id, 'primer', 'varchar'],
           [new_id, 'platform', 'varchar'],
           [new_id, 'experiment_design_description', 'varchar'],
           [new_id, 'library_construction_protocol', 'varchar'],
           [new_id, 'center_name', 'varchar'],
           [new_id, 'center_project_name', 'varchar'],
           [new_id, 'emp_status', 'varchar']]
    self.assertItemsEqual(obs, exp)
    # The new table exists
    self.assertTrue(exists_table("prep_%s" % new_id))
    # The new table hosts the correct values; note that the 'barcode'
    # column is absent because it was deleted from the metadata above.
    obs = [dict(o) for o in self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_%s" % new_id)]
    exp = [{'sample_id': '1.SKB7.640196',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L002_sequences',
            'str_column': 'Value for sample 3',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP'},
           {'sample_id': '1.SKB8.640193',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L001_sequences',
            'str_column': 'Value for sample 1',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP'},
           {'sample_id': '1.SKD8.640184',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L001_sequences',
            'str_column': 'Value for sample 2',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP'}]
    self.assertItemsEqual(obs, exp)
    # prep and qiime files have been created
    filepaths = pt.get_filepaths()
    self.assertEqual(len(filepaths), 2)
    self.assertEqual(filepaths[0][0], fp_count + 2)
    self.assertEqual(filepaths[1][0], fp_count + 1)
def test_create_investigation_type_error(self):
    """Create raises an error if the investigation_type does not exists"""
    # 'Not a term' is not in the investigation-type ontology, so the
    # creation must be rejected with a column error.
    self.assertRaises(QiitaDBColumnError, PrepTemplate.create,
                      self.metadata, self.test_study, self.data_type_id,
                      'Not a term')
def test_delete_error(self):
    """Try to delete a prep template that already has preprocessed data"""
    # Template 1 has preprocessed data attached, so deletion must fail.
    self.assertRaises(QiitaDBExecutionError, PrepTemplate.delete, 1)
def test_delete_unkonwn_id_error(self):
    """Try to delete a non existent prep template"""
    # NOTE(review): "unkonwn" in the method name is a typo for "unknown";
    # kept unchanged so the test id stays stable.
    self.assertRaises(QiitaDBUnknownIDError, PrepTemplate.delete, 5)
def test_delete_error_raw_data(self):
    """Try to delete a prep template with a raw data attached to id"""
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type_id)
    template.raw_data = RawData(1)
    # Once raw data is linked, deleting the template must be rejected.
    self.assertRaises(QiitaDBExecutionError, PrepTemplate.delete,
                      template.id)
def test_delete(self):
    """Deletes prep template 2

    Verifies that every table touched by creation is cleaned up and that
    the dynamic prep table is dropped.
    """
    pt = PrepTemplate.create(self.metadata, self.test_study,
                             self.data_type_id)
    PrepTemplate.delete(pt.id)
    # The prep_template row is gone
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_template WHERE prep_template_id=%s",
        (pt.id,))
    exp = []
    self.assertEqual(obs, exp)
    # The study link is gone
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.study_prep_template "
        "WHERE prep_template_id=%s", (pt.id,))
    # BUG FIX: this query result was fetched but never asserted; the
    # deletion of the study link was effectively untested.
    exp = []
    self.assertEqual(obs, exp)
    # The per-sample rows are gone
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_template_sample "
        "WHERE prep_template_id=%s", (pt.id,))
    exp = []
    self.assertEqual(obs, exp)
    # The column registry rows are gone
    obs = self.conn_handler.execute_fetchall(
        "SELECT * FROM qiita.prep_columns WHERE prep_template_id=%s",
        (pt.id,))
    exp = []
    self.assertEqual(obs, exp)
    # The dynamic table has been dropped, so selecting from it raises
    with self.assertRaises(ValueError):
        self.conn_handler.execute_fetchall(
            "SELECT * FROM qiita.prep_%d" % pt.id)
def test_setitem(self):
    """setitem raises an error (currently not allowed)"""
    # Assigning a sample through __setitem__ is not implemented yet.
    with self.assertRaises(QiitaDBNotImplementedError):
        self.tester['1.SKM7.640188'] = PrepSample(
            '1.SKM7.640188', self.tester)
def test_delitem(self):
    """delitem raises an error (currently not allowed)"""
    # Removing a sample through __delitem__ is not implemented yet.
    with self.assertRaises(QiitaDBNotImplementedError):
        del self.tester['1.SKM7.640188']
def test_to_file(self):
    """to file writes a tab delimited file with all the metadata"""
    fd, out_path = mkstemp()
    close(fd)
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type)
    template.to_file(out_path)
    # Register the file so tearDown removes it.
    self._clean_up_files.append(out_path)
    with open(out_path, 'U') as obs_file:
        observed = obs_file.read()
    self.assertEqual(observed, EXP_PREP_TEMPLATE)
def test_preprocessing_status(self):
    """preprocessing_status works correctly"""
    # An existing, already-processed template reports success
    template = PrepTemplate(1)
    self.assertEqual(template.preprocessing_status, 'success')
    # A freshly created template has not been preprocessed yet
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type_id)
    self.assertEqual(template.preprocessing_status, 'not_preprocessed')
def test_preprocessing_status_setter(self):
    """Able to update the preprocessing status"""
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type_id)
    self.assertEqual(template.preprocessing_status, 'not_preprocessed')
    # Walk the status through the valid transitions and verify each one.
    for new_status in ('preprocessing', 'success'):
        template.preprocessing_status = new_status
        self.assertEqual(template.preprocessing_status, new_status)
def test_preprocessing_status_setter_failed(self):
    """Able to update preprocessing_status with a failure message"""
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type_id)
    self.assertEqual(template.preprocessing_status, 'not_preprocessed')
    # A failure status carries a free-text message after the prefix.
    failure = 'failed: some error message'
    template.preprocessing_status = failure
    self.assertEqual(template.preprocessing_status, failure)
def test_preprocessing_status_setter_valueerror(self):
    """Raises an error if the status is not recognized"""
    with self.assertRaises(ValueError):
        self.tester.preprocessing_status = 'not a valid state'
def test_investigation_type_setter(self):
    """Able to update the investigation type"""
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type_id)
    # Starts unset, accepts a valid ontology term...
    self.assertEqual(template.investigation_type, None)
    template.investigation_type = "Other"
    self.assertEqual(template.investigation_type, 'Other')
    # ...and rejects anything outside the ontology.
    with self.assertRaises(QiitaDBColumnError):
        template.investigation_type = "should fail"
def test_investigation_type_instance_setter(self):
    # Updating the investigation type on an existing template persists.
    template = PrepTemplate(1)
    template.investigation_type = 'RNASeq'
    self.assertEqual(template.investigation_type, 'RNASeq')
def test_status(self):
    """The prep template status tracks its processed data's status."""
    pt = PrepTemplate(1)
    self.assertEqual(pt.status, 'private')
    # Check that changing the status of the processed data, the status
    # of the prep template changes.
    # FIX: renamed the local from 'pd' to avoid shadowing the module's
    # pandas alias 'pd' used elsewhere in this file.
    processed_data = ProcessedData(1)
    processed_data.status = 'public'
    self.assertEqual(pt.status, 'public')
    # New prep templates have the status to sandbox because there is no
    # processed data associated with them
    pt = PrepTemplate.create(self.metadata, self.test_study,
                             self.data_type_id)
    self.assertEqual(pt.status, 'sandbox')
def test_update_category(self):
    # Unknown sample ids and unknown columns are rejected up front.
    with self.assertRaises(QiitaDBUnknownIDError):
        self.tester.update_category('barcode', {"foo": "bar"})
    with self.assertRaises(QiitaDBColumnError):
        self.tester.update_category('missing column',
                                    {'1.SKB7.640196': 'bar'})
    # Samples not present in the mapping must keep their original value.
    untouched = self.tester['1.SKB7.640196']['barcode']
    self.tester.update_category(
        'barcode', {'1.SKB8.640193': 'AAAAAAAAAAAA',
                    '1.SKD8.640184': 'CCCCCCCCCCCC'})
    self.assertEqual(self.tester['1.SKB7.640196']['barcode'], untouched)
    self.assertEqual(self.tester['1.SKB8.640193']['barcode'],
                     'AAAAAAAAAAAA')
    self.assertEqual(self.tester['1.SKD8.640184']['barcode'],
                     'CCCCCCCCCCCC')
    # Same behavior for a different column.
    untouched = self.tester['1.SKB7.640196']['center_name']
    self.tester.update_category('center_name',
                                {'1.SKB8.640193': 'FOO',
                                 '1.SKD8.640184': 'BAR'})
    self.assertEqual(self.tester['1.SKB7.640196']['center_name'],
                     untouched)
    self.assertEqual(self.tester['1.SKB8.640193']['center_name'], 'FOO')
    self.assertEqual(self.tester['1.SKD8.640184']['center_name'], 'BAR')
def test_qiime_map_fp(self):
    # The qiime mapping file lives under the 'templates' mountpoint.
    template = PrepTemplate(1)
    mountpoint = get_mountpoint('templates')[0][1]
    expected = join(mountpoint, '1_prep_1_qiime_19700101-000000.txt')
    self.assertEqual(template.qiime_map_fp, expected)
def test_check_restrictions(self):
    # The complete template satisfies the EBI restrictions.
    obs = self.tester.check_restrictions([PREP_TEMPLATE_COLUMNS['EBI']])
    self.assertEqual(obs, set())
    # Dropping 'primer' makes both EBI and demultiplexing report it.
    del self.metadata['primer']
    template = npt.assert_warns(QiitaDBWarning, PrepTemplate.create,
                                self.metadata, self.test_study,
                                self.data_type)
    restrictions = [PREP_TEMPLATE_COLUMNS['EBI'],
                    PREP_TEMPLATE_COLUMNS_TARGET_GENE['demultiplex']]
    self.assertEqual(template.check_restrictions(restrictions),
                     {'primer'})
def test_raw_data(self):
    """Returns the raw_data associated with the prep template"""
    # The fixture template is linked to raw data 1...
    self.assertEqual(self.tester.raw_data, 1)
    # ...while a brand-new template has no raw data yet.
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type_id)
    self.assertEqual(template.raw_data, None)
def test_raw_data_setter_error(self):
    # Re-linking raw data on a template that already has one must fail.
    raw = RawData(1)
    with self.assertRaises(QiitaDBError):
        self.tester.raw_data = raw
def test_raw_data_setter(self):
    # A new template starts without raw data and accepts a link once.
    raw = RawData(1)
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type_id)
    self.assertEqual(template.raw_data, None)
    template.raw_data = raw
    self.assertEqual(template.raw_data, raw.id)
def test_can_be_updated_on_new(self):
    """test if the template can be updated"""
    # A freshly created prep template is always updatable.
    template = PrepTemplate.create(self.metadata, self.test_study,
                                   self.data_type)
    self.assertTrue(template.can_be_updated({'barcode'}))
def test_extend_add_samples(self):
    """extend correctly works adding new samples"""
    # Create the template with only two samples, then extend with the
    # full metadata (one extra sample) and expect a warning.
    md_2_samples = self.metadata.loc[('SKB8.640193', 'SKD8.640184'), :]
    pt = PrepTemplate.create(md_2_samples, self.test_study, self.data_type)
    npt.assert_warns(QiitaDBWarning, pt.extend, self.metadata)
    # Test samples were appended successfully to the prep template sample
    sql = """SELECT *
             FROM qiita.prep_template_sample
             WHERE prep_template_id = %s"""
    obs = [dict(o)
           for o in self.conn_handler.execute_fetchall(sql, (pt.id,))]
    # FIX: use pt.id instead of a hard-coded template id (2) so the
    # expectation stays valid regardless of test execution order.
    exp = [{'prep_template_id': pt.id, 'sample_id': '1.SKB8.640193'},
           {'prep_template_id': pt.id, 'sample_id': '1.SKD8.640184'},
           {'prep_template_id': pt.id, 'sample_id': '1.SKB7.640196'}]
    self.assertItemsEqual(obs, exp)
def test_extend_add_samples_error(self):
    """extend fails adding samples to an already preprocessed template"""
    new_sample = pd.DataFrame.from_dict(
        {'new_sample': {'barcode': 'CCTCTGAGAGCT'}}, orient='index')
    # Template 1 is preprocessed, so extending it must be rejected.
    with self.assertRaises(QiitaDBError):
        PrepTemplate(1).extend(new_sample)
def test_extend_add_cols(self):
    """extend correctly adds a new columns"""
    pt = PrepTemplate.create(self.metadata, self.test_study,
                             self.data_type)
    # Add a brand-new column to the metadata and extend; extend is
    # expected to warn (samples already exist) but add the column.
    self.metadata['new_col'] = pd.Series(['val1', 'val2', 'val3'],
                                         index=self.metadata.index)
    npt.assert_warns(QiitaDBWarning, pt.extend, self.metadata)
    # The dynamic per-template table must now contain 'new_col' for
    # every sample, with all pre-existing values untouched.
    sql = "SELECT * FROM qiita.prep_{0}".format(pt.id)
    obs = [dict(o) for o in self.conn_handler.execute_fetchall(sql)]
    exp = [{'sample_id': '1.SKB7.640196',
            'barcode': 'CCTCTGAGAGCT',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L002_sequences',
            'str_column': 'Value for sample 3',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP',
            'new_col': 'val1'},
           {'sample_id': '1.SKB8.640193',
            'barcode': 'GTCCGCAAGTTA',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L001_sequences',
            'str_column': 'Value for sample 1',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP',
            'new_col': 'val2'},
           {'sample_id': '1.SKD8.640184',
            'barcode': 'CGTAGAGCTCTC',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L001_sequences',
            'str_column': 'Value for sample 2',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP',
            'new_col': 'val3'}]
    self.assertItemsEqual(obs, exp)
def test_extend_update(self):
    """extend followed by update applies both new columns and new values"""
    pt = PrepTemplate.create(self.metadata, self.test_study,
                             self.data_type)
    self.metadata['new_col'] = pd.Series(['val1', 'val2', 'val3'],
                                         index=self.metadata.index)
    # FIX: use .loc instead of chained indexing
    # (metadata['str_column']['SKB7.640196'] = ...), which may silently
    # assign to a temporary copy (pandas SettingWithCopy hazard).
    self.metadata.loc['SKB7.640196', 'str_column'] = 'NEW VAL'
    npt.assert_warns(QiitaDBWarning, pt.extend, self.metadata)
    pt.update(self.metadata)
    # The dynamic table must show both the new column and the new value.
    sql = "SELECT * FROM qiita.prep_{0}".format(pt.id)
    obs = [dict(o) for o in self.conn_handler.execute_fetchall(sql)]
    exp = [{'sample_id': '1.SKB7.640196',
            'barcode': 'CCTCTGAGAGCT',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L002_sequences',
            'str_column': 'NEW VAL',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP',
            'new_col': 'val1'},
           {'sample_id': '1.SKB8.640193',
            'barcode': 'GTCCGCAAGTTA',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L001_sequences',
            'str_column': 'Value for sample 1',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP',
            'new_col': 'val2'},
           {'sample_id': '1.SKD8.640184',
            'barcode': 'CGTAGAGCTCTC',
            'ebi_submission_accession': None,
            'experiment_design_description': 'BBBB',
            'library_construction_protocol': 'AAAA',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'platform': 'ILLUMINA',
            'run_prefix': 's_G1_L001_sequences',
            'str_column': 'Value for sample 2',
            'center_name': 'ANL',
            'center_project_name': 'Test Project',
            'emp_status': 'EMP',
            'new_col': 'val3'}]
    self.assertItemsEqual(obs, exp)
# Expected tab-delimited serialization of the prep template created from
# self.metadata; used by test_to_file. Header columns appear alphabetically
# after sample_name, and sample rows follow in sample-id order.
EXP_PREP_TEMPLATE = (
    'sample_name\tbarcode\tcenter_name\tcenter_project_name\t'
    'ebi_submission_accession\temp_status\texperiment_design_description\t'
    'library_construction_protocol\tplatform\tprimer\t'
    'run_prefix\tstr_column\n'
    '1.SKB7.640196\tCCTCTGAGAGCT\tANL\tTest Project\t\tEMP\tBBBB\tAAAA\t'
    'ILLUMINA\tGTGCCAGCMGCCGCGGTAA\ts_G1_L002_sequences\tValue for sample 3\n'
    '1.SKB8.640193\tGTCCGCAAGTTA\tANL\tTest Project\t\tEMP\tBBBB\tAAAA\t'
    'ILLUMINA\tGTGCCAGCMGCCGCGGTAA\ts_G1_L001_sequences\tValue for sample 1\n'
    '1.SKD8.640184\tCGTAGAGCTCTC\tANL\tTest Project\t\tEMP\tBBBB\tAAAA\t'
    'ILLUMINA\tGTGCCAGCMGCCGCGGTAA\ts_G1_L001_sequences\tValue for sample 2\n')

# Allow running this test module directly.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
import pytest
import importlib
from comport.content.models import ChartBlock
from comport.department.models import Department
from comport.data.models import OfficerInvolvedShootingBPD, UseOfForceIncidentBPD, CitizenComplaintBPD
@pytest.mark.usefixtures('db')
class TestDepartmentModelBPD:
    """Tests for BPD-specific Department behavior.

    Covers the chart-block grouping helpers (complaints / uof / ois /
    assaults, plus their schema variants) and the dataset visibility
    logic (public flag + presence of data).
    """

    def test_get_complaint_blocks(self):
        ''' Set and get complaint chart blocks.
        '''
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=False)

        # create & append chart blocks with the expected slugs
        complaint_intro = ChartBlock(title="INTRO", dataset="intros", slug="complaints-introduction")
        complaint_bm = ChartBlock(title="BYMONTH", dataset="bymonth", slug="complaints-by-month")
        complaint_bya = ChartBlock(title="BYALLEGATION", dataset="bya", slug="complaints-by-allegation")
        complaint_bdis = ChartBlock(title="BYDISPOSITION", dataset="bdis", slug="complaints-by-disposition")
        complaint_bass = ChartBlock(title="BYPRECINCT", dataset="bpre", slug="complaints-by-assignment")
        complaint_od = ChartBlock(title="OFFICERDEMOS", dataset="od", slug="officer-demographics")
        complaint_bde = ChartBlock(title="BYDEMO", dataset="bde", slug="complaints-by-demographic")
        complaint_bof = ChartBlock(title="BYOFFICER", dataset="bof", slug="complaints-by-officer-with-cap")

        department.chart_blocks.append(complaint_intro)
        department.chart_blocks.append(complaint_bm)
        department.chart_blocks.append(complaint_bya)
        department.chart_blocks.append(complaint_bdis)
        department.chart_blocks.append(complaint_bass)
        department.chart_blocks.append(complaint_od)
        department.chart_blocks.append(complaint_bde)
        department.chart_blocks.append(complaint_bof)
        department.save()

        # verify that the blocks are returned in the expected structure:
        # intro and first block are keyed separately; the rest keep their
        # insertion order in 'blocks'.
        complaint_blocks = department.get_complaint_blocks()
        assert complaint_blocks['introduction'] == complaint_intro
        assert complaint_blocks['first-block'] == complaint_bm
        assert complaint_blocks['blocks'][0] == complaint_bya
        assert complaint_blocks['blocks'][1] == complaint_bdis
        assert complaint_blocks['blocks'][2] == complaint_bass
        assert complaint_blocks['blocks'][3] == complaint_od
        assert complaint_blocks['blocks'][4] == complaint_bde
        assert complaint_blocks['blocks'][5] == complaint_bof

    def test_get_complaint_schema_blocks(self):
        ''' Set and get complaint schema chart blocks.
        '''
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=False)

        # create & append chart blocks with the expected slugs
        complaint_intro = ChartBlock(title="INTRO", dataset="intros", slug="complaints-schema-introduction")
        complaint_id = ChartBlock(title="FIELDID", dataset="fid", slug="complaints-schema-field-id")
        complaint_od = ChartBlock(title="OCCURREDDATE", dataset="fod", slug="complaints-schema-field-occurred-date")
        complaint_div = ChartBlock(title="DIVISION", dataset="div", slug="complaints-schema-field-division")
        complaint_dis = ChartBlock(title="DISTRICT", dataset="dis", slug="complaints-schema-field-district")
        complaint_shift = ChartBlock(title="SHIFT", dataset="shift", slug="complaints-schema-field-shift")
        complaint_footer = ChartBlock(title="FOOTER", dataset="footer", slug="complaints-schema-footer")
        complaint_disclaimer = ChartBlock(title="DISCLAIMER", dataset="disclaimer", slug="complaints-schema-disclaimer")

        department.chart_blocks.append(complaint_intro)
        department.chart_blocks.append(complaint_id)
        department.chart_blocks.append(complaint_od)
        department.chart_blocks.append(complaint_div)
        department.chart_blocks.append(complaint_dis)
        department.chart_blocks.append(complaint_shift)
        department.chart_blocks.append(complaint_footer)
        department.chart_blocks.append(complaint_disclaimer)
        department.save()

        # verify that the blocks are returned in the expected structure.
        # NOTE(review): membership (not index) is asserted for the field
        # blocks — presumably their order inside 'blocks' is unspecified.
        complaint_blocks = department.get_complaint_schema_blocks()
        assert complaint_blocks['introduction'] == complaint_intro
        assert complaint_blocks['footer'] == complaint_footer
        assert complaint_blocks['disclaimer'] == complaint_disclaimer
        assert complaint_id in complaint_blocks['blocks']
        assert complaint_od in complaint_blocks['blocks']
        assert complaint_div in complaint_blocks['blocks']
        assert complaint_dis in complaint_blocks['blocks']
        assert complaint_shift in complaint_blocks['blocks']

    def test_get_uof_blocks(self):
        ''' Set and get uof chart blocks.
        '''
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=False)

        # create & append chart blocks with the expected slugs
        uof_intro = ChartBlock(title="INTRO", dataset="intros", slug="uof-introduction")
        uof_bm = ChartBlock(title="BYMONTH", dataset="bymonth", slug="uof-by-month")
        uof_ft = ChartBlock(title="FORCETYPE", dataset="forcetype", slug="uof-force-type")
        uof_bass = ChartBlock(title="BYASSIGNMENT", dataset="bid", slug="uof-by-assignment")
        uof_od = ChartBlock(title="OFFICERDEMOS", dataset="od", slug="officer-demographics")
        uof_race = ChartBlock(title="RACE", dataset="race", slug="uof-race")

        department.chart_blocks.append(uof_intro)
        department.chart_blocks.append(uof_bm)
        department.chart_blocks.append(uof_ft)
        department.chart_blocks.append(uof_bass)
        department.chart_blocks.append(uof_od)
        department.chart_blocks.append(uof_race)
        department.save()

        # verify that the blocks are returned in the expected structure
        uof_blocks = department.get_uof_blocks()
        assert uof_blocks['introduction'] == uof_intro
        assert uof_blocks['first-block'] == uof_bm
        assert uof_blocks['blocks'][0] == uof_ft
        assert uof_blocks['blocks'][1] == uof_bass
        assert uof_blocks['blocks'][2] == uof_od
        assert uof_blocks['blocks'][3] == uof_race

    def test_get_uof_schema_blocks(self):
        ''' Set and get uof schema chart blocks.
        '''
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=False)

        # create & append chart blocks with the expected slugs
        uof_intro = ChartBlock(title="INTRO", dataset="intros", slug="uof-schema-introduction")
        uof_id = ChartBlock(title="FIELDID", dataset="fid", slug="uof-schema-field-id")
        uof_od = ChartBlock(title="OCCURREDDATE", dataset="fod", slug="uof-schema-field-occurred-date")
        uof_div = ChartBlock(title="DIVISION", dataset="div", slug="uof-schema-field-division")
        uof_dis = ChartBlock(title="DISTRICT", dataset="dis", slug="uof-schema-field-district")
        uof_shift = ChartBlock(title="SHIFT", dataset="shift", slug="uof-schema-field-shift")
        uof_footer = ChartBlock(title="FOOTER", dataset="footer", slug="uof-schema-footer")
        uof_disclaimer = ChartBlock(title="DISCLAIMER", dataset="disclaimer", slug="uof-schema-disclaimer")

        department.chart_blocks.append(uof_intro)
        department.chart_blocks.append(uof_id)
        department.chart_blocks.append(uof_od)
        department.chart_blocks.append(uof_div)
        department.chart_blocks.append(uof_dis)
        department.chart_blocks.append(uof_shift)
        department.chart_blocks.append(uof_footer)
        department.chart_blocks.append(uof_disclaimer)
        department.save()

        # verify that the blocks are returned in the expected structure
        uof_blocks = department.get_uof_schema_blocks()
        assert uof_blocks['introduction'] == uof_intro
        assert uof_blocks['footer'] == uof_footer
        assert uof_blocks['disclaimer'] == uof_disclaimer
        assert uof_id in uof_blocks['blocks']
        assert uof_od in uof_blocks['blocks']
        assert uof_div in uof_blocks['blocks']
        assert uof_dis in uof_blocks['blocks']
        assert uof_shift in uof_blocks['blocks']

    def test_get_ois_blocks(self):
        ''' Set and get ois chart blocks.
        '''
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=False)

        # create & append chart blocks with the expected slugs
        ois_intro = ChartBlock(title="INTRO", dataset="intros", slug="ois-introduction")
        ois_bm = ChartBlock(title="BYMONTH", dataset="bm", slug="ois-by-month")
        ois_bid = ChartBlock(title="BYASSIGNMENT", dataset="bid", slug="ois-by-assignment")
        ois_od = ChartBlock(title="OFFICERDEMOS", dataset="od", slug="officer-demographics")
        ois_race = ChartBlock(title="RACE", dataset="race", slug="ois-race")

        department.chart_blocks.append(ois_intro)
        department.chart_blocks.append(ois_bm)
        department.chart_blocks.append(ois_bid)
        department.chart_blocks.append(ois_od)
        department.chart_blocks.append(ois_race)
        department.save()

        # verify that the blocks are returned in the expected structure
        ois_blocks = department.get_ois_blocks()
        assert ois_blocks['introduction'] == ois_intro
        assert ois_blocks['first-block'] == ois_bm
        assert ois_blocks['blocks'][0] == ois_bid
        assert ois_blocks['blocks'][1] == ois_od
        assert ois_blocks['blocks'][2] == ois_race

    def test_get_ois_schema_blocks(self):
        ''' Set and get ois schema chart blocks.
        '''
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=False)

        # create & append chart blocks with the expected slugs
        ois_intro = ChartBlock(title="INTRO", dataset="intros", slug="ois-schema-introduction")
        ois_id = ChartBlock(title="FIELDID", dataset="fid", slug="ois-schema-field-id")
        ois_od = ChartBlock(title="OCCURREDDATE", dataset="fod", slug="ois-schema-field-occurred-date")
        ois_div = ChartBlock(title="DIVISION", dataset="div", slug="ois-schema-field-division")
        ois_dis = ChartBlock(title="DISTRICT", dataset="dis", slug="ois-schema-field-district")
        ois_shift = ChartBlock(title="SHIFT", dataset="shift", slug="ois-schema-field-shift")
        ois_footer = ChartBlock(title="FOOTER", dataset="footer", slug="ois-schema-footer")
        ois_disclaimer = ChartBlock(title="DISCLAIMER", dataset="disclaimer", slug="ois-schema-disclaimer")

        department.chart_blocks.append(ois_intro)
        department.chart_blocks.append(ois_id)
        department.chart_blocks.append(ois_od)
        department.chart_blocks.append(ois_div)
        department.chart_blocks.append(ois_dis)
        department.chart_blocks.append(ois_shift)
        department.chart_blocks.append(ois_footer)
        department.chart_blocks.append(ois_disclaimer)
        department.save()

        # verify that the blocks are returned in the expected structure
        ois_blocks = department.get_ois_schema_blocks()
        assert ois_blocks['introduction'] == ois_intro
        assert ois_blocks['footer'] == ois_footer
        assert ois_blocks['disclaimer'] == ois_disclaimer
        assert ois_id in ois_blocks['blocks']
        assert ois_od in ois_blocks['blocks']
        assert ois_div in ois_blocks['blocks']
        assert ois_dis in ois_blocks['blocks']
        assert ois_shift in ois_blocks['blocks']

    def test_get_assaults_blocks(self):
        ''' Set and get assaults chart blocks.
        '''
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=False)

        # create & append chart blocks with the expected slugs
        assault_intro = ChartBlock(title="INTRO", dataset="intros", slug="assaults-introduction")
        assault_bst = ChartBlock(title="BYINCDISTRICT", dataset="bst", slug="assaults-by-service-type")
        assault_bft = ChartBlock(title="WEAPONTYPE", dataset="bft", slug="assaults-by-force-type")
        assault_bo = ChartBlock(title="OFFICERDEMOS", dataset="bo", slug="assaults-by-officer")

        department.chart_blocks.append(assault_intro)
        department.chart_blocks.append(assault_bst)
        department.chart_blocks.append(assault_bft)
        department.chart_blocks.append(assault_bo)
        department.save()

        # verify that the blocks are returned in the expected structure
        assault_blocks = department.get_assaults_blocks()
        assert assault_blocks['introduction'] == assault_intro
        assert assault_blocks['first-block'] == assault_bst
        assert assault_blocks['blocks'][0] == assault_bft
        assert assault_blocks['blocks'][1] == assault_bo

    def test_get_assaults_schema_blocks(self):
        ''' Set and get assaults schema chart blocks.
        '''
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=False)

        # create & append chart blocks with the expected slugs
        assaults_intro = ChartBlock(title="INTRO", dataset="intros", slug="assaults-schema-introduction")
        assaults_fid = ChartBlock(title="FIELDID", dataset="fid", slug="assaults-schema-field-id")
        assaults_foi = ChartBlock(title="OCCURREDDATE", dataset="fod", slug="assaults-schema-field-officer-identifier")
        assaults_fst = ChartBlock(title="DIVISION", dataset="div", slug="assaults-schema-field-service-type")
        assaults_fft = ChartBlock(title="DISTRICT", dataset="dis", slug="assaults-schema-field-force-type")
        assaults_ffa = ChartBlock(title="SHIFT", dataset="shift", slug="assaults-schema-field-assignment")
        assaults_footer = ChartBlock(title="FOOTER", dataset="footer", slug="assaults-schema-footer")
        assaults_disclaimer = ChartBlock(title="DISCLAIMER", dataset="disclaimer", slug="assaults-schema-disclaimer")

        department.chart_blocks.append(assaults_intro)
        department.chart_blocks.append(assaults_fid)
        department.chart_blocks.append(assaults_foi)
        department.chart_blocks.append(assaults_fst)
        department.chart_blocks.append(assaults_fft)
        department.chart_blocks.append(assaults_ffa)
        department.chart_blocks.append(assaults_footer)
        department.chart_blocks.append(assaults_disclaimer)
        department.save()

        # verify that the blocks are returned in the expected structure
        assaults_blocks = department.get_assaults_schema_blocks()
        assert assaults_blocks['introduction'] == assaults_intro
        assert assaults_blocks['footer'] == assaults_footer
        assert assaults_blocks['disclaimer'] == assaults_disclaimer
        assert assaults_fid in assaults_blocks['blocks']
        assert assaults_foi in assaults_blocks['blocks']
        assert assaults_fst in assaults_blocks['blocks']
        assert assaults_fft in assaults_blocks['blocks']
        assert assaults_ffa in assaults_blocks['blocks']

    def test_get_dataset_lookup(self):
        ''' The dataset lookup returns usable information
        '''
        # create a department
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=True)

        complaints_lookup = department.get_dataset_lookup("complaints")
        uof_lookup = department.get_dataset_lookup("uof")
        ois_lookup = department.get_dataset_lookup("ois")
        assaults_lookup = department.get_dataset_lookup("assaults")

        # TODO: how to test that paths are valid?

        # test that the var suffixes are valid: each one must name an
        # existing "is_public_*" attribute on the department
        try:
            getattr(department, "is_public_{}".format(complaints_lookup["var_suffix"]))
        except AttributeError:
            pytest.fail("Unexpected AttributeError")
        try:
            getattr(department, "is_public_{}".format(uof_lookup["var_suffix"]))
        except AttributeError:
            pytest.fail("Unexpected AttributeError")
        try:
            getattr(department, "is_public_{}".format(ois_lookup["var_suffix"]))
        except AttributeError:
            pytest.fail("Unexpected AttributeError")
        try:
            getattr(department, "is_public_{}".format(assaults_lookup["var_suffix"]))
        except AttributeError:
            pytest.fail("Unexpected AttributeError")

        # test that the class prefixes are valid: prefix + short name must
        # resolve to a model class in comport.data.models
        try:
            getattr(importlib.import_module("comport.data.models"), "{}{}".format(complaints_lookup["class_prefix"], department.short_name))
        except AttributeError:
            pytest.fail("Unexpected AttributeError")
        try:
            getattr(importlib.import_module("comport.data.models"), "{}{}".format(uof_lookup["class_prefix"], department.short_name))
        except AttributeError:
            pytest.fail("Unexpected AttributeError")
        try:
            getattr(importlib.import_module("comport.data.models"), "{}{}".format(ois_lookup["class_prefix"], department.short_name))
        except AttributeError:
            pytest.fail("Unexpected AttributeError")

        # BPD doesn't have assaults data when this test is written
        with pytest.raises(AttributeError):
            getattr(importlib.import_module("comport.data.models"), "{}{}".format(assaults_lookup["class_prefix"], department.short_name))

    def test_dataset_is_public_and_has_data(self):
        ''' We can accurately tell if a dataset is public and has data.
        '''
        # create a department
        department = Department.create(name="B Police Department", short_name="BPD", load_defaults=True)

        # none of the datasets have data, so they should all return false
        assert department.dataset_is_public_and_has_data("complaints") == False
        assert department.dataset_is_public_and_has_data("uof") == False
        assert department.dataset_is_public_and_has_data("ois") == False
        assert department.dataset_is_public_and_has_data("assaults") == False
        # the total count should be zero
        assert department.displayable_dataset_count() == 0

        # create incidents and verify that the datasets are now displayable
        CitizenComplaintBPD.create(department_id=department.id, opaque_id="12345abcde")
        assert department.dataset_is_public_and_has_data("complaints") == True
        assert department.displayable_dataset_count() == 1

        UseOfForceIncidentBPD.create(department_id=department.id, opaque_id="23456bcdef")
        assert department.dataset_is_public_and_has_data("uof") == True
        assert department.displayable_dataset_count() == 2

        OfficerInvolvedShootingBPD.create(department_id=department.id, opaque_id="34567cdefg")
        assert department.dataset_is_public_and_has_data("ois") == True
        assert department.displayable_dataset_count() == 3

        # now make them all not public, and they should be false again
        department.is_public_citizen_complaints = False
        assert department.dataset_is_public_and_has_data("complaints") == False
        department.is_public_use_of_force_incidents = False
        assert department.dataset_is_public_and_has_data("uof") == False
        department.is_public_officer_involved_shootings = False
        assert department.dataset_is_public_and_has_data("ois") == False
        assert department.displayable_dataset_count() == 0
| |
import datetime
import aiohttp_jinja2
from aiohttp import web
from aiohttp_session import get_session
from bson import ObjectId
from . import db
from .security import generate_password_hash, check_password_hash
from .utils import redirect
class SiteHandler:
    """aiohttp request handlers for the microblog site, backed by MongoDB.

    All handlers read the logged-in user's id from the aiohttp session under
    the key 'user_id'.
    """

    def __init__(self, mongo):
        # The mongo database handle used by every handler below.
        self._mongo = mongo

    @property
    def mongo(self):
        """The MongoDB database handle this handler was constructed with."""
        return self._mongo

    @aiohttp_jinja2.template('timeline.html')
    async def timeline(self, request):
        """Show the logged-in user's timeline: own messages plus followed users'."""
        session = await get_session(request)
        user_id = session.get('user_id')
        if user_id is None:
            # Anonymous visitors are sent to the public timeline instead.
            router = request.app.router
            location = router['public_timeline'].url_for().human_repr()
            raise web.HTTPFound(location=location)
        user = await self.mongo.user.find_one({'_id': ObjectId(user_id)})
        query = {'who_id': ObjectId(user_id)}
        filter = {'whom_id': 1}
        followed = await self.mongo.follower.find_one(query, filter)
        if followed is None:
            # The user follows nobody yet; use an empty follow list.
            followed = {'whom_id': []}
        # Messages authored by the user or by anyone the user follows,
        # newest first, capped at 30.
        query = {'$or': [{'author_id': ObjectId(user_id)},
                         {'author_id': {'$in': followed['whom_id']}}]}
        messages = await self.mongo.message\
            .find(query)\
            .sort('pub_date', -1)\
            .to_list(30)
        endpoint = request.match_info.route.name
        return {"messages": messages,
                "user": user,
                "endpoint": endpoint}

    @aiohttp_jinja2.template('timeline.html')
    async def public_timeline(self, request):
        """Show the 30 most recent messages from all users."""
        messages = await self.mongo.message\
            .find()\
            .sort('pub_date', -1)\
            .to_list(30)
        return {"messages": messages,
                "endpoint": request.match_info.route.name}

    @aiohttp_jinja2.template('timeline.html')
    async def user_timeline(self, request):
        """Show a single user's messages, plus whether the viewer follows them."""
        username = request.match_info['username']
        profile_user = await self.mongo.user.find_one({'username': username})
        if profile_user is None:
            raise web.HTTPNotFound()
        followed = False
        session = await get_session(request)
        user_id = session.get('user_id')
        user = None
        if user_id:
            user = await self.mongo.user.find_one({'_id': ObjectId(user_id)})
            # Does the viewer's follow document list the profile user?
            followed = await self.mongo.follower.find_one(
                {'who_id': ObjectId(session['user_id']),
                 'whom_id': {'$in': [ObjectId(profile_user['_id'])]}})
            followed = followed is not None
        messages = await self.mongo.message\
            .find({'author_id': ObjectId(profile_user['_id'])})\
            .sort('pub_date', -1)\
            .to_list(30)
        # Stringify the ObjectId so the template/serialization can handle it.
        profile_user['_id'] = str(profile_user['_id'])
        return {"messages": messages,
                "followed": followed,
                "profile_user": profile_user,
                "user": user,
                "endpoint": request.match_info.route.name}

    @aiohttp_jinja2.template('login.html')
    async def login(self, request):
        """Render the login form (GET) or authenticate the user (POST)."""
        session = await get_session(request)
        user_id = session.get('user_id')
        if user_id:
            # Already logged in.
            return redirect(request, 'timeline')
        error = None
        form = None
        if request.method == 'POST':
            form = await request.post()
            user = await self.mongo.user.find_one(
                {'username': form['username']})
            if user is None:
                error = 'Invalid username'
            elif not check_password_hash(user['pw_hash'], form['password']):
                error = 'Invalid password'
            else:
                session['user_id'] = str(user['_id'])
                return redirect(request, 'timeline')
        return {"error": error, "form": form}

    async def logout(self, request):
        """Log the current user out and return to the public timeline."""
        session = await get_session(request)
        session.pop('user_id', None)
        return redirect(request, 'public_timeline')

    @aiohttp_jinja2.template('register.html')
    async def register(self, request):
        """Registers the user."""
        session = await get_session(request)
        user_id = session.get('user_id')
        if user_id:
            return redirect(request, 'timeline')
        error = None
        form = None
        if request.method == 'POST':
            form = await request.post()
            # Existing id for the requested username, if any (duplicate check).
            user_id = await db.get_user_id(self.mongo.user, form['username'])
            if not form['username']:
                error = 'You have to enter a username'
            elif not form['email'] or '@' not in form['email']:
                error = 'You have to enter a valid email address'
            elif not form['password']:
                error = 'You have to enter a password'
            elif form['password'] != form['password2']:
                error = 'The two passwords do not match'
            elif user_id is not None:
                error = 'The username is already taken'
            else:
                await self.mongo.user.insert_one(
                    {'username': form['username'],
                     'email': form['email'],
                     'pw_hash': generate_password_hash(form['password'])})
                return redirect(request, 'login')
        return {"error": error, "form": form}

    async def follow_user(self, request):
        """Adds the current user as follower of the given user."""
        username = request.match_info['username']
        session = await get_session(request)
        user_id = session.get('user_id')
        if not user_id:
            raise web.HTTPNotAuthorized()
        whom_id = await db.get_user_id(self.mongo.user, username)
        if whom_id is None:
            # NOTE(review): HTTPFound raised with no location for an unknown
            # username — looks like it should be HTTPNotFound; confirm.
            raise web.HTTPFound()
        # Upsert the follow document and push the followed user's id.
        await self.mongo.follower.update_many(
            {'who_id': ObjectId(user_id)},
            {'$push': {'whom_id': whom_id}}, upsert=True)
        return redirect(request, 'user_timeline', username=username)

    async def unfollow_user(self, request):
        """Removes the current user as follower of the given user."""
        username = request.match_info['username']
        session = await get_session(request)
        user_id = session.get('user_id')
        if not user_id:
            raise web.HTTPNotAuthorized()
        whom_id = await db.get_user_id(self.mongo.user, username)
        if whom_id is None:
            # NOTE(review): same as follow_user — HTTPFound with no location;
            # probably meant HTTPNotFound. Confirm before changing.
            raise web.HTTPFound()
        await self.mongo.follower.update_many(
            {'who_id': ObjectId(session['user_id'])},
            {'$pull': {'whom_id': whom_id}})
        return redirect(request, 'user_timeline', username=username)

    async def add_message(self, request):
        """Registers a new message for the user."""
        session = await get_session(request)
        user_id = session.get('user_id')
        if not user_id:
            raise web.HTTPNotAuthorized()
        form = await request.post()
        if form.get('text'):
            # Denormalize email/username into the message document.
            user = await self.mongo.user.find_one(
                {'_id': ObjectId(session['user_id'])},
                {'email': 1, 'username': 1})
            await self.mongo.message.insert_one(
                {'author_id': ObjectId(user_id),
                 'email': user['email'],
                 'username': user['username'],
                 'text': form['text'],
                 'pub_date': datetime.datetime.utcnow()})
        # Redirect regardless of whether a message was posted.
        return redirect(request, 'timeline')
| |
"""Helpers to resolve client ID/secret."""
import asyncio
from html.parser import HTMLParser
from ipaddress import ip_address
import logging
from urllib.parse import urljoin, urlparse
import aiohttp
from homeassistant.util.network import is_local
_LOGGER = logging.getLogger(__name__)
async def verify_redirect_uri(hass, client_id, redirect_uri):
    """Verify that the client and redirect uri match."""
    try:
        client_parts = _parse_client_id(client_id)
    except ValueError:
        return False

    target_parts = _parse_url(redirect_uri)

    # Same scheme and same host:port means the redirect stays on the
    # client's own site, which is always allowed.
    same_origin = (
        client_parts.scheme == target_parts.scheme
        and client_parts.netloc == target_parts.netloc
    )
    if same_origin:
        return True

    # Whitelist the iOS and Android callbacks so that people can link apps
    # without being connected to the internet.
    app_clients = (
        "https://home-assistant.io/android",
        "https://home-assistant.io/iOS",
    )
    if redirect_uri == "homeassistant://auth-callback" and client_id in app_clients:
        return True

    # IndieAuth 4.2.2 allows for redirect_uri to be on different domain
    # but needs to be specified in link tag when fetching `client_id`.
    return redirect_uri in await fetch_redirect_uris(hass, client_id)
class LinkTagParser(HTMLParser):
    """HTML parser that collects href values of <link> tags with a given rel."""

    def __init__(self, rel):
        """Initialize a link tag parser for the given rel attribute value."""
        super().__init__()
        self.rel = rel
        self.found = []

    def handle_starttag(self, tag, attrs):
        """Record the href of any <link> start tag whose rel matches."""
        if tag == "link":
            attr_map = dict(attrs)
            if attr_map.get("rel") == self.rel:
                self.found.append(attr_map.get("href"))
async def fetch_redirect_uris(hass, url):
    """Find link tag with redirect_uri values.

    IndieAuth 4.2.2

    The client SHOULD publish one or more <link> tags or Link HTTP headers with
    a rel attribute of redirect_uri at the client_id URL.

    We limit to the first 10kB of the page.

    We do not implement extracting redirect uris from headers.

    Note: ``hass`` is unused in this body; it is kept for call-site
    compatibility.

    Returns a list of absolute redirect URIs (possibly empty on any error).
    """
    parser = LinkTagParser("redirect_uri")
    chunks = 0
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url, timeout=5) as resp:
                # Feed the page to the parser 1 kB at a time, stopping after
                # 10 chunks (~10 kB) so a huge page cannot stall us.
                async for data in resp.content.iter_chunked(1024):
                    parser.feed(data.decode())
                    chunks += 1
                    if chunks == 10:
                        break
    # Handlers are ordered most-specific first; any failure is logged and
    # treated as "no redirect uris published".
    except asyncio.TimeoutError:
        _LOGGER.error("Timeout while looking up redirect_uri %s", url)
    except aiohttp.client_exceptions.ClientSSLError:
        _LOGGER.error("SSL error while looking up redirect_uri %s", url)
    except aiohttp.client_exceptions.ClientOSError as ex:
        _LOGGER.error("OS error while looking up redirect_uri %s: %s", url, ex.strerror)
    except aiohttp.client_exceptions.ClientConnectionError:
        _LOGGER.error(
            "Low level connection error while looking up redirect_uri %s", url
        )
    except aiohttp.client_exceptions.ClientError:
        _LOGGER.error("Unknown error while looking up redirect_uri %s", url)
    # Authorization endpoints verifying that a redirect_uri is allowed for use
    # by a client MUST look for an exact match of the given redirect_uri in the
    # request against the list of redirect_uris discovered after resolving any
    # relative URLs.
    return [urljoin(url, found) for found in parser.found]
def verify_client_id(client_id):
    """Return True when client_id parses as a valid IndieAuth client URL."""
    try:
        _parse_client_id(client_id)
    except ValueError:
        return False
    return True
def _parse_url(url):
"""Parse a url in parts and canonicalize according to IndieAuth."""
parts = urlparse(url)
# Canonicalize a url according to IndieAuth 3.2.
# SHOULD convert the hostname to lowercase
parts = parts._replace(netloc=parts.netloc.lower())
# If a URL with no path component is ever encountered,
# it MUST be treated as if it had the path /.
if parts.path == "":
parts = parts._replace(path="/")
return parts
def _parse_client_id(client_id):
    """Test if client id is a valid URL according to IndieAuth section 3.2.

    https://indieauth.spec.indieweb.org/#client-identifier

    Returns the canonicalized URL parts; raises ValueError when invalid.
    """
    parts = _parse_url(client_id)

    # Client identifier URLs
    # MUST have either an https or http scheme
    if parts.scheme not in ("http", "https"):
        raise ValueError()

    # MUST contain a path component
    # Handled by url canonicalization.

    # MUST NOT contain single-dot or double-dot path segments
    if any(segment in (".", "..") for segment in parts.path.split("/")):
        raise ValueError(
            "Client ID cannot contain single-dot or double-dot path segments"
        )

    # MUST NOT contain a fragment component
    if parts.fragment != "":
        raise ValueError("Client ID cannot contain a fragment")

    # MUST NOT contain a username or password component
    if parts.username is not None:
        raise ValueError("Client ID cannot contain username")
    if parts.password is not None:
        raise ValueError("Client ID cannot contain password")

    # MAY contain a port
    try:
        # parts raises ValueError when port cannot be parsed as int
        parts.port
    except ValueError as ex:
        raise ValueError("Client ID contains invalid port") from ex

    # Additionally, hostnames
    # MUST be domain names or a loopback interface and
    # MUST NOT be IPv4 or IPv6 addresses except for IPv4 127.0.0.1
    # or IPv6 [::1]

    # We are not going to follow the spec here. We are going to allow
    # any internal network IP to be used inside a client id.

    address = None
    try:
        netloc = parts.netloc
        # Strip the [, ] from ipv6 addresses before parsing.
        # Guard against an empty netloc (e.g. "http:///path"), which would
        # otherwise raise an uncaught IndexError on netloc[0] instead of the
        # ValueError path below.
        if netloc and netloc[0] == "[" and netloc[-1] == "]":
            netloc = netloc[1:-1]
        address = ip_address(netloc)
    except ValueError:
        # Not an ip address
        pass

    if address is None or is_local(address):
        return parts

    raise ValueError("Hostname should be a domain name or local IP address")
| |
# The code for AlexNet is copied and adapted from the TensorFlow repository
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/alexnet/alexnet_benchmark.py.
import ray
import numpy as np
import tarfile, io
import boto3
import PIL.Image as Image
import tensorflow as tf
import ray.array.remote as ra
STDDEV = 0.001 # The standard deviation of the network weight initialization.
def load_chunk(tarfile, size=None):
    """Load a number of images from a single imagenet .tar file.

    This function also converts the image from grayscale to RGB if necessary.

    Args:
        tarfile (tarfile.TarFile): The archive from which the files get loaded.
        size (Optional[Tuple[int, int]]): Resize the image to this (width,
            height) if provided.

    Returns:
        Tuple[numpy.ndarray, List[str]]: The image data in format
            [batch, height, width, 3] and the corresponding member names.
    """
    result = []
    filenames = []
    for member in tarfile.getmembers():
        filename = member.path
        content = tarfile.extractfile(member)
        img = Image.open(content)
        # Paste onto a fresh RGB canvas to convert grayscale images to RGB.
        rgbimg = Image.new("RGB", img.size)
        rgbimg.paste(img)
        if size is not None:
            rgbimg = rgbimg.resize(size, Image.ANTIALIAS)
        # np.array on a PIL image yields shape (height, width, 3). Add the
        # batch axis from the array's own shape rather than reshaping with
        # PIL's (width, height) size tuple, which silently mislabeled the
        # two spatial dimensions for non-square images.
        arr = np.array(rgbimg)
        result.append(arr.reshape((1,) + arr.shape))
        filenames.append(filename)
    return np.concatenate(result), filenames
@ray.remote(num_return_vals=2)
def load_tarfile_from_s3(bucket, s3_key, size=[]):
    """Load an imagenet .tar file.

    Args:
        bucket (str): Bucket holding the imagenet .tar.
        s3_key (str): s3 key from which the .tar file is loaded.
        size (List[int]): Resize the image to this size if size != []; len(size) == 2 required.
            NOTE(review): mutable default argument; harmless here because it is
            never mutated, but ``size=None`` would be the safer idiom.

    Returns:
        np.ndarray: The image data (see load_chunk).
    """
    s3 = boto3.client("s3")
    response = s3.get_object(Bucket=bucket, Key=s3_key)
    # Buffer the S3 body into memory 8 kB at a time so tarfile can seek in it.
    output = io.BytesIO()
    chunk = response["Body"].read(1024 * 8)
    while chunk:
        output.write(chunk)
        chunk = response["Body"].read(1024 * 8)
    output.seek(0)  # go to the beginning of the .tar file
    tar = tarfile.open(mode="r", fileobj=output)
    return load_chunk(tar, size=size if size != [] else None)
def load_tarfiles_from_s3(bucket, s3_keys, size=[]):
    """Kick off one remote load task per imagenet .tar file.

    Args:
        bucket (str): Bucket holding the imagenet .tars.
        s3_keys (List[str]): List of s3 keys from which the .tar files are
            being loaded.
        size (List[int]): Resize the images to this size if size does not
            equal []. The length of size must be 2.

    Returns:
        List of object IDs referring to the chunks of the images (see
        load_chunk).
    """
    return [load_tarfile_from_s3.remote(bucket, key, size) for key in s3_keys]
def setup_variables(params, placeholders, kernelshape, biasshape):
    """Create the variables for each layer.

    Appends a freshly initialized kernel and bias Variable to ``params`` and a
    matching pair of feed placeholders to ``placeholders``; both lists are
    mutated in place.

    Args:
        params (List): Network parameters used for creating feed_dicts
        placeholders (List): Placeholders used for feeding weights into
        kernelshape (List): Shape of the kernel used for the conv layer
        biasshape (List): Shape of the bias used

    Returns:
        None
    """
    kernel = tf.Variable(tf.truncated_normal(kernelshape, stddev=STDDEV))
    biases = tf.Variable(tf.constant(0.0, shape=biasshape, dtype=tf.float32),
                         trainable=True, name='biases')
    kernel_new = tf.placeholder(tf.float32, shape=kernel.get_shape())
    biases_new = tf.placeholder(tf.float32, shape=biases.get_shape())
    # NOTE(review): these assign ops are added to the TF graph but the local
    # handles are never used or returned — they look like dead code; confirm
    # nothing relies on the graph nodes before removing.
    update_kernel = kernel.assign(kernel_new)
    update_biases = biases.assign(biases_new)
    params += [kernel, biases]
    placeholders += [kernel_new, biases_new]
def conv_layer(parameters, prev_layer, shape, scope):
    """Construct a convolution + bias + ReLU layer for the network.

    Args:
        parameters (List): Parameter list whose last two entries are the
            kernel and bias created by setup_variables.
        prev_layer (Tensor): The previous layer to connect the network together.
        shape (List): The strides used for convolution.
        scope (Scope): Current scope of tensorflow.

    Returns:
        Tensor: Activation of the layer.
    """
    weights, bias_term = parameters[-2], parameters[-1]
    convolution = tf.nn.conv2d(prev_layer, weights, shape, padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(convolution, bias_term), name=scope)
def net_initialization():
    """Build the AlexNet training graph and return the handles needed later.

    Returns:
        Tuple: (comp_grads, sess, application, accuracy, images, y_true,
        dropout, placeholders, init_all_variables, get_weights, set_weights).
    """
    # Input placeholders: 224x224 RGB images and 1000-way one-hot labels.
    images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
    y_true = tf.placeholder(tf.float32, shape=[None, 1000])
    # Filled in-place by setup_variables for every layer.
    parameters = []
    placeholders = []
    # conv1
    with tf.name_scope('conv1') as scope:
        setup_variables(parameters, placeholders, [11, 11, 3, 96], [96])
        conv1 = conv_layer(parameters, images, [1, 4, 4, 1], scope)
    # pool1
    pool1 = tf.nn.max_pool(conv1,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID',
                           name='pool1')
    # lrn1
    pool1_lrn = tf.nn.lrn(pool1, depth_radius=5, bias=1.0,
                          alpha=0.0001, beta=0.75,
                          name="LocalResponseNormalization")
    # conv2
    with tf.name_scope('conv2') as scope:
        setup_variables(parameters, placeholders, [5, 5, 96, 256], [256])
        conv2 = conv_layer(parameters, pool1_lrn, [1, 1, 1, 1], scope)
    pool2 = tf.nn.max_pool(conv2,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID',
                           name='pool2')
    # lrn2
    pool2_lrn = tf.nn.lrn(pool2, depth_radius=5, bias=1.0,
                          alpha=0.0001, beta=0.75,
                          name="LocalResponseNormalization")
    # conv3
    with tf.name_scope('conv3') as scope:
        setup_variables(parameters, placeholders, [3, 3, 256, 384], [384])
        conv3 = conv_layer(parameters, pool2_lrn, [1, 1, 1, 1], scope)
    # conv4
    with tf.name_scope('conv4') as scope:
        setup_variables(parameters, placeholders, [3, 3, 384, 384], [384])
        conv4 = conv_layer(parameters, conv3, [1, 1, 1, 1], scope)
    # conv5
    with tf.name_scope('conv5') as scope:
        setup_variables(parameters, placeholders, [3, 3, 384, 256], [256])
        conv5 = conv_layer(parameters, conv4, [1, 1, 1, 1], scope)
    # pool5
    pool5 = tf.nn.max_pool(conv5,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID',
                           name='pool5')
    # lrn5
    pool5_lrn = tf.nn.lrn(pool5, depth_radius=5, bias=1.0,
                          alpha=0.0001, beta=0.75,
                          name="LocalResponseNormalization")
    # Dropout keep-probability, fed at run time (0.5 train, 1.0 eval below).
    dropout = tf.placeholder(tf.float32)
    # Three fully-connected layers; the conv output is flattened first.
    with tf.name_scope('fc1') as scope:
        n_input = int(np.prod(pool5_lrn.get_shape().as_list()[1:]))
        setup_variables(parameters, placeholders, [n_input, 4096], [4096])
        fc_in = tf.reshape(pool5_lrn, [-1, n_input])
        fc_layer1 = tf.nn.tanh(tf.nn.bias_add(tf.matmul(fc_in, parameters[-2]), parameters[-1]))
        fc_out1 = tf.nn.dropout(fc_layer1, dropout)
    with tf.name_scope('fc2') as scope:
        n_input = int(np.prod(fc_out1.get_shape().as_list()[1:]))
        setup_variables(parameters, placeholders, [n_input, 4096], [4096])
        fc_in = tf.reshape(fc_out1, [-1, n_input])
        fc_layer2 = tf.nn.tanh(tf.nn.bias_add(tf.matmul(fc_in, parameters[-2]), parameters[-1]))
        fc_out2 = tf.nn.dropout(fc_layer2, dropout)
    with tf.name_scope('fc3') as scope:
        n_input = int(np.prod(fc_out2.get_shape().as_list()[1:]))
        setup_variables(parameters, placeholders, [n_input, 1000], [1000])
        fc_in = tf.reshape(fc_out2, [-1, n_input])
        fc_layer3 = tf.nn.softmax(tf.nn.bias_add(tf.matmul(fc_in, parameters[-2]), parameters[-1]))
    # Renormalize the softmax output so each row sums exactly to one.
    y_pred = fc_layer3 / tf.reduce_sum(fc_layer3,
                                       reduction_indices=len(fc_layer3.get_shape()) - 1,
                                       keep_dims=True)
    # manual computation of crossentropy
    y_pred = tf.clip_by_value(y_pred, tf.cast(1e-10, dtype=tf.float32),
                              tf.cast(1. - 1e-10, dtype=tf.float32))
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_true * tf.log(y_pred),
                                                  reduction_indices=len(y_pred.get_shape()) - 1))
    opt = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)  # Any other optimizier can be placed here
    correct_pred = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    comp_grads = opt.compute_gradients(cross_entropy, parameters)
    # NOTE(review): gradients are applied by feeding averaged gradients into
    # `placeholders`, which is why apply_gradients pairs placeholders with
    # parameters here — confirm against the driver code.
    application = opt.apply_gradients(zip(placeholders, parameters))
    sess = tf.Session()
    init_all_variables = tf.initialize_all_variables()
    # In order to set the weights of the TensorFlow graph on a worker, we add
    # assignment nodes. To get the network weights (as a list of numpy arrays)
    # and to set the network weights (from a list of numpy arrays), use the
    # methods get_weights and set_weights. This can be done from within a remote
    # function or on the driver.
    def get_and_set_weights_methods():
        # One placeholder + assign node per trainable variable.
        assignment_placeholders = []
        assignment_nodes = []
        for var in tf.trainable_variables():
            assignment_placeholders.append(tf.placeholder(var.value().dtype, var.get_shape().as_list()))
            assignment_nodes.append(var.assign(assignment_placeholders[-1]))
        def get_weights():
            # Evaluate every trainable variable into a list of numpy arrays.
            return [v.eval(session=sess) for v in tf.trainable_variables()]
        def set_weights(new_weights):
            # Feed the numpy arrays through the assignment nodes created above.
            sess.run(assignment_nodes, feed_dict={p: w for p, w in zip(assignment_placeholders, new_weights)})
        return get_weights, set_weights
    get_weights, set_weights = get_and_set_weights_methods()
    return comp_grads, sess, application, accuracy, images, y_true, dropout, placeholders, init_all_variables, get_weights, set_weights
def net_reinitialization(net_vars):
    """Return the already-constructed network variables unchanged.

    The graph is built once by net_initialization; reinitialization simply
    hands the same tuple back.
    """
    return net_vars
@ray.remote
def num_images(batches):
    """Count the total number of images across all batches.

    Args:
        batches (List): Collection of batches of images and labels.

    Returns:
        int: The total number of images.
    """
    batch_shapes = ray.get([ra.shape.remote(batch) for batch in batches])
    return sum(shape[0] for shape in batch_shapes)
@ray.remote
def compute_mean_image(batches):
    """Compute the mean image over a list of batches of images.

    Args:
        batches (List[ObjectID]): A list of batches of images.

    Returns:
        ndarray: The mean image.
    """
    if not batches:
        raise Exception("No images were passed into `compute_mean_image`.")
    # Sum each batch remotely, then combine the per-batch sums locally and
    # divide by the total image count.
    per_batch_sums = [ra.sum.remote(batch, axis=0) for batch in batches]
    total_count = num_images.remote(batches)
    combined = np.sum(ray.get(per_batch_sums), axis=0).astype("float64")
    return combined / ray.get(total_count)
@ray.remote(num_return_vals=4)
def shuffle_arrays(first_images, first_labels, second_images, second_labels):
    """Shuffle the images and labels from two batches between each other.

    Args:
        first_images (ndarray): First batch of images.
        first_labels (ndarray): First batch of labels.
        second_images (ndarray): Second batch of images.
        second_labels (ndarray): Second batch of labels.

    Returns:
        ndarray: First batch of shuffled images.
        ndarray: First batch of shuffled labels.
        ndarray: Second batch of shuffled images.
        ndarray: Second batch of shuffled labels.
    """
    # Pool both batches, draw a random permutation, and split it back at the
    # original boundary so the output batch sizes match the inputs.
    images = np.concatenate((first_images, second_images))
    labels = np.concatenate((first_labels, second_labels))
    split_at = len(first_images)
    order = np.random.permutation(len(images))
    front, back = order[:split_at], order[split_at:]
    return images[front], labels[front], images[back], labels[back]
def shuffle_pair(first_batch, second_batch):
    """Shuffle two batches of data with each other.

    Args:
        first_batch (Tuple[ObjectID, ObjectID]): The first batch to be
            shuffled: (images object ID, labels object ID).
        second_batch (Tuple[ObjectID, ObjectID]): The second batch to be
            shuffled, in the same format.

    Returns:
        Tuple[ObjectID, ObjectID]: The first batch of shuffled data.
        Tuple[ObjectID, ObjectID]: The second batch of shuffled data.
    """
    shuffled = shuffle_arrays.remote(first_batch[0], first_batch[1],
                                     second_batch[0], second_batch[1])
    images1, labels1, images2, labels2 = shuffled
    return (images1, labels1), (images2, labels2)
@ray.remote
def filenames_to_labels(filenames, filename_label_dict):
    """Convert filename strings to integer labels.

    Args:
        filenames (List[str]): The filenames of the images.
        filename_label_dict (Dict[str, int]): A dictionary mapping filenames
            to integer labels.

    Returns:
        ndarray: Integer labels.
    """
    labels = [int(filename_label_dict[name]) for name in filenames]
    return np.asarray(labels)
def one_hot(x, num_classes=1000):
    """Convert an integer label to a one hot vector.

    Args:
        x (int): Index to be set to one.
        num_classes (int): Length of the returned vector. Defaults to the
            1000 imagenet classes, preserving the original behavior.

    Returns:
        ndarray: One hot vector of length num_classes.
    """
    vec = np.zeros([num_classes])
    vec[x] = 1.0
    return vec
def crop_images(images, cropped_height=224, cropped_width=224):
    """Randomly crop a batch of images.

    This is used to generate many slightly different images from each training
    example. The whole batch is cropped at the same random offset.

    Args:
        images (ndarray): A batch of images to crop. The shape of images should
            be batch_size x height x width x channels.
        cropped_height (int): Height of the crop (default 224).
        cropped_width (int): Width of the crop (default 224).

    Returns:
        ndarray: A batch of cropped images with shape
            batch_size x cropped_height x cropped_width x channels.
    """
    # Derive the source dimensions from the batch itself instead of assuming
    # hard-coded 256x256 inputs; behavior is unchanged for 256x256 batches.
    original_height = images.shape[1]
    original_width = images.shape[2]
    height_offset = np.random.randint(original_height - cropped_height + 1)
    width_offset = np.random.randint(original_width - cropped_width + 1)
    return images[:, height_offset:(height_offset + cropped_height),
                  width_offset:(width_offset + cropped_width), :]
def shuffle(batches):
    """Shuffle the data.

    This method groups the batches together in pairs and within each pair
    shuffles the data between the two members.

    Args:
        batches (List[Tuple[ObjectID, ObjectID]]): This is a list of tuples,
            where each tuple consists of two object IDs. The first component is
            an object ID for a batch of images, and the second component is an
            object ID for the corresponding batch of labels.

    Returns:
        List[Tuple[ObjectID, ObjectID]]: The shuffled data.
    """
    # Randomly permute the order of the batches.
    permuted_batches = np.random.permutation(batches)
    new_batches = []
    # Use floor division: on Python 3, len(batches) / 2 is a float and
    # range() would raise a TypeError.
    for i in range(len(batches) // 2):
        # Swap data between consecutive batches.
        shuffled_batch1, shuffled_batch2 = shuffle_pair(permuted_batches[2 * i], permuted_batches[2 * i + 1])
        new_batches += [shuffled_batch1, shuffled_batch2]
    if len(batches) % 2 == 1:
        # If there is an odd number of batches, don't forget the last one.
        new_batches.append(permuted_batches[-1])
    return new_batches
@ray.remote
def compute_grad(X, Y, mean, weights):
    """Computes the gradient of the network.

    Args:
        X (ndarray): Numpy array of images in the form of [224, 224, 3]
        Y (ndarray): Labels corresponding to each image
        mean (ndarray): Mean image to subtract from images
        weights (List[ndarray]): The network weights.

    Returns:
        List of gradients for each variable
    """
    # The graph/session handles are shared per worker through the ray
    # reusable variable (built by net_initialization elsewhere in this file).
    comp_grads, sess, _, _, images, y_true, dropout, placeholders, _, get_weights, set_weights = ray.reusables.net_vars
    # Set the network weights.
    set_weights(weights)
    # Choose a subset of the batch to compute on and crop the images.
    # NOTE(review): np.random.randint samples WITH replacement, so the
    # 128-image minibatch may contain duplicates — confirm this is intended.
    random_indices = np.random.randint(0, len(X), size=128)
    subset_X = crop_images(X[random_indices] - mean)
    subset_Y = np.asarray([one_hot(label) for label in Y[random_indices]])
    # Compute the gradients (dropout keep-probability 0.5 for training).
    return sess.run([g for (g, v) in comp_grads], feed_dict={images: subset_X, y_true: subset_Y, dropout: 0.5})
@ray.remote
def compute_accuracy(X, Y, weights):
    """Returns the accuracy of the network.

    Args:
        X (ndarray): A batch of images.
        Y (ndarray): A batch of labels.
        weights (List[ndarray]): The network weights.

    Returns:
        The accuracy of the network on the given batch.
    """
    # Shared per-worker TF session/graph handles, as in compute_grad.
    _, sess, _, accuracy, images, y_true, dropout, placeholders, _, get_weights, set_weights = ray.reusables.net_vars
    # Set the network weights.
    set_weights(weights)
    one_hot_Y = np.asarray([one_hot(label) for label in Y])
    # Images are still randomly cropped for evaluation; dropout is disabled
    # (keep-probability 1.0).
    cropped_X = crop_images(X)
    return sess.run(accuracy, feed_dict={images: cropped_X, y_true: one_hot_Y, dropout: 1.0})
| |
# Copyright 2006-2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Dictionary that is passed as defines for js2c.py.
# Used for defines that must be defined for all native JS files.
define NONE = 0;
define READ_ONLY = 1;
define DONT_ENUM = 2;
define DONT_DELETE = 4;
# 2^53 - 1
define kMaxSafeInteger = 9007199254740991;
# 2^32 - 1
define kMaxUint32 = 4294967295;
# Type query macros.
#
# Note: We have special support for typeof(foo) === 'bar' in the compiler.
# It will *not* generate a runtime typeof call for the most important
# values of 'bar'.
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
macro IS_DATE(arg) = (%IsDate(arg));
macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_FUNCTION(arg) = (%IsFunction(arg));
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_PROXY(arg) = (%_IsJSProxy(arg));
macro IS_REGEXP(arg) = (%_IsRegExp(arg));
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_SIMD_VALUE(arg) = (%IsSimdValue(arg));
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_TYPEDARRAY(arg) = (%_IsTypedArray(arg));
macro IS_UNDEFINED(arg) = (arg === (void 0));
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
# Macro for ES queries of the type: "Type(O) is Object."
macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg));
# Macro for ES queries of the type: "IsCallable(O)"
macro IS_CALLABLE(arg) = (typeof(arg) === 'function');
# Macro for ES6 CheckObjectCoercible
# Will throw a TypeError of the form "[functionName] called on null or undefined".
macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw %make_type_error(kCalledOnNullOrUndefined, functionName);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_BOOLEAN(arg) = (!!(arg));
macro TO_INTEGER(arg) = (%_ToInteger(arg));
macro TO_INT32(arg) = ((arg) | 0);
macro TO_UINT32(arg) = ((arg) >>> 0);
macro INVERT_NEG_ZERO(arg) = ((arg) + 0);
macro TO_LENGTH(arg) = (%_ToLength(arg));
macro TO_STRING(arg) = (%_ToString(arg));
macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
# Private names.
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
macro HAS_PRIVATE(obj, key) = HAS_OWN_PROPERTY(obj, key);
macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
# To avoid ES2015 Function name inference.
macro ANONYMOUS_FUNCTION(fn) = (0, (fn));
# Constants. The compiler constant folds them.
define INFINITY = (1/0);
define UNDEFINED = (void 0);
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
# For messages.js
# Matches Script::Type from objects.h
define TYPE_NATIVE = 0;
define TYPE_EXTENSION = 1;
define TYPE_NORMAL = 2;
# Matches Script::CompilationType from objects.h
define COMPILATION_TYPE_HOST = 0;
define COMPILATION_TYPE_EVAL = 1;
define COMPILATION_TYPE_JSON = 2;
# Must match PropertyFilter in property-details.h
define PROPERTY_FILTER_NONE = 0;
define PROPERTY_FILTER_ONLY_ENUMERABLE = 2;
define PROPERTY_FILTER_SKIP_STRINGS = 8;
define PROPERTY_FILTER_SKIP_SYMBOLS = 16;
# Use for keys, values and entries iterators.
define ITERATOR_KIND_KEYS = 1;
define ITERATOR_KIND_VALUES = 2;
define ITERATOR_KIND_ENTRIES = 3;
macro FIXED_ARRAY_GET(array, index) = (%_FixedArrayGet(array, (index) | 0));
macro FIXED_ARRAY_SET(array, index, value) = (%_FixedArraySet(array, (index) | 0, value));
# TODO(adamk): Find a more robust way to force Smi representation.
macro FIXED_ARRAY_SET_SMI(array, index, value) = (FIXED_ARRAY_SET(array, index, (value) | 0));
macro ORDERED_HASH_TABLE_BUCKET_COUNT(table) = (FIXED_ARRAY_GET(table, 0));
macro ORDERED_HASH_TABLE_ELEMENT_COUNT(table) = (FIXED_ARRAY_GET(table, 1));
macro ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 1, count));
macro ORDERED_HASH_TABLE_DELETED_COUNT(table) = (FIXED_ARRAY_GET(table, 2));
macro ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 2, count));
macro ORDERED_HASH_TABLE_BUCKET_AT(table, bucket) = (FIXED_ARRAY_GET(table, 3 + (bucket)));
macro ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry) = (FIXED_ARRAY_SET(table, 3 + (bucket), entry));
macro ORDERED_HASH_TABLE_HASH_TO_BUCKET(hash, numBuckets) = (hash & ((numBuckets) - 1));
macro ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets) = (3 + (numBuckets) + ((entry) << 1));
macro ORDERED_HASH_SET_KEY_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets)));
macro ORDERED_HASH_SET_CHAIN_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets) + 1));
macro ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) = (3 + (numBuckets) + ((entry) * 3));
macro ORDERED_HASH_MAP_KEY_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets)));
macro ORDERED_HASH_MAP_VALUE_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) + 1));
macro ORDERED_HASH_MAP_CHAIN_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) + 2));
# Must match OrderedHashTable::kNotFound.
define NOT_FOUND = -1;
# Check whether debug is active.
define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
# UseCounters from include/v8.h
define kUseAsm = 0;
define kBreakIterator = 1;
define kLegacyConst = 2;
define kMarkDequeOverflow = 3;
define kStoreBufferOverflow = 4;
define kSlotsBufferOverflow = 5;
define kForcedGC = 7;
define kSloppyMode = 8;
define kStrictMode = 9;
define kRegExpPrototypeStickyGetter = 11;
define kRegExpPrototypeToString = 12;
define kRegExpPrototypeUnicodeGetter = 13;
define kIntlV8Parse = 14;
define kIntlPattern = 15;
define kIntlResolved = 16;
define kPromiseChain = 17;
define kPromiseAccept = 18;
define kPromiseDefer = 19;
define kHtmlCommentInExternalScript = 20;
define kHtmlComment = 21;
define kSloppyModeBlockScopedFunctionRedefinition = 22;
define kForInInitializer = 23;
define kArrayProtectorDirtied = 24;
define kArraySpeciesModified = 25;
define kArrayPrototypeConstructorModified = 26;
define kArrayInstanceProtoModified = 27;
define kArrayInstanceConstructorModified = 28;
define kLegacyFunctionDeclaration = 29;
define kRegExpPrototypeSourceGetter = 30;
define kRegExpPrototypeOldFlagGetter = 31;
# [[PromiseState]] values:
# These values should be kept in sync with PromiseStatus in globals.h
define kPending = 0;
define kFulfilled = 1;
define kRejected = 2;
define kResolveCallback = 0;
define kRejectCallback = 1;
| |
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import mox
from nova.compute import instance_types
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import test
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
    def elevated(self):
        """Return a consistent elevated context so we can detect it."""
        # Cache the elevated context on first use (EAFP) so repeated
        # calls hand back the identical object.
        try:
            return self._elevated
        except AttributeError:
            self._elevated = super(FakeContext, self).elevated()
            return self._elevated
class _BaseTestCase(object):
    """Conductor tests shared by the manager, RPC API and API test cases.

    Concrete subclasses assign the conductor implementation under test to
    ``self.conductor`` in ``setUp``; every test here must behave the same
    no matter which access path (direct manager call, RPC, or API wrapper)
    is used.
    """

    def setUp(self):
        super(_BaseTestCase, self).setUp()
        # Subclasses that talk to a conductor API set this to None as well;
        # tests use it to skip checks that only make sense locally.
        self.db = None
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)

    def stub_out_client_exceptions(self):
        # Make RPC client-exception wrapping a no-op so original exception
        # types propagate to the tests unchanged.
        def passthru(exceptions, func, *args, **kwargs):
            return func(*args, **kwargs)
        self.stubs.Set(rpc_common, 'catch_client_exception', passthru)

    def _create_fake_instance(self, params=None, type_name='m1.tiny'):
        """Create and return a real instance record in the test database."""
        if not params:
            params = {}
        inst = {}
        inst['vm_state'] = vm_states.ACTIVE
        inst['image_ref'] = FAKE_IMAGE_REF
        inst['reservation_id'] = 'r-fakeres'
        inst['launch_time'] = '10'
        inst['user_id'] = self.user_id
        inst['project_id'] = self.project_id
        inst['host'] = 'fake_host'
        type_id = instance_types.get_instance_type_by_name(type_name)['id']
        inst['instance_type_id'] = type_id
        inst['ami_launch_index'] = 0
        inst['memory_mb'] = 0
        inst['vcpus'] = 0
        inst['root_gb'] = 0
        inst['ephemeral_gb'] = 0
        inst['architecture'] = 'x86_64'
        inst['os_type'] = 'Linux'
        inst.update(params)
        return db.instance_create(self.context, inst)

    def _do_update(self, instance_uuid, **updates):
        # The manager/RPC paths take the updates as a single dict; the
        # public API subclass overrides this to pass keyword arguments.
        return self.conductor.instance_update(self.context, instance_uuid,
                                              updates)

    def test_instance_update(self):
        instance = self._create_fake_instance()
        new_inst = self._do_update(instance['uuid'],
                                   vm_state=vm_states.STOPPED)
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.STOPPED)
        self.assertEqual(new_inst['vm_state'], instance['vm_state'])

    def test_action_event_start(self):
        self.mox.StubOutWithMock(db, 'action_event_start')
        db.action_event_start(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_start(self.context, {})

    def test_action_event_finish(self):
        self.mox.StubOutWithMock(db, 'action_event_finish')
        db.action_event_finish(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_finish(self.context, {})

    def test_instance_update_invalid_key(self):
        # NOTE(danms): the real DB API call ignores invalid keys
        if self.db is None:
            self.assertRaises(KeyError,
                              self._do_update, 'any-uuid', foobar=1)

    def test_migration_get(self):
        migration = db.migration_create(self.context.elevated(),
                                        {'instance_uuid': 'fake-uuid',
                                         'status': 'migrating'})
        self.assertEqual(jsonutils.to_primitive(migration),
                         self.conductor.migration_get(self.context,
                                                      migration['id']))

    def test_migration_get_unconfirmed_by_dest_compute(self):
        self.mox.StubOutWithMock(db,
                                 'migration_get_unconfirmed_by_dest_compute')
        db.migration_get_unconfirmed_by_dest_compute(self.context,
                                                     'fake-window',
                                                     'fake-host')
        self.mox.ReplayAll()
        self.conductor.migration_get_unconfirmed_by_dest_compute(self.context,
                                                                 'fake-window',
                                                                 'fake-host')

    def test_migration_get_in_progress_by_host_and_node(self):
        self.mox.StubOutWithMock(db,
                                 'migration_get_in_progress_by_host_and_node')
        db.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node')
        self.assertEqual(result, 'fake-result')

    def test_migration_create(self):
        inst = {'uuid': 'fake-uuid',
                'host': 'fake-host',
                'node': 'fake-node'}
        self.mox.StubOutWithMock(db, 'migration_create')
        db.migration_create(self.context.elevated(),
                            {'instance_uuid': inst['uuid'],
                             'source_compute': inst['host'],
                             'source_node': inst['node'],
                             'fake-key': 'fake-value'}).AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.migration_create(self.context, inst,
                                                 {'fake-key': 'fake-value'})
        self.assertEqual(result, 'result')

    def test_migration_update(self):
        migration = db.migration_create(self.context.elevated(),
                                        {'instance_uuid': 'fake-uuid',
                                         'status': 'migrating'})
        migration_p = jsonutils.to_primitive(migration)
        migration = self.conductor.migration_update(self.context, migration_p,
                                                    'finished')
        self.assertEqual(migration['status'], 'finished')

    def test_instance_get_by_uuid(self):
        orig_instance = self._create_fake_instance()
        copy_instance = self.conductor.instance_get_by_uuid(
            self.context, orig_instance['uuid'])
        self.assertEqual(orig_instance['name'],
                         copy_instance['name'])

    def _setup_aggregate_with_host(self):
        """Create an aggregate with host 'bar' and return the fresh record."""
        aggregate_ref = db.aggregate_create(self.context.elevated(),
            {'name': 'foo'}, metadata={'availability_zone': 'foo'})
        self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
        # Re-read the aggregate so the returned ref includes the new host.
        aggregate_ref = db.aggregate_get(self.context.elevated(),
                                         aggregate_ref['id'])
        return aggregate_ref

    def test_aggregate_host_add(self):
        aggregate_ref = self._setup_aggregate_with_host()
        self.assertTrue(any([host == 'bar'
                             for host in aggregate_ref['hosts']]))
        db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])

    def test_aggregate_host_delete(self):
        aggregate_ref = self._setup_aggregate_with_host()
        self.conductor.aggregate_host_delete(self.context, aggregate_ref,
                                             'bar')
        aggregate_ref = db.aggregate_get(self.context.elevated(),
                                         aggregate_ref['id'])
        self.assertFalse(any([host == 'bar'
                              for host in aggregate_ref['hosts']]))
        db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])

    def test_aggregate_get(self):
        aggregate_ref = self._setup_aggregate_with_host()
        aggregate = self.conductor.aggregate_get(self.context,
                                                 aggregate_ref['id'])
        self.assertEqual(jsonutils.to_primitive(aggregate_ref), aggregate)
        db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])

    def test_aggregate_get_by_host(self):
        self._setup_aggregate_with_host()
        aggregates = self.conductor.aggregate_get_by_host(self.context, 'bar')
        self.assertEqual(aggregates[0]['availability_zone'], 'foo')

    def test_aggregate_metadata_add(self):
        aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
        metadata = {'foo': 'bar'}
        self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
        db.aggregate_metadata_add(
            mox.IgnoreArg(), aggregate['id'], metadata, False).AndReturn(
                metadata)
        self.mox.ReplayAll()
        result = self.conductor.aggregate_metadata_add(self.context,
                                                       aggregate,
                                                       metadata)
        self.assertEqual(result, metadata)

    def test_aggregate_metadata_delete(self):
        aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
        db.aggregate_metadata_delete(mox.IgnoreArg(), aggregate['id'], 'fake')
        self.mox.ReplayAll()
        # Mox call verification is the check here; the return value is
        # deliberately ignored.
        self.conductor.aggregate_metadata_delete(self.context,
                                                 aggregate,
                                                 'fake')

    def test_bw_usage_update(self):
        self.mox.StubOutWithMock(db, 'bw_usage_update')
        self.mox.StubOutWithMock(db, 'bw_usage_get')
        update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
        get_args = (self.context, 'uuid', 0, 'mac')
        db.bw_usage_update(*update_args)
        db.bw_usage_get(*get_args).AndReturn('foo')
        self.mox.ReplayAll()
        result = self.conductor.bw_usage_update(*update_args)
        self.assertEqual(result, 'foo')

    def test_get_backdoor_port(self):
        backdoor_port = 59697

        def fake_get_backdoor_port(self, context):
            return backdoor_port

        # Each conductor flavor exposes the backdoor port differently, so
        # branch on the implementation under test.
        if isinstance(self.conductor, conductor_api.API):
            self.stubs.Set(conductor_manager.ConductorManager,
                           'get_backdoor_port', fake_get_backdoor_port)
            port = self.conductor.get_backdoor_port(self.context, 'fake_host')
        elif isinstance(self.conductor, conductor_api.LocalAPI):
            # LocalAPI is expected to refuse this call; if it doesn't raise,
            # `port` stays unbound and the assert below fails with NameError.
            try:
                self.conductor.get_backdoor_port(self.context, 'fake_host')
            except exc.InvalidRequest:
                port = backdoor_port
        else:
            if isinstance(self.conductor, conductor_rpcapi.ConductorAPI):
                self.stubs.Set(conductor_manager.ConductorManager,
                               'get_backdoor_port', fake_get_backdoor_port)
            self.conductor.backdoor_port = backdoor_port
            port = self.conductor.get_backdoor_port(self.context)
        self.assertEqual(port, backdoor_port)

    def test_security_group_get_by_instance(self):
        fake_instance = {'id': 'fake-instance'}
        self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
        db.security_group_get_by_instance(
            self.context, fake_instance['id']).AndReturn('it worked')
        self.mox.ReplayAll()
        result = self.conductor.security_group_get_by_instance(self.context,
                                                               fake_instance)
        self.assertEqual(result, 'it worked')

    def test_security_group_rule_get_by_security_group(self):
        fake_secgroup = {'id': 'fake-secgroup'}
        self.mox.StubOutWithMock(db,
                                 'security_group_rule_get_by_security_group')
        db.security_group_rule_get_by_security_group(
            self.context, fake_secgroup['id']).AndReturn('it worked')
        self.mox.ReplayAll()
        result = self.conductor.security_group_rule_get_by_security_group(
            self.context, fake_secgroup)
        self.assertEqual(result, 'it worked')

    def test_provider_fw_rule_get_all(self):
        fake_rules = ['a', 'b', 'c']
        self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
        db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
        self.mox.ReplayAll()
        result = self.conductor.provider_fw_rule_get_all(self.context)
        self.assertEqual(result, fake_rules)

    def test_agent_build_get_by_triple(self):
        self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
        db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
                                     'fake-arch').AndReturn('it worked')
        self.mox.ReplayAll()
        result = self.conductor.agent_build_get_by_triple(self.context,
                                                          'fake-hv',
                                                          'fake-os',
                                                          'fake-arch')
        self.assertEqual(result, 'it worked')

    def test_block_device_mapping_get_all_by_instance(self):
        fake_inst = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        db.block_device_mapping_get_all_by_instance(
            self.context, fake_inst['uuid']).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.block_device_mapping_get_all_by_instance(
            self.context, fake_inst)
        self.assertEqual(result, 'fake-result')

    def test_instance_get_all_hung_in_rebooting(self):
        self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
        db.instance_get_all_hung_in_rebooting(self.context, 123)
        self.mox.ReplayAll()
        self.conductor.instance_get_all_hung_in_rebooting(self.context, 123)

    def test_instance_get_active_by_window(self):
        # NOTE(review): the stub below is installed on
        # instance_get_active_by_window_joined while the recorded call is on
        # instance_get_active_by_window — confirm which DB method the
        # conductor actually invokes for this call path.
        self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
        db.instance_get_active_by_window(self.context, 'fake-begin',
                                         'fake-end', 'fake-proj',
                                         'fake-host')
        self.mox.ReplayAll()
        self.conductor.instance_get_active_by_window(self.context,
                                                     'fake-begin', 'fake-end',
                                                     'fake-proj', 'fake-host')

    def test_instance_get_active_by_window_joined(self):
        self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
        db.instance_get_active_by_window_joined(self.context, 'fake-begin',
                                                'fake-end', 'fake-proj',
                                                'fake-host')
        self.mox.ReplayAll()
        self.conductor.instance_get_active_by_window_joined(
            self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')

    def test_instance_destroy(self):
        self.mox.StubOutWithMock(db, 'instance_destroy')
        db.instance_destroy(self.context, 'fake-uuid')
        self.mox.ReplayAll()
        self.conductor.instance_destroy(self.context, {'uuid': 'fake-uuid'})

    def test_instance_info_cache_delete(self):
        self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
        db.instance_info_cache_delete(self.context, 'fake-uuid')
        self.mox.ReplayAll()
        self.conductor.instance_info_cache_delete(self.context,
                                                  {'uuid': 'fake-uuid'})

    def test_instance_info_cache_update(self):
        fake_values = {'key1': 'val1', 'key2': 'val2'}
        fake_instance = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'instance_info_cache_update')
        db.instance_info_cache_update(self.context, 'fake-uuid',
                                      fake_values)
        self.mox.ReplayAll()
        self.conductor.instance_info_cache_update(self.context,
                                                  fake_instance,
                                                  fake_values)

    def test_instance_type_get(self):
        self.mox.StubOutWithMock(db, 'instance_type_get')
        db.instance_type_get(self.context, 'fake-id').AndReturn('fake-type')
        self.mox.ReplayAll()
        result = self.conductor.instance_type_get(self.context, 'fake-id')
        self.assertEqual(result, 'fake-type')

    def test_vol_get_usage_by_time(self):
        self.mox.StubOutWithMock(db, 'vol_get_usage_by_time')
        db.vol_get_usage_by_time(self.context, 'fake-time').AndReturn(
            'fake-usage')
        self.mox.ReplayAll()
        result = self.conductor.vol_get_usage_by_time(self.context,
                                                      'fake-time')
        self.assertEqual(result, 'fake-usage')

    def test_vol_usage_update(self):
        self.mox.StubOutWithMock(db, 'vol_usage_update')
        db.vol_usage_update(self.context, 'fake-vol', 'rd-req', 'rd-bytes',
                            'wr-req', 'wr-bytes', 'fake-id', 'fake-refr',
                            'fake-bool')
        self.mox.ReplayAll()
        self.conductor.vol_usage_update(self.context, 'fake-vol', 'rd-req',
                                        'rd-bytes', 'wr-req', 'wr-bytes',
                                        {'uuid': 'fake-id'}, 'fake-refr',
                                        'fake-bool')

    def test_ping(self):
        result = self.conductor.ping(self.context, 'foo')
        self.assertEqual(result, {'service': 'conductor', 'arg': 'foo'})

    def test_compute_node_create(self):
        self.mox.StubOutWithMock(db, 'compute_node_create')
        db.compute_node_create(self.context, 'fake-values').AndReturn(
            'fake-result')
        self.mox.ReplayAll()
        result = self.conductor.compute_node_create(self.context,
                                                    'fake-values')
        self.assertEqual(result, 'fake-result')

    def test_compute_node_update(self):
        node = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'compute_node_update')
        db.compute_node_update(self.context, node['id'], 'fake-values',
                               False).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.compute_node_update(self.context, node,
                                                    'fake-values', False)
        self.assertEqual(result, 'fake-result')
class ConductorTestCase(_BaseTestCase, test.TestCase):
    """Conductor Manager Tests.

    Runs the shared _BaseTestCase tests against ConductorManager directly
    (no RPC layer), plus manager-specific keyword-routing tests.
    """
    def setUp(self):
        super(ConductorTestCase, self).setUp()
        self.conductor = conductor_manager.ConductorManager()
        # RPC exception wrapping is bypassed so tests see raw exceptions.
        self.stub_out_client_exceptions()
    def test_block_device_mapping_update_or_create(self):
        # Verify the 'create' kwarg routes to create/update/update_or_create.
        fake_bdm = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        db.block_device_mapping_create(self.context, fake_bdm)
        db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
        db.block_device_mapping_update_or_create(self.context, fake_bdm)
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=True)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=False)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm)
    def test_block_device_mapping_destroy(self):
        # Verify the three destroy variants (by bdm list, by instance+device,
        # by instance+volume) each hit the matching DB call.
        fake_bdm = {'id': 'fake-bdm'}
        fake_bdm2 = {'id': 'fake-bdm-2'}
        fake_inst = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
        self.mox.StubOutWithMock(
            db, 'block_device_mapping_destroy_by_instance_and_device')
        self.mox.StubOutWithMock(
            db, 'block_device_mapping_destroy_by_instance_and_volume')
        db.block_device_mapping_destroy(self.context, 'fake-bdm')
        db.block_device_mapping_destroy(self.context, 'fake-bdm-2')
        db.block_device_mapping_destroy_by_instance_and_device(self.context,
                                                               'fake-uuid',
                                                               'fake-device')
        db.block_device_mapping_destroy_by_instance_and_volume(self.context,
                                                               'fake-uuid',
                                                               'fake-volume')
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_destroy(self.context,
                                                    [fake_bdm,
                                                     fake_bdm2])
        self.conductor.block_device_mapping_destroy(self.context,
                                                    instance=fake_inst,
                                                    device_name='fake-device')
        self.conductor.block_device_mapping_destroy(self.context,
                                                    instance=fake_inst,
                                                    volume_id='fake-volume')
    def test_instance_get_all_by_filters(self):
        filters = {'foo': 'bar'}
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, filters,
                                       'fake-key', 'fake-sort')
        self.mox.ReplayAll()
        self.conductor.instance_get_all_by_filters(self.context, filters,
                                                   'fake-key', 'fake-sort')
    def test_instance_get_all_by_host(self):
        # With no node the manager queries by host; with a node it queries
        # by host and node. Both paths use an elevated context.
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host(self.context.elevated(),
                                    'host').AndReturn('result')
        db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
                                             'node').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.instance_get_all_by_host(self.context, 'host')
        self.assertEqual(result, 'result')
        result = self.conductor.instance_get_all_by_host(self.context, 'host',
                                                         'node')
        self.assertEqual(result, 'result')
    def _test_stubbed(self, name, dbargs, condargs,
                      db_result_listified=False):
        # Helper: stub DB method `name` to return 'fake-result', invoke
        # service_get_all_by with `condargs`, and check the result
        # (optionally expected wrapped in a list).
        self.mox.StubOutWithMock(db, name)
        getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.service_get_all_by(self.context, **condargs)
        if db_result_listified:
            self.assertEqual(['fake-result'], result)
        else:
            self.assertEqual('fake-result', result)
    def test_service_get_all(self):
        self._test_stubbed('service_get_all', (), {})
    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic',
                           ('host', 'topic'),
                           dict(topic='topic', host='host'))
    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic',
                           ('topic',),
                           dict(topic='topic'))
    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host',
                           ('host',),
                           dict(host='host'))
    def test_service_get_by_compute_host(self):
        # compute-host lookups return a single record that the conductor
        # wraps in a list, hence db_result_listified=True.
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host'),
                           db_result_listified=True)
    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary'))
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor RPC API Tests.

    Runs the shared _BaseTestCase tests through ConductorAPI against a real
    conductor service, so every call crosses the RPC serialization boundary.
    """
    def setUp(self):
        super(ConductorRPCAPITestCase, self).setUp()
        # Start an actual conductor service for the RPC client to talk to.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_rpcapi.ConductorAPI()
    def test_block_device_mapping_update_or_create(self):
        # Verify the 'create' kwarg routes to create/update/update_or_create.
        fake_bdm = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        db.block_device_mapping_create(self.context, fake_bdm)
        db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
        db.block_device_mapping_update_or_create(self.context, fake_bdm)
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=True)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=False)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm)
    def test_block_device_mapping_destroy(self):
        # Same destroy-variant coverage as the manager test, but the bdm
        # list is passed via the RPC API's 'bdms' keyword.
        fake_bdm = {'id': 'fake-bdm'}
        fake_inst = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
        self.mox.StubOutWithMock(
            db, 'block_device_mapping_destroy_by_instance_and_device')
        self.mox.StubOutWithMock(
            db, 'block_device_mapping_destroy_by_instance_and_volume')
        db.block_device_mapping_destroy(self.context, 'fake-bdm')
        db.block_device_mapping_destroy_by_instance_and_device(self.context,
                                                               'fake-uuid',
                                                               'fake-device')
        db.block_device_mapping_destroy_by_instance_and_volume(self.context,
                                                               'fake-uuid',
                                                               'fake-volume')
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_destroy(self.context,
                                                    bdms=[fake_bdm])
        self.conductor.block_device_mapping_destroy(self.context,
                                                    instance=fake_inst,
                                                    device_name='fake-device')
        self.conductor.block_device_mapping_destroy(self.context,
                                                    instance=fake_inst,
                                                    volume_id='fake-volume')
    def test_instance_get_all_by_filters(self):
        filters = {'foo': 'bar'}
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, filters,
                                       'fake-key', 'fake-sort')
        self.mox.ReplayAll()
        self.conductor.instance_get_all_by_filters(self.context, filters,
                                                   'fake-key', 'fake-sort')
    def _test_stubbed(self, name, dbargs, condargs,
                      db_result_listified=False):
        # Helper: stub DB method `name` to return 'fake-result', invoke
        # service_get_all_by with `condargs`, and check the result
        # (optionally expected wrapped in a list).
        self.mox.StubOutWithMock(db, name)
        getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.service_get_all_by(self.context, **condargs)
        if db_result_listified:
            self.assertEqual(['fake-result'], result)
        else:
            self.assertEqual('fake-result', result)
    def test_service_get_all(self):
        self._test_stubbed('service_get_all', (), {})
    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic',
                           ('host', 'topic'),
                           dict(topic='topic', host='host'))
    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic',
                           ('topic',),
                           dict(topic='topic'))
    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host',
                           ('host',),
                           dict(host='host'))
    def test_service_get_by_compute_host(self):
        # compute-host lookups return a single record that the conductor
        # wraps in a list, hence db_result_listified=True.
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host'),
                           db_result_listified=True)
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor API Tests.

    Runs the _BaseTestCase suite against the RPC-based conductor API,
    backed by a real (started) conductor service.
    """

    def setUp(self):
        super(ConductorAPITestCase, self).setUp()
        # Start a real conductor service so RPC calls have a receiver.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.API()
        # No direct DB handle: everything must go over the conductor.
        self.db = None

    def _do_update(self, instance_uuid, **updates):
        # NOTE(danms): the public API takes actual keyword arguments,
        # so override the base class here to make the call correctly
        return self.conductor.instance_update(self.context, instance_uuid,
                                              **updates)

    def test_bw_usage_get(self):
        """The read-only bw_usage_get path hits only db.bw_usage_get."""
        self.mox.StubOutWithMock(db, 'bw_usage_update')
        self.mox.StubOutWithMock(db, 'bw_usage_get')
        get_args = (self.context, 'uuid', 0, 'mac')
        db.bw_usage_get(*get_args).AndReturn('foo')
        self.mox.ReplayAll()
        result = self.conductor.bw_usage_get(*get_args)
        self.assertEqual(result, 'foo')

    def test_block_device_mapping_update_or_create(self):
        """Each public BDM mutator maps to its dedicated DB call."""
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        db.block_device_mapping_create(self.context, 'fake-bdm')
        db.block_device_mapping_update(self.context,
                                       'fake-id', {'id': 'fake-id'})
        db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
        self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             'fake-bdm')

    def test_block_device_mapping_destroy(self):
        """Destroy variants unpack bdm id / instance uuid before the DB call."""
        fake_bdm = {'id': 'fake-bdm'}
        fake_inst = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
        self.mox.StubOutWithMock(
            db, 'block_device_mapping_destroy_by_instance_and_device')
        self.mox.StubOutWithMock(
            db, 'block_device_mapping_destroy_by_instance_and_volume')
        db.block_device_mapping_destroy(self.context, 'fake-bdm')
        db.block_device_mapping_destroy_by_instance_and_device(self.context,
                                                               'fake-uuid',
                                                               'fake-device')
        db.block_device_mapping_destroy_by_instance_and_volume(self.context,
                                                               'fake-uuid',
                                                               'fake-volume')
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_destroy(self.context, [fake_bdm])
        self.conductor.block_device_mapping_destroy_by_instance_and_device(
            self.context, fake_inst, 'fake-device')
        self.conductor.block_device_mapping_destroy_by_instance_and_volume(
            self.context, fake_inst, 'fake-volume')

    def test_instance_get_all(self):
        """instance_get_all and the by-filters variant both reach the DB."""
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all(self.context)
        db.instance_get_all_by_filters(self.context, {'name': 'fake-inst'},
                                       'updated_at', 'asc')
        self.mox.ReplayAll()
        self.conductor.instance_get_all(self.context)
        self.conductor.instance_get_all_by_filters(self.context,
                                                   {'name': 'fake-inst'},
                                                   'updated_at', 'asc')

    def _test_stubbed(self, name, *args, **kwargs):
        """Stub db.<name>, call the same-named conductor method, compare.

        An optional leading FakeContext in *args overrides the context the
        DB stub expects (the conductor call always uses self.context).
        """
        if args and isinstance(args[0], FakeContext):
            ctxt = args[0]
            args = args[1:]
        else:
            ctxt = self.context
        self.mox.StubOutWithMock(db, name)
        getattr(db, name)(ctxt, *args).AndReturn('fake-result')
        if name == 'service_destroy':
            # TODO(russellb) This is a hack ... SetUp() starts the conductor()
            # service. There is a cleanup step that runs after this test which
            # also deletes the associated service record. This involves a call
            # to db.service_destroy(), which we have stubbed out.
            db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        result = getattr(self.conductor, name)(self.context, *args)
        self.assertEqual(
            result, 'fake-result' if kwargs.get('returns', True) else None)

    def test_service_get_all(self):
        self._test_stubbed('service_get_all')

    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')

    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic', 'topic')

    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host', 'host')

    def test_service_get_by_compute_host(self):
        self._test_stubbed('service_get_by_compute_host', 'host')

    def test_service_create(self):
        self._test_stubbed('service_create', {})

    def test_service_destroy(self):
        # service_destroy returns nothing to the caller.
        self._test_stubbed('service_destroy', '', returns=False)

    def test_service_update(self):
        """The API accepts a service dict and forwards only its id."""
        ctxt = self.context
        self.mox.StubOutWithMock(db, 'service_update')
        db.service_update(ctxt, '', {}).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.service_update(self.context, {'id': ''}, {})
        self.assertEqual(result, 'fake-result')

    def test_instance_get_all_by_host(self):
        # These DB calls are made with an elevated (admin) context.
        self._test_stubbed('instance_get_all_by_host',
                           self.context.elevated(), 'host')

    def test_instance_get_all_by_host_and_node(self):
        self._test_stubbed('instance_get_all_by_host_and_node',
                           self.context.elevated(), 'host', 'node')

    def test_ping(self):
        """wait_until_ready retries ping with a short then longer timeout."""
        timeouts = []
        calls = dict(count=0)

        def fake_ping(_self, context, message, timeout):
            timeouts.append(timeout)
            calls['count'] += 1
            # Fail the first 14 attempts so the retry/backoff path runs.
            if calls['count'] < 15:
                raise rpc_common.Timeout("fake")

        self.stubs.Set(conductor_api.API, 'ping', fake_ping)
        self.conductor.wait_until_ready(self.context)
        self.assertEqual(timeouts.count(10), 10)
        self.assertTrue(None in timeouts)
class ConductorLocalAPITestCase(ConductorAPITestCase):
    """Conductor LocalAPI Tests.

    Re-runs the API suite against the in-process LocalAPI, which calls the
    DB layer directly instead of going over RPC.
    """

    def setUp(self):
        super(ConductorLocalAPITestCase, self).setUp()
        self.conductor = conductor_api.LocalAPI()
        # LocalAPI touches the DB directly, so expose it to the base class.
        self.db = db
        self.stub_out_client_exceptions()

    def test_client_exceptions(self):
        instance = self._create_fake_instance()
        # NOTE(danms): The LocalAPI should not raise exceptions wrapped
        # in ClientException. KeyError should be raised if an invalid
        # update key is passed, so use that to validate.
        self.assertRaises(KeyError,
                          self._do_update, instance['uuid'], foo='bar')

    def test_ping(self):
        # Override test in ConductorAPITestCase
        pass
class ConductorImportTest(test.TestCase):
    """conductor.API() must select Local vs RPC API from the use_local flag."""

    def test_import_conductor_local(self):
        self.flags(use_local=True, group='conductor')
        self.assertTrue(isinstance(conductor.API(),
                                   conductor_api.LocalAPI))

    def test_import_conductor_rpc(self):
        self.flags(use_local=False, group='conductor')
        self.assertTrue(isinstance(conductor.API(),
                                   conductor_api.API))

    def test_import_conductor_override_to_local(self):
        # An explicit use_local=True argument wins over the config flag.
        self.flags(use_local=False, group='conductor')
        self.assertTrue(isinstance(conductor.API(use_local=True),
                                   conductor_api.LocalAPI))
class ConductorPolicyTest(test.TestCase):
    """Sanity checks on the conductor's instance-update key whitelist."""

    def test_all_allowed_keys(self):
        """Every whitelisted key must be accepted by instance_update."""

        # Stub the DB write; we only care that validation passes.
        def fake_db_instance_update(self, *args, **kwargs):
            return None, None
        self.stubs.Set(db, 'instance_update_and_get_original',
                       fake_db_instance_update)
        ctxt = context.RequestContext('fake-user', 'fake-project')
        conductor = conductor_api.LocalAPI()
        updates = {}
        for key in conductor_manager.allowed_updates:
            # Datetime-typed fields need a real datetime value.
            if key in conductor_manager.datetime_fields:
                updates[key] = timeutils.utcnow()
            else:
                updates[key] = 'foo'
        conductor.instance_update(ctxt, 'fake-instance', **updates)

    def test_allowed_keys_are_real(self):
        """Each whitelisted key must exist on the Instance model."""
        instance = models.Instance()
        keys = list(conductor_manager.allowed_updates)
        # NOTE(danms): expected_task_state is a parameter that gets
        # passed to the db layer, but is not actually an instance attribute
        del keys[keys.index('expected_task_state')]
        for key in keys:
            self.assertTrue(hasattr(instance, key))
| |
#!/usr/bin/env python2.7
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# checkstyle: noqa
from __future__ import print_function
import os
import re
import sys
from optparse import OptionParser
class Type(object):
    '''A data type.

    Base class for all thrift-derived type descriptions; tracks the
    (optional) java package and whether the type is immutable.
    '''

    def __init__(self, name, package=None, immutable=False):
        self.name = name
        self.package = package
        self.immutable = immutable

    def absolute_name(self):
        '''Return the package-qualified name, or the bare name if unpackaged.'''
        if self.package:
            return '%s.%s' % (self.package, self.name)
        return self.name

    def codegen_name(self):
        '''Name used in generated code; subclasses may override.'''
        return self.name

    def __str__(self):
        mutability = 'im' if self.immutable else ''
        return '%s (%smutable)' % (self.absolute_name(), mutability)
class PrimitiveType(Type):
    '''A primitive type, with its associated typeboxed name.'''
    def __init__(self, name, boxed_name):
        # Primitives are always immutable and never package-qualified.
        Type.__init__(self, name, package=None, immutable=True)
        # Java boxed/wrapper equivalent (e.g. int -> Integer), used where
        # a generic type parameter is required.
        self.boxed_name = boxed_name
class ParameterizedType(Type):
    '''A parameterized type, usually a collection.'''

    def __init__(self, name, params):
        Type.__init__(self, name, None)
        self.params = params

    def param_names(self):
        '''Comma-separated codegen names of the type parameters.

        Mutable structs use their wrapper (I-prefixed) name and primitives
        their boxed name, so the result is valid inside Java generics.
        '''
        names = []
        for param in self.params:
            if isinstance(param, StructType) and not param.immutable:
                names.append(param.codegen_name())
            elif isinstance(param, PrimitiveType):
                names.append(param.boxed_name)
            else:
                names.append(param.name)
        return ', '.join(names)
class StructType(Type):
    '''A thrift-defined type, which composes other types as fields.'''

    def __init__(self, name, package, kind, fields):
        # Only enums are immutable; structs and unions get wrapper classes.
        Type.__init__(self, name, package, kind == 'enum')
        self.kind = kind
        self.fields = fields

    def codegen_name(self):
        '''Generated wrapper classes carry an "I" (immutable) prefix.'''
        return 'I' + self.name

    def __str__(self):
        field_list = ', '.join(str(f) for f in self.fields)
        return '%s %s { %s }' % (self.kind, self.name, field_list)
class EnumType(StructType):
    '''A thrift-defined value enumeration.'''
    def __init__(self, name, package, values):
        # Enums carry no fields; kind 'enum' marks them immutable.
        StructType.__init__(self, name, package, 'enum', [])
        # List of enum value names as declared in the thrift file.
        self.values = values
    def __str__(self):
        return '%s (%s)' % (self.name, ', '.join(self.values))
class Field(object):
    '''A field within a thrift structure.'''

    def __init__(self, ttype, name):
        self.ttype = ttype
        self.name = name

    def capitalized_name(self):
        '''Field name with its first character upper-cased.'''
        return self.name[:1].capitalize() + self.name[1:]

    def accessor_method(self):
        '''Java accessor name: isFoo for booleans, getFoo otherwise.'''
        prefix = 'is' if self.ttype.name == 'boolean' else 'get'
        return prefix + self.capitalized_name()

    def isset_method(self):
        '''Name of thrift's generated isSetFoo presence check.'''
        return 'isSet%s' % (self.name[0].upper() + self.name[1:])

    def __str__(self):
        return '%s: %s' % (self.name, self.ttype)
# Java snippet templates used by the code generator below. Substitution
# keys: %(type)s, %(fn_name)s, %(field)s, %(enum_value)s, etc.

# Simple accessor returning a stored field.
FIELD_TEMPLATE = ''' public %(type)s %(fn_name)s() {
return %(field)s;
}'''
# Union accessor: only valid when the union currently holds this field.
UNION_FIELD_TEMPLATE = ''' public %(type)s %(fn_name)s() {
if (getSetField() == %(enum_value)s) {
return (%(type)s) value;
} else {
throw new RuntimeException("Cannot get field '%(enum_value)s' "
+ "because union is currently set to " + getSetField());
}
}'''
UNION_SWITCH_CASE = '''case %(case)s:
%(body)s'''
# NOTE: removed a stray trailing semicolon after this assignment (it was a
# no-op empty statement in Python, left over from Java-style editing).
UNION_DEFAULT_ERROR = 'throw new RuntimeException("Unrecognized field " + getSetField())'
UNION_FIELD_SWITCH = '''switch (%(switch_by)s) {
%(cases)s
default:
%(error)s;
}'''
UNION_COPY_CONSTRUCTOR_2 = ''' public static %(wrapped)s newBuilder(int id, Object value) {
%(body)s
}'''
UNION_VALUE_ACCESSOR = ''' public Object getRawValue() {
return value;
}'''
SIMPLE_ASSIGNMENT = 'this.%(field)s = wrapped.%(fn_name)s();'
FIELD_DECLARATION = '''private final %(type)s %(field)s;'''
# Wrap a nested mutable struct in its immutable wrapper (null-safe).
STRUCT_ASSIGNMENT = '''this.%(field)s = wrapped.%(isset)s()
? %(type)s.build(wrapped.%(fn_name)s())
: null;'''
IMMUTABLE_COLLECTION_DECLARATION = (
'''private final Immutable%(collection)s<%(params)s> %(field)s;''')
IMMUTABLE_COLLECTION_ASSIGNMENT = '''this.%(field)s = wrapped.%(isset)s()
? Immutable%(collection)s.copyOf(wrapped.%(fn_name)s())
: Immutable%(collection)s.of();'''
# Template string for assignment for a collection field containing a struct.
STRUCT_COLLECTION_FIELD_ASSIGNMENT = '''this.%(field)s = wrapped.%(isset)s()
? FluentIterable.from(wrapped.%(fn_name)s())
.transform(%(params)s::build)
.to%(collection)s()
: Immutable%(collection)s.<%(params)s>of();'''
# Destination package for all generated wrapper classes.
PACKAGE_NAME = 'org.apache.aurora.scheduler.storage.entities'
# Top-level template for a generated immutable wrapper class. Keys:
# %(package)s, %(imports)s, %(name)s, %(wrapped)s, %(fields)s,
# %(assignments)s, %(copy_constructor)s, %(accessors)s, %(equals)s,
# %(hashcode)s, %(to_string)s.
CLASS_TEMPLATE = '''package %(package)s;
%(imports)s
/**
* An immutable wrapper class.
* <p>
* This code is auto-generated, and should not be directly modified.
*/
public final class %(name)s {
private int cachedHashCode = 0;
%(fields)s
private %(name)s(%(wrapped)s wrapped) {%(assignments)s
}
public static %(name)s build(%(wrapped)s wrapped) {
return new %(name)s(wrapped);
}
public static ImmutableList<%(wrapped)s> toBuildersList(Iterable<%(name)s> w) {
return FluentIterable.from(w).transform(%(name)s::newBuilder).toList();
}
static List<%(wrapped)s> toMutableBuildersList(Iterable<%(name)s> w) {
return Lists.newArrayList(Iterables.transform(w, %(name)s::newBuilder));
}
public static ImmutableList<%(name)s> listFromBuilders(Iterable<%(wrapped)s> b) {
return FluentIterable.from(b).transform(%(name)s::build).toList();
}
public static ImmutableSet<%(wrapped)s> toBuildersSet(Iterable<%(name)s> w) {
return FluentIterable.from(w).transform(%(name)s::newBuilder).toSet();
}
static Set<%(wrapped)s> toMutableBuildersSet(Iterable<%(name)s> w) {
return Sets.newHashSet(Iterables.transform(w, %(name)s::newBuilder));
}
public static ImmutableSet<%(name)s> setFromBuilders(Iterable<%(wrapped)s> b) {
return FluentIterable.from(b).transform(%(name)s::build).toSet();
}
public %(wrapped)s newBuilder() {
%(copy_constructor)s
}
%(accessors)s
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof %(name)s)) {
return false;
}
%(name)s other = (%(name)s) o;
return %(equals)s;
}
@Override
public int hashCode() {
// Following java.lang.String's example of caching hashCode.
// This is thread safe in that multiple threads may wind up
// computing the value, which is apparently favorable to constant
// synchronization overhead.
if (cachedHashCode == 0) {
cachedHashCode = Objects.hash(%(hashcode)s);
}
return cachedHashCode;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)%(to_string)s
.toString();
}
}'''
class GeneratedCode(object):
    '''Accumulates the pieces of one generated wrapper class.

    Callers feed in imports, field declarations, constructor assignments
    and accessor methods, then set the equals/hashCode/toString/copy
    fragments before calling dump() to render the final Java source.
    '''

    def __init__(self, class_name, wrapped_type):
        self._class_name = class_name
        self._wrapped_type = wrapped_type
        self._imports = set()
        self._accessors = []
        self._fields = []
        self._assignments = []
        # Fragments the caller must fill in before dump().
        self.to_string = 'unset'
        self.hash_code = 'unset'
        self.equals = 'unset'
        self.builder = 'unset'
        self.copy_constructor = 'unset'

    def add_import(self, import_class):
        self._imports.add(import_class)

    def add_field(self, field):
        self._fields.append(field)

    def add_assignment(self, assignment):
        self._assignments.append(assignment)

    def add_accessor(self, accessor_method):
        self._accessors.append(accessor_method)

    def dump(self, out_file):
        '''Render the class to out_file, grouping imports per convention.'''
        pending = list(self._imports)
        groups = []

        def take(prefix):
            taken = [imp for imp in pending if imp.startswith(prefix)]
            pending[:] = [imp for imp in pending if not imp.startswith(prefix)]
            return taken

        def push(group):
            if group:
                groups.append('\n'.join('import %s;' % imp for imp in sorted(group)))

        # com.twitter must be pulled out before the broader 'com' prefix
        # would swallow it, but it is emitted as the last group.
        twitter_group = take('com.twitter')
        for prefix in ('java', 'com', 'net', 'org'):
            push(take(prefix))
        push(twitter_group)

        print(CLASS_TEMPLATE % {
            'package': PACKAGE_NAME,
            'name': self._class_name,
            'wrapped': self._wrapped_type,
            'imports': '\n\n'.join(groups),
            'accessors': '\n\n'.join(self._accessors),
            'fields': (' ' + '\n '.join(self._fields) + '\n') if self._fields else '',
            'assignments': ('\n ' + '\n '.join(self._assignments)) if self._assignments else '',
            'to_string': self.to_string,
            'equals': self.equals,
            'hashcode': self.hash_code,
            'copy_constructor': self.copy_constructor,
        }, file=out_file)
# Thrift-IDL scraping regexes. These are raw strings: the originals were
# plain literals, and sequences like '\s' / '\d' are invalid string escapes
# that modern Python flags (DeprecationWarning, SyntaxWarning in 3.12+).
# The compiled patterns are byte-identical to the originals.

# A namespace declaration, e.g.:
# namespace java org.apache.aurora.gen
NAMESPACE_RE = r'namespace\s+(?P<lang>\w+)\s+(?P<namespace>[^\s]+)'
# Matches a complete struct definition, capturing the type and body.
STRUCT_RE = r'(?P<kind>enum|struct|union)\s+(?P<name>\w+)\s+{(?P<body>[^}]+)}'
# A possibly-parameterized type name, e.g.:
# int
# TaskConfig
# Set<String>
# Map<String, TaskConfig>
TYPE_PATTERN = r'(?P<type>\w+)(?:<(?P<params>[^>]+)>)?'
# A field definition within a struct, e.g.:
# 1: string name
# 15: Map<String, TaskConfig> configs # Configs mapped by name.
FIELD_RE = r'\s*\d+:\s+(?:(?:required|optional)\s+)?(%s)\s+(?P<name>\w+).*' % TYPE_PATTERN
# An enum value definition, e.g.:
# INVALID_REQUEST = 0,
ENUM_VALUE_RE = r'\s*(?P<name>\w+)\s*=\s*\d+,?'
class Service(object):
    '''A thrift service definition: a name, optional parent, and methods.'''

    def __init__(self, name, parent, methods):
        self.name = name
        self.parent = parent
        self.methods = methods

    def __str__(self):
        parts = [self.name, self.parent or '']
        parts.append(' ' + '\n '.join(str(m) for m in self.methods))
        return ''.join(parts)
class Method(object):
    '''A service method: name, parameter type names, and return type.'''

    def __init__(self, name, parameters, return_type):
        self.name = name
        self.parameters = parameters
        self.return_type = return_type

    def __str__(self):
        joined_params = ', '.join(self.parameters)
        return self.name + '(' + joined_params + ')'
class Parameter(object):
    '''A named, typed parameter of a service method.'''

    def __init__(self, name, type_name):
        self.name = name
        self.type_name = type_name

    def __str__(self):
        return ' '.join([self.type_name, self.name])
class GenericParameter(Parameter):
    '''A parameter whose type is itself parameterized (e.g. Set<JobKey>).'''
    def __init__(self, name, type_name, parameters):
        Parameter.__init__(self, name, type_name)
        # Type parameters of the generic type.
        self.parameters = parameters
# Templates for the generated <Service>Metadata class, which exposes a map
# of method name -> parameter classes for every service method.
GET_SUPER_METHODS = '.putAll(%(super)sMetadata.METHODS)'
PARAM_METADATA_TEMPLATE = '%(type)s.class,'
METHOD_METADATA_TEMPLATE = '''.put(
"%(name)s",
new Class<?>[] {%(params)s
})'''
SERVICE_METADATA_TEMPLATE = '''package %(package)s;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableMap;
import org.apache.aurora.gen.*;
public final class %(name)sMetadata {
public static final ImmutableMap<String, Class<?>[]> METHODS =
ImmutableMap.<String, Class<?>[]>builder()
%(methods)s
.build();
private %(name)sMetadata() {
// Utility class
}
}
'''
# Service-scraping regexes, as raw strings — the originals were plain
# literals whose '\s'/'\d' escapes are invalid string escapes on modern
# Python. Compiled patterns are unchanged.
SERVICE_RE = r'service (?P<name>\w+)\s+(extends\s+(?P<super>\w+)\s+)?{(?P<body>[^}]+)}'
METHOD_RE = r'\s*(?P<return>\w+)\s+(?P<name>\w+)\((?P<params>[^\)]*)\)'
PARAM_RE = r'\d+\:\s+%s\s+(?:\w+)' % TYPE_PATTERN
# Mapping of thrift scalar/collection type names to their Java equivalents.
# Collections map to bare Type stubs; parameterization is resolved later.
THRIFT_TYPES = {
    'bool': PrimitiveType('boolean', 'Boolean'),
    'i32': PrimitiveType('int', 'Integer'),
    'i64': PrimitiveType('long', 'Long'),
    'double': PrimitiveType('double', 'Double'),
    'string': PrimitiveType('String', 'String'),
    'list': Type('List'),
    'set': Type('Set'),
    'map': Type('Map'),
    'binary': PrimitiveType('byte[]', 'byte[]'),
}
def parse_structs(thrift_defs):
    '''Read all thrift structures found in a file.

    This returns a list of Type objects representing the structs found
    and the fields they contain.
    '''
    # Capture all namespace definitions.
    namespaces = dict(re.findall(NAMESPACE_RE, thrift_defs))

    # Keep track of structs already seen, to identify referenced types.
    structs = []

    def parse_field(field):
        def make_type(name):
            if name in ['list', 'map', 'set']:
                return Type(name.title())
            elif name in THRIFT_TYPES:
                return THRIFT_TYPES[name]
            else:
                # Must be a struct declared earlier in the file.
                return [s for s in structs if s.name == name][0]

        type_name = field.group('type')
        type_params = field.group('params')
        if type_params:
            params = [make_type(p) for p in type_params.replace(' ', '').split(',')]
            ttype = ParameterizedType(type_name.title(), params)
        else:
            ttype = make_type(type_name)
        return Field(ttype, field.group('name'))

    def parse_fields(field_str):
        # NOTE: materialize a list here.  The previous `map(...)` is a
        # one-shot iterator under Python 3, and struct.fields is iterated
        # more than once during code generation.
        return [parse_field(m) for m in re.finditer(FIELD_RE, field_str)]

    def parse_values(enum_str):
        return [m.group('name') for m in re.finditer(ENUM_VALUE_RE, enum_str)]

    for s in re.finditer(STRUCT_RE, thrift_defs, flags=re.MULTILINE):
        if s.group('kind') == 'enum':
            struct = EnumType(s.group('name'),
                              namespaces.get('java', ''),
                              parse_values(s.group('body')))
        else:
            struct = StructType(s.group('name'),
                                namespaces.get('java', ''),
                                s.group('kind'),
                                parse_fields(s.group('body')))
        structs.append(struct)
    return structs
def parse_services(service_defs):
    '''Parse thrift service definitions into a list of Service objects.'''
    services = []
    for svc_match in re.finditer(SERVICE_RE, service_defs, flags=re.MULTILINE):
        methods = []
        for method_match in re.finditer(METHOD_RE, svc_match.group('body'),
                                        flags=re.MULTILINE):
            # Only the parameter *types* are kept for metadata generation.
            param_types = [p.group('type')
                           for p in re.finditer(PARAM_RE,
                                                method_match.group('params'),
                                                flags=re.MULTILINE)]
            methods.append(Method(method_match.group('name'),
                                  param_types,
                                  method_match.group('return')))
        services.append(Service(svc_match.group('name'),
                                svc_match.group('super'),
                                methods))
    return services
def to_upper_snake_case(s):
    '''Convert a camelCase name to UPPER_SNAKE_CASE.

    e.g. taskConfig -> TASK_CONFIG.  Matches thrift's generated
    _Fields enum constant naming.
    '''
    with_separators = re.sub('([A-Z])', r'_\1', s)
    return with_separators.upper()
def generate_union_field(code, struct, field):
    """Emit the isSetX and getX accessors for one field of a thrift union.

    The getter only succeeds when the union's discriminator (setField)
    currently selects this field; otherwise the generated Java throws.
    """
    # Thrift's generated _Fields enum constant for this field.
    field_enum_value = '%s._Fields.%s' % (struct.name, to_upper_snake_case(field.name))
    code.add_accessor(FIELD_TEMPLATE % {'type': 'boolean',
                                        'fn_name': field.isset_method(),
                                        'field': 'setField == %s' % field_enum_value})
    code.add_accessor(UNION_FIELD_TEMPLATE % {'type': field.ttype.codegen_name(),
                                              'fn_name': field.accessor_method(),
                                              'enum_value': field_enum_value})
def generate_struct_field(code, field, builder_calls):
    """Emit declaration, assignment and accessors for one struct field.

    Appends the builder call for this field (used by newBuilder()) to
    builder_calls as a side effect.
    """
    field_type = field.ttype.codegen_name()
    assignment = SIMPLE_ASSIGNMENT
    assignment_args = {
        'field': field.name,
        'fn_name': field.accessor_method()
    }
    # Expression used when copying this field back into a mutable builder.
    builder_assignment = field.name
    if field.ttype.immutable:
        # Immutable types (primitives, enums) are stored and returned as-is.
        code.add_accessor(FIELD_TEMPLATE % {'type': field.ttype.name,
                                            'fn_name': field.accessor_method(),
                                            'field': field.name})
    else:
        if isinstance(field.ttype, ParameterizedType):
            # Add imports for any referenced enum types. This is not necessary for other
            # types since they are either primitives or struct types, which will be in
            # the same package.
            for param_type in field.ttype.params:
                if isinstance(param_type, StructType) and param_type.kind == 'enum':
                    code.add_import(param_type.absolute_name())
            field_type = 'Immutable%s<%s>' % (field.ttype.name, field.ttype.param_names())
        code.add_accessor(FIELD_TEMPLATE % {'type': field_type,
                                            'fn_name': field.accessor_method(),
                                            'field': field.name})
    if isinstance(field.ttype, StructType):
        if field.ttype.kind == 'enum':
            field_type = field.ttype.name
        code.add_import(field.ttype.absolute_name())
        if not field.ttype.immutable:
            # Nested mutable struct: wrap it in its immutable wrapper class.
            assignment = STRUCT_ASSIGNMENT
            assignment_args = {
                'field': field.name,
                'fn_name': field.accessor_method(),
                'isset': field.isset_method(),
                'type': field.ttype.codegen_name(),
            }
            builder_assignment = '%s.newBuilder()' % field.name
    elif isinstance(field.ttype, ParameterizedType):
        # Add necessary imports, supporting only List, Map, Set.
        assert field.ttype.name in ['List', 'Map', 'Set'], 'Unrecognized type %s' % field.ttype.name
        code.add_import('com.google.common.collect.Immutable%s' % field.ttype.name)
        params = field.ttype.params
        if all([p.immutable for p in params]):
            # All parameter types are immutable.
            assignment = IMMUTABLE_COLLECTION_ASSIGNMENT
        elif len(params) == 1:
            # Only one non-immutable parameter.
            # Assumes the parameter type is a struct and our code generator
            # will make a compatible wrapper class and constructor.
            assignment = STRUCT_COLLECTION_FIELD_ASSIGNMENT
            builder_assignment = '%s.toMutableBuilders%s(%s)' % (params[0].codegen_name(), field.ttype.name, field.name)
        else:
            assert False, 'Unable to codegen accessor field for %s' % field.name
        assignment_args = {'collection': field.ttype.name,
                           'field': field.name,
                           'fn_name': field.accessor_method(),
                           'isset': field.isset_method(),
                           'params': field.ttype.param_names()}
    code.add_field(FIELD_DECLARATION % {'field': field.name, 'type': field_type })
    # Strings and wrapped structs may legitimately be null; collections and
    # primitives are never null in the wrapper.
    nullable = field.ttype.name == 'String' or not isinstance(field.ttype, (PrimitiveType, ParameterizedType))
    if nullable:
        code.add_accessor(FIELD_TEMPLATE % {'type': 'boolean',
                                            'fn_name': field.isset_method(),
                                            'field': '%s != null' % field.name})
        builder_calls.append('.set%s(%s == null ? null : %s)' % (field.capitalized_name(), field.name, builder_assignment))
    else:
        builder_calls.append('.set%s(%s)' % (field.capitalized_name(), builder_assignment))
    code.add_assignment(assignment % assignment_args)
def generate_java(struct):
    """Build the GeneratedCode for one thrift struct or union.

    Returns a GeneratedCode ready to be dump()ed to a .java file.
    """
    code = GeneratedCode(struct.codegen_name(), struct.name)
    code.add_import('java.util.Objects')
    code.add_import('java.util.List')
    code.add_import('java.util.Set')
    code.add_import('com.google.common.base.MoreObjects')
    code.add_import('com.google.common.collect.ImmutableList')
    code.add_import('com.google.common.collect.ImmutableSet')
    code.add_import('com.google.common.collect.FluentIterable')
    code.add_import('com.google.common.collect.Iterables')
    code.add_import('com.google.common.collect.Lists')
    code.add_import('com.google.common.collect.Sets')
    code.add_import(struct.absolute_name())
    if struct.kind == 'union':
        # Unions hold a discriminator (setField) plus a single Object value;
        # build the three switch statements used by the constructor, the
        # newBuilder() copy, and the static newBuilder(int, Object).
        assign_cases = []
        copy_cases = []
        copy_2_cases = []
        for field in struct.fields:
            generate_union_field(code, struct, field)
            assert field.ttype.immutable or isinstance(field.ttype, StructType), 'Unrecognized type %s' % field.ttype.name
            if field.ttype.immutable:
                assign_case_body = 'value = wrapped.%s();\nbreak;' % field.accessor_method()
                copy_case_body = 'return new %s(setField, %s());' % (struct.name, field.accessor_method())
                copy_2_case_cast = field.ttype.codegen_name()
            else:
                # Mutable struct members are wrapped on read and unwrapped
                # (newBuilder()) on copy.
                assign_case_body = 'value = %(codegen_name)s.build(wrapped.%(accessor_method)s());\nbreak;' % {
                    'codegen_name': field.ttype.codegen_name(),
                    'accessor_method': field.accessor_method()}
                copy_case_body = 'return new %s(setField, %s().newBuilder());' % (struct.name, field.accessor_method())
                code.add_import('org.apache.aurora.gen.%s' % field.ttype.name)
                copy_2_case_cast = field.ttype.name
            assign_cases.append(UNION_SWITCH_CASE % {'case': to_upper_snake_case(field.name),
                                                     'body': assign_case_body})
            copy_cases.append(UNION_SWITCH_CASE % {'case': to_upper_snake_case(field.name),
                                                   'body': copy_case_body})
            copy_2_case_body = 'return %(wrapped)s.%(method)s((%(cast)s) value);' % {
                'wrapped': struct.name,
                'method': field.name,
                'cast': copy_2_case_cast}
            copy_2_cases.append(UNION_SWITCH_CASE % {'case': to_upper_snake_case(field.name),
                                                     'body': copy_2_case_body})
        set_field_type = '%s._Fields' % struct.name
        code.add_accessor(FIELD_TEMPLATE % {'type': set_field_type, 'fn_name': 'getSetField', 'field': 'setField'})
        code.add_field(FIELD_DECLARATION % {'field': 'setField', 'type': set_field_type})
        code.add_assignment(SIMPLE_ASSIGNMENT % {'field': 'setField',
                                                 'fn_name': 'getSetField'})
        code.add_field(FIELD_DECLARATION % {'field': 'value', 'type': 'Object'})
        code.add_assignment(UNION_FIELD_SWITCH % {'cases': '\n '.join(assign_cases),
                                                  'switch_by': 'getSetField()',
                                                  'error': UNION_DEFAULT_ERROR})
        code.copy_constructor = UNION_FIELD_SWITCH % {'cases': '\n '.join(copy_cases),
                                                      'switch_by': 'getSetField()',
                                                      'error': UNION_DEFAULT_ERROR}
        copy_2_switch = UNION_FIELD_SWITCH % {'cases': '\n '.join(copy_2_cases),
                                              'switch_by': '%s.findByThriftId(id)' % set_field_type,
                                              'error': 'throw new RuntimeException("Unrecognized id " + id)'}
        code.add_accessor(UNION_VALUE_ACCESSOR)
        code.add_accessor(UNION_COPY_CONSTRUCTOR_2 % {'wrapped': struct.name, 'body': copy_2_switch})
        code.to_string = '.add("setField", setField).add("value", value)'
        code.equals = 'Objects.equals(setField, other.setField) && Objects.equals(value, other.value)'
        code.hash_code = 'setField, value'
    else:
        # Plain struct: one declaration/assignment/accessor set per field.
        builder_calls = []
        for field in struct.fields:
            generate_struct_field(code, field, builder_calls)
        field_names = [f.name for f in struct.fields]
        code.copy_constructor = 'return new %s()%s;' % (struct.name, '\n ' + '\n '.join(builder_calls))
        code.to_string = '\n ' + '\n '.join(['.add("%s", %s)' % (f, f) for f in field_names])
        code.equals = '\n && '.join(['Objects.equals(%s, other.%s)' % (f, f) for f in field_names])
        code.hash_code = '\n ' + ',\n '.join([f for f in field_names])
        # Special case for structs with no fields.
        if not struct.fields:
            code.equals = 'true'
    return code
if __name__ == '__main__':
    # CLI: read one thrift file, write wrapper classes and service metadata.
    parser = OptionParser()
    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      action='store_true',
                      help='Display extra information about code generation.')
    options, args = parser.parse_args()

    def log(value):
        # Only chatty when --verbose was given.
        if options.verbose:
            print(value)

    if len(args) != 3:
        print('usage: %s thrift_file code_output_dir resource_output_dir' % sys.argv[0])
        sys.exit(1)
    thrift_file, code_output_dir, resource_output_dir = args
    with open(thrift_file) as f:
        # Load all structs found in the thrift file.
        file_contents = f.read()
        services = parse_services(file_contents)
        # Nothing to generate for pure data files.
        if not services:
            log('Skipping generation for %s since there are no services.' % thrift_file)
            sys.exit(0)
        structs = parse_structs(file_contents)
    package_dir = os.path.join(code_output_dir, PACKAGE_NAME.replace('.', os.path.sep))
    if not os.path.isdir(package_dir):
        os.makedirs(package_dir)
    for struct in structs:
        # Skip generation for enums, since they are immutable.
        if struct.kind == 'enum':
            continue
        gen_file = os.path.join(package_dir, '%s.java' % struct.codegen_name())
        log('Generating %s' % gen_file)
        with open(gen_file, 'w') as f:
            code = generate_java(struct)
            code.dump(f)
    # Help/metadata resource directories mirror the java package layout.
    resource_dir = os.path.join(resource_output_dir, PACKAGE_NAME.replace('.', os.path.sep), 'help')
    if not os.path.isdir(resource_dir):
        os.makedirs(resource_dir)
    methods_dir = os.path.join(resource_dir, 'method')
    if not os.path.isdir(methods_dir):
        os.makedirs(methods_dir)
    types_dir = os.path.join(resource_dir, 'type')
    if not os.path.isdir(types_dir):
        os.makedirs(types_dir)

    def get_service(name):
        return [s for s in services if s.name == name][0]

    service = get_service('AuroraAdmin')
    # Collect methods from the service and all of its ancestors.
    all_methods = [] + service.methods
    cur_service = service
    while cur_service.parent:
        cur_service = get_service(cur_service.parent)
        all_methods += cur_service.methods

    def get_type_name(name):
        # Box primitives so they can appear in a Class<?>[] literal.
        if name in THRIFT_TYPES:
            thrift_type = THRIFT_TYPES[name]
            if isinstance(thrift_type, PrimitiveType):
                return thrift_type.boxed_name
            else:
                return thrift_type.name
        return name

    def add_param(param):
        return PARAM_METADATA_TEMPLATE % {
            'type': get_type_name(param)
        }

    def add_method(method):
        spacing = '\n '
        return METHOD_METADATA_TEMPLATE % {
            'name': method.name,
            'params': (spacing if method.parameters else '') + spacing.join(map(add_param, method.parameters))
        }

    method_metadata = '\n '.join(map(add_method, all_methods))
    service_metadata = SERVICE_METADATA_TEMPLATE % {
        'package': PACKAGE_NAME,
        'methods': method_metadata,
        'name': service.name
    }
    gen_file = os.path.join(package_dir, '%sMetadata.java' % service.name)
    log('Generating service metadata file %s' % gen_file)
    with open(gen_file, 'w') as f:
        print(service_metadata, file=f)
| |
"""
Parsers are used to parse the content of incoming HTTP requests.
They give us a generic way of being able to handle various media types
on the request, such as form content or json encoded data.
"""
from __future__ import unicode_literals
import codecs
from django.conf import settings
from django.core.files.uploadhandler import StopFutureHandlers
from django.http import QueryDict
from django.http.multipartparser import ChunkIter
from django.http.multipartparser import \
MultiPartParser as DjangoMultiPartParser
from django.http.multipartparser import MultiPartParserError, parse_header
from django.utils import six
from django.utils.encoding import force_text
from django.utils.six.moves.urllib import parse as urlparse
from rest_framework import renderers
from rest_framework.exceptions import ParseError
from rest_framework.settings import api_settings
from rest_framework.utils import json
class DataAndFiles(object):
    """
    Container pairing parsed request data with any uploaded files,
    returned by parsers that handle multipart/file-upload content.
    """
    def __init__(self, data, files):
        self.data = data
        self.files = files
class BaseParser(object):
    """
    All parsers should extend `BaseParser`, specifying a `media_type`
    attribute, and overriding the `.parse()` method.
    """
    # Concrete parsers must set this to the media type they handle.
    media_type = None

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Given a stream to read from, return the parsed representation.

        Should return parsed data, or a `DataAndFiles` object consisting of
        the parsed data and files.
        """
        raise NotImplementedError(".parse() must be overridden.")
class JSONParser(BaseParser):
    """
    Parses JSON-serialized data.
    """
    media_type = 'application/json'
    renderer_class = renderers.JSONRenderer
    strict = api_settings.STRICT_JSON

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as JSON and returns the resulting data.
        """
        parser_context = parser_context or {}
        encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
        try:
            reader = codecs.getreader(encoding)(stream)
            # Strict mode rejects NaN/Infinity constants.
            constant_hook = json.strict_constant if self.strict else None
            return json.load(reader, parse_constant=constant_hook)
        except ValueError as exc:
            raise ParseError('JSON parse error - %s' % six.text_type(exc))
class FormParser(BaseParser):
    """
    Parser for form data.
    """
    media_type = 'application/x-www-form-urlencoded'

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as a URL encoded form,
        and returns the resulting QueryDict.
        """
        parser_context = parser_context or {}
        encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
        return QueryDict(stream.read(), encoding=encoding)
class MultiPartParser(BaseParser):
    """
    Parser for multipart form data, which may include file data.
    """
    media_type = 'multipart/form-data'

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as a multipart encoded form,
        and returns a DataAndFiles object.

        `.data` will be a `QueryDict` containing all the form parameters.
        `.files` will be a `QueryDict` containing all the form files.
        """
        parser_context = parser_context or {}
        request = parser_context['request']
        encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
        # Django's multipart parser reads the content type from META, so
        # inject the negotiated media type (with boundary) into a copy.
        meta = request.META.copy()
        meta['CONTENT_TYPE'] = media_type
        upload_handlers = request.upload_handlers
        try:
            parser = DjangoMultiPartParser(meta, stream, upload_handlers, encoding)
            data, files = parser.parse()
            return DataAndFiles(data, files)
        except MultiPartParserError as exc:
            raise ParseError('Multipart form parse error - %s' % six.text_type(exc))
class FileUploadParser(BaseParser):
    """
    Parser for file upload data.

    Treats the entire request body as a single uploaded file and feeds it
    through Django's upload-handler chain.
    """
    media_type = '*/*'
    errors = {
        'unhandled': 'FileUpload parse error - none of upload handlers can handle the stream',
        'no_filename': 'Missing filename. Request should include a Content-Disposition header with a filename parameter.',
    }

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Treats the incoming bytestream as a raw file upload and returns
        a `DataAndFiles` object.

        `.data` will be None (we expect request body to be a file content).
        `.files` will be a `QueryDict` containing one 'file' element.

        Raises ParseError when no filename can be determined or when no
        upload handler produces a file.
        """
        parser_context = parser_context or {}
        request = parser_context['request']
        encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
        meta = request.META
        upload_handlers = request.upload_handlers
        filename = self.get_filename(stream, media_type, parser_context)
        if not filename:
            raise ParseError(self.errors['no_filename'])

        # Note that this code is extracted from Django's handling of
        # file uploads in MultiPartParser.
        content_type = meta.get('HTTP_CONTENT_TYPE',
                                meta.get('CONTENT_TYPE', ''))
        try:
            content_length = int(meta.get('HTTP_CONTENT_LENGTH',
                                          meta.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            # Missing/garbage Content-Length: treat as unknown.
            content_length = None

        # See if the handler will want to take care of the parsing.
        for handler in upload_handlers:
            result = handler.handle_raw_input(stream,
                                              meta,
                                              content_length,
                                              None,
                                              encoding)
            if result is not None:
                # A handler consumed the whole stream itself; result[1]
                # is the completed file object.
                return DataAndFiles({}, {'file': result[1]})

        # This is the standard case.
        # Pick the smallest chunk size requested by any handler (capped
        # just under 2 GiB).
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        chunk_size = min([2 ** 31 - 4] + possible_sizes)
        chunks = ChunkIter(stream, chunk_size)
        # Bytes fed so far to each handler, indexed like upload_handlers.
        counters = [0] * len(upload_handlers)

        for index, handler in enumerate(upload_handlers):
            try:
                handler.new_file(None, filename, content_type,
                                 content_length, encoding)
            except StopFutureHandlers:
                # Handler claimed exclusive ownership of the upload; drop
                # the handlers after it.
                upload_handlers = upload_handlers[:index + 1]
                break

        for chunk in chunks:
            for index, handler in enumerate(upload_handlers):
                chunk_length = len(chunk)
                chunk = handler.receive_data_chunk(chunk, counters[index])
                counters[index] += chunk_length
                if chunk is None:
                    # Handler swallowed the chunk; later handlers don't see it.
                    break

        for index, handler in enumerate(upload_handlers):
            # The first handler to return a completed file wins.
            file_obj = handler.file_complete(counters[index])
            if file_obj is not None:
                return DataAndFiles({}, {'file': file_obj})

        raise ParseError(self.errors['unhandled'])

    def get_filename(self, stream, media_type, parser_context):
        """
        Detects the uploaded file name. First searches a 'filename' url kwarg.
        Then tries to parse Content-Disposition header.

        Returns None when neither source provides a usable filename.
        """
        try:
            return parser_context['kwargs']['filename']
        except KeyError:
            pass

        try:
            meta = parser_context['request'].META
            disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'].encode('utf-8'))
            filename_parm = disposition[1]
            if 'filename*' in filename_parm:
                # RFC 6266 extended (percent-encoded) filename parameter.
                return self.get_encoded_filename(filename_parm)
            return force_text(filename_parm['filename'])
        except (AttributeError, KeyError, ValueError):
            pass

    def get_encoded_filename(self, filename_parm):
        """
        Handle encoded filenames per RFC6266. See also:
        https://tools.ietf.org/html/rfc2231#section-4
        """
        encoded_filename = force_text(filename_parm['filename*'])
        try:
            # Extended value has the shape: charset'language'percent-encoded.
            charset, lang, filename = encoded_filename.split('\'', 2)
            filename = urlparse.unquote(filename)
        except (ValueError, LookupError):
            # Malformed extended value; fall back to the plain parameter.
            filename = force_text(filename_parm['filename'])
        return filename
| |
import datetime
import pymysql
import os
# import sys
# import locale
# import codecs
import pandas as pd
# MySQL connection parameters shared by every database helper below.
settings = dict(
    MYSQL_HOST='localhost',
    MYSQL_DBNAME='il_sites_datas',
    MYSQL_USER='root',
    MYSQL_PASSWORD='root',
)
class ClientChanges:
    """Build a daily Excel report of competitor client (company) changes.

    Reads crawl results from the ``sites_datas`` MySQL table, writes an
    Excel workbook listing companies that appeared ("New_Companies") or
    disappeared ("Companies_That_left"), computes per-site statistics, and
    can purge stale rows from the database.
    """

    def __init__(self):
        # Reference dates ("dd/mm/YYYY") used by the SQL filters and report.
        self.today = datetime.date.today()
        # For testing, pin the date instead:
        # self.today = datetime.datetime.strptime("05/10/2016", "%d/%m/%Y")
        self.today_str = self.today.strftime("%d/%m/%Y")
        self.today_file = self.today.strftime("%Y_%m_%d")
        self.yesterday = self.today - datetime.timedelta(days=1)
        self.yesterday_str = self.yesterday.strftime("%d/%m/%Y")
        # Pre-quoted crawl dates for the last 8 days (today inclusive),
        # ready to be substituted into an SQL IN (...) clause.
        self.date_range = [
            '"{}"'.format(
                (self.today - datetime.timedelta(days=i)).strftime("%d/%m/%Y"))
            for i in range(8)
        ]
        self.excel_file_path = self.create_file()

    def start(self):
        """Load the last week of crawl data and write the Excel report."""
        self.df_main = self.read_sql()
        self.excel_writer()

    def get_stats(self):
        """Return new/removed counts merged with total jobs/companies."""
        stats = self.get_removed_stats()
        stats.update(self.get_total_stats())
        return stats

    def create_file(self):
        """Create directory and file for client changes
        and return excel file path"""
        directory_name = "daily_competitor_client_changes"
        if not os.path.exists(directory_name):
            os.mkdir(directory_name)
        filename = "{}_Daily-Competitor-Client-Change.xlsx".format(
            self.today_file)
        return "./{}/{}".format(directory_name, filename)

    def _connect(self):
        """Open a new MySQL connection using the module-level settings."""
        return pymysql.connect(
            host=settings.get('MYSQL_HOST'), port=3306,
            user=settings.get('MYSQL_USER'),
            passwd=settings.get('MYSQL_PASSWORD'),
            db=settings.get('MYSQL_DBNAME'),
            charset='utf8'
        )

    def read_sql(self):
        """Read the last week of crawl rows into a pandas DataFrame.

        FIX: the connection is now always closed when the query completes
        (previously it leaked).
        """
        conn = self._connect()
        try:
            placeholders = ','.join(['%s'] * len(self.date_range))
            sql = """SELECT Site,Company, Company_jobs,Crawl_Date,Job_Post_Date,unique_id
            FROM sites_datas
            WHERE Crawl_Date IN (%s)""" % placeholders
            # date_range entries are already double-quoted date literals.
            sql = sql % tuple(self.date_range)
            return pd.read_sql(sql, conn)
        finally:
            conn.close()

    def get_total_stats(self):
        """Count today's jobs and distinct companies per monitored site.

        FIX: the connection is now always closed (previously it leaked).
        """
        conn = self._connect()
        data = {'total_jobs': {}, 'total_companies': {}}
        try:
            for company in ["Drushim", "AllJobs", "JobMaster"]:
                sql = """select count(*) as count from sites_datas where
                Site= "%s" and
                Crawl_Date= "%s";""" % (company, self.today_str)
                result = pd.read_sql(sql, conn)
                data['total_jobs'][company] = result['count'][0]
                sql_company = """select count(Distinct(Company)) as count from sites_datas
                where Site="%s" and Crawl_Date="%s";""" % (company, self.today_str)
                result_company = pd.read_sql(sql_company, conn)
                data['total_companies'][company] = result_company['count'][0]
        finally:
            conn.close()
        return data

    def excel_writer(self):
        """Write the New_Companies and Companies_That_left sheets to Excel."""
        writer = pd.ExcelWriter(self.excel_file_path)
        columns = ['Site', 'Company', 'Company_jobs', 'Num_Company_jobs']
        new_columns = [
            'Site', 'Company', 'Company_jobs', 'Num_Company_jobs',
            'Company Site URL', 'Company Phone', 'Company Email']
        # Total number of jobs per (site, company) on each crawl date.
        self.df_main['Num_Company_jobs'] = self.df_main.groupby(
            ['Site', 'Company', 'Crawl_Date']
        )['unique_id'].transform('count')
        # Keep one row per company per site per crawl date.
        self.df_main = self.df_main.drop_duplicates(
            subset=['Site', 'Company', 'Crawl_Date'])

        # ****** GET NEW COMPANIES **********
        # A company seen on exactly one crawl date, that date being today,
        # is considered new.
        df_new = self.df_main.copy()
        df_new['crawl_date_count'] = df_new.groupby(
            ['Site', 'Company']
        )['unique_id'].transform('count')
        df_new = df_new[df_new.crawl_date_count == 1]
        df_new_companies = df_new[
            df_new.Crawl_Date == self.today_str]
        df_new_companies = df_new_companies.sort_values(
            by=['Site', 'Company'])
        # Contact columns start empty; filled in manually afterwards.
        df_new_companies['Company Site URL'] = ""
        df_new_companies['Company Phone'] = ""
        df_new_companies['Company Email'] = ""
        df_new_companies.to_excel(
            writer, index=False, sheet_name='New_Companies',
            columns=new_columns, encoding='utf-8')

        # ****** GET REMOVED COMPANIES ******
        # A company that appears only on yesterday's crawl (of yesterday +
        # today) has left.
        df_removed = self.df_main.copy()
        df_removed = df_removed[
            (df_removed['Crawl_Date'] == self.yesterday_str) |
            (df_removed['Crawl_Date'] == self.today_str)
        ]
        df_removed['crawl_date_count'] = df_removed.groupby(
            ['Site', 'Company']
        )['unique_id'].transform('count')
        df_removed = df_removed[df_removed.crawl_date_count == 1]
        df_removed_companies = df_removed[
            df_removed.Crawl_Date == self.yesterday_str]
        df_removed_companies = df_removed_companies.sort_values(
            by=['Site', 'Company'])
        df_removed_companies.to_excel(
            writer, index=False, sheet_name='Companies_That_left',
            columns=columns, encoding='utf-8')
        # save the excel
        writer.save()

    def get_removed_stats(self):
        """Count new/removed companies per site from the generated workbook."""
        stats = {'new': {}, 'removed': {}}
        # NOTE: `sheetname` is the legacy pandas keyword (renamed to
        # `sheet_name` in later versions); kept for the pandas pinned here.
        df_new_companies = pd.read_excel(
            self.excel_file_path, sheetname='New_Companies')
        df_removed_companies = pd.read_excel(
            self.excel_file_path, sheetname='Companies_That_left')
        for company in ["Drushim", "AllJobs", "JobMaster"]:
            stats['new'][company] = len(
                df_new_companies[df_new_companies['Site'] == company])
            stats['removed'][company] = len(
                df_removed_companies[df_removed_companies['Site'] == company])
        return stats

    def clean_residual_database(self, month_range):
        """Delete rows whose Crawl_Date is not in *month_range*.

        Args:
            month_range: Iterable of unquoted "dd/mm/YYYY" strings to keep.
        """
        placeholders = ','.join(['"%s"'] * len(month_range))
        conn = self._connect()
        sql = """DELETE FROM sites_datas WHERE Crawl_Date NOT IN (%s)""" % placeholders
        sql = sql % tuple(month_range)
        try:
            with conn.cursor() as cursor:
                cursor.execute(sql)
                # Connection is not autocommit by default, so commit
                # explicitly to persist the delete.
                conn.commit()
        finally:
            conn.close()
if __name__ == '__main__':
    # Entry point: purge crawl rows older than 70 days from the database.
    changes = ClientChanges()
    # changes.start()
    # changes.get_stats()
    month_range = [
        (changes.today - datetime.timedelta(days=offset)).strftime("%d/%m/%Y")
        for offset in range(70)
    ]
    # print(month_range)
    changes.clean_residual_database(month_range)
    print('success')
| |
#
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import inspect
import six
from time import time
from inspect import isfunction, ismethod
class BaseHostTestAbstract(object):
    """Base class for host-test test cases.

    Defines an interface of setup, test and teardown methods subclasses should
    implement.

    This class also performs common 'housekeeping' tasks such as pushing/popping
    messages on the event_queue and handling test config.
    """

    name = ""  # name of the host test (used for local registration)
    __event_queue = None  # To main event loop
    __dut_event_queue = None  # To DUT
    script_location = None  # Path to source file used to load host test
    __config = {}  # Test config; replaced by setup_communication()

    def __notify_prn(self, text):
        """Queue a print/log notification for the main event loop."""
        if self.__event_queue:
            self.__event_queue.put(("__notify_prn", text, time()))

    def __notify_conn_lost(self, text):
        """Queue a connection-lost notification for the main event loop."""
        if self.__event_queue:
            self.__event_queue.put(("__notify_conn_lost", text, time()))

    def __notify_sync_failed(self, text):
        """Queue a sync-failed notification for the main event loop."""
        if self.__event_queue:
            self.__event_queue.put(("__notify_sync_failed", text, time()))

    def __notify_dut(self, key, value):
        """Send data over serial to DUT."""
        if self.__dut_event_queue:
            self.__dut_event_queue.put((key, value, time()))

    def notify_complete(self, result=None):
        """Notify the main event loop that a host test is complete.

        Args:
            result: True for success, False failure.
        """
        if self.__event_queue:
            self.__event_queue.put(("__notify_complete", result, time()))

    def reset_dut(self, value):
        """Reset the device under test.

        Args:
            value: Value to send with the reset message.
        """
        if self.__event_queue:
            self.__event_queue.put(("__reset_dut", value, time()))

    def reset(self):
        """Reset the device under test and continue running the host test."""
        if self.__event_queue:
            self.__event_queue.put(("__reset", "0", time()))

    def notify_conn_lost(self, text):
        """Notify main event loop of a DUT-host connection error.

        Args:
            text: Additional text to send with the notification.
        """
        self.__notify_conn_lost(text)

    def log(self, text):
        """Send log message to main event loop.

        Args:
            text: Additional text to send with the notification.
        """
        self.__notify_prn(text)

    def send_kv(self, key, value):
        """Send Key-Value pair to the DUT.

        Args:
            key: Key part of KV pair.
            value: Value part of KV pair.
        """
        self.__notify_dut(key, value)

    def setup_communication(self, event_queue, dut_event_queue, config=None):
        """Setup queues used for comms between DUT and host.

        Args:
            event_queue: Queue of KV messages sent toward the host.
            dut_event_queue: Queue of KV messages sent toward the DUT.
            config: Test config dict; defaults to an empty config.
        """
        self.__event_queue = event_queue  # To main event loop
        self.__dut_event_queue = dut_event_queue  # To DUT
        # FIX: the default used to be a mutable `config={}` shared across
        # every call; use a None sentinel and build a fresh dict instead.
        self.__config = config if config is not None else {}

    def get_config_item(self, name):
        """Get an item from the config by name.

        Args:
            name: Name of config parameter to get.

        Returns:
            Value of the config parameter with the given name. None if not found.
        """
        return self.__config.get(name, None)

    def setup(self):
        """Setup tests and callbacks."""
        raise NotImplementedError

    def result(self):
        """Return host test result (True, False or None)."""
        raise NotImplementedError

    def teardown(self):
        """Test teardown."""
        raise NotImplementedError
def event_callback(key):
    """Decorator marking a method as the handler for event *key*.

    Stamps the given key onto the decorated function as its
    ``event_key`` attribute; the function itself is returned unchanged.
    """
    def decorator(handler):
        handler.event_key = key
        return handler
    return decorator
class HostTestCallbackBase(BaseHostTestAbstract):
    """Adds event-callback registration and validation on top of the
    abstract host-test base class."""

    def __init__(self):
        BaseHostTestAbstract.__init__(self)
        # Maps event key -> callable(key, value, timestamp).
        self.__callbacks = {}
        # Reserved keys that user code may not register without force=True.
        self.__restricted_callbacks = [
            "__coverage_start",
            "__testcase_start",
            "__testcase_finish",
            "__testcase_summary",
            "__exit",
            "__exit_event_queue",
        ]
        # Internal events silently consumed by the default callback.
        self.__consume_by_default = [
            "__coverage_start",
            "__testcase_start",
            "__testcase_finish",
            "__testcase_count",
            "__testcase_name",
            "__testcase_summary",
            "__rxd_line",
        ]
        self.__assign_default_callbacks()
        self.__assign_decorated_callbacks()

    def __callback_default(self, key, value, timestamp):
        """Default callback."""
        # self.log("CALLBACK: key=%s, value=%s, timestamp=%f"% (key, value, timestamp))
        pass

    def __default_end_callback(self, key, value, timestamp):
        """Default handler for event 'end' that gives test result from target.

        This callback is not decorated as we don't know in what order this
        callback will be registered. We want to let users override this callback.
        Hence it should be registered before registering user defined callbacks.
        """
        self.notify_complete(value == "success")

    def __assign_default_callbacks(self):
        """Assign default callback handlers."""
        for key in self.__consume_by_default:
            self.__callbacks[key] = self.__callback_default
        # Register default handler for event 'end' before assigning user defined
        # callbacks to let users over write it.
        self.register_callback("end", self.__default_end_callback)

    def __assign_decorated_callbacks(self):
        """Look for any callback methods decorated with @event_callback

        Example:
            Define a method with @event_callback decorator like:

            @event_callback('<event key>')
            def event_handler(self, key, value, timestamp):
                do something..
        """
        for name, method in inspect.getmembers(self, inspect.ismethod):
            # The @event_callback decorator stamped the key onto the function.
            key = getattr(method, "event_key", None)
            if key:
                self.register_callback(key, method)

    def register_callback(self, key, callback, force=False):
        """Register callback for a specific event (key: event name).

        Args:
            key: Name of the event.
            callback: Callable which will be registered for event "key".
            force: God mode. Skips the reserved/restricted key checks.

        Raises:
            TypeError: if key is not a string, callback is not callable, or
                the callback's arity does not match (key, value, timestamp).
            ValueError: if key is reserved/restricted and force is False.
        """
        # Non-string keys are not allowed
        if type(key) is not str:
            raise TypeError("event non-string keys are not allowed")
        # And finally callback should be callable
        if not callable(callback):
            raise TypeError("event callback should be callable")
        # Check if callback has all three required parameters (key, value, timestamp)
        # When callback is class method should have 4 arguments (self, key, value,
        # timestamp)
        if ismethod(callback):
            arg_count = six.get_function_code(callback).co_argcount
            if arg_count != 4:
                err_msg = "callback 'self.%s('%s', ...)' defined with %d arguments" % (
                    callback.__name__,
                    key,
                    arg_count,
                )
                err_msg += (
                    ", should have 4 arguments: self.%s(self, key, value, timestamp)"
                    % callback.__name__
                )
                raise TypeError(err_msg)
        # When callback is just a function should have 3 arguments func(key, value,
        # timestamp)
        if isfunction(callback):
            arg_count = six.get_function_code(callback).co_argcount
            if arg_count != 3:
                err_msg = "callback '%s('%s', ...)' defined with %d arguments" % (
                    callback.__name__,
                    key,
                    arg_count,
                )
                err_msg += (
                    ", should have 3 arguments: %s(key, value, timestamp)"
                    % callback.__name__
                )
                raise TypeError(err_msg)
        if not force:
            # Event starting with '__' are reserved
            if key.startswith("__"):
                raise ValueError("event key starting with '__' are reserved")
            # We predefined few callbacks you can't use
            if key in self.__restricted_callbacks:
                raise ValueError(
                    "we predefined few callbacks you can't use e.g. '%s'" % key
                )
        self.__callbacks[key] = callback

    def get_callbacks(self):
        """Return the mapping of event key -> registered callback."""
        return self.__callbacks

    def setup(self):
        pass

    def result(self):
        pass

    def teardown(self):
        pass
class BaseHostTest(HostTestCallbackBase):
    """Concrete base class user host tests should derive from; records
    whether its constructor actually ran."""

    # Flipped to True on the instance by __init__ (name-mangled).
    __BaseHostTest_Called = False

    def __init__(self):
        HostTestCallbackBase.__init__(self)
        self.__BaseHostTest_Called = True

    def base_host_test_inited(self):
        """Check if BaseHostTest ctor was called.

        Call to BaseHostTest is required in order to force required
        interfaces implementation.

        Returns:
            True if ctor was called.
        """
        return self.__BaseHostTest_Called
| |
from . import settings
from . import int2bit, bit2int
from bitcoin.rpc import Proxy, InWarmupError
from bitcoin.core import b2lx, lx
from http.client import CannotSendRequest, BadStatusLine
from gevent import sleep
import os
from os import listdir, path, makedirs
import subprocess
import re
import shutil
from logging import getLogger
log = getLogger(__name__)
def load_confs():
    """Discover every bitcoind conf file and map filename -> BitcoindConf."""
    discovered = {}
    for parsed in BitcoindConf.enumerate_confs():
        log.info("Found conf:{}".format(parsed.filename))
        discovered[parsed.filename] = parsed
    return discovered
class BitcoindConf:
    """
    Wrapper around a bitcoind configuration file
    """

    # Matches "key = value" lines; keys are lowercase letters only.
    # FIX: raw string -- '\s' in a plain literal is an invalid escape
    # sequence (DeprecationWarning since Python 3.6).
    conf_line = re.compile(r'\s*([a-z]*)\s*=\s*(.*)')

    @classmethod
    def list_conf_files(cls):
        """Yield the names of regular files in BITCOIN_CONF_DIR."""
        return (f for f in listdir(settings.BITCOIN_CONF_DIR)
                if path.isfile(path.join(settings.BITCOIN_CONF_DIR, f)))

    @classmethod
    def from_file(cls, filename):
        """Parse *filename* (relative to BITCOIN_CONF_DIR) into a BitcoindConf."""
        full_path = path.join(settings.BITCOIN_CONF_DIR, filename)
        if not path.exists(full_path):
            # NOTE(review): we log but still fall through to open(), which
            # raises FileNotFoundError; kept for backward compatibility.
            log.error("Conf: {} does not exist".format(full_path))
        conf = {}
        with open(full_path, 'r') as f:
            for line in f.readlines():
                match = cls.conf_line.match(line)
                if match is None:
                    continue  # skip comments / non key=value lines
                conf[match.group(1)] = match.group(2)
        return cls(filename, conf)

    @classmethod
    def enumerate_confs(cls):
        """Yield a BitcoindConf for every conf file found."""
        return (cls.from_file(fn) for fn in cls.list_conf_files())

    @classmethod
    def get_conf(cls, filename):
        # Stub: intentionally returns None (unimplemented lookup).
        return

    def __init__(self, filename, conf):
        """
        Args:
            filename: Conf file name relative to BITCOIN_CONF_DIR.
            conf: Dict of parsed key/value settings.
        """
        self.filename = filename
        self.conf = conf

    def path(self):
        """Absolute path of the conf file."""
        return path.join(settings.BITCOIN_CONF_DIR, self.filename)

    def datadir(self):
        """Per-conf data directory; created (mode 0o700) if missing."""
        dd = path.join(settings.BITCOIN_DATA_DIR, self.filename)
        makedirs(dd, mode=0o700, exist_ok=True)
        return dd

    def clean_regtest(self):
        """Delete this node's regtest chain state, if present."""
        rtd = path.join(self.datadir(), 'regtest')
        if path.exists(rtd):
            log.info("Removing regtest directory:{}".format(rtd))
            shutil.rmtree(rtd)
def start_bitcoind(conf: BitcoindConf):
    """Ensure the bitcoind instance described by *conf* is running.

    Probes the RPC endpoint first; if the connection is refused, spawns a
    bitcoind process for this conf and polls once a second until the RPC
    interface answers.
    """
    log.info("Starting: {}".format(conf.filename))
    # Create conf
    # Check conf
    # Check ports
    # Write conf
    # Connect to rpc
    rpc = connect_rpc(conf)
    try:
        log.info("Testing RPC connection: http://{}:{}".format(conf.conf['rpcbind'], conf.conf['rpcport']))
        rpc.get_info()
    except ConnectionRefusedError:
        log.info("Connection failed, starting bitcoind...")
        # setsid detaches bitcoind into its own process group so it is not
        # killed together with this process.
        subprocess.Popen(['bitcoind', '-conf={}'.format(conf.path()), "-datadir={}".format(conf.datadir())], preexec_fn=os.setsid)
        while True:
            try:
                rpc.get_info()
                break
            except ConnectionRefusedError:
                log.info("Retrying...")
                sleep(1)
    log.info("Connection successful, bitcoind:{} is running".format(conf.filename))
# Module-level cache of one BitcoindRPC client per conf filename.
conn = {}


def connect_rpc(conf: BitcoindConf):
    """Return the cached RPC client for *conf*, creating it on first use.

    :rtype: BitcoindRPC
    """
    global conn
    try:
        return conn[conf.filename]
    except KeyError:
        client = BitcoindRPC(conf)
        conn[conf.filename] = client
        return client
def try_robustly(f):
    """Decorator for BitcoindRPC methods: retry transparently on two
    failure modes.

    1. CannotSendRequest / BadStatusLine: the HTTP connection died (e.g.
       the service restarted) -- reconnect once and retry.
    2. InWarmupError: bitcoind is still starting up -- poll every 5
       seconds until it answers.
    """
    def attempt(self, *args, **kwargs):
        try:
            try:
                return f(self, *args, **kwargs)
            except (CannotSendRequest, BadStatusLine):
                # Handle reconnection if a service restarts
                self.connect()
                try:
                    return f(self, *args, **kwargs)
                except (CannotSendRequest, BadStatusLine):
                    # NOTE(review): a failure after the reconnect is only
                    # logged; the wrapped call implicitly returns None.
                    log.error("Error sending request for {}, {}".format(args, kwargs))
        except InWarmupError:
            # Also catches InWarmupError raised by the retried calls above.
            while True:
                log.info("Bitcoin still warming up, retrying...")
                sleep(5)
                try:
                    return f(self, *args, **kwargs)
                except InWarmupError:
                    continue
    return attempt
class BitcoindRPC(object):
    """Thin wrapper around the bitcoin RPC proxy; every call is made
    robust against restarts and warmup via @try_robustly."""

    # Underlying bitcoin.rpc.Proxy, (re)built by connect().
    p = None

    def __init__(self, conf):
        self.conf = conf
        self.connect()

    def connect(self):
        """(Re)build the RPC proxy from the credentials in the conf."""
        options = self.conf.conf
        service_url = '{}://{}:{}@{}:{}'.format(
            'http',
            options['rpcuser'],
            options['rpcpassword'],
            options['rpcbind'],
            options['rpcport'])
        self.p = Proxy(service_url=service_url)

    @try_robustly
    def get_info(self):
        return self.p.getinfo()

    @try_robustly
    def create_address(self):
        return str(self.p.getnewaddress())

    @try_robustly
    def get_address_balance(self, addr, minconf=0):
        return int2bit(self.p.getreceivedbyaddress(addr, minconf=minconf))

    @try_robustly
    def list_address_amounts(self, minconf=0, include_empty=True):
        # TODO: PR
        entries = self.p._call('listreceivedbyaddress', minconf, include_empty)
        return {
            entry['address']: entry['amount']
            for entry in entries
            if entry['confirmations'] >= minconf
        }

    @try_robustly
    def send(self, addr, amount):
        return b2lx(self.p.sendtoaddress(addr, bit2int(amount)))

    @try_robustly
    def get_transaction(self, txid):
        return self.p.gettransaction(lx(txid))

    @try_robustly
    def get_block(self, blockid):
        return self.p.getblock(lx(blockid))

    @try_robustly
    def get_blockchain_info(self):
        return self.p._call('getblockchaininfo')

    @try_robustly
    def generate(self, numblocks):
        return self.p.generate(numblocks)

    @try_robustly
    def list_transactions(self, count=10, skip=0):
        return self.p._call('listtransactions', '*', count, skip)

    @try_robustly
    def get_peer_info(self):
        return self.p._call('getpeerinfo')

    @try_robustly
    def get_wallet_info(self):
        return self.p._call('getwalletinfo')
| |
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from maeplot.utils import sameFloat, coverageToPercent, msecToMin
from maeplot.experiment import AVAILABLE_WORLDS, AVAILABLE_ALGORITHMS, ALGORITHM_NAMES,\
getAlgorithmNames, AVAILABLE_ROBOT_COUNTS, WORLD_NAMES, getWorldNames
import os
class ColorCyle:
    """Endless cycle over a fixed palette of translucent plot colors."""

    def __init__(self):
        self.current_ = 0
        alphaVal = 0.6
        converter = colors.ColorConverter()
        palette = ['red', 'blue', 'orange', 'olive', 'cyan']
        self.colors_ = [converter.to_rgba(name, alphaVal) for name in palette]

    def next(self):
        """Return the next color, wrapping around after the last one."""
        picked = self.colors_[self.current_]
        self.current_ = (self.current_ + 1) % len(self.colors_)
        return picked
def plotCoverageEventsPerCount(data, algorithm, worldType, endCoverage, outfile):
    """Plot mean coverage over time, one curve per robot count, for a
    single algorithm/world combination, up to *endCoverage*."""
    titleFmt = "Coverage over Time for {0} in {1}"
    plt.figure("coverage-events-per-count")
    plt.clf()
    plt.xlabel('minutes')
    plt.ylabel('coverage')
    plt.title(titleFmt.format(ALGORITHM_NAMES[algorithm], WORLD_NAMES[worldType]))
    for robotCount in sorted(data):
        meanData = data[robotCount].getMean(convertTime=msecToMin, convertCoverage=coverageToPercent)
        labelText = str(robotCount) + (" Robot" if robotCount == 1 else " Robots")
        coverages = []
        times = []
        # Keep only samples at or below the requested end coverage.
        for coverageData, timeData in zip(meanData[0], meanData[1]):
            if coverageData <= endCoverage:
                coverages.append(coverageData)
                times.append(timeData)
        plt.plot(times, coverages, linestyle='-', marker='s', label=labelText)
    plt.legend(loc='lower right')
    plt.savefig(outfile, dpi=100)
def plotBarChartPerAlgorithmPerTerrain(data, dataErr=None, outfile="", yAxLabel="", plotTitle="", maxVal=0, legendPos='upper right'):
    """Draw a grouped bar chart: one group per world (terrain), one bar
    per algorithm.

    Args:
        data: dict algorithm name -> list of values (one per world).
        dataErr: optional dict of matching error-bar values.
        outfile: if non-empty, save the figure there; otherwise show it.
        yAxLabel, plotTitle: axis label and chart title.
        maxVal: fixed y-axis maximum (0 keeps autoscaling).
        legendPos: matplotlib legend location string.
    """
    assert(len(data) == len(AVAILABLE_ALGORITHMS))
    # FIX: renamed from `colors`, which shadowed the matplotlib.colors
    # module imported at the top of the file.
    palette = ColorCyle()
    barWidth = 1.0 / (len(data) + 1)
    fig, ax = plt.subplots()
    ax.set_ylabel(yAxLabel)
    ax.set_title(plotTitle)
    if maxVal > 0:
        # Fix the y range so charts of the same metric are comparable.
        ax.set_autoscaley_on(False)
        ax.set_ylim([0, maxVal])
    for algoCount, algoName in enumerate(AVAILABLE_ALGORITHMS):
        algoData = data[algoName]
        # FIX: PEP 8 -- compare against None with "is not", not "!=".
        algoErr = dataErr[algoName] if dataErr is not None else None
        leftBorders = [i + (algoCount * barWidth) for i in xrange(len(algoData))]
        rects = ax.bar(leftBorders, algoData, yerr=algoErr, width=barWidth, label=ALGORITHM_NAMES[algoName], color=palette.next())
        # add value label on top of bars
        for rect in rects:
            barHeight = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2.0, 1.0 * barHeight, '%.1f' % barHeight,
                    ha='center', va='bottom')
    ax.set_xticks([(i + (len(data) * barWidth) / 2) for i in xrange(len(AVAILABLE_WORLDS))])
    ax.set_xticklabels(getWorldNames())
    ax.legend(loc=legendPos)
    fig.set_size_inches(9.6, 5.4)
    if len(outfile) > 0:
        plt.savefig(outfile, dpi=100)
    else:
        plt.show()
def plotBarChartPerTerrainPerAlgorithm(data, dataErr=None, outfile="", yAxLabel="", plotTitle="", maxVal=0, legendPos='upper right'):
    """Draw a grouped bar chart: one group per algorithm, one bar per
    world (terrain).

    Args:
        data: dict world name -> list of values (one per algorithm).
        dataErr: optional dict of matching error-bar values.
        outfile: if non-empty, save the figure there; otherwise show it.
        yAxLabel, plotTitle: axis label and chart title.
        maxVal: fixed y-axis maximum (0 keeps autoscaling).
        legendPos: matplotlib legend location string.
    """
    assert(len(data) == len(AVAILABLE_WORLDS))
    # FIX: renamed from `colors`, which shadowed the matplotlib.colors
    # module imported at the top of the file.
    palette = ColorCyle()
    barWidth = 1.0 / (len(data) + 1)
    fig, ax = plt.subplots()
    ax.set_ylabel(yAxLabel)
    ax.set_title(plotTitle)
    if maxVal > 0:
        # Fix the y range so charts of the same metric are comparable.
        ax.set_autoscaley_on(False)
        ax.set_ylim([0, maxVal])
    for worldCount, worldName in enumerate(AVAILABLE_WORLDS):
        worldData = data[worldName]
        # FIX: PEP 8 -- compare against None with "is not", not "!=".
        worldErr = dataErr[worldName] if dataErr is not None else None
        leftBorders = [i + (worldCount * barWidth) for i in xrange(len(worldData))]
        rects = ax.bar(leftBorders, worldData, yerr=worldErr, width=barWidth, label=WORLD_NAMES[worldName], color=palette.next())
        # add value label on top of bars
        for rect in rects:
            barHeight = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2.0, 1.0 * barHeight, '%.1f' % barHeight,
                    ha='center', va='bottom')
    ax.set_xticks([(i + (len(data) * barWidth) / 2) for i in xrange(len(AVAILABLE_ALGORITHMS))])
    ax.set_xticklabels(getAlgorithmNames())
    ax.legend(loc=legendPos)
    fig.set_size_inches(9.6, 5.4)
    if len(outfile) > 0:
        plt.savefig(outfile, dpi=100)
    else:
        plt.show()
def plotBarChartPerRobotCountPerAlgorithm(data, dataErr=None, outfile="", yAxLabel="", plotTitle="", maxVal=0, legendPos='upper right'):
    """Draw a grouped bar chart: one group per algorithm, one bar per
    robot count.

    Args:
        data: dict robot count -> list of values (one per algorithm).
        dataErr: optional dict of matching error-bar values.
        outfile: if non-empty, save the figure there; otherwise show it.
        yAxLabel, plotTitle: axis label and chart title.
        maxVal: fixed y-axis maximum (0 keeps autoscaling).
        legendPos: matplotlib legend location string.
    """
    assert(len(data) == len(AVAILABLE_ROBOT_COUNTS))
    # FIX: renamed from `colors`, which shadowed the matplotlib.colors
    # module imported at the top of the file.
    palette = ColorCyle()
    barWidth = 1.0 / (len(data) + 1)
    fig, ax = plt.subplots()
    ax.set_ylabel(yAxLabel)
    ax.set_title(plotTitle)
    if maxVal > 0:
        # Fix the y range so charts of the same metric are comparable.
        ax.set_autoscaley_on(False)
        ax.set_ylim([0, maxVal])
    for robotNum, robotCount in enumerate(AVAILABLE_ROBOT_COUNTS):
        robotCountData = data[robotCount]
        # FIX: PEP 8 -- compare against None with "is not", not "!=".
        robotCountErr = dataErr[robotCount] if dataErr is not None else None
        leftBorders = [i + (robotNum * barWidth) for i in xrange(len(robotCountData))]
        if robotCount == 1:
            labelStr = "1 Robot"
        else:
            labelStr = str(robotCount) + " Robots"
        rects = ax.bar(leftBorders, robotCountData, yerr=robotCountErr, width=barWidth, label=labelStr, color=palette.next())
        # add value label on top of bars
        for rect in rects:
            barHeight = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2.0, 1.0 * barHeight, '%.1f' % barHeight,
                    ha='center', va='bottom')
    ax.set_xticks([(i + (len(data) * barWidth) / 2) for i in xrange(len(AVAILABLE_ALGORITHMS))])
    ax.set_xticklabels(getAlgorithmNames())
    ax.legend(loc=legendPos)
    fig.set_size_inches(9.6, 5.4)
    if len(outfile) > 0:
        plt.savefig(outfile, dpi=100)
    else:
        plt.show()
def plotTimeToReachCoverage(data, outdir, coverageToPlot):
    """Bar chart of the mean time needed to reach *coverageToPlot*,
    grouped per algorithm and terrain."""
    assert(len(data) > 0)
    assert(len(data.values()[0]) > 0)
    coveragePercent = int(coverageToPercent(coverageToPlot))
    TIME_TO_REACH_COVERAGE_FILE = "time-to-reach-coverage-{0}.png".format(coveragePercent)
    dataPerAlgo = dict()
    errPerAlgo = dict()
    for algoName, worldDict in data.iteritems():
        perWorldTimes = []
        perWorldErrs = []
        for worldType in AVAILABLE_WORLDS:
            meanData = worldDict[worldType].getMean(convertTime=msecToMin)
            matched = False
            # Find the sample closest to the requested coverage.
            for coverage, coverageTime, stdDev in zip(*meanData):
                if sameFloat(coverage, coverageToPlot, 0.01):
                    matched = True
                    perWorldTimes.append(coverageTime)
                    perWorldErrs.append(stdDev)
                    break
            if not matched:
                # Coverage never reached: plot a zero-height bar.
                perWorldTimes.append(0)
                perWorldErrs.append(0.0)
        assert(len(perWorldTimes) == len(AVAILABLE_WORLDS))
        dataPerAlgo[algoName] = perWorldTimes
        errPerAlgo[algoName] = perWorldErrs
    title = "Time to reach {0}% Coverage".format(coveragePercent)
    outfile = os.path.join(outdir, TIME_TO_REACH_COVERAGE_FILE)
    plotBarChartPerAlgorithmPerTerrain(dataPerAlgo, dataErr=errPerAlgo, outfile=outfile, yAxLabel="minutes", plotTitle=title, maxVal=270, legendPos='upper left')
def plotCoverageReachedAfterTime(data, outdir, time):
    """Bar chart of the coverage percentage reached after *time* minutes,
    grouped per algorithm and terrain."""
    assert(len(data) > 0)
    assert(len(data.values()[0]) > 0)
    COVERAGE_REACHED_AFTER_TIME_FILE = "coverage-reached-after-time-{0}.png"
    dataPerAlgo = dict()
    errPerAlgo = dict()
    for algoName, worldDict in data.iteritems():
        coveragesReached = []
        deviations = []
        for worldName in AVAILABLE_WORLDS:
            meanData = worldDict[worldName].getMean(convertTime=msecToMin, convertCoverage=coverageToPercent)
            matched = False
            # search for time in mean values
            for coverageEvent, coverageTime, stdDev in zip(*meanData):
                if int(coverageTime) == time:
                    matched = True
                    coveragesReached.append(coverageEvent)
                    deviations.append(stdDev)
                    break
            if not matched:
                # No sample at that minute: plot a zero-height bar.
                coveragesReached.append(0)
                deviations.append(0.0)
        assert(len(coveragesReached) == len(AVAILABLE_WORLDS))
        dataPerAlgo[algoName] = coveragesReached
        errPerAlgo[algoName] = deviations
    title = "Coverage reached after {0} minutes".format(time)
    outfile = os.path.join(outdir, COVERAGE_REACHED_AFTER_TIME_FILE).format(time)
    plotBarChartPerAlgorithmPerTerrain(dataPerAlgo, dataErr=errPerAlgo, outfile=outfile, yAxLabel="coverage", plotTitle=title, maxVal=150)
def plotNumberOfVisits(data, outdir, coverageToPlot):
    """Two bar charts of cell-visit counts up to *coverageToPlot*: one
    for the mean and one for the standard deviation."""
    assert(len(data) > 0)
    coveragePercent = int(coverageToPercent(coverageToPlot))
    MEAN_NUMBER_OF_VISITS_FILE = "number-of-visits-mean-to-{0}-coverage.png".format(coveragePercent)
    STD_DEV_NUMBER_OF_VISITS_FILE = "number-of-visits-standard-deviation-to-{0}-coverage.png".format(coveragePercent)
    dataPerWorld = dict()
    errPerWorld = dict()
    for worldName, algoDict in data.iteritems():
        visitMeans = []
        visitDevs = []
        for algoName in AVAILABLE_ALGORITHMS:
            meanData = algoDict[algoName].getMean()
            matched = False
            for coverage, visits, stdDev in zip(*meanData):
                if sameFloat(coverageToPlot, coverage, 0.01):
                    visitMeans.append(visits)
                    visitDevs.append(stdDev)
                    matched = True
                    break
            if not matched:
                # Coverage never reached: plot a zero-height bar.
                visitMeans.append(0.0)
                visitDevs.append(0.0)
        dataPerWorld[worldName] = visitMeans
        errPerWorld[worldName] = visitDevs
    outfile = os.path.join(outdir, MEAN_NUMBER_OF_VISITS_FILE)
    title = "Mean Number of Visits to {0}% Coverage".format(coveragePercent)
    plotBarChartPerTerrainPerAlgorithm(dataPerWorld, dataErr=None, outfile=outfile, yAxLabel="visits", plotTitle=title)
    outfile = os.path.join(outdir, STD_DEV_NUMBER_OF_VISITS_FILE)
    title = "Standard Deviation of Number of Visits to {0}% Coverage".format(coveragePercent)
    plotBarChartPerTerrainPerAlgorithm(errPerWorld, dataErr=None, outfile=outfile, yAxLabel="standard deviation", plotTitle=title)
def plotTimeBetweenVisits(data, outdir, coverageToPlot):
    """Plot mean and standard deviation of the time between visits (minutes).

    Writes two bar charts (one bar per algorithm, grouped per terrain),
    both sampled at the requested coverage level.

    :param data: {worldName: {algorithmName: stats}} where stats.getMean()
        yields parallel sequences (coverage, time, stdDev)
    :param outdir: directory the PNG files are written to
    :param coverageToPlot: coverage level (fraction) to sample the series at
    """
    assert(len(data) > 0)
    coveragePercent = int(coverageToPercent(coverageToPlot))
    # Renamed from *_NUMBER_OF_VISITS_FILE: these files hold time-between-visits
    # data; the old names were copy-paste leftovers from plotNumberOfVisits.
    MEAN_TIME_BETWEEN_VISITS_FILE = "time-between-visits-mean-to-{0}-coverage.png".format(coveragePercent)
    STD_DEV_TIME_BETWEEN_VISITS_FILE = "time-between-visits-standard-deviation-to-{0}-coverage.png".format(coveragePercent)
    dataPerWorld = dict()
    errPerWorld = dict()
    for worldName, algoDict in data.iteritems():
        worldData = []
        worldErr = []
        for algoName in AVAILABLE_ALGORITHMS:
            meanData = algoDict[algoName].getMean(convertTime=msecToMin)
            found = False
            for coverage, time, stdDev in zip(*meanData):
                if sameFloat(coverageToPlot, coverage, 0.01):
                    worldData.append(time)
                    worldErr.append(stdDev)
                    found = True
                    break
            if not found:
                # The algorithm never reached the requested coverage level.
                worldData.append(0)
                worldErr.append(0.0)
        dataPerWorld[worldName] = worldData
        errPerWorld[worldName] = worldErr
    outfile = os.path.join(outdir, MEAN_TIME_BETWEEN_VISITS_FILE)
    title = "Mean Time between Visits to {0}% Coverage".format(coveragePercent)
    yAxisLabel = "minutes"
    plotBarChartPerTerrainPerAlgorithm(dataPerWorld, dataErr=None, outfile=outfile, yAxLabel=yAxisLabel, plotTitle=title, maxVal=13)
    outfile = os.path.join(outdir, STD_DEV_TIME_BETWEEN_VISITS_FILE)
    title = "Standard Deviation of Time between Visits to {0}% Coverage".format(coveragePercent)
    yAxisLabel = "standard deviation"
    plotBarChartPerTerrainPerAlgorithm(errPerWorld, dataErr=None, outfile=outfile, yAxLabel=yAxisLabel, plotTitle=title, maxVal=13)
def plotTimeToReachCoveragePerRobotCount(data, outdir, coverageToPlot):
    """Bar-chart the mean time (minutes) needed to reach the given coverage,
    grouped per robot count, one bar per algorithm."""
    assert(len(data) > 0)
    assert(len(data.values()[0]) > 0)
    coveragePercent = int(coverageToPercent(coverageToPlot))
    outName = "time-to-reach-coverage-{0}-per-robot-count.png".format(coveragePercent)

    def timeAt(series):
        # (time, stdDev) at the requested coverage level, zeros when missing.
        for coverage, coverageTime, stdDev in zip(*series):
            if sameFloat(coverage, coverageToPlot, 0.01):
                return coverageTime, stdDev
        return 0, 0.0

    meansByCount = {}
    errsByCount = {}
    for robotCount, algoDict in data.iteritems():
        pairs = [timeAt(algoDict[algorithm].getMean(convertTime=msecToMin))
                 for algorithm in AVAILABLE_ALGORITHMS]
        assert(len(pairs) == len(AVAILABLE_ALGORITHMS))
        meansByCount[robotCount] = [p[0] for p in pairs]
        errsByCount[robotCount] = [p[1] for p in pairs]

    plotBarChartPerRobotCountPerAlgorithm(
        meansByCount, dataErr=errsByCount,
        outfile=os.path.join(outdir, outName),
        yAxLabel="minutes",
        plotTitle="Time to reach {0}% Coverage".format(coveragePercent),
        maxVal=320)
def plotCoverageReachedAfterTimePerRobotCount(data, outdir, time):
    """Bar-chart the coverage reached after `time` minutes, grouped per
    robot count, one bar per algorithm."""
    assert(len(data) > 0)
    assert(len(data.values()[0]) > 0)
    # Placeholder is filled with `time` after joining, matching the sibling
    # per-terrain plot.
    outNameTemplate = "coverage-reached-after-time-{0}-per-robot-count.png"

    def coverageAt(series):
        # (coverage, stdDev) whose integer timestamp equals `time`; zeros otherwise.
        for coverageEvent, coverageTime, stdDev in zip(*series):
            if int(coverageTime) == time:
                return coverageEvent, stdDev
        return 0, 0.0

    meansByCount = {}
    errsByCount = {}
    for robotCount, algoDict in data.iteritems():
        pairs = [coverageAt(algoDict[algorithm].getMean(convertTime=msecToMin,
                                                        convertCoverage=coverageToPercent))
                 for algorithm in AVAILABLE_ALGORITHMS]
        assert(len(pairs) == len(AVAILABLE_ALGORITHMS))
        meansByCount[robotCount] = [p[0] for p in pairs]
        errsByCount[robotCount] = [p[1] for p in pairs]

    plotBarChartPerRobotCountPerAlgorithm(
        meansByCount, dataErr=errsByCount,
        outfile=os.path.join(outdir, outNameTemplate).format(time),
        yAxLabel="coverage",
        plotTitle="Coverage reached after {0} minutes".format(time),
        maxVal=150)
| |
import os, pdb
from mongoengine.connection import (connect, disconnect)
def connect_db():
    """Open the global mongoengine connection.

    Uses the development configuration when SERVER_ENV == "development",
    otherwise falls back to the production configuration.
    """
    ENV = os.environ.get('SERVER_ENV')
    # Timestamp the connection attempt (prints a (datetime, weekday) pair).
    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), datetime.now().strftime("%A"))
    if ENV == "development":
        from configurations.development import MONGO_URI, MONGO_DBNAME
        label = "development"
    else:
        from configurations.production import MONGO_URI, MONGO_DBNAME
        label = "production"
    # The two branches previously duplicated the connect/print logic; only
    # the imported configuration module and the label actually differ.
    connect(
        db=MONGO_DBNAME,
        host=MONGO_URI
    )
    print("Mongodb({0}) is runing on: ".format(label))
    print(MONGO_URI)
def disconnect_db():
    """Close the global mongoengine connection opened by connect_db()."""
    disconnect()
from datetime import datetime
# from pymongo import MongoClient
from configurations.env_configs import *
from configurations.constants import *
from models.event_type_similarity import EventTypeSimilarity
from models.interest_similarity import InterestSimilarity
from models.job import Job
# def create_db():
# ENV = os.environ.get('SERVER_ENV')
# if ENV != "production":
# from configurations.development import MONGO_URI, MONGO_DBNAME
# client = MongoClient(MONGO_URI)
# print "Mongodb(development) is runing on: "
# print MONGO_URI
# else:
# from configurations.production import MONGO_URI, MONGO_DBNAME
# client = MongoClient(MONGO_URI)
# print "Mongodb(production) is runing on: "
# print MONGO_URI
# return client[MONGO_DBNAME]
# def update_events_table(id, df):
# db = create_db()
# # db.events_similarity_table.remove({})
# for index, row in df.iterrows():
# db.events_similarity_table.update_one(
# {
# "account_id": id,
# "user_id": row['account_id']
# },
# {
# "$set": {
# "similarity_percentage": row['similarity_percentage']
# }
# },
# upsert=True
# )
# print db.events_similarity_table.count()
def update_events_table(id, df):
    """Upsert one EventTypeSimilarity row per DataFrame row for account `id`.

    :param id: account whose similarity rows are being stored
    :param df: DataFrame with columns 'account_id' and 'similarity_percentage'
    """
    for index, row in df.iterrows():
        # NOTE(review): modify(upsert=True, new=True) already persists the
        # change; the trailing .save() re-saves the returned document and
        # looks redundant -- confirm against mongoengine docs before removing.
        EventTypeSimilarity.objects(account_id=id, user_id=row['account_id']) \
            .modify(upsert=True,
                    new=True,
                    set__similarity_percentage=row['similarity_percentage']) \
            .save()
    # Under Python 2 this print statement outputs a tuple, e.g.
    # ('Updated:', 42) -- kept as-is since it is runtime output.
    print ("Updated:", EventTypeSimilarity.objects.count())
def update_interests_table(id, df, type):
time_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
count = 0
for index, row in df.iterrows():
if id == row['account_id']:
continue
interest_count = row['interest_count']
interest_similarity = row['interest_similarity']
if type == INTEREST_TYPES['SOCIAL']:
InterestSimilarity.objects(account_id=id,
user_id=row['account_id']) \
.modify(upsert=True,
new=True,
set__social_interest_count=interest_count,
set__social_interest_similarity=interest_similarity,
set__created_at=time_now) \
.save()
elif type == INTEREST_TYPES['BUSINESS']:
InterestSimilarity.objects(account_id=id,
user_id=row['account_id']) \
.modify(upsert=True,
new=True,
set__business_interest_count=interest_count,
set__business_interest_similarity=interest_similarity,
set__created_at=time_now) \
.save()
elif type == INTEREST_TYPES['LIFESTYLE']:
InterestSimilarity.objects(account_id=id,
user_id=row['account_id']) \
.modify(upsert=True,
new=True,
set__lifestyle_interest_count=interest_count,
set__lifestyle_interest_similarity=interest_similarity,
set__created_at=time_now) \
.save()
count += 1
print 'Saved', count, 'interest similarity for account: ', id
# print ("Updated:", InterestSimilarity.objects.count())
def add_job(job_id, name):
    """Create and persist a new Job document in state 0 with zero duration.

    :param job_id: external identifier stored on the job
    :param name: human-readable job name
    :return: the new document's primary key, or None if creation failed
    """
    print('starting saving job')
    time_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    new_job = Job(job_id=job_id, name=name, state=0, duration=0, created_at=time_now)
    new_job.save()
    # NOTE(review): in mongoengine `id` is an alias for `pk`, so this check
    # looks like it is always True after a successful save -- confirm the
    # original intent (perhaps it meant to verify the save succeeded).
    if new_job.id == new_job.pk:
        print('Job: ' + str(new_job.name) + ', state: ' + str(new_job.state))
        return new_job.id
    else:
        print("Couldn't create the job.")
        return None
def update_job_state(job_obj_id, state):
    """Set a Job's state and record its duration and end time.

    Failures while saving are logged and swallowed (best-effort update).

    :param job_obj_id: primary key of the job document
    :param state: new integer state to store
    """
    print("updating job")
    job = Job.objects.with_id(job_obj_id)
    time_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    dt = datetime.strptime(time_now, "%Y-%m-%d %H:%M:%S") - job.created_at
    # BUG FIX: timedelta.seconds wraps at one day; total_seconds() yields the
    # true elapsed time for jobs running longer than 24h.
    duration = int(dt.total_seconds())
    try:
        job.state = state
        job.duration = duration
        job.ended_at = time_now
        job.save()
        print('Job: ' + str(job.name) + ', state: ' + str(job.state) + ', duration: ' + str(job.duration))
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; update stays best-effort otherwise.
        print('ERROR!')
def get_jobs():
    """Return every Job document serialised with to_json().

    Opens its own DB connection and closes it before returning.
    """
    connect_db()
    print('receiving jobs ...')
    serialised = [job.to_json() for job in Job.objects]
    disconnect_db()
    return serialised
from models.mutual_friend import MutualFriend
def update_mutual_friend_recommendations(commons):
    """Upsert one MutualFriend row per entry in `commons`.

    :param commons: dict keyed by user id; each value is a dict with
        'parent' (owning account id), 'level' (connection level) and
        'num_of_commons' (number of mutual friends)
    """
    time_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    for key, value in commons.iteritems():
        MutualFriend.objects(account_id=value['parent'],
                             user_id=key) \
            .modify(upsert=True,
                    new=True,
                    set__connection_level=value['level'],
                    set__num_of_mutual_friends=value['num_of_commons'],
                    set__created_at=time_now) \
            .save()
def build_interest_recommendation_vault_objects():
    """Build vault activity payloads from InterestSimilarity documents.

    Emits two payloads per row that has at least one non-zero
    (similarity, count) pair -- one for each direction of the relationship.

    :return: list of vault payload dicts
    """
    connect_db()
    vaults = []
    env = os.environ.get('SERVER_ENV')
    full_test = os.environ.get('FULL_TEST') == 'true'
    if env == 'production' or (env == 'staging' and full_test):
        objects = InterestSimilarity.objects
    elif env == 'staging' and not full_test:
        # Staging smoke runs are limited to a fixed set of test accounts.
        objects = InterestSimilarity.objects(account_id__in=[790, 28071, 45622])
    else:
        # BUG FIX: any other environment (e.g. development) previously left
        # `objects` undefined and raised NameError in the loop below.
        objects = []
    for obj in objects:
        # Only rows with at least one meaningful similarity dimension count.
        if (obj.social_interest_similarity != 0 and obj.social_interest_count != 0) or \
           (obj.business_interest_similarity != 0 and obj.business_interest_count != 0) or \
           (obj.lifestyle_interest_similarity != 0 and obj.lifestyle_interest_count != 0):
            vaults.append(_build_vault_object(obj.to_vault_object(), obj.to_vault_target(), obj.to_interest_vault_context()))
            vaults.append(_build_vault_object(obj.to_vault_target(), obj.to_vault_object(), obj.to_interest_vault_context_ops()))
    disconnect_db()
    return vaults
def build_mutual_recommendation_vault_objects():
    """Build vault activity payloads from MutualFriend documents.

    Emits two payloads per row with a non-zero mutual-friend count -- one
    for each direction of the relationship.

    :return: list of vault payload dicts
    """
    connect_db()
    vaults = []
    env = os.environ.get('SERVER_ENV')
    full_test = os.environ.get('FULL_TEST') == 'true'
    if env == 'production' or (env == 'staging' and full_test):
        objects = MutualFriend.objects
    elif env == 'staging' and not full_test:
        # Staging smoke runs are limited to a fixed set of test accounts.
        objects = MutualFriend.objects(account_id__in=[790, 28071, 45622])
    else:
        # BUG FIX: any other environment (e.g. development) previously left
        # `objects` undefined and raised NameError in the loop below.
        objects = []
    for obj in objects:
        if obj.num_of_mutual_friends != 0:
            vaults.append(_build_vault_object(obj.to_vault_object(), obj.to_vault_target(), obj.to_mutual_vault_context()))
            vaults.append(_build_vault_object(obj.to_vault_target(), obj.to_vault_object(), obj.to_mutual_vault_context_ops()))
    disconnect_db()
    return vaults
def _build_vault_object(obj, target, context):
return {
'actor': {
'type': "Service",
'name': "IVY Recommendation"
},
'object': obj,
'target': target,
'context': context
}
def drop_mutual_friend_collection():
    """Drop the entire MutualFriend collection (irreversible)."""
    print "Droping mutual friend collection ..."
    MutualFriend.drop_collection()
    print "Mutual friend droped"
def drop_interest_similarity_collection():
    """Drop the entire InterestSimilarity collection (irreversible)."""
    print "Droping interest similarity collection ..."
    InterestSimilarity.drop_collection()
    print "Interest similarity collection droped"
| |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import six
from oslo_log import log as logging
from tempest.api.image import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BasicOperationsImagesTest(base.BaseV2ImageTest):
    """Here we test the basic operations of images"""

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('139b765e-7f3d-4b3d-8b37-3ca3876ee318')
    def test_register_upload_get_image_file(self):
        """Here we test these functionalities

        Register image, upload the image file, get image and get image
        file api's
        """
        uuid = '00000000-1111-2222-3333-444455556666'
        image_name = data_utils.rand_name('image')
        container_format = CONF.image.container_formats[0]
        disk_format = CONF.image.disk_formats[0]
        image = self.create_image(name=image_name,
                                  container_format=container_format,
                                  disk_format=disk_format,
                                  visibility='private',
                                  ramdisk_id=uuid)
        # A freshly registered image carries the requested attributes and,
        # having no data yet, stays in 'queued' status.
        self.assertIn('name', image)
        self.assertEqual(image_name, image['name'])
        self.assertIn('visibility', image)
        self.assertEqual('private', image['visibility'])
        self.assertIn('status', image)
        self.assertEqual('queued', image['status'])

        # NOTE: This Glance API returns different status codes for image
        # condition. In this empty data case, Glance should return 204,
        # so here should check the status code.
        image_file = self.client.show_image_file(image['id'])
        self.assertEqual(0, len(image_file.data))
        self.assertEqual(204, image_file.response.status)

        # Now try uploading an image file
        file_content = data_utils.random_bytes()
        image_file = six.BytesIO(file_content)
        self.client.store_image_file(image['id'], image_file)

        # Now try to get image details
        body = self.client.show_image(image['id'])
        self.assertEqual(image['id'], body['id'])
        self.assertEqual(image_name, body['name'])
        self.assertEqual(uuid, body['ramdisk_id'])
        self.assertIn('size', body)
        # NOTE(review): 1024 presumably matches the default size of
        # data_utils.random_bytes() -- confirm against tempest.lib.
        self.assertEqual(1024, body.get('size'))

        # Now try get image file
        # NOTE: This Glance API returns different status codes for image
        # condition. In this non-empty data case, Glance should return 200,
        # so here should check the status code.
        body = self.client.show_image_file(image['id'])
        self.assertEqual(file_content, body.data)
        self.assertEqual(200, body.response.status)

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
    def test_delete_image(self):
        """Test deleting an image by image_id"""
        # Create image
        image_name = data_utils.rand_name('image')
        container_format = CONF.image.container_formats[0]
        disk_format = CONF.image.disk_formats[0]
        image = self.create_image(name=image_name,
                                  container_format=container_format,
                                  disk_format=disk_format,
                                  visibility='private')
        # Delete Image and wait until it is really gone
        self.client.delete_image(image['id'])
        self.client.wait_for_resource_deletion(image['id'])
        # Verifying deletion: the id must no longer appear in the listing
        images = self.client.list_images()['images']
        images_id = [item['id'] for item in images]
        self.assertNotIn(image['id'], images_id)

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
    def test_update_image(self):
        """Test updating an image by image_id"""
        # Create image
        image_name = data_utils.rand_name('image')
        container_format = CONF.image.container_formats[0]
        disk_format = CONF.image.disk_formats[0]
        image = self.create_image(name=image_name,
                                  container_format=container_format,
                                  disk_format=disk_format,
                                  visibility='private')
        self.assertEqual('queued', image['status'])

        # Update Image with a JSON-patch style 'replace' operation on /name
        new_image_name = data_utils.rand_name('new-image')
        self.client.update_image(image['id'], [
            dict(replace='/name', value=new_image_name)])

        # Verifying updating
        body = self.client.show_image(image['id'])
        self.assertEqual(image['id'], body['id'])
        self.assertEqual(new_image_name, body['name'])

    @decorators.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
    def test_deactivate_reactivate_image(self):
        """Test deactivating and reactivating an image"""
        # Create image
        image_name = data_utils.rand_name('image')
        image = self.create_image(name=image_name,
                                  container_format='bare',
                                  disk_format='raw',
                                  visibility='private')

        # Upload an image file so the image becomes downloadable
        content = data_utils.random_bytes()
        image_file = six.BytesIO(content)
        self.client.store_image_file(image['id'], image_file)

        # Deactivate image
        self.client.deactivate_image(image['id'])
        body = self.client.show_image(image['id'])
        self.assertEqual("deactivated", body['status'])

        # User unable to download deactivated image
        self.assertRaises(lib_exc.Forbidden, self.client.show_image_file,
                          image['id'])

        # Reactivate image
        self.client.reactivate_image(image['id'])
        body = self.client.show_image(image['id'])
        self.assertEqual("active", body['status'])

        # User able to download image after reactivation
        body = self.client.show_image_file(image['id'])
        self.assertEqual(content, body.data)
class ListUserImagesTest(base.BaseV2ImageTest):
    """Here we test the listing of image information"""

    @classmethod
    def resource_setup(cls):
        super(ListUserImagesTest, cls).resource_setup()
        # We add a few images here to test the listing functionality of
        # the images API
        container_fmts = CONF.image.container_formats
        disk_fmts = CONF.image.disk_formats
        all_pairs = [(container_fmt, disk_fmt)
                     for container_fmt in container_fmts
                     for disk_fmt in disk_fmts]
        # Cap fixture creation at six images to keep setup time bounded
        for (container_fmt, disk_fmt) in all_pairs[:6]:
            LOG.debug("Creating an image "
                      "(Container format: %s, Disk format: %s).",
                      container_fmt, disk_fmt)
            cls._create_standard_image(container_fmt, disk_fmt)

    @classmethod
    def _create_standard_image(cls, container_format, disk_format):
        """Create a new standard image and return the newly-registered image-id

        Note that the size of the new image is a random number between
        1024 and 4096
        """
        size = random.randint(1024, 4096)
        image_file = six.BytesIO(data_utils.random_bytes(size))
        tags = [data_utils.rand_name('tag'), data_utils.rand_name('tag')]
        image = cls.create_image(container_format=container_format,
                                 disk_format=disk_format,
                                 visibility='private',
                                 tags=tags)
        cls.client.store_image_file(image['id'], data=image_file)
        # Keep the data of one test image so it can be used to filter lists
        cls.test_data = image

        return image['id']

    def _list_by_param_value_and_assert(self, params):
        """Perform list action with given params and validates result."""
        # Retrieve the list of images that meet the filter
        images_list = self.client.list_images(params=params)['images']
        # Validating params of fetched images: every returned image must
        # carry exactly the requested value for every filter key
        msg = 'No images were found that met the filter criteria.'
        self.assertNotEmpty(images_list, msg)
        for image in images_list:
            for key in params:
                msg = "Failed to list images by %s" % key
                self.assertEqual(params[key], image[key], msg)

    def _list_sorted_by_image_size_and_assert(self, params, desc=False):
        """Validate an image list that has been sorted by size

        Perform list action with given params and validates the results are
        sorted by image size in either ascending or descending order.
        """
        # Retrieve the list of images that meet the filter
        images_list = self.client.list_images(params=params)['images']
        # Validate that the list was fetched sorted accordingly
        msg = 'No images were found that met the filter criteria.'
        self.assertNotEmpty(images_list, msg)
        sorted_list = [image['size'] for image in images_list]
        msg = 'The list of images was not sorted correctly.'
        self.assertEqual(sorted(sorted_list, reverse=desc), sorted_list, msg)

    @decorators.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
    def test_list_no_params(self):
        """Simple test to see all fixture images returned"""
        images_list = self.client.list_images()['images']
        image_list = [image['id'] for image in images_list]

        for image in self.created_images:
            self.assertIn(image, image_list)

    @decorators.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
    def test_list_images_param_container_format(self):
        """Test to get all images with a specific container_format"""
        params = {"container_format": self.test_data['container_format']}
        self._list_by_param_value_and_assert(params)

    @decorators.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
    def test_list_images_param_disk_format(self):
        """Test to get all images with disk_format = raw"""
        params = {"disk_format": "raw"}
        self._list_by_param_value_and_assert(params)

    @decorators.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
    def test_list_images_param_visibility(self):
        """Test to get all images with visibility = private"""
        params = {"visibility": "private"}
        self._list_by_param_value_and_assert(params)

    @decorators.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
    def test_list_images_param_size(self):
        """Test to get all images by size"""
        image_id = self.created_images[0]
        # Get image metadata
        image = self.client.show_image(image_id)

        params = {"size": image['size']}
        self._list_by_param_value_and_assert(params)

    @decorators.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
    def test_list_images_param_min_max_size(self):
        """Test to get all images with min size and max size"""
        image_id = self.created_images[0]
        # Get image metadata
        image = self.client.show_image(image_id)

        size = image['size']
        # A +/- 500 byte window around a known image's size must contain it
        params = {"size_min": size - 500, "size_max": size + 500}
        images_list = self.client.list_images(params=params)['images']
        image_size_list = map(lambda x: x['size'], images_list)

        for image_size in image_size_list:
            self.assertGreaterEqual(image_size, params['size_min'],
                                    "Failed to get images by size_min")
            self.assertLessEqual(image_size, params['size_max'],
                                 "Failed to get images by size_max")

    @decorators.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
    def test_list_images_param_status(self):
        """Test to get all active images"""
        params = {"status": "active"}
        self._list_by_param_value_and_assert(params)

    @decorators.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
    def test_list_images_param_limit(self):
        """Test to get images by limit"""
        params = {"limit": 1}
        images_list = self.client.list_images(params=params)['images']

        self.assertEqual(len(images_list), params['limit'],
                         "Failed to get images by limit")

    @decorators.idempotent_id('e9a44b91-31c8-4b40-a332-e0a39ffb4dbb')
    def test_list_image_param_owner(self):
        """Test to get images by owner"""
        image_id = self.created_images[0]
        # Get image metadata
        image = self.client.show_image(image_id)

        params = {"owner": image['owner']}
        self._list_by_param_value_and_assert(params)

    @decorators.idempotent_id('55c8f5f5-bfed-409d-a6d5-4caeda985d7b')
    def test_list_images_param_name(self):
        """Test to get images by name"""
        params = {'name': self.test_data['name']}
        self._list_by_param_value_and_assert(params)

    @decorators.idempotent_id('aa8ac4df-cff9-418b-8d0f-dd9c67b072c9')
    def test_list_images_param_tag(self):
        """Test to get images matching a tag"""
        params = {'tag': self.test_data['tags'][0]}
        images_list = self.client.list_images(params=params)['images']
        # Validating properties of fetched images
        self.assertNotEmpty(images_list)
        for image in images_list:
            msg = ("The image {image_name} does not have the expected tag "
                   "{expected_tag} among its tags: {observerd_tags}."
                   .format(image_name=image['name'],
                           expected_tag=self.test_data['tags'][0],
                           observerd_tags=image['tags']))
            self.assertIn(self.test_data['tags'][0], image['tags'], msg)

    @decorators.idempotent_id('eeadce49-04e0-43b7-aec7-52535d903e7a')
    def test_list_images_param_sort(self):
        """Test listing images sorting in descending order"""
        params = {'sort': 'size:desc'}
        self._list_sorted_by_image_size_and_assert(params, desc=True)

    @decorators.idempotent_id('9faaa0c2-c3a5-43e1-8f61-61c54b409a49')
    def test_list_images_param_sort_key_dir(self):
        """Test listing images sorting by size in descending order"""
        params = {'sort_key': 'size', 'sort_dir': 'desc'}
        self._list_sorted_by_image_size_and_assert(params, desc=True)

    @decorators.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
    def test_get_image_schema(self):
        """Test to get image schema"""
        schema = "image"
        body = self.schemas_client.show_schema(schema)
        self.assertEqual("image", body['name'])

    @decorators.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
    def test_get_images_schema(self):
        """Test to get images schema"""
        schema = "images"
        body = self.schemas_client.show_schema(schema)
        self.assertEqual("images", body['name'])
class ListSharedImagesTest(base.BaseV2ImageTest):
    """Here we test the listing of a shared image information"""

    # A second ('alt') credential set acts as the image consumer
    credentials = ['primary', 'alt']

    @classmethod
    def setup_clients(cls):
        super(ListSharedImagesTest, cls).setup_clients()
        cls.image_member_client = cls.os_primary.image_member_client_v2
        cls.alt_img_client = cls.os_alt.image_client_v2

    @decorators.idempotent_id('3fa50be4-8e38-4c02-a8db-7811bb780122')
    def test_list_images_param_member_status(self):
        """Test listing images by member_status and visibility"""
        # Create an image to be shared using default visibility
        image_file = six.BytesIO(data_utils.random_bytes(2048))
        container_format = CONF.image.container_formats[0]
        disk_format = CONF.image.disk_formats[0]
        image = self.create_image(container_format=container_format,
                                  disk_format=disk_format)
        self.client.store_image_file(image['id'], data=image_file)

        # Share the image created with the alt user
        self.image_member_client.create_image_member(
            image_id=image['id'], member=self.alt_img_client.tenant_id)

        # As an image consumer you need to provide the member_status parameter
        # along with the visibility=shared parameter in order for it to show
        # results
        params = {'member_status': 'pending', 'visibility': 'shared'}
        fetched_images = self.alt_img_client.list_images(params)['images']
        self.assertEqual(1, len(fetched_images))
        self.assertEqual(image['id'], fetched_images[0]['id'])
| |
#!/usr/bin/env python
"""
A simple script that will load the app data using the data files found in static/initialdata/
This script assumes that the database is empty and does not attempt to clear existing nodes.
"""
__author__ = "Carson McDonald <carson@ioncannon.net>"
__copyright__ = "Copyright 2012 Carson McDonald"
__license__ = "See LICENSE"
import os
import gzip
from urlparse import urlparse
from py2neo import neo4j
from py2neo import gremlin
"""
A couple setup variables that can be changed. The load directory is where the static
initialization files are assumed to be. The batch size is how large of a request is
made for each call to the REST service.
"""
load_dir = "static/initialdata/"
batch_size = 500
def get_graph_db():
    """Return a neo4j GraphDatabaseService.

    Honours the NEO4J_REST_URL environment variable (including credentials
    embedded in the URL); falls back to the default local server otherwise.
    """
    rest_url = os.environ.get('NEO4J_REST_URL')
    if not rest_url:
        return neo4j.GraphDatabaseService("http://localhost:7474/db/data")
    parsed = urlparse(rest_url)
    service_url = 'http://{host}:{port}{path}'.format(host=parsed.hostname, port=parsed.port, path=parsed.path)
    return neo4j.GraphDatabaseService(service_url, user_name=parsed.username, password=parsed.password)
def load_post_nodes(graph_db):
    """
    Load post nodes

    File format: ntype|postId|favoriteCount|score

    Nodes are created in batches of `batch_size` and each node is added to
    the "Posts" index under its postId.

    @param: graph_db a connection to the neo4j database
    """
    posts_idx = graph_db.get_node_index("Posts")
    print "Loading post nodes"
    postnodesfile = gzip.open(load_dir + 'postnodes.dat.gz', 'rb')
    data_set = []
    count = 0
    for line in postnodesfile:
        # The first line (count == 0, presumably a header) is skipped.
        if count > 0:
            values = line.rstrip().split('|')
            data_set.append({ 'ntype':values[0], 'postId':int(values[1]), 'favoriteCount':int(values[2]), 'score':int(values[3]) })
        count += 1
        # Flush a full batch; every 10th batch logs progress.
        if count % batch_size == 0:
            if count % (batch_size * 10) == 0:
                print "\tSaving batch: {count}".format(count=count)
            nodes = graph_db.create_nodes(*data_set)
            data_set = []
            posts_idx.start_batch()
            for node in nodes: posts_idx.add(node, 'postId', node.get_properties()['postId'])
            posts_idx.submit_batch()
    # Flush whatever is left in the final partial batch.
    if len(data_set) != 0:
        print "\tSaving batch: {count}".format(count=count)
        nodes = graph_db.create_nodes(*data_set)
        data_set = []
        posts_idx.start_batch()
        for node in nodes: posts_idx.add(node, 'postId', node.get_properties()['postId'])
        posts_idx.submit_batch()
    postnodesfile.close()
def load_tag_nodes(graph_db):
    """
    Load tag nodes

    File format (per the original header): id|ntype|tagName
    NOTE(review): the code reads the tag name from column 0 and hard-codes
    ntype='tag', which contradicts the stated format -- confirm the actual
    layout of tagnodes.dat.gz.

    Nodes are created in batches of `batch_size` and each node is added to
    the "Tags" index under its tagName.

    @param: graph_db a connection to the neo4j database
    """
    tags_idx = graph_db.get_node_index("Tags")
    print "Loading tag nodes"
    tagnodesfile = gzip.open(load_dir + 'tagnodes.dat.gz', 'rb')
    data_set = []
    count = 0
    for line in tagnodesfile:
        # The first line (count == 0, presumably a header) is skipped.
        if count > 0:
            values = line.rstrip().split('|')
            data_set.append({ 'ntype':'tag', 'tagName':values[0] })
        count += 1
        # Flush a full batch; every 10th batch logs progress.
        if count % batch_size == 0:
            if count % (batch_size * 10) == 0:
                print "\tSaving batch: {count}".format(count=count)
            nodes = graph_db.create_nodes(*data_set)
            data_set = []
            tags_idx.start_batch()
            for node in nodes: tags_idx.add(node, 'tagName', node.get_properties()['tagName'])
            tags_idx.submit_batch()
    # Flush whatever is left in the final partial batch.
    if len(data_set) != 0:
        print "\tSaving batch: {count}".format(count=count)
        nodes = graph_db.create_nodes(*data_set)
        data_set = []
        tags_idx.start_batch()
        for node in nodes: tags_idx.add(node, 'tagName', node.get_properties()['tagName'])
        tags_idx.submit_batch()
    tagnodesfile.close()
def load_user_nodes(graph_db):
    """
    Load user nodes

    File format (per the original header): id|ntype|userId|name
    NOTE(review): the code reads userId from column 0 and name from column 1
    and hard-codes ntype='user', which contradicts the stated format --
    confirm the actual layout of usernodes.dat.gz.

    Nodes are created in batches of `batch_size` and each node is added to
    the "Users" index under its userId.

    @param: graph_db a connection to the neo4j database
    """
    users_idx = graph_db.get_node_index("Users")
    print "Loading user nodes"
    usernodesfile = gzip.open(load_dir + 'usernodes.dat.gz', 'rb')
    data_set = []
    count = 0
    for line in usernodesfile:
        # The first line (count == 0, presumably a header) is skipped.
        if count > 0:
            values = line.rstrip().split('|')
            data_set.append({ 'ntype':'user', 'userId':int(values[0]), 'name':values[1] })
        count += 1
        # Flush a full batch; every 10th batch logs progress.
        if count % batch_size == 0:
            if count % (batch_size * 10) == 0:
                print "\tSaving batch: {count}".format(count=count)
            nodes = graph_db.create_nodes(*data_set)
            data_set = []
            users_idx.start_batch()
            for node in nodes: users_idx.add(node, 'userId', node.get_properties()['userId'])
            users_idx.submit_batch()
    # Flush whatever is left in the final partial batch.
    if len(data_set) != 0:
        print "\tSaving batch: {count}".format(count=count)
        nodes = graph_db.create_nodes(*data_set)
        data_set = []
        users_idx.start_batch()
        for node in nodes: users_idx.add(node, 'userId', node.get_properties()['userId'])
        users_idx.submit_batch()
    usernodesfile.close()
def load_user_to_post_rels(graph_db):
    """
    Load user to post relationships

    File format: source|target|etype

    Creates 'posted' relationships (carrying the etype property) from user
    nodes to post nodes, resolved via the "Users" and "Posts" indexes, in
    batches of `batch_size`.

    @param: graph_db a connection to the neo4j database
    """
    users_idx = graph_db.get_node_index("Users")
    posts_idx = graph_db.get_node_index("Posts")
    print "Loading user to post relations"
    usertopostfile = gzip.open(load_dir + 'usertopost.dat.gz', 'rb')
    data_set = []
    count = 0
    for line in usertopostfile:
        # The first line (count == 0, presumably a header) is skipped.
        if count > 0:
            values = line.rstrip().split('|')
            # Each endpoint must resolve to exactly one indexed node.
            source_node = users_idx.search('userId', int(values[0]))
            target_node = posts_idx.search('postId', int(values[1]))
            if len(source_node) != 1 or len(target_node) != 1:
                print "Could not find node that should be available: {start} -> {end}".format(start=values[0], end=values[1])
            else:
                data_set.append({ 'start_node':source_node[0], 'end_node':target_node[0], 'type':'posted', 'data': {'etype':values[2]} })
        count += 1
        # Flush a full batch; every 10th batch logs progress.
        if count % batch_size == 0:
            if count % (batch_size * 10) == 0:
                print "\tSaving batch: {count}".format(count=count)
            graph_db.create_relationships(*data_set)
            data_set = []
    # Flush whatever is left in the final partial batch.
    if len(data_set) != 0:
        print "\tSaving batch: {count}".format(count=count)
        graph_db.create_relationships(*data_set)
        data_set = []
    usertopostfile.close()
def load_tag_to_post_rels(graph_db):
    """
    Load tag to post relationships

    File format: source|target

    Creates 'tagged' relationships from post nodes to tag nodes, resolved
    via the "Posts" and "Tags" indexes, in batches of `batch_size`.

    @param: graph_db a connection to the neo4j database
    """
    posts_idx = graph_db.get_node_index("Posts")
    tags_idx = graph_db.get_node_index("Tags")
    print "Loading tag to post relations"
    tagtopostfile = gzip.open(load_dir + 'tagtopost.dat.gz', 'rb')
    data_set = []
    count = 0
    for line in tagtopostfile:
        # The first line (count == 0, presumably a header) is skipped.
        if count > 0:
            values = line.rstrip().split('|')
            # Each endpoint must resolve to exactly one indexed node.
            source_node = posts_idx.search('postId', int(values[0]))
            target_node = tags_idx.search('tagName', values[1])
            if len(source_node) != 1 or len(target_node) != 1:
                print "Could not find node that should be available: {start} -> {end}".format(start=values[0], end=values[1])
            else:
                data_set.append({ 'start_node':source_node[0], 'end_node':target_node[0], 'type':'tagged' })
        count += 1
        # Flush a full batch; every 10th batch logs progress.
        if count % batch_size == 0:
            if count % (batch_size * 10) == 0:
                print "\tSaving batch: {count}".format(count=count)
            graph_db.create_relationships(*data_set)
            data_set = []
    # Flush whatever is left in the final partial batch.
    if len(data_set) != 0:
        print "\tSaving batch: {count}".format(count=count)
        graph_db.create_relationships(*data_set)
        data_set = []
    tagtopostfile.close()
def load_post_to_user_rels(graph_db):
    """
    Load post to user relationships

    File format: source|target

    Creates 'posted_by' relationships from post nodes to user nodes,
    resolved via the "Posts" and "Users" indexes, in batches of
    `batch_size`.

    @param: graph_db a connection to the neo4j database
    """
    users_idx = graph_db.get_node_index("Users")
    posts_idx = graph_db.get_node_index("Posts")
    print "Loading post to user relations"
    posttouserfile = gzip.open(load_dir + 'posttouser.dat.gz', 'rb')
    data_set = []
    count = 0
    for line in posttouserfile:
        # The first line (count == 0, presumably a header) is skipped.
        if count > 0:
            values = line.rstrip().split('|')
            # Each endpoint must resolve to exactly one indexed node.
            source_node = posts_idx.search('postId', int(values[0]))
            target_node = users_idx.search('userId', int(values[1]))
            if len(source_node) != 1 or len(target_node) != 1:
                print "Could not find node that should be available: {start} -> {end}".format(start=values[0], end=values[1])
            else:
                data_set.append({ 'start_node':source_node[0], 'end_node':target_node[0], 'type':'posted_by' })
        count += 1
        # Flush a full batch; every 10th batch logs progress.
        if count % batch_size == 0:
            if count % (batch_size * 10) == 0:
                print "\tSaving batch: {count}".format(count=count)
            graph_db.create_relationships(*data_set)
            data_set = []
    # Flush whatever is left in the final partial batch.
    if len(data_set) != 0:
        print "\tSaving batch: {count}".format(count=count)
        graph_db.create_relationships(*data_set)
        data_set = []
    posttouserfile.close()
def load_child_to_parent_rels(graph_db):
    """
    Load post child parent relationships.

    File format: source|target|accepted (childPostId|parentPostId|bool);
    the first line is treated as a header and skipped.
    @param: graph_db a connection to the neo4j database
    """
    posts_idx = graph_db.get_node_index("Posts")
    print "Loading post child to parent relations"
    postchildparentfile = gzip.open(load_dir + 'postchildparent.dat.gz', 'rb')
    data_set = []
    count = 0
    for line in postchildparentfile:
        # count == 0 only on the first (header) line, which is not parsed.
        if count > 0:
            values = line.rstrip().split('|')
            source_node = posts_idx.search('postId', int(values[0]))
            target_node = posts_idx.search('postId', int(values[1]))
            # Each endpoint must resolve to exactly one indexed node.
            if len(source_node) != 1 or len(target_node) != 1:
                print "Could not find node that should be available: {start} -> {end}".format(start=values[0], end=values[1])
            else:
                # 'accepted' is the literal string 'true' for accepted answers.
                data_set.append({ 'start_node':source_node[0], 'end_node':target_node[0], 'type':'answer', 'data':{'accepted':values[2] == 'true'} })
        count += 1
        # Flush accumulated relationships in batches; log every 10th batch.
        if count % batch_size == 0:
            if count % (batch_size * 10) == 0:
                print "\tSaving batch: {count}".format(count=count)
            graph_db.create_relationships(*data_set)
            data_set = []
    # Flush any remaining partial batch.
    if len(data_set) != 0:
        print "\tSaving batch: {count}".format(count=count)
        graph_db.create_relationships(*data_set)
        data_set = []
    postchildparentfile.close()
def load_parent_to_child_rels(graph_db):
    """
    Load post parent child relationships.

    File format: source|target (parentPostId|childPostId); the first
    line is treated as a header and skipped.
    @param: graph_db a connection to the neo4j database
    """
    posts_idx = graph_db.get_node_index("Posts")
    print "Loading post parent to child relations"
    postparentchildfile = gzip.open(load_dir + 'postparentchild.dat.gz', 'rb')
    data_set = []
    count = 0
    for line in postparentchildfile:
        # count == 0 only on the first (header) line, which is not parsed.
        if count > 0:
            values = line.rstrip().split('|')
            source_node = posts_idx.search('postId', int(values[0]))
            target_node = posts_idx.search('postId', int(values[1]))
            # Each endpoint must resolve to exactly one indexed node.
            if len(source_node) != 1 or len(target_node) != 1:
                print "Could not find node that should be available: {start} -> {end}".format(start=values[0], end=values[1])
            else:
                # Relationship runs parent -> child with type 'question'.
                data_set.append({ 'start_node':source_node[0], 'end_node':target_node[0], 'type':'question' })
        count += 1
        # Flush accumulated relationships in batches; log every 10th batch.
        if count % batch_size == 0:
            if count % (batch_size * 10) == 0:
                print "\tSaving batch: {count}".format(count=count)
            graph_db.create_relationships(*data_set)
            data_set = []
    # Flush any remaining partial batch.
    if len(data_set) != 0:
        print "\tSaving batch: {count}".format(count=count)
        graph_db.create_relationships(*data_set)
        data_set = []
    postparentchildfile.close()
if __name__ == '__main__':
    print "Starting load script"
    # Node loads run first: the relationship loaders resolve their
    # endpoints through the node indexes these populate.
    load_post_nodes(get_graph_db())
    load_tag_nodes(get_graph_db())
    load_user_nodes(get_graph_db())
    load_user_to_post_rels(get_graph_db())
    load_tag_to_post_rels(get_graph_db())
    load_post_to_user_rels(get_graph_db())
    load_child_to_parent_rels(get_graph_db())
    load_parent_to_child_rels(get_graph_db())
    print "Loading data complete"
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_messaging.rpc import dispatcher
from heat.common import exception
from heat.common import service_utils
from heat.engine import service
from heat.engine import stack as parser
from heat.engine import stack_lock
from heat.objects import stack as stack_object
from heat.objects import stack_lock as stack_lock_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils
class StackDeleteTest(common.HeatTestCase):
    """Tests for EngineService.delete_stack across lock-ownership cases."""

    def setUp(self):
        super(StackDeleteTest, self).setUp()
        self.ctx = utils.dummy_context()
        self.man = service.EngineService('a-host', 'a-topic')
        self.man.create_periodic_tasks()

    @mock.patch.object(parser.Stack, 'load')
    def test_stack_delete(self, mock_load):
        """Deleting an existing, unlocked stack succeeds."""
        stack_name = 'service_delete_test_stack'
        stack = tools.get_stack(stack_name, self.ctx)
        sid = stack.store()
        mock_load.return_value = stack
        s = stack_object.Stack.get_by_id(self.ctx, sid)
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        # Wait for the delete thread group to finish before asserting.
        self.man.thread_group_mgr.groups[sid].wait()
        mock_load.assert_called_once_with(self.ctx, stack=s)

    def test_stack_delete_nonexist(self):
        """Deleting an unstored stack raises EntityNotFound."""
        stack_name = 'service_delete_nonexist_test_stack'
        stack = tools.get_stack(stack_name, self.ctx)
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.delete_stack,
                               self.ctx, stack.identifier())
        self.assertEqual(exception.EntityNotFound, ex.exc_info[0])

    @mock.patch.object(parser.Stack, 'load')
    @mock.patch.object(stack_lock.StackLock, 'try_acquire')
    def test_stack_delete_acquired_lock(self, mock_acquire, mock_load):
        """Delete proceeds when this engine acquires the stack lock."""
        mock_acquire.return_value = self.man.engine_id
        stack_name = 'service_delete_test_stack_acquired_lock'
        stack = tools.get_stack(stack_name, self.ctx)
        sid = stack.store()
        mock_load.return_value = stack
        st = stack_object.Stack.get_by_id(self.ctx, sid)
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        self.man.thread_group_mgr.groups[sid].wait()
        mock_acquire.assert_called_once_with()
        mock_load.assert_called_once_with(self.ctx, stack=st)

    @mock.patch.object(parser.Stack, 'load')
    @mock.patch.object(stack_lock.StackLock, 'try_acquire')
    def test_stack_delete_acquired_lock_stop_timers(self, mock_acquire,
                                                    mock_load):
        """Delete removes any timers registered for the stack."""
        mock_acquire.return_value = self.man.engine_id
        stack_name = 'service_delete_test_stack_stop_timers'
        stack = tools.get_stack(stack_name, self.ctx)
        sid = stack.store()
        mock_load.return_value = stack
        st = stack_object.Stack.get_by_id(self.ctx, sid)
        self.man.thread_group_mgr.add_timer(stack.id, 'test')
        self.assertEqual(1, len(self.man.thread_group_mgr.groups[sid].timers))
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        # Timer list must be emptied by the delete.
        self.assertEqual(0, len(self.man.thread_group_mgr.groups[sid].timers))
        self.man.thread_group_mgr.groups[sid].wait()
        mock_acquire.assert_called_once_with()
        mock_load.assert_called_once_with(self.ctx, stack=st)

    @mock.patch.object(parser.Stack, 'load')
    @mock.patch.object(stack_lock.StackLock, 'try_acquire')
    @mock.patch.object(stack_lock.StackLock, 'acquire')
    def test_stack_delete_current_engine_active_lock(self, mock_acquire,
                                                     mock_try, mock_load):
        """A lock held by this very engine stops its threads, then deletes."""
        self.man.start()
        stack_name = 'service_delete_test_stack_current_active_lock'
        stack = tools.get_stack(stack_name, self.ctx)
        sid = stack.store()
        # Insert a fake lock into the db
        stack_lock_object.StackLock.create(
            self.ctx, stack.id, self.man.engine_id)
        # Create a fake ThreadGroup too
        self.man.thread_group_mgr.groups[stack.id] = tools.DummyThreadGroup()
        st = stack_object.Stack.get_by_id(self.ctx, sid)
        mock_load.return_value = stack
        mock_try.return_value = self.man.engine_id
        mock_stop = self.patchobject(self.man.thread_group_mgr, 'stop')
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        # Stack is loaded twice: once for the check, once for the delete.
        mock_load.assert_called_with(self.ctx, stack=st)
        self.assertEqual(2, len(mock_load.mock_calls))
        mock_try.assert_called_once_with()
        mock_acquire.assert_called_once_with(True)
        mock_stop.assert_called_once_with(stack.id)

    @mock.patch.object(parser.Stack, 'load')
    @mock.patch.object(stack_lock.StackLock, 'try_acquire')
    @mock.patch.object(service_utils, 'engine_alive')
    def test_stack_delete_other_engine_active_lock_failed(self, mock_alive,
                                                          mock_try, mock_load):
        """StopActionFailed is raised when the live lock holder won't stop."""
        OTHER_ENGINE = "other-engine-fake-uuid"
        self.man.start()
        stack_name = 'service_delete_test_stack_other_engine_lock_fail'
        stack = tools.get_stack(stack_name, self.ctx)
        sid = stack.store()
        # Insert a fake lock into the db
        stack_lock_object.StackLock.create(self.ctx, stack.id, OTHER_ENGINE)
        st = stack_object.Stack.get_by_id(self.ctx, sid)
        mock_load.return_value = stack
        mock_try.return_value = OTHER_ENGINE
        mock_alive.return_value = True
        # Remote stop_stack call reports failure.
        mock_call = self.patchobject(self.man, '_remote_call',
                                     return_value=False)
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.delete_stack,
                               self.ctx, stack.identifier())
        self.assertEqual(exception.StopActionFailed, ex.exc_info[0])
        mock_load.assert_called_once_with(self.ctx, stack=st)
        mock_try.assert_called_once_with()
        mock_alive.assert_called_once_with(self.ctx, OTHER_ENGINE)
        mock_call.assert_called_once_with(self.ctx, OTHER_ENGINE, "stop_stack",
                                          stack_identity=mock.ANY)

    @mock.patch.object(parser.Stack, 'load')
    @mock.patch.object(stack_lock.StackLock, 'try_acquire')
    @mock.patch.object(service_utils, 'engine_alive')
    @mock.patch.object(stack_lock.StackLock, 'acquire')
    def test_stack_delete_other_engine_active_lock_succeeded(
            self, mock_acquire, mock_alive, mock_try, mock_load):
        """Delete proceeds once the live lock holder agrees to stop."""
        OTHER_ENGINE = "other-engine-fake-uuid"
        self.man.start()
        stack_name = 'service_delete_test_stack_other_engine_lock'
        stack = tools.get_stack(stack_name, self.ctx)
        sid = stack.store()
        # Insert a fake lock into the db
        stack_lock_object.StackLock.create(self.ctx, stack.id, OTHER_ENGINE)
        st = stack_object.Stack.get_by_id(self.ctx, sid)
        mock_load.return_value = stack
        mock_try.return_value = OTHER_ENGINE
        mock_alive.return_value = True
        # Remote stop_stack call succeeds (returns None).
        mock_call = self.patchobject(self.man, '_remote_call',
                                     return_value=None)
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        self.man.thread_group_mgr.groups[sid].wait()
        self.assertEqual(2, len(mock_load.mock_calls))
        mock_load.assert_called_with(self.ctx, stack=st)
        mock_try.assert_called_once_with()
        mock_alive.assert_called_once_with(self.ctx, OTHER_ENGINE)
        mock_call.assert_called_once_with(self.ctx, OTHER_ENGINE, "stop_stack",
                                          stack_identity=mock.ANY)
        mock_acquire.assert_called_once_with(True)

    @mock.patch.object(parser.Stack, 'load')
    @mock.patch.object(stack_lock.StackLock, 'try_acquire')
    @mock.patch.object(service_utils, 'engine_alive')
    @mock.patch.object(stack_lock.StackLock, 'acquire')
    def test_stack_delete_other_dead_engine_active_lock(
            self, mock_acquire, mock_alive, mock_try, mock_load):
        """A lock held by a dead engine is stolen and delete proceeds."""
        OTHER_ENGINE = "other-engine-fake-uuid"
        stack_name = 'service_delete_test_stack_other_dead_engine'
        stack = tools.get_stack(stack_name, self.ctx)
        sid = stack.store()
        # Insert a fake lock into the db
        stack_lock_object.StackLock.create(
            self.ctx, stack.id, "other-engine-fake-uuid")
        st = stack_object.Stack.get_by_id(self.ctx, sid)
        mock_load.return_value = stack
        mock_try.return_value = OTHER_ENGINE
        mock_alive.return_value = False
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        self.man.thread_group_mgr.groups[sid].wait()
        mock_load.assert_called_with(self.ctx, stack=st)
        mock_try.assert_called_once_with()
        mock_acquire.assert_called_once_with(True)
        mock_alive.assert_called_once_with(self.ctx, OTHER_ENGINE)
| |
#!/usr/bin/env python
"""
Compliance Checker
"""
from __future__ import unicode_literals
from functools import wraps
import pprint
from netCDF4 import Dataset
from owslib.swe.observation.sos100 import SensorObservationService_1_0_0
from owslib.swe.sensor.sml import SensorML
from owslib.namespaces import Namespaces
from compliance_checker import __version__
from lxml import etree
import sys
def get_namespaces():
    """Return the XML namespace map used by the SOS/SensorML checks."""
    namespaces = Namespaces()
    wanted = ["ogc", "sml", "gml", "sos", "swe", "xlink"]
    mapping = namespaces.get_namespaces(wanted)
    # "ows" deliberately points at the OWS 1.1.0 namespace.
    mapping["ows"] = namespaces.get_namespace("ows110")
    return mapping
class BaseCheck(object):
    """Root class for all checkers; defines check priority levels."""
    # Check priority/weight levels, highest first.
    HIGH = 3
    MEDIUM = 2
    LOW = 1
    # Version of the compliance_checker package that produced the results.
    _cc_checker_version = __version__
    # Dataset types this checker can handle; subclasses override.
    supported_ds = []
    def setup(self, ds):
        """
        Common setup method for a Checker.
        Automatically run when running a CheckSuite. Define this method in your Checker class.
        """
        pass
class BaseNCCheck(object):
    """
    Base Class for NetCDF Dataset supporting Check Suites.
    """
    supported_ds = [Dataset]

    @classmethod
    def std_check_in(cls, dataset, name, allowed_vals):
        """
        Score a NetCDF attribute against a list of allowed values.

        Returns 0 if attr not present, 1 if present but not in correct value, 2 if good
        """
        if name not in dataset.ncattrs():
            return 0
        is_allowed = dataset.getncattr(name) in allowed_vals
        return 2 if is_allowed else 1

    @classmethod
    def std_check(cls, dataset, name):
        """Return True when the dataset carries attribute *name*."""
        return name in dataset.ncattrs()
class BaseSOSGCCheck(object):
    """
    Base class for SOS-GetCapabilities supporting Check Suites.
    """
    # Dataset types this checker can handle.
    supported_ds = [SensorObservationService_1_0_0]
class BaseSOSDSCheck(object):
    """
    Base class for SOS-DescribeSensor supporting Check Suites.
    """
    # Dataset types this checker can handle.
    supported_ds = [SensorML]
class Result(object):
    """
    Holds the result of a check method.
    Stores such information as the check's value (True, False, a 2-tuple of (pass, total) or None for a skip),
    weight of the check, any granular messages, or a hierarchy of results. If given value is not a tuple, it
    is cast as a boolean using the bool() function.
    Stores the checker instance and the check method that produced this result.
    """
    def __init__(self,
                 weight=BaseCheck.MEDIUM,
                 value=None,
                 name=None,
                 msgs=None,
                 children=None,
                 checker=None,
                 check_method=None,
                 variable_name=None):
        self.weight = weight
        # None means "skipped"; a 2-tuple is (passed, total); anything
        # else is coerced to bool.
        if value is None:
            self.value = None
        elif isinstance(value, tuple):
            assert len(value) == 2, 'Result value must be 2-tuple or boolean!'
            self.value = value
        else:
            self.value = bool(value)
        self.name = name
        self.msgs = msgs or []
        self.children = children or []
        self.checker = checker
        self.check_method = check_method
        self.variable_name = variable_name

    def __repr__(self):
        ret = '{} (*{}): {}'.format(self.name, self.weight, self.value)
        if len(self.msgs):
            if len(self.msgs) == 1:
                ret += ' ({})'.format(self.msgs[0])
            else:
                ret += ' ({!s} msgs)'.format(len(self.msgs))
        if len(self.children):
            ret += ' ({!s} children)'.format(len(self.children))
            ret += '\n' + pprint.pformat(self.children)
        # python 2 requires repr to be an ASCII string
        # python 3 requires repr to be a unicode string
        if sys.version_info[0] < 3:
            return ret.encode("utf-8")
        return ret

    def serialize(self):
        '''
        Returns a serializable dictionary that represents the result object
        '''
        return {
            'name' : self.name,
            'weight' : self.weight,
            'value' : self.value,
            'msgs' : self.msgs,
            'children' : [i.serialize() for i in self.children]
        }

    def __eq__(self, other):
        # Equality is structural, over the serialized representation.
        # NOTE(review): no matching __hash__ is defined; on Python 3 this
        # makes instances unhashable -- confirm that is intended.
        return self.serialize() == other.serialize()
class TestCtx(object):
    '''
    Simple struct object that accumulates a score, an out-of total and
    failure messages, and can compile them into a Result.
    '''

    def __init__(self, category=None, description='', out_of=0, score=0,
                 messages=None, variable=None):
        # Falsy category falls back to the lowest priority.
        self.category = category or BaseCheck.LOW
        self.out_of = out_of
        self.score = score
        self.messages = messages or []
        self.description = description or ''
        self.variable = variable

    def to_result(self):
        '''Compile the accumulated state into a Result object.'''
        return Result(self.category, (self.score, self.out_of),
                      self.description, self.messages,
                      variable_name=self.variable)

    def assert_true(self, test, message):
        '''
        Count one check: bump the score when *test* holds, otherwise
        record *message* as a failure.
        '''
        self.out_of += 1
        if not test:
            self.messages.append(message)
        else:
            self.score += 1
def std_check_in(dataset, name, allowed_vals):
    """
    Score presence/validity of attribute *name* on *dataset*.

    Returns 0 if attr not present, 1 if present but not in correct value, 2 if good
    """
    if not hasattr(dataset, name):
        return 0
    current = getattr(dataset, name)
    return 2 if current in allowed_vals else 1
def std_check(dataset, name):
    """
    Return True when *dataset* has an attribute called *name*.

    The previous implementation fetched the attribute and discarded the
    result before returning; hasattr() already performs that access
    internally, so the extra getattr() was redundant.
    """
    return hasattr(dataset, name)
def xpath_check(tree, xpath):
    """Checks whether tree contains one or more elements matching xpath"""
    matches = xpath(tree)
    return len(matches) > 0
def attr_check(l, ds, priority, ret_val):
    """
    Handles attribute checks for simple presence of an attribute, presence of
    one of several attributes, and passing a validation function. Returns a
    status along with an error message in the event of a failure. Mutates
    ret_val parameter
    """
    msgs = []
    if isinstance(l, tuple):
        name, other = l
        # (name, iterable): membership check against allowed values.
        if hasattr(other, '__iter__'):
            # redundant, we could easily do this with a hasattr
            # check instead
            res = std_check_in(ds, name, other)
            if res == 0:
                msgs.append("Attr %s not present" % name)
            elif res == 1:
                msgs.append("Attr %s present, but not in expected value list (%s)" % (name, other))
            ret_val.append(Result(priority, (res, 2), name, msgs))
        # if we have an XPath expression, call it on the document
        elif type(other) is etree.XPath:
            # TODO: store tree instead of creating it each time?
            res = xpath_check(ds._root, other)
            if not res:
                msgs = ["XPath for {} not found".format(name)]
            ret_val.append(Result(priority, res, name, msgs))
        # if the attribute is a function, call it
        # right now only supports single attribute
        # important note: current magic approach uses all functions
        # starting with "check". Avoid naming check functions
        # starting with check if you want to pass them in with
        # a tuple to avoid them being checked more than once
        elif hasattr(other, '__call__'):
            # check that the attribute is actually present.
            # This reduces boilerplate in functions by not needing
            # to check whether the attribute is present every time
            # and instead focuses on the core functionality of the
            # test
            res = std_check(ds, name)
            if not res:
                msgs = ["Attr %s not present" % name]
                ret_val.append(Result(priority, res, name, msgs))
            else:
                # The validator returns an unweighted partial (see
                # ratable_result); weight it with the given priority now.
                ret_val.append(other(ds)(priority))
        # unsupported second type in second
        else:
            raise TypeError("Second arg in tuple has unsupported type: {}".format(type(other)))
    else:
        # Bare attribute name: simple presence (and non-blank) check.
        res = std_check(ds, l)
        if not res:
            msgs = ["Attr %s not present" % l]
        else:
            try:
                # see if this attribute is a string, try stripping
                # whitespace, and return an error if empty
                att_strip = getattr(ds, l).strip()
                if not att_strip:
                    res = False
                    msgs = ["Attr %s is empty or completely whitespace" % l]
            # if not a string/has no strip method we should be OK
            except AttributeError:
                pass
        ret_val.append(Result(priority, res, l, msgs))
    return ret_val
def check_has(priority=BaseCheck.HIGH):
    """
    Decorator factory: wrap a method that returns a list of attribute
    specs so that it returns the corresponding list of Results instead
    (each spec is evaluated through attr_check at the given priority).
    """
    def _inner(func):
        def _dec(s, ds):
            results = []
            # could potentially run tests in parallel if we eliminated side
            # effects on the accumulator
            for spec in func(s, ds):
                # attr_check mutates the accumulator in place.
                attr_check(spec, ds, priority, results)
            return results
        return wraps(func)(_dec)
    return _inner
def fix_return_value(v, method_name, method=None, checker=None):
    """
    Transforms scalar return values into Result.

    Falls back to the bound method's own name when *method_name* is not
    supplied; the common "check_" prefix is stripped either way.
    """
    if not method_name:
        method_name = method.__func__.__name__
    clean_name = method_name.replace("check_", "")
    if v is None or not isinstance(v, Result):
        v = Result(value=v, name=clean_name)
    if not v.name:
        v.name = clean_name
    v.checker = checker
    v.check_method = method
    return v
def ratable_result(value, name, msgs):
    """Returns a partial function with a Result that has not been weighted."""
    def _weighted(w):
        return Result(w, value, name, msgs)
    return _weighted
def score_group(group_name=None):
    """
    Decorator factory that prefixes *group_name* onto each returned
    Result's name, producing a grouped (hierarchical) name tuple.
    """
    def _inner(func):
        def _dec(s, ds):
            ret_val = func(s, ds)
            """
            if group_name != None and not isinstance(ret_val[0], tuple):
                return tuple([(group_name, ret_val[0])] + list(ret_val[1:]))
            """
            # multiple returns
            if not isinstance(ret_val, list):
                ret_val = [ret_val]

            def dogroup(r):
                # Prepend the group name to the (possibly nested) result name.
                cur_grouping = r.name
                if isinstance(cur_grouping, tuple):
                    cur_grouping = list(cur_grouping)
                elif not isinstance(cur_grouping, list):
                    cur_grouping = [cur_grouping]
                cur_grouping.insert(0, group_name)
                return Result(r.weight, r.value, tuple(cur_grouping), r.msgs)

            # Normalize scalar returns into Result objects before grouping.
            ret_val = [fix_return_value(x, func.__name__, func, s) for x in
                       ret_val]
            ret_val = list(map(dogroup, ret_val))
            return ret_val
        return wraps(func)(_dec)
    return _inner
| |
import socket, time, warnings
###
import rtypes
from rexceptions import RConnectionRefused, REvalError, PyRserveClosed
from rserializer import rEval, rAssign
from rparser import rparse
RSERVEPORT = 6311
DEBUG = False
def connect(host='', port=RSERVEPORT, atomicArray=False):
    """Open a connection to a Rserve instance"""
    # if host in (None, ''):
    #     # On Win32 it seems that passing an empty string as 'localhost' does not work
    #     # So just to be sure provide the full local hostname if None or '' were passed.
    #     host = socket.gethostname()
    assert port is not None, 'port number must be given'
    return RConnector(host, port, atomicArray)
def rconnect(host='', port=RSERVEPORT):
    """Deprecated method - use connect() instead """
    # Kept only for backward compatibility; delegates to connect().
    warnings.warn("pyRserve.rconnect() is deprecated, use pyRserve.connect() instead.", DeprecationWarning)
    return connect(host=host, port=port)
def checkIfClosed(func):
    """
    Decorator guarding connector methods: raise PyRserveClosed when the
    connection has already been closed, otherwise run the method.
    """
    def decoCheckIfClosed(self, *args, **kw):
        if not self.isClosed:
            return func(self, *args, **kw)
        raise PyRserveClosed('Connection to Rserve already closed')
    return decoCheckIfClosed
class RConnector(object):
    '@brief Provides a network connector to an Rserve process'
    def __init__(self, host, port, atomicArray):
        self.host = host
        self.port = port
        self.atomicArray = atomicArray
        self.connect()
        # Two access points into the R namespace: by value (r) and by
        # reference/proxy (ref).
        self.r = RNameSpace(self)
        self.ref = RNameSpaceReference(self)

    def __repr__(self):
        txt = 'Closed handle' if self.isClosed else 'Handle'
        return '<%s to Rserve on %s:%s>' % (txt, self.host or 'localhost', self.port)

    @property
    def isClosed(self):
        # True once close() has been called; set False by connect().
        return self.__closed

    def connect(self):
        """Open the TCP socket to Rserve and consume the server ID header."""
        self.sock = socket.socket()
        try:
            self.sock.connect((self.host, self.port))
        except socket.error:
            raise RConnectionRefused('Connection denied, server not reachable or not accepting connections')
        time.sleep(0.2)
        hdr = self.sock.recv(1024)
        self.__closed = False
        if DEBUG:
            print 'received hdr %s from rserve' % hdr
        assert hdr.startswith('Rsrv01') # make sure we are really connected with rserv
        # TODO: possibly also do version checking here to make sure we understand the protocol...

    @checkIfClosed
    def close(self):
        '@brief Close network connection to rserve'
        self.sock.close()
        self.__closed = True

    @checkIfClosed
    def __call__(self, aString):
        # Deprecated calling style; kept for backward compatibility.
        warnings.warn("conn() is deprecated, use conn.r() instead.", DeprecationWarning)
        return self.eval(aString)

    def _reval(self, aString):
        # Serialize and send an eval request; the reply is read separately.
        rEval(aString, fp=self.sock)

    @checkIfClosed
    def eval(self, aString):
        '@brief Evaluate a string expression through Rserve and return the result transformed into python objects'
        if type(aString) != str:
            raise TypeError('Only string evaluation is allowed')
        self._reval(aString)
        if DEBUG:
            # Read entire data into memory en block, it's easier to debug
            src = self._receive()
            print 'Raw response:', repr(src)
        else:
            src = self.sock.makefile()
        try:
            return rparse(src, atomicArray=self.atomicArray)
        except REvalError:
            # R has reported an evaluation error, so let's obtain a descriptive explanation
            # about why the error has occurred. R allows to retrieve the error message
            # of the last exception via a built-in function called 'geterrmessage()'.
            errorMsg = self.eval('geterrmessage()').strip()
            raise REvalError(errorMsg)

    @checkIfClosed
    def _receive(self):
        '@brief Receive the result from a previous call to rserve.'
        raw = self.sock.recv(rtypes.SOCKET_BLOCK_SIZE)
        d = [raw]
        # Keep reading while full blocks arrive; a short read marks the end.
        while len(raw) == rtypes.SOCKET_BLOCK_SIZE:
            raw = self.sock.recv(rtypes.SOCKET_BLOCK_SIZE)
            d.append(raw)
        return ''.join(d)

    # @checkIfClosed
    # def _raw(self, *args, **kw):
    #     self.send(*args)
    #     return self.receive()

    @checkIfClosed
    def setRexp(self, name, o):
        '@brief Convert a python object into an RExp and bind it to a variable called "name" in the R namespace'
        rAssign(name, o, self.sock)
        # Rserv sends an empty confirmation message, or error message in case of an error.
        # rparse() will raise an Exception in the latter case.
        rparse(self.sock, atomicArray=self.atomicArray)

    @checkIfClosed
    def getRexp(self, name):
        '@brief Retrieve a Rexp stored in a variable called "name"'
        return self.eval(name)

    @checkIfClosed
    def callFunc(self, name, *args, **kw):
        '''
        @brief make a call to a function "name" through Rserve
        @detail positional and keyword arguments are first stored as local variables in
                the R namespace and then delivered to the function.
        @result Whatever the result of the called function is.
        '''
        if name == 'rm':
            # SPECIAL HANDLING FOR "rm()":
            # Calling "rm" with real values instead of reference to values works, however
            # it doesn't produce the desired effect (it only removes our temporarily created
            # variables). To avoid confusion for the users a check is applied here to make
            # sure that "args" only contains variable or function references (proxies) and
            # NOT values!
            assert filter(lambda x:not isinstance(x, RBaseProxy), args) == (), \
                'Only references to variables or functions allowed for "rm()"'
        argNames = []
        for idx, arg in enumerate(args):
            if isinstance(arg, RBaseProxy):
                # NOTE(review): RBaseProxy stores the name as _name; this
                # reads arg.name -- confirm a 'name' accessor exists.
                argName = arg.name
            else:
                # Plain values are bound to a temporary R variable first.
                argName = 'arg_%d_' % idx
                self.setRexp(argName, arg)
            argNames.append(argName)
        for key, value in kw.items():
            if isinstance(value, RBaseProxy):
                argName = value.name
            else:
                argName = 'kwarg_%s_' % key
                self.setRexp(argName, value)
            argNames.append('%s=%s' % (key, argName))
        return self.eval(name+'(%s)' % ', '.join(argNames))

    @checkIfClosed
    def assign(self, aDict):
        '@brief Assign all items of the dictionary to the default R namespace'
        for k, v in aDict.items():
            self.setRexp(k, v)

    @checkIfClosed
    def isFunction(self, name):
        '@brief Checks whether given name references an existing function in R'
        return self.eval('is.function(%s)' % name)
class RNameSpace(object):
    'An instance of this class serves as access point to the default namespace of an Rserve connection'
    def __init__(self, rconn):
        # Stored through __dict__ so __setattr__ stays free for R assignments.
        self.__dict__['_rconn'] = rconn
    def __setattr__(self, name, o):
        '@brief Assign an rExpr to a variable called "name"'
        self._rconn.setRexp(name, o)
    def __getattr__(self, name):
        '@brief Either retrieve Rexp stored in a variable called "name" or make call to function called "name"'
        # A leading underscore requests a proxy to the variable rather
        # than its current value.
        realname = name[1:] if name.startswith('_') else name
        try:
            isFunction = self._rconn.isFunction(realname)
        except:
            # an error is only raised if neither such a function or variable exists at all!
            # NOTE(review): bare except also masks connection errors --
            # consider narrowing to the expected exception type.
            raise NameError('no such variable or function "%s" defined in Rserve' % realname)
        if isFunction:
            return RFuncProxy(realname, self._rconn)
        elif name.startswith('_'):
            return RVarProxy(realname, self._rconn)
        else:
            return self._rconn.getRexp(name)
    def __call__(self, aString):
        '@brief Evaluate the given R expression string over the connection'
        return self._rconn.eval(aString)
class RNameSpaceReference(object):
    'Provides references to R objects (a proxy), NOT directly to their values'
    def __init__(self, rconn):
        # Stored through __dict__ to avoid triggering attribute machinery.
        self.__dict__['_rconn'] = rconn
    def __getattr__(self, name):
        '@brief Returns either a reference proxy to a variable or to a function'
        try:
            isFunction = self._rconn.isFunction(name)
        except:
            # an error is only raised if neither such a function or variable exists at all!
            raise NameError('no such variable or function "%s" defined in Rserve' % name)
        if isFunction:
            return RFuncProxy(name, self._rconn)
        else:
            return RVarProxy(name, self._rconn)
class RBaseProxy(object):
    """
    Proxy for a reference to a variable or function in R.

    Do not use this directly, only its subclasses.
    """
    def __init__(self, name, rconn):
        # R-side identifier and the connection used to resolve it.
        self._name = name
        self._rconn = rconn
class RVarProxy(RBaseProxy):
    """Proxy for a reference to a variable in R."""
    def __repr__(self):
        return '<RVarProxy to variable "{0}">'.format(self._name)
    def value(self):
        """Fetch and return the current value of the referenced R variable."""
        return self._rconn.getRexp(self._name)
class RFuncProxy(RBaseProxy):
    'Proxy for function calls to Rserve'
    def __repr__(self):
        return '<RFuncProxy to function "%s">' % self._name
    def __call__(self, *args, **kw):
        # Forward the call (with args/kwargs) to R via the connection.
        return self._rconn.callFunc(self._name, *args, **kw)
    @property
    def __doc__(self):
        # Fetch the R help page text for this function, if available.
        try:
            d = self._rconn.eval('readLines(as.character(help(%s)))' % self._name)
        except REvalError:
            # probably no help available, unfortunately there is no specific code for this...
            return None
        helpstring = '\n'.join(d)
        # Strip the backspace-overstrike sequences R uses for bold text.
        helpstring = helpstring.replace('_\x08', '')
        return helpstring
    def help(self):
        '@brief Print the R help page for this function'
        print self.__doc__
    def __getattr__(self, name):
        """Allow for nested name space calls, e.g. 't.test' """
        concatName = "%s.%s" % (self._name, name)
        try:
            isFunction = self._rconn.isFunction(concatName)
        except:
            # an error is only raised if neither such a function or variable exists at all!
            raise NameError('no such variable or function "%s" defined in Rserve' % concatName)
        return RFuncProxy(concatName, self._rconn)
if __name__ == '__main__':
    import os, readline, atexit
    # Setup history and readline facility for remote q:
    histfile = os.path.join(os.environ['HOME'], '.pyhistory')
    try:
        readline.read_history_file(histfile)
    except IOError:
        # No history file yet -- first run.
        pass
    atexit.register(readline.write_history_file, histfile)
    conn = rconnect()
    print '''"conn" is your handle to rserve. Type e.g. "conn('1')" for string evaluation.'''
    #r('x<-1:20; y<-x*2; lm(y~x)')
    sc = open('../testData/test-script.R').read()
    v = conn(sc)
    # NOTE(review): assumes the script's 4th result item is PNG bytes -- confirm.
    open('r-test-png.png', 'w').write(v[3])
    conn.r.v = 'abc'
    # Define a few sample R functions for interactive experimentation.
    conn('func0 <- function() { 3 }')
    conn('func1 <- function(a1) { a1 }')
    conn('func2 <- function(a1, a2) { list(a1, a2) }')
    conn('funcKW <- function(a1=1, a2=4) { list(a1, a2) }')
    conn('squared<-function(t) t^2')
| |
from copy import deepcopy, copy
from math import sqrt
import math
import os
import subprocess
import time
import vtk
from vtk import vtkTransform
from ddapp import objectmodel as om
from ddapp import ikplanner
from ddapp import ik
import bot_frames
from PythonQt import QtCore, QtGui
from ddapp import botpy, filterUtils
from ddapp import lcmUtils
from ddapp import planplayback
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp.simpletimer import SimpleTimer
from ddapp.drilldemo import Drill
import drc as lcmdrc
import numpy as np
class Timer(object):
    """Context manager that prints elapsed wall-clock time on exit."""
    def __init__(self, name=None):
        # Optional label printed before the elapsed time.
        self.name = name
    def __enter__(self):
        self.tstart = time.time()
    def __exit__(self, type, value, traceback):
        # Trailing comma keeps label and time on one line (Python 2 print).
        if self.name:
            print '[%s]' % self.name,
        print 'Elapsed: %s' % (time.time() - self.tstart)
class FTContactSensor(object):
    """
    Detects hand/object contact from wrist strain-gauge LCM messages.

    Starts in a zero-offset ("resetting") phase that averages the first
    ncount samples into an offset, then switches to detection mode and
    fires onContactCallback once when the filtered signal on the
    hand-dependent axis drops below the hard-coded threshold.
    """
    # for zero offsetting
    nvalue = 12
    offset = np.zeros(nvalue)
    count = 0
    # Number of samples averaged into the offset.
    ncount = 10
    resetting = True # mode
    # NOTE(review): detection below compares against the literal -0.05,
    # not this attribute -- confirm which one is authoritative.
    threshold = 0.05
    onContactCallback = None
    lcmsub = None
    graspingHand = None
    def __init__(self, onContactCallback, graspingHand):
        # Subscribe to wrist strain gauges; callback fires once on contact.
        lcmUtils.addSubscriber('WRIST_STRAIN_GAUGES', lcmdrc.atlas_strain_gauges_t, self.handleFTSignal)
        self.onContactCallback = onContactCallback
        self.graspingHand = graspingHand
    def handleFTSignal(self, data):
        """LCM callback: accumulate the zero offset, or detect contact."""
        if self.resetting:
            #print 'resetting'
            self.count = self.count + 1
            # Running average of the first ncount samples becomes the offset.
            self.offset = np.asarray(data.strain_gauges) / float(self.ncount) + self.offset
            if self.count == self.ncount:
                self.resetting = False
        else: # detecting mode
            filtered = data.strain_gauges - self.offset
            #print 'detecting mode', filtered[0:3]
            # Channel index depends on which hand is grasping.
            if self.graspingHand == 'left': index = 2
            else : index = 8
            if filtered[index] < -0.05: # this is the threshold for detecting hitting a drill on a table, [2] means the z axis which is parallel to hand direction
                if self.onContactCallback is not None:
                    self.onContactCallback()
                    # Fire only once per reset cycle.
                    self.onContactCallback = None
    def reset(self):
        """Restart the zero-offset phase."""
        self.count = 0
        self.resetting = True
        self.offset = np.zeros(12)
    def __exit__(self):
        # NOTE(review): lcmsub is never assigned by this class, so this is
        # currently a no-op; __exit__ also lacks the (type, value, tb)
        # signature a context manager receives -- confirm intended usage.
        if self.lcmsub is not None:
            lcmUtils.removeSubscriber(self.lcmsub)
class PFGrasp(object):
    """Particle-filter (pfgrasp) guided grasping demo.

    Coordinates an external pfgrasp estimator with the manipulation
    planner: each iteration the estimator publishes a refined reach target
    (REACH_TARGET_POSE) from hand-camera TLD tracking, and this class plans
    and (in auto mode) commits a corrective end-effector move.  Once the
    remaining correction is small enough it performs a guarded forward
    move, closes the hand on contact, lifts and retreats.
    """

    graspingHand = 'left'
    TLDResultMsg = None
    ui = None                # optional Qt panel used for logging / settings
    contactDetector = None   # FTContactSensor active during guarded moves

    def __init__(self, drillDemo, robotModel, playbackRobotModel, teleopRobotModel, footstepPlanner, manipPlanner, ikPlanner,
                 lhandDriver, rhandDriver, atlasDriver, multisenseDriver, affordanceFitFunction, sensorJointController,
                 planPlaybackFunction, showPoseFunction, cameraView, segmentationpanel):
        self.drillDemo = drillDemo
        self.robotModel = robotModel
        self.playbackRobotModel = playbackRobotModel  # not used inside the demo
        self.teleopRobotModel = teleopRobotModel  # not used inside the demo
        self.footstepPlanner = footstepPlanner
        self.manipPlanner = manipPlanner
        self.ikPlanner = ikPlanner
        self.lhandDriver = lhandDriver
        self.rhandDriver = rhandDriver
        self.atlasDriver = atlasDriver
        self.multisenseDriver = multisenseDriver
        self.affordanceFitFunction = affordanceFitFunction
        self.sensorJointController = sensorJointController
        self.planPlaybackFunction = planPlaybackFunction
        self.showPoseFunction = showPoseFunction
        self.cameraView = cameraView
        self.segmentationpanel = segmentationpanel
        self.pointerTracker = None
        self.projectCallback = None
        self.drillYawSliderValue = 0.0
        self.segmentationpanel.init()  # TODO: check with Pat. I added dependency on segmentationpanel, but am sure its appropriate
        self.defaultGraspingHand = "left"
        self.imageViewName = 'CAMERALHAND'
        self.setGraspingHand(self.defaultGraspingHand)
        self.TLDResultMsg = lcmdrc.image_roi_t()
        self.tldsub = lcmUtils.addSubscriber('TLD_OBJECT_ROI_RESULT', lcmdrc.image_roi_t, self.TLDReceived)
        self.targetsub = lcmUtils.addSubscriber('REACH_TARGET_POSE', bot_frames.update_t, self.TargetReceived)
        self.autoMode = False
        self.drill = Drill()

    def setGraspingHand(self, graspingHand):
        """Select 'left' or 'right' hand and the matching hand-camera view."""
        self.graspingHand = graspingHand
        self.drillDemo.setGraspingHand(graspingHand)
        if graspingHand == 'left':
            self.imageViewName = 'CAMERALHAND'
        else:
            self.imageViewName = 'CAMERARHAND'

    def log(self, text):
        """Append a line to the UI status box (no-op when no UI is attached)."""
        if self.ui is not None:
            self.ui.statusTextEdit.plainText = self.ui.statusTextEdit.plainText + '\n' + text
            cursor = self.ui.statusTextEdit.textCursor()
            cursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
            self.ui.statusTextEdit.setTextCursor(cursor)
            self.ui.statusTextEdit.moveCursor(QtGui.QTextCursor.End)

    def getEstimatedRobotStatePose(self):
        """Return the current estimated robot state pose."""
        return self.sensorJointController.getPose('EST_ROBOT_STATE')

    def getPlanningStartPose(self):
        """Pose used as the start state for all planning requests."""
        return self.getEstimatedRobotStatePose()

    def start(self, autoMode):
        """Start the pfgrasp estimator (initializes its particles)."""
        msg = lcmdrc.pfgrasp_command_t()
        msg.command = lcmdrc.pfgrasp_command_t.START
        lcmUtils.publish('PFGRASP_CMD', msg)
        self.autoMode = autoMode
        if self.autoMode:
            self.runoneiter()

    def runoneiter(self):
        """Ask the pfgrasp estimator for one more estimation iteration."""
        msg = lcmdrc.pfgrasp_command_t()
        msg.command = lcmdrc.pfgrasp_command_t.RUN_ONE_ITER
        lcmUtils.publish('PFGRASP_CMD', msg)

    def planDeltaMove(self, Direction, LocalOrWorld, Amount):
        """Plan a straight-line end-effector displacement.

        :param Direction: 'X', 'Y' or 'Z' axis of the displacement
        :param LocalOrWorld: 'Local' (hand frame) or 'World' delta
        :param Amount: displacement in meters
        :returns: the manipulation plan, or None when IK fails (info > 10)
        """
        linkMap = {'left': 'l_hand_face', 'right': 'r_hand_face'}
        linkName = linkMap[self.graspingHand]
        if Direction == 'X':
            delta = transformUtils.frameFromPositionAndRPY([Amount, 0, 0], [0, 0, 0])
        elif Direction == 'Y':
            delta = transformUtils.frameFromPositionAndRPY([0, Amount, 0], [0, 0, 0])
        else:
            delta = transformUtils.frameFromPositionAndRPY([0, 0, Amount], [0, 0, 0])
        startPose = self.getPlanningStartPose()
        constraintSet = self.ikPlanner.planEndEffectorDelta(startPose, self.graspingHand,
            delta.GetPosition(), constraints=None, LocalOrWorldDelta=LocalOrWorld)
        # keep the palm orientation fixed for the remainder of the motion
        handfaceToWorld = self.ikPlanner.getLinkFrameAtPose(linkName, self.getPlanningStartPose())
        p, q = self.ikPlanner.createPositionOrientationGraspConstraints(self.graspingHand, handfaceToWorld)
        q.tspan = [0.5, np.inf]
        constraintSet.constraints.append(q)
        endPose, info = constraintSet.runIk()
        if info > 10:
            return None
        return constraintSet.runIkTraj()

    def delay(self, delayTimeInSeconds):
        """Coroutine-style pause: yields until the given time has elapsed.

        NOTE: this only pauses when the returned generator is actually
        iterated by a task scheduler; calling it and discarding the result
        does nothing.
        """
        yield
        t = SimpleTimer()
        while t.elapsed() < delayTimeInSeconds:
            yield

    def waitForPlanExecution(self, plan):
        """Block for the plan's playback duration plus a small margin."""
        self.log('in waitForPlanExecution')
        planElapsedTime = planplayback.PlanPlayback.getPlanElapsedTime(plan)
        self.log('waiting for plan execution: %f' % planElapsedTime)
        time.sleep(planElapsedTime + 1.0)

    def onRetreatPlanCommitted(self, plan):
        """Final stage: wait out the retreat motion and finish the sequence."""
        self.log('in onRetreatPlanCommitted')
        self.manipPlanner.disconnectPlanCommitted(self.onRetreatConnector)
        self.waitForPlanExecution(plan)
        self.log('in onRetreatPlanCommitted:PlanExecuted')
        self.log('done')

    def onHoldPlanCommitted(self, plan):
        """After the lift motion: plan the retreat and (auto mode) commit it."""
        self.log('in onHoldPlanCommitted')
        self.manipPlanner.disconnectPlanCommitted(self.onHoldConnector)
        self.waitForPlanExecution(plan)
        self.log('in onHoldPlanCommitted:PlanExecuted')
        # retreat: pull back along the hand's local Y axis
        plan = self.planDeltaMove('Y', 'Local', -0.20)
        self.onRetreatConnector = self.manipPlanner.connectPlanCommitted(self.onRetreatPlanCommitted)
        if self.autoMode:
            self.manipPlanner.commitManipPlan(plan)

    def grasp(self):
        """Close the grasping hand (command sent twice for reliability)."""
        driver = self.lhandDriver if self.graspingHand == 'left' else self.rhandDriver
        driver.sendClose(100)
        # BUG FIX: self.delay(1.5) returned an un-iterated generator, so the
        # intended pause between the two close commands never happened.
        time.sleep(1.5)
        driver.sendClose(100)

    def onModifiedDrillFrame(self, frame):
        self.drawDrill()

    def spawnDrillAffordance(self):
        """Create the drill affordance attached to the grasping hand."""
        if om.findObjectByName('drill') is None:
            self.drillDemo.spawnDrillAffordance()
            if self.graspingHand == 'left':
                self.moveDrill()
            else:
                self.moveDrill(RPY=[0, 180, 0])
            om.findObjectByName('drill frame').connectFrameModified(self.onModifiedDrillFrame)

    def apply3DFit(self):
        """Seed the particle filter with the 3D drill fit, if one exists."""
        if om.findObjectByName('drill') is None:
            self.log('No 3D fit of drill. Click Spawn Drill button to provide a fit.')
            # BUG FIX: previously fell through and dereferenced the missing
            # 'grasp frame' object, raising AttributeError.
            return
        msg = lcmdrc.pfgrasp_command_t()
        msg.command = lcmdrc.pfgrasp_command_t.RUN_ONE_ITER_W_3D_PRIOR
        affordanceReach = om.findObjectByName('grasp frame')
        affordanceReach.actor.GetUserTransform().GetPosition(msg.pos)
        lcmUtils.publish('PFGRASP_CMD', msg)

    def moveDrill(self, Pos=[0, 0, 0], RPY=[0, 0, 0], Style='Local'):
        """Re-attach the drill affordance to the hand palm with an offset.

        NOTE(review): the mutable default arguments are never mutated here,
        but treat them as read-only.
        """
        linkMap = {'left': 'l_hand_face', 'right': 'r_hand_face'}
        linkName = linkMap[self.graspingHand]
        affordance = om.findObjectByName('drill')
        affordanceReach = om.findObjectByName('reach frame')
        frame = om.findObjectByName('drill frame')
        drillTransform = affordance.actor.GetUserTransform()
        reach = transformUtils.copyFrame(affordanceReach.actor.GetUserTransform())
        drillTransformCopy = transformUtils.copyFrame(affordance.actor.GetUserTransform())
        # BUG FIX: vtkTransform was unqualified (NameError); use vtk.vtkTransform
        # consistently with the rest of this class.
        drillToReach = vtk.vtkTransform()
        drillToReach.Identity()
        drillToReach.PostMultiply()
        drillToReach.Concatenate(drillTransformCopy)
        drillToReach.Concatenate(reach.GetLinearInverse())
        handfaceToWorld = self.ikPlanner.getLinkFrameAtPose(linkName, self.getPlanningStartPose())
        # a transform that moves forward w.r.t. the hand palm
        delta = transformUtils.frameFromPositionAndRPY(Pos, RPY)
        drillTransform.Identity()
        drillTransform.PostMultiply()
        drillTransform.Concatenate(drillToReach)
        drillTransform.Concatenate(delta)
        drillTransform.Concatenate(handfaceToWorld)

    def stop(self):
        """Pause the current motion and disarm the contact detector."""
        self.manipPlanner.sendPlanPause()
        if self.contactDetector is not None:
            self.contactDetector.onContactCallback = None
            self.contactDetector = None

    def onContactCallback(self):
        """Contact during the guarded move: stop, grasp, then lift."""
        self.log('in onContactCallback')
        self.manipPlanner.sendPlanPause()
        self.grasp()
        # hold it by moving up
        plan = self.planDeltaMove('Z', 'World', 0.10)
        self.onHoldConnector = self.manipPlanner.connectPlanCommitted(self.onHoldPlanCommitted)
        if self.autoMode:
            self.manipPlanner.commitManipPlan(plan)
        self.contactDetector = None

    def guardedMoveForwardAndGraspHoldRetreat(self):
        """Move forward until contact, then grasp, lift and retreat."""
        self.log('in guardedMoveForward')
        max_dist = float(self.ui.disttomoveEdit.text)
        # try progressively shorter forward moves until one is feasible
        plan = None
        for forwardDist in np.linspace(max_dist, 0.01, num=5):
            plan = self.planDeltaMove('Y', 'Local', forwardDist)
            if plan is not None:
                self.log('in guardedMoveForward: forward %f' % forwardDist)
                break
        if plan is None:
            self.log('in guardedMoveForward: Bad move')
            return
        # arm the contact detector before committing the motion
        self.contactDetector = FTContactSensor(self.onContactCallback, self.graspingHand)
        if self.autoMode:
            self.manipPlanner.commitManipPlan(plan)

    def TLDReceived(self, data):
        """Cache the latest TLD tracker result."""
        self.TLDResultMsg = deepcopy(data)

    def TargetReceived(self, data):
        """Handle a refined reach target from the pfgrasp estimator.

        If the remaining correction is below the UI threshold, start the
        guarded grasp sequence (auto mode); otherwise plan a corrective
        move and request the next estimator iteration.
        """
        self.log('Target received (%.3f,%.3f,%.3f), (%.3f,%.3f,%.3f,%.3f)' %
                 (data.trans[0], data.trans[1], data.trans[2],
                  data.quat[0], data.quat[1], data.quat[2], data.quat[3]))
        if math.isnan(data.trans[0]):
            self.log('Getting NaN target, stop')
            return
        self.TargetMsg = deepcopy(data)
        targetToWorld = transformUtils.frameFromPositionAndRPY(self.TargetMsg.trans,
            np.degrees(botpy.quat_to_roll_pitch_yaw(self.TargetMsg.quat)))
        startPose = self.getPlanningStartPose()
        handToWorld = self.ikPlanner.getLinkFrameAtPose('l_hand_face', startPose)
        goalFrame = vis.updateFrame(handToWorld, 'OriginalFrame', parent='Pfgrasp', visible=True, scale=0.25)
        goalFrame2 = vis.updateFrame(targetToWorld, 'PeterFrame', parent='Pfgrasp', visible=True, scale=0.25)
        # remaining distance between the hand and the estimated target
        handToWorld_XYZ = handToWorld.GetPosition()
        targetToWorld_XYZ = targetToWorld.GetPosition()
        # BUG FIX: sqrt was unqualified (NameError at runtime); use math.sqrt
        dist = math.sqrt((handToWorld_XYZ[0] - targetToWorld_XYZ[0]) ** 2 +
                         (handToWorld_XYZ[1] - targetToWorld_XYZ[1]) ** 2 +
                         (handToWorld_XYZ[2] - targetToWorld_XYZ[2]) ** 2)
        self.log("dist %.3f" % dist)
        threshold = float(self.ui.criterionEdit.text)
        if dist < threshold:
            self.log("The correction movement is %.3f less than %.3f, you can go grasp it" % (dist, threshold))
            if self.autoMode: self.guardedMoveForwardAndGraspHoldRetreat()
        else:
            constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, self.graspingHand, targetToWorld, lockBase=False, lockBack=True)
            endPose, info = constraintSet.runIk()
            if info > 10:
                self.log("in Target received: Bad movement")
                return
            graspPlan = constraintSet.runIkTraj()
            if self.autoMode:
                self.manipPlanner.commitManipPlan(graspPlan)
                self.waitForPlanExecution(graspPlan)
                self.runoneiter()

    def turnPointwiseOffSlow(self):
        """Configure IK options for slow, non-pointwise trajectories."""
        ikplanner.getIkOptions().setProperty('Use pointwise', False)
        ikplanner.getIkOptions().setProperty('Quasistatic shrink factor', 0.1)
        ikplanner.getIkOptions().setProperty('Max joint degrees/s', 15)

    def planGraspLineMotion(self):
        """Plan a straight-line approach onto the 'grasp frame' target."""
        self.turnPointwiseOffSlow()
        startPose = self.getPlanningStartPose()
        graspFrame = vtk.vtkTransform()
        graspFrame.Identity()
        graspFrame.PostMultiply()
        if self.graspingHand == 'right':
            # flip the grasp frame for the mirrored right hand
            graspFrame.Concatenate(transformUtils.frameFromPositionAndRPY([0, 0, 0], [0, 180, 0]))
        graspFrame.Concatenate(transformUtils.copyFrame(om.findObjectByName('grasp frame').actor.GetUserTransform()))
        constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, self.graspingHand, graspFrame,
                                                           lockBase=False, lockBack=True)
        # constrain the final orientation
        p, q = self.ikPlanner.createPositionOrientationGraspConstraints(self.graspingHand, graspFrame)
        q.tspan = [0.5, 1]
        constraintSet.constraints.append(q)
        # constrain motion to the approach line axis
        positionConstraint, orientationConstraint, axisConstraint = self.ikPlanner.createMoveOnLineConstraints(startPose, graspFrame)
        # the broken (left) robot arm has a tighter joint limit
        if self.graspingHand == 'left':
            constraintSet.constraints.append(self.createBrokenArmConstraint())
        constraintSet.constraints.append(axisConstraint)
        constraintSet.constraints[-1].tspan = [0.5, np.inf]
        endPose, info = constraintSet.runIk()
        if info > 10:
            self.log("in Target received: Bad movement")
            return
        graspPlan = constraintSet.runIkTraj()

    def createBrokenArmConstraint(self):
        """Posture constraint encoding the damaged left elbow's joint limit."""
        p = ik.PostureConstraint()
        p.joints = ['l_arm_elx']
        p.jointsLowerBound = [0.673677]
        p.jointsUpperBound = [np.inf]
        p.tspan = [1, 1]
        return p

    def planReach(self):
        """Plan a reach to the 'reach frame' pre-grasp target."""
        startPose = self.getPlanningStartPose()
        reachFrame = vtk.vtkTransform()
        reachFrame.Identity()
        reachFrame.PostMultiply()
        if self.graspingHand == 'right':
            reachFrame.Concatenate(transformUtils.frameFromPositionAndRPY([0, 0, 0], [0, 180, 0]))
        reachFrame.Concatenate(transformUtils.copyFrame(om.findObjectByName('reach frame').actor.GetUserTransform()))
        constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, self.graspingHand, reachFrame, lockBase=False, lockBack=True)
        # the broken (left) robot arm has a tighter joint limit
        if self.graspingHand == 'left':
            constraintSet.constraints.append(self.createBrokenArmConstraint())
        endPose, info = constraintSet.runIk()
        if info > 10:
            self.log("in Target received: Bad movement")
            return
        reachPlan = constraintSet.runIkTraj()

    def drawFrameInCamera(self, t, frameName='new frame', visible=True):
        """Project the frame `t` into the hand-camera image as an overlay."""
        imageView = self.cameraView.views[self.imageViewName]
        v = imageView.view
        q = self.cameraView.imageManager.queue
        localToCameraT = vtk.vtkTransform()
        q.getTransform('local', self.imageViewName, localToCameraT)
        # build axes geometry from a throwaway frame object
        res = vis.showFrame(vtk.vtkTransform(), 'temp', view=v, visible=True, scale=0.2)
        om.removeFromObjectModel(res)
        pd = res.polyData
        pd = filterUtils.transformPolyData(pd, t)
        pd = filterUtils.transformPolyData(pd, localToCameraT)
        q.projectPoints(self.imageViewName, pd)
        vis.showPolyData(pd, ('overlay ' + frameName), view=v, colorByName='Axes', parent='camera overlay', visible=visible)

    def drawObjectInCamera(self, objectName, visible=True):
        """Project a scene object's geometry into the hand-camera image."""
        imageView = self.cameraView.views[self.imageViewName]
        v = imageView.view
        q = self.cameraView.imageManager.queue
        localToCameraT = vtk.vtkTransform()
        q.getTransform('local', self.imageViewName, localToCameraT)
        obj = om.findObjectByName(objectName)
        if obj is None:
            return
        objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())
        objPolyDataOriginal = obj.polyData
        pd = objPolyDataOriginal
        pd = filterUtils.transformPolyData(pd, objToLocalT)
        pd = filterUtils.transformPolyData(pd, localToCameraT)
        q.projectPoints(self.imageViewName, pd)
        vis.showPolyData(pd, ('overlay ' + objectName), view=v, color=[0, 1, 0], parent='camera overlay', visible=visible)

    def drawDrill(self, mustVisible=False):
        """Redraw the drill and grasp-frame overlays in the hand camera."""
        visible = True
        visibleframe = False
        if not mustVisible:
            # preserve the user's previous visibility preference
            overlayobj = om.findObjectByName('overlay ' + 'drill')
            if overlayobj is not None:
                visible = overlayobj.getProperty('Visible')
        overlayobj = om.findObjectByName('overlay ' + 'grasp frame')
        if overlayobj is not None:
            visibleframe = overlayobj.getProperty('Visible')
        # rebuild the overlay group from scratch
        q = om.findObjectByName('camera overlay')
        if q is not None: om.removeFromObjectModel(q)
        imageView = self.cameraView.views[self.imageViewName]
        imageView.imageActor.SetOpacity(.5)
        self.drawObjectInCamera('drill', visible=visible)
        obj = om.findObjectByName('grasp frame')
        if obj is None:
            return
        objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform())
        self.drawFrameInCamera(objToLocalT, 'grasp frame', visible=visibleframe)
        v = imageView.view
        v.render()
| |
# Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LE
from cinder.volume.drivers.emc import emc_vmax_utils
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
# SMI-S 'Type' values passed to CreateGroup.
# NOTE(review): POSTGROUPTYPE looks like a typo for "port group" — verify.
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
# Default CIM namespace on the ECOM server.
EMC_ROOT = 'root/emc'
# SMI-S 'ElementType' values used when creating volumes.
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5
class EMCVMAXProvisionV3(object):
    """Provisioning Class for SMI-S based EMC volume drivers.

    This Provisioning class is for EMC volume drivers based on SMI-S.
    It supports VMAX arrays.
    """
    def __init__(self, prtcl):
        self.protocol = prtcl
        self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)

    def delete_volume_from_pool(
            self, conn, storageConfigservice, volumeInstanceName, volumeName,
            extraSpecs):
        """Given the volume instance remove it from the pool.

        :param conn: connection the the ecom server
        :param storageConfigservice: volume created from job
        :param volumeInstanceName: the volume instance name
        :param volumeName: the volume name (String)
        :param extraSpecs: additional info
        :returns: int -- return code
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()

        # Accept either a single instance name or a list (bulk delete).
        if isinstance(volumeInstanceName, list):
            theElements = volumeInstanceName
            volumeName = 'Bulk Delete'
        else:
            theElements = [volumeInstanceName]

        rc, job = conn.InvokeMethod(
            'ReturnElementsToStoragePool', storageConfigservice,
            TheElements=theElements)

        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Delete Volume: %(volumeName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'volumeName': volumeName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod ReturnElementsToStoragePool took: "
                  "%(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        return rc

    def create_volume_from_sg(
            self, conn, storageConfigService, volumeName,
            sgInstanceName, volumeSize, extraSpecs):
        """Create the volume and associate it with a storage group.

        We use EMCCollections parameter to supply a Device Masking Group
        to contain a newly created storage volume.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage configuration service
        :param volumeName: the volume name (String)
        :param sgInstanceName: the storage group instance name
            associated with an SLO
        :param volumeSize: volume size (String)
        :param extraSpecs: additional info
        :returns: dict -- volumeDict - the volume dict
        :returns: int -- return code
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()

        rc, job = conn.InvokeMethod(
            'CreateOrModifyElementFromStoragePool',
            storageConfigService, ElementName=volumeName,
            EMCCollections=[sgInstanceName],
            ElementType=self.utils.get_num(THINPROVISIONING, '16'),
            Size=self.utils.get_num(volumeSize, '64'))

        LOG.debug("Create Volume: %(volumename)s. Return code: %(rc)lu.",
                  {'volumename': volumeName,
                   'rc': rc})

        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Create Volume: %(volumeName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'volumeName': volumeName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})

        # Find the newly created volume.
        volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
        return volumeDict, rc

    def _find_new_storage_group(
            self, conn, maskingGroupDict, storageGroupName):
        """After creating an new storage group find it and return it.

        :param conn: connection to the ecom server
        :param maskingGroupDict: the maskingGroupDict dict
        :param storageGroupName: storage group name (String)
        :returns: maskingGroupDict['MaskingGroup'] or None
        """
        foundStorageGroupInstanceName = None
        if 'MaskingGroup' in maskingGroupDict:
            foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup']
        return foundStorageGroupInstanceName

    def get_volume_dict_from_job(self, conn, jobInstance):
        """Given the jobInstance determine the volume Instance.

        :param conn: the ecom connection
        :param jobInstance: the instance of a job
        :returns: dict -- volumeDict - an instance of a volume
        """
        associators = conn.Associators(
            jobInstance,
            ResultClass='EMC_StorageVolume')
        volpath = associators[0].path
        volumeDict = {}
        volumeDict['classname'] = volpath.classname
        keys = {}
        keys['CreationClassName'] = volpath['CreationClassName']
        keys['SystemName'] = volpath['SystemName']
        keys['DeviceID'] = volpath['DeviceID']
        keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
        volumeDict['keybindings'] = keys
        return volumeDict

    def create_element_replica(
            self, conn, repServiceInstanceName,
            cloneName, syncType, sourceInstance, extraSpecs,
            targetInstance=None):
        """Make SMI-S call to create replica for source element.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: replication service
        :param cloneName: clone volume name
        :param syncType: 7=snapshot, 8=clone
        :param sourceInstance: source volume instance
        :param extraSpecs: additional info
        :param targetInstance: target volume instance. Defaults to None
        :returns: int -- rc - return code
        :returns: job - job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        if targetInstance is None:
            LOG.debug("Create targetless replica: %(clone)s "
                      "syncType: %(syncType)s Source: %(source)s.",
                      {'clone': cloneName,
                       'syncType': syncType,
                       'source': sourceInstance.path})
            rc, job = conn.InvokeMethod(
                'CreateElementReplica', repServiceInstanceName,
                ElementName=cloneName, SyncType=syncType,
                SourceElement=sourceInstance.path)
        else:
            LOG.debug(
                "Create replica: %(clone)s syncType: %(syncType)s "
                "Source: %(source)s target: %(target)s.",
                {'clone': cloneName,
                 'syncType': syncType,
                 'source': sourceInstance.path,
                 'target': targetInstance.path})
            rc, job = conn.InvokeMethod(
                'CreateElementReplica', repServiceInstanceName,
                ElementName=cloneName, SyncType=syncType,
                SourceElement=sourceInstance.path,
                TargetElement=targetInstance.path)

        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Create Cloned Volume: %(cloneName)s "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'cloneName': cloneName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod CreateElementReplica "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        return rc, job

    def break_replication_relationship(
            self, conn, repServiceInstanceName, syncInstanceName,
            operation, extraSpecs, force=False):
        """Deletes the relationship between the clone/snap and source volume.

        Makes an SMI-S call to break clone relationship between the clone
        volume and the source.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param operation: operation code
        :param extraSpecs: additional info
        :param force: force to break replication relationship if True
        :returns: rc - return code
        :returns: job - job object of the replica creation operation
        """
        LOG.debug("Break replication relationship: %(sv)s "
                  "operation: %(operation)s.",
                  {'sv': syncInstanceName, 'operation': operation})

        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, syncInstanceName, operation,
            extraSpecs, force)

    def create_storage_group_v3(self, conn, controllerConfigService,
                                groupName, srp, slo, workload, extraSpecs):
        """Create the volume in the specified pool.

        :param conn: the connection information to the ecom server
        :param controllerConfigService: the controller configuration service
        :param groupName: the group name (String)
        :param srp: the SRP (String)
        :param slo: the SLO (String)
        :param workload: the workload (String)
        :param extraSpecs: additional info
        :returns: storageGroupInstanceName - storage group instance name
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()

        rc, job = conn.InvokeMethod(
            'CreateGroup',
            controllerConfigService,
            GroupName=groupName,
            # Use the module constant rather than a magic 4 (consistency
            # with THINPROVISIONING usage above).
            Type=self.utils.get_num(STORAGEGROUPTYPE, '16'),
            EMCSRP=srp,
            EMCSLO=slo,
            EMCWorkload=workload)

        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                # BUG FIX: a bare `raise` here had no active exception to
                # re-raise; raise the backend exception like sibling methods.
                exceptionMessage = (_(
                    "Error Create Group: %(groupName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'groupName': groupName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod CreateGroup "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})

        foundStorageGroupInstanceName = self._find_new_storage_group(
            conn, job, groupName)

        return foundStorageGroupInstanceName

    def _get_storage_pool_capability(self, conn, poolInstanceName):
        """Get the pool capability.

        :param conn: the connection information to the ecom server
        :param poolInstanceName: the pool instance
        :returns: the storage pool capability instance. None if not found
        """
        storagePoolCapability = None

        associators = (
            conn.AssociatorNames(poolInstanceName,
                                 ResultClass='Symm_StoragePoolCapabilities'))

        if len(associators) > 0:
            storagePoolCapability = associators[0]

        return storagePoolCapability

    def _get_storage_pool_setting(
            self, conn, storagePoolCapability, slo, workload):
        """Get the pool setting for pool capability.

        :param conn: the connection information to the ecom server
        :param storagePoolCapability: the storage pool capability instance
        :param slo: the slo string e.g Bronze
        :param workload: the workload string e.g DSS_REP
        :returns: the storage pool setting instance
        """
        foundStoragePoolSetting = None
        # NOTE(review): lowercase 's' in 'CIM_storageSetting' looks odd
        # (CIM classes are usually CamelCase) — confirm against the provider.
        storagePoolSettings = (
            conn.AssociatorNames(storagePoolCapability,
                                 ResultClass='CIM_storageSetting'))

        for storagePoolSetting in storagePoolSettings:
            settingInstanceID = storagePoolSetting['InstanceID']
            matchString = ("%(slo)s:%(workload)s"
                           % {'slo': slo,
                              'workload': workload})
            if matchString in settingInstanceID:
                foundStoragePoolSetting = storagePoolSetting
                break

        return foundStoragePoolSetting

    def _get_supported_size_range_for_SLO(
            self, conn, storageConfigService,
            srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs):
        """Gets available performance capacity per SLO.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage configuration service instance
        :param srpPoolInstanceName: the SRP storage pool instance
        :param storagePoolSettingInstanceName: the SLO type, e.g Bronze
        :param extraSpecs: additional info
        :returns: dict -- supportedSizeDict - the supported size dict
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()

        rc, supportedSizeDict = conn.InvokeMethod(
            'GetSupportedSizeRange',
            srpPoolInstanceName,
            ElementType=self.utils.get_num(3, '16'),
            Goal=storagePoolSettingInstanceName)

        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(
                conn, supportedSizeDict, extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Cannot get supported size range for %(sps)s "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'sps': storagePoolSettingInstanceName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod GetSupportedSizeRange "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})

        return supportedSizeDict

    def get_volume_range(
            self, conn, storageConfigService, poolInstanceName, slo, workload,
            extraSpecs):
        """Get upper and lower range for volume for slo/workload combination.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage config service
        :param poolInstanceName: the pool instance
        :param slo: slo string e.g Bronze
        :param workload: workload string e.g DSS
        :param extraSpecs: additional info
        :returns: maximumVolumeSize - the maximum volume size supported
        :returns: minimumVolumeSize - the minimum volume size supported
        """
        maximumVolumeSize = None
        minimumVolumeSize = None

        storagePoolCapabilityInstanceName = self._get_storage_pool_capability(
            conn, poolInstanceName)
        if storagePoolCapabilityInstanceName:
            storagePoolSettingInstanceName = self._get_storage_pool_setting(
                conn, storagePoolCapabilityInstanceName, slo, workload)
            # BUG FIX: the original re-tested the capability name here, so a
            # failed setting lookup passed Goal=None to GetSupportedSizeRange.
            if storagePoolSettingInstanceName:
                supportedSizeDict = self._get_supported_size_range_for_SLO(
                    conn, storageConfigService, poolInstanceName,
                    storagePoolSettingInstanceName, extraSpecs)
                maximumVolumeSize = supportedSizeDict['MaximumVolumeSize']
                minimumVolumeSize = supportedSizeDict['MinimumVolumeSize']

        return maximumVolumeSize, minimumVolumeSize

    def activate_snap_relationship(
            self, conn, repServiceInstanceName, syncInstanceName, extraSpecs):
        """Activate snap relationship and start copy operation.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param extraSpecs: additional info
        :returns: int -- return code
        :returns: job object of the replica creation operation
        """
        # Operation 4: activate the snapVx.
        operation = self.utils.get_num(4, '16')

        LOG.debug("Activate snap: %(sv)s operation: %(operation)s.",
                  {'sv': syncInstanceName, 'operation': operation})

        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, syncInstanceName, operation,
            extraSpecs)

    def return_to_resource_pool(self, conn, repServiceInstanceName,
                                syncInstanceName, extraSpecs):
        """Return the snap target resources back to the pool.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
        :param extraSpecs: additional info
        :returns: rc - return code
        :returns: job object of the replica creation operation
        """
        # Operation 19: return to resource pool (the original comment
        # said "activate the snapVx", copy-pasted from the method above).
        operation = self.utils.get_num(19, '16')

        LOG.debug("Return snap resource back to pool: "
                  "%(sv)s operation: %(operation)s.",
                  {'sv': syncInstanceName, 'operation': operation})

        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, syncInstanceName, operation,
            extraSpecs)

    def _modify_replica_synchronization(
            self, conn, repServiceInstanceName, syncInstanceName,
            operation, extraSpecs, force=False):
        """Modify the relationship between the clone/snap and source volume.

        Helper function that makes an SMI-S call to break clone relationship
        between the clone volume and the source.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param operation: operation code
        :param extraSpecs: additional info
        :param force: force to modify replication synchronization if True
        :returns: int -- return code
        :returns: job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()

        rc, job = conn.InvokeMethod(
            'ModifyReplicaSynchronization', repServiceInstanceName,
            Operation=operation,
            Synchronization=syncInstanceName,
            Force=force)

        LOG.debug("_modify_replica_synchronization: %(sv)s "
                  "operation: %(operation)s Return code: %(rc)lu.",
                  {'sv': syncInstanceName, 'operation': operation, 'rc': rc})

        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error modify replica synchronization: %(sv)s "
                    "operation: %(operation)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'sv': syncInstanceName, 'operation': operation,
                       'rc': rc, 'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod ModifyReplicaSynchronization "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})

        return rc, job

    def create_group_replica(
            self, conn, replicationService,
            srcGroupInstanceName, tgtGroupInstanceName, relationName,
            extraSpecs):
        """Make SMI-S call to create replica for source group.

        :param conn: the connection to the ecom server
        :param replicationService: replication service
        :param srcGroupInstanceName: source group instance name
        :param tgtGroupInstanceName: target group instance name
        :param relationName: replica relationship name
        :param extraSpecs: additional info
        :returns: int -- return code
        :returns: job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        LOG.debug(
            "Creating CreateGroupReplica V3: "
            "replicationService: %(replicationService)s "
            "RelationName: %(relationName)s "
            "sourceGroup: %(srcGroup)s "
            "targetGroup: %(tgtGroup)s.",
            {'replicationService': replicationService,
             'relationName': relationName,
             'srcGroup': srcGroupInstanceName,
             'tgtGroup': tgtGroupInstanceName})
        # 7 for snap.
        syncType = 7
        rc, job = conn.InvokeMethod(
            'CreateGroupReplica',
            replicationService,
            RelationshipName=relationName,
            SourceGroup=srcGroupInstanceName,
            TargetGroup=tgtGroupInstanceName,
            SyncType=self.utils.get_num(syncType, '16'))

        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMsg = (_("Error CreateGroupReplica: "
                                  "source: %(source)s target: %(target)s. "
                                  "Return code: %(rc)lu. Error: %(error)s.")
                                % {'source': srcGroupInstanceName,
                                   'target': tgtGroupInstanceName,
                                   'rc': rc,
                                   'error': errordesc})
                LOG.error(exceptionMsg)
                raise exception.VolumeBackendAPIException(data=exceptionMsg)
        return rc, job
| |
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Mark *s* for translation without translating it (no-op stand-in)."""
    # PEP 8 (E731): a named function is preferred over assigning a lambda;
    # it also gives the callable a useful __name__ in tracebacks.
    return s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = []
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
# 'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
]
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
TEMPLATES = []
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ['/home/html', '/var/www']
ALLOWED_INCLUDE_ROOTS = []
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
#    re.compile(r'^/favicon.ico$'),
#    re.compile(r'^/robots.txt$'),
#    re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings module, or Django will
# complain loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
]
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,consider-using-enumerate,abstract-method
"""Base class for model-based tuner
This type of tuner will fit a cost model and use some optimization methods to
find optimums points of cost model in space.
"""
import gc
import numpy as np
from .tuner import Tuner
from ..env import GLOBAL_SCOPE
class FeatureCache(object):
    """Shared cache of extracted features, keyed by feature type.

    Allows several cost models to reuse one feature dictionary instead of
    re-extracting features for the same configs.
    """

    def __init__(self):
        # maps feature-type key -> {config: feature} dictionary
        self.feature_cache = {}

    def get(self, key):
        """Get feature cache dictionary for a key

        Parameters
        ----------
        key: str
            The key of a feature type

        Returns
        -------
        fea_cache: dict
            cache dictionary
        """
        # create an empty per-key dict on first access
        return self.feature_cache.setdefault(key, {})

    def size(self, key):
        """Get the size of a feature cache dictionary

        Parameters
        ----------
        key: str
            The key of a feature type

        Returns
        -------
        n: int
        """
        return len(self.feature_cache.get(key, ()))

    def clear(self, key):
        """Clear feature cache for a key

        Parameters
        ----------
        key: str
            The key of a feature type
        """
        # drop the whole per-key dict and force a collection so the
        # (potentially large) feature objects are reclaimed promptly
        del self.feature_cache[key]
        self.feature_cache[key] = {}
        gc.collect()
class CostModel(object):
    """Abstract cost model that predicts the speed of a config.

    Subclasses implement the actual learning; every method here raises
    NotImplementedError.
    """

    def __init__(self):
        pass

    def fit(self, xs, ys, plan_size):
        """Fit to training data

        Parameters
        ----------
        xs: Array of int
            indexes of configs in the config space
        ys: Array of float
            The speed (flop, float number operations per second)
        plan_size: int
            The plan size of tuner
        """
        raise NotImplementedError()

    def fit_log(self, records, plan_size):
        """Fit training data from log.

        Parameters
        ----------
        records: Array of Tuple(MeasureInput, MeasureResult)
            The tuning records
        plan_size: int
            The plan size of tuner
        """
        raise NotImplementedError()

    def predict(self, xs, output_margin=False):
        """Predict the speed of configs

        Parameters
        ----------
        xs: Array of int
            The indexes of configs to predict
        output_margin: bool, optional
            Whether output the untransformed margin.
            When a model is used as base model, it should output untransformed margin

        Returns
        -------
        preds: Array of float
            The prediction
        """
        raise NotImplementedError()

    def load_basemodel(self, base_model):
        """Load base model for transfer learning

        Parameters
        ----------
        base_model: CostModel
            base model
        """
        raise NotImplementedError()

    def spawn_base_model(self):
        """Clone a base model with the same parameters.

        The base model is used to fit history data in transfer learning.

        Returns
        -------
        model: CostModel
            A model with the same hyperparameter (argument)
        """
        raise NotImplementedError()
class ModelOptimizer(object):
    """Abstract optimizer used to find optimal points of a cost model."""

    def __init__(self):
        pass

    def find_maximums(self, model, num, exclusive):
        """Find maximum of a cost model

        Note we use cost model to predict GFLOPS, so we should find the maximum

        Parameters
        ----------
        model: CostModel
            Cost model
        num: int
            The number of returned maximum points
        exclusive: set, optional
            The excluded set of this optimizer. Return results won't include any
            elements in this set.
        """
        raise NotImplementedError()
class ModelBasedTuner(Tuner):
    """Base class for model based tuner

    This type of tuner will fit a cost model and use an optimizer to
    find the maximums of the cost model as next trials

    Parameters
    ----------
    task: autotvm.task.Task
        The tuning task
    cost_model: CostModel
        The cost model that predicts the speed of a config (IR)
    model_optimizer:
        The optimizer to find local optimum points of cost model in tuning search space
    plan_size: int
        Tuner will re-fit model per `plan_size` new measure samples
    diversity_filter_ratio: int or float, optional
        If is not None, the tuner will first select
        top-(plan_size * diversity_filter_ratio) candidates according to the cost model
        and then pick plan_size of them according to the diversity metric.
    """

    def __init__(self, task, cost_model, model_optimizer, plan_size, diversity_filter_ratio=None):
        super(ModelBasedTuner, self).__init__(task)

        # space
        self.task = task
        self.target = task.target
        self.plan_size = plan_size
        self.space = task.config_space
        self.space_len = len(task.config_space)
        # size of each tunable knob axis; used for point <-> knob conversion
        self.dims = [len(x) for x in self.space.space_map.values()]

        self.cost_model = cost_model
        self.model_optimizer = model_optimizer
        self.diversity_filter_ratio = diversity_filter_ratio

        if self.diversity_filter_ratio:
            assert self.diversity_filter_ratio >= 1, (
                "Diversity filter ratio " "must be larger than one"
            )

        # trial plan
        self.trials = []
        self.trial_pt = 0
        self.visited = set()

        # observed samples
        self.xs = []
        self.ys = []
        self.flops_max = 0.0
        self.train_ct = 0

    def next_batch(self, batch_size):
        """Return up to *batch_size* not-yet-visited configs to measure next."""
        ret = []

        counter = 0
        while counter < batch_size:
            # stop once the whole space has been visited
            if len(self.visited) >= len(self.space):
                break

            # advance past planned trials that were already measured
            while self.trial_pt < len(self.trials):
                index = self.trials[self.trial_pt]
                if index not in self.visited:
                    break
                self.trial_pt += 1

            if self.trial_pt >= len(self.trials) - int(0.05 * self.plan_size):
                # if the trial list is empty or
                # the tuner is doing the last 5% trials (e-greedy), choose randomly
                index = np.random.randint(len(self.space))
                while index in self.visited:
                    index = np.random.randint(len(self.space))

            ret.append(self.space.get(index))
            self.visited.add(index)

            counter += 1
        return ret

    def update(self, inputs, results):
        """Record measured results; re-fit the model every plan_size samples."""
        for inp, res in zip(inputs, results):
            index = inp.config.index
            if res.error_no == 0:
                self.xs.append(index)
                flops = inp.task.flop / np.mean(res.costs)
                self.flops_max = max(self.flops_max, flops)
                self.ys.append(flops)
            else:
                # failed measurement: record zero speed for this config
                self.xs.append(index)
                self.ys.append(0.0)

        # if we have enough new training samples
        if len(self.xs) >= self.plan_size * (self.train_ct + 1) and self.flops_max > 1e-6:
            self.cost_model.fit(self.xs, self.ys, self.plan_size)
            if self.diversity_filter_ratio:
                # over-select candidates, then thin them out by knob diversity
                candidate = self.model_optimizer.find_maximums(
                    self.cost_model, self.plan_size * self.diversity_filter_ratio, self.visited
                )
                scores = self.cost_model.predict(candidate)
                knobs = [point2knob(x, self.dims) for x in candidate]
                pick_index = submodular_pick(0 * scores, knobs, self.plan_size, knob_weight=1)
                maximums = np.array(candidate)[pick_index]
            else:
                maximums = self.model_optimizer.find_maximums(
                    self.cost_model, self.plan_size, self.visited
                )

            self.trials = maximums
            self.trial_pt = 0
            self.train_ct += 1

    def load_history(self, data_set):
        """Warm-start the tuner by fitting a base model on prior records."""
        # set in_tuning as True to make the feature extraction consistent
        GLOBAL_SCOPE.in_tuning = True

        # fit base model
        base_model = self.cost_model.spawn_base_model()
        success = base_model.fit_log(data_set, self.plan_size)

        if not success:
            GLOBAL_SCOPE.in_tuning = False
            return

        # use base model to select initial points
        if not self.trials:
            # no plan yet, use base model to select initial trials
            maximums = self.model_optimizer.find_maximums(base_model, self.plan_size, self.visited)
            self.trials = maximums
            self.trial_pt = 0

        self.cost_model.load_basemodel(base_model)
        GLOBAL_SCOPE.in_tuning = False

    def has_next(self):
        """Whether any unvisited config remains in the search space."""
        return len(self.visited) < len(self.space)
def point2knob(p, dims):
    """convert point form (single integer) to knob form (vector)"""
    # peel off one mixed-radix digit per dimension
    knob = []
    for dim in dims:
        p, digit = divmod(p, dim)
        knob.append(digit)
    return knob
def knob2point(knob, dims):
    """convert knob form (vector) to point form (single integer)

    Inverse of point2knob: interprets *knob* as mixed-radix digits with
    radices *dims* (least-significant first).
    """
    # Keep a running stride instead of recomputing np.prod(dims[:j]) on
    # every iteration (was O(n^2) and a needless numpy round-trip).
    p = 0
    stride = 1
    for j, k in enumerate(knob):
        p += stride * k
        stride *= dims[j]
    return p
def submodular_pick(scores, knobs, n_pick, knob_weight=1.0):
    """Greedily pick ``n_pick`` points balancing score against knob diversity.

    The objective ``sum(scores[i] for i in pick) + knob_weight * (#unique
    knob values over the picked set)`` is monotone submodular, so a greedy
    sweep is the standard approximation.

    Parameters
    ----------
    scores: Array of float
        score of every points
    knobs: Array of Array of int
        feature vector (tunable knobs) of every points
    n_pick: int
        number of points to pick
    knob_weight: float
        weight of an unique knob feature

    Returns
    -------
    list of int
        indices of the picked points, in pick order
    """
    n = len(scores)
    assert n == len(knobs)
    n_knobs = len(knobs[0])
    # per-dimension sets of knob values already covered by the picked points
    seen = [set() for _ in range(n_knobs)]
    picked = []
    remaining = list(range(n))
    for _ in range(n_pick):
        best_idx, best_gain = -1, -1e9
        for idx in remaining:
            # marginal gain: own score plus a bonus per not-yet-seen knob value
            novelty = sum(
                1 for dim in range(n_knobs) if knobs[idx][dim] not in seen[dim]
            )
            gain = scores[idx] + knob_weight * novelty
            # strict '>' keeps the earliest index on ties
            if gain > best_gain:
                best_gain, best_idx = gain, idx
        picked.append(best_idx)
        remaining.remove(best_idx)
        for dim in range(n_knobs):
            seen[dim].add(knobs[best_idx][dim])
    return picked
| |
from __future__ import unicode_literals
from .abc import ABCIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adultswim import AdultSwimIE
from .aftonbladet import AftonbladetIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE, ARDMediathekIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .audiomack import AudiomackIE
from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
)
from .collegehumor import CollegeHumorIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dump import DumpIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .ebaumsworld import EbaumsWorldIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .empflix import EMPFlixIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .escapist import EscapistIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firedrive import FiredriveIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .fourtube import FourTubeIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .grooveshark import GroovesharkIE
from .hark import HarkIE
from .heise import HeiseIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kankan import KankanIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .ministrygrid import MinistryGridIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
)
from .ndr import NDRIE
from .ndtv import NDTVIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
TegenlichtVproIE,
)
from .nrk import (
NRKIE,
NRKTVIE,
)
from .ntv import NTVIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .planetaplay import PlanetaPlayIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
from .pornoxo import PornoXOIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .quickvid import QuickVidIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rtlnl import RtlXlIE
from .rtlnow import RTLnowIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snotr import SnotrIE
from .sockshare import SockshareIE
from .sohu import SohuIE
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import SoundgasmIE
from .southpark import (
SouthParkIE,
SouthparkDeIE,
)
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .srmediathek import SRMediathekIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .sunporno import SunPornoIE
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telemb import TeleMBIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .thesixtyone import TheSixtyOneIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import TMZIE
from .tnaflix import TNAFlixIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
from .tvplay import TVPlayIE
from .twitch import TwitchIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import VGTVIE
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import VikiIE
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vodlocker import VodlockerIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import XHamsterIE
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeUserIE, XTubeIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTopListIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zdf import ZDFIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
# Collect every extractor class brought in by the imports above.  Discovery
# is by naming convention: any name in this module's namespace ending in
# 'IE'.  GenericIE is excluded from the scan and appended last, so it only
# handles URLs that no specialized extractor claimed first (order matters
# to gen_extractors below).
_ALL_CLASSES = [
    klass
    for name, klass in globals().items()
    if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    instances = []
    for extractor_class in _ALL_CLASSES:
        instances.append(extractor_class())
    return instances
def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
    return globals()['%sIE' % ie_name]
| |
# -*- coding: utf-8 -*-
"""
eve.methods.delete
~~~~~~~~~~~~~~~~~~
This module implements the DELETE method.
:copyright: (c) 2016 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from flask import current_app as app, abort
from eve.utils import config, ParsedRequest
from eve.auth import requires_auth
from eve.methods.common import get_document, ratelimit, pre_event, \
oplog_push, resolve_document_etag
from eve.versioning import versioned_id_field, resolve_document_version, \
insert_versioning_documents, late_versioning_catch
from datetime import datetime
import copy
@ratelimit()
@requires_auth('item')
@pre_event
def deleteitem(resource, **lookup):
    """
    Default function for handling DELETE requests, it has decorators for
    rate limiting, authentication and for raising pre-request events.
    After the decorators are applied forwards to call to
    :func:`deleteitem_internal`

    :param resource: name of the resource to which the item(s) belong.
    :param **lookup: item lookup query.

    .. versionchanged:: 0.5
       Split into deleteitem() and deleteitem_internal().
    """
    # HTTP-facing path: always enforce the If-Match concurrency check
    return deleteitem_internal(resource, concurrency_check=True, **lookup)
def deleteitem_internal(
        resource, concurrency_check=False, suppress_callbacks=False, **lookup):
    """ Intended for internal delete calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Deletes a resource item.

    :param resource: name of the resource to which the item(s) belong.
    :param concurrency_check: concurrency check switch (bool)
    :param suppress_callbacks: when True, skip the on_delete_item/
                               on_deleted_item application callbacks (bool)
    :param **lookup: item lookup query.

    .. versionchanged:: 0.6
       Support for soft delete.

    .. versionchanged:: 0.5
       Return 204 NoContent instead of 200.
       Push updates to OpLog.
       Original deleteitem() has been split into deleteitem() and
       deleteitem_internal().

    .. versionchanged:: 0.4
       Fix #284: If you have a media field, and set datasource projection to
       0 for that field, the media will not be deleted.
       Support for document versioning.
       'on_delete_item' events raised before performing the delete.
       'on_deleted_item' events raised after performing the delete.

    .. versionchanged:: 0.3
       Delete media files as needed.
       Pass the explicit query filter to the data driver, as it does not
       support the id argument anymore.

    .. versionchanged:: 0.2
       Raise pre_<method> event.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.5
       Pass current resource to ``parse_request``, allowing for proper
       processing of new configuration settings: `filters`, `sorting`, `paging`.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.
    """
    resource_def = config.DOMAIN[resource]
    soft_delete_enabled = resource_def['soft_delete']
    # get_document also performs the etag/concurrency check when enabled
    original = get_document(resource, concurrency_check, **lookup)
    # a soft-deleted document is treated as already gone
    if not original or (soft_delete_enabled and
                       original.get(config.DELETED) is True):
        abort(404)
    # notify callbacks
    if suppress_callbacks is not True:
        getattr(app, "on_delete_item")(resource, original)
        getattr(app, "on_delete_item_%s" % resource)(original)
    if soft_delete_enabled:
        # Instead of removing the document from the db, just mark it as deleted
        marked_document = copy.deepcopy(original)
        # Set DELETED flag and update metadata
        last_modified = datetime.utcnow().replace(microsecond=0)
        marked_document[config.DELETED] = True
        marked_document[config.LAST_UPDATED] = last_modified
        if config.IF_MATCH:
            resolve_document_etag(marked_document, resource)
        resolve_document_version(marked_document, resource, 'DELETE', original)
        # Update document in database (including version collection if needed)
        id = original[resource_def['id_field']]
        try:
            app.data.replace(resource, id, marked_document, original)
        except app.data.OriginalChangedError:
            # someone else modified the document meanwhile; only an error
            # when the caller asked for concurrency checking
            if concurrency_check:
                abort(412, description='Client and server etags don\'t match')
        # create previous version if it wasn't already there
        late_versioning_catch(original, resource)
        # and add deleted version
        insert_versioning_documents(resource, marked_document)
        # update oplog if needed
        oplog_push(resource, marked_document, 'DELETE', id)
    else:
        # Delete the document for real

        # media cleanup
        media_fields = app.config['DOMAIN'][resource]['_media']

        # document might miss one or more media fields because of datasource
        # and/or client projection.
        missing_media_fields = [f for f in media_fields if f not in original]
        if len(missing_media_fields):
            # retrieve the whole document so we have all media fields available
            # Should be a very rare occurrence. We can't get rid of the
            # get_document() call since it also deals with etag matching, which
            # is still needed. Also, this lookup should never fail.
            # TODO not happy with this hack. Not at all. Is there a better way?
            original = app.data.find_one_raw(
                resource, original[resource_def['id_field']])

        for field in media_fields:
            if field in original:
                app.media.delete(original[field], resource)

        id = original[resource_def['id_field']]
        app.data.remove(resource, {resource_def['id_field']: id})

        # TODO: should attempt to delete version collection even if setting is
        # off
        if app.config['DOMAIN'][resource]['versioning'] is True:
            app.data.remove(
                resource + config.VERSIONS,
                {versioned_id_field(resource_def):
                 original[resource_def['id_field']]})

        # update oplog if needed
        oplog_push(resource, original, 'DELETE', id)

    if suppress_callbacks is not True:
        getattr(app, "on_deleted_item")(resource, original)
        getattr(app, "on_deleted_item_%s" % resource)(original)

    return {}, None, None, 204
@requires_auth('resource')
@pre_event
def delete(resource, **lookup):
    """ Deletes all item of a resource (collection in MongoDB terms). Won't
    drop indexes. Use with caution!

    :param resource: name of the resource whose items are deleted.
    :param **lookup: filter restricting which documents are deleted.

    .. versionchanged:: 0.5
       Return 204 NoContent instead of 200.

    .. versionchanged:: 0.4
       Support for document versioning.
       'on_delete_resource' raised before performing the actual delete.
       'on_deleted_resource' raised after performing the delete

    .. versionchanged:: 0.3
       Support for the lookup filter, which allows for deletion of
       sub-resources (only delete documents that match a given condition).

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionadded:: 0.0.2
    """
    getattr(app, "on_delete_resource")(resource)
    getattr(app, "on_delete_resource_%s" % resource)()
    resource_def = config.DOMAIN[resource]
    if resource_def['soft_delete']:
        # Soft delete all items not already marked deleted
        # (by default, data.find doesn't return soft deleted items)
        default_request = ParsedRequest()
        cursor = app.data.find(resource, default_request, lookup)
        # materialize the cursor first: each delete mutates the collection
        for document in list(cursor):
            document_id = document[resource_def['id_field']]
            deleteitem_internal(resource, concurrency_check=False,
                                suppress_callbacks=True, _id=document_id)
    else:
        # TODO if the resource schema includes media files, these won't be
        # deleted by use of this global method (it should be disabled). Media
        # cleanup is handled at the item endpoint by the delete() method
        # (see above).
        app.data.remove(resource, lookup)

        # TODO: should attempt to delete version collection even if setting is
        # off
        if resource_def['versioning'] is True:
            app.data.remove(resource + config.VERSIONS, lookup)

    getattr(app, "on_deleted_resource")(resource)
    getattr(app, "on_deleted_resource_%s" % resource)()

    return {}, None, None, 204
| |
import os
import logging
from django.conf import settings
from django.utils import translation
from django.utils.translation import gettext_lazy as _
from django.db import transaction
from django.core.files.base import ContentFile
from celery.exceptions import SoftTimeLimitExceeded
from filingcabinet.pdf_utils import convert_to_pdf, convert_images_to_ocred_pdf, run_ocr
from froide.celery import app as celery_app
from froide.publicbody.models import PublicBody
from froide.upload.models import Upload
from froide.helper.redaction import redact_file
from .models import FoiRequest, FoiMessage, FoiAttachment, FoiProject
from .foi_mail import _process_mail, _fetch_mail
from .notifications import send_classification_reminder
logger = logging.getLogger(__name__)
@celery_app.task(
    name="froide.foirequest.tasks.process_mail", acks_late=True, time_limit=60
)
def process_mail(*args, **kwargs):
    """Process one incoming mail inside a single database transaction.

    All arguments are forwarded unchanged to ``_process_mail``.  The default
    site language is activated first so any strings rendered during
    processing use the configured locale.
    """
    translation.activate(settings.LANGUAGE_CODE)
    # atomic: either the mail is fully processed or nothing is persisted
    with transaction.atomic():
        _process_mail(*args, **kwargs)
@celery_app.task(name="froide.foirequest.tasks.fetch_mail", expires=60)
def fetch_mail():
    """Poll the mailbox and hand each fetched message off to process_mail."""
    for uid, message_data in _fetch_mail():
        process_mail.delay(message_data, mail_uid=uid)
@celery_app.task
def detect_overdue():
    """Mark every request returned by get_to_be_overdue() as overdue."""
    translation.activate(settings.LANGUAGE_CODE)
    overdue_requests = FoiRequest.objects.get_to_be_overdue()
    for request in overdue_requests:
        request.set_overdue()
@celery_app.task
def detect_asleep():
    """Mark every request returned by get_to_be_asleep() as asleep."""
    translation.activate(settings.LANGUAGE_CODE)
    asleep_candidates = FoiRequest.objects.get_to_be_asleep()
    for request in asleep_candidates:
        request.set_asleep()
@celery_app.task
def classification_reminder():
    """Send a classification reminder for every unclassified request."""
    # reminders are rendered in the configured default language
    translation.activate(settings.LANGUAGE_CODE)
    unclassified = FoiRequest.objects.get_unclassified()
    for request in unclassified:
        send_classification_reminder(request)
@celery_app.task
def check_delivery_status(message_id, count=None, extended=False):
    """Re-check the delivery status of one message; no-op if it is gone."""
    try:
        foimessage = FoiMessage.objects.get(id=message_id)
    except FoiMessage.DoesNotExist:
        # message was deleted in the meantime -- nothing to check
        return
    foimessage.check_delivery_status(count=count, extended=extended)
@celery_app.task
def create_project_requests(project_id, publicbody_ids, **kwargs):
    """Fan out one create_project_request task per public body, in order."""
    for order, publicbody_id in enumerate(publicbody_ids):
        create_project_request.delay(
            project_id, publicbody_id, sequence=order, **kwargs
        )
@celery_app.task
def create_project_request(project_id, publicbody_id, sequence=0, **kwargs):
    """Create one request of a FoiProject addressed to one public body.

    Returns the new request's pk, or None if the project or public body no
    longer exists.  Marks the project ready once all its requests exist.
    """
    from .services import CreateRequestFromProjectService

    try:
        project = FoiProject.objects.get(id=project_id)
    except FoiProject.DoesNotExist:
        # project does not exist anymore?
        return

    try:
        pb = PublicBody.objects.get(id=publicbody_id)
    except PublicBody.DoesNotExist:
        # pb was deleted?
        return

    # assemble the service payload from the project's own fields; extra
    # kwargs from the caller are preserved unless overridden here
    kwargs.update(
        {
            "project": project,
            "publicbody": pb,
            "subject": project.title,
            "user": project.user,
            "body": project.description,
            "public": project.public,
            "reference": project.reference,
            "tags": [t.name for t in project.tags.all()],
            "project_order": sequence,
        }
    )
    service = CreateRequestFromProjectService(kwargs)
    foirequest = service.execute()

    # NOTE(review): this count comparison is racy if several of these tasks
    # run concurrently -- confirm that a missed/duplicate READY transition
    # is tolerated
    if project.request_count == project.foirequest_set.all().count():
        project.status = FoiProject.STATUS_READY
        project.save()

    return foirequest.pk
@celery_app.task(name="froide.foirequest.tasks.convert_attachment_task", time_limit=60)
def convert_attachment_task(instance_id):
    """Convert the given attachment to PDF if it supports conversion."""
    try:
        attachment = FoiAttachment.objects.get(pk=instance_id)
    except FoiAttachment.DoesNotExist:
        return
    if not attachment.can_convert_to_pdf():
        return
    return convert_attachment(attachment)
def ocr_pdf_attachment(att):
    """Queue OCR for an attachment, creating or reusing the result attachment.

    The OCRed output will be written to a companion FoiAttachment linked via
    ``att.converted``; the actual OCR runs asynchronously in ocr_pdf_task.
    """
    if att.converted:
        # reuse the existing converted attachment as the OCR target
        ocred_att = att.converted
    else:
        name, ext = os.path.splitext(att.name)
        name = _("{name}_ocr{ext}").format(name=name, ext=".pdf")
        ocred_att = FoiAttachment.objects.create(
            name=name,
            belongs_to=att.belongs_to,
            approved=False,
            filetype="application/pdf",
            is_converted=True,
            can_approve=att.can_approve,
        )
    # once an OCRed version exists, the original must not be approved directly
    att.converted = ocred_att
    att.can_approve = False
    att.approved = False
    att.save()
    ocr_pdf_task.delay(
        att.pk,
        ocred_att.pk,
    )
def convert_attachment(att):
    """Convert *att* to PDF and store the result on a linked attachment.

    Does nothing if the configured converter produced no output.  On success
    the converted file is saved to ``att.converted`` and the original is
    made non-approvable.
    """
    output_bytes = convert_to_pdf(
        att.file.path,
        binary_name=settings.FROIDE_CONFIG.get("doc_conversion_binary"),
        construct_call=settings.FROIDE_CONFIG.get("doc_conversion_call_func"),
    )
    if output_bytes is None:
        # conversion failed: leave the attachment untouched
        return

    if att.converted:
        # reuse the existing converted attachment
        new_att = att.converted
    else:
        name, ext = os.path.splitext(att.name)
        name = _("{name}_converted{ext}").format(name=name, ext=".pdf")
        # unsaved instance; persisted below after the file is attached
        new_att = FoiAttachment(
            name=name,
            belongs_to=att.belongs_to,
            approved=False,
            filetype="application/pdf",
            is_converted=True,
            can_approve=att.can_approve,
        )

    new_file = ContentFile(output_bytes)
    new_att.size = new_file.size
    new_att.file.save(new_att.name, new_file)
    new_att.save()
    # original becomes non-approvable once a converted version exists
    att.converted = new_att
    att.can_approve = False
    att.approved = False
    att.save()
@celery_app.task(
    name="froide.foirequest.tasks.convert_images_to_pdf_task",
    time_limit=60 * 5,
    soft_time_limit=60 * 4,
)
def convert_images_to_pdf_task(att_ids, target_id, instructions, can_approve=True):
    """Combine image attachments into one OCRed PDF stored on *target*.

    :param att_ids: ids of the source image attachments, in page order
    :param target_id: id of the FoiAttachment receiving the combined PDF
    :param instructions: per-image conversion instructions
    :param can_approve: value restored on the sources if conversion fails
    """
    att_qs = FoiAttachment.objects.filter(id__in=att_ids)
    att_map = {a.id: a for a in att_qs}
    # Some attachments may have been deleted between enqueueing and running
    # this task; keep the requested page order but skip missing ids (the
    # previous direct indexing raised KeyError here and aborted the task).
    atts = [att_map[a_id] for a_id in att_ids if a_id in att_map]
    try:
        target = FoiAttachment.objects.get(id=target_id)
    except FoiAttachment.DoesNotExist:
        return
    paths = [a.file.path for a in atts]
    try:
        pdf_bytes = convert_images_to_ocred_pdf(paths, instructions=instructions)
    except SoftTimeLimitExceeded:
        # conversion ran out of time: treat like a failed conversion
        pdf_bytes = None
    if pdf_bytes is None:
        # failure: make the source images approvable again and drop the
        # placeholder target attachment
        att_qs.update(can_approve=can_approve)
        target.delete()
        return
    new_file = ContentFile(pdf_bytes)
    target.size = new_file.size
    target.file.save(target.name, new_file)
    target.save()
@celery_app.task(
    name="froide.foirequest.tasks.ocr_pdf_task",
    time_limit=60 * 5,
    soft_time_limit=60 * 4,
)
def ocr_pdf_task(att_id, target_id, can_approve=True):
    """Run OCR over one attachment and store the result on *target*.

    On failure (missing objects, timeout, OCR error) the source attachment's
    approvability is restored and the placeholder target is deleted.

    :param att_id: id of the source FoiAttachment
    :param target_id: id of the FoiAttachment receiving the OCRed PDF
    :param can_approve: value restored on the source if OCR fails
    """
    try:
        attachment = FoiAttachment.objects.get(pk=att_id)
    except FoiAttachment.DoesNotExist:
        return
    try:
        target = FoiAttachment.objects.get(pk=target_id)
    except FoiAttachment.DoesNotExist:
        return
    # Prefer an explicit TESSERACT_LANGUAGE setting, falling back to the
    # site language (getattr with a default replaces the old
    # hasattr/conditional construction -- identical behavior, clearer).
    ocr_language = getattr(settings, "TESSERACT_LANGUAGE", settings.LANGUAGE_CODE)
    try:
        pdf_bytes = run_ocr(attachment.file.path, language=ocr_language, timeout=180)
    except SoftTimeLimitExceeded:
        pdf_bytes = None
    if pdf_bytes is None:
        # OCR failed: restore approvability on the source, drop the target
        attachment.can_approve = can_approve
        attachment.save()
        target.delete()
        return
    new_file = ContentFile(pdf_bytes)
    target.size = new_file.size
    target.file.save(target.name, new_file)
    target.save()
@celery_app.task(
    name="froide.foirequest.tasks.redact_attachment_task",
    time_limit=60 * 6,
    soft_time_limit=60 * 5,
)
def redact_attachment_task(att_id, target_id, instructions):
    """Redact an attachment according to *instructions*, then OCR the result.

    When ``att_id == target_id`` the attachment is redacted in place;
    otherwise the redacted (and OCRed) PDF is stored on the target
    attachment, which is approved and published at the end.
    """
    try:
        attachment = FoiAttachment.objects.get(pk=att_id)
    except FoiAttachment.DoesNotExist:
        return

    if att_id != target_id:
        try:
            target = FoiAttachment.objects.get(pk=target_id)
        except FoiAttachment.DoesNotExist:
            return
    else:
        # in-place redaction: source and target are the same attachment
        target = attachment

    logger.info("Trying redaction of %s", attachment.id)

    try:
        pdf_bytes = redact_file(attachment.file, instructions)
    except Exception:
        logger.error("PDF redaction error", exc_info=True)
        pdf_bytes = None

    if pdf_bytes is None:
        logger.info("Redaction failed %s", attachment.id)
        # Redaction has failed, remove empty attachment
        if attachment.redacted:
            attachment.redacted = None
        if attachment.is_redacted:
            attachment.approved = True
            attachment.can_approve = True
        attachment.pending = False
        attachment.save()

        if not target.file:
            # drop the placeholder target that never received any content
            target.delete()
        return

    logger.info("Redaction successful %s", attachment.id)
    pdf_file = ContentFile(pdf_bytes)
    target.size = pdf_file.size
    # save=False: the model row is persisted later by approve_and_save()
    target.file.save(target.name, pdf_file, save=False)

    logger.info("Trying OCR %s", target.id)

    try:
        pdf_bytes = run_ocr(
            target.file.path, language=settings.TESSERACT_LANGUAGE if hasattr(settings, 'TESSERACT_LANGUAGE') else settings.LANGUAGE_CODE, timeout=60 * 4
        )
    except SoftTimeLimitExceeded:
        pdf_bytes = None

    if pdf_bytes is not None:
        logger.info("OCR successful %s", target.id)
        pdf_file = ContentFile(pdf_bytes)
        target.size = pdf_file.size
        target.file.save(target.name, pdf_file, save=False)
    else:
        # OCR failure is non-fatal: the un-OCRed redacted PDF is still used
        logger.info("OCR failed %s", target.id)

    target.can_approve = True
    target.pending = False
    target.approve_and_save()
    FoiAttachment.attachment_published.send(sender=target, user=None)
@celery_app.task(name="froide.foirequest.tasks.move_upload_to_attachment")
def move_upload_to_attachment(att_id, upload_id):
    """Attach a finished upload's file to an attachment, then clean up.

    Silently returns if either object no longer exists.  Queues PDF
    conversion for the attachment when applicable.
    """
    try:
        att = FoiAttachment.objects.get(pk=att_id)
    except FoiAttachment.DoesNotExist:
        return
    try:
        upload = Upload.objects.get(pk=upload_id)
    except Upload.DoesNotExist:
        # BUG FIX: this previously caught FoiAttachment.DoesNotExist, so a
        # missing Upload escaped the handler and crashed the task instead
        # of being ignored like a missing attachment.
        return
    file = upload.get_file()
    if file:
        att.pending = False
        att.file.save(att.name, file, save=True)
    upload.finish()
    upload.delete()
    if att.can_convert_to_pdf():
        convert_attachment_task.delay(att.id)
| |
#!/usr/bin/python
# Copyright (2014) Thomas Huang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import MySQLdb
import pprint
import re
def get_con(host, db, user, passwd):
    """Open a MySQL connection; returns None (after printing) on failure."""
    con = None
    try:
        # MySQLdb.connect positional order is host, user, passwd, db
        con = MySQLdb.connect(host, user, passwd, db)
    except MySQLdb.Error, e:
        # NOTE(review): assumes e.args has at least two elements; some
        # MySQLdb errors carry only one -- confirm before relying on args[1]
        print "MySQL Connect Error: %s" % (e.args[1])
    return con
def get_tables(con):
    """Return the list of table names in the connected database.

    :param con: an open MySQLdb connection
    """
    cur = con.cursor()
    try:
        # SHOW TABLES yields one single-column row per table
        cur.execute("SHOW TABLES")
        tables = [t[0] for t in cur.fetchall()]
    finally:
        # close the cursor even when the query fails, matching the
        # try/finally convention used by the Database methods in this file
        cur.close()
    return tables
def get_table_desc(con, table_name):
    """Print each ``desc`` row of *table_name* wrapped as a Column, then
    the raw desc tuple itself. Debug helper only."""
    cur = con.cursor(MySQLdb.cursors.DictCursor)
    cur.execute("desc %s" % (table_name))
    desc = cur.fetchall()
    for row in desc:
        pprint.pprint(Column(row))
    cur.close()
    print(desc)
class Database:
    """Connects to one MySQL database and loads its whole schema
    (tables, columns, indexes and CREATE statements) so it can be
    diffed offline against another Database.
    """
    def __init__(self, host, db, user, passwd):
        self.db = db
        self.host = host
        self.con = get_con(host, db, user, passwd)
        self.init_tables()

    def get_tables(self):
        # Table names as a set, so schemas can be compared with set algebra.
        tables = set()
        for t in self.tables:
            tables.add(t)
        return tables

    def printf(self):
        # Dump the database name and its table names to stdout.
        print "Database : %s\n" % (self.db)
        for t in self.tables:
            print t

    def __repr__(self):
        db_desc = "Database : %s\n" % (self.db)
        tables_desc = "\n".join([str(t) for t in self.tables])
        return db_desc + tables_desc

    def init_tables(self):
        """Populate ``self.tables`` with one Table object per table."""
        self.tables = {}
        cur = self.con.cursor()
        try:
            cur.execute("SHOW TABLES")
            tables = [t[0] for t in cur.fetchall()]
            print "Loading tables on %s/%s..." % (self.host, self.db)
            for t in tables:
                cols = self.fetch_table_columns(t)
                indexes = self.fetch_table_indexes(t)
                sql = self.fetch_sql(t)
                self.tables[t] = Table(t, cols, indexes, sql)
        finally:
            cur.close()

    def fetch_table_columns(self, table_name):
        """Return {column name -> ``desc`` row dict} for ``table_name``."""
        columns = {}
        cur = self.con.cursor(MySQLdb.cursors.DictCursor)
        try:
            cur.execute("desc %s" % (table_name))
            for c in cur.fetchall():
                columns[c['Field']] = c
        finally:
            cur.close()
        return columns

    def fetch_table_indexes(self, table_name):
        """Return {index name -> Index}; the per-column rows of a
        multi-column index are folded into one Index whose 'Columns'
        and 'Null' values are comma-joined."""
        indexes = {}
        cur = self.con.cursor(MySQLdb.cursors.DictCursor)
        try:
            cur.execute("show index in %s" % (table_name))
            for index in cur.fetchall():
                i = indexes.get(index['Key_name'], None)
                if i:
                    # Another column of an already-seen index: append.
                    i['Columns'] += ", " + index['Column_name']
                    i['Null'] += ", " + index['Null']
                else:
                    # First column of this index: rename Column_name -> Columns.
                    col_name = index['Column_name']
                    del index['Column_name']
                    index['Columns'] = col_name
                    indexes[index['Key_name']] = Index(index)
        finally:
            cur.close()
        # pprint.pprint(indexes)
        return indexes

    def fetch_sql(self, table):
        # Second field of a SHOW CREATE TABLE row is the CREATE statement.
        cur = self.con.cursor()
        cur.execute("SHOW CREATE TABLE %s" % (table))
        create_sql = cur.fetchone()[1]
        cur.close()
        return create_sql

    def __del__(self):
        # NOTE(review): assumes get_con succeeded; self.con is None after a
        # failed connect and close() would raise — confirm acceptable here.
        self.con.close()
class Table:
    """Schema snapshot of one table: its columns, indexes and CREATE sql."""

    def __init__(self, table_name, cols, indexes, sql):
        self.table_name = table_name
        self.columns = cols
        self.indexes = indexes
        self.sql = sql

    @property
    def columns_set(self):
        """Column names as a set, for set-difference comparisons."""
        return set(self.columns)

    @property
    def indexes_set(self):
        """Index names as a set, for set-difference comparisons."""
        return set(self.indexes)

    @property
    def index_lines(self):
        """The KEY/index lines of the CREATE TABLE statement, trimmed."""
        return [line.strip(", ")
                for line in self.sql.split("\n") if "KEY" in line]

    def col_desc(self, column):
        """The DDL fragment describing ``column`` (name, type, modifiers)."""
        matches = re.compile(r'\s+(`*%s`*[^,\n\r]*)' % (column)).findall(self.sql)
        return matches[0]

    def __str__(self):
        cols = "\n".join([str(c) for c in self.columns])
        return "<Tables : %s>\n%s\n" % (self.table_name, cols)

    __repr__ = __str__
class Column:
    """Wraps one row of ``desc <table>`` output (a dict of column attributes)."""

    def __init__(self, desc):
        self.desc = desc

    def __getitem__(self, name):
        return self.desc[name]

    def __repr__(self):
        return "\t%s %s" % (self.desc['Field'], self.desc['Type'])

    __str__ = __repr__

    def __eq__(self, other):
        # Equal when every attribute this column knows about matches.
        return all(self.desc[key] == other.desc[key] for key in self.desc)
class Index:
    """Wraps one aggregated row of ``SHOW INDEX`` output (mutable dict)."""

    def __init__(self, desc):
        self.desc = desc

    def __getitem__(self, name):
        return self.desc[name]

    def __setitem__(self, key, name):
        self.desc[key] = name

    def __repr__(self):
        return str(self.desc)

    __str__ = __repr__

    def __eq__(self, other):
        # Equal when every attribute this index knows about matches.
        return all(self.desc[key] == other.desc[key] for key in self.desc)
class MySQLDiff:
    """Compares two loaded Database schemas and prints a report of the
    differences: missing tables, differing columns and differing indexes.
    """
    def __init__(self, db1, db2):
        self.db1 = db1
        self.db2 = db2
        self.db1_tables = self.db1.get_tables()
        self.db2_tables = self.db2.get_tables()

    def diff(self):
        """Run the whole comparison in both directions and print the report."""
        print "## Diff between %s/%s and %s/%s\n" % (
            self.db1.host, self.db1.db, self.db2.host, self.db2.db)
        self.compare_tables(self.db1, self.db2)
        self.compare_tables(self.db2, self.db1)
        self.compare_tables_colunms()

    def compare_tables(self, db1, db2):
        # Report tables that exist in db1 but are missing from db2.
        miss_tables = db1.get_tables() - db2.get_tables()
        if len(miss_tables):
            print "\tIn %s:<%s> doesn't exist table(s):" % (db2.host, db2.db)
            print "\t\t", "\n\t\t".join(miss_tables), "\n"

    def compare_tables_colunms(self):
        # For tables present on both sides, compare columns and indexes.
        inter_tables = self.db1_tables & self.db2_tables
        for t in inter_tables:
            print "In table %s\n-----------------------\n" % (t)
            t1 = self.db1.tables[t]
            t2 = self.db2.tables[t]
            self.compare_columns(self.db2, t1, t2)
            self.compare_columns(self.db1, t2, t1)
            self.diff_columns(t1, t2)
            self.compare_indexes(self.db2, t1, t2)
            self.compare_indexes(self.db1, t2, t1)
            self.diff_indexes(t1, t2)
            print("\n")

    def compare_columns(self, db, t1, t2):
        # Report columns present in t1 but missing from t2 (t2 lives on ``db``).
        miss_cols = t1.columns_set - t2.columns_set
        if miss_cols:
            print "\tOn %s doesn't exist colunm(s):" % (db.host)
            for col in miss_cols:
                print "\t", col.rjust(20), ": ", t1.col_desc(col)
            print ""

    def diff_columns(self, t1, t2):
        # For columns present on both sides, print attribute-level differences.
        inter_cols = t1.columns_set & t2.columns_set
        for col in inter_cols:
            t1_col = t1.columns[col]
            t2_col = t2.columns[col]
            diff_keys = self.col_diff_keys(t1_col, t2_col)
            if len(diff_keys):
                print "\tColunm *%s* is different:" % (t1_col['Field'])
                print "\t", self.db1.host.rjust(20), ": ", t1.col_desc(col)
                print "\t", self.db2.host.rjust(20), ": ", t2.col_desc(col)
                for key in diff_keys:
                    print "\t", ("different %s" % (key)).rjust(20), ": *%s* *%s*" % \
                        (t1_col[key], t2_col[key])
                print("")
        # for key in ['Type', 'Null', 'Key', 'Default', 'Extra']:
        #     self.cmp_col_key(t1_col, t2_col, key)

    def col_diff_keys(self, col1, col2):
        # Names of the desc attributes on which the two columns disagree.
        diff_keys = []
        for key in ['Type', 'Null', 'Key', 'Default', 'Extra']:
            if col1[key] != col2[key]:
                diff_keys.append(key)
        return diff_keys

    # def cmp_col_key(self, col1, col2, key):
    #     if col1[key] != col2[key]:
    #         print "\tColumn *%s* is different %s: *%s* *%s*" % \
    #             (col1['Field'], key, col1[key], col2[key])
    #         return False
    #     else:
    #         print "%s : %s" % (col, t1_col['Type'])
    #         return True

    def compare_indexes(self, db, t1, t2):
        # Report indexes present in t1 but missing from t2 (t2 lives on ``db``).
        miss_indexes = t1.indexes_set - t2.indexes_set
        if miss_indexes:
            print "\tOn %s doesn't exist index(es):" % (db.host)
            for index in miss_indexes:
                line = self.find_line(t1.index_lines, index)
                print "\t", index.rjust(20), ": ", line
            print ""

    def diff_indexes(self, t1, t2):
        # For indexes present on both sides, print both DDL lines when they differ.
        inter_indexes = t1.indexes_set & t2.indexes_set
        index_names = []
        for i in inter_indexes:
            t1_index = t1.indexes[i]
            t2_index = t2.indexes[i]
            if len(self.cmp_index_key(t1_index, t2_index)):
                index_names.append(t1_index['Key_name'])
        if len(index_names):
            lines1 = t1.index_lines
            lines2 = t2.index_lines
            for name in index_names:
                line = self.find_line(lines1, name)
                if line:
                    print "\t", self.db1.host.rjust(20), ": ", line
                line = self.find_line(lines2, name)
                if line:
                    print "\t", self.db2.host.rjust(20), ": ", line

    def find_line(self, lines, name):
        # First DDL line mentioning ``name``, or None if absent.
        for line in lines:
            if name in line:
                return line
        return None

    def cmp_index_key(self, i1, i2):
        # Print and collect the attributes on which the two indexes differ.
        key_names = []
        for key in ['Columns', 'Null']:
            if i1[key] != i2[key]:
                print "\tIndex *%s* is different %s: *%s* *%s*" % \
                    (i1['Key_name'], key, i1[key], i2[key])
                key_names.append(key)
        return key_names
def get_commandline_options():
    """Parse the command line: the MySQL URLs of the databases to diff."""
    from argparse import ArgumentParser
    arg_parser = ArgumentParser(usage="usage: PROG [options] mysql_urls...]")
    arg_parser.add_argument("mysql_urls", default=[], nargs='*',
                            help="the mysqls to diff")
    return arg_parser.parse_args()
def parse_mysql_url(mysql_url):
    """Split a ``user[:passwd]@host/db`` URL into ``(host, db, user, passwd)``.

    When no password is embedded in the URL, it is prompted for
    interactively via ``getpass``.
    """
    credentials, location = mysql_url.split("@")
    host, db = location.split("/")
    parts = credentials.split(":", 1)
    if len(parts) != 2:
        import getpass
        # print() call form for consistency with the rest of this file
        # (fixes the Py2-only `print ...` statement used here before).
        print("Input password for %s@%s" % (parts[0], host))
        parts.append(getpass.getpass())
    user, passwd = parts
    return host, db, user, passwd
if __name__ == '__main__':
    # Entry point: load both schemas and print the diff report.
    options = get_commandline_options()
    pprint.pprint(options)
    print("\nStarting diff ........\n")
    # NOTE(review): assumes at least two mysql_urls were supplied;
    # fewer raises IndexError — confirm whether a usage message is wanted.
    src = Database(*parse_mysql_url(options.mysql_urls[0]))
    dest = Database(*parse_mysql_url(options.mysql_urls[1]))
    db_diff = MySQLDiff(src, dest)
    db_diff.diff()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Simple REST server that takes commands in a JSON payload
Interface to the :py:class:`~luigi.scheduler.CentralPlannerScheduler` class.
See :doc:`/central_scheduler` for more info.
"""
#
# Description: Added codes for visualization of how long each task takes
# running-time until it reaches the next status (failed or done)
# At "{base_url}/tasklist", all completed(failed or done) tasks are shown.
# At "{base_url}/tasklist", a user can select one specific task to see
# how its running-time has changed over time.
# At "{base_url}/tasklist/{task_name}", it visualizes a multi-bar graph
# that represents the changes of the running-time for a selected task
# up to the next status (failed or done).
# This visualization let us know how the running-time of the specific task
# has changed over time.
#
# Copyright 2015 Naver Corp.
# Author Yeseul Park (yeseul.park@navercorp.com)
#
import atexit
import json
import logging
import mimetypes
import os
import posixpath
import signal
import sys
import datetime
import time
import pkg_resources
import tornado.httpclient
import tornado.httpserver
import tornado.ioloop
import tornado.netutil
import tornado.web
from luigi.scheduler import CentralPlannerScheduler, RPC_METHODS
logger = logging.getLogger("luigi.server")
class RPCHandler(tornado.web.RequestHandler):
    """
    Handle remote scheduling calls using rpc.RemoteSchedulerResponder.
    """

    def initialize(self, scheduler):
        self._scheduler = scheduler

    def get(self, method):
        # Reject anything that is not a known RPC method name.
        if method not in RPC_METHODS:
            self.send_error(404)
            return
        arguments = json.loads(self.get_argument('data', default="{}"))
        # TODO: we should probably denote all methods on the scheduler that are "API-level"
        # versus internal methods. Right now you can do a REST method call to any method
        # defined on the scheduler, which is pretty bad from a security point of view.
        if not hasattr(self._scheduler, method):
            self.send_error(404)
            return
        result = getattr(self._scheduler, method)(**arguments)
        # wrap all json response in a dictionary
        self.write({"response": result})

    # POST is handled identically to GET.
    post = get
class BaseTaskHistoryHandler(tornado.web.RequestHandler):
    """Common base for the task-history pages: stores the scheduler and
    points Tornado at the templates bundled with this package."""

    def initialize(self, scheduler):
        self._scheduler = scheduler

    def get_template_path(self):
        # Templates ship inside the package, next to this module.
        return pkg_resources.resource_filename(__name__, 'templates')
class AllRunHandler(BaseTaskHistoryHandler):
    """Render the menu page listing the name of every recorded task run."""

    def get(self):
        all_tasks = self._scheduler.task_history.find_all_runs()
        # show all tasks with their name list to be selected
        # why all tasks? the duration of the event history of a selected task
        # can be more than 24 hours.
        self.render("menu.html",
                    tasknames=[task.name for task in all_tasks])
class SelectedRunHandler(BaseTaskHistoryHandler):
    """Render the running-time history page for one task name, feeding the
    template two JSON blobs: events grouped by status, and a flat event list."""

    def get(self, name):
        tasks = {}
        statusResults = {}
        taskResults = []
        # get all tasks that has been updated
        all_tasks = self._scheduler.task_history.find_all_runs()
        # get events history for all tasks
        all_tasks_event_history = self._scheduler.task_history.find_all_events()
        for task in all_tasks:
            task_seq = task.id
            task_name = task.name
            # build the dictionary, tasks with index: id, value: task_name
            tasks[task_seq] = str(task_name)
        for task in all_tasks_event_history:
            # if the name of user-selected task is in tasks, get its task_id
            if tasks.get(task.task_id) == str(name):
                status = str(task.event_name)
                if status not in statusResults:
                    statusResults[status] = []
                # append the id, task_id, ts, y with 0, next_process with null
                # for the status(running/failed/done) of the selected task
                statusResults[status].append(({
                    'id': str(task.id), 'task_id': str(task.task_id),
                    'x': from_utc(str(task.ts)), 'y': 0, 'next_process': ''}))
                # append the id, task_name, task_id, status, datetime, timestamp
                # for the selected task
                taskResults.append({
                    'id': str(task.id), 'taskName': str(name), 'task_id': str(task.task_id),
                    'status': str(task.event_name), 'datetime': str(task.ts),
                    'timestamp': from_utc(str(task.ts))})
        statusResults = json.dumps(statusResults)
        taskResults = json.dumps(taskResults)
        # undo HTML escaping so the template receives raw JSON text
        statusResults = tornado.escape.xhtml_unescape(str(statusResults))
        taskResults = tornado.escape.xhtml_unescape(str(taskResults))
        self.render('history.html', name=name, statusResults=statusResults, taskResults=taskResults)
def from_utc(utcTime, fmt=None):
"""convert UTC time string to time.struct_time: change datetime.datetime to time, return time.struct_time type"""
if fmt is None:
try_formats = ["%Y-%m-%d %H:%M:%S.%f", "%Y-%m-%d %H:%M:%S"]
else:
try_formats = [fmt]
for fmt in try_formats:
try:
time_struct = datetime.datetime.strptime(utcTime, fmt)
except ValueError:
pass
else:
date = int(time.mktime(time_struct.timetuple()))
return date
else:
raise ValueError("No UTC format matches {}".format(utcTime))
class RecentRunHandler(BaseTaskHistoryHandler):
    """Render the page showing the most recently run tasks."""

    def get(self):
        latest_runs = self._scheduler.task_history.find_latest_runs()
        self.render("recent.html", tasks=latest_runs)
class ByNameHandler(BaseTaskHistoryHandler):
    """Render every recorded run of the task called ``name``."""

    def get(self, name):
        matching_runs = self._scheduler.task_history.find_all_by_name(name)
        self.render("recent.html", tasks=matching_runs)
class ByIdHandler(BaseTaskHistoryHandler):
    """Render the detail page for a single task run, looked up by id."""

    def get(self, id):
        record = self._scheduler.task_history.find_task_by_id(id)
        self.render("show.html", task=record)
class ByParamsHandler(BaseTaskHistoryHandler):
    """Render runs of task ``name`` filtered by the JSON 'data' argument."""

    def get(self, name):
        raw_payload = self.get_argument('data', default="{}")
        params = json.loads(raw_payload)
        matching_runs = self._scheduler.task_history.find_all_by_parameters(name, session=None, **params)
        self.render("recent.html", tasks=matching_runs)
class StaticFileHandler(tornado.web.RequestHandler):
    """Serve files bundled in the package's ``static`` directory."""

    def get(self, path):
        # Path checking taken from Flask's safe_join function:
        # https://github.com/mitsuhiko/flask/blob/1d55b8983/flask/helpers.py#L563-L587
        path = posixpath.normpath(path)
        if os.path.isabs(path) or path.startswith(".."):
            return self.send_error(404)
        mime = mimetypes.types_map.get(os.path.splitext(path)[1])
        if mime is not None:
            self.set_header("Content-Type", mime)
        self.write(pkg_resources.resource_string(__name__, os.path.join("static", path)))
class RootPathHandler(BaseTaskHistoryHandler):
    """Redirect the bare root URL to the visualiser front page."""

    def get(self):
        self.redirect("/static/visualiser/index.html")
def app(scheduler):
    """Build the Tornado Application that serves the RPC API, the bundled
    static assets and the task-history pages, all backed by ``scheduler``."""
    app_settings = {
        "static_path": os.path.join(os.path.dirname(__file__), "static"),
        "unescape": tornado.escape.xhtml_unescape,
    }
    url_map = [
        (r'/api/(.*)', RPCHandler, {"scheduler": scheduler}),
        (r'/static/(.*)', StaticFileHandler),
        (r'/', RootPathHandler, {'scheduler': scheduler}),
        (r'/tasklist', AllRunHandler, {'scheduler': scheduler}),
        (r'/tasklist/(.*?)', SelectedRunHandler, {'scheduler': scheduler}),
        (r'/history', RecentRunHandler, {'scheduler': scheduler}),
        (r'/history/by_name/(.*?)', ByNameHandler, {'scheduler': scheduler}),
        (r'/history/by_id/(.*?)', ByIdHandler, {'scheduler': scheduler}),
        (r'/history/by_params/(.*?)', ByParamsHandler, {'scheduler': scheduler}),
    ]
    return tornado.web.Application(url_map, **app_settings)
def _init_api(scheduler, responder=None, api_port=None, address=None, unix_socket=None):
    """Bind the API application to a TCP port or a unix socket and start
    serving; returns the bound socket names."""
    if responder:
        raise Exception('The "responder" argument is no longer supported')
    api_app = app(scheduler)
    if unix_socket is None:
        api_sockets = tornado.netutil.bind_sockets(api_port, address=address)
    else:
        api_sockets = [tornado.netutil.bind_unix_socket(unix_socket)]
    server = tornado.httpserver.HTTPServer(api_app)
    server.add_sockets(api_sockets)
    # Return the bound socket names. Useful for connecting client in test scenarios.
    return [s.getsockname() for s in api_sockets]
def run(api_port=8082, address=None, unix_socket=None, scheduler=None, responder=None):
    """
    Runs one instance of the API server.

    Starts the scheduler (creating a default one when none is given),
    binds the HTTP API, installs shutdown handlers that persist scheduler
    state, and blocks in the Tornado IOLoop until stopped.
    """
    if scheduler is None:
        scheduler = CentralPlannerScheduler()
    # load scheduler state
    scheduler.load()
    _init_api(
        scheduler=scheduler,
        responder=responder,
        api_port=api_port,
        address=address,
        unix_socket=unix_socket,
    )
    # prune work DAG every 60 seconds
    pruner = tornado.ioloop.PeriodicCallback(scheduler.prune, 60000)
    pruner.start()

    # exit_handler is defined below but only *called* at signal time, so the
    # forward reference resolves by then (Python late binding).
    def shutdown_handler(signum, frame):
        exit_handler()
        sys.exit(0)

    # Registered with atexit so state is dumped on any normal interpreter exit.
    @atexit.register
    def exit_handler():
        logger.info("Scheduler instance shutting down")
        scheduler.dump()
        stop()

    signal.signal(signal.SIGINT, shutdown_handler)
    signal.signal(signal.SIGTERM, shutdown_handler)
    if os.name == 'nt':
        # Windows has no SIGQUIT; SIGBREAK is the closest equivalent.
        signal.signal(signal.SIGBREAK, shutdown_handler)
    else:
        signal.signal(signal.SIGQUIT, shutdown_handler)
    logger.info("Scheduler starting up")
    # Blocks here until stop() is called.
    tornado.ioloop.IOLoop.instance().start()
def stop():
    """Stop the Tornado IOLoop started by :func:`run`."""
    tornado.ioloop.IOLoop.instance().stop()


if __name__ == "__main__":
    # Allow running the scheduler server directly as a script.
    run()
| |
"""
Tests for EnvironmentResource api.
"""
from tests.case.api.crud import ApiCrudCases
import logging
logger = logging.getLogger("moztrap.test")
class EnvironmentResourceTest(ApiCrudCases):
    """CRUD and category-combinatorics tests for the ``environment`` API
    resource."""

    @property
    def factory(self):
        """The model factory for this object."""
        return self.F.EnvironmentFactory()

    @property
    def resource_name(self):
        # URL slug of the resource under test.
        return "environment"

    @property
    def permission(self):
        """The permissions needed to modify this object type."""
        return "environments.manage_environments"

    @property
    def new_object_data(self):
        """Generates a dictionary containing the field names and auto-generated
        values needed to create a unique object.

        The output of this method can be sent in the payload parameter of a
        POST message.
        """
        # One profile plus one element in each of three distinct categories;
        # fixtures are kept on ``self`` so tests can reuse and mutate them.
        self.profile_fixture = self.F.ProfileFactory()
        self.category_fixture1 = self.F.CategoryFactory(name="A")
        self.category_fixture2 = self.F.CategoryFactory(name="B")
        self.category_fixture3 = self.F.CategoryFactory(name="C")
        self.element_fixture1 = self.F.ElementFactory(category=self.category_fixture1, name="A 2")
        self.element_fixture2 = self.F.ElementFactory(category=self.category_fixture2, name="B 2")
        self.element_fixture3 = self.F.ElementFactory(category=self.category_fixture3, name="C 2")
        self.element_fixture_list = [
            self.element_fixture1, self.element_fixture2, self.element_fixture3]
        return {
            u"profile": unicode(
                self.get_detail_url("profile", str(self.profile_fixture.id))),
            u"elements": [unicode(
                self.get_detail_url(
                    "element", str(elem.id))
                ) for elem in self.element_fixture_list],
        }

    def backend_object(self, id):
        """Returns the object from the backend, so you can query it's values in
        the database for validation.
        """
        return self.model.Environment.everything.get(id=id)

    def backend_data(self, backend_obj):
        """Query's the database for the object's current values. Output is a
        dictionary that should match the result of getting the object's detail
        via the API, and can be used to verify API output.

        Note: both keys and data should be in unicode
        """
        return {
            u"id": unicode(str(backend_obj.id)),
            u"profile": unicode(self.get_detail_url("profile", str(backend_obj.profile.id))),
            u"elements": [unicode(
                self.get_detail_url("element", str(elem.id))
                ) for elem in backend_obj.elements.all()],
            u"resource_uri": unicode(
                self.get_detail_url(self.resource_name, str(backend_obj.id))),
        }

    def test_elements_must_be_from_different_categories(self):
        """A post with two elements from the same category should error."""
        logger.info("test_elements_must_be_from_different_categories")
        # get data for creation & munge it
        fields = self.new_object_data
        # move element 2 into element 1's category to provoke the error
        self.element_fixture2.category = self.element_fixture1.category
        self.element_fixture2.save()
        # do the create
        res = self.post(
            self.get_list_url(self.resource_name),
            params=self.credentials,
            payload=fields,
            status=400,
        )
        error_msg = "Elements must each belong to a different Category."
        self.assertEqual(res.text, error_msg)

    def test_basic_combinatorics_patch(self):
        """A Patch request with profile and categories should do combinatorics
        on the categories and create environments."""
        logger.info("test_basic_combinatorics_patch")
        fields = self.new_object_data
        # create more elements for each category
        for x in range(2):
            self.F.ElementFactory(category=self.category_fixture1, name="A %s" % x)
            self.F.ElementFactory(category=self.category_fixture2, name="B %s" % x)
            self.F.ElementFactory(category=self.category_fixture3, name="C %s" % x)
        # modify fields to send categories rather than elements
        fields.pop('elements')
        fields['categories'] = [
            unicode(self.get_detail_url(
                "category", str(self.category_fixture1.id))),
            unicode(self.get_detail_url(
                "category", str(self.category_fixture2.id))),
            unicode(self.get_detail_url(
                "category", str(self.category_fixture3.id))),
        ]
        # do the create
        res = self.patch(
            self.get_list_url(self.resource_name),
            params=self.credentials,
            payload=fields,
        )
        # check that it made the right number of environments
        # (3 elements in each of 3 categories -> 3 ** 3 = 27)
        self._test_filter_list_by(u'profile', self.profile_fixture.id, 27)

    def test_patch_without_categories_error(self):
        """'categories' must be provided in PATCH."""
        logger.info("test_patch_without_categories_error")
        # payload still contains 'elements' but no 'categories'
        fields = self.new_object_data
        # do the create
        res = self.patch(
            self.get_list_url(self.resource_name),
            params=self.credentials,
            payload=fields,
            status=400,
        )
        error_msg = "PATCH request must contain categories list."
        self.assertEqual(res.text, error_msg)

    def test_patch_categories_not_list_error(self):
        """'categories' must be a list in PATCH."""
        logger.info("test_patch_categories_not_list_error")
        fields = self.new_object_data
        fields.pop("elements")
        # a single uri string instead of a list should be rejected
        fields[u'categories'] = unicode(
            self.get_detail_url("category", str(self.category_fixture1.id)))
        # do the create
        res = self.patch(
            self.get_list_url(self.resource_name),
            params=self.credentials,
            payload=fields,
            status=400,
        )
        error_msg = "PATCH request must contain categories list."
        self.assertEqual(res.text, error_msg)

    def test_patch_categories_list_not_string_or_hash_error(self):
        """'categories' must be a list in PATCH."""
        logger.info("test_patch_categories_list_not_string_or_hash_error")
        fields = self.new_object_data
        fields.pop("elements")
        # list items must be resource uris or hashes, not bare ints
        fields[u'categories'] = [1, 2, 3]
        # do the create
        res = self.patch(
            self.get_list_url(self.resource_name),
            params=self.credentials,
            payload=fields,
            status=400,
        )
        error_msg = "categories list must contain resource uris or hashes."
        self.assertEqual(res.text, error_msg)

    def test_patch_with_exclude(self):
        """Combinatorics excluding some elements."""
        logger.info("test_patch_with_exclude")
        fields = self.new_object_data
        # create more elements for each category
        for x in range(2):
            self.F.ElementFactory(category=self.category_fixture1, name="A %s" % x)
            self.F.ElementFactory(category=self.category_fixture2, name="B %s" % x)
            self.F.ElementFactory(category=self.category_fixture3, name="C %s" % x)
        # modify fields to send categories rather than elements
        fields.pop('elements')
        fields['categories'] = [
            {
                u'category': unicode(self.get_detail_url(
                    "category", str(self.category_fixture1.id))),
                u'exclude': [unicode(self.get_detail_url(
                    "element", str(self.element_fixture1.id))), ],
            },
            {
                u'category': unicode(self.get_detail_url(
                    "category", str(self.category_fixture2.id))),
                u'exclude': [unicode(self.get_detail_url(
                    "element", str(self.element_fixture2.id))), ],
            },
            {
                u'category': unicode(self.get_detail_url(
                    "category", str(self.category_fixture3.id))),
                u'exclude': [unicode(self.get_detail_url(
                    "element", str(self.element_fixture3.id))), ],
            }, ]
        # do the create
        res = self.patch(
            self.get_list_url(self.resource_name),
            params=self.credentials,
            payload=fields,
        )
        # check that it made the right number of environments
        # (excluding 1 of 3 elements per category -> 2 ** 3 = 8)
        self._test_filter_list_by(u'profile', self.profile_fixture.id, 8)

    def test_patch_with_include(self):
        """Combinatorics including some elements."""
        logger.info("test_patch_with_include")
        fields = self.new_object_data
        # create more elements for each category
        for x in range(2):
            self.F.ElementFactory(category=self.category_fixture1, name="A %s" % x)
            self.F.ElementFactory(category=self.category_fixture2, name="B %s" % x)
            self.F.ElementFactory(category=self.category_fixture3, name="C %s" % x)
        # modify fields to send categories rather than elements
        fields.pop('elements')
        fields['categories'] = [
            {
                u'category': unicode(self.get_detail_url(
                    "category", str(self.category_fixture1.id))),
                u'include': [unicode(self.get_detail_url(
                    "element", str(self.element_fixture1.id))), ],
            },
            {
                u'category': unicode(self.get_detail_url(
                    "category", str(self.category_fixture2.id))),
                u'include': [unicode(self.get_detail_url(
                    "element", str(self.element_fixture2.id))), ],
            },
            {
                u'category': unicode(self.get_detail_url(
                    "category", str(self.category_fixture3.id))),
                u'include': [unicode(self.get_detail_url(
                    "element", str(self.element_fixture3.id))), ],
            }, ]
        # do the create
        res = self.patch(
            self.get_list_url(self.resource_name),
            params=self.credentials,
            payload=fields,
        )
        # check that it made the right number of environments
        # (including exactly 1 element per category -> 1 ** 3 = 1)
        self._test_filter_list_by(u'profile', self.profile_fixture.id, 1)

    def test_patch_no_include_no_exclude(self):
        """Sending hashes without include or exclude should do the same as
        sending regular uri strings."""
        logger.info("test_patch_no_include_no_exclude")
        fields = self.new_object_data
        # create more elements for each category
        for x in range(2):
            self.F.ElementFactory(category=self.category_fixture1, name="A %s" % x)
            self.F.ElementFactory(category=self.category_fixture2, name="B %s" % x)
            self.F.ElementFactory(category=self.category_fixture3, name="C %s" % x)
        # modify fields to send categories rather than elements
        fields.pop('elements')
        fields['categories'] = [
            {
                u'category': unicode(self.get_detail_url(
                    "category", str(self.category_fixture1.id))),
            },
            {
                u'category': unicode(self.get_detail_url(
                    "category", str(self.category_fixture2.id))),
            },
            {
                u'category': unicode(self.get_detail_url(
                    "category", str(self.category_fixture3.id))),
            }, ]
        # do the create
        res = self.patch(
            self.get_list_url(self.resource_name),
            params=self.credentials,
            payload=fields,
        )
        # check that it made the right number of environments
        # (no include/exclude -> full combinatorics, 3 ** 3 = 27)
        self._test_filter_list_by(u'profile', self.profile_fixture.id, 27)
| |
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spanner read-write transaction support."""
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1._helpers import (
_make_value_pb,
_merge_query_options,
_metadata_with_prefix,
)
from google.cloud.spanner_v1 import CommitRequest
from google.cloud.spanner_v1 import ExecuteBatchDmlRequest
from google.cloud.spanner_v1 import ExecuteSqlRequest
from google.cloud.spanner_v1 import TransactionSelector
from google.cloud.spanner_v1 import TransactionOptions
from google.cloud.spanner_v1.snapshot import _SnapshotBase
from google.cloud.spanner_v1.batch import _BatchBase
from google.cloud.spanner_v1._opentelemetry_tracing import trace_call
from google.cloud.spanner_v1 import RequestOptions
from google.api_core import gapic_v1
class Transaction(_SnapshotBase, _BatchBase):
"""Implement read-write transaction semantics for a session.
:type session: :class:`~google.cloud.spanner_v1.session.Session`
:param session: the session used to perform the commit
:raises ValueError: if session has an existing transaction
"""
committed = None
"""Timestamp at which the transaction was successfully committed."""
rolled_back = False
commit_stats = None
_multi_use = True
_execute_sql_count = 0
def __init__(self, session):
if session._transaction is not None:
raise ValueError("Session has existing transaction.")
super(Transaction, self).__init__(session)
def _check_state(self):
"""Helper for :meth:`commit` et al.
:raises: :exc:`ValueError` if the object's state is invalid for making
API requests.
"""
if self._transaction_id is None:
raise ValueError("Transaction is not begun")
if self.committed is not None:
raise ValueError("Transaction is already committed")
if self.rolled_back:
raise ValueError("Transaction is already rolled back")
def _make_txn_selector(self):
"""Helper for :meth:`read`.
:rtype:
:class:`~.transaction_pb2.TransactionSelector`
:returns: a selector configured for read-write transaction semantics.
"""
self._check_state()
return TransactionSelector(id=self._transaction_id)
def begin(self):
"""Begin a transaction on the database.
:rtype: bytes
:returns: the ID for the newly-begun transaction.
:raises ValueError:
if the transaction is already begun, committed, or rolled back.
"""
if self._transaction_id is not None:
raise ValueError("Transaction already begun")
if self.committed is not None:
raise ValueError("Transaction already committed")
if self.rolled_back:
raise ValueError("Transaction is already rolled back")
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
txn_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
with trace_call("CloudSpanner.BeginTransaction", self._session):
response = api.begin_transaction(
session=self._session.name, options=txn_options, metadata=metadata
)
self._transaction_id = response.id
return self._transaction_id
def rollback(self):
"""Roll back a transaction on the database."""
self._check_state()
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
with trace_call("CloudSpanner.Rollback", self._session):
api.rollback(
session=self._session.name,
transaction_id=self._transaction_id,
metadata=metadata,
)
self.rolled_back = True
del self._session._transaction
def commit(self, return_commit_stats=False, request_options=None):
"""Commit mutations to the database.
:type return_commit_stats: bool
:param return_commit_stats:
If true, the response will return commit stats which can be accessed though commit_stats.
:type request_options:
:class:`google.cloud.spanner_v1.types.RequestOptions`
:param request_options:
(Optional) Common options for this request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.RequestOptions`.
:rtype: datetime
:returns: timestamp of the committed changes.
:raises ValueError: if there are no mutations to commit.
"""
self._check_state()
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
trace_attributes = {"num_mutations": len(self._mutations)}
if request_options is None:
request_options = RequestOptions()
elif type(request_options) == dict:
request_options = RequestOptions(request_options)
if self.transaction_tag is not None:
request_options.transaction_tag = self.transaction_tag
# Request tags are not supported for commit requests.
request_options.request_tag = None
request = CommitRequest(
session=self._session.name,
mutations=self._mutations,
transaction_id=self._transaction_id,
return_commit_stats=return_commit_stats,
request_options=request_options,
)
with trace_call("CloudSpanner.Commit", self._session, trace_attributes):
response = api.commit(request=request, metadata=metadata,)
self.committed = response.commit_timestamp
if return_commit_stats:
self.commit_stats = response.commit_stats
del self._session._transaction
return self.committed
@staticmethod
def _make_params_pb(params, param_types):
"""Helper for :meth:`execute_update`.
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:rtype: Union[None, :class:`Struct`]
:returns: a struct message for the passed params, or None
:raises ValueError:
If ``param_types`` is None but ``params`` is not None.
:raises ValueError:
If ``params`` is None but ``param_types`` is not None.
"""
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
return Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
else:
if param_types is not None:
raise ValueError("Specify 'params' when passing 'param_types'.")
return {}
def execute_update(
self,
dml,
params=None,
param_types=None,
query_mode=None,
query_options=None,
request_options=None,
*,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
):
"""Perform an ``ExecuteSql`` API request with DML.
:type dml: str
:param dml: SQL DML statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type query_mode:
:class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryMode`
:param query_mode: Mode governing return of results / query plan.
See:
`QueryMode <https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode>`_.
:type query_options:
:class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict`
:param query_options: (Optional) Options that are provided for query plan stability.
:type request_options:
:class:`google.cloud.spanner_v1.types.RequestOptions`
:param request_options:
(Optional) Common options for this request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.RequestOptions`.
:type retry: :class:`~google.api_core.retry.Retry`
:param retry: (Optional) The retry settings for this request.
:type timeout: float
:param timeout: (Optional) The timeout for this request.
:rtype: int
:returns: Count of rows affected by the DML statement.
"""
params_pb = self._make_params_pb(params, param_types)
database = self._session._database
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
api = database.spanner_api
seqno, self._execute_sql_count = (
self._execute_sql_count,
self._execute_sql_count + 1,
)
# Query-level options have higher precedence than client-level and
# environment-level options
default_query_options = database._instance._client._query_options
query_options = _merge_query_options(default_query_options, query_options)
if request_options is None:
request_options = RequestOptions()
elif type(request_options) == dict:
request_options = RequestOptions(request_options)
request_options.transaction_tag = self.transaction_tag
trace_attributes = {"db.statement": dml}
request = ExecuteSqlRequest(
session=self._session.name,
sql=dml,
transaction=transaction,
params=params_pb,
param_types=param_types,
query_mode=query_mode,
query_options=query_options,
seqno=seqno,
request_options=request_options,
)
with trace_call(
"CloudSpanner.ReadWriteTransaction", self._session, trace_attributes
):
response = api.execute_sql(
request=request, metadata=metadata, retry=retry, timeout=timeout
)
return response.stats.row_count_exact
def batch_update(self, statements, request_options=None):
"""Perform a batch of DML statements via an ``ExecuteBatchDml`` request.
:type statements:
Sequence[Union[ str, Tuple[str, Dict[str, Any], Dict[str, Union[dict, .types.Type]]]]]
:param statements:
List of DML statements, with optional params / param types.
If passed, 'params' is a dict mapping names to the values
for parameter replacement. Keys must match the names used in the
corresponding DML statement. If 'params' is passed, 'param_types'
must also be passed, as a dict mapping names to the type of
value passed in 'params'.
:type request_options:
:class:`google.cloud.spanner_v1.types.RequestOptions`
:param request_options:
(Optional) Common options for this request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.RequestOptions`.
:rtype:
Tuple(status, Sequence[int])
:returns:
Status code, plus counts of rows affected by each completed DML
statement. Note that if the status code is not ``OK``, the
statement triggering the error will not have an entry in the
list, nor will any statements following that one.
"""
parsed = []
for statement in statements:
if isinstance(statement, str):
parsed.append(ExecuteBatchDmlRequest.Statement(sql=statement))
else:
dml, params, param_types = statement
params_pb = self._make_params_pb(params, param_types)
parsed.append(
ExecuteBatchDmlRequest.Statement(
sql=dml, params=params_pb, param_types=param_types
)
)
database = self._session._database
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
api = database.spanner_api
seqno, self._execute_sql_count = (
self._execute_sql_count,
self._execute_sql_count + 1,
)
if request_options is None:
request_options = RequestOptions()
elif type(request_options) == dict:
request_options = RequestOptions(request_options)
request_options.transaction_tag = self.transaction_tag
trace_attributes = {
# Get just the queries from the DML statement batch
"db.statement": ";".join([statement.sql for statement in parsed])
}
request = ExecuteBatchDmlRequest(
session=self._session.name,
transaction=transaction,
statements=parsed,
seqno=seqno,
request_options=request_options,
)
with trace_call("CloudSpanner.DMLTransaction", self._session, trace_attributes):
response = api.execute_batch_dml(request=request, metadata=metadata)
row_counts = [
result_set.stats.row_count_exact for result_set in response.result_sets
]
return response.status, row_counts
def __enter__(self):
"""Begin ``with`` block."""
self.begin()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""End ``with`` block."""
if exc_type is None:
self.commit()
else:
self.rollback()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Rohit Agarwalla, Cisco Systems, Inc.
#
import inspect
import logging
import sys
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.db import api as db_api
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron import neutron_plugin_base_v2
from neutron.openstack.common import importutils
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_credentials_v2 as cred
from neutron.plugins.cisco.common import cisco_exceptions as cexc
from neutron.plugins.cisco.common import config as conf
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.openvswitch import ovs_db_v2 as odb
LOG = logging.getLogger(__name__)
class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2):
    """Virtual Physical Switch Model.
    This implementation works with OVS and Nexus plugin for the
    following topology:
    One or more servers to a nexus switch.
    """
    # Indicates that this model manages plugin state itself.
    MANAGE_STATE = True
    # Bulk operations are supported natively (delegated to the OVS plugin).
    __native_bulk_support = True
    supported_extension_aliases = ["provider", "binding"]
    # Class-level map of plugin key -> loaded device sub-plugin instance,
    # shared across all instances of this model.
    _plugins = {}
    # Methods listed here are always delegated verbatim to the OVS (vswitch)
    # sub-plugin by __getattribute__ below.
    _methods_to_delegate = ['create_network_bulk',
                            'get_network', 'get_networks',
                            'create_port_bulk',
                            'get_port', 'get_ports',
                            'create_subnet', 'create_subnet_bulk',
                            'delete_subnet', 'update_subnet',
                            'get_subnet', 'get_subnets',
                            'create_or_update_agent', 'report_state']
    def __init__(self):
        """Initialize the segmentation manager.
        Checks which device plugins are configured, and load the inventories
        those device plugins for which the inventory is configured.
        """
        conf.CiscoConfigOptions()
        for key in conf.CISCO_PLUGINS.keys():
            plugin_obj = conf.CISCO_PLUGINS[key]
            if plugin_obj is not None:
                self._plugins[key] = importutils.import_object(plugin_obj)
                LOG.debug(_("Loaded device plugin %s"),
                          conf.CISCO_PLUGINS[key])
        # Surface any extra extension aliases advertised by the vswitch
        # sub-plugin through this model's own alias list.
        if ((const.VSWITCH_PLUGIN in self._plugins) and
            hasattr(self._plugins[const.VSWITCH_PLUGIN],
                    "supported_extension_aliases")):
            self.supported_extension_aliases.extend(
                self._plugins[const.VSWITCH_PLUGIN].
                supported_extension_aliases)
        # At this point, all the database models should have been loaded. It's
        # possible that configure_db() may have been called by one of the
        # plugins loaded in above. Otherwise, this call is to make sure that
        # the database is initialized
        db_api.configure_db()
        # Initialize credential store after database initialization
        cred.Store.initialize()
        LOG.debug(_("%(module)s.%(name)s init done"),
                  {'module': __name__,
                   'name': self.__class__.__name__})
        # Check whether we have a valid Nexus driver loaded
        self.config_nexus = False
        nexus_driver = cfg.CONF.CISCO.nexus_driver
        if nexus_driver.endswith('CiscoNEXUSDriver'):
            self.config_nexus = True
    def __getattribute__(self, name):
        """Delegate calls to OVS sub-plugin.
        This delegates the calls to the methods implemented only by the OVS
        sub-plugin. Note: Currently, bulking is handled by the caller
        (PluginV2), and this model class expects to receive only non-bulking
        calls. If, however, a bulking call is made, this will method will
        delegate the call to the OVS plugin.
        """
        # Use the superclass accessor throughout to avoid infinite recursion
        # through this override.
        super_getattribute = super(VirtualPhysicalSwitchModelV2,
                                   self).__getattribute__
        methods = super_getattribute('_methods_to_delegate')
        if name in methods:
            plugin = super_getattribute('_plugins')[const.VSWITCH_PLUGIN]
            return getattr(plugin, name)
        try:
            return super_getattribute(name)
        except AttributeError:
            # Fall back to the vswitch plugin for any attribute this model
            # does not define itself.
            plugin = super_getattribute('_plugins')[const.VSWITCH_PLUGIN]
            return getattr(plugin, name)
    def _func_name(self, offset=0):
        """Get the name of the calling function."""
        # stack()[1 + offset] is the caller's frame record; index 3 holds
        # the function name.
        frame_record = inspect.stack()[1 + offset]
        func_name = frame_record[3]
        return func_name
    def _invoke_plugin_per_device(self, plugin_key, function_name, args):
        """Invoke plugin per device.
        Invokes a device plugin's relevant functions (based on the
        plugin implementation) for completing this operation.
        """
        if plugin_key not in self._plugins:
            LOG.info(_("No %s Plugin loaded"), plugin_key)
            LOG.info(_("%(plugin_key)s: %(function_name)s with args %(args)s "
                       "ignored"),
                     {'plugin_key': plugin_key, 'function_name': function_name,
                      'args': args})
            return
        device_params = {const.DEVICE_IP: []}
        # Result is wrapped in a single-element list to keep the historical
        # per-device return shape (callers index [0]).
        return [self._invoke_plugin(plugin_key, function_name, args,
                                    device_params)]
    def _invoke_plugin(self, plugin_key, function_name, args, kwargs):
        """Invoke plugin.
        Invokes the relevant function on a device plugin's
        implementation for completing this operation.
        """
        func = getattr(self._plugins[plugin_key], function_name)
        # Positional arity of the target function, excluding 'self'.
        func_args_len = int(inspect.getargspec(func).args.__len__()) - 1
        fargs, varargs, varkw, defaults = inspect.getargspec(func)
        if args.__len__() > func_args_len:
            # Extra positional args beyond the target's arity are expected to
            # be dicts; fold their items into the keyword arguments.
            func_args = args[:func_args_len]
            extra_args = args[func_args_len:]
            for dict_arg in extra_args:
                for k, v in dict_arg.iteritems():
                    kwargs[k] = v
            return func(*func_args, **kwargs)
        else:
            # Only pass kwargs through when the target actually declares a
            # **kwargs catch-all (named exactly 'kwargs').
            if (varkw == 'kwargs'):
                return func(*args, **kwargs)
            else:
                return func(*args)
    def _get_segmentation_id(self, network_id):
        """Return the VLAN segmentation ID bound to a network.
        Raises NetworkSegmentIDNotFound when no binding exists.
        """
        binding_seg_id = odb.get_network_binding(None, network_id)
        if not binding_seg_id:
            raise cexc.NetworkSegmentIDNotFound(net_id=network_id)
        return binding_seg_id.segmentation_id
    def _get_provider_vlan_id(self, network):
        """Return the segmentation ID for a provider VLAN network.
        Implicitly returns None unless all provider attributes are set and
        the network type is VLAN.
        """
        if (all(attributes.is_attr_set(network.get(attr))
                for attr in (provider.NETWORK_TYPE,
                             provider.PHYSICAL_NETWORK,
                             provider.SEGMENTATION_ID))
            and
                network[provider.NETWORK_TYPE] == const.NETWORK_TYPE_VLAN):
            return network[provider.SEGMENTATION_ID]
    def create_network(self, context, network):
        """Create network.
        Perform this operation in the context of the configured device
        plugins.
        """
        LOG.debug(_("create_network() called"))
        provider_vlan_id = self._get_provider_vlan_id(network[const.NETWORK])
        args = [context, network]
        ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                                    self._func_name(),
                                                    args)
        # The vswitch plugin did all the verification. If it's a provider
        # vlan network, save it for the nexus plugin to use later.
        if provider_vlan_id:
            network_id = ovs_output[0][const.NET_ID]
            cdb.add_provider_network(network_id,
                                     const.NETWORK_TYPE_VLAN,
                                     provider_vlan_id)
            LOG.debug(_("Provider network added to DB: %(network_id)s, "
                        "%(vlan_id)s"),
                      {'network_id': network_id, 'vlan_id': provider_vlan_id})
        return ovs_output[0]
    def update_network(self, context, id, network):
        """Update network.
        Perform this operation in the context of the configured device
        plugins.
        Note that the Nexus sub-plugin does not need to be notified
        (and the Nexus switch does not need to be [re]configured)
        for an update network operation because the Nexus sub-plugin
        is agnostic of all network-level attributes except the
        segmentation ID. Furthermore, updating of the segmentation ID
        is not supported by the OVS plugin since it is considered a
        provider attribute, so it is not supported by this method.
        """
        LOG.debug(_("update_network() called"))
        # We can only support updating of provider attributes if all the
        # configured sub-plugins support it. Currently we have no method
        # in place for checking whether a sub-plugin supports it,
        # so assume not.
        provider._raise_if_updates_provider_attributes(network['network'])
        args = [context, id, network]
        ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                                    self._func_name(),
                                                    args)
        return ovs_output[0]
    def delete_network(self, context, id):
        """Delete network.
        Perform this operation in the context of the configured device
        plugins.
        """
        args = [context, id]
        ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                                    self._func_name(),
                                                    args)
        # Also drop any provider-network record saved at create time.
        if cdb.remove_provider_network(id):
            LOG.debug(_("Provider network removed from DB: %s"), id)
        return ovs_output[0]
    def get_network(self, context, id, fields=None):
        """For this model this method will be delegated to vswitch plugin."""
        pass
    def get_networks(self, context, filters=None, fields=None):
        """For this model this method will be delegated to vswitch plugin."""
        pass
    def _invoke_nexus_for_net_create(self, context, tenant_id, net_id,
                                     instance_id, host_id):
        """Configure the Nexus sub-plugin for a network/VM attachment.
        No-op (returns False) when no Nexus driver is configured.
        """
        if not self.config_nexus:
            return False
        network = self.get_network(context, net_id)
        vlan_id = self._get_segmentation_id(net_id)
        vlan_name = conf.CISCO.vlan_name_prefix + str(vlan_id)
        network[const.NET_VLAN_ID] = vlan_id
        network[const.NET_VLAN_NAME] = vlan_name
        attachment = {
            const.TENANT_ID: tenant_id,
            const.INSTANCE_ID: instance_id,
            const.HOST_NAME: host_id,
        }
        self._invoke_plugin_per_device(
            const.NEXUS_PLUGIN,
            'create_network',
            [network, attachment])
    def _check_valid_port_device_owner(self, port):
        """Check the port for valid device_owner.
        Don't call the nexus plugin for router and dhcp
        port owners.
        """
        return port['device_owner'].startswith('compute')
    def _get_port_host_id_from_bindings(self, port):
        """Get host_id from portbindings."""
        host_id = None
        if (portbindings.HOST_ID in port and
                attributes.is_attr_set(port[portbindings.HOST_ID])):
            host_id = port[portbindings.HOST_ID]
        return host_id
    def create_port(self, context, port):
        """Create port.
        Perform this operation in the context of the configured device
        plugins.
        """
        LOG.debug(_("create_port() called"))
        args = [context, port]
        ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                                    self._func_name(),
                                                    args)
        instance_id = port['port']['device_id']
        # Only call nexus plugin if there's a valid instance_id, host_id
        # and device_owner
        try:
            host_id = self._get_port_host_id_from_bindings(port['port'])
            if (instance_id and host_id and
                    self._check_valid_port_device_owner(port['port'])):
                net_id = port['port']['network_id']
                tenant_id = port['port']['tenant_id']
                self._invoke_nexus_for_net_create(
                    context, tenant_id, net_id, instance_id, host_id)
        except Exception:
            # Create network on the Nexus plugin has failed, so we need
            # to rollback the port creation on the VSwitch plugin.
            exc_info = sys.exc_info()
            try:
                id = ovs_output[0]['id']
                args = [context, id]
                ovs_output = self._invoke_plugin_per_device(
                    const.VSWITCH_PLUGIN,
                    'delete_port',
                    args)
            finally:
                # Re-raise the original exception
                # (Python 2 three-argument raise preserves the traceback).
                raise exc_info[0], exc_info[1], exc_info[2]
        return ovs_output[0]
    def get_port(self, context, id, fields=None):
        """For this model this method will be delegated to vswitch plugin."""
        pass
    def get_ports(self, context, filters=None, fields=None):
        """For this model this method will be delegated to vswitch plugin."""
        pass
    def update_port(self, context, id, port):
        """Update port.
        Perform this operation in the context of the configured device
        plugins.
        """
        LOG.debug(_("update_port() called"))
        old_port = self.get_port(context, id)
        old_device = old_port['device_id']
        args = [context, id, port]
        ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                                    self._func_name(),
                                                    args)
        net_id = old_port['network_id']
        instance_id = ''
        if 'device_id' in port['port']:
            instance_id = port['port']['device_id']
        # Check if there's a new device_id
        try:
            host_id = self._get_port_host_id_from_bindings(port['port'])
            # Only notify the Nexus plugin when the port gains a device
            # (old_device empty) with a valid host binding and owner.
            if (instance_id and not old_device and host_id and
                    self._check_valid_port_device_owner(port['port'])):
                tenant_id = old_port['tenant_id']
                self._invoke_nexus_for_net_create(
                    context, tenant_id, net_id, instance_id, host_id)
            return ovs_output[0]
        except Exception:
            exc_info = sys.exc_info()
            LOG.error(_("Unable to update port '%s' on Nexus switch"),
                      old_port['name'], exc_info=exc_info)
            try:
                # Roll back vSwitch plugin to original port attributes.
                args = [context, id, {'port': old_port}]
                ovs_output = self._invoke_plugin_per_device(
                    const.VSWITCH_PLUGIN,
                    self._func_name(),
                    args)
            finally:
                # Re-raise the original exception
                raise exc_info[0], exc_info[1], exc_info[2]
    def delete_port(self, context, id):
        """Delete port.
        Perform this operation in the context of the configured device
        plugins.
        """
        LOG.debug(_("delete_port() called"))
        port = self.get_port(context, id)
        host_id = self._get_port_host_id_from_bindings(port)
        # Detach on the Nexus side first, then on the vswitch side.
        if (self.config_nexus and host_id and
                self._check_valid_port_device_owner(port)):
            vlan_id = self._get_segmentation_id(port['network_id'])
            n_args = [port['device_id'], vlan_id]
            self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
                                           self._func_name(),
                                           n_args)
        try:
            args = [context, id]
            ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                                        self._func_name(),
                                                        args)
        except Exception:
            exc_info = sys.exc_info()
            # Roll back the delete port on the Nexus plugin
            try:
                tenant_id = port['tenant_id']
                net_id = port['network_id']
                instance_id = port['device_id']
                host_id = port[portbindings.HOST_ID]
                self._invoke_nexus_for_net_create(context, tenant_id, net_id,
                                                  instance_id, host_id)
            finally:
                # Raise the original exception.
                raise exc_info[0], exc_info[1], exc_info[2]
        return ovs_output[0]
    def add_router_interface(self, context, router_id, interface_info):
        """Add a router interface on a subnet.
        Only invoke the Nexus plugin to create SVI if a Nexus
        plugin is loaded, otherwise send it to the vswitch plugin
        """
        nexus_driver = cfg.CONF.CISCO.nexus_driver
        if nexus_driver.endswith('CiscoNEXUSDriver'):
            LOG.debug(_("Nexus plugin loaded, creating SVI on switch"))
            # SVI creation requires a subnet (not a port) reference.
            if 'subnet_id' not in interface_info:
                raise cexc.SubnetNotSpecified()
            if 'port_id' in interface_info:
                raise cexc.PortIdForNexusSvi()
            subnet = self.get_subnet(context, interface_info['subnet_id'])
            gateway_ip = subnet['gateway_ip']
            # Get gateway IP address and netmask
            cidr = subnet['cidr']
            netmask = cidr.split('/', 1)[1]
            gateway_ip = gateway_ip + '/' + netmask
            network_id = subnet['network_id']
            vlan_id = self._get_segmentation_id(network_id)
            vlan_name = conf.CISCO.vlan_name_prefix + str(vlan_id)
            n_args = [vlan_name, vlan_id, subnet['id'], gateway_ip, router_id]
            return self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
                                                  self._func_name(),
                                                  n_args)
        else:
            LOG.debug(_("No Nexus plugin, sending to vswitch"))
            n_args = [context, router_id, interface_info]
            return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                                  self._func_name(),
                                                  n_args)
    def remove_router_interface(self, context, router_id, interface_info):
        """Remove a router interface.
        Only invoke the Nexus plugin to delete SVI if a Nexus
        plugin is loaded, otherwise send it to the vswitch plugin
        """
        nexus_driver = cfg.CONF.CISCO.nexus_driver
        if nexus_driver.endswith('CiscoNEXUSDriver'):
            LOG.debug(_("Nexus plugin loaded, deleting SVI from switch"))
            subnet = self.get_subnet(context, interface_info['subnet_id'])
            network_id = subnet['network_id']
            vlan_id = self._get_segmentation_id(network_id)
            n_args = [vlan_id, router_id]
            return self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
                                                  self._func_name(),
                                                  n_args)
        else:
            LOG.debug(_("No Nexus plugin, sending to vswitch"))
            n_args = [context, router_id, interface_info]
            return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
                                                  self._func_name(),
                                                  n_args)
    def create_subnet(self, context, subnet):
        """For this model this method will be delegated to vswitch plugin."""
        pass
    def update_subnet(self, context, id, subnet):
        """For this model this method will be delegated to vswitch plugin."""
        pass
    def get_subnet(self, context, id, fields=None):
        """For this model this method will be delegated to vswitch plugin."""
        pass
    def delete_subnet(self, context, id, kwargs):
        """For this model this method will be delegated to vswitch plugin."""
        pass
    def get_subnets(self, context, filters=None, fields=None):
        """For this model this method will be delegated to vswitch plugin."""
        pass
| |
#!/usr/bin/env python
# encoding: utf-8
"""Convert (to and) from rdflib graphs to other well known graph libraries.
Currently the following libraries are supported:
- networkx: MultiDiGraph, DiGraph, Graph
- graph_tool: Graph
Doctests in this file are all skipped, as we can't run them conditionally if
networkx or graph_tool are available and they would err otherwise.
see ../../test/test_extras_external_graph_libs.py for conditional tests
"""
import logging
logger = logging.getLogger(__name__)
def _identity(x):
return x
def _rdflib_to_networkx_graph(
    graph,
    nxgraph,
    calc_weights,
    edge_attrs,
    transform_s=_identity,
    transform_o=_identity,
):
    """Shared worker for the multidigraph / digraph / graph converters.

    Modifies nxgraph in-place!

    Arguments:
        graph: an rdflib.Graph.
        nxgraph: a networkx.Graph/DiGraph/MultiDigraph.
        calc_weights: If True adds a 'weight' attribute to each edge according
            to the count of s,p,o triples between s and o, which is meaningful
            for Graph/DiGraph.
        edge_attrs: Callable to construct edge data from s, p, o.
            'triples' attribute is handled specially to be merged.
            'weight' should not be generated if calc_weights==True.
            (see invokers below!)
        transform_s: Callable to transform node generated from s.
        transform_o: Callable to transform node generated from o.
    """
    assert callable(edge_attrs)
    assert callable(transform_s)
    assert callable(transform_o)
    import networkx as nx

    for s, p, o in graph:
        # Apply the optional node transformations first.
        src = transform_s(s)
        dst = transform_o(o)
        existing = nxgraph.get_edge_data(src, dst)
        # MultiDiGraph always gets a fresh edge; Graph/DiGraph only when
        # no edge exists yet between the two nodes.
        if existing is None or isinstance(nxgraph, nx.MultiDiGraph):
            attrs = edge_attrs(s, p, o)
            if calc_weights:
                attrs["weight"] = 1
            nxgraph.add_edge(src, dst, **attrs)
            continue
        # Edge already present: update its attributes in place.
        if calc_weights:
            existing["weight"] += 1
        if "triples" in existing:
            existing["triples"].extend(edge_attrs(s, p, o)["triples"])
def rdflib_to_networkx_multidigraph(
    graph, edge_attrs=lambda s, p, o: {"key": p}, **kwds
):
    r"""Converts the given graph into a networkx.MultiDiGraph.

    The subjects and objects become the nodes of the MultiDiGraph and the
    predicates are used as edge keys (to identify multi-edges).

    :Parameters:

    - graph: a rdflib.Graph.
    - edge_attrs: Callable to construct later edge_attributes. It receives
        3 variables (s, p, o) and should construct a dictionary that is
        passed to networkx's add_edge(s, o, \*\*attrs) function.

        By default this will include setting the MultiDiGraph key=p here.
        If you don't want to be able to re-identify the edge later on, you
        can set this to `lambda s, p, o: {}`. In this case MultiDiGraph's
        default (increasing ints) will be used.

    Returns:
        networkx.MultiDiGraph

    >>> from rdflib import Graph, URIRef, Literal
    >>> g = Graph()
    >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l')
    >>> p, q = URIRef('p'), URIRef('q')
    >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
    >>> for t in edges:
    ...     g.add(t)
    ...
    >>> mdg = rdflib_to_networkx_multidigraph(g)
    >>> len(mdg.edges())
    4
    >>> mdg.has_edge(a, b)
    True
    >>> mdg.has_edge(a, b, key=p)
    True
    >>> mdg.has_edge(a, b, key=q)
    True

    >>> mdg = rdflib_to_networkx_multidigraph(g, edge_attrs=lambda s,p,o: {})
    >>> mdg.has_edge(a, b, key=0)
    True
    >>> mdg.has_edge(a, b, key=1)
    True
    """
    import networkx as nx

    result = nx.MultiDiGraph()
    # Multi-edges keep every triple, so no weight calculation is needed.
    _rdflib_to_networkx_graph(graph, result, False, edge_attrs, **kwds)
    return result
def rdflib_to_networkx_digraph(
    graph,
    calc_weights=True,
    edge_attrs=lambda s, p, o: {"triples": [(s, p, o)]},
    **kwds,
):
    r"""Converts the given graph into a networkx.DiGraph.
    As an rdflib.Graph() can contain multiple edges between nodes, by default
    adds the a 'triples' attribute to the single DiGraph edge with a list of
    all triples between s and o.
    Also by default calculates the edge weight as the length of triples.
    :Parameters:
    - `graph`: a rdflib.Graph.
    - `calc_weights`: If true calculate multi-graph edge-count as edge 'weight'
    - `edge_attrs`: Callable to construct later edge_attributes. It receives
        3 variables (s, p, o) and should construct a dictionary that is passed to
        networkx's add_edge(s, o, \*\*attrs) function.
        By default this will include setting the 'triples' attribute here,
        which is treated specially by us to be merged. Other attributes of
        multi-edges will only contain the attributes of the first edge.
        If you don't want the 'triples' attribute for tracking, set this to
        `lambda s, p, o: {}`.
    Returns: networkx.DiGraph
    >>> from rdflib import Graph, URIRef, Literal
    >>> g = Graph()
    >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l')
    >>> p, q = URIRef('p'), URIRef('q')
    >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
    >>> for t in edges:
    ...     g.add(t)
    ...
    >>> dg = rdflib_to_networkx_digraph(g)
    >>> dg[a][b]['weight']
    2
    >>> sorted(dg[a][b]['triples']) == [(a, p, b), (a, q, b)]
    True
    >>> len(dg.edges())
    3
    >>> dg.size()
    3
    >>> dg.size(weight='weight')
    4.0
    >>> dg = rdflib_to_networkx_digraph(g, False, edge_attrs=lambda s,p,o:{})
    >>> 'weight' in dg[a][b]
    False
    >>> 'triples' in dg[a][b]
    False
    """
    import networkx as nx
    dg = nx.DiGraph()
    _rdflib_to_networkx_graph(graph, dg, calc_weights, edge_attrs, **kwds)
    return dg
def rdflib_to_networkx_graph(
    graph,
    calc_weights=True,
    edge_attrs=lambda s, p, o: {"triples": [(s, p, o)]},
    **kwds,
):
    r"""Converts the given graph into a networkx.Graph.

    As an rdflib.Graph() can contain multiple directed edges between nodes, by
    default adds the a 'triples' attribute to the single DiGraph edge with a
    list of triples between s and o in graph.
    Also by default calculates the edge weight as the len(triples).

    :Parameters:

    - graph: a rdflib.Graph.
    - calc_weights: If true calculate multi-graph edge-count as edge 'weight'
    - edge_attrs: Callable to construct later edge_attributes. It receives
        3 variables (s, p, o) and should construct a dictionary that is
        passed to networkx's add_edge(s, o, \*\*attrs) function.

        By default this will include setting the 'triples' attribute here,
        which is treated specially by us to be merged. Other attributes of
        multi-edges will only contain the attributes of the first edge.
        If you don't want the 'triples' attribute for tracking, set this to
        `lambda s, p, o: {}`.

    Returns:
        networkx.Graph

    >>> from rdflib import Graph, URIRef, Literal
    >>> g = Graph()
    >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l')
    >>> p, q = URIRef('p'), URIRef('q')
    >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
    >>> for t in edges:
    ...     g.add(t)
    ...
    >>> ug = rdflib_to_networkx_graph(g)
    >>> ug[a][b]['weight']
    3
    >>> sorted(ug[a][b]['triples']) == [(a, p, b), (a, q, b), (b, p, a)]
    True
    >>> len(ug.edges())
    2
    >>> ug.size()
    2
    >>> ug.size(weight='weight')
    4.0

    >>> ug = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s,p,o:{})
    >>> 'weight' in ug[a][b]
    False
    >>> 'triples' in ug[a][b]
    False
    """
    import networkx as nx

    undirected = nx.Graph()
    _rdflib_to_networkx_graph(graph, undirected, calc_weights, edge_attrs, **kwds)
    return undirected
def rdflib_to_graphtool(
    graph,
    v_prop_names=("term",),
    e_prop_names=("term",),
    transform_s=lambda s, p, o: {"term": s},
    transform_p=lambda s, p, o: {"term": p},
    transform_o=lambda s, p, o: {"term": o},
):
    """Converts the given graph into a graph_tool.Graph().

    The subjects and objects are the later vertices of the Graph.
    The predicates become edges.

    :Parameters:

    - graph: a rdflib.Graph.
    - v_prop_names: an iterable of names for the vertex properties. The
        default is set to ('term',) (see transform_s, transform_o below).
    - e_prop_names: an iterable of names for the edge properties.
    - transform_s: callable with s, p, o input. Should return a dictionary
        containing a value for each name in v_prop_names. By default is set
        to {'term': s} which in combination with v_prop_names = ('term',)
        adds s as 'term' property to the generated vertex for s.
    - transform_p: similar to transform_s, but wrt. e_prop_names. By default
        returns {'term': p} which adds p as a property to the generated
        edge between the vertex for s and the vertex for o.
    - transform_o: similar to transform_s.

    Returns: graph_tool.Graph()

    >>> from rdflib import Graph, URIRef, Literal
    >>> g = Graph()
    >>> a, b, l = URIRef('a'), URIRef('b'), Literal('l')
    >>> p, q = URIRef('p'), URIRef('q')
    >>> edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
    >>> for t in edges:
    ...     g.add(t)
    ...
    >>> mdg = rdflib_to_graphtool(g)
    >>> len(list(mdg.edges()))
    4
    >>> from graph_tool import util as gt_util
    >>> vpterm = mdg.vertex_properties['term']
    >>> va = gt_util.find_vertex(mdg, vpterm, a)[0]
    >>> vb = gt_util.find_vertex(mdg, vpterm, b)[0]
    >>> vl = gt_util.find_vertex(mdg, vpterm, l)[0]
    >>> (va, vb) in [(e.source(), e.target()) for e in list(mdg.edges())]
    True
    >>> epterm = mdg.edge_properties['term']
    >>> len(list(gt_util.find_edge(mdg, epterm, p))) == 3
    True
    >>> len(list(gt_util.find_edge(mdg, epterm, q))) == 1
    True

    >>> mdg = rdflib_to_graphtool(
    ...     g,
    ...     e_prop_names=[str('name')],
    ...     transform_p=lambda s, p, o: {str('name'): str(p)})
    >>> epterm = mdg.edge_properties['name']
    >>> len(list(gt_util.find_edge(mdg, epterm, str(p)))) == 3
    True
    >>> len(list(gt_util.find_edge(mdg, epterm, str(q)))) == 1
    True
    """
    import graph_tool as gt

    g = gt.Graph()
    # Register one python-object-valued property map per requested name.
    vprops = [(vpn, g.new_vertex_property("object")) for vpn in v_prop_names]
    for vpn, vprop in vprops:
        g.vertex_properties[vpn] = vprop
    eprops = [(epn, g.new_edge_property("object")) for epn in e_prop_names]
    for epn, eprop in eprops:
        g.edge_properties[epn] = eprop

    # Each distinct RDF node maps to exactly one vertex; the transform_*
    # callables are only invoked when a vertex is first created.
    node_to_vertex = {}
    for s, p, o in graph:
        sv = node_to_vertex.get(s)
        if sv is None:
            sv = node_to_vertex[s] = g.add_vertex()
            tmp_props = transform_s(s, p, o)
            for vpn, vprop in vprops:
                vprop[sv] = tmp_props[vpn]

        ov = node_to_vertex.get(o)
        if ov is None:
            ov = node_to_vertex[o] = g.add_vertex()
            tmp_props = transform_o(s, p, o)
            for vpn, vprop in vprops:
                vprop[ov] = tmp_props[vpn]

        e = g.add_edge(sv, ov)
        tmp_props = transform_p(s, p, o)
        for epn, eprop in eprops:
            eprop[e] = tmp_props[epn]
    return g
| |
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import worker
import lock
import logging
import logging.config
import rpc
import optparse
import scheduler
import warnings
import configuration
import task
import parameter
import re
import argparse
import sys
import os
from task import Register
def setup_interface_logging(conf_file=None):
    """Configure logging for the luigi command line interface.

    With no ``conf_file`` a stderr handler at DEBUG level is attached to the
    'luigi-interface' logger; otherwise the given logging config file is
    loaded. Safe to call repeatedly: only the first call has any effect.
    """
    # A function attribute acts as a run-once latch so repeated calls
    # (e.g. from multiple entry points) don't stack duplicate handlers.
    if getattr(setup_interface_logging, "has_run", False):
        return
    if conf_file is None:
        # No config file given: log DEBUG and above to stderr with a
        # minimal "LEVEL: message" format.
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
        interface_logger = logging.getLogger('luigi-interface')
        interface_logger.setLevel(logging.DEBUG)
        interface_logger.addHandler(handler)
    else:
        # Defer entirely to the user-provided logging configuration file.
        logging.config.fileConfig(conf_file, disable_existing_loggers=False)
    setup_interface_logging.has_run = True
def get_config():
    """Deprecated alias for :func:`luigi.configuration.get_config`."""
    # Pass DeprecationWarning explicitly, for consistency with expose() and
    # expose_main() below (the original emitted a plain UserWarning).
    warnings.warn('Use luigi.configuration.get_config() instead', DeprecationWarning)
    return configuration.get_config()
class EnvironmentParamsContainer(task.Task):
    ''' Keeps track of a bunch of environment params.
    Uses the internal luigi parameter mechanism.
    The nice thing is that we can instantiate this class
    and get an object with all the environment variables set.
    This is arguably a bit of a hack.'''
    # All parameters are declared is_global=True, so they are shared
    # process-wide rather than attached to an individual task instance.
    local_scheduler = parameter.BooleanParameter(
        is_global=True, default=False,
        description='Use local scheduling')
    scheduler_host = parameter.Parameter(
        is_global=True,
        default=None,
        description='Hostname of machine running remote scheduler')
    scheduler_port = parameter.IntParameter(
        is_global=True, default=None,
        description='Port of remote scheduler api process')
    lock = parameter.BooleanParameter(
        is_global=True, default=False,
        description='(Deprecated, replaced by no_lock)'
        'Do not run if similar process is already running')
    lock_size = parameter.IntParameter(
        is_global=True, default=1,
        description="Maximum number of workers running the same command")
    no_lock = parameter.BooleanParameter(
        is_global=True, default=False,
        description='Ignore if similar process is already running')
    lock_pid_dir = parameter.Parameter(
        is_global=True, default='/var/tmp/luigi',
        description='Directory to store the pid file')
    workers = parameter.IntParameter(
        is_global=True, default=1,
        description='Maximum number of parallel tasks to run')
    logging_conf_file = parameter.Parameter(
        is_global=True, default=None,
        description='Configuration file for logging')
    module = parameter.Parameter(
        is_global=True, default=None,
        description='Used for dynamic loading of modules') # see DynamicArgParseInterface

    @classmethod
    def apply_config_defaults(cls):
        # Fill in defaults from the [core] section of the config file.  These
        # only take effect when the flag was not given on the command line.
        cls.scheduler_host.set_default(
            configuration.get_config().get(
                'core', 'default-scheduler-host', 'localhost'))
        cls.scheduler_port.set_default(
            configuration.get_config().get(
                'core', 'default-scheduler-port', 8082))
        cls.logging_conf_file.set_default(
            configuration.get_config().get(
                'core', 'logging_conf_file', None))

    @classmethod
    def env_params(cls, override_defaults):
        # Resolve config-file defaults, apply explicit overrides, and return
        # an instance carrying the final values of all global parameters.
        cls.apply_config_defaults()
        # Override any global parameter with whatever is in override_defaults
        for param_name, param_obj in cls.get_global_params():
            if param_name in override_defaults:
                param_obj.set_default(override_defaults[param_name])
        return cls() # instantiate an object with the global params set on it
def expose(cls):
    """Deprecated decorator kept for backwards compatibility.

    Tasks are auto-exposed now, so this simply returns *cls* unchanged
    after emitting a deprecation warning.
    """
    warnings.warn('expose is no longer used, everything is autoexposed', DeprecationWarning)
    return cls
def expose_main(cls):
    """Deprecated decorator kept for backwards compatibility.

    Returns *cls* unchanged; use ``luigi.run(..., main_task_cls=cls)``
    instead of decorating a main task.
    """
    warnings.warn('expose_main is no longer supported, use luigi.run(..., main_task_cls=cls) instead', DeprecationWarning)
    return cls
def reset():
    """Deprecated no-op retained for backwards compatibility; only warns."""
    warnings.warn('reset is no longer supported')
class WorkerSchedulerFactory(object):
    # Default factory that wires a worker to either a local in-process
    # scheduler or a remote scheduler reached over RPC.
    def create_local_scheduler(self):
        # In-process scheduler, used with --local-scheduler.
        return scheduler.CentralPlannerScheduler()
    def create_remote_scheduler(self, host, port):
        # RPC client talking to a central scheduler daemon.
        return rpc.RemoteScheduler(host=host, port=port)
    def create_worker(self, scheduler, worker_processes):
        # NOTE: the `scheduler` parameter shadows the `scheduler` module here.
        return worker.Worker(
            scheduler=scheduler, worker_processes=worker_processes)
class Interface(object):
    """Base class for command line interfaces that parse and run tasks."""

    def parse(self):
        # Subclasses return a list of task instances parsed from the cmdline.
        raise NotImplementedError

    @staticmethod
    def run(tasks, worker_scheduler_factory=None, override_defaults=None):
        """Schedule *tasks* on a worker and run them.

        :param tasks: list of task instances to schedule.
        :param worker_scheduler_factory: factory producing the scheduler and
            worker; defaults to :class:`WorkerSchedulerFactory`.
        :param override_defaults: optional dict overriding global parameter
            defaults by name.
        """
        if worker_scheduler_factory is None:
            worker_scheduler_factory = WorkerSchedulerFactory()
        # BUGFIX: use None as the default instead of a mutable default
        # argument ({}), which would be shared between calls.
        if override_defaults is None:
            override_defaults = {}
        env_params = EnvironmentParamsContainer.env_params(override_defaults)
        # search for logging configuration path first on the command line, then
        # in the application config file
        logging_conf = env_params.logging_conf_file
        if logging_conf is not None and not os.path.exists(logging_conf):
            raise Exception(
                "Error: Unable to locate specified logging configuration file!"
            )
        if not configuration.get_config().getboolean(
                'core', 'no_configure_logging', False):
            setup_interface_logging(logging_conf)
        if env_params.lock:
            warnings.warn(
                "The --lock flag is deprecated and will be removed."
                "Locking is now the default behavior."
                "Use --no-lock to override to not use lock",
                DeprecationWarning
            )
        # Unless --no-lock was given, refuse to run when an identical process
        # already holds the pid-file lock.
        if (not env_params.no_lock and
                not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))):
            sys.exit(1)
        if env_params.local_scheduler:
            sch = worker_scheduler_factory.create_local_scheduler()
        else:
            sch = worker_scheduler_factory.create_remote_scheduler(
                host=env_params.scheduler_host,
                port=env_params.scheduler_port)
        w = worker_scheduler_factory.create_worker(
            scheduler=sch, worker_processes=env_params.workers)
        for t in tasks:
            w.add(t)
        logger = logging.getLogger('luigi-interface')
        logger.info('Done scheduling tasks')
        w.run()
        w.stop()
class ErrorWrappedArgumentParser(argparse.ArgumentParser):
    ''' Wraps ArgumentParser's error message to suggested similar tasks
    '''

    # Simple unweighted Levenshtein distance
    def _editdistance(self, a, b):
        """Return the Levenshtein distance between strings *a* and *b*."""
        # BUGFIX: the original compared characters with `is` (identity),
        # which only works by accident of CPython's interning of short
        # strings, and returned 0 whenever `a` was empty because the seeded
        # first row was never consulted.
        prev_row = list(range(len(b) + 1))
        for i, char_a in enumerate(a):
            curr_row = [i + 1]
            for j, char_b in enumerate(b):
                substitution_cost = 0 if char_a == char_b else 1
                curr_row.append(min(curr_row[j] + 1,       # deletion
                                    prev_row[j + 1] + 1,   # insertion
                                    prev_row[j] + substitution_cost))
            prev_row = curr_row
        return prev_row[len(b)]

    def error(self, message):
        # Only intercept "invalid choice" errors, raised when the task name
        # on the command line matches no registered task; everything else is
        # delegated to argparse unchanged.
        result = re.match(r"argument .+: invalid choice: '(\w+)'.+", message)
        if result:
            arg = result.group(1)
            # NOTE: loop variable renamed so it no longer shadows the
            # module-level `task` import.
            weightedTasks = [(self._editdistance(arg, task_name), task_name)
                             for task_name in Register.get_reg().keys()]
            orderedTasks = sorted(weightedTasks, key=lambda pair: pair[0])
            # Suggest only names reasonably close to what was typed.
            candidates = [name for (dist, name) in orderedTasks if dist <= 5 and dist < len(name)]
            if candidates:
                displaystring = "No task %s. Did you mean:\n%s" % (arg, '\n'.join(candidates))
            else:
                displaystring = "No task %s." % arg
            super(ErrorWrappedArgumentParser, self).error(displaystring)
        else:
            super(ErrorWrappedArgumentParser, self).error(message)
class ArgParseInterface(Interface):
    ''' Takes the task as the command, with parameters specific to it
    '''
    @classmethod
    def add_parameter(cls, parser, param_name, param, prefix=None):
        # Register one luigi parameter as an argparse --flag; the help text is
        # built from the (optionally prefixed) name, the parameter description
        # and its default value.
        description = []
        if prefix:
            description.append('%s.%s' % (prefix, param_name))
        else:
            description.append(param_name)
        if param.description:
            description.append(param.description)
        if param.has_default:
            description.append(" [default: %s]" % (param.default,))
        # List parameters may be repeated on the cmdline; booleans become
        # plain on/off switches.
        if param.is_list:
            action = "append"
        elif param.is_boolean:
            action = "store_true"
        else:
            action = "store"
        parser.add_argument('--' + param_name.replace('_', '-'), help=' '.join(description), default=None, action=action)
    @classmethod
    def add_task_parameters(cls, parser, task_cls):
        for param_name, param in task_cls.get_nonglobal_params():
            cls.add_parameter(parser, param_name, param, task_cls.task_family)
    @classmethod
    def add_global_parameters(cls, parser):
        for param_name, param in Register.get_global_params():
            cls.add_parameter(parser, param_name, param)
    def parse_task(self, cmdline_args=None, main_task_cls=None):
        # Parse the command line into a single task instance (returned in a
        # one-element list).  Without main_task_cls, one subparser is created
        # per registered task family.
        parser = ErrorWrappedArgumentParser()
        self.add_global_parameters(parser)
        if main_task_cls:
            self.add_task_parameters(parser, main_task_cls)
        else:
            orderedtasks = '{%s}' % ','.join(sorted(Register.get_reg().keys()))
            subparsers = parser.add_subparsers(dest='command', metavar=orderedtasks)
            for name, cls in Register.get_reg().iteritems():
                subparser = subparsers.add_parser(name)
                if cls == Register.AMBIGUOUS_CLASS:
                    continue
                self.add_task_parameters(subparser, cls)
                # Add global params here as well so that we can support both:
                # test.py --global-param xyz Test --n 42
                # test.py Test --n 42 --global-param xyz
                self.add_global_parameters(subparser)
        args = parser.parse_args(args=cmdline_args)
        params = vars(args) # convert to a str -> str hash
        if main_task_cls:
            task_cls = main_task_cls
        else:
            task_cls = Register.get_reg()[args.command]
            if task_cls == Register.AMBIGUOUS_CLASS:
                raise Exception('%s is ambigiuous' % args.command)
        # Notice that this is not side effect free because it might set global params
        task = task_cls.from_input(params, Register.get_global_params())
        return [task]
    def parse(self, cmdline_args=None, main_task_cls=None):
        return self.parse_task(cmdline_args, main_task_cls)
class DynamicArgParseInterface(ArgParseInterface):
    """Variant of ArgParseInterface that loads task modules on the fly.

    Usage:
        python whatever.py --module foo_module FooTask --blah xyz --x 123

    The module named by --module is imported first (registering any tasks it
    defines), after which normal argparse task parsing takes over.
    """

    def parse(self, cmdline_args=None, main_task_cls=None):
        pre_parser = ErrorWrappedArgumentParser()
        self.add_global_parameters(pre_parser)
        # A partial parse: we only need --module here, everything else is
        # handled by parse_task below.
        known_args, _unknown = pre_parser.parse_known_args(args=cmdline_args)
        # Importing is done purely for its side effect of registering tasks.
        __import__(known_args.module)
        return self.parse_task(cmdline_args, main_task_cls)
class PassThroughOptionParser(optparse.OptionParser):
    '''
    An unknown option pass-through implementation of OptionParser.
    When unknown arguments are encountered, bundle with largs and try again,
    until rargs is depleted.
    sys.exit(status) will still be called if a known argument is passed
    incorrectly (e.g. missing arguments or bad argument types, etc.)
    '''
    def _process_args(self, largs, rargs, values):
        # Keep consuming until every remaining argument has been processed.
        while rargs:
            try:
                optparse.OptionParser._process_args(self, largs, rargs, values)
            # BUGFIX: `except (...), e` is Python-2-only syntax; the
            # `as e` form works on Python 2.6+ and Python 3.
            except (optparse.BadOptionError, optparse.AmbiguousOptionError) as e:
                # Unknown option: stash it in largs (the leftover args) and
                # resume parsing the rest instead of erroring out.
                largs.append(e.opt_str)
class OptParseInterface(Interface):
    ''' Supported for legacy reasons where it's necessary to interact with an existing parser.
    Takes the task using --task. All parameters to all possible tasks will be defined globally
    in a big unordered soup.
    '''
    def __init__(self, existing_optparse):
        # An existing OptionParser may be supplied so callers can mix their
        # own options with luigi's; None means build a fresh one.
        self.__existing_optparse = existing_optparse
    def parse(self, cmdline_args=None, main_task_cls=None):
        # Two-pass parse: a pass-through parser first extracts --task while
        # tolerating unknown options, then a real parser (with the chosen
        # task's parameters registered) parses everything strictly.
        global_params = list(Register.get_global_params())
        parser = PassThroughOptionParser()
        tasks_str = '/'.join(sorted([name for name in Register.get_reg()]))
        def add_task_option(p):
            if main_task_cls:
                p.add_option('--task', help='Task to run (' + tasks_str + ') [default: %default]', default=main_task_cls.task_family)
            else:
                p.add_option('--task', help='Task to run (%s)' % tasks_str)
        def _add_parameter(parser, param_name, param):
            # Mirrors ArgParseInterface.add_parameter, but for optparse.
            description = [param_name]
            if param.description:
                description.append(param.description)
            if param.has_default:
                description.append(" [default: %s]" % (param.default,))
            if param.is_list:
                action = "append"
            elif param.is_boolean:
                action = "store_true"
            else:
                action = "store"
            parser.add_option('--' + param_name.replace('_', '-'),
                              help=' '.join(description),
                              default=None,
                              action=action)
        for param_name, param in global_params:
            _add_parameter(parser, param_name, param)
        add_task_option(parser)
        # First pass: only --task matters here.
        options, args = parser.parse_args(args=cmdline_args)
        task_cls_name = options.task
        if self.__existing_optparse:
            parser = self.__existing_optparse
        else:
            parser = optparse.OptionParser()
        add_task_option(parser)
        if task_cls_name not in Register.get_reg():
            raise Exception('Error: %s is not a valid tasks (must be %s)' % (task_cls_name, tasks_str))
        # Register all parameters as a big mess
        task_cls = Register.get_reg()[task_cls_name]
        if task_cls == Register.AMBIGUOUS_CLASS:
            raise Exception('%s is ambiguous' % task_cls_name)
        params = task_cls.get_nonglobal_params()
        for param_name, param in global_params:
            _add_parameter(parser, param_name, param)
        for param_name, param in params:
            _add_parameter(parser, param_name, param)
        # Parse and run
        options, args = parser.parse_args(args=cmdline_args)
        # Everything except --task itself becomes a task parameter.
        params = {}
        for k, v in vars(options).iteritems():
            if k != 'task':
                params[k] = v
        task = task_cls.from_input(params, global_params)
        return [task]
class LuigiConfigParser(configuration.LuigiConfigParser):
    ''' Deprecated class, use configuration.LuigiConfigParser instead. Left for backwards compatibility '''
    # Intentionally empty: the subclass exists only so old imports keep working.
    pass
def run(cmdline_args=None, existing_optparse=None, use_optparse=False, main_task_cls=None, worker_scheduler_factory=None, use_dynamic_argparse=False):
    """Entry point for running luigi from the command line.

    argparse is the default parser.  optparse is supported for legacy callers
    that need to extend an existing option parser, and the dynamic argparse
    variant imports the module named by --module before parsing tasks.
    """
    if use_optparse:
        chosen_interface = OptParseInterface(existing_optparse)
    elif use_dynamic_argparse:
        chosen_interface = DynamicArgParseInterface()
    else:
        chosen_interface = ArgParseInterface()
    parsed_tasks = chosen_interface.parse(cmdline_args, main_task_cls=main_task_cls)
    chosen_interface.run(parsed_tasks, worker_scheduler_factory)
def build(tasks, worker_scheduler_factory=None, **env_params):
    """Run *tasks* programmatically, bypassing command line parsing.

    Example:
        luigi.build([MyTask1(), MyTask2()], local_scheduler=True)

    Unlike ``run``, ``build`` defaults to skipping the identical-process
    lock -- otherwise it could only be invoked once per process.
    """
    caller_controls_locking = "no_lock" in env_params or "lock" in env_params
    if not caller_controls_locking:
        env_params["no_lock"] = True
        env_params["lock"] = False
    Interface.run(tasks, worker_scheduler_factory, env_params)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import timedelta
from time import sleep
from unittest.mock import Mock, patch
from freezegun import freeze_time
from airflow.exceptions import AirflowException, AirflowRescheduleException, AirflowSensorTimeout
from airflow.models import DagBag, TaskInstance, TaskReschedule
from airflow.models.dag import DAG
from airflow.operators.dummy import DummyOperator
from airflow.sensors.base import BaseSensorOperator, poke_mode_only
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils import db
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
DUMMY_OP = 'dummy_op'
SENSOR_OP = 'sensor_op'
DEV_NULL = 'dev/null'
class DummySensor(BaseSensorOperator):
    """Minimal sensor for tests: poke() always reports the fixed value
    supplied at construction time."""
    def __init__(self, return_value=False, **kwargs):
        super().__init__(**kwargs)
        # Fixed outcome reported by every poke() call.
        self.return_value = return_value
    def poke(self, context):
        # `context` is ignored; the result is predetermined.
        return self.return_value
class TestBaseSensor(unittest.TestCase):
    """Tests for BaseSensorOperator: poke and reschedule modes, soft-fail,
    retries, timeouts, and poke_interval/timeout validation.

    Each test builds a tiny DAG of one DummySensor upstream of one
    DummyOperator, creates a manual DAG run, executes the sensor, and then
    inspects the resulting task-instance states and TaskReschedule rows.
    """
    @staticmethod
    def clean_db():
        # Wipe the tables the tests touch so each test starts from scratch.
        db.clear_db_runs()
        db.clear_db_task_reschedule()
        db.clear_db_xcom()
    def setUp(self):
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG(TEST_DAG_ID, default_args=args)
        self.clean_db()
    def tearDown(self) -> None:
        self.clean_db()
    def _make_dag_run(self):
        return self.dag.create_dagrun(
            run_type=DagRunType.MANUAL,
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
    def _make_sensor(self, return_value, task_id=SENSOR_OP, **kwargs):
        # poke_interval/timeout default to 0 so poke-mode tests complete in
        # a single poke unless the test overrides them.
        poke_interval = 'poke_interval'
        timeout = 'timeout'
        if poke_interval not in kwargs:
            kwargs[poke_interval] = 0
        if timeout not in kwargs:
            kwargs[timeout] = 0
        sensor = DummySensor(task_id=task_id, return_value=return_value, dag=self.dag, **kwargs)
        dummy_op = DummyOperator(task_id=DUMMY_OP, dag=self.dag)
        dummy_op.set_upstream(sensor)
        return sensor
    @classmethod
    def _run(cls, task):
        task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_ok(self):
        # Sensor succeeds on first poke; downstream dummy stays unscheduled.
        sensor = self._make_sensor(True)
        dr = self._make_dag_run()
        self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.SUCCESS)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_fail(self):
        # poke never returns True and timeout=0, so the sensor times out.
        sensor = self._make_sensor(False)
        dr = self._make_dag_run()
        with self.assertRaises(AirflowSensorTimeout):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.FAILED)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_soft_fail(self):
        # With soft_fail a timeout marks the task SKIPPED instead of FAILED.
        sensor = self._make_sensor(False, soft_fail=True)
        dr = self._make_dag_run()
        self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.SKIPPED)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_soft_fail_with_retries(self):
        sensor = self._make_sensor(
            return_value=False, soft_fail=True, retries=1, retry_delay=timedelta(milliseconds=1)
        )
        dr = self._make_dag_run()
        # first run fails and task instance is marked up to retry
        with self.assertRaises(AirflowSensorTimeout):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.UP_FOR_RETRY)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        sleep(0.001)
        # after retry DAG run is skipped
        self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.SKIPPED)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_ok_with_reschedule(self):
        # Three pokes: False, False, True -- each False poke must record a
        # TaskReschedule row and put the TI in UP_FOR_RESCHEDULE.
        sensor = self._make_sensor(return_value=None, poke_interval=10, timeout=25, mode='reschedule')
        sensor.poke = Mock(side_effect=[False, False, True])
        dr = self._make_dag_run()
        # first poke returns False and task is re-scheduled
        date1 = timezone.utcnow()
        with freeze_time(date1):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                # verify task is re-scheduled, i.e. state set to UP_FOR_RESCHEDULE
                self.assertEqual(ti.state, State.UP_FOR_RESCHEDULE)
                # verify task start date is the initial one
                self.assertEqual(ti.start_date, date1)
                # verify one row in task_reschedule table
                task_reschedules = TaskReschedule.find_for_task_instance(ti)
                self.assertEqual(len(task_reschedules), 1)
                self.assertEqual(task_reschedules[0].start_date, date1)
                self.assertEqual(
                    task_reschedules[0].reschedule_date, date1 + timedelta(seconds=sensor.poke_interval)
                )
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        # second poke returns False and task is re-scheduled
        date2 = date1 + timedelta(seconds=sensor.poke_interval)
        with freeze_time(date2):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                # verify task is re-scheduled, i.e. state set to UP_FOR_RESCHEDULE
                self.assertEqual(ti.state, State.UP_FOR_RESCHEDULE)
                # verify task start date is the initial one
                self.assertEqual(ti.start_date, date1)
                # verify two rows in task_reschedule table
                task_reschedules = TaskReschedule.find_for_task_instance(ti)
                self.assertEqual(len(task_reschedules), 2)
                self.assertEqual(task_reschedules[1].start_date, date2)
                self.assertEqual(
                    task_reschedules[1].reschedule_date, date2 + timedelta(seconds=sensor.poke_interval)
                )
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        # third poke returns True and task succeeds
        date3 = date2 + timedelta(seconds=sensor.poke_interval)
        with freeze_time(date3):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.SUCCESS)
                # verify task start date is the initial one
                self.assertEqual(ti.start_date, date1)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_fail_with_reschedule(self):
        # timeout (5s) is shorter than poke_interval (10s): the second poke
        # happens past the timeout and must fail the task.
        sensor = self._make_sensor(return_value=False, poke_interval=10, timeout=5, mode='reschedule')
        dr = self._make_dag_run()
        # first poke returns False and task is re-scheduled
        date1 = timezone.utcnow()
        with freeze_time(date1):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.UP_FOR_RESCHEDULE)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        # second poke returns False, timeout occurs
        date2 = date1 + timedelta(seconds=sensor.poke_interval)
        with freeze_time(date2):
            with self.assertRaises(AirflowSensorTimeout):
                self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.FAILED)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_soft_fail_with_reschedule(self):
        # Same timeout scenario as above, but soft_fail turns the timeout
        # into a SKIPPED state rather than FAILED.
        sensor = self._make_sensor(
            return_value=False, poke_interval=10, timeout=5, soft_fail=True, mode='reschedule'
        )
        dr = self._make_dag_run()
        # first poke returns False and task is re-scheduled
        date1 = timezone.utcnow()
        with freeze_time(date1):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.UP_FOR_RESCHEDULE)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        # second poke returns False, timeout occurs
        date2 = date1 + timedelta(seconds=sensor.poke_interval)
        with freeze_time(date2):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.SKIPPED)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_ok_with_reschedule_and_retry(self):
        # Reschedule mode combined with retries: a timeout triggers a retry
        # (new try_number) and the TaskReschedule history restarts per try.
        sensor = self._make_sensor(
            return_value=None,
            poke_interval=10,
            timeout=5,
            retries=1,
            retry_delay=timedelta(seconds=10),
            mode='reschedule',
        )
        sensor.poke = Mock(side_effect=[False, False, False, True])
        dr = self._make_dag_run()
        # first poke returns False and task is re-scheduled
        date1 = timezone.utcnow()
        with freeze_time(date1):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.UP_FOR_RESCHEDULE)
                # verify one row in task_reschedule table
                task_reschedules = TaskReschedule.find_for_task_instance(ti)
                self.assertEqual(len(task_reschedules), 1)
                self.assertEqual(task_reschedules[0].start_date, date1)
                self.assertEqual(
                    task_reschedules[0].reschedule_date, date1 + timedelta(seconds=sensor.poke_interval)
                )
                self.assertEqual(task_reschedules[0].try_number, 1)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        # second poke fails and task instance is marked up to retry
        date2 = date1 + timedelta(seconds=sensor.poke_interval)
        with freeze_time(date2):
            with self.assertRaises(AirflowSensorTimeout):
                self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.UP_FOR_RETRY)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        # third poke returns False and task is rescheduled again
        date3 = date2 + timedelta(seconds=sensor.poke_interval) + sensor.retry_delay
        with freeze_time(date3):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.UP_FOR_RESCHEDULE)
                # verify one row in task_reschedule table
                task_reschedules = TaskReschedule.find_for_task_instance(ti)
                self.assertEqual(len(task_reschedules), 1)
                self.assertEqual(task_reschedules[0].start_date, date3)
                self.assertEqual(
                    task_reschedules[0].reschedule_date, date3 + timedelta(seconds=sensor.poke_interval)
                )
                self.assertEqual(task_reschedules[0].try_number, 2)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        # fourth poke return True and task succeeds
        date4 = date3 + timedelta(seconds=sensor.poke_interval)
        with freeze_time(date4):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.SUCCESS)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_should_include_ready_to_reschedule_dep_in_reschedule_mode(self):
        sensor = self._make_sensor(True, mode='reschedule')
        deps = sensor.deps
        self.assertIn(ReadyToRescheduleDep(), deps)
    def test_should_not_include_ready_to_reschedule_dep_in_poke_mode(self):
        sensor = self._make_sensor(True)
        deps = sensor.deps
        self.assertNotIn(ReadyToRescheduleDep(), deps)
    def test_invalid_mode(self):
        with self.assertRaises(AirflowException):
            self._make_sensor(return_value=True, mode='foo')
    def test_ok_with_custom_reschedule_exception(self):
        # A sensor may control its own reschedule times by raising
        # AirflowRescheduleException with an explicit date.
        sensor = self._make_sensor(return_value=None, mode='reschedule')
        date1 = timezone.utcnow()
        date2 = date1 + timedelta(seconds=60)
        date3 = date1 + timedelta(seconds=120)
        sensor.poke = Mock(
            side_effect=[
                AirflowRescheduleException(date2),
                AirflowRescheduleException(date3),
                True,
            ]
        )
        dr = self._make_dag_run()
        # first poke returns False and task is re-scheduled
        with freeze_time(date1):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                # verify task is re-scheduled, i.e. state set to UP_FOR_RESCHEDULE
                self.assertEqual(ti.state, State.UP_FOR_RESCHEDULE)
                # verify one row in task_reschedule table
                task_reschedules = TaskReschedule.find_for_task_instance(ti)
                self.assertEqual(len(task_reschedules), 1)
                self.assertEqual(task_reschedules[0].start_date, date1)
                self.assertEqual(task_reschedules[0].reschedule_date, date2)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        # second poke returns False and task is re-scheduled
        with freeze_time(date2):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                # verify task is re-scheduled, i.e. state set to UP_FOR_RESCHEDULE
                self.assertEqual(ti.state, State.UP_FOR_RESCHEDULE)
                # verify two rows in task_reschedule table
                task_reschedules = TaskReschedule.find_for_task_instance(ti)
                self.assertEqual(len(task_reschedules), 2)
                self.assertEqual(task_reschedules[1].start_date, date2)
                self.assertEqual(task_reschedules[1].reschedule_date, date3)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
        # third poke returns True and task succeeds
        with freeze_time(date3):
            self._run(sensor)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                self.assertEqual(ti.state, State.SUCCESS)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_reschedule_with_test_mode(self):
        sensor = self._make_sensor(return_value=None, poke_interval=10, timeout=25, mode='reschedule')
        sensor.poke = Mock(side_effect=[False])
        dr = self._make_dag_run()
        # poke returns False and AirflowRescheduleException is raised
        date1 = timezone.utcnow()
        with freeze_time(date1):
            for date in self.dag.date_range(DEFAULT_DATE, end_date=DEFAULT_DATE):
                TaskInstance(sensor, date).run(ignore_ti_state=True, test_mode=True)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 2)
        for ti in tis:
            if ti.task_id == SENSOR_OP:
                # in test mode state is not modified
                self.assertEqual(ti.state, State.NONE)
                # in test mode no reschedule request is recorded
                task_reschedules = TaskReschedule.find_for_task_instance(ti)
                self.assertEqual(len(task_reschedules), 0)
            if ti.task_id == DUMMY_OP:
                self.assertEqual(ti.state, State.NONE)
    def test_sensor_with_invalid_poke_interval(self):
        # poke_interval must be a non-negative number; anything else raises
        # at construction time.
        negative_poke_interval = -10
        non_number_poke_interval = "abcd"
        positive_poke_interval = 10
        with self.assertRaises(AirflowException):
            self._make_sensor(
                task_id='test_sensor_task_1',
                return_value=None,
                poke_interval=negative_poke_interval,
                timeout=25,
            )
        with self.assertRaises(AirflowException):
            self._make_sensor(
                task_id='test_sensor_task_2',
                return_value=None,
                poke_interval=non_number_poke_interval,
                timeout=25,
            )
        self._make_sensor(
            task_id='test_sensor_task_3', return_value=None, poke_interval=positive_poke_interval, timeout=25
        )
    def test_sensor_with_invalid_timeout(self):
        # timeout must be a non-negative number, mirroring the poke_interval
        # validation above.
        negative_timeout = -25
        non_number_timeout = "abcd"
        positive_timeout = 25
        with self.assertRaises(AirflowException):
            self._make_sensor(
                task_id='test_sensor_task_1', return_value=None, poke_interval=10, timeout=negative_timeout
            )
        with self.assertRaises(AirflowException):
            self._make_sensor(
                task_id='test_sensor_task_2', return_value=None, poke_interval=10, timeout=non_number_timeout
            )
        self._make_sensor(
            task_id='test_sensor_task_3', return_value=None, poke_interval=10, timeout=positive_timeout
        )
def test_sensor_with_exponential_backoff_off(self):
sensor = self._make_sensor(return_value=None, poke_interval=5, timeout=60, exponential_backoff=False)
started_at = timezone.utcnow() - timedelta(seconds=10)
def run_duration():
return (timezone.utcnow - started_at).total_seconds()
self.assertEqual(sensor._get_next_poke_interval(started_at, run_duration, 1), sensor.poke_interval)
self.assertEqual(sensor._get_next_poke_interval(started_at, run_duration, 2), sensor.poke_interval)
def test_sensor_with_exponential_backoff_on(self):
sensor = self._make_sensor(return_value=None, poke_interval=5, timeout=60, exponential_backoff=True)
with patch('airflow.utils.timezone.utcnow') as mock_utctime:
mock_utctime.return_value = DEFAULT_DATE
started_at = timezone.utcnow() - timedelta(seconds=10)
def run_duration():
return (timezone.utcnow - started_at).total_seconds()
interval1 = sensor._get_next_poke_interval(started_at, run_duration, 1)
interval2 = sensor._get_next_poke_interval(started_at, run_duration, 2)
self.assertTrue(interval1 >= 0)
self.assertTrue(interval1 <= sensor.poke_interval)
self.assertTrue(interval2 >= sensor.poke_interval)
self.assertTrue(interval2 > interval1)
@poke_mode_only
class DummyPokeOnlySensor(BaseSensorOperator):
    """Sensor used to verify that @poke_mode_only rejects any attempt to
    switch the sensor into 'reschedule' mode."""
    def __init__(self, poke_changes_mode=False, **kwargs):
        # NOTE(review): `mode` is assigned before super().__init__ runs,
        # presumably so the decorator's mode check sees the requested value
        # up front -- confirm against the poke_mode_only implementation.
        self.mode = kwargs['mode']
        super().__init__(**kwargs)
        self.poke_changes_mode = poke_changes_mode
        self.return_value = True
    def poke(self, context):
        if self.poke_changes_mode:
            # Illegal under @poke_mode_only; tests assert this raises.
            self.change_mode('reschedule')
        return self.return_value
    def change_mode(self, mode):
        self.mode = mode
class TestPokeModeOnly(unittest.TestCase):
    """Tests for the @poke_mode_only class decorator: mode='poke' must be
    accepted everywhere, while any switch to 'reschedule' must raise."""
    def setUp(self):
        self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
        self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG(TEST_DAG_ID, default_args=self.args)
    def test_poke_mode_only_allows_poke_mode(self):
        # Construction, poking and mode changes are all fine while the mode
        # stays 'poke'.
        try:
            sensor = DummyPokeOnlySensor(task_id='foo', mode='poke', poke_changes_mode=False, dag=self.dag)
        except ValueError:
            self.fail("__init__ failed with mode='poke'.")
        try:
            sensor.poke({})
        except ValueError:
            self.fail("poke failed without changing mode from 'poke'.")
        try:
            sensor.change_mode('poke')
        except ValueError:
            self.fail("class method failed without changing mode from 'poke'.")
    def test_poke_mode_only_bad_class_method(self):
        # Switching to 'reschedule' after construction must raise.
        sensor = DummyPokeOnlySensor(task_id='foo', mode='poke', poke_changes_mode=False, dag=self.dag)
        with self.assertRaises(ValueError):
            sensor.change_mode('reschedule')
    def test_poke_mode_only_bad_init(self):
        # Constructing with mode='reschedule' must raise.
        with self.assertRaises(ValueError):
            DummyPokeOnlySensor(task_id='foo', mode='reschedule', poke_changes_mode=False, dag=self.dag)
    def test_poke_mode_only_bad_poke(self):
        # Switching modes from inside poke() must raise as well.
        sensor = DummyPokeOnlySensor(task_id='foo', mode='poke', poke_changes_mode=True, dag=self.dag)
        with self.assertRaises(ValueError):
            sensor.poke({})
| |
#!/usr/bin/env python
import os, sys, re, string, glob
from optparse import OptionParser
# Blacklist of classes and methods that are not implemented in the Java API.
# Used to exclude references to them from the generated @see tags.
# Entities listed here are skipped when emitting @see cross-references,
# because they have no Java-side counterpart.
JAVADOC_ENTITY_BLACK_LIST = set([
    "org.opencv.core.Core#abs",
    "org.opencv.core.Core#theRNG",
    "org.opencv.core.Core#extractImageCOI",
    "org.opencv.core.PCA",
    "org.opencv.core.SVD",
    "org.opencv.core.RNG",
    "org.opencv.imgproc.Imgproc#createMorphologyFilter",
    "org.opencv.imgproc.Imgproc#createLinearFilter",
    "org.opencv.imgproc.Imgproc#createSeparableLinearFilter",
    "org.opencv.imgproc.FilterEngine",
])
class JavadocGenerator(object):
def __init__(self, definitions = {}, modules= [], javadoc_marker = "//javadoc:"):
self.definitions = definitions
self.javadoc_marker = javadoc_marker
self.markers_processed = 0
self.markers_documented = 0
self.params_documented = 0
self.params_undocumented = 0
self.known_modules = modules
self.verbose = False
self.show_warnings = True
self.show_errors = True
def parceJavadocMarker(self, line):
assert line.lstrip().startswith(self.javadoc_marker)
offset = line[:line.find(self.javadoc_marker)]
line = line.strip()[len(self.javadoc_marker):]
args_start = line.rfind("(")
args_end = line.rfind(")")
assert args_start * args_end > 0
if args_start >= 0:
assert args_start < args_end
name = line[:args_start].strip()
if name.startswith("java"):
name = name[4:]
return (name, offset, filter(None, list(arg.strip() for arg in line[args_start+1:args_end].split(","))))
name = line.strip()
if name.startswith("java"):
name = name[4:]
return (name, offset, [])
def document(self, infile, outfile):
inf = open(infile, "rt")
outf = open(outfile, "wt")
module = os.path.splitext(os.path.basename(infile))[0].split("+")[0]
if module not in self.known_modules:
module = "unknown"
try:
for l in inf.readlines():
org = l
l = l.replace(" ", "").replace("\t", "")#remove all whitespace
if l.startswith(self.javadoc_marker):
marker = self.parceJavadocMarker(l)
self.markers_processed += 1
decl = self.definitions.get(marker[0],None)
if decl:
javadoc = self.makeJavadoc(decl, marker[2])
if self.verbose:
print
print "Javadoc for \"%s\" File: %s (line %s)" % (decl["name"], decl["file"], decl["line"])
print javadoc
for line in javadoc.split("\n"):
outf.write(marker[1] + line + "\n")
self.markers_documented += 1
elif self.show_errors:
print >> sys.stderr, "gen_javadoc error: could not find documentation for %s (module: %s)" % (l.lstrip()[len(self.javadoc_marker):-1].strip(), module)
else:
outf.write(org.replace("\t", " ").rstrip()+"\n")
except:
inf.close()
outf.close()
os.remove(outfile)
raise
else:
inf.close()
outf.close()
def FinishParagraph(self, text):
return text[:-1] + "</p>\n"
def ReformatForJavadoc(self, s):
out = ""
in_paragraph = False
in_list = False
for term in s.split("\n"):
in_list_item = False
if term.startswith("*"):
in_list_item = True
if in_paragraph:
out = self.FinishParagraph(out)
in_paragraph = False
if not in_list:
out += " * <ul>\n"
in_list = True
term = " <li>" + term[1:]
if term.startswith("#."):
in_list_item = True
if in_paragraph:
out = self.FinishParagraph(out)
in_paragraph = False
if not in_list:
out += " * <ul>\n"
in_list = True
term = " <li>" + term[2:]
if not term:
if in_paragraph:
out = self.FinishParagraph(out)
in_paragraph = False
out += " *\n"
else:
if in_list and not in_list_item:
in_list = False
if out.endswith(" *\n"):
out = out[:-3] + " * </ul>\n *\n"
else:
out += " * </ul>\n"
pos_start = 0
pos_end = min(77, len(term)-1)
while pos_start < pos_end:
if pos_end - pos_start == 77:
while pos_end >= pos_start+60:
if not term[pos_end].isspace():
pos_end -= 1
else:
break
if pos_end < pos_start+60:
pos_end = min(pos_start + 77, len(term)-1)
while pos_end < len(term):
if not term[pos_end].isspace():
pos_end += 1
else:
break
if in_paragraph or term.startswith("@") or in_list_item:
out += " * "
else:
in_paragraph = True
out += " * <p>"
out += term[pos_start:pos_end+1].rstrip() + "\n"
pos_start = pos_end + 1
pos_end = min(pos_start + 77, len(term)-1)
if in_paragraph:
out = self.FinishParagraph(out)
if in_list:
out += " * </ul>\n"
return out
def getJavaName(self, decl, methodSeparator = "."):
name = "org.opencv."
name += decl["module"]
if "class" in decl:
name += "." + decl["class"]
else:
name += "." + decl["module"].capitalize()
if "method" in decl:
name += methodSeparator + decl["method"]
return name
def getDocURL(self, decl):
url = "http://docs.opencv.org/modules/"
url += decl["module"]
url += "/doc/"
url += os.path.basename(decl["file"]).replace(".rst",".html")
url += "#" + decl["name"].replace("::","-").replace("()","").replace("=","").strip().rstrip("_").replace(" ","-").replace("_","-").lower()
return url
def makeJavadoc(self, decl, args = None):
doc = ""
prefix = "/**\n"
if decl.get("isclass", False):
decl_type = "class"
elif decl.get("isstruct", False):
decl_type = "struct"
elif "class" in decl:
decl_type = "method"
else:
decl_type = "function"
# brief goes first
if "brief" in decl:
doc += prefix + self.ReformatForJavadoc(decl["brief"])
prefix = " *\n"
elif "long" not in decl:
if self.show_warnings:
print >> sys.stderr, "gen_javadoc warning: no description for " + decl_type + " \"%s\" File: %s (line %s)" % (func["name"], func["file"], func["line"])
doc += prefix + self.ReformatForJavadoc("This " + decl_type + " is undocumented")
prefix = " *\n"
# long goes after brief
if "long" in decl:
doc += prefix + self.ReformatForJavadoc(decl["long"])
prefix = " *\n"
# @param tags
if args and (decl_type == "method" or decl_type == "function"):
documented_params = decl.get("params",{})
for arg in args:
arg_doc = documented_params.get(arg, None)
if not arg_doc:
arg_doc = "a " + arg
if self.show_warnings:
print >> sys.stderr, "gen_javadoc warning: parameter \"%s\" of \"%s\" is undocumented. File: %s (line %s)" % (arg, decl["name"], decl["file"], decl["line"])
self.params_undocumented += 1
else:
self.params_documented += 1
doc += prefix + self.ReformatForJavadoc("@param " + arg + " " + arg_doc)
prefix = ""
prefix = " *\n"
# @see tags
# always link to documentation
doc += prefix + " * @see <a href=\"" + self.getDocURL(decl) + "\">" + self.getJavaName(decl) + "</a>\n"
prefix = ""
# other links
if "seealso" in decl:
for see in decl["seealso"]:
seedecl = self.definitions.get(see,None)
if seedecl:
javadoc_name = self.getJavaName(seedecl, "#")
if (javadoc_name not in JAVADOC_ENTITY_BLACK_LIST):
doc += prefix + " * @see " + javadoc_name + "\n"
prefix = " *\n"
#doc += prefix + " * File: " + decl["file"] + " (line " + str(decl["line"]) + ")\n"
return (doc + " */").replace("::",".")
def printSummary(self):
print "Javadoc Generator Summary:"
print " Total markers: %s" % self.markers_processed
print " Undocumented markers: %s" % (self.markers_processed - self.markers_documented)
print " Generated comments: %s" % self.markers_documented
print
print " Documented params: %s" % self.params_documented
print " Undocumented params: %s" % self.params_undocumented
print
if __name__ == "__main__":
    # Make this script's folder and the C++ header parser folder importable
    # before the deferred imports below.
    selfpath = os.path.dirname(os.path.abspath(sys.argv[0]))
    hdr_parser_path = os.path.join(selfpath, "../../python/src2")
    sys.path.append(selfpath)
    sys.path.append(hdr_parser_path)
    import hdr_parser
    import rst_parser
    parser = OptionParser()
    parser.add_option("-v", "--verbose", dest="verbose", help="Print verbose log to stdout", action="store_true", default=False)
    parser.add_option("", "--no-warnings", dest="warnings", help="Hide warning messages", action="store_false", default=True)
    parser.add_option("", "--no-errors", dest="errors", help="Hide error messages", action="store_false", default=True)
    parser.add_option("", "--modules", dest="modules", help="comma-separated list of modules to generate comments", metavar="MODS", default=",".join(rst_parser.allmodules))
    # NOTE(review): parse_args is given the full sys.argv, so args[0] is the
    # script path itself; the len(args) < 2 check below accounts for that.
    (options, args) = parser.parse_args(sys.argv)
    options.modules = options.modules.split(",")
    if len(args) < 2 or len(options.modules) < 1:
        parser.print_help()
        exit(0)
    # ``parser`` is rebound here from the OptionParser to the RstParser, which
    # collects the documentation definitions for every requested module.
    parser = rst_parser.RstParser(hdr_parser.CppHeaderParser())
    for m in options.modules:
        parser.parse(m, os.path.join(selfpath, "../../" + m))
    parser.printSummary()
    generator = JavadocGenerator(parser.definitions, options.modules)
    generator.verbose = options.verbose
    generator.show_warnings = options.warnings
    generator.show_errors = options.errors
    # Document every *.java file in each given folder; output goes to the
    # current working directory as "<name>-jdoc.java" (already-generated
    # "-jdoc.java" files are skipped).
    for path in args:
        folder = os.path.abspath(path)
        for jfile in [f for f in glob.glob(os.path.join(folder,"*.java")) if not f.endswith("-jdoc.java")]:
            outfile = os.path.abspath(os.path.basename(jfile).replace(".java", "-jdoc.java"))
            generator.document(jfile, outfile)
    generator.printSummary()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense Bayesian layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.bayesflow.python.ops import layers_dense_variational_impl as prob_layers_lib
from tensorflow.contrib.bayesflow.python.ops import layers_util as prob_layers_util
from tensorflow.contrib.distributions.python.ops import independent as independent_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.platform import test
class Counter(object):
  """Helper class to manage incrementing a counting `int`."""

  def __init__(self):
    # Begin one below zero so the first invocation returns 0.
    self._value = -1

  @property
  def value(self):
    """The last value handed out (-1 if never called)."""
    return self._value

  def __call__(self):
    """Bump the counter by one and return the new value."""
    next_value = self._value + 1
    self._value = next_value
    return next_value
class MockDistribution(independent_lib.Independent):
  """Monitors layer calls to the underlying distribution."""

  def __init__(self, result_sample, result_log_prob, loc=None, scale=None):
    # NOTE: super().__init__ is deliberately not called; this mock only
    # provides the pieces of the distribution API the layers touch.
    self.result_sample = result_sample
    self.result_log_prob = result_log_prob
    self.result_loc = loc
    self.result_scale = scale
    # Default to a standard normal; replaced when both loc and scale are set.
    self.result_distribution = normal_lib.Normal(loc=0.0, scale=1.0)
    if loc is not None and scale is not None:
      self.result_distribution = normal_lib.Normal(loc=self.result_loc,
                                                   scale=self.result_scale)
    # Per-member call counters, inspected by the tests.
    self.called_log_prob = Counter()
    self.called_sample = Counter()
    self.called_loc = Counter()
    self.called_scale = Counter()

  def log_prob(self, *args, **kwargs):
    """Records the call, then returns the canned log-prob value."""
    self.called_log_prob()
    return self.result_log_prob

  def sample(self, *args, **kwargs):
    """Records the call, then returns the canned sample value."""
    self.called_sample()
    return self.result_sample

  @property
  def distribution(self):  # for dummy check on Independent(Normal)
    return self.result_distribution

  @property
  def loc(self):
    self.called_loc()
    return self.result_loc

  @property
  def scale(self):
    self.called_scale()
    return self.result_scale
class MockKLDivergence(object):
  """Monitors layer calls to the divergence implementation."""

  def __init__(self, result):
    self.result = result
    self.args = []           # positional arguments seen on each invocation
    self.called = Counter()  # invocation counter

  def __call__(self, *args, **kwargs):
    """Records the invocation, then returns the canned divergence value."""
    self.called()
    self.args.append(args)
    return self.result
class DenseVariational(test.TestCase):
  """Exercises the variational dense layers against mocked distributions."""

  def _testKLPenaltyKernel(self, layer_class):
    # Building the layer (first call) should register exactly one
    # regularization loss: the kernel KL penalty.
    with self.test_session():
      layer = layer_class(units=2)
      inputs = random_ops.random_uniform([2, 3], seed=1)
      # No keys.
      losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(losses), 0)
      self.assertListEqual(layer.losses, losses)
      _ = layer(inputs)
      # Yes keys.
      losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(losses), 1)
      self.assertListEqual(layer.losses, losses)

  def _testKLPenaltyBoth(self, layer_class):
    # With a bias posterior and prior supplied as well, building the layer
    # should register two regularization losses (kernel KL and bias KL).
    def _make_normal(dtype, *args):  # pylint: disable=unused-argument
      return normal_lib.Normal(
          loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.))
    with self.test_session():
      layer = layer_class(
          units=2,
          bias_posterior_fn=prob_layers_util.default_mean_field_normal_fn(),
          bias_prior_fn=_make_normal)
      inputs = random_ops.random_uniform([2, 3], seed=1)
      # No keys.
      losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(losses), 0)
      self.assertListEqual(layer.losses, losses)
      _ = layer(inputs)
      # Yes keys.
      losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
      self.assertEqual(len(losses), 2)
      self.assertListEqual(layer.losses, losses)

  def _testDenseSetUp(self, layer_class, batch_size, in_size, out_size,
                      **kwargs):
    """Builds a `layer_class` layer wired to mock distributions/divergences.

    Returns the mocks, the layer, its input/output tensors, and the collected
    KL penalties so each test can assert on the pieces it cares about.
    """
    seed = Counter()
    inputs = random_ops.random_uniform([batch_size, in_size], seed=seed())
    kernel_size = [in_size, out_size]
    kernel_posterior = MockDistribution(
        loc=random_ops.random_uniform(kernel_size, seed=seed()),
        scale=random_ops.random_uniform(kernel_size, seed=seed()),
        result_log_prob=random_ops.random_uniform(kernel_size, seed=seed()),
        result_sample=random_ops.random_uniform(kernel_size, seed=seed()))
    kernel_prior = MockDistribution(
        result_log_prob=random_ops.random_uniform(kernel_size, seed=seed()),
        result_sample=random_ops.random_uniform(kernel_size, seed=seed()))
    kernel_divergence = MockKLDivergence(
        result=random_ops.random_uniform(kernel_size, seed=seed()))
    bias_size = [out_size]
    bias_posterior = MockDistribution(
        result_log_prob=random_ops.random_uniform(bias_size, seed=seed()),
        result_sample=random_ops.random_uniform(bias_size, seed=seed()))
    bias_prior = MockDistribution(
        result_log_prob=random_ops.random_uniform(bias_size, seed=seed()),
        result_sample=random_ops.random_uniform(bias_size, seed=seed()))
    bias_divergence = MockKLDivergence(
        result=random_ops.random_uniform(bias_size, seed=seed()))
    # Fixed sampling seeds (42/43) keep the layer's draws reproducible.
    layer = layer_class(
        units=out_size,
        kernel_posterior_fn=lambda *args: kernel_posterior,
        kernel_posterior_tensor_fn=lambda d: d.sample(seed=42),
        kernel_prior_fn=lambda *args: kernel_prior,
        kernel_divergence_fn=kernel_divergence,
        bias_posterior_fn=lambda *args: bias_posterior,
        bias_posterior_tensor_fn=lambda d: d.sample(seed=43),
        bias_prior_fn=lambda *args: bias_prior,
        bias_divergence_fn=bias_divergence,
        **kwargs)
    outputs = layer(inputs)
    kl_penalty = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    return (kernel_posterior, kernel_prior, kernel_divergence,
            bias_posterior, bias_prior, bias_divergence,
            layer, inputs, outputs, kl_penalty)

  def testKLPenaltyKernelReparameterization(self):
    self._testKLPenaltyKernel(prob_layers_lib.DenseReparameterization)

  def testKLPenaltyKernelLocalReparameterization(self):
    self._testKLPenaltyKernel(prob_layers_lib.DenseLocalReparameterization)

  def testKLPenaltyKernelFlipout(self):
    self._testKLPenaltyKernel(prob_layers_lib.DenseFlipout)

  def testKLPenaltyBothReparameterization(self):
    self._testKLPenaltyBoth(prob_layers_lib.DenseReparameterization)

  def testKLPenaltyBothLocalReparameterization(self):
    self._testKLPenaltyBoth(prob_layers_lib.DenseLocalReparameterization)

  def testKLPenaltyBothFlipout(self):
    self._testKLPenaltyBoth(prob_layers_lib.DenseFlipout)

  def testDenseReparameterization(self):
    batch_size, in_size, out_size = 2, 3, 4
    with self.test_session() as sess:
      (kernel_posterior, kernel_prior, kernel_divergence,
       bias_posterior, bias_prior, bias_divergence, layer, inputs,
       outputs, kl_penalty) = self._testDenseSetUp(
           prob_layers_lib.DenseReparameterization,
           batch_size, in_size, out_size)
      # Reparameterization: output is inputs @ sampled kernel + sampled bias.
      expected_outputs = (
          math_ops.matmul(inputs, kernel_posterior.result_sample) +
          bias_posterior.result_sample)
      [
          expected_outputs_, actual_outputs_,
          expected_kernel_, actual_kernel_,
          expected_kernel_divergence_, actual_kernel_divergence_,
          expected_bias_, actual_bias_,
          expected_bias_divergence_, actual_bias_divergence_,
      ] = sess.run([
          expected_outputs, outputs,
          kernel_posterior.result_sample, layer.kernel_posterior_tensor,
          kernel_divergence.result, kl_penalty[0],
          bias_posterior.result_sample, layer.bias_posterior_tensor,
          bias_divergence.result, kl_penalty[1],
      ])
      self.assertAllClose(
          expected_kernel_, actual_kernel_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_bias_, actual_bias_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_outputs_, actual_outputs_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_kernel_divergence_, actual_kernel_divergence_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_bias_divergence_, actual_bias_divergence_,
          rtol=1e-6, atol=0.)
      # The divergence fns must have been called with (posterior, prior, sample).
      self.assertAllEqual(
          [[kernel_posterior.distribution,
            kernel_prior.distribution,
            kernel_posterior.result_sample]],
          kernel_divergence.args)
      self.assertAllEqual(
          [[bias_posterior.distribution,
            bias_prior.distribution,
            bias_posterior.result_sample]],
          bias_divergence.args)

  def testDenseLocalReparameterization(self):
    batch_size, in_size, out_size = 2, 3, 4
    with self.test_session() as sess:
      (kernel_posterior, kernel_prior, kernel_divergence,
       bias_posterior, bias_prior, bias_divergence, layer, inputs,
       outputs, kl_penalty) = self._testDenseSetUp(
           prob_layers_lib.DenseLocalReparameterization,
           batch_size, in_size, out_size)
      # Local reparameterization samples the pre-activation distribution:
      # mean = inputs @ loc, stddev = sqrt(inputs^2 @ scale^2).
      expected_kernel_posterior_affine = normal_lib.Normal(
          loc=math_ops.matmul(inputs, kernel_posterior.result_loc),
          scale=math_ops.matmul(
              inputs**2., kernel_posterior.result_scale**2)**0.5)
      expected_kernel_posterior_affine_tensor = (
          expected_kernel_posterior_affine.sample(seed=42))
      expected_outputs = (expected_kernel_posterior_affine_tensor +
                          bias_posterior.result_sample)
      [
          expected_outputs_, actual_outputs_,
          expected_kernel_divergence_, actual_kernel_divergence_,
          expected_bias_, actual_bias_,
          expected_bias_divergence_, actual_bias_divergence_,
      ] = sess.run([
          expected_outputs, outputs,
          kernel_divergence.result, kl_penalty[0],
          bias_posterior.result_sample, layer.bias_posterior_tensor,
          bias_divergence.result, kl_penalty[1],
      ])
      self.assertAllClose(
          expected_bias_, actual_bias_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_outputs_, actual_outputs_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_kernel_divergence_, actual_kernel_divergence_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_bias_divergence_, actual_bias_divergence_,
          rtol=1e-6, atol=0.)
      # No kernel sample exists in this scheme, hence the None third argument.
      self.assertAllEqual(
          [[kernel_posterior.distribution,
            kernel_prior.distribution,
            None]],
          kernel_divergence.args)
      self.assertAllEqual(
          [[bias_posterior.distribution,
            bias_prior.distribution,
            bias_posterior.result_sample]],
          bias_divergence.args)

  def testDenseFlipout(self):
    batch_size, in_size, out_size = 2, 3, 4
    with self.test_session() as sess:
      (kernel_posterior, kernel_prior, kernel_divergence,
       bias_posterior, bias_prior, bias_divergence, layer, inputs,
       outputs, kl_penalty) = self._testDenseSetUp(
           prob_layers_lib.DenseFlipout,
           batch_size, in_size, out_size, seed=44)
      # Flipout perturbation: zero-mean posterior noise modulated by
      # per-example random sign flips on inputs and outputs.
      expected_kernel_posterior_affine = normal_lib.Normal(
          loc=array_ops.zeros_like(kernel_posterior.result_loc),
          scale=kernel_posterior.result_scale)
      expected_kernel_posterior_affine_tensor = (
          expected_kernel_posterior_affine.sample(seed=42))
      sign_input = random_ops.random_uniform(
          [batch_size, in_size],
          minval=0,
          maxval=2,
          dtype=dtypes.int32,
          seed=layer.seed)
      sign_input = math_ops.cast(2 * sign_input - 1, inputs.dtype)
      sign_output = random_ops.random_uniform(
          [batch_size, out_size],
          minval=0,
          maxval=2,
          dtype=dtypes.int32,
          seed=distribution_util.gen_new_seed(
              layer.seed, salt="dense_flipout"))
      sign_output = math_ops.cast(2 * sign_output - 1, inputs.dtype)
      perturbed_inputs = math_ops.matmul(
          inputs * sign_input, expected_kernel_posterior_affine_tensor)
      perturbed_inputs *= sign_output
      expected_outputs = math_ops.matmul(inputs, kernel_posterior.result_loc)
      expected_outputs += perturbed_inputs
      expected_outputs += bias_posterior.result_sample
      [
          expected_outputs_, actual_outputs_,
          expected_kernel_divergence_, actual_kernel_divergence_,
          expected_bias_, actual_bias_,
          expected_bias_divergence_, actual_bias_divergence_,
      ] = sess.run([
          expected_outputs, outputs,
          kernel_divergence.result, kl_penalty[0],
          bias_posterior.result_sample, layer.bias_posterior_tensor,
          bias_divergence.result, kl_penalty[1],
      ])
      self.assertAllClose(
          expected_bias_, actual_bias_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_outputs_, actual_outputs_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_kernel_divergence_, actual_kernel_divergence_,
          rtol=1e-6, atol=0.)
      self.assertAllClose(
          expected_bias_divergence_, actual_bias_divergence_,
          rtol=1e-6, atol=0.)
      self.assertAllEqual(
          [[kernel_posterior.distribution, kernel_prior.distribution, None]],
          kernel_divergence.args)
      self.assertAllEqual(
          [[bias_posterior.distribution,
            bias_prior.distribution,
            bias_posterior.result_sample]],
          bias_divergence.args)

  def testRandomDenseFlipout(self):
    batch_size, in_size, out_size = 2, 3, 4
    with self.test_session() as sess:
      seed = Counter()
      inputs = random_ops.random_uniform([batch_size, in_size], seed=seed())
      kernel_posterior = MockDistribution(
          loc=random_ops.random_uniform(
              [in_size, out_size], seed=seed()),
          scale=random_ops.random_uniform(
              [in_size, out_size], seed=seed()),
          result_log_prob=random_ops.random_uniform(
              [in_size, out_size], seed=seed()),
          result_sample=random_ops.random_uniform(
              [in_size, out_size], seed=seed()))
      bias_posterior = MockDistribution(
          loc=random_ops.random_uniform(
              [out_size], seed=seed()),
          scale=random_ops.random_uniform(
              [out_size], seed=seed()),
          result_log_prob=random_ops.random_uniform(
              [out_size], seed=seed()),
          result_sample=random_ops.random_uniform(
              [out_size], seed=seed()))
      # Two layers that differ only in their flipout seed (44 vs 45).
      layer_one = prob_layers_lib.DenseFlipout(
          units=out_size,
          kernel_posterior_fn=lambda *args: kernel_posterior,
          kernel_posterior_tensor_fn=lambda d: d.sample(seed=42),
          bias_posterior_fn=lambda *args: bias_posterior,
          bias_posterior_tensor_fn=lambda d: d.sample(seed=43),
          seed=44)
      layer_two = prob_layers_lib.DenseFlipout(
          units=out_size,
          kernel_posterior_fn=lambda *args: kernel_posterior,
          kernel_posterior_tensor_fn=lambda d: d.sample(seed=42),
          bias_posterior_fn=lambda *args: bias_posterior,
          bias_posterior_tensor_fn=lambda d: d.sample(seed=43),
          seed=45)
      outputs_one = layer_one(inputs)
      outputs_two = layer_two(inputs)
      outputs_one_, outputs_two_ = sess.run([
          outputs_one, outputs_two])
      # Different seeds must perturb differently: fewer than out_size entries
      # may coincide between the two outputs.
      self.assertLess(np.sum(np.isclose(outputs_one_, outputs_two_)), out_size)
if __name__ == "__main__":
test.main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._subscription_policy_operations import build_add_update_policy_for_tenant_request, build_get_policy_for_tenant_request, build_list_policy_for_tenant_request
T = TypeVar('T')
# Optional per-call callback: receives (pipeline response, deserialized body,
# and a dict — presumably response headers; verify against azure-core docs)
# and may transform the value that the operation returns.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionPolicyOperations:
    """SubscriptionPolicyOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.subscription.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def add_update_policy_for_tenant(
        self,
        body: "_models.PutTenantPolicyRequestProperties",
        **kwargs: Any
    ) -> "_models.GetTenantPolicyResponse":
        """Create or Update Subscription tenant policy for user's tenant.

        :param body: The tenant policy properties to create or update.
        :type body: ~azure.mgmt.subscription.models.PutTenantPolicyRequestProperties
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GetTenantPolicyResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.subscription.models.GetTenantPolicyResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GetTenantPolicyResponse"]
        # Map the well-known error status codes; callers can extend/override
        # the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(body, 'PutTenantPolicyRequestProperties')

        request = build_add_update_policy_for_tenant_request(
            content_type=content_type,
            json=_json,
            template_url=self.add_update_policy_for_tenant.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponseBody, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('GetTenantPolicyResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    add_update_policy_for_tenant.metadata = {'url': '/providers/Microsoft.Subscription/policies/default'}  # type: ignore

    @distributed_trace_async
    async def get_policy_for_tenant(
        self,
        **kwargs: Any
    ) -> "_models.GetTenantPolicyResponse":
        """Get the subscription tenant policy for the user's tenant.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GetTenantPolicyResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.subscription.models.GetTenantPolicyResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GetTenantPolicyResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_policy_for_tenant_request(
            template_url=self.get_policy_for_tenant.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponseBody, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('GetTenantPolicyResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_policy_for_tenant.metadata = {'url': '/providers/Microsoft.Subscription/policies/default'}  # type: ignore

    @distributed_trace
    def list_policy_for_tenant(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.GetTenantPolicyListResponse"]:
        """Get the subscription tenant policy for the user's tenant.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GetTenantPolicyListResponse or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.subscription.models.GetTenantPolicyListResponse]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GetTenantPolicyListResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages follow the
            # server-provided next_link.
            if not next_link:
                request = build_list_policy_for_tenant_request(
                    template_url=self.list_policy_for_tenant.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_policy_for_tenant_request(
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("GetTenantPolicyListResponse", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponseBody, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_policy_for_tenant.metadata = {'url': '/providers/Microsoft.Subscription/policies'}  # type: ignore
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module customizes `test_combinations` for `tf.distribute.Strategy`.
Additionally it provides `generate()`, `combine()` and `times()` with
`tf.distribute.Strategy` customizations as a default.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import re
import sys
import types
import unittest
from absl import app
import six
from tensorflow.python.client import session
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations as framework_combinations
from tensorflow.python.framework import config
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_combinations as combinations_lib
from tensorflow.python.framework import test_util
from tensorflow.python.platform import flags
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# TODO(rchao): Rename `distribution` parameter to `strategy` or
# `distribute_strategy` in all tests.
class DistributionParameter(combinations_lib.ParameterModifier):
  """Replaces `NamedDistribution` arguments with their strategies.

  Every keyword argument whose value is a `NamedDistribution` is mapped to
  the `tf.distribute.Strategy` produced by its `strategy` property.
  """

  def modified_arguments(self, kwargs, requested_parameters):
    # `use_var_policy` is a temporary knob for the variable-policy rollout;
    # when truthy, it is stamped onto each strategy's extended object.
    var_policy = kwargs.get("use_var_policy", None)
    replaced = {}
    for arg_name, arg_value in kwargs.items():
      if not isinstance(arg_value, NamedDistribution):
        continue
      strategy = arg_value.strategy
      if var_policy:
        strategy.extended._use_var_policy = var_policy
      replaced[arg_name] = strategy
    return replaced
class ClusterParameters(combinations_lib.ParameterModifier):
  """Fills in cluster parameters from a `NamedDistribution`, if present.

  Must run before `DistributionParameter`, which replaces the
  `NamedDistribution` object this modifier reads the cluster topology from.
  """

  def modified_arguments(self, kwargs, requested_parameters):
    named = None
    for value in kwargs.values():
      if isinstance(value, NamedDistribution):
        # At most one multi-worker NamedDistribution is supported per test.
        if named is not None and _num_total_workers(value.has_chief,
                                                    value.num_workers) > 1:
          raise ValueError("Only support one NamedDistribution for multi worker"
                           "tests.")
        named = value
    if named:
      has_chief = named.has_chief
      num_workers = named.num_workers
      runner = named.runner
      share_gpu = named.share_gpu
      num_ps = named.num_ps
      # Explicit kwargs must agree with what the strategy declares.
      if "has_chief" in kwargs and kwargs["has_chief"] != has_chief:
        raise ValueError(
            "both has_chief and strategy specified but are not compatible")
      if "num_workers" in kwargs and kwargs["num_workers"] != num_workers:
        raise ValueError(
            "both num_workers and strategy specified but are not compatible")
    else:
      has_chief = kwargs.get("has_chief", False)
      num_workers = kwargs.get("num_workers", 1)
      runner = kwargs.get("runner", None)
      share_gpu = kwargs.get("share_gpu", True)
      num_ps = kwargs.get("num_ps", 0)
    # Cluster parameters are always populated when requested, so generate()
    # also works for combinations without any strategy.
    update = {}
    for param, value in (("has_chief", has_chief),
                         ("num_workers", num_workers),
                         ("runner", runner),
                         ("share_gpu", share_gpu),
                         ("num_ps", num_ps)):
      if param in requested_parameters:
        update[param] = value
    return update
class DistributionCombination(combinations_lib.TestCombination):
  """Sets up distribution strategy for tests."""

  def should_execute_combination(self, kwargs):
    # Under XLA, skip any combination whose strategy opts out via no_xla.
    if test_util.is_xla_enabled():
      for value in kwargs.values():
        if isinstance(value, NamedDistribution) and value.no_xla:
          return (
              False,
              "n/a: skipping strategy combination with no_xla=True in XLA tests")
    return (True, None)

  def parameter_modifiers(self):
    return [
        DistributionParameter(),
        combinations_lib.OptionalParameter("use_var_policy"),
    ]
class ClusterCombination(combinations_lib.TestCombination):
  """Sets up multi worker tests."""

  def parameter_modifiers(self):
    # ClusterParameters extracts the cluster topology (has_chief, num_workers,
    # runner, share_gpu, num_ps) from a NamedDistribution argument, if any.
    return [ClusterParameters()]
class GPUCombination(combinations_lib.TestCombination):
  """Enable tests to request GPU hardware and skip non-GPU combinations.

  This class expects test_combinations to be generated with `NamedDistribution`
  wrapping instances of `tf.distribute.Strategy`.

  Optionally, the `required_gpus` argument is supported. GPU hardware is
  required, if its value is `True` or > 0. Only one of `required_gpus` (total
  logical GPUs) and `required_physical_gpus` may be set.

  Attributes:
    GPU_TEST: The environment is considered to have GPU hardware available if
      the name of the program contains "test_gpu" or "test_xla_gpu".
  """
  GPU_TEST = re.search(r"(test_2?gpu|test_xla_2?gpu)$", sys.argv[0])

  def should_execute_combination(self, kwargs):
    """Returns (should_run, reason) given the combination's GPU requirements."""
    distributions = [
        v for v in kwargs.values() if isinstance(v, NamedDistribution)
    ]
    required_gpus = kwargs.get("required_gpus", 0)
    required_physical_gpus = kwargs.get("required_physical_gpus", 0)
    if distributions and required_gpus:
      raise ValueError("Do not use `required_gpus` and arguments of type "
                       "NamedDistribution together.")
    # Validate mutually-exclusive kwargs before doing any work with them.
    if required_physical_gpus and required_gpus:
      raise ValueError("Only one of `required_physical_gpus`(number of physical"
                       " GPUs required) and `required_gpus`(total number of "
                       "GPUs required) should be set. ")
    number_of_required_gpus = max(
        [required_gpus] + [required_physical_gpus] +
        [d.required_physical_gpus or 0 for d in distributions] +
        [d.required_gpus or 0 for d in distributions])
    number_of_required_physical_gpus = max(
        [required_physical_gpus] +
        [d.required_physical_gpus or 0 for d in distributions])
    if not number_of_required_gpus and GPUCombination.GPU_TEST:
      return (False, "Test that doesn't require GPUs.")
    elif (number_of_required_gpus > 0
          and context.num_gpus() < number_of_required_gpus):
      return (False, ("Only {} of {} required GPUs are available.".format(
          context.num_gpus(), number_of_required_gpus)))
    elif number_of_required_physical_gpus > len(
        config.list_physical_devices("GPU")):
      # Bug fix: report the *count* of available physical GPUs (the original
      # formatted the raw device list) and the computed requirement (the
      # original reported only the `required_physical_gpus` kwarg, ignoring
      # requirements coming from NamedDistribution).
      return (False,
              ("Only {} of {} required physical GPUs are available.".format(
                  len(config.list_physical_devices("GPU")),
                  number_of_required_physical_gpus)))
    else:
      return (True, None)

  def parameter_modifiers(self):
    return [combinations_lib.OptionalParameter("required_gpus"),
            combinations_lib.OptionalParameter("required_physical_gpus")]
class TPUCombination(combinations_lib.TestCombination):
  """Allow to request TPU hardware and skip non-TPU combinations.

  Expects test_combinations generated with `NamedDistribution` wrapping
  `tf.distribute.Strategy` instances.

  Optionally supports `required_tpus`: TPU hardware is required if its value
  is `True` or > 0. Also optionally supports `use_cloud_tpu`: when TPU
  hardware is required by `required_tpus`, it specifically must be a Cloud
  TPU (specified with `--tpu`) if `use_cloud_tpu` is `True`.

  Attributes:
    TPU_TEST: The environment is considered to have TPU hardware available if
      the name of the program contains "test_tpu".
  """
  TPU_TEST = "test_tpu" in sys.argv[0]

  def should_execute_combination(self, kwargs):
    dists = [v for v in kwargs.values() if isinstance(v, NamedDistribution)]
    # TODO(isaprykin): Migrate all tests away from using 'required_tpu' in
    # favor of 'required_tpus'.
    if "required_tpus" in kwargs and "required_tpu" in kwargs:
      raise ValueError("Do not use `required_tpu`. Both `required_tpus` and "
                       "`required_tpu` were specified.")
    tpus_requested = kwargs.get("required_tpus", None) or kwargs.get(
        "required_tpu", None)
    if dists and tpus_requested:
      raise ValueError("Do not use `required_tpus` and arguments of type "
                       "NamedDistribution together.")
    # TODO(isaprykin): Add support for a particular number of TPUs. Right now
    # it's binary.
    num_tpus_needed = max([tpus_requested or 0] +
                          [d.required_tpu or 0 for d in dists])
    wants_cloud_tpu = any([kwargs.get("use_cloud_tpu")] +
                          [d.use_cloud_tpu for d in dists])
    tpu_flag = getattr(flags.FLAGS, "tpu", "") or ""
    if not num_tpus_needed and TPUCombination.TPU_TEST:
      return (False, "Test that doesn't require TPUs.")
    if num_tpus_needed and not TPUCombination.TPU_TEST:
      return (False, "Test requires a TPU, but it's not available.")
    if wants_cloud_tpu and not tpu_flag:
      return (False, "Test requires a Cloud TPU, but none specified.")
    if not wants_cloud_tpu and tpu_flag:
      return (False, "Test requires local TPU, but Cloud TPU specified.")
    return (True, None)

  def parameter_modifiers(self):
    return [
        combinations_lib.OptionalParameter("required_tpus"),
        combinations_lib.OptionalParameter("required_tpu"),
        combinations_lib.OptionalParameter("use_cloud_tpu"),
    ]
class NamedDistribution(object):
  """Wraps a `tf.distribute.Strategy` and adds a name for test titles."""

  def __init__(self,
               name,
               distribution_fn,
               required_gpus=None,
               required_physical_gpus=0,
               required_tpu=False,
               use_cloud_tpu=False,
               has_chief=False,
               num_workers=1,
               num_ps=0,
               share_gpu=True,
               pool_runner_fn=None,
               no_xla=False):
    """Initialize NamedDistribution.

    Args:
      name: Name that will be a part of the name of the test case.
      distribution_fn: A callable that creates a `tf.distribute.Strategy`.
      required_gpus: The number of GPUs that the strategy requires. Only one of
        `required_gpus` and `required_physical_gpus` should be set.
      required_physical_gpus: Number of physical GPUs required. Only one of
        `required_gpus` and `required_physical_gpus` should be set.
      required_tpu: Whether the strategy requires TPU.
      use_cloud_tpu: Whether the strategy requires cloud TPU.
      has_chief: Whether the strategy requires a chief worker.
      num_workers: The number of workers that the strategy requires.
      num_ps: The number of parameter servers.
      share_gpu: Whether to share GPUs among workers.
      pool_runner_fn: An optional callable that returns a
        MultiProcessPoolRunner to run the test.
      no_xla: Whether to skip in XLA tests.
    """
    super(NamedDistribution, self).__init__()
    self._name = name
    self._distribution_fn = distribution_fn
    self._pool_runner_fn = pool_runner_fn
    # Hardware/cluster requirements consumed by the *Combination classes.
    self.required_gpus = required_gpus
    self.required_physical_gpus = required_physical_gpus
    self.required_tpu = required_tpu
    self.use_cloud_tpu = use_cloud_tpu
    self.has_chief = has_chief
    self.num_workers = num_workers
    self.num_ps = num_ps
    self.share_gpu = share_gpu
    self.no_xla = no_xla

  @property
  def runner(self):
    """The pool runner produced by `pool_runner_fn`, or None if unset."""
    return None if self._pool_runner_fn is None else self._pool_runner_fn()

  @property
  def strategy(self):
    """A strategy instance newly created by `distribution_fn`."""
    return self._distribution_fn()

  def __repr__(self):
    return self._name
# This is to allow adding combinations that runs a function both as a
# tf.function and eagerly.
#
# @combinations.generate(
#   combinations.combine(
#     tf_function = [combinations.tf_function, combinations.no_tf_function]
#   )
# )
# def testXXX(tf_function):
#   @tf_function
#   def foo():
#     tf.add(1., 1.)
#
#   foo()
# NamedObject wrappers so the chosen variant shows up in generated test names.
tf_function = combinations_lib.NamedObject("TfFunction", def_function.function)
no_tf_function = combinations_lib.NamedObject("NoTfFunction", lambda f: f)
def concat(*combined):
  """Concats combinations."""
  # Flatten the given combination lists into a single list.
  return [entry for one in combined for entry in one]
@tf_export("__internal__.distribute.combinations.generate", v1=[])
def generate(combinations, test_combinations=()):
  # pylint: disable=g-doc-args,g-doc-return-or-yield
  """Distributed adapter of `tf.__internal__.test.combinations.generate`.

  All tests with distributed strategy should use this one instead of
  `tf.__internal__.test.combinations.generate`. This function has support of
  strategy combinations, GPU/TPU and multi worker support.

  See `tf.__internal__.test.combinations.generate` for usage.
  """
  # pylint: enable=g-doc-args,g-doc-return-or-yield
  default_combinations = (
      framework_combinations.EagerGraphCombination(),
      framework_combinations.TFVersionCombination(),
      ClusterCombination(),
      DistributionCombination(),
      GPUCombination(),
      TPUCombination(),
  )
  # Our own multi-worker decoration is applied before (i.e. wrapped inside)
  # framework.test_combinations.generate, since the framework decorator must
  # run all parameter modifiers first. Order matters here.
  combination_decorator = combinations_lib.generate(
      combinations, test_combinations=default_combinations + test_combinations)

  def decorator(test_method_or_class):
    if not isinstance(test_method_or_class, type):
      # A bare test method: wrap it for multi-worker execution first.
      return combination_decorator(_multi_worker_test(test_method_or_class))
    # A test class: wrap every test method with _multi_worker_test before
    # handing the class to the framework decorator.
    klass = test_method_or_class
    for attr_name, attr in six.iteritems(klass.__dict__.copy()):
      if (attr_name.startswith(unittest.TestLoader.testMethodPrefix) and
          isinstance(attr, types.FunctionType)):
        setattr(klass, attr_name, _multi_worker_test(attr))
    return combination_decorator(klass)

  return decorator
# Re-exported aliases from the framework combinations library.
combine = combinations_lib.combine
times = combinations_lib.times
NamedObject = combinations_lib.NamedObject
# Identifies whether we're in the main process or worker processes.
# `_multi_worker_test` decoration behaves differently in the main process and
# the worker processes. See the documentation of _multi_worker_test for detail.
# Set to True (and never reset) inside _test_runner in worker processes.
_running_in_worker = False
def in_main_process():
  """Whether it's in the main test process.

  This is normally used to prepare the test environment which should only
  happen in the main process.

  Returns:
    A boolean.
  """
  return not _running_in_worker
class TestEnvironment(object):
  """Holds test environment information shared with worker processes.

  An instance is populated in the main process and shipped to each worker,
  so mutation is only permitted while in the main process.
  """

  def __init__(self):
    self.tf_data_service_dispatcher = None
    # Note that this includes GPUs that may not be visible to the current
    # worker.
    self.total_phsyical_gpus = None

  def __setattr__(self, name, value):
    # Workers receive a copy of this object; changes made there would silently
    # diverge from the main process, so reject them loudly.
    if not in_main_process():
      raise ValueError(
          "combinations.env() should only be modified in the main process. "
          "Condition your code on combinations.in_main_process().")
    super().__setattr__(name, value)
# The singleton TestEnvironment; passed to worker processes by
# _multi_worker_test via _test_runner.
_env = TestEnvironment()


def env():
  """Returns the object that holds the test environment information.

  Tests should modify this in the main process if needed, and it will be
  passed to the worker processes each time a test case is run.

  Returns:
    a TestEnvironment object.
  """
  return _env
def _set_total_phsyical_gpus():
  # Record the machine-wide physical GPU count on the shared TestEnvironment.
  # Only done in the main process; workers receive the value via _env.
  # (The attribute name's typo matches TestEnvironment.total_phsyical_gpus.)
  if in_main_process():
    env().total_phsyical_gpus = len(
        context.context().list_physical_devices("GPU"))


# This is needed in case CUDA is lazily loaded.
app.call_after_init(_set_total_phsyical_gpus)
# Outcome of one test run in a worker: status is "ok", "skipped" or "failure";
# message carries the traceback or skip reason (None on success).
_TestResult = collections.namedtuple("_TestResult", ["status", "message"])


def _test_runner(test_id, test_env):
  """Executes the test with the given test_id.

  This is a simple wrapper around TestRunner to be used with
  multi_process_runner. Similar to test.main(), but it executes only one test
  specified by test_id and returns whether the test succeeds. If the test
  fails, the function prints failures and errors to stdout.

  Args:
    test_id: TestCase.id()
    test_env: a TestEnvironment object.

  Returns:
    A _TestResult describing the outcome of the run ("ok", "skipped" or
    "failure" plus an optional message).
  """
  global _running_in_worker, _env
  # No need to restore the value of _running_in_worker since it should always
  # be True in worker processes.
  _running_in_worker = True
  _env = test_env
  test = unittest.defaultTestLoader.loadTestsFromName(test_id)
  runner = unittest.TextTestRunner()
  result = runner.run(test)
  # Treat expected failures as failures, so that the main process can get
  # them and fail as expected. Also treat errors as failures to simplify the
  # handling.
  failures = result.failures + result.expectedFailures + result.errors
  if failures:
    ret = _TestResult(status="failure", message=failures[0][1])
  elif result.skipped:
    ret = _TestResult(status="skipped", message=result.skipped[0][1])
  else:
    # Treat unexpectedSuccesses as OK so that the test case in the main
    # process succeeds as well.
    ret = _TestResult(status="ok", message=None)
  # Print tracebacks to stdout and multi_process_runner will collect
  # them and stream back to the main process.
  if ret.message:
    print(ret.message)
  return ret
def _multi_worker_test(test_method):
  """Decorate test_method so that it runs in each worker.

  We use `multi_process_runner` to simulate multiple workers. Since we run
  this function in the main process and all worker processes, this decoration
  behaves differently in the main process and worker processes. In the main
  process, it spawns subprocesses and runs the test on each of them; in a
  worker process, it executes the test in the same way as a normal test, e.g.
  setUp()/tearDown() are called before/after the test.

  Args:
    test_method: a function which must be a test method.

  Returns:
    Decorated `test_method`. Note that the decorated function has additional
    arguments (has_chief, num_workers, num_ps, share_gpu, runner).
  """

  def decorator(self, has_chief, num_workers, num_ps, share_gpu, runner,
                **kwargs):
    if _num_total_workers(has_chief,
                          num_workers) == 1 or _running_in_worker or (
                              # Use in-process cluster for PS combinations
                              # when XLA is enabled.
                              test_util.is_xla_enabled() and num_ps > 0):
      # We're in worker process or the test is for single worker. Either case
      # we execute the test method directly instead of spawning subprocesses.

      # For MultiWorkerMirroredStrategy(CollectiveAllReduceStrategy), install
      # a session that connects to the local server. This is necessary for
      # multi worker graph mode tests to work. Those tests cannot use their
      # graphs or sessions, including the one returned by
      # self.cached_session(). Since existing tests may already be doing so,
      # we only install the session for multi worker tests.
      with _multi_worker_session(kwargs):
        test_method(self, **kwargs)
      return

    # We're in the main process. We spawn subprocesses and run the *test* on
    # each of them. Note that we're not directly executing test_method passed
    # to _multi_worker_test, because we need setUp()/tearDown() to be called
    # and all the decorations on the test method. The conceptual call stack
    # is:
    #   [main process]test.main()
    #     [main process]test_runner.run(test)
    #       [main process]wrapper by combinations.generate()
    #         [main process]_multi_worker_test.decorator()
    #           # A sub process goes through the same code path as the main
    #           # process.
    #           [sub process]_test_runner()
    #             [sub process]test_runner.run(test)
    #               [sub process]wrapper by combinations.generate()
    #                 [sub process]_multi_worker_test.decorator()
    #                   # _running_in_worker is True
    #                   [sub process]test_method()
    test_id = self.id()
    if runner:
      # A pre-created MultiProcessPoolRunner reuses its worker processes.
      results = runner.run(_test_runner, args=(test_id, _env))
    else:
      # Otherwise spin up a fresh cluster of subprocesses for this one test.
      cluster_spec = multi_worker_test_base.create_cluster_spec(
          has_chief=has_chief,
          num_workers=num_workers,
          num_ps=num_ps,
          has_eval=False)
      ephemeral_runner = multi_process_runner.MultiProcessRunner(
          _test_runner,
          cluster_spec,
          share_gpu=share_gpu,
          args=(test_id, _env),
          dependence_on_chief=has_chief)
      ephemeral_runner.start()
      results = ephemeral_runner.join().return_value
    skip_reason = None
    for result in results:
      if result.status == "failure":
        # We can't tell which worker the return value come from, so we fail on
        # the first error.
        self.fail(result.message)
        break
      elif result.status == "skipped":
        # Record the skip reason, but do not actually skip the test in case
        # some processes fail instead.
        skip_reason = result.message
    if skip_reason is not None:
      self.skipTest(skip_reason)

  # Expose the cluster parameters in the decorated signature so the
  # combinations framework passes them through to `decorator`.
  argspec = tf_inspect.getfullargspec(test_method)
  decorator_args = (argspec.args or []) + [
      "has_chief", "num_workers", "num_ps", "share_gpu", "runner"
  ]
  decorator_argspec = argspec._replace(args=decorator_args)
  return tf_decorator.make_decorator(
      test_method, decorator, decorator_argspec=decorator_argspec)
def _num_total_workers(has_chief, num_workers):
"""Returns the number of workers including the chief."""
if has_chief:
return num_workers + 1
return num_workers
def _multi_worker_session(kwargs):
  """Returns a context manager with a session configured for MultiWorkerMirroredStrategy.

  Args:
    kwargs: a dict. Keyword arguments passed to the test.

  Returns:
    A context manager. If MultiWorkerMirroredStrategy is the one and only one
    strategy in kwargs and it's in graph mode, the returned session is
    configured for that strategy. Otherwise, it's a no-op context manager.
  """
  found = None
  for value in kwargs.values():
    if not isinstance(value, distribute_lib.StrategyBase):
      continue
    if found is not None:
      logging.warning(
          "The test uses multiple strategies. Skipping "
          "entering a session that is configured for the strategy.")
      return ops.NullContextmanager()
    found = value
  if context.executing_eagerly() or not isinstance(
      found, collective_all_reduce_strategy.CollectiveAllReduceStrategy):
    return ops.NullContextmanager()
  # Graph-mode MultiWorkerMirroredStrategy: build a session targeting the
  # local server, configured via the strategy's config proto.
  session_config = found.update_config_proto(
      copy.deepcopy(context.context().config))
  master_target = found.cluster_resolver.master()
  return session.Session(
      config=session_config, target=master_target).as_default()
| |
"""The tests for the Xiaomi vacuum platform."""
from datetime import datetime, time, timedelta
from unittest import mock
import pytest
from pytz import utc
from homeassistant.components.vacuum import (
ATTR_BATTERY_ICON,
ATTR_FAN_SPEED,
ATTR_FAN_SPEED_LIST,
DOMAIN,
SERVICE_CLEAN_SPOT,
SERVICE_LOCATE,
SERVICE_RETURN_TO_BASE,
SERVICE_SEND_COMMAND,
SERVICE_SET_FAN_SPEED,
SERVICE_START,
SERVICE_STOP,
STATE_CLEANING,
STATE_ERROR,
)
from homeassistant.components.xiaomi_miio.const import DOMAIN as XIAOMI_DOMAIN
from homeassistant.components.xiaomi_miio.vacuum import (
ATTR_CLEANED_AREA,
ATTR_CLEANED_TOTAL_AREA,
ATTR_CLEANING_COUNT,
ATTR_CLEANING_TIME,
ATTR_CLEANING_TOTAL_TIME,
ATTR_DO_NOT_DISTURB,
ATTR_DO_NOT_DISTURB_END,
ATTR_DO_NOT_DISTURB_START,
ATTR_ERROR,
ATTR_FILTER_LEFT,
ATTR_MAIN_BRUSH_LEFT,
ATTR_SIDE_BRUSH_LEFT,
ATTR_TIMERS,
CONF_HOST,
CONF_NAME,
CONF_TOKEN,
SERVICE_CLEAN_SEGMENT,
SERVICE_CLEAN_ZONE,
SERVICE_GOTO,
SERVICE_MOVE_REMOTE_CONTROL,
SERVICE_MOVE_REMOTE_CONTROL_STEP,
SERVICE_START_REMOTE_CONTROL,
SERVICE_STOP_REMOTE_CONTROL,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_PLATFORM,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
# Platform identifier used when configuring the vacuum component in tests.
PLATFORM = "xiaomi_miio"
# calls made when device status is requested
STATUS_CALLS = [
    mock.call.status(),
    mock.call.consumable_status(),
    mock.call.clean_history(),
    mock.call.dnd_status(),
    mock.call.timer(),
]
@pytest.fixture(name="mock_mirobo_is_got_error")
def mirobo_is_got_error_fixture():
    """Mock a vacuum whose status reports an error (got_error=True)."""
    mock_vacuum = mock.MagicMock()
    # Each `mock_vacuum.status()` call below returns the same child mock, so
    # these lines configure one shared fake status object.
    mock_vacuum.status().data = {"test": "raw"}
    mock_vacuum.status().is_on = False
    mock_vacuum.status().fanspeed = 38
    mock_vacuum.status().got_error = True
    mock_vacuum.status().error = "Error message"
    mock_vacuum.status().battery = 82
    mock_vacuum.status().clean_area = 123.43218
    mock_vacuum.status().clean_time = timedelta(hours=2, minutes=35, seconds=34)
    mock_vacuum.consumable_status().main_brush_left = timedelta(
        hours=12, minutes=35, seconds=34
    )
    mock_vacuum.consumable_status().side_brush_left = timedelta(
        hours=12, minutes=35, seconds=34
    )
    mock_vacuum.consumable_status().filter_left = timedelta(
        hours=12, minutes=35, seconds=34
    )
    mock_vacuum.clean_history().count = "35"
    mock_vacuum.clean_history().total_area = 123.43218
    mock_vacuum.clean_history().total_duration = timedelta(
        hours=11, minutes=35, seconds=34
    )
    mock_vacuum.status().state = "Test Xiaomi Charging"
    # Do-not-disturb window 22:00-06:00, enabled.
    mock_vacuum.dnd_status().enabled = True
    mock_vacuum.dnd_status().start = time(hour=22, minute=0)
    mock_vacuum.dnd_status().end = time(hour=6, minute=0)
    # Two scheduled timers: one enabled, one disabled.
    mock_timer_1 = mock.MagicMock()
    mock_timer_1.enabled = True
    mock_timer_1.cron = "5 5 1 8 1"
    mock_timer_1.next_schedule = datetime(2020, 5, 23, 13, 21, 10, tzinfo=utc)
    mock_timer_2 = mock.MagicMock()
    mock_timer_2.enabled = False
    mock_timer_2.cron = "5 5 1 8 2"
    mock_timer_2.next_schedule = datetime(2020, 5, 23, 13, 21, 10, tzinfo=utc)
    mock_vacuum.timer.return_value = [mock_timer_1, mock_timer_2]
    # Patch the Vacuum class so the platform under test receives our mock.
    with mock.patch(
        "homeassistant.components.xiaomi_miio.vacuum.Vacuum"
    ) as mock_vaccum_cls:
        mock_vaccum_cls.return_value = mock_vacuum
        yield mock_vacuum
# Two shapes of fan-speed preset maps used to parametrize
# mock_mirobo_fanspeeds; presumably older vs. newer device firmware variants
# (note the newer map adds "Gentle") — TODO confirm against the integration.
old_fanspeeds = {
    "Silent": 38,
    "Standard": 60,
    "Medium": 77,
    "Turbo": 90,
}
new_fanspeeds = {
    "Silent": 101,
    "Standard": 102,
    "Medium": 103,
    "Turbo": 104,
    "Gentle": 105,
}
@pytest.fixture(name="mock_mirobo_fanspeeds", params=[old_fanspeeds, new_fanspeeds])
def mirobo_old_speeds_fixture(request):
    """Fixture for testing both types of fanspeeds."""
    mock_vacuum = mock.MagicMock()
    mock_vacuum.status().battery = 32
    mock_vacuum.fan_speed_presets.return_value = request.param
    # Current fan speed defaults to the first preset value in the map.
    mock_vacuum.status().fanspeed = list(request.param.values())[0]
    # Patch the Vacuum class so the platform under test receives our mock.
    with mock.patch(
        "homeassistant.components.xiaomi_miio.vacuum.Vacuum"
    ) as mock_vaccum_cls:
        mock_vaccum_cls.return_value = mock_vacuum
        yield mock_vacuum
@pytest.fixture(name="mock_mirobo_is_on")
def mirobo_is_on_fixture():
    """Mock a vacuum that is actively cleaning (is_on=True, no error)."""
    mock_vacuum = mock.MagicMock()
    # Each `mock_vacuum.status()` call below returns the same child mock, so
    # these lines configure one shared fake status object.
    mock_vacuum.status().data = {"test": "raw"}
    mock_vacuum.status().is_on = True
    mock_vacuum.status().fanspeed = 99
    mock_vacuum.status().got_error = False
    mock_vacuum.status().battery = 32
    mock_vacuum.status().clean_area = 133.43218
    mock_vacuum.status().clean_time = timedelta(hours=2, minutes=55, seconds=34)
    mock_vacuum.consumable_status().main_brush_left = timedelta(
        hours=11, minutes=35, seconds=34
    )
    mock_vacuum.consumable_status().side_brush_left = timedelta(
        hours=11, minutes=35, seconds=34
    )
    mock_vacuum.consumable_status().filter_left = timedelta(
        hours=11, minutes=35, seconds=34
    )
    mock_vacuum.clean_history().count = "41"
    mock_vacuum.clean_history().total_area = 323.43218
    mock_vacuum.clean_history().total_duration = timedelta(
        hours=11, minutes=15, seconds=34
    )
    mock_vacuum.status().state = "Test Xiaomi Cleaning"
    mock_vacuum.status().state_code = 5
    mock_vacuum.dnd_status().enabled = False
    # Two scheduled timers: one enabled, one disabled.
    mock_timer_1 = mock.MagicMock()
    mock_timer_1.enabled = True
    mock_timer_1.cron = "5 5 1 8 1"
    mock_timer_1.next_schedule = datetime(2020, 5, 23, 13, 21, 10, tzinfo=utc)
    mock_timer_2 = mock.MagicMock()
    mock_timer_2.enabled = False
    mock_timer_2.cron = "5 5 1 8 2"
    mock_timer_2.next_schedule = datetime(2020, 5, 23, 13, 21, 10, tzinfo=utc)
    mock_vacuum.timer.return_value = [mock_timer_1, mock_timer_2]
    # Patch the Vacuum class so the platform under test receives our mock.
    with mock.patch(
        "homeassistant.components.xiaomi_miio.vacuum.Vacuum"
    ) as mock_vaccum_cls:
        mock_vaccum_cls.return_value = mock_vacuum
        yield mock_vacuum
@pytest.fixture(name="mock_mirobo_errors")
def mirobo_errors_fixture():
    """Mock a vacuum whose status query always raises OSError."""
    broken_vacuum = mock.MagicMock()
    broken_vacuum.status.side_effect = OSError()
    # Patch the Vacuum class so the platform under test receives our mock.
    patch_target = "homeassistant.components.xiaomi_miio.vacuum.Vacuum"
    with mock.patch(patch_target) as vacuum_cls:
        vacuum_cls.return_value = broken_vacuum
        yield broken_vacuum
async def test_xiaomi_exceptions(hass, caplog, mock_mirobo_errors):
    """Test that an OSError during the status poll is logged, not raised."""
    entity_name = "test_vacuum_cleaner_error"
    await setup_component(hass, entity_name)
    assert "Initializing with host 192.168.1.100 (token 12345...)" in caplog.text
    # The (failing) status call must have been attempted exactly once.
    assert mock_mirobo_errors.status.call_count == 1
    assert "ERROR" in caplog.text
    assert "Got OSError while fetching the state" in caplog.text
async def test_xiaomi_vacuum_services(hass, caplog, mock_mirobo_is_got_error):
    """Test state attributes and the standard vacuum services (error state)."""
    entity_name = "test_vacuum_cleaner_1"
    entity_id = await setup_component(hass, entity_name)
    assert "Initializing with host 192.168.1.100 (token 12345...)" in caplog.text
    # Check state attributes
    state = hass.states.get(entity_id)
    assert state.state == STATE_ERROR
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 14204
    assert state.attributes.get(ATTR_DO_NOT_DISTURB) == STATE_ON
    assert state.attributes.get(ATTR_DO_NOT_DISTURB_START) == "22:00:00"
    assert state.attributes.get(ATTR_DO_NOT_DISTURB_END) == "06:00:00"
    assert state.attributes.get(ATTR_ERROR) == "Error message"
    assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-80"
    # Durations are exposed in minutes, areas rounded down to whole m².
    assert state.attributes.get(ATTR_CLEANING_TIME) == 155
    assert state.attributes.get(ATTR_CLEANED_AREA) == 123
    assert state.attributes.get(ATTR_MAIN_BRUSH_LEFT) == 12
    assert state.attributes.get(ATTR_SIDE_BRUSH_LEFT) == 12
    assert state.attributes.get(ATTR_FILTER_LEFT) == 12
    assert state.attributes.get(ATTR_CLEANING_COUNT) == 35
    assert state.attributes.get(ATTR_CLEANED_TOTAL_AREA) == 123
    assert state.attributes.get(ATTR_CLEANING_TOTAL_TIME) == 695
    assert state.attributes.get(ATTR_TIMERS) == [
        {
            "enabled": True,
            "cron": "5 5 1 8 1",
            "next_schedule": datetime(2020, 5, 23, 13, 21, 10, tzinfo=utc),
        },
        {
            "enabled": False,
            "cron": "5 5 1 8 2",
            "next_schedule": datetime(2020, 5, 23, 13, 21, 10, tzinfo=utc),
        },
    ]
    # Call services
    # Each service is verified against the device-library call it should make,
    # plus the standard status refresh; the mock is reset between services.
    await hass.services.async_call(
        DOMAIN, SERVICE_START, {"entity_id": entity_id}, blocking=True
    )
    mock_mirobo_is_got_error.assert_has_calls(
        [mock.call.resume_or_start()], any_order=True
    )
    mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_got_error.reset_mock()
    await hass.services.async_call(
        DOMAIN, SERVICE_STOP, {"entity_id": entity_id}, blocking=True
    )
    mock_mirobo_is_got_error.assert_has_calls([mock.call.stop()], any_order=True)
    mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_got_error.reset_mock()
    await hass.services.async_call(
        DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": entity_id}, blocking=True
    )
    mock_mirobo_is_got_error.assert_has_calls([mock.call.home()], any_order=True)
    mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_got_error.reset_mock()
    await hass.services.async_call(
        DOMAIN, SERVICE_LOCATE, {"entity_id": entity_id}, blocking=True
    )
    mock_mirobo_is_got_error.assert_has_calls([mock.call.find()], any_order=True)
    mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_got_error.reset_mock()
    await hass.services.async_call(
        DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": entity_id}, blocking=True
    )
    mock_mirobo_is_got_error.assert_has_calls([mock.call.spot()], any_order=True)
    mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_got_error.reset_mock()
    # send_command without params forwards None as the parameter payload.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SEND_COMMAND,
        {"entity_id": entity_id, "command": "raw"},
        blocking=True,
    )
    mock_mirobo_is_got_error.assert_has_calls(
        [mock.call.raw_command("raw", None)], any_order=True
    )
    mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_got_error.reset_mock()
    # send_command with params forwards them unchanged.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SEND_COMMAND,
        {"entity_id": entity_id, "command": "raw", "params": {"k1": 2}},
        blocking=True,
    )
    mock_mirobo_is_got_error.assert_has_calls(
        [mock.call.raw_command("raw", {"k1": 2})], any_order=True
    )
    mock_mirobo_is_got_error.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_got_error.reset_mock()
async def test_xiaomi_specific_services(hass, caplog, mock_mirobo_is_on):
    """Test state attributes and Xiaomi-specific services (cleaning state)."""
    entity_name = "test_vacuum_cleaner_2"
    entity_id = await setup_component(hass, entity_name)
    assert "Initializing with host 192.168.1.100 (token 12345" in caplog.text
    # Check state attributes
    state = hass.states.get(entity_id)
    assert state.state == STATE_CLEANING
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 14204
    assert state.attributes.get(ATTR_DO_NOT_DISTURB) == STATE_OFF
    assert state.attributes.get(ATTR_ERROR) is None
    assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-30"
    # Durations are exposed in minutes, areas rounded down to whole m².
    assert state.attributes.get(ATTR_CLEANING_TIME) == 175
    assert state.attributes.get(ATTR_CLEANED_AREA) == 133
    assert state.attributes.get(ATTR_MAIN_BRUSH_LEFT) == 11
    assert state.attributes.get(ATTR_SIDE_BRUSH_LEFT) == 11
    assert state.attributes.get(ATTR_FILTER_LEFT) == 11
    assert state.attributes.get(ATTR_CLEANING_COUNT) == 41
    assert state.attributes.get(ATTR_CLEANED_TOTAL_AREA) == 323
    assert state.attributes.get(ATTR_CLEANING_TOTAL_TIME) == 675
    assert state.attributes.get(ATTR_TIMERS) == [
        {
            "enabled": True,
            "cron": "5 5 1 8 1",
            "next_schedule": datetime(2020, 5, 23, 13, 21, 10, tzinfo=utc),
        },
        {
            "enabled": False,
            "cron": "5 5 1 8 2",
            "next_schedule": datetime(2020, 5, 23, 13, 21, 10, tzinfo=utc),
        },
    ]
    # Xiaomi vacuum specific services:
    await hass.services.async_call(
        XIAOMI_DOMAIN,
        SERVICE_START_REMOTE_CONTROL,
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    mock_mirobo_is_on.assert_has_calls([mock.call.manual_start()], any_order=True)
    mock_mirobo_is_on.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_on.reset_mock()
    # Remote-control move forwards duration/rotation/velocity unchanged.
    control = {"duration": 1000, "rotation": -40, "velocity": -0.1}
    await hass.services.async_call(
        XIAOMI_DOMAIN,
        SERVICE_MOVE_REMOTE_CONTROL,
        {**control, ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    mock_mirobo_is_on.manual_control.assert_has_calls(
        [mock.call(**control)], any_order=True
    )
    mock_mirobo_is_on.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_on.reset_mock()
    await hass.services.async_call(
        XIAOMI_DOMAIN,
        SERVICE_STOP_REMOTE_CONTROL,
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    mock_mirobo_is_on.assert_has_calls([mock.call.manual_stop()], any_order=True)
    mock_mirobo_is_on.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_on.reset_mock()
    control_once = {"duration": 2000, "rotation": 120, "velocity": 0.1}
    await hass.services.async_call(
        XIAOMI_DOMAIN,
        SERVICE_MOVE_REMOTE_CONTROL_STEP,
        {**control_once, ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    mock_mirobo_is_on.manual_control_once.assert_has_calls(
        [mock.call(**control_once)], any_order=True
    )
    mock_mirobo_is_on.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_on.reset_mock()
    # Zoned clean: the repeat count is appended to each zone rectangle.
    control = {"zone": [[123, 123, 123, 123]], "repeats": 2}
    await hass.services.async_call(
        XIAOMI_DOMAIN,
        SERVICE_CLEAN_ZONE,
        {**control, ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    mock_mirobo_is_on.zoned_clean.assert_has_calls(
        [mock.call([[123, 123, 123, 123, 2]])], any_order=True
    )
    mock_mirobo_is_on.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_is_on.reset_mock()
async def test_xiaomi_vacuum_fanspeeds(hass, caplog, mock_mirobo_fanspeeds):
    """Test Xiaomi vacuum fanspeeds.

    Covers: preset list exposure, numeric pass-through, named-preset
    translation, and rejection of an unknown speed name.
    """
    entity_name = "test_vacuum_cleaner_2"
    entity_id = await setup_component(hass, entity_name)
    # The token is logged truncated; matching this prefix proves setup ran.
    assert "Initializing with host 192.168.1.100 (token 12345" in caplog.text

    state = hass.states.get(entity_id)
    assert state.attributes.get(ATTR_FAN_SPEED) == "Silent"
    fanspeeds = state.attributes.get(ATTR_FAN_SPEED_LIST)
    for speed in ["Silent", "Standard", "Medium", "Turbo"]:
        assert speed in fanspeeds

    # Set speed service: a raw integer is forwarded to the device unchanged.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_FAN_SPEED,
        {"entity_id": entity_id, "fan_speed": 60},
        blocking=True,
    )
    mock_mirobo_fanspeeds.assert_has_calls(
        [mock.call.set_fan_speed(60)], any_order=True
    )
    mock_mirobo_fanspeeds.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_fanspeeds.reset_mock()

    # A named preset is mapped through the device's preset table.
    fan_speed_dict = mock_mirobo_fanspeeds.fan_speed_presets()
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_FAN_SPEED,
        {"entity_id": entity_id, "fan_speed": "Medium"},
        blocking=True,
    )
    mock_mirobo_fanspeeds.assert_has_calls(
        [mock.call.set_fan_speed(fan_speed_dict["Medium"])], any_order=True
    )
    mock_mirobo_fanspeeds.assert_has_calls(STATUS_CALLS, any_order=True)
    mock_mirobo_fanspeeds.reset_mock()

    # An unknown speed name must be logged as an error, not silently sent.
    assert "ERROR" not in caplog.text
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_FAN_SPEED,
        {"entity_id": entity_id, "fan_speed": "invent"},
        blocking=True,
    )
    assert "ERROR" in caplog.text
async def test_xiaomi_vacuum_goto_service(hass, caplog, mock_mirobo_is_on):
    """Test that the goto service forwards x/y coordinates to the device."""
    entity_name = "test_vacuum_cleaner_2"
    entity_id = await setup_component(hass, entity_name)

    data = {"entity_id": entity_id, "x_coord": 25500, "y_coord": 25500}
    await hass.services.async_call(XIAOMI_DOMAIN, SERVICE_GOTO, data, blocking=True)
    mock_mirobo_is_on.goto.assert_has_calls(
        [mock.call(x_coord=data["x_coord"], y_coord=data["y_coord"])], any_order=True
    )
    mock_mirobo_is_on.assert_has_calls(STATUS_CALLS, any_order=True)
async def test_xiaomi_vacuum_clean_segment_service(hass, caplog, mock_mirobo_is_on):
    """Test that clean_segment converts a list of segment ids to ints."""
    entity_name = "test_vacuum_cleaner_2"
    entity_id = await setup_component(hass, entity_name)

    data = {"entity_id": entity_id, "segments": ["1", "2"]}
    await hass.services.async_call(
        XIAOMI_DOMAIN, SERVICE_CLEAN_SEGMENT, data, blocking=True
    )
    mock_mirobo_is_on.segment_clean.assert_has_calls(
        [mock.call(segments=[int(i) for i in data["segments"]])], any_order=True
    )
    mock_mirobo_is_on.assert_has_calls(STATUS_CALLS, any_order=True)
async def test_xiaomi_vacuum_clean_segment_service_single_segment(
    hass, caplog, mock_mirobo_is_on
):
    """Test that clean_segment wraps a single scalar segment into a list."""
    entity_name = "test_vacuum_cleaner_2"
    entity_id = await setup_component(hass, entity_name)

    data = {"entity_id": entity_id, "segments": 1}
    await hass.services.async_call(
        XIAOMI_DOMAIN, SERVICE_CLEAN_SEGMENT, data, blocking=True
    )
    mock_mirobo_is_on.segment_clean.assert_has_calls(
        [mock.call(segments=[data["segments"]])], any_order=True
    )
    mock_mirobo_is_on.assert_has_calls(STATUS_CALLS, any_order=True)
async def setup_component(hass, entity_name):
    """Set up vacuum component.

    Configures the Xiaomi vacuum platform with a fixed host/token and
    returns the resulting entity id (``vacuum.<entity_name>``).
    """
    entity_id = f"{DOMAIN}.{entity_name}"
    await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                CONF_PLATFORM: PLATFORM,
                CONF_HOST: "192.168.1.100",
                CONF_NAME: entity_name,
                # 32-char dummy token matching the Xiaomi token format.
                CONF_TOKEN: "12345678901234567890123456789012",
            }
        },
    )
    await hass.async_block_till_done()
    return entity_id
| |
"""
Expressions
-----------
Offer fast expression evaluation through numexpr
"""
import warnings
import numpy as np
from pandas.core.common import _values_from_object
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.core.config import get_option
# Import numexpr only when available so this module stays importable without it.
if _NUMEXPR_INSTALLED:
    import numexpr as ne

# Test-mode bookkeeping; populated by set_test_mode()/_store_test_result().
_TEST_MODE = None
_TEST_RESULT = None
# Whether numexpr is actually used at runtime; toggled via set_use_numexpr().
_USE_NUMEXPR = _NUMEXPR_INSTALLED

# Dispatch targets; bound to the standard or numexpr implementations by
# set_use_numexpr() (called at the bottom of this module).
_evaluate = None
_where = None

# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
    'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),
    'where': set(['int64', 'float64', 'bool'])
}

# the minimum prod shape that we will use numexpr
_MIN_ELEMENTS = 10000
def set_use_numexpr(v=True):
    """Enable or disable numexpr-backed evaluation.

    Rebinds the module-level ``_evaluate``/``_where`` dispatch targets.
    The flag can only be turned on when numexpr is installed.
    """
    global _USE_NUMEXPR, _evaluate, _where
    if _NUMEXPR_INSTALLED:
        _USE_NUMEXPR = v
    # Rebind the dispatch targets to match the (possibly unchanged) flag.
    if _USE_NUMEXPR:
        _evaluate = _evaluate_numexpr
        _where = _where_numexpr
    else:
        _evaluate = _evaluate_standard
        _where = _where_standard
def set_numexpr_threads(n=None):
    """Set numexpr's thread count to *n*, or reset to the detected core count.

    No-op when numexpr is not installed or not in use.
    """
    if not (_NUMEXPR_INSTALLED and _USE_NUMEXPR):
        return
    ne.set_num_threads(ne.detect_number_of_cores() if n is None else n)
def _evaluate_standard(op, op_str, a, b, **eval_kwargs):
    """ standard evaluation """
    # Record a numexpr "miss" when test mode is collecting statistics.
    if _TEST_MODE:
        _store_test_result(False)
    # Silence numpy floating-point warnings (divide-by-zero, overflow, ...)
    # for the duration of the operation, matching numexpr's behavior.
    with np.errstate(all='ignore'):
        return op(a, b)
def _can_use_numexpr(op, op_str, a, b, dtype_check):
    """Return a boolean if we WILL be using numexpr for this operation."""
    if op_str is None:
        return False
    # Require a minimum element count, otherwise numexpr overhead dominates.
    if np.prod(a.shape) <= _MIN_ELEMENTS:
        return False
    # Collect the operand dtypes and check them against the allowed set.
    dtypes = set()
    for operand in (a, b):
        if hasattr(operand, 'get_dtype_counts'):
            counts = operand.get_dtype_counts()
            # Mixed-dtype frames are never handed to numexpr.
            if len(counts) > 1:
                return False
            dtypes |= set(counts.index)
        elif isinstance(operand, np.ndarray):
            dtypes.add(operand.dtype.name)
    # Allowed dtypes must be a superset of what we saw (empty set passes).
    return not dtypes or _ALLOWED_DTYPES[dtype_check] >= dtypes
def _evaluate_numexpr(op, op_str, a, b, truediv=True,
                      reversed=False, **eval_kwargs):
    """Evaluate ``op(a, b)`` via numexpr when eligible.

    Falls back to :func:`_evaluate_standard` when the operands don't
    qualify or numexpr rejects them at runtime.
    """
    result = None
    if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
        try:
            # we were originally called by a reversed op
            # method, so swap the operands back into evaluation order
            if reversed:
                a, b = b, a

            a_value = getattr(a, "values", a)
            b_value = getattr(b, "values", b)
            result = ne.evaluate('a_value {op} b_value'.format(op=op_str),
                                 local_dict={'a_value': a_value,
                                             'b_value': b_value},
                                 casting='safe', truediv=truediv,
                                 **eval_kwargs)
        except ValueError as detail:
            # NOTE(review): the 'unknown type object' check has no effect —
            # every ValueError is swallowed here and triggers the fallback
            # below, not just the unknown-type case.
            if 'unknown type object' in str(detail):
                pass

    if _TEST_MODE:
        _store_test_result(result is not None)

    # Fallback: plain Python/numpy evaluation.
    if result is None:
        result = _evaluate_standard(op, op_str, a, b)

    return result
def _where_standard(cond, a, b):
    """np.where over the unwrapped (ndarray) values of the operands."""
    return np.where(_values_from_object(cond), _values_from_object(a),
                    _values_from_object(b))
def _where_numexpr(cond, a, b):
    """where(cond, a, b) via numexpr when eligible, else numpy fallback.

    Unlike :func:`_evaluate_numexpr`, unexpected failures are re-raised
    as ``TypeError`` instead of silently falling back.
    """
    result = None
    if _can_use_numexpr(None, 'where', a, b, 'where'):
        try:
            cond_value = getattr(cond, 'values', cond)
            a_value = getattr(a, 'values', a)
            b_value = getattr(b, 'values', b)
            result = ne.evaluate('where(cond_value, a_value, b_value)',
                                 local_dict={'cond_value': cond_value,
                                             'a_value': a_value,
                                             'b_value': b_value},
                                 casting='safe')
        except ValueError as detail:
            # NOTE(review): as in _evaluate_numexpr, every ValueError falls
            # through to the standard path; the substring check is inert.
            if 'unknown type object' in str(detail):
                pass
        except Exception as detail:
            # Anything else is a genuine error; surface it as a TypeError.
            raise TypeError(str(detail))

    if result is None:
        result = _where_standard(cond, a, b)

    return result
# turn myself on: bind _evaluate/_where from the user's
# 'compute.use_numexpr' option as soon as the module is imported.
set_use_numexpr(get_option('compute.use_numexpr'))
def _has_bool_dtype(x):
try:
return x.dtype == bool
except AttributeError:
try:
return 'bool' in x.dtypes
except AttributeError:
return isinstance(x, (bool, np.bool_))
def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')),
                      unsupported=None):
    """Return True when *op_str* may be sent to numexpr for these operands.

    Boolean operands get special handling: '+', '*', '-' warn and fall
    back to Python space; '/', '//', '**' raise NotImplementedError.
    """
    if unsupported is None:
        unsupported = {'+': '|', '*': '&', '-': '^'}

    # Non-boolean operands are always eligible.
    if not (_has_bool_dtype(a) and _has_bool_dtype(b)):
        return True

    if op_str in unsupported:
        warnings.warn("evaluating in Python space because the {op!r} "
                      "operator is not supported by numexpr for "
                      "the bool dtype, use {alt_op!r} instead"
                      .format(op=op_str, alt_op=unsupported[op_str]))
        return False

    if op_str in not_allowed:
        raise NotImplementedError("operator {op!r} not implemented for "
                                  "bool dtypes".format(op=op_str))
    return True
def evaluate(op, op_str, a, b, use_numexpr=True,
             **eval_kwargs):
    """ evaluate and return the expression of the op on a and b

    Parameters
    ----------
    op : the actual operand
    op_str: the string version of the op
    a : left operand
    b : right operand
    use_numexpr : whether to try to use numexpr (default True)
    """
    # _bool_arith_check may veto numexpr for boolean operands (and raises
    # for operators that are undefined on bools).
    if use_numexpr and _bool_arith_check(op_str, a, b):
        return _evaluate(op, op_str, a, b, **eval_kwargs)
    return _evaluate_standard(op, op_str, a, b)
def where(cond, a, b, use_numexpr=True):
    """ evaluate the where condition cond on a and b

    Parameters
    ----------
    cond : a boolean array
    a : return if cond is True
    b : return if cond is False
    use_numexpr : whether to try to use numexpr (default True)
    """
    selector = _where if use_numexpr else _where_standard
    return selector(cond, a, b)
def set_test_mode(v=True):
    """Enable/disable test mode and reset the numexpr-usage log.

    While enabled, every successful numexpr evaluation appends ``True``
    to the log returned by ``get_test_result``.
    """
    global _TEST_MODE, _TEST_RESULT
    _TEST_MODE, _TEST_RESULT = v, []
def _store_test_result(used_numexpr):
    """Record a marker when an evaluation actually went through numexpr."""
    # Only hits are recorded; misses (False) are intentionally dropped.
    if used_numexpr:
        _TEST_RESULT.append(used_numexpr)
def get_test_result():
    """get test result and reset test_results"""
    global _TEST_RESULT
    res, _TEST_RESULT = _TEST_RESULT, []
    return res
| |
from otp.level.EntityTypes import *
class FactoryLevelMgr(LevelMgr):
    # Level-manager entity spec for factories. `attribs` entries are
    # (name, default, type[, constraints]) tuples, following the pattern of
    # the star-imported otp.level.EntityTypes base classes.
    type = 'levelMgr'
    attribs = (('cogLevel', 0, 'int', {'min': 0, 'max': 11}),
               ('wantDoors', 1, 'bool'),
               ('farPlaneDistance', 1500, 'float'))
class BarrelBase(Nodepath):
    # Abstract base for reward barrels (not placed directly).
    abstract = 1
    attribs = (('rewardPerGrab', 5, 'int'), ('rewardPerGrabMax', 0, 'int'))


class BeanBarrel(BarrelBase):
    # Barrel entity type with no extra attributes beyond BarrelBase.
    type = 'beanBarrel'


class GagBarrel(BarrelBase):
    # Barrel with a configurable gag track and level range.
    type = 'gagBarrel'
    attribs = (('gagLevel', 0, 'int', {'min': 0, 'max': 5}),
               ('gagLevelMax', 0, 'int', {'min': 0, 'max': 5}),
               ('gagTrack', 0, 'choice',
                {'choiceSet': ('heal', 'trap', 'lure', 'sound', 'throw', 'squirt', 'drop', 'random'),
                 'valueDict': {'heal': 0,
                               'trap': 1,
                               'lure': 2,
                               'sound': 3,
                               'throw': 4,
                               'squirt': 5,
                               'drop': 6,
                               'random': 'random'}}))


class HealBarrel(BarrelBase):
    # Barrel entity type with no extra attributes beyond BarrelBase.
    type = 'healBarrel'
class Switch(Nodepath):
    # Abstract switch: produces a boolean output, optionally driven by an
    # external isOnEvent entity, with a hold time (secondsOn).
    abstract = 1
    output = 'bool'
    attribs = (('isOnEvent', 0, 'entId', {'output': 'bool'}),
               ('isOn', 0, 'bool'),
               ('secondsOn', 1, 'float'))


class Button(Switch):
    type = 'button'
    attribs = (('scale', Vec3(3), 'scale'), ('color', Vec4(1, 1, 1, 1), 'color'))


class Trigger(Switch):
    # NOTE: 'triggerName' is declared without an explicit attribute type.
    type = 'trigger'
    attribs = (('scale', Vec3(10), 'scale'), ('triggerName', ''))
class ConveyorBelt(Nodepath):
    # Conveyor belt built from repeated tread models of treadLength each.
    type = 'conveyorBelt'
    attribs = (('speed', 1.0, 'float'),
               ('length', 1.0, 'float'),
               ('widthScale', 1.0, 'float'),
               ('treadLength', 10.0, 'float'),
               ('treadModelPath', 'phase_9/models/cogHQ/platform1', 'bamfilename'),
               ('floorName', 'platformcollision'))
class Door(Nodepath):
    # Door with four independent locks; each unlockNEvent/isLockNUnlocked
    # pair controls one lock, and isOpenEvent drives the open state.
    type = 'door'
    output = 'bool'
    attribs = (('color', Vec4(1, 1, 1, 1), 'color'),
               ('isVisBlocker', 1, 'bool'),
               ('unlock0Event', 0, 'entId', {'output': 'bool'}),
               ('unlock1Event', 0, 'entId', {'output': 'bool'}),
               ('unlock2Event', 0, 'entId', {'output': 'bool'}),
               ('unlock3Event', 0, 'entId', {'output': 'bool'}),
               ('isOpenEvent', 0, 'entId', {'output': 'bool'}),
               ('isLock0Unlocked', 1, 'bool'),
               ('isLock1Unlocked', 1, 'bool'),
               ('isLock2Unlocked', 1, 'bool'),
               ('isLock3Unlocked', 1, 'bool'),
               ('isOpen', 0, 'bool'),
               ('secondsOpen', 1, 'float'))
class Grid(Nodepath):
    # Cell grid; 'hpr' is blocked (not editable) for grids.
    type = 'grid'
    blockAttribs = ('hpr',)
    attribs = (('cellSize', 3, 'float'), ('numCol', 3, 'int'), ('numRow', 3, 'int'))


class Crushable(Entity):
    # Abstract base for entities that can be crushed by a crusherCell.
    abstract = 1
    attribs = (('pos', Point3(0, 0, 0), 'pos'),
               ('hpr', Vec3(0, 0, 0), 'hpr'),
               ('crushCellId', None, 'entId', {'type': 'crusherCell'}),
               ('gridId', None, 'entId', {'type': 'grid'}))


class Crusher(Nodepath):
    # Abstract base for entities that do the crushing via a crusherCell.
    abstract = 1
    attribs = (('crushCellId', None, 'entId', {'type': 'crusherCell'}),)
class Crate(Crushable):
    # Pushable crate; modelType selects one of two models.
    type = 'crate'
    blockAttribs = ('hpr',)
    attribs = (('modelType', 0, 'int', {'min': 0, 'max': 1}),
               ('scale', 0.92, 'float'),
               ('pushable', 1, 'bool'))


class Goon(Crushable):
    # Patrolling goon; hFov is the horizontal field of view in degrees.
    type = 'goon'
    attribs = (('goonType', 'pg', 'choice', {'choiceSet': ['pg', 'sg']}),
               ('strength', 5, 'int', {'min': 0, 'max': 105}),
               ('velocity', 4, 'float', {'min': 0, 'max': 10}),
               ('attackRadius', 15, 'float', {'min': 1, 'max': 20}),
               ('scale', 1.5, 'float'),
               ('hFov', 70, 'float', {'min': 0, 'max': 179}))


class GridGoon(Goon):
    # Goon variant bound to a grid; adds no attributes of its own.
    type = 'gridGoon'
    attribs = ()


class GoonClipPlane(Nodepath):
    # Clip plane associated with a specific goon entity.
    type = 'goonClipPlane'
    attribs = (('goonId', None, 'entId', {'type': 'goon'}),)
class ActiveCell(Nodepath):
    # A (row, col) cell on a grid entity.
    type = 'activeCell'
    attribs = (('row', 0, 'int'), ('col', 0, 'int'),
               ('gridId', None, 'entId', {'type': 'grid'}))


class CrusherCell(ActiveCell):
    type = 'crusherCell'
    attribs = ()


class DirectionalCell(ActiveCell):
    # Cell with a direction encoded as a [dx, dy] unit offset.
    type = 'directionalCell'
    attribs = (('dir', [0, 0], 'choice',
                {'choiceSet': ['l', 'r', 'up', 'dn'],
                 'valueDict': {'l': [-1, 0],
                               'r': [1, 0],
                               'up': [0, 1],
                               'dn': [0, -1]}}),)
class GolfGreenGame(Nodepath):
    # Mini-game: puzzle count scales with player count
    # (puzzleBase + puzzlePerPlayer per participant).
    type = 'golfGreenGame'
    output = 'bool'
    attribs = (('pos', Point3(0, 0, 0), 'pos'),
               ('hpr', Vec3(0, 0, 0), 'hpr'),
               ('cellId', 0, 'int'),
               ('switchId', 0, 'entId', {'type': 'button'}),
               ('timeToPlay', 120, 'int'),
               ('puzzleBase', 4, 'int'),
               ('puzzlePerPlayer', 1, 'int'))


class LaserField(Nodepath):
    # Laser grid projected from 'projector'; gridGame selects the puzzle.
    type = 'laserField'
    output = 'bool'
    attribs = (('laserFactor', 3, 'float'),
               ('gridScaleX', 32.0, 'float'),
               ('gridScaleY', 32.0, 'float'),
               ('projector', Point3(6, 6, 25), 'pos'),
               ('modelPath', 0, 'choice',
                {'choiceSet': ['square'], 'valueDict': {'square': 0}}),
               ('pos', Point3(0, 0, 0), 'pos'),
               ('hpr', Vec3(0, 0, 0), 'hpr'),
               ('cellId', 0, 'int'),
               ('switchId', 0, 'entId', {'type': 'button'}),
               ('gridGame', 'Random', 'choice',
                {'choiceSet': ['MineSweeper', 'Roll', 'Avoid', 'Random']}))
class SecurityCamera(Nodepath):
    # Tracking camera with up to three track targets.
    type = 'securityCamera'
    attribs = (('damPow', 3, 'int'),
               ('radius', 5, 'float'),
               ('accel', 1, 'float'),
               ('maxVel', 5, 'float'),
               ('projector', Point3(6, 6, 25), 'pos'),
               ('modelPath', 0, 'choice',
                {'choiceSet': ['square'], 'valueDict': {'square': 0}}),
               ('hideModel', 0, 'bool'),
               ('pos', Point3(0, 0, 0), 'pos'),
               ('hpr', Vec3(0, 0, 0), 'hpr'),
               ('switchId', 0, 'entId', {'type': 'button'}),
               ('trackTarget1', 0, 'entId', {'type': 'button'}),
               ('trackTarget2', 0, 'entId', {'type': 'button'}),
               ('trackTarget3', 0, 'entId', {'type': 'button'}))


class ElevatorMarker(Nodepath):
    type = 'elevatorMarker'
    attribs = (('modelPath', 0, 'choice',
                {'choiceSet': ['square'], 'valueDict': {'square': 0}}),
               ('pos', Point3(0, 0, 0), 'pos'),
               ('hpr', Vec3(0, 0, 0), 'hpr'))
class Lift(Nodepath):
    # Elevator that travels between startPos and endPos; boardable sides
    # are listed separately for each end of the trip.
    type = 'lift'
    attribs = (('duration', 1, 'float'),
               ('startPos', Point3(0, 0, 0), 'pos'),
               ('endPos', Point3(0, 0, 0), 'pos'),
               ('modelPath', 'phase_9/models/cogHQ/Elevator', 'bamfilename'),
               ('floorName', 'elevator_floor', 'string'),
               ('modelScale', Vec3(1), 'scale'),
               ('startGuardName', '', 'string'),
               ('endGuardName', '', 'string'),
               ('startBoardSides', ['front', 'back', 'left', 'right']),
               ('endBoardSides', ['front', 'back', 'left', 'right']),
               ('moveDelay', 1, 'float', {'min': 0}),
               ('autoMoveDelay', 5, 'float', {'min': 0}))
class Mover(Nodepath):
    # Moves another entity (entity2Move) toward moveTarget, with per-end
    # move/wait timings and a selectable cycle pattern.
    type = 'mover'
    attribs = (('modelPath', 0, 'choice',
                {'choiceSet': ['square'], 'valueDict': {'square': 0}}),
               ('pos', Point3(0, 0, 0), 'pos'),
               ('hpr', Vec3(0, 0, 0), 'hpr'),
               ('switchId', 0, 'entId', {'type': 'button'}),
               ('entity2Move', 0, 'entId', {'type': 'button'}),
               ('moveTarget', 0, 'entId', {'type': 'button'}),
               ('pos0Move', 2, 'float'),
               ('pos0Wait', 2, 'float'),
               ('pos1Move', 2, 'float'),
               ('pos1Wait', 2, 'float'),
               ('startOn', 0, 'bool'),
               ('cycleType', 'return', 'choice',
                {'choiceSet': ['return', 'linear', 'loop', 'oneWay']}))
class Platform(Nodepath):
    # Oscillating platform; waitPercent/phaseShift are fractions of the
    # period (0..1), and 'motion' selects the easing curve.
    type = 'platform'
    attribs = (('modelPath', 'phase_9/models/cogHQ/platform1', 'bamfilename'),
               ('modelScale', Vec3(1, 1, 1), 'scale'),
               ('floorName', 'platformcollision', 'string'),
               ('offset', Point3(0, 0, 0), 'pos'),
               ('period', 2, 'float'),
               ('waitPercent', 0.1, 'float', {'min': 0, 'max': 1}),
               ('phaseShift', 0.0, 'float', {'min': 0, 'max': 1}),
               ('motion', 'noBlend', 'choice',
                {'choiceSet': ['noBlend', 'easeInOut', 'easeIn', 'easeOut']}))


class SinkingPlatform(Nodepath):
    # Platform that sinks under load and rises back after a pause.
    type = 'sinkingPlatform'
    attribs = (('verticalRange', 1, 'float'),
               ('sinkDuration', 1, 'float'),
               ('pauseBeforeRise', 1, 'float'),
               ('riseDuration', 1, 'float'))
class Stomper(Crusher):
    # Crushing stomper; 'motion' picks the movement profile (valueDict maps
    # the profile names to the integer codes actually stored).
    type = 'stomper'
    attribs = (('damage', 3, 'int'),
               ('style', 'vertical', 'choice',
                {'choiceSet': ['horizontal', 'vertical']}),
               ('period', 2.0, 'float'),
               ('phaseShift', 0.0, 'float', {'min': 0, 'max': 1}),
               ('range', 6, 'float'),
               ('motion', 3, 'choice',
                {'choiceSet': ['linear',
                               'sinus',
                               'half sinus',
                               'slow fast',
                               'crush',
                               'switched'],
                 'valueDict': {'linear': 0,
                               'sinus': 1,
                               'half sinus': 2,
                               'slow fast': 3,
                               'crush': 4,
                               'switched': 5}}),
               ('headScale', Vec3(1, 1, 1), 'scale'),
               ('shaftScale', Vec3(1, 1, 1), 'scale'),
               ('wantSmoke', 1, 'bool'),
               ('wantShadow', 1, 'bool'),
               ('animateShadow', 1, 'bool'),
               ('soundOn', 0, 'bool'),
               ('soundPath', 0, 'choice',
                {'choiceSet': ['small', 'medium', 'large'],
                 'valueDict': {'small': 0, 'medium': 1, 'large': 2}}),
               ('soundLen', 0, 'float'),
               ('zOffset', 0, 'float'),
               ('switchId', 0, 'entId', {'type': 'button'}),
               ('modelPath', 0, 'choice',
                {'choiceSet': ['square'], 'valueDict': {'square': 0}}),
               ('cogStyle', 0, 'choice',
                {'choiceSet': ['default', 'lawbot'],
                 'valueDict': {'default': 0, 'lawbot': 1}}),
               ('removeHeadFloor', 0, 'bool'),
               ('removeCamBarrierCollisions', 0, 'bool'))


class StomperPair(Nodepath):
    # Pair of stompers that share timing; references exactly two stomper
    # entities via stomperIds ('num': 2).
    type = 'stomperPair'
    attribs = (('headScale', Vec3(1, 1, 1), 'scale'),
               ('motion', 3, 'choice',
                {'choiceSet': ['linear',
                               'sinus',
                               'half sinus',
                               'slow fast',
                               'crush',
                               'switched'],
                 'valueDict': {'linear': 0,
                               'sinus': 1,
                               'half sinus': 2,
                               'slow fast': 3,
                               'crush': 4,
                               'switched': 5}}),
               ('period', 2.0, 'float'),
               ('phaseShift', 0.0, 'float', {'min': 0, 'max': 1}),
               ('range', 6, 'float'),
               ('shaftScale', Vec3(1, 1, 1), 'scale'),
               ('soundLen', 0, 'float'),
               ('soundOn', 0, 'bool'),
               ('stomperIds', [], 'entId', {'type': 'stomper', 'num': 2}),
               ('style', 'horizontal', 'choice',
                {'choiceSet': ['horizontal', 'vertical']}))
class Gear(Nodepath):
    # Rotating gear; degreesPerSec sets rotation speed.
    type = 'gear'
    attribs = (('modelType', 'factory', 'choice',
                {'choiceSet': ['factory', 'mint']}),
               ('gearScale', 1, 'float'),
               ('orientation', 'horizontal', 'choice',
                {'choiceSet': ['horizontal', 'vertical']}),
               ('degreesPerSec', 0, 'float'),
               ('phaseShift', 0, 'float', {'min': 0, 'max': 1}))


class BattleBlocker(Nodepath):
    # Blocks passage within 'radius' until the battle at 'cellId' resolves —
    # presumably; confirm against the BattleBlocker entity implementation.
    type = 'battleBlocker'
    attribs = (('radius', 10, 'float'), ('cellId', 0, 'int'))


class PaintMixer(Platform):
    # Platform specialization with a fixed ('const') model and floor name.
    type = 'paintMixer'
    attribs = (('modelPath', 'phase_9/models/cogHQ/PaintMixer', 'const'),
               ('floorName', 'PaintMixerFloorCollision', 'const'),
               ('shaftScale', 1, 'float'))
class MintProduct(Nodepath):
    # Mint prop; mintId selects the currency model (coin/dollar/bullion).
    type = 'mintProduct'
    attribs = (('mintId', 12500, 'choice',
                {'choiceSet': ('coin', 'dollar', 'bullion'),
                 'valueDict': {'coin': 12500,
                               'dollar': 12600,
                               'bullion': 12700}}),)


class MintProductPallet(Nodepath):
    type = 'mintProductPallet'
    attribs = (('mintId', 12500, 'choice',
                {'choiceSet': ('coin', 'dollar', 'bullion'),
                 'valueDict': {'coin': 12500,
                               'dollar': 12600,
                               'bullion': 12700}}),)


class MintShelf(Nodepath):
    type = 'mintShelf'
    attribs = (('mintId', 12500, 'choice',
                {'choiceSet': ('coin', 'dollar', 'bullion'),
                 'valueDict': {'coin': 12500,
                               'dollar': 12600,
                               'bullion': 12700}}),)
class PathMaster(Nodepath):
    # Path controller referencing up to eight path target entities.
    type = 'pathMaster'
    attribs = (('pathIndex', 0, 'int'),
               ('pathScale', 1.0, 'float'),
               ('pathTarget0', 0, 'entId', {'type': 'button'}),
               ('pathTarget1', 0, 'entId', {'type': 'button'}),
               ('pathTarget2', 0, 'entId', {'type': 'button'}),
               ('pathTarget3', 0, 'entId', {'type': 'button'}),
               ('pathTarget4', 0, 'entId', {'type': 'button'}),
               ('pathTarget5', 0, 'entId', {'type': 'button'}),
               ('pathTarget6', 0, 'entId', {'type': 'button'}),
               ('pathTarget7', 0, 'entId', {'type': 'button'}))
class Rendering(Nodepath):
    # Render-state override node: RGBA color channels, blend mode, fog,
    # and render-bin assignment.
    type = 'rendering'
    attribs = (('pos', Point3(0, 0, 0), 'pos'),
               ('hpr', Vec3(0, 0, 0), 'hpr'),
               ('colorR', 1.0, 'float'),
               ('colorG', 1.0, 'float'),
               ('colorB', 1.0, 'float'),
               ('colorA', 1.0, 'float'),
               ('blending', 'Normal', 'choice',
                {'choiceSet': ['Normal', 'Additive', 'Alpha']}),
               ('fogOn', 0, 'bool'),
               ('renderBin', 'default', 'choice',
                {'choiceSet': ['default', 'fixed', 'transparent']}))


class MoleField(Nodepath):
    # Whack-a-mole mini-game grid; mole count scales with player count.
    type = 'moleField'
    attribs = (('numSquaresX', 5, 'int'),
               ('numSquaresY', 5, 'int'),
               ('spacingX', 5.0, 'float'),
               ('spacingY', 5.0, 'float'),
               ('timeToPlay', 60, 'int'),
               ('molesBase', 4, 'int'),
               ('molesPerPlayer', 1, 'int'))


class Maze(Nodepath):
    type = 'maze'
    attribs = (('numSections', 4, 'int'),)
| |
# Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
"""
Command-line interface to the Quantum APIs
"""
import argparse
import gettext
import logging
import os
import sys
from cliff.app import App
from cliff.commandmanager import CommandManager
from quantumclient.common import clientmanager
from quantumclient.common import exceptions as exc
from quantumclient.common import utils
VERSION = '2.0'
QUANTUM_API_VERSION = '2.0'
def env(*_vars, **kwargs):
    """Search for the first defined of possibly many env vars

    Returns the first environment variable that is set to a non-empty
    value, or the default given via kwargs (falling back to '').
    """
    for name in _vars:
        val = os.environ.get(name, None)
        if val:
            return val
    return kwargs.get('default', '')
# Mapping of CLI command name -> lazily imported cliff command class for the
# v2.0 API. Import is deferred via utils.import_class so the CLI starts fast.
COMMAND_V2 = {
    # Networks
    'net-list': utils.import_class(
        'quantumclient.quantum.v2_0.network.ListNetwork'),
    'net-external-list': utils.import_class(
        'quantumclient.quantum.v2_0.network.ListExternalNetwork'),
    'net-show': utils.import_class(
        'quantumclient.quantum.v2_0.network.ShowNetwork'),
    'net-create': utils.import_class(
        'quantumclient.quantum.v2_0.network.CreateNetwork'),
    'net-delete': utils.import_class(
        'quantumclient.quantum.v2_0.network.DeleteNetwork'),
    'net-update': utils.import_class(
        'quantumclient.quantum.v2_0.network.UpdateNetwork'),
    # Subnets
    'subnet-list': utils.import_class(
        'quantumclient.quantum.v2_0.subnet.ListSubnet'),
    'subnet-show': utils.import_class(
        'quantumclient.quantum.v2_0.subnet.ShowSubnet'),
    'subnet-create': utils.import_class(
        'quantumclient.quantum.v2_0.subnet.CreateSubnet'),
    'subnet-delete': utils.import_class(
        'quantumclient.quantum.v2_0.subnet.DeleteSubnet'),
    'subnet-update': utils.import_class(
        'quantumclient.quantum.v2_0.subnet.UpdateSubnet'),
    # Ports
    'port-list': utils.import_class(
        'quantumclient.quantum.v2_0.port.ListPort'),
    'port-show': utils.import_class(
        'quantumclient.quantum.v2_0.port.ShowPort'),
    'port-create': utils.import_class(
        'quantumclient.quantum.v2_0.port.CreatePort'),
    'port-delete': utils.import_class(
        'quantumclient.quantum.v2_0.port.DeletePort'),
    'port-update': utils.import_class(
        'quantumclient.quantum.v2_0.port.UpdatePort'),
    # Quotas
    'quota-list': utils.import_class(
        'quantumclient.quantum.v2_0.quota.ListQuota'),
    'quota-show': utils.import_class(
        'quantumclient.quantum.v2_0.quota.ShowQuota'),
    'quota-delete': utils.import_class(
        'quantumclient.quantum.v2_0.quota.DeleteQuota'),
    'quota-update': utils.import_class(
        'quantumclient.quantum.v2_0.quota.UpdateQuota'),
    # Extensions
    'ext-list': utils.import_class(
        'quantumclient.quantum.v2_0.extension.ListExt'),
    'ext-show': utils.import_class(
        'quantumclient.quantum.v2_0.extension.ShowExt'),
    # Routers
    'router-list': utils.import_class(
        'quantumclient.quantum.v2_0.router.ListRouter'),
    'router-port-list': utils.import_class(
        'quantumclient.quantum.v2_0.port.ListRouterPort'),
    'router-show': utils.import_class(
        'quantumclient.quantum.v2_0.router.ShowRouter'),
    'router-create': utils.import_class(
        'quantumclient.quantum.v2_0.router.CreateRouter'),
    'router-delete': utils.import_class(
        'quantumclient.quantum.v2_0.router.DeleteRouter'),
    'router-update': utils.import_class(
        'quantumclient.quantum.v2_0.router.UpdateRouter'),
    'router-interface-add': utils.import_class(
        'quantumclient.quantum.v2_0.router.AddInterfaceRouter'),
    'router-interface-delete': utils.import_class(
        'quantumclient.quantum.v2_0.router.RemoveInterfaceRouter'),
    'router-gateway-set': utils.import_class(
        'quantumclient.quantum.v2_0.router.SetGatewayRouter'),
    'router-gateway-clear': utils.import_class(
        'quantumclient.quantum.v2_0.router.RemoveGatewayRouter'),
    # Floating IPs
    'floatingip-list': utils.import_class(
        'quantumclient.quantum.v2_0.floatingip.ListFloatingIP'),
    'floatingip-show': utils.import_class(
        'quantumclient.quantum.v2_0.floatingip.ShowFloatingIP'),
    'floatingip-create': utils.import_class(
        'quantumclient.quantum.v2_0.floatingip.CreateFloatingIP'),
    'floatingip-delete': utils.import_class(
        'quantumclient.quantum.v2_0.floatingip.DeleteFloatingIP'),
    'floatingip-associate': utils.import_class(
        'quantumclient.quantum.v2_0.floatingip.AssociateFloatingIP'),
    'floatingip-disassociate': utils.import_class(
        'quantumclient.quantum.v2_0.floatingip.DisassociateFloatingIP'),
}

# Command tables keyed by API version.
COMMANDS = {'2.0': COMMAND_V2}
class HelpAction(argparse.Action):
    """Provide a custom action so the -h and --help options
    to the main app will print a list of the commands.

    The commands are determined by checking the CommandManager
    instance, passed in as the "default" value for the action.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        app = self.default
        parser.print_help(app.stdout)
        app.stdout.write('\nCommands for API v%s:\n' % app.api_version)
        # Collect (name, first line of description) for every registered
        # command, then align names to the longest one.
        rows = []
        for name, ep in sorted(app.command_manager):
            factory = ep.load()
            description = factory(self, None).get_description().split('\n')[0]
            rows.append((name, description))
        width = max([len(name) for name, _desc in rows]) if rows else 0
        for name, description in rows:
            app.stdout.write(' %s %s\n' % (name.ljust(width), description))
        # Help is terminal: exit successfully after printing.
        sys.exit(0)
class QuantumShell(App):
CONSOLE_MESSAGE_FORMAT = '%(message)s'
DEBUG_MESSAGE_FORMAT = '%(levelname)s: %(name)s %(message)s'
log = logging.getLogger(__name__)
    def __init__(self, apiversion):
        """Set up the cliff App and register the command table for
        *apiversion* with the command manager.
        """
        super(QuantumShell, self).__init__(
            description=__doc__.strip(),
            version=VERSION,
            command_manager=CommandManager('quantum.cli'), )
        self.commands = COMMANDS
        for k, v in self.commands[apiversion].items():
            self.command_manager.add_command(k, v)

        # This is instantiated in initialize_app() only when using
        # password flow auth
        self.auth_client = None
        self.api_version = apiversion
def build_option_parser(self, description, version):
"""Return an argparse option parser for this application.
Subclasses may override this method to extend
the parser with more global options.
:param description: full description of the application
:paramtype description: str
:param version: version number for the application
:paramtype version: str
"""
parser = argparse.ArgumentParser(
description=description,
add_help=False, )
parser.add_argument(
'--version',
action='version',
version='%(prog)s {0}'.format(version), )
parser.add_argument(
'-v', '--verbose',
action='count',
dest='verbose_level',
default=self.DEFAULT_VERBOSE_LEVEL,
help='Increase verbosity of output. Can be repeated.', )
parser.add_argument(
'-q', '--quiet',
action='store_const',
dest='verbose_level',
const=0,
help='suppress output except warnings and errors', )
parser.add_argument(
'-h', '--help',
action=HelpAction,
nargs=0,
default=self, # tricky
help="show this help message and exit", )
parser.add_argument(
'--debug',
default=False,
action='store_true',
help='show tracebacks on errors', )
# Global arguments
parser.add_argument(
'--os-auth-strategy', metavar='<auth-strategy>',
default=env('OS_AUTH_STRATEGY', default='keystone'),
help='Authentication strategy (Env: OS_AUTH_STRATEGY'
', default keystone). For now, any other value will'
' disable the authentication')
parser.add_argument(
'--os_auth_strategy',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-auth-url', metavar='<auth-url>',
default=env('OS_AUTH_URL'),
help='Authentication URL (Env: OS_AUTH_URL)')
parser.add_argument(
'--os_auth_url',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-tenant-name', metavar='<auth-tenant-name>',
default=env('OS_TENANT_NAME'),
help='Authentication tenant name (Env: OS_TENANT_NAME)')
parser.add_argument(
'--os_tenant_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-username', metavar='<auth-username>',
default=utils.env('OS_USERNAME'),
help='Authentication username (Env: OS_USERNAME)')
parser.add_argument(
'--os_username',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-password', metavar='<auth-password>',
default=utils.env('OS_PASSWORD'),
help='Authentication password (Env: OS_PASSWORD)')
parser.add_argument(
'--os_password',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-region-name', metavar='<auth-region-name>',
default=env('OS_REGION_NAME'),
help='Authentication region name (Env: OS_REGION_NAME)')
parser.add_argument(
'--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-token', metavar='<token>',
default=env('OS_TOKEN'),
help='Defaults to env[OS_TOKEN]')
parser.add_argument(
'--os_token',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-url', metavar='<url>',
default=env('OS_URL'),
help='Defaults to env[OS_URL]')
parser.add_argument(
'--os_url',
help=argparse.SUPPRESS)
return parser
def _bash_completion(self):
"""
Prints all of the commands and options to stdout so that the
quantum's bash-completion script doesn't have to hard code them.
"""
commands = set()
options = set()
for option, _action in self.parser._option_string_actions.items():
options.add(option)
for command_name, command in self.command_manager:
commands.add(command_name)
cmd_factory = command.load()
cmd = cmd_factory(self, None)
cmd_parser = cmd.get_parser('')
for option, _action in cmd_parser._option_string_actions.items():
options.add(option)
print ' '.join(commands | options)
    def run(self, argv):
        """Equivalent to the main program for the application.

        :param argv: input arguments and options
        :paramtype argv: list of str
        """
        try:
            # Scan argv once, recording where the first known subcommand,
            # the first -h/--help flag, and the first literal 'help' occur.
            index = 0
            command_pos = -1
            help_pos = -1
            help_command_pos = -1
            for arg in argv:
                if arg == 'bash-completion':
                    self._bash_completion()
                    return 0
                if arg in self.commands[self.api_version]:
                    if command_pos == -1:
                        command_pos = index
                elif arg in ('-h', '--help'):
                    if help_pos == -1:
                        help_pos = index
                elif arg == 'help':
                    if help_command_pos == -1:
                        help_command_pos = index
                index = index + 1
            # '<command> --help' is rewritten to 'help <command>' so the
            # command-specific help is shown instead of the global one.
            if command_pos > -1 and help_pos > command_pos:
                argv = ['help', argv[command_pos]]
            # A bare 'help' with no known command becomes global '--help'.
            if help_command_pos > -1 and command_pos == -1:
                argv[help_command_pos] = '--help'
            self.options, remainder = self.parser.parse_known_args(argv)
            self.configure_logging()
            # No leftover args means we drop into the interactive shell.
            self.interactive_mode = not remainder
            self.initialize_app(remainder)
        except Exception as err:
            # NOTE(review): if parse_known_args itself raised, self.options
            # may not exist yet and this access would raise AttributeError;
            # confirm against cliff's App.run behavior.
            if self.options.debug:
                self.log.exception(err)
                raise
            else:
                self.log.error(err)
                return 1
        result = 1
        if self.interactive_mode:
            _argv = [sys.argv[0]]
            sys.argv = _argv
            result = self.interact()
        else:
            result = self.run_subcommand(remainder)
        return result
    def run_subcommand(self, argv):
        """Look up a subcommand from argv, run it, and clean up.

        Cleanup is always attempted; in --debug mode the original
        exception is re-raised after cleanup.
        """
        subcommand = self.command_manager.find_command(argv)
        cmd_factory, cmd_name, sub_argv = subcommand
        cmd = cmd_factory(self, self.options)
        err = None
        result = 1
        try:
            self.prepare_to_run_command(cmd)
            # Interactive mode omits the program name from the help prog.
            full_name = (cmd_name
                         if self.interactive_mode
                         else ' '.join([self.NAME, cmd_name])
                         )
            cmd_parser = cmd.get_parser(full_name)
            # Unrecognized options are kept as "values specs" for the
            # command itself to interpret.
            known_args, values_specs = cmd_parser.parse_known_args(sub_argv)
            cmd.values_specs = values_specs
            result = cmd.run(known_args)
        except Exception as err:
            # NOTE(review): this relies on Python 2 semantics — under
            # Python 3 the 'as err' binding is deleted when the except
            # block ends; confirm the target interpreter before porting.
            if self.options.debug:
                self.log.exception(err)
            else:
                self.log.error(err)
            try:
                self.clean_up(cmd, result, err)
            except Exception as err2:
                if self.options.debug:
                    self.log.exception(err2)
                else:
                    self.log.error('Could not clean up: %s', err2)
            if self.options.debug:
                raise
        else:
            try:
                self.clean_up(cmd, result, None)
            except Exception as err3:
                if self.options.debug:
                    self.log.exception(err3)
                else:
                    self.log.error('Could not clean up: %s', err3)
        return result
def authenticate_user(self):
    """Make sure the user has provided all of the authentication
    info we need.
    """
    def _require(value, message):
        # Raise the standard CommandError when a mandatory option is
        # missing (empty string / None).
        if not value:
            raise exc.CommandError(message)

    opts = self.options
    if opts.os_auth_strategy == 'keystone':
        if opts.os_token or opts.os_url:
            # Token flow auth takes priority
            _require(opts.os_token,
                     "You must provide a token via"
                     " either --os-token or env[OS_TOKEN]")
            _require(opts.os_url,
                     "You must provide a service URL via"
                     " either --os-url or env[OS_URL]")
        else:
            # Validate password flow auth
            _require(opts.os_username,
                     "You must provide a username via"
                     " either --os-username or env[OS_USERNAME]")
            _require(opts.os_password,
                     "You must provide a password via"
                     " either --os-password or env[OS_PASSWORD]")
            _require(opts.os_tenant_name,
                     "You must provide a tenant_name via"
                     " either --os-tenant-name or via env[OS_TENANT_NAME]")
            _require(opts.os_auth_url,
                     "You must provide an auth url via"
                     " either --os-auth-url or via env[OS_AUTH_URL]")
    else:  # not keystone
        _require(opts.os_url,
                 "You must provide a service URL via"
                 " either --os-url or env[OS_URL]")
    self.client_manager = clientmanager.ClientManager(
        token=opts.os_token,
        url=opts.os_url,
        auth_url=opts.os_auth_url,
        tenant_name=opts.os_tenant_name,
        username=opts.os_username,
        password=opts.os_password,
        region_name=opts.os_region_name,
        api_version=self.api_version,
        auth_strategy=opts.os_auth_strategy, )
    return
def initialize_app(self, argv):
    """Global app init bits:

    * set up API versions
    * validate authentication info
    """
    super(QuantumShell, self).initialize_app(argv)
    # Map the service name to the single API version this shell speaks.
    self.api_version = {'network': self.api_version}
    # If the user is not asking for help, make sure they
    # have given us auth.
    cmd_name = None
    if argv:
        cmd_info = self.command_manager.find_command(argv)
        cmd_factory, cmd_name, sub_argv = cmd_info
    if self.interactive_mode or cmd_name != 'help':
        self.authenticate_user()
def clean_up(self, cmd, result, err):
    """Post-command hook: record which command finished and any error."""
    self.log.debug('clean_up %s', cmd.__class__.__name__)
    if not err:
        return
    self.log.debug('got an error: %s', err)
def configure_logging(self):
    """Create logging handlers for any log output.
    """
    root_logger = logging.getLogger('')
    # Set up logging to a file
    root_logger.setLevel(logging.DEBUG)
    # Send higher-level messages to the console via stderr
    console = logging.StreamHandler(self.stderr)
    # -v raises console verbosity: 0 -> WARNING, 1 -> INFO, 2+ -> DEBUG.
    console_level = {0: logging.WARNING,
                     1: logging.INFO,
                     2: logging.DEBUG,
                     }.get(self.options.verbose_level, logging.DEBUG)
    console.setLevel(console_level)
    # Verbose output gets the more detailed format string.
    if logging.DEBUG == console_level:
        formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)
    else:
        formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)
    console.setFormatter(formatter)
    root_logger.addHandler(console)
    return
def main(argv=sys.argv[1:]):
    """Command-line entry point; returns a process exit status.

    NOTE(review): the default for *argv* is captured once at import time;
    fine for a CLI, but surprising if sys.argv is mutated later.

    :param argv: command-line arguments (without the program name)
    """
    gettext.install('quantumclient', unicode=1)
    try:
        return QuantumShell(QUANTUM_API_VERSION).run(argv)
    except exc.QuantumClientException:
        # Client errors were already reported by the shell; just fail.
        return 1
    except Exception as e:
        print e
        return 1


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2011 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Grid data structure'''
__docformat__ = 'restructuredtext'
import pyglet
from pyglet import image
from pyglet.gl import *
from euclid import Point2, Point3
from director import director
import framegrabber
__all__ = ['GridBase',
'Grid3D',
'TiledGrid3D',
]
class GridBase(object):
    """
    Base class for grid effects: grabs the frame into a texture and
    draws it back through a transformable grid of vertices.

    NOTE(review): the previous docstring ("A Scene that takes two scenes
    and makes a transition between them") was copy-pasted from the
    transitions module and did not describe this class.
    """
    # Shared screen-capture texture; created lazily in init().
    texture = None

    def __init__(self):
        super(GridBase, self).__init__()
        self._active = False
        self.reuse_grid = 0  #! Number of times that this grid will be reused

    def init( self, grid ):
        '''Initializes the grid creating both a vertex_list for an independent-tiled grid
        and creating also a vertex_list_indexed for a "united" (non independent tile) grid.

        :Parameters:
            `grid` : euclid.Point2
                size of a 2D grid
        '''
        #: size of the grid. (rows, columns)
        self.grid = grid
        width, height = director.get_window_size()
        if self.texture is None:
            self.texture = image.Texture.create_for_size(
                GL_TEXTURE_2D, width,
                height, GL_RGBA)
        self.grabber = framegrabber.TextureGrabber()
        self.grabber.grab(self.texture)
        #: x pixels between each vertex (float)
        self.x_step = width / self.grid.x
        #: y pixels between each vertex (float)
        self.y_step = height / self.grid.y
        # Subclass-specific vertex-list construction.
        self._init()

    def before_draw( self ):
        '''Binds the framebuffer to a texture
        and set a 2d projection before binding
        to prevent calculating a new texture
        '''
        self._set_2d_projection()
        # capture before drawing
        self.grabber.before_render(self.texture)

    def after_draw( self, camera ):
        '''Called by CocosNode when the texture is already grabbed.
        The FrameBuffer will be unbound and the texture will be drawn

        :Parameters:
            `camera` : `Camera`
                The target's camera object.
        '''
        # capture after drawing
        self.grabber.after_render(self.texture)
        # after unbinding
        # set a 3d projection
        self._set_3d_projection()
        # and center the camera
        camera.locate( force=True )
        # blit
        glEnable(self.texture.target)
        glBindTexture(self.texture.target, self.texture.id)
        glPushAttrib(GL_COLOR_BUFFER_BIT)
        self._blit()
        glPopAttrib()
        glDisable(self.texture.target)

    def _set_active(self, bool):
        # NOTE(review): the parameter name shadows the builtin `bool`;
        # kept as-is here to avoid touching code in a doc-only pass.
        if self._active == bool:
            return
        self._active = bool
        if self._active == True:
            pass
        elif self._active == False:
            # Deactivation frees the GPU vertex list and restores the
            # default camera/projection.
            self.vertex_list.delete()
            # to restore the camera to default position
            director.set_projection()
        else:
            raise Exception("Invalid value for GridBase.active")

    def _get_active(self):
        return self._active

    active = property(_get_active, _set_active,
                      doc='''Determines whether the grid is active or not

                      :type: bool
                      ''')

    def _init(self):
        # Build self.vertex_list; implemented by subclasses.
        raise NotImplementedError('abstract')

    def _blit(self):
        # Draw self.vertex_list; implemented by subclasses.
        raise NotImplementedError('abstract')

    def _on_resize(self):
        raise NotImplementedError('abstract')

    @classmethod
    def _set_3d_projection(cls):
        glViewport(director._offset_x, director._offset_y, director._usable_width, director._usable_height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(60, 1.0*director._usable_width/director._usable_height, 0.1, 3000.0)
        glMatrixMode(GL_MODELVIEW)

    @classmethod
    def _set_2d_projection(cls):
        # director.set_2d_projection()
        width, height = director.get_window_size()
        glLoadIdentity()
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(0, width, 0, height, -100, 100)
        glMatrixMode(GL_MODELVIEW)
class Grid3D(GridBase):
    '''`Grid3D` is a 3D grid implementation. Each vertex has 3 dimensions: x,y,z

    The indexed vertex array will be built with::

        self.vertex_list.vertices: x,y,z (floats)
        self.vertex_list.tex_coords: x,y,z (floats)
        self.vertex_list.colors: RGBA, with values from 0 - 255
    '''
    def _init( self ):
        # calculate vertex, textures depending on screen size
        idx_pts, ver_pts_idx, tex_pts_idx = self._calculate_vertex_points()
        #: indexed vertex array that can be transformed.
        #: it has these attributes:
        #:
        #:    - vertices
        #:    - colors
        #:    - tex_coords
        #:
        #: for more information refer to pyglet's documentation: pyglet.graphics.vertex_list_indexed
        self.vertex_list = pyglet.graphics.vertex_list_indexed( (self.grid.x+1) * (self.grid.y+1),
                            idx_pts, "t2f", "v3f/stream","c4B")
        #: original vertex array of the grid. (read-only)
        self.vertex_points = ver_pts_idx[:]
        self.vertex_list.vertices = ver_pts_idx
        self.vertex_list.tex_coords = tex_pts_idx
        self.vertex_list.colors = (255,255,255,255) * (self.grid.x+1) * (self.grid.y+1)

    def _blit(self ):
        self.vertex_list.draw(pyglet.gl.GL_TRIANGLES)

    def _calculate_vertex_points(self):
        # Builds the index / vertex / texcoord arrays for a grid of
        # (grid.x+1) x (grid.y+1) shared vertices, two triangles per cell.
        w = float(self.texture.width)
        h = float(self.texture.height)
        index_points = []
        vertex_points_idx = []
        texture_points_idx = []
        # Pre-fill with placeholders; real values are written by index below.
        for x in xrange(0,self.grid.x+1):
            for y in xrange(0,self.grid.y+1):
                vertex_points_idx += [-1,-1,-1]
                texture_points_idx += [-1,-1]
        for x in xrange(0, self.grid.x):
            for y in xrange(0, self.grid.y):
                x1 = x * self.x_step
                x2 = x1 + self.x_step
                y1 = y * self.y_step
                y2 = y1 + self.y_step
                #  d <-- c
                #        ^
                #        |
                #  a --> b
                a = x * (self.grid.y+1) + y
                b = (x+1) * (self.grid.y+1) + y
                c = (x+1) * (self.grid.y+1) + (y+1)
                d = x * (self.grid.y+1) + (y+1)
                # 2 triangles: a-b-d, b-c-d
                index_points += [ a, b, d, b, c, d]    # triangles
                l1 = ( a*3, b*3, c*3, d*3 )
                l2 = ( Point3(x1,y1,0), Point3(x2,y1,0), Point3(x2,y2,0), Point3(x1,y2,0) )
                #  building the vertex
                for i in xrange( len(l1) ):
                    vertex_points_idx[ l1[i] ] = l2[i].x
                    vertex_points_idx[ l1[i] + 1 ] = l2[i].y
                    vertex_points_idx[ l1[i] + 2 ] = l2[i].z
                # building the texels
                tex1 = ( a*2, b*2, c*2, d*2 )
                tex2 = ( Point2(x1,y1), Point2(x2,y1), Point2(x2,y2), Point2(x1,y2) )
                for i in xrange( len(tex1)):
                    texture_points_idx[ tex1[i] ] = tex2[i].x / w
                    texture_points_idx[ tex1[i] + 1 ] = tex2[i].y / h
        return ( index_points, vertex_points_idx, texture_points_idx )

    def get_vertex( self, x, y):
        '''Get the current vertex coordinate

        :Parameters:
            `x` : int
               x-vertex
            `y` : int
               y-vertex

        :rtype: (float, float, float)
        '''
        idx = (x * (self.grid.y+1) + y) * 3
        x = self.vertex_list.vertices[idx]
        y = self.vertex_list.vertices[idx+1]
        z = self.vertex_list.vertices[idx+2]
        return (x,y,z)

    def get_original_vertex( self, x, y):
        '''Get the original vertex coordinate.
        The original vertices are the ones weren't modified by the current action.

        :Parameters:
            `x` : int
               x-vertex
            `y` : int
               y-vertex

        :rtype: (float, float, float)
        '''
        idx = (x * (self.grid.y+1) + y) * 3
        x = self.vertex_points[idx]
        y = self.vertex_points[idx+1]
        z = self.vertex_points[idx+2]
        return (x,y,z)

    def set_vertex( self, x, y, v):
        '''Set a vertex point to a certain value

        :Parameters:
            `x` : int
               x-vertex
            `y` : int
               y-vertex
            `v` : (float, float, float)
                tuple value for the vertex
        '''
        idx = (x * (self.grid.y+1) + y) * 3
        # NOTE(review): the components are truncated with int() even though
        # the vertex format is "v3f" (floats) — presumably intentional to
        # avoid sub-pixel jitter; confirm before changing.
        self.vertex_list.vertices[idx] = int(v[0])
        self.vertex_list.vertices[idx+1] = int(v[1])
        self.vertex_list.vertices[idx+2] = int(v[2])
class TiledGrid3D(GridBase):
    '''`TiledGrid3D` is a 3D grid implementation. It differs from `Grid3D` in that
    the tiles can be separated from the grid.

    The vertex array will be built with::

        self.vertex_list.vertices: x,y,z (floats)
        self.vertex_list.tex_coords: x,y (floats)
        self.vertex_list.colors: RGBA, with values from 0 - 255
    '''
    def _init( self ):
        # calculate vertex, textures depending on screen size
        ver_pts, tex_pts = self._calculate_vertex_points()
        #: vertex array that can be transformed.
        #: it has these attributes:
        #:
        #:    - vertices
        #:    - colors
        #:    - tex_coords
        #:
        #: for more information refer to pyglet's documentation: pyglet.graphics.vertex_list
        self.vertex_list = pyglet.graphics.vertex_list(self.grid.x * self.grid.y * 4,
                            "t2f", "v3f/stream","c4B")
        #: original vertex array of the grid. (read-only)
        self.vertex_points = ver_pts[:]
        self.vertex_list.vertices = ver_pts
        self.vertex_list.tex_coords = tex_pts
        self.vertex_list.colors = (255,255,255,255) * self.grid.x * self.grid.y * 4

    def _blit(self ):
        self.vertex_list.draw(pyglet.gl.GL_QUADS)

    def _calculate_vertex_points(self):
        # One independent quad (4 vertices, not shared) per tile, so tiles
        # can be moved apart from each other.
        w = float(self.texture.width)
        h = float(self.texture.height)
        vertex_points = []
        texture_points = []
        for x in xrange(0, self.grid.x):
            for y in xrange(0, self.grid.y):
                x1 = x * self.x_step
                x2 = x1 + self.x_step
                y1 = y * self.y_step
                y2 = y1 + self.y_step
                # Building the tiles' vertex and texture points
                vertex_points += [x1, y1, 0, x2, y1, 0, x2, y2, 0, x1, y2, 0 ]
                texture_points += [x1/w, y1/h, x2/w, y1/h, x2/w, y2/h, x1/w, y2/h]
        # Generates a quad for each tile, to perform tiles effect
        return (vertex_points, texture_points)

    def set_tile(self, x, y, coords):
        '''Set the 4 tile coordinates

        Coordinates positions::

            3 <-- 2
                  ^
                  |
            0 --> 1

        :Parameters:
            `x` : int
                x coordinate of the tile
            `y` : int
                y coordinate of the tile
            `coords` : [ float, float, float, float, float, float, float, float, float, float, float, float ]
                The 4 coordinates in the format (x0, y0, z0, x1, y1, z1,..., x3, y3, z3)
        '''
        idx = (self.grid.y * x + y) * 4 * 3
        self.vertex_list.vertices[idx:idx+12] = coords

    def get_original_tile(self, x, y):
        '''Get the 4-original tile coordinates.

        Coordinates positions::

            3 <-- 2
                  ^
                  |
            0 --> 1

        :Parameters:
            `x` : int
                x coordinate of the tile
            `y` : int
                y coordinate of the tile

        :rtype: [ float, float, float, float, float, float, float, float, float, float, float, float ]
        :returns: The 4 coordinates with the following order: x0, y0, z0, x1, y1, z1,...,x3, y3, z3
        '''
        idx = (self.grid.y * x + y) * 4 * 3
        return self.vertex_points[idx:idx+12]

    def get_tile(self, x, y):
        '''Get the current tile coordinates.

        Coordinates positions::

            3 <-- 2
                  ^
                  |
            0 --> 1

        :Parameters:
            `x` : int
                x coordinate of the tile
            `y` : int
                y coordinate of the tile

        :rtype: [ float, float, float, float, float, float, float, float, float, float, float, float ]
        :returns: The 4 coordinates with the following order: x0, y0, z0, x1, y1, z1,...,x3, y3, z3
        '''
        idx = (self.grid.y * x + y) * 4 * 3
        return self.vertex_list.vertices[idx:idx+12]
| |
from sfc_models.equation import EquationBlock, Equation, Term
from sfc_models.models import EconomicObject, Model, Country
from sfc_models.utils import Logger, replace_token_from_lookup, LogicError, create_equation_from_terms
class Sector(EconomicObject):
    """
    All sectors derive from this class.
    """
    def __init__(self, country, code, long_name='', has_F=True):
        """
        Create a sector and register it with its Country.

        :param country: Country
        :param code: str  Short sector code.
        :param long_name: str  Human-readable name (generated if empty).
        :param has_F: bool  If True, create the standard F/LAG_F/INC variables.
        """
        if long_name == '':
            long_name = 'Sector Object {0} in Country {1}'.format(code, country.Code)
        self.Code = code
        EconomicObject.__init__(self, country, code=code)
        self.CurrencyZone = country.CurrencyZone
        country._AddSector(self)
        # This is calculated by the Model
        self.FullCode = ''
        self.LongName = long_name
        # self.Equations = {}
        self.HasF = has_F
        self.IsTaxable = False
        self.EquationBlock = EquationBlock()
        if has_F:
            # self.AddVariable('F', 'Financial assets', '<TO BE GENERATED>')
            F = Equation('F', 'Financial assets')
            F.AddTerm('LAG_F')
            self.AddVariableFromEquation(F)
            # self.AddVariable('LAG_F', 'Previous period''s financial assets.', 'F(k-1)')
            INC = Equation('INC', 'Income (PreTax)', rhs=[])
            self.AddVariableFromEquation(INC)
            self.AddVariable('LAG_F', 'Previous period''s financial assets.', 'F(k-1)')

    def AddVariable(self, varname, desc='', eqn=''):
        """
        Add a variable to the sector.
        The variable name (varname) is the local name; it will be decorated to create a
        full name. Equations within a sector can use the local name; other sectors need to
        use GetVariableName to get the full name.

        :param varname: str
        :param desc: str
        :param eqn: str
        :return: None
        """
        if '__' in varname:
            raise ValueError('Cannot use "__" inside local variable names: ' + varname)
        if desc is None:
            desc = ''
        if type(eqn) == Equation:
            equation = eqn
        else:
            # Treat the string right-hand side as an opaque "blob" term.
            equation = Equation(varname, desc, [Term(eqn, is_blob=True),])
        if varname in self.GetVariables():
            Logger('[ID={0}] Variable Overwritten: {1}', priority=3,
                   data_to_format=(self.ID, varname))
        self.EquationBlock.AddEquation(equation)
        # self.Equations[varname] = eqn
        Logger('[ID={0}] Variable Added: {1} = {2} # {3}', priority=2,
               data_to_format=(self.ID, varname, eqn, desc))

    def AddVariableFromEquation(self, eqn):
        """
        Method to be used until the Equation member is replaced...

        :param eqn: Equation
        :return:
        """
        if type(eqn) == str:
            eqn = Equation(eqn)
        self.AddVariable(eqn.LeftHandSide, eqn.Description, eqn)

    def SetEquationRightHandSide(self, varname, rhs):
        """
        Set the right hand side of the equation for an existing variable.

        :param varname: str
        :param rhs: str
        :return: None
        """
        try:
            self.EquationBlock[varname].TermList = [Term(rhs, is_blob=True),]
        except KeyError:
            raise KeyError('Variable {0} does not exist'.format(varname))
        # Could try: Equation.ParseString(rhs), but is too slow in unit tests...
        # if varname not in self.Equations:
        #     raise KeyError('Variable {0} does not exist'.format(varname))
        Logger('[ID={0}] Equation set: {1} = {2} ', priority=2,
               data_to_format=(self.ID, varname, rhs))
        # self.Equations[varname] = rhs

    def AddTermToEquation(self, varname, term):
        """
        Add a new term to an existing equation.

        The term variable may be either a string or (non-Blob) Term object.

        :param varname: str
        :param term: Term
        :return: None
        """
        term = Term(term)
        Logger('Adding term {0} to Equation {1} in Sector {2} [ID={3}]', priority=2,
               data_to_format=(term, varname, self.Code, self.ID))
        try:
            self.EquationBlock[varname].AddTerm(term)
        except KeyError:
            raise KeyError('Variable {0} not in Sector {1}'.format(varname, self.Code))

    def SetExogenous(self, varname, val):
        """
        Set an exogenous variable for a sector. The variable must already be defined (by AddVariable()).

        :param varname: str
        :param val: str
        :return: None
        """
        self.GetModel().AddExogenous(self, varname, val)

    def GetVariables(self):
        """
        Return a sorted list of variables.

        (Need to sort to make testing easier; dict's store in "random" hash order.)

        This is a convenience function; it just passes along self.EquationBlock.GetEquationList()

        :return: list
        """
        return self.EquationBlock.GetEquationList()

    def GetVariableName(self, varname):
        """
        Get the full variable name associated with a local variable.

        Standard convention:
        {sector_fullcode}__{local variable name}.

        NOTE: that is a double-underscore '__'. The use of double underscores in
        variable names (or sector codes) is now verboten!
        This means that the presence of double underscore means that this is a full variable name.

        NOTE: If the sector FullCode is not defined, a temporary alias is created and registered.
        The Model object will ensure that all registered aliases are cleaned up.]

        :param varname: str
        :return: str
        """
        if varname not in self.EquationBlock.GetEquationList():
            raise KeyError('Variable %s not in sector %s' % (varname, self.FullCode))
        if self.FullCode == '':
            # FullCode not assigned yet: hand out an ID-based alias that the
            # Model will later replace with the real full name.
            alias = '_{0}__{1}'.format(self.ID, varname)
            Logger('Registering alias: {0}', priority=5, data_to_format=(alias,))
            self.GetModel()._RegisterAlias(alias, self, varname)
            return alias
        else:
            # Put in a sanity check here
            if '__' in self.FullCode:
                raise ValueError('The use of "__" in sector codes is invalid: ' + self.FullCode)
            if '__' in varname:
                raise ValueError('The use of "__" in variable local names is invalid: ' + varname)
            return self.FullCode + '__' + varname

    def IsSharedCurrencyZone(self, other):
        """
        Is a sector in the same CurrencyZone as the other?

        :param other: Sector
        :return: bool
        """
        return self.CurrencyZone.ID == other.CurrencyZone.ID

    def _ReplaceAliases(self, lookup):
        """
        Use the lookup dictionary to replace aliases.

        :param lookup: dict
        :return:
        """
        self.EquationBlock.ReplaceTokensFromLookup(lookup)

    def AddCashFlow(self, term, eqn=None, desc=None, is_income=True):
        """
        Add a cash flow to the sector. Will add to the financial asset equation (F), and
        the income equation (INC) if is_income is True.

        Except: There is a list of exclusions to which cash flows are not considered income.
        That setting will override the is_income parameter. This allows us to carve out exceptions
        to the standard behaviour, which generally is to assume that cash flows are associated with
        income.

        :param term: str
        :param eqn: str
        :param desc: str
        :param is_income: bool
        :return: None
        """
        term = term.strip()
        if len(term) == 0:
            return
        term_obj = Term(term)
        if not term_obj.IsSimple:  # pragma: no cover   - Not implemented; cannot hit the line below.
            raise LogicError('Must supply a single variable as the term to AddCashFlow')
        # term = term.replace(' ', '')
        # if not (term[0] in ('+', '-')):
        #     term = '+' + term
        # if len(term) < 2:
        #     raise ValueError('Invalid cash flow term')
        self.EquationBlock['F'].AddTerm(term)
        if is_income:
            # Need to see whether it is excluded
            mod = self.GetModel()
            for obj, excluded in mod.IncomeExclusions:
                if obj.ID == self.ID:
                    if term_obj.Term == excluded:
                        is_income = False
                        break
        if is_income:
            self.EquationBlock['INC'].AddTerm(term)
        if eqn is None:
            return
        # Remove the +/- from the term
        term = term_obj.Term
        if term in self.GetVariables():
            # Only install eqn if the variable currently has no meaningful RHS.
            rhs = self.EquationBlock[term].RHS()
            if rhs == '' or rhs == '0.0':
                self.SetEquationRightHandSide(term, eqn)
        else:
            self.AddVariable(term, desc, eqn)

    def AddInitialCondition(self, variable_name, value):
        """
        Add an initial condition for a variable associated with this sector.

        :param variable_name: str
        :param value: float
        :return:
        """
        self.GetModel().AddInitialCondition(self.ID, variable_name, value)

    def _GenerateEquationsFrontEnd(self):  # pragma: no cover
        """
        Used by graphical front ends; generates a logging message. (In Model.Main(),
        the logging is done by the Model before it calls the Sector.)

        :return:
        """
        Logger('Running _GenerateEquations on {0} [{1}]', priority=3,
               data_to_format=(self.Code, self.ID))
        self._GenerateEquations()

    def _GenerateEquations(self):
        """
        Work is done in derived classes.

        :return: None
        """
        return

    def Dump(self):
        """
        Create a string with information about this object. This is for debugging
        purposes, and the format will change over time. In other words, do not rely on
        this output if you want specific information.

        :return: str
        """
        out = '[%s] %s. FullCode = "%s" \n' % (self.Code, self.LongName, self.FullCode)
        out += '-' * 60 + '\n'
        for var in self.EquationBlock.GetEquationList():
            out += str(self.EquationBlock[var]) + '\n'
        return out

    def _CreateFinalEquations(self):
        """
        Returns the final set of equations, with the full names of variables.

        :return: list
        """
        out = []
        lookup = {}
        for varname in self.EquationBlock.GetEquationList():
            lookup[varname] = self.GetVariableName(varname)
        for varname in self.EquationBlock.GetEquationList():
            eq = self.EquationBlock[varname]
            rhs = eq.GetRightHandSide()
            if len(rhs.strip()) == 0:  # pragma: no cover   [Does not happen any more; leave in just in case.]
                continue
            out.append((self.GetVariableName(varname),
                        replace_token_from_lookup(rhs, lookup),
                        '[%s] %s' % (varname, eq.Description)))
        return out

    def GenerateAssetWeighting(self, asset_weighting_dict, residual_asset_code, is_absolute_weighting=False):
        """
        Generates the asset weighting/allocation equations. If there are N assets, pass N-1 in the list, the residual
        gets the rest.

        The variable asset_weighting_list is a
        dictionary, of the form:
        {'asset1code': 'weighting equation',
        'asset2code': 'weighting2'}

        The is_absolute_weighting parameter is a placeholder; if set to true, asset demands are
        absolute. There is a TODO marking where code should be added.

        Note that weightings are (normally) from 0-1.

        :param asset_weighting_dict: dict
        :param residual_asset_code: str
        :param is_absolute_weighting: bool
        :return:
        """
        if is_absolute_weighting:
            # TODO: Implement absolute weightings.
            raise NotImplementedError('Absolute weightings not implemented')
        residual_weight = '1.0'
        if type(asset_weighting_dict) in (list, tuple):
            # Allow asset_weighting_dict to be a list of key: value pairs.
            tmp = dict()
            for code, eqn in asset_weighting_dict:
                tmp[code] = eqn
            asset_weighting_dict = tmp
        for code, weight_eqn in asset_weighting_dict.items():
            # Weight variable = 'WGT_{CODE}'
            weight = 'WGT_' + code
            self.AddVariable(weight, 'Asset weight for' + code, weight_eqn)
            self.AddVariable('DEM_' + code, 'Demand for asset ' + code, 'F * {0}'.format(weight))
            # Residual weight = 1 minus the sum of all explicit weights.
            residual_weight += ' - ' + weight
        self.AddVariable('WGT_' + residual_asset_code, 'Asset weight for ' + residual_asset_code, residual_weight)
        self.AddVariable('DEM_' + residual_asset_code, 'Demand for asset ' + residual_asset_code,
                         'F * {0}'.format('WGT_' + residual_asset_code))
class Market(Sector):
    """
    Market Not really a sector, but keep it in the same list.
    """
    def __init__(self, country, code, long_name=''):
        """
        :param country: Country
        :param code: str
        :param long_name: str
        """
        if long_name == '':
            long_name = 'Market {0} in Country {1}'.format(code, country.Code)
        Sector.__init__(self, country, code, long_name, has_F=False)
        self.AddVariable('SUP_' + code, 'Supply for market ' + code, '')
        self.AddVariable('DEM_' + code, 'Demand for market ' + code, '')
        # Supplier that absorbs whatever demand the other suppliers do not.
        self.ResidualSupply = None
        # List of (Sector, supply equation) pairs added via AddSupplier().
        self.OtherSuppliers = []

    def _SearchSupplier(self):
        """
        Find the sector that is a single supplier in a country.
        Throws a LogicError if more than one, or none.

        Need to set SupplyAllocation if you want to do something not
        covered by this default behaviour.

        :return: Sector
        """
        Logger('Market {0} searching Country {1} for a supplier', priority=3,
               data_to_format=(self.Code, self.Parent.Code))
        ret_value = None
        for sector in self.Parent.GetSectors():
            if sector.ID == self.ID:
                continue
            if 'SUP_' + self.Code in sector.EquationBlock.Equations:
                if ret_value is None:
                    ret_value = sector
                else:
                    raise LogicError('More than one supplier, must set SupplyAllocation: ' + self.Code)
        if ret_value is None:
            raise LogicError('No supplier: ' + self.Code)
        self.ResidualSupply = ret_value
        return ret_value

    def _GenerateEquations(self):
        """
        Generate the equations associated with this market.

        :return:
        """
        if self.ResidualSupply is None:
            # NOTE(review): the return value is unused here; _SearchSupplier
            # also stores the result in self.ResidualSupply as a side effect.
            supplier = self._SearchSupplier()
        self._GenerateTermsLowLevel('DEM', 'Demand')
        self._GenerateMultiSupply()

    def _GenerateTermsLowLevel(self, prefix, long_desc):
        """
        Generate the terms associated with this market, for supply and demand.

        TODO: This is now only called for the demand function; simplify to just refer
        to demand.

        :param prefix: str
        :param long_desc: str
        :return: None
        """
        Logger('Searching for demand for market {0}', priority=3, data_to_format=(self.FullCode,))
        if prefix not in ('SUP', 'DEM'):
            raise LogicError('Input to function must be "SUP" or "DEM"')
        # country = self.Parent
        short_name = prefix + '_' + self.Code
        long_name = prefix + '_' + self.FullCode
        self.AddVariable(short_name, long_desc + ' for Market ' + self.Code, '')
        term_list = []
        for s in self.CurrencyZone.GetSectors():
            if s.ID == self.ID:
                continue
            # Same-country sectors use the short name; foreign sectors the full one.
            if self.ShareParent(s):
                var_name = short_name
            else:
                var_name = long_name
            try:
                term = s.GetVariableName(var_name)
            except KeyError:
                Logger('Variable {0} does not exist in {1}', priority=10,
                       data_to_format=(var_name, s.FullCode))
                continue
            term_list.append('+ ' + term)
            if prefix == 'SUP':  # pragma: no cover
                # Since we assume that there is a single supplier, we can set the supply equation to
                # point to the equation in the market.
                s.AddCashFlow(var_name, self.GetVariableName(var_name), long_desc)
            else:
                # Must fill in demand equation in sectors.
                s.AddCashFlow('-' + var_name, '', long_desc)
        eqn = create_equation_from_terms(term_list)
        self.SetEquationRightHandSide(short_name, eqn)

    def AddSupplier(self, supplier, supply_eqn=''):
        """
        Add a supply. If the supply_eqn is empty (or None), becomes the ResidualSupplier

        :param supplier: Sector
        :param supply_eqn: str
        :return:
        """
        if supply_eqn is None or supply_eqn == '':
            self.ResidualSupply = supplier
            return
        self.OtherSuppliers.append((supplier, supply_eqn))

    def _GenerateMultiSupply(self):
        """
        Generate the supply terms with multiple suppliers.

        :return:
        """
        sup_name = 'SUP_' + self.Code
        dem_name = 'DEM_' + self.Code
        # Set aggregate supply equal to demand
        self.SetEquationRightHandSide(sup_name, rhs=dem_name)
        # Generate individual supply equations
        # These are already supplied for everything other than the residual supply, so
        # we need to build it up.
        # Also, the name of the supply varies, depending on whether we are in the same
        # country/region.
        residual_sector = self.ResidualSupply
        residual_equation = Equation(self.GetSupplierTerm(residual_sector),
                                     'Residual supply', sup_name)
        sector_list = self.OtherSuppliers
        # residual supply = total supply less other supply terms
        for supplier, _ in sector_list:
            term = '-SUP_' + supplier.FullCode
            residual_equation.AddTerm(term)
        # Now that we have an equation for the residual sector, append it to the
        # list of suppliers, so we can process all suppliers in one block of code.
        sector_list.append((residual_sector, residual_equation.RHS()))
        for supplier, eqn in sector_list:
            local_name = 'SUP_' + supplier.FullCode
            self.AddVariable(local_name, 'Supply from {0}'.format(supplier.LongName), eqn)
            # Push this local variable into the supplying sector
            # If we are in the same country, use 'SUP_{CODE}'
            # If we are in different countries, use 'SUP_{FULLCODE}'
            supply_name = self.GetSupplierTerm(supplier)
            if supply_name not in supplier.EquationBlock:
                supplier.AddVariable(supply_name, 'Supply to {0}'.format(self.FullCode), '')
            if self.IsSharedCurrencyZone(supplier):
                supplier.AddTermToEquation(supply_name, self.GetVariableName(local_name))
                supplier.AddCashFlow('+' + supply_name)
            else:
                # Cross-currency supply must be routed through the ExternalSector
                # so the currency conversion terms are generated.
                model = self.GetModel()
                if model.ExternalSector is None:
                    raise LogicError('Must create ExternalSector if we have cross-currency suppliers')
                full_local_name = self.GetVariableName(local_name)
                model.ExternalSector._SendMoney(self, full_local_name)
                term = model.ExternalSector._ReceiveMoney(supplier, self, full_local_name)
                supplier.AddTermToEquation(supply_name, term)
                supplier.AddCashFlow(term)
        return
        # # Residual sector supplies rest
        # # noinspection PyUnusedLocal
        # # This declaration of sector is not needed, but I left it in case code from
        # # above is pasted here, without replacing 'sector' with residual_sector.
        # if not self.IsSharedCurrencyZone(residual_sector):
        #     raise NotImplementedError('Currently does not support residual sectors in another currency')
        # sector = residual_sector
        # local_name = 'SUP_' + residual_sector.FullCode
        # # Equation = [Total supply] - \Sum Individual suppliers
        # eqn = '-'.join(terms)
        # self.AddVariable(local_name, 'Supply from {0}'.format(residual_sector.LongName), eqn)
        # if self.ShareParent(residual_sector):
        #     supply_name = 'SUP_' + self.Code
        # else:
        #     supply_name = 'SUP_' + self.FullCode
        # if supply_name not in residual_sector.EquationBlock:
        #     residual_sector.AddVariable(supply_name, 'Supply to {0}'.format(self.FullCode), '')
        # residual_sector.SetEquationRightHandSide(supply_name, self.GetVariableName(local_name))
        # residual_sector.AddCashFlow('+' + supply_name)

    def GetSupplierTerm(self, supplier):
        """
        What is the local variable name within a supplier for supply to this
        market?

        If same Country,
        out = "SUP_{code}"

        >>> mod = Model()
        >>> ca = Country(mod, 'CA')
        >>> mar = Market(ca, 'GOOD')
        >>> supplier = Sector(ca, 'BUS')
        >>> mar.GetSupplierTerm(supplier)
        'SUP_GOOD'

        However, if we are in a different country, must use the full code.
        out = "SUP_{FullCode}"

        >>> mod = Model()
        >>> ca = Country(mod, 'CA')
        >>> us = Country(mod, 'US')
        >>> mar = Market(ca, 'GOOD')
        >>> supplier = Sector(us, 'BUS')
        >>> mar.GetSupplierTerm(supplier)
        'SUP_CA_GOOD'

        :param supplier: Sector
        :return: str
        """
        if self.ShareParent(supplier):
            return 'SUP_' + self.Code
        else:
            return 'SUP_' + self.GetModel().GetSectorCodeWithCountry(self)
class FinancialAssetMarket(Market):
    """
    Handles the interactions for a market in a financial asset.
    Must be a single issuer.
    """
    def __init__(self, country, code, long_name='', issuer_short_code='GOV'):
        """
        :param country: Country
        :param code: str
        :param long_name: str
        :param issuer_short_code: str  Code of the sector that issues this asset.
        """
        Market.__init__(self, country, code, long_name)
        self.IssuerShortCode = issuer_short_code
        # By default, search the whole currency zone for holders/suppliers.
        self.SearchListSource = self.CurrencyZone
| |
import re
from anki.utils import stripHTML, stripHTMLMedia
from anki.hooks import runFilter
from anki.template import furigana; furigana.install()
from anki.template import hint; hint.install()
# Matches {{c<ordinal>::text}} or {{c<ordinal>::text::hint}}; fill in the
# ordinal with `clozeReg % n`. (?s) lets the cloze text span newlines.
clozeReg = r"(?s)\{\{c%s::(.*?)(::(.*?))?\}\}"
# Registry mapping a Mustache tag-type symbol to its render function;
# populated by the @modifier decorator below.
modifiers = {}
def modifier(symbol):
    """Decorator for associating a function with a Mustache tag modifier.
    @modifier('P')
    def render_tongue(self, tag_name=None, context=None):
        return ":P %s" % tag_name
    {{P yo }} => :P yo
    """
    def register(func):
        # Record the handler in the module-level registry, then hand the
        # function back unchanged so it stays usable as a normal method.
        modifiers[symbol] = func
        return func
    return register
def get_or_attr(obj, name, default=None):
    """Look up *name* on *obj*: first as a mapping key, then as an attribute.

    Returns *default* when the key/attribute is absent.
    """
    try:
        return obj[name]
    except KeyError:
        return default
    except Exception:
        # obj is not subscriptable (or rejects the key type); fall back to
        # attribute access. This was previously a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit.
        try:
            return getattr(obj, name)
        except AttributeError:
            return default
class Template(object):
    """Minimal Mustache-style template renderer with Anki extensions:
    cloze-aware sections (cq:/ca:), field modifiers (text:, type:, cq-/ca-,
    hook-based fmod_*), and runtime delimiter changes ({{=...=}}).
    """
    # The regular expression used to find a #section
    section_re = None
    # The regular expression used to find a tag.
    tag_re = None
    # Opening tag delimiter
    otag = '{{'
    # Closing tag delimiter
    ctag = '}}'
    def __init__(self, template, context=None):
        # template: raw template text; context: mapping/object providing
        # tag values (looked up via get_or_attr).
        self.template = template
        self.context = context or {}
        self.compile_regexps()
    def render(self, template=None, context=None, encoding=None):
        """Turns a Mustache template into something wonderful."""
        template = template or self.template
        context = context or self.context
        # Expand {{#...}}/{{^...}} sections first, then substitute tags.
        template = self.render_sections(template, context)
        result = self.render_tags(template, context)
        if encoding is not None:
            result = result.encode(encoding)
        return result
    def compile_regexps(self):
        """Compiles our section and tag regular expressions."""
        tags = { 'otag': re.escape(self.otag), 'ctag': re.escape(self.ctag) }
        # NOTE(review): '[\#|^]' is a character class, so it also matches a
        # literal '|'; presumably '#'-or-'^' alternation was intended.
        # Kept as-is since templates in the wild may rely on it.
        section = r"%(otag)s[\#|^]([^\}]*)%(ctag)s(.+?)%(otag)s/\1%(ctag)s"
        self.section_re = re.compile(section % tags, re.M|re.S)
        tag = r"%(otag)s(#|=|&|!|>|\{)?(.+?)\1?%(ctag)s+"
        self.tag_re = re.compile(tag % tags)
    def render_sections(self, template, context):
        """Expands sections."""
        while 1:
            match = self.section_re.search(template)
            if match is None:
                break
            section, section_name, inner = match.group(0, 1, 2)
            section_name = section_name.strip()
            # check for cloze
            m = re.match("c[qa]:(\d+):(.+)", section_name)
            if m:
                # get full field text
                txt = get_or_attr(context, m.group(2), None)
                m = re.search(clozeReg%m.group(1), txt)
                if m:
                    it = m.group(1)
                else:
                    it = None
            else:
                it = get_or_attr(context, section_name, None)
            replacer = ''
            # if it and isinstance(it, collections.Callable):
            #     replacer = it(inner)
            if isinstance(it, basestring):
                it = stripHTMLMedia(it).strip()
            # section[2] is the character right after the opening '{{':
            # '#' for a normal section, '^' for an inverted one.
            if it and not hasattr(it, '__iter__'):
                if section[2] != '^':
                    replacer = inner
            elif it and hasattr(it, 'keys') and hasattr(it, '__getitem__'):
                if section[2] != '^':
                    replacer = self.render(inner, it)
            elif it:
                # iterable context: render the section once per item
                insides = []
                for item in it:
                    insides.append(self.render(inner, item))
                replacer = ''.join(insides)
            elif not it and section[2] == '^':
                replacer = inner
            template = template.replace(section, replacer)
        return template
    def render_tags(self, template, context):
        """Renders all the tags in a template for a context."""
        while 1:
            match = self.tag_re.search(template)
            if match is None:
                break
            tag, tag_type, tag_name = match.group(0, 1, 2)
            tag_name = tag_name.strip()
            try:
                func = modifiers[tag_type]
                replacement = func(self, tag_name, context)
                template = template.replace(tag, replacement)
            except (SyntaxError, KeyError):
                # Unknown tag type or a bad delimiter change: return a
                # visible marker instead of raising.
                return u"{{invalid template}}"
        return template
    # {{{ functions just like {{ in anki
    @modifier('{')
    def render_tag(self, tag_name, context):
        return self.render_unescaped(tag_name, context)
    @modifier('!')
    def render_comment(self, tag_name=None, context=None):
        """Rendering a comment always returns nothing."""
        return ''
    @modifier(None)
    def render_unescaped(self, tag_name=None, context=None):
        """Render a tag without escaping it."""
        txt = get_or_attr(context, tag_name)
        if txt is not None:
            # some field names could have colons in them
            # avoid interpreting these as field modifiers
            # better would probably be to put some restrictions on field names
            return txt
        # field modifiers
        parts = tag_name.split(':')
        extra = None
        if len(parts) == 1 or parts[0] == '':
            return '{unknown field %s}' % tag_name
        else:
            mods, tag = parts[:-1], parts[-1] #py3k has *mods, tag = parts
        txt = get_or_attr(context, tag)
        #Since 'text:' and other mods can affect html on which Anki relies to
        #process clozes, we need to make sure clozes are always
        #treated after all the other mods, regardless of how they're specified
        #in the template, so that {{cloze:text: == {{text:cloze:
        #For type:, we return directly since no other mod than cloze (or other
        #pre-defined mods) can be present and those are treated separately
        mods.reverse()
        mods.sort(key=lambda s: not s=="type")
        for mod in mods:
            # built-in modifiers
            if mod == 'text':
                # strip html
                txt = stripHTML(txt) if txt else ""
            elif mod == 'type':
                # type answer field; convert it to [[type:...]] for the gui code
                # to process
                return "[[%s]]" % tag_name
            elif mod.startswith('cq-') or mod.startswith('ca-'):
                # cloze deletion
                mod, extra = mod.split("-")
                txt = self.clozeText(txt, extra, mod[1]) if txt and extra else ""
            else:
                # hook-based field modifier
                mod, extra = re.search("^(.*?)(?:\((.*)\))?$", mod).groups()
                txt = runFilter('fmod_' + mod, txt or '', extra or '', context,
                                tag, tag_name);
                if txt is None:
                    return '{unknown field %s}' % tag_name
        return txt
    def clozeText(self, txt, ord, type):
        """Render cloze number *ord* in *txt*; *type* is 'q' (question,
        cloze hidden) or 'a' (answer, cloze highlighted)."""
        reg = clozeReg
        if not re.search(reg%ord, txt):
            return ""
        def repl(m):
            # replace chosen cloze with type
            if type == "q":
                if m.group(3):
                    return "<span class=cloze>[%s]</span>" % m.group(3)
                else:
                    return "<span class=cloze>[...]</span>"
            else:
                return "<span class=cloze>%s</span>" % m.group(1)
        txt = re.sub(reg%ord, repl, txt)
        # and display other clozes normally
        return re.sub(reg%"\d+", "\\1", txt)
    @modifier('=')
    def render_delimiter(self, tag_name=None, context=None):
        """Changes the Mustache delimiter."""
        try:
            self.otag, self.ctag = tag_name.split(' ')
        except ValueError:
            # invalid
            return
        self.compile_regexps()
        return ''
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Built-in instance properties."""
import re
import uuid
from oslo.config import cfg
import six
from nova import context
from nova import db
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.pci import pci_request
from nova import utils
# Config option for the EC2 compatibility layer's default flavor.
flavor_opts = [
    cfg.StrOpt('default_flavor',
               default='m1.small',
               help='Default flavor to use for the EC2 API only. The Nova API '
                    'does not support a default flavor.'),
    ]
CONF = cfg.CONF
CONF.register_opts(flavor_opts)
LOG = logging.getLogger(__name__)
# NOTE(luisg): Flavor names can include non-ascii characters so that users can
# create flavor names in locales that use them, however flavor IDs are limited
# to ascii characters.
VALID_ID_REGEX = re.compile("^[\w\.\- ]*$")
VALID_NAME_REGEX = re.compile("^[\w\.\- ]*$", re.UNICODE)
# NOTE(dosaboy): This is supposed to represent the maximum value that we can
# place into a SQL single precision float so that we can check whether values
# are oversize. Postgres and MySQL both define this as their max whereas Sqlite
# uses dynamic typing so this would not apply. Different dbs react in different
# ways to oversize values e.g. postgres will raise an exception while mysql
# will round off the value. Nevertheless we may still want to know prior to
# insert whether the value is oversize.
SQL_SP_FLOAT_MAX = 3.40282e+38
# Validate extra specs key names.
VALID_EXTRASPEC_NAME_REGEX = re.compile(r"[\w\.\- :]+$", re.UNICODE)
def _int_or_none(val):
if val is not None:
return int(val)
# Conversion functions applied when flavor properties are round-tripped
# through an instance's system_metadata, where all values are stored as
# strings (see extract_flavor / save_flavor_info below).
system_metadata_flavor_props = {
    'id': int,
    'name': str,
    'memory_mb': int,
    'vcpus': int,
    'root_gb': int,
    'ephemeral_gb': int,
    'flavorid': str,
    'swap': int,
    'rxtx_factor': float,
    # vcpu_weight may legitimately be NULL in the DB
    'vcpu_weight': _int_or_none,
    }
def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None,
           swap=0, rxtx_factor=1.0, is_public=True):
    """Creates flavors.

    :param name: display name; stripped, 1-255 chars, restricted charset
    :param memory: RAM in MB (positive integer)
    :param vcpus: number of vcpus (positive integer)
    :param root_gb: root disk size in GB (non-negative integer)
    :param ephemeral_gb: ephemeral disk size in GB (non-negative integer)
    :param flavorid: public flavor id; a uuid4 is generated when falsy
    :param swap: swap size (non-negative integer)
    :param rxtx_factor: float in (0, SQL_SP_FLOAT_MAX]
    :param is_public: boolean (or boolean-like string) visibility flag
    :returns: the created flavor DB record
    :raises: exception.InvalidInput on validation failure,
             exception.FlavorCreateFailed on DB errors
    """
    if not flavorid:
        flavorid = uuid.uuid4()
    kwargs = {
        'memory_mb': memory,
        'vcpus': vcpus,
        'root_gb': root_gb,
        'ephemeral_gb': ephemeral_gb,
        'swap': swap,
        'rxtx_factor': rxtx_factor,
    }
    if isinstance(name, six.string_types):
        name = name.strip()
    # ensure name do not exceed 255 characters
    utils.check_string_length(name, 'name', min_length=1, max_length=255)
    # ensure name does not contain any special characters
    valid_name = VALID_NAME_REGEX.search(name)
    if not valid_name:
        msg = _("Flavor names can only contain alphanumeric characters, "
                "periods, dashes, underscores and spaces.")
        raise exception.InvalidInput(reason=msg)
    # NOTE(vish): Internally, flavorid is stored as a string but it comes
    # in through json as an integer, so we convert it here.
    flavorid = unicode(flavorid)
    # ensure leading/trailing whitespaces not present.
    if flavorid.strip() != flavorid:
        msg = _("id cannot contain leading and/or trailing whitespace(s)")
        raise exception.InvalidInput(reason=msg)
    # ensure flavor id does not exceed 255 characters
    utils.check_string_length(flavorid, 'id', min_length=1,
                              max_length=255)
    # ensure flavor id does not contain any special characters
    valid_flavor_id = VALID_ID_REGEX.search(flavorid)
    if not valid_flavor_id:
        msg = _("Flavor id can only contain letters from A-Z (both cases), "
                "periods, dashes, underscores and spaces.")
        raise exception.InvalidInput(reason=msg)
    # NOTE(wangbo): validate attributes of the creating flavor.
    # ram and vcpus should be positive ( > 0) integers.
    # disk, ephemeral and swap should be non-negative ( >= 0) integers.
    flavor_attributes = {
        'memory_mb': ('ram', 1),
        'vcpus': ('vcpus', 1),
        'root_gb': ('disk', 0),
        'ephemeral_gb': ('ephemeral', 0),
        'swap': ('swap', 0)
    }
    for key, value in flavor_attributes.items():
        kwargs[key] = utils.validate_integer(kwargs[key], value[0], value[1],
                                             db.MAX_INT)
    # rxtx_factor should be a positive float
    try:
        kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
        if (kwargs['rxtx_factor'] <= 0 or
                kwargs['rxtx_factor'] > SQL_SP_FLOAT_MAX):
            raise ValueError()
    except ValueError:
        msg = (_("'rxtx_factor' argument must be a float between 0 and %g") %
               SQL_SP_FLOAT_MAX)
        raise exception.InvalidInput(reason=msg)
    kwargs['name'] = name
    kwargs['flavorid'] = flavorid
    # ensure is_public attribute is boolean
    try:
        kwargs['is_public'] = strutils.bool_from_string(
            is_public, strict=True)
    except ValueError:
        raise exception.InvalidInput(reason=_("is_public must be a boolean"))
    try:
        return db.flavor_create(context.get_admin_context(), kwargs)
    except db_exc.DBError as e:
        LOG.exception(_LE('DB error: %s'), e)
        raise exception.FlavorCreateFailed()
def destroy(name):
    """Mark the flavor called *name* as deleted.

    :raises: FlavorNotFoundByName when *name* is empty or unknown.
    """
    try:
        if not name:
            raise ValueError()
        db.flavor_destroy(context.get_admin_context(), name)
    except (exception.NotFound, ValueError):
        LOG.exception(_LE('Instance type %s not found for deletion'), name)
        raise exception.FlavorNotFoundByName(flavor_name=name)
def get_all_flavors(ctxt=None, inactive=False, filters=None):
    """Get all non-deleted flavors as a dict keyed by internal id.

    Pass true as argument if you want deleted flavors returned also.
    """
    ctxt = context.get_admin_context() if ctxt is None else ctxt
    flavors = db.flavor_get_all(
        ctxt, inactive=inactive, filters=filters)
    return dict((flavor['id'], flavor) for flavor in flavors)
def get_all_flavors_sorted_list(ctxt=None, inactive=False, filters=None,
                                sort_key='flavorid', sort_dir='asc',
                                limit=None, marker=None):
    """Get all non-deleted flavors as a sorted (optionally paged) list.

    Pass true as argument if you want deleted flavors returned also.
    """
    ctxt = context.get_admin_context() if ctxt is None else ctxt
    return db.flavor_get_all(ctxt, filters=filters, sort_key=sort_key,
                             sort_dir=sort_dir, limit=limit, marker=marker)
def get_default_flavor():
    """Return the flavor named by the ``default_flavor`` config option."""
    return get_flavor_by_name(CONF.default_flavor)
def get_flavor(instance_type_id, ctxt=None, inactive=False):
    """Retrieve a single flavor by its internal id.

    Falls back to the default flavor when *instance_type_id* is None.
    """
    if instance_type_id is None:
        return get_default_flavor()
    ctxt = context.get_admin_context() if ctxt is None else ctxt
    if inactive:
        # include soft-deleted rows in the lookup
        ctxt = ctxt.elevated(read_deleted="yes")
    return db.flavor_get(ctxt, instance_type_id)
def get_flavor_by_name(name, ctxt=None):
    """Retrieve a single flavor by name (default flavor when name is None)."""
    if name is None:
        return get_default_flavor()
    ctxt = context.get_admin_context() if ctxt is None else ctxt
    return db.flavor_get_by_name(ctxt, name)
# TODO(termie): flavor-specific code should probably be in the API that uses
#               flavors.
def get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
    """Retrieve a flavor by its public flavorid.

    :raises: FlavorNotFound
    """
    ctxt = (context.get_admin_context(read_deleted=read_deleted)
            if ctxt is None else ctxt)
    return db.flavor_get_by_flavor_id(ctxt, flavorid, read_deleted)
def get_flavor_access_by_flavor_id(flavorid, ctxt=None):
    """Return the project access list for the given flavor id."""
    ctxt = context.get_admin_context() if ctxt is None else ctxt
    return db.flavor_access_get_by_flavor_id(ctxt, flavorid)
def add_flavor_access(flavorid, projectid, ctxt=None):
    """Grant *projectid* access to the given flavor."""
    ctxt = context.get_admin_context() if ctxt is None else ctxt
    return db.flavor_access_add(ctxt, flavorid, projectid)
def remove_flavor_access(flavorid, projectid, ctxt=None):
    """Revoke *projectid*'s access to the given flavor."""
    ctxt = context.get_admin_context() if ctxt is None else ctxt
    return db.flavor_access_remove(ctxt, flavorid, projectid)
def extract_flavor(instance, prefix=''):
    """Create an InstanceType-like dict from the instance's system_metadata.

    Each property listed in system_metadata_flavor_props is read from the
    '[prefix]instance_type_[key]' entry and converted back to its type.
    """
    sys_meta = utils.instance_sys_meta(instance)
    return dict(
        (key, type_fn(sys_meta['%sinstance_type_%s' % (prefix, key)]))
        for key, type_fn in system_metadata_flavor_props.items())
def save_flavor_info(metadata, instance_type, prefix=''):
    """Stash instance_type properties into *metadata* in the format
    [prefix]instance_type_[key].

    This can be used to update system_metadata in place from a type, as well
    as stash information about another instance_type for later use (such as
    during resize).
    """
    for key in system_metadata_flavor_props:
        metadata['%sinstance_type_%s' % (prefix, key)] = instance_type[key]
    pci_request.save_flavor_pci_info(metadata, instance_type, prefix)
    return metadata
def delete_flavor_info(metadata, *prefixes):
    """Delete flavor instance_type information from instance's
    system_metadata by prefix.
    """
    for key in system_metadata_flavor_props:
        for prefix in prefixes:
            del metadata['%sinstance_type_%s' % (prefix, key)]
    pci_request.delete_flavor_pci_info(metadata, *prefixes)
    return metadata
def validate_extra_spec_keys(key_names_list):
    """Reject any extra-spec key name with characters outside the allowed
    set (alphanumerics, periods, dashes, underscores, colons, spaces).

    :raises: exception.InvalidInput on the first invalid name.
    """
    for key_name in key_names_list:
        if VALID_EXTRASPEC_NAME_REGEX.match(key_name):
            continue
        expl = _('Key Names can only contain alphanumeric characters, '
                 'periods, dashes, underscores, colons and spaces.')
        raise exception.InvalidInput(message=expl)
| |
import base64
import os
import re
import subprocess
from itertools import takewhile
from django.utils.encoding import smart_str
try:
from staticfiles import finders
except ImportError:
from django.contrib.staticfiles import finders # noqa
from pipeline.conf import settings
from pipeline.utils import to_class, relpath
from pipeline.storage import default_storage
# Matches url(...) asset references inside CSS so their paths can be
# rewritten relative to the compressed output file.
URL_DETECTOR = r'url\([\'"]?([^\s)]+\.[a-z]+[\?\#\d\w]*)[\'"]?\)'
# Matches url(__EMBED__...) placeholders emitted by construct_asset_path,
# which are later replaced by data URIs.
URL_REPLACER = r'url\(__EMBED__(.+?)(\?\d+)?\)'
DEFAULT_TEMPLATE_FUNC = "template"
# Client-side underscore-style template compiler, injected only when the
# project uses the default template function.
TEMPLATE_FUNC = r"""var template = function(str){var fn = new Function('obj', 'var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push(\''+str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/<%=([\s\S]+?)%>/g,function(match,code){return "',"+code.replace(/\\'/g, "'")+",'";}).replace(/<%([\s\S]+?)%>/g,function(match,code){return "');"+code.replace(/\\'/g, "'").replace(/[\r\n\t]/g,' ')+"__p.push('";}).replace(/\r/g,'\\r').replace(/\n/g,'\\n').replace(/\t/g,'\\t')+"');}return __p.join('');");return fn;};"""
# Extensions that may be embedded as data URIs, with their mime types.
MIME_TYPES = {
    '.png': 'image/png',
    '.jpg': 'image/jpeg',
    '.jpeg': 'image/jpeg',
    '.gif': 'image/gif',
    '.tif': 'image/tiff',
    '.tiff': 'image/tiff',
    '.ttf': 'font/truetype',
    '.otf': 'font/opentype',
    '.woff': 'font/woff'
}
EMBED_EXTS = MIME_TYPES.keys()
# Fonts are embeddable regardless of the max-image-size limit.
FONT_EXTS = ['.ttf', '.otf', '.woff']
class Compressor(object):
    """Concatenates, rewrites and compresses JS/CSS assets for pipeline."""
    # Class-level cache of base64-encoded asset contents, shared across
    # instances so each embedded asset is read and encoded at most once.
    asset_contents = {}
    def __init__(self, storage=default_storage, verbose=False):
        self.storage = storage
        self.verbose = verbose
    def js_compressor(self):
        # Resolve the configured JS compressor class lazily on each access.
        return to_class(settings.PIPELINE_JS_COMPRESSOR)
    js_compressor = property(js_compressor)
    def css_compressor(self):
        # Resolve the configured CSS compressor class lazily on each access.
        return to_class(settings.PIPELINE_CSS_COMPRESSOR)
    css_compressor = property(css_compressor)
    def compress_js(self, paths, templates=None, **kwargs):
        """Concatenate and compress JS files"""
        js = self.concatenate(paths)
        if templates:
            js = js + self.compile_templates(templates)
        if not settings.PIPELINE_DISABLE_WRAPPER:
            # Wrap in an IIFE so concatenated scripts don't leak globals.
            js = "(function() { %s }).call(this);" % js
        compressor = self.js_compressor
        if compressor:
            js = getattr(compressor(verbose=self.verbose), 'compress_js')(js)
        return js
    def compress_js_block(self, block, **kwargs):
        """compress JS block"""
        if not settings.PIPELINE_DISABLE_WRAPPER:
            content = "(function() { %s }).call(this);" % block
        else:
            content = block
        compressor = self.js_compressor
        if compressor:
            content = getattr(compressor(verbose=self.verbose),
                'compress_js')(content)
        return content
    def compress_css(self, paths, output_filename, variant=None, **kwargs):
        """Concatenate and compress CSS files"""
        css = self.concatenate_and_rewrite(paths, output_filename, variant)
        compressor = self.css_compressor
        if compressor:
            css = getattr(compressor(verbose=self.verbose), 'compress_css')(css)
        if not variant:
            return css
        elif variant == "datauri":
            return self.with_data_uri(css)
        else:
            raise CompressorError("\"%s\" is not a valid variant" % variant)
    def compress_css_block(self, block, **kwargs):
        """compress CSS block"""
        compressor = self.css_compressor
        # BUGFIX: default to the unmodified block. Previously, when no CSS
        # compressor was configured, this returned an unbound local
        # ('content') and raised NameError; compress_js_block already
        # handled the no-compressor case.
        content = block
        if compressor:
            content = getattr(compressor(verbose=self.verbose),
                'compress_css')(block)
        return content
    def compile_templates(self, paths):
        """Compile JS templates into namespace-registered functions."""
        compiled = ""
        if not paths:
            return compiled
        namespace = settings.PIPELINE_TEMPLATE_NAMESPACE
        base_path = self.base_path(paths)
        for path in paths:
            contents = self.read_file(path)
            # Escape newlines and quotes so the template survives being
            # inlined as a JS string literal.
            contents = re.sub(r"\r?\n", "\\\\n", contents)
            contents = re.sub(r"'", "\\'", contents)
            name = self.template_name(path, base_path)
            compiled += "%s['%s'] = %s('%s');\n" % (
                namespace,
                name,
                settings.PIPELINE_TEMPLATE_FUNC,
                contents
            )
        compiler = TEMPLATE_FUNC if settings.PIPELINE_TEMPLATE_FUNC == DEFAULT_TEMPLATE_FUNC else ""
        return "\n".join([
            "%(namespace)s = %(namespace)s || {};" % {'namespace': namespace},
            compiler,
            compiled
        ])
    def base_path(self, paths):
        """Return the longest common directory prefix of *paths*."""
        def names_equal(name):
            return all(n == name[0] for n in name[1:])
        directory_levels = zip(*[p.split(os.sep) for p in paths])
        return os.sep.join(x[0] for x in takewhile(names_equal, directory_levels))
    def template_name(self, path, base):
        """Find out the name of a JS template"""
        if not base:
            path = os.path.basename(path)
        if path == base:
            base = os.path.dirname(path)
        name = re.sub(r"^%s[\/\\]?(.*)%s$" % (
            re.escape(base), re.escape(settings.PIPELINE_TEMPLATE_EXT)
        ), r"\1", path)
        return re.sub(r"[\/\\]", "_", name)
    def concatenate_and_rewrite(self, paths, output_filename, variant=None):
        """Concatenate together files and rewrite urls"""
        stylesheets = []
        for path in paths:
            def reconstruct(match):
                asset_path = match.group(1)
                # Absolute and protocol-relative URLs are left untouched.
                if asset_path.startswith("http") or asset_path.startswith("//"):
                    return "url(%s)" % asset_path
                asset_url = self.construct_asset_path(asset_path, path,
                    output_filename, variant)
                return "url(%s)" % asset_url
            content = self.read_file(path)
            content = re.sub(URL_DETECTOR, reconstruct, smart_str(content))
            stylesheets.append(content)
        return '\n'.join(stylesheets)
    def concatenate(self, paths):
        """Concatenate together a list of files"""
        return '\n'.join([self.read_file(path) for path in paths])
    def construct_asset_path(self, asset_path, css_path, output_filename, variant=None):
        """Return a rewritten asset URL for a stylesheet"""
        public_path = self.absolute_path(asset_path, os.path.dirname(css_path))
        if self.embeddable(public_path, variant):
            # Marker later replaced by a data URI in with_data_uri().
            return "__EMBED__%s" % public_path
        if not os.path.isabs(asset_path):
            asset_path = self.relative_path(public_path, output_filename)
        return asset_path
    def embeddable(self, path, variant):
        """Is the asset embeddable ?"""
        name, ext = os.path.splitext(path)
        font = ext in FONT_EXTS
        if not variant:
            return False
        if not (re.search(settings.PIPELINE_EMBED_PATH, path) and self.storage.exists(path)):
            return False
        if not ext in EMBED_EXTS:
            return False
        # Fonts are embedded regardless of size; images must fit the limit.
        if not (font or len(self.encoded_content(path)) < settings.PIPELINE_EMBED_MAX_IMAGE_SIZE):
            return False
        return True
    def with_data_uri(self, css):
        """Replace __EMBED__ markers in *css* with base64 data URIs."""
        def datauri(match):
            path = match.group(1)
            mime_type = self.mime_type(path)
            data = self.encoded_content(path)
            return "url(\"data:%s;charset=utf-8;base64,%s\")" % (mime_type, data)
        return re.sub(URL_REPLACER, datauri, css)
    def encoded_content(self, path):
        """Return the base64 encoded contents"""
        if path in self.__class__.asset_contents:
            return self.__class__.asset_contents[path]
        data = self.read_file(path)
        self.__class__.asset_contents[path] = base64.b64encode(data)
        return self.__class__.asset_contents[path]
    def mime_type(self, path):
        """Get mime-type from filename"""
        name, ext = os.path.splitext(path)
        return MIME_TYPES[ext]
    def absolute_path(self, path, start):
        """
        Return the absolute public path for an asset,
        given the path of the stylesheet that contains it.
        """
        if os.path.isabs(path):
            path = os.path.join(default_storage.location, path)
        else:
            path = os.path.join(start, path)
        return os.path.normpath(path)
    def relative_path(self, absolute_path, output_filename):
        """Rewrite paths relative to the output stylesheet path"""
        absolute_path = os.path.join(settings.PIPELINE_ROOT, absolute_path)
        output_path = os.path.join(settings.PIPELINE_ROOT, os.path.dirname(output_filename))
        return relpath(absolute_path, output_path)
    def read_file(self, path):
        """Read file content in binary mode"""
        file = default_storage.open(path, 'rb')
        content = file.read()
        file.close()
        return content
class CompressorBase(object):
    """Common interface for pipeline compressors.

    Subclasses override filter_css/filter_js to transform content.
    """
    def __init__(self, verbose):
        # verbose: when true, compressors may echo diagnostic output.
        self.verbose = verbose

    def filter_css(self, css):
        raise NotImplementedError

    def filter_js(self, js):
        raise NotImplementedError
class CompressorError(Exception):
    """Raised when a compressor or filter fails to process its input."""
class SubProcessCompressor(CompressorBase):
    """Base for compressors that pipe content through an external command."""
    def execute_command(self, command, content):
        """Feed *content* to *command* on stdin and return its stdout.

        :raises CompressorError: if the command exits non-zero (with the
            command's stderr as the message when available).
        """
        pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
            stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        # communicate() reads stdout and stderr concurrently; the previous
        # sequential stdout.read()/stderr.read() could deadlock when one
        # pipe buffer filled while we blocked on the other (see subprocess
        # docs). Return code semantics are unchanged.
        compressed_content, error = pipe.communicate(smart_str(content))
        if pipe.returncode != 0:
            if not error:
                error = "Unable to apply %s compressor" % self.__class__.__name__
            raise CompressorError(error)
        if self.verbose:
            print(error)
        return compressed_content
| |
import json
import sys
from ..exceptions import JSONRPCInvalidRequestException
from ..jsonrpc1 import (
JSONRPC10Request,
JSONRPC10Response,
)
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestJSONRPC10Request(unittest.TestCase):
    """ Test JSONRPC10Request functionality."""
    def setUp(self):
        # Baseline valid request parameters; individual tests mutate these.
        self.request_params = {
            "method": "add",
            "params": [1, 2],
            "_id": 1,
        }
    # --- construction and method/params validation ---
    def test_correct_init(self):
        """ Test object is created."""
        JSONRPC10Request(**self.request_params)
    def test_validation_incorrect_no_parameters(self):
        with self.assertRaises(ValueError):
            JSONRPC10Request()
    def test_method_validation_str(self):
        self.request_params.update({"method": "add"})
        JSONRPC10Request(**self.request_params)
    def test_method_validation_not_str(self):
        self.request_params.update({"method": []})
        with self.assertRaises(ValueError):
            JSONRPC10Request(**self.request_params)
        self.request_params.update({"method": {}})
        with self.assertRaises(ValueError):
            JSONRPC10Request(**self.request_params)
        self.request_params.update({"method": None})
        with self.assertRaises(ValueError):
            JSONRPC10Request(**self.request_params)
    def test_params_validation_list(self):
        self.request_params.update({"params": []})
        JSONRPC10Request(**self.request_params)
        self.request_params.update({"params": [0]})
        JSONRPC10Request(**self.request_params)
    def test_params_validation_tuple(self):
        self.request_params.update({"params": ()})
        JSONRPC10Request(**self.request_params)
        self.request_params.update({"params": tuple([0])})
        JSONRPC10Request(**self.request_params)
    # JSON-RPC 1.0 only allows positional params: dict/None/str are invalid.
    def test_params_validation_dict(self):
        self.request_params.update({"params": {}})
        with self.assertRaises(ValueError):
            JSONRPC10Request(**self.request_params)
        self.request_params.update({"params": {"a": 0}})
        with self.assertRaises(ValueError):
            JSONRPC10Request(**self.request_params)
    def test_params_validation_none(self):
        self.request_params.update({"params": None})
        with self.assertRaises(ValueError):
            JSONRPC10Request(**self.request_params)
    def test_params_validation_incorrect(self):
        self.request_params.update({"params": "str"})
        with self.assertRaises(ValueError):
            JSONRPC10Request(**self.request_params)
    def test_request_args(self):
        self.assertEqual(JSONRPC10Request("add", []).args, ())
        self.assertEqual(JSONRPC10Request("add", [1, 2]).args, (1, 2))
    # --- _id validation: any JSON value is accepted as an id ---
    def test_id_validation_string(self):
        self.request_params.update({"_id": "id"})
        JSONRPC10Request(**self.request_params)
    def test_id_validation_int(self):
        self.request_params.update({"_id": 0})
        JSONRPC10Request(**self.request_params)
    def test_id_validation_null(self):
        self.request_params.update({"_id": "null"})
        JSONRPC10Request(**self.request_params)
    def test_id_validation_none(self):
        self.request_params.update({"_id": None})
        JSONRPC10Request(**self.request_params)
    def test_id_validation_float(self):
        self.request_params.update({"_id": 0.1})
        JSONRPC10Request(**self.request_params)
    def test_id_validation_list_tuple(self):
        self.request_params.update({"_id": []})
        JSONRPC10Request(**self.request_params)
        self.request_params.update({"_id": ()})
        JSONRPC10Request(**self.request_params)
    def test_id_validation_default_id_none(self):
        del self.request_params["_id"]
        JSONRPC10Request(**self.request_params)
    # --- .data / .json serialization ---
    def test_data_method_1(self):
        r = JSONRPC10Request("add", [])
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": None,
        })
    def test_data_method_2(self):
        r = JSONRPC10Request(method="add", params=[])
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": None,
        })
    def test_data_params_1(self):
        r = JSONRPC10Request("add", params=[], _id=None)
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": None,
        })
    def test_data_params_2(self):
        r = JSONRPC10Request("add", ())
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": None,
        })
    def test_data_params_3(self):
        r = JSONRPC10Request("add", (1, 2))
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [1, 2],
            "id": None,
        })
    # Notifications always serialize with id: None, whatever _id was given.
    def test_data_id_1(self):
        r = JSONRPC10Request("add", [], _id="null")
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": "null",
        })
    def test_data_id_1_notification(self):
        r = JSONRPC10Request("add", [], _id="null", is_notification=True)
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": None,
        })
    def test_data_id_2(self):
        r = JSONRPC10Request("add", [], _id=None)
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": None,
        })
    def test_data_id_2_notification(self):
        r = JSONRPC10Request("add", [], _id=None, is_notification=True)
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": None,
        })
    def test_data_id_3(self):
        r = JSONRPC10Request("add", [], _id="id")
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": "id",
        })
    def test_data_id_3_notification(self):
        r = JSONRPC10Request("add", [], _id="id", is_notification=True)
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": None,
        })
    def test_data_id_4(self):
        r = JSONRPC10Request("add", [], _id=0)
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": 0,
        })
    def test_data_id_4_notification(self):
        r = JSONRPC10Request("add", [], _id=0, is_notification=True)
        self.assertEqual(json.loads(r.json), r.data)
        self.assertEqual(r.data, {
            "method": "add",
            "params": [],
            "id": None,
        })
    # --- notification semantics (JSON-RPC 1.0: id is null) ---
    def test_is_notification(self):
        r = JSONRPC10Request("add", [])
        self.assertTrue(r.is_notification)
        r = JSONRPC10Request("add", [], _id=None)
        self.assertTrue(r.is_notification)
        r = JSONRPC10Request("add", [], _id="null")
        self.assertFalse(r.is_notification)
        r = JSONRPC10Request("add", [], _id=0)
        self.assertFalse(r.is_notification)
        r = JSONRPC10Request("add", [], is_notification=True)
        self.assertTrue(r.is_notification)
        r = JSONRPC10Request("add", [], is_notification=True, _id=None)
        self.assertTrue(r.is_notification)
        r = JSONRPC10Request("add", [], is_notification=True, _id=0)
        self.assertTrue(r.is_notification)
    def test_set_unset_notification_keep_id(self):
        r = JSONRPC10Request("add", [], is_notification=True, _id=0)
        self.assertTrue(r.is_notification)
        self.assertEqual(r.data["id"], None)
        r.is_notification = False
        self.assertFalse(r.is_notification)
        self.assertEqual(r.data["id"], 0)
    def test_error_if_notification_true_but_id_none(self):
        r = JSONRPC10Request("add", [], is_notification=True, _id=None)
        with self.assertRaises(ValueError):
            r.is_notification = False
    # --- from_json parsing: required keys and strict member set ---
    def test_from_json_invalid_request_method(self):
        str_json = json.dumps({
            "params": [1, 2],
            "id": 0,
        })
        with self.assertRaises(JSONRPCInvalidRequestException):
            JSONRPC10Request.from_json(str_json)
    def test_from_json_invalid_request_params(self):
        str_json = json.dumps({
            "method": "add",
            "id": 0,
        })
        with self.assertRaises(JSONRPCInvalidRequestException):
            JSONRPC10Request.from_json(str_json)
    def test_from_json_invalid_request_id(self):
        str_json = json.dumps({
            "method": "add",
            "params": [1, 2],
        })
        with self.assertRaises(JSONRPCInvalidRequestException):
            JSONRPC10Request.from_json(str_json)
    def test_from_json_invalid_request_extra_data(self):
        str_json = json.dumps({
            "method": "add",
            "params": [1, 2],
            "id": 0,
            "is_notification": True,
        })
        with self.assertRaises(JSONRPCInvalidRequestException):
            JSONRPC10Request.from_json(str_json)
    def test_from_json_request(self):
        str_json = json.dumps({
            "method": "add",
            "params": [1, 2],
            "id": 0,
        })
        request = JSONRPC10Request.from_json(str_json)
        self.assertTrue(isinstance(request, JSONRPC10Request))
        self.assertEqual(request.method, "add")
        self.assertEqual(request.params, [1, 2])
        self.assertEqual(request._id, 0)
        self.assertFalse(request.is_notification)
    def test_from_json_request_notification(self):
        str_json = json.dumps({
            "method": "add",
            "params": [1, 2],
            "id": None,
        })
        request = JSONRPC10Request.from_json(str_json)
        self.assertTrue(isinstance(request, JSONRPC10Request))
        self.assertEqual(request.method, "add")
        self.assertEqual(request.params, [1, 2])
        self.assertEqual(request._id, None)
        self.assertTrue(request.is_notification)
    def test_from_json_string_not_dict(self):
        with self.assertRaises(ValueError):
            JSONRPC10Request.from_json("[]")
        with self.assertRaises(ValueError):
            JSONRPC10Request.from_json("0")
    def test_data_setter(self):
        # .data only accepts a dict payload.
        request = JSONRPC10Request(**self.request_params)
        with self.assertRaises(ValueError):
            request.data = []
        with self.assertRaises(ValueError):
            request.data = ""
        with self.assertRaises(ValueError):
            request.data = None
class TestJSONRPC10Response(unittest.TestCase):
    """ Test JSONRPC10Response functionality."""

    def setUp(self):
        # One minimal successful response and one minimal error response,
        # both keyed the way JSONRPC10Response's constructor expects.
        self.response_success_params = {
            "result": "",
            "error": None,
            "_id": 1,
        }
        self.response_error_params = {
            "result": None,
            "error": {"code": 1, "message": "error"},
            "_id": 1,
        }

    def test_correct_init(self):
        """Both success and error responses can be constructed."""
        JSONRPC10Response(**self.response_success_params)
        JSONRPC10Response(**self.response_error_params)

    def test_validation_incorrect_no_parameters(self):
        """Constructing with no arguments at all is invalid."""
        with self.assertRaises(ValueError):
            JSONRPC10Response()

    def test_validation_success_incorrect(self):
        """A success response without an id is invalid."""
        params = self.response_success_params
        del params["_id"]
        with self.assertRaises(ValueError):
            JSONRPC10Response(**params)

    def test_validation_error_incorrect(self):
        """An error response without an id is invalid."""
        params = self.response_error_params
        del params["_id"]
        with self.assertRaises(ValueError):
            JSONRPC10Response(**params)

    def _test_validation_incorrect_result_and_error(self):
        # @todo: remove
        # It is OK because result is an empty string, it is still result
        with self.assertRaises(ValueError):
            JSONRPC10Response(result="", error="", _id=0)
        response = JSONRPC10Response(error="", _id=0)
        with self.assertRaises(ValueError):
            response.result = ""

    def test_data(self):
        """The json property serializes exactly the data dict."""
        response = JSONRPC10Response(result="", _id=0)
        self.assertEqual(json.loads(response.json), response.data)
        self.assertEqual(response.data, {"result": "", "id": 0})

    def test_data_setter(self):
        """The data property only accepts a dict."""
        response = JSONRPC10Response(**self.response_success_params)
        for bad_value in ([], "", None):
            with self.assertRaises(ValueError):
                response.data = bad_value

    def test_validation_id(self):
        """The id handed to the constructor is kept verbatim."""
        response = JSONRPC10Response(**self.response_success_params)
        self.assertEqual(response._id, self.response_success_params["_id"])
| |
# -*- coding: utf-8 -*-
import sys
import os
__version__ = "0.6.7"
try:
import setuptools
from setuptools import setup, find_packages
packages = find_packages()
except:
setuptools = None
from distutils.core import setup
packages = ['coherence',]
def find_packages(path):
for f in os.listdir(path):
if f[0] == '.':
continue
if os.path.isdir(os.path.join(path,f)) == True:
next_path = os.path.join(path,f)
if '__init__.py' in os.listdir(next_path):
packages.append(next_path.replace(os.sep,'.'))
find_packages(next_path)
find_packages('coherence')
from distutils.core import Command
from distutils import log
class build_docs(Command):
    """Distutils command: build man pages from the rst sources in DOCPAGES."""

    description = "build documentation from rst-files"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        # DOCPAGES is defined at module level further down this file.
        self.docpages = DOCPAGES

    def run(self):
        # Inject the current version as an rst substitution so every page
        # shows the version it was built from.
        substitutions = ('.. |VERSION| replace:: '
                         + self.distribution.get_version())
        for writer, rstfilename, outfilename in self.docpages:
            # BUG FIX: `distutils.dir_util` was never imported under that
            # name (only `from distutils import log` exists above), so
            # mkpath() raised at runtime; import the submodule explicitly.
            from distutils import dir_util
            dir_util.mkpath(os.path.dirname(outfilename))
            log.info("creating %s page %s", writer, outfilename)
            if not self.dry_run:
                try:
                    with open(rstfilename) as rstfile:
                        rsttext = rstfile.read()
                except IOError as e:  # `except IOError, e` is py2-only syntax
                    raise SystemExit(e)
                rsttext = '\n'.join((substitutions, rsttext))
                # docutils.core does not offer easy reading from a
                # string into a file, so we need to do it ourself :-(
                doc = docutils.core.publish_string(source=rsttext,
                                                   source_path=rstfilename,
                                                   writer_name=writer)
                try:
                    # BUG FIX: the original did
                    #   rsttext = open(outfilename, 'w').write(doc)
                    # which assigned write()'s None return value and leaked
                    # the file handle.
                    with open(outfilename, 'w') as outfile:
                        outfile.write(doc)
                except IOError as e:
                    raise SystemExit(e)
cmdclass = {}

# Man pages can only be rebuilt when docutils is importable; otherwise the
# pre-built pages shipped with the tarball are used.
try:
    import docutils.core
    import docutils.io
    import docutils.writers.manpage
    import distutils.command.build
except ImportError:
    log.warn("docutils not installed, can not build man pages. "
             "Using pre-build ones.")
else:
    # Hook "build_docs" into the standard "build" command chain.
    distutils.command.build.build.sub_commands.append(('build_docs', None))
    cmdclass['build_docs'] = build_docs

DOCPAGES = (
    ('manpage', 'docs/man/coherence.rst', 'docs/man/coherence.1'),
)
# Distribution metadata handed to setup() at the bottom of this file.
# When setuptools is available, install_requires and entry_points are
# added to this dict further below.
setup_args = {
    'name':"Coherence",
    'version':__version__,
    'description':"""Coherence - DLNA/UPnP framework for the digital living""",
    'long_description':"""
Coherence is a framework written in Python, providing a variety of
UPnP MediaServer and UPnP MediaRenderer implementations for instant
use.
It includes an UPnP ControlPoint, which is accessible via D-Bus too.
Furthermore it enables your application to participate in
digital living networks, at the moment primarily the DLNA/UPnP universe.
Its objective and demand is to relieve your application from all the
membership/the UPnP related tasks as much as possible.
New in this %s - the Red-Nosed Reindeer - release
* new MediaServer backends that allow access to
* Banshee - exports audio and video files from Banshees media db
(http://banshee-project.org/)
* FeedStore - a MediaServer serving generic RSS feeds
* Playlist - exposes the list of video/audio streams from a m3u
playlist (e.g. web TV listings published by french ISPs such as
Free, SFR...)
* YAMJ - serves the movie/TV series data files and metadata from a
given YAMJ (Yet Another Movie Jukebox) library
(http://code.google.com/p/moviejukebox/)
* updates on Mirabeau - our "UPnP over XMPP" bridge
* simplifications in the D-Bus API
* a first implementation of an JSON/REST API
* advancements of the GStreamer MediaRenderer, supporting now GStreamers
playbin2
* upgrade of the DVB-Daemon MediaServer
* refinements in the transcoding section, having now the choice to use
GStreamer pipelines or external processes like mencoder
* more 'compatibility' improvements for different devices (e.g.
Samsung TVs or Apache Felix)
* and - as every time - the usual bugfixes and enhancements
Kudos go to:
* Benjamin (lightyear) Kampmann,
* Charlie (porthose) Smotherman
* Dominik (schrei5) Ruf,
* Frank (dev) Scholz,
* Friedrich (frinring) Kossebau,
* Jean-Michel (jmsizun) Sizun,
* Philippe (philn) Normand,
* Sebastian (sebp) Poelsterl,
* Zaheer (zaheerm) Merali
""" % __version__,
    'author':"Frank Scholz",
    'author_email':'dev@coherence-project.org',
    'license' : "MIT",
    'packages':packages,
    'scripts' : ['bin/coherence','misc/Desktop-Applet/applet-coherence'],
    'url' : "http://coherence-project.org",
    'download_url' : 'http://coherence-project.org/download/Coherence-%s.tar.gz' % __version__,
    'keywords':['UPnP', 'DLNA', 'multimedia', 'gstreamer'],
    'classifiers' : ['Development Status :: 5 - Production/Stable',
                     'Environment :: Console',
                     'Environment :: Web Environment',
                     'License :: OSI Approved :: MIT License',
                     'Operating System :: OS Independent',
                     'Programming Language :: Python',
                     ],
    # Non-Python files installed alongside the packages (service
    # descriptions, icons, web UI assets).
    'package_data' : {
        'coherence': ['upnp/core/xml-service-descriptions/*.xml',
                      'ui/icons/*.png',
                      'web/static/*.css','web/static/*.js'],
        'misc': ['Desktop-Applet/*.png',
                 'device-icons/*.png'],
    },
}
if setuptools:
    # Hard dependencies; these can only be enforced when setuptools
    # drives the installation.
    requirements = [
        'ConfigObj >= 4.3',
        'Twisted >= 8.2',
        'zope.interface',
        'louie',
    ]
    # These platforms additionally need Netifaces for interface discovery.
    if sys.platform in ('win32', 'sunos5'):
        requirements.append('Netifaces >= 0.4')
    setup_args['install_requires'] = requirements

    # Backend plugins discovered at runtime via pkg_resources entry points.
    setup_args['entry_points'] = """
[coherence.plugins.backend.media_server]
FSStore = coherence.backends.fs_storage:FSStore
MediaStore = coherence.backends.mediadb_storage:MediaStore
ElisaMediaStore = coherence.backends.elisa_storage:ElisaMediaStore
FlickrStore = coherence.backends.flickr_storage:FlickrStore
AxisCamStore = coherence.backends.axiscam_storage:AxisCamStore
BuzztardStore = coherence.backends.buzztard_control:BuzztardStore
IRadioStore = coherence.backends.iradio_storage:IRadioStore
LastFMStore = coherence.backends.lastfm_storage:LastFMStore
AmpacheStore = coherence.backends.ampache_storage:AmpacheStore
TrackerStore = coherence.backends.tracker_storage:TrackerStore
DVBDStore = coherence.backends.dvbd_storage:DVBDStore
AppleTrailersStore = coherence.backends.appletrailers_storage:AppleTrailersStore
LolcatsStore = coherence.backends.lolcats_storage:LolcatsStore
TEDStore = coherence.backends.ted_storage:TEDStore
BBCStore = coherence.backends.bbc_storage:BBCStore
SWR3Store = coherence.backends.swr3_storage:SWR3Store
Gallery2Store = coherence.backends.gallery2_storage:Gallery2Store
YouTubeStore = coherence.backends.youtube_storage:YouTubeStore
MiroGuideStore = coherence.backends.miroguide_storage:MiroGuideStore
ITVStore = coherence.backends.itv_storage:ITVStore
PicasaStore = coherence.backends.picasa_storage:PicasaStore
TestStore = coherence.backends.test_storage:TestStore
PlaylistStore = coherence.backends.playlist_storage:PlaylistStore
YamjStore = coherence.backends.yamj_storage:YamjStore
BansheeStore = coherence.backends.banshee_storage:BansheeStore
FeedStore = coherence.backends.feed_storage:FeedStore
RadiotimeStore = coherence.backends.radiotime_storage:RadiotimeStore
AudioCDStore = coherence.backends.audiocd_storage:AudioCDStore
[coherence.plugins.backend.media_renderer]
ElisaPlayer = coherence.backends.elisa_renderer:ElisaPlayer
GStreamerPlayer = coherence.backends.gstreamer_renderer:GStreamerPlayer
BuzztardPlayer = coherence.backends.buzztard_control:BuzztardPlayer
[coherence.plugins.backend.binary_light]
SimpleLight = coherence.backends.light:SimpleLight
[coherence.plugins.backend.dimmable_light]
BetterLight = coherence.backends.light:BetterLight
"""

setup(cmdclass=cmdclass, **setup_args)
| |
import numpy as np
import os.path as op
from numpy.testing import assert_array_almost_equal, assert_allclose
from scipy.signal import welch
import pytest
from mne import pick_types, Epochs, read_events
from mne.io import RawArray, read_raw_fif
from mne.utils import catch_logging
from mne.time_frequency import psd_welch, psd_multitaper, psd_array_welch
# Sample recordings shipped with mne's io tests, resolved relative to
# this file so the tests run from any working directory.
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def test_psd_nan():
    """Test handling of NaN in psd_array_welch."""
    n_samples, n_fft, n_overlap = 2048, 1024, 512
    rng = np.random.RandomState(0)
    x = rng.randn(1, n_samples)
    # Reference PSD from the clean leading segment only.
    psds, freqs = psd_array_welch(x[:, :n_fft + n_overlap], float(n_fft),
                                  n_fft=n_fft, n_overlap=n_overlap)
    x[:, n_fft + n_overlap:] = np.nan  # what Raw.get_data() will give us
    psds_2, freqs_2 = psd_array_welch(x, float(n_fft), n_fft=n_fft,
                                      n_overlap=n_overlap)
    assert_allclose(freqs, freqs_2)
    assert_allclose(psds, psds_2)
    # The 1-d code path must agree with the 2-d one.
    psds_2, freqs_2 = psd_array_welch(
        x[0], float(n_fft), n_fft=n_fft, n_overlap=n_overlap)
    assert_allclose(freqs, freqs_2)
    assert_allclose(psds[0], psds_2)
    # With all defaults, verify the parameters reported in the debug log.
    with catch_logging() as log:
        psd_array_welch(x, float(n_fft), verbose='debug')
    log = log.getvalue()
    assert 'using 256-point FFT on 256 samples with 0 overlap' in log
    assert 'hamming window' in log
def test_psd():
    """Tests the welch and multitaper PSD."""
    raw = read_raw_fif(raw_fname)
    picks_psd = [0, 1]
    # Populate raw with sinusoids so the PSD has known, well-separated peaks.
    rng = np.random.RandomState(40)
    data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
    freqs_sig = [8., 50.]
    for ix, freq in zip(picks_psd, freqs_sig):
        data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
    first_samp = raw._first_samps[0]
    raw = RawArray(data, raw.info)
    tmin, tmax = 0, 20  # use a few seconds of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70Hz
    n_fft = 128
    # -- Raw --
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    kws_welch = dict(n_fft=n_fft)
    kws_mt = dict(low_bias=True)
    funcs = [(psd_welch, kws_welch),
             (psd_multitaper, kws_mt)]
    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)
        kws.update(verbose='debug')
        if func is psd_welch:
            kws.update(window='hann')
        with catch_logging() as log:
            psds, freqs = func(raw, proj=False, **kws)
        log = log.getvalue()
        if func is psd_welch:
            # The debug log must echo the FFT/overlap/window actually used.
            assert f'{n_fft}-point FFT on {n_fft} samples with 0 overl' in log
            assert 'hann window' in log
        psds_proj, freqs_proj = func(raw, proj=True, **kws)
        assert psds.shape == (len(kws['picks']), len(freqs))
        assert np.sum(freqs < 0) == 0
        assert np.sum(psds < 0) == 0
        # Is power found where it should be
        ixs_max = np.argmax(psds, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert (np.abs(ixmax - ixtrue) < 2)
        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj)
        # Array input shouldn't work
        pytest.raises(ValueError, func, raw[:3, :20][0])
    # test n_per_seg in psd_welch (and padding)
    psds1, freqs1 = psd_welch(raw, proj=False, n_fft=128, n_per_seg=128,
                              **kws_psd)
    psds2, freqs2 = psd_welch(raw, proj=False, n_fft=256, n_per_seg=128,
                              **kws_psd)
    # Doubling n_fft (zero-padding) doubles the frequency resolution.
    assert (len(freqs1) == np.floor(len(freqs2) / 2.))
    assert (psds1.shape[-1] == np.floor(psds2.shape[-1] / 2.))
    # Invalid parameter combinations must raise informative errors.
    kws_psd.update(dict(n_fft=tmax * 1.1 * raw.info['sfreq']))
    with pytest.raises(ValueError, match='n_fft is not allowed to be > n_tim'):
        psd_welch(raw, proj=False, n_per_seg=None,
                  **kws_psd)
    kws_psd.update(dict(n_fft=128, n_per_seg=64, n_overlap=90))
    with pytest.raises(ValueError, match='n_overlap cannot be greater'):
        psd_welch(raw, proj=False, **kws_psd)
    with pytest.raises(ValueError, match='No frequencies found'):
        psd_array_welch(np.zeros((1, 1000)), 1000., fmin=10, fmax=1)
    # -- Epochs/Evoked --
    events = read_events(event_fname)
    events[:, 0] -= first_samp
    tmin, tmax, event_id = -0.5, 0.5, 1
    epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
                    proj=False, preload=True, baseline=None)
    evoked = epochs.average()
    tmin_full, tmax_full = -1, 1
    epochs_full = Epochs(raw, events[:10], event_id, tmin_full, tmax_full,
                         picks=picks_psd, proj=False, preload=True,
                         baseline=None)
    kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                   picks=picks_psd)  # Common to all
    funcs = [(psd_welch, kws_welch),
             (psd_multitaper, kws_mt)]
    for func, kws in funcs:
        kws = kws.copy()
        kws.update(kws_psd)
        psds, freqs = func(
            epochs[:1], proj=False, **kws)
        psds_proj, freqs_proj = func(
            epochs[:1], proj=True, **kws)
        # Cropped full-length epochs must give the same PSD as short epochs.
        psds_f, freqs_f = func(
            epochs_full[:1], proj=False, **kws)
        # this one will fail if you add for example 0.1 to tmin
        assert_array_almost_equal(psds, psds_f, 27)
        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds, psds_proj, 27)
        # Is power found where it should be
        ixs_max = np.argmax(psds.mean(0), axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs))
            assert (np.abs(ixmax - ixtrue) < 2)
        assert (psds.shape == (1, len(kws['picks']), len(freqs)))
        assert (np.sum(freqs < 0) == 0)
        assert (np.sum(psds < 0) == 0)
        # Array input shouldn't work
        pytest.raises(ValueError, func, epochs.get_data())
        # Testing evoked (doesn't work w/ compute_epochs_psd)
        psds_ev, freqs_ev = func(
            evoked, proj=False, **kws)
        psds_ev_proj, freqs_ev_proj = func(
            evoked, proj=True, **kws)
        # Is power found where it should be
        ixs_max = np.argmax(psds_ev, axis=1)
        for ixmax, ifreq in zip(ixs_max, freqs_sig):
            # Find nearest frequency to the "true" freq
            ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
            assert (np.abs(ixmax - ixtrue) < 2)
        # Make sure the projection doesn't change channels it shouldn't
        assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
        assert (psds_ev.shape == (len(kws['picks']), len(freqs)))
@pytest.mark.parametrize('kind', ('raw', 'epochs', 'evoked'))
def test_psd_welch_average_kwarg(kind):
    """Test `average` kwarg of psd_welch()."""
    raw = read_raw_fif(raw_fname)
    picks_psd = [0, 1]

    # Inject two known sinusoids so the PSD content is deterministic.
    rng = np.random.RandomState(40)
    data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
    freqs_sig = [8., 50.]
    for ix, freq in zip(picks_psd, freqs_sig):
        data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
    first_samp = raw._first_samps[0]
    raw = RawArray(data, raw.info)

    tmin, tmax = -0.5, 0.5
    fmin, fmax = 0, np.inf
    n_fft = 256
    n_per_seg = 128
    n_overlap = 0
    event_id = 2

    events = read_events(event_fname)
    events[:, 0] -= first_samp

    kws = dict(fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax, n_fft=n_fft,
               n_per_seg=n_per_seg, n_overlap=n_overlap, picks=picks_psd)

    if kind == 'raw':
        inst = raw
    elif kind in ('epochs', 'evoked'):
        inst = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
                      proj=False, preload=True, baseline=None)
        if kind == 'evoked':
            inst = inst.average()
    else:
        raise ValueError('Unknown parametrization passed to test, check test '
                         'for typos.')

    psds_mean, freqs_mean = psd_welch(inst=inst, average='mean', **kws)
    psds_median, freqs_median = psd_welch(inst=inst, average='median', **kws)
    psds_unagg, freqs_unagg = psd_welch(inst=inst, average=None, **kws)

    # Frequencies should be equal across all "average" types, as we feed in
    # the exact same data.
    assert_allclose(freqs_mean, freqs_median)
    assert_allclose(freqs_mean, freqs_unagg)

    # For `average=None`, the last dimension contains the un-aggregated
    # segments.
    assert psds_mean.shape == psds_median.shape
    assert psds_mean.shape == psds_unagg.shape[:-1]
    assert_allclose(psds_mean, psds_unagg.mean(axis=-1))

    # Compare with manual median calculation
    assert_allclose(psds_median, np.median(psds_unagg, axis=-1))
@pytest.mark.slowtest
def test_compares_psd():
    """Test PSD estimation on raw for plt.psd and scipy.signal.welch."""
    raw = read_raw_fif(raw_fname)

    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
                       exclude=exclude)[:2]

    tmin, tmax = 0, 10  # use the first 60s of data
    fmin, fmax = 2, 70  # look at frequencies between 5 and 70Hz
    n_fft = 2048

    # Compute psds with the new implementation using Welch
    psds_welch, freqs_welch = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,
                                        fmax=fmax, proj=False, picks=picks,
                                        n_fft=n_fft, n_jobs=1)

    # Reference computation straight through scipy.signal.welch.
    start, stop = raw.time_as_index([tmin, tmax])
    data, times = raw[picks, start:(stop + 1)]
    spectra = [welch(channel, fs=raw.info['sfreq'], nperseg=n_fft, noverlap=0)
               for channel in data]
    freqs_mpl = spectra[0][0]
    psds_mpl = np.array([psd for _, psd in spectra])

    # Restrict the reference to the same frequency band.
    keep = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
    freqs_mpl = freqs_mpl[keep]
    psds_mpl = psds_mpl[:, keep]

    assert_array_almost_equal(psds_welch, psds_mpl)
    assert_array_almost_equal(freqs_welch, freqs_mpl)

    assert psds_welch.shape == (len(picks), len(freqs_welch))
    assert psds_mpl.shape == (len(picks), len(freqs_mpl))

    assert np.sum(freqs_welch < 0) == 0
    assert np.sum(freqs_mpl < 0) == 0

    assert np.sum(psds_welch < 0) == 0
    assert np.sum(psds_mpl < 0) == 0
| |
# -*- coding: utf-8 -*-
"""Provide full functionality for the create wizard.
This section is aimed at the engineer
"""
from pprint import pprint
from datetime import datetime
from app import socketio
from app.modules.utils.logger import logger
from app.modules.utils.static import data
from app.modules.utils.driver import browser, wait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from selenium.common.exceptions import InvalidElementStateException
class EngineerCreate:
    """Run through the entire create process."""

    def __init__(self, serial_number, job_type):
        """Drive every page of the create wizard for one repair job.

        Each step emits a progress/failure message to the UI via socketio;
        a failing step is reported but does not abort the run.
        """
        job_type = self._humanize_job_type(job_type)
        logger.debug('running _go_to_create')
        if not self._go_to_create():
            socketio.emit('my response', {'data': 'failed navigation', }, namespace='/')
        logger.debug('running _create_welcome_fill')
        if not self._create_welcome_fill():
            socketio.emit('my response', {'data': 'Failed create welcome fill', }, namespace='/')
        logger.debug('running _job_site_details')
        if not self._job_site_details():
            socketio.emit('my response', {'data': 'Failed first job site details', }, namespace='/')
        logger.debug('running _job_item_details')
        if not self._job_item_details(serial_number):  # good to here
            socketio.emit('my response', {'data': 'Failed job item details', }, namespace='/')
        else:
            socketio.emit('my response', {'data': 'Succesfully passed job item details', }, namespace='/')
        logger.debug('running _job_site_details')
        if not self._job_site_details():
            socketio.emit('my response', {'data': 'Failed second job site details', }, namespace='/')
        logger.debug('running _ship_site_details')
        if not self._ship_site_details(job_type):
            socketio.emit('my response', {'data': 'failed ship site details', }, namespace='/')
        else:
            socketio.emit('my response', {'data': 'Succesfully passed job ship site details', }, namespace='/')
        if not self._job_details(job_type):
            socketio.emit('my response', {'data': 'failed job details', }, namespace='/')
        else:
            socketio.emit('my response', {'data': 'passed job details', }, namespace='/')
        logger.debug('running _complete')
        if not self._complete():
            socketio.emit('my response', {'data': 'failed completion', }, namespace='/')

    @staticmethod
    def _humanize_job_type(job_type):
        """Map the UI job-type label to its wizard job-type code.

        Returns False for unknown labels (kept for backward compatibility).
        """
        return {
            'Imac Refurb': 'ZR2',
            'Adhoc TCG': 'ZR3',
            'Zulu VP': 'VP',
            'Mid Counties': 'ZR6',
        }.get(job_type, False)

    @staticmethod
    def _go_to_create():
        """Navigate the browser to the create-wizard start page."""
        socketio.emit('my response', {'data': 'navigating to create wizard', }, namespace='/')
        browser.get(data['url']['create_wizard']['add'])
        return True

    @staticmethod
    def _create_welcome_fill():
        """Fill the welcome page with the workshop site code."""
        socketio.emit('my response', {'data': 'populating welcome page', }, namespace='/')
        dt = datetime.now()
        wait.until(ec.text_to_be_present_in_element(
            (By.ID, 'scmaster_cplMainContent_lblWelcome'),
            'Welcome the the Repair Job Creation Wizard. Please enter your workshop site code.'))
        field = browser.find_element_by_id(data['id']['create_wizard']['workshop_site'])
        field.clear()
        field.send_keys('STOWS')
        browser.execute_script(data['script']['create_wizard']['workshop_site'])
        if not EngineerCreate._handle_modal(expected_value='STOWS'):
            socketio.emit('my response', {'data': 'Failed to handle modal popup', }, namespace='/')
            return False
        # The wizard fills the book-in date itself; waiting for it ensures
        # the postback has finished before clicking Next.
        wait.until(ec.text_to_be_present_in_element_value(
            (By.ID, data['id']['create_wizard']['book_in_date']),
            f'{dt.month}/{dt.day}/{dt.year}'))
        browser.find_element_by_id(data['id']['create_wizard']['next']).click()
        return True

    @staticmethod
    def _job_site_details():
        """Accept the pre-filled site details page and move on."""
        socketio.emit('my response', {'data': 'populating site details', }, namespace='/')
        wait.until(ec.element_to_be_clickable((By.ID, data['id']['create_wizard']['site_num'])))
        browser.find_element_by_id(data['id']['create_wizard']['next']).click()
        return True

    @staticmethod
    def _job_item_details(serial_number):
        """Enter the serial number and confirm it via the lookup modal."""
        socketio.emit('my response', {'data': 'populating job item details', }, namespace='/')
        browser.find_element_by_id(data['id']['create_wizard']['serial_number']).send_keys(serial_number)
        browser.execute_script(data['script']['create_wizard']['serial_number'])
        if not EngineerCreate._handle_modal(expected_value=serial_number):
            return False
        next_button = wait.until(ec.element_to_be_clickable((By.ID, data['id']['create_wizard']['next'])))
        next_button.click()
        return True

    @staticmethod
    def _ship_site_details(job_type):
        """Populate the ship-site page; the site is derived from the job type."""
        wait.until(ec.presence_of_element_located((By.ID, 'scmaster_cplMainContent_cboShipSiteNum')))
        if job_type == 'ZR2':
            ship_site = 'IMACREP'
            ship_site_name = 'IMAC repaired equipment'
        elif job_type == 'ZR6':
            ship_site = 'STOFSLGDSI'
            ship_site_name = 'Stoke FSL'
        else:
            ship_site = 'STOKGOODS'
            ship_site_name = 'Stoke Central Stores'
        field = browser.find_element_by_id(data['id']['create_wizard']['ship_site_num'])
        field.clear()
        field.send_keys(ship_site)
        browser.execute_script(data['script']['create_wizard']['ship_site_num'])
        if not EngineerCreate._handle_modal(expected_value=ship_site):
            # BUG FIX: this used to be a bare `return` (None), indistinguishable
            # from the missing success return below.
            return False
        # may need the element updating to the shipsite descrtiption
        socketio.emit('my response', {'data': f'searching for {ship_site_name}', }, namespace='/')
        wait.until(ec.text_to_be_present_in_element_value((By.ID, data['id']['create_wizard']['name']), ship_site_name))
        browser.find_element_by_id(data['id']['create_wizard']['next']).click()
        # BUG FIX: the method previously fell off the end returning None, so
        # __init__ reported "failed ship site details" even when the step
        # succeeded.
        return True

    @staticmethod
    def _job_details(job_type):
        """Fill job type, flow code and problem text, then finish the wizard."""
        # NOTE(review): job_type may be False when _humanize_job_type did not
        # recognise the label; `'VP' in False` would raise TypeError — confirm
        # upstream validation before hardening here.
        if 'VP' in job_type:
            job_type = 'ZR1'
        wait.until(ec.presence_of_element_located((By.ID, data['id']['create_wizard']['job_type']))).send_keys(job_type)
        browser.find_element_by_id(data['id']['create_wizard']['flow_code']).send_keys('SWBO%')
        browser.find_element_by_id(data['id']['create_wizard']['problem']).send_keys(
            ('This call was automatically generated with'
             ' T-Infinity created by Kieran Wynne'))
        browser.execute_script(data['script']['create_wizard']['flow_code'])
        if not EngineerCreate._handle_modal(expected_value='SWBOOKIN'):
            return False
        wait.until(ec.text_to_be_present_in_element_value((By.ID, data['id']['create_wizard']['position']), '1'))
        browser.execute_script(data['script']['create_wizard']['job_type'])
        if not EngineerCreate._handle_modal(expected_value=job_type):
            return False
        finish_button = wait.until(ec.presence_of_element_located((By.ID, 'scmaster_cplMainContent_cmdFinish')))
        finish_button.click()
        return True

    @staticmethod
    def _complete():
        """Wait for the confirmation page and close out the wizard."""
        wait.until(ec.presence_of_element_located((By.ID, data['id']['create_wizard']['job_numbers'])))
        browser.find_element_by_id(data['id']['create_wizard']['modify_repair_job']).click()
        # NOTE: 'finsih' is the actual key spelling in the data table (sic).
        browser.find_element_by_id(data['id']['create_wizard']['finsih']).click()
        return True

    @staticmethod
    def _handle_modal(frame_id='fraModalPopup', expected_value=None):
        """Confirm the lookup modal; True if the expected result row appeared.

        NOTE(review): `expected_value` is currently unused — the check is
        purely on the number of option rows; kept for interface stability.
        """
        # this may need sorting out
        wait.until(ec.frame_to_be_available_and_switch_to_it((By.ID, frame_id)))
        options = browser.find_elements_by_css_selector(data['css']['modal']['options'])
        # BUG FIX: the old `'options' not in locals()` guard was dead code
        # (the name is always bound by the assignment above), and
        # `len(options) is 3` relied on CPython small-int identity; use
        # equality instead.
        if len(options) == 3:
            socketio.emit('my response', {'data': 'found expected value in modal', }, namespace='/')
            options[1].click()
            browser.switch_to.default_content()
            return True
        socketio.emit('my response', {'data': 'no options found in modal', }, namespace='/')
        browser.switch_to.default_content()
        return False
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
# Directives for the (legacy) pychecker static analyser.
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""

# Newer runtimes expose ExtendableProtocolMessage (proto2 extension support);
# fall back to the plain ProtocolMessage base class when it is absent.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.datastore.datastore_v3_pb import *
import google.appengine.datastore.datastore_v3_pb
from google.appengine.datastore.entity_pb import *
import google.appengine.datastore.entity_pb
class Request(ProtocolBuffer.ProtocolMessage):
has_service_name_ = 0
service_name_ = ""
has_method_ = 0
method_ = ""
has_request_ = 0
request_ = ""
has_request_id_ = 0
request_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def service_name(self): return self.service_name_
def set_service_name(self, x):
self.has_service_name_ = 1
self.service_name_ = x
def clear_service_name(self):
if self.has_service_name_:
self.has_service_name_ = 0
self.service_name_ = ""
def has_service_name(self): return self.has_service_name_
def method(self): return self.method_
def set_method(self, x):
self.has_method_ = 1
self.method_ = x
def clear_method(self):
if self.has_method_:
self.has_method_ = 0
self.method_ = ""
def has_method(self): return self.has_method_
def request(self): return self.request_
def set_request(self, x):
self.has_request_ = 1
self.request_ = x
def clear_request(self):
if self.has_request_:
self.has_request_ = 0
self.request_ = ""
def has_request(self): return self.has_request_
def request_id(self): return self.request_id_
def set_request_id(self, x):
self.has_request_id_ = 1
self.request_id_ = x
def clear_request_id(self):
if self.has_request_id_:
self.has_request_id_ = 0
self.request_id_ = ""
def has_request_id(self): return self.has_request_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_service_name()): self.set_service_name(x.service_name())
if (x.has_method()): self.set_method(x.method())
if (x.has_request()): self.set_request(x.request())
if (x.has_request_id()): self.set_request_id(x.request_id())
def Equals(self, x):
if x is self: return 1
if self.has_service_name_ != x.has_service_name_: return 0
if self.has_service_name_ and self.service_name_ != x.service_name_: return 0
if self.has_method_ != x.has_method_: return 0
if self.has_method_ and self.method_ != x.method_: return 0
if self.has_request_ != x.has_request_: return 0
if self.has_request_ and self.request_ != x.request_: return 0
if self.has_request_id_ != x.has_request_id_: return 0
if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_service_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: service_name not set.')
if (not self.has_method_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: method not set.')
if (not self.has_request_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: request not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.service_name_))
n += self.lengthString(len(self.method_))
n += self.lengthString(len(self.request_))
if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_service_name_):
n += 1
n += self.lengthString(len(self.service_name_))
if (self.has_method_):
n += 1
n += self.lengthString(len(self.method_))
if (self.has_request_):
n += 1
n += self.lengthString(len(self.request_))
if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
return n
def Clear(self):
self.clear_service_name()
self.clear_method()
self.clear_request()
self.clear_request_id()
def OutputUnchecked(self, out):
out.putVarInt32(18)
out.putPrefixedString(self.service_name_)
out.putVarInt32(26)
out.putPrefixedString(self.method_)
out.putVarInt32(34)
out.putPrefixedString(self.request_)
if (self.has_request_id_):
out.putVarInt32(42)
out.putPrefixedString(self.request_id_)
def OutputPartial(self, out):
if (self.has_service_name_):
out.putVarInt32(18)
out.putPrefixedString(self.service_name_)
if (self.has_method_):
out.putVarInt32(26)
out.putPrefixedString(self.method_)
if (self.has_request_):
out.putVarInt32(34)
out.putPrefixedString(self.request_)
if (self.has_request_id_):
out.putVarInt32(42)
out.putPrefixedString(self.request_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 18:
self.set_service_name(d.getPrefixedString())
continue
if tt == 26:
self.set_method(d.getPrefixedString())
continue
if tt == 34:
self.set_request(d.getPrefixedString())
continue
if tt == 42:
self.set_request_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_service_name_: res+=prefix+("service_name: %s\n" % self.DebugFormatString(self.service_name_))
if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatString(self.method_))
if self.has_request_: res+=prefix+("request: %s\n" % self.DebugFormatString(self.request_))
if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Expands a sparse {tag: value} dict into a dense tuple of size maxtag+1.

    Missing tags map to `default`; used to build the _TEXT/_TYPES tables
    at class-definition time.
    """
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kservice_name = 2
kmethod = 3
krequest = 4
krequest_id = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
2: "service_name",
3: "method",
4: "request",
5: "request_id",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.Request'
class ApplicationError(ProtocolBuffer.ProtocolMessage):
  """Generated message apphosting.ext.remote_api.ApplicationError.

  Carries an application-level error back to the remote_api caller as a
  required numeric `code` plus a required human-readable `detail` string.
  """
  # Presence flags and field storage; 0 / "" are the proto defaults.
  has_code_ = 0
  code_ = 0
  has_detail_ = 0
  detail_ = ""

  def __init__(self, contents=None):
    # Optionally deserialize from an encoded byte string.
    if contents is not None: self.MergeFromString(contents)

  # --- Field accessors (generated getter/setter/clear/has quads) ---
  def code(self): return self.code_

  def set_code(self, x):
    self.has_code_ = 1
    self.code_ = x

  def clear_code(self):
    if self.has_code_:
      self.has_code_ = 0
      self.code_ = 0

  def has_code(self): return self.has_code_

  def detail(self): return self.detail_

  def set_detail(self, x):
    self.has_detail_ = 1
    self.detail_ = x

  def clear_detail(self):
    if self.has_detail_:
      self.has_detail_ = 0
      self.detail_ = ""

  def has_detail(self): return self.has_detail_

  def MergeFrom(self, x):
    """Merges set fields of another ApplicationError into this one."""
    assert x is not self
    if (x.has_code()): self.set_code(x.code())
    if (x.has_detail()): self.set_detail(x.detail())

  def Equals(self, x):
    """Field-by-field equality, including presence bits; returns 0/1."""
    if x is self: return 1
    if self.has_code_ != x.has_code_: return 0
    if self.has_code_ and self.code_ != x.code_: return 0
    if self.has_detail_ != x.has_detail_: return 0
    if self.has_detail_ and self.detail_ != x.detail_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Returns 1 iff both required fields are set; appends reasons if not."""
    initialized = 1
    if (not self.has_code_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: code not set.')
    if (not self.has_detail_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: detail not set.')
    return initialized

  def ByteSize(self):
    """Encoded size assuming all required fields are set (+2 tag bytes)."""
    n = 0
    n += self.lengthVarInt64(self.code_)
    n += self.lengthString(len(self.detail_))
    return n + 2

  def ByteSizePartial(self):
    """Encoded size counting only fields that are actually set."""
    n = 0
    if (self.has_code_):
      n += 1
      n += self.lengthVarInt64(self.code_)
    if (self.has_detail_):
      n += 1
      n += self.lengthString(len(self.detail_))
    return n

  def Clear(self):
    self.clear_code()
    self.clear_detail()

  def OutputUnchecked(self, out):
    """Writes all fields; tag 8 = field 1 varint, tag 18 = field 2 string."""
    out.putVarInt32(8)
    out.putVarInt32(self.code_)
    out.putVarInt32(18)
    out.putPrefixedString(self.detail_)

  def OutputPartial(self, out):
    """Writes only the fields that are set (safe when uninitialized)."""
    if (self.has_code_):
      out.putVarInt32(8)
      out.putVarInt32(self.code_)
    if (self.has_detail_):
      out.putVarInt32(18)
      out.putPrefixedString(self.detail_)

  def TryMerge(self, d):
    """Decodes from decoder `d`; skips unknown tags, rejects tag 0."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_code(d.getVarInt32())
        continue
      if tt == 18:
        self.set_detail(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Human-readable text rendering of the set fields."""
    res=""
    if self.has_code_: res+=prefix+("code: %s\n" % self.DebugFormatInt32(self.code_))
    if self.has_detail_: res+=prefix+("detail: %s\n" % self.DebugFormatString(self.detail_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Expands a sparse {tag: value} dict into a dense tuple of size maxtag+1."""
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Proto field numbers.
  kcode = 1
  kdetail = 2

  # Tag -> name and tag -> wire-type tables for the generic machinery.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "code",
    2: "detail",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.ApplicationError'
class Response(ProtocolBuffer.ProtocolMessage):
  """Generated message apphosting.ext.remote_api.Response.

  Exactly one of `response` (serialized API response), `exception`
  (pickled Python exception), `application_error`, or `java_exception`
  is expected to be set by the server; all fields are optional on the wire.
  """
  # Presence flags and field storage. application_error_ is a lazily
  # constructed sub-message (None until first accessed).
  has_response_ = 0
  response_ = ""
  has_exception_ = 0
  exception_ = ""
  has_application_error_ = 0
  application_error_ = None
  has_java_exception_ = 0
  java_exception_ = ""

  def __init__(self, contents=None):
    # Lock guarding lazy construction of the application_error_ sub-message.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- Field accessors ---
  def response(self): return self.response_

  def set_response(self, x):
    self.has_response_ = 1
    self.response_ = x

  def clear_response(self):
    if self.has_response_:
      self.has_response_ = 0
      self.response_ = ""

  def has_response(self): return self.has_response_

  def exception(self): return self.exception_

  def set_exception(self, x):
    self.has_exception_ = 1
    self.exception_ = x

  def clear_exception(self):
    if self.has_exception_:
      self.has_exception_ = 0
      self.exception_ = ""

  def has_exception(self): return self.has_exception_

  def application_error(self):
    # Double-checked lazy init under the lock so concurrent readers share
    # a single ApplicationError instance.
    if self.application_error_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.application_error_ is None: self.application_error_ = ApplicationError()
      finally:
        self.lazy_init_lock_.release()
    return self.application_error_

  def mutable_application_error(self): self.has_application_error_ = 1; return self.application_error()

  def clear_application_error(self):
    # Clear the contents but keep the instance around for reuse.
    if self.has_application_error_:
      self.has_application_error_ = 0;
      if self.application_error_ is not None: self.application_error_.Clear()

  def has_application_error(self): return self.has_application_error_

  def java_exception(self): return self.java_exception_

  def set_java_exception(self, x):
    self.has_java_exception_ = 1
    self.java_exception_ = x

  def clear_java_exception(self):
    if self.has_java_exception_:
      self.has_java_exception_ = 0
      self.java_exception_ = ""

  def has_java_exception(self): return self.has_java_exception_

  def MergeFrom(self, x):
    """Merges set fields of another Response into this one."""
    assert x is not self
    if (x.has_response()): self.set_response(x.response())
    if (x.has_exception()): self.set_exception(x.exception())
    if (x.has_application_error()): self.mutable_application_error().MergeFrom(x.application_error())
    if (x.has_java_exception()): self.set_java_exception(x.java_exception())

  def Equals(self, x):
    """Field-by-field equality, including presence bits; returns 0/1."""
    if x is self: return 1
    if self.has_response_ != x.has_response_: return 0
    if self.has_response_ and self.response_ != x.response_: return 0
    if self.has_exception_ != x.has_exception_: return 0
    if self.has_exception_ and self.exception_ != x.exception_: return 0
    if self.has_application_error_ != x.has_application_error_: return 0
    if self.has_application_error_ and self.application_error_ != x.application_error_: return 0
    if self.has_java_exception_ != x.has_java_exception_: return 0
    if self.has_java_exception_ and self.java_exception_ != x.java_exception_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """All fields are optional; only a set application_error must itself be initialized."""
    initialized = 1
    if (self.has_application_error_ and not self.application_error_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Encoded size of the set fields (each costs 1 tag byte + payload)."""
    n = 0
    if (self.has_response_): n += 1 + self.lengthString(len(self.response_))
    if (self.has_exception_): n += 1 + self.lengthString(len(self.exception_))
    if (self.has_application_error_): n += 1 + self.lengthString(self.application_error_.ByteSize())
    if (self.has_java_exception_): n += 1 + self.lengthString(len(self.java_exception_))
    return n

  def ByteSizePartial(self):
    """Like ByteSize() but sizes the sub-message partially as well."""
    n = 0
    if (self.has_response_): n += 1 + self.lengthString(len(self.response_))
    if (self.has_exception_): n += 1 + self.lengthString(len(self.exception_))
    if (self.has_application_error_): n += 1 + self.lengthString(self.application_error_.ByteSizePartial())
    if (self.has_java_exception_): n += 1 + self.lengthString(len(self.java_exception_))
    return n

  def Clear(self):
    self.clear_response()
    self.clear_exception()
    self.clear_application_error()
    self.clear_java_exception()

  def OutputUnchecked(self, out):
    """Writes set fields; sub-message is length-prefixed (tag 26 = field 3)."""
    if (self.has_response_):
      out.putVarInt32(10)
      out.putPrefixedString(self.response_)
    if (self.has_exception_):
      out.putVarInt32(18)
      out.putPrefixedString(self.exception_)
    if (self.has_application_error_):
      out.putVarInt32(26)
      out.putVarInt32(self.application_error_.ByteSize())
      self.application_error_.OutputUnchecked(out)
    if (self.has_java_exception_):
      out.putVarInt32(34)
      out.putPrefixedString(self.java_exception_)

  def OutputPartial(self, out):
    """Same layout as OutputUnchecked but uses partial sizes/serialization."""
    if (self.has_response_):
      out.putVarInt32(10)
      out.putPrefixedString(self.response_)
    if (self.has_exception_):
      out.putVarInt32(18)
      out.putPrefixedString(self.exception_)
    if (self.has_application_error_):
      out.putVarInt32(26)
      out.putVarInt32(self.application_error_.ByteSizePartial())
      self.application_error_.OutputPartial(out)
    if (self.has_java_exception_):
      out.putVarInt32(34)
      out.putPrefixedString(self.java_exception_)

  def TryMerge(self, d):
    """Decodes from decoder `d`; sub-message bytes get a bounded sub-decoder."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_response(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_exception(d.getPrefixedString())
        continue
      if tt == 26:
        # Decode the embedded ApplicationError from its length-delimited slice.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_application_error().TryMerge(tmp)
        continue
      if tt == 34:
        self.set_java_exception(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Human-readable text rendering of the set fields."""
    res=""
    if self.has_response_: res+=prefix+("response: %s\n" % self.DebugFormatString(self.response_))
    if self.has_exception_: res+=prefix+("exception: %s\n" % self.DebugFormatString(self.exception_))
    if self.has_application_error_:
      res+=prefix+"application_error <\n"
      res+=self.application_error_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_java_exception_: res+=prefix+("java_exception: %s\n" % self.DebugFormatString(self.java_exception_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Expands a sparse {tag: value} dict into a dense tuple of size maxtag+1."""
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Proto field numbers.
  kresponse = 1
  kexception = 2
  kapplication_error = 3
  kjava_exception = 4

  # Tag -> name and tag -> wire-type tables for the generic machinery.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "response",
    2: "exception",
    3: "application_error",
    4: "java_exception",
  }, 4)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
  }, 4, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.Response'
class TransactionRequest_Precondition(ProtocolBuffer.ProtocolMessage):
  """Generated group message: a precondition inside TransactionRequest.

  Pairs a required entity `key` (Reference) with an optional `hash` of the
  entity's last-known state. Encoded as a proto1 *group*, so TryMerge stops
  at the END_GROUP tag rather than at end-of-buffer.
  """
  has_key_ = 0
  has_hash_ = 0
  hash_ = ""

  def __init__(self, contents=None):
    # Required sub-message is constructed eagerly (unlike lazy optional ones).
    self.key_ = Reference()
    if contents is not None: self.MergeFromString(contents)

  # --- Field accessors ---
  def key(self): return self.key_

  def mutable_key(self): self.has_key_ = 1; return self.key_

  def clear_key(self):self.has_key_ = 0; self.key_.Clear()

  def has_key(self): return self.has_key_

  def hash(self): return self.hash_

  def set_hash(self, x):
    self.has_hash_ = 1
    self.hash_ = x

  def clear_hash(self):
    if self.has_hash_:
      self.has_hash_ = 0
      self.hash_ = ""

  def has_hash(self): return self.has_hash_

  def MergeFrom(self, x):
    """Merges set fields of another Precondition into this one."""
    assert x is not self
    if (x.has_key()): self.mutable_key().MergeFrom(x.key())
    if (x.has_hash()): self.set_hash(x.hash())

  def Equals(self, x):
    """Field-by-field equality, including presence bits; returns 0/1."""
    if x is self: return 1
    if self.has_key_ != x.has_key_: return 0
    if self.has_key_ and self.key_ != x.key_: return 0
    if self.has_hash_ != x.has_hash_: return 0
    if self.has_hash_ and self.hash_ != x.hash_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Returns 1 iff required `key` is set and itself initialized."""
    initialized = 1
    if (not self.has_key_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: key not set.')
    elif not self.key_.IsInitialized(debug_strs): initialized = 0
    return initialized

  def ByteSize(self):
    """Encoded size assuming `key` is set (+1 for its tag byte)."""
    n = 0
    n += self.lengthString(self.key_.ByteSize())
    if (self.has_hash_): n += 1 + self.lengthString(len(self.hash_))
    return n + 1

  def ByteSizePartial(self):
    """Encoded size counting only fields that are actually set."""
    n = 0
    if (self.has_key_):
      n += 1
      n += self.lengthString(self.key_.ByteSizePartial())
    if (self.has_hash_): n += 1 + self.lengthString(len(self.hash_))
    return n

  def Clear(self):
    self.clear_key()
    self.clear_hash()

  def OutputUnchecked(self, out):
    """Writes fields; tag 18 = field 2 (key), tag 26 = field 3 (hash)."""
    out.putVarInt32(18)
    out.putVarInt32(self.key_.ByteSize())
    self.key_.OutputUnchecked(out)
    if (self.has_hash_):
      out.putVarInt32(26)
      out.putPrefixedString(self.hash_)

  def OutputPartial(self, out):
    """Writes only the fields that are set (safe when uninitialized)."""
    if (self.has_key_):
      out.putVarInt32(18)
      out.putVarInt32(self.key_.ByteSizePartial())
      self.key_.OutputPartial(out)
    if (self.has_hash_):
      out.putVarInt32(26)
      out.putPrefixedString(self.hash_)

  def TryMerge(self, d):
    """Decodes group fields until the END_GROUP tag (12) is seen."""
    while 1:
      tt = d.getVarInt32()
      if tt == 12: break  # END_GROUP for group field 1 of the parent message
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_key().TryMerge(tmp)
        continue
      if tt == 26:
        self.set_hash(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Human-readable text rendering of the set fields."""
    res=""
    if self.has_key_:
      res+=prefix+"key <\n"
      res+=self.key_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_hash_: res+=prefix+("hash: %s\n" % self.DebugFormatString(self.hash_))
    return res
class TransactionRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message apphosting.ext.remote_api.TransactionRequest.

  Describes a datastore transaction to apply remotely: a repeated group of
  `precondition`s, optional `puts` (PutRequest) and `deletes`
  (DeleteRequest) sub-messages, and an `allow_multiple_eg` flag for
  cross-entity-group (XG) transactions.
  """
  # Presence flags and field storage. puts_/deletes_ are lazily constructed.
  has_puts_ = 0
  puts_ = None
  has_deletes_ = 0
  deletes_ = None
  has_allow_multiple_eg_ = 0
  allow_multiple_eg_ = 0

  def __init__(self, contents=None):
    self.precondition_ = []
    # Lock guarding lazy construction of puts_/deletes_ sub-messages.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- Repeated group: precondition ---
  def precondition_size(self): return len(self.precondition_)

  def precondition_list(self): return self.precondition_

  def precondition(self, i):
    return self.precondition_[i]

  def mutable_precondition(self, i):
    return self.precondition_[i]

  def add_precondition(self):
    # Appends and returns a fresh element for the caller to populate.
    x = TransactionRequest_Precondition()
    self.precondition_.append(x)
    return x

  def clear_precondition(self):
    self.precondition_ = []

  # --- Optional sub-message: puts ---
  def puts(self):
    # Double-checked lazy init under the lock.
    if self.puts_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.puts_ is None: self.puts_ = PutRequest()
      finally:
        self.lazy_init_lock_.release()
    return self.puts_

  def mutable_puts(self): self.has_puts_ = 1; return self.puts()

  def clear_puts(self):
    # Clear the contents but keep the instance around for reuse.
    if self.has_puts_:
      self.has_puts_ = 0;
      if self.puts_ is not None: self.puts_.Clear()

  def has_puts(self): return self.has_puts_

  # --- Optional sub-message: deletes ---
  def deletes(self):
    # Double-checked lazy init under the lock.
    if self.deletes_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.deletes_ is None: self.deletes_ = DeleteRequest()
      finally:
        self.lazy_init_lock_.release()
    return self.deletes_

  def mutable_deletes(self): self.has_deletes_ = 1; return self.deletes()

  def clear_deletes(self):
    # Clear the contents but keep the instance around for reuse.
    if self.has_deletes_:
      self.has_deletes_ = 0;
      if self.deletes_ is not None: self.deletes_.Clear()

  def has_deletes(self): return self.has_deletes_

  # --- Optional bool: allow_multiple_eg ---
  def allow_multiple_eg(self): return self.allow_multiple_eg_

  def set_allow_multiple_eg(self, x):
    self.has_allow_multiple_eg_ = 1
    self.allow_multiple_eg_ = x

  def clear_allow_multiple_eg(self):
    if self.has_allow_multiple_eg_:
      self.has_allow_multiple_eg_ = 0
      self.allow_multiple_eg_ = 0

  def has_allow_multiple_eg(self): return self.has_allow_multiple_eg_

  def MergeFrom(self, x):
    """Merges another TransactionRequest: appends preconditions, merges the rest."""
    assert x is not self
    for i in xrange(x.precondition_size()): self.add_precondition().CopyFrom(x.precondition(i))
    if (x.has_puts()): self.mutable_puts().MergeFrom(x.puts())
    if (x.has_deletes()): self.mutable_deletes().MergeFrom(x.deletes())
    if (x.has_allow_multiple_eg()): self.set_allow_multiple_eg(x.allow_multiple_eg())

  def Equals(self, x):
    """Field-by-field equality, including presence bits; returns 0/1."""
    if x is self: return 1
    if len(self.precondition_) != len(x.precondition_): return 0
    for e1, e2 in zip(self.precondition_, x.precondition_):
      if e1 != e2: return 0
    if self.has_puts_ != x.has_puts_: return 0
    if self.has_puts_ and self.puts_ != x.puts_: return 0
    if self.has_deletes_ != x.has_deletes_: return 0
    if self.has_deletes_ and self.deletes_ != x.deletes_: return 0
    if self.has_allow_multiple_eg_ != x.has_allow_multiple_eg_: return 0
    if self.has_allow_multiple_eg_ and self.allow_multiple_eg_ != x.allow_multiple_eg_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """All fields optional; each set element/sub-message must be initialized."""
    initialized = 1
    for p in self.precondition_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_puts_ and not self.puts_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_deletes_ and not self.deletes_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Encoded size; each group element costs 2 tag bytes (START+END_GROUP)."""
    n = 0
    n += 2 * len(self.precondition_)
    for i in xrange(len(self.precondition_)): n += self.precondition_[i].ByteSize()
    if (self.has_puts_): n += 1 + self.lengthString(self.puts_.ByteSize())
    if (self.has_deletes_): n += 1 + self.lengthString(self.deletes_.ByteSize())
    if (self.has_allow_multiple_eg_): n += 2
    return n

  def ByteSizePartial(self):
    """Like ByteSize() but sizes sub-messages partially as well."""
    n = 0
    n += 2 * len(self.precondition_)
    for i in xrange(len(self.precondition_)): n += self.precondition_[i].ByteSizePartial()
    if (self.has_puts_): n += 1 + self.lengthString(self.puts_.ByteSizePartial())
    if (self.has_deletes_): n += 1 + self.lengthString(self.deletes_.ByteSizePartial())
    if (self.has_allow_multiple_eg_): n += 2
    return n

  def Clear(self):
    self.clear_precondition()
    self.clear_puts()
    self.clear_deletes()
    self.clear_allow_multiple_eg()

  def OutputUnchecked(self, out):
    """Writes fields; groups are framed by tags 11/12 (START/END_GROUP)."""
    for i in xrange(len(self.precondition_)):
      out.putVarInt32(11)
      self.precondition_[i].OutputUnchecked(out)
      out.putVarInt32(12)
    if (self.has_puts_):
      out.putVarInt32(34)
      out.putVarInt32(self.puts_.ByteSize())
      self.puts_.OutputUnchecked(out)
    if (self.has_deletes_):
      out.putVarInt32(42)
      out.putVarInt32(self.deletes_.ByteSize())
      self.deletes_.OutputUnchecked(out)
    if (self.has_allow_multiple_eg_):
      out.putVarInt32(48)
      out.putBoolean(self.allow_multiple_eg_)

  def OutputPartial(self, out):
    """Same layout as OutputUnchecked but uses partial sizes/serialization."""
    for i in xrange(len(self.precondition_)):
      out.putVarInt32(11)
      self.precondition_[i].OutputPartial(out)
      out.putVarInt32(12)
    if (self.has_puts_):
      out.putVarInt32(34)
      out.putVarInt32(self.puts_.ByteSizePartial())
      self.puts_.OutputPartial(out)
    if (self.has_deletes_):
      out.putVarInt32(42)
      out.putVarInt32(self.deletes_.ByteSizePartial())
      self.deletes_.OutputPartial(out)
    if (self.has_allow_multiple_eg_):
      out.putVarInt32(48)
      out.putBoolean(self.allow_multiple_eg_)

  def TryMerge(self, d):
    """Decodes from decoder `d`; group elements consume their own END_GROUP."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 11:
        # START_GROUP for precondition; the element's TryMerge eats tag 12.
        self.add_precondition().TryMerge(d)
        continue
      if tt == 34:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_puts().TryMerge(tmp)
        continue
      if tt == 42:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_deletes().TryMerge(tmp)
        continue
      if tt == 48:
        self.set_allow_multiple_eg(d.getBoolean())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Human-readable text rendering of the set fields."""
    res=""
    cnt=0
    for e in self.precondition_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("Precondition%s {\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
      cnt+=1
    if self.has_puts_:
      res+=prefix+"puts <\n"
      res+=self.puts_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_deletes_:
      res+=prefix+"deletes <\n"
      res+=self.deletes_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_allow_multiple_eg_: res+=prefix+("allow_multiple_eg: %s\n" % self.DebugFormatBool(self.allow_multiple_eg_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Expands a sparse {tag: value} dict into a dense tuple of size maxtag+1."""
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Proto field numbers (group members included).
  kPreconditionGroup = 1
  kPreconditionkey = 2
  kPreconditionhash = 3
  kputs = 4
  kdeletes = 5
  kallow_multiple_eg = 6

  # Tag -> name and tag -> wire-type tables for the generic machinery.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "Precondition",
    2: "key",
    3: "hash",
    4: "puts",
    5: "deletes",
    6: "allow_multiple_eg",
  }, 6)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STARTGROUP,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.STRING,
    6: ProtocolBuffer.Encoder.NUMERIC,
  }, 6, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.TransactionRequest'
class TransactionQueryResult(ProtocolBuffer.ProtocolMessage):
  """Generated message apphosting.ext.remote_api.TransactionQueryResult.

  Bundles a required query `result` with the required `entity_group_key`
  the query ran against, plus the optional `entity_group` entity itself.
  """
  has_result_ = 0
  has_entity_group_key_ = 0
  has_entity_group_ = 0
  entity_group_ = None

  def __init__(self, contents=None):
    # Required sub-messages are constructed eagerly; entity_group_ is lazy.
    self.result_ = QueryResult()
    self.entity_group_key_ = Reference()
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- Field accessors ---
  def result(self): return self.result_

  def mutable_result(self): self.has_result_ = 1; return self.result_

  def clear_result(self):self.has_result_ = 0; self.result_.Clear()

  def has_result(self): return self.has_result_

  def entity_group_key(self): return self.entity_group_key_

  def mutable_entity_group_key(self): self.has_entity_group_key_ = 1; return self.entity_group_key_

  def clear_entity_group_key(self):self.has_entity_group_key_ = 0; self.entity_group_key_.Clear()

  def has_entity_group_key(self): return self.has_entity_group_key_

  def entity_group(self):
    # Double-checked lazy init under the lock.
    if self.entity_group_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.entity_group_ is None: self.entity_group_ = EntityProto()
      finally:
        self.lazy_init_lock_.release()
    return self.entity_group_

  def mutable_entity_group(self): self.has_entity_group_ = 1; return self.entity_group()

  def clear_entity_group(self):
    # Clear the contents but keep the instance around for reuse.
    if self.has_entity_group_:
      self.has_entity_group_ = 0;
      if self.entity_group_ is not None: self.entity_group_.Clear()

  def has_entity_group(self): return self.has_entity_group_

  def MergeFrom(self, x):
    """Merges set fields of another TransactionQueryResult into this one."""
    assert x is not self
    if (x.has_result()): self.mutable_result().MergeFrom(x.result())
    if (x.has_entity_group_key()): self.mutable_entity_group_key().MergeFrom(x.entity_group_key())
    if (x.has_entity_group()): self.mutable_entity_group().MergeFrom(x.entity_group())

  def Equals(self, x):
    """Field-by-field equality, including presence bits; returns 0/1."""
    if x is self: return 1
    if self.has_result_ != x.has_result_: return 0
    if self.has_result_ and self.result_ != x.result_: return 0
    if self.has_entity_group_key_ != x.has_entity_group_key_: return 0
    if self.has_entity_group_key_ and self.entity_group_key_ != x.entity_group_key_: return 0
    if self.has_entity_group_ != x.has_entity_group_: return 0
    if self.has_entity_group_ and self.entity_group_ != x.entity_group_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Returns 1 iff both required fields are set and all set sub-messages are initialized."""
    initialized = 1
    if (not self.has_result_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: result not set.')
    elif not self.result_.IsInitialized(debug_strs): initialized = 0
    if (not self.has_entity_group_key_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: entity_group_key not set.')
    elif not self.entity_group_key_.IsInitialized(debug_strs): initialized = 0
    if (self.has_entity_group_ and not self.entity_group_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Encoded size assuming required fields are set (+2 tag bytes)."""
    n = 0
    n += self.lengthString(self.result_.ByteSize())
    n += self.lengthString(self.entity_group_key_.ByteSize())
    if (self.has_entity_group_): n += 1 + self.lengthString(self.entity_group_.ByteSize())
    return n + 2

  def ByteSizePartial(self):
    """Encoded size counting only fields that are actually set."""
    n = 0
    if (self.has_result_):
      n += 1
      n += self.lengthString(self.result_.ByteSizePartial())
    if (self.has_entity_group_key_):
      n += 1
      n += self.lengthString(self.entity_group_key_.ByteSizePartial())
    if (self.has_entity_group_): n += 1 + self.lengthString(self.entity_group_.ByteSizePartial())
    return n

  def Clear(self):
    self.clear_result()
    self.clear_entity_group_key()
    self.clear_entity_group()

  def OutputUnchecked(self, out):
    """Writes fields; tags 10/18/26 = fields 1/2/3, all length-delimited."""
    out.putVarInt32(10)
    out.putVarInt32(self.result_.ByteSize())
    self.result_.OutputUnchecked(out)
    out.putVarInt32(18)
    out.putVarInt32(self.entity_group_key_.ByteSize())
    self.entity_group_key_.OutputUnchecked(out)
    if (self.has_entity_group_):
      out.putVarInt32(26)
      out.putVarInt32(self.entity_group_.ByteSize())
      self.entity_group_.OutputUnchecked(out)

  def OutputPartial(self, out):
    """Writes only the fields that are set (safe when uninitialized)."""
    if (self.has_result_):
      out.putVarInt32(10)
      out.putVarInt32(self.result_.ByteSizePartial())
      self.result_.OutputPartial(out)
    if (self.has_entity_group_key_):
      out.putVarInt32(18)
      out.putVarInt32(self.entity_group_key_.ByteSizePartial())
      self.entity_group_key_.OutputPartial(out)
    if (self.has_entity_group_):
      out.putVarInt32(26)
      out.putVarInt32(self.entity_group_.ByteSizePartial())
      self.entity_group_.OutputPartial(out)

  def TryMerge(self, d):
    """Decodes from decoder `d`; each sub-message gets a bounded sub-decoder."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_result().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_entity_group_key().TryMerge(tmp)
        continue
      if tt == 26:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_entity_group().TryMerge(tmp)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Human-readable text rendering of the set fields."""
    res=""
    if self.has_result_:
      res+=prefix+"result <\n"
      res+=self.result_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_entity_group_key_:
      res+=prefix+"entity_group_key <\n"
      res+=self.entity_group_key_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_entity_group_:
      res+=prefix+"entity_group <\n"
      res+=self.entity_group_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Expands a sparse {tag: value} dict into a dense tuple of size maxtag+1."""
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Proto field numbers.
  kresult = 1
  kentity_group_key = 2
  kentity_group = 3

  # Tag -> name and tag -> wire-type tables for the generic machinery.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "result",
    2: "entity_group_key",
    3: "entity_group",
  }, 3)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.TransactionQueryResult'
# Hook for the optional C++ protocol-buffer extension runtime; this module
# needs no extra registration when it is present.
if _extension_runtime:
  pass
# Public API of this generated module.
__all__ = ['Request','ApplicationError','Response','TransactionRequest','TransactionRequest_Precondition','TransactionQueryResult']
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Cloud TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.distribute.cluster_resolver import cluster_resolver
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
try:
from cloud_tpu_client import client # pylint: disable=g-import-not-at-top
except ImportError:
logging.debug(
'Falling back to TensorFlow client; we recommended you install the Cloud '
'TPU client directly with pip install cloud-tpu-client.')
from tensorflow.python.tpu.client import client
def is_running_in_gce():
  """Returns whether this code runs on Google Compute Engine.

  NOTE(review): this is an unconditional stub that always reports True;
  no metadata-server probe is performed here. Callers that may run outside
  GCE should not rely on it -- TODO confirm intended behavior.
  """
  return True
# Matches TPU device names such as '/job:w/replica:0/task:3/device:TPU:7',
# capturing the host (task) id and the per-host core id.
_TPU_DEVICE_REGEX = re.compile(
    r'.*task:(?P<host_id>\d+)/.*device:TPU:(?P<core_id>\d+)$')
# Retry budget when establishing a connection to the TPU system.
_TPU_CONN_RETRIES = 120
# device_map: mapping of host_id -> list of core_ids on that host;
# total_cores: total number of TPU cores across all hosts.
DeviceDetails = collections.namedtuple(
    'DeviceDetails', ['device_map', 'total_cores'])
@tf_export('distribute.cluster_resolver.TPUClusterResolver')
class TPUClusterResolver(cluster_resolver.ClusterResolver):
"""Cluster Resolver for Google Cloud TPUs.
This is an implementation of cluster resolvers for the Google Cloud TPU
service. As Cloud TPUs are in alpha, you will need to specify a API definition
file for this to consume, in addition to a list of Cloud TPUs in your Google
Cloud Platform project.
TPUClusterResolver supports the following distinct environments:
Google Compute Engine
Google Kubernetes Engine
Google internal
"""
@staticmethod
def _get_device_dict_and_cores(devices):
"""Returns a dict of hosts to cores and total cores given devices names.
Returns a namedtuple with two attributes:
device_map: A map of host_ids to a list of core_ids.
total_cores: The total number of cores within the TPU system.
Args:
devices: A list of devices returned by session.list_devices()
"""
device_map = collections.defaultdict(list)
num_cores = 0
for device in devices:
match = _TPU_DEVICE_REGEX.match(device.name)
if match:
host_id = match.group('host_id')
core_id = match.group('core_id')
device_map[host_id].append(core_id)
num_cores += 1
return DeviceDetails(device_map, num_cores)
@staticmethod
def _verify_and_return_same_core_count(device_dict):
"""Verifies that every device in device_dict has the same # of cores."""
num_cores_per_host_set = (
{len(core_ids) for core_ids in device_dict.values()})
if len(num_cores_per_host_set) != 1:
raise RuntimeError('TPU cores on each device is not the same. This '
'should never happen. Devices: {}'.format(device_dict))
return num_cores_per_host_set.pop()
  def __init__(self,
               tpu=None,
               zone=None,
               project=None,
               job_name='worker',
               coordinator_name=None,
               coordinator_address=None,
               credentials='default',
               service=None,
               discovery_url=None):
    """Creates a new TPUClusterResolver object.

    The ClusterResolver will then use the parameters to query the Cloud TPU APIs
    for the IP addresses and ports of each Cloud TPU listed.

    Args:
      tpu: A string corresponding to the TPU to use. If the string is an empty
        string, the string 'local', or a string that begins with 'grpc://', then
        it is assumed to not correspond with a Cloud TPU and will instead be
        passed as the session master and no ClusterSpec propagation will be
        done. In the future, this may also support a list of strings when
        multiple Cloud TPUs are used.
      zone: Zone where the TPUs are located. If omitted or empty, we will assume
        that the zone of the TPU is the same as the zone of the GCE VM, which we
        will try to discover from the GCE metadata service.
      project: Name of the GCP project containing Cloud TPUs. If omitted or
        empty, we will try to discover the project name of the GCE VM from the
        GCE metadata service.
      job_name: Name of the TensorFlow job the TPUs belong to.
      coordinator_name: The name to use for the coordinator. Set to None if the
        coordinator should not be included in the computed ClusterSpec.
      coordinator_address: The address of the coordinator (typically an ip:port
        pair). If set to None, a TF server will be started. If coordinator_name
        is None, a TF server will not be started even if coordinator_address is
        None.
      credentials: GCE Credentials. If None, then we use default credentials
        from the oauth2client
      service: The GCE API object returned by the googleapiclient.discovery
        function. If you specify a custom service object, then the credentials
        parameter will be ignored.
      discovery_url: A URL template that points to the location of the discovery
        service. It should have two parameters {api} and {apiVersion} that when
        filled in produce an absolute URL to the discovery document for that
        service. The environment variable 'TPU_API_DISCOVERY_URL' will override
        this.

    Raises:
      ImportError: If the googleapiclient is not installed.
      ValueError: If no TPUs are specified.
      RuntimeError: If an empty TPU name is specified and this is running in a
        Google Cloud environment.
    """
    # All Cloud TPU API interaction (name resolution, health, metadata) is
    # delegated to this client object.
    self._cloud_tpu_client = client.Client(
        tpu=tpu,
        zone=zone,
        project=project,
        credentials=credentials,
        service=service,
        discovery_url=discovery_url)

    # Canonical TPU name as resolved by the client (may differ from `tpu`).
    self._tpu = self._cloud_tpu_client.name()
    # By default the task_type is 'worker` and the task_id is 0 (which is the
    # first worker in the task).
    self.task_type = job_name
    self.task_id = 0
    self._coordinator_name = coordinator_name
    # A coordinator name without an explicit address means we must host the
    # coordinator ourselves; otherwise record whatever address was given
    # (possibly None).
    if (coordinator_name and not coordinator_address):
      self._start_local_server()
    else:
      self._coordinator_address = coordinator_address
  def __enter__(self):
    # Delegate context-manager entry to the underlying Cloud TPU client.
    # NOTE(review): no value is returned, so `with resolver as r:` binds None.
    self._cloud_tpu_client.enter()
  def __exit__(self, type, value, traceback):  # pylint: disable=redefined-builtin
    # Context-manager exit: forward the exception triple to the Cloud TPU
    # client so it can clean up.
    self._cloud_tpu_client.exit(type, value, traceback)
def master(self, task_type=None, task_id=None, rpc_layer=None):
"""Get the Master string to be used for the session.
In the normal case, this returns the grpc path (grpc://1.2.3.4:8470) of
first instance in the ClusterSpec returned by the cluster_spec function.
If a non-TPU name is used when constructing a TPUClusterResolver, that will
be returned instead (e.g. If the tpus argument's value when constructing
this TPUClusterResolver was 'grpc://10.240.1.2:8470',
'grpc://10.240.1.2:8470' will be returned).
Args:
task_type: (Optional, string) The type of the TensorFlow task of the
master.
task_id: (Optional, integer) The index of the TensorFlow task of the
master.
rpc_layer: (Optional, string) The RPC protocol TensorFlow should use to
communicate with TPUs.
Returns:
string, the connection string to use when creating a session.
Raises:
ValueError: If none of the TPUs specified exists.
"""
cluster_spec = self.cluster_spec()
if task_type is not None and task_id is not None:
# task_type and task_id is from the function parameter
master = cluster_spec.task_address(task_type, task_id)
elif self.task_type is not None and self.task_id is not None:
# task_type and task_id is from the object
master = cluster_spec.task_address(self.task_type, self.task_id)
else:
# by default we take the first item in the cluster with the right name
job_tasks = cluster_spec.job_tasks(self.task_type)
if not job_tasks:
raise ValueError('No TPUs with the specified names exist.')
master = job_tasks[0]
return cluster_resolver.format_master_url(master, 'grpc')
  def get_master(self):
    # Legacy accessor kept for API compatibility; equivalent to master().
    return self.master()
  def get_job_name(self):
    # Legacy accessor: returns the TensorFlow job name the TPUs belong to.
    return self.task_type
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest TPU information.
We retrieve the information from the GCE APIs every time this method is
called.
Returns:
A ClusterSpec containing host information returned from Cloud TPUs,
or None.
Raises:
RuntimeError: If the provided TPU is not healthy.
"""
############################################################################
# There are 5 potential cases this code must handle:
# 1. [Normal case.] We should resolve the TPU name to a set of tasks, and
# a. Create a ClusterSpec that includes the coordinator job
# b. Create a ClusterSpec without the coordinator job.
# 2. [GKE / No API Access.] We should not resolve the TPU name to a set of
# tasks and
# a. Create a ClusterSpec with the coordinator
# b. Create a ClusterSpec without the coordinator
############################################################################
network_endpoints = self._cloud_tpu_client.network_endpoints()
worker_list = [
'%s:%s' % (endpoint['ipAddress'], endpoint['port'])
for endpoint in network_endpoints
]
cluster_spec = {self.task_type: worker_list}
if self._coordinator_address:
# {1, 2}.a
cluster_spec[self._coordinator_name] = [self._coordinator_address]
return server_lib.ClusterSpec(cluster_spec)
  def num_accelerators(self,
                       task_type=None,
                       task_id=None,
                       config_proto=None):
    """Returns the number of TPU cores per worker.

    Connects to the master and list all the devices present in the master,
    and counts them up. Also verifies that the device counts per host in the
    cluster is the same before returning the number of TPU cores per host.

    Args:
      task_type: Unused.
      task_id: Unused.
      config_proto: Used to create a connection to a TPU master in order to
        retrieve the system metadata.

    Returns:
      A dict mapping 'TPU' to the per-host core count (0 if none found).

    Raises:
      RuntimeError: If we cannot talk to a TPU worker after retrying or if the
        number of TPU devices per host is different.
    """
    retry_count = 1
    # TODO(b/120564445): Replace with standard library for retries.
    while True:
      try:
        device_details = TPUClusterResolver._get_device_dict_and_cores(
            cluster_resolver.get_accelerator_devices(
                self.master(), config_proto=config_proto))
        break
      except errors.DeadlineExceededError:
        # The master may still be scheduling; retry a bounded number of times.
        error_message = ('Failed to connect to master. The TPU might not be '
                         'ready (e.g. still scheduling) or the master '
                         'address is incorrect: got (%s)' % self.master())
        if retry_count <= _TPU_CONN_RETRIES:
          logging.warning(error_message)
          logging.warning('Retrying (%d/%d)...', retry_count, _TPU_CONN_RETRIES)
          retry_count += 1
        else:
          raise RuntimeError(error_message)
    if device_details.total_cores:
      return {'TPU': TPUClusterResolver._verify_and_return_same_core_count(
          device_details.device_map)}
    return {'TPU': 0}
  @property
  def environment(self):
    """Returns the current environment which TensorFlow is running in."""
    return self._environment
  def _start_local_server(self):
    """Starts an in-process TF server to act as the coordinator.

    The server binds an ephemeral port on all interfaces; the advertised
    coordinator address pairs the VM IP (from the Cloud TPU client) with
    the port the server actually bound.
    """
    address = compat.as_text(self._cloud_tpu_client.get_local_ip())
    self._server = server_lib.Server({'local': ['0.0.0.0:0']},
                                     protocol='grpc',
                                     config=None,
                                     start=True)
    # self._server.target is of the form: grpc://ipaddress:port
    target = compat.as_bytes(self._server.target)
    splits = target.split(compat.as_bytes(':'))
    assert len(splits) == 3, self._server.target
    assert splits[0] == compat.as_bytes('grpc'), self._server.target
    self._coordinator_port = compat.as_text(splits[2])
    self._coordinator_address = '%s:%s' % (
        address, compat.as_text(self._coordinator_port))
  def __deepcopy__(self, memo):
    # TODO(b/73668574): Remove this once RunConfig avoids performing deepcopy.
    # Intentionally returns self: the resolver holds live client/server
    # handles that must not be duplicated.
    return self
| |
import os, sys, ConfigParser
from django.contrib.messages import constants as messages
def createModuleGlobalsFromConfigFile(module, filepath):
    """Read *filepath* with RawConfigParser and publish selected options
    as attributes (module-level globals) on *module*."""
    parser = ConfigParser.RawConfigParser()
    parser.read(filepath)
    # (attribute name, config section, config option)
    mappings = (
        ('TWILIO_FROM_PHONE_NUMBER', 'Twilio', 'TWILIO_FROM_PHONE_NUMBER'),
        ('TWILIO_ACCOUNT', 'Twilio', 'TWILIO_ACCOUNT'),
        ('TWILIO_TOKEN', 'Twilio', 'TWILIO_TOKEN'),
        ('DATABASE_HOST', 'Database', 'host'),
        ('DATABASE_USER', 'Database', 'user'),
        ('DATABASE_PASSWORD', 'Database', 'password'),
        ('DATABASE_DB', 'Database', 'database'),
        ('AWS_ACCESS_KEY_ID', 'AWS', 'user'),
        ('AWS_SECRET_ACCESS_KEY', 'AWS', 'password'),
        ('FACEBOOK_APP_ID', 'Facebook', 'app_id'),
        ('FACEBOOK_API_SECRET', 'Facebook', 'api_secret'),
    )
    for name, section, option in mappings:
        setattr(module, name, parser.get(section, option))
abspath = lambda *p: os.path.abspath(os.path.join(*p))

# Filesystem layout: the project root is the directory containing this file;
# the userena checkout one level up is put on sys.path.
PROJECT_ROOT = abspath(os.path.dirname(__file__))
USERENA_MODULE_PATH = abspath(PROJECT_ROOT, '..')
sys.path.insert(0, USERENA_MODULE_PATH)

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Adam Feuer', 'adamf@pobox.com'),
)
MANAGERS = ADMINS

#if DEBUG:
# Use the Python SMTP debugging server. You can run it with:
# ``python -m smtpd -n -c DebuggingServer localhost:1025``.
#    EMAIL_PORT = 1025

# We're using Apache mod_proxy and gunicorn - use this so redirects work
USE_X_FORWARDED_HOST = True

TIME_ZONE = 'America/Los_Angeles'
LANGUAGE_CODE = 'en-us'
# Dummy gettext so language names can be marked for translation without
# importing Django's translation machinery at settings-import time.
ugettext = lambda s: s
LANGUAGES = (
    ('en', ugettext('English')),
)
USE_I18N = True
USE_L10N = True

MEDIA_ROOT = abspath(PROJECT_ROOT, 'media')
DOCUMENT_ROOT = abspath(PROJECT_ROOT, 'docs')
MEDIA_URL = '/app/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
STATIC_URL = '/app/'

# NOTE(review): secret key is committed to source control -- consider loading
# it from the config file like the other credentials above.
SECRET_KEY = 'sx405#tc)5m@s#^jh5l7$k#cl3ekg)jtbo2ds(n(kw@gp0t7x@'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'userena.middleware.UserenaLocaleMiddleware',
    'common.middleware.RuntimePathsMiddleware',
    'social_auth.middleware.SocialAuthExceptionMiddleware',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    'django.core.context_processors.static',
    "django.contrib.messages.context_processors.messages",
    "django.core.context_processors.request",
    "common.context_processors.settings",
    "common.context_processors.site",
    "social_auth.context_processors.social_auth_by_name_backends",
    "social_auth.context_processors.social_auth_backends",
)

AUTHENTICATION_BACKENDS = (
    'userena.backends.UserenaAuthenticationBackend',
    'guardian.backends.ObjectPermissionBackend',
    'django.contrib.auth.backends.ModelBackend',
    'social_auth.backends.facebook.FacebookBackend',
)

ROOT_URLCONF = 'gratitude.urls'
# NOTE: the trailing comma is required. Without it the parentheses are mere
# grouping and TEMPLATE_DIRS is a plain string, which Django would iterate
# character by character when searching for templates.
TEMPLATE_DIRS = (
    abspath(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.staticfiles',
    'gunicorn',
    'easy_thumbnails',
    'guardian',
    'south',
    'userena',
    'userena.contrib.umessages',
    'django_ses',
    'adminplus',
    'cronjobs',
    'crispy_forms',
    'tastypie',
    'social_auth',
    'gratitude.profiles',
    'gratitude.gratitude',
    'gratitude.gratitude.cron',
)

# Bootstrap CSS for django alerts
MESSAGE_TAGS = {
    messages.SUCCESS: 'alert alert-success',
    messages.INFO: 'alert alert-info',
    messages.WARNING: 'alert',
    messages.ERROR: 'alert alert-error',
}

# Django email settings
#EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#EMAIL_FILE_PATH = '/tmp/gratitude-email.log'
EMAIL_BACKEND = 'django_ses.SESBackend'
AWS_SES_REGION_NAME = 'us-east-1'
AWS_SES_REGION_ENDPOINT = 'email.us-east-1.amazonaws.com'
DEFAULT_FROM_EMAIL = '"Art of Gratitude" <team@artofgratitude.com>'

# Django Social Auth
SOCIAL_AUTH_DEFAULT_USERNAME = 'social_auth_user'
FACEBOOK_EXTENDED_PERMISSIONS = ['email']
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'socialauth_associate_complete'
SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
SOCIAL_AUTH_NEW_USER_REDIRECT_BASE_URL = '/social-verification/'
SOCIAL_AUTH_BACKEND_ERROR_BASE_URL = '/signup-error/'
SOCIAL_AUTH_INACTIVE_USER_BASE_URL = '/login-error/'
SOCIAL_AUTH_INACTIVE_USER_MESSAGE = 'Your account is not yet verified. Please check your email and save your first gratitudes to activate your account and start your 30 days of gratitude!'
SOCIAL_AUTH_RAISE_EXCEPTIONS = False

# Userena settings
USERENA_ACTIVATION_REQUIRED = True
AUTH_PROFILE_MODULE = 'profiles.Profile'
USERENA_WITHOUT_USERNAMES = True
USERENA_DISABLE_PROFILE_LIST = True
USERENA_MUGSHOT_SIZE = 140
# (label shown to users, number of days the session cookie persists)
USERENA_REMEMBER_ME_DAYS = ('3 years', 1095)
USERENA_ACTIVATION_DAYS = 35

# Userena base urls
LOGIN_REDIRECT_BASE_URL = '/profile/'
LOGIN_BASE_URL = '/'
LOGOUT_BASE_URL = '/signout/'
SIGNUP_SUCCESSFUL_BASE_URL = "/signup-verification"

# Userena activation email
USERENA_SEND_EMAIL_MODULE='gratitude.gratitude.EmailSender'
USERENA_ACTIVATION_EMAIL_MESSAGE_TEMPLATE='gratitude/emails/activation_email_body.html'
USERENA_ACTIVATION_EMAIL_SUBJECT_TEMPLATE='gratitude/emails/activation_email_subject.txt'
USERENA_SIGNUP_FIRST_AND_LAST_NAMES=True
USERENA_SIGNIN_REDIRECT_BASE_URL = LOGIN_REDIRECT_BASE_URL
#USERENA_SIGNIN_REDIRECT_BASE_URL='/profile/%(username)s/'

# Test settings
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
SOUTH_TESTS_MIGRATE = False

# Guardian
ANONYMOUS_USER_ID = -1

# Gratitude App
VERSION = "0.2"
PROD = "prod"
TEST = "test"
DEV = "dev"
MAX_STASHED_GRATITUDES=270
MAX_AGE_OF_STASHED_GRATITUDES_IN_DAYS=90
GRATITUDES_PER_DAY = 3
DAYS_OF_GRATITUDE = 30
# Days after signup on which reminder emails are sent.
REMINDER_EMAIL_DAYS = [1, 3, 7, 30]

# Gratitude Analytics Actions
MAX_ACTIONS_PER_DAY = 1000
| |
#!/usr/bin/env python
# encoding: utf-8
"""
qrcan_api.py
Created by Michael Hausenblas on 2011-03-04.
"""
import logging
_logger = logging.getLogger('qrcan')
import sys
import os
import rdflib
import cgi
import datetime
try:
import json
except ImportError:
import simplejson as json
from rdflib import Graph
from rdflib import Namespace
from rdflib import URIRef
from rdflib import Literal
from rdflib import RDF
from rdflib import XSD
from qrcan_ds import *
from qrcan_exceptions import *
from qrcan_store import *
class QrcanAPI:
    """HTTP API dispatcher for qrcan data sources.

    Maps request nouns (URL paths under /api/datasource) either to fixed
    handlers via ``apimap`` or to per-datasource handlers resolved from the
    noun's suffix. Data source descriptions are persisted through a
    QrcanStore; known sources are cached in ``self.datasources`` keyed by
    their identifier.
    """

    # HTTP API configuration:
    API_BASE = '/api'
    DATASOURCES_API_BASE = '/datasource'
    ALL_DS_NOUN = '/all'
    SYNC_DS_NOUN = '/sync'
    QUERY_DS_NOUN = '/query'
    SCHEMA_DS_NOUN = '/schema'
    REMOVE_DS_NOUN = '/rm'

    # Configuration of the data source description store:
    DATASOURCES_METADATA_BASE = 'datasources/'

    def __init__(self, api_base):
        """Set up the backing store and the fixed-noun dispatch table."""
        self.datasources = dict()
        self.api_base = api_base
        self.datasource_base = ''.join([api_base, QrcanAPI.API_BASE, QrcanAPI.DATASOURCES_API_BASE, '/'])
        self.store = QrcanStore()
        self.store.setup_store()
        # Fixed resources; dynamic (per-datasource) nouns are resolved in
        # dispatch_api_call's KeyError fallback.
        self.apimap = {
            ''.join([QrcanAPI.API_BASE, QrcanAPI.DATASOURCES_API_BASE, QrcanAPI.ALL_DS_NOUN]): 'list_all_datasources',  # GET
            ''.join([QrcanAPI.API_BASE, QrcanAPI.DATASOURCES_API_BASE]): 'add_datasource'  # POST
        }
        _logger.debug('API ready at %s' % ''.join([api_base, QrcanAPI.API_BASE]))

    def dispatch_api_call(self, noun, instream, outstream, headers):
        """Route *noun* to its handler; raises HTTP404 for unknown nouns."""
        try:
            m = getattr(self, self.apimap[str(noun)])  # handling fixed resources
            m(instream, outstream, headers)
        except KeyError:  # handling potentially dynamic resources
            if noun.startswith(''.join([QrcanAPI.API_BASE, QrcanAPI.DATASOURCES_API_BASE, '/'])):
                try:
                    dsid = ''.join([self.api_base, noun])
                    #_logger.debug('Target data source [%s]' %dsid)
                    # The noun suffix selects the operation; strip it to get
                    # the data source identifier.
                    if noun.endswith('/'):
                        dsid = dsid[:-1]  # remove the trailing slash
                        self._update_datasource(instream, outstream, headers, dsid)  # POST
                    elif noun.endswith(QrcanAPI.SYNC_DS_NOUN):
                        dsid = dsid[:-len(QrcanAPI.SYNC_DS_NOUN)]
                        self._sync_datasource(outstream, dsid)  # GET, should really be POST
                    elif noun.endswith(QrcanAPI.QUERY_DS_NOUN):
                        dsid = dsid[:-len(QrcanAPI.QUERY_DS_NOUN)]
                        self._query_datasource(instream, outstream, headers, dsid)  # POST
                    elif noun.endswith(QrcanAPI.SCHEMA_DS_NOUN):
                        dsid = dsid[:-len(QrcanAPI.SCHEMA_DS_NOUN)]
                        self._schema_datasource(instream, outstream, headers, dsid)  # GET/POST
                    elif noun.endswith(QrcanAPI.REMOVE_DS_NOUN):
                        dsid = dsid[:-len(QrcanAPI.REMOVE_DS_NOUN)]
                        self._remove_datasource(instream, outstream, headers, dsid)  # POST
                    else:
                        self._serve_datasource(outstream, dsid)  # GET
                except DatasourceNotExists:
                    _logger.debug('Seems the data source does not exist!')
                    raise HTTP404
            else:
                _logger.debug('unknown noun %s' % noun)
                raise HTTP404

    def init_datasources(self):
        """Load every *.ttl description in the metadata dir into the cache."""
        _logger.debug('Scanning [%s] for data sources ...' % QrcanAPI.DATASOURCES_METADATA_BASE)
        for f in os.listdir(QrcanAPI.DATASOURCES_METADATA_BASE):
            if f.endswith('.ttl'):
                ds = Datasource(self.datasource_base, QrcanAPI.DATASOURCES_METADATA_BASE)
                ds.load(''.join([QrcanAPI.DATASOURCES_METADATA_BASE, f]))
                self.datasources[ds.identify()] = ds
                _logger.debug('Added data sources [%s]' % ds.identify())

    def list_all_datasources(self, instream, outstream, headers):
        """Write a JSON list of all data source descriptions to *outstream*."""
        dslist = list()
        for ds in self.datasources.itervalues():
            dslist.append(ds.describe(encoding='raw'))
        outstream.write(json.JSONEncoder().encode(dslist))

    def add_datasource(self, instream, outstream, headers):
        """Create a data source from the form-encoded 'dsdata' parameter."""
        dsdata = self._get_formenc_param(instream, headers, 'dsdata')
        if dsdata:
            _logger.debug('Creating data source with:')
            for key in dsdata.keys():
                _logger.debug('%s = %s' % (key, dsdata[key]))
            # prepare metadata
            ds = Datasource(self.datasource_base, QrcanAPI.DATASOURCES_METADATA_BASE)
            ds.update(dsdata['name'], dsdata['access_method'], dsdata['access_uri'], dsdata['access_mode'])
            # store content for local data sources
            if ds.is_local():
                g = self.store.init_datasource(ds.identify())
                ds.sync(g)
                self.store.store_datasource(g, ds.identify())
            # store metadata
            ds.store()
            self.datasources[ds.identify()] = ds

    def _serve_datasource(self, outstream, dsid):
        """Write the description of data source *dsid* to *outstream*."""
        _logger.debug('Trying to get description of data source [%s] ...' % dsid)
        try:
            ds = self.datasources[dsid]
            outstream.write(ds.describe())
        except KeyError:
            raise DatasourceNotExists

    def _sync_datasource(self, outstream, dsid):
        """Re-sync a local data source's content into the store (NOP for remote)."""
        try:
            ds = self.datasources[dsid]
        except KeyError:
            raise DatasourceNotExists
        if not ds.is_local():
            _logger.debug('[%s] is a remote data source ... NOP' % dsid)
        else:
            _logger.debug('Trying to sync data source [%s] ...' % dsid)
            g = self.store.init_datasource(ds.identify())
            ds.sync(g)
            ds.store()
            self.store.store_datasource(g, ds.identify())
        outstream.write(ds.describe())

    def _query_datasource(self, instream, outstream, headers, dsid):
        """Run the posted 'querydata' query against data source *dsid*."""
        querydata = self._get_formenc_param(instream, headers, 'querydata')
        _logger.debug('Trying to query data source [%s] ...' % dsid)
        try:
            ds = self.datasources[dsid]
            g = None
            if ds.is_local():
                g = self.store.init_datasource(ds.identify())
                # Lazily restore local content if the store lost it.
                if not self.store.is_datasource_available(ds.identify()):
                    self.store.restore_datasource(g, ds.identify())
            _logger.debug('Got query string: %s' % querydata['query_str'])
            res = ds.query(g, querydata['query_str'])
            outstream.write(res)
        except KeyError:
            raise DatasourceNotExists

    def _schema_datasource(self, instream, outstream, headers, dsid):
        """Serve schema info, or a type sample when 'sampledata' is posted."""
        sampledata = self._get_formenc_param(instream, headers, 'sampledata')
        try:
            ds = self.datasources[dsid]
            g = None
            if ds.is_local():
                g = self.store.init_datasource(ds.identify())
                if not self.store.is_datasource_available(ds.identify()):
                    self.store.restore_datasource(g, ds.identify())
            if sampledata:
                _logger.debug('Trying to get type sample for type [%s] in data source [%s] ...' % (sampledata['type_uri'], dsid))
                res = ds.type_sample(g, sampledata['type_uri'])
            else:
                _logger.debug('Trying to get schema info for data source [%s] ...' % dsid)
                res = ds.schema(g)
            outstream.write(res)
        except KeyError:
            raise DatasourceNotExists

    def _remove_datasource(self, instream, outstream, headers, dsid):
        """Delete *dsid* from the store, its metadata, and the cache."""
        _logger.debug('Trying to remove data source [%s] ...' % dsid)
        try:
            ds = self.datasources[dsid]
            self.store.remove_datasource(ds.identify())
            ds.remove()
            del self.datasources[dsid]
        except KeyError:
            raise DatasourceNotExists

    def _update_datasource(self, instream, outstream, headers, dsid):
        """Update *dsid*'s metadata from the posted 'dsdata' parameter."""
        dsdata = self._get_formenc_param(instream, headers, 'dsdata')
        if dsdata:
            _logger.debug('Updating data source with:')
            for key in dsdata.keys():
                _logger.debug('%s = %s' % (key, dsdata[key]))
            try:
                self.datasources[dsid].update(dsdata['name'], dsdata['access_method'], dsdata['access_uri'], dsdata['access_mode'])
                self.datasources[dsid].store()
            except KeyError:
                raise DatasourceNotExists

    def _get_formenc_param(self, instream, headers, param):
        """Decode JSON value of *param* from a form-encoded request body.

        Returns None when the request has no content-length (TypeError from
        int(None)) or the parameter value is empty.
        """
        try:
            encparams = instream.read(int(headers.getheader('content-length')))
        except TypeError:
            return None
        params = cgi.parse_qs(encparams)
        if params[param]:
            params = json.JSONDecoder().decode(params[param][0])
            return params
        else:
            return None
| |
from unittest.mock import call, mock_open, patch
import pytest
from pytest import raises
from vang.maven.multi_module_project import get_pom
from vang.maven.multi_module_project import get_pom_infos
from vang.maven.multi_module_project import main
from vang.maven.multi_module_project import make_project
from vang.maven.multi_module_project import parse_args
@pytest.fixture
def pom_infos_fixture():
    """Two fake pom-info dicts for modules m1 and m2 under /root."""
    return [
        {
            'pom_path': f'/root/{module}/pom.xml',
            'artifact_id': module,
            'group_id': 'com.example',
            'version': '1.0.0-SNAPSHOT',
            'packaging': 'jar',
        }
        for module in ('m1', 'm2')
    ]
def test_get_pom(pom_infos_fixture):
    """get_pom renders an aggregator pom with one <module> per pom info,
    each path made relative to the workspace dir."""
    expected = """<project xmlns="http://maven.apache.org/POM/4.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
    http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>group_id</groupId>
    <artifactId>artifact_id</artifactId>
    <version>version</version>
    <packaging>pom</packaging>
    <modules>
        <module>../m1</module>
        <module>../m2</module>
    </modules>
</project>"""
    assert expected == get_pom(
        pom_infos_fixture,
        '/root/ws',
        'group_id',
        'artifact_id',
        'version',
    )
@patch('vang.maven.multi_module_project.makedirs')
@patch('vang.maven.multi_module_project.get_pom')
def test_make_project(mock_get_pom, mock_makedirs, pom_infos_fixture):
    """make_project creates the workspace dir and writes the rendered pom."""
    mock_get_pom.return_value = 'pom'
    with patch('vang.maven.multi_module_project.open', mock_open()) as m:
        make_project(
            pom_infos_fixture,
            '/root/ws',
            'group_id',
            'artifact_id',
            'version',
        )
    # get_pom receives the pom infos plus the project coordinates.
    assert [
        call([
            {
                'pom_path': '/root/m1/pom.xml',
                'artifact_id': 'm1',
                'group_id': 'com.example',
                'version': '1.0.0-SNAPSHOT',
                'packaging': 'jar'
            },
            {
                'pom_path': '/root/m2/pom.xml',
                'artifact_id': 'm2',
                'group_id': 'com.example',
                'version': '1.0.0-SNAPSHOT',
                'packaging': 'jar'
            },
        ], '/root/ws', 'group_id', 'artifact_id', 'version')
    ] == mock_get_pom.mock_calls
    assert [call('/root/ws')] == mock_makedirs.mock_calls
    # Checked after the with-block so the __exit__ call is recorded too.
    assert [
        call('/root/ws/pom.xml', 'wt', encoding='utf-8'),
        call().__enter__(),
        call().write('pom'),
        call().__exit__(None, None, None)
    ] == m.mock_calls
@patch('vang.maven.multi_module_project.pom.get_pom_info')
@patch('vang.maven.multi_module_project.pom.get_pom_paths')
def test_get_pom_infos(mock_get_pom_paths, mock_get_pom_info,
                       pom_infos_fixture):
    """get_pom_infos maps every discovered pom path through get_pom_info."""
    mock_get_pom_paths.return_value = (
        '/root/m1/pom.xml',
        '/root/m2/pom.xml',
    )
    mock_get_pom_info.side_effect = pom_infos_fixture
    assert [
        {
            'artifact_id': 'm1',
            'group_id': 'com.example',
            'packaging': 'jar',
            'pom_path': '/root/m1/pom.xml',
            'version': '1.0.0-SNAPSHOT'
        },
        {
            'artifact_id': 'm2',
            'group_id': 'com.example',
            'packaging': 'jar',
            'pom_path': '/root/m2/pom.xml',
            'version': '1.0.0-SNAPSHOT'
        },
    ] == get_pom_infos('source_dir')
@pytest.mark.parametrize("args", [
    'foo',
])
def test_parse_args_raises(args):
    """Unknown positional arguments make argparse exit with SystemExit."""
    with raises(SystemExit):
        parse_args(args.split(' ') if args else args)
@pytest.mark.parametrize("args, expected", [
    [
        '',
        {
            'use_defaults': False
        },
    ],
    [
        '-d',
        {
            'use_defaults': True
        },
    ],
])
def test_parse_args_valid(args, expected):
    """The -d flag toggles use_defaults; it defaults to False."""
    assert expected == parse_args(args.split(' ') if args else '').__dict__
@pytest.mark.parametrize("use_defaults, source_dir_expected, expected", [
    # Interactive mode: coordinates come from the mocked input() answers.
    (False, [call('s')], [
        call(
            [{
                'pom_path': '/root/m1/pom.xml',
                'artifact_id': 'm1',
                'group_id': 'com.example',
                'version': '1.0.0-SNAPSHOT',
                'packaging': 'jar'
            },
             {
                 'pom_path': '/root/m2/pom.xml',
                 'artifact_id': 'm2',
                 'group_id': 'com.example',
                 'version': '1.0.0-SNAPSHOT',
                 'packaging': 'jar'
             }],
            'o',
            'g',
            'a',
            'v',
        ),
    ]),
    # Defaults mode: coordinates derived from the cwd name ('tests').
    (True, [call('.')], [
        call(
            [
                {
                    'pom_path': '/root/m1/pom.xml',
                    'artifact_id': 'm1',
                    'group_id': 'com.example',
                    'version': '1.0.0-SNAPSHOT',
                    'packaging': 'jar'
                },
                {
                    'pom_path': '/root/m2/pom.xml',
                    'artifact_id': 'm2',
                    'group_id': 'com.example',
                    'version': '1.0.0-SNAPSHOT',
                    'packaging': 'jar'}],
            artifact_id='ws-tests',
            group_id='my.group',
            output_dir='ws-tests',
            source_dir='.',
            version='1.0.0-SNAPSHOT'
        )
    ]),
])
@patch('vang.maven.multi_module_project.getcwd')
@patch('vang.maven.multi_module_project.input')
@patch('vang.maven.multi_module_project.make_project')
@patch('vang.maven.multi_module_project.get_pom_infos')
def test_main(
        mock_get_pom_infos,
        mock_make_project,
        mock_input,
        mock_getcwd,
        pom_infos_fixture,
        use_defaults,
        source_dir_expected,
        expected,
):
    """main wires get_pom_infos into make_project in both input modes."""
    mock_getcwd.return_value = 'tests'
    mock_get_pom_infos.return_value = pom_infos_fixture
    # Answers for the interactive prompts: group, artifact, version,
    # source dir, output dir.
    mock_input.side_effect = ('g', 'a', 'v', 's', 'o')
    main(use_defaults)
    assert source_dir_expected == mock_get_pom_infos.mock_calls
    assert expected == mock_make_project.mock_calls
| |
import asyncio
import codecs
import dataclasses
import functools
import io
import re
import sys
import traceback
import warnings
from hashlib import md5, sha1, sha256
from http.cookies import CookieError, Morsel, SimpleCookie
from types import MappingProxyType, TracebackType
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
cast,
)
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs, helpers, http, multipart, payload
from .abc import AbstractStreamWriter
from .client_exceptions import (
ClientConnectionError,
ClientOSError,
ClientResponseError,
ContentTypeError,
InvalidURL,
ServerFingerprintMismatch,
)
from .formdata import FormData
from .hdrs import CONTENT_TYPE
from .helpers import (
BaseTimerContext,
BasicAuth,
HeadersMixin,
TimerNoop,
is_expected_content_type,
noop,
parse_mimetype,
reify,
set_result,
)
from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter
from .http_parser import HAS_BROTLI
from .log import client_logger
from .streams import StreamReader
from .typedefs import (
DEFAULT_JSON_DECODER,
JSONDecoder,
LooseCookies,
LooseHeaders,
RawHeaders,
)
try:
import ssl
from ssl import SSLContext
except ImportError: # pragma: no cover
ssl = None # type: ignore[assignment]
SSLContext = object # type: ignore[misc,assignment]
try:
import cchardet as chardet
except ImportError: # pragma: no cover
import charset_normalizer as chardet # type: ignore[no-redef]
__all__ = ("ClientRequest", "ClientResponse", "RequestInfo", "Fingerprint")
if TYPE_CHECKING: # pragma: no cover
from .client import ClientSession
from .connector import Connection
from .tracing import Trace
def _gen_default_accept_encoding() -> str:
    """Build the default Accept-Encoding value, advertising brotli only
    when the optional brotli decoder is available."""
    if HAS_BROTLI:
        return "gzip, deflate, br"
    return "gzip, deflate"
@dataclasses.dataclass(frozen=True)
class ContentDisposition:
    """Parsed Content-Disposition header data."""

    type: Optional[str]  # disposition type, e.g. 'attachment'
    parameters: "MappingProxyType[str, str]"  # read-only header parameters
    filename: Optional[str]
@dataclasses.dataclass(frozen=True)
class RequestInfo:
    """Immutable snapshot of an outgoing request, exposed on responses."""

    url: URL  # request URL with the fragment stripped
    method: str
    headers: "CIMultiDictProxy[str]"
    real_url: URL  # original URL, fragment included
class Fingerprint:
    """Pin a server's TLS certificate to a known digest (sha256 only)."""

    # Digest length in bytes -> hash constructor. md5/sha1 lengths are kept
    # so they can be recognized and rejected with a clear message.
    HASHFUNC_BY_DIGESTLEN = {
        16: md5,
        20: sha1,
        32: sha256,
    }

    def __init__(self, fingerprint: bytes) -> None:
        hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(len(fingerprint))
        if not hashfunc:
            raise ValueError("fingerprint has invalid length")
        if hashfunc is md5 or hashfunc is sha1:
            raise ValueError(
                "md5 and sha1 are insecure and " "not supported. Use sha256."
            )
        self._hashfunc = hashfunc
        self._fingerprint = fingerprint

    @property
    def fingerprint(self) -> bytes:
        """The pinned certificate digest."""
        return self._fingerprint

    def check(self, transport: asyncio.Transport) -> None:
        """Raise ServerFingerprintMismatch if the peer cert digest differs."""
        if not transport.get_extra_info("sslcontext"):
            # Plain (non-TLS) transport: nothing to verify.
            return
        sslobj = transport.get_extra_info("ssl_object")
        cert = sslobj.getpeercert(binary_form=True)
        actual = self._hashfunc(cert).digest()
        if actual != self._fingerprint:
            host, port, *_ = transport.get_extra_info("peername")
            raise ServerFingerprintMismatch(self._fingerprint, actual, host, port)
# Types accepted for the ssl= argument; degrades to NoneType only when the
# ssl module could not be imported.
if ssl is not None:
    SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))
else:  # pragma: no cover
    SSL_ALLOWED_TYPES = type(None)
@dataclasses.dataclass(frozen=True)
class ConnectionKey:
    """Hashable key for pooling connections.

    The key should contain an information about used proxy / TLS
    to prevent reusing wrong connections from a pool.
    """

    host: str
    port: Optional[int]
    is_ssl: bool
    ssl: Union[SSLContext, None, bool, Fingerprint]
    proxy: Optional[URL]
    proxy_auth: Optional[BasicAuth]
    proxy_headers_hash: Optional[int]  # hash(CIMultiDict)
class ClientRequest:
    """Represents a single outgoing HTTP request: headers, body, proxy/TLS
    settings, and the machinery to write it to a connection."""

    GET_METHODS = {
        hdrs.METH_GET,
        hdrs.METH_HEAD,
        hdrs.METH_OPTIONS,
        hdrs.METH_TRACE,
    }
    POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
    ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})

    DEFAULT_HEADERS = {
        hdrs.ACCEPT: "*/*",
        hdrs.ACCEPT_ENCODING: _gen_default_accept_encoding(),
    }

    # Class-level defaults, overridden per instance.
    body = b""
    auth = None
    response = None

    _writer = None  # async task for streaming data
    _continue = None  # waiter future for '100 Continue' response

    # N.B.
    # Adding __del__ method with self._writer closing doesn't make sense
    # because _writer is instance method, thus it keeps a reference to self.
    # Until writer has finished finalizer will not be called.
    def __init__(
        self,
        method: str,
        url: URL,
        *,
        params: Optional[Mapping[str, str]] = None,
        headers: Optional[LooseHeaders] = None,
        skip_auto_headers: Iterable[str] = frozenset(),
        data: Any = None,
        cookies: Optional[LooseCookies] = None,
        auth: Optional[BasicAuth] = None,
        version: http.HttpVersion = http.HttpVersion11,
        compress: Optional[str] = None,
        chunked: Optional[bool] = None,
        expect100: bool = False,
        loop: asyncio.AbstractEventLoop,
        response_class: Optional[Type["ClientResponse"]] = None,
        proxy: Optional[URL] = None,
        proxy_auth: Optional[BasicAuth] = None,
        timer: Optional[BaseTimerContext] = None,
        session: Optional["ClientSession"] = None,
        ssl: Union[SSLContext, bool, Fingerprint, None] = None,
        proxy_headers: Optional[LooseHeaders] = None,
        traces: Optional[List["Trace"]] = None,
    ):
        """Build the request; the update_* calls below populate headers,
        cookies, auth, proxy settings and body in a fixed order."""
        assert isinstance(url, URL), url
        assert isinstance(proxy, (URL, type(None))), proxy
        # FIXME: session is None in tests only, need to fix tests
        # assert session is not None
        self._session = cast("ClientSession", session)
        if params:
            # Merge explicit params with any query already on the URL.
            q = MultiDict(url.query)
            url2 = url.with_query(params)
            q.extend(url2.query)
            url = url.with_query(q)
        self.original_url = url
        # The wire request must not carry the fragment.
        self.url = url.with_fragment(None)
        self.method = method.upper()
        self.chunked = chunked
        self.compress = compress
        self.loop = loop
        self.length = None
        if response_class is None:
            real_response_class = ClientResponse
        else:
            real_response_class = response_class
        self.response_class = real_response_class  # type: Type[ClientResponse]
        self._timer = timer if timer is not None else TimerNoop()
        self._ssl = ssl

        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))

        # NOTE: order matters -- e.g. update_transfer_encoding relies on the
        # body set by update_body_from_data.
        self.update_version(version)
        self.update_host(url)
        self.update_headers(headers)
        self.update_auto_headers(skip_auto_headers)
        self.update_cookies(cookies)
        self.update_content_encoding(data)
        self.update_auth(auth)
        self.update_proxy(proxy, proxy_auth, proxy_headers)

        self.update_body_from_data(data)
        if data is not None or self.method not in self.GET_METHODS:
            self.update_transfer_encoding()
        self.update_expect_continue(expect100)
        if traces is None:
            traces = []
        self._traces = traces
def is_ssl(self) -> bool:
return self.url.scheme in ("https", "wss")
    @property
    def ssl(self) -> Union["SSLContext", None, bool, Fingerprint]:
        """TLS validation mode passed at construction time."""
        return self._ssl
@property
def connection_key(self) -> ConnectionKey:
proxy_headers = self.proxy_headers
if proxy_headers:
h = hash(
tuple((k, v) for k, v in proxy_headers.items())
) # type: Optional[int]
else:
h = None
return ConnectionKey(
self.host,
self.port,
self.is_ssl(),
self.ssl,
self.proxy,
self.proxy_auth,
h,
)
    @property
    def host(self) -> str:
        """Raw host from the URL; update_host guarantees it is present."""
        ret = self.url.raw_host
        assert ret is not None
        return ret
    @property
    def port(self) -> Optional[int]:
        """Port from the URL (may be the scheme default supplied by yarl)."""
        return self.url.port
    @property
    def request_info(self) -> RequestInfo:
        """Immutable snapshot of this request for response introspection."""
        headers = CIMultiDictProxy(self.headers)  # type: CIMultiDictProxy[str]
        return RequestInfo(self.url, self.method, headers, self.original_url)
    def update_host(self, url: URL) -> None:
        """Update destination host, port and connection type (ssl).

        Raises:
            InvalidURL: if the URL has no host component.
        """
        # get host/port
        if not url.raw_host:
            raise InvalidURL(url)

        # basic auth info embedded in the URL (user:password@host)
        username, password = url.user, url.password
        if username:
            self.auth = helpers.BasicAuth(username, password or "")
def update_version(self, version: Union[http.HttpVersion, str]) -> None:
"""Convert request version to two elements tuple.
parser HTTP version '1.1' => (1, 1)
"""
if isinstance(version, str):
v = [part.strip() for part in version.split(".", 1)]
try:
version = http.HttpVersion(int(v[0]), int(v[1]))
except ValueError:
raise ValueError(
f"Can not parse http version number: {version}"
) from None
self.version = version
    def update_headers(self, headers: Optional[LooseHeaders]) -> None:
        """Update request headers, always (re)computing the Host header."""
        self.headers = CIMultiDict()  # type: CIMultiDict[str]

        # add host
        netloc = cast(str, self.url.raw_host)
        if helpers.is_ipv6_address(netloc):
            netloc = f"[{netloc}]"
        if self.url.port is not None and not self.url.is_default_port():
            netloc += ":" + str(self.url.port)
        self.headers[hdrs.HOST] = netloc

        if headers:
            if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
                headers = headers.items()  # type: ignore[assignment]

            for key, value in headers:  # type: ignore[misc]
                # A special case for Host header: replace the computed value
                # instead of adding a duplicate.
                if key.lower() == "host":
                    self.headers[key] = value
                else:
                    self.headers.add(key, value)
    def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:
        """Fill in DEFAULT_HEADERS and User-Agent unless the caller already
        set them or listed them in *skip_auto_headers*."""
        self.skip_auto_headers = CIMultiDict(
            (hdr, None) for hdr in sorted(skip_auto_headers)
        )
        used_headers = self.headers.copy()
        used_headers.extend(self.skip_auto_headers)  # type: ignore[arg-type]

        for hdr, val in self.DEFAULT_HEADERS.items():
            if hdr not in used_headers:
                self.headers.add(hdr, val)

        if hdrs.USER_AGENT not in used_headers:
            self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
    """Update request cookies header."""
    if not cookies:
        return

    c = SimpleCookie()  # type: SimpleCookie[str]
    if hdrs.COOKIE in self.headers:
        # Merge with cookies already present on the request, then rebuild
        # the header from scratch below.
        c.load(self.headers.get(hdrs.COOKIE, ""))
        del self.headers[hdrs.COOKIE]

    if isinstance(cookies, Mapping):
        iter_cookies = cookies.items()
    else:
        iter_cookies = cookies  # type: ignore[assignment]
    for name, value in iter_cookies:
        if isinstance(value, Morsel):
            # Preserve coded_value
            mrsl_val = value.get(value.key, Morsel())
            mrsl_val.set(value.key, value.value, value.coded_value)
            c[name] = mrsl_val
        else:
            c[name] = value  # type: ignore[assignment]

    self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip()
def update_content_encoding(self, data: Any) -> None:
    """Set request content encoding."""
    if data is None:
        return

    enc = self.headers.get(hdrs.CONTENT_ENCODING, "").lower()
    if enc:
        if self.compress:
            # An explicit Content-Encoding header and automatic compression
            # are mutually exclusive.
            raise ValueError(
                "compress can not be set " "if Content-Encoding header is set"
            )
    elif self.compress:
        if not isinstance(self.compress, str):
            # compress=True means "use the default algorithm"
            self.compress = "deflate"
        self.headers[hdrs.CONTENT_ENCODING] = self.compress
        self.chunked = True  # enable chunked, no need to deal with length
def update_transfer_encoding(self) -> None:
    """Analyze transfer-encoding header."""
    te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower()

    if "chunked" in te:
        if self.chunked:
            # Caller must not both set the header and ask for chunking.
            raise ValueError(
                "chunked can not be set "
                'if "Transfer-Encoding: chunked" header is set'
            )

    elif self.chunked:
        if hdrs.CONTENT_LENGTH in self.headers:
            # Chunked transfer and an explicit length are incompatible.
            raise ValueError(
                "chunked can not be set " "if Content-Length header is set"
            )

        self.headers[hdrs.TRANSFER_ENCODING] = "chunked"
    else:
        if hdrs.CONTENT_LENGTH not in self.headers:
            # Fall back to an explicit length computed from the body.
            self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
def update_auth(self, auth: Optional[BasicAuth]) -> None:
    """Set the Authorization header from explicit or stored credentials."""
    effective = auth if auth is not None else self.auth
    if effective is None:
        return
    if not isinstance(effective, helpers.BasicAuth):
        raise TypeError("BasicAuth() tuple is required instead")
    self.headers[hdrs.AUTHORIZATION] = effective.encode()
def update_body_from_data(self, body: Any) -> None:
    """Attach *body* as the request payload and sync related headers.

    FormData objects are rendered; anything else is wrapped via the payload
    registry (falling back to multipart FormData encoding).  Enables chunked
    transfer when the payload size is unknown, otherwise sets Content-Length,
    and copies payload-provided headers that are not already set or skipped.
    """
    if body is None:
        return

    # FormData
    if isinstance(body, FormData):
        body = body()

    try:
        body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)
    except payload.LookupError:
        # Reuse an existing multipart boundary if the caller preset one.
        boundary = None
        if CONTENT_TYPE in self.headers:
            boundary = parse_mimetype(self.headers[CONTENT_TYPE]).parameters.get(
                "boundary"
            )
        body = FormData(body, boundary=boundary)()

    self.body = body

    # enable chunked encoding if needed
    # (the duplicated inner CONTENT_LENGTH check was redundant and removed)
    if not self.chunked and hdrs.CONTENT_LENGTH not in self.headers:
        size = body.size
        if size is None:
            self.chunked = True
        else:
            self.headers[hdrs.CONTENT_LENGTH] = str(size)

    # copy payload headers
    assert body.headers
    for key, value in body.headers.items():
        if key in self.headers:
            continue
        if key in self.skip_auto_headers:
            continue
        self.headers[key] = value
def update_expect_continue(self, expect: bool = False) -> None:
    """Set up the Expect: 100-continue handshake.

    Either the caller asked for it (the header is added) or the header is
    already present (the handshake future is created anyway).
    """
    if expect:
        self.headers[hdrs.EXPECT] = "100-continue"
    else:
        expect = self.headers.get(hdrs.EXPECT, "").lower() == "100-continue"
    if expect:
        self._continue = self.loop.create_future()
def update_proxy(
    self,
    proxy: Optional[URL],
    proxy_auth: Optional[BasicAuth],
    proxy_headers: Optional[LooseHeaders],
) -> None:
    """Remember the proxy endpoint, credentials and extra proxy headers."""
    if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):
        raise ValueError("proxy_auth must be None or BasicAuth() tuple")
    self.proxy, self.proxy_auth, self.proxy_headers = (
        proxy,
        proxy_auth,
        proxy_headers,
    )
def keep_alive(self) -> bool:
    """Decide whether the connection may be reused after this request."""
    if self.version < HttpVersion10:
        # keep alive not supported at all
        return False
    conn_header = self.headers.get(hdrs.CONNECTION)
    if self.version == HttpVersion10:
        # HTTP/1.0 closes by default unless keep-alive was explicitly asked.
        return conn_header == "keep-alive"
    # HTTP/1.1+ keeps alive by default unless an explicit close was asked.
    return conn_header != "close"
async def write_bytes(
    self, writer: AbstractStreamWriter, conn: "Connection"
) -> None:
    """Support coroutines that yields bytes objects."""
    # 100 response: wait for the server's "100 Continue" (resolved by the
    # response reader) before sending any body bytes.
    if self._continue is not None:
        await writer.drain()
        await self._continue

    protocol = conn.protocol
    assert protocol is not None
    try:
        if isinstance(self.body, payload.Payload):
            await self.body.write(writer)
        else:
            # Normalize a single bytes-like body into an iterable of chunks.
            if isinstance(self.body, (bytes, bytearray)):
                self.body = (self.body,)  # type: ignore[assignment]

            for chunk in self.body:
                await writer.write(chunk)  # type: ignore[arg-type]

        await writer.write_eof()
    except OSError as exc:
        # Surface write failures through the protocol so the reader side
        # raises them; keep the original as both context and cause.
        new_exc = ClientOSError(
            exc.errno, "Can not write request body for %s" % self.url
        )
        new_exc.__context__ = exc
        new_exc.__cause__ = exc
        protocol.set_exception(new_exc)
    except asyncio.CancelledError as exc:
        if not conn.closed:
            protocol.set_exception(exc)
    except Exception as exc:
        protocol.set_exception(exc)
    finally:
        self._writer = None
async def send(self, conn: "Connection") -> "ClientResponse":
    """Serialize and send this request on *conn*; return the response object
    (not yet started)."""
    # Specify request target:
    # - CONNECT request must send authority form URI
    # - not CONNECT proxy must send absolute form URI
    # - most common is origin form URI
    if self.method == hdrs.METH_CONNECT:
        connect_host = self.url.raw_host
        assert connect_host is not None
        if helpers.is_ipv6_address(connect_host):
            connect_host = f"[{connect_host}]"
        path = f"{connect_host}:{self.url.port}"
    elif self.proxy and not self.is_ssl():
        # Plain-HTTP request through a proxy: absolute-form target.
        path = str(self.url)
    else:
        path = self.url.raw_path
        if self.url.raw_query_string:
            path += "?" + self.url.raw_query_string

    protocol = conn.protocol
    assert protocol is not None
    writer = StreamWriter(
        protocol,
        self.loop,
        on_chunk_sent=functools.partial(
            self._on_chunk_request_sent, self.method, self.url
        ),
        on_headers_sent=functools.partial(
            self._on_headers_request_sent, self.method, self.url
        ),
    )

    if self.compress:
        writer.enable_compression(self.compress)

    if self.chunked is not None:
        writer.enable_chunking()

    # set default content-type for body-carrying methods
    if (
        self.method in self.POST_METHODS
        and hdrs.CONTENT_TYPE not in self.skip_auto_headers
        and hdrs.CONTENT_TYPE not in self.headers
    ):
        self.headers[hdrs.CONTENT_TYPE] = "application/octet-stream"

    # set the connection header only where the HTTP-version default differs
    connection = self.headers.get(hdrs.CONNECTION)
    if not connection:
        if self.keep_alive():
            if self.version == HttpVersion10:
                connection = "keep-alive"
        else:
            if self.version == HttpVersion11:
                connection = "close"

    if connection is not None:
        self.headers[hdrs.CONNECTION] = connection

    # status + headers
    status_line = "{0} {1} HTTP/{2[0]}.{2[1]}".format(
        self.method, path, self.version
    )
    await writer.write_headers(status_line, self.headers)

    # The body is written concurrently; keep the task so it can be awaited
    # (close) or cancelled (terminate) later.
    self._writer = self.loop.create_task(self.write_bytes(writer, conn))

    response_class = self.response_class
    assert response_class is not None
    self.response = response_class(
        self.method,
        self.original_url,
        writer=self._writer,
        continue100=self._continue,
        timer=self._timer,
        request_info=self.request_info,
        traces=self._traces,
        loop=self.loop,
        session=self._session,
    )
    return self.response
async def close(self) -> None:
    """Wait for the body-writer task to finish, then drop the reference."""
    writer = self._writer
    if writer is None:
        return
    try:
        await writer
    finally:
        self._writer = None
def terminate(self) -> None:
    """Cancel the body-writer task without awaiting it (hard stop)."""
    writer = self._writer
    if writer is None:
        return
    # A closed loop can no longer cancel tasks.
    if not self.loop.is_closed():
        writer.cancel()
    self._writer = None
async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None:
    """Notify all registered traces that a body chunk was sent."""
    for trace in self._traces:
        await trace.send_request_chunk_sent(method, url, chunk)
async def _on_headers_request_sent(
    self, method: str, url: URL, headers: "CIMultiDict[str]"
) -> None:
    """Notify all registered traces that the request headers were sent."""
    for trace in self._traces:
        await trace.send_request_headers(method, url, headers)
class ClientResponse(HeadersMixin):
    """HTTP response object.

    Created by ClientRequest.send(); most attributes are filled in later by
    start() once the status line and headers have been read from the wire.
    """

    # from the Status-Line of the response
    version = None  # HTTP-Version
    status = None  # type: int  # Status-Code
    reason = None  # Reason-Phrase

    content = None  # type: StreamReader  # Payload stream
    _headers = None  # type: CIMultiDictProxy[str]  # Response headers
    _raw_headers = None  # type: RawHeaders  # Response raw headers

    _connection = None  # current connection
    _source_traceback = None
    # set up by ClientRequest after ClientResponse object creation
    # post-init stage allows to not change ctor signature
    _closed = True  # to allow __del__ for non-initialized properly response
    _released = False

    def __init__(
        self,
        method: str,
        url: URL,
        *,
        writer: "asyncio.Task[None]",
        continue100: Optional["asyncio.Future[bool]"],
        timer: BaseTimerContext,
        request_info: RequestInfo,
        traces: List["Trace"],
        loop: asyncio.AbstractEventLoop,
        session: "ClientSession",
    ) -> None:
        assert isinstance(url, URL)

        super().__init__()

        self.method = method
        self.cookies = SimpleCookie()  # type: SimpleCookie[str]

        self._real_url = url
        # fragment is client-side only and must not be sent to the server
        self._url = url.with_fragment(None)
        self._body = None  # type: Optional[bytes]
        self._writer = writer  # type: Optional[asyncio.Task[None]]
        self._continue = continue100  # None by default
        self._closed = True
        self._history = ()  # type: Tuple[ClientResponse, ...]
        self._request_info = request_info
        self._timer = timer if timer is not None else TimerNoop()
        self._cache = {}  # type: Dict[str, Any]
        self._traces = traces
        self._loop = loop
        # store a reference to session #1985
        self._session = session  # type: Optional[ClientSession]
        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))

    @reify
    def url(self) -> URL:
        """Request URL with the fragment stripped."""
        return self._url

    @reify
    def real_url(self) -> URL:
        """Original request URL, fragment included."""
        return self._real_url

    @reify
    def host(self) -> str:
        assert self._url.host is not None
        return self._url.host

    @reify
    def headers(self) -> "CIMultiDictProxy[str]":
        return self._headers

    @reify
    def raw_headers(self) -> RawHeaders:
        return self._raw_headers

    @reify
    def request_info(self) -> RequestInfo:
        return self._request_info

    @reify
    def content_disposition(self) -> Optional[ContentDisposition]:
        """Parsed Content-Disposition header, or None when absent."""
        raw = self._headers.get(hdrs.CONTENT_DISPOSITION)
        if raw is None:
            return None
        disposition_type, params_dct = multipart.parse_content_disposition(raw)
        params = MappingProxyType(params_dct)
        filename = multipart.content_disposition_filename(params)
        return ContentDisposition(disposition_type, params, filename)

    def __del__(self, _warnings: Any = warnings) -> None:
        # Best-effort cleanup; warn in debug mode about unclosed responses.
        if self._closed:
            return

        if self._connection is not None:
            self._connection.release()
            self._cleanup_writer()

            if self._loop.get_debug():
                _warnings.warn(
                    f"Unclosed response {self!r}", ResourceWarning, source=self
                )
                context = {"client_response": self, "message": "Unclosed response"}
                if self._source_traceback:
                    context["source_traceback"] = self._source_traceback
                self._loop.call_exception_handler(context)

    def __repr__(self) -> str:
        out = io.StringIO()
        ascii_encodable_url = str(self.url)
        if self.reason:
            # keep the repr ASCII-safe regardless of the server's phrase
            ascii_encodable_reason = self.reason.encode(
                "ascii", "backslashreplace"
            ).decode("ascii")
        else:
            ascii_encodable_reason = self.reason
        print(
            "<ClientResponse({}) [{} {}]>".format(
                ascii_encodable_url, self.status, ascii_encodable_reason
            ),
            file=out,
        )
        print(self.headers, file=out)
        return out.getvalue()

    @property
    def connection(self) -> Optional["Connection"]:
        return self._connection

    @reify
    def history(self) -> Tuple["ClientResponse", ...]:
        """A sequence of responses, if redirects occurred."""
        return self._history

    @reify
    def links(self) -> "MultiDictProxy[MultiDictProxy[Union[str, URL]]]":
        """RFC 8288 Link header parsed into a multidict keyed by rel/url."""
        links_str = ", ".join(self.headers.getall("link", []))

        if not links_str:
            return MultiDictProxy(MultiDict())

        links = MultiDict()  # type: MultiDict[MultiDictProxy[Union[str, URL]]]

        # split on commas that start a new <url> element
        for val in re.split(r",(?=\s*<)", links_str):
            match = re.match(r"\s*<(.*)>(.*)", val)
            if match is None:  # pragma: no cover
                # the check exists to suppress mypy error
                continue
            url, params_str = match.groups()
            params = params_str.split(";")[1:]

            link = MultiDict()  # type: MultiDict[Union[str, URL]]

            for param in params:
                match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$", param, re.M)
                if match is None:  # pragma: no cover
                    # the check exists to suppress mypy error
                    continue
                key, _, value, _ = match.groups()

                link.add(key, value)

            # prefer the rel attribute as the outer key, else the URL itself
            key = link.get("rel", url)  # type: ignore[assignment]

            link.add("url", self.url.join(URL(url)))

            links.add(key, MultiDictProxy(link))

        return MultiDictProxy(links)

    async def start(self, connection: "Connection") -> "ClientResponse":
        """Start response processing."""
        self._closed = False
        self._protocol = connection.protocol
        self._connection = connection

        with self._timer:
            while True:
                # read response
                try:
                    protocol = self._protocol
                    message, payload = await protocol.read()  # type: ignore[union-attr]
                except http.HttpProcessingError as exc:
                    raise ClientResponseError(
                        self.request_info,
                        self.history,
                        status=exc.code,
                        message=exc.message,
                        headers=exc.headers,
                    ) from exc

                # Skip purely informational 1xx responses; 101 switches
                # protocols and must be surfaced to the caller.
                if message.code < 100 or message.code > 199 or message.code == 101:
                    break

                if self._continue is not None:
                    # resolve the Expect: 100-continue handshake
                    set_result(self._continue, True)
                    self._continue = None

        # payload eof handler
        payload.on_eof(self._response_eof)

        # response status
        self.version = message.version
        self.status = message.code
        self.reason = message.reason

        # headers
        self._headers = message.headers  # type is CIMultiDictProxy
        self._raw_headers = message.raw_headers  # type is Tuple[bytes, bytes]

        # payload
        self.content = payload

        # cookies
        for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):
            try:
                self.cookies.load(hdr)
            except CookieError as exc:
                client_logger.warning("Can not load response cookies: %s", exc)
        return self

    def _response_eof(self) -> None:
        # Called when the payload hits EOF: the connection can be reused.
        if self._closed:
            return

        if self._connection is not None:
            # websocket, protocol could be None because
            # connection could be detached
            if (
                self._connection.protocol is not None
                and self._connection.protocol.upgraded
            ):
                return

            self._connection.release()
            self._connection = None

        self._closed = True
        self._cleanup_writer()

    @property
    def closed(self) -> bool:
        return self._closed

    def close(self) -> None:
        """Forcefully close the connection (payload is discarded)."""
        if not self._released:
            self._notify_content()
        if self._closed:
            return

        self._closed = True
        if self._loop is None or self._loop.is_closed():
            return

        if self._connection is not None:
            self._connection.close()
            self._connection = None
        self._cleanup_writer()

    def release(self) -> Any:
        """Release the connection back to the pool for reuse."""
        if not self._released:
            self._notify_content()
        if self._closed:
            return noop()

        self._closed = True
        if self._connection is not None:
            self._connection.release()
            self._connection = None

        self._cleanup_writer()
        return noop()

    @property
    def ok(self) -> bool:
        """Returns ``True`` if ``status`` is less than ``400``, ``False`` if not.

        This is **not** a check for ``200 OK`` but a check that the response
        status is under 400.
        """
        return 400 > self.status

    def raise_for_status(self) -> None:
        """Raise ClientResponseError for 4xx/5xx statuses."""
        if not self.ok:
            # reason should always be not None for a started response
            assert self.reason is not None
            self.release()
            raise ClientResponseError(
                self.request_info,
                self.history,
                status=self.status,
                message=self.reason,
                headers=self.headers,
            )

    def _cleanup_writer(self) -> None:
        # Cancel the request body writer and drop the session reference.
        if self._writer is not None:
            self._writer.cancel()
        self._writer = None
        self._session = None

    def _notify_content(self) -> None:
        # Mark the remaining payload as unreadable before releasing/closing.
        content = self.content
        if content and content.exception() is None:
            content.set_exception(ClientConnectionError("Connection closed"))
        self._released = True

    async def wait_for_close(self) -> None:
        """Await the body writer, then release the connection."""
        if self._writer is not None:
            try:
                await self._writer
            finally:
                self._writer = None
        self.release()

    async def read(self) -> bytes:
        """Read response payload."""
        if self._body is None:
            try:
                self._body = await self.content.read()
                for trace in self._traces:
                    await trace.send_response_chunk_received(
                        self.method, self.url, self._body
                    )
            except BaseException:
                # connection state is unknown after a failed read: close it
                self.close()
                raise
        elif self._released:
            raise ClientConnectionError("Connection closed")

        return self._body

    def get_encoding(self) -> str:
        """Best-effort charset detection for the response body."""
        ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
        mimetype = helpers.parse_mimetype(ctype)

        encoding = mimetype.parameters.get("charset")
        if encoding:
            try:
                codecs.lookup(encoding)
            except LookupError:
                # declared charset is unknown to Python; fall through
                encoding = None
        if not encoding:
            if mimetype.type == "application" and (
                mimetype.subtype == "json" or mimetype.subtype == "rdap"
            ):
                # RFC 7159 states that the default encoding is UTF-8.
                # RFC 7483 defines application/rdap+json
                encoding = "utf-8"
            elif self._body is None:
                raise RuntimeError(
                    "Cannot guess the encoding of " "a not yet read body"
                )
            else:
                # last resort: statistical detection over the raw bytes
                encoding = chardet.detect(self._body)["encoding"]
        if not encoding:
            encoding = "utf-8"

        return encoding

    async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str:
        """Read response payload and decode."""
        if self._body is None:
            await self.read()

        if encoding is None:
            encoding = self.get_encoding()

        return self._body.decode(encoding, errors=errors)  # type: ignore[union-attr]

    async def json(
        self,
        *,
        encoding: Optional[str] = None,
        loads: JSONDecoder = DEFAULT_JSON_DECODER,
        content_type: Optional[str] = "application/json",
    ) -> Any:
        """Read and decodes JSON response."""
        if self._body is None:
            await self.read()

        if content_type:
            # guard against decoding non-JSON bodies; pass None to skip
            if not is_expected_content_type(self.content_type, content_type):
                raise ContentTypeError(
                    self.request_info,
                    self.history,
                    message=(
                        "Attempt to decode JSON with "
                        "unexpected mimetype: %s" % self.content_type
                    ),
                    headers=self.headers,
                )

        if encoding is None:
            encoding = self.get_encoding()

        return loads(self._body.decode(encoding))  # type: ignore[union-attr]

    async def __aenter__(self) -> "ClientResponse":
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        # similar to _RequestContextManager, we do not need to check
        # for exceptions, response object can close connection
        # if state is broken
        self.release()
| |
import logging
import subprocess
import sys
import time
import boto3
from .. import appversion
logger = logging.getLogger(__name__)
def get_environ_name_for_cname(app_name, cname):
    """Return the environment in *app_name* whose CNAME starts with *cname*.

    When several environments match, the one with the longest CNAME wins:
    given myenv.ap-northeast-1.elasticbeanstalk.com and
    myenv.elasticbeanstalk.com, the former is returned.
    """
    eb = boto3.client('elasticbeanstalk')
    res = eb.describe_environments(ApplicationName=app_name)
    if res['ResponseMetadata']['HTTPStatusCode'] != 200:
        raise ValueError('ElasticBeanstalk client did not return 200 for describing environments')
    prefix = cname + '.'
    by_cname_length = sorted(res['Environments'], key=lambda env: len(env['CNAME']))
    for env in reversed(by_cname_length):
        if env['CNAME'].startswith(prefix):
            return env['EnvironmentName']
    raise ValueError('Could not find environment for applied app_name and cname')
def get_instance_health(group_name, number):
    """Return True when *group_name* runs exactly *number* healthy instances.

    False when the instance count differs, when an instance has no status
    yet, or when any instance status is not 'ok'.
    """
    autoscale = boto3.client('autoscaling')
    as_json = autoscale.describe_auto_scaling_groups(
        AutoScalingGroupNames=[group_name])
    instances = as_json['AutoScalingGroups'][0]['Instances']
    if len(instances) != number:
        return False
    # One EC2 client for the whole loop; creating it per instance is
    # loop-invariant wasted work (fixed: it used to be rebuilt each pass).
    ec2 = boto3.client('ec2')
    for instance in instances:
        # Wait for the all of instance status to be healthy.
        status = ec2.describe_instance_status(InstanceIds=[instance['InstanceId']])
        if not status['InstanceStatuses']:
            return False
        if status['InstanceStatuses'][0]['InstanceStatus']['Status'] != 'ok':
            return False
    return True
def update_secondary_group_capacity(primary_group_name, secondary_group_name, secondary_env_name, app_name):
    """Mirror the primary group's capacity onto the secondary group.

    Copies DesiredCapacity/MinSize/MaxSize from the primary auto-scaling
    group, waits (polling every 30s, up to 20 minutes) until every secondary
    instance is healthy, then persists Min/Max into the EB environment
    options.  Exits the process on timeout.
    """
    autoscale = boto3.client('autoscaling')
    as_json = autoscale.describe_auto_scaling_groups(
        AutoScalingGroupNames=[primary_group_name])
    number = as_json['AutoScalingGroups'][0]['DesiredCapacity']
    min_size = as_json['AutoScalingGroups'][0]['MinSize']
    max_size = as_json['AutoScalingGroups'][0]['MaxSize']
    autoscale.update_auto_scaling_group(
        AutoScalingGroupName=secondary_group_name,
        MaxSize=max_size,
        MinSize=min_size,
        DesiredCapacity=number
    )
    logger.info(
        'The number of instances to run was set to %d, the minimum size to %d, the maximum size to %d',
        number,
        min_size,
        max_size
    )
    # Wait for the instance to come up.
    logger.info('Wait for the instance to come up')
    start = time.time()
    while not get_instance_health(secondary_group_name, number):
        passed_time = time.time() - start
        # Make timeout 20 minutes
        if passed_time >= 20 * 60:
            logger.warning("The capacity set operation timed out.")
            sys.exit(1)
        time.sleep(30)
    logger.info("The all of instances are healthy.")

    # update EB environment description so the sizes survive redeploys
    eb = boto3.client('elasticbeanstalk')
    eb.update_environment(
        ApplicationName=app_name,
        EnvironmentName=secondary_env_name,
        OptionSettings=[
            {'Namespace': 'aws:autoscaling:asg', 'OptionName': 'MinSize', 'Value': str(min_size)},
            {'Namespace': 'aws:autoscaling:asg', 'OptionName': 'MaxSize', 'Value': str(max_size)}
        ]
    )
def main(parsed):
    """Blue/green deploy driver.

    Resolves which environment currently owns the CNAME (primary), deploys a
    new application version to the other one (secondary), optionally mirrors
    the primary auto-scaling capacity, then swaps the CNAMEs unless --noswap.
    """
    master_env_name = get_environ_name_for_cname(parsed.app_name, parsed.cname)
    if parsed.blue_env == master_env_name:
        primary_env_name = parsed.blue_env
        secondary_env_name = parsed.green_env
    elif parsed.green_env == master_env_name:
        primary_env_name = parsed.green_env
        secondary_env_name = parsed.blue_env
    else:
        raise ValueError('master env for cname {p.cname} was not in {p.app_name}'.format(p=parsed))

    ###
    # Deploying
    ###
    if parsed.version:
        version = parsed.version
    elif parsed.prefix:
        version = "{}_{}".format(parsed.prefix, int(time.time()))
    else:
        version = str(int(time.time()))
    if parsed.description:
        description = parsed.description
    else:
        description = ''
    appversion.make_application_version(parsed.app_name, version, parsed.dockerrun, parsed.docker_compose, parsed.ebext, parsed.use_ebignore, description)

    logger.info('Ok, now deploying the version %s for %s', version, secondary_env_name)
    payload = ['eb', 'deploy', secondary_env_name,
               '--version=' + version]
    if parsed.profile:
        payload.append('--profile=' + parsed.profile)
    if parsed.region:
        payload.append('--region=' + parsed.region)
    if parsed.timeout:
        payload.append('--timeout=' + parsed.timeout)
    r = subprocess.call(payload)
    if r != 0:
        logger.error("Failed to deploy version %s to environment %s",
                     version, secondary_env_name)
        sys.exit(r)

    ###
    # Set desired capacity
    ###
    if parsed.capacity:
        autoscale = boto3.client('autoscaling')
        as_json = autoscale.describe_tags(
            Filters=[
                {
                    'Name': 'Value',
                    'Values': [secondary_env_name, primary_env_name]
                },
            ]
        )
        # BUG FIX: these names used to stay unbound (NameError at the call
        # below) when the expected Name tags were missing; fail clearly.
        primary_group_name = None
        secondary_group_name = None
        for x in as_json['Tags']:
            if x['Key'] == 'Name' and x['Value'] == secondary_env_name:
                secondary_group_name = x['ResourceId']
            elif x['Key'] == 'Name' and x['Value'] == primary_env_name:
                primary_group_name = x['ResourceId']
        if primary_group_name is None or secondary_group_name is None:
            raise ValueError('Could not resolve auto scaling group names for '
                             '{} / {}'.format(primary_env_name, secondary_env_name))
        update_secondary_group_capacity(
            primary_group_name,
            secondary_group_name,
            secondary_env_name,
            parsed.app_name
        )

    ###
    # Swapping
    ###
    if parsed.noswap:
        logger.info('DONE successfully without Swapping. just deployed secondary environment %s',
                    secondary_env_name)
        return
    eb = boto3.client('elasticbeanstalk')
    logger.info('Swapping primary %s => new primary %s',
                primary_env_name, secondary_env_name)
    eb.swap_environment_cnames(SourceEnvironmentName=primary_env_name,
                               DestinationEnvironmentName=secondary_env_name)
    logger.info('DONE successfully. Primary %s => new primary %s.'
                'If problem, re-swap new primary to primary',
                primary_env_name, secondary_env_name)
def apply_args(parser):
    """Register CLI arguments for the blue/green deploy subcommand."""
    add = parser.add_argument
    add('app_name', help='Application name to deploy')
    add('green_env', help='green env name')
    add('blue_env', help='blue env name')
    add('cname', help='cname prefix for primary environment')
    add('--noswap', help='Without swapping, it will just deploy for secondary'
                         'environment',
        action='store_true', default=False)
    add('--version', help='Version label you want to specify')
    add('--prefix', help='Version label prefix you want to specify')
    add('--description', help='Description for this version')
    add('--profile', help='AWS account')
    add('--region', help='AWS region')
    add('--timeout', help='The number of minutes before the deploy timeout')
    add('--dockerrun', help='Path to file used as Dockerrun.aws.json')
    add('--docker-compose', help='Path to file used as docker-compose.yml')
    add('--ebext', help='Path to directory used as .ebextensions/')
    add('--use-ebignore', help='Zip project based on .ebignore',
        action='store_true', default=True)
    add('--capacity', help='Set the number of instances.',
        action='store_true', default=False)
    parser.set_defaults(func=main)
| |
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Benjamin Kampmann <ben.kampmann@googlemail.com>
"""
This is a Media Backend that allows you to access the Trailers from Apple.com
"""
from coherence.backend import BackendItem, BackendStore
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.utils import ReverseProxyUriResource
from twisted.web import client
from twisted.internet import task, reactor
from coherence.extern.et import parse_xml
# Feed of the currently-running Apple movie trailers (XML).
XML_URL = "http://www.apple.com/trailers/home/xml/current.xml"
# Object id of the root container exposed to UPnP clients.
ROOT_ID = 0
class AppleTrailerProxy(ReverseProxyUriResource):
    """Reverse proxy resource that masquerades as QuickTime.

    Apple's trailer servers only serve media to QuickTime user agents, so
    the User-Agent header is overridden before the request is proxied.
    """

    def __init__(self, uri):
        ReverseProxyUriResource.__init__(self, uri)

    def render(self, request):
        # Pretend to be QuickTime on Windows; Apple rejects other agents.
        request.requestHeaders.setRawHeaders('user-agent', ['QuickTime/7.6.2 (qtver=7.6.2;os=Windows NT 5.1Service Pack 3)'])
        return ReverseProxyUriResource.render(self, request)
class Trailer(BackendItem):
    """A single movie trailer exposed as a UPnP video item."""

    def __init__(self, parent_id, urlbase, id=None, name=None, cover=None,
                 url=None):
        BackendItem.__init__(self)
        self.parentid = parent_id
        self.id = id
        self.name = name
        self.cover = cover
        if(len(urlbase) and urlbase[-1] != '/'):
            urlbase += '/'
        # Local (proxied) URL under which this trailer is served.
        self.url = urlbase + str(self.id)
        # Proxy resource that fetches the real stream from Apple (*url*).
        self.location = AppleTrailerProxy(url)
        self.item = DIDLLite.VideoItem(id, parent_id, self.name)
        self.item.albumArtURI = self.cover

    def get_path(self):
        # Returns the proxied URL, not a filesystem path.
        return self.url
class Container(BackendItem):
    """A simple directory container holding the Trailer items."""

    logCategory = 'apple_trailers'

    def __init__(self, id, parent_id, name, store=None, \
                 children_callback=None):
        BackendItem.__init__(self)
        self.id = id
        self.parent_id = parent_id
        self.name = name
        self.mimetype = 'directory'
        self.update_id = 0
        self.children = []

        self.item = DIDLLite.Container(id, parent_id, self.name)
        self.item.childCount = None  # self.get_child_count()

    def get_children(self, start=0, end=0):
        # Clamp browse requests to pages of at most 25 children.
        # NOTE(review): "start - end == start" is only true when end == 0,
        # so "return everything" requests are also capped at 25 items.
        if(end - start > 25 or
           start - end == start or
           end - start == 0):
            end = start + 25
        if end != 0:
            return self.children[start:end]
        return self.children[start:]

    def get_child_count(self):
        return len(self.children)

    def get_item(self):
        return self.item

    def get_name(self):
        return self.name

    def get_id(self):
        return self.id
class AppleTrailersStore(BackendStore):
    """Coherence media-server backend serving Apple movie trailers.

    Periodically downloads Apple's trailer XML feed and exposes each entry
    as a proxied UPnP video item.  Written for Python 2 / Twisted.
    """

    logCategory = 'apple_trailers'
    implements = ['MediaServer']

    def __init__(self, server, *args, **kwargs):
        BackendStore.__init__(self, server, **kwargs)
        self.next_id = 1000
        self.name = kwargs.get('name', 'Apple Trailers')
        # Config value is in hours; stored in seconds for callLater().
        self.refresh = int(kwargs.get('refresh', 8)) * (60 * 60)

        # trailer id -> Trailer
        self.trailers = {}

        self.wmc_mapping = {'15': 0}

        dfr = self.update_data()
        # first get the first bunch of data before sending init_completed
        dfr.addCallback(lambda x: self.init_completed())

    def queue_update(self, result):
        # Re-schedule a feed refresh after the configured interval.
        reactor.callLater(self.refresh, self.update_data)
        return result

    def update_data(self):
        # Fetch the feed, parse it, then queue the next refresh.
        dfr = client.getPage(XML_URL)
        dfr.addCallback(parse_xml)
        dfr.addCallback(self.parse_data)
        dfr.addCallback(self.queue_update)
        return dfr

    def parse_data(self, xml_data):
        def iterate(root):
            for item in root.findall('./movieinfo'):
                trailer = self._parse_into_trailer(item)
                yield trailer
        root = xml_data.getroot()
        # coiterate lets the reactor breathe between feed entries.
        return task.coiterate(iterate(root))

    def _parse_into_trailer(self, item):
        """Build a Trailer (with DIDL resources) from one <movieinfo> node
        and register it in self.trailers."""
        data = {}
        data['id'] = item.get('id')
        data['name'] = item.find('./info/title').text
        data['cover'] = item.find('./poster/location').text
        data['url'] = item.find('./preview/large').text

        trailer = Trailer(ROOT_ID, self.urlbase, **data)

        duration = None
        try:
            hours = 0
            minutes = 0
            seconds = 0
            duration = item.find('./info/runtime').text
            # Runtime may be "h:mm:ss", "mm:ss" or plain seconds.
            try:
                hours, minutes, seconds = duration.split(':')
            except ValueError:
                try:
                    minutes, seconds = duration.split(':')
                except ValueError:
                    seconds = duration
            duration = "%d:%02d:%02d" % (int(hours), int(minutes), int(seconds))
        except:
            # NOTE(review): bare except silently drops malformed runtimes.
            pass

        try:
            trailer.item.director = item.find('./info/director').text
        except:
            pass
        try:
            trailer.item.description = item.find('./info/description').text
        except:
            pass

        res = DIDLLite.Resource(trailer.get_path(), 'http-get:*:video/quicktime:*')
        res.duration = duration
        try:
            res.size = item.find('./preview/large').get('filesize', None)
        except:
            pass
        trailer.item.res.append(res)
        if self.server.coherence.config.get('transcoding', 'no') == 'yes':
            # Advertise an additional on-the-fly mp4 transcode resource.
            dlna_pn = 'DLNA.ORG_PN=AVC_TS_BL_CIF15_AAC'
            dlna_tags = DIDLLite.simple_dlna_tags[:]
            dlna_tags[2] = 'DLNA.ORG_CI=1'
            url = self.urlbase + str(trailer.id) + '?transcoded=mp4'
            new_res = DIDLLite.Resource(url,
                'http-get:*:%s:%s' % ('video/mp4', ';'.join([dlna_pn] + dlna_tags)))
            new_res.size = None
            res.duration = duration
            trailer.item.res.append(new_res)

            # ... and a JPEG thumbnail transcode of the poster.
            dlna_pn = 'DLNA.ORG_PN=JPEG_TN'
            dlna_tags = DIDLLite.simple_dlna_tags[:]
            dlna_tags[2] = 'DLNA.ORG_CI=1'
            dlna_tags[3] = 'DLNA.ORG_FLAGS=00f00000000000000000000000000000'
            url = self.urlbase + str(trailer.id) + '?attachment=poster&transcoded=thumb&type=jpeg'
            new_res = DIDLLite.Resource(url,
                'http-get:*:%s:%s' % ('image/jpeg', ';'.join([dlna_pn] + dlna_tags)))
            new_res.size = None
            #new_res.resolution = "160x160"
            trailer.item.res.append(new_res)
            if not hasattr(trailer.item, 'attachments'):
                trailer.item.attachments = {}
            trailer.item.attachments['poster'] = data['cover']

        self.trailers[trailer.id] = trailer

    def get_by_id(self, id):
        # id 0 is the root container; anything else is looked up directly.
        try:
            if int(id) == 0:
                return self.container
            else:
                return self.trailers.get(id, None)
        except:
            return None

    def upnp_init(self):
        if self.server:
            self.server.connection_manager_server.set_variable( \
                0, 'SourceProtocolInfo', ['http-get:*:video/quicktime:*', 'http-get:*:video/mp4:*'])
        self.container = Container(ROOT_ID, -1, self.name)
        trailers = self.trailers.values()
        # Python 2 idiom: in-place cmp-sort, case-insensitive by name.
        trailers.sort(cmp=lambda x, y: cmp(x.get_name().lower(), y.get_name().lower()))
        self.container.children = trailers

    def __repr__(self):
        return self.__class__.__name__
| |
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals, print_function
from oar.lib import (config, get_logger)
from oar.lib.tools import (Popen, PIPE)
import os
import re
import time
import signal
from pwd import getpwnam
# Set undefined config value to default one.
# Values are kept as strings, matching how oar.conf entries are read.
DEFAULT_CONFIG = {
    'META_SCHED_CMD': 'kao',
    'SERVER_HOSTNAME': 'localhost',
    'SERVER_PORT': '6666',
    'SCHEDULER_MIN_TIME_BETWEEN_2_CALLS': '1',
    'FINAUD_FREQUENCY': '300',
    'MAX_CONCURRENT_JOBS_STARTING_OR_TERMINATING': '25',
    'DETACH_JOB_FROM_SERVER': '0',
    'LOG_FILE': '/var/log/oar.log',
    'ENERGY_SAVING_INTERNAL': 'no'
}
config.setdefault_config(DEFAULT_CONFIG)
# retrieve umask and set new one
# BUG FIX: os.umask(oct('022')) raised TypeError -- oct() takes an int and
# returns a string, never a valid mask.  The intended value is octal 022.
old_umask = os.umask(0o022)
# TODO
# my $oldfh = select(STDERR); $| = 1; select($oldfh);
# $oldfh = select(STDOUT); $| = 1; select($oldfh);

# Everything is run by oar user (The real uid of this process.)
os.environ['OARDO_UID'] = str(os.geteuid())

# TODO
# my $Redirect_STD_process = OAR::Modules::Judas::redirect_everything();

logger = get_logger("oar.modules.almighty", forward_stderr=True)
logger.info('Start Almighty')

# TODO
# send_log_by_email("Start OAR server","[Almighty] Start Almighty");

if 'OARDIR' in os.environ:
    binpath = os.environ['OARDIR'] + '/'
else:
    binpath = '/usr/local/lib/oar/'
    # Only warn when falling back to the built-in default location.
    # NOTE(review): indentation was ambiguous in the original; the warning
    # only makes sense on the fallback path -- confirm against upstream.
    logger.warning("OARDIR env variable must be defined, " + binpath + " is used by default")
# Signal handle
finishTag = False
def signal_handler(signum=None, frame=None):
    """Ask the main automaton loop to terminate.

    Python invokes signal handlers with ``(signum, frame)``; the original
    zero-argument definition raised TypeError as soon as a signal
    arrived.  Defaults keep the function callable with no arguments for
    backward compatibility.
    """
    global finishTag
    finishTag = True
# To avoid zombie processes
#
# Surely useless, ignoring SIGCHLD is the common default setting
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
signal.signal(signal.SIGUSR1, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)

# Resolve the module commands against binpath unless already absolute.
meta_sched_command = config['META_SCHED_CMD']
m = re.match(r'^\/', meta_sched_command)
if not m:
    meta_sched_command = binpath + meta_sched_command
check_for_villains_command = binpath + 'sarko'
check_for_node_changes = binpath + 'finaud'
leon_command = binpath + 'leon'
nodeChangeState_command = binpath + 'NodeChangeState'
bipbip_command = binpath + 'bipbip'
appendice_command = binpath + 'appendice'

# This timeout is used to slowdown the main automaton when the
# command queue is empty, it correspond to a blocking read of
# new commands. A High value is likely to reduce the CPU usage of
# the Almighty.
# Setting it to 0 or a low value is not likely to improve performance
# dramatically (because it blocks only when nothing else is to be done).
# Nevertheless it is closely related to the precision at which the
# internal counters are checked
read_commands_timeout = 10

# This parameter sets the number of pending commands read from
# appendice before proceeding with internal work
# should not be set at a too high value as this would make the
# Almighty weak against flooding
max_successive_read = 100

# Max waiting time before new scheduling attempt (in the case of
# no notification)
schedulertimeout = 60
# Min waiting time before 2 scheduling attempts.
# Config values are strings; convert the numeric ones once here so the
# timer arithmetic/comparisons below work (str + float raises TypeError).
scheduler_min_time_between_2_calls = int(config['SCHEDULER_MIN_TIME_BETWEEN_2_CALLS'])
scheduler_wanted = 0  # 1 if the scheduler must be run next time update

# Max waiting time before check for jobs whose time allowed has elapsed
villainstimeout = 10

# Max waiting time before check node states
checknodestimeout = int(config['FINAUD_FREQUENCY'])

# Max number of concurrent bipbip processes
Max_bipbip_processes = int(config['MAX_CONCURRENT_JOBS_STARTING_OR_TERMINATING'])
# NOTE(review): kept as the raw config string ('0'/'1'); a bare string
# '0' is truthy, so confirm how callers interpret it before converting.
Detach_oarexec = config['DETACH_JOB_FROM_SERVER']
# Maximum duration of a bipbip process (after that time the process is killed)
Max_bipbip_process_duration = 30 * 60
Log_file = config['LOG_FILE']

# Regexp of the notification received from oarexec processes
# $1: job id
# $2: oarexec exit code
# $3: job script exit code
# $4: secret string that identifies the oarexec process (for security)
OAREXEC_REGEXP = r'OAREXEC_(\d+)_(\d+)_(\d+|N)_(\d+)'

# Regexp of the notification received when a job must be launched
# $1: job id
OARRUNJOB_REGEXP = r'OARRUNJOB_(\d+)'

# Regexp of the notification received when a job must be exterminate
# $1: job id
LEONEXTERMINATE_REGEXP = r'LEONEXTERMINATE_(\d+)'

# Pid of the Hulot (energy saving) child process; 0 when not running.
energy_pid = 0
def launch_command(command):
    '''Launch the command line passed in parameter and wait for it.

    Returns the command's exit value.  Sets the module-level finishTag
    when the child terminated abnormally (killed by a signal or dumped
    core).
    '''
    global finishTag
    logger.debug('Launching command : [' + command + ']')
    # $ENV{PATH}="/bin:/usr/bin:/usr/local/bin";
    # Restore default SIGCHLD handling so os.wait() can reap the child.
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    pid = os.fork()
    if pid == 0:
        # CHILD: shield it from the signals Almighty traps.
        signal.signal(signal.SIGUSR1, signal.SIG_IGN)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        try:
            os.execv(command, ['Almighty: ' + command])
        except OSError:
            # exec failed: terminate the child immediately so it never
            # falls through into the parent's code path.
            os._exit(127)
    status = 0
    while True:
        kid, status = os.wait()
        if kid == pid:
            break
    signal.signal(signal.SIGCHLD, signal.SIG_IGN)
    # Decode the raw wait status: high byte is the exit code, low bits
    # carry the terminating signal and the core-dump flag.
    exit_value = status >> 8
    signal_num = status & 127
    dumped_core = status & 128
    logger.debug(command + ' terminated')
    # str+int concatenation in the original raised TypeError here.
    logger.debug('Exit value : %s' % exit_value)
    logger.debug('Signal num : %s' % signal_num)
    logger.debug('Core dumped : %s' % dumped_core)
    if signal_num or dumped_core:
        logger.error('Something wrong occured (signal or core dumped) when trying to call [' +
                     command + '] command')
        finishTag = 1
    return exit_value
def start_hulot():  # TODO
    '''hulot module forking

    Placeholder: the Perl implementation kept below still has to be
    ported.  It forks a child running Hulot's energy saving loop and is
    expected to record the child pid in the module-level ``energy_pid``.
    '''
    # try:
    #     energy_pid = os.fork()
    #
    # if(!defined($energy_pid)){
    #     oar_error("[Almighty] Cannot fork Hulot, the energy saving module\n");
    #     exit(6);
    # }
    # if (!$energy_pid){
    #     $SIG{CHLD} = 'DEFAULT';
    #     $SIG{USR1} = 'IGNORE';
    #     $SIG{INT}  = 'IGNORE';
    #     $SIG{TERM} = 'IGNORE';
    #     $0="Almighty: hulot";
    #     OAR::Modules::Hulot::start_energy_loop();
    #     oar_error("[Almighty] Energy saving loop (hulot) exited. This should not happen.\n");
    #     exit(7);
    # }
    # }
def check_hulot():
    '''Check that the hulot (energy saving) process is still alive.

    Returns True when a process with pid ``energy_pid`` exists.  Signal 0
    only probes for existence: os.kill() raises OSError when the process
    is gone and returns None otherwise.  The original returned that None
    directly, which is falsy, so the caller believed hulot was dead and
    restarted it in a loop.
    '''
    try:
        os.kill(energy_pid, 0)
        return True
    except OSError:
        return False
def ipc_clean():  # TODO do we need it ?
    '''Remove SysV message queues owned by the oar user.'''
    oar_uid = getpwnam('oar').pw_uid
    with open('/proc/sysvipc/msg') as f_ipcs:
        for line in f_ipcs:
            # The original called line.slip(), an AttributeError typo
            # for split().
            ipcs = line.split()
            # Skip the header line and anything malformed.
            if len(ipcs) < 8 or not ipcs[7].isdigit():
                continue
            if int(ipcs[7]) == oar_uid:
                # NOTE(review): column 7 of /proc/sysvipc/msg is the owner
                # uid, so passing it to `ipcrm -q` looks wrong -- the queue
                # id is column 1.  Kept as in the original port; confirm
                # against the Perl implementation before changing.
                logger.debug('cleaning ipc ' + ipcs[7])
                os.system('/usr/bin/ipcrm -q ' + ipcs[7])
# functions associated with each state of the automaton
def meta_scheduler():
    """Run the meta-scheduler command; return its exit value."""
    return launch_command(meta_sched_command)


def check_for_villains():
    """Run sarko, which checks jobs whose allowed time has elapsed."""
    return launch_command(check_for_villains_command)


def check_nodes():
    """Run finaud, which checks for node state changes."""
    return launch_command(check_for_node_changes)


def leon():
    """Run leon, the job killer."""
    return launch_command(leon_command)


def nodeChangeState():
    """Run NodeChangeState to apply pending node state events."""
    return launch_command(nodeChangeState_command)
class Almighty(object):
    """Main OAR automaton.

    Reads notifications from the appendice child process's stdout pipe
    and drives the per-state commands: scheduler, villains check, node
    state check and Leon (the job killer).
    """

    def __init__(self):
        self.state = 'Init'
        logger.debug("Current state [" + self.state + "]")
        # Launch the appendice process; its stdout is the notification pipe.
        try:
            self.appendice = Popen(appendice_command, bufsize=0, stdout=PIPE)
        except OSError as e:
            logger.error('Failed to launch appendice: ' + str(e))
        # Starting of Hulot, the Energy saving module
        if config['ENERGY_SAVING_INTERNAL'] == 'yes':
            start_hulot()
        self.lastscheduler = 0
        self.lastvillains = 0
        self.lastchecknodes = 0
        # 1 when a scheduling round was requested but postponed because
        # the minimum delay between two scheduler calls had not elapsed.
        # (The original mixed a module global and an instance attribute.)
        self.scheduler_wanted = 0
        self.internal_command_file = []
        logger.debug('Init done')
        self.state = 'Qget'

    def time_update(self):
        """Queue the commands whose internal timers have expired."""
        current = time.time()
        logger.debug('Timeouts check : ' + str(current))
        # check timeout for scheduler
        if (current >= (self.lastscheduler + schedulertimeout)) \
           or ((self.scheduler_wanted >= 1)
               and (current >= (self.lastscheduler + scheduler_min_time_between_2_calls))):
            logger.debug('Scheduling timeout')
            self.add_command('Scheduling')
        if current >= (self.lastvillains + villainstimeout):
            logger.debug('Villains check timeout')
            self.add_command('Villains')
        if (current >= (self.lastchecknodes + checknodestimeout)) and (checknodestimeout > 0):
            logger.debug('Node check timeout')
            self.add_command('Finaud')

    def qget(self, timeout):
        """Wait up to ``timeout`` seconds for one notification line from
        the appendice pipe.

        Returns the notification without its trailing newline, or the
        pseudo-command 'Time' when the wait timed out.  Exits the whole
        process when the pipe is broken (appendice died).

        This replaces the un-ported Perl select/sysread code that was
        left verbatim in the original function body.
        """
        import select  # local import: only this method needs it
        pipe = self.appendice.stdout
        try:
            readable, _, _ = select.select([pipe], [], [], timeout)
        except (OSError, select.error):
            # select() is interrupted by our signal handlers on shutdown.
            if finishTag:
                logger.debug('Premature end of select call. It is normal, Almighty is stopping')
                return 'Time'
            logger.error("Error while waiting on appendice's pipe : I guess Appendice has died")
            exit(15)
        if not readable:
            return 'Time'
        answer = ''
        while True:
            char = pipe.read(1)
            if not char:
                logger.error('Error while reading in pipe : I guess Appendice has died')
                exit(8)
            if isinstance(char, bytes):
                # Popen pipes may yield bytes; normalize to str.
                char = char.decode()
            if char == '\n':
                break
            answer += char
        return answer

    def add_command(self, command):
        """Queue ``command``, skipping duplicates.

        Commands are mere notifications that trigger processing of all
        pending work, so queueing the same one twice is useless.  The
        original used ``+= command`` which extended the list with the
        *characters* of the string instead of appending it.
        """
        if command not in self.internal_command_file:
            self.internal_command_file.append(command)

    def read_commands(self, timeout):
        """Read commands until reaching the maximal successive read value
        or having read all of the pending commands."""
        command = ''
        remaining = max_successive_read
        while (command != 'Time') and remaining:
            if remaining != max_successive_read:
                # Only the first read blocks; drain the rest without waiting.
                timeout = 0
            command = self.qget(timeout)
            self.add_command(command)
            remaining -= 1
            logger.debug('Got command ' + command + ', ' + str(remaining) + ' remaining')

    def run(self):
        """Run the main automaton loop until finishTag is raised."""
        global finishTag
        while True:
            logger.debug("Current state [" + self.state + "]")
            # We stop Almighty and its children
            if finishTag:
                if energy_pid:
                    logger.debug("kill child process " + str(energy_pid))
                    os.kill(energy_pid, signal.SIGKILL)
                logger.debug("kill child process " + str(self.appendice.pid))
                os.kill(self.appendice.pid, signal.SIGKILL)
                # TODO: $Redirect_STD_process = OAR::Modules::Judas::redirect_everything();
                Redirect_STD_process = False
                if Redirect_STD_process:
                    os.kill(Redirect_STD_process, signal.SIGKILL)
                ipc_clean()
                logger.warning("Stop Almighty\n")
                # TODO: send_log_by_email("Stop OAR server", "[Almighty] Stop Almighty")
                exit(10)
            # We check Hulot
            if energy_pid and not check_hulot():
                logger.warning("Energy saving module (hulot) died. Restarting it.")
                time.sleep(5)
                ipc_clean()
                start_hulot()
            # QGET
            elif self.state == 'Qget':
                if self.internal_command_file:
                    self.read_commands(0)
                else:
                    self.read_commands(read_commands_timeout)
                logger.debug('Command queue : ' + str(self.internal_command_file))
                current_command = self.internal_command_file.pop(0)
                # A command may carry arguments; only its first word
                # selects the next state.  (The original 4-way unpack
                # raised ValueError for argument-less commands.)
                command = current_command.split(' ')[0]
                logger.debug('Qtype = [' + command + ']')
                if command in ('Qsub', 'Term', 'BipBip', 'Scheduling', 'Qresume'):
                    self.state = 'Scheduler'
                elif command == 'Qdel':
                    # Original used '==' here, a no-op comparison.
                    self.state = 'Leon'
                elif command == 'Villains':
                    self.state = 'Check for villains'
                elif command == 'Finaud':
                    self.state = 'Check node states'
                elif command == 'Time':
                    self.state = 'Time update'
                elif command == 'ChState':
                    self.state = 'Change node state'
                else:
                    logger.debug('Unknown command found in queue : ' + command)
            # SCHEDULER
            elif self.state == 'Scheduler':
                current_time = time.time()
                if current_time >= (self.lastscheduler + scheduler_min_time_between_2_calls):
                    self.scheduler_wanted = 0
                    # First, check pending events
                    check_result = nodeChangeState()
                    if check_result == 2:
                        self.state = 'Leon'
                        self.add_command('Term')
                    elif check_result == 1:
                        self.state = 'Scheduler'
                    elif check_result == 0:
                        # Launch the scheduler.  We check Hulot just before
                        # because if its pipe is not read, it may freeze oar.
                        if (energy_pid > 0) and not check_hulot():
                            logger.warning('Energy saving module (hulot) died. Restarting it.')
                            time.sleep(5)
                            ipc_clean()
                            start_hulot()
                        scheduler_result = meta_scheduler()
                        self.lastscheduler = time.time()
                        if scheduler_result == 0:
                            self.state = 'Time update'
                        elif scheduler_result == 1:
                            self.state = 'Scheduler'
                        elif scheduler_result == 2:
                            self.state = 'Leon'
                        else:
                            logger.error('Scheduler returned an unknown value : ' +
                                         str(scheduler_result))
                            finishTag = 1
                    else:
                        logger.error('nodeChangeState_command returned an unknown value.')
                        finishTag = 1
                else:
                    self.scheduler_wanted = 1
                    self.state = 'Time update'
                    logger.debug('Scheduler call too early, waiting... (' +
                                 str(current_time) + ' >= (' + str(self.lastscheduler) +
                                 ' + ' + str(scheduler_min_time_between_2_calls) + ')')
            # TIME UPDATE
            elif self.state == 'Time update':
                self.time_update()
                self.state = 'Qget'
            # CHECK FOR VILLAINS
            elif self.state == 'Check for villains':
                check_result = check_for_villains()
                self.lastvillains = time.time()
                if check_result == 1:
                    self.state = 'Leon'
                elif check_result == 0:
                    self.state = 'Time update'
                else:
                    logger.error('check_for_villains_command returned an unknown value : ' +
                                 str(check_result) + '.')
                    finishTag = 1
            # CHECK NODE STATES
            elif self.state == 'Check node states':
                check_result = check_nodes()
                self.lastchecknodes = time.time()
                if check_result == 1:
                    self.state = 'Change node state'
                elif check_result == 0:
                    self.state = 'Time update'
                else:
                    logger.error('check_for_node_changes returned an unknown value.')
                    finishTag = 1
            # LEON
            elif self.state == 'Leon':
                check_result = leon()
                self.state = 'Time update'
                if check_result == 1:
                    self.add_command('Term')
            # Change state for dynamic nodes
            elif self.state == 'Change node state':
                check_result = nodeChangeState()
                if check_result == 2:
                    self.state = 'Leon'
                    self.add_command('Term')
                elif check_result == 1:
                    self.state = 'Scheduler'
                elif check_result == 0:
                    self.state = 'Time update'
                else:
                    logger.error('nodeChangeState_command returned an unknown value.')
                    finishTag = 1
            else:
                logger.warning('Critical bug !!!!\n')
                logger.error('Almighty just falled into an unknown state !!!.')
                finishTag = 1
if __name__ == '__main__':  # pragma: no cover
    # Build the automaton and hand control to its main loop.
    Almighty().run()
| |
"""Generation of Treadmill manifests from cell events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import json
import logging
import os
import sys
from treadmill import appcfg
from treadmill import context
from treadmill import subproc
from treadmill import supervisor
from treadmill import utils
from treadmill import yamlwrapper as yaml
from treadmill.appcfg import features
_LOGGER = logging.getLogger(__name__)
def read(filename, file_format='json'):
    """Standard way of reading a manifest.

    ``file_format`` selects the parser: 'json', or anything else for YAML.
    """
    with io.open(filename) as stream:
        if file_format == 'json':
            return json.load(fp=stream)
        return yaml.load(stream=stream)
def load(event):
    """Loads the app event file, ensuring it is in valid format, and supplement
    it into a full Treadmill manifest.

    :param event:
        Full path to the application node event in the zookeeper cache.
    :type event:
        ``str``
    :return:
        Application manifest object
    :rtype:
        ``dict``
    """
    # TODO: need better input validation / setting defaults process.
    name = os.path.basename(event)
    manifest = read(event, 'yaml')

    utils.validate(manifest, [('image', False, str)])
    app_type = appcfg.AppType.get_app_type(manifest.get('image'))

    # 'services' is mandatory only for native apps.
    schema = [
        ('proid', True, str),
        ('environment', True, str),
        ('services', app_type == appcfg.AppType.NATIVE, list),
        ('command', False, str),
        ('args', False, list),
        ('endpoints', False, list),
        ('environ', False, list),
        ('cpu', True, str),
        ('memory', True, str),
        ('disk', True, str),
        ('keytabs', False, list),
    ]
    utils.validate(manifest, schema)

    manifest['system_services'] = []
    manifest['name'] = name
    manifest['app'] = appcfg.appname_basename(name)
    manifest['type'] = app_type.value
    manifest['uniqueid'] = appcfg.gen_uniqueid(event)

    if manifest['environment'] not in ['dev', 'qa', 'uat', 'prod']:
        _LOGGER.warning(
            'Unrecognized environment: %s', manifest['environment']
        )
        raise Exception('Invalid environment: ' + manifest['environment'])

    # 'cpu' arrives as a percentage string, e.g. '100%'.
    if manifest['cpu'].endswith('%'):
        manifest['cpu'] = int(manifest['cpu'][:-1])

    # By default network is private.
    if 'shared_network' not in manifest:
        manifest['shared_network'] = False
    else:
        manifest['shared_network'] = bool(manifest['shared_network'])

    # By default host IP is not shared, not used in the container.
    if 'shared_ip' not in manifest:
        manifest['shared_ip'] = False
    else:
        manifest['shared_ip'] = bool(manifest['shared_ip'])

    # Check archive.  list() copies/normalizes any iterable and never
    # returns None, so the original "is None" fallback was dead code.
    manifest['archive'] = list(manifest.get('archive', []))

    # Adds cell specific information to the loaded manifest.
    manifest['cell'] = context.GLOBAL.cell
    manifest['zookeeper'] = context.GLOBAL.zk.url

    def _set_default(attr, value, obj=None):
        """Set default manifest attribute if it is not present."""
        if obj is None:
            obj = manifest
        if attr not in obj:
            obj[attr] = value

    _set_default('command', None)
    _set_default('args', [])
    _set_default('environ', [])
    _set_default('endpoints', [])
    _set_default('passthrough', [])
    _set_default('services', [])
    _set_default('vring', {})
    _set_default('cells', [], manifest['vring'])
    _set_default('identity_group', None)
    _set_default('identity', None)
    _set_default('data_retention_timeout', None)
    _set_default('lease', None)
    _set_default('keytabs', [])

    # Normalize optional and port information
    manifest['endpoints'] = [
        {
            'name': endpoint['name'],
            'port': int(endpoint['port']),
            'type': endpoint.get('type', None),
            'proto': endpoint.get('proto', 'tcp'),
        }
        for endpoint in manifest.get('endpoints', [])
    ]

    # TODO: need better way to normalize.
    if 'ephemeral_ports' not in manifest:
        manifest['ephemeral_ports'] = {'tcp': 0, 'udp': 0}
    if 'tcp' not in manifest['ephemeral_ports']:
        manifest['ephemeral_ports']['tcp'] = 0
    else:
        manifest['ephemeral_ports']['tcp'] = int(
            manifest['ephemeral_ports']['tcp']
        )
    if 'udp' not in manifest['ephemeral_ports']:
        manifest['ephemeral_ports']['udp'] = 0
    else:
        manifest['ephemeral_ports']['udp'] = int(
            manifest['ephemeral_ports']['udp']
        )

    return manifest
def add_linux_system_services(tm_env, manifest):
    """Configure linux system services.

    Appends to ``manifest['system_services']`` the root-owned services
    every Linux container runs: vring daemons (one per configured cell),
    the presence registration service, the /etc/hosts alias manager and
    the top-level ``start_container`` service.  Mutates ``manifest`` in
    place.
    """
    # Resolve (create) the container service directory; its data dir
    # hosts the state.json the services below point at.
    container_svcdir = supervisor.open_service(
        os.path.join(
            tm_env.apps_dir,
            appcfg.manifest_unique_name(manifest)
        ),
        existing=False
    )
    container_data_dir = container_svcdir.data_dir

    if 'vring' in manifest:
        # Add the Vring daemon services
        for cell in manifest['vring']['cells']:
            vring = {
                'name': 'vring.%s' % cell,
                'proid': 'root',
                'restart': {
                    'limit': 5,
                    'interval': 60,
                },
                'command': (
                    'exec {python} -m treadmill sproc'
                    ' --zookeeper {zkurl}'
                    ' --cell {cell}'
                    ' vring'
                    ' --approot {tm_root}'
                    ' {manifest}'
                ).format(
                    python=sys.executable,
                    zkurl=manifest['zookeeper'],
                    cell=cell,
                    tm_root=tm_env.root,
                    manifest=os.path.join(container_data_dir, 'state.json')
                ),
                'environ': [
                    {
                        'name': 'KRB5CCNAME',
                        'value': os.path.expandvars(
                            'FILE:${TREADMILL_HOST_TICKET}'
                        ),
                    },
                ],
                'config': None,
                'downed': False,
                'trace': False,
            }
            manifest['system_services'].append(vring)

    # Create ticket refresh and container/endpoint presence service
    register_presence = {
        'name': 'register',
        'proid': 'root',
        'restart': {
            'limit': 5,
            'interval': 60,
        },
        'command': (
            'exec {python} -m treadmill sproc'
            ' --zookeeper {zkurl}'
            ' --cell {cell}'
            ' presence register'
            ' {manifest} {container_dir}'
        ).format(
            python=sys.executable,
            zkurl=manifest['zookeeper'],
            cell=manifest['cell'],
            manifest=os.path.join(container_data_dir, 'state.json'),
            container_dir=container_data_dir
        ),
        'environ': [
            {
                'name': 'KRB5CCNAME',
                'value': os.path.expandvars(
                    'FILE:${TREADMILL_HOST_TICKET}'
                ),
            },
            {
                'name': 'TREADMILL_ALIASES_PATH',
                'value': os.getenv('TREADMILL_ALIASES_PATH'),
            },
        ],
        'config': None,
        'downed': False,
        'trace': False,
    }
    manifest['system_services'].append(register_presence)

    # Create container /etc/hosts manager service
    run_overlay = os.path.join(container_data_dir, 'overlay', 'run')
    etc_overlay = os.path.join(container_data_dir, 'overlay', 'etc')
    hostaliases = {
        'name': 'hostaliases',
        'proid': 'root',
        'restart': {
            'limit': 5,
            'interval': 60,
        },
        'command': (
            'exec {python} -m treadmill sproc'
            ' --cell {cell}'
            ' host-aliases'
            ' --aliases-dir {aliases_dir}'
            ' {hosts_original} {hosts_container}'
        ).format(
            python=sys.executable,
            cell=manifest['cell'],
            aliases_dir=os.path.join(
                run_overlay, 'host-aliases',
            ),
            hosts_original=os.path.join(
                '/', 'etc', 'hosts'
            ),
            hosts_container=os.path.join(
                etc_overlay, 'hosts'
            ),
        ),
        'environ': [],
        'downed': False,
        'trace': False,
    }
    manifest['system_services'].append(hostaliases)

    # Create the user app top level supervisor
    #
    # Reset environment variables set by treadmill to default values.
    # Note: this service starts 'downed' -- something else brings it up.
    start_container = {
        'name': 'start_container',
        'proid': 'root',
        'restart': {
            'limit': 0,
            'interval': 60,
        },
        'command': (
            'exec'
            ' {pid1} -i -m -p'
            ' --propagation slave'
            ' {python} -m treadmill sproc'
            ' --cell {cell}'
            ' start-container'
            ' --container-root {container_dir}/root'
        ).format(
            python=sys.executable,
            cell=manifest['cell'],
            pid1=subproc.resolve('pid1'),
            container_dir=container_data_dir,
        ),
        'environ': [],
        'config': None,
        'downed': True,
        'trace': False,
    }
    manifest['system_services'].append(start_container)
def add_linux_services(manifest):
    """Configure linux standard services."""
    # Configure the sshd daemon inside the container; it binds to the
    # dynamically assigned TREADMILL_ENDPOINT_SSH port.
    sshd_command = (
        'exec {sshd} -D -f /etc/ssh/sshd_config'
        ' -p $TREADMILL_ENDPOINT_SSH'
    ).format(
        sshd=subproc.resolve('sshd')
    )
    manifest['services'].append({
        'name': 'sshd',
        'proid': 'root',
        'restart': {
            'limit': 5,
            'interval': 60,
        },
        'command': sshd_command,
        'root': True,
        'environ': [],
        'config': None,
        'downed': False,
        'trace': False,
    })
    # Expose ssh on a dynamically assigned infra endpoint (port 0).
    manifest['endpoints'].append({
        'name': 'ssh',
        'proto': 'tcp',
        'port': 0,
        'type': 'infra',
    })
def add_manifest_features(manifest, runtime):
    """Configure optional container features.

    :param manifest: application manifest dict, mutated in place.
    :param runtime: runtime name the features must apply to.
    :raises Exception: when a feature cannot be loaded, does not apply,
        or fails to configure.
    """
    for feature in manifest.get('features', []):
        feature_cls = features.get_feature(feature)
        # Check for a missing feature *before* instantiating: the
        # original called the lookup result immediately, so an unknown
        # feature crashed with TypeError (None is not callable) instead
        # of raising the intended error message.
        if feature_cls is None:
            _LOGGER.error('Unable to load feature: %s', feature)
            raise Exception('Unsupported feature: ' + feature)
        feature_mod = feature_cls()

        if not feature_mod.applies(manifest, runtime):
            _LOGGER.error('Feature does not apply: %s', feature)
            raise Exception('Unsupported feature: ' + feature)

        try:
            feature_mod.configure(manifest)
        except Exception:
            _LOGGER.exception('Error configuring feature: %s', feature)
            raise Exception('Error configuring feature: ' + feature)
| |
import fnmatch
import logging
import os
from urlparse import urlparse
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from guardian.shortcuts import assign
from readthedocs.builds.constants import LATEST
from readthedocs.builds.constants import LATEST_VERBOSE_NAME
from readthedocs.builds.constants import STABLE
from readthedocs.oauth import utils as oauth_utils
from readthedocs.privacy.loader import RelatedProjectManager, ProjectManager
from readthedocs.projects import constants
from readthedocs.projects.exceptions import ProjectImportError
from readthedocs.projects.templatetags.projects_tags import sort_version_aware
from readthedocs.projects.utils import make_api_version, symlink, update_static_metadata
from readthedocs.projects.version_handling import determine_stable_version
from readthedocs.projects.version_handling import version_windows
from taggit.managers import TaggableManager
from readthedocs.api.client import api
from readthedocs.vcs_support.base import VCSProject
from readthedocs.vcs_support.backends import backend_cls
from readthedocs.vcs_support.utils import Lock, NonBlockingLock
log = logging.getLogger(__name__)
class ProjectRelationship(models.Model):
    """Through-model linking a parent project to one of its subprojects."""

    parent = models.ForeignKey('Project', verbose_name=_('Parent'),
                               related_name='subprojects')
    child = models.ForeignKey('Project', verbose_name=_('Child'),
                              related_name='superprojects')

    def __unicode__(self):
        return "%s -> %s" % (self.parent, self.child)

    # HACK: hardcodes the production domain and pins the version to
    # 'latest'; the child's docs are served under the parent's subdomain.
    def get_absolute_url(self):
        return ("http://%s.readthedocs.org/projects/%s/%s/latest/"
                % (self.parent.slug, self.child.slug, self.child.language))
class Project(models.Model):
    """A documentation project tied to a single VCS repository.

    Field groups below: auto timestamps, metadata generally taken from
    conf.py, build configuration, privacy levels, subproject/translation
    relations and version-window settings.  ``objects`` is the
    privacy-aware manager; ``all_objects`` bypasses privacy filtering.
    """

    # Auto fields
    pub_date = models.DateTimeField(_('Publication date'), auto_now_add=True)
    modified_date = models.DateTimeField(_('Modified date'), auto_now=True)

    # Generally from conf.py
    users = models.ManyToManyField(User, verbose_name=_('User'),
                                   related_name='projects')
    name = models.CharField(_('Name'), max_length=255)
    slug = models.SlugField(_('Slug'), max_length=255, unique=True)
    description = models.TextField(_('Description'), blank=True,
                                   help_text=_('The reStructuredText '
                                               'description of the project'))
    repo = models.CharField(_('Repository URL'), max_length=255,
                            help_text=_('Hosted documentation repository URL'))
    repo_type = models.CharField(_('Repository type'), max_length=10,
                                 choices=constants.REPO_CHOICES, default='git')
    project_url = models.URLField(_('Project homepage'), blank=True,
                                  help_text=_('The project\'s homepage'))
    canonical_url = models.URLField(_('Canonical URL'), blank=True,
                                    help_text=_('URL that documentation is expected to serve from'))
    version = models.CharField(_('Version'), max_length=100, blank=True,
                               help_text=_('Project version these docs apply '
                                           'to, i.e. 1.0a'))
    copyright = models.CharField(_('Copyright'), max_length=255, blank=True,
                                 help_text=_('Project copyright information'))
    theme = models.CharField(
        _('Theme'), max_length=20, choices=constants.DEFAULT_THEME_CHOICES,
        default=constants.THEME_DEFAULT,
        help_text=(u'<a href="http://sphinx.pocoo.org/theming.html#builtin-'
                   'themes" target="_blank">%s</a>') % _('Examples'))
    suffix = models.CharField(_('Suffix'), max_length=10, editable=False,
                              default='.rst')
    single_version = models.BooleanField(
        _('Single version'), default=False,
        help_text=_('A single version site has no translations and only your "latest" version, served at the root of the domain. Use this with caution, only turn it on if you will <b>never</b> have multiple versions of your docs.'))
    default_version = models.CharField(
        _('Default version'), max_length=255, default=LATEST,
        help_text=_('The version of your project that / redirects to'))
    # In default_branch, None means the backend should choose the
    # appropriate branch. Eg 'master' for git
    default_branch = models.CharField(
        _('Default branch'), max_length=255, default=None, null=True,
        blank=True, help_text=_('What branch "latest" points to. Leave empty '
                                'to use the default value for your VCS (eg. '
                                '<code>trunk</code> or <code>master</code>).'))
    requirements_file = models.CharField(
        _('Requirements file'), max_length=255, default=None, null=True,
        blank=True, help_text=_(
            'Requires Virtualenv. A <a '
            'href="https://pip.pypa.io/en/latest/user_guide.html#requirements-files">'
            'pip requirements file</a> needed to build your documentation. '
            'Path from the root of your project.'))
    documentation_type = models.CharField(
        _('Documentation type'), max_length=20,
        choices=constants.DOCUMENTATION_CHOICES, default='sphinx',
        help_text=_('Type of documentation you are building. <a href="http://'
                    'sphinx-doc.org/builders.html#sphinx.builders.html.'
                    'DirectoryHTMLBuilder">More info</a>.'))
    allow_comments = models.BooleanField(_('Allow Comments'), default=False)
    comment_moderation = models.BooleanField(_('Comment Moderation)'), default=False)
    cdn_enabled = models.BooleanField(_('CDN Enabled'), default=False)
    analytics_code = models.CharField(
        _('Analytics code'), max_length=50, null=True, blank=True,
        help_text=_("Google Analytics Tracking ID "
                    "(ex. <code>UA-22345342-1</code>). "
                    "This may slow down your page loads."))

    # Sphinx specific build options.
    enable_epub_build = models.BooleanField(
        _('Enable EPUB build'), default=True,
        help_text=_(
            'Create a EPUB version of your documentation with each build.'))
    enable_pdf_build = models.BooleanField(
        _('Enable PDF build'), default=True,
        help_text=_(
            'Create a PDF version of your documentation with each build.'))

    # Other model data.
    path = models.CharField(_('Path'), max_length=255, editable=False,
                            help_text=_("The directory where "
                                        "<code>conf.py</code> lives"))
    conf_py_file = models.CharField(
        _('Python configuration file'), max_length=255, default='', blank=True,
        help_text=_('Path from project root to <code>conf.py</code> file '
                    '(ex. <code>docs/conf.py</code>).'
                    'Leave blank if you want us to find it for you.'))
    featured = models.BooleanField(_('Featured'), default=False)
    skip = models.BooleanField(_('Skip'), default=False)
    mirror = models.BooleanField(_('Mirror'), default=False)
    use_virtualenv = models.BooleanField(
        _('Use virtualenv'),
        help_text=_("Install your project inside a virtualenv using <code>setup.py "
                    "install</code>"),
        default=False
    )

    # This model attribute holds the python interpreter used to create the
    # virtual environment
    python_interpreter = models.CharField(
        _('Python Interpreter'),
        max_length=20,
        choices=constants.PYTHON_CHOICES,
        default='python',
        help_text=_("(Beta) The Python interpreter used to create the virtual "
                    "environment."))
    use_system_packages = models.BooleanField(
        _('Use system packages'),
        help_text=_("Give the virtual environment access to the global "
                    "site-packages dir."),
        default=False
    )
    django_packages_url = models.CharField(_('Django Packages URL'),
                                           max_length=255, blank=True)
    privacy_level = models.CharField(
        _('Privacy Level'), max_length=20, choices=constants.PRIVACY_CHOICES,
        default=getattr(settings, 'DEFAULT_PRIVACY_LEVEL', 'public'),
        help_text=_("(Beta) Level of privacy that you want on the repository. "
                    "Protected means public but not in listings."))
    version_privacy_level = models.CharField(
        _('Version Privacy Level'), max_length=20,
        choices=constants.PRIVACY_CHOICES, default=getattr(
            settings, 'DEFAULT_PRIVACY_LEVEL', 'public'),
        help_text=_("(Beta) Default level of privacy you want on built "
                    "versions of documentation."))

    # Subprojects
    related_projects = models.ManyToManyField(
        'self', verbose_name=_('Related projects'), blank=True,
        symmetrical=False, through=ProjectRelationship)

    # Language bits
    language = models.CharField(_('Language'), max_length=20, default='en',
                                help_text=_("The language the project "
                                            "documentation is rendered in. "
                                            "Note: this affects your project's URL."),
                                choices=constants.LANGUAGES)
    programming_language = models.CharField(_('Programming Language'), max_length=20, default='words',
                                            help_text=_(
                                                "The primary programming language the project is written in."),
                                            choices=constants.PROGRAMMING_LANGUAGES, blank=True)
    # A subproject pointed at it's main language, so it can be tracked
    main_language_project = models.ForeignKey('self',
                                              related_name='translations',
                                              blank=True, null=True)

    # Version State
    num_major = models.IntegerField(
        _('Number of Major versions'),
        default=2,
        null=True,
        blank=True,
        help_text=_("2 means supporting 3.X.X and 2.X.X, but not 1.X.X")
    )
    num_minor = models.IntegerField(
        _('Number of Minor versions'),
        default=2,
        null=True,
        blank=True,
        help_text=_("2 means supporting 2.2.X and 2.1.X, but not 2.0.X")
    )
    num_point = models.IntegerField(
        _('Number of Point versions'),
        default=2,
        null=True,
        blank=True,
        help_text=_("2 means supporting 2.2.2 and 2.2.1, but not 2.2.0")
    )

    tags = TaggableManager(blank=True)
    # Privacy-aware default manager; all_objects bypasses privacy filtering.
    objects = ProjectManager()
    all_objects = models.Manager()

    class Meta:
        ordering = ('slug',)
        permissions = (
            # Translators: Permission around whether a user can view the
            # project
            ('view_project', _('View Project')),
        )
    def __unicode__(self):
        """Display the project by its human-readable name (Python 2)."""
        return self.name
@property
def subdomain(self):
prod_domain = getattr(settings, 'PRODUCTION_DOMAIN')
# if self.canonical_domain:
# return self.canonical_domain
# else:
subdomain_slug = self.slug.replace('_', '-')
return "%s.%s" % (subdomain_slug, prod_domain)
    def sync_supported_versions(self):
        """Flag this project's versions as supported/unsupported.

        Versions named by ``supported_versions()`` (presumably derived
        from the num_major/num_minor/num_point windows -- the helper is
        defined elsewhere; confirm) are marked supported and all others
        unsupported; 'latest' is always forced back to supported.
        """
        supported = self.supported_versions()
        if supported:
            self.versions.filter(
                verbose_name__in=supported).update(supported=True)
            self.versions.exclude(
                verbose_name__in=supported).update(supported=False)
            self.versions.filter(verbose_name=LATEST_VERBOSE_NAME).update(supported=True)
    def save(self, *args, **kwargs):
        """Persist the project, then run best-effort post-save hooks.

        Generates the slug on first save, (re)assigns the view_project
        permission, keeps the 'latest' version aligned with the default
        branch, resyncs supported versions, refreshes symlinks and static
        metadata.  Each hook is deliberately wrapped in a broad
        try/except: a failing side effect must not prevent the project
        row itself from being saved.
        """
        first_save = self.pk is None
        if not self.slug:
            # Subdomains can't have underscores in them.
            self.slug = slugify(self.name).replace('_', '-')
            if self.slug == '':
                raise Exception(_("Model must have slug"))
        super(Project, self).save(*args, **kwargs)
        for owner in self.users.all():
            assign('view_project', owner, self)
        try:
            # Keep the 'latest' version pointing at the default branch.
            if self.default_branch:
                latest = self.versions.get(slug=LATEST)
                if latest.identifier != self.default_branch:
                    latest.identifier = self.default_branch
                    latest.save()
        except Exception:
            log.error('Failed to update latest identifier', exc_info=True)

        # Add exceptions here for safety
        try:
            self.sync_supported_versions()
        except Exception:
            log.error('failed to sync supported versions', exc_info=True)
        try:
            if not first_save:
                symlink(project=self.slug)
        except Exception:
            log.error('failed to symlink project', exc_info=True)
        try:
            update_static_metadata(project_pk=self.pk)
        except Exception:
            log.error('failed to update static metadata', exc_info=True)
        try:
            branch = self.default_branch or self.vcs_repo().fallback_branch
            if not self.versions.filter(slug=LATEST).exists():
                self.versions.create_latest(identifier=branch)
            # if not self.versions.filter(slug=STABLE).exists():
            #     self.versions.create_stable(type='branch', identifier=branch)
        except Exception:
            log.error('Error creating default branches', exc_info=True)
    def get_absolute_url(self):
        """URL of the project detail page."""
        return reverse('projects_detail', args=[self.slug])
def get_docs_url(self, version_slug=None, lang_slug=None):
"""
Return a url for the docs. Always use http for now,
to avoid content warnings.
"""
protocol = "http"
version = version_slug or self.get_default_version()
lang = lang_slug or self.language
use_subdomain = getattr(settings, 'USE_SUBDOMAIN', False)
if use_subdomain:
if self.single_version:
return "%s://%s/" % (
protocol,
self.subdomain,
)
else:
return "%s://%s/%s/%s/" % (
protocol,
self.subdomain,
lang,
version,
)
else:
if self.single_version:
return reverse('docs_detail', kwargs={
'project_slug': self.slug,
'filename': ''
})
else:
return reverse('docs_detail', kwargs={
'project_slug': self.slug,
'lang_slug': lang,
'version_slug': version,
'filename': ''
})
    def get_translation_url(self, version_slug=None, full=False):
        """URL for this translation underneath its main-language project.

        NOTE(review): assumes ``self.main_language_project`` is set --
        ``parent`` is dereferenced unconditionally; confirm callers guarantee it.
        """
        parent = self.main_language_project
        lang_slug = self.language
        protocol = "http"
        version = version_slug or parent.get_default_version()
        use_subdomain = getattr(settings, 'USE_SUBDOMAIN', False)
        if use_subdomain and full:
            return "%s://%s/%s/%s/" % (
                protocol,
                parent.subdomain,
                lang_slug,
                version,
            )
        elif use_subdomain and not full:
            # Relative path for links served from the subdomain itself.
            return "/%s/%s/" % (
                lang_slug,
                version,
            )
        else:
            return reverse('docs_detail', kwargs={
                'project_slug': parent.slug,
                'lang_slug': lang_slug,
                'version_slug': version,
                'filename': ''
            })
    def get_builds_url(self):
        """URL of this project's build list page."""
        return reverse('builds_project_list', kwargs={
            'project_slug': self.slug,
        })
    def get_production_media_path(self, type, version_slug, include_file=True):
        """
        Get file path for media files in production.
        This is used to see if these files exist so we can offer them for download.

        ``type`` is the media kind ('pdf', 'epub', 'htmlzip'); when
        ``include_file`` is True the returned path includes the file name.
        """
        if getattr(settings, 'DEFAULT_PRIVACY_LEVEL', 'public') == 'public':
            path = os.path.join(
                settings.MEDIA_ROOT, type, self.slug, version_slug)
        else:
            # Private projects keep artifacts outside the public media root.
            path = os.path.join(
                settings.PRODUCTION_MEDIA_ARTIFACTS, type, self.slug, version_slug)
        if include_file:
            # 'htmlzip' media is stored with a plain '.zip' extension.
            path = os.path.join(
                path, '%s.%s' % (self.slug, type.replace('htmlzip', 'zip')))
        return path
    def get_production_media_url(self, type, version_slug, full_path=True):
        """
        Get the URL for downloading a specific media file.

        ``full_path`` prepends the protocol-relative production domain.
        """
        path = reverse('project_download_media', kwargs={
            'project_slug': self.slug,
            'type': type,
            'version_slug': version_slug,
        })
        if full_path:
            path = '//%s%s' % (settings.PRODUCTION_DOMAIN, path)
        return path
def get_downloads(self):
downloads = {}
downloads['htmlzip'] = self.get_production_media_url(
'htmlzip', self.get_default_version())
downloads['epub'] = self.get_production_media_url(
'epub', self.get_default_version())
downloads['pdf'] = self.get_production_media_url(
'pdf', self.get_default_version())
return downloads
    @property
    def canonical_domain(self):
        """Network location of the canonical URL, or '' when unset."""
        if not self.clean_canonical_url:
            return ""
        return urlparse(self.clean_canonical_url).netloc
    @property
    def clean_canonical_url(self):
        """Normalized canonical URL: scheme defaults to http, trailing slash.

        Returns '' when no canonical URL is configured.
        """
        if not self.canonical_url:
            return ""
        parsed = urlparse(self.canonical_url)
        if parsed.scheme:
            scheme, netloc = parsed.scheme, parsed.netloc
        elif parsed.netloc:
            scheme, netloc = "http", parsed.netloc
        else:
            # A bare host without scheme parses into .path, not .netloc.
            scheme, netloc = "http", parsed.path
        if getattr(settings, 'DONT_HIT_DB', True):
            # NOTE(review): when the URL had neither scheme nor netloc, the
            # whole value is already in netloc (taken from parsed.path) and is
            # appended again here -- looks like a duplication bug; confirm.
            if parsed.path:
                netloc = netloc + parsed.path
        else:
            # Only keep the path for subprojects (requires a DB query).
            if self.superprojects.count() and parsed.path:
                netloc = netloc + parsed.path
        return "%s://%s/" % (scheme, netloc)
@property
def clean_repo(self):
if self.repo.startswith('http://github.com'):
return self.repo.replace('http://github.com', 'https://github.com')
return self.repo
# Doc PATH:
# MEDIA_ROOT/slug/checkouts/version/<repo>
    @property
    def doc_path(self):
        """Root directory for this project's checkouts/envs/builds."""
        return os.path.join(settings.DOCROOT, self.slug.replace('_', '-'))
    def checkout_path(self, version=LATEST):
        """Directory where *version* of the repository is checked out."""
        return os.path.join(self.doc_path, 'checkouts', version)
    def venv_path(self, version=LATEST):
        """Directory of the virtualenv used to build *version*."""
        return os.path.join(self.doc_path, 'envs', version)
#
# Paths for symlinks in project doc_path.
#
    def cnames_symlink_path(self, domain):
        """
        Path in the doc_path that we symlink cnames
        This has to be at the top-level because Nginx doesn't know the projects slug.
        """
        return os.path.join(settings.CNAME_ROOT, domain)
def translations_symlink_path(self, language=None):
"""
Path in the doc_path that we symlink translations
"""
if not language:
language = self.language
return os.path.join(self.doc_path, 'translations', language)
    def subprojects_symlink_path(self, project):
        """
        Path in the doc_path that we symlink subprojects
        """
        return os.path.join(self.doc_path, 'subprojects', project)
    def single_version_symlink_path(self):
        """
        Path in the doc_path for the single_version symlink.
        """
        return os.path.join(self.doc_path, 'single_version')
#
# End symlink paths
#
def venv_bin(self, version=LATEST, bin=None):
"""Return path to the virtualenv bin path, or a specific binary
If ``bin`` is :py:data:`None`, then return the path to the virtual env
path, otherwise, return the path to the executable ``bin`` in the
virtual env ``bin`` path
"""
parts = [self.venv_path(version), 'bin']
if bin is not None:
parts.append(bin)
return os.path.join(*parts)
def full_doc_path(self, version=LATEST):
"""
The path to the documentation root in the project.
"""
doc_base = self.checkout_path(version)
for possible_path in ['docs', 'doc', 'Doc']:
if os.path.exists(os.path.join(doc_base, '%s' % possible_path)):
return os.path.join(doc_base, '%s' % possible_path)
# No docs directory, docs are at top-level.
return doc_base
    def artifact_path(self, type, version=LATEST):
        """
        The path to the build html docs in the project.
        """
        return os.path.join(self.doc_path, "artifacts", version, type)
    def full_build_path(self, version=LATEST):
        """
        The path to the build html docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "html")
    def full_latex_path(self, version=LATEST):
        """
        The path to the build LaTeX docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "latex")
    def full_epub_path(self, version=LATEST):
        """
        The path to the build epub docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "epub")
# There is currently no support for building man/dash formats, but we keep
# the support there for existing projects. They might have already existing
# legacy builds.
    def full_man_path(self, version=LATEST):
        """
        The path to the build man docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "man")
    def full_dash_path(self, version=LATEST):
        """
        The path to the build dash docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "dash")
def full_json_path(self, version=LATEST):
"""
The path to the build json docs in the project.
"""
if 'sphinx' in self.documentation_type:
return os.path.join(self.conf_dir(version), "_build", "json")
elif 'mkdocs' in self.documentation_type:
return os.path.join(self.checkout_path(version), "_build", "json")
    def full_singlehtml_path(self, version=LATEST):
        """
        The path to the build singlehtml docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "singlehtml")
    def rtd_build_path(self, version=LATEST):
        """
        The destination path where the built docs are copied.
        """
        return os.path.join(self.doc_path, 'rtd-builds', version)
    def static_metadata_path(self):
        """
        The path to the static metadata JSON settings file
        """
        return os.path.join(self.doc_path, 'metadata.json')
    def conf_file(self, version=LATEST):
        """Locate the project's conf.py for *version*.

        Tries, in order: the path configured on the model, a search of
        the docs directory, then the whole checkout. Raises
        ProjectImportError when nothing is found.
        """
        if self.conf_py_file:
            conf_path = os.path.join(self.checkout_path(version), self.conf_py_file)
            if os.path.exists(conf_path):
                log.info('Inserting conf.py file path from model')
                return conf_path
            else:
                log.warning("Conf file specified on model doesn't exist")
        files = self.find('conf.py', version)
        if not files:
            files = self.full_find('conf.py', version)
        if len(files) == 1:
            return files[0]
        for file in files:
            # NOTE(review): the search starts at character offset 70,
            # presumably to skip the checkout-root prefix of the absolute
            # path -- confirm before relying on this heuristic.
            if file.find('doc', 70) != -1:
                return file
        # Having this be translatable causes this odd error:
        # ProjectImportError(<django.utils.functional.__proxy__ object at
        # 0x1090cded0>,)
        raise ProjectImportError(
            u"Conf File Missing. Please make sure you have a conf.py in your project.")
def conf_dir(self, version=LATEST):
conf_file = self.conf_file(version)
if conf_file:
return conf_file.replace('/conf.py', '')
    @property
    def is_type_sphinx(self):
        '''Is project type Sphinx'''
        return 'sphinx' in self.documentation_type
    @property
    def is_type_mkdocs(self):
        '''Is project type Mkdocs'''
        return 'mkdocs' in self.documentation_type
    @property
    def is_imported(self):
        # A project counts as imported once it has a repository URL.
        return bool(self.repo)
    @property
    def has_good_build(self):
        # True when at least one build succeeded.
        return self.builds.filter(success=True).exists()
    @property
    def has_versions(self):
        # True when any Version rows exist for this project.
        return self.versions.exists()
    @property
    def has_aliases(self):
        # True when any alias rows exist for this project.
        return self.aliases.exists()
def has_pdf(self, version_slug=LATEST):
if not self.enable_pdf_build:
return False
return os.path.exists(self.get_production_media_path(type='pdf', version_slug=version_slug))
def has_epub(self, version_slug=LATEST):
if not self.enable_epub_build:
return False
return os.path.exists(self.get_production_media_path(type='epub', version_slug=version_slug))
    def has_htmlzip(self, version_slug=LATEST):
        # Unlike PDF/epub there is no enable flag for HTML zip builds.
        return os.path.exists(self.get_production_media_path(type='htmlzip', version_slug=version_slug))
    @property
    def sponsored(self):
        # Sponsorship is not supported in this code base.
        return False
    def vcs_repo(self, version=LATEST):
        """Return a VCS backend instance for *version*.

        Returns None when no backend is registered for ``repo_type``.
        """
        backend = backend_cls.get(self.repo_type)
        if not backend:
            repo = None
        else:
            proj = VCSProject(
                self.name, self.default_branch, self.checkout_path(version), self.clean_repo)
            repo = backend(proj, version)
        return repo
    def repo_nonblockinglock(self, version, max_lock_age=5):
        """Non-blocking per-version repository lock."""
        return NonBlockingLock(project=self, version=version, max_lock_age=max_lock_age)
    def repo_lock(self, version, timeout=5, polling_interval=5):
        """Blocking per-version repository lock."""
        return Lock(self, version, timeout, polling_interval)
def find(self, file, version):
"""
A balla API to find files inside of a projects dir.
"""
matches = []
for root, dirnames, filenames in os.walk(self.full_doc_path(version)):
for filename in fnmatch.filter(filenames, file):
matches.append(os.path.join(root, filename))
return matches
def full_find(self, file, version):
"""
A balla API to find files inside of a projects dir.
"""
matches = []
for root, dirnames, filenames in os.walk(self.checkout_path(version)):
for filename in fnmatch.filter(filenames, file):
matches.append(os.path.join(root, filename))
return matches
    def get_latest_build(self, finished=True):
        """
        Get latest build for project
        finished
            Return only builds that are in a finished state
        """
        # Only HTML builds are considered.
        kwargs = {'type': 'html'}
        if finished:
            kwargs['state'] = 'finished'
        return self.builds.filter(**kwargs).first()
    def api_versions(self):
        """Active versions fetched via the API, sorted version-aware."""
        ret = []
        for version_data in api.version.get(project=self.pk,
                                            active=True)['objects']:
            version = make_api_version(version_data)
            ret.append(version)
        return sort_version_aware(ret)
    def active_versions(self):
        """Public, active versions that are either built or uploaded."""
        from readthedocs.builds.models import Version
        versions = Version.objects.public(project=self, only_active=True)
        return (versions.filter(built=True, active=True) |
                versions.filter(active=True, uploaded=True))
    def ordered_active_versions(self):
        """Public active versions in version-aware sort order."""
        from readthedocs.builds.models import Version
        versions = Version.objects.public(project=self, only_active=True)
        return sort_version_aware(versions)
    def all_active_versions(self):
        """A temporary workaround for active_versions filtering out things
        that were active, but failed to build
        """
        return self.versions.filter(active=True)
    def supported_versions(self):
        """
        Get the list of supported versions.
        Returns a list of version strings.
        """
        # All three window sizes must be configured; otherwise nothing
        # is considered supported.
        if not self.num_major or not self.num_minor or not self.num_point:
            return []
        version_identifiers = self.versions.values_list('verbose_name', flat=True)
        return version_windows(
            version_identifiers,
            major=self.num_major,
            minor=self.num_minor,
            point=self.num_point,
        )
    def get_stable_version(self):
        """The Version with the stable slug, or None."""
        return self.versions.filter(slug=STABLE).first()
    def update_stable_version(self):
        """
        Returns the version that was promoted to be the new stable version.
        It will return ``None`` if no update was made or if there is no
        version on the project that can be considered stable.
        """
        versions = self.versions.all()
        new_stable = determine_stable_version(versions)
        if new_stable:
            current_stable = self.get_stable_version()
            if current_stable:
                identifier_updated = (
                    new_stable.identifier != current_stable.identifier)
                # Only machine-managed stable versions are auto-updated.
                if identifier_updated and current_stable.machine:
                    log.info(
                        "Update stable version: {project}:{version}".format(
                            project=self.slug,
                            version=new_stable.identifier))
                    current_stable.identifier = new_stable.identifier
                    current_stable.save()
                    return new_stable
            else:
                log.info(
                    "Creating new stable version: {project}:{version}".format(
                        project=self.slug,
                        version=new_stable.identifier))
                current_stable = self.versions.create_stable(
                    type=new_stable.type,
                    identifier=new_stable.identifier)
                return new_stable
def version_from_branch_name(self, branch):
versions = self.versions_from_branch_name(branch)
try:
return versions[0]
except IndexError:
return None
    def versions_from_branch_name(self, branch):
        """Versions whose identifier matches *branch*, including the
        'origin/' and 'remotes/origin/' prefixed spellings."""
        return (
            self.versions.filter(identifier=branch) |
            self.versions.filter(identifier='remotes/origin/%s' % branch) |
            self.versions.filter(identifier='origin/%s' % branch)
        )
def get_default_version(self):
"""
Get the default version (slug).
Returns self.default_version if the version with that slug actually
exists (is built and published). Otherwise returns 'latest'.
"""
# latest is a special case where we don't have to check if it exists
if self.default_version == LATEST:
return self.default_version
# check if the default_version exists
version_qs = self.versions.filter(
slug=self.default_version, active=True
)
if version_qs.exists():
return self.default_version
return LATEST
def get_default_branch(self):
"""
Get the version representing "latest"
"""
if self.default_branch:
return self.default_branch
else:
return self.vcs_repo().fallback_branch
    def add_subproject(self, child):
        """Idempotently link *child* as a subproject of this project."""
        subproject, created = ProjectRelationship.objects.get_or_create(
            parent=self, child=child,
        )
        return subproject
    def remove_subproject(self, child):
        """Remove the subproject link to *child* (no-op when absent)."""
        ProjectRelationship.objects.filter(parent=self, child=child).delete()
        return
def moderation_queue(self):
# non-optimal SQL warning.
from readthedocs.comments.models import DocumentComment
queue = []
comments = DocumentComment.objects.filter(node__project=self)
for comment in comments:
if not comment.has_been_approved_since_most_recent_node_change():
queue.append(comment)
return queue
    def add_node(self, node_hash, page, version, commit):
        """Create a DocumentNode unless an identical snapshot already exists.

        Returns True when a node was created, False otherwise.
        """
        from readthedocs.comments.models import NodeSnapshot, DocumentNode
        # Re-fetch so we work against fresh DB state.
        project_obj = Project.objects.get(slug=self.slug)
        version_obj = project_obj.versions.get(slug=version)
        try:
            NodeSnapshot.objects.get(hash=node_hash, node__project=project_obj, node__version=version_obj, node__page=page, commit=commit)
            return False  # ie, no new node was created.
        except NodeSnapshot.DoesNotExist:
            DocumentNode.objects.create(
                hash=node_hash,
                page=page,
                project=project_obj,
                version=version_obj,
                commit=commit
            )
            return True  # ie, it's True that a new node was created.
    def add_comment(self, version_slug, page, hash, commit, user, text):
        """Attach a comment to the node identified by (version, page, hash),
        creating the node first when it does not exist yet."""
        from readthedocs.comments.models import DocumentNode
        try:
            node = self.nodes.from_hash(version_slug, page, hash)
        except DocumentNode.DoesNotExist:
            version = self.versions.get(slug=version_slug)
            node = self.nodes.create(version=version, page=page, hash=hash, commit=commit)
        return node.comments.create(user=user, text=text)
class ImportedFile(models.Model):
    """A file discovered inside a project's built documentation."""
    project = models.ForeignKey('Project', verbose_name=_('Project'),
                                related_name='imported_files')
    version = models.ForeignKey('builds.Version', verbose_name=_('Version'),
                                related_name='imported_files', null=True)
    name = models.CharField(_('Name'), max_length=255)
    slug = models.SlugField(_('Slug'))
    path = models.CharField(_('Path'), max_length=255)
    # MD5 of the file content, used to detect changes between builds.
    md5 = models.CharField(_('MD5 checksum'), max_length=255)
    commit = models.CharField(_('Commit'), max_length=255)
    @models.permalink
    def get_absolute_url(self):
        return ('docs_detail', [self.project.slug, self.project.language,
                                self.version.slug, self.path])
    def __unicode__(self):
        return '%s: %s' % (self.name, self.project)
class Notification(models.Model):
    """Abstract base for per-project notification targets."""
    project = models.ForeignKey(Project,
                                related_name='%(class)s_notifications')
    objects = RelatedProjectManager()
    class Meta:
        abstract = True
class EmailHook(Notification):
    """Notification target that sends to an email address."""
    email = models.EmailField()
    def __unicode__(self):
        return self.email
class WebHook(Notification):
    """Notification target that POSTs to a URL."""
    url = models.URLField(blank=True,
                          help_text=_('URL to send the webhook to'))
    def __unicode__(self):
        return self.url
| |
# Class definition:
# ErrorDiagnosis
#
import os
import re
from Diagnosis import Diagnosis
from PilotErrors import PilotErrors
from pUtil import tolog, getExperiment
class ErrorDiagnosis(Diagnosis):
    """Singleton diagnosis helper that scans payload stdout and the
    jobReport.json for known error conditions.

    Modernized for forward compatibility: ``except ... as e`` and ``in``
    membership tests replace the Python-2-only ``except ..., e`` syntax and
    ``dict.has_key()``; both forms also work on Python 2.6+.
    """

    # private data members
    __instance = None  # Boolean used by subclasses to become a Singleton
    __error = PilotErrors()  # PilotErrors object

    def __init__(self):
        """ Default initialization """
        # e.g. self.__errorLabel = errorLabel
        pass

    def __new__(cls, *args, **kwargs):
        """ Override the __new__ method to make the class a singleton """
        if not cls.__instance:
            cls.__instance = super(ErrorDiagnosis, cls).__new__(cls, *args, **kwargs)
        return cls.__instance

    def interpretPayload(self, job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, failureCode):
        """ Interpret the payload, look for specific errors in the stdout """

        # get the experiment object
        thisExperiment = getExperiment(job.experiment)
        if not thisExperiment:
            job.pilotErrorDiag = "ErrorDiagnosis did not get an experiment object from the factory"
            job.result[2] = self.__error.ERR_GENERALERROR  # change to better/new error code
            tolog("!!WARNING!!3234!! %s" % (job.pilotErrorDiag))
            return job

        ### WARNING: EXPERIMENT SPECIFIC, MOVE LATER
        try:
            ec, pilotErrorDiag = self.processJobReport(job.workdir)
        except Exception as e:
            tolog("!!WARNING!!1114!! Caught exception: %s" % (e))
        else:
            if ec != 0:
                job.pilotErrorDiag = pilotErrorDiag
                job.result[2] = ec
                return job

        # handle special errors from the trf
        if res[0] == 146:
            # try to extract the tarball url from res[1] (should have been added in executePayload())
            tarball_url = self.extractTarballURL(res[1])
            job.pilotErrorDiag = "User tarball %s cannot be downloaded from PanDA server" % tarball_url
            job.result[2] = self.__error.ERR_NOUSERTARBALL
            # no need to continue, as the job report will not exist
            return job

        # Extract job information (e.g. number of events)
        job = self.extractJobInformation(job, runCommandList)  # add more arguments as needed

        # interpret the stdout (the stdout is experiment specific so use the corresponding method)
        job = thisExperiment.interpretPayloadStdout(job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, failureCode)

        return job

    def extractTarballURL(self, tail):
        """ Extract the tarball URL for missing user code if possible from stdout tail """

        tarball_url = "(source unknown)"
        if "https://" in tail or "http://" in tail:
            pattern = r"(https?\:\/\/.+)"
            found = re.findall(pattern, tail)
            if len(found) > 0:
                tarball_url = found[0]

        return tarball_url

    ### WARNING: EXPERIMENT SPECIFIC, MOVE LATER
    def getJobReport(self, workDir):
        """ Get the jobReport.json dictionary """

        fileName = os.path.join(workDir, "jobReport.json")
        if os.path.exists(fileName):
            # the jobReport file exists, read it back
            try:
                f = open(fileName, "r")
            except Exception as e:
                tolog("!!WARNING!!1001!! Could not open file: %s, %s" % (fileName, e))
                jobReport_dictionary = {}
            else:
                from json import load
                try:
                    # load the dictionary
                    jobReport_dictionary = load(f)
                except Exception as e:
                    tolog("!!WARNING!!1001!! Could not read back jobReport dictionary: %s" % (e))
                    jobReport_dictionary = {}

                # done with the file
                f.close()
        else:
            tolog("WARNING: File %s does not exist" % (fileName))
            jobReport_dictionary = {}

        return jobReport_dictionary

    ### WARNING: EXPERIMENT SPECIFIC, MOVE LATER
    def getJobReportErrors(self, jobReport_dictionary):
        """ Extract the error list from the jobReport.json dictionary """
        # WARNING: Currently compatible with version <= 0.9.4

        jobReportErrors = []
        if 'reportVersion' in jobReport_dictionary:
            tolog("Scanning jobReport (v %s) for error info" % jobReport_dictionary['reportVersion'])
        else:
            tolog("WARNING: jobReport does not have the reportVersion key")

        if 'executor' in jobReport_dictionary:
            try:
                error_details = jobReport_dictionary['executor'][0]['logfileReport']['details']['ERROR']
            except Exception as e:
                tolog("WARNING: Aborting jobReport scan: %s" % (e))
            else:
                try:
                    for m in error_details:
                        jobReportErrors.append(m['message'])
                except Exception as e:
                    tolog("!!WARNING!!1113!! Did not get a list object: %s" % (e))
        else:
            tolog("WARNING: jobReport does not have the executor key (aborting)")

        return jobReportErrors

    ### WARNING: EXPERIMENT SPECIFIC, MOVE LATER
    def isBadAlloc(self, jobReportErrors):
        """ Check for bad_alloc errors """

        bad_alloc = False
        pilotErrorDiag = ""
        for m in jobReportErrors:
            if "bad_alloc" in m:
                tolog("!!WARNING!!1112!! Encountered a bad_alloc error: %s" % (m))
                bad_alloc = True
                pilotErrorDiag = m
                break

        return bad_alloc, pilotErrorDiag

    ### WARNING: EXPERIMENT SPECIFIC, MOVE LATER
    def processJobReport(self, workDir):
        """ Scan the jobReport.json for specific errors """

        # Specific errors
        ec = 0
        pilotErrorDiag = ""
        bad_alloc = False

        jobReport_dictionary = self.getJobReport(workDir)
        if jobReport_dictionary != {}:
            jobReportErrors = self.getJobReportErrors(jobReport_dictionary)

            # Check for specific errors
            if jobReportErrors != []:
                bad_alloc, pilotErrorDiag = self.isBadAlloc(jobReportErrors)
                if bad_alloc:
                    ec = self.__error.ERR_BADALLOC  # get the corresponding error code

        return ec, pilotErrorDiag

    def extractJobInformation(self, job, runCommandList):
        """ Extract relevant job information, e.g. number of events """

        # get the experiment object
        thisExperiment = getExperiment(job.experiment)
        if not thisExperiment:
            job.pilotErrorDiag = "ErrorDiagnosis did not get an experiment object from the factory"
            job.result[2] = self.__error.ERR_GENERALERROR  # change to better/new error code
            tolog("!!WARNING!!3234!! %s" % (job.pilotErrorDiag))
            return job

        # note that this class should not be experiment specific, so move anything related to ATLAS to ATLASExperiment.py
        # and use thisExperiment.whatever() to retrieve it here

        # grab the number of events
        try:
            # nEvents_str can be a string of the form N|N|..|N with the number of jobs in the trf(s) [currently not used]
            # Add to Job class if necessary
            job.nEvents, job.nEventsW, nEvents_str = thisExperiment.getNumberOfEvents(job=job, number_of_jobs=len(runCommandList))
        except Exception as e:
            tolog("!!WARNING!!2999!! Failed to get number of events: %s (ignore)" % str(e))

        # get the DB info from the jobReport (experiment specific)
        from FileHandling import getDBInfo
        job.dbTime, job.dbData = getDBInfo(job.workdir)

        return job
if __name__ == "__main__":
    # print() with a single argument is valid on both Python 2 and 3,
    # unlike the former Python-2-only print statement.
    print("Implement test cases here")

    ed = ErrorDiagnosis()
    # ed.hello()
| |
#!/usr/bin/env python
"""
@package mi/dataset/parser
@file mi/dataset/parser/glider.py
@author Stuart Pearce & Chris Wingard
@brief Module containing parser scripts for glider data set agents
"""
import re
import ntplib
from math import copysign, isnan
import scipy.interpolate as interpolate
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, \
ConfigurationException, \
DatasetParserException
from mi.core.instrument.dataset_data_particle import DataParticle, DataParticleKey
from mi.dataset.dataset_parser import SimpleParser, DataSetDriverConfigKeys
# start the logger
log = get_logger()

# Module attribution metadata.
__author__ = 'Stuart Pearce, Chris Wingard, Nick Almonte, Rene Gelinas'
__license__ = 'Apache 2.0'
class DataParticleType(BaseEnum):
    """Stream names for the data particles produced by the glider parsers."""
    # Data particle types for the Open Ocean (aka Global) and Coastal gliders.
    # ADCPA data will parsed by a different parser (adcpa.py)
    DOSTA_ABCDJM_GLIDER_INSTRUMENT = 'dosta_abcdjm_glider_instrument'
    DOSTA_ABCDJM_GLIDER_RECOVERED = 'dosta_abcdjm_glider_recovered'
    CTDGV_M_GLIDER_INSTRUMENT = 'ctdgv_m_glider_instrument'
    CTDGV_M_GLIDER_INSTRUMENT_RECOVERED = 'ctdgv_m_glider_instrument_recovered'
    FLORD_M_GLIDER_INSTRUMENT = 'flord_m_glider_instrument'
    FLORD_M_GLIDER_INSTRUMENT_RECOVERED = 'flord_m_glider_instrument_recovered'
    # NOTE(review): telemetered and recovered FLORT-M share the same stream
    # name 'flort_m_sample' -- confirm this duplication is intentional.
    FLORT_M_GLIDER_INSTRUMENT = 'flort_m_sample'
    FLORT_M_GLIDER_RECOVERED = 'flort_m_sample'
    FLORT_O_GLIDER_DATA = 'flort_o_glider_data'
    PARAD_M_GLIDER_INSTRUMENT = 'parad_m_glider_instrument'
    PARAD_M_GLIDER_RECOVERED = 'parad_m_glider_recovered'
    GLIDER_ENG_TELEMETERED = 'glider_eng_telemetered'
    GLIDER_ENG_METADATA = 'glider_eng_metadata'
    GLIDER_ENG_RECOVERED = 'glider_eng_recovered'
    GLIDER_ENG_SCI_TELEMETERED = 'glider_eng_sci_telemetered'
    GLIDER_ENG_SCI_RECOVERED = 'glider_eng_sci_recovered'
    GLIDER_ENG_METADATA_RECOVERED = 'glider_eng_metadata_recovered'
    # NOTE(review): 'POSITON' is a typo, but the attribute name is public
    # API -- renaming it would break importers.
    GLIDER_GPS_POSITON = 'glider_gps_position'
    NUTNR_M_GLIDER_INSTRUMENT = 'nutnr_m_glider_instrument'
class GliderParticleKey(BaseEnum):
    """
    Common glider particle parameters
    """
    M_PRESENT_SECS_INTO_MISSION = 'm_present_secs_into_mission'
    M_PRESENT_TIME = 'm_present_time'  # you need the m_ timestamps for lats & lons
    SCI_M_PRESENT_TIME = 'sci_m_present_time'
    SCI_M_PRESENT_SECS_INTO_MISSION = 'sci_m_present_secs_into_mission'
    @classmethod
    def science_parameter_list(cls):
        """
        Get a list of all science parameters

        Everything declared on the subclass minus the common timestamp
        keys defined on this base class.
        """
        result = []
        for key in cls.list():
            if key not in GliderParticleKey.list():
                result.append(key)
        return result
class GliderParticle(DataParticle):
    """
    Base particle for glider data. Glider files are
    publishing as a particle rather than a raw data string. This is in
    part to solve the dynamic nature of a glider file and not having to
    hard code >2000 variables in a regex.
    This class should be a parent class to all the data particle classes
    associated with the glider.
    """
    def _parsed_values(self, key_list):
        # Encode each requested key from self.raw_data into particle value
        # dicts; raw values appear to be strings -- TODO confirm upstream.
        value_list = [self.raw_data.get(key, None) for key in key_list]
        result = []
        for key, value in zip(key_list, value_list):
            # encode strings into float or int
            if value is None:
                # this key was not present in this file, there is no value
                result.append({DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: None})
            elif 'inf' in value:
                # guard against 'inf' parameter values found in some datasets
                result.append({DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: None})
            elif value == '69696969':
                # guard against '69696969' parameter values used as fill values
                result.append({DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: None})
            elif '_lat' in key or '_lon' in key:
                # special encoding for latitude and longitude
                result.append(self._encode_value(key, value, GliderParticle._string_to_ddegrees))
            elif isnan(float(value)) or '.' in value or 'e' in value:
                # this is a float
                result.append(self._encode_value(key, value, GliderParticle._encode_float_or_nan))
            else:
                # if it is not a float it is an int
                result.append(self._encode_value(key, value, int))
        return result
    @staticmethod
    def _encode_float_or_nan(value):
        # NaN inputs encode as None so downstream consumers see a fill value.
        if isnan(float(value)):
            return None
        else:
            return float(value)
    @staticmethod
    def _string_to_ddegrees(value):
        # Convert NMEA-style DDMM.MMMM strings into signed decimal degrees.
        float_val = float(value)
        if isnan(float_val):
            return None
        absval = abs(float_val)
        degrees = int(absval / 100)
        minutes = absval - degrees * 100
        return copysign(degrees + minutes / 60, float_val)
class CtdgvParticleKey(GliderParticleKey):
    """Parameter keys for CTDGV particles."""
    # science data made available via telemetry or Glider recovery
    SCI_CTD41CP_TIMESTAMP = 'sci_ctd41cp_timestamp'
    SCI_WATER_COND = 'sci_water_cond'
    SCI_WATER_PRESSURE = 'sci_water_pressure'
    SCI_WATER_TEMP = 'sci_water_temp'
class CtdgvTelemeteredDataParticle(GliderParticle):
    """Telemetered CTDGV data particle."""
    _data_particle_type = DataParticleType.CTDGV_M_GLIDER_INSTRUMENT
    science_parameters = CtdgvParticleKey.science_parameter_list()
    def _build_parsed_values(self):
        """
        Extracts CTDGV data from the glider data dictionary initialized with
        the particle class and puts the data into a CTDGV Telemetered Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        return self._parsed_values(CtdgvParticleKey.list())
class CtdgvRecoveredDataParticle(GliderParticle):
    """Recovered CTDGV data particle."""
    _data_particle_type = DataParticleType.CTDGV_M_GLIDER_INSTRUMENT_RECOVERED
    science_parameters = CtdgvParticleKey.science_parameter_list()
    def _build_parsed_values(self):
        """
        Extracts CTDGV data from the glider data dictionary initialized with
        the particle class and puts the data into a CTDGV Recovered Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        return self._parsed_values(CtdgvParticleKey.list())
class DostaTelemeteredParticleKey(GliderParticleKey):
    """Parameter keys for telemetered DOSTA particles."""
    # science data made available via telemetry
    SCI_OXY4_OXYGEN = 'sci_oxy4_oxygen'
    SCI_OXY4_SATURATION = 'sci_oxy4_saturation'
class DostaRecoveredParticleKey(GliderParticleKey):
    """Parameter keys for recovered DOSTA particles (full sensor set)."""
    # science data made available via glider recovery
    SCI_OXY4_OXYGEN = 'sci_oxy4_oxygen'
    SCI_OXY4_SATURATION = 'sci_oxy4_saturation'
    SCI_OXY4_TIMESTAMP = 'sci_oxy4_timestamp'
    SCI_OXY4_C1AMP = 'sci_oxy4_c1amp'
    SCI_OXY4_C1RPH = 'sci_oxy4_c1rph'
    SCI_OXY4_C2AMP = 'sci_oxy4_c2amp'
    SCI_OXY4_C2RPH = 'sci_oxy4_c2rph'
    SCI_OXY4_CALPHASE = 'sci_oxy4_calphase'
    SCI_OXY4_RAWTEMP = 'sci_oxy4_rawtemp'
    SCI_OXY4_TCPHASE = 'sci_oxy4_tcphase'
    SCI_OXY4_TEMP = 'sci_oxy4_temp'
class DostaTelemeteredDataParticle(GliderParticle):
    """Telemetered DOSTA data particle."""
    _data_particle_type = DataParticleType.DOSTA_ABCDJM_GLIDER_INSTRUMENT
    science_parameters = DostaTelemeteredParticleKey.science_parameter_list()
    def _build_parsed_values(self):
        """
        Takes a GliderParser object and extracts DOSTA data from the
        data dictionary and puts the data into a DOSTA Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        return self._parsed_values(DostaTelemeteredParticleKey.list())
class DostaRecoveredDataParticle(GliderParticle):
    """Recovered DOSTA data particle."""
    _data_particle_type = DataParticleType.DOSTA_ABCDJM_GLIDER_RECOVERED
    science_parameters = DostaRecoveredParticleKey.science_parameter_list()
    def _build_parsed_values(self):
        """
        Takes a GliderParser object and extracts DOSTA data from the
        data dictionary and puts the data into a DOSTA Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        return self._parsed_values(DostaRecoveredParticleKey.list())
class FlordParticleKey(GliderParticleKey):
    """Parameter keys for FLORD particles."""
    # science data made available via telemetry or glider recovery
    SCI_FLBB_TIMESTAMP = 'sci_flbb_timestamp'
    SCI_FLBB_BB_REF = 'sci_flbb_bb_ref'
    SCI_FLBB_BB_SIG = 'sci_flbb_bb_sig'
    SCI_FLBB_BB_UNITS = 'sci_flbb_bb_units'
    SCI_FLBB_CHLOR_REF = 'sci_flbb_chlor_ref'
    SCI_FLBB_CHLOR_SIG = 'sci_flbb_chlor_sig'
    SCI_FLBB_CHLOR_UNITS = 'sci_flbb_chlor_units'
    SCI_FLBB_THERM = 'sci_flbb_therm'
class FlordTelemeteredDataParticle(GliderParticle):
    """Telemetered FLORD data particle."""
    _data_particle_type = DataParticleType.FLORD_M_GLIDER_INSTRUMENT
    science_parameters = FlordParticleKey.science_parameter_list()
    def _build_parsed_values(self):
        """
        Takes a GliderParser object and extracts FLORD data from the
        data dictionary and puts the data into a FLORD Telemetered Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        return self._parsed_values(FlordParticleKey.list())
class FlordRecoveredDataParticle(GliderParticle):
    """Recovered FLORD data particle."""
    _data_particle_type = DataParticleType.FLORD_M_GLIDER_INSTRUMENT_RECOVERED
    science_parameters = FlordParticleKey.science_parameter_list()
    def _build_parsed_values(self):
        """
        Takes a GliderParser object and extracts FLORD data from the
        data dictionary and puts the data into a FLORD Recovered Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        return self._parsed_values(FlordParticleKey.list())
class FlortTelemeteredParticleKey(GliderParticleKey):
    """Parameter keys for telemetered FLORT particles."""
    # science data made available via telemetry
    SCI_FLBBCD_BB_UNITS = 'sci_flbbcd_bb_units'
    SCI_FLBBCD_CDOM_UNITS = 'sci_flbbcd_cdom_units'
    SCI_FLBBCD_CHLOR_UNITS = 'sci_flbbcd_chlor_units'
class FlortRecoveredParticleKey(GliderParticleKey):
    """Column labels for recovered FLORT (FLBBCD) science data."""
    # science data made available via glider recovery
    SCI_FLBBCD_TIMESTAMP = 'sci_flbbcd_timestamp'
    SCI_FLBBCD_BB_REF = 'sci_flbbcd_bb_ref'
    SCI_FLBBCD_BB_SIG = 'sci_flbbcd_bb_sig'
    SCI_FLBBCD_BB_UNITS = 'sci_flbbcd_bb_units'
    SCI_FLBBCD_CDOM_REF = 'sci_flbbcd_cdom_ref'
    SCI_FLBBCD_CDOM_SIG = 'sci_flbbcd_cdom_sig'
    SCI_FLBBCD_CDOM_UNITS = 'sci_flbbcd_cdom_units'
    SCI_FLBBCD_CHLOR_REF = 'sci_flbbcd_chlor_ref'
    SCI_FLBBCD_CHLOR_SIG = 'sci_flbbcd_chlor_sig'
    SCI_FLBBCD_CHLOR_UNITS = 'sci_flbbcd_chlor_units'
    SCI_FLBBCD_THERM = 'sci_flbbcd_therm'
class FlortTelemeteredDataParticle(GliderParticle):
    """Telemetered FLORT instrument particle built from a glider data row."""

    _data_particle_type = DataParticleType.FLORT_M_GLIDER_INSTRUMENT
    science_parameters = FlortTelemeteredParticleKey.science_parameter_list()

    def _build_parsed_values(self):
        """
        Extract the FLORT science values from the glider data dictionary.

        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        wanted_keys = FlortTelemeteredParticleKey.list()
        return self._parsed_values(wanted_keys)
class FlortRecoveredDataParticle(GliderParticle):
    """Recovered FLORT instrument particle built from a glider data row."""

    _data_particle_type = DataParticleType.FLORT_M_GLIDER_RECOVERED
    science_parameters = FlortRecoveredParticleKey.science_parameter_list()

    def _build_parsed_values(self):
        """
        Extract the FLORT science values from the glider data dictionary.

        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        wanted_keys = FlortRecoveredParticleKey.list()
        return self._parsed_values(wanted_keys)
class FlortODataParticleKey(GliderParticleKey):
    """Column labels for FLORT-O (BB3SLO triplet backscatter) science data."""
    # science data made available via telemetry
    SCI_BB3SLO_B470_SIG = 'sci_bb3slo_b470_sig'
    SCI_BB3SLO_B532_SIG = 'sci_bb3slo_b532_sig'
    SCI_BB3SLO_B660_SIG = 'sci_bb3slo_b660_sig'
class FlortODataParticle(GliderParticle):
    """FLORT-O instrument particle built from a glider data row."""

    _data_particle_type = DataParticleType.FLORT_O_GLIDER_DATA
    science_parameters = FlortODataParticleKey.science_parameter_list()

    def _build_parsed_values(self):
        """
        Extract the FLORT-O science values from the glider data dictionary.

        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        wanted_keys = FlortODataParticleKey.list()
        return self._parsed_values(wanted_keys)
class ParadTelemeteredParticleKey(GliderParticleKey):
    """Column labels for telemetered PARAD (BSIPAR irradiance) science data."""
    # science data made available via telemetry
    SCI_BSIPAR_PAR = 'sci_bsipar_par'
class ParadRecoveredParticleKey(GliderParticleKey):
    """Column labels for recovered PARAD (BSIPAR irradiance) science data."""
    # science data made available via glider recovery
    SCI_BSIPAR_PAR = 'sci_bsipar_par'
    SCI_BSIPAR_SENSOR_VOLTS = 'sci_bsipar_sensor_volts'
    SCI_BSIPAR_SUPPLY_VOLTS = 'sci_bsipar_supply_volts'
    SCI_BSIPAR_TEMP = 'sci_bsipar_temp'
class ParadTelemeteredDataParticle(GliderParticle):
    """Telemetered PARAD instrument particle built from a glider data row."""

    _data_particle_type = DataParticleType.PARAD_M_GLIDER_INSTRUMENT
    science_parameters = ParadTelemeteredParticleKey.science_parameter_list()

    def _build_parsed_values(self):
        """
        Extract the PARAD science values from the glider data dictionary.

        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        wanted_keys = ParadTelemeteredParticleKey.list()
        return self._parsed_values(wanted_keys)
class ParadRecoveredDataParticle(GliderParticle):
    """Recovered PARAD instrument particle built from a glider data row."""

    _data_particle_type = DataParticleType.PARAD_M_GLIDER_RECOVERED
    science_parameters = ParadRecoveredParticleKey.science_parameter_list()

    def _build_parsed_values(self):
        """
        Extract the PARAD science values from the glider data dictionary.

        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        wanted_keys = ParadRecoveredParticleKey.list()
        return self._parsed_values(wanted_keys)
class EngineeringRecoveredParticleKey(GliderParticleKey):
    """
    Column labels for the recovered engineering particle.

    Naming convention of the underlying glider columns: c_* are commanded
    values, m_* are measured values, x_* are extended/derived values.
    """
    # engineering data made available via glider recovery
    M_ALTITUDE = 'm_altitude'
    M_DEPTH = 'm_depth'
    M_LAT = 'm_lat'
    M_LON = 'm_lon'
    C_AIR_PUMP = 'c_air_pump'
    C_BALLAST_PUMPED = 'c_ballast_pumped'
    C_BATTPOS = 'c_battpos'
    C_BATTROLL = 'c_battroll'
    C_BSIPAR_ON = 'c_bsipar_on'
    C_DE_OIL_VOL = 'c_de_oil_vol'
    C_DVL_ON = 'c_dvl_on'
    C_FLBBCD_ON = 'c_flbbcd_on'
    C_HEADING = 'c_heading'
    C_OXY3835_WPHASE_ON = 'c_oxy3835_wphase_on'
    C_PITCH = 'c_pitch'
    C_PROFILE_ON = 'c_profile_on'
    C_WPT_LAT = 'c_wpt_lat'
    C_WPT_LON = 'c_wpt_lon'
    M_1MEG_PERSISTOR = 'm_1meg_persistor'
    M_AGROUND_WATER_DEPTH = 'm_aground_water_depth'
    M_AIR_FILL = 'm_air_fill'
    M_AIR_PUMP = 'm_air_pump'
    M_ALTIMETER_STATUS = 'm_altimeter_status'
    M_ALTIMETER_VOLTAGE = 'm_altimeter_voltage'
    M_ALTITUDE_RATE = 'm_altitude_rate'
    M_APPEAR_TO_BE_AT_SURFACE = 'm_appear_to_be_at_surface'
    M_ARGOS_IS_XMITTING = 'm_argos_is_xmitting'
    M_ARGOS_ON = 'm_argos_on'
    M_ARGOS_SENT_DATA = 'm_argos_sent_data'
    M_ARGOS_TIMESTAMP = 'm_argos_timestamp'
    M_AT_RISK_DEPTH = 'm_at_risk_depth'
    M_AVBOT_ENABLE = 'm_avbot_enable'
    M_AVBOT_POWER = 'm_avbot_power'
    M_AVG_CLIMB_RATE = 'm_avg_climb_rate'
    M_AVG_DEPTH_RATE = 'm_avg_depth_rate'
    M_AVG_DIVE_RATE = 'm_avg_dive_rate'
    M_AVG_DOWNWARD_INFLECTION_TIME = 'm_avg_downward_inflection_time'
    M_AVG_SPEED = 'm_avg_speed'
    M_AVG_SYSTEM_CLOCK_LAGS_GPS = 'm_avg_system_clock_lags_gps'
    M_AVG_UPWARD_INFLECTION_TIME = 'm_avg_upward_inflection_time'
    M_AVG_YO_TIME = 'm_avg_yo_time'
    M_BALLAST_PUMPED = 'm_ballast_pumped'
    M_BALLAST_PUMPED_ENERGY = 'm_ballast_pumped_energy'
    M_BALLAST_PUMPED_VEL = 'm_ballast_pumped_vel'
    M_BATTERY = 'm_battery'
    M_BATTERY_INST = 'm_battery_inst'
    M_BATTPOS = 'm_battpos'
    M_BATTPOS_VEL = 'm_battpos_vel'
    M_BATTROLL = 'm_battroll'
    M_BATTROLL_VEL = 'm_battroll_vel'
    M_BPUMP_FAULT_BIT = 'm_bpump_fault_bit'
    M_CERTAINLY_AT_SURFACE = 'm_certainly_at_surface'
    M_CHARS_TOSSED_BY_ABEND = 'm_chars_tossed_by_abend'
    M_CHARS_TOSSED_WITH_CD_OFF = 'm_chars_tossed_with_cd_off'
    M_CHARS_TOSSED_WITH_POWER_OFF = 'm_chars_tossed_with_power_off'
    M_CLIMB_TOT_TIME = 'm_climb_tot_time'
    M_CONSOLE_CD = 'm_console_cd'
    M_CONSOLE_ON = 'm_console_on'
    M_COP_TICKLE = 'm_cop_tickle'
    M_COULOMB_AMPHR = 'm_coulomb_amphr'
    M_COULOMB_AMPHR_RAW = 'm_coulomb_amphr_raw'
    M_COULOMB_AMPHR_TOTAL = 'm_coulomb_amphr_total'
    M_COULOMB_CURRENT = 'm_coulomb_current'
    M_COULOMB_CURRENT_RAW = 'm_coulomb_current_raw'
    M_CYCLE_NUMBER = 'm_cycle_number'
    M_DEPTH_RATE = 'm_depth_rate'
    M_DEPTH_RATE_AVG_FINAL = 'm_depth_rate_avg_final'
    M_DEPTH_RATE_RUNNING_AVG = 'm_depth_rate_running_avg'
    M_DEPTH_RATE_RUNNING_AVG_N = 'm_depth_rate_running_avg_n'
    M_DEPTH_RATE_SUBSAMPLED = 'm_depth_rate_subsampled'
    M_DEPTH_REJECTED = 'm_depth_rejected'
    M_DEPTH_STATE = 'm_depth_state'
    M_DEPTH_SUBSAMPLED = 'm_depth_subsampled'
    M_DEVICE_DRIVERS_CALLED_ABNORMALLY = 'm_device_drivers_called_abnormally'
    M_DEVICE_ERROR = 'm_device_error'
    M_DEVICE_ODDITY = 'm_device_oddity'
    M_DEVICE_WARNING = 'm_device_warning'
    M_DE_OIL_VOL = 'm_de_oil_vol'
    M_DE_OIL_VOL_POT_VOLTAGE = 'm_de_oil_vol_pot_voltage'
    M_DE_PUMP_FAULT_COUNT = 'm_de_pump_fault_count'
    M_DIGIFIN_CMD_DONE = 'm_digifin_cmd_done'
    M_DIGIFIN_CMD_ERROR = 'm_digifin_cmd_error'
    M_DIGIFIN_LEAKDETECT_READING = 'm_digifin_leakdetect_reading'
    M_DIGIFIN_MOTORSTEP_COUNTER = 'm_digifin_motorstep_counter'
    M_DIGIFIN_RESP_DATA = 'm_digifin_resp_data'
    M_DIGIFIN_STATUS = 'm_digifin_status'
    M_DISK_FREE = 'm_disk_free'
    M_DISK_USAGE = 'm_disk_usage'
    M_DIST_TO_WPT = 'm_dist_to_wpt'
    M_DIVE_DEPTH = 'm_dive_depth'
    M_DIVE_TOT_TIME = 'm_dive_tot_time'
    M_DR_FIX_TIME = 'm_dr_fix_time'
    M_DR_POSTFIX_TIME = 'm_dr_postfix_time'
    M_DR_SURF_X_LMC = 'm_dr_surf_x_lmc'
    M_DR_SURF_Y_LMC = 'm_dr_surf_y_lmc'
    M_DR_TIME = 'm_dr_time'
    M_DR_X_ACTUAL_ERR = 'm_dr_x_actual_err'
    M_DR_X_INI_ERR = 'm_dr_x_ini_err'
    M_DR_X_POSTFIX_DRIFT = 'm_dr_x_postfix_drift'
    M_DR_X_TA_POSTFIX_DRIFT = 'm_dr_x_ta_postfix_drift'
    M_DR_Y_ACTUAL_ERR = 'm_dr_y_actual_err'
    M_DR_Y_INI_ERR = 'm_dr_y_ini_err'
    M_DR_Y_POSTFIX_DRIFT = 'm_dr_y_postfix_drift'
    M_DR_Y_TA_POSTFIX_DRIFT = 'm_dr_y_ta_postfix_drift'
    M_EST_TIME_TO_SURFACE = 'm_est_time_to_surface'
    M_FIN = 'm_fin'
    M_FINAL_WATER_VX = 'm_final_water_vx'
    M_FINAL_WATER_VY = 'm_final_water_vy'
    M_FIN_VEL = 'm_fin_vel'
    M_FLUID_PUMPED = 'm_fluid_pumped'
    M_FLUID_PUMPED_AFT_HALL_VOLTAGE = 'm_fluid_pumped_aft_hall_voltage'
    M_FLUID_PUMPED_FWD_HALL_VOLTAGE = 'm_fluid_pumped_fwd_hall_voltage'
    M_FLUID_PUMPED_VEL = 'm_fluid_pumped_vel'
    M_FREE_HEAP = 'm_free_heap'
    M_GPS_DIST_FROM_DR = 'm_gps_dist_from_dr'
    M_GPS_FIX_X_LMC = 'm_gps_fix_x_lmc'
    M_GPS_FIX_Y_LMC = 'm_gps_fix_y_lmc'
    M_GPS_FULL_STATUS = 'm_gps_full_status'
    M_GPS_HEADING = 'm_gps_heading'
    M_GPS_IGNORED_LAT = 'm_gps_ignored_lat'
    M_GPS_IGNORED_LON = 'm_gps_ignored_lon'
    M_GPS_INVALID_LAT = 'm_gps_invalid_lat'
    M_GPS_INVALID_LON = 'm_gps_invalid_lon'
    M_GPS_MAG_VAR = 'm_gps_mag_var'
    M_GPS_NUM_SATELLITES = 'm_gps_num_satellites'
    M_GPS_ON = 'm_gps_on'
    M_GPS_POSTFIX_X_LMC = 'm_gps_postfix_x_lmc'
    M_GPS_POSTFIX_Y_LMC = 'm_gps_postfix_y_lmc'
    M_GPS_STATUS = 'm_gps_status'
    M_GPS_SPEED = 'm_gps_speed'
    M_GPS_TOOFAR_LAT = 'm_gps_toofar_lat'
    M_GPS_TOOFAR_LON = 'm_gps_toofar_lon'
    M_GPS_UNCERTAINTY = 'm_gps_uncertainty'
    M_GPS_UTC_DAY = 'm_gps_utc_day'
    M_GPS_UTC_HOUR = 'm_gps_utc_hour'
    M_GPS_UTC_MINUTE = 'm_gps_utc_minute'
    M_GPS_UTC_MONTH = 'm_gps_utc_month'
    M_GPS_UTC_SECOND = 'm_gps_utc_second'
    M_GPS_UTC_YEAR = 'm_gps_utc_year'
    M_GPS_X_LMC = 'm_gps_x_lmc'
    M_GPS_Y_LMC = 'm_gps_y_lmc'
    M_HDG_DERROR = 'm_hdg_derror'
    M_HDG_ERROR = 'm_hdg_error'
    M_HDG_IERROR = 'm_hdg_ierror'
    M_HDG_RATE = 'm_hdg_rate'
    M_HEADING = 'm_heading'
    M_INITIAL_WATER_VX = 'm_initial_water_vx'
    M_INITIAL_WATER_VY = 'm_initial_water_vy'
    M_IRIDIUM_ATTEMPT_NUM = 'm_iridium_attempt_num'
    M_IRIDIUM_CALL_NUM = 'm_iridium_call_num'
    M_IRIDIUM_CONNECTED = 'm_iridium_connected'
    M_IRIDIUM_CONSOLE_ON = 'm_iridium_console_on'
    M_IRIDIUM_DIALED_NUM = 'm_iridium_dialed_num'
    M_IRIDIUM_ON = 'm_iridium_on'
    M_IRIDIUM_REDIALS = 'm_iridium_redials'
    M_IRIDIUM_SIGNAL_STRENGTH = 'm_iridium_signal_strength'
    M_IRIDIUM_STATUS = 'm_iridium_status'
    M_IRIDIUM_WAITING_REDIAL_DELAY = 'm_iridium_waiting_redial_delay'
    M_IRIDIUM_WAITING_REGISTRATION = 'm_iridium_waiting_registration'
    M_IS_BALLAST_PUMP_MOVING = 'm_is_ballast_pump_moving'
    M_IS_BATTPOS_MOVING = 'm_is_battpos_moving'
    M_IS_BATTROLL_MOVING = 'm_is_battroll_moving'
    M_IS_DE_PUMP_MOVING = 'm_is_de_pump_moving'
    M_IS_FIN_MOVING = 'm_is_fin_moving'
    M_IS_FPITCH_PUMP_MOVING = 'm_is_fpitch_pump_moving'
    M_IS_SPEED_ESTIMATED = 'm_is_speed_estimated'
    M_IS_THERMAL_VALVE_MOVING = 'm_is_thermal_valve_moving'
    M_LAST_YO_TIME = 'm_last_yo_time'
    M_LEAK = 'm_leak'
    M_LEAKDETECT_VOLTAGE = 'm_leakdetect_voltage'
    M_LEAKDETECT_VOLTAGE_FORWARD = 'm_leakdetect_voltage_forward'
    M_LEAK_FORWARD = 'm_leak_forward'
    M_LITHIUM_BATTERY_RELATIVE_CHARGE = 'm_lithium_battery_relative_charge'
    M_LITHIUM_BATTERY_STATUS = 'm_lithium_battery_status'
    M_LITHIUM_BATTERY_TIME_TO_CHARGE = 'm_lithium_battery_time_to_charge'
    M_LITHIUM_BATTERY_TIME_TO_DISCHARGE = 'm_lithium_battery_time_to_discharge'
    M_MIN_FREE_HEAP = 'm_min_free_heap'
    M_MIN_SPARE_HEAP = 'm_min_spare_heap'
    M_MISSION_AVG_SPEED_CLIMBING = 'm_mission_avg_speed_climbing'
    M_MISSION_AVG_SPEED_DIVING = 'm_mission_avg_speed_diving'
    M_MISSION_START_TIME = 'm_mission_start_time'
    M_NUM_HALF_YOS_IN_SEGMENT = 'm_num_half_yos_in_segment'
    M_PITCH = 'm_pitch'
    M_PITCH_ENERGY = 'm_pitch_energy'
    M_PITCH_ERROR = 'm_pitch_error'
    M_PRESSURE = 'm_pressure'
    M_PRESSURE_RAW_VOLTAGE_SAMPLE0 = 'm_pressure_raw_voltage_sample0'
    M_PRESSURE_RAW_VOLTAGE_SAMPLE19 = 'm_pressure_raw_voltage_sample19'
    M_PRESSURE_VOLTAGE = 'm_pressure_voltage'
    M_RAW_ALTITUDE = 'm_raw_altitude'
    M_RAW_ALTITUDE_REJECTED = 'm_raw_altitude_rejected'
    M_ROLL = 'm_roll'
    M_SCIENCE_CLOTHESLINE_LAG = 'm_science_clothesline_lag'
    M_SCIENCE_ON = 'm_science_on'
    M_SCIENCE_READY_FOR_CONSCI = 'm_science_ready_for_consci'
    M_SCIENCE_SENT_SOME_DATA = 'm_science_sent_some_data'
    M_SCIENCE_SYNC_TIME = 'm_science_sync_time'
    M_SCIENCE_UNREADINESS_FOR_CONSCI = 'm_science_unreadiness_for_consci'
    M_SPARE_HEAP = 'm_spare_heap'
    M_SPEED = 'm_speed'
    M_STABLE_COMMS = 'm_stable_comms'
    M_STROBE_CTRL = 'm_strobe_ctrl'
    M_SURFACE_EST_CMD = 'm_surface_est_cmd'
    M_SURFACE_EST_CTD = 'm_surface_est_ctd'
    M_SURFACE_EST_FW = 'm_surface_est_fw'
    M_SURFACE_EST_GPS = 'm_surface_est_gps'
    M_SURFACE_EST_IRID = 'm_surface_est_irid'
    M_SURFACE_EST_TOTAL = 'm_surface_est_total'
    M_SYSTEM_CLOCK_LAGS_GPS = 'm_system_clock_lags_gps'
    M_TCM3_IS_CALIBRATED = 'm_tcm3_is_calibrated'
    M_TCM3_MAGBEARTH = 'm_tcm3_magbearth'
    M_TCM3_POLL_TIME = 'm_tcm3_poll_time'
    M_TCM3_RECV_START_TIME = 'm_tcm3_recv_start_time'
    M_TCM3_RECV_STOP_TIME = 'm_tcm3_recv_stop_time'
    M_TCM3_STDDEVERR = 'm_tcm3_stddeverr'
    M_TCM3_XCOVERAGE = 'm_tcm3_xcoverage'
    M_TCM3_YCOVERAGE = 'm_tcm3_ycoverage'
    M_TCM3_ZCOVERAGE = 'm_tcm3_zcoverage'
    M_THERMAL_ACC_PRES = 'm_thermal_acc_pres'
    M_THERMAL_ACC_PRES_VOLTAGE = 'm_thermal_acc_pres_voltage'
    M_THERMAL_ACC_VOL = 'm_thermal_acc_vol'
    M_THERMAL_ENUF_ACC_VOL = 'm_thermal_enuf_acc_vol'
    M_THERMAL_PUMP = 'm_thermal_pump'
    M_THERMAL_UPDOWN = 'm_thermal_updown'
    M_THERMAL_VALVE = 'm_thermal_valve'
    M_TIME_TIL_WPT = 'm_time_til_wpt'
    M_TOT_BALLAST_PUMPED_ENERGY = 'm_tot_ballast_pumped_energy'
    M_TOT_HORZ_DIST = 'm_tot_horz_dist'
    M_TOT_NUM_INFLECTIONS = 'm_tot_num_inflections'
    M_TOT_ON_TIME = 'm_tot_on_time'
    M_VACUUM = 'm_vacuum'
    M_VEHICLE_TEMP = 'm_vehicle_temp'
    M_VEH_OVERHEAT = 'm_veh_overheat'
    M_VEH_TEMP = 'm_veh_temp'
    M_VMG_TO_WPT = 'm_vmg_to_wpt'
    M_VX_LMC = 'm_vx_lmc'
    M_VY_LMC = 'm_vy_lmc'
    M_WATER_COND = 'm_water_cond'
    M_WATER_DELTA_VX = 'm_water_delta_vx'
    M_WATER_DELTA_VY = 'm_water_delta_vy'
    M_WATER_DEPTH = 'm_water_depth'
    M_WATER_PRESSURE = 'm_water_pressure'
    M_WATER_TEMP = 'm_water_temp'
    M_WATER_VX = 'm_water_vx'
    M_WATER_VY = 'm_water_vy'
    M_WHY_STARTED = 'm_why_started'
    M_X_LMC = 'm_x_lmc'
    M_Y_LMC = 'm_y_lmc'
    X_LAST_WPT_LAT = 'x_last_wpt_lat'
    X_LAST_WPT_LON = 'x_last_wpt_lon'
    X_SYSTEM_CLOCK_ADJUSTED = 'x_system_clock_adjusted'
class EngineeringScienceRecoveredParticleKey(GliderParticleKey):
    """Column labels for the recovered science-controller engineering particle."""
    # science data made available via glider recovery
    SCI_M_DISK_FREE = 'sci_m_disk_free'
    SCI_M_DISK_USAGE = 'sci_m_disk_usage'
    SCI_M_FREE_HEAP = 'sci_m_free_heap'
    SCI_M_MIN_FREE_HEAP = 'sci_m_min_free_heap'
    SCI_M_MIN_SPARE_HEAP = 'sci_m_min_spare_heap'
    SCI_M_SCIENCE_ON = 'sci_m_science_on'
    SCI_CTD41CP_IS_INSTALLED = 'sci_ctd41cp_is_installed'
    SCI_BSIPAR_IS_INSTALLED = 'sci_bsipar_is_installed'
    SCI_FLBBCD_IS_INSTALLED = 'sci_flbbcd_is_installed'
    SCI_OXY3835_WPHASE_IS_INSTALLED = 'sci_oxy3835_wphase_is_installed'
    SCI_OXY4_IS_INSTALLED = 'sci_oxy4_is_installed'
    SCI_DVL_IS_INSTALLED = 'sci_dvl_is_installed'
    SCI_M_SPARE_HEAP = 'sci_m_spare_heap'
    SCI_REQD_HEARTBEAT = 'sci_reqd_heartbeat'
    SCI_SOFTWARE_VER = 'sci_software_ver'
    SCI_WANTS_COMMS = 'sci_wants_comms'
    SCI_WANTS_SURFACE = 'sci_wants_surface'
    SCI_X_DISK_FILES_REMOVED = 'sci_x_disk_files_removed'
    SCI_X_SENT_DATA_FILES = 'sci_x_sent_data_files'
class EngineeringMetadataParticleKey(BaseEnum):
    """Parameter names for the engineering metadata particle."""
    # metadata values taken from the glider file header, not science data
    GLIDER_ENG_FILENAME = 'glider_eng_filename'
    GLIDER_MISSION_NAME = 'glider_mission_name'
    GLIDER_ENG_FILEOPEN_TIME = 'glider_eng_fileopen_time'
class EngineeringTelemeteredParticleKey(GliderParticleKey):
    """Column labels for the telemetered engineering particle (c_* commanded, m_* measured)."""
    # engineering data made available via telemetry
    M_LAT = 'm_lat'
    M_LON = 'm_lon'
    C_BATTPOS = 'c_battpos'
    C_BALLAST_PUMPED = 'c_ballast_pumped'
    C_DE_OIL_VOL = 'c_de_oil_vol'
    C_DVL_ON = 'c_dvl_on'
    C_HEADING = 'c_heading'
    C_PITCH = 'c_pitch'
    C_WPT_LAT = 'c_wpt_lat'
    C_WPT_LON = 'c_wpt_lon'
    M_AIR_PUMP = 'm_air_pump'
    M_ALTITUDE = 'm_altitude'
    M_BALLAST_PUMPED = 'm_ballast_pumped'
    M_BATTERY = 'm_battery'
    M_BATTPOS = 'm_battpos'
    M_COULOMB_AMPHR = 'm_coulomb_amphr'
    M_COULOMB_AMPHR_TOTAL = 'm_coulomb_amphr_total'
    M_COULOMB_CURRENT = 'm_coulomb_current'
    M_DEPTH = 'm_depth'
    M_DE_OIL_VOL = 'm_de_oil_vol'
    M_FIN = 'm_fin'
    M_HEADING = 'm_heading'
    M_LITHIUM_BATTERY_RELATIVE_CHARGE = 'm_lithium_battery_relative_charge'
    M_PITCH = 'm_pitch'
    M_PRESSURE = 'm_pressure'
    M_SPEED = 'm_speed'
    M_RAW_ALTITUDE = 'm_raw_altitude'
    M_ROLL = 'm_roll'
    M_VACUUM = 'm_vacuum'
    M_WATER_DEPTH = 'm_water_depth'
    M_WATER_VX = 'm_water_vx'
    M_WATER_VY = 'm_water_vy'
class EngineeringScienceTelemeteredParticleKey(GliderParticleKey):
    """Column labels for the telemetered science-controller engineering particle."""
    # engineering data made available via telemetry
    SCI_M_DISK_FREE = 'sci_m_disk_free'
    SCI_M_DISK_USAGE = 'sci_m_disk_usage'
class GpsPositionParticleKey(GliderParticleKey):
    """Column labels for the glider GPS position particle (interp_* are derived by interpolation)."""
    M_GPS_LAT = 'm_gps_lat'
    M_GPS_LON = 'm_gps_lon'
    M_LAT = 'm_lat'
    M_LON = 'm_lon'
    INTERP_LAT = 'interp_lat'
    INTERP_LON = 'interp_lon'
class GpsPositionDataParticle(GliderParticle):
    """GPS position particle; excludes all shared time parameters from the output keys."""
    _data_particle_type = DataParticleType.GLIDER_GPS_POSITON
    science_parameters = GpsPositionParticleKey.science_parameter_list()
    # class-level key list, computed once when the class is defined
    keys_exclude_all_times = GpsPositionParticleKey.list()
    # Exclude all the "common" parameters, all we want is GPS data
    keys_exclude_all_times.remove(GliderParticleKey.M_PRESENT_SECS_INTO_MISSION)
    keys_exclude_all_times.remove(GliderParticleKey.M_PRESENT_TIME)
    keys_exclude_all_times.remove(GliderParticleKey.SCI_M_PRESENT_TIME)
    keys_exclude_all_times.remove(GliderParticleKey.SCI_M_PRESENT_SECS_INTO_MISSION)

    def _build_parsed_values(self):
        """
        Takes a GliderParser object and extracts GPS position data from the
        data dictionary and puts the data into a GPS position Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        # need to exclude sci times
        return self._parsed_values(GpsPositionDataParticle.keys_exclude_all_times)
class EngineeringTelemeteredDataParticle(GliderParticle):
    """Telemetered engineering particle; excludes the science-controller time parameters."""
    _data_particle_type = DataParticleType.GLIDER_ENG_TELEMETERED
    science_parameters = EngineeringTelemeteredParticleKey.science_parameter_list()
    # class-level key list, computed once when the class is defined
    keys_exclude_sci_times = EngineeringTelemeteredParticleKey.list()
    keys_exclude_sci_times.remove(GliderParticleKey.SCI_M_PRESENT_TIME)
    keys_exclude_sci_times.remove(GliderParticleKey.SCI_M_PRESENT_SECS_INTO_MISSION)

    def _build_parsed_values(self):
        """
        Takes a GliderParser object and extracts engineering data from the
        data dictionary and puts the data into a engineering Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        # need to exclude sci times
        return self._parsed_values(EngineeringTelemeteredDataParticle.keys_exclude_sci_times)
class EngineeringMetadataCommonDataParticle(DataParticle):
    """Shared builder for the engineering metadata particles (header-derived values)."""

    def _build_parsed_values(self):
        """
        Encode the metadata values (filename, mission name, fileopen time)
        that were extracted from the glider file header into particle values.

        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        return [self._encode_value(name, self.raw_data[name], str)
                for name in EngineeringMetadataParticleKey.list()]
class EngineeringMetadataDataParticle(EngineeringMetadataCommonDataParticle):
    """Telemetered engineering metadata particle."""
    _data_particle_type = DataParticleType.GLIDER_ENG_METADATA
class EngineeringMetadataRecoveredDataParticle(EngineeringMetadataCommonDataParticle):
    """Recovered engineering metadata particle."""
    _data_particle_type = DataParticleType.GLIDER_ENG_METADATA_RECOVERED
class EngineeringScienceTelemeteredDataParticle(GliderParticle):
    """Telemetered science-controller engineering particle; excludes the flight (m_*) time parameters."""
    _data_particle_type = DataParticleType.GLIDER_ENG_SCI_TELEMETERED
    science_parameters = EngineeringScienceTelemeteredParticleKey.science_parameter_list()
    # class-level key list, computed once when the class is defined
    keys_exclude_times = EngineeringScienceTelemeteredParticleKey.list()
    keys_exclude_times.remove(GliderParticleKey.M_PRESENT_TIME)
    keys_exclude_times.remove(GliderParticleKey.M_PRESENT_SECS_INTO_MISSION)

    def _build_parsed_values(self):
        """
        Takes a GliderParser object and extracts engineering data from the
        data dictionary and puts the data into a engineering Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        # need to exclude m times
        return self._parsed_values(EngineeringScienceTelemeteredDataParticle.keys_exclude_times)
class EngineeringRecoveredDataParticle(GliderParticle):
    """Recovered engineering particle; excludes the science-controller time parameters."""
    _data_particle_type = DataParticleType.GLIDER_ENG_RECOVERED
    science_parameters = EngineeringRecoveredParticleKey.science_parameter_list()
    # class-level key list, computed once when the class is defined
    keys_exclude_sci_times = EngineeringRecoveredParticleKey.list()
    keys_exclude_sci_times.remove(GliderParticleKey.SCI_M_PRESENT_TIME)
    keys_exclude_sci_times.remove(GliderParticleKey.SCI_M_PRESENT_SECS_INTO_MISSION)

    def _build_parsed_values(self):
        """
        Takes a GliderParser object and extracts engineering data from the
        data dictionary and puts the data into a engineering Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        # need to exclude sci times
        return self._parsed_values(EngineeringRecoveredDataParticle.keys_exclude_sci_times)
class EngineeringScienceRecoveredDataParticle(GliderParticle):
    """Recovered science-controller engineering particle; excludes the flight (m_*) time parameters."""
    _data_particle_type = DataParticleType.GLIDER_ENG_SCI_RECOVERED
    science_parameters = EngineeringScienceRecoveredParticleKey.science_parameter_list()
    # class-level key list, computed once when the class is defined
    keys_exclude_times = EngineeringScienceRecoveredParticleKey.list()
    keys_exclude_times.remove(GliderParticleKey.M_PRESENT_TIME)
    keys_exclude_times.remove(GliderParticleKey.M_PRESENT_SECS_INTO_MISSION)

    def _build_parsed_values(self):
        """
        Takes a GliderParser object and extracts engineering data from the
        data dictionary and puts the data into a engineering Data Particle.
        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        # need to exclude m times
        return self._parsed_values(EngineeringScienceRecoveredDataParticle.keys_exclude_times)
class NutnrMParticleKey(GliderParticleKey):
    """Column labels for NUTNR (SUNA nitrate) science data."""
    SCI_SUNA_TIMESTAMP = 'sci_suna_timestamp'
    SCI_SUNA_RECORD_OFFSET = 'sci_suna_record_offset'
    SCI_SUNA_NITRATE_UM = 'sci_suna_nitrate_um'
    SCI_SUNA_NITRATE_MG = 'sci_suna_nitrate_mg'
class NutnrMDataParticle(GliderParticle):
    """NUTNR (SUNA nitrate) instrument particle built from a glider data row."""

    _data_particle_type = DataParticleType.NUTNR_M_GLIDER_INSTRUMENT
    science_parameters = NutnrMParticleKey.science_parameter_list()

    def _build_parsed_values(self):
        """
        Extract the NUTNR science values from the glider data dictionary.

        @returns A list of dictionaries of particle data
        @throws SampleException if the data is not a glider data dictionary
        """
        wanted_keys = NutnrMParticleKey.list()
        return self._parsed_values(wanted_keys)
# noinspection PyPackageRequirements
class GliderParser(SimpleParser):
    """
    GliderParser parses a Slocum Electric Glider data file that has been
    converted to ASCII from binary and merged with its corresponding flight or
    science data file, and holds the self describing header data in a header
    dictionary and the data in a data dictionary using the column labels as the
    dictionary keys. These dictionaries are used to build the particles.
    """

    def __init__(self,
                 config,
                 stream_handle,
                 exception_callback):
        """
        @param config: parser configuration; must provide the particle class
            (either directly or via a subclass setting _particle_class)
        @param stream_handle: open handle to the ASCII glider data file
        @param exception_callback: callback used to report recoverable errors
        @throws ConfigurationException if no particle class was configured
        @throws DatasetParserException if the file header is malformed
        """
        self._record_buffer = []  # holds tuples of (record, state)
        self._header_dict = {}

        # only initialize particle class to None if it does not already exist
        if not hasattr(self, '_particle_class'):
            self._particle_class = None
        self.num_columns = None

        super(GliderParser, self).__init__(config,
                                           stream_handle,
                                           exception_callback)

        if self._particle_class is None:
            msg = 'particle_class was not defined in configuration %s' % config
            log.warn(msg)
            raise ConfigurationException(msg)

        # Read and store the configuration found in the 14 line header
        self._read_file_definition()
        # Read and store the information found in the 3 lines of column labels
        self._read_column_labels()

    def _read_file_definition(self):
        """
        Read the first 14 lines of the data file for the file definitions, values
        are colon delimited key value pairs. The pairs are parsed and stored in
        header_dict member.
        @throws DatasetParserException if the header row count or label line
            count is not what this parser requires, or the file is too short
        """
        row_count = 0

        #
        # THIS METHOD ASSUMES A 14 ROW HEADER
        # If the number of header row lines in the glider ASCII input file changes from 14,
        # this method will NOT WORK
        num_hdr_lines = 14

        header_pattern = r'(.*): (.*)$'
        header_re = re.compile(header_pattern)

        line = self._stream_handle.readline()

        while line and row_count < num_hdr_lines:

            match = header_re.match(line)

            if match:
                key = match.group(1)
                value = match.group(2)
                value = value.strip()

                # update num_hdr_lines based on the header info.
                if key == 'num_ascii_tags':
                    # this key has a required value of 14, otherwise we don't know how to parse the file
                    if int(value) != num_hdr_lines:
                        raise DatasetParserException("Header must be %d rows, but it is %s" % (num_hdr_lines, value))

                elif key == 'num_label_lines':
                    # this key has a required value of 3, otherwise we don't know how to parse the file
                    if int(value) != 3:
                        raise DatasetParserException("There must be 3 Label lines from the header for this parser")

                elif key == 'sensors_per_cycle':
                    # save for future use
                    self._header_dict[key] = int(value)

                elif key in ['filename_label', 'mission_name', 'fileopen_time']:
                    # create a dictionary of these 3 key/value pairs strings from
                    # the header rows that need to be saved for future use
                    self._header_dict[key] = value

            else:
                log.warn("Failed to parse header row: %s.", line)

            row_count += 1
            # only read the header lines in this method so make sure we stop
            if row_count < num_hdr_lines:
                line = self._stream_handle.readline()

        if row_count < num_hdr_lines:
            log.error('Not enough data lines for a full header')
            raise DatasetParserException('Not enough data lines for a full header')

    def _read_column_labels(self):
        """
        Read the next three lines to populate column data.

        1st Row (row 15 of file) == labels
        2nd Row (row 16 of file) == units
        3rd Row (row 17 of file) == column byte size

        Currently we are only able to support 3 label line rows.
        @throws DatasetParserException if m_present_time is missing or the
            three label rows disagree on column count
        """
        # read the label line (should be at row 15 of the file at this point)
        label_list = self._stream_handle.readline().strip().split()
        self.num_columns = len(label_list)
        self._header_dict['labels'] = label_list

        # the m_present_time label is required to generate particles, raise an exception if it is not found
        if GliderParticleKey.M_PRESENT_TIME not in label_list:
            raise DatasetParserException('The m_present_time label has not been found, which means the timestamp '
                                         'cannot be determined for any particles')

        # read the units line (should be at row 16 of the file at this point)
        data_unit_list = self._stream_handle.readline().strip().split()
        data_unit_list_length = len(data_unit_list)

        # read the number of bytes line (should be at row 17 of the file at this point)
        num_of_bytes_list = self._stream_handle.readline().strip().split()
        num_of_bytes_list_length = len(num_of_bytes_list)

        # number of labels for name, unit, and number of bytes must match
        if data_unit_list_length != self.num_columns or self.num_columns != num_of_bytes_list_length:
            raise DatasetParserException("The number of columns in the labels row: %d, units row: %d, "
                                         "and number of bytes row: %d are not equal."
                                         % (self.num_columns, data_unit_list_length, num_of_bytes_list_length))

        # if the number of columns from the header does not match that in the data, but the rest of the file has
        # the same number of columns in each line this is not a fatal error, just parse the columns that are present
        if self._header_dict['sensors_per_cycle'] != self.num_columns:
            msg = 'sensors_per_cycle from header %d does not match the number of data label columns %d' % \
                  (self._header_dict['sensors_per_cycle'], self.num_columns)
            self._exception_callback(SampleException(msg))

        log.debug("Label count: %d", self.num_columns)

    # noinspection PyPackageRequirements
    def _read_data(self, data_record):
        """
        Split one ASCII data record into a {column_label: value} dictionary.

        @param data_record: one whitespace-delimited data line from the file
        @returns dict mapping each column label to its (string) value
        @throws DatasetParserException if the record's column count does not
            match the label row
        """
        data_dict = {}
        data_labels = self._header_dict['labels']
        data = data_record.strip().split()

        if self.num_columns != len(data):
            err_msg = "GliderParser._read_data(): Num Of Columns NOT EQUAL to Num of Data items: " + \
                      "Expected Columns= %s vs Actual Data= %s" % (self.num_columns, len(data))
            log.error(err_msg)
            raise DatasetParserException(err_msg)

        # extract record to dictionary
        for ii, value in enumerate(data):
            label = data_labels[ii]
            data_dict[label] = value

        return data_dict

    def parse_file(self):
        """
        Create particles from the data in the file.

        Each data row that contains at least one non-NaN science value for
        the configured particle class produces one particle, timestamped
        from the row's m_present_time column.
        """
        # the header was already read in the init, start at the first sample line
        for line in self._stream_handle:

            # create the dictionary of key/value pairs composed of the labels and the values from the
            # record being parsed
            # ex: data_dict = {'sci_bsipar_temp':10.67, n1, n2, nn}
            data_dict = self._read_data(line)

            if GliderParser._has_science_data(data_dict, self._particle_class):
                # create the timestamp
                timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))
                # create the particle
                self._record_buffer.append(self._extract_sample(
                    self._particle_class, None, data_dict, internal_timestamp=timestamp))

    @staticmethod
    def _has_science_data(data_dict, particle_class):
        """
        Examine the data_dict to see if it contains at least one non-NaN value
        for any of the particle class' science parameters.

        @param data_dict: {column_label: string_value} for one data row
        @param particle_class: particle class whose science_parameters to check
        @returns True if a usable science value was found, False otherwise
        """
        return_value = False
        # initialize so the diagnostic log below is safe even when a loop
        # body never executes (original code could raise NameError there)
        key = None
        value = None

        # iterate over whichever collection is smaller for efficiency
        if len(particle_class.science_parameters) < len(data_dict):
            for key in particle_class.science_parameters:
                value = data_dict.get(key, None)
                if value is not None and not isnan(float(value)):
                    return_value = True
                    break
        else:
            for key, value in data_dict.iteritems():
                # test membership before converting so non-science, non-numeric
                # columns cannot raise a spurious ValueError
                if key in particle_class.science_parameters and not isnan(float(value)):
                    return_value = True
                    break

        # only log the failure case; the original logged "failed"
        # unconditionally, even when science data had been found
        if not return_value and particle_class._data_particle_type == 'glider_eng_telemetered':
            log.info("GliderParser._has_science_data failed: key=[%s] value=[%s]", key, value)

        return return_value
class EngineeringClassKey(BaseEnum):
    """Configuration-dictionary keys naming the engineering particle classes."""
    METADATA = 'engineering_metadata'
    DATA = 'engineering_data'
    SCIENCE = 'engineering_science'
    GPS = 'gps_position'
class GliderEngineeringParser(GliderParser):
def __init__(self,
config,
stream_handle,
exception_callback):
# set the class types from the config
particle_class_dict = config.get(DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT)
if particle_class_dict is not None:
try:
# get the particle module
module = __import__('mi.dataset.parser.glider',
fromlist=[particle_class_dict[EngineeringClassKey.METADATA],
particle_class_dict[EngineeringClassKey.DATA],
particle_class_dict[EngineeringClassKey.SCIENCE]])
# get the class from the string name of the class
self._metadata_class = getattr(module, particle_class_dict[EngineeringClassKey.METADATA])
self._particle_class = getattr(module, particle_class_dict[EngineeringClassKey.DATA])
self._science_class = getattr(module, particle_class_dict[EngineeringClassKey.SCIENCE])
self._gps_class = getattr(module, particle_class_dict[EngineeringClassKey.GPS])
except AttributeError:
raise ConfigurationException('Config provided a class which does not exist %s' % config)
else:
raise ConfigurationException('Missing particle_classes_dict in config')
self._metadata_sent = False
super(GliderEngineeringParser, self).__init__(config,
stream_handle,
exception_callback)
def parse_file(self):
"""
Create particles out of the data in the file
"""
# Create the gps position interpolator
gps_interpolator = GpsInterpolator()
# the header was already read in the init, start at the samples
for data_record in self._stream_handle:
# create the dictionary of key/value pairs composed of the labels and the values from the
# record being parsed
data_dict = self._read_data(data_record)
timestamp = ntplib.system_to_ntp_time(float(data_dict[GliderParticleKey.M_PRESENT_TIME]))
# handle this particle if it is an engineering metadata particle
# this is the glider_eng_metadata* particle
if not self._metadata_sent:
self._record_buffer.append(self.handle_metadata_particle(timestamp))
# check for the presence of engineering data in the raw data row before continuing
# This is the glider_eng* particle
if GliderParser._has_science_data(data_dict, self._particle_class):
self._record_buffer.append(self._extract_sample(
self._particle_class, None, data_dict, internal_timestamp=timestamp))
# check for the presence of GPS data in the raw data row before continuing
# This is the glider_gps_position particle
if GliderParser._has_science_data(data_dict, self._gps_class):
gps_interpolator.append_to_buffer(
self._extract_sample(self._gps_class, None, data_dict, internal_timestamp=timestamp))
else:
log.info("GPS data no-find: ")
# check for the presence of science particle data in the raw data row before continuing
# This is the glider_eng_sci* particle
if GliderParser._has_science_data(data_dict, self._science_class):
self._record_buffer.append(self._extract_sample(
self._science_class, None, data_dict, internal_timestamp=timestamp))
# If there are GPS entries, interpolate them if they contain gps lat/lon values
if gps_interpolator.get_size() > 0:
self._record_buffer.extend(gps_interpolator.process_and_get_objects())
def handle_metadata_particle(self, timestamp):
    """
    Build the one-time engineering metadata particle and mark it as sent
    so the caller never produces a second one.
    :param timestamp: timestamp to put on the particle
    :return: the extracted metadata particle
    """
    # Map the header-file labels onto the output parameter names.
    metadata_fields = {
        'glider_eng_filename': self._header_dict.get('filename_label'),
        'glider_mission_name': self._header_dict.get('mission_name'),
        'glider_eng_fileopen_time': self._header_dict.get('fileopen_time'),
    }
    # Once flagged, parse_file() skips this branch for all later records.
    self._metadata_sent = True
    return self._extract_sample(self._metadata_class, None, metadata_fields,
                                internal_timestamp=timestamp)
class GpsInterpolator(object):
    """
    Buffers glider_gps_position particles and fills in interpolated
    latitude/longitude for records that lack a real GPS fix.

    Usage: add particles with append_to_buffer(), then call
    process_and_get_objects() once to interpolate and retrieve them all.
    """

    def __init__(self):
        # buffered glider_gps_position particles, in arrival order
        self._glider_gps_position_buffer = []
        # buffer indices of the first and last entries holding a GPS fix
        self._start_gps = self._end_gps = None
        # known gps lat/lon/time samples used as interpolation anchors
        self.gps_lat = []
        self.gps_lon = []
        self.gps_time = []
        # timestamps of every buffered particle (interpolation targets)
        self.particle_ts = []

    def _get_time(self, glider_contents):
        """Return the particle's preferred timestamp value."""
        return glider_contents.get(glider_contents.get('preferred_timestamp'))

    def _get_value(self, glider_values, key):
        """Return the value stored under *key* in a particle's values list,
        or None when the key is absent."""
        for glider_value_entry in glider_values:
            if glider_value_entry['value_id'] == key:
                return glider_value_entry['value']
        return None

    def _has_gps_positioning(self, glider_gps_position):
        """Truthy when the particle carries both m_gps_lat and m_gps_lon."""
        return self._get_value(glider_gps_position._values, 'm_gps_lat') and \
            self._get_value(glider_gps_position._values, 'm_gps_lon')

    def _lacks_interpolate_entry_positions(self):
        # two entries are required for the start and end gps positions to
        # be populated; with fewer fixes interpolation is impossible
        return self._start_gps is None or self._end_gps is None

    def _put_value(self, glider_values, value, key):
        """Overwrite the value stored under *key* in a particle's values list."""
        for glider_value_entry in glider_values:
            if glider_value_entry['value_id'] == key:
                glider_value_entry['value'] = value
                break

    def _set_start_and_end_gps_position_entries(self):
        """Record the buffer indices of the first and last GPS fixes."""
        for idx, glider_gps_position in enumerate(self._glider_gps_position_buffer):
            if self._has_gps_positioning(glider_gps_position):
                # only populate the end position once the start is set, as
                # interpolation only works when the two anchors differ
                if self._start_gps is None:
                    self._start_gps = idx
                else:
                    self._end_gps = idx

    def append_to_buffer(self, glider_gps_position):
        """Add a glider_gps_position particle to the buffer."""
        self._glider_gps_position_buffer.append(glider_gps_position)

    def get_size(self):
        """Return the number of buffered particles."""
        return len(self._glider_gps_position_buffer)

    def process_and_get_objects(self):
        """
        Interpolate (and extrapolate) lat/lon onto every buffered particle,
        then return the buffer.  When fewer than two GPS fixes exist the
        buffer is returned untouched.
        """
        self._set_start_and_end_gps_position_entries()
        # if the set of entries lacks gps lat/lon data, return it as-is
        if self._lacks_interpolate_entry_positions():
            return self._glider_gps_position_buffer

        # Iterate the buffer to extract the values for interpolation
        for idx, glider_gps_position in enumerate(self._glider_gps_position_buffer):
            # Capture all glider times for potential extrapolation
            glider_time = self._get_time(glider_gps_position.contents)
            self.particle_ts.append(glider_time)
            # Anchors only come from within the [start, end] fix range
            if self._start_gps <= idx <= self._end_gps:
                if self._has_gps_positioning(glider_gps_position):
                    self.gps_lat.append(self._get_value(glider_gps_position._values,
                                                        GpsPositionParticleKey.M_GPS_LAT))
                    self.gps_lon.append(self._get_value(glider_gps_position._values,
                                                        GpsPositionParticleKey.M_GPS_LON))
                    self.gps_time.append(glider_time)

        # Linear interpolation; fill_value='extrapolate' extends beyond the
        # anchor range so every particle timestamp receives a value.
        interp_function = interpolate.interp1d(self.gps_time, self.gps_lat, kind='linear',
                                               axis=0, copy=False, fill_value='extrapolate')
        interpolated_gps_lat = interp_function(self.particle_ts)
        interp_function = interpolate.interp1d(self.gps_time, self.gps_lon, kind='linear',
                                               axis=0, copy=False, fill_value='extrapolate')
        interpolated_gps_lon = interp_function(self.particle_ts)

        # Populate the interpolated gps_lat,gps_lon onto every buffered object
        for i in range(len(self._glider_gps_position_buffer)):
            self._put_value(self._glider_gps_position_buffer[i]._values, interpolated_gps_lat[i],
                            GpsPositionParticleKey.INTERP_LAT)
            self._put_value(self._glider_gps_position_buffer[i]._values, interpolated_gps_lon[i],
                            GpsPositionParticleKey.INTERP_LON)

        return self._glider_gps_position_buffer
| |
"""
click._termui_impl
~~~~~~~~~~~~~~~~~~
This module contains implementations for the termui module. To keep the
import time of Click down, some infrequently used functionality is placed
in this module and only imported as needed.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import time
import math
from ._compat import _default_text_stdout, range_type, PY2, isatty, \
open_stream, strip_ansi, term_len, get_best_encoding, WIN, int_types
from .utils import echo
from .exceptions import ClickException
if os.name == 'nt':
BEFORE_BAR = '\r'
AFTER_BAR = '\n'
else:
BEFORE_BAR = '\r\033[?25l'
AFTER_BAR = '\033[?25h\n'
def _length_hint(obj):
"""Returns the length hint of an object."""
try:
return len(obj)
except TypeError:
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return None
try:
hint = get_hint(obj)
except TypeError:
return None
if hint is NotImplemented or \
not isinstance(hint, int_types) or \
hint < 0:
return None
return hint
class ProgressBar(object):
    """Render a textual progress bar for an iterable to a stream.

    Instances must be used as a context manager (``with`` block) and are
    themselves iterators over the wrapped iterable.  When the target
    stream is not a tty, rendering is suppressed and only the label is
    printed.
    """

    def __init__(self, iterable, length=None, fill_char='#', empty_char=' ',
                 bar_template='%(bar)s', info_sep=' ', show_eta=True,
                 show_percent=None, show_pos=False, item_show_func=None,
                 label=None, file=None, color=None, width=30):
        self.fill_char = fill_char
        self.empty_char = empty_char
        self.bar_template = bar_template
        self.info_sep = info_sep
        self.show_eta = show_eta
        self.show_percent = show_percent
        self.show_pos = show_pos
        self.item_show_func = item_show_func
        self.label = label or ''
        if file is None:
            file = _default_text_stdout()
        self.file = file
        self.color = color
        self.width = width
        # width == 0 requests auto-sizing to the terminal on every render
        self.autowidth = width == 0

        if length is None:
            # fall back to the iterable's (length-hinted) size
            length = _length_hint(iterable)
        if iterable is None:
            if length is None:
                raise TypeError('iterable or length is required')
            iterable = range_type(length)
        self.iter = iter(iterable)
        self.length = length
        self.length_known = length is not None
        self.pos = 0
        # sliding window of average seconds-per-item samples for the ETA
        self.avg = []
        self.start = self.last_eta = time.time()
        self.eta_known = False
        self.finished = False
        self.max_width = None
        self.entered = False
        self.current_item = None
        self.is_hidden = not isatty(self.file)
        self._last_line = None

    def __enter__(self):
        self.entered = True
        self.render_progress()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.render_finish()

    def __iter__(self):
        if not self.entered:
            raise RuntimeError('You need to use progress bars in a with block.')
        self.render_progress()
        return self

    def render_finish(self):
        """Emit the final newline / cursor-restore sequence."""
        if self.is_hidden:
            return
        self.file.write(AFTER_BAR)
        self.file.flush()

    @property
    def pct(self):
        """Completed fraction in [0.0, 1.0]."""
        if self.finished:
            return 1.0
        return min(self.pos / (float(self.length) or 1), 1.0)

    @property
    def time_per_iteration(self):
        """Average seconds per item over the sampled window (0.0 if none)."""
        if not self.avg:
            return 0.0
        return sum(self.avg) / float(len(self.avg))

    @property
    def eta(self):
        """Estimated seconds remaining; 0.0 when unknown or finished."""
        if self.length_known and not self.finished:
            return self.time_per_iteration * (self.length - self.pos)
        return 0.0

    def format_eta(self):
        """Format the ETA as ``[Dd ]HH:MM:SS``, or '' when unknown."""
        if not self.eta_known:
            return ''
        # Use integer arithmetic throughout.  The previous true-division
        # version left a float residue in `t`, so the `t > 0` day check
        # was always true and every ETA was rendered with a bogus
        # '0d ' prefix.
        t = int(self.eta) + 1
        seconds = t % 60
        t //= 60
        minutes = t % 60
        t //= 60
        hours = t % 24
        t //= 24
        if t > 0:
            return '%dd %02d:%02d:%02d' % (t, hours, minutes, seconds)
        return '%02d:%02d:%02d' % (hours, minutes, seconds)

    def format_pos(self):
        """Position as 'pos' or 'pos/length' when the length is known."""
        pos = str(self.pos)
        if self.length_known:
            pos += '/%s' % self.length
        return pos

    def format_pct(self):
        """Percentage padded to four characters, e.g. ' 42%'."""
        return ('% 4d%%' % int(self.pct * 100))[1:]

    def format_progress_line(self):
        """Compose the full 'label bar info' line, without trailing blanks."""
        show_percent = self.show_percent

        info_bits = []
        if self.length_known:
            bar_length = int(self.pct * self.width)
            bar = self.fill_char * bar_length
            bar += self.empty_char * (self.width - bar_length)
            if show_percent is None:
                show_percent = not self.show_pos
        else:
            if self.finished:
                bar = self.fill_char * self.width
            else:
                # unknown length: animate a single marker bouncing along
                # a cosine path instead of a fill bar
                bar = list(self.empty_char * (self.width or 1))
                if self.time_per_iteration != 0:
                    bar[int((math.cos(self.pos * self.time_per_iteration)
                             / 2.0 + 0.5) * self.width)] = self.fill_char
                bar = ''.join(bar)

        if self.show_pos:
            info_bits.append(self.format_pos())
        if show_percent:
            info_bits.append(self.format_pct())
        if self.show_eta and self.eta_known and not self.finished:
            info_bits.append(self.format_eta())
        if self.item_show_func is not None:
            item_info = self.item_show_func(self.current_item)
            if item_info is not None:
                info_bits.append(item_info)

        return (self.bar_template % {
            'label': self.label,
            'bar': bar,
            'info': self.info_sep.join(info_bits)
        }).rstrip()

    def render_progress(self):
        """Draw (or redraw) the bar; no-op when the line has not changed."""
        from .termui import get_terminal_size
        nl = False

        if self.is_hidden:
            # not a terminal: print only the label, once, on its own line
            buf = [self.label]
            nl = True
        else:
            buf = []
            # Update width in case the terminal has been resized
            if self.autowidth:
                old_width = self.width
                self.width = 0
                clutter_length = term_len(self.format_progress_line())
                new_width = max(0, get_terminal_size()[0] - clutter_length)
                if new_width < old_width:
                    # terminal shrank: blank out the previously drawn line
                    buf.append(BEFORE_BAR)
                    buf.append(' ' * self.max_width)
                    self.max_width = new_width
                self.width = new_width

            clear_width = self.width
            if self.max_width is not None:
                clear_width = self.max_width

            buf.append(BEFORE_BAR)
            line = self.format_progress_line()
            line_len = term_len(line)
            if self.max_width is None or self.max_width < line_len:
                self.max_width = line_len
            buf.append(line)
            # pad with spaces so leftovers of a longer prior line are erased
            buf.append(' ' * (clear_width - line_len))
        line = ''.join(buf)

        # Render the line only if it changed.
        if line != self._last_line:
            self._last_line = line
            echo(line, file=self.file, color=self.color, nl=nl)
            self.file.flush()

    def make_step(self, n_steps):
        """Advance the position and refresh the ETA sample window."""
        self.pos += n_steps
        if self.length_known and self.pos >= self.length:
            self.finished = True

        # resample the per-item average at most once per second
        if (time.time() - self.last_eta) < 1.0:
            return

        self.last_eta = time.time()
        self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)]

        self.eta_known = self.length_known

    def update(self, n_steps):
        self.make_step(n_steps)
        self.render_progress()

    def finish(self):
        self.eta_known = False
        self.current_item = None
        self.finished = True

    def next(self):
        if self.is_hidden:
            return next(self.iter)
        try:
            rv = next(self.iter)
            self.current_item = rv
        except StopIteration:
            self.finish()
            self.render_progress()
            raise StopIteration()
        else:
            self.update(1)
            return rv

    # Define both iterator-protocol entry points unconditionally: Python 2
    # looks up next(), Python 3 __next__().  The extra attribute on either
    # version is harmless and avoids conditional class-body surgery on PY2.
    __next__ = next
def pager(text, color=None):
    """Decide what method to use for paging through text.

    Probes, in order: an interactive tty on both stdin/stdout, the PAGER
    environment variable, dumb terminals, Windows/OS2, ``less``, then
    ``more``, finally falling back to plain unpaged output.
    """
    stdout = _default_text_stdout()
    # Paging only makes sense when both ends are interactive terminals.
    if not isatty(sys.stdin) or not isatty(stdout):
        return _nullpager(stdout, text, color)
    pager_cmd = (os.environ.get('PAGER', None) or '').strip()
    if pager_cmd:
        if WIN:
            # Windows shells cannot reliably pipe; go through a temp file.
            return _tempfilepager(text, pager_cmd, color)
        return _pipepager(text, pager_cmd, color)
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return _nullpager(stdout, text, color)
    if WIN or sys.platform.startswith('os2'):
        return _tempfilepager(text, 'more <', color)
    # Probe for `less` on PATH: exit status 0 means it ran.
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return _pipepager(text, 'less', color)

    import tempfile
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    try:
        # Probe `more` by paging an empty temp file.
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return _pipepager(text, 'more', color)
        return _nullpager(stdout, text, color)
    finally:
        os.unlink(filename)
def _pipepager(text, cmd, color):
    """Page through *text* by piping it to the external command *cmd*.

    Invoking a pager through a pipe might support colors; that is probed
    for ``less`` below.
    """
    import subprocess
    env = dict(os.environ)

    # If we're piping to less we might support colors under the
    # condition that -R/-r is in effect (via the command line or $LESS).
    cmd_detail = cmd.rsplit('/', 1)[-1].split()
    if color is None and cmd_detail[0] == 'less':
        # NOTE(review): $LESS and the argv flags are concatenated without a
        # separator; only 'r'/'R' membership is tested below, so the missing
        # space appears harmless — confirm that is intended.
        less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
        if not less_flags:
            # no flags anywhere: force -R so ANSI colors pass through
            env['LESS'] = '-R'
            color = True
        elif 'r' in less_flags or 'R' in less_flags:
            color = True

    if not color:
        text = strip_ansi(text)

    c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                         env=env)
    encoding = get_best_encoding(c.stdin)
    try:
        c.stdin.write(text.encode(encoding, 'replace'))
        c.stdin.close()
    except (IOError, KeyboardInterrupt):
        # pager exited early (e.g. `q` in less); nothing left to feed it
        pass

    # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
    # search or other commands inside less).
    #
    # That means when the user hits ^C, the parent process (click) terminates,
    # but less is still alive, paging the output and messing up the terminal.
    #
    # If the user wants to make the pager exit on ^C, they should set
    # `LESS='-K'`. It's not our decision to make.
    while True:
        try:
            c.wait()
        except KeyboardInterrupt:
            pass
        else:
            break
def _tempfilepager(text, cmd, color):
    """Page through *text* by invoking *cmd* on a temporary file."""
    import tempfile
    # mkstemp creates the file atomically; tempfile.mktemp is deprecated
    # and vulnerable to a symlink race between name generation and open.
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    if not color:
        text = strip_ansi(text)
    encoding = get_best_encoding(sys.stdout)
    with open_stream(filename, 'wb')[0] as f:
        f.write(text.encode(encoding))
    try:
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def _nullpager(stream, text, color):
"""Simply print unformatted text. This is the ultimate fallback."""
if not color:
text = strip_ansi(text)
stream.write(text)
class Editor(object):
    """Launch an external text editor on a file or on a string of text."""

    def __init__(self, editor=None, env=None, require_save=True,
                 extension='.txt'):
        # explicit editor command; None means probe env/platform defaults
        self.editor = editor
        # extra environment variables for the editor process
        self.env = env
        # when True, edit() returns None if the user never saved
        self.require_save = require_save
        # suffix for the temporary file handed to the editor
        self.extension = extension

    def get_editor(self):
        """Return the editor command: explicit setting, then $VISUAL/$EDITOR,
        then a platform default."""
        if self.editor is not None:
            return self.editor
        for key in 'VISUAL', 'EDITOR':
            rv = os.environ.get(key)
            if rv:
                return rv
        if WIN:
            return 'notepad'
        for editor in 'vim', 'nano':
            if os.system('which %s >/dev/null 2>&1' % editor) == 0:
                return editor
        return 'vi'

    def edit_file(self, filename):
        """Run the editor on *filename*; raise ClickException on failure."""
        import subprocess
        editor = self.get_editor()
        if self.env:
            environ = os.environ.copy()
            environ.update(self.env)
        else:
            environ = None
        try:
            c = subprocess.Popen('%s "%s"' % (editor, filename),
                                 env=environ, shell=True)
            exit_code = c.wait()
            if exit_code != 0:
                raise ClickException('%s: Editing failed!' % editor)
        except OSError as e:
            raise ClickException('%s: Editing failed: %s' % (editor, e))

    def edit(self, text):
        """Edit *text* in a temporary file and return the edited result.

        Returns None when require_save is set and the file's mtime never
        changed (i.e. the user quit without saving).
        """
        import tempfile

        text = text or ''
        if text and not text.endswith('\n'):
            text += '\n'

        fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
        try:
            if WIN:
                # BOM + CRLF so notepad and friends render correctly
                encoding = 'utf-8-sig'
                text = text.replace('\n', '\r\n')
            else:
                encoding = 'utf-8'
            text = text.encode(encoding)

            # context managers guarantee the descriptors are closed even
            # when writing or reading raises
            with os.fdopen(fd, 'wb') as f:
                f.write(text)

            timestamp = os.path.getmtime(name)

            self.edit_file(name)

            # an unchanged mtime means the user quit without saving
            if self.require_save \
               and os.path.getmtime(name) == timestamp:
                return None

            with open(name, 'rb') as f:
                rv = f.read()
            return rv.decode('utf-8-sig').replace('\r\n', '\n')
        finally:
            os.unlink(name)
def open_url(url, wait=False, locate=False):
    """Open *url* with the platform's default handler.

    :param url: the URL or file:// path to open.
    :param wait: wait for the launched program to exit where supported.
    :param locate: open the containing folder / file manager instead of
                   the file itself.
    :return: the launcher's exit code where available, else 0 (handled)
             or 1 (nothing could handle it).
    """
    import subprocess

    def _unquote_file(url):
        # ``unquote`` lives in urllib.parse on Python 3; the old code
        # imported the bare ``urllib`` module in both branches and called
        # ``urllib.unquote``, which does not exist on Python 3.
        try:
            from urllib.parse import unquote
        except ImportError:
            from urllib import unquote
        if url.startswith('file://'):
            url = unquote(url[7:])
        return url

    if sys.platform == 'darwin':
        args = ['open']
        if wait:
            args.append('-W')
        if locate:
            args.append('-R')
        args.append(_unquote_file(url))
        null = open('/dev/null', 'w')
        try:
            return subprocess.Popen(args, stderr=null).wait()
        finally:
            null.close()
    elif WIN:
        if locate:
            url = _unquote_file(url)
            args = 'explorer /select,"%s"' % _unquote_file(
                url.replace('"', ''))
        else:
            args = 'start %s "" "%s"' % (
                wait and '/WAIT' or '', url.replace('"', ''))
        return os.system(args)

    try:
        if locate:
            url = os.path.dirname(_unquote_file(url)) or '.'
        else:
            url = _unquote_file(url)
        c = subprocess.Popen(['xdg-open', url])
        if wait:
            return c.wait()
        return 0
    except OSError:
        # xdg-open missing: last resort for plain http(s) URLs only
        if url.startswith(('http://', 'https://')) and not locate and not wait:
            import webbrowser
            webbrowser.open(url)
            return 0
        return 1
def _translate_ch_to_exc(ch):
if ch == '\x03':
raise KeyboardInterrupt()
if ch == '\x04':
raise EOFError()
if WIN:
    import msvcrt

    def getchar(echo):
        """Read a single keypress via the Windows console API.

        ^C / ^D raise KeyboardInterrupt / EOFError.  On Python 2 the
        result is decoded from the console encoding.
        """
        rv = msvcrt.getch()
        if echo:
            # msvcrt has no putchar(); putch() is the correct echo call.
            msvcrt.putch(rv)
        _translate_ch_to_exc(rv)
        if PY2:
            enc = getattr(sys.stdin, 'encoding', None)
            if enc is not None:
                rv = rv.decode(enc, 'replace')
            else:
                rv = rv.decode('cp1252', 'replace')
        return rv
else:
    import tty
    import termios

    def getchar(echo):
        """Read a single keypress from the controlling terminal in raw mode."""
        if not isatty(sys.stdin):
            # stdin is redirected: read the keypress from the tty directly
            f = open('/dev/tty')
            fd = f.fileno()
        else:
            fd = sys.stdin.fileno()
            f = None
        try:
            old_settings = termios.tcgetattr(fd)
            try:
                tty.setraw(fd)
                # read up to 32 bytes so multi-byte escape sequences
                # arrive as one chunk
                ch = os.read(fd, 32)
                if echo and isatty(sys.stdout):
                    # NOTE(review): `ch` is bytes from os.read; writing bytes
                    # to a text-mode stdout raises TypeError on Python 3 —
                    # confirm which Python versions this echo path must
                    # support before changing it.
                    sys.stdout.write(ch)
            finally:
                # always restore the terminal settings
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
                sys.stdout.flush()
            if f is not None:
                f.close()
        except termios.error:
            pass
        _translate_ch_to_exc(ch)
        return ch.decode(get_best_encoding(sys.stdin), 'replace')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.