gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
###############################################################################
# Caleydo - Visualization for Molecular Biology - http://caleydo.org
# Copyright (c) The Caleydo Team. All rights reserved.
# Licensed under the new BSD license, available at http://caleydo.org/license
###############################################################################
from gevent import monkey
monkey.patch_all()
import logging.config # noqa: E402
import logging # noqa: E402
from . import config # noqa: E402
# set configured registry
def _get_config():
    """Return the 'phovea_server' view of the global configuration, initializing it lazily."""
    if config._c is None:
        # first access: build the global configuration object
        config._initialize()
    return config.view('phovea_server')
# added for testing
def _get_config_hdf():
    """Return the 'phovea_data_hdf' view of the global configuration (added for testing)."""
    if config._c is None:
        # first access: build the global configuration object
        config._initialize()
    return config.view('phovea_data_hdf')
cc = _get_config()  # module-wide 'phovea_server' configuration view
cc_hdf = _get_config_hdf()  # 'phovea_data_hdf' configuration view (added for testing)
# configure logging
logging.config.dictConfig(cc.logging)
_log = logging.getLogger(__name__)
def enable_dev_mode():
    """Switch the server configuration into development mode."""
    _log.info('enabling development mode')
    # NOTE(review): 'nocache' stays False even in dev mode -- confirm intended
    for key, value in (('env', 'development'),
                       ('debug', True),
                       ('error_stack_trace', True),
                       ('nocache', False)):
        cc.set(key, value)
def enable_prod_mode():
    """Switch the server configuration into production mode."""
    _log.info('enabling production mode')
    for key, value in (('env', 'production'),
                       ('debug', False),
                       ('error_stack_trace', False),
                       ('nocache', False)):
        cc.set(key, value)
def _config_files():
    """
    List the config files of all known plugins, skipping plugins without one.
    :return: list of config file paths
    """
    from .plugin import plugins
    files = []
    for plugin in plugins():
        config_file = plugin.config_file()
        if config_file is not None:
            files.append(config_file)
    return files
def _resolve_launcher(launcher):
    """
    Resolve the launcher: if it is a dotted-path string ('package.module.function'),
    import the module and return the named function; otherwise return it unchanged.
    :param launcher: launcher function or its dotted-path string
    :return: launcher function
    """
    import six
    import os
    import importlib
    if isinstance(launcher, six.string_types):
        # 'a.b.func' -> module 'a.b', "extension" '.func'
        module_name, function_name = os.path.splitext(launcher)
        m = importlib.import_module(module_name)
        # splitext keeps the leading dot on the extension, strip it;
        # a module attribute must be looked up via getattr, not subscripting
        # (the original `m[function_name]` raised TypeError).
        return getattr(m, function_name[1:])
    return launcher
def set_default_subparser(parser, name, args=None):
    """Default subparser selection. Call after setup, just before parse_args().

    name: the name of the subparser to call by default
    args: if set, the argument list handed to parse_args()

    see https://stackoverflow.com/a/26378414
    tested with 2.7, 3.2, 3.3, 3.4; works with 2.6 assuming argparse is installed
    """
    import sys
    import argparse
    # global help requested: do not inject a default command
    if any(arg in ('-h', '--help') for arg in sys.argv[1:]):
        return
    # collect all registered subparser names (relies on argparse internals)
    known = set()
    for action in parser._subparsers._actions:
        if isinstance(action, argparse._SubParsersAction):
            known.update(action._name_parser_map.keys())
    if any(arg in known for arg in sys.argv[1:]):
        return  # an explicit subparser was given
    # insert default in first position, this implies no global options
    # without a sub_parsers specified
    if args is None:
        sys.argv.insert(1, name)
    else:
        args.insert(0, name)
def _resolve_commands(parser):
    """
    Resolve commands from the phovea registry: load every 'command' extension and
    attach its launcher to a dedicated sub parser.
    :return: the id of the command flagged as default, or None
    """
    from .plugin import list as list_plugins
    subparsers = parser.add_subparsers(dest='cmd')
    default_command = None
    for command in list_plugins('command'):
        _log.info('add command ' + command.id)
        if getattr(command, 'isDefault', False):
            default_command = command.id
        # one argument parser per command
        cmdparser = subparsers.add_parser(command.id)
        _log.info('loading and initializing the command: ' + command.id)
        # the factory receives the sub parser so the extension (i.e., command) can
        # register further arguments (e.g., the address or port of the server);
        # it must return a launcher function that later gets the parsed arguments
        instance = command.load().factory(cmdparser)
        _log.info('add command instance to parser')
        # expose the launcher and the command id via parser defaults
        cmdparser.set_defaults(launcher=instance, launcherid=command.id)
    return default_command
def _set_runtime_infos(args):
    """
    Store run time information, such as the executed command (registered as phovea
    extension point), in the '_runtime' configuration view; also sets `absoluteDir`.
    The information is used, for instance, in plugin.py when initializing the registry.
    """
    import os
    runtime = cc.view('_runtime')
    runtime.set('command', args.launcherid)
    runtime.set('reloader', args.use_reloader)
    # absolute project directory with a trailing slash
    cc.set('absoluteDir', os.path.abspath(cc.get('dir')) + '/')
def run():
    """
    Run an application. The execution of the application can be configured using a command and arguments.
    Example terminal command:
    ```
    cd <workspace>
    python phovea_server --use_reloader --env dev api
    ```
    Supported arguments:
    `--use_reloader`: whether to automatically reload the server
    `--env`: environment mode (dev or prod)
    The last argument (e.g., `api`) is the command that must be registered as extension in the __init__.py and points to an execution file.
    Example:
    ```py
    registry.append('command', 'api', 'phovea_server.server', {'isDefault': True})
    ```
    The example registers the api command that runs the `create()` factory method from the server.py.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Phovea Server')
    parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')
    parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')
    # parse before to enable correct plugin discovery
    args = parser.parse_known_args()[0]
    # 'dev'/'development' switches to development mode, anything else is production
    if args.env.startswith('dev'):
        enable_dev_mode()
    else:
        enable_prod_mode()
    # resolve the default command to decide which application to launch
    default_command = _resolve_commands(parser)
    if default_command is not None:
        # set a default subparse to extract the defined arguments from the instance to the main arguments (?)
        set_default_subparser(parser, default_command)
    # full parse now that all sub parsers are registered
    args = parser.parse_args()
    _set_runtime_infos(args)
    main = args.launcher(args)  # execute the launcher function, which returns another function
    if args.use_reloader:
        _log.info('start application using reloader...')
        run_with_reloader(main, extra_files=_config_files())
    else:
        _log.info('start application...')
        main()
def create_embedded():
    """Import the phovea_server and create an (embedded) application instance."""
    from .server import create_application
    return create_application()
# copied code of method run_with_reloader from werkzeug._reloader, because it causes import problems otherwise
def run_with_reloader(main_func, extra_files=None, interval=1, reloader_type="auto"):
    """Run the given function in an independent python interpreter.

    :param main_func: function executed in the monitored (child) process
    :param extra_files: additional files the reloader watches for changes
    :param interval: polling interval in seconds
    :param reloader_type: key into werkzeug's reloader_loops (e.g. 'auto', 'stat')
    """
    import signal
    import os
    import sys
    import threading
    from werkzeug._reloader import reloader_loops, ensure_echo_on
    reloader = reloader_loops[reloader_type](extra_files, interval)
    # exit cleanly when the supervising process terminates us
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
            # child process: run the application in a daemon thread while the
            # reloader loop watches for file changes in the main thread
            ensure_echo_on()
            t = threading.Thread(target=main_func, args=())
            # BUGFIX: Thread.setDaemon() is deprecated since Python 3.10;
            # assign the daemon attribute instead (identical behavior)
            t.daemon = True
            t.start()
            reloader.run()
        else:
            # parent process: restart the script with the reloader enabled
            sys.exit(reloader.restart_with_reloader())
    except KeyboardInterrupt:
        pass
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import datetime
from libcloud import __version__
from libcloud.common.base import ConnectionUserAndKey, BaseDriver
from libcloud.dns.types import RecordType
__all__ = [
'Zone',
'Record',
'DNSDriver'
]
class Zone(object):
    """
    Represents a single DNS zone.
    """

    def __init__(self, id, domain, type, ttl, driver, extra=None):
        """
        :param id: Zone id.
        :type id: ``str``

        :param domain: The name of the domain.
        :type domain: ``str``

        :param type: Zone type (master, slave).
        :type type: ``str``

        :param ttl: Default TTL for records in this zone (in seconds).
        :type ttl: ``int``

        :param driver: DNSDriver instance.
        :type driver: :class:`DNSDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        # normalize: empty / zero-ish values collapse to None
        self.id = str(id) if id else None
        self.domain = domain
        self.type = type
        self.ttl = ttl or None
        self.driver = driver
        self.extra = extra or {}

    def __repr__(self):
        return ('<Zone: domain=%s, ttl=%s, provider=%s ...>' %
                (self.domain, self.ttl, self.driver.name))

    # The following methods simply delegate to the driver.

    def list_records(self):
        return self.driver.list_records(zone=self)

    def create_record(self, name, type, data, extra=None):
        return self.driver.create_record(name=name, zone=self, type=type,
                                         data=data, extra=extra)

    def update(self, domain=None, type=None, ttl=None, extra=None):
        return self.driver.update_zone(zone=self, domain=domain, type=type,
                                       ttl=ttl, extra=extra)

    def delete(self):
        return self.driver.delete_zone(zone=self)

    def export_to_bind_format(self):
        return self.driver.export_zone_to_bind_format(zone=self)

    def export_to_bind_zone_file(self, file_path):
        self.driver.export_zone_to_bind_zone_file(zone=self,
                                                  file_path=file_path)
class Record(object):
    """
    Represents a single zone record / resource.
    """

    def __init__(self, id, name, type, data, zone, driver, extra=None):
        """
        :param id: Record id
        :type id: ``str``

        :param name: Hostname or FQDN.
        :type name: ``str``

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param zone: Zone instance.
        :type zone: :class:`Zone`

        :param driver: DNSDriver instance.
        :type driver: :class:`DNSDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        # normalize: a falsy id collapses to None
        self.id = str(id) if id else None
        self.name = name
        self.type = type
        self.data = data
        self.zone = zone
        self.driver = driver
        self.extra = extra or {}

    def __repr__(self):
        return ('<Record: zone=%s, name=%s, type=%s, data=%s, provider=%s '
                '...>' %
                (self.zone.id, self.name, self.type, self.data,
                 self.driver.name))

    def update(self, name=None, type=None, data=None, extra=None):
        # delegate to the driver
        return self.driver.update_record(record=self, name=name, type=type,
                                         data=data, extra=extra)

    def delete(self):
        # delegate to the driver
        return self.driver.delete_record(record=self)

    def _get_numeric_id(self):
        # used as a sort key: numeric ids compare as ints, others as strings
        record_id = self.id
        if record_id.isdigit():
            record_id = int(record_id)
        return record_id
class DNSDriver(BaseDriver):
    """
    A base DNSDriver class to derive from.

    This class is always subclassed by a specific driver.
    """
    connectionCls = ConnectionUserAndKey
    name = None
    website = None

    # Map libcloud record type enum to provider record type name
    RECORD_TYPE_MAP = {}

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 **kwargs):
        """
        :param key: API key or username to be used (required)
        :type key: ``str``

        :param secret: Secret password to be used (required)
        :type secret: ``str``

        :param secure: Whether to use HTTPS or HTTP. Note: Some providers
                only support HTTPS, and it is on by default.
        :type secure: ``bool``

        :param host: Override hostname used for connections.
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :return: ``None``
        """
        super(DNSDriver, self).__init__(key=key, secret=secret, secure=secure,
                                        host=host, port=port, **kwargs)

    def list_record_types(self):
        """
        Return a list of RecordType objects supported by the provider.

        :return: ``list`` of :class:`RecordType`
        """
        return list(self.RECORD_TYPE_MAP.keys())

    def iterate_zones(self):
        """
        Return a generator to iterate over available zones.

        :rtype: ``generator`` of :class:`Zone`
        """
        raise NotImplementedError(
            'iterate_zones not implemented for this driver')

    def list_zones(self):
        """
        Return a list of zones.

        :return: ``list`` of :class:`Zone`
        """
        return list(self.iterate_zones())

    def iterate_records(self, zone):
        """
        Return a generator to iterate over records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :rtype: ``generator`` of :class:`Record`
        """
        raise NotImplementedError(
            'iterate_records not implemented for this driver')

    def list_records(self, zone):
        """
        Return a list of records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :return: ``list`` of :class:`Record`
        """
        return list(self.iterate_records(zone))

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :rtype: :class:`Zone`
        """
        raise NotImplementedError(
            'get_zone not implemented for this driver')

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :param record_id: ID of the required record
        :type record_id: ``str``

        :rtype: :class:`Record`
        """
        raise NotImplementedError(
            'get_record not implemented for this driver')

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Zone domain name (e.g. example.com)
        :type domain: ``str``

        :param type: Zone type (master / slave).
        :type type: ``str``

        :param ttl: TTL for new records. (optional)
        :type ttl: ``int``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        raise NotImplementedError(
            'create_zone not implemented for this driver')

    def update_zone(self, zone, domain, type='master', ttl=None, extra=None):
        """
        Update an existing zone.

        :param zone: Zone to update.
        :type zone: :class:`Zone`

        :param domain: Zone domain name (e.g. example.com)
        :type domain: ``str``

        :param type: Zone type (master / slave).
        :type type: ``str``

        :param ttl: TTL for new records. (optional)
        :type ttl: ``int``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        raise NotImplementedError(
            'update_zone not implemented for this driver')

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param zone: Zone where the requested record is created.
        :type zone: :class:`Zone`

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        raise NotImplementedError(
            'create_record not implemented for this driver')

    def update_record(self, record, name, type, data, extra=None):
        """
        Update an existing record.

        :param record: Record to update.
        :type record: :class:`Record`

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        raise NotImplementedError(
            'update_record not implemented for this driver')

    def delete_zone(self, zone):
        """
        Delete a zone.

        Note: This will delete all the records belonging to this zone.

        :param zone: Zone to delete.
        :type zone: :class:`Zone`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete_zone not implemented for this driver')

    def delete_record(self, record):
        """
        Delete a record.

        :param record: Record to delete.
        :type record: :class:`Record`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete_record not implemented for this driver')

    def export_zone_to_bind_format(self, zone):
        """
        Export Zone object to the BIND compatible format.

        :param zone: Zone to export.
        :type zone: :class:`Zone`

        :return: Zone data in BIND compatible format.
        :rtype: ``str``
        """
        if zone.type != 'master':
            raise ValueError('You can only generate BIND out for master zones')

        lines = []

        # For consistent output, records are sorted based on the id
        records = zone.list_records()
        records = sorted(records, key=Record._get_numeric_id)

        # BUGFIX: '%m' is the month directive; minutes are '%M'
        date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        values = {'version': __version__, 'date': date}

        lines.append('; Generated by Libcloud v%(version)s on %(date)s' %
                     values)
        lines.append('$ORIGIN %(domain)s.' % {'domain': zone.domain})
        lines.append('$TTL %(domain_ttl)s\n' % {'domain_ttl': zone.ttl})

        for record in records:
            line = self._get_bind_record_line(record=record)
            lines.append(line)

        output = '\n'.join(lines)
        return output

    def export_zone_to_bind_zone_file(self, zone, file_path):
        """
        Export Zone object to the BIND compatible format and write result to a
        file.

        :param zone: Zone to export.
        :type zone: :class:`Zone`

        :param file_path: File path where the output will be saved.
        :type file_path: ``str``
        """
        result = self.export_zone_to_bind_format(zone=zone)

        with open(file_path, 'w') as fp:
            fp.write(result)

    def _get_bind_record_line(self, record):
        """
        Generate BIND record line for the provided record.

        :param record: Record to generate the line for.
        :type record: :class:`Record`

        :return: Bind compatible record line.
        :rtype: ``str``
        """
        # fully qualified record name always ends with a trailing dot
        if record.name:
            name = '%(name)s.%(domain)s' % {'name': record.name,
                                            'domain': record.zone.domain}
        else:
            name = record.zone.domain
        name += '.'

        # record-level TTL overrides the zone default
        ttl = record.extra['ttl'] if 'ttl' in record.extra else record.zone.ttl
        ttl = str(ttl)
        data = record.data

        if record.type in [RecordType.CNAME, RecordType.DNAME, RecordType.MX,
                           RecordType.PTR, RecordType.SRV]:
            # Make sure trailing dot is present (endswith also handles
            # empty data without raising, unlike indexing the last char)
            if not data.endswith('.'):
                data += '.'

        if record.type in [RecordType.TXT, RecordType.SPF] and ' ' in data:
            # Escape the quotes
            data = data.replace('"', '\\"')

            # Quote the string
            data = '"%s"' % (data)

        if record.type in [RecordType.MX, RecordType.SRV]:
            priority = str(record.extra['priority'])
            parts = [name, ttl, 'IN', record.type, priority, data]
        else:
            parts = [name, ttl, 'IN', record.type, data]

        line = '\t'.join(parts)
        return line

    def _string_to_record_type(self, string):
        """
        Return a string representation of a DNS record type as a
        libcloud RecordType ENUM.

        :rtype: :class:`RecordType`
        """
        string = string.upper()
        record_type = getattr(RecordType, string)
        return record_type
| |
from jsonrpc import ServiceProxy
import sys
import string

# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Connect to the local daemon's JSON-RPC endpoint; embed credentials in the
# URL only when a password is configured.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:12100")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:12100")

# the first CLI argument selects the RPC command (case-insensitive)
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Blooddonation address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Blooddonation address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
import luigi
import luigi_bigquery

## Running Queries

class MyQuery(luigi_bigquery.Query):
    # Minimal query task: only the SQL needs to be defined.
    def query(self):
        return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"

## Getting Results

class MyQueryRun(luigi_bigquery.Query):
    def query(self):
        return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"

    def run(self):
        # run_query() executes the SQL and returns a result handle
        result = self.run_query(self.query())
        print "Job ID :", result.job_id
        print "Result size:", result.size
        print "Result :"
        # NOTE(review): the (i, c) unpacking assumes description yields pairs -- confirm
        print "\t".join([c['name'] for i, c in result.description])
        print "----"
        for row in result:
            print "\t".join([str(c) for c in row])
        print '===================='

class MyQuerySave(luigi_bigquery.Query):
    def query(self):
        return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"

    def output(self):
        return luigi.LocalTarget('MyQuerySave.csv')

    def run(self):
        # stream the query result into the CSV target
        result = self.run_query(self.query())
        with self.output().open('w') as f:
            result.to_csv(f)

## Building Pipelines

class MyQueryStep1(luigi_bigquery.Query):
    def query(self):
        return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"

    def output(self):
        # ResultTarget records the job so downstream tasks can read the result
        return luigi_bigquery.ResultTarget('MyQueryStep1.job')

class MyQueryStep2(luigi.Task):
    def requires(self):
        return MyQueryStep1()

    def output(self):
        return luigi.LocalTarget('MyQueryStep2.csv')

    def run(self):
        # retrieve the result and save it as a CSV file
        with self.output().open('w') as f:
            self.input().result.to_csv(f)

class MyQueryStep3(luigi.Task):
    def requires(self):
        return MyQueryStep2()

    def output(self):
        return luigi.LocalTarget('MyQueryStep3.txt')

    def run(self):
        with self.input().open() as f:
            # process the result here
            print f.read()
        with self.output().open('w') as f:
            # crate the final output
            f.write('done')

## Templating Queries

class MyQueryFromTemplate(luigi_bigquery.Query):
    # SQL is loaded from a template file instead of query()
    source = 'templates/query_with_language.sql'

    # variables used in the template
    language = 'Python'

class MuQueryWithVariables(luigi_bigquery.Query):
    source = 'templates/query_with_variables.sql'

    # define variables
    variables = {
        'language': 'Python',
    }

    # or use property for dynamic variables
    # @property
    # def variables(self):
    #     return {
    #         'language': 'Python',
    #     }

## Passing Parameters

class MyQueryWithParameters(luigi_bigquery.Query):
    source = 'templates/query_with_time_range.sql'

    # parameters
    year = luigi.IntParameter()

    def output(self):
        # create a unique name for this output using parameters
        return luigi_bigquery.ResultTarget('MyQueryWithParameters-{0}.job'.format(self.year))

class MyQueryAggregator(luigi.Task):
    def requires(self):
        # create a list of tasks with different parameters
        return [
            MyQueryWithParameters(2009),
            MyQueryWithParameters(2010),
            MyQueryWithParameters(2011),
            MyQueryWithParameters(2012)
        ]

    def output(self):
        return luigi.LocalTarget('MyQueryAggregator.txt')

    def run(self):
        with self.output().open('w') as f:
            # repeat for each ResultTarget
            for target in self.input():
                # output results into a single file
                for row in target.result:
                    f.write(str(row) + "\n")

## Building Pipelines using QueryTable

class MyQueryTableStep1(luigi_bigquery.QueryTable):
    # destination dataset/table for the query result
    def dataset(self):
        return 'tmp'

    def table(self):
        return 'github_nested_count'

    def query(self):
        return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"

class MyQueryTableStep2(luigi_bigquery.Query):
    def requires(self):
        return MyQueryTableStep1()

    def query(self):
        # the upstream target exposes the table it materialized
        input = self.input()
        print(input.dataset_id)
        print(input.table_id)
        return "SELECT cnt FROM [{0}.{1}]".format(input.dataset_id, input.table_id)

    def output(self):
        return luigi.LocalTarget('MyQueryTableStep2.csv')

    def run(self):
        # retrieve the result and save it as a CSV file
        result = self.run_query(self.query())
        with self.output().open('w') as f:
            result.to_csv(f)

if __name__ == '__main__':
    luigi.run()
import luigi
import luigi_bigquery
## Running Queries
class MyQuery(luigi_bigquery.Query):
def query(self):
return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"
## Getting Results
class MyQueryRun(luigi_bigquery.Query):
def query(self):
return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"
def run(self):
result = self.run_query(self.query())
print "Job ID :", result.job_id
print "Result size:", result.size
print "Result :"
print "\t".join([c['name'] for i, c in result.description])
print "----"
for row in result:
print "\t".join([str(c) for c in row])
print '===================='
class MyQuerySave(luigi_bigquery.Query):
def query(self):
return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"
def output(self):
return luigi.LocalTarget('MyQuerySave.csv')
def run(self):
result = self.run_query(self.query())
with self.output().open('w') as f:
result.to_csv(f)
## Building Pipelines
class MyQueryStep1(luigi_bigquery.Query):
def query(self):
return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"
def output(self):
return luigi_bigquery.ResultTarget('MyQueryStep1.job')
class MyQueryStep2(luigi.Task):
def requires(self):
return MyQueryStep1()
def output(self):
return luigi.LocalTarget('MyQueryStep2.csv')
def run(self):
# retrieve the result and save it as a CSV file
with self.output().open('w') as f:
self.input().result.to_csv(f)
class MyQueryStep3(luigi.Task):
def requires(self):
return MyQueryStep2()
def output(self):
return luigi.LocalTarget('MyQueryStep3.txt')
def run(self):
with self.input().open() as f:
# process the result here
print f.read()
with self.output().open('w') as f:
# crate the final output
f.write('done')
## Templating Queries
class MyQueryFromTemplate(luigi_bigquery.Query):
source = 'templates/query_with_language.sql'
# variables used in the template
language = 'Python'
class MuQueryWithVariables(luigi_bigquery.Query):
source = 'templates/query_with_variables.sql'
# define variables
variables = {
'language': 'Python',
}
# or use property for dynamic variables
# @property
# def variables(self):
# return {
# 'language': 'Python',
# }
## Passing Parameters
class MyQueryWithParameters(luigi_bigquery.Query):
    """Templated query parameterized by a year."""

    source = 'templates/query_with_time_range.sql'

    # parameters
    year = luigi.IntParameter()

    def output(self):
        # create a unique name for this output using parameters
        return luigi_bigquery.ResultTarget('MyQueryWithParameters-{0}.job'.format(self.year))
class MyQueryAggregator(luigi.Task):
    """Collect the results of several parameterized queries into one file."""

    def requires(self):
        # One MyQueryWithParameters task per year of interest.
        return [MyQueryWithParameters(year) for year in (2009, 2010, 2011, 2012)]

    def output(self):
        return luigi.LocalTarget('MyQueryAggregator.txt')

    def run(self):
        # Append every row of every upstream ResultTarget to a single file.
        with self.output().open('w') as out:
            for target in self.input():
                for row in target.result:
                    out.write(str(row) + "\n")
## Building Pipelines using QueryTable
class MyQueryTableStep1(luigi_bigquery.QueryTable):
    """Materialize the query result into the tmp.github_nested_count table."""

    def query(self):
        return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"

    def dataset(self):
        # Destination dataset for the materialized table.
        return 'tmp'

    def table(self):
        # Destination table name.
        return 'github_nested_count'
class MyQueryTableStep2(luigi_bigquery.Query):
    """Query the table produced by MyQueryTableStep1 and save it as a CSV."""

    def requires(self):
        return MyQueryTableStep1()

    def query(self):
        # Fixed: the local used to be named 'input', shadowing the builtin.
        upstream = self.input()
        return "SELECT cnt FROM [{0}.{1}]".format(upstream.dataset_id, upstream.table_id)

    def output(self):
        return luigi.LocalTarget('MyQueryTableStep2.csv')

    def run(self):
        # retrieve the result and save it as a CSV file
        result = self.run_query(self.query())
        with self.output().open('w') as f:
            result.to_csv(f)
# QueryToGCS
class MyQueryToGCS(luigi_bigquery.QueryToGCS):
    """Export query results to a Google Cloud Storage object."""

    # presumably stages results in a temporary BigQuery table before the
    # export — confirm against luigi_bigquery.QueryToGCS documentation.
    use_temporary_table = True

    def bucket(self):
        # Destination GCS bucket name.
        return 'my-bucket'

    def path(self):
        # Object path inside the bucket.
        return '/path/to/file.csv'

    def query(self):
        return "SELECT count(*) cnt FROM [publicdata:samples.github_nested]"
if __name__ == '__main__':
    # Hand control to luigi's command-line task runner.
    luigi.run()
| |
# (2/3/13) read_source_data() and read_sink_data() get "dt" from
# source_file or sink_file vs. channels comp, but what
# about canals ? Can't use "channel_dt" as before.
# (2/3/13) "vol@channel" is now obtained as a reference.
## NB! CFG file does not have "n_steps" so it is set
## to 1 in BMI_base.py. Maybe it should be
## set to max of "nd_max" values that appear in
## read_source_data(), read_sink_data(), etc. (11/14/11)
##============================================================
#
# Copyright (c) 2001-2014, Scott D. Peckham
#
# Feb. 2017. Changes to internal variable names.
# Cleanup & testing with Test_Plane_Canal data.
# Sep 2014. New standard names and BMI updates and testing.
# Nov 2013. Converted TopoFlow to a Python package.
# Feb 2013. Adapted to use EMELI framework.
# Oct 2012. CSDMS Standard Names and BMI.
# May 2010. Changes to initialize() and read_cfg_file().
# Jul 2009. Updates.
# May 2009. Updates.
# Jan 2009. Converted from IDL to Python with I2PY.
#
#---------------------------------------------------------------------
# Notes: Maybe replace "dur_sums" approach with the same approach
# now used in precip.py ??
# Make sure volume in caller gets updated correctly.
#---------------------------------------------------------------------
#
# class diversions_component: (inherits from BMI_base.py)
#
# initialize()
# update()
# finalize()
# set_computed_input_vars()
#---------------------------------
# read_input_files()
# read_source_data()
# read_sink_data()
# read_canal_data()
#----------------------------
# update_sources()
# update_sinks()
# update_canals()
#
#--------------------------------------------------------------------
import numpy as np
import glob
import os
from topoflow.utils import BMI_base
from topoflow.utils import cfg_files as cfg
#---------------------------------------------------------------------
class diversions_component( BMI_base.BMI_component ):
    """TopoFlow Diversions component (sources, sinks and canals).

    Base class: the read_*_data() methods below are stubs and the
    update_sources/update_sinks/update_canals methods called from
    update() are supplied elsewhere (see diversions_fraction_method.py).
    BMI plumbing (config, time vars, scalars) comes from BMI_base.
    """

    #--------------------------------------------------------------------------
    def initialize(self, cfg_file=None, mode="nondriver",
                   SILENT=False):
        """Read CFG/grid/basin info and prepare all state variables.

        If the component is disabled in the CFG file, every variable a
        sibling component might reference is set to a zero scalar and
        initialization returns early.
        """
        if not(SILENT):
            print ' '
            print 'Diversions component: Initializing...'

        self.status = 'initializing'  # (OpenMI 2.0 convention)
        self.mode = mode
        self.cfg_file = cfg_file

        #-----------------------------------------------
        # Load component parameters from a config file
        #-----------------------------------------------
        # self.set_constants()
        self.initialize_config_vars()
        self.read_grid_info()           # (need this, 5/19/10)
        self.initialize_basin_vars()    # (5/14/10)
        ############################################################
        # With new framework approach, we can't request the time
        # step for a specific component as "dt" (due to conflicts).
        ############################################################
        # The Diversions component "dt" should probably match that
        # of the Channels component. (Must be named "dt" vs.
        # "canal_dt".)
        ############################################################
        self.initialize_time_vars()

        #-----------------------------------------------------
        # These are used by the Channels component (9/22/14)
        # to tell if diversions are available and "on".
        #-----------------------------------------------------
        if not(self.use_canals):
            self.n_canals = self.initialize_scalar( 0, dtype='int32')
        if not(self.use_sinks):
            self.n_sinks = self.initialize_scalar( 0, dtype='int32')
        if not(self.use_sources):
            self.n_sources = self.initialize_scalar( 0, dtype='int32')

        #----------------------------------
        # Return if component is disabled
        #----------------------------------
        if (self.comp_status == 'Disabled'):
            if not(SILENT):
                print 'Diversions component: Disabled in CFG file.'
            # Zero scalars so other components can still reference them.
            self.n_canals = self.initialize_scalar( 0, dtype='int32')
            self.n_sinks = self.initialize_scalar( 0, dtype='int32')
            self.n_sources = self.initialize_scalar( 0, dtype='int32')
            #-----------------------------------------------------------------------
            self.sinks_x = self.initialize_scalar( 0, dtype='float64')
            self.sinks_y = self.initialize_scalar( 0, dtype='float64')
            self.sinks_Q = self.initialize_scalar( 0, dtype='float64')
            #-----------------------------------------------------------------------
            self.sources_x = self.initialize_scalar( 0, dtype='float64')
            self.sources_y = self.initialize_scalar( 0, dtype='float64')
            self.sources_Q = self.initialize_scalar( 0, dtype='float64')
            #-----------------------------------------------------------------------
            self.canals_in_x = self.initialize_scalar( 0, dtype='float64')
            self.canals_in_y = self.initialize_scalar( 0, dtype='float64')
            self.canals_in_Q_fraction = self.initialize_scalar( 0, dtype='float64')
            self.canals_out_Q = self.initialize_scalar( 0, dtype='float64')
            self.canals_out_x = self.initialize_scalar( 0, dtype='float64')
            self.canals_out_y = self.initialize_scalar( 0, dtype='float64')
            #-----------------------------------------------------------------------
            self.DONE = True
            self.status = 'initialized'  # (OpenMI 2.0 convention)
            return

        #----------------------------------------
        # Initialize all time-related variables
        #----------------------------------------
        # NOTE(review): initialize_time_vars() was already called above;
        # this second call looks redundant — confirm before removing.
        self.initialize_time_vars()

        #-----------------------------------------------
        # Read from files as needed to initialize vars
        #-----------------------------------------------
        # source_files and sink_files have their own "dt"
        # which will override the default above. (2/3/13)
        #--------------------------------------------------
        self.read_input_files()
        self.status = 'initialized'  # (OpenMI 2.0 convention)
    # initialize()

    #--------------------------------------------------------------------------
    ## def update(self, dt=-1.0, time_seconds=None):
    def update(self, dt=-1.0):
        """Apply sources, sinks and canals for one step, then advance time."""
        if (self.comp_status == 'Disabled'):
            return
        self.status = 'updating'  # (OpenMI 2.0 convention)

        #-----------------------------------------------------
        # Update info from all sources, sinks and diversions
        #-----------------------------------------------------
        # print '### Calling update_sources()...'
        self.update_sources()
        # print '### Calling update_sinks()...'
        self.update_sinks()
        # print '### Calling update_canals()...'
        self.update_canals()

        #------------------------
        # Update internal clock
        #------------------------
        self.update_time()
        self.status = 'updated'  # (OpenMI 2.0 convention)
    # update()

    #--------------------------------------------------------------------------
    def finalize(self):
        """Mark the component finished and print the final report."""
        self.status = 'finalizing'  # (OpenMI)
        ## if (self.comp_status == 'Enabled'):
        ##     self.close_input_files()
        ##     self.close_output_files()
        self.status = 'finalized'  # (OpenMI)
        self.print_final_report(comp_name='Diversions component')
        ## print 'Diversions component: Finished.'
    # finalize()

    #-------------------------------------------------------------------
    def set_computed_input_vars(self):
        #---------------------------------------------------------------
        # Note: The initialize() method calls initialize_config_vars()
        #       (in BMI_base.py), which calls this method at the end.
        #       But read_input_files() has not been called yet.
        #       See initialize_computed_vars().
        #--------------------------------------------------------------
        # Convert the CFG file's 'Yes'/'No' strings into booleans.
        self.use_sources = (self.use_sources == 'Yes')
        self.use_sinks = (self.use_sinks == 'Yes')
        self.use_canals = (self.use_canals == 'Yes')
    # set_computed_input_vars()

    #--------------------------------------------------------------------------
    def read_input_files(self):
        """Prefix the data filenames with in_directory, then read each file."""
        self.source_file = self.in_directory + self.source_file
        self.sink_file = self.in_directory + self.sink_file
        self.canal_file = self.in_directory + self.canal_file
        self.read_source_data()
        self.read_sink_data()
        self.read_canal_data()
    # read_input_files()

    #--------------------------------------------------------------------------
    def read_source_data(self):
        pass  # (See diversions_fraction_method.py)
    # read_source_data()

    #--------------------------------------------------------------------------
    def read_sink_data(self):
        pass  # (See diversions_fraction_method.py)
    # read_sink_data()

    #--------------------------------------------------------------------------
    def read_canal_data(self):
        pass  # (See diversions_fraction_method.py)
    # read_canal_data()

    #--------------------------------------------------------------------------
    # Historical, commented-out reference implementations of
    # update_sources(), update_sinks() and update_canals() were kept here.
    # Summary of what they did (active versions live in
    # diversions_fraction_method.py):
    #
    #   update_sources(): for each source ID, add
    #       dvol = dt * sources_Q[time_index, k] to vol.flat[ID].
    #   update_sinks(): for each sink ID, subtract
    #       dvol = dt * sinks_Q[time_index, k] and clip vol at 0.
    #   update_canals(): treat upstream canal IDs as sinks and
    #       downstream IDs as sources, where the downstream Q lags the
    #       upstream Q by the travel time td:
    #           Q_out       = [0, Q_in]
    #           dur_out     = [td, dur_in]
    #           dur_sum_out = [0, dur_sum_in] + td
    #--------------------------------------------------------------------------
| |
"""
Module for statical analysis.
"""
from jedi import debug
from parso.python import tree
from jedi.evaluate.compiled import CompiledObject
# Registry of analysis issue types:
#   name -> (numeric code, exception class it corresponds to,
#            default message or None).
CODES = {
    'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
    'name-error': (2, NameError, 'Potential NameError.'),
    'import-error': (3, ImportError, 'Potential ImportError.'),
    'type-error-too-many-arguments': (4, TypeError, None),
    'type-error-too-few-arguments': (5, TypeError, None),
    'type-error-keyword-argument': (6, TypeError, None),
    'type-error-multiple-values': (7, TypeError, None),
    'type-error-star-star': (8, TypeError, None),
    'type-error-star': (9, TypeError, None),
    'type-error-operation': (10, TypeError, None),
    'type-error-not-iterable': (11, TypeError, None),
    'type-error-isinstance': (12, TypeError, None),
    'type-error-not-subscriptable': (13, TypeError, None),
    'value-error-too-many-values': (14, ValueError, None),
    'value-error-too-few-values': (15, ValueError, None),
}
class Error(object):
    """A potential runtime problem found by static analysis.

    Identified by an issue name (a key of CODES), the module path and
    the (line, column) start position of the offending node.
    """

    def __init__(self, name, module_path, start_pos, message=None):
        self.path = module_path
        self._start_pos = start_pos
        self.name = name
        # Fall back to the default message registered for this issue name.
        self.message = CODES[self.name][2] if message is None else message

    @property
    def line(self):
        """1-based line of the start position."""
        return self._start_pos[0]

    @property
    def column(self):
        """0-based column of the start position."""
        return self._start_pos[1]

    @property
    def code(self):
        # 'E' or 'W': first letter of the class name, plus the issue number.
        prefix = self.__class__.__name__[0]
        return prefix + str(CODES[self.name][0])

    def __unicode__(self):
        return '{0}:{1}:{2}: {3} {4}'.format(self.path, self.line, self.column,
                                             self.code, self.message)

    def __str__(self):
        return self.__unicode__()

    def __eq__(self, other):
        # Identity of an issue = (path, name, position); message is ignored.
        if self.path != other.path:
            return False
        if self.name != other.name:
            return False
        return self._start_pos == other._start_pos

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Consistent with __eq__: message does not participate.
        return hash((self.path, self._start_pos, self.name))

    def __repr__(self):
        return '<{0} {1}: {2}@{3},{4}>'.format(self.__class__.__name__,
                                               self.name, self.path,
                                               self._start_pos[0],
                                               self._start_pos[1])
class Warning(Error):
    # Same payload as Error; the code property renders with a 'W' prefix
    # (first letter of the class name) instead of 'E'.
    pass
def add(node_context, error_name, node, message=None, typ=Error, payload=None):
    """Record a potential error/warning on the evaluator's analysis list.

    Nothing is recorded when the node sits inside a construct that would
    catch the corresponding exception (try/except, hasattr guard).
    """
    exception = CODES[error_name][1]
    if _check_for_exception_catch(node_context, node, exception, payload):
        return

    module_context = node_context.get_root_context()
    # TODO this path is probably not right
    module_path = module_context.py__file__()
    issue = typ(error_name, module_path, node.start_pos, message)
    debug.warning(str(issue), format=False)
    node_context.evaluator.analysis.append(issue)
def _check_for_setattr(instance):
    """Return True if the instance's module uses ``setattr`` anywhere inside it."""
    from jedi.evaluate.representation import ModuleContext

    module = instance.get_root_context()
    if not isinstance(module, ModuleContext):
        return False

    tree_node = module.tree_node
    try:
        usages = tree_node.get_used_names()['setattr']
    except KeyError:
        return False

    # Only count usages strictly inside the module's span.
    return any(tree_node.start_pos < usage.start_pos < tree_node.end_pos
               for usage in usages)
def add_attribute_error(name_context, lookup_context, name):
    """Report a potential AttributeError for ``name`` on ``lookup_context``.

    Downgraded to a Warning when __getattr__/__getattribute__ is defined
    (outside plain ``object``) or when the module uses setattr, since the
    attribute might then exist at runtime.
    """
    from jedi.evaluate.instance import AbstractInstanceContext, CompiledInstanceName

    message = ('AttributeError: %s has no attribute %s.' % (lookup_context, name))
    typ = Error
    if isinstance(lookup_context, AbstractInstanceContext):
        slot_names = (lookup_context.get_function_slot_names('__getattr__') +
                      lookup_context.get_function_slot_names('__getattribute__'))
        if any(isinstance(name, CompiledInstanceName) and
               slot.parent_context.obj == object
               for slot in slot_names):
            typ = Warning

    if _check_for_setattr(lookup_context):
        typ = Warning

    payload = lookup_context, name
    add(name_context, 'attribute-error', name, message, typ, payload)
def _check_for_exception_catch(node_context, jedi_name, exception, payload=None):
    """
    Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
    doesn't count as an error (if equal to `exception`).

    Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
    it.

    Returns True if the exception was caught.
    """
    def check_match(cls, exception):
        # True when `cls` is a compiled object and `exception` is a
        # subclass of the real class it wraps.
        try:
            return isinstance(cls, CompiledObject) and issubclass(exception, cls.obj)
        except TypeError:
            # issubclass raises when cls.obj is not a class.
            return False

    def check_try_for_except(obj, exception):
        # Only nodes in try
        # Children come in (branch_keyword, colon, suite) triples.
        iterator = iter(obj.children)
        for branch_type in iterator:
            colon = next(iterator)
            suite = next(iterator)
            if branch_type == 'try' \
                    and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos):
                # The node is not inside this try suite.
                return False

        for node in obj.get_except_clause_tests():
            if node is None:
                return True  # An exception block that catches everything.
            else:
                except_classes = node_context.eval_node(node)
                for cls in except_classes:
                    from jedi.evaluate import iterable
                    if isinstance(cls, iterable.AbstractSequence) and \
                            cls.array_type == 'tuple':
                        # multiple exceptions
                        for lazy_context in cls.py__iter__():
                            for typ in lazy_context.infer():
                                if check_match(typ, exception):
                                    return True
                    else:
                        if check_match(cls, exception):
                            return True

    def check_hasattr(node, suite):
        # True when `node` is a ``hasattr(obj, 'name')`` call guarding
        # `suite` and its arguments match the payload being reported.
        try:
            assert suite.start_pos <= jedi_name.start_pos < suite.end_pos
            assert node.type in ('power', 'atom_expr')
            base = node.children[0]
            assert base.type == 'name' and base.value == 'hasattr'
            trailer = node.children[1]
            assert trailer.type == 'trailer'
            arglist = trailer.children[1]
            assert arglist.type == 'arglist'
            from jedi.evaluate.param import TreeArguments
            args = list(TreeArguments(node_context.evaluator, node_context, arglist).unpack())
            # Arguments should be very simple
            assert len(args) == 2

            # Check name
            key, lazy_context = args[1]
            names = list(lazy_context.infer())
            assert len(names) == 1 and isinstance(names[0], CompiledObject)
            assert names[0].obj == payload[1].value

            # Check objects
            key, lazy_context = args[0]
            objects = lazy_context.infer()
            return payload[0] in objects
        except AssertionError:
            # Anything that doesn't match the expected shape: not a guard.
            return False

    # Walk up the tree until the enclosing function/class, checking every
    # flow node (try, if, while) for a construct that catches `exception`.
    obj = jedi_name
    while obj is not None and not isinstance(obj, (tree.Function, tree.Class)):
        if isinstance(obj, tree.Flow):
            # try/except catch check
            if obj.type == 'try_stmt' and check_try_for_except(obj, exception):
                return True
            # hasattr check
            if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'):
                if check_hasattr(obj.children[1], obj.children[3]):
                    return True
        obj = obj.parent
    return False
| |
'''
This code helps in controlling the ACs based upon the user preference.
If the user selects POWERSAVER mode, only 2 ACs are allowed to be switched on.
If the user selects USERDEFINED mode, the number of ACs to be switched
on is determined by the interval to which the selected minimum temperature
belongs. The interval is defined using machine-learned data.
The machine-learned data is sorted from higher temperature intervals to lower.
If mintemp falls in a particular interval, the number of ACs that has to be
switched on for that interval is looked up and assigned as the number of ACs
to be used. Then the lower temperature is set to mintemp and the
higher temperature is set to mintemp+3.
Then the required number of ACs is switched on. The temperature is checked every minute
until mintemp is reached, and then the ACs are switched off. The system then waits for the
temperature to rise back to mintemp+3, when another combination of that number of ACs is switched on again.
This process is continued
Here we have use code from http://learn.adafruit.com/dht-humidity-sensing-on-raspberry-pi-with-gdocs-logging/software-install
to fetch data from the AM2302 temperature sensors. Here ./Adafruit_DHT is renamed as ./hackTemp
'''
#TODO need to implement the option for getting data from the config.csv file and auto config the intervals
#also add the option for running MACHINELEARN to update the config.csv
# Currently we have used option for max 4 ACs and used data from the set of combinations which we tried
# and observed to hardcode the intervals
import RPi.GPIO as GPIO
import time
import subprocess
import re
#setting up the pins for the ACs
onlist={"1":"6","2":"2","3":"4","4":"8"} #acstart lookup
offlist={"1":"5","2":"1","3":"3","4":"7"} #acstop lookup
oldTemperature=17.9 #setting a minimum value than AC's lowest set
temper=17.9
#GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD)
#setting up the AC pin values
GPIO.setup(16,GPIO.OUT)
GPIO.setup(18,GPIO.OUT)
GPIO.setup(22,GPIO.OUT)
GPIO.setup(24,GPIO.OUT)
#following function fetches data from the temperature sensor
#and sets it to oldTemperature and temper
def checktemp():
    """Read the AM2302 sensor and refresh the temper/oldTemperature globals.

    Falls back to the previous good reading when the sensor tool's output
    cannot be parsed.
    """
    global oldTemperature
    global temper
    # ./hackTemp is run with 2302 as the sensor model, on pin 27.
    reading = subprocess.check_output(["./hackTemp","2302",'27'])
    # Pull the numeric temperature out of the tool's output.
    found = re.search("Temp =\s+([0-9.]+)", reading)
    if found:
        # Good reading: update both the current and last-known values.
        temper = float(found.group(1))
        oldTemperature = temper
    else:
        # Sensor read failed: reuse the last good reading.
        temper = oldTemperature
checktemp()
#does checktemp() with one minute delay
#does checktemp() with one minute delay
def slowchecktemp():
    # Sleep 30s on each side of the read so consecutive polls in the
    # control loops are roughly one minute apart.
    time.sleep(30)
    checktemp()
    time.sleep(30)
#following function contains the pins which are connect RPi to Arduinos for controlling ACs
def onoff(sel):
    """Drive the RPi->Arduino control pin for one AC selector code.

    Each code maps to a (board pin, level) pair: level False puts that AC
    in fan mode, True in compressor mode. Unknown codes are ignored,
    exactly like the original chain of independent ifs.
    """
    # selector -> (GPIO board pin, output level)
    actions = {
        1: (16, False),   # Fan Mode
        2: (16, True),    # Compressor Mode
        7: (18, False),   # Fan Mode
        8: (18, True),    # Compressor Mode
        5: (22, False),   # Fan Mode
        6: (22, True),    # Compressor Mode
        3: (24, False),   # Fan Mode
        4: (24, True),    # Compressor Mode
    }
    if sel in actions:
        pin, level = actions[sel]
        GPIO.output(pin, level)
#function for starting ACs
def acstart(aclist):
    # Map each AC number through the "on" selector table and drive it.
    for i in aclist:
        onoff(int(onlist[str(i)]))

#function for stopping ACs
def acstop(aclist):
    # Map each AC number through the "off" selector table and drive it.
    for i in aclist:
        onoff(int(offlist[str(i)]))
#stopping all the ACs before starting the code
acstop([1,2,3,4])

# Mode-selection loop: repeats until a valid mode sets TL, TU and N,
# then breaks out to the control loops below.
while True:
    #POWERSAVER MODE lets only two ACs to run
    #USERDEFINED MODE lets user select temperature, but no of ACs that will run is based upon the interval
    #in which the minimum temperature falls. This interval is to be decided using the Machine Learning Data
    #currently the data used is based on the observations which we made
    mode=raw_input("POWERSAVER or USERDEFINED : ") #TODO add MACHINELEARN option
    #TL stores lower temperature allowed
    #TU stores upper temperature allowed
    #N stores no of ACs to be switched on
    #TODO to set the TL automatically from the data from mintemp for two ACs from config.csv
    #else can set best comfort temperature via some other technique using data and empirical algorithms
    if mode == "POWERSAVER":
        TL=28
        TU=30
        N=2
        break
    elif mode == "USERDEFINED":
        mintemp=float(raw_input("Input minimum temperature required (in Celcius):"))
        #TODO implement code for reading the config.csv to get the intervals and their combinations and no of ACs to be on
        if (mintemp >= 28):
            print "Two ACs will be switched on"
            N=2
            TL=mintemp
            TU=mintemp+3.0
            break
        elif (mintemp < 28) and (mintemp>24):
            print "Three ACs will be switched on"
            N=3
            TL=mintemp
            TU=mintemp+3.0
            break
        else:
            print "Four ACs will be switched on"
            N=4
            TL=mintemp
            TU=mintemp+3
            break
        #TODO implement MACHINELEARN if check and code call
    else:
        print "Invalid mode"
#------------------------------------------------------------------
# Main control loops: rotate through the AC combinations for the
# chosen count N, cooling down to TL, then waiting for the room to
# warm back up to TU before the next combination takes over.
# NOTE(review): the source had its indentation stripped; the guard
# `if (temper > TL)` is assumed (per its own comment) to wrap only
# the initial acstart() call — confirm against the original file.
#------------------------------------------------------------------
if N == 2:
    while True:
        if (temper > TL ): #just checking whether TL < temper to prevent unwanted switch on
            acstart([1,3])
        #once the ACs are started, the temperature is checked in every one minute
        while (temper > TL):
            slowchecktemp()
        #once the temperature goes below TL, ACs are switched off
        acstop([1,3])
        #keep checking for temperature crossing TU
        while (temper < TU):
            slowchecktemp()
        #ACs start once TU is crossed. Here the AC combination is changed.
        #This process is repeated for every combination present for 2 ACs here
        acstart([2,4])
        while (temper > TL):
            slowchecktemp()
        acstop([2,4])
        while (temper<TU):
            slowchecktemp()
elif N==3:
    while True:
        if (temper > TL): #just checking whether TL < temper to prevent unwanted switch on
            acstart([1,2,3])
        while (temper > TL):
            slowchecktemp()
        acstop([1,2,3])
        while (temper < TU):
            slowchecktemp()
        acstart([2,3,4])
        while (temper > TL):
            # BUG FIX: this line used to read bare `slowchecktemp` (a
            # function object, never called), so `temper` never updated
            # and the loop spun forever. The call parentheses were added.
            slowchecktemp()
        acstop([2,3,4])
        while (temper < TU):
            slowchecktemp()
        acstart([3,4,1])
        while (temper > TL):
            slowchecktemp()
        acstop([3,4,1])
        while (temper < TU):
            slowchecktemp()
        acstart([4,1,2])
        while (temper > TL):
            slowchecktemp()
        acstop([4,1,2])
        while (temper < TU ):
            slowchecktemp()
else:
    while True:
        if (temper > TL): #just checking whether TL < temper to prevent unwanted switch on
            acstart([1,2,3,4])
        while (temper >TL):
            slowchecktemp()
        acstop([1,2,3,4])
        while (temper < TU):
            slowchecktemp()
# NOTE(review): unreachable while the loops above run forever; kept as-is.
GPIO.cleanup()
| |
# Copyright (C) 2013 Cisco Systems Inc.
# All rights reserved
from .key import Key
from .line_parser import LineParser
from .section_parser import SectionParser
from .nxcli import *
import nxos_utils
class Routes(object):
    """Parse and cross-check NX-OS software routes (vsh/FIB/ARP) against
    the Broadcom hardware tables (l3 l3table / l3 defip / l3 egress)."""

    # Lazily-built regex Key objects, cached on the class.
    egressKey = None
    l3intKey = None
    l3tableKey = None
    l3defipKey = None
    vshHeadKey = None
    arpKey = None
    fwmIntVlanKey = None
    # Special egress entry ids -> (next-hop label, interface label).
    validHops = {'100000': ('Drop', 'Null0'),
                 '100002': ('Receive', 'sup-eth1'),
                 '100003': ('Attached', 'sup-hi'),
                 '200000': ('ECMP', 'sup-hi')}
    # Reverse lookup: next-hop label -> special egress entry id.
    hopEntries = {'Drop': '100000', 'Receive': '100002', 'Attached': '100003'}

    def __init__(self):
        # Parse-result accumulator (not used by the methods visible here).
        self.data = []
    def __get_arp_key(self):
        # Key for 'show ip arp' rows: (address, mac, interface).
        if self.arpKey is None:
            self.arpKey = Key([[r"(\d+.\d+.\d+.\d+)\s+\S+\s+(\S+)\s+(\S+)",
                                "many"]])
        return self.arpKey

    def __get_egress_key(self):
        # Key for 'l3 egress show' rows: (entry, mac, vlan, intf, port, value).
        if self.egressKey is None:
            self.egressKey = Key([[r"(\d+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(\S+)",
                                   "many"]])
        return self.egressKey

    def __get_l3_int_key(self):
        # Key for 'l3 intf show' rows: (intf, vlan, mac).
        if self.l3intKey is None:
            self.l3intKey = Key([[r"\d+\s+(\d+)\s+\d+\s+\d+\s+(\d+)\s+(\S+)",
                                  "many"]])
        return self.l3intKey

    def __get_l3_table_key(self):
        # Key for 'l3 l3table show' rows: (host address, next hop).
        if self.l3tableKey is None:
            self.l3tableKey = Key([[r"\d+\s+\d+\s+(\S+)\s+\S+\s+(\S+)\s+\S+\s+"
                                    "\S+\s+\S+\s+\S+", "many"]])
        return self.l3tableKey

    def __get_l3_defip_key(self):
        # Key for 'l3 defip show' rows: (address, prefix length, next hop).
        if self.l3defipKey is None:
            self.l3defipKey = Key([[r"\d+\s+\d+\s+(\S+)\/(\S+)\s+\S+\s+(\S+)\s"
                                    "+\d+\s+\d+\s+\d+\s+\d+\s+\S+", "many"]])
        return self.l3defipKey

    def __get_vsh_head_key(self):
        # Key for a FIB section header: (addr, prefix, nexthop, intf).
        if self.vshHeadKey is None:
            self.vshHeadKey = Key([[r"(\S+)/(\S+)\s+(\S+)\s+(\S+)", "once"]])
        return self.vshHeadKey

    def __get_fwm_int_vlan_key(self):
        # Key for 'show platform fwm info l3lif' output: (int-vlan, l3iif).
        if self.fwmIntVlanKey is None:
            self.fwmIntVlanKey = Key([[r"\S+:\s+iftype\s+\S+\s+if_index\s+\S+"
                                       "\s+int-vlan\s+(\d+)\s+l3iif\s+(\S+)\s"
                                       "+", "once"]])
        return self.fwmIntVlanKey
    def __get_arp_table(self):
        # Output of 'show ip arp' via the NXCLI wrapper.
        return NXCLI('show ip arp').get_output()

    def __get_l3_l3_table(self):
        # (status, output) of the Broadcom shell command on all units.
        return _bcm_sdk_shell_all_unit('l3 l3table show')

    def __get_l3_defip(self):
        return _bcm_sdk_shell_all_unit('l3 defip show')

    def __get_l3_egress(self):
        return _bcm_sdk_shell_all_unit('l3 egress show')

    def __get_l3_intf(self):
        return _bcm_sdk_shell_all_unit('l3 intf show')

    def __get_vsh_routes(self):
        # Software FIB routes for all VRFs.
        return NXCLI('show ip fib route vrf all').get_output()

    def __rib_route(self):
        # Raw RIB output; not used by the parsers visible in this file.
        return NXCLI('show ip route').get_raw_output()
def __parse_arp_table(self):
arpEntry = []
arpData = self.__get_arp_table()
aKey = self.__get_arp_key()
aParser = LineParser(arpData,aKey)
data = aParser.get_data(aKey)
for d in data:
addr,mac,intf = d
arpEntry.append([addr,mac,intf])
return arpEntry
    def __parse_l3_egress(self):
        """Map egress entry id -> [mac, vlan, intf, port-or-Vlan label]."""
        egressEntry = {}
        status,output = self.__get_l3_egress()
        l3Egress = output.split("\n")
        eKey = self.__get_egress_key()
        eParser = LineParser(l3Egress,eKey)
        data = eParser.get_data(eKey)
        intfData = self.__parse_l3_intf()
        for d in data:
            entry,mac,vlan,intf,port,value = d
            # if port is a digit then display the port directly
            try:
                port = int(port)
                if port != 0:
                    # NOTE(review): always True right after int() above.
                    if type(port) is int:
                        port = "{0}/{1}".format("Ethernet1",port)
                # NOTE(review): when port == 0 the integer 0 is stored as-is.
                egressEntry[entry] = [mac,vlan,intf,port]
            except ValueError:
                # Non-numeric port: treat it as an SVI and label it by the
                # VLAN found in the l3 intf table.
                vlan,mac1 = intfData[intf]
                vlan = "Vlan" + vlan
                egressEntry[entry] = [mac,vlan,intf,vlan]
        return egressEntry
def __parse_l3_intf(self):
intfData = {}
status,output = self.__get_l3_intf()
l3Intf = output.split("\n")
iKey = self.__get_l3_int_key()
iParser = LineParser(l3Intf,iKey)
data = iParser.get_data(iKey)
for d in data:
intf,vlan,mac = d
intfData[intf] = [vlan,mac]
return intfData
def __parse_l3_table(self):
routes = []
egressEntry = self.__parse_l3_egress()
intfData = self.__parse_l3_intf()
status,output = self.__get_l3_l3_table()
l3table = output.split("\n")
key = self.__get_l3_table_key()
lineParser = LineParser(l3table,key)
data = lineParser.get_data(key)
for d in data:
addr,nexthop = d
if nexthop in self.validHops:
routes.append([addr,'32',nexthop])
else:
macAddr,vlan,intf,port = egressEntry[nexthop]
routes.append([addr,'32',port])
return routes
def __parse_l3_defip(self):
routes = []
egressEntry = self.__parse_l3_egress()
intfData = self.__parse_l3_intf()
status,output = self.__get_l3_defip()
l3defip = output.split("\n")
key = self.__get_l3_defip_key()
lineParser = LineParser(l3defip,key)
data = lineParser.get_data(key)
for d in data:
addr,prefix,nexthop = d
#routes.append([addr,prefix,nexthop])
if nexthop in self.validHops:
routes.append([addr,prefix,nexthop])
else:
macAddr,vlan,intf,port = egressEntry[nexthop]
routes.append([addr,prefix,port])
return routes
    def __parse_vsh_routes(self):
        """Parse 'show ip fib route vrf all' into [addr, prefix, nexthop] rows."""
        routes = []
        # vsh routes
        fibRoute = self.__get_vsh_routes()
        key = Key(start=r"^\d+.\d+.\d+.\d+/\d+")
        sections = SectionParser(fibRoute,key).get_sections()
        headKey = self.__get_vsh_head_key()
        for d in sections:
            head,body = d
            lineParser = LineParser([head],headKey)
            addr,prefix,nexthop,intf = lineParser.get_datum(headKey)
            try:
                # Special hops map straight to their hardware entry id.
                nexthop = self.hopEntries[nexthop]
            except KeyError:
                # Regular hop: look up the L3 LIF for the interface.
                cmd = "{0} {1}".format("show platform fwm info l3lif",intf)
                fwmInfo = NXCLI(cmd).get_raw_output()
                intVlanKey = self.__get_fwm_int_vlan_key()
                fwmInfoParser = LineParser([fwmInfo],intVlanKey)
                intVlan,l3if = fwmInfoParser.get_datum(intVlanKey)
                #nexthop = intVlan
                nexthop = l3if
                # NOTE(review): the next line immediately overwrites the
                # l3if value with the interface name, making the lookup
                # above dead — confirm which value was intended.
                nexthop = intf
            routes.append([addr,prefix,nexthop])
            # Python 2 print of the remaining section body (debug output).
            for j in body:
                print j
        return routes
def verify_route(self,route=""):
return self.verify_routes()
def show_arp_table(self):
return nxos_utils.cli_ex('show ip arp')
def show_vsh_routes(self):
return nxos_utils.cli_ex('show ip fib route vrf all')
    def show_hw_routes(self):
        """Print every hardware route (l3table + defip) and return the count."""
        hwRoutes = 0
        routeTable = []
        l3TableRoutes = self.__parse_l3_table()
        l3DefipRoutes = self.__parse_l3_defip()
        for l3t in l3TableRoutes:
            routeTable.append(l3t)
        for l3d in l3DefipRoutes:
            routeTable.append(l3d)
        intf = []
        print "------------------+------------------+---------------------"
        print "Prefix | Next-hop | Interface"
        print "------------------+------------------+---------------------"
        for i in routeTable:
            addr,prefix,nexthop = i
            intf = []
            #fix the below for null
            try:
                # Special hops expand to (label, interface label).
                nexthop, intf = self.validHops[nexthop]
            except KeyError:
                intf = nexthop
            ipaddr = '{0}/{1}'.format(addr,prefix)
            # NOTE(review): the format string skips index {1}, so the
            # 'prefix' argument is passed but never rendered here.
            print '{0:<20}{2:<20}{3:<20}'.format(ipaddr,prefix,nexthop,intf)
            hwRoutes += 1
        return hwRoutes
    def verify_routes(self):
        """Check every software FIB route against the hardware tables.

        Returns (routes found, number of routes not found) and prints a
        summary (Python 2 print statements).
        """
        vshRoutes = []
        l3TableRoutes = []
        l3DefipRoutes = []
        # vsh routes
        vshRoutes = self.__parse_vsh_routes()
        # hw routes
        l3TableRoutes = self.__parse_l3_table()
        l3DefipRoutes = self.__parse_l3_defip()
        routesFound = 0
        routesNotFound = []
        for vr in vshRoutes:
            # A software route counts as found if either hardware table
            # has an identical [addr, prefix, nexthop] entry.
            if vr in l3TableRoutes:
                routesFound += 1
            elif vr in l3DefipRoutes:
                routesFound += 1
            else:
                routesNotFound.append(vr)
        print
        print "Routes verified and found: ", routesFound
        print
        if len(routesNotFound) > 0:
            print "Routes not found: "
            for i in routesNotFound:
                addr,prefix,nexthop = i
                intf = []
                ipaddr = '{0}/{1}'.format(addr, prefix)
                print '{0:<20}{1:<20}'.format(ipaddr, nexthop)
        return routesFound, len(routesNotFound)
    def verify_arp_table(self):
        """Check each ARP entry's MAC against the hardware egress tables.

        Returns (verified, notverified) counts; raises ValueError when an
        ARP address has no hardware entry at all.  Python 2 prints/raises.
        """
        arpTable = []
        l3TableRoutes = []
        l3DefipRoutes = []
        l3Egress = []
        iparpCount = 0
        found = 0
        verified = 0
        notverified = 0
        intfData = {}
        # arp entries
        arpTable = self.__parse_arp_table()
        # hw routes
        l3TableRoutes = self.__parse_l3_table()
        l3DefipRoutes = self.__parse_l3_defip()
        l3Egress = self.__parse_l3_egress()
        l3Intf = self.__parse_l3_intf()
        #do a reverse lookup on the l3 egress table and get the port
        for d in l3Egress:
            mac,vlan,intf,port = l3Egress[d]
            # Index the (mac, port) pair by both interface id and entry id.
            intfData[intf] = [mac,port]
            intfData[d] = [mac,port]
        #if sub interface verify the vlan for the sub-intf with the l3 intf
        #vlan
        for d in arpTable:
            addr, mac, intf = d
            # handle the dead beef mac
            found = 0
            nexthop = ""
            # First look for the address as a host route...
            for l3t in l3TableRoutes:
                if found == 0:
                    ip, prefix, nh = l3t
                    if addr == ip:
                        found = 1
                        nexthop = nh
            # ...then fall back to the prefix table.
            if found == 0:
                for l3d in l3DefipRoutes:
                    if found == 0:
                        ip, prefix, nh = l3d
                        if addr == ip:
                            found = 1
                            nexthop = nh
            if found == 0:
                raise ValueError, 'Entry for addr not found in HW'
            if nexthop == "":
                raise ValueError, 'HW table entry not found'
            else:
                try:
                    macAddr, port = intfData[nexthop]
                except KeyError:
                    macAddr = ''
                    port = ''
                print "mac address:" + macAddr
                # Compare dotted (aaaa.bbbb.cccc) vs colon-separated MACs
                # by stripping both separators.
                if mac.replace(".","") == macAddr.replace(":","") :
                    verified += 1
                    print "Arp entry for " + addr, mac, intf + " found in HW"
                else:
                    notverified += 1
                    print ("Arp entry for " + addr, mac, intf + " not found in"
                           " HW")
        return verified, notverified
def __build_route(self, srcIp="", prefix="", mask="", intf="", nexthop="",
                  nhMask="", nhPrefix="", tag="", routePref=""):
    """Build an 'ip route ...' CLI command string.

    Args:
        srcIp: destination network address (validated by the callers).
        prefix: prefix length; emitted as 'addr/prefix' when non-empty.
        mask: dotted netmask; used only when prefix is empty.
        intf: optional egress interface, validated against the running config.
        nexthop: optional next-hop address.
        nhMask / nhPrefix: optional next-hop mask or prefix length.
        tag: optional route tag.
        routePref: optional route preference (administrative distance).

    Returns:
        str: the assembled route command.

    Raises:
        ValueError: if intf is given but is not configured on the box.
    """
    route = "ip route " + srcIp
    if prefix != "":
        route += "/" + prefix
    elif mask != "":
        route += " " + mask
    if intf != "":
        # check if vlan or port-channel and verify they are created
        # BUG FIX: the interface name was appended after a literal '%s'
        # ("... interface %s" + intf) instead of substituted with %.
        s, o = cli("show running-config interface %s" % intf)
        if s == 0:
            route += " " + intf
        else:
            # BUG FIX: removed unreachable 'route = ""' / 'return route'
            # statements that followed this raise.
            raise ValueError('interface %s is not created' % intf)
    if nexthop != "":
        route += " " + nexthop
        if nhPrefix != "":
            route += "/" + nhPrefix
        elif nhMask != "":
            route += " " + nhMask
    if tag != "":
        route += " tag " + str(tag)
    if routePref != "":
        route += " " + str(routePref)
    return route
def add_route(self, srcIp="", prefix="", mask="", intf="", nexthop="",
nhMask="", nhPrefix="", tag="", routePref="", vrf="default"):
confStr = "config t ; "
if vrf != "default":
confStr += "vrf context " + vrf + " ; "
if srcIp == "":
raise ValueError, 'IP address not given to add route'
route = self.__build_route(srcIp, prefix, mask, intf, nexthop, nhMask,
nhPrefix,tag,routePref)
confStr += route + " ; " + "end ;"
print "Vrf: " + vrf
print "Route to be added: " + route
return nxos_utils.cli_ex(confStr)
def delete_route(self, srcIp="", prefix="", mask="", intf="", nexthop="",
nhMask="", nhPrefix="", tag="", routePref="",
vrf="default"):
confStr = "config t ;"
if vrf != "default":
confStr += "vrf context " + vrf + " ; "
if srcIp == "":
raise ValueError, 'IP address not given to add route'
route = self.__build_route(srcIp, prefix, mask, intf, nexthop, nhMask,
nhPrefix, tag, routePref)
confStr += "no " + route + " ; " + "end ;"
print "Vrf: " + vrf
print "Route to be deleted: " + route
return nxos_utils.cli_ex(confStr)
| |
"""
Dictionary statics calculator
"""
import json
import pdb
_PATH_SEP = "|"


class DictionaryCounter(object):
    """Create summary statistics over a stream of dictionaries.

    Pass dictionary objects in one at a time via __call__; counts are keyed
    by the '|'-joined path of each leaf value.

    Args:
        method: "simple" does a simple count on whether the item exists in
            the passed-in dictionary; "complex" accumulates aggregated
            occurrence counts directly into ``counts``, i.e. for
            {'x': [1, 2, 3]}:
                simple:  {'x': 1}
                complex: {'x': 3}
        sub_total: the string path to use for aggregating subtotals.
            For {'x': {'y': 'hello'}} and {'x': {'y': 'bye'}} the path
            'x|y' creates subtotal buckets for 'hello' and 'bye'.
        list_blank: path string; dictionaries missing a value for that path
            are recorded in ``blank`` (same path format as ``sub_total``).
            Defaults to no blank tracking.

    Attributes:
        counts: dictionary of summary counts.
        sub_counts: dictionary of the subtotals.
        blank: list of dictionaries missing the property specified with
            ``list_blank``.
    """

    def __init__(self, method="simple", sub_total=None, list_blank=None):
        self.counts = {}
        self.sub_counts = {}
        self.method = method
        self.sub_total = sub_total
        # BUG FIX: list_blank previously used a shared mutable default ({});
        # normalising None here preserves the old falsy behaviour without
        # the shared-state hazard.
        self.list_blank = {} if list_blank is None else list_blank
        self.blank = []

    def __call__(self, dict_obj):
        """Fold one dictionary into the running statistics."""
        # (removed an unused local 'kwargs' that shadowed the call below)
        counts = self._count_objs(dict_obj, **{'current': {}})
        if self.method == "simple":
            self.update_counts(counts['current'])
        if self.sub_total:
            self.update_subtotals(counts['current'], counts['sub_val'])
        self._record_blank(counts['current'], dict_obj)

    def _record_blank(self, current, dict_obj):
        """Record dict_obj in ``blank`` when the 'list_blank' path is absent.

        args:
        -----
        current: the current dictionary counts
        dict_obj: the original dictionary object
        """
        if not self.list_blank:
            return
        if self.list_blank not in current:
            self.blank.append(dict_obj)

    def _count_objs(self, obj, path=None, **kwargs):
        """Cycle through the object and add in count values.

        Args:
        -----
        obj: the object to parse
        path: the current '|'-joined path

        kwargs:
        -------
        current: a dictionary of counts for the current call
        sub_val: the value to use for subtotal aggregation
        """
        sub_val = None
        if isinstance(obj, dict):
            for key, value in obj.items():
                if isinstance(value, (list, dict)):
                    kwargs = self._count_objs(value,
                                              self.make_path(key, path),
                                              **kwargs)
                else:
                    if self.make_path(key, path) == self.sub_total:
                        sub_val = value
                    kwargs['current'] = self._increment_prop(key,
                                                             path,
                                                             **kwargs)
        elif isinstance(obj, list):
            for item in obj:
                if isinstance(item, (list, dict)):
                    kwargs = self._count_objs(item, path, **kwargs)
                else:
                    # BUG FIX: removed a leftover pdb.set_trace() here.
                    if path == self.sub_total:
                        sub_val = item
                    kwargs['current'] = self._increment_prop(path, **kwargs)
        else:
            # Scalar leaf passed directly into this call.
            kwargs['current'] = self._increment_prop(path, **kwargs)
            if path == self.sub_total:
                # BUG FIX: this branch referenced the undefined name 'item'
                # (a NameError) and hit a leftover pdb.set_trace(); the
                # scalar itself is the subtotal value.
                sub_val = obj
        # Only the first (outermost) match wins for the subtotal value.
        if kwargs.get('sub_val') is None:
            kwargs['sub_val'] = sub_val
        return kwargs

    def _increment_prop(self, prop, path=None, **kwargs):
        """Increment the property path count.

        args:
        -----
        prop: the key for the prop
        path: the path to the prop

        kwargs:
        -------
        current: dictionary count for the current dictionary

        Returns the counter dictionary that was incremented.
        """
        new_path = self.make_path(prop, path)
        if self.method == 'simple':
            counter = kwargs['current']
        else:
            # "complex" mode accumulates directly into the instance totals.
            counter = self.counts
        try:
            counter[new_path] += 1
        except KeyError:
            counter[new_path] = 1
        return counter

    def update_counts(self, current):
        """Update instance counts from the current dictionary's counts.

        args:
        -----
        current: current dictionary counts
        """
        for item in current:
            try:
                self.counts[item] += 1
            except KeyError:
                self.counts[item] = 1

    def update_subtotals(self, current, sub_key):
        """Update sub_total counts from the current dictionary's counts.

        args:
        -----
        current: current dictionary counts
        sub_key: the key/value to use for the subtotals
        """
        if not self.sub_counts.get(sub_key):
            self.sub_counts[sub_key] = {}
        for item in current:
            try:
                self.sub_counts[sub_key][item] += 1
            except KeyError:
                self.sub_counts[sub_key][item] = 1

    def print(self):
        """Print the summary statistics to the terminal."""
        print("TOTALS -------------------------------------------")
        print(json.dumps(self.counts, indent=4, sort_keys=True))
        if self.sub_total:
            print("\nSUB TOTALS --- based on '%s' ---------" % self.sub_total)
            print(json.dumps(self.sub_counts, indent=4, sort_keys=True))
        if self.list_blank:
            print("\nMISSING nodes for '%s':" % self.list_blank,
                  len(self.blank))

    @staticmethod
    def make_path(prop, path):
        """Make the path string.

        args:
        -----
        prop: the key for the current object
        path: the previous path to the prop
        """
        if path:
            return _PATH_SEP.join([path, prop])
        return prop

    @staticmethod
    def parse_path(path):
        """Split the path back into its parts.

        args:
        -----
        path: the string path to parse
        """
        return path.split(_PATH_SEP)
| |
from __future__ import unicode_literals
from datetime import timedelta
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.postgres.fields import JSONField
from django.core.urlresolvers import reverse
from django.utils.encoding import python_2_unicode_compatible
from model_utils import Choices
from django.contrib.auth.models import Group
from django.core.files.storage import FileSystemStorage
from django.core.exceptions import ValidationError
#from approvals.models import Approval
from ledger.accounts.models import Organisation, Address as LedgerAddress, OrganisationAddress
#from ledger.payments.models import Invoice
upload_storage = FileSystemStorage(location=settings.PRIVATE_MEDIA_ROOT)
@python_2_unicode_compatible
class Record(models.Model):
    """This model represents a record (an uploaded file) that needs to be
    saved for future reference. It also records metadata and optional text
    content to be indexed for search.
    """
    # Document category within the assessment workflow.
    DOC_CATEGORY_CHOICES = Choices(
        (1, 'consent', ('Landowner consent')),
        (2, 'deed', ('Deed')),
        (3, 'assessment', ('Assessment report')),
        (4, 'referee_response', ('Referee response')),
        (5, 'lodgement', ('Lodgement document')),
        (6, 'draft', ('Draft document')),
        (7, 'final', ('Final document')),
        (8, 'determination', ('Determination document')),
        (9, 'completion', ('Completed document')),
    )
    # What kind of parent object the file belongs to; values >= 2001 are
    # non-application groups (people, organisations, comms logs, etc.).
    FILE_GROUP = Choices(
        (1, 'permit', ('Permit')),
        (2, 'licence', ('Licence/permit')),
        (3, 'part5', ('Part 5')),
        (4, 'emergency', ('Emergency works')),
        (5, 'part5cr', ('Part 5 - Amendment Request')),
        (6, 'part5amend', ('Part 5 - Amendment Application')),
        (7, 'test', ('Test - Application')),
        (8, 'permitamend', ('Amend Permit')),
        (9, 'licenceamend', ('Amend Licence')),
        (10, 'permitrenew', ('Renew Permit')),
        (11, 'licencerenew', ('Renew Licence')),
        (2001, 'person', ('Person')),
        (2002, 'organisation', ('Organistion')),
        (2003, 'application_comms', ('Application Communication Logs')),
        (2004, 'account_comms', ('Account Communication Logs')),
        (2005, 'approval', ('Approval')),
        (2006, 'compliance', ('Compliance')),
        (2007, 'approval_comms', ('Approval Communication Logs'))
    )
    # Files are stored on the private storage backend, keyed by upload date.
    upload = models.FileField(max_length=512, upload_to='uploads/%Y/%m/%d', storage=upload_storage)
    name = models.CharField(max_length=256)
    category = models.IntegerField(choices=DOC_CATEGORY_CHOICES, null=True, blank=True)
    metadata = JSONField(null=True, blank=True)
    text_content = models.TextField(null=True, blank=True, editable=False)  # Text for indexing
    file_group = models.IntegerField(choices=FILE_GROUP, null=True, blank=True)
    # pk of the object the file group refers to (loose reference, no FK).
    file_group_ref_id = models.IntegerField(null=True, blank=True)
    extension = models.CharField(max_length=5, null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True, null=True, blank=True)

    def file_url(self):
        """Return the private-media URL for this record's file.

        NOTE: normalises a missing extension to '' in memory only; the
        change is not persisted to the database.
        """
        if self.extension is None:
            self.extension = ''
        return settings.PRIVATE_MEDIA_URL+str(self.pk)+'-file'+self.extension

    def __str__(self):
        if self.category:
            return '{} ({})'.format(self.name, self.get_category_display())
        return self.name
@python_2_unicode_compatible
class Vessel(models.Model):
    """This model represents a vessel/craft that will be used
    in relation to the application.
    """
    VESSEL_TYPE_CHOICES = Choices(
        (0, 'vessel', ('Vessel')),
        (1, 'craft', ('Craft')),
    )
    vessel_type = models.SmallIntegerField(choices=VESSEL_TYPE_CHOICES, null=True, blank=True)
    # Vessel Information
    name = models.CharField(max_length=256)
    vessel_id = models.CharField(max_length=256, null=True, blank=True, verbose_name='Vessel identification')
    # Registration paperwork uploaded as Record files.
    registration = models.ManyToManyField(Record, blank=True, related_name='vessel_documents')
    size = models.PositiveIntegerField(null=True, blank=True, verbose_name='size (m)')
    engine = models.PositiveIntegerField(null=True, blank=True, verbose_name='engine (kW)')
    passenger_capacity = models.PositiveIntegerField(null=True, blank=True)
    # craft Information (used when vessel_type is 'craft')
    craft_type = models.CharField(max_length=256, null=True, blank=True, verbose_name='Craft Type')
    number_of_crafts = models.PositiveIntegerField(null=True, blank=True, verbose_name='Number of crafts')
    documents = models.ManyToManyField(Record, blank=True, related_name='craft_documents')

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class ApplicationPurpose(models.Model):
    """Lookup table of selectable purposes for an application."""
    purpose = models.CharField(max_length=256)

    def __str__(self):
        return self.purpose
@python_2_unicode_compatible
class Craft(models.Model):
    """Lookup table of craft types that can be linked to an application.

    CONSISTENCY FIX: added @python_2_unicode_compatible — every other model
    in this module that defines __str__ carries the decorator; without it
    this model would render bytes-vs-text inconsistently on Python 2.
    """
    name = models.CharField(max_length=256)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Application(models.Model):
    """This model represents an application by a customer to P&W for a single
    permit, licence/permit, part 5, etc.
    """
    # Kind of approval being applied for.
    APP_TYPE_CHOICES = Choices(
        (1, 'permit', ('Permit')),
        (2, 'licence', ('Licence/permit')),
        (3, 'part5', ('Part 5')),
        (4, 'emergency', ('Emergency works')),
        (5, 'part5cr', ('Part 5 - Amendment Request')),
        (6, 'part5amend', ('Section 84')),
        (7, 'test', ('Test - Application')),
        (8, 'permitamend', ('Amend Permit')),
        (9, 'licenceamend', ('Amend Licence')),
        (10, 'permitrenew', ('Renew Permit')),
        (11, 'licencerenew', ('Renew Licence'))
    )
    # Who the applicant is acting for.
    APP_APPLY_ON = Choices(
        (1,'yourself', ('On Behalf of yourself')),
        (2,'yourcompany', ('On Behalf of your company / government agency')),
        (3, 'somebody_else_individual', ('On Behalf of indivdual as somebody else (as an authorised agent)')),
        (4, 'somebody_else_company', ('On Behalf of a company as somebody else (as an authorised agent)')),
        (5, 'internal', ('Internal'))
    )
    # Coarse record status, distinct from the workflow state below.
    APP_STATUS = Choices(
        (1, 'active', ('Active')),
        (2, 'cancelled', ('Cancelled')),
        (3, 'draft', ('Draft'))
    )
    # Workflow state of the application as it moves between roles.
    APP_STATE_CHOICES = Choices(
        (0, 'unknown',('Unknown')),
        (1, 'draft', ('Draft')),
        (2, 'with_admin', ('With Admin Officer')),
        (3, 'with_referee', ('With Referrals')),
        (4, 'with_assessor', ('With Assessor')),
        (5, 'with_manager', ('With Manager')),
        (6, 'issued', ('Issued')),
        (7, 'issued_with_admin', ('Issued (with admin)')),
        (8, 'declined', ('Declined')),
        (9, 'new', ('New')),
        (10, 'approved', ('Approved')),
        (11, 'expired', ('Expired')),
        (12, 'with_director', ('With Director')),
        (13, 'with_exec', ('With Executive')),
        (14, 'completed', ('Completed')),
        (15, 'creator', ('Form Creator')),
        (16, 'current', ('Current')),
        (17, 'discard', ('Deleted')),
        (18, 'payment', ('Pending Payment')),
        (19, 'not_supported', ('Not Supported'))
    )
    APP_LOCATION_CHOICES = Choices(
        (0, 'onland', ('On Land')),
        (1, 'onwater', ('On Water')),
        (2, 'both', ('Both')),
    )
    APP_YESNO = Choices(
        (True, ('Yes')),
        (False, ('No'))
    )
    APP_VESSEL_CRAFT = Choices(
        #(1, 'vessel', ('Vessel(s)')),
        #(2, 'craft', ('Craft(s)')),
        (3, 'yes', ('yes')),
        (0, 'none', ('No'))
    )
    # --- Parties and assignment ---
    applicant = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='applicant')
    organisation = models.ForeignKey(Organisation, blank=True, null=True, on_delete=models.PROTECT)
    app_type = models.IntegerField(choices=APP_TYPE_CHOICES, blank=True, null=True)
    apply_on_behalf_of = models.IntegerField(choices=APP_APPLY_ON, blank=True, null=True)
    assignee = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='assignee')
    assigned_officer = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='assigned_officer')
    status = models.IntegerField(choices=APP_STATUS, default=APP_STATUS.active, editable=True, )
    state = models.IntegerField(choices=APP_STATE_CHOICES, default=APP_STATE_CHOICES.draft, editable=True)
    # --- Core application details ---
    title = models.CharField(max_length=256)
    description = models.TextField(null=True, blank=True)
    submit_date = models.DateField()
    expire_date = models.DateField(blank=True, null=True)
    proposed_commence = models.DateField(null=True, blank=True)
    proposed_end = models.DateField(null=True, blank=True)
    issue_date = models.DateField(null=True, blank=True)
    cost = models.CharField(max_length=256, null=True, blank=True)
    project_no = models.CharField(max_length=256, null=True, blank=True)
    related_permits = models.TextField(null=True, blank=True)
    over_water = models.BooleanField(default=False)
    records = models.ManyToManyField(Record, blank=True, related_name='records')
    # --- Vessels / crafts ---
    vessels = models.ManyToManyField(Vessel, blank=True)
    vessel_or_craft_details = models.IntegerField(null=True, blank=True)
    purpose = models.ForeignKey(ApplicationPurpose, null=True, blank=True)
    max_participants = models.IntegerField(null=True, blank=True)
    # --- Location and operations ---
    proposed_location = models.SmallIntegerField(choices=APP_LOCATION_CHOICES, null=True, blank=True)
    address = models.TextField(null=True, blank=True)
    location_route_access = models.ManyToManyField(Record, blank=True, related_name='location_route_access')
    jetties = models.TextField(null=True, blank=True)
    jetty_dot_approval = models.NullBooleanField(default=None)
    jetty_dot_approval_expiry = models.DateField(null=True, blank=True)
    drop_off_pick_up = models.TextField(null=True, blank=True)
    food = models.NullBooleanField(default=None)
    beverage = models.NullBooleanField(default=None)
    liquor_licence = models.NullBooleanField(default=None)
    byo_alcohol = models.NullBooleanField(default=None)
    sullage_disposal = models.TextField(null=True, blank=True)
    waste_disposal = models.TextField(null=True, blank=True)
    refuel_location_method = models.TextField(null=True, blank=True)
    berth_location = models.TextField(null=True, blank=True)
    anchorage = models.TextField(null=True, blank=True)
    operating_details = models.TextField(null=True, blank=True)
    # --- Supporting documents (all stored as Record files) ---
    cert_survey = models.ManyToManyField(Record, blank=True, related_name='cert_survey')
    cert_public_liability_insurance = models.ManyToManyField(Record, blank=True, related_name='cert_public_liability_insurace')
    risk_mgmt_plan = models.ManyToManyField(Record, blank=True, related_name='risk_mgmt_plan')
    safety_mgmt_procedures = models.ManyToManyField(Record, blank=True, related_name='safety_mgmt_plan')
    brochures_itineries_adverts = models.ManyToManyField(Record, blank=True, related_name='brochures_itineries_adverts')
    other_relevant_documents = models.ManyToManyField(Record, blank=True, related_name='other_relevant_documents')
    land_owner_consent = models.ManyToManyField(Record, blank=True, related_name='land_owner_consent')
    deed = models.ManyToManyField(Record, blank=True, related_name='deed')
    submitted_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='Submitted_by')
    # --- Part 5 / river lease details ---
    river_lease_require_river_lease = models.NullBooleanField(default=None, null=True, blank=True)
    river_lease_scan_of_application = models.ManyToManyField(Record, blank=True, related_name='river_lease_scan_of_application')
    river_lease_reserve_licence = models.NullBooleanField(default=None, null=True, blank=True)
    river_lease_application_number = models.CharField(max_length=30, null=True, blank=True)
    proposed_development_current_use_of_land = models.TextField(null=True, blank=True)
    proposed_development_plans = models.ManyToManyField(Record, blank=True, related_name='proposed_development_plans')
    proposed_development_description = models.TextField(null=True, blank=True)
    # --- Workflow documents (drafts, finals, determinations) ---
    document_draft = models.ManyToManyField(Record, blank=True, related_name='document_draft')
    document_new_draft = models.ManyToManyField(Record, blank=True, related_name='document_newdraft')
    document_new_draft_v3 = models.ManyToManyField(Record, blank=True, related_name='document_newdraftv3')
    document_draft_signed = models.ManyToManyField(Record, blank=True, related_name='document_draft_signed')
    document_final = models.ManyToManyField(Record, blank=True, related_name='document_final')
    document_final_signed = models.ManyToManyField(Record, blank=True, related_name='document_final_signed')
    document_determination = models.ManyToManyField(Record, blank=True, related_name='document_determination')
    document_completion = models.ManyToManyField(Record, blank=True, related_name='document_completion')
    # --- Publication dates/windows ---
    publish_documents = models.DateField(null=True, blank=True)
    publish_documents_expiry = models.DateTimeField(blank=True, null=True)
    publish_draft_report = models.DateField(null=True, blank=True)
    publish_draft_expiry = models.DateTimeField(blank=True, null=True)
    publish_final_report = models.DateField(null=True, blank=True)
    publish_final_expiry = models.DateTimeField(blank=True, null=True)
    publish_determination_report = models.DateField(null=True, blank=True)
    # Workflow routing identifier (stringly-typed; default step '1').
    routeid = models.CharField(null=True, blank=True, default=1, max_length=4)
    assessment_start_date = models.DateField(null=True, blank=True)
    group = models.ForeignKey(Group, null=True, blank=True, related_name='application_group_assignment')
    swan_river_trust_board_feedback = models.ManyToManyField(Record, blank=True, related_name='document_swan_river_board_feedback')
    document_memo = models.ManyToManyField(Record, blank=True, related_name='document_memo')
    document_memo_2 = models.ManyToManyField(Record, blank=True, related_name='document_memo_2')
    document_briefing_note = models.ManyToManyField(Record, blank=True, related_name='document_briefing_note')
    document_determination_approved = models.ManyToManyField(Record, blank=True, related_name='document_determination_approved')
    # Loose reference to an Approval record (no FK; approvals app decoupled).
    approval_id = models.IntegerField(null=True, blank=True)
    assessed_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='assessed_by')
    supporting_info_demonstrate_compliance_trust_policies = models.ManyToManyField(Record, blank=True, related_name='supporting_info_demonstrate_compliance_trust_policies')
    type_of_crafts = models.ForeignKey(Craft, null=True, blank=True, related_name='craft')
    number_of_crafts = models.IntegerField(null=True, blank=True)
    route_status = models.CharField(null=True, blank=True, default='Draft', max_length=256)
    submitter_comment = models.TextField(null=True, blank=True, default='', max_length=1000)
    referral_comment = models.TextField(null=True, blank=True, default='', max_length=1000)
    landowner = models.TextField(null=True, blank=True)
    land_description = models.TextField(null=True, blank=True)
    approval_document = models.ForeignKey(Record, null=True, blank=True, related_name='application_approval_document')
    approval_document_signed = models.ForeignKey(Record, null=True, blank=True, related_name='application_approval_document_signed')
    # Link back to the application this one amends/renews, if any.
    old_application = models.ForeignKey('Application', null=True, blank=True, related_name='application_old_application')
    old_approval_id = models.IntegerField(null=True, blank=True)

    def __str__(self):
        return 'Application {}: {} - {} ({})'.format(
            self.pk, self.get_app_type_display(), self.title, self.get_state_display())

    def get_absolute_url(self):
        return reverse('application_detail', args=(self.pk,))
@python_2_unicode_compatible
class ApplicationLicenceFee(models.Model):
    """Licence fee applying to an application type over a date window.

    Overlapping windows for the same app_type are rejected in clean().
    """
    app_type = models.IntegerField(choices=Application.APP_TYPE_CHOICES, blank=True, null=True)
    licence_fee = models.DecimalField(max_digits=8, decimal_places=2, default='0.00', blank=False, null=False)
    start_dt = models.DateTimeField(blank=True, null=True)
    end_dt = models.DateTimeField(blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True, editable=False)

    def __str__(self):
        # BUG FIX: @python_2_unicode_compatible requires the class to define
        # __str__ (it raises ValueError at class creation on Python 2
        # otherwise); the decorator was previously applied without one.
        return 'ApplicationLicenceFee {}: {}'.format(self.pk, self.licence_fee)

    def clean(self, *args, **kwargs):
        """Reject inverted or overlapping fee date ranges for an app_type."""
        # BUG FIX: both dates are nullable; comparing or querying with None
        # raises TypeError/ValueError, so skip validation until both are set.
        if self.start_dt is None or self.end_dt is None:
            return
        if self.end_dt < self.start_dt:
            raise ValidationError('End date must be greater than Start Date')
        # Three overlap cases: new start inside an existing window, new end
        # inside an existing window, or a new window enclosing an existing one.
        if ApplicationLicenceFee.objects.filter(app_type=self.app_type,start_dt__lte=self.start_dt, end_dt__gte=self.start_dt).exclude(pk=self.pk).count() > 0:
            raise ValidationError('Start Date matches existing record.')
        if ApplicationLicenceFee.objects.filter(app_type=self.app_type,start_dt__lte=self.end_dt, end_dt__gte=self.end_dt).exclude(pk=self.pk).count() > 0:
            raise ValidationError('End Date matches existing record.')
        if ApplicationLicenceFee.objects.filter(app_type=self.app_type,start_dt__gte=self.start_dt, end_dt__lte=self.end_dt).exclude(pk=self.pk).count() > 0:
            raise ValidationError('Dates matches existing record.')
class Reason(models.Model):
    """Abstract base for selectable reason records with a zero-padded code."""
    text = models.TextField()
    detailRequired = models.BooleanField(default=False)
    editable = models.BooleanField(default=True,editable=False)

    class Meta:
        ordering = ('id',)
        abstract = True

    # Properties
    # ==============================
    def code(self):
        """Return this reason's id as a zero-padded 4-character code."""
        return self.__get_code()

    # Methods
    # ==============================
    def __get_code(self):
        # str.zfill replaces the manual '0' * (4 - len) padding; identical
        # output for non-negative ids (e.g. 7 -> '0007', 12345 -> '12345').
        return str(self.id).zfill(4)
@python_2_unicode_compatible
class DiscountReason(Reason):
    """A reason recorded when a booking price is overridden/discounted.

    CONSISTENCY FIX: added @python_2_unicode_compatible (the module applies
    it to every other model defining __str__) and removed a dead 'pass'.
    """

    def __str__(self):
        return '{}'.format(self.text)
class Booking(models.Model):
    """A booking made against an application, including pricing overrides
    and cancellation bookkeeping.
    """
    # Plain tuple choices (not model_utils.Choices, unlike other models here).
    BOOKING_TYPE_CHOICES = (
        (0, 'Reception booking'),
        (1, 'Internet booking'),
        (2, 'Black booking'),
        (3, 'Temporary reservation'),
        (4, 'Cancelled Booking'),
    )
    customer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=True, null=True)
    details = JSONField(null=True, blank=True)
    booking_type = models.SmallIntegerField(choices=BOOKING_TYPE_CHOICES, default=0)
    expiry_time = models.DateTimeField(blank=True, null=True)
    cost_total = models.DecimalField(max_digits=8, decimal_places=2, default='0.00')
    # Manual price override plus audit trail of who/why.
    override_price = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)
    override_reason = models.ForeignKey('DiscountReason', null=True, blank=True)
    override_reason_info = models.TextField(blank=True, null=True)
    overridden_by = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.PROTECT, blank=True, null=True, related_name='overridden_bookings')
    application = models.ForeignKey('Application', null=True)
    send_invoice = models.BooleanField(default=False)
    # Cancellation bookkeeping (field name 'cancelation_time' is a historic
    # spelling kept for schema compatibility).
    cancellation_reason = models.TextField(null=True,blank=True)
    cancelation_time = models.DateTimeField(null=True,blank=True)
    confirmation_sent = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.PROTECT, blank=True, null=True,related_name='created_by_booking')
    canceled_by = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.PROTECT, blank=True, null=True,related_name='canceled_bookings')
@python_2_unicode_compatible
class BookingInvoice(models.Model):
    """Links a Booking to an invoice reference in the ledger system.

    CONSISTENCY FIX: added @python_2_unicode_compatible to match every other
    model in this module that defines __str__.
    """
    booking = models.ForeignKey(Booking, related_name='invoices')
    invoice_reference = models.CharField(max_length=50, null=True, blank=True, default='')
    # True when the invoice was generated by the system (vs. entered manually).
    system_invoice = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return 'Booking {} : Invoice #{}'.format(self.id, self.invoice_reference)
@python_2_unicode_compatible
class PublicationFeedback(models.Model):
    """Public feedback received in response to a published application."""
    PUB_STATES_CHOICES = Choices(
        (1, 'Western Australia', ('Western Australia')),
        (2, 'New South Wales', ('New South Wales')),
        (3, 'Victoria', ('Victoria')),
        (4, 'South Australia', ('South Australia')),
        (5, 'Northern Territory', ('Northern Territory')),
        (6, 'Queensland', ('Queensland')),
        (7, 'Australian Capital Territory', ('Australian Capital Territory')),
        (8, 'Tasmania', ('Tasmania')),
    )
    application = models.ForeignKey(Application, on_delete=models.CASCADE)
    # Respondent contact details.
    name = models.CharField(max_length=256)
    address = models.CharField(max_length=256)
    suburb = models.CharField(max_length=100)
    state = models.IntegerField(choices=PUB_STATES_CHOICES)
    # Australian postcodes are 4 digits.
    postcode = models.CharField(max_length=4)
    phone = models.CharField(max_length=20)
    email = models.EmailField()
    comments = models.TextField(null=True, blank=True)
    records = models.ManyToManyField(Record, blank=True, related_name='feedback')
    # Free-text workflow status — presumably managed by views; no choices
    # are enforced here (TODO confirm the expected values).
    status = models.CharField(max_length=20)

    def __str__(self):
        return 'PublicationFeedback {} ({})'.format(self.pk, self.application)
@python_2_unicode_compatible
class PublicationNewspaper(models.Model):
    """This model represents an application published in a newspaper."""
    application = models.ForeignKey(Application, on_delete=models.CASCADE)
    # Date the notice ran.
    date = models.DateField(null=True, blank=True)
    newspaper = models.CharField(max_length=150)
    records = models.ManyToManyField(Record, blank=True, related_name='newspaper')

    def __str__(self):
        return 'PublicationNewspaper {} ({})'.format(self.pk, self.application)
@python_2_unicode_compatible
class PublicationWebsite(models.Model):
    """This model represents an application published on the website,
    pairing the original document with the version actually published.
    """
    application = models.ForeignKey(Application, on_delete=models.CASCADE)
    original_document = models.ForeignKey(Record, blank=True, null=True, related_name='original_document')
    published_document = models.ForeignKey(Record, blank=True, null=True, related_name='published_document')

    def __str__(self):
        return 'PublicationWebsite {} ({})'.format(self.pk, self.application)
@python_2_unicode_compatible
class Location(models.Model):
    """This model represents a single spatial location associated with an
    application.
    """
    application = models.ForeignKey(Application, on_delete=models.CASCADE)
    lot = models.CharField(max_length=256, null=True, blank=True)
    reserve = models.CharField(max_length=256, null=True, blank=True)
    suburb = models.CharField(max_length=256, null=True, blank=True)
    intersection = models.CharField(max_length=256, null=True, blank=True)
    # TODO: validation related to LGA name (possible FK).
    lga = models.CharField(max_length=256, null=True, blank=True)
    # GeoDjango polygon for the spatial footprint.
    poly = models.PolygonField(null=True, blank=True)
    records = models.ManyToManyField(Record, blank=True)
    # TODO: certificate of title fields (ref. screen 30)
    title_volume = models.CharField(max_length=256, null=True, blank=True)
    folio = models.CharField(max_length=30, null=True, blank=True)
    dpd_number = models.CharField(max_length=30, null=True, blank=True)
    location = models.CharField(max_length=256, null=True, blank=True)  # this seem like it different from street address based on the example form.
    street_number_name = models.CharField(max_length=256, null=True, blank=True)
    local_government_authority = models.CharField(max_length=256, null=True, blank=True)

    def __str__(self):
        return 'Location {} ({})'.format(self.pk, self.application)
@python_2_unicode_compatible
class Referral(models.Model):
    """This model represents a referral of an application to a referee
    (external or internal) for comment/conditions.
    """
    REFERRAL_STATUS_CHOICES = Choices(
        (1, 'referred', ('Referred')),
        (2, 'responded', ('Responded')),
        (3, 'recalled', ('Recalled')),
        (4, 'expired', ('Expired')),
        (5, 'with_admin',('Not Sent'))
    )
    application = models.ForeignKey(Application, on_delete=models.CASCADE)
    referee = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)
    details = models.TextField(blank=True, null=True)
    sent_date = models.DateField(blank=True, null=True)
    # Response window in days, counted from sent_date.
    period = models.PositiveIntegerField(verbose_name='period (days)')
    expire_date = models.DateField(blank=True, null=True, editable=False)
    response_date = models.DateField(blank=True, null=True)
    feedback = models.TextField(blank=True, null=True)
    proposed_conditions = models.TextField(blank=True, null=True)
    records = models.ManyToManyField(Record, blank=True)
    status = models.IntegerField(choices=REFERRAL_STATUS_CHOICES, default=REFERRAL_STATUS_CHOICES.with_admin)

    class Meta:
        unique_together = ('application', 'referee')

    def __str__(self):
        return 'Referral {} to {} ({})'.format(self.pk, self.referee, self.application)

    def save(self, *args, **kwargs):
        """Save the referral.

        NOTE(review): the automatic expire_date calculation
        (sent_date + period days) is commented out below, so expire_date is
        never set here — confirm whether that is intentional.
        """
        #self.expire_date = self.sent_date + timedelta(days=self.period)
        super(Referral, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Condition(models.Model):
    """This model represents a condition of approval for an application
    (either proposed by a referee or applied by P&W).
    """
    CONDITION_STATUS_CHOICES = Choices(
        (1, 'proposed', ('Proposed')),
        (2, 'applied', ('Applied')),
        (3, 'rejected', ('Rejected')),
        (4, 'cancelled', ('Cancelled')),
    )
    CONDITION_RECUR_CHOICES = Choices(
        (1, 'weekly', ('Weekly')),
        (2, 'monthly', ('Monthly')),
        (3, 'annually', ('Annually')),
    )
    application = models.ForeignKey(Application, on_delete=models.PROTECT)
    # Display ordering number within the application's conditions.
    condition_no = models.IntegerField(blank=True, null=True)
    condition = models.TextField(blank=True, null=True)
    # Set when the condition originated from a referral response.
    referral = models.ForeignKey(Referral, null=True, blank=True, on_delete=models.PROTECT)
    status = models.IntegerField(choices=CONDITION_STATUS_CHOICES, default=CONDITION_STATUS_CHOICES.proposed)
    records = models.ManyToManyField(Record, blank=True)
    due_date = models.DateField(blank=True, null=True)
    # Rule: recurrence patterns (if present) begin on the due date.
    recur_pattern = models.IntegerField(choices=CONDITION_RECUR_CHOICES, null=True, blank=True)
    recur_freq = models.PositiveIntegerField(
        null=True, blank=True, verbose_name='recurrence frequency',
        help_text='How frequently is the recurrence pattern applied (e.g. every 2 months)')
    suspend = models.BooleanField(default=False)
    # Optional advisory note shown alongside the condition.
    advise_no = models.IntegerField(blank=True, null=True)
    advise = models.TextField(blank=True, null=True)

    def __str__(self):
        return 'Condition {}: {}'.format(self.pk, self.condition)
@python_2_unicode_compatible
class ConditionPredefined(models.Model):
    """A predefined (template) condition that can be selected when applying
    conditions to an application.

    The previous docstring was copy-pasted from ``Condition`` and described
    a different model.
    """
    STATUS = Choices(
        (0, 'inactive', ('Inactive')),
        (1, 'active', ('Active')),
    )
    # Short label shown when selecting a predefined condition.
    title = models.CharField(max_length=256, null=True, blank=True)
    # Full text of the condition template.
    condition = models.TextField(blank=True, null=True)
    # Named choice instead of the magic number 1 (same stored value).
    status = models.IntegerField(choices=STATUS, default=STATUS.active)

    def __str__(self):
        """Return the condition's title."""
        return self.title
@python_2_unicode_compatible
class ComplianceGroup(models.Model):
    # NOTE(review): presumably groups individual Compliance returns so they
    # can be actioned together (Compliance rows point back here via
    # Compliance.compliance_group) — confirm against the views that create
    # these groups.
    # Keep these values in sync with Compliance.COMPLIANCE_STATUS_CHOICES.
    COMPLIANCE_GROUP_STATUS_CHOICES = Choices(
        (1, 'current', ('Current')),
        (2, 'due', ('Due')),
        (3, 'future', ('Future')),
        (4, 'approved', ('Approved')),
        (5, 'with_assessor', ('With Assessor')),
        (6, 'with_manager', ('With Manager')),
        (7, 'with_holder', ('With Licence Holder')),
        (8, 'overdue', ('Overdue')),
        (9, 'submitted', ('Submitted'))
    )
    # Plain integer reference to an approval (not a ForeignKey).
    approval_id = models.IntegerField(blank=True, null=True)
    title = models.CharField(max_length=256, blank=True, null=True)
    app_type = models.IntegerField(choices=Application.APP_TYPE_CHOICES, blank=True, null=True)
    # cid = models.ManyToManyField(Compliance, blank=True)
    applicant = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='compliance_group_applicant')
    organisation = models.ForeignKey(Organisation, blank=True, null=True, on_delete=models.PROTECT)
    status = models.IntegerField(choices=COMPLIANCE_GROUP_STATUS_CHOICES, default=COMPLIANCE_GROUP_STATUS_CHOICES.future)
    due_date = models.DateField(blank=True, null=True)
    assignee = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='compliance_group_assignee')

    def __str__(self):
        """Return a label with the pk and title."""
        return 'Condition Group {}: {}'.format(self.pk, self.title)
@python_2_unicode_compatible
class Compliance(models.Model):
    """This model represents a request for confirmation of fulfilment of the
    requirements for a single condition, based upon supplied evidence.
    """
    # Keep these values in sync with
    # ComplianceGroup.COMPLIANCE_GROUP_STATUS_CHOICES.
    COMPLIANCE_STATUS_CHOICES = Choices(
        (1, 'current', ('Current')),
        (2, 'due', ('Due')),
        (3, 'future', ('Future')),
        (4, 'approved', ('Approved')),
        (5, 'with_assessor', ('With Assessor')),
        (6, 'with_manager', ('With Manager')),
        (7, 'with_holder', ('With Licence Holder')),
        (8, 'overdue', ('Overdue')),
        (9, 'submitted', ('Submitted'))
    )
    # Plain integer reference to an approval (not a ForeignKey).
    approval_id = models.IntegerField(blank=True, null=True)
    title = models.CharField(max_length=256, blank=True, null=True)
    app_type = models.IntegerField(choices=Application.APP_TYPE_CHOICES, blank=True, null=True)
    # The condition this compliance return provides evidence for.
    condition = models.ForeignKey(Condition, on_delete=models.PROTECT)
    applicant = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='compliance_applicant')
    assignee = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='compliance_assignee')
    organisation = models.ForeignKey(Organisation, blank=True, null=True, on_delete=models.PROTECT)
    assessed_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='compliance_assigned_by')
    assessed_date = models.DateField(blank=True, null=True)
    status = models.IntegerField(choices=COMPLIANCE_STATUS_CHOICES, default=COMPLIANCE_STATUS_CHOICES.future)
    submitted_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='compliance_submitted_by')
    # Set automatically when the row is created.
    submit_date = models.DateTimeField(auto_now_add=True)
    due_date = models.DateField(blank=True, null=True)
    compliance = models.TextField(blank=True, null=True, help_text='Information to fulfil requirement of condition.')
    external_comments = models.TextField(blank=True, null=True)
    external_documents = models.ManyToManyField(Record, blank=True, related_name='compliance_external_documents')
    comments = models.TextField(blank=True, null=True)
    approve_date = models.DateField(blank=True, null=True)
    records = models.ManyToManyField(Record, blank=True)
    compliance_group = models.ForeignKey(ComplianceGroup, blank=True, null=True, on_delete=models.PROTECT)
    # NOTE(review): no on_delete specified — defaults to CASCADE on
    # Django < 2.0; confirm that is intended.
    group = models.ForeignKey(Group, null=True, blank=True, related_name='compliance_group_assignment')

    def __str__(self):
        """Return a label with the pk and related condition."""
        return 'Compliance {} ({})'.format(self.pk, self.condition)
class Communication(models.Model):
    """A log entry for a communication (phone/email/mail/system) relating to
    an application.
    """
    COMM_TYPE = Choices(
        (0, 'none', ('None')),
        (1, 'phone', ('Phone')),
        (2, 'email', ('Email')),
        (3, 'mail', ('Mail')),
        (4, 'system', ('System'))
    )
    application = models.ForeignKey(Application, on_delete=models.PROTECT)
    # Free-text sender/recipient (not foreign keys to users).
    comms_to = models.CharField(max_length=256, null=True, blank=True)
    comms_from = models.CharField(max_length=256, null=True, blank=True)
    subject = models.CharField(max_length=256, null=True, blank=True)
    comms_type = models.IntegerField(choices=COMM_TYPE, default=COMM_TYPE.none)
    details = models.TextField(blank=True, null=True)
    records = models.ManyToManyField(Record, blank=True, related_name='communication_docs')
    # Application state at the time of the communication.
    state = models.IntegerField(blank=True, null=True)  # move to foreign key once APP_STATE_CHOICES becomes a model
    created = models.DateTimeField(auto_now_add=True)
class CommunicationAccount(models.Model):
    """A communication log entry scoped to a user account.

    NOTE(review): near-duplicate of Communication (minus the 'system' choice);
    consider a shared abstract base model.
    """
    COMM_TYPE = Choices(
        (0, 'none', ('None')),
        (1, 'phone', ('Phone')),
        (2, 'email', ('Email')),
        (3, 'mail', ('Mail')),
    )
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)
    # Free-text sender/recipient (not foreign keys to users).
    comms_to = models.CharField(max_length=256, null=True, blank=True)
    comms_from = models.CharField(max_length=256, null=True, blank=True)
    subject = models.CharField(max_length=256, null=True, blank=True)
    comms_type = models.IntegerField(choices=COMM_TYPE, default=COMM_TYPE.none)
    details = models.TextField(blank=True, null=True)
    records = models.ManyToManyField(Record, blank=True, related_name='account_communication_docs')
    state = models.IntegerField(blank=True, null=True)  # move to foreign key once APP_STATE_CHOICES becomes a model
    created = models.DateTimeField(auto_now_add=True)
class CommunicationOrganisation(models.Model):
    """A communication log entry scoped to an organisation.

    NOTE(review): near-duplicate of CommunicationAccount; consider a shared
    abstract base model.
    """
    COMM_TYPE = Choices(
        (0, 'none', ('None')),
        (1, 'phone', ('Phone')),
        (2, 'email', ('Email')),
        (3, 'mail', ('Mail')),
    )
    org = models.ForeignKey(Organisation, on_delete=models.PROTECT)
    # Free-text sender/recipient (not foreign keys to users).
    comms_to = models.CharField(max_length=256, null=True, blank=True)
    comms_from = models.CharField(max_length=256, null=True, blank=True)
    subject = models.CharField(max_length=256, null=True, blank=True)
    comms_type = models.IntegerField(choices=COMM_TYPE, default=COMM_TYPE.none)
    details = models.TextField(blank=True, null=True)
    records = models.ManyToManyField(Record, blank=True, related_name='org_communication_docs')
    state = models.IntegerField(blank=True, null=True)  # move to foreign key once APP_STATE_CHOICES becomes a model
    created = models.DateTimeField(auto_now_add=True)
class CommunicationCompliance(models.Model):
    """A communication log entry scoped to a compliance return.

    NOTE(review): near-duplicate of CommunicationAccount; consider a shared
    abstract base model.
    """
    COMM_TYPE = Choices(
        (0, 'none', ('None')),
        (1, 'phone', ('Phone')),
        (2, 'email', ('Email')),
        (3, 'mail', ('Mail')),
    )
    compliance = models.ForeignKey(Compliance, on_delete=models.PROTECT)
    # Free-text sender/recipient (not foreign keys to users).
    comms_to = models.CharField(max_length=256, null=True, blank=True)
    comms_from = models.CharField(max_length=256, null=True, blank=True)
    subject = models.CharField(max_length=256, null=True, blank=True)
    comms_type = models.IntegerField(choices=COMM_TYPE, default=COMM_TYPE.none)
    details = models.TextField(blank=True, null=True)
    records = models.ManyToManyField(Record, blank=True, related_name='compliance_communication_docs')
    state = models.IntegerField(blank=True, null=True)  # move to foreign key once APP_STATE_CHOICES becomes a model
    created = models.DateTimeField(auto_now_add=True)
@python_2_unicode_compatible
class Delegate(models.Model):
    """This model represents the delegation of authority for an EmailUser to
    submit applications on behalf of an Organisation, within the Statutory
    Development application.
    """
    email_user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=False, on_delete=models.PROTECT)
    organisation = models.ForeignKey(Organisation, blank=False, on_delete=models.PROTECT)

    def __str__(self):
        """Return "user email: organisation name"."""
        return '{}: {}'.format(self.email_user.email, self.organisation.name)

    class Meta:
        # A user may be a delegate of a given organisation only once.
        unique_together = ('email_user', 'organisation')
@python_2_unicode_compatible
class OrganisationContact(models.Model):
    """This model represents the contact people within the organisation for this application.
    """
    email = models.EmailField(unique=False, blank=False)
    first_name = models.CharField(max_length=128, blank=True, verbose_name='Given name(s)')
    last_name = models.CharField(max_length=128, blank=True)
    phone_number = models.CharField(max_length=50, null=True, blank=True)
    mobile_number = models.CharField(max_length=50, null=True, blank=True)
    fax_number = models.CharField(max_length=50, null=True, blank=True)
    organisation = models.ForeignKey(Organisation, blank=False, null=True, on_delete=models.PROTECT)

    def __str__(self):
        """Return "first last: email".

        The previous format string had two placeholders for three arguments,
        so the email address was silently dropped from the label.
        """
        return '{} {}: {}'.format(self.first_name, self.last_name, self.email)
@python_2_unicode_compatible
class OrganisationPending(models.Model):
    """An organisation registration request awaiting approval or decline.

    The previous docstring was copy-pasted from OrganisationContact and
    described contact people instead of a pending organisation.
    """
    STATUS_CHOICES = Choices(
        (1, 'pending', ('Pending')),
        (2, 'approve', ('Approved')),
        (3, 'decline', ('Declined'))
    )
    name = models.CharField(max_length=128, null=True, blank=True)
    abn = models.CharField(max_length=50, null=True, blank=True, verbose_name='ABN')
    status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_CHOICES.pending)
    # Identification document supplied with the request.
    identification = models.ForeignKey(Record, null=True, blank=True, on_delete=models.SET_NULL)
    postal_address = models.ForeignKey(OrganisationAddress, related_name='org_pending_postal_address', blank=True, null=True, on_delete=models.SET_NULL)
    billing_address = models.ForeignKey(OrganisationAddress, related_name='org_pending_billing_address', blank=True, null=True, on_delete=models.SET_NULL)
    # The user who submitted the request.
    email_user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=False, on_delete=models.PROTECT, null=True)
    assignee = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, related_name='org_pending_assignee')
    company_exists = models.BooleanField(default=False)
    submit_date = models.DateField(auto_now_add=True, null=True, blank=True)
    # pin1/pin2: presumably access PINs used during organisation linking —
    # TODO confirm against OrganisationExtras.
    pin1 = models.CharField(max_length=50, null=True, blank=True)
    pin2 = models.CharField(max_length=50, null=True, blank=True)

    def __str__(self):
        """Return "name: abn (status)".

        The previous format string had two placeholders for three arguments,
        so the status was silently dropped from the label.
        """
        return '{}: {} ({})'.format(self.name, self.abn, self.status)
@python_2_unicode_compatible
class OrganisationExtras(models.Model):
    # Extra per-organisation data (PINs and identification document) stored
    # outside the core Organisation model.
    organisation = models.ForeignKey(Organisation, blank=False, null=True, on_delete=models.PROTECT, related_name='organisation_extras_org_id')
    # pin1/pin2: presumably access PINs used during organisation linking —
    # TODO confirm; mirrors OrganisationPending.pin1/pin2.
    pin1 = models.CharField(max_length=50, null=True, blank=True)
    pin2 = models.CharField(max_length=50, null=True, blank=True)
    identification = models.ForeignKey(Record, null=True, blank=True, on_delete=models.SET_NULL, related_name='organisation_extras_org_identification')

    def __str__(self):
        """Return "pk: organisation name"."""
        return '{}: {}'.format(self.pk, self.organisation.name,)
@python_2_unicode_compatible
class ApplicationInvoice(models.Model):
    """This model represents a reference to an invoice for payment raised against
    an application.
    """
    # NOTE(review): no on_delete specified — defaults to CASCADE on
    # Django < 2.0; confirm that is intended.
    application = models.ForeignKey(Application)
    # Reference string of the invoice in the billing system — presumably
    # unique, but uniqueness is not enforced here.
    invoice_reference = models.CharField(max_length=64)

    def __str__(self):
        """Return a label with the application and invoice reference."""
        return 'Application {} invoice {}'.format(self.application, self.invoice_reference)
@python_2_unicode_compatible
class StakeholderComms(models.Model):
    """This model represents a reference to the stakeholder communication for
    an application.
    """
    # Role the stakeholder plays with respect to the application.
    STAKEHOLDER_ROLE_TYPE = Choices(
        (0, 'none', ('None')),
        (1, 'applicant', ('Applicant')),
        (2, 'submitter', ('Submitter')),
        (3, 'referral', ('Referral')),
        (4, 'feedback', ('Feedback'))
    )
    # Channel used to reach the stakeholder.
    STAKEHOLDER_COMM_TYPE = Choices(
        (0, 'none', ('None')),
        (1, 'email', ('Email')),
        (2, 'posted', ('Posted')),
    )
    # NOTE(review): no on_delete specified — defaults to CASCADE on
    # Django < 2.0; confirm that is intended.
    application = models.ForeignKey(Application)
    email = models.EmailField(unique=False, blank=False)
    name = models.CharField(max_length=255)
    # Set automatically when the row is created.
    sent_date = models.DateTimeField(auto_now_add=True)
    role = models.IntegerField(choices=STAKEHOLDER_ROLE_TYPE, default=STAKEHOLDER_ROLE_TYPE.none)
    comm_type = models.IntegerField(choices=STAKEHOLDER_COMM_TYPE, default=STAKEHOLDER_COMM_TYPE.none)

    def __str__(self):
        """Return a label with the stakeholder name and sent date."""
        return 'Stakeholder {} Sent {}'.format(self.name, self.sent_date)
| |
# -*- coding: utf-8 -*-
"""Helper to check for availability and version of dependencies."""
from __future__ import print_function
from __future__ import unicode_literals
import re
try:
import ConfigParser as configparser
except ImportError:
import configparser # pylint: disable=import-error
class DependencyDefinition(object):
    """Definition of a single dependency.

    Attributes:
        dpkg_name (str): name of the dpkg package that provides the dependency.
        is_optional (bool): True if the dependency is optional.
        l2tbinaries_name (str): name of the l2tbinaries package that provides
            the dependency.
        maximum_version (str): maximum supported version.
        minimum_version (str): minimum supported version.
        name (str): name of (the Python module that provides) the dependency.
        pypi_name (str): name of the PyPI package that provides the dependency.
        python2_only (bool): True if the dependency is only supported by
            Python 2.
        rpm_name (str): name of the rpm package that provides the dependency.
        version_property (str): name of the version attribute or function.
    """

    def __init__(self, name):
        """Initializes a dependency configuration.

        Args:
            name (str): name of the dependency.
        """
        super(DependencyDefinition, self).__init__()
        # Module name identifying the dependency.
        self.name = name
        # Flags.
        self.is_optional = False
        self.python2_only = False
        # Version constraints; None means unconstrained.
        self.maximum_version = None
        self.minimum_version = None
        self.version_property = None
        # Distribution-specific package names; None means not packaged there.
        self.dpkg_name = None
        self.l2tbinaries_name = None
        self.pypi_name = None
        self.rpm_name = None
class DependencyDefinitionReader(object):
    """Dependency definition reader."""

    # Option names copied verbatim onto each DependencyDefinition.
    _VALUE_NAMES = frozenset([
        'dpkg_name',
        'is_optional',
        'l2tbinaries_name',
        'maximum_version',
        'minimum_version',
        'pypi_name',
        'python2_only',
        'rpm_name',
        'version_property'])

    def _GetConfigValue(self, config_parser, section_name, value_name):
        """Retrieves a value from the config parser.

        Args:
            config_parser (ConfigParser): configuration parser.
            section_name (str): name of the section that contains the value.
            value_name (str): name of the value.

        Returns:
            object: configuration value or None if the value does not exists.
        """
        try:
            return config_parser.get(section_name, value_name)
        except configparser.NoOptionError:
            return None

    def Read(self, file_object):
        """Reads dependency definitions.

        Args:
            file_object (file): file-like object to read from.

        Yields:
            DependencyDefinition: dependency definition.
        """
        config_parser = configparser.RawConfigParser()

        # read_file() replaced readfp() in Python 3.2 and readfp() was removed
        # in Python 3.12; fall back to readfp() for Python 2 compatibility.
        read_file = getattr(config_parser, 'read_file', None)
        if read_file is not None:
            read_file(file_object)
        else:
            config_parser.readfp(file_object)

        for section_name in config_parser.sections():
            dependency_definition = DependencyDefinition(section_name)
            for value_name in self._VALUE_NAMES:
                value = self._GetConfigValue(
                    config_parser, section_name, value_name)
                setattr(dependency_definition, value_name, value)

            yield dependency_definition
class DependencyHelper(object):
    """Dependency helper.

    Attributes:
        dependencies (dict[str, DependencyDefinition]): dependencies.
    """

    _VERSION_NUMBERS_REGEX = re.compile(r'[0-9.]+')
    _VERSION_SPLIT_REGEX = re.compile(r'\.|\-')

    def __init__(self, configuration_file='dependencies.ini'):
        """Initializes a dependency helper.

        Args:
            configuration_file (Optional[str]): path to the dependencies
                configuration file.
        """
        super(DependencyHelper, self).__init__()
        self._test_dependencies = {}
        self.dependencies = {}

        dependency_reader = DependencyDefinitionReader()

        with open(configuration_file, 'r') as file_object:
            for dependency in dependency_reader.Read(file_object):
                self.dependencies[dependency.name] = dependency

        # mock is required to run the tests but is not listed in the
        # dependencies configuration file.
        dependency = DependencyDefinition('mock')
        dependency.minimum_version = '0.7.1'
        dependency.version_property = '__version__'
        self._test_dependencies['mock'] = dependency

    def _CheckPythonModule(self, dependency):
        """Checks the availability of a Python module.

        Args:
            dependency (DependencyDefinition): dependency definition.

        Returns:
            tuple: consists:

                bool: True if the Python module is available and conforms to
                    the minimum required version, False otherwise.
                str: status message.
        """
        module_object = self._ImportPythonModule(dependency.name)
        if not module_object:
            status_message = 'missing: {0:s}'.format(dependency.name)
            return False, status_message

        # Without a version property, mere presence of the module suffices.
        if not dependency.version_property:
            return True, dependency.name

        return self._CheckPythonModuleVersion(
            dependency.name, module_object, dependency.version_property,
            dependency.minimum_version, dependency.maximum_version)

    def _CheckPythonModuleVersion(
            self, module_name, module_object, version_property, minimum_version,
            maximum_version):
        """Checks the version of a Python module.

        Args:
            module_name (str): name of the Python module.
            module_object (module): Python module.
            version_property (str): version attribute or function.
            minimum_version (str): minimum version.
            maximum_version (str): maximum version.

        Returns:
            tuple: consists:

                bool: True if the Python module is available and conforms to
                    the minimum required version, False otherwise.
                str: status message.
        """
        module_version = None
        if not version_property.endswith('()'):
            module_version = getattr(module_object, version_property, None)
        else:
            # A trailing '()' means the version is exposed via a function.
            version_method = getattr(
                module_object, version_property[:-2], None)
            if version_method:
                module_version = version_method()

        if not module_version:
            status_message = (
                'unable to determine version information for: {0:s}').format(
                    module_name)
            return False, status_message

        # Make sure the module version is a string.
        module_version = '{0!s}'.format(module_version)

        # Split the version string and convert every digit into an integer.
        # A string compare of both version strings will yield an incorrect
        # result. Strip any semantic suffixes such as a1, b1, pre, post, rc,
        # dev.
        version_matches = self._VERSION_NUMBERS_REGEX.findall(module_version)
        if not version_matches:
            # Previously this indexed findall()[0] directly, which raised
            # IndexError for version strings without digits (e.g. "unknown")
            # instead of reporting a parse failure.
            status_message = 'unable to parse module version: {0:s} {1:s}'.format(
                module_name, module_version)
            return False, status_message

        module_version = version_matches[0]
        if module_version[-1] == '.':
            module_version = module_version[:-1]

        try:
            module_version_map = list(
                map(int, self._VERSION_SPLIT_REGEX.split(module_version)))
        except ValueError:
            status_message = 'unable to parse module version: {0:s} {1:s}'.format(
                module_name, module_version)
            return False, status_message

        if minimum_version:
            try:
                minimum_version_map = list(
                    map(int, self._VERSION_SPLIT_REGEX.split(minimum_version)))
            except ValueError:
                status_message = 'unable to parse minimum version: {0:s} {1:s}'.format(
                    module_name, minimum_version)
                return False, status_message

            if module_version_map < minimum_version_map:
                status_message = (
                    '{0:s} version: {1!s} is too old, {2!s} or later '
                    'required').format(
                        module_name, module_version, minimum_version)
                return False, status_message

        if maximum_version:
            try:
                maximum_version_map = list(
                    map(int, self._VERSION_SPLIT_REGEX.split(maximum_version)))
            except ValueError:
                status_message = 'unable to parse maximum version: {0:s} {1:s}'.format(
                    module_name, maximum_version)
                return False, status_message

            if module_version_map > maximum_version_map:
                status_message = (
                    '{0:s} version: {1!s} is too recent, {2!s} or earlier '
                    'required').format(module_name, module_version, maximum_version)
                return False, status_message

        status_message = '{0:s} version: {1!s}'.format(
            module_name, module_version)
        return True, status_message

    def _CheckSQLite3(self):
        """Checks the availability of sqlite3.

        Returns:
            tuple: consists:

                bool: True if the Python module is available and conforms to
                    the minimum required version, False otherwise.
                str: status message.
        """
        # On Windows sqlite3 can be provided by both pysqlite2.dbapi2 and
        # sqlite3. sqlite3 is provided with the Python installation and
        # pysqlite2.dbapi2 by the pysqlite2 Python module. Typically
        # pysqlite2.dbapi2 would contain a newer version of sqlite3, hence
        # we check for its presence first.
        module_name = 'pysqlite2.dbapi2'
        minimum_version = '3.7.8'

        module_object = self._ImportPythonModule(module_name)
        if not module_object:
            module_name = 'sqlite3'

        module_object = self._ImportPythonModule(module_name)
        if not module_object:
            status_message = 'missing: {0:s}.'.format(module_name)
            return False, status_message

        return self._CheckPythonModuleVersion(
            module_name, module_object, 'sqlite_version', minimum_version, None)

    def _ImportPythonModule(self, module_name):
        """Imports a Python module.

        Args:
            module_name (str): name of the module.

        Returns:
            module: Python module or None if the module cannot be imported.
        """
        try:
            module_object = list(map(__import__, [module_name]))[0]
        except ImportError:
            return None

        # If the module name contains dots get the upper most module object.
        if '.' in module_name:
            for submodule_name in module_name.split('.')[1:]:
                module_object = getattr(module_object, submodule_name, None)

        return module_object

    def _PrintCheckDependencyStatus(
            self, dependency, result, status_message, verbose_output=True):
        """Prints the check dependency status.

        Args:
            dependency (DependencyDefinition): dependency definition.
            result (bool): True if the Python module is available and conforms
                to the minimum required version, False otherwise.
            status_message (str): status message.
            verbose_output (Optional[bool]): True if output should be verbose.
        """
        if not result or dependency.is_optional:
            if dependency.is_optional:
                status_indicator = '[OPTIONAL]'
            else:
                status_indicator = '[FAILURE]'

            print('{0:s}\t{1:s}'.format(status_indicator, status_message))

        elif verbose_output:
            print('[OK]\t\t{0:s}'.format(status_message))

    def CheckDependencies(self, verbose_output=True):
        """Checks the availability of the dependencies.

        Args:
            verbose_output (Optional[bool]): True if output should be verbose.

        Returns:
            bool: True if the dependencies are available, False otherwise.
        """
        print('Checking availability and versions of dependencies.')
        check_result = True

        for module_name, dependency in sorted(self.dependencies.items()):
            if module_name == 'sqlite3':
                result, status_message = self._CheckSQLite3()
            else:
                result, status_message = self._CheckPythonModule(dependency)

            # lzma is provided by the backports.lzma module on Python 2.
            if not result and module_name == 'lzma':
                dependency.name = 'backports.lzma'
                result, status_message = self._CheckPythonModule(dependency)

            if not result and not dependency.is_optional:
                check_result = False

            self._PrintCheckDependencyStatus(
                dependency, result, status_message, verbose_output=verbose_output)

        if check_result and not verbose_output:
            print('[OK]')

        print('')
        return check_result

    def CheckTestDependencies(self, verbose_output=True):
        """Checks the availability of the dependencies when running tests.

        Args:
            verbose_output (Optional[bool]): True if output should be verbose.

        Returns:
            bool: True if the dependencies are available, False otherwise.
        """
        if not self.CheckDependencies(verbose_output=verbose_output):
            return False

        print('Checking availability and versions of test dependencies.')
        check_result = True

        for dependency in sorted(
                self._test_dependencies.values(),
                key=lambda dependency: dependency.name):
            result, status_message = self._CheckPythonModule(dependency)
            if not result:
                check_result = False

            self._PrintCheckDependencyStatus(
                dependency, result, status_message, verbose_output=verbose_output)

        if check_result and not verbose_output:
            print('[OK]')

        print('')
        return check_result
| |
#!/usr/bin/env python
"""This module provides tools for scheduling dirsig runs.
DESCRIPTION:
This is intended to run on python 2.4.
PUBLIC REPOSITORY:
https://github.com/pavdpr/DIRSIG/
USAGE:
python parallel.py [options] or
parallel.py [options] If execute permissions on parallel.py and it is
in the path.
[options] are:
--path=<path> Set the path to search for sim files from. The
default is the path where this command is
executed from.
--regex=<regex> Set the regular expression to search for sim
files. Quotes may be needed around the
regular expression to properly pass it to
python. The default is r'.+\.sim' (all sim
files).
--exclude=<regex> Trim the list of sim files by not processing
any sim file that matches the regular
expression.
--rmsim=<sim file> Do not process this sim file.
--addsim=<sim file> Add a specific sim file to the list of sims to
run. These sim files will be earlier in the
list to run.
--dirsig=<dirsig version> Set the dirsig executable name. The default is
dirsig.
--logfile=<log file name> Set the logfile name. The default is log.
--option=<option> Set an option to pass to the dirsig executable.
Multiple options need to be passed
independantly.
--run Run the simulation. Not setting the --run flag
will show the simulations that would be run.
Notes:
- The angle brackets after each of the above options should NOT be included.
- There should not be spaces on either side of the equals.
SAMPLE USAGE:
parallel.py
Shows what settings were used. Does NOT execute any runs. Allows the user
to review what simulations will be run.
parallel.py --run
Runs with all defaults.
parallel.py --path=/some/path --dirsig=dirsig-4.7.0 --processes=8 --run
Searches for all sim files in /some/path and executes dirsig-4.7.0 on 8
cores.
parallel.py --option=--mode=preview --option=--output_prefix=foobar --run
Runs dirsig in preview mode and with an output prefix of foobar. This runs
dirsig --mode=preview --output_prefix=foobar sim.sim &> log
parallel.py --regex="simulation.*\.sim' --run
Searches for all simulations that match simulation.*\.sim
REQUIRED PACKAGES:
re
os
"""
__author__ = 'Paul Romanczyk'
__copyright__ = "Copyright 2015, Rochester Institute of Technology"
__credits__ = []
__license__ = "MIT"
#__version__ = "1.0"
__maintainer__ = "Paul Romanczyk"
__email__ = "par4249@rit.edu"
__status__ = "Production"
import subprocess
import os
import re
def find_sims_by_regex(regex, pth='.'):
    """Walk a directory tree and collect files matching a regular expression.

    Args:
        regex (_sre.SRE_Pattern): A compiled regular expression, e.g.
            re.compile(r'.+sim') to find all sim files.
        pth (str, optional): Root of the tree to search. Defaults to '.'.

    Returns:
        A list of matching file paths (possibly empty). A file is kept when
        either its bare name or its joined path matches the expression.
    """
    matches = []
    for directory, _, filenames in os.walk(pth):
        for filename in filenames:
            full_path = os.path.join(directory, filename)
            # Match against the bare file name first, then the full path.
            if regex.search(filename) or regex.search(full_path):
                matches.append(full_path)
    return matches
def exclude_sims_by_regex(sims, regex):
    """Filter out sims matching a regular expression.

    DESCRIPTION:
        Returns all sims that do NOT match the regular expression.

    ARGS:
        sims (iterable of strings): Candidate sim file names.
        regex (_sre.SRE_Pattern): A compiled regular expression.

    RETURNS:
        A list of the entries that do not match the regular expression,
        in their original order.
    """
    return [sim for sim in sims if not regex.search(sim)]
def clean_cmd(cmd):
    """Collapse repeated whitespace and strip surrounding whitespace.

    Args:
        cmd (str): The command string to clean.

    Returns:
        The cleaned command string with runs of two or more whitespace
        characters replaced by a single space.

    Note:
        The previous pattern r'\\s{2, }' contained a space inside the
        quantifier; Python's re treats '{2, }' as literal text in that case,
        so repeated spaces were never collapsed.
    """
    return re.sub(r'\s{2,}', ' ', cmd).strip(' \t\n\r')
def cd_for_run(cmd, pth='.', delim=';', basepath=None):
    """Modifies the DIRSIG command to change directories.

    This will add a cd command to execute before the dirsig call. After the
    dirsig call, it will add a second cd to change directories back to the
    original one. If the directory (pth) is '.', or resolves to the same
    directory as basepath, the original command is returned unchanged.

    Args:
        cmd (str): The dirsig command to run in between the cd commands.
        pth (str, optional): The path to change to. Defaults to '.'.
        delim (str, optional): The delimiter between the cd's and command.
            Defaults to ';'.
        basepath (str, optional): The reference path. If None, defaults to
            os.getcwd().

    Notes:
        This should be run from the directory where the main call will be
        made to get the paths right.
        The previous version wrapped the body in
        'try: ... except RuntimeError, error: raise error', a no-op re-raise
        that also used Python-2-only except syntax; it has been removed.

    Raises:
        RuntimeError: If pth (or basepath, when given) does not exist.

    Returns:
        A string with the new command including the cd commands.
    """
    if not pth:
        return cmd
    if not os.path.isdir(pth):
        raise RuntimeError("The sim path '" + pth + "' does not exist")

    if not basepath:
        basepath = os.getcwd()
    elif not os.path.isdir(basepath):
        raise RuntimeError("The base path '" + basepath + "' does not exist")

    if os.path.samefile(basepath, pth):
        return cmd

    return clean_cmd('cd ' + os.path.abspath(pth) + delim + ' ' +
                     cmd + delim + ' cd ' + os.path.abspath(basepath))
def remove_sim_files(sims, ommit=None):
    """Removes sim files from a candidate list.

    DESCRIPTION:
        There are 3 sets of sim files that will be removed:
        1. sim files in the iterable ommit.
        2. sim files that are duplicates of ones already processed.
        3. sim files that do not exist on disk.

    ARGS:
        sims (iterable of str): An iterable of sim files.
        ommit (iterable of str, optional): Sim files to omit. Defaults to
            None (omit nothing). The previous default was the mutable
            literal [], a Python anti-pattern; behavior is unchanged.

    RETURNS:
        list of str: Absolute paths of the surviving sim files, with no
        duplicates, in their original order.
    """
    seen = set()
    # Pre-populate with the files the caller wants omitted.
    if ommit:
        for name in ommit:
            seen.add(os.path.abspath(name))

    output = []
    for sim in sims:
        abs_sim = os.path.abspath(sim)
        if abs_sim in seen:
            continue
        seen.add(abs_sim)
        # Keep only files that actually exist on disk.
        if os.path.isfile(sim):
            output.append(abs_sim)
    return output
def make_dirsig_command(sim, options=None, dirsig='dirsig', logfile='log'):
    """Build the shell command used to run dirsig on a single sim file.

    Args:
        sim (str): Name of the sim file.
        options (str, optional): Options to pass to dirsig. Defaults to None.
        dirsig (str, optional): The dirsig executable to use. Defaults to
            'dirsig'.
        logfile (str, optional): Name of the logfile to write to. Defaults
            to 'log'.

    Returns:
        A string for the dirsig command to call.
    """
    parts = [dirsig]
    if options:
        parts.append(options)
    parts.append(sim)
    # Redirect stdout and stderr into the log file.
    parts.append('&> ' + logfile)
    return clean_cmd(' '.join(parts))
def parallel_run_dirsig(cmds):
    """Execute each dirsig command through the shell.

    NOTE(review): despite the name, commands are executed one at a time in
    order — confirm whether parallel execution was intended.

    Args:
        cmds (list of str): The dirsig commands to execute.

    Returns:
        None

    Note:
        "Invoking the system shell with shell=True can be a security hazard
        if combined with untrusted input."
    """
    for command in cmds:
        subprocess.call(command, shell=True)
if __name__ == '__main__':
    # set defaults
    SEARCH_REGEX = []
    EXCLUDE_REGEX = []
    EXCLUDE_FILES = []
    SIMS = []
    DIRSIG = 'dirsig'
    PATH = '.'
    # NOTE(review): BASEPATH is always None because the --basepath flag
    # parsing below is commented out, yet it is still passed to cd_for_run.
    BASEPATH = None
    LOGFILE = 'log'
    OPTIONS = None
    RUN = False
    import sys
    # Hand-rolled flag parsing over sys.argv (argparse is not used).
    ARGS = sys.argv[1:]
    # NOTE(review): these two compiled patterns are never used in this block.
    REGEXREGEX1 = re.compile(r'regex="(.*)"', re.IGNORECASE)
    REGEXREGEX2 = re.compile(r"regex='(.*)'", re.IGNORECASE)
    I = 0
    while I < len(ARGS):
        ARG = ARGS[I]
        if ARG.lower().startswith('--path='):
            PATH = ARG[7:]
        # elif ARG.lower().startswith('--basepath='):
        #     BASEPATH = ARG[11:]
        elif ARG.lower().startswith('--processes='):
            # NOTE(review): PROCESSES is parsed but never used; the runs
            # below execute sequentially (see parallel_run_dirsig).
            PROCESSES = int(ARG[12:])
        elif ARG.lower().startswith('--regex='):
            SEARCH_REGEX.append(ARG[8:])#.decode('string_escape')
        elif ARG.lower().startswith('--exclude='):
            EXCLUDE_REGEX.append(ARG[10:])#.decode('string_escape')
        elif ARG.lower().startswith('--dirsig='):
            DIRSIG = ARG[9:]
        elif ARG.lower().startswith('--logfile='):
            LOGFILE = ARG[10:]
        elif ARG.lower().startswith('--addsim='):
            SIMS.append(ARG[9:])
        elif ARG.lower().startswith('--rmsim='):
            EXCLUDE_FILES.append(ARG[8:])
        elif ARG.lower().startswith('--option='):
            # Multiple --option flags accumulate, space-separated.
            if OPTIONS:
                OPTIONS += ' ' + ARG[9:]
            else:
                OPTIONS = ARG[9:]
        elif ARG.lower() == '--run':
            RUN = True
        else:
            sys.exit("'" + ARG + "' is an unexpected command line option.")
        I += 1
    # Default search: any file ending in '.sim'.
    if not SEARCH_REGEX:
        SEARCH_REGEX = [r'.+\.sim']
    # Find some sim files
    for REGEX in SEARCH_REGEX:
        SIMS += find_sims_by_regex(re.compile(REGEX), pth=PATH)
    # Exclude some sim files
    for REGEX in EXCLUDE_REGEX:
        SIMS = exclude_sims_by_regex(SIMS, re.compile(REGEX))
    # Remove duplicate sim files
    SIMS = remove_sim_files(SIMS, EXCLUDE_FILES)
    # Without --run this is a dry run: report what would be executed.
    if not RUN:
        print "dirsig.parallel.parallel.py"
        print
        print "Called from %s" % os.getcwd()
        print "Searching: %s" % os.path.abspath(PATH)
        print
        print "Found %d sim files:" % len(SIMS)
        for SIM in SIMS:
            print "\t%s" % SIM
        print
        print "To add more simulations add --regex=<regular expression> to " + \
            "your python call."
        print "To add a specific simulation add --addsim=<sim file> to your " + \
            "python call."
        print "To remove simulations add --exclude=<regular expression> to " + \
            "your python call."
        print "To remove a specific simulation add --rmsim=<sim file> to " + \
            "your python call."
        print
        print "The following dirsig call will be performed on each sim file:"
        print "\t%s" % make_dirsig_command("*.sim", options=OPTIONS, \
            dirsig=DIRSIG, logfile = LOGFILE)
        print
        print "To run with these settings, use:"
        print "\tpython parallel.py %s --run" % " ".join(ARGS)
    else:
        # make dirsig commands
        CMDS = []
        for SIM in SIMS:
            # Each command cd's into the sim's directory before running.
            (DIR, SIMFILE) = os.path.split(SIM)
            CMDS.append(cd_for_run(make_dirsig_command(SIMFILE, options=OPTIONS, \
                dirsig=DIRSIG, logfile=LOGFILE), pth=DIR, basepath=BASEPATH))
        # run dirsig
        parallel_run_dirsig(CMDS)
| |
from __future__ import absolute_import
"""M2Crypto wrapper for OpenSSL EVP API.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved.
Portions Copyright (c) 2004-2007 Open Source Applications Foundation.
Author: Heikki Toivonen
"""
import logging
from M2Crypto import BIO, Err, RSA, m2, util
if util.py27plus:
from typing import AnyStr, Optional, Callable # noqa
# Module-level logger for this EVP wrapper.
log = logging.getLogger('EVP')
# Exception type raised for EVP-level OpenSSL failures.
class EVPError(ValueError):
    pass
# Register EVPError with the C extension so failing m2.* calls raise it.
m2.evp_init(EVPError)
def pbkdf2(password, salt, iter, keylen):
    # type: (bytes, bytes, int, int) -> bytes
    """
    Derive a key from password using the PBKDF2 algorithm (RFC 2898)
    with HMAC-SHA1 as the pseudo-random function.
    :param password: Derive the key from this password.
    :param salt: Salt.
    :param iter: Number of iterations to perform.  (The name shadows the
        builtin ``iter`` but is kept for API compatibility.)
    :param keylen: Length of key to produce.
    :return: Key.
    """
    derived = m2.pkcs5_pbkdf2_hmac_sha1(password, salt, iter, keylen)
    return derived
class MessageDigest(object):
    """
    Message Digest: incremental hashing over an OpenSSL EVP_MD context.
    """
    # Bound at class-creation time so __del__ can still free the context
    # even if module globals (m2) have been torn down at interpreter
    # shutdown -- NOTE(review): presumed intent; confirm.
    m2_md_ctx_free = m2.md_ctx_free
    def __init__(self, algo):
        # type: (str) -> None
        # Resolve the digest either as an m2 attribute (e.g. m2.sha1) or
        # by name through OpenSSL's lookup table.
        md = getattr(m2, algo, None)  # type: Optional[Callable]
        if md is None:
            # if the digest algorithm isn't found as an attribute of the m2
            # module, try to look up the digest using get_digestbyname()
            self.md = m2.get_digestbyname(algo)
        else:
            self.md = md()
        self.ctx = m2.md_ctx_new()
        m2.digest_init(self.ctx, self.md)
    def __del__(self):
        # type: () -> None
        # Guarded: ctx may be missing if __init__ failed part-way through.
        if getattr(self, 'ctx', None):
            self.m2_md_ctx_free(self.ctx)
    def update(self, data):
        # type: (bytes) -> int
        """
        Add data to be digested.
        :return: -1 for Python error, 1 for success, 0 for OpenSSL failure.
        """
        return m2.digest_update(self.ctx, data)
    def final(self):
        """Finalize the digest and return the raw digest bytes."""
        return m2.digest_final(self.ctx)
    # Deprecated.
    digest = final
class HMAC(object):
    """
    Incremental HMAC computation over an OpenSSL HMAC context.
    """
    # Class-level binding so __del__ can free the context even during
    # interpreter shutdown -- NOTE(review): presumed intent; confirm.
    m2_hmac_ctx_free = m2.hmac_ctx_free
    def __init__(self, key, algo='sha1'):
        # type: (bytes, str) -> None
        # Unlike MessageDigest, only m2 attribute digests are accepted here.
        md = getattr(m2, algo, None)
        if md is None:
            raise ValueError('unknown algorithm', algo)
        self.md = md()
        self.ctx = m2.hmac_ctx_new()
        m2.hmac_init(self.ctx, key, self.md)
    def __del__(self):
        # type: () -> None
        if getattr(self, 'ctx', None):
            self.m2_hmac_ctx_free(self.ctx)
    def reset(self, key):
        # type: (bytes) -> None
        """Restart the MAC computation with a (possibly new) key."""
        m2.hmac_init(self.ctx, key, self.md)
    def update(self, data):
        # type: (bytes) -> None
        """Feed more message bytes into the MAC."""
        m2.hmac_update(self.ctx, data)
    def final(self):
        # type: () -> bytes
        """Finalize and return the raw MAC bytes."""
        return m2.hmac_final(self.ctx)
    # Deprecated alias for final().
    digest = final
def hmac(key, data, algo='sha1'):
    # type: (bytes, bytes, str) -> bytes
    """
    One-shot HMAC of *data* under *key* using digest *algo*.
    :raises ValueError: if *algo* does not name a digest known to m2.
    :return: The raw MAC bytes.
    """
    digest_factory = getattr(m2, algo, None)
    if digest_factory is None:
        raise ValueError('unknown algorithm', algo)
    return m2.hmac(key, data, digest_factory())
class Cipher(object):
    """
    Symmetric cipher wrapper around the OpenSSL EVP cipher API.
    """
    # Class-level binding so __del__ can free the context even during
    # interpreter shutdown -- NOTE(review): presumed intent; confirm.
    m2_cipher_ctx_free = m2.cipher_ctx_free
    def __init__(self, alg, key, iv, op, key_as_bytes=0, d='md5',
                 salt=b'12345678', i=1, padding=1):
        # type: (str, bytes, bytes, object, int, str, bytes, int, int) -> None
        # alg names an m2 cipher factory (e.g. 'aes_128_cbc'); op selects
        # encrypt vs decrypt as defined by the m2 layer.
        cipher = getattr(m2, alg, None)
        if cipher is None:
            raise ValueError('unknown cipher', alg)
        self.cipher = cipher()
        if key_as_bytes:
            # Derive the actual key from the passphrase bytes via
            # EVP_BytesToKey using digest d, salt, and i iterations.
            kmd = getattr(m2, d, None)
            if kmd is None:
                raise ValueError('unknown message digest', d)
            key = m2.bytes_to_key(self.cipher, kmd(), key, salt, iv, i)
        self.ctx = m2.cipher_ctx_new()
        m2.cipher_init(self.ctx, self.cipher, key, iv, op)
        self.set_padding(padding)
        # Drop the local reference to the key material promptly --
        # NOTE(review): presumably to limit its lifetime in memory.
        del key
    def __del__(self):
        # type: () -> None
        if getattr(self, 'ctx', None):
            self.m2_cipher_ctx_free(self.ctx)
    def update(self, data):
        # type: (bytes) -> bytes
        """Feed data through the cipher; returns any produced output."""
        return m2.cipher_update(self.ctx, data)
    def final(self):
        # type: () -> bytes
        """Finalize the operation and return the remaining output."""
        return m2.cipher_final(self.ctx)
    def set_padding(self, padding=1):
        # type: (int) -> int
        """
        Actually always return 1
        """
        return m2.cipher_set_padding(self.ctx, padding)
class PKey(object):
    """
    Public Key (EVP_PKEY wrapper).
    Also carries a message-digest context (self.ctx) used by the
    sign_*/verify_* methods below.
    """
    # Class-level bindings so __del__ can free the C objects even during
    # interpreter shutdown -- NOTE(review): presumed intent; confirm.
    m2_pkey_free = m2.pkey_free
    m2_md_ctx_free = m2.md_ctx_free
    def __init__(self, pkey=None, _pyfree=0, md='sha1'):
        # type: (Optional[bytes], int, str) -> None
        # When wrapping an existing pkey pointer, _pyfree says whether this
        # object owns it (and must free it in __del__).  A newly created
        # key is always owned.
        if pkey is not None:
            self.pkey = pkey  # type: bytes
            self._pyfree = _pyfree
        else:
            self.pkey = m2.pkey_new()
            self._pyfree = 1
        self._set_context(md)
    def __del__(self):
        # type: () -> None
        # Guarded lookups: attributes may be missing if __init__ failed.
        if getattr(self, '_pyfree', 0):
            self.m2_pkey_free(self.pkey)
        if getattr(self, 'ctx', None):
            self.m2_md_ctx_free(self.ctx)
    def _ptr(self):
        # Raw pointer accessor used by sibling wrapper classes.
        return self.pkey
    def _set_context(self, md):
        # type: (str) -> None
        # Resolve the digest factory from m2 and allocate a fresh MD
        # context for subsequent sign/verify operations.
        mda = getattr(m2, md, None)  # type: Optional[Callable]
        if mda is None:
            raise ValueError('unknown message digest', md)
        self.md = mda()
        self.ctx = m2.md_ctx_new()  # type: Context
    def reset_context(self, md='sha1'):
        # type: (str) -> None
        """
        Reset internal message digest context.
        :param md: The message digest algorithm.
        """
        self._set_context(md)
    def sign_init(self):
        # type: () -> None
        """
        Initialise signing operation with self.
        """
        m2.sign_init(self.ctx, self.md)
    def sign_update(self, data):
        # type: (bytes) -> None
        """
        Feed data to signing operation.
        :param data: Data to be signed.
        """
        m2.sign_update(self.ctx, data)
    def sign_final(self):
        # type: () -> bytes
        """
        Return signature.
        :return: The signature.
        """
        return m2.sign_final(self.ctx, self.pkey)
    # Deprecated
    update = sign_update
    final = sign_final
    def verify_init(self):
        # type: () -> None
        """
        Initialise signature verification operation with self.
        """
        m2.verify_init(self.ctx, self.md)
    def verify_update(self, data):
        # type: (bytes) -> int
        """
        Feed data to verification operation.
        :param data: Data to be verified.
        :return: -1 on Python error, 1 for success, 0 for OpenSSL error
        """
        return m2.verify_update(self.ctx, data)
    def verify_final(self, sign):
        # type: (bytes) -> int
        """
        Return result of verification.
        :param sign: Signature to use for verification
        :return: Result of verification: 1 for success, 0 for failure, -1 on
                 other error.
        """
        return m2.verify_final(self.ctx, sign, self.pkey)
    def assign_rsa(self, rsa, capture=1):
        # type: (RSA.RSA, int) -> int
        """
        Assign the RSA key pair to self.
        :param rsa: M2Crypto.RSA.RSA object to be assigned to self.
        :param capture: If true (default), this PKey object will own the RSA
                        object, meaning that once the PKey object gets
                        deleted it is no longer safe to use the RSA object.
        :return: Return 1 for success and 0 for failure.
        """
        if capture:
            ret = m2.pkey_assign_rsa(self.pkey, rsa.rsa)
            if ret:
                # Ownership moved to the EVP_PKEY; stop the RSA wrapper
                # from double-freeing the underlying key.
                rsa._pyfree = 0
        else:
            ret = m2.pkey_set1_rsa(self.pkey, rsa.rsa)
        return ret
    def get_rsa(self):
        # type: () -> RSA.RSA_pub
        """
        Return the underlying RSA key if that is what the EVP
        instance is holding.
        """
        rsa_ptr = m2.pkey_get1_rsa(self.pkey)
        # The returned RSA_pub owns its pointer (second arg = 1).
        rsa = RSA.RSA_pub(rsa_ptr, 1)
        return rsa
    def save_key(self, file, cipher='aes_128_cbc',
                 callback=util.passphrase_callback):
        # type: (AnyStr, Optional[str], Callable) -> int
        """
        Save the key pair to a file in PEM format.
        :param file: Name of file to save key to.
        :param cipher: Symmetric cipher to protect the key. The default
                      cipher is 'aes_128_cbc'. If cipher is None, then
                      the key is saved in the clear.
        :param callback: A Python callable object that is invoked
                        to acquire a passphrase with which to protect
                        the key. The default is
                        util.passphrase_callback.
        """
        with BIO.openfile(file, 'wb') as bio:
            return self.save_key_bio(bio, cipher, callback)
    def save_key_bio(self, bio, cipher='aes_128_cbc',
                     callback=util.passphrase_callback):
        # type: (BIO.BIO, Optional[str], Callable) -> int
        """
        Save the key pair to the M2Crypto.BIO object 'bio' in PEM format.
        :param bio: M2Crypto.BIO object to save key to.
        :param cipher: Symmetric cipher to protect the key. The default
                      cipher is 'aes_128_cbc'. If cipher is None, then
                      the key is saved in the clear.
        :param callback: A Python callable object that is invoked
                        to acquire a passphrase with which to protect
                        the key. The default is
                        util.passphrase_callback.
        """
        if cipher is None:
            return m2.pkey_write_pem_no_cipher(self.pkey, bio._ptr(), callback)
        else:
            proto = getattr(m2, cipher, None)
            if proto is None:
                raise ValueError('no such cipher %s' % cipher)
            return m2.pkey_write_pem(self.pkey, bio._ptr(), proto(), callback)
    def as_pem(self, cipher='aes_128_cbc', callback=util.passphrase_callback):
        # type: (Optional[str], Callable) -> bytes
        """
        Return key in PEM format in a string.
        :param cipher: Symmetric cipher to protect the key. The default
                      cipher is ``'aes_128_cbc'``. If cipher is None,
                      then the key is saved in the clear.
        :param callback: A Python callable object that is invoked
                        to acquire a passphrase with which to protect
                        the key. The default is
                        util.passphrase_callback.
        """
        bio = BIO.MemoryBuffer()
        self.save_key_bio(bio, cipher, callback)
        return bio.read_all()
    def as_der(self):
        # type: () -> bytes
        """
        Return key in DER format in a string
        """
        buf = m2.pkey_as_der(self.pkey)
        bio = BIO.MemoryBuffer(buf)
        return bio.read_all()
    def size(self):
        # type: () -> int
        """
        Return the size of the key in bytes.
        """
        return m2.pkey_size(self.pkey)
    def get_modulus(self):
        # type: () -> Optional[bytes]
        """
        Return the modulus in hex format.
        """
        return m2.pkey_get_modulus(self.pkey)
def load_key(file, callback=util.passphrase_callback):
    # type: (AnyStr, Callable) -> PKey
    """
    Load an M2Crypto.EVP.PKey from a PEM file on disk.
    :param file: Name of file containing the key in PEM format.
    :param callback: A Python callable object that is invoked
                     to acquire a passphrase with which to protect the
                     key.
    :return: M2Crypto.EVP.PKey object owning the loaded key.
    """
    # Read through a BIO; PKey(..., 1) takes ownership of the pointer.
    with BIO.openfile(file, 'r') as bio:
        raw_key = m2.pkey_read_pem(bio.bio, callback)
    return PKey(raw_key, 1)
def load_key_bio(bio, callback=util.passphrase_callback):
    # type: (BIO.BIO, Callable) -> PKey
    """
    Load an M2Crypto.EVP.PKey from an M2Crypto.BIO object.
    :param bio: M2Crypto.BIO object containing the key in PEM format.
    :param callback: A Python callable object that is invoked
                     to acquire a passphrase with which to protect the
                     key.
    :return: M2Crypto.EVP.PKey object owning the loaded key.
    """
    raw_key = m2.pkey_read_pem(bio._ptr(), callback)
    # The new PKey owns the returned pointer (second arg = 1).
    return PKey(raw_key, 1)
def load_key_bio_pubkey(bio, callback=util.passphrase_callback):
    # type: (BIO.BIO, Callable) -> PKey
    """
    Load an M2Crypto.EVP.PKey from a public key as a M2Crypto.BIO object.
    :param bio: M2Crypto.BIO object containing the key in PEM format.
    :param callback: A Python callable object that is invoked
                     to acquire a passphrase with which to protect the
                     key.
    :raises EVPError: if the public key could not be parsed.
    :return: M2Crypto.EVP.PKey object owning the loaded key.
    """
    raw_key = m2.pkey_read_pem_pubkey(bio._ptr(), callback)
    if raw_key is None:
        # Surface the OpenSSL error queue as an EVPError.
        raise EVPError(Err.get_error())
    return PKey(raw_key, 1)
def load_key_string(string, callback=util.passphrase_callback):
    # type: (AnyStr, Callable) -> PKey
    """
    Load an M2Crypto.EVP.PKey from a string.
    :param string: String containing the key in PEM format.
    :param callback: A Python callable object that is invoked
                     to acquire a passphrase with which to protect the
                     key.
    :return: M2Crypto.EVP.PKey object.
    """
    # Wrap the in-memory PEM data in a BIO and delegate.
    return load_key_bio(BIO.MemoryBuffer(string), callback)
def load_key_string_pubkey(string, callback=util.passphrase_callback):
    # type: (AnyStr, Callable) -> PKey
    """
    Load an M2Crypto.EVP.PKey from a public key as a string.
    :param string: String containing the key in PEM format.
    :param callback: A Python callable object that is invoked
                     to acquire a passphrase with which to protect the
                     key.
    :return: M2Crypto.EVP.PKey object.
    """
    # Wrap the in-memory PEM data in a BIO and delegate.
    return load_key_bio_pubkey(BIO.MemoryBuffer(string), callback)
| |
# Copyright 2014 Google Inc. All Rights Reserved.
"""Base classes for abstracting away common logic."""
import abc
import argparse
import collections
import copy
import cStringIO
import json
import sets
import sys
import textwrap
from enum import Enum
import protorpc.messages
import yaml
from googlecloudapis.apitools.base.py import encoding
from googlecloudapis.compute.v1 import compute_v1_messages
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.compute.lib import constants
from googlecloudsdk.compute.lib import lister
from googlecloudsdk.compute.lib import metadata_utils
from googlecloudsdk.compute.lib import path_simplifier
from googlecloudsdk.compute.lib import property_selector
from googlecloudsdk.compute.lib import request_helper
from googlecloudsdk.compute.lib import resource_specs
from googlecloudsdk.compute.lib import scope_prompter
from googlecloudsdk.compute.lib import utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources as resource_exceptions
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import edit
from googlecloudsdk.core.util import resource_printer
def PrintTable(resources, table_cols):
  """Prints a table of the given resources.

  Each entry of table_cols is a (column name, extractor) pair, where the
  extractor is either a property_selector.PropertyGetter or a callable.
  """
  # TODO(user): Switch over to console_io.TablePrinter once the
  # class is refactored to support tables without ASCII borders.
  printer = resource_printer.TablePrinter(out=log.out)
  printer.AddRow([col_name for col_name, _ in table_cols])
  try:
    for res in resources:
      row = []
      for _, extractor in table_cols:
        if isinstance(extractor, property_selector.PropertyGetter):
          row.append(extractor.Get(res) or '')
        elif callable(extractor):
          row.append(extractor(res))
      printer.AddRow(row)
  finally:
    # Emit whatever rows were collected even if iteration raised.
    printer.Print()
class BaseCommand(base.Command, scope_prompter.ScopePrompter):
  """Base class for all compute subcommands."""
  __metaclass__ = abc.ABCMeta
  def __init__(self, *args, **kwargs):
    super(BaseCommand, self).__init__(*args, **kwargs)
    # Set the default project for resource resolution
    if self.resource_type:
      # Constructing the spec can be potentially expensive (e.g.,
      # generating the set of valid fields from the protobuf message),
      # so we fetch it once in the constructor.
      self._resource_spec = resource_specs.GetSpec(
          self.resource_type, self.messages, self.context['api-version'])
    else:
      self._resource_spec = None
  @property
  def transformations(self):
    """Value transformations from the resource spec, or None."""
    if self._resource_spec:
      return self._resource_spec.transformations
    else:
      return None
  @property
  def resource_type(self):
    """Specifies the name of the collection that should be printed."""
    return None
  @property
  def http(self):
    """Specifies the http client to be used for requests."""
    return self.context['http']
  @property
  def project(self):
    """Specifies the user's project."""
    return self.context['project']
  @property
  def batch_url(self):
    """Specifies the API batch URL."""
    return self.context['batch-url']
  @property
  def compute(self):
    """Specifies the compute client."""
    return self.context['compute']
  @property
  def resources(self):
    """Specifies the resources parser for compute resources."""
    return self.context['resources']
  @property
  def computeaccounts(self):
    """Specifies the compute accounts client."""
    return self.context['computeaccounts']
  @property
  def computeaccounts_resources(self):
    """Specifies the resources parser for compute accounts resources."""
    return self.context['computeaccounts-resources']
  @property
  def messages(self):
    """Specifies the API message classes."""
    return self.compute.MESSAGES_MODULE
  def Display(self, args, resources):
    """Prints the given resources."""
    if resources:
      resource_printer.Print(
          resources=resources,
          print_format='yaml',
          out=log.out)
class BaseLister(BaseCommand):
  """Base class for the list subcommands."""
  __metaclass__ = abc.ABCMeta
  @staticmethod
  def Args(parser):
    """Registers the flags common to every list subcommand."""
    parser.add_argument(
        '--limit',
        # sys.maxint is Python-2-only; this module targets Python 2.
        type=arg_parsers.BoundedInt(1, sys.maxint),
        help='The maximum number of results.')
    sort_by = parser.add_argument(
        '--sort-by',
        help='A field to sort by.')
    sort_by.detailed_help = """\
      A field to sort by. To perform a descending-order sort, prefix
      the value of this flag with a tilde (``~'').
      """
    parser.add_argument(
        'names',
        metavar='NAME',
        nargs='*',
        default=[],
        help=('If provided, show details for the specified names and/or URIs '
              'of resources.'))
    uri = parser.add_argument(
        '--uri',
        action='store_true',
        help='If provided, a list of URIs is printed instead of a table.')
    uri.detailed_help = """\
      If provided, the list command will only print URIs for the
      resources returned. If this flag is not provided, the list
      command will print a human-readable table of useful resource
      data.
      """
    regexp = parser.add_argument(
        '--regexp', '-r',
        help='A regular expression to filter the names of the results on.')
    regexp.detailed_help = """\
      A regular expression to filter the names of the results on. Any names
      that do not match the entire regular expression will be filtered out.
      """
  @property
  def allowed_filtering_types(self):
    """The list of resource types that can be provided to filtering."""
    return [self.resource_type]
  @abc.abstractmethod
  def GetResources(self, args, errors):
    """Returns a generator of JSON-serializable resource dicts."""
  def GetFilterExpr(self, args):
    """Returns a filter expression if --regexp is provided."""
    if args.regexp:
      return 'name eq {0}'.format(args.regexp)
    else:
      return None
  def PopulateResourceFilteringStructures(self, args):
    """Processes the positional arguments for later filtering.

    Positional args that parse as resource URIs go into self.self_links
    (after validating their collection); plain names go into self.names.
    """
    allowed_collections = ['compute.{0}'.format(resource_type)
                           for resource_type in self.allowed_filtering_types]
    for name in args.names:
      try:
        ref = self.resources.Parse(name)
        if ref.Collection() not in allowed_collections:
          raise calliope_exceptions.ToolException(
              'Resource URI must be of type {0}. Received [{1}].'.format(
                  ' or '.join('[{0}]'.format(collection)
                              for collection in allowed_collections),
                  ref.Collection()))
        self.self_links.add(ref.SelfLink())
        self.resource_refs.append(ref)
        continue
      except resource_exceptions.UserError:
        # Not a parseable URI; treat it as a bare resource name.
        pass
      self.names.add(name)
  def FilterResults(self, args, items):
    """Filters the list results by name and URI."""
    for item in items:
      # If no positional arguments were given, do no filtering.
      if not args.names:
        yield item
      # At this point, we have to do filtering because there was at
      # least one positional argument.
      elif item.selfLink in self.self_links or item.name in self.names:
        yield item
  def ComputeDynamicProperties(self, args, items):
    """Computes dynamic properties, which are not returned by GCE API."""
    _ = args
    return items
  def Run(self, args):
    """Yields JSON-serializable dicts of resources or self links."""
    # Data structures used to perform client-side filtering of
    # resources by their names and/or URIs.
    self.self_links = set()
    self.names = set()
    self.resource_refs = []
    if args.uri:
      field_selector = None
    else:
      # The field selector should be constructed before any resources
      # are fetched, so if there are any syntactic errors with the
      # fields, we can fail fast.
      field_selector = property_selector.PropertySelector(
          properties=None,
          transformations=self.transformations)
    if args.sort_by:
      # A leading '~' requests a descending sort on the remaining field.
      if args.sort_by.startswith('~'):
        sort_by = args.sort_by[1:]
        descending = True
      else:
        sort_by = args.sort_by
        descending = False
      # Map a display column name back to its underlying property path.
      for col_name, path in self._resource_spec.table_cols:
        if sort_by == col_name:
          sort_by = path
          break
      if isinstance(sort_by, property_selector.PropertyGetter):
        property_getter = sort_by
      else:
        property_getter = property_selector.PropertyGetter(sort_by)
      sort_key_fn = property_getter.Get
    else:
      sort_key_fn = None
      descending = False
    errors = []
    self.PopulateResourceFilteringStructures(args)
    items = self.FilterResults(args, self.GetResources(args, errors))
    items = lister.ProcessResults(
        resources=items,
        field_selector=field_selector,
        sort_key_fn=sort_key_fn,
        reverse_sort=descending,
        limit=args.limit)
    items = self.ComputeDynamicProperties(args, items)
    for item in items:
      if args.uri:
        yield item['selfLink']
      else:
        yield item
    # Errors are accumulated during iteration, so they can only be
    # reported once all items have been consumed.
    if errors:
      utils.RaiseToolException(errors)
  def Display(self, args, resources):
    """Prints the given resources."""
    if args.uri:
      for resource in resources:
        log.out.Print(resource)
    else:
      PrintTable(resources, self._resource_spec.table_cols)
class GlobalLister(BaseLister):
  """Base class for listing global resources."""

  def GetResources(self, args, errors):
    """Fetches global resources, honoring the optional --regexp filter."""
    filter_expr = self.GetFilterExpr(args)
    return lister.GetGlobalResources(
        service=self.service,
        project=self.project,
        filter_expr=filter_expr,
        http=self.http,
        batch_url=self.batch_url,
        errors=errors)
def GetGlobalListerHelp(resource):
  """Returns the detailed help dict for a global list command."""
  brief = 'List Google Compute Engine ' + resource
  description = """\
          *{{command}}* displays all Google Compute Engine {0} in a project.
          """.format(resource)
  examples = """\
          To list all {0} in a project in table form, run:
            $ {{command}}
          To list the URIs of all {0} in a project, run:
            $ {{command}} --uri
          """.format(resource)
  return {
      'brief': brief,
      'DESCRIPTION': description,
      'EXAMPLES': examples,
  }
class RegionalLister(BaseLister):
  """Base class for listing regional resources."""

  @staticmethod
  def Args(parser):
    """Registers --regions in addition to the base lister flags."""
    BaseLister.Args(parser)
    parser.add_argument(
        '--regions',
        metavar='REGION',
        help='If provided, only resources from the given regions are queried.',
        type=arg_parsers.ArgList(min_length=1),
        action=arg_parsers.FloatingListValuesCatcher(),
        default=[])

  def GetResources(self, args, errors):
    """Lists resources from the requested (or, by default, all) regions."""
    requested_regions = []
    for region_arg in args.regions:
      ref = self.CreateGlobalReference(region_arg, resource_type='regions')
      requested_regions.append(ref.Name())
    return lister.GetRegionalResources(
        service=self.service,
        project=self.project,
        requested_regions=requested_regions,
        filter_expr=self.GetFilterExpr(args),
        http=self.http,
        batch_url=self.batch_url,
        errors=errors)
def GetRegionalListerHelp(resource):
  """Returns the detailed help dict for a regional list command."""
  description = """\
          *{{command}}* displays all Google Compute Engine {0} in a project.
          By default, {0} from all regions are listed. The results can be
          narrowed down by providing the ``--regions'' flag.
          """.format(resource)
  examples = """\
          To list all {0} in a project in table form, run:
            $ {{command}}
          To list the URIs of all {0} in a project, run:
            $ {{command}} --uri
          To list all {0} in the ``us-central1'' and ``europe-west1'' regions,
          run:
            $ {{command}} --regions us-central1 europe-west1
          """.format(resource)
  return {
      'brief': 'List Google Compute Engine ' + resource,
      'DESCRIPTION': description,
      'EXAMPLES': examples,
  }
class ZonalLister(BaseLister):
  """Base class for listing zonal resources."""

  @staticmethod
  def Args(parser):
    """Registers --zones in addition to the base lister flags."""
    BaseLister.Args(parser)
    parser.add_argument(
        '--zones',
        metavar='ZONE',
        help='If provided, only resources from the given zones are queried.',
        type=arg_parsers.ArgList(min_length=1),
        action=arg_parsers.FloatingListValuesCatcher(),
        default=[])

  def GetResources(self, args, errors):
    """Lists resources from the requested (or, by default, all) zones."""
    requested_zones = []
    for zone_arg in args.zones:
      ref = self.CreateGlobalReference(zone_arg, resource_type='zones')
      requested_zones.append(ref.Name())
    return lister.GetZonalResources(
        service=self.service,
        project=self.project,
        requested_zones=requested_zones,
        filter_expr=self.GetFilterExpr(args),
        http=self.http,
        batch_url=self.batch_url,
        errors=errors)
def GetZonalListerHelp(resource):
  """Returns the detailed help dict for a zonal list command."""
  description = """\
          *{{command}}* displays all Google Compute Engine {0} in a project.
          By default, {0} from all zones are listed. The results can be narrowed
          down by providing the ``--zones'' flag.
          """.format(resource)
  examples = """\
          To list all {0} in a project in table form, run:
            $ {{command}}
          To list the URIs of all {0} in a project, run:
            $ {{command}} --uri
          To list all {0} in the ``us-central1-b'' and ``europe-west1-d'' zones,
          run:
            $ {{command}} --zones us-central1-b europe-west1-d
          """.format(resource)
  return {
      'brief': 'List Google Compute Engine ' + resource,
      'DESCRIPTION': description,
      'EXAMPLES': examples,
  }
class GlobalRegionalLister(BaseLister):
  """Base class for listing global and regional resources."""
  __metaclass__ = abc.ABCMeta
  @staticmethod
  def Args(parser):
    """Registers the mutually exclusive --regions/--global scope flags."""
    BaseLister.Args(parser)
    scope = parser.add_mutually_exclusive_group()
    scope.add_argument(
        '--regions',
        metavar='REGION',
        help=('If provided, only regional resources are shown. '
              'If arguments are provided, only resources from the given '
              'regions are shown.'),
        # switch_value=[] makes a bare '--regions' (no values) yield [],
        # distinguishing it from the flag being absent (None).
        action=arg_parsers.FloatingListValuesCatcher(switch_value=[]),
        type=arg_parsers.ArgList())
    scope.add_argument(
        '--global',
        action='store_true',
        help='If provided, only global resources are shown.',
        default=False)
  @abc.abstractproperty
  def global_service(self):
    """The service used to list global resources."""
  @abc.abstractproperty
  def regional_service(self):
    """The service used to list regional resources."""
  def GetResources(self, args, errors):
    """Yields regional and/or global resources."""
    # This is true if the user provided no flags indicating scope
    no_scope_flags = args.regions is None and not getattr(args, 'global')
    requests = []
    filter_expr = self.GetFilterExpr(args)
    max_results = constants.MAX_RESULTS_PER_PAGE
    project = self.project
    # If --global is present OR no scope flags are present then we have to fetch
    # the global resources.
    # NOTE(review): the comment above says global resources are also fetched
    # when no scope flags are given, but the condition below checks only
    # --global; the no-flag case falls through to the AggregatedList request
    # instead -- confirm whether AggregatedList covers the global scope or
    # whether this condition should also test no_scope_flags.
    if getattr(args, 'global'):
      requests.append(
          (self.global_service,
           'List',
           self.global_service.GetRequestType('List')(
               filter=filter_expr,
               maxResults=max_results,
               project=project)))
    # If --regions is present with no arguments OR no scope flags are present
    # then we have to do an aggregated list
    # pylint:disable=g-explicit-bool-comparison
    if args.regions == [] or no_scope_flags:
      requests.append(
          (self.regional_service,
           'AggregatedList',
           self.regional_service.GetRequestType('AggregatedList')(
               filter=filter_expr,
               maxResults=max_results,
               project=project)))
    # Else if some regions were provided then only list within them
    elif args.regions:
      region_names = set(
          self.CreateGlobalReference(region, resource_type='regions').Name()
          for region in args.regions)
      for region_name in sorted(region_names):
        requests.append(
            (self.regional_service,
             'List',
             self.regional_service.GetRequestType('List')(
                 filter=filter_expr,
                 maxResults=max_results,
                 region=region_name,
                 project=project)))
    return request_helper.MakeRequests(
        requests=requests,
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=None)
def GetGlobalRegionalListerHelp(resource):
  """Returns the detailed help dict for a global and regional list command.

  Args:
    resource: str, the plural resource noun spliced into the help text
        (e.g. 'forwarding rules').

  Returns:
    A dict with 'brief', 'DESCRIPTION', and 'EXAMPLES' entries.

  The EXAMPLES text previously contained a mislabeled duplicate example
  ("in zones ``us-central1-b'' and ``europe-west1-d''" paired with a
  --regions command that was repeated verbatim later); that entry has
  been removed.
  """
  return {
      'brief': 'List Google Compute Engine ' + resource,
      'DESCRIPTION': """\
          *{{command}}* displays all Google Compute Engine {0} in a project.
          By default, global {0} and {0} from all regions are listed. The
          results can be narrowed down by providing the ``--regions'' or
          ``--global'' flag.
          """.format(resource),
      'EXAMPLES': """\
          To list all {0} in a project in table form, run:
            $ {{command}}
          To list the URIs of all {0} in a project, run:
            $ {{command}} --uri
          To list all global {0} in a project, run:
            $ {{command}} --global
          To list all regional {0} in a project, run:
            $ {{command}} --regions
          To list all {0} in the ``us-central1'' and ``europe-west1'' regions,
          run:
            $ {{command}} --regions us-central1 europe-west1
          """.format(resource)
  }
class BaseDescriber(BaseCommand):
  """Base class for the describe subcommands."""
  __metaclass__ = abc.ABCMeta
  @staticmethod
  def Args(parser, resource=None, cli=None, completer_command=None):
    BaseDescriber.AddArgs(parser, resource, cli, completer_command)
  @staticmethod
  def AddArgs(parser, resource=None, cli=None, completer_command=None):
    """Adds the NAME positional argument (with optional tab completion)."""
    describe = parser.add_argument(
        'name',
        metavar='NAME',
        help='The name of the resource to fetch.')
    if cli:
      describe.completer = utils.GetCompleterForResource(
          resource, cli, completer_command)
  @property
  def method(self):
    """The API method invoked to fetch the resource."""
    return 'Get'
  def ScopeRequest(self, ref, request):
    """Adds a zone or region to the request object if necessary."""
  @abc.abstractmethod
  def CreateReference(self, args):
    pass
  def SetNameField(self, ref, request):
    """Sets the field in the request that corresponds to the object name."""
    # The resource name is always the last ordered parameter of the method.
    name_field = self.service.GetMethodConfig(self.method).ordered_params[-1]
    setattr(request, name_field, ref.Name())
  def ComputeDynamicProperties(self, args, items):
    """Computes dynamic properties, which are not returned by GCE API."""
    _ = args
    return items
  def Run(self, args):
    """Returns the JSON-serializable dict for the requested resource."""
    # The field selector should be constructed before any resources
    # are fetched, so if there are any syntactic errors with the
    # fields, we can fail fast.
    field_selector = property_selector.PropertySelector(properties=args.fields)
    ref = self.CreateReference(args)
    get_request_class = self.service.GetRequestType(self.method)
    request = get_request_class(project=self.project)
    self.SetNameField(ref, request)
    self.ScopeRequest(ref, request)
    get_request = (self.service, self.method, request)
    errors = []
    objects = request_helper.MakeRequests(
        requests=[get_request],
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=None)
    resources = lister.ProcessResults(objects, field_selector)
    # Force evaluation so any request errors are collected before the
    # error check below.
    resources = list(self.ComputeDynamicProperties(args, resources))
    if errors:
      utils.RaiseToolException(
          errors,
          error_message='Could not fetch resource:')
    return resources[0]
class GlobalDescriber(BaseDescriber):
  """Base class for describing global resources."""
  def CreateReference(self, args):
    # Global resources need only the name; no zone/region qualifier.
    return self.CreateGlobalReference(args.name)
class RegionalDescriber(BaseDescriber):
  """Base class for describing regional resources."""
  @staticmethod
  def Args(parser, resource=None, cli=None, command=None):
    """Adds the NAME positional plus the --region flag."""
    BaseDescriber.AddArgs(parser, resource, cli, command)
    utils.AddRegionFlag(
        parser,
        resource_type='resource',
        operation_type='fetch',
        cli=cli)
  def CreateReference(self, args):
    """Resolves NAME within the requested region."""
    return self.CreateRegionalReference(args.name, args.region)
  def ScopeRequest(self, ref, request):
    """Copies the resolved region onto the API request."""
    request.region = ref.region
class ZonalDescriber(BaseDescriber):
  """Base class for describing zonal resources."""
  @staticmethod
  def Args(parser, resource=None, cli=None, command=None):
    """Adds the NAME positional plus the --zone flag."""
    BaseDescriber.AddArgs(parser, resource, cli, command)
    utils.AddZoneFlag(
        parser,
        resource_type='resource',
        operation_type='fetch',
        cli=cli)
  def CreateReference(self, args):
    """Resolves NAME within the requested zone."""
    return self.CreateZonalReference(args.name, args.zone)
  def ScopeRequest(self, ref, request):
    """Copies the resolved zone onto the API request."""
    request.zone = ref.zone
class GlobalRegionalDescriber(BaseDescriber):
  """Base class for describing resources that may be global or regional."""
  __metaclass__ = abc.ABCMeta

  @staticmethod
  def Args(parser, resource_type, cli=None, command=None):
    resource = resource_type
    BaseDescriber.AddArgs(parser, 'compute.' + resource, cli, command)
    AddFieldsFlag(parser, resource_type)
    # --region and --global are mutually exclusive ways of scoping the lookup.
    scope = parser.add_mutually_exclusive_group()
    region = scope.add_argument(
        '--region',
        help='The region of the resource to fetch.',
        action=actions.StoreProperty(properties.VALUES.compute.region))
    if cli:
      region.completer = utils.GetCompleterForResource(
          'compute.regions', cli)
    scope.add_argument(
        '--global',
        action='store_true',
        help=('If provided, it is assumed that the requested resource is '
              'global.'))

  @abc.abstractproperty
  def global_service(self):
    """The service used to list global resources."""

  @abc.abstractproperty
  def regional_service(self):
    """The service used to list regional resources."""

  @abc.abstractproperty
  def global_resource_type(self):
    """The type of global resources."""

  @abc.abstractproperty
  def regional_resource_type(self):
    """The type of regional resources."""

  @property
  def service(self):
    # Set by CreateReference() once the scope of the requested resource is
    # known; CreateReference() must run before this property is read.
    return self._service

  def CreateReference(self, args):
    """Resolves args.name to a global or regional reference.

    Side effect: selects self._service to match the resolved scope.
    """
    try:
      # First try parsing args.name as a full/partial URI.
      ref = self.resources.Parse(args.name, params={'region': args.region})
    except resource_exceptions.UnknownCollectionException:
      # Not a URI; fall back to the scope flags to build the reference.
      if getattr(args, 'global'):
        ref = self.CreateGlobalReference(
            args.name, resource_type=self.global_resource_type)
      else:
        ref = self.CreateRegionalReference(
            args.name, args.region, resource_type=self.regional_resource_type)
    if ref.Collection() not in (
        'compute.{0}'.format(self.regional_resource_type),
        'compute.{0}'.format(self.global_resource_type)):
      raise calliope_exceptions.ToolException(
          'You must pass in a reference to a global or regional resource.')
    ref_resource_type = utils.CollectionToResourceType(ref.Collection())
    if ref_resource_type == self.global_resource_type:
      self._service = self.global_service
    else:
      self._service = self.regional_service
    return ref

  def ScopeRequest(self, ref, request):
    # Only regional requests carry a region field.
    if ref.Collection() == 'compute.{0}'.format(self.regional_resource_type):
      request.region = ref.region
def AddFieldsFlag(parser, resource_type):
  """Adds the --fields flag to the given parser.

  This function is to be called from implementations of describe/list
  subcommands. The resulting help text of --fields will contain all
  valid values for the flag. We need this function because Args() is a
  static method so the only way to communicate the resource type is by
  having the subclass pass it in.

  Args:
    parser: The parser to add --fields to.
    resource_type: The resource type as defined in the resource_specs
      module.
  """
  def GenerateDetailedHelp():
    # Lazily enumerates the valid field names from the v1 resource spec.
    return ('Fields to display. Possible values are:\n+\n ' +
            '\n '.join(resource_specs.GetSpec(
                resource_type, compute_v1_messages, 'v1').fields))

  fields = parser.add_argument(
      '--fields',
      type=arg_parsers.ArgList(min_length=1),
      metavar='FIELD',
      action=arg_parsers.FloatingListValuesCatcher(),
      # We have not reached an agreement over the --fields flag for
      # Cloud SDK tools. It has been agreed that the compute component
      # will keep --fields but will keep it undocumented until
      # consensus can be reached over the flag's fate.
      help=argparse.SUPPRESS)
  # Note that we do not actually call GenerateDetailedHelp, the help
  # generator does that. This is important because getting the set of
  # fields is a potentially expensive operation, so we only want to do
  # it when needed.
  fields.detailed_help = GenerateDetailedHelp
class BaseAsyncMutator(BaseCommand):
  """Base class for subcommands that mutate resources."""
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def service(self):
    """The service that can mutate resources."""

  @property
  def custom_get_requests(self):
    """Returns request objects for getting the mutated resources.

    This should be a dict mapping operation targetLink names to
    requests that can be passed to batch_helper. This is useful for
    verbs whose operations do not point to the resources being mutated
    (e.g., Disks.createSnapshot).

    If None, the operations' targetLinks are used to fetch the mutated
    resources.
    """
    return None

  @abc.abstractproperty
  def method(self):
    """The method name on the service as a string."""

  @abc.abstractmethod
  def CreateRequests(self, args):
    """Creates the requests that perform the mutation.

    It is okay for this method to make calls to the API as long as the
    calls originating from this method do not cause any mutations.

    Args:
      args: The command-line arguments.

    Returns:
      A list of request protobufs.
    """

  def Run(self, args, request_protobufs=None, service=None):
    # Issues the mutation requests and returns the resulting resources.
    if request_protobufs is None:
      request_protobufs = self.CreateRequests(args)
    if service is None:
      service = self.service
    requests = []
    # If a method is not passed as part of a tuple then use the self.method
    # default
    for request in request_protobufs:
      if isinstance(request, tuple):
        method = request[0]
        proto = request[1]
      else:
        method = self.method
        proto = request
      requests.append((service, method, proto))

    errors = []
    # We want to run through the generator that MakeRequests returns in order
    # to actually make the requests, since these requests mutate resources.
    resources = list(request_helper.MakeRequests(
        requests=requests,
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=self.custom_get_requests))

    resources = lister.ProcessResults(
        resources=resources,
        field_selector=property_selector.PropertySelector(
            properties=None,
            transformations=self.transformations))
    if errors:
      utils.RaiseToolException(errors)
    return resources

  def Display(self, _, resources):
    """Prints the given resources."""
    if resources:
      resource_printer.Print(resources=resources,
                             print_format='yaml',
                             out=log.out)
class NoOutputAsyncMutator(BaseAsyncMutator):
  """Mutator that suppresses the default display of mutated resources."""

  def Display(self, _, resources):
    # Intentionally a no-op: subcommands built on this class do not
    # print the resources they mutate.
    pass
class InstanceGroupFilteringMode(Enum):
  """Filtering mode for Instance Groups based on dynamic properties."""
  # Include every instance group, managed or not.
  all_groups = 1
  # Include only groups that have a matching instance group manager.
  only_managed_groups = 2
  # Include only groups without a matching instance group manager.
  only_unmanaged_groups = 3
class InstanceGroupManagerDynamicProperiesMixin(object):
  """Mixin class to compute dynamic information for instance group managers.

  NOTE(review): the 'Properies' typo in the class name is kept because
  renaming would break external subclasses.
  """

  def ComputeInstanceGroupSize(self, items):
    """Annotates each instance group manager with its group's size.

    Args:
      items: Iterable of serialized instance group managers (dicts with at
        least 'zone' and 'selfLink' keys).

    Yields:
      The same dicts with a string 'size' field added.
    """
    errors = []
    items = list(items)
    # Built-in set replaces the long-deprecated sets.Set (removed in Py3).
    zone_names = set(
        path_simplifier.Name(result['zone']) for result in items)

    instance_groups = lister.GetZonalResources(
        service=self.compute.instanceGroups,
        project=self.project,
        requested_zones=zone_names,
        filter_expr=None,
        http=self.http,
        batch_url=self.batch_url,
        errors=errors)
    # Building the dict consumes the (possibly lazy) result iterator, so
    # errors must be checked only after this point.
    instance_group_ref_to_size = dict(
        (path_simplifier.ScopedSuffix(ig.selfLink), ig.size)
        for ig in instance_groups)

    if errors:
      utils.RaiseToolException(errors)

    for item in items:
      self_link = item['selfLink']
      # Managers and groups share the same scoped name; translate the
      # manager's selfLink into the corresponding group's selfLink.
      gm_self_link = self_link.replace(
          '/instanceGroupManagers/', '/instanceGroups/')
      scoped_suffix = path_simplifier.ScopedSuffix(gm_self_link)
      size = instance_group_ref_to_size[scoped_suffix]

      item['size'] = str(size)
      yield item
class InstanceGroupDynamicProperiesMixin(object):
  """Mixin class to compute dynamic information for instance groups.

  NOTE(review): the 'Properies' typo in the class name is kept because
  renaming would break external subclasses.
  """

  def ComputeInstanceGroupManagerMembership(self, items,
                                            filter_mode=(
                                                InstanceGroupFilteringMode
                                                .all_groups)):
    """Annotates each instance group with manager-membership information.

    Args:
      items: Iterable of serialized instance groups (dicts with at least
        'zone' and 'selfLink' keys).
      filter_mode: InstanceGroupFilteringMode selecting managed, unmanaged,
        or all groups.

    Yields:
      The dicts passing the filter, with an 'isManaged' field added and,
      for managed groups, an 'instanceGroupManagerUri' field.
    """
    errors = []
    items = list(items)
    # Built-in set replaces the long-deprecated sets.Set (removed in Py3).
    zone_names = set(
        path_simplifier.Name(result['zone']) for result in items)

    instance_group_managers = lister.GetZonalResources(
        service=self.compute.instanceGroupManagers,
        project=self.project,
        requested_zones=zone_names,
        filter_expr=None,
        http=self.http,
        batch_url=self.batch_url,
        errors=errors)
    # Building the set consumes the (possibly lazy) result iterator, so
    # errors must be checked only after this point.
    instance_group_managers_refs = set(
        path_simplifier.ScopedSuffix(igm.selfLink)
        for igm in instance_group_managers)

    if errors:
      utils.RaiseToolException(errors)

    for item in items:
      self_link = item['selfLink']
      # Groups and their managers share a scoped name; translate the group's
      # selfLink into the manager's selfLink to test membership.
      igm_self_link = self_link.replace(
          '/instanceGroups/', '/instanceGroupManagers/')
      scoped_suffix = path_simplifier.ScopedSuffix(igm_self_link)
      is_managed = scoped_suffix in instance_group_managers_refs

      if (is_managed and
          filter_mode == InstanceGroupFilteringMode.only_unmanaged_groups):
        continue
      elif (not is_managed and
            filter_mode == InstanceGroupFilteringMode.only_managed_groups):
        continue

      item['isManaged'] = ('Yes' if is_managed else 'No')
      if is_managed:
        item['instanceGroupManagerUri'] = igm_self_link
      yield item
class ListOutputMixin(object):
  """Mixin whose Display() renders the resources as a table."""

  def Display(self, _, resources):
    columns = self._resource_spec.table_cols
    PrintTable(resources, columns)
class BaseAsyncCreator(ListOutputMixin, BaseAsyncMutator):
  """Base class for subcommands that create resources."""
  # Combines BaseAsyncMutator's mutation pipeline with ListOutputMixin's
  # tabular Display(); subclasses only supply CreateRequests() and the
  # abstract service/method properties.
class BaseDeleter(BaseAsyncMutator):
  """Base class for deleting resources."""

  @staticmethod
  def AddArgs(parser, resource=None, cli=None, command=None):
    # One or more positional NAME arguments; tab completion is wired up
    # only when a CLI object is available.
    delete = parser.add_argument(
        'names',
        metavar='NAME',
        nargs='+',
        help='The resources to delete.')
    if cli:
      delete.completer = utils.GetCompleterForResource(
          resource, cli, command)

  @abc.abstractproperty
  def resource_type(self):
    """The name of the collection that we will delete from."""

  @abc.abstractproperty
  def reference_creator(self):
    """A function that can construct resource reference objects."""

  @abc.abstractproperty
  def scope_name(self):
    """The name of the scope of the resource references."""

  @property
  def method(self):
    return 'Delete'

  @property
  def custom_prompt(self):
    """Allows subclasses to override the delete confirmation message."""
    return None

  def ScopeRequest(self, args, request):
    """Adds a zone or region to the request object if necessary."""

  def CreateRequests(self, args):
    """Returns a list of delete request protobufs."""
    delete_request_class = self.service.GetRequestType(self.method)
    # The resource name is the last ordered parameter of the Delete method.
    name_field = self.service.GetMethodConfig(self.method).ordered_params[-1]

    # pylint:disable=too-many-function-args
    refs = self.reference_creator(args.names, args)
    # Interactive confirmation happens before any request is built.
    utils.PromptForDeletion(
        refs, self.scope_name, prompt_title=self.custom_prompt)

    requests = []
    for ref in refs:
      request = delete_request_class(project=self.project)
      setattr(request, name_field, ref.Name())
      self.ScopeRequest(ref, request)
      requests.append(request)
    return requests
class ZonalDeleter(BaseDeleter):
  """Deleter for resources that live in a zone."""

  @staticmethod
  def Args(parser, resource=None, cli=None, command=None):
    BaseDeleter.AddArgs(parser, resource, cli, command)
    utils.AddZoneFlag(
        parser, resource_type='resources', operation_type='delete', cli=cli)

  @property
  def reference_creator(self):
    # Builds references scoped by the zone supplied on the command line.
    def _MakeRefs(names, args):
      return self.CreateZonalReferences(names, args.zone)
    return _MakeRefs

  def ScopeRequest(self, ref, request):
    request.zone = ref.zone

  @property
  def scope_name(self):
    return 'zone'
class RegionalDeleter(BaseDeleter):
  """Deleter for resources that live in a region."""

  @staticmethod
  def Args(parser, resource=None, cli=None, command=None):
    BaseDeleter.AddArgs(parser, resource, cli, command)
    utils.AddRegionFlag(
        parser, resource_type='resources', operation_type='delete', cli=cli)

  @property
  def reference_creator(self):
    # Builds references scoped by the region supplied on the command line.
    def _MakeRefs(names, args):
      return self.CreateRegionalReferences(names, args.region)
    return _MakeRefs

  def ScopeRequest(self, ref, request):
    request.region = ref.region

  @property
  def scope_name(self):
    return 'region'
class GlobalDeleter(BaseDeleter):
  """Deleter for global resources."""

  @staticmethod
  def Args(parser, resource=None, cli=None, command=None):
    BaseDeleter.AddArgs(parser, resource, cli, command)

  @property
  def reference_creator(self):
    # Global references need no scope argument.
    def _MakeRefs(names, _):
      return self.CreateGlobalReferences(names)
    return _MakeRefs

  @property
  def scope_name(self):
    # Global resources have no scope flag.
    return None
class ReadWriteCommand(BaseCommand):
  """Base class for read->update->write subcommands."""
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def service(self):
    pass

  # TODO(user): Make this an abstractproperty once all
  # ReadWriteCommands support URIs and prompting.
  def CreateReference(self, args):
    """Returns a resources.Resource object for the object being mutated."""

  @abc.abstractmethod
  def GetGetRequest(self, args):
    """Returns a request for fetching the resource."""

  @abc.abstractmethod
  def GetSetRequest(self, args, replacement, existing):
    """Returns a request for setting the resource."""

  @abc.abstractmethod
  def Modify(self, args, existing):
    """Returns a modified resource."""

  def Run(self, args):
    # Generator implementing the read -> Modify() -> conditional-write flow.
    self.ref = self.CreateReference(args)
    get_request = self.GetGetRequest(args)

    errors = []
    objects = list(request_helper.MakeRequests(
        requests=[get_request],
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=None))
    if errors:
      utils.RaiseToolException(
          errors,
          error_message='There was a problem fetching the resource:')

    new_object = self.Modify(args, objects[0])

    # If existing object is equal to the proposed object or if
    # Modify() returns None, then there is no work to be done, so we
    # print the resource and return.
    if not new_object or objects[0] == new_object:
      for resource in lister.ProcessResults(
          resources=[objects[0]],
          field_selector=property_selector.PropertySelector(
              properties=None,
              transformations=self.transformations)):
        yield resource
      return

    resources = request_helper.MakeRequests(
        requests=[self.GetSetRequest(args, new_object, objects[0])],
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=None)

    resources = lister.ProcessResults(
        resources=resources,
        field_selector=property_selector.PropertySelector(
            properties=None,
            transformations=self.transformations))
    for resource in resources:
      yield resource

    # Errors can only be checked after the lazy MakeRequests generator
    # above has been consumed by the loop.
    if errors:
      utils.RaiseToolException(
          errors,
          error_message='There was a problem modifying the resource:')

  def Display(self, _, resources):
    # We want to consume the generator here, but don't want to actually
    # display the resources unless --format is called (which skips this
    # method).
    list(resources)
class BaseMetadataAdder(ReadWriteCommand):
  """Base class for adding or modifying metadata entries."""

  @staticmethod
  def Args(parser):
    metadata_utils.AddMetadataArgs(parser)

  def Modify(self, args, existing):
    # Returns a deep copy of `existing` with the new metadata merged in,
    # or None when the merge would change nothing (skips the write).
    new_object = copy.deepcopy(existing)
    existing_metadata = getattr(existing, self.metadata_field, None)
    setattr(
        new_object,
        self.metadata_field,
        metadata_utils.ConstructMetadataMessage(
            self.messages,
            metadata=args.metadata,
            metadata_from_file=args.metadata_from_file,
            existing_metadata=existing_metadata))

    if metadata_utils.MetadataEqual(
        existing_metadata,
        getattr(new_object, self.metadata_field, None)):
      return None
    else:
      return new_object

  def Run(self, args):
    # Validate flags before delegating to the read-modify-write loop.
    if not args.metadata and not args.metadata_from_file:
      raise calliope_exceptions.ToolException(
          'At least one of [--metadata] or [--metadata-from-file] must be '
          'provided.')

    return super(BaseMetadataAdder, self).Run(args)
class BaseMetadataRemover(ReadWriteCommand):
  """Base class for removing metadata entries."""

  @staticmethod
  def Args(parser):
    # --all and --keys are mutually exclusive removal modes.
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        '--all',
        action='store_true',
        default=False,
        help='If provided, all metadata entries are removed.')
    group.add_argument(
        '--keys',
        type=arg_parsers.ArgList(min_length=1),
        metavar='KEY',
        action=arg_parsers.FloatingListValuesCatcher(),
        help='The keys of the entries to remove.')

  def Modify(self, args, existing):
    # Returns a deep copy of `existing` with entries removed, or None when
    # the removal would change nothing (skips the write).
    new_object = copy.deepcopy(existing)
    existing_metadata = getattr(existing, self.metadata_field, None)
    setattr(new_object,
            self.metadata_field,
            metadata_utils.RemoveEntries(
                self.messages,
                existing_metadata=existing_metadata,
                keys=args.keys,
                remove_all=args.all))

    if metadata_utils.MetadataEqual(
        existing_metadata,
        getattr(new_object, self.metadata_field, None)):
      return None
    else:
      return new_object

  def Run(self, args):
    # Validate flags before delegating to the read-modify-write loop.
    if not args.all and not args.keys:
      raise calliope_exceptions.ToolException(
          'One of [--all] or [--keys] must be provided.')

    return super(BaseMetadataRemover, self).Run(args)
class InstanceMetadataMutatorMixin(ReadWriteCommand):
  """Mixin for mutating instance metadata."""

  @staticmethod
  def Args(parser):
    utils.AddZoneFlag(
        parser,
        resource_type='instance',
        operation_type='set metadata on')
    parser.add_argument(
        'name',
        metavar='NAME',
        help='The name of the instance whose metadata should be modified.')

  @property
  def resource_type(self):
    return 'instances'

  @property
  def service(self):
    return self.compute.instances

  @property
  def metadata_field(self):
    # Field on the Instance message that holds its metadata.
    return 'metadata'

  def CreateReference(self, args):
    return self.CreateZonalReference(args.name, args.zone)

  def GetGetRequest(self, args):
    # (service, verb, request) tuple consumed by request_helper.
    return (self.service,
            'Get',
            self.messages.ComputeInstancesGetRequest(
                instance=self.ref.Name(),
                project=self.project,
                zone=self.ref.zone))

  def GetSetRequest(self, args, replacement, existing):
    return (self.service,
            'SetMetadata',
            self.messages.ComputeInstancesSetMetadataRequest(
                instance=self.ref.Name(),
                metadata=replacement.metadata,
                project=self.project,
                zone=self.ref.zone))
class InstanceTagsMutatorMixin(ReadWriteCommand):
  """Mixin for mutating instance tags."""

  @staticmethod
  def Args(parser):
    utils.AddZoneFlag(
        parser,
        resource_type='instance',
        operation_type='set tags on')
    parser.add_argument(
        'name',
        metavar='NAME',
        help='The name of the instance whose tags should be modified.')

  @property
  def resource_type(self):
    return 'instances'

  @property
  def service(self):
    return self.compute.instances

  def CreateReference(self, args):
    return self.CreateZonalReference(args.name, args.zone)

  def GetGetRequest(self, args):
    # (service, verb, request) tuple consumed by request_helper.
    return (self.service,
            'Get',
            self.messages.ComputeInstancesGetRequest(
                instance=self.ref.Name(),
                project=self.project,
                zone=self.ref.zone))

  def GetSetRequest(self, args, replacement, existing):
    return (self.service,
            'SetTags',
            self.messages.ComputeInstancesSetTagsRequest(
                instance=self.ref.Name(),
                tags=replacement.tags,
                project=self.project,
                zone=self.ref.zone))
class ProjectMetadataMutatorMixin(ReadWriteCommand):
  """Mixin for mutating project-level metadata."""

  @property
  def service(self):
    return self.compute.projects

  @property
  def metadata_field(self):
    # Project-wide metadata lives in the commonInstanceMetadata field.
    return 'commonInstanceMetadata'

  def GetGetRequest(self, args):
    # (service, verb, request) tuple consumed by request_helper.
    return (self.service,
            'Get',
            self.messages.ComputeProjectsGetRequest(
                project=self.project))

  def GetSetRequest(self, args, replacement, existing):
    return (self.service,
            'SetCommonInstanceMetadata',
            self.messages.ComputeProjectsSetCommonInstanceMetadataRequest(
                metadata=replacement.commonInstanceMetadata,
                project=self.project))
# Preamble written (as '#' comment lines) at the top of the scratch file
# that BaseEdit.Run() opens in the user's $EDITOR.
_HELP = textwrap.dedent("""\
You can edit the resource below. Lines beginning with "#" are
ignored.
If you introduce a syntactic error, you will be given the
opportunity to edit the file again. You can abort by closing this
file without saving it.
At the bottom of this file, you will find an example resource.
Only fields that can be modified are shown. The original resource
with all of its fields is reproduced in the comment section at the
bottom of this document.
""")
def _SerializeDict(value, fmt):
"""Serializes value to either JSON or YAML."""
if fmt == 'json':
return json.dumps(
value,
indent=2,
sort_keys=True,
separators=(',', ': '))
else:
yaml.add_representer(
collections.OrderedDict,
yaml.dumper.SafeRepresenter.represent_dict,
Dumper=yaml.dumper.SafeDumper)
return yaml.safe_dump(
value,
indent=2,
default_flow_style=False,
width=70)
def _DeserializeValue(value, fmt):
"""Parses the given JSON or YAML value."""
if fmt == 'json':
return json.loads(value)
else:
return yaml.load(value)
def _WriteResourceInCommentBlock(serialized_resource, title, buf):
"""Outputs a comment block with the given serialized resource."""
buf.write('# ')
buf.write(title)
buf.write('\n# ')
buf.write('-' * len(title))
buf.write('\n#\n')
for line in serialized_resource.splitlines():
buf.write('#')
if line:
buf.write(' ')
buf.write(line)
buf.write('\n')
class BaseEdit(BaseCommand):
  """Base class for modifying resources using $EDITOR."""
  __metaclass__ = abc.ABCMeta

  # Serialization format used when the user does not pass --format.
  DEFAULT_FORMAT = 'yaml'

  @abc.abstractmethod
  def CreateReference(self, args):
    """Returns a resources.Resource object for the object being mutated."""

  @abc.abstractproperty
  def reference_normalizers(self):
    """Defines how to normalize resource references."""

  @abc.abstractproperty
  def service(self):
    pass

  @abc.abstractmethod
  def GetGetRequest(self, args):
    """Returns a request for fetching the resource."""

  @abc.abstractmethod
  def GetSetRequest(self, args, replacement, existing):
    """Returns a request for setting the resource."""

  @abc.abstractproperty
  def example_resource(self):
    pass

  def ProcessEditedResource(self, file_contents, args):
    """Returns an updated resource that was edited by the user."""

    # It's very important that we replace the characters of comment
    # lines with spaces instead of removing the comment lines
    # entirely. JSON and YAML deserialization give error messages
    # containing line, column, and the character offset of where the
    # error occurred. If the deserialization fails, we want to make
    # sure those numbers map back to what the user actually had in
    # front of him or her otherwise the errors will not be very
    # useful.
    non_comment_lines = '\n'.join(
        ' ' * len(line) if line.startswith('#') else line
        for line in file_contents.splitlines())

    modified_record = _DeserializeValue(non_comment_lines,
                                        args.format or BaseEdit.DEFAULT_FORMAT)

    # Normalizes all of the fields that refer to other
    # resource. (i.e., translates short names to URIs)
    reference_normalizer = property_selector.PropertySelector(
        transformations=self.reference_normalizers)
    modified_record = reference_normalizer.Apply(modified_record)

    if self.modifiable_record == modified_record:
      new_object = None
    else:
      # The name and fingerprint are not user-editable; restore them from
      # the original record before building the replacement message.
      modified_record['name'] = self.original_record['name']
      fingerprint = self.original_record.get('fingerprint')
      if fingerprint:
        modified_record['fingerprint'] = fingerprint

      new_object = encoding.DictToMessage(
          modified_record, self._resource_spec.message_class)

    # If existing object is equal to the proposed object or if
    # there is no new object, then there is no work to be done, so we
    # return the original object.
    if not new_object or self.original_object == new_object:
      return [self.original_object]

    errors = []
    resources = list(request_helper.MakeRequests(
        requests=[self.GetSetRequest(args, new_object, self.original_object)],
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=None))
    if errors:
      utils.RaiseToolException(
          errors,
          error_message='Could not update resource:')

    return resources

  def Run(self, args):
    # Fetch the resource, open it in $EDITOR, and re-apply until the edited
    # text parses and is accepted, or the user aborts.
    self.ref = self.CreateReference(args)
    get_request = self.GetGetRequest(args)

    errors = []
    objects = list(request_helper.MakeRequests(
        requests=[get_request],
        http=self.http,
        batch_url=self.batch_url,
        errors=errors,
        custom_get_requests=None))
    if errors:
      utils.RaiseToolException(
          errors,
          error_message='Could not fetch resource:')

    self.original_object = objects[0]
    self.original_record = encoding.MessageToDict(self.original_object)

    # Selects only the fields that can be modified.
    field_selector = property_selector.PropertySelector(
        properties=self._resource_spec.editables)
    self.modifiable_record = field_selector.Apply(self.original_record)

    # Assemble the scratch buffer: help text as comments, then the editable
    # resource, then the example and original resources as comment blocks.
    buf = cStringIO.StringIO()
    for line in _HELP.splitlines():
      buf.write('#')
      if line:
        buf.write(' ')
      buf.write(line)
      buf.write('\n')

    buf.write('\n')
    buf.write(_SerializeDict(self.modifiable_record,
                             args.format or BaseEdit.DEFAULT_FORMAT))
    buf.write('\n')

    example = _SerializeDict(
        encoding.MessageToDict(self.example_resource),
        args.format or BaseEdit.DEFAULT_FORMAT)
    _WriteResourceInCommentBlock(example, 'Example resource:', buf)

    buf.write('#\n')

    original = _SerializeDict(self.original_record,
                              args.format or BaseEdit.DEFAULT_FORMAT)
    _WriteResourceInCommentBlock(original, 'Original resource:', buf)

    file_contents = buf.getvalue()
    while True:
      file_contents = edit.OnlineEdit(file_contents)
      try:
        resources = self.ProcessEditedResource(file_contents, args)
        break
      except (ValueError, yaml.error.YAMLError,
              protorpc.messages.ValidationError,
              calliope_exceptions.ToolException) as e:
        if isinstance(e, ValueError):
          message = e.message
        else:
          message = str(e)
        # Distinguish parse failures from server-side rejections in the
        # retry prompt shown to the user.
        if isinstance(e, calliope_exceptions.ToolException):
          problem_type = 'applying'
        else:
          problem_type = 'parsing'
        message = ('There was a problem {0} your changes: {1}'
                   .format(problem_type, message))
        if not console_io.PromptContinue(
            message=message,
            prompt_string='Would you like to edit the resource again?'):
          raise calliope_exceptions.ToolException('Edit aborted by user.')

    resources = lister.ProcessResults(
        resources=resources,
        field_selector=property_selector.PropertySelector(
            properties=None,
            transformations=self.transformations))
    for resource in resources:
      yield resource

  def Display(self, _, resources):
    resource_printer.Print(
        resources=resources,
        print_format='yaml',
        out=log.out)
# ---------------------------------------------------------------------------
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbmonbindings_servicegroup_binding(base_resource):
    """Binding class showing the servicegroup that can be bound to lbmonbindings."""

    def __init__(self):
        # Attributes of the bound servicegroup as returned by the NITRO API.
        self._servicegroupname = ""
        self._servicetype = ""
        self._boundservicegroupsvrstate = ""
        self._monstate = ""
        self._monitorname = ""
        # Resource count; populated when options.count is requested.
        self.___count = 0

    @property
    def monitorname(self):
        """The name of the monitor.<br/>Minimum length = 1."""
        try:
            return self._monitorname
        except Exception as e:
            raise e

    @monitorname.setter
    def monitorname(self, monitorname):
        """Sets the name of the monitor.<br/>Minimum length = 1."""
        try:
            self._monitorname = monitorname
        except Exception as e:
            raise e

    @property
    def servicegroupname(self):
        """The name of the service group."""
        try:
            return self._servicegroupname
        except Exception as e:
            raise e

    @servicegroupname.setter
    def servicegroupname(self, servicegroupname):
        """Sets the name of the service group."""
        try:
            self._servicegroupname = servicegroupname
        except Exception as e:
            raise e

    @property
    def boundservicegroupsvrstate(self):
        """The state of the servicegroup (read-only).<br/>Possible values = ENABLED, DISABLED."""
        try:
            return self._boundservicegroupsvrstate
        except Exception as e:
            raise e

    @property
    def monstate(self):
        """The configured state (enable/disable) of Monitor on this service (read-only).<br/>Possible values = ENABLED, DISABLED."""
        try:
            return self._monstate
        except Exception as e:
            raise e

    @property
    def servicetype(self):
        """The type of service (read-only).<br/>Possible values = HTTP, FTP, TCP, UDP, SSL, SSL_BRIDGE, SSL_TCP, DTLS, NNTP, RPCSVR, DNS, ADNS, SNMP, RTSP, DHCPRA, ANY, SIP_UDP, DNS_TCP, ADNS_TCP, MYSQL, MSSQL, ORACLE, RADIUS, RDP, DIAMETER, SSL_DIAMETER, TFTP."""
        try:
            return self._servicetype
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response):
        """Converts a raw NITRO response into the resource object array (for GET requests)."""
        try:
            result = service.payload_formatter.string_to_resource(lbmonbindings_servicegroup_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0):
                # Error code 444 indicates the session has expired.
                if (result.errorcode == 444):
                    service.clear_session(self)
                if result.severity:
                    if (result.severity == "ERROR"):
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else:
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.lbmonbindings_servicegroup_binding
        except Exception as e:
            raise e

    def _get_object_name(self):
        """Returns the value of the object identifier argument (the monitor name)."""
        try:
            if (self.monitorname):
                return str(self.monitorname)
            return None
        except Exception as e:
            raise e

    @classmethod
    def get(cls, service, monitorname):
        """Use this API to fetch lbmonbindings_servicegroup_binding resources."""
        try:
            obj = lbmonbindings_servicegroup_binding()
            obj.monitorname = monitorname
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, monitorname, filter_):
        """Use this API to fetch a filtered set of lbmonbindings_servicegroup_binding resources.

        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        try:
            obj = lbmonbindings_servicegroup_binding()
            obj.monitorname = monitorname
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, monitorname):
        """Use this API to count lbmonbindings_servicegroup_binding resources configured on NetScaler."""
        try:
            obj = lbmonbindings_servicegroup_binding()
            obj.monitorname = monitorname
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            # NOTE(review): '___count' is read through __dict__ to access the
            # literal attribute name set by the payload formatter — confirm
            # the formatter bypasses Python's class-scope name mangling.
            if response:
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, monitorname, filter_):
        """Use this API to count the filtered set of lbmonbindings_servicegroup_binding resources.

        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        try:
            obj = lbmonbindings_servicegroup_binding()
            obj.monitorname = monitorname
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response:
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    class Boundservicegroupsvrstate:
        # Valid values for boundservicegroupsvrstate.
        ENABLED = "ENABLED"
        DISABLED = "DISABLED"

    class Servicetype:
        # Valid values for servicetype.
        HTTP = "HTTP"
        FTP = "FTP"
        TCP = "TCP"
        UDP = "UDP"
        SSL = "SSL"
        SSL_BRIDGE = "SSL_BRIDGE"
        SSL_TCP = "SSL_TCP"
        DTLS = "DTLS"
        NNTP = "NNTP"
        RPCSVR = "RPCSVR"
        DNS = "DNS"
        ADNS = "ADNS"
        SNMP = "SNMP"
        RTSP = "RTSP"
        DHCPRA = "DHCPRA"
        ANY = "ANY"
        SIP_UDP = "SIP_UDP"
        DNS_TCP = "DNS_TCP"
        ADNS_TCP = "ADNS_TCP"
        MYSQL = "MYSQL"
        MSSQL = "MSSQL"
        ORACLE = "ORACLE"
        RADIUS = "RADIUS"
        RDP = "RDP"
        DIAMETER = "DIAMETER"
        SSL_DIAMETER = "SSL_DIAMETER"
        TFTP = "TFTP"

    class Monstate:
        # Valid values for monstate.
        ENABLED = "ENABLED"
        DISABLED = "DISABLED"
class lbmonbindings_servicegroup_binding_response(base_response):
    """NITRO response envelope for lbmonbindings_servicegroup_binding.

    Fix: the original assigned the binding list twice in __init__; the
    first empty-list assignment was dead code and has been removed.
    """

    def __init__(self, length=1):
        # Standard NITRO response envelope fields.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one resource object per expected record.
        self.lbmonbindings_servicegroup_binding = [
            lbmonbindings_servicegroup_binding() for _ in range(length)]
# ---------------------------------------------------------------------------
from functools import partial
import random
from adder.utils import InvalidArgumentError
class Node:
    """A search-tree node: a state plus the path information that reached it."""

    def __init__(self, state, parent, action, path_cost):
        self.__state = state
        self.__parent = parent
        self.__action = action
        self.__path_cost = path_cost

    def __eq__(self, other):
        # Nodes compare (and hash) by state only, so frontier/explored
        # bookkeeping treats different paths to the same state as one node.
        return self.state == other.state

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.state)

    def __str__(self):
        # Fix: an early debugging `return self.state` made the descriptive
        # representation below unreachable; return the intended format.
        parent_name = self.parent.state if self.parent else "None"
        return "(State: {0}, Parent: {1}, Action: {2})"\
            .format(self.state, parent_name, self.action)

    def __repr__(self):
        return str(self)

    @property
    def state(self):
        return self.__state

    @property
    def parent(self):
        return self.__parent

    @property
    def action(self):
        return self.__action

    @property
    def path_cost(self):
        return self.__path_cost
# Sentinel results returned by search algorithms: FAILURE means the search
# space was exhausted without reaching a goal; SOLUTION_UNKNOWN means the
# search gave up (e.g. hit a cutoff) without exhausting the space.
FAILURE = "FAILURE"
SOLUTION_UNKNOWN = "SOLUTION_UNKNOWN"
class Problem:
    """Abstract search problem; subclasses supply actions, results and costs."""

    def child_node(self, node, action):
        """Returns the Node reached by applying `action` in `node`'s state."""
        state = self.result(node.state, action)
        cost = node.path_cost + self.step_cost(node.state, action)
        return Node(state, node, action, cost)

    def actions_iter(self, state):
        raise NotImplementedError("_Problem is abc")

    def step_cost(self, state, action):
        # Fix: `self` was missing from the signature, so calling the method
        # on an instance raised TypeError instead of NotImplementedError.
        raise NotImplementedError("_Problem is abc")

    def result(self, state, action):
        raise NotImplementedError("_Problem is abc")

    def goal_test(self, state):
        raise NotImplementedError("_Problem is abc")

    def construct_solution(self, end_node):
        """Walks parent links back from end_node to self.initial.

        Returns:
            A list of (state, action) pairs from the initial state to
            end_node's state; the initial pair carries action None.
        """
        path = []
        while end_node != self.initial:
            path.append((end_node.state, end_node.action))
            end_node = end_node.parent
        path.append((self.initial.state, None))
        path.reverse()
        return path

    def solution_cost(self, solution):
        """Sums step costs along a solution; 0 for FAILURE/SOLUTION_UNKNOWN."""
        if solution is FAILURE or solution is SOLUTION_UNKNOWN:
            return 0
        cost = 0
        previous_state = None
        for state, action in solution:
            if previous_state:
                cost += self.step_cost(previous_state, action)
            previous_state = state
        return cost
class _GraphProblem(Problem):
    """Search over an explicit graph: states are graph nodes and an action
    is simply the neighbouring node to move to."""
    def __init__(self, graph, root, goal):
        if root not in graph.get_nodes():
            raise InvalidArgumentError("root must be be a node in the graph")
        if goal not in graph.get_nodes():
            raise InvalidArgumentError("goal must be be a node in the graph")
        self.graph = graph
        self.initial = Node(root, None, None, 0)
        self.goal = goal
    def actions_iter(self, state):
        # Every outgoing neighbour of `state` is a legal move.
        return self.graph.children_iter(state)
    def step_cost(self, state, action):
        return self.graph.edge_cost(state, action)
    def result(self, state, action):
        # Moving to a non-neighbour is invalid and yields no state.
        if action in self.graph.children_iter(state):
            return action
        return None
    def goal_test(self, state):
        return self.goal == state
class _NPuzzleProblem(Problem):
    """Sliding-tile (N-puzzle) search problem.

    States are tuples of digit strings; "0" is the blank cell. The initial
    and goal boards are given as whitespace-separated strings.
    """
    UP = "UP"
    DOWN = "DOWN"
    LEFT = "LEFT"
    RIGHT = "RIGHT"
    def __init__(self, initial, goal):
        initial = tuple(initial.split())
        goal = tuple(goal.split())
        self.board_size = len(initial) ** 0.5
        if not self.board_size.is_integer():
            msg = "The size of the board must be a exact square!"
            raise InvalidArgumentError(msg)
        self.board_size = int(self.board_size)
        self.initial = Node(initial, None, None, 0)
        self.goal = goal
    def _swap_letters(self, state, first, second):
        """Return a copy of `state` with the tiles at the two indices swapped."""
        successor = list(state)
        successor[first], successor[second] = successor[second], successor[first]
        return tuple(successor)
    def coords_of(self, state, number):
        """Return (row, col) of `number` in `state`, both as ints."""
        number = str(number)
        index = state.index(number)
        # BUG FIX: the row was previously computed with true division and
        # came back as a float (e.g. 1.0); divmod keeps both coords integral.
        return divmod(index, self.board_size)
    def actions_iter(self, state):
        """Yield the legal directions, derived from the blank's position."""
        # NOTE(review): the direction names appear to describe the sliding
        # tile rather than the blank (the blank moves the opposite way in
        # `result`) — confirm against callers before renaming anything.
        i, j = self.coords_of(state, 0)
        neighbours = []
        if i < self.board_size - 1:
            neighbours.append(_NPuzzleProblem.UP)
        if i > 0:
            neighbours.append(_NPuzzleProblem.DOWN)
        if j < self.board_size - 1:
            neighbours.append(_NPuzzleProblem.RIGHT)
        if j > 0:
            neighbours.append(_NPuzzleProblem.LEFT)
        return iter(neighbours)
    def step_cost(self, state, action):
        # Every slide costs the same.
        return 1
    def result(self, state, action):
        index = state.index("0")
        if action == _NPuzzleProblem.UP:
            return self._swap_letters(state, index, index + self.board_size)
        if action == _NPuzzleProblem.DOWN:
            return self._swap_letters(state, index, index - self.board_size)
        if action == _NPuzzleProblem.RIGHT:
            return self._swap_letters(state, index, index + 1)
        if action == _NPuzzleProblem.LEFT:
            return self._swap_letters(state, index, index - 1)
    def goal_test(self, state):
        return state == self.goal
class _NQueensProblem(Problem):
    """Local-search formulation of N-queens: a state is a tuple of row
    indices, one queen per column; an action (col, row) moves that
    column's queen to `row`."""
    def __init__(self, size, initial=None):
        self.size = size
        if not initial:
            initial = _NQueensProblem.generate_random_state(size)
        self.initial = Node(initial, None, None, 0)
    @staticmethod
    def generate_random_state(size):
        """Random placement: one queen per column, row in [0, size)."""
        # BUG FIX: randint(0, size) is inclusive of `size`, which placed
        # queens outside the board; randrange(size) yields 0..size-1.
        return tuple(random.randrange(size) for _ in range(size))
    @staticmethod
    def attacking(state):
        """Count attacking queen pairs (same row or either diagonal)."""
        size = len(state)
        attacking = 0
        for col, row in enumerate(state):
            for other_col in range(col + 1, size):
                # Row
                if row == state[other_col]:
                    attacking += 1
                # Diag 1
                if row == state[other_col] + (col - other_col):
                    attacking += 1
                # Diag 2
                if row == state[other_col] - (col - other_col):
                    attacking += 1
        return attacking
    def actions_iter(self, state):
        # Every move of one queen to a different row in its column.
        for col in range(self.size):
            for row in range(self.size):
                if row == state[col]:
                    continue
                yield (col, row)
    def step_cost(self, state, action):
        return 1
    def result(self, state, action):
        col_index, row_index = action
        next_state = list(state)
        next_state[col_index] = row_index
        return tuple(next_state)
    def goal_test(self, state):
        # Solved when no pair of queens attacks each other.
        return _NQueensProblem.attacking(state) == 0
class ProblemFactory:
    """Factory for the concrete Problem implementations in this module,
    plus heuristic selection for informed search."""
    def from_graph(self, graph, root, goal):
        # Path search from `root` to `goal` in an explicit graph.
        return _GraphProblem(graph, root, goal)
    def from_functions(self, initial_state, actions,
                       step_cost, result, goal_test):
        # Ad-hoc problem: behaviour is supplied as callables, assigned as
        # instance attributes so they shadow the abstract methods.
        problem = Problem()
        problem.initial = Node(initial_state, None, None, 0)
        problem.actions_iter = actions
        problem.step_cost = step_cost
        problem.result = result
        problem.goal_test = goal_test
        return problem
    def from_npuzzle(self, initial, goal):
        return _NPuzzleProblem(initial, goal)
    def from_nqueens(self, size, initial=None):
        return _NQueensProblem(size, initial)
    def _manhattan_heuristic(problem_instance, state):
        # Sum of Manhattan distances between each board cell and the current
        # position of the tile that belongs there.  Accessed through the
        # class in heuristic_for(), so it deliberately takes no `self`.
        scoords = [problem_instance.coords_of(state, num) for num in state]
        gcoords = [problem_instance.coords_of(state, num)
                   for num in problem_instance.goal]
        # NOTE(review): the range stops at len(state) - 1, skipping the last
        # goal entry — presumably to exclude the blank when the goal keeps it
        # in the final cell; confirm against the goal layouts callers use.
        diff = [abs(scoords[i][0] - gcoords[i][0]) +
                abs(scoords[i][1] - gcoords[i][1])
                for i in range(0, len(state) - 1)]
        return sum(diff)
    def heuristic_for(self, problem):
        """Return a heuristic function suited to `problem`'s type."""
        if isinstance(problem, _NPuzzleProblem):
            return partial(ProblemFactory._manhattan_heuristic, problem)
        elif isinstance(problem, _NQueensProblem):
            return _NQueensProblem.attacking
        else:
            raise TypeError("No heuristic exists for this type of problem")
| |
#-*- coding: utf-8 -*-
#
# Copyright (c) 2015 blackPanther OS - Charles Barcza
# GPL
#
from smart.interfaces.qt5 import getPixmap, centerWindow
from smart.channel import getChannelInfo
from smart import *
from PyQt5 import QtGui as QtGui, QtWidgets
from PyQt5 import QtCore as QtCore, QtWidgets
class TextListViewItem(QtWidgets.QTableWidgetItem):
    """Table item that remembers, per column, the text it previously held."""
    def __init__(self, parent):
        QtWidgets.QTableWidgetItem.__init__(self)
        self._text = {}
        self._oldtext = {}
    def setText(self, col, text):
        """Store `text` for `col`, archiving whatever was there before."""
        QtWidgets.QTableWidgetItem.setText(self, text)
        previous = self._text.get(col)
        if col in self._text:
            self._oldtext[col] = previous
        self._text[col] = text
    def oldtext(self, col):
        """Return the text `col` held before its last overwrite, or None."""
        return self._oldtext.get(col, None)
class QtPriorities(object):
    """Dialog listing every package/alias priority stored in sysconf under
    "package-priorities", with buttons to create and delete entries."""
    def __init__(self, parent=None):
        self._window = QtWidgets.QDialog(None)
        self._window.setWindowIcon(QtGui.QIcon(getPixmap("smart")))
        self._window.setWindowTitle(_("Priorities"))
        #self._window.setModal(True)
        self._window.setMinimumSize(600, 400)
        layout = QtWidgets.QVBoxLayout(self._window)
        #layout.setResizeMode(QtGui.QLayout.FreeResize)
        vbox = QtWidgets.QWidget(self._window)
        QtWidgets.QVBoxLayout(vbox)
        vbox.layout().setMargin(10)
        vbox.layout().setSpacing(10)
        vbox.show()
        layout.addWidget(vbox)
        self._treeview = QtWidgets.QTableWidget(vbox)
        #self._treeview.setAllColumnsShowFocus(True)
        self._treeview.show()
        vbox.layout().addWidget(self._treeview)
        #self._treeview.itemChanged[QTableWidgetItem.connect(self.itemChanged)
        #self._treeview.selectionChanged.connect(self.selectionChanged)
        #self._treeview.addColumn(_("Package Name"))
        #self._treeview.addColumn(_("Channel Alias"))
        #self._treeview.addColumn(_("Priority"))
        bbox = QtWidgets.QWidget(vbox)
        QtWidgets.QHBoxLayout(bbox)
        bbox.layout().setSpacing(10)
        bbox.layout().addStretch(1)
        bbox.show()
        vbox.layout().addWidget(bbox)
        button = QtWidgets.QPushButton(_("New"), bbox)
        button.setEnabled(True)
        button.setIcon(QtGui.QIcon(getPixmap("crystal-add")))
        button.show()
        button.clicked[()].connect(self.newPriority)
        self._newpriority = button
        bbox.layout().addWidget(button)
        button = QtWidgets.QPushButton(_("Delete"), bbox)
        button.setEnabled(False)
        button.setIcon(QtGui.QIcon(getPixmap("crystal-delete")))
        button.show()
        button.clicked[()].connect(self.delPriority)
        self._delpriority = button
        bbox.layout().addWidget(button)
        button = QtWidgets.QPushButton(_("Close"), bbox)
        button.clicked[()].connect(self._window.accept)
        bbox.layout().addWidget(button)
        button.setDefault(True)
        vbox.adjustSize()
    def fill(self):
        """Reload the table contents from sysconf."""
        self._treeview.clear()
        priorities = sysconf.get("package-priorities", {})
        # BUG FIX: on Python 3 dict.items() returns a view without .sort();
        # the old `lst = d.items(); lst.sort()` idiom raised AttributeError.
        for name, pkgpriorities in sorted(priorities.items()):
            # Alias keys may include None (the "*" wildcard); sort it first,
            # matching Python 2's None-before-strings ordering, and avoid the
            # Python 3 TypeError from comparing None with str.
            aliaslst = sorted(pkgpriorities.items(),
                              key=lambda kv: (kv[0] is not None, kv[0] or ""))
            for alias, priority in aliaslst:
                item = TextListViewItem(self._treeview)
                item.setText(0, name)
                item.setText(1, alias or "*")
                item.setText(2, str(priority))
                #item.setRenameEnabled(0, True)
                #item.setRenameEnabled(1, True)
                #item.setRenameEnabled(2, True)
    def show(self):
        """Populate the table and run the dialog."""
        self.fill()
        self._window.show()
        centerWindow(self._window)
        self._window.raise_()
        self._window.exec_()
        self._window.hide()
    def newPriority(self):
        """Prompt for a new name/alias/priority triple and store it."""
        name, alias, priority = PriorityCreator(self._window).show()
        if name:
            if sysconf.has(("package-priorities", name, alias)):
                iface.error(_("Name/alias pair already exists!"))
            else:
                sysconf.set(("package-priorities", name, alias), int(priority))
                self.fill()
    def delPriority(self):
        """Remove the currently selected priority entry."""
        item = self._treeview.selectedItem()
        if item:
            name = str(item.text(0))
            alias = str(item.text(1))
            if alias == "*":
                alias = None
            sysconf.remove(("package-priorities", name, alias))
            self.fill()
    def selectionChanged(self):
        # Delete is only meaningful with a selection.
        item = self._treeview.selectedItem()
        self._delpriority.setEnabled(bool(item))
    def itemChanged(self, item, col):
        """React to an in-place edit of one of the three columns."""
        newtext = item.text(col)
        newtext = str(newtext).strip()
        if col == 1:
            if newtext == "*":
                newtext = ""
        oldtext = item.oldtext(col)
        if newtext != oldtext:
            if col == 0:
                # NOTE(review): the alias is read from column 0 here, but
                # column 0 holds the package name (the alias lives in column
                # 1) — looks wrong; left untouched pending confirmation.
                alias = str(item.text(0))
                if alias == "*":
                    alias = None
                priority = str(item.text(2))
                if not newtext:
                    pass
                elif sysconf.has(("package-priorities", newtext, alias)):
                    iface.error(_("Name/alias pair already exists!"))
                else:
                    sysconf.set(("package-priorities", newtext, alias),
                                int(priority))
                    sysconf.remove(("package-priorities", oldtext, alias))
            elif col == 1:
                name = item.text(0)
                priority = item.text(2)
                if sysconf.has(("package-priorities", name, newtext)):
                    iface.error(_("Name/alias pair already exists!"))
                else:
                    sysconf.move(("package-priorities", name, oldtext),
                                 ("package-priorities", name, newtext))
                    item.setText(col, newtext or "*")
            elif col == 2:
                if newtext:
                    name = str(item.text(0))
                    alias = str(item.text(1))
                    if alias == "*":
                        alias = None
                    try:
                        sysconf.set(("package-priorities", name, alias),
                                    int(newtext))
                    except ValueError:
                        item.setText(col, oldtext)
                        iface.error(_("Invalid priority!"))
class PriorityCreator(object):
    """Modal dialog asking for a (package name, channel alias, priority)
    triple; run it with show()."""
    def __init__(self, parent=None):
        self._window = QtWidgets.QDialog(parent)
        self._window.setWindowIcon(QtGui.QIcon(getPixmap("smart")))
        self._window.setWindowTitle(_("New Package Priority"))
        self._window.setModal(True)
        #self._window.setMinimumSize(600, 400)
        vbox = QtWidgets.QWidget(self._window)
        QtWidgets.QVBoxLayout(vbox)
        vbox.layout().setMargin(10)
        vbox.layout().setSpacing(10)
        vbox.show()
        table = QtWidgets.QWidget(self._window)
        QtWidgets.QGridLayout(table)
        table.layout().setSpacing(10)
        table.show()
        vbox.layout().addWidget(table)
        label = QtWidgets.QLabel(_("Package Name:"), table)
        table.layout().addWidget(label)
        self._name = QtWidgets.QLineEdit(table)
        self._name.show()
        table.layout().addWidget(self._name)
        label = QtWidgets.QLabel(_("Channel Alias:"), table)
        table.layout().addWidget(label)
        self._alias = QtWidgets.QLineEdit(table)
        # "*" means "any channel" and is mapped to alias None on return.
        self._alias.setText("*")
        self._alias.show()
        table.layout().addWidget(self._alias)
        label = QtWidgets.QLabel(_("Priority:"), table)
        table.layout().addWidget(label)
        self._priority = QtWidgets.QSpinBox(table)
        self._priority.setSingleStep(1)
        self._priority.setRange(-100000,+100000)
        self._priority.show()
        table.layout().addWidget(self._priority)
        sep = QtWidgets.QFrame(vbox)
        sep.setFrameShape(QtWidgets.QFrame.HLine)
        sep.setFrameShadow(QtWidgets.QFrame.Sunken)
        sep.show()
        vbox.layout().addWidget(sep)
        bbox = QtWidgets.QWidget(vbox)
        QtWidgets.QHBoxLayout(bbox)
        bbox.layout().setSpacing(10)
        bbox.layout().addStretch(1)
        bbox.show()
        vbox.layout().addWidget(bbox)
        button = QtWidgets.QPushButton(_("Cancel"), bbox)
        button.clicked[()].connect(self._window.reject)
        bbox.layout().addWidget(button)
        button = QtWidgets.QPushButton(_("OK"), bbox)
        button.clicked[()].connect(self._window.accept)
        bbox.layout().addWidget(button)
        button.setDefault(True)
        vbox.adjustSize()
        self._window.adjustSize()
    def show(self):
        """Run the dialog until accepted with a non-empty name or rejected.

        Returns (name, alias, priority) as strings, with alias None for
        "*"; returns (None, None, None) when cancelled.
        """
        self._window.show()
        self._window.raise_()
        self._window.activateWindow()
        while True:
            self._result = self._window.exec_()
            if self._result == QtWidgets.QDialog.Accepted:
                name = str(self._name.text()).strip()
                if not name:
                    # Re-run the dialog until a name is provided.
                    iface.error(_("No name provided!"))
                    continue
                alias = str(self._alias.text()).strip()
                if alias == "*":
                    alias = None
                priority = str(self._priority.value())
                break
            # Rejected: fall through with an empty result.
            name = alias = priority = None
            break
        self._window.hide()
        return name, alias, priority
class QtSinglePriority(object):
    """Dialog for editing the priority entries of a single package."""
    def __init__(self, parent=None):
        self._window = QtWidgets.QDialog(parent)
        self._window.setWindowIcon(QtGui.QIcon(getPixmap("smart")))
        self._window.setWindowTitle(_("Package Priority"))
        self._window.setModal(True)
        #self._window.setMinimumSize(600, 400)
        vbox = QtWidgets.QWidget(self._window)
        QtWidgets.QVBoxLayout(vbox)
        vbox.layout().setMargin(10)
        vbox.layout().setSpacing(10)
        vbox.show()
        self._vbox = vbox
        self._table = QtWidgets.QWidget(vbox)
        QtWidgets.QGridLayout(self._table)
        self._table.layout().setSpacing(10)
        self._table.show()
        vbox.layout().addWidget(self._table)
        bbox = QtWidgets.QWidget(vbox)
        QtWidgets.QHBoxLayout(bbox)
        bbox.layout().setSpacing(10)
        bbox.layout().addStretch(1)
        bbox.show()
        vbox.layout().addWidget(bbox)
        button = QtWidgets.QPushButton(_("Close"), bbox)
        button.clicked[()].connect(self._window.hide)
        bbox.layout().addWidget(button)
        self._vbox.adjustSize()
        self._window.adjustSize()
    def show(self, pkg):
        """Build the editing widgets for `pkg`, run the dialog, and write
        the edited `priority` mapping back to sysconf on close."""
        priority = sysconf.get(("package-priorities", pkg.name), {})
        table = self._table
        #table.foreach(table.remove)
        label = QtWidgets.QLabel(_("Package:"), table)
        label.show()
        table.layout().addWidget(label)
        label = QtWidgets.QLabel("<b>%s</b>" % pkg.name, table)
        label.show()
        table.layout().addWidget(label)
        class AliasCheckBox(QtWidgets.QCheckBox):
            def __init__(self, name, parent):
                # BUG FIX: this used to call QtWidgets.QSpinBox.__init__ on
                # a QCheckBox instance; invoke the actual base class.
                QtWidgets.QCheckBox.__init__(self, name, parent)
            def connect(self, signal, slot, spin, alias):
                # FIXME Ambiguous syntax for this signal connection, can't refactor it.
                QtCore.QObject.connect(self, QtCore.SIGNAL(signal), slot)
                self._spin = spin
                self._alias = alias
            def toggled(self, check):
                # Checking enables the paired spinbox and records its value;
                # unchecking removes the alias from `priority` (closure).
                spin = self._spin
                alias = self._alias
                if check:
                    priority[alias] = int(spin.value())
                    spin.setEnabled(True)
                else:
                    if alias in priority:
                        del priority[alias]
                    spin.setEnabled(False)
        class AliasSpinBox(QtWidgets.QSpinBox):
            def __init__(self, parent):
                QtWidgets.QSpinBox.__init__(self, parent)
            def connect(self, signal, slot, alias):
                # FIXME Ambiguous syntax for this signal connection, can't refactor it.
                QtCore.QObject.connect(self, QtCore.SIGNAL(signal), slot)
                self._alias = alias
            def value_changed(self, value):
                # BUG FIX: this used to read `spin._alias`, which late-binds
                # to the *last* spinbox created in the loop below; use the
                # alias stored on this instance instead.
                priority[self._alias] = value
        label = QtWidgets.QLabel(_("Default priority:"), table)
        label.show()
        table.layout().addWidget(label)
        hbox = QtWidgets.QWidget(table)
        QtWidgets.QHBoxLayout(hbox)
        hbox.layout().setSpacing(10)
        hbox.show()
        table.layout().addWidget(hbox)
        radio = QtWidgets.QRadioButton(_("Channel default"), hbox)
        radio.setChecked(None not in priority)
        radio.show()
        hbox.layout().addWidget(radio)
        radio = QtWidgets.QRadioButton(_("Set to"), hbox)
        radio.setChecked(None in priority)
        radio.show()
        hbox.layout().addWidget(radio)
        spin = QtWidgets.QSpinBox(hbox)
        spin.setSingleStep(1)
        spin.setRange(-100000,+100000)
        spin.setValue(priority.get(None, 0))
        spin.show()
        table.layout().addWidget(spin)
        label = QtWidgets.QLabel(_("Channel priority:"), table)
        label.show()
        table.layout().addWidget(label)
        chantable = QtWidgets.QWidget(table)
        QtWidgets.QGridLayout(chantable)
        chantable.layout().setSpacing(10)
        chantable.show()
        table.layout().addWidget(chantable)
        pos = 0
        channels = sysconf.get("channels")
        for alias in channels:
            channel = channels[alias]
            # Only package-kind channels can carry priorities.
            if not getChannelInfo(channel.get("type")).kind == "package":
                continue
            name = channel.get("name")
            if not name:
                name = alias
            check = AliasCheckBox(name, chantable)
            check.setChecked(alias in priority)
            check.show()
            chantable.layout().addWidget(check)
            spin = AliasSpinBox(chantable)
            if alias not in priority:
                spin.setEnabled(False)
            spin.setSingleStep(1)
            spin.setRange(-100000,+100000)
            spin.setValue(priority.get(alias, 0))
            spin.connect("valueChanged(int)", spin.value_changed, alias)
            check.connect("toggled(bool)", check.toggled, spin, alias)
            spin.show()
            chantable.layout().addWidget(spin)
            pos += 1
        table.adjustSize()
        self._vbox.adjustSize()
        self._window.adjustSize()
        self._window.show()
        self._window.raise_()
        self._window.activateWindow()
        self._window.exec_()
        self._window.hide()
        # Persist the edited mapping; drop the key entirely when empty.
        if not priority:
            sysconf.remove(("package-priorities", pkg.name))
        else:
            sysconf.set(("package-priorities", pkg.name), priority)
| |
import json
from dateutil.relativedelta import relativedelta
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage
from django.db import transaction
from django.db.models import Q, Count
from django.http import (
Http404,
HttpResponse,
HttpResponseBadRequest,
JsonResponse,
)
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.views.decorators.http import require_POST
from django.views.generic import TemplateView
from django.utils.html import escape
from pontoon.base import forms
from pontoon.base.models import Locale, Project
from pontoon.base.utils import require_AJAX
from pontoon.contributors.utils import (
map_translations_to_events,
users_with_translations_counts,
)
from pontoon.uxactionlog.utils import log_ux_action
@login_required(redirect_field_name="", login_url="/403")
def profile(request):
    """Render the profile page of the currently signed-in user."""
    current_user = request.user
    return contributor(request, current_user)
def contributor_email(request, email):
    """Contributor profile, looked up by email address (404 if unknown)."""
    matched_user = get_object_or_404(User, email=email)
    return contributor(request, matched_user)
def contributor_username(request, username):
    """Contributor profile, looked up by username (404 if unknown)."""
    matched_user = get_object_or_404(User, username=username)
    return contributor(request, matched_user)
def contributor_timeline(request, username):
    """Contributor events in the timeline.

    Renders one page of per-day translation counts for `username`,
    paginated via the ?page= query parameter; raises 404 for a bad or
    out-of-range page.
    """
    user = get_object_or_404(User, username=username)
    try:
        page = int(request.GET.get("page", 1))
    except ValueError:
        raise Http404("Invalid page number.")
    # Exclude obsolete translations
    # NOTE(review): .extra() is a legacy Django QuerySet API — presumably
    # kept for the raw date(date) grouping; confirm before migrating.
    contributor_translations = (
        user.contributed_translations.exclude(entity__obsolete=True)
        .extra({"day": "date(date)"})
        .order_by("-day")
    )
    counts_by_day = contributor_translations.values("day").annotate(count=Count("id"))
    try:
        events_paginator = Paginator(
            counts_by_day, django_settings.CONTRIBUTORS_TIMELINE_EVENTS_PER_PAGE
        )
        timeline_events = map_translations_to_events(
            events_paginator.page(page).object_list, contributor_translations
        )
        # Join is the last event in this reversed order.
        if page == events_paginator.num_pages:
            timeline_events.append({"date": user.date_joined, "type": "join"})
    except EmptyPage:
        # Past the last page there is nothing to show, so answer 404.
        # (The previous comment claiming the join event is returned here
        # was wrong — the join event is appended on the last page above.)
        raise Http404("No events.")
    return render(
        request, "contributors/includes/timeline.html", {"events": timeline_events}
    )
def contributor(request, user):
    """Contributor profile."""
    context = {
        "contributor": user,
        "translations": user.contributed_translations,
    }
    return render(request, "contributors/profile.html", context)
@login_required(redirect_field_name="", login_url="/403")
@require_POST
@transaction.atomic
def toggle_user_profile_attribute(request, username):
    """Set one of the whitelisted boolean preferences on the user's own
    profile from POSTed `attribute`/`value` fields.

    Returns JSON {status: bool}; 403 for other users or disallowed
    attributes, 400 for a missing or malformed value.
    """
    user = get_object_or_404(User, username=username)
    # Users may only edit their own profile.
    if user != request.user:
        return JsonResponse(
            {
                "status": False,
                "message": "Forbidden: You don't have permission to edit this user",
            },
            status=403,
        )
    attribute = request.POST.get("attribute", None)
    if attribute not in [
        "quality_checks",
        "force_suggestions",
        "new_string_notifications",
        "project_deadline_notifications",
        "comment_notifications",
        "unreviewed_suggestion_notifications",
    ]:
        return JsonResponse(
            {"status": False, "message": "Forbidden: Attribute not allowed"},
            status=403,
        )
    value = request.POST.get("value", None)
    if not value:
        return JsonResponse(
            {"status": False, "message": "Bad Request: Value not set"}, status=400
        )
    # BUG FIX: malformed input previously raised an unhandled ValueError
    # from json.loads (HTTP 500); reject it with 400 instead.
    try:
        parsed_value = json.loads(value)
    except json.JSONDecodeError:
        return JsonResponse(
            {"status": False, "message": "Bad Request: Value not valid"}, status=400
        )
    profile = user.profile
    setattr(profile, attribute, parsed_value)
    profile.save()
    return JsonResponse({"status": True})
@login_required(redirect_field_name="", login_url="/403")
@require_POST
@transaction.atomic
def save_custom_homepage(request):
    """Save custom homepage."""
    form = forms.UserCustomHomepageForm(request.POST, instance=request.user.profile)
    if form.is_valid():
        form.save()
        return HttpResponse("ok")
    # Report validation problems back as a 400 with escaped messages.
    error = escape("\n".join(form.errors["custom_homepage"]))
    return HttpResponseBadRequest(error)
@login_required(redirect_field_name="", login_url="/403")
@require_POST
@transaction.atomic
def save_preferred_source_locale(request):
    """Save preferred source locale."""
    form = forms.UserPreferredSourceLocaleForm(
        request.POST,
        instance=request.user.profile,
    )
    if form.is_valid():
        form.save()
        return HttpResponse("ok")
    # Report validation problems back as a 400 with escaped messages.
    error = escape("\n".join(form.errors["preferred_source_locale"]))
    return HttpResponseBadRequest(error)
@login_required(redirect_field_name="", login_url="/403")
@require_AJAX
@transaction.atomic
def dismiss_addon_promotion(request):
    """Permanently hide the add-on promotion banner for this user."""
    user_profile = request.user.profile
    user_profile.has_dismissed_addon_promotion = True
    user_profile.save()
    return JsonResponse({"status": True})
@login_required(redirect_field_name="", login_url="/403")
def settings(request):
    """View and edit user settings."""
    if request.method == "POST":
        locales_form = forms.UserLocalesOrderForm(
            request.POST,
            instance=request.user.profile,
        )
        profile_form = forms.UserProfileForm(
            request.POST,
            instance=request.user,
        )
        if locales_form.is_valid() and profile_form.is_valid():
            locales_form.save()
            profile_form.save()
            messages.success(request, "Settings saved.")
    else:
        profile_form = forms.UserProfileForm(instance=request.user)
    selected_locales = list(request.user.profile.sorted_locales)
    selected_pks = [locale.pk for locale in selected_locales]
    available_locales = Locale.objects.exclude(pk__in=selected_pks)
    # The "Default homepage" pseudo-locale is offered as the first choice.
    default_homepage_locale = Locale(name="Default homepage", code="")
    all_locales = [default_homepage_locale]
    all_locales.extend(Locale.objects.all())
    # Resolve the stored custom-homepage code into a Locale for the selector.
    homepage_code = request.user.profile.custom_homepage
    if homepage_code:
        custom_homepage_locale = Locale.objects.filter(code=homepage_code).first()
    else:
        custom_homepage_locale = default_homepage_locale
    default_preferred_source_locale = Locale(name="Default project locale", code="")
    preferred_locales = [default_preferred_source_locale]
    preferred_locales.extend(Locale.objects.all())
    # Resolve the stored preferred source locale code the same way.
    source_code = request.user.profile.preferred_source_locale
    if source_code:
        preferred_source_locale = Locale.objects.filter(code=source_code).first()
    else:
        preferred_source_locale = default_preferred_source_locale
    context = {
        "selected_locales": selected_locales,
        "available_locales": available_locales,
        "locales": all_locales,
        "locale": custom_homepage_locale,
        "preferred_locales": preferred_locales,
        "preferred_locale": preferred_source_locale,
        "profile_form": profile_form,
    }
    return render(request, "contributors/settings.html", context)
@login_required(redirect_field_name="", login_url="/403")
def notifications(request):
    """View and edit user notifications."""
    notifications = request.user.notifications.prefetch_related(
        "actor", "target"
    ).order_by("-pk")
    # Group notification ids by related project (actor takes precedence
    # over target, matching the original lookup order).
    projects = {}
    for notification in notifications:
        project = None
        if isinstance(notification.actor, Project):
            project = notification.actor
        elif isinstance(notification.target, Project):
            project = notification.target
        if project is None:
            continue
        entry = projects.setdefault(
            project.slug, {"name": project.name, "notifications": []}
        )
        entry["notifications"].append(notification.id)
    # Sort projects by the number of notifications
    ordered_projects = sorted(
        projects, key=lambda slug: len(projects[slug]["notifications"]), reverse=True
    )
    log_ux_action(
        action_type="Page load: Notifications",
        experiment="Notifications 1.0",
        data={"referrer": request.GET.get("referrer", "")},
    )
    context = {
        "notifications": notifications,
        "projects": projects,
        "ordered_projects": ordered_projects,
    }
    return render(request, "contributors/notifications.html", context)
@login_required(redirect_field_name="", login_url="/403")
@require_AJAX
@transaction.atomic
def mark_all_notifications_as_read(request):
    """Mark all notifications of the currently logged in user as read"""
    request.user.notifications.mark_all_as_read()
    # Record the action for the Notifications 1.0 experiment analytics.
    utm_source = request.GET.get("utm_source")
    log_ux_action(
        action_type="Background action: Mark all notifications as read",
        experiment="Notifications 1.0",
        data={"utm_source": utm_source},
    )
    return JsonResponse({"status": True})
class ContributorsMixin:
    def contributors_filter(self, **kwargs):
        """
        Return Q() filters for fetching contributors. Fetches all by default.
        """
        return Q()
    def get_context_data(self, **kwargs):
        """Top contributors view."""
        context = super().get_context_data(**kwargs)
        # An optional ?period=N (months, positive int) restricts the window;
        # anything missing or invalid means "all time".
        period = None
        start_date = None
        raw_period = self.request.GET.get("period")
        if raw_period is not None:
            try:
                months = int(raw_period)
            except ValueError:
                months = 0
            if months > 0:
                period = months
                start_date = timezone.now() + relativedelta(months=-months)
        context["contributors"] = users_with_translations_counts(
            start_date, self.contributors_filter(**kwargs) & Q(user__isnull=False)
        )
        context["period"] = period
        return context
class ContributorsView(ContributorsMixin, TemplateView):
    """
    View returns top contributors.
    """
    # Uses ContributorsMixin's default contributors_filter (no filtering).
    template_name = "contributors/contributors.html"
| |
from io import StringIO
from typing import Union
from hwt.hdl.types.array import HArray
from hwt.hdl.types.bitsVal import BitsVal
from hwt.hdl.types.struct import HStruct
from hwt.simulator.simTestCase import SimTestCase
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwtLib.amba.axi_comp.oooOp.utils import OOOOpPipelineStage
from hwtLib.examples.axi.oooOp.counterHashTable import OooOpExampleCounterHashTable
from hwtSimApi.basic_hdl_simulator.model import BasicRtlSimModel
from hwtSimApi.constants import CLK_PERIOD
from hwtSimApi.triggers import Edge, WaitCombStable
from pyMathBitPrecise.bit_utils import ValidityError
def OutOfOrderCummulativeOp_dump_pipeline(tc: SimTestCase, u: OooOpExampleCounterHashTable, model: BasicRtlSimModel, states:list):
    """
    Simulation process: on every rising edge of `u.clk`, snapshot each
    pipeline stage of `u` (operation, address, transaction/main data,
    collision and write-forwarding info) and append ``(clk_tick, snapshot)``
    to `states` whenever the snapshot differs from the last recorded one.

    :param tc: test case, used only to query the simulator time
    :param u: design under test whose `pipeline` stages are inspected
    :param model: elaborated RTL simulation model the signals are read from
    :param states: output list of (clock index, list of per-stage tuples)
    """
    m = model.io
    # Resolve the clock to the signal object living inside the sim model.
    clk = u.clk
    if clk._sigInside is None:
        clk = clk._sig
    else:
        clk = clk._sigInside
    clk = getattr(m, clk.name)
    def int_or_none(v):
        # Convert a simulation value to int; None when (partially) invalid.
        try:
            return int(v)
        except ValidityError:
            return None
    def read_data(sig: Union[RtlSignal, Interface]):
        """
        read data from simulation
        """
        if isinstance(sig, Interface):
            if sig._interfaces:
                # Composite interface: read members recursively.
                return tuple(read_data(i) for i in sig._interfaces)
            else:
                sig = sig._sig
        return int_or_none(getattr(m, sig.name).read())
    has_operation = hasattr(u, "OPERATION")
    has_trans_data = u.TRANSACTION_STATE_T is not None
    has_composite_data = isinstance(u.MAIN_STATE_T, (HStruct, HArray))
    # Address-based operations carry no key-match information.
    ops_without_match = []
    if has_operation:
        for n in ("SWAP_BY_ADDR", "READ_BY_ADDR"):
            o = getattr(u.OPERATION, n, None)
            if o is not None:
                ops_without_match.append(o)
    while True:
        yield Edge(clk)
        yield WaitCombStable()
        if clk.read():
            clk_i = tc.hdl_simulator.now // CLK_PERIOD
            cur_state = []
            for st in u.pipeline:
                st: OOOOpPipelineStage
                vld = read_data(st.valid)
                if vld:
                    if st.index == 0:
                        # Stage 0 has no address/operation registered yet.
                        addr = -1
                        op = -1 if has_operation else None
                    else:
                        addr = read_data(st.addr)
                        assert addr is not None
                    # Transaction state exists between stage 1 and WAIT_FOR_WRITE_ACK.
                    trans_state_present = st.index > 0 and st.index <= u.PIPELINE_CONFIG.WAIT_FOR_WRITE_ACK
                    if has_operation:
                        if trans_state_present:
                            op = read_data(st.transaction_state.operation)
                            assert op is not None
                        else:
                            op = None
                    if st.index >= u.PIPELINE_CONFIG.STATE_LOAD and st.index < u.PIPELINE_CONFIG.WRITE_BACK:
                        # Collect key-match/collision info in the stages
                        # between state load and write back.
                        if has_trans_data and op not in ops_without_match:
                            key_match = []
                            for i, km in enumerate(st.key_matches):
                                if not isinstance(km, BitsVal):
                                    km = read_data(km)
                                    assert km is not None, (op, st, i)
                                if km:
                                    key_match.append(i)
                        else:
                            key_match = None
                        collision = None
                        for i, cd in enumerate(st.collision_detect):
                            if not isinstance(cd, int):
                                cd = read_data(cd)
                                assert cd is not None
                            if cd:
                                collision = i
                                # assert read_data(u.pipeline[i].valid), (clk_i,
                                #    u.pipeline[i].valid.name,
                                #    "not valid and we expecting collision with it")
                                break
                    else:
                        key_match = None
                        collision = None
                    if st.index >= u.PIPELINE_CONFIG.STATE_LOAD and st.index <= u.PIPELINE_CONFIG.WRITE_BACK:
                        # Record which later stage(s) forward their write data
                        # into this stage on this clock tick.
                        wr_forward = []
                        if st.index == u.PIPELINE_CONFIG.WRITE_BACK:
                            src_st = u.pipeline[u.PIPELINE_CONFIG.WRITE_BACK]
                            if getattr(m, f"write_forwarding_en_{st.index}from{src_st.index}").read():
                                wr_forward.append(src_st.index)
                        else:
                            for src_st in u.pipeline[u.PIPELINE_CONFIG.WRITE_BACK:]:
                                if getattr(m, f"write_forwarding_en_{st.index}from{src_st.index}").read():
                                    wr_forward.append(src_st.index)
                    else:
                        wr_forward = None
                    if has_trans_data:
                        if trans_state_present:
                            orig_d = st.transaction_state.original_data
                            t_item_valid = read_data(orig_d.item_valid)
                            t_key = read_data(orig_d.key)
                            t_data = read_data(orig_d.value)
                        else:
                            t_item_valid = None
                            t_key = None
                            t_data = None
                        t_data = (t_item_valid, t_key, t_data)
                    else:
                        t_data = None
                    if has_composite_data:
                        item_vld = read_data(st.data.item_valid)
                        # assert item_vld is not None
                        if item_vld:
                            key = read_data(st.data.key)
                            data = read_data(st.data.value)
                        else:
                            key = None
                            data = None
                        data = (item_vld, key, data)
                    else:
                        data = read_data(st.data)
                    state_data = (op, addr,
                                  t_data,
                                  data,
                                  collision, wr_forward, key_match)
                else:
                    state_data = None
                cur_state.append(state_data)
            # Only record a snapshot when something changed.
            if not states or states[-1][1] != cur_state:
                states.append((clk_i, cur_state))
                # print(f"clk {clk_i}: {cur_state}")
                # Cross-check the recorded collision predictions against the
                # stages at/after WRITE_BACK.
                for st_data in cur_state[u.PIPELINE_CONFIG.STATE_LOAD:u.PIPELINE_CONFIG.WRITE_BACK]:
                    if st_data is not None:
                        (_, addr, _, data, collision, wr_forward, _) = st_data
                        if has_composite_data:
                            (item_vld, key, data) = data
                        else:
                            item_vld = 1
                            key = None
                            # data = data
                        # NOTE(review): the slice end excludes index
                        # `collision` itself, so the `st.index == collision`
                        # branch below looks unreachable — confirm intent.
                        for st in u.pipeline[u.PIPELINE_CONFIG.WRITE_BACK:(len(u.pipeline) if collision is None else collision)]:
                            if st.index == collision:
                                assert read_data(st.addr) and read_data(st.addr) == addr, (clk_i,
                                    "collision prediction was invalid", st.index,
                                    read_data(st.valid), read_data(st.addr), addr
                                )
                            else:
                                if read_data(st.valid):
                                    assert read_data(st.addr) != addr, (clk_i,
                                        "collision prediction missed item", st.index, read_data(st.addr))
def OutOfOrderCummulativeOp_dump_pipeline_html(file: StringIO, u: OooOpExampleCounterHashTable, states: list):
    """
    Render the snapshots collected by OutOfOrderCummulativeOp_dump_pipeline
    as a single HTML table: one column per pipeline stage, one row per
    recorded clock tick.

    :param file: output stream the HTML document is written to
    :param u: design under test (provides stage and operation naming)
    :param states: list of (clock index, per-stage tuples) records
    """
    rows = []
    # Map significant stage indices to symbolic names for the table header.
    st_names = {getattr(u.PIPELINE_CONFIG, n): n for n in [
        "READ_DATA_RECEIVE",
        "STATE_LOAD",
        "WRITE_BACK",
        "WAIT_FOR_WRITE_ACK"]}
    if hasattr(u, "OPERATION"):
        # Build the reverse mapping: operation code -> operation name.
        operation_names = {}
        for attr in dir(u.OPERATION):
            v = getattr(u.OPERATION, attr)
            if isinstance(v, int):
                assert v not in operation_names, (attr, v, operation_names)
                operation_names[v] = attr
    else:
        operation_names = None
    has_trans_data = u.TRANSACTION_STATE_T is not None
    has_composite_data = isinstance(u.MAIN_STATE_T, (HStruct, HArray))
    for clk_i, total_st in states:
        if not rows:
            # construct header
            state_cells = []
            for i in range(len(total_st)):
                st_name = st_names.get(i, None)
                if st_name is None:
                    state_cells.append(f"<th>{i:d}</th>")
                else:
                    state_cells.append(f"<th>{i:d} {st_name:s}</th>")
            state_cells = "".join(state_cells)
            row = f"<tr><th></th>{state_cells:s}</tr>"
            rows.append(row)
        state_cells = []
        for st_i, st in enumerate(total_st):
            if st is None:
                # Stage was not valid on this tick -> empty cell.
                cell = f"<td></td>"
            else:
                (op, addr,
                 t,
                 data,
                 collision, wr_forward, key_match) = st
                if st_i == 0 or st_i > u.PIPELINE_CONFIG.WAIT_FOR_WRITE_ACK:
                    op = ""  # operation is not present in these stages
                    t = None
                else:
                    if operation_names is None:
                        assert op is None
                    else:
                        op = operation_names.get(op, "INVALID")
                    if has_trans_data:
                        (t_item_valid, t_key, t_data) = t
                        if t_item_valid is None:
                            t = ""
                        else:
                            t = repr((t_item_valid, t_key, t_data))
                    else:
                        t = None
                if has_composite_data:
                    (item_vld, key, data) = data
                    if item_vld:
                        d = repr((item_vld, key, data))
                    elif item_vld is None:
                        d = "INVALID"
                    else:
                        d = ""
                else:
                    d = repr(data)
                # Assemble the cell content line by line, skipping fields
                # that are not applicable to this stage.
                cell_lines = []
                if st_i > 0:
                    if operation_names is not None:
                        cell_lines.append(f"0x{addr:x} {op:s}<br/>")
                    else:
                        cell_lines.append(f"0x{addr:x}<br/>")
                if t is not None:
                    cell_lines.append(f"t:{t}<br/>")
                cell_lines.append(f"d:{d}<br/>")
                if collision is not None:
                    cell_lines.append(f"collision:{collision}<br/>")
                if wr_forward is not None:
                    cell_lines.append(f"wr_forward:{wr_forward}<br/>")
                if key_match is not None:
                    cell_lines.append(f"t_key_match:{key_match}<br/>")
                cell_lines = "".join(cell_lines)
                if st_i >= u.PIPELINE_CONFIG.WRITE_BACK:
                    # Green background marks stages at/after write-back.
                    style = ' style="background-color:LightGreen;"'
                else:
                    style = ''
                cell = f"<td{style:s}>{cell_lines:s}</td>"
            state_cells.append(cell)
        state_cells = "".join(state_cells)
        row = f"<tr><td>{clk_i}</td>{state_cells:s}</tr>"
        rows.append(row)
    rows = "\n".join(rows)
    file.write(f"<!DOCTYPE html><html><head><meta charset='utf-8'/></head><body><table border='1'>\n{rows}\n</table></body></html>\n")
| |
from otp.ai.AIBaseGlobal import *
from direct.task.Task import Task
from pandac.PandaModules import *
from DistributedNPCToonBaseAI import *
from toontown.quest import Quests
class DistributedNPCToonAI(DistributedNPCToonBaseAI):
    """AI-side quest-giver NPC.

    Serves one avatar at a time: ``self.busy`` holds that avatar's avId
    (0 when idle).  Interaction outcomes are pushed to clients through
    the ``setMovie`` distributed update.  When a quest or track choice
    is offered, the offer is remembered in ``pendingQuests`` /
    ``pendingTracks`` until the avatar answers, cancels, times out or
    disconnects.
    """
    # Config toggle for banning the "fourth gag via velvet rope" exploit.
    FourthGagVelvetRopeBan = config.GetBool('want-ban-fourth-gag-velvet-rope', 0)

    def __init__(self, air, npcId, questCallback=None, hq=0):
        DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback)
        self.hq = hq  # non-zero when this NPC lives in an HQ building
        self.tutorial = 0  # non-zero while in the tutorial (disables timeouts)
        # Initialize all pending-choice state up front so the choose*
        # handlers never touch an attribute that does not exist yet.
        self.pendingAvId = None
        self.pendingQuests = None
        self.pendingTracks = None
        self.pendingTrackQuest = None
        return

    def getTutorial(self):
        # Distributed-field getter required by the DC system.
        return self.tutorial

    def setTutorial(self, val):
        # Distributed-field setter required by the DC system.
        self.tutorial = val

    def getHq(self):
        # Distributed-field getter required by the DC system.
        return self.hq

    def avatarEnter(self):
        """An avatar walked up: mark us busy and hand off to the quest manager."""
        avId = self.air.getAvatarIdFromSender()
        self.notify.debug('avatar enter ' + str(avId))
        self.busy = avId
        self.air.questManager.requestInteract(avId, self)
        # Clean up if the avatar disconnects mid-interaction.
        self.acceptOnce(self.air.getAvatarExitEvent(avId), self.__handleUnexpectedExit, extraArgs=[avId])
        taskMgr.doMethodLater(20, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
        DistributedNPCToonBaseAI.avatarEnter(self)

    def chooseQuest(self, questId):
        """Client answered a quest-choice movie; validate and forward the answer."""
        avId = self.air.getAvatarIdFromSender()
        self.notify.debug('chooseQuest: avatar %s choseQuest %s' % (avId, questId))
        if not self.pendingAvId:
            self.notify.warning('chooseQuest: not expecting an answer from any avatar: %s' % avId)
            return
        if self.pendingAvId != avId:
            self.notify.warning('chooseQuest: not expecting an answer from this avatar: %s' % avId)
            return
        if self.pendingQuests is None:
            self.notify.warning('chooseQuest: not expecting a quest choice from this avatar: %s' % avId)
            self.air.writeServerEvent('suspicious', avId, 'unexpected chooseQuest')
            return
        if questId == 0:
            # Avatar explicitly declined all offered quests.
            self.pendingAvId = None
            self.pendingQuests = None
            # BUGFIX: previously passed `self`; the quest manager expects an
            # avId here, consistent with chooseTrack() below.
            self.air.questManager.avatarCancelled(avId)
            self.cancelChoseQuest(avId)
            return
        if questId == 401:
            # Special-cased quest: require the avatar object to actually be
            # present on this AI before accepting the choice.
            av = self.air.getDo(avId)
            if not av:
                self.notify.warning('chooseQuest: av not present: %s' % avId)
                return
        for quest in self.pendingQuests:
            if questId == quest[0]:
                self.pendingAvId = None
                self.pendingQuests = None
                self.air.questManager.avatarChoseQuest(avId, self, *quest)
                return
        # Chosen quest was never offered -- likely a tampered client.
        self.notify.warning('chooseQuest: avatar: %s chose a quest not offered: %s' % (avId, questId))
        self.pendingAvId = None
        self.pendingQuests = None
        return

    def chooseTrack(self, trackId):
        """Client answered a track-choice movie; validate and forward the answer."""
        avId = self.air.getAvatarIdFromSender()
        self.notify.debug('chooseTrack: avatar %s choseTrack %s' % (avId, trackId))
        if not self.pendingAvId:
            self.notify.warning('chooseTrack: not expecting an answer from any avatar: %s' % avId)
            return
        if self.pendingAvId != avId:
            self.notify.warning('chooseTrack: not expecting an answer from this avatar: %s' % avId)
            return
        if self.pendingTracks is None:
            self.notify.warning('chooseTrack: not expecting a track choice from this avatar: %s' % avId)
            self.air.writeServerEvent('suspicious', avId, 'unexpected chooseTrack')
            return
        if trackId == -1:
            # Avatar backed out of the track choice.
            self.pendingAvId = None
            self.pendingTracks = None
            self.pendingTrackQuest = None
            self.air.questManager.avatarCancelled(avId)
            self.cancelChoseTrack(avId)
            return
        for track in self.pendingTracks:
            if trackId == track:
                self.air.questManager.avatarChoseTrack(avId, self, self.pendingTrackQuest, trackId)
                self.pendingAvId = None
                self.pendingTracks = None
                self.pendingTrackQuest = None
                return
        self.notify.warning('chooseTrack: avatar: %s chose a track not offered: %s' % (avId, trackId))
        self.pendingAvId = None
        self.pendingTracks = None
        self.pendingTrackQuest = None
        return

    def sendTimeoutMovie(self, task):
        """The avatar took too long to answer: drop all state and play the timeout movie."""
        self.pendingAvId = None
        self.pendingQuests = None
        self.pendingTracks = None
        self.pendingTrackQuest = None
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIMEOUT,
         self.npcId,
         self.busy,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        self.sendClearMovie(None)
        self.busy = 0
        return Task.done

    def sendClearMovie(self, task):
        """Reset all interaction state and broadcast the 'clear' movie."""
        self.pendingAvId = None
        self.pendingQuests = None
        self.pendingTracks = None
        self.pendingTrackQuest = None
        self.busy = 0
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_CLEAR,
         self.npcId,
         0,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        return Task.done

    def rejectAvatar(self, avId):
        """Play the generic rejection movie for avId."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_REJECT,
         self.npcId,
         avId,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))

    def rejectAvatarTierNotDone(self, avId):
        """Play the 'finish your current tier first' rejection movie."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIER_NOT_DONE,
         self.npcId,
         avId,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))

    def completeQuest(self, avId, questId, rewardId):
        """Play the quest-completion movie."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_COMPLETE,
         self.npcId,
         avId,
         [questId, rewardId, 0],
         ClockDelta.globalClockDelta.getRealNetworkTime(bits=16)])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def incompleteQuest(self, avId, questId, completeStatus, toNpcId):
        """Play the 'quest not finished yet' movie."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_INCOMPLETE,
         self.npcId,
         avId,
         [questId, completeStatus, toNpcId],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def assignQuest(self, avId, questId, rewardId, toNpcId):
        """Hand a new quest to avId and play the assignment movie."""
        self.busy = avId
        if self.questCallback:
            self.questCallback()
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_ASSIGN,
         self.npcId,
         avId,
         [questId, rewardId, toNpcId],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def presentQuestChoice(self, avId, quests):
        """Offer avId a choice of quests and remember the offer."""
        self.busy = avId
        self.pendingAvId = avId
        self.pendingQuests = quests
        # The movie field carries a flat list, so flatten the quest tuples.
        flatQuests = []
        for quest in quests:
            flatQuests.extend(quest)
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE,
         self.npcId,
         avId,
         flatQuests,
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def presentTrackChoice(self, avId, questId, tracks):
        """Offer avId a choice of gag tracks and remember the offer."""
        self.busy = avId
        self.pendingAvId = avId
        self.pendingTracks = tracks
        self.pendingTrackQuest = questId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE,
         self.npcId,
         avId,
         tracks,
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def cancelChoseQuest(self, avId):
        """Play the quest-choice-cancelled movie."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE_CANCEL,
         self.npcId,
         avId,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def cancelChoseTrack(self, avId):
        """Play the track-choice-cancelled movie."""
        self.busy = avId
        self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE_CANCEL,
         self.npcId,
         avId,
         [],
         ClockDelta.globalClockDelta.getRealNetworkTime()])
        if not self.tutorial:
            taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))

    def setMovieDone(self):
        """Client reports the movie finished; clear if it came from the busy avatar."""
        avId = self.air.getAvatarIdFromSender()
        self.notify.debug('setMovieDone busy: %s avId: %s' % (self.busy, avId))
        if self.busy == avId:
            taskMgr.remove(self.uniqueName('clearMovie'))
            self.sendClearMovie(None)
        elif self.busy:
            # Someone other than the avatar we are serving sent this.
            self.air.writeServerEvent('suspicious', avId, 'DistributedNPCToonAI.setMovieDone busy with %s' % self.busy)
            self.notify.warning('somebody called setMovieDone that I was not busy with! avId: %s' % avId)
        return

    def __handleUnexpectedExit(self, avId):
        """The interacting avatar disconnected; tear down the pending movie."""
        self.notify.warning('avatar:' + str(avId) + ' has exited unexpectedly')
        self.notify.warning('not busy with avId: %s, busy: %s ' % (avId, self.busy))
        taskMgr.remove(self.uniqueName('clearMovie'))
        self.sendClearMovie(None)
        return
| |
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import Group, User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.utils.timezone import now
from django.utils.encoding import iri_to_uri
from django.utils.safestring import mark_safe
from django.views.decorators.cache import cache_control, never_cache
import waffle
from django_statsd.clients import statsd
from django_browserid.auth import default_username_algo
from funfactory.helpers import urlparams
from product_details import product_details
import forms
from remo.base.decorators import permission_check
from remo.events.utils import get_events_for_user
from remo.profiles.models import UserProfile, UserStatus
from remo.profiles.models import FunctionalArea
from remo.voting.tasks import rotm_nomination_end_date
# Algorithm used to derive a username from a BrowserID email address;
# deployments may override the default via settings.BROWSERID_USERNAME_ALGO.
USERNAME_ALGO = getattr(settings, 'BROWSERID_USERNAME_ALGO',
                        default_username_algo)
@never_cache
@user_passes_test(lambda u: u.groups.filter(Q(name='Rep') | Q(name='Admin')),
                  login_url=settings.LOGIN_REDIRECT_URL)
@permission_check(permissions=['profiles.can_edit_profiles'],
                  filter_field='display_name', owner_field='user',
                  model=UserProfile)
def edit(request, display_name):
    """Edit user profile.
    Permission to edit user profile is granted to the user who owns
    the profile and all the users with permissions to edit profiles.
    Argument display_name should be lowered before queries because we
    allow case-insensitive profile urls. E.g. both /u/Giorgos and
    /u/giorgos are the same person.
    """
    def profile_date_form_validation(form):
        """Convenience function to only validate datejoinedform when
        user has permissions.
        """
        # Privileged editors must submit valid dates; for everyone else
        # the dates form is ignored entirely (always "valid").
        if request.user.has_perm('profiles.can_edit_profiles'):
            if form.is_valid():
                return True
            return False
        return True
    user = get_object_or_404(User,
                             userprofile__display_name__iexact=display_name)
    # Bind the three forms to POST data when present; `request.POST or None`
    # keeps them unbound (and thus not validated) on GET.
    userform = forms.ChangeUserForm(request.POST or None, instance=user)
    profileform = forms.ChangeProfileForm(request.POST or None,
                                          instance=user.userprofile,
                                          request=request)
    profile_date_form = forms.ChangeDatesForm(request.POST or None,
                                              instance=user.userprofile)
    if (userform.is_valid() and profileform.is_valid() and
            profile_date_form_validation(profile_date_form)):
        userform.save()
        profileform.save()
        if request.user.has_perm('profiles.can_edit_profiles'):
            # Update groups.
            # Maps DB group names to the checkbox names in the template.
            groups = {'Mentor': 'mentor_group',
                      'Admin': 'admin_group',
                      'Council': 'council_group',
                      'Rep': 'rep_group',
                      'Alumni': 'alumni_group'}
            # Sync group membership with the submitted checkboxes.
            for group_db, group_html in groups.items():
                if Group.objects.filter(name=group_db).exists():
                    if request.POST.get(group_html, None):
                        user.groups.add(Group.objects.get(name=group_db))
                    else:
                        user.groups.remove(Group.objects.get(name=group_db))
            # Update date fields
            profile_date_form.save()
        messages.success(request, 'Profile successfully edited.')
        statsd.incr('profiles.edit_profile')
        if request.user == user:
            return redirect('profiles_view_my_profile')
        else:
            redirect_url = reverse('profiles_view_profile',
                                   kwargs={'display_name':
                                           user.userprofile.display_name})
            return redirect(redirect_url)
    else:
        # If forms are not valid and the fields are dirty, get a fresh copy
        # of the object.
        # This is needed when an invalid display_name is used.
        # Django tries to resolve the url based on this display_name, which
        # results in a NoReverseMatch error. See also bug:
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1147541
        user = User.objects.get(pk=user.id)
    # Only reached on GET or invalid POST: re-render the edit page.
    # NOTE(review): `map` returns a list on Python 2, which is what the
    # template iterates over.
    group_bits = map(lambda x: user.groups.filter(name=x).exists(),
                     ['Admin', 'Council', 'Mentor', 'Rep', 'Alumni'])
    functional_areas = map(int, profileform['functional_areas'].value())
    user_is_alumni = user.groups.filter(name='Alumni').exists()
    return render(request, 'profiles_edit.html',
                  {'userform': userform,
                   'profileform': profileform,
                   'profile_date_form': profile_date_form,
                   'pageuser': user,
                   'group_bits': group_bits,
                   'range_years': range(1950, now().date().year - 11),
                   'functional_areas': functional_areas,
                   'user_is_alumni': user_is_alumni})
def redirect_list_profiles(request):
    """Permanently redirect legacy /people sub-paths to the hash-based URL."""
    base_url = reverse('profiles_list_profiles')
    # Everything after the list URL becomes the fragment of the new URL.
    tail = iri_to_uri('/' + request.path_info[len(base_url):])
    return redirect(urlparams(base_url, hash=tail), permanent=True)
@cache_control(private=True)
def list_profiles(request):
    """List users in Rep Group.

    Renders all Reps with completed registration, ordered by country then
    name, together with the country and functional-area filter data.
    """
    # sorted() returns a new sorted list and also works if get_regions()
    # ever returns a dict view (py3) instead of a list (py2), unlike the
    # previous in-place list.sort() call.
    countries = sorted(product_details.get_regions('en').values())
    reps = (User.objects
            .filter(userprofile__registration_complete=True,
                    groups__name='Rep')
            .order_by('userprofile__country', 'last_name', 'first_name'))
    return render(request, 'profiles_people.html',
                  {'countries': countries,
                   'reps': reps,
                   'areas': FunctionalArea.objects.all()})
@cache_control(private=True, max_age=60 * 5)
def view_profile(request, display_name):
    """View user profile.

    Also handles POSTed Rep-of-the-month nominations through nominee_form.
    """
    user = get_object_or_404(User,
                             userprofile__display_name__iexact=display_name)
    user_is_alumni = user.groups.filter(name='Alumni').exists()
    # Profiles exist only for current Reps and Alumni.
    if not user.groups.filter(Q(name='Rep') | Q(name='Alumni')).exists():
        raise Http404
    # Hide half-registered profiles from everyone but profile editors.
    if (not user.userprofile.registration_complete and
            not request.user.has_perm('profiles.can_edit_profiles')):
        raise Http404
    nominee_form = forms.RotmNomineeForm(request.POST or None,
                                         instance=user.userprofile)
    usergroups = user.groups.filter(Q(name='Mentor') | Q(name='Council'))
    # Nominations are only open before the rotm cutoff date.
    is_nomination_period = now().date() < rotm_nomination_end_date()
    data = {'pageuser': user,
            'user_profile': user.userprofile,
            'added_by': user.userprofile.added_by,
            'mentor': user.userprofile.mentor,
            'usergroups': usergroups,
            'user_nominated': user.userprofile.is_rotm_nominee,
            'is_nomination_period': is_nomination_period,
            'user_is_alumni': user_is_alumni}
    if UserStatus.objects.filter(user=user, is_unavailable=True).exists():
        status = UserStatus.objects.filter(user=user).latest('created_on')
        data['user_status'] = status
        if user == request.user:
            # Remind the owner of their own unavailability period.
            today = now().date()
            date = (status.expected_date.strftime('%d %B %Y')
                    if status.expected_date > today else None)
            msg = render_to_string(
                'includes/view_profile_unavailable_msg.html',
                {'date': date,
                 'display_name': user.userprofile.display_name})
            messages.info(request, mark_safe(msg))
    if nominee_form.is_valid():
        # Only mentors may nominate, and only during the nomination window
        # (or when the rotm tasks waffle switch is forced on).
        if ((is_nomination_period or
                waffle.switch_is_active('enable_rotm_tasks')) and
                request.user.groups.filter(name='Mentor').exists()):
            nominee_form.save()
            return redirect('profiles_view_profile', display_name=display_name)
        messages.warning(request, ('Only mentors can nominate a mentee.'))
    if user_is_alumni:
        msg = ('Note: You are viewing a profile of a former Rep '
               'who is no longer part of the program')
        messages.info(request, msg)
    today = now().date()
    # NGReports
    data['ng_reports'] = (user.ng_reports
                          .filter(report_date__lte=today)
                          .order_by('-report_date'))
    past_user_events = get_events_for_user(user, to_date=today)
    data['future_events'] = get_events_for_user(user, from_date=today)
    # Ten most recent past events.
    data['past_events'] = past_user_events.reverse()[:10]
    data['featured_rep'] = user.featuredrep_users.all()
    data['request_user'] = request.user
    data['nominee_form'] = nominee_form
    return render(request, 'profiles_view.html', data)
@permission_check()
def view_my_profile(request):
    """View logged-in user profile."""
    display_name = request.user.userprofile.display_name
    return view_profile(request, display_name=display_name)
@cache_control(private=True, no_cache=True)
@permission_check(permissions=['profiles.create_user'])
def invite(request):
    """Invite a user."""
    form = forms.InviteUserForm(request.POST or None)
    if not form.is_valid():
        # GET, or invalid submission: show the invite page again.
        return render(request, 'profiles_invite.html', {'form': form})
    email = form.cleaned_data['email']
    new_user = User.objects.create_user(username=USERNAME_ALGO(email),
                                        email=email)
    # Every invitee starts out in the Rep group.
    new_user.groups.add(Group.objects.get(name='Rep'))
    # Inviting mentors automatically become the invitee's mentor.
    if request.user.groups.filter(name='Mentor').exists():
        new_user.userprofile.mentor = request.user
    new_user.userprofile.added_by = request.user
    new_user.userprofile.save()
    messages.success(request, ('User was successfully invited, '
                               'now shoot some mails!'))
    return redirect('profiles_invite')
@permission_check(permissions=['profiles.can_delete_profiles'])
def delete_user(request, display_name):
    """Delete a user."""
    target = get_object_or_404(User, userprofile__display_name=display_name)
    # Deletion only on POST; a GET just bounces back to the main page.
    if request.method == 'POST':
        target.delete()
        messages.success(request, 'User was deleted.')
        statsd.incr('profiles.delete_profile')
    return redirect('main')
@cache_control(private=True)
def list_alumni(request):
    """List users in Alumni Group."""
    alumni = User.objects.filter(groups__name='Alumni')
    paginator = Paginator(alumni, settings.ITEMS_PER_PAGE)
    requested_page = request.GET.get('page', 1)
    try:
        objects = paginator.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        objects = paginator.page(1)
    except EmptyPage:
        # Out-of-range page parameter: clamp to the last page.
        objects = paginator.page(paginator.num_pages)
    return render(request, 'profiles_list_alumni.html', {'objects': objects})
| |
#!/usr/bin/env python2.7
# encoding: utf-8
import os
import re
import logging
from otto.lib.otypes import ApplianceUsage
from otto.connections.ssh_pexpect import Ssh, TIMEOUT, EOF
# Optional per-process suffix so multiple otto instances keep separate
# logger namespaces (e.g. "otto1.appliances").
instance = os.environ.get('instance') or ''
logger = logging.getLogger('otto' + instance + '.appliances')
# Avoid "No handlers could be found" warnings when the app adds no handler.
logger.addHandler(logging.NullHandler())
class Switch(Ssh):
    """
    A class for interacting with Arista or Dell switches.
    Arista ports are just a port value.
    Dell ports have the form: <unit>/<port-type><port>
    ex: 1/g45
    To use this module, ssh must be enabled on Dell switches
    Basic Usage:
    from otto.appliance import switch
    s = switch.Switch(uname, host, passwd, swtype='auto', prompt='>')
    s.connect()
    """
    def __init__(self, user, host, password, swtype='auto', prompt='>'):
        self.user = user
        self.host = host
        self.password = password
        self.swtype = swtype.lower()
        self.prompt = prompt
        # Dell port syntax: "<unit>/<g|xg><port>", e.g. "1/g45" or "1/xg2".
        self.reg = re.compile(r'(\d+)(/)(g|xg)(\d+)')
    def connect(self):
        """
        Overloaded to handle auto-detection
        """
        super(Switch, self).connect()
        if self.swtype == 'auto':
            self.__autotype()
    def disconnect(self):
        """
        Dell disconnects in an odd fashion
        Handles disconnecting appropriately
        """
        if self.swtype == 'arista':
            super(Switch, self).disconnect()
        elif self.swtype == 'dell':
            try:
                self.run('quit')
            except EOF:
                # Dell drops the connection on 'quit'; EOF means it worked.
                super(Switch, self).disconnect()
    def __autotype(self):
        """
        Determines the type of switch the module is connected to
        """
        # Dell answers 'show system'; a TIMEOUT leaves the partial output in
        # self.before and suggests we may be talking to an Arista instead.
        try:
            ret = self.run('show system\n')
        except TIMEOUT:
            ret = self.before
        if 'Dell' in ret:
            self.swtype = 'dell'
        else:
            # Arista
            ret = self.run('show version')
            if 'Arista' in ret:
                self.swtype = 'arista'
            else:
                logger.error("__autotype: Incorrect type of switch")
    def _dell_port(self, port, caller):
        """
        Normalize a port name to Dell syntax.
        Returns the port string to use, or None when the format is not
        recognized (an error is logged under the calling method's name).
        """
        if self.reg.match(port):
            return port
        if re.match(r'(\d+)', port):
            # Bare port number: assume a gigabit interface on unit 1.
            return '1/g%s' % port
        logger.error("%s: Incorrect port format" % caller)
        return None
    def _enter_config(self):
        """
        Enter privileged configuration mode.
        Returns the previously active prompt so _exit_config can restore it.
        """
        saved = self.prompt
        self.prompt = '#'
        self.run('enable')
        self.run('configure')
        return saved
    def _exit_config(self, saved):
        """
        Leave configuration mode and restore the saved prompt.
        Dell needs an extra 'end'; Arista drops privilege with 'disable'.
        """
        self.run('end')
        self.prompt = saved
        if self.swtype == 'dell':
            self.run('end')
        else:
            self.run('disable')
    def up(self, port):
        """
        Brings up the port specified
        Correctly formats the port number specified depending on switch type
        """
        if self.swtype == 'dell':
            port = self._dell_port(port, 'up')
            if port is None:
                return False
        saved = self._enter_config()
        self.run('interface ethernet %s' % port)
        self.run('no shutdown')
        self._exit_config(saved)
    def down(self, port):
        """
        Shuts down the port specified
        Correctly formats the port number specified depending on switch type
        """
        if self.swtype == 'dell':
            port = self._dell_port(port, 'down')
            if port is None:
                return False
        saved = self._enter_config()
        self.run('interface ethernet %s' % port)
        self.run('shutdown')
        self._exit_config(saved)
    def chifmode(self, port, mode):
        """
        Changes the mode of the port
        Mode takes 3 options: access, general, trunk
        """
        if self.swtype == 'dell':
            port = self._dell_port(port, 'chifmode')
            if port is None:
                return False
        if mode not in ('access', 'general', 'trunk'):
            logger.error("chifmode: Incorrect Mode '%s'" % mode)
            return False
        saved = self._enter_config()
        self.run('interface ethernet %s' % port)
        self.run('switchport mode %s' % mode)
        self._exit_config(saved)
    def mkvlan(self, segid):
        """
        Creates vlan of the given name
        """
        saved = self._enter_config()
        if self.swtype == 'dell':
            # Dell manages vlans from a dedicated sub-mode.
            self.run('vlan database')
        self.run('vlan %s' % segid)
        self._exit_config(saved)
    def delvlan(self, segid):
        """
        Deletes vlan of the given name
        """
        saved = self._enter_config()
        if self.swtype == 'dell':
            self.run('vlan database')
        self.run('no vlan %s' % segid)
        self._exit_config(saved)
    def setvlan(self, port, segid):
        """
        Sets a port to specified, already created vlan
        """
        if self.swtype == 'dell':
            port = self._dell_port(port, 'setvlan')
            if port is None:
                return False
        saved = self._enter_config()
        self.run('interface ethernet %s' % port)
        ret = self.run('switchport access vlan %s' % segid)
        if 'Interface not in Access Mode' in ret:
            logger.error('setvlan: Interface not in Access Mode')
        self._exit_config(saved)
    def __filtermac(self, mac):
        """
        Filter all non-hex characters from mac
        All alphas returned lowercase
        """
        mac = mac.lower()
        s = ''
        for c in mac:
            try:
                s += '%x' % int(c, 16)
            except ValueError:
                # Drop delimiters such as ':', '-' and '.'.
                pass
        if len(s) != 12:
            raise ApplianceUsage("Invalid MAC length: \"%s\"" % s)
        return s
    def __formatmac(self, swtype, mac):
        """
        Return an arista/dell delimited mac address
        Most stupidly, valid mac addresses in the arista/dell cli require dot delimiters.
        Neither recognize 00100401336b as a mac. Arista does recognize 0010.0401.336b.
        Dell does recognize 0010.0401.336B.
        """
        # Honor the swtype argument (previously ignored in favor of
        # self.swtype; the only caller passes self.swtype, so behavior is
        # unchanged).
        if swtype == 'arista':
            return mac[0:4].lower() + '.' + mac[4:8].lower() + '.' + mac[8:12].lower()
        else:
            return mac[0:4].upper() + '.' + mac[4:8].upper() + '.' + mac[8:12].upper()
    def mac2port(self, mac):
        """
        Return the port associated with mac from the address table, or None
        All non-hex delimiter characters in the mac are ignored
        """
        mac = self.__filtermac(mac)
        fmac = self.__formatmac(self.swtype, mac)
        if self.swtype == 'arista':
            cmd = 'show mac address-table address %s' % fmac
            # log.debug cmd here
            r = self.run(cmd)
            for l in r.split('\r\n'):
                ls = l.split()
                if len(ls) > 5 and ls[1] == fmac:
                    # Port column looks like "Et<N>"; strip the 2-char prefix.
                    port = ls[3]
                    return port[2:]
        elif self.swtype == 'dell':
            cont = True
            temp = self.prompt
            self.prompt = '#'
            self.run('enable')
            # The Dell address table is paged; keep sending <return> until
            # the pager finishes (a prompt instead of another TIMEOUT).
            try:
                self.run('show bridge address-table', timeout=0.1)
            except TIMEOUT:
                while cont:
                    try:
                        self.run('\r', timeout=0.1)
                    except (EOF, TIMEOUT):
                        pass
                    else:
                        cont = False
            ret = self.before
            self.prompt = temp
            self.run('exit')
            for l in ret.split('\r\n'):
                ls = l.split()
                if len(ls) == 4 and ls[1] == fmac:
                    port = ls[2]
                    # Strip the "<unit>/g" or "<unit>/xg" prefix.
                    if 'xg' in port:
                        return port[4:]
                    else:
                        return port[3:]
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from senlin.common import consts
from senlin.common import scaleutils as su
from senlin.policies import deletion_policy as dp
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
class TestDeletionPolicy(base.SenlinTestCase):
def setUp(self):
super(TestDeletionPolicy, self).setUp()
self.context = utils.dummy_context()
self.spec = {
'type': 'senlin.policy.deletion',
'version': '1.0',
'properties': {
'criteria': 'OLDEST_FIRST',
'destroy_after_deletion': True,
'grace_period': 60,
'reduce_desired_capacity': False
}
}
def test_policy_init(self):
policy = dp.DeletionPolicy('test-policy', self.spec)
self.assertIsNone(policy.id)
self.assertEqual('test-policy', policy.name)
self.assertEqual('senlin.policy.deletion-1.0', policy.type)
self.assertEqual('OLDEST_FIRST', policy.criteria)
self.assertTrue(policy.destroy_after_deletion)
self.assertEqual(60, policy.grace_period)
self.assertFalse(policy.reduce_desired_capacity)
@mock.patch.object(su, 'nodes_by_random')
def test_victims_by_regions_random(self, mock_select):
cluster = mock.Mock()
node1 = mock.Mock(id=1)
node2 = mock.Mock(id=2)
node3 = mock.Mock(id=3)
cluster.nodes_by_region.side_effect = [
[node1], [node2, node3]
]
mock_select.side_effect = [['1'], ['2', '3']]
self.spec['properties']['criteria'] = 'RANDOM'
policy = dp.DeletionPolicy('test-policy', self.spec)
res = policy._victims_by_regions(cluster, {'R1': 1, 'R2': 2})
self.assertEqual(['1', '2', '3'], res)
mock_select.assert_has_calls([
mock.call([node1], 1),
mock.call([node2, node3], 2)
])
cluster.nodes_by_region.assert_has_calls([
mock.call('R1'), mock.call('R2')])
@mock.patch.object(su, 'nodes_by_profile_age')
def test_victims_by_regions_profile_age(self, mock_select):
cluster = mock.Mock()
node1 = mock.Mock(id=1)
node2 = mock.Mock(id=2)
node3 = mock.Mock(id=3)
cluster.nodes_by_region.side_effect = [
[node1], [node2, node3]
]
mock_select.side_effect = [['1'], ['2', '3']]
self.spec['properties']['criteria'] = 'OLDEST_PROFILE_FIRST'
policy = dp.DeletionPolicy('test-policy', self.spec)
res = policy._victims_by_regions(cluster, {'R1': 1, 'R2': 2})
self.assertEqual(['1', '2', '3'], res)
mock_select.assert_has_calls([
mock.call([node1], 1),
mock.call([node2, node3], 2)
])
cluster.nodes_by_region.assert_has_calls([
mock.call('R1'), mock.call('R2')])
@mock.patch.object(su, 'nodes_by_age')
def test_victims_by_regions_age_oldest(self, mock_select):
cluster = mock.Mock()
node1 = mock.Mock(id=1)
node2 = mock.Mock(id=2)
node3 = mock.Mock(id=3)
cluster.nodes_by_region.side_effect = [
[node1], [node2, node3]
]
mock_select.side_effect = [['1'], ['2', '3']]
self.spec['properties']['criteria'] = 'OLDEST_FIRST'
policy = dp.DeletionPolicy('test-policy', self.spec)
res = policy._victims_by_regions(cluster, {'R1': 1, 'R2': 2})
self.assertEqual(['1', '2', '3'], res)
mock_select.assert_has_calls([
mock.call([node1], 1, True),
mock.call([node2, node3], 2, True)
])
cluster.nodes_by_region.assert_has_calls([
mock.call('R1'), mock.call('R2')])
@mock.patch.object(su, 'nodes_by_age')
def test_victims_by_regions_age_youngest(self, mock_select):
cluster = mock.Mock()
node1 = mock.Mock(id=1)
node2 = mock.Mock(id=2)
node3 = mock.Mock(id=3)
cluster.nodes_by_region.side_effect = [
[node1], [node2, node3]
]
mock_select.side_effect = [['1'], ['2', '3']]
self.spec['properties']['criteria'] = 'YOUNGEST_FIRST'
policy = dp.DeletionPolicy('test-policy', self.spec)
res = policy._victims_by_regions(cluster, {'R1': 1, 'R2': 2})
self.assertEqual(['1', '2', '3'], res)
mock_select.assert_has_calls([
mock.call([node1], 1, False),
mock.call([node2, node3], 2, False)
])
cluster.nodes_by_region.assert_has_calls([
mock.call('R1'), mock.call('R2')])
@mock.patch.object(su, 'nodes_by_random')
def test_victims_by_zones_random(self, mock_select):
cluster = mock.Mock()
node1 = mock.Mock(id=1)
node2 = mock.Mock(id=2)
node3 = mock.Mock(id=3)
cluster.nodes_by_zone.side_effect = [
[node1], [node2, node3]
]
mock_select.side_effect = [['1'], ['3']]
self.spec['properties']['criteria'] = 'RANDOM'
policy = dp.DeletionPolicy('test-policy', self.spec)
res = policy._victims_by_zones(cluster, {'AZ1': 1, 'AZ2': 1})
self.assertEqual(['1', '3'], res)
mock_select.assert_has_calls([
mock.call([node1], 1),
mock.call([node2, node3], 1)
])
cluster.nodes_by_zone.assert_has_calls(
[mock.call('AZ1'), mock.call('AZ2')],
)
@mock.patch.object(su, 'nodes_by_profile_age')
def test_victims_by_zones_profile_age(self, mock_select):
cluster = mock.Mock()
node1 = mock.Mock(id=1)
node2 = mock.Mock(id=2)
node3 = mock.Mock(id=3)
cluster.nodes_by_zone.side_effect = [
[node1], [node2, node3]
]
mock_select.side_effect = [['1'], ['2']]
self.spec['properties']['criteria'] = 'OLDEST_PROFILE_FIRST'
policy = dp.DeletionPolicy('test-policy', self.spec)
res = policy._victims_by_zones(cluster, {'AZ1': 1, 'AZ2': 1})
self.assertEqual(['1', '2'], res)
mock_select.assert_has_calls(
[
mock.call([node1], 1),
mock.call([node2, node3], 1)
],
)
cluster.nodes_by_zone.assert_has_calls(
[mock.call('AZ1'), mock.call('AZ2')],
)
@mock.patch.object(su, 'nodes_by_age')
def test_victims_by_zones_age_oldest(self, mock_select):
cluster = mock.Mock()
node1 = mock.Mock(id=1)
node2 = mock.Mock(id=2)
node3 = mock.Mock(id=3)
cluster.nodes_by_zone.side_effect = [
[node1], [node2, node3]
]
mock_select.side_effect = [['1'], ['3']]
self.spec['properties']['criteria'] = 'OLDEST_FIRST'
policy = dp.DeletionPolicy('test-policy', self.spec)
res = policy._victims_by_zones(cluster, {'AZ1': 1, 'AZ8': 1})
self.assertEqual(['1', '3'], res)
mock_select.assert_has_calls([
mock.call([node1], 1, True),
mock.call([node2, node3], 1, True)
])
cluster.nodes_by_zone.assert_has_calls(
[mock.call('AZ1'), mock.call('AZ8')],
)
@mock.patch.object(su, 'nodes_by_age')
def test_victims_by_zones_age_youngest(self, mock_select):
cluster = mock.Mock()
node1 = mock.Mock(id=1)
node2 = mock.Mock(id=3)
node3 = mock.Mock(id=5)
cluster.nodes_by_zone.side_effect = [
[node1], [node2, node3]
]
mock_select.side_effect = [['1'], ['3', '5']]
self.spec['properties']['criteria'] = 'YOUNGEST_FIRST'
policy = dp.DeletionPolicy('test-policy', self.spec)
res = policy._victims_by_zones(cluster, {'AZ5': 1, 'AZ6': 2})
self.assertEqual(['1', '3', '5'], res)
mock_select.assert_has_calls(
[
mock.call([node1], 1, False),
mock.call([node2, node3], 2, False)
],
)
cluster.nodes_by_zone.assert_has_calls(
[mock.call('AZ5'), mock.call('AZ6')],
)
def test_update_action_clean(self):
action = mock.Mock()
action.data = {}
policy = dp.DeletionPolicy('test-policy', self.spec)
policy._update_action(action, ['N1', 'N2'])
pd = {
'status': 'OK',
'reason': 'Candidates generated',
'deletion': {
'count': 2,
'candidates': ['N1', 'N2'],
'destroy_after_deletion': True,
'grace_period': 60,
'reduce_desired_capacity': False,
}
}
self.assertEqual(pd, action.data)
action.store.assert_called_with(action.context)
def test_update_action_override(self):
action = mock.Mock()
action.data = {
'deletion': {
'count': 3,
}
}
policy = dp.DeletionPolicy('test-policy', self.spec)
policy._update_action(action, ['N1', 'N2'])
pd = {
'status': 'OK',
'reason': 'Candidates generated',
'deletion': {
'count': 2,
'candidates': ['N1', 'N2'],
'destroy_after_deletion': True,
'grace_period': 60,
'reduce_desired_capacity': False,
}
}
self.assertEqual(pd, action.data)
action.store.assert_called_with(action.context)
@mock.patch.object(dp.DeletionPolicy, '_update_action')
def test_pre_op_del_nodes(self, mock_update):
action = mock.Mock()
action.context = self.context
action.inputs = {
'count': 2,
'candidates': ['N1', 'N2'],
}
action.data = {}
policy = dp.DeletionPolicy('test-policy', self.spec)
policy.pre_op('FAKE_ID', action)
mock_update.assert_called_once_with(action, ['N1', 'N2'])
@mock.patch.object(dp.DeletionPolicy, '_update_action')
def test_pre_op_node_delete(self, mock_update):
    """A NODE_DELETE action selects the node bound to the action entity."""
    entity = mock.Mock(id='NODE_ID')
    action = mock.Mock(action=consts.NODE_DELETE, context=self.context,
                       inputs={}, data={}, entity=entity)
    policy = dp.DeletionPolicy('test-policy', self.spec)

    policy.pre_op('FAKE_ID', action)

    mock_update.assert_called_once_with(action, ['NODE_ID'])
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'nodes_by_age')
def test_pre_op_with_count_decisions(self, mock_select, mock_update):
    """A bare 'count' decision triggers criteria-based selection."""
    cluster = mock.Mock(nodes=['a', 'b', 'c'])
    action = mock.Mock(context=self.context, inputs={},
                       data={'deletion': {'count': 2}}, entity=cluster)
    mock_select.return_value = ['NODE1', 'NODE2']
    policy = dp.DeletionPolicy('test-policy', self.spec)

    policy.pre_op('FAKE_ID', action)

    mock_update.assert_called_once_with(action, ['NODE1', 'NODE2'])
    mock_select.assert_called_once_with(cluster.nodes, 2, True)
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(dp.DeletionPolicy, '_victims_by_regions')
def test_pre_op_with_region_decisions(self, mock_select, mock_update):
    """Region distribution data routes selection to _victims_by_regions."""
    cluster = mock.Mock(nodes=['a', 'b', 'c'])
    action = mock.Mock(context=self.context, inputs={}, entity=cluster)
    action.data = {
        'deletion': {
            'count': 2,
            'regions': {'R1': 1, 'R2': 1},
        },
    }
    mock_select.return_value = ['NODE1', 'NODE2']
    policy = dp.DeletionPolicy('test-policy', self.spec)

    policy.pre_op('FAKE_ID', action)

    mock_update.assert_called_once_with(action, ['NODE1', 'NODE2'])
    mock_select.assert_called_once_with(cluster, {'R1': 1, 'R2': 1})
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(dp.DeletionPolicy, '_victims_by_zones')
def test_pre_op_with_zone_decisions(self, mock_select, mock_update):
    """Zone distribution data routes selection to _victims_by_zones."""
    cluster = mock.Mock(nodes=['a', 'b', 'c'])
    action = mock.Mock(context=self.context, inputs={}, entity=cluster)
    action.data = {
        'deletion': {
            'count': 2,
            'zones': {'AZ1': 1, 'AZ2': 1},
        },
    }
    mock_select.return_value = ['NODE1', 'NODE2']
    policy = dp.DeletionPolicy('test-policy', self.spec)

    policy.pre_op('FAKE_ID', action)

    mock_update.assert_called_once_with(action, ['NODE1', 'NODE2'])
    mock_select.assert_called_once_with(cluster, {'AZ1': 1, 'AZ2': 1})
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'nodes_by_age')
def test_pre_op_scale_in_with_count(self, mock_select, mock_update):
    """A SCALE_IN count larger than the cluster is capped at cluster size."""
    cluster = mock.Mock(nodes=[mock.Mock()])
    action = mock.Mock(context=self.context, data={}, inputs={'count': 2},
                       action=consts.CLUSTER_SCALE_IN, entity=cluster)
    mock_select.return_value = ['NODE_ID']
    policy = dp.DeletionPolicy('test-policy', self.spec)

    policy.pre_op('FAKE_ID', action)

    mock_update.assert_called_once_with(action, ['NODE_ID'])
    # Selection is invoked with 1: the requested count (2) exceeds the
    # single-node cluster size.
    mock_select.assert_called_once_with(cluster.nodes, 1, True)
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'nodes_by_age')
def test_pre_op_scale_in_without_count(self, mock_select, mock_update):
    """A SCALE_IN without an explicit count defaults to deleting one node."""
    cluster = mock.Mock(nodes=[mock.Mock()])
    action = mock.Mock(context=self.context, data={}, inputs={},
                       action=consts.CLUSTER_SCALE_IN, entity=cluster)
    mock_select.return_value = ['NODE_ID']
    policy = dp.DeletionPolicy('test-policy', self.spec)

    policy.pre_op('FAKE_ID', action)

    mock_update.assert_called_once_with(action, ['NODE_ID'])
    # Selection is invoked with 1: no count was specified, so 1 is the
    # default.
    mock_select.assert_called_once_with(cluster.nodes, 1, True)
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'parse_resize_params')
def test_pre_op_resize_failed_parse(self, mock_parse, mock_update):
    """A parse failure is recorded on the action and aborts the pre-op."""
    cluster = mock.Mock(nodes=[mock.Mock(), mock.Mock()])
    action = mock.Mock(context=self.context, inputs={}, data={},
                       action=consts.CLUSTER_RESIZE, entity=cluster)
    mock_parse.return_value = 'ERROR', 'Failed parsing.'
    policy = dp.DeletionPolicy('test-policy', self.spec)

    policy.pre_op('FAKE_ID', action)

    self.assertEqual('ERROR', action.data['status'])
    self.assertEqual('Failed parsing.', action.data['reason'])
    mock_parse.assert_called_once_with(action, cluster, 2)
    self.assertEqual(0, mock_update.call_count)
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'parse_resize_params')
def test_pre_op_resize_not_deletion(self, mock_parse, mock_update):
    """A RESIZE that implies no deletion leaves the action untouched."""
    def fake_parse(action, cluster, current):
        # Parsing succeeds but produces no 'deletion' entry.
        action.data = {}
        return 'OK', 'cool'

    cluster = mock.Mock(nodes=[mock.Mock(), mock.Mock()])
    action = mock.Mock(context=self.context, inputs={},
                       action=consts.CLUSTER_RESIZE, entity=cluster)
    mock_parse.side_effect = fake_parse
    policy = dp.DeletionPolicy('test-policy', self.spec)
    # a simulation of a non-deletion RESIZE
    action.data = {}

    policy.pre_op('FAKE_ID', action)

    mock_parse.assert_called_once_with(action, cluster, 2)
    self.assertEqual(0, mock_update.call_count)
@mock.patch.object(su, 'parse_resize_params')
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'nodes_by_age')
def test_pre_op_resize_with_count(self, mock_select, mock_update,
                                  mock_parse):
    """A RESIZE whose parse yields a deletion count selects victims."""
    def fake_parse(a, cluster, current):
        a.data = {'deletion': {'count': 2}}
        return 'OK', 'cool'

    cluster = mock.Mock(nodes=[mock.Mock(), mock.Mock()])
    action = mock.Mock(context=self.context, inputs={}, data={},
                       action=consts.CLUSTER_RESIZE, entity=cluster)
    mock_parse.side_effect = fake_parse
    mock_select.return_value = ['NID']
    policy = dp.DeletionPolicy('test-policy', self.spec)

    policy.pre_op('FAKE_ID', action)

    mock_parse.assert_called_once_with(action, cluster, 2)
    mock_update.assert_called_once_with(action, ['NID'])
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'nodes_by_random')
def test_pre_op_do_random(self, mock_select, mock_update):
    """The RANDOM criteria dispatches to su.nodes_by_random."""
    cluster = mock.Mock(nodes=['a', 'b', 'c'])
    action = mock.Mock(context=self.context, inputs={},
                       data={'deletion': {'count': 2}}, entity=cluster)
    mock_select.return_value = ['NODE1', 'NODE2']
    spec = copy.deepcopy(self.spec)
    spec['properties']['criteria'] = 'RANDOM'
    policy = dp.DeletionPolicy('test-policy', spec)

    policy.pre_op('FAKE_ID', action)

    mock_select.assert_called_once_with(cluster.nodes, 2)
    mock_update.assert_called_once_with(action, ['NODE1', 'NODE2'])
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'nodes_by_profile_age')
def test_pre_op_do_oldest_profile(self, mock_select, mock_update):
    """OLDEST_PROFILE_FIRST dispatches to su.nodes_by_profile_age."""
    cluster = mock.Mock(nodes=['a', 'b', 'c'])
    action = mock.Mock(context=self.context, inputs={},
                       data={'deletion': {'count': 2}}, entity=cluster)
    mock_select.return_value = ['NODE1', 'NODE2']
    spec = copy.deepcopy(self.spec)
    spec['properties']['criteria'] = 'OLDEST_PROFILE_FIRST'
    policy = dp.DeletionPolicy('test-policy', spec)

    policy.pre_op('FAKE_ID', action)

    mock_select.assert_called_once_with(cluster.nodes, 2)
    mock_update.assert_called_once_with(action, ['NODE1', 'NODE2'])
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'nodes_by_age')
def test_pre_op_do_oldest_first(self, mock_select, mock_update):
    """OLDEST_FIRST dispatches to nodes_by_age with oldest=True."""
    cluster = mock.Mock(nodes=['a', 'b', 'c'])
    action = mock.Mock(context=self.context, inputs={},
                       data={'deletion': {'count': 2}}, entity=cluster)
    mock_select.return_value = ['NODE1', 'NODE2']
    spec = copy.deepcopy(self.spec)
    spec['properties']['criteria'] = 'OLDEST_FIRST'
    policy = dp.DeletionPolicy('test-policy', spec)

    policy.pre_op('FAKE_ID', action)

    mock_select.assert_called_once_with(cluster.nodes, 2, True)
    mock_update.assert_called_once_with(action, ['NODE1', 'NODE2'])
@mock.patch.object(dp.DeletionPolicy, '_update_action')
@mock.patch.object(su, 'nodes_by_age')
def test_pre_op_do_youngest_first(self, mock_select, mock_update):
    """YOUNGEST_FIRST dispatches to nodes_by_age with oldest=False."""
    cluster = mock.Mock(nodes=['a', 'b', 'c'])
    action = mock.Mock(context=self.context, inputs={},
                       data={'deletion': {'count': 2}}, entity=cluster)
    mock_select.return_value = ['NODE1', 'NODE2']
    spec = copy.deepcopy(self.spec)
    spec['properties']['criteria'] = 'YOUNGEST_FIRST'
    policy = dp.DeletionPolicy('test-policy', spec)

    policy.pre_op('FAKE_ID', action)

    mock_select.assert_called_once_with(cluster.nodes, 2, False)
    mock_update.assert_called_once_with(action, ['NODE1', 'NODE2'])
| |
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes for making VMware VI SOAP calls.
"""
import httplib
import urllib2
from oslo.config import cfg
import suds
from nova.openstack.common.gettextutils import _
from nova import utils
from nova.virt.vmwareapi import error_util
RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
CONN_ABORT_ERROR = 'Software caused connection abort'
ADDRESS_IN_USE_ERROR = 'Address already in use'
vmwareapi_wsdl_loc_opt = cfg.StrOpt('wsdl_location',
help='Optional VIM Service WSDL Location '
'e.g http://<server>/vimService.wsdl. '
'Optional over-ride to default location for bug work-arounds')
CONF = cfg.CONF
CONF.register_opt(vmwareapi_wsdl_loc_opt, 'vmware')
def get_moref(value, type):
    """Get managed object reference.

    Wraps *value* in a suds Property and tags it with the managed
    object *type* expected by the VI SDK.
    """
    ref = suds.sudsobject.Property(value)
    ref._type = type
    return ref
def object_to_dict(obj, list_depth=1):
    """Convert Suds object into serializable format.

    The calling function can limit the amount of list entries that
    are converted via *list_depth* (only the first ``list_depth``
    items of each list are kept).
    """
    result = {}
    for key, value in suds.sudsobject.asdict(obj).iteritems():
        if hasattr(value, '__keylist__'):
            # Nested suds object: recurse.
            result[key] = object_to_dict(value, list_depth=list_depth)
        elif isinstance(value, list):
            # Keep at most list_depth entries, converting nested objects.
            converted = []
            for item in value[:list_depth]:
                if hasattr(item, '__keylist__'):
                    converted.append(object_to_dict(item,
                                                    list_depth=list_depth))
                else:
                    converted.append(item)
            result[key] = converted
        else:
            result[key] = value
    return result
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
# suds does not handle AnyType properly.
# VI SDK requires type attribute to be set when AnyType is used
if node.name == 'value':
node.set('xsi:type', 'xsd:string')
def marshalled(self, context):
"""suds will send the specified soap envelope.
Provides the plugin with the opportunity to prune empty
nodes and fixup nodes before sending it to the server.
"""
# suds builds the entire request object based on the wsdl schema.
# VI SDK throws server errors if optional SOAP nodes are sent
# without values, e.g. <test/> as opposed to <test>test</test>
context.envelope.prune()
context.envelope.walk(self.addAttributeForValue)
class Vim:
    """The VIM Object.

    A thin dynamic proxy for the vSphere (VI) SOAP API: attribute
    access that is not defined on the class is translated into a SOAP
    method invocation by __getattr__.
    """

    def __init__(self,
                 protocol="https",
                 host="localhost",
                 port=443):
        """Creates the necessary Communication interfaces and gets the
        ServiceContent for initiating SOAP transactions.

        protocol: http or https
        host    : ESX IPAddress or Hostname
        port    : port for connection
        """
        if not suds:
            raise Exception(_("Unable to import suds."))
        self._protocol = protocol
        self._host_name = host
        self.wsdl_url = Vim.get_wsdl_url(protocol, host, port)
        self.url = Vim.get_soap_url(protocol, host, port)
        self.client = suds.client.Client(self.wsdl_url, location=self.url,
                                         plugins=[VIMMessagePlugin()])
        self._service_content = self.retrieve_service_content()

    def retrieve_service_content(self):
        # Resolved through __getattr__ into a SOAP call on ServiceInstance.
        return self.RetrieveServiceContent("ServiceInstance")

    @staticmethod
    def get_wsdl_url(protocol, host_name, port):
        """Allows override of the wsdl location, making this static
        means we can test the logic outside of the constructor
        without forcing the test environment to have multiple valid
        wsdl locations to test against.

        :param protocol: https or http
        :param host_name: localhost or other server name
        :param port: port for connection
        :return: string to WSDL location for vSphere WS Management API
        """
        # optional WSDL location over-ride for work-arounds
        if CONF.vmware.wsdl_location:
            return CONF.vmware.wsdl_location
        # calculate default WSDL location if no override supplied
        return Vim.get_soap_url(protocol, host_name, port) + "/vimService.wsdl"

    @staticmethod
    def get_soap_url(protocol, host_name, port):
        """Calculates the location of the SOAP services
        for a particular server. Created as a static
        method for testing.

        :param protocol: https or http
        :param host_name: localhost or other vSphere server name
        :param port: port for connection
        :return: the url to the active vSphere WS Management API
        """
        # IPv6 literals must be bracketed inside a URL authority.
        if utils.is_valid_ipv6(host_name):
            return '%s://[%s]:%d/sdk' % (protocol, host_name, port)
        return '%s://%s:%d/sdk' % (protocol, host_name, port)

    def get_service_content(self):
        """Gets the service content object."""
        return self._service_content

    def __getattr__(self, attr_name):
        """Makes the API calls and gets the result."""

        def vim_request_handler(managed_object, **kwargs):
            """Builds the SOAP message and parses the response for fault
            checking and other errors.

            managed_object : Managed Object Reference or Managed
                             Object Name
            **kwargs : Keyword arguments of the call
            """
            # Dynamic handler for VI SDK Calls
            try:
                request_mo = self._request_managed_object_builder(
                    managed_object)
                request = getattr(self.client.service, attr_name)
                response = request(request_mo, **kwargs)
                # To check for the faults that are part of the message body
                # and not returned as Fault object response from the ESX
                # SOAP server
                if hasattr(error_util.FaultCheckers,
                           attr_name.lower() + "_fault_checker"):
                    fault_checker = getattr(
                        error_util.FaultCheckers,
                        attr_name.lower() + "_fault_checker")
                    fault_checker(response)
                return response
            # Catch the VimFaultException that is raised by the fault
            # check of the SOAP response
            except error_util.VimFaultException:
                raise
            except suds.MethodNotFound:
                raise
            except suds.WebFault as excep:
                doc = excep.document
                fault_string = doc.childAtPath("/Envelope/Body/Fault/"
                                               "faultstring").getText()
                detail = doc.childAtPath("/Envelope/Body/Fault/detail")
                fault_list = []
                details = {}
                if detail:
                    for fault in detail.getChildren():
                        fault_list.append(fault.get("type"))
                        for child in fault.getChildren():
                            details[child.name] = child.getText()
                raise error_util.VimFaultException(fault_list, fault_string,
                                                   details)
            except AttributeError as excep:
                raise error_util.VimAttributeError(
                    _("No such SOAP method "
                      "'%s' provided by VI SDK") % (attr_name), excep)
            except (httplib.CannotSendRequest,
                    httplib.ResponseNotReady,
                    httplib.CannotSendHeader) as excep:
                raise error_util.SessionOverLoadException(
                    _("httplib "
                      "error in %s: ") % (attr_name), excep)
            except (urllib2.URLError,
                    urllib2.HTTPError) as excep:
                raise error_util.SessionConnectionException(
                    _("urllib2 "
                      "error in %s: ") % (attr_name), excep)
            except Exception as excep:
                # Socket errors which need special handling for they
                # might be caused by ESX API call overload
                # BUG FIX: the original expression was
                #   (A.find(x) != -1 or B.find(y)) != -1
                # which compares a bool against -1 and is therefore always
                # true, so EVERY unexpected exception was misreported as a
                # SessionOverLoadException.  The parenthesis now closes
                # after the second `!= -1` comparison.
                if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
                        str(excep).find(CONN_ABORT_ERROR) != -1):
                    raise error_util.SessionOverLoadException(
                        _("Socket "
                          "error in %s: ") % (attr_name), excep)
                # Type error that needs special handling for it might be
                # caused by ESX host API call overload
                elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
                    raise error_util.SessionOverLoadException(
                        _("Type "
                          "error in %s: ") % (attr_name), excep)
                else:
                    raise error_util.VimException(
                        _("Exception in %s ") % (attr_name), excep)
        return vim_request_handler

    def _request_managed_object_builder(self, managed_object):
        """Builds the request managed object.

        A plain string is wrapped into a suds Property typed after
        itself; anything else is assumed to already be a managed object
        reference and is passed through unchanged.
        """
        if isinstance(managed_object, str):
            mo = suds.sudsobject.Property(managed_object)
            mo._type = managed_object
        else:
            mo = managed_object
        return mo

    def __repr__(self):
        return "VIM Object"

    def __str__(self):
        return "VIM Object"
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use input() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
import tensorflow.python.platform
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24
# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPU's prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
    """Helper to create summaries for activations.

    Attaches a histogram summary and a sparsity (zero-fraction) scalar
    summary to the given activation tensor.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Strip any 'tower_N/' prefix from the name so multi-GPU training
    # sessions share summary names on tensorboard.
    name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(name + '/activations', x)
    tf.scalar_summary(name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
    """Helper to create a Variable stored on CPU memory.

    Args:
        name: name of the variable
        shape: list of ints
        initializer: initializer for Variable
    Returns:
        Variable Tensor
    """
    # Pin the variable to the CPU so it is shared across GPU towers.
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer)
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal
    distribution. A weight decay is added only if one is specified.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
    Returns:
        Variable Tensor
    """
    var = _variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    # BUG FIX: the docstring promises "if None, no decay", but the original
    # truthiness test (`if wd:`) also silently skipped an explicit wd of
    # 0.0.  Test identity with None so the code matches the contract.
    if wd is not None:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def _generate_image_and_label_batch(image, label, min_queue_examples):
    """Construct a queued batch of images and labels.

    Args:
        image: 3-D Tensor of [IMAGE_SIZE, IMAGE_SIZE, 3] of type.float32.
        label: 1-D Tensor of type.int32
        min_queue_examples: int32, minimum number of samples to retain
            in the queue that provides of batches of examples.
    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # A pool of 16 threads reads 'FLAGS.batch_size' images + labels from
    # the shuffling example queue.
    num_threads = 16
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=FLAGS.batch_size,
        num_threads=num_threads,
        capacity=min_queue_examples + 3 * FLAGS.batch_size,
        min_after_dequeue=min_queue_examples)
    # Display the training images in the visualizer.
    tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [FLAGS.batch_size])
# Record parsing is delegated to cifar10_input.read_cifar10 below.
def distorted_inputs():
    """Construct distorted input for CIFAR training using the Reader ops.

    Raises:
        ValueError: if no data_dir

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # BUG FIX: CIFAR-10 ships FIVE training batches (data_batch_1..5).
    # The original xrange(1, 5) silently dropped data_batch_5.bin,
    # contradicting NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000 (5 x 10000).
    filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                              'data_batch_%d.bin' % i)
                 for i in xrange(1, 6)]
    for f in filenames:
        if not gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)

    # Create a queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)

    # Read examples from files in the filename queue.
    read_input = cifar10_input.read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)

    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Image processing for training the network. Note the many random
    # distortions applied to the image.

    # Randomly crop a [height, width] section of the image.
    distorted_image = tf.image.random_crop(reshaped_image, [height, width])

    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)

    # Because these operations are not commutative, consider randomizing
    # randomize the order their operation.
    distorted_image = tf.image.random_brightness(distorted_image,
                                                 max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image,
                                               lower=0.2, upper=1.8)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(distorted_image)

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples_in_queue)
    print ('Filling queue with %d CIFAR images before starting to train. '
           'This will take a few minutes.' % min_queue_examples)

    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples)
def inputs(eval_data):
    """Construct input for CIFAR evaluation using the Reader ops.

    Args:
        eval_data: bool, indicating if one should use the train or eval data set.

    Raises:
        ValueError: if no data_dir

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')

    if not eval_data:
        # BUG FIX: CIFAR-10 has five training batches (data_batch_1..5);
        # the original xrange(1, 5) dropped data_batch_5.bin and so did
        # not match NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000.
        filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                                  'data_batch_%d.bin' % i)
                     for i in xrange(1, 6)]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    else:
        filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                                  'test_batch.bin')]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

    for f in filenames:
        if not gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)

    # Create a queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)

    # Read examples from files in the filename queue.
    read_input = cifar10_input.read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)

    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Image processing for evaluation.
    # Crop the central [height, width] of the image.
    resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                           width, height)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(resized_image)

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)

    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples)
def inference(images):
    """Build the CIFAR-10 model.

    Architecture: conv1 -> pool1 -> norm1 -> conv2 -> norm2 -> pool2
    -> local3 (fc) -> local4 (fc) -> softmax_linear.

    Args:
        images: Images returned from distorted_inputs() or inputs().
    Returns:
        Logits.
    """
    # We instantiate all variables using tf.get_variable() instead of
    # tf.Variable() in order to share variables across multiple GPU training runs.
    # If we only ran this model on a single GPU, we could simplify this function
    # by replacing all instances of tf.get_variable() with tf.Variable().
    #
    # conv1
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                             stddev=1e-4, wd=0.0)
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope.name)
        _activation_summary(conv1)

    # pool1: 3x3 max pooling with stride 2
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool1')
    # norm1: local response normalization
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm1')

    # conv2
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64],
                                             stddev=1e-4, wd=0.0)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope.name)
        _activation_summary(conv2)

    # norm2 (note: normalization before pooling here, reversed vs conv1)
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm2')
    # pool2
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1], padding='SAME', name='pool2')

    # local3: first fully-connected layer
    with tf.variable_scope('local3') as scope:
        # Move everything into depth so we can perform a single matrix multiply.
        dim = 1
        for d in pool2.get_shape()[1:].as_list():
            dim *= d
        reshape = tf.reshape(pool2, [FLAGS.batch_size, dim])

        weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu_layer(reshape, weights, biases, name=scope.name)
        _activation_summary(local3)

    # local4: second fully-connected layer
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                              stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu_layer(local3, weights, biases, name=scope.name)
        _activation_summary(local4)

    # softmax, i.e. softmax(WX + b) -- returns unnormalized logits
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                              stddev=1/192.0, wd=0.0)
        biases = _variable_on_cpu('biases', [NUM_CLASSES],
                                  tf.constant_initializer(0.0))
        softmax_linear = tf.nn.xw_plus_b(local4, weights, biases, name=scope.name)
        _activation_summary(softmax_linear)

    return softmax_linear
def loss(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".

    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]
    Returns:
        Loss tensor of type float.
    """
    # Reshape the labels into a dense one-hot Tensor of
    # shape [batch_size, NUM_CLASSES].
    sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
    indices = tf.reshape(tf.range(FLAGS.batch_size), [FLAGS.batch_size, 1])
    concated = tf.concat(1, [indices, sparse_labels])
    dense_labels = tf.sparse_to_dense(concated,
                                      [FLAGS.batch_size, NUM_CLASSES],
                                      1.0, 0.0)

    # Calculate the average cross entropy loss across the batch.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits, dense_labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss) accumulated in the 'losses' collection.
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Track an exponential moving average of every individual loss plus
    # the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    all_losses = losses + [total_loss]
    loss_averages_op = loss_averages.apply(all_losses)

    for l in all_losses:
        # The raw value is published under a '(raw)'-suffixed name; the
        # smoothed moving average keeps the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
def train(total_loss, global_step):
    """Train CIFAR-10 model.

    Create an optimizer and apply to all trainable variables. Add moving
    average for all trainable variables.

    Args:
        total_loss: Total loss from loss().
        global_step: Integer Variable counting the number of training steps
            processed.
    Returns:
        train_op: op for training.
    """
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    tf.scalar_summary('learning_rate', lr)

    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)

    # Compute gradients (only after the loss averages have been updated).
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)

    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)

    # Add histograms for gradients.
    for grad, var in grads:
        # BUG FIX: compute_gradients yields None for untouched variables;
        # the original `if grad:` evaluated a Tensor's truth value, which
        # raises TypeError for real gradient tensors.  Test against None.
        if grad is not None:
            tf.histogram_summary(var.op.name + '/gradients', grad)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # The returned op trains only once gradients and averages are applied.
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op
def maybe_download_and_extract():
    """Download and extract the tarball from Alex's website.

    Downloads DATA_URL into FLAGS.data_dir if the archive is not already
    present, then extracts it into the same directory.
    """
    print("Maybe download")
    dest_directory = FLAGS.data_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # In-place download progress meter for urlretrieve.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
                                                 reporthook=_progress)
        print()
        statinfo = os.stat(filepath)
        # BUG FIX: corrected the misspelled "Succesfully" in the user-facing
        # status message.
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
| |
# Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
from __future__ import division, print_function, absolute_import
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy
from numpy import Inf
__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
'IntegrationWarning']
error = _quadpack.error
class IntegrationWarning(UserWarning):
    """Warning issued when the integration routines detect problems."""
def quad_explain(output=sys.stdout):
    """
    Print extra information about integrate.quad() parameters and returns.

    Parameters
    ----------
    output : instance with "write" method, optional
        Where the documentation of `quad` is written via ``output.write()``.
        Defaults to ``sys.stdout``.

    Returns
    -------
    None
    """
    doc = quad.__doc__
    output.write(doc)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
         limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
         limlst=50):
    """
    Compute a definite integral.

    Integrate func from `a` to `b` (possibly infinite interval) using a
    technique from the Fortran library QUADPACK.

    Parameters
    ----------
    func : function
        A Python function or method to integrate. If `func` takes many
        arguments, it is integrated along the axis corresponding to the
        first argument.
        If the user desires improved integration performance, then f may
        instead be a ``ctypes`` function of the form:

            f(int n, double args[n]),

        where ``args`` is an array of function arguments and ``n`` is the
        length of ``args``. ``f.argtypes`` should be set to
        ``(c_int, c_double)``, and ``f.restype`` should be ``(c_double,)``.
    a : float
        Lower limit of integration (use -numpy.inf for -infinity).
    b : float
        Upper limit of integration (use numpy.inf for +infinity).
    args : tuple, optional
        Extra arguments to pass to `func`.
    full_output : int, optional
        Non-zero to return a dictionary of integration information.
        If non-zero, warning messages are also suppressed and the
        message is appended to the output tuple.

    Returns
    -------
    y : float
        The integral of func from `a` to `b`.
    abserr : float
        An estimate of the absolute error in the result.
    infodict : dict
        A dictionary containing additional information.
        Run scipy.integrate.quad_explain() for more information.
    message :
        A convergence message.
    explain :
        Appended only with 'cos' or 'sin' weighting and infinite
        integration limits, it contains an explanation of the codes in
        infodict['ierlst']

    Other Parameters
    ----------------
    epsabs : float or int, optional
        Absolute error tolerance.
    epsrel : float or int, optional
        Relative error tolerance.
    limit : float or int, optional
        An upper bound on the number of subintervals used in the adaptive
        algorithm.
    points : (sequence of floats,ints), optional
        A sequence of break points in the bounded integration interval
        where local difficulties of the integrand may occur (e.g.,
        singularities, discontinuities). The sequence does not have
        to be sorted.
    weight : float or int, optional
        String indicating weighting function. Full explanation for this
        and the remaining arguments can be found below.
    wvar : optional
        Variables for use with weighting functions.
    wopts : optional
        Optional input for reusing Chebyshev moments.
    maxp1 : float or int, optional
        An upper bound on the number of Chebyshev moments.
    limlst : int, optional
        Upper bound on the number of cycles (>=3) for use with a sinusoidal
        weighting and an infinite end-point.

    See Also
    --------
    dblquad : double integral
    tplquad : triple integral
    nquad : n-dimensional integrals (uses `quad` recursively)
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature
    odeint : ODE integrator
    ode : ODE integrator
    simps : integrator for sampled data
    romb : integrator for sampled data
    scipy.special : for coefficients and roots of orthogonal polynomials

    Notes
    -----
    **Extra information for quad() inputs and outputs**

    If full_output is non-zero, then the third output argument
    (infodict) is a dictionary with entries as tabulated below. For
    infinite limits, the range is transformed to (0,1) and the
    optional outputs are given with respect to this transformed range.
    Let M be the input argument limit and let K be infodict['last'].
    The entries are:

    'neval'
        The number of function evaluations.
    'last'
        The number, K, of subintervals produced in the subdivision process.
    'alist'
        A rank-1 array of length M, the first K elements of which are the
        left end points of the subintervals in the partition of the
        integration range.
    'blist'
        A rank-1 array of length M, the first K elements of which are the
        right end points of the subintervals.
    'rlist'
        A rank-1 array of length M, the first K elements of which are the
        integral approximations on the subintervals.
    'elist'
        A rank-1 array of length M, the first K elements of which are the
        moduli of the absolute error estimates on the subintervals.
    'iord'
        A rank-1 integer array of length M, the first L elements of
        which are pointers to the error estimates over the subintervals
        with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
        sequence ``infodict['iord']`` and let E be the sequence
        ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
        decreasing sequence.

    If the input argument points is provided (i.e. it is not None),
    the following additional outputs are placed in the output
    dictionary. Assume the points sequence is of length P.

    'pts'
        A rank-1 array of length P+2 containing the integration limits
        and the break points of the intervals in ascending order.
        This is an array giving the subintervals over which integration
        will occur.
    'level'
        A rank-1 integer array of length M (=limit), containing the
        subdivision levels of the subintervals, i.e., if (aa,bb) is a
        subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
        are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
        if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
    'ndin'
        A rank-1 integer array of length P+2. After the first integration
        over the intervals (pts[1], pts[2]), the error estimates over some
        of the intervals may have been increased artificially in order to
        put their subdivision forward. This array has ones in slots
        corresponding to the subintervals for which this happens.

    **Weighting the integrand**

    The input variables, *weight* and *wvar*, are used to weight the
    integrand by a select list of functions. Different integration
    methods are used to compute the integral with these weighting
    functions. The possible values of weight and the corresponding
    weighting functions are.

    ========== =================================== =====================
    ``weight`` Weight function used                ``wvar``
    ========== =================================== =====================
    'cos'      cos(w*x)                            wvar = w
    'sin'      sin(w*x)                            wvar = w
    'alg'      g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
    'alg-loga' g(x)*log(x-a)                       wvar = (alpha, beta)
    'alg-logb' g(x)*log(b-x)                       wvar = (alpha, beta)
    'alg-log'  g(x)*log(x-a)*log(b-x)              wvar = (alpha, beta)
    'cauchy'   1/(x-c)                             wvar = c
    ========== =================================== =====================

    wvar holds the parameter w, (alpha, beta), or c depending on the weight
    selected. In these expressions, a and b are the integration limits.

    For the 'cos' and 'sin' weighting, additional inputs and outputs are
    available.

    For finite integration limits, the integration is performed using a
    Clenshaw-Curtis method which uses Chebyshev moments. For repeated
    calculations, these moments are saved in the output dictionary:

    'momcom'
        The maximum level of Chebyshev moments that have been computed,
        i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
        computed for intervals of length ``|b-a| * 2**(-l)``,
        ``l=0,1,...,M_c``.
    'nnlog'
        A rank-1 integer array of length M(=limit), containing the
        subdivision levels of the subintervals, i.e., an element of this
        array is equal to l if the corresponding subinterval is
        ``|b-a|* 2**(-l)``.
    'chebmo'
        A rank-2 array of shape (25, maxp1) containing the computed
        Chebyshev moments. These can be passed on to an integration
        over the same interval by passing this array as the second
        element of the sequence wopts and passing infodict['momcom'] as
        the first element.

    If one of the integration limits is infinite, then a Fourier integral is
    computed (assuming w neq 0). If full_output is 1 and a numerical error
    is encountered, besides the error message attached to the output tuple,
    a dictionary is also appended to the output tuple which translates the
    error codes in the array ``info['ierlst']`` to English messages. The
    output information dictionary contains the following entries instead of
    'last', 'alist', 'blist', 'rlist', and 'elist':

    'lst'
        The number of subintervals needed for the integration (call it ``K_f``).
    'rslst'
        A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
        contain the integral contribution over the interval
        ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
        and ``k=1,2,...,K_f``.
    'erlst'
        A rank-1 array of length ``M_f`` containing the error estimate
        corresponding to the interval in the same position in
        ``infodict['rslist']``.
    'ierlst'
        A rank-1 integer array of length ``M_f`` containing an error flag
        corresponding to the interval in the same position in
        ``infodict['rslist']``. See the explanation dictionary (last entry
        in the output tuple) for the meaning of the codes.

    Examples
    --------
    Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result

    >>> from scipy import integrate
    >>> x2 = lambda x: x**2
    >>> integrate.quad(x2, 0, 4)
    (21.333333333333332, 2.3684757858670003e-13)
    >>> print(4**3 / 3.)  # analytical result
    21.3333333333

    Calculate :math:`\\int^\\infty_0 e^{-x} dx`

    >>> invexp = lambda x: np.exp(-x)
    >>> integrate.quad(invexp, 0, np.inf)
    (1.0, 5.842605999138044e-11)

    >>> f = lambda x,a : a*x
    >>> y, err = integrate.quad(f, 0, 1, args=(1,))
    >>> y
    0.5
    >>> y, err = integrate.quad(f, 0, 1, args=(3,))
    >>> y
    1.5

    Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
    y parameter as 1::

        testlib.c =>
            double func(int n, double args[n]){
                return args[0]*args[0] + args[1]*args[1];}
        compile to library testlib.*

    ::

        from scipy import integrate
        import ctypes
        lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
        lib.func.restype = ctypes.c_double
        lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
        integrate.quad(lib.func,0,1,(1))
        #(1.3333333333333333, 1.4802973661668752e-14)
        print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
        # 1.3333333333333333
    """
    if not isinstance(args, tuple):
        args = (args,)
    if (weight is None):
        retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
                       points)
    else:
        retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
                              limlst, limit, maxp1, weight, wvar, wopts)
    ier = retval[-1]
    if ier == 0:
        # Normal return: strip the error flag from the QUADPACK tuple.
        return retval[:-1]
    msgs = {80: "A Python error occurred possibly while calling the function.",
             1: "The maximum number of subdivisions (%d) has been achieved.\n  If increasing the limit yields no improvement it is advised to analyze \n  the integrand in order to determine the difficulties.  If the position of a \n  local difficulty can be determined (singularity, discontinuity) one will \n  probably gain from splitting up the interval and calling the integrator \n  on the subranges.  Perhaps a special-purpose integrator should be used." % limit,
             2: "The occurrence of roundoff error is detected, which prevents \n  the requested tolerance from being achieved.  The error may be \n  underestimated.",
             3: "Extremely bad integrand behavior occurs at some points of the\n  integration interval.",
             4: "The algorithm does not converge.  Roundoff error is detected\n  in the extrapolation table.  It is assumed that the requested tolerance\n  cannot be achieved, and that the returned result (if full_output = 1) is \n  the best which can be obtained.",
             5: "The integral is probably divergent, or slowly convergent.",
             6: "The input is invalid.",
             7: "Abnormal termination of the routine.  The estimates for result\n  and error are less reliable.  It is assumed that the requested accuracy\n  has not been achieved.",
            'unknown': "Unknown error."}
    if weight in ['cos','sin'] and (b == Inf or a == -Inf):
        msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n  of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n  *pi/abs(omega), for k = 1, 2, ..., lst.  One can allow more cycles by increasing the value of limlst.  Look at info['ierlst'] with full_output=1."
        msgs[4] = "The extrapolation table constructed for convergence acceleration\n  of the series formed by the integral contributions over the cycles, \n  does not converge to within the requested accuracy.  Look at \n  info['ierlst'] with full_output=1."
        msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n  Location and type of the difficulty involved can be determined from \n  the vector info['ierlist'] obtained with full_output=1."
        explain = {1: "The maximum number of subdivisions (= limit) has been \n  achieved on this cycle.",
                   2: "The occurrence of roundoff error is detected and prevents\n  the tolerance imposed on this cycle from being achieved.",
                   3: "Extremely bad integrand behavior occurs at some points of\n  this cycle.",
                   4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle.  It is assumed that the result on this interval is the best which can be obtained.",
                   5: "The integral over this cycle is probably divergent or slowly convergent."}
    try:
        msg = msgs[ier]
    except KeyError:
        msg = msgs['unknown']
    if ier in [1,2,3,4,5,7]:
        if full_output:
            # BUG FIX: this condition previously tested ``a == Inf``, but the
            # lower limit is infinite only when ``a == -Inf``.  It must match
            # the condition above that defines ``explain``; otherwise the
            # Fourier-integral path either omits the explanation dict or
            # raises NameError on ``explain``.
            if weight in ['cos','sin'] and (b == Inf or a == -Inf):
                return retval[:-1] + (msg, explain)
            else:
                return retval[:-1] + (msg,)
        else:
            warnings.warn(msg, IntegrationWarning)
            return retval[:-1]
    else:
        raise ValueError(msg)
def _quad(func, a, b, args, full_output, epsabs, epsrel, limit, points):
    """Dispatch an unweighted integral to the matching QUADPACK routine.

    Finite intervals go to _qagse (or _qagpe when break points are given);
    half-infinite and doubly infinite intervals go to _qagie after being
    encoded in ``infbounds``/``bound``.
    """
    upper_is_inf = (b == Inf)
    lower_is_inf = (a == -Inf)
    if not upper_is_inf and not lower_is_inf:
        infbounds, bound = 0, 0  # standard integration, bound unused
    elif upper_is_inf and not lower_is_inf:
        infbounds, bound = 1, a
    elif upper_is_inf and lower_is_inf:
        infbounds, bound = 2, 0  # bound is ignored in this case
    elif not upper_is_inf and lower_is_inf:
        infbounds, bound = -1, b
    else:
        raise RuntimeError("Infinity comparisons don't work for you.")
    if points is None:
        if infbounds == 0:
            return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
        return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
    if infbounds != 0:
        raise ValueError("Infinity inputs cannot be used with break points.")
    # QUADPACK expects room for the two end points after the user's points.
    nl = len(points)
    the_points = numpy.zeros((nl + 2,), float)
    the_points[:nl] = points
    return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
    """Dispatch a weighted integral to the matching QUADPACK routine.

    'cos'/'sin' weights use the oscillatory integrators (_qawoe for finite
    intervals, _qawfe for half-infinite Fourier integrals); 'alg*' weights
    use _qawse; 'cauchy' uses _qawce.  Returns the raw QUADPACK result
    tuple, whose last element is the error flag consumed by `quad`.
    """
    if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
        raise ValueError("%s not a recognized weighting function." % weight)
    # Integer codes QUADPACK uses to select the weight; note 'cos'/'sin'
    # and the 'alg*' families use separate code spaces.
    strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
    if weight in ['cos','sin']:
        integr = strdict[weight]
        if (b != Inf and a != -Inf):  # finite limits
            if wopts is None:         # no precomputed chebyshev moments
                return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
                                        epsabs, epsrel, limit, maxp1,1)
            else:                     # precomputed chebyshev moments
                momcom = wopts[0]
                chebcom = wopts[1]
                return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
                                        epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
        elif (b == Inf and a != -Inf):
            # Half-infinite Fourier integral over [a, +inf).
            return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
                                    epsabs,limlst,limit,maxp1)
        elif (b != Inf and a == -Inf):  # remap function and interval
            # Substitute y = -x to map (-inf, b] onto [-b, +inf).
            # cos is even, so cos(w*x) is unchanged; sin is odd, so the
            # integrand is negated to compensate.
            if weight == 'cos':
                def thefunc(x,*myargs):
                    y = -x
                    func = myargs[0]
                    myargs = (y,) + myargs[1:]
                    return func(*myargs)
            else:
                def thefunc(x,*myargs):
                    y = -x
                    func = myargs[0]
                    myargs = (y,) + myargs[1:]
                    return -func(*myargs)
            # Thread the original func through args so the closure can
            # retrieve it as myargs[0].
            args = (func,) + args
            return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
                                    full_output, epsabs, limlst, limit, maxp1)
        else:
            raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
    else:
        # Algebraic/logarithmic and Cauchy weights require finite limits.
        if a in [-Inf,Inf] or b in [-Inf,Inf]:
            raise ValueError("Cannot integrate with this weight over an infinite interval.")
        if weight[:3] == 'alg':
            integr = strdict[weight]
            return _quadpack._qawse(func, a, b, wvar, integr, args,
                                    full_output, epsabs, epsrel, limit)
        else:  # weight == 'cauchy'
            return _quadpack._qawce(func, a, b, wvar, args, full_output,
                                    epsabs, epsrel, limit)
def _infunc(x, func, gfun, hfun, more_args):
    """Inner integrand for `dblquad`: integrate func over y at fixed x."""
    lower = gfun(x)
    upper = hfun(x)
    return quad(func, lower, upper, args=(x,) + more_args)[0]
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    """
    Compute a double integral.

    Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
    and ``y = gfun(x)..hfun(x)``.

    Parameters
    ----------
    func : callable
        A Python function or method of at least two variables: y must be the
        first argument and x the second argument.
    a, b : float
        The limits of integration in x: `a` < `b`
    gfun : callable
        The lower boundary curve in y, a function of the single float x
        returning a float; a lambda function is often convenient here.
    hfun : callable
        The upper boundary curve in y (same requirements as `gfun`).
    args : sequence, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the inner 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See also
    --------
    quad : single integral
    tplquad : triple integral
    nquad : N-dimensional integrals
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature
    odeint : ODE integrator
    ode : ODE integrator
    simps : integrator for sampled data
    romb : integrator for sampled data
    scipy.special : for coefficients and roots of orthogonal polynomials
    """
    # The outer quad integrates _infunc over x; _infunc performs the inner
    # y-integral at each x using the curves gfun/hfun.
    inner_args = (func, gfun, hfun, args)
    return quad(_infunc, a, b, inner_args, epsabs=epsabs, epsrel=epsrel)
def _infunc2(y, x, func, qfun, rfun, more_args):
    """Innermost integrand for `tplquad`: integrate func over z at fixed (x, y)."""
    z_lower = qfun(x, y)
    z_upper = rfun(x, y)
    return quad(func, z_lower, z_upper, args=(y, x) + more_args)[0]
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
            epsrel=1.49e-8):
    """
    Compute a triple (definite) integral.

    Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
    ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.

    Parameters
    ----------
    func : function
        A Python function or method of at least three variables in the
        order (z, y, x).
    a, b : float
        The limits of integration in x: `a` < `b`
    gfun : function
        The lower boundary curve in y, a function of the single float x
        returning a float; a lambda function is often convenient here.
    hfun : function
        The upper boundary curve in y (same requirements as `gfun`).
    qfun : function
        The lower boundary surface in z. It must be a function that takes
        two floats in the order (x, y) and returns a float.
    rfun : function
        The upper boundary surface in z. (Same requirements as `qfun`.)
    args : tuple, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the innermost 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad: Adaptive quadrature using QUADPACK
    quadrature: Adaptive Gaussian quadrature
    fixed_quad: Fixed-order Gaussian quadrature
    dblquad: Double integrals
    nquad : N-dimensional integrals
    romb: Integrators for sampled data
    simps: Integrators for sampled data
    ode: ODE integrators
    odeint: ODE integrators
    scipy.special: For coefficients and roots of orthogonal polynomials
    """
    # Reduce to a double integral over (x, y); _infunc2 performs the
    # innermost z-integral at each point using the surfaces qfun/rfun.
    inner_args = (func, qfun, rfun, args)
    return dblquad(_infunc2, a, b, gfun, hfun, inner_args,
                   epsabs=epsabs, epsrel=epsrel)
def nquad(func, ranges, args=None, opts=None):
    """
    Integration over multiple variables.

    Wraps `quad` to enable integration over multiple variables.
    Various options allow improved integration of discontinuous functions, as
    well as the use of weighted integration, and generally finer control of the
    integration process.

    Parameters
    ----------
    func : callable
        The function to be integrated. Has arguments of ``x0, ... xn``,
        ``t0, tm``, where integration is carried out over ``x0, ... xn``, which
        must be floats. Function signature should be
        ``func(x0, x1, ..., xn, t0, t1, ..., tm)``. Integration is carried out
        in order. That is, integration over ``x0`` is the innermost integral,
        and ``xn`` is the outermost.
        If performance is a concern, this function may be a ctypes function of
        the form::

            f(int n, double args[n])

        where ``n`` is the number of extra parameters and args is an array
        of doubles of the additional parameters. This function may then
        be compiled to a dynamic/shared library then imported through
        ``ctypes``, setting the function's argtypes to ``(c_int, c_double)``,
        and the function's restype to ``(c_double)``. Its pointer may then be
        passed into `nquad` normally.
        This allows the underlying Fortran library to evaluate the function in
        the innermost integration calls without callbacks to Python, and also
        speeds up the evaluation of the function itself.
    ranges : iterable object
        Each element of ranges may be either a sequence of 2 numbers, or else
        a callable that returns such a sequence. ``ranges[0]`` corresponds to
        integration over x0, and so on. If an element of ranges is a callable,
        then it will be called with all of the integration arguments available.
        e.g. if ``func = f(x0, x1, x2)``, then ``ranges[0]`` may be defined as
        either ``(a, b)`` or else as ``(a, b) = range0(x1, x2)``.
    args : iterable object, optional
        Additional arguments ``t0, ..., tn``, required by `func`.
    opts : iterable object or dict, optional
        Options to be passed to `quad`. May be empty, a dict, or
        a sequence of dicts or functions that return a dict. If empty, the
        default options from scipy.integrate.quad are used. If a dict, the
        same options are used for all levels of integration. If a sequence,
        then each element of the sequence corresponds to a particular
        integration. e.g. opts[0] corresponds to integration over x0, and
        so on. The available options together with their default values are:

          - epsabs = 1.49e-08
          - epsrel = 1.49e-08
          - limit  = 50
          - points = None
          - weight = None
          - wvar   = None
          - wopts  = None

        The ``full_output`` option from `quad` is unavailable, due to the
        complexity of handling the large amount of data such an option would
        return for this kind of nested integration. For more information on
        these options, see `quad` and `quad_explain`.

    Returns
    -------
    result : float
        The result of the integration.
    abserr : float
        The maximum of the estimates of the absolute error in the various
        integration results.

    See Also
    --------
    quad : 1-dimensional numerical integration
    dblquad, tplquad : double and triple integrals
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature

    Examples
    --------
    >>> from scipy import integrate
    >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
    ...                                 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
    >>> points = [[lambda x1,x2,x3 : 0.2*x3 + 0.5 + 0.25*x1], [], [], []]
    >>> def opts0(*args, **kwargs):
    ...     return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
    >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
    ...                 opts=[opts0,{},{},{}])
    (1.5267454070738633, 2.9437360001402324e-14)

    >>> scale = .1
    >>> def func2(x0, x1, x2, x3, t0, t1):
    ...     return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0)
    >>> def lim0(x1, x2, x3, t0, t1):
    ...     return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
    ...             scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
    >>> def lim1(x2, x3, t0, t1):
    ...     return [scale * (t0*x2 + t1*x3) - 1,
    ...             scale * (t0*x2 + t1*x3) + 1]
    >>> def lim2(x3, t0, t1):
    ...     return [scale * (x3 + t0**2*t1**3) - 1,
    ...             scale * (x3 + t0**2*t1**3) + 1]
    >>> def lim3(t0, t1):
    ...     return [scale * (t0+t1) - 1, scale * (t0+t1) + 1]
    >>> def opts0(x1, x2, x3, t0, t1):
    ...     return {'points' : [t0 - t1*x1]}
    >>> def opts1(x2, x3, t0, t1):
    ...     return {}
    >>> def opts2(x3, t0, t1):
    ...     return {}
    >>> def opts3(t0, t1):
    ...     return {}
    >>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0),
    ...                 opts=[opts0, opts1, opts2, opts3])
    (25.066666666666666, 2.7829590483937256e-13)
    """
    depth = len(ranges)
    # Normalize every range and option entry to a callable so _NQuad can
    # treat constants and callables uniformly.
    range_fns = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
    if args is None:
        args = ()
    if opts is None:
        opts = [{}] * depth
    if isinstance(opts, dict):
        opt_fns = [_OptFunc(opts)] * depth
    else:
        opt_fns = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
    return _NQuad(func, range_fns, opt_fns).integrate(*args)
class _RangeFunc(object):
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc(object):
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad(object):
    """State holder for recursive n-dimensional integration via `quad`.

    Tracks the worst absolute-error estimate seen across all nested calls.
    """

    def __init__(self, func, ranges, opts):
        self.abserr = 0
        self.func = func
        self.ranges = ranges
        self.opts = opts
        self.maxdepth = len(ranges)

    def integrate(self, *args, **kwargs):
        """Integrate one variable, recursing for the remaining ones.

        `depth` 0 is the outermost integral; ranges/opts are indexed from
        the end so that deeper levels consume earlier entries.
        """
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise ValueError('unexpected kwargs')
        ind = -(depth + 1)
        low, high = self.ranges[ind](*args)
        opt = dict(self.opts[ind](*args))
        if 'points' in opt:
            # quad rejects break points outside the interval; drop them.
            opt['points'] = [pt for pt in opt['points'] if low <= pt <= high]
        innermost = (depth + 1 == self.maxdepth)
        integrand = self.func if innermost else partial(self.integrate,
                                                        depth=depth + 1)
        value, abserr = quad(integrand, low, high, args=args, **opt)
        self.abserr = max(self.abserr, abserr)
        if depth > 0:
            return value
        # Outermost call: report the value with the worst error estimate.
        return value, self.abserr
| |
#!/usr/bin/env python
import os
import sys
import sqlite3
from collections import defaultdict, namedtuple
import json
import subprocess
import numpy as np
from scipy.stats import mode
import pysam
from geminicassandra.annotations import annotations_in_region, guess_contig_naming
def _add_column(cursor, col_name, col_type):
    """Add one column to the variants table, warning (not failing) if it
    already exists."""
    try:
        alter_qry = "ALTER TABLE variants ADD COLUMN " \
                    + col_name \
                    + " " \
                    + col_type \
                    + " " \
                    + "DEFAULT NULL"
        cursor.execute(alter_qry)
    except sqlite3.OperationalError:
        # ALTER TABLE fails when the column exists; keep going and let the
        # subsequent UPDATEs overwrite the old values.
        sys.stderr.write("WARNING: Column \"("
                         + col_name
                         + ")\" already exists in variants table. Overwriting values.\n")


def add_requested_columns(args, update_cursor, col_names, col_types=None):
    """
    Attempt to add new, user-defined columns to the
    variants table. Warn if the column already exists.
    """
    if args.anno_type in ["count", "boolean"]:
        # count/boolean annotations always produce a single integer column.
        _add_column(update_cursor, col_names[0], "integer")
    elif args.anno_type == "extract":
        for col_name, col_type in zip(col_names, col_types):
            _add_column(update_cursor, col_name, col_type)
    else:
        sys.exit("Unknown annotation type: %s\n" % args.anno_type)
def _annotate_variants(args, conn, get_val_fn, col_names=None, col_types=None, col_ops=None):
"""Generalized annotation of variants with a new column.
get_val_fn takes a list of annotations in a region and returns
the value for that region to update the database with.
Separates selection and identification of values from update,
to avoid concurrent database access errors from sqlite3, especially on
NFS systems. The retained to_update list is small, but batching
could help if memory issues emerge.
"""
# For each, use Tabix to detect overlaps with the user-defined
# annotation file. Update the variant row with T/F if overlaps found.
anno = pysam.Tabixfile(args.anno_file)
naming = guess_contig_naming(anno)
select_cursor = conn.cursor()
update_cursor = conn.cursor()
add_requested_columns(args, select_cursor, col_names, col_types)
last_id = 0
current_id = 0
total = 0
CHUNK_SIZE = 100000
to_update = []
select_cursor.execute('''SELECT chrom, start, end, variant_id FROM variants''')
while True:
for row in select_cursor.fetchmany(CHUNK_SIZE):
# update_data starts out as a list of the values that should
# be used to populate the new columns for the current row.
# Prefer no pysam parsing over tuple parsing to work around bug in pysam 0.8.0
# https://github.com/pysam-developers/pysam/pull/44
update_data = get_val_fn(annotations_in_region(row, anno, None, naming))
#update_data = get_val_fn(annotations_in_region(row, anno, "tuple", naming))
# were there any hits for this row?
if len(update_data) > 0:
# we add the primary key to update_data for the
# where clause in the SQL UPDATE statement.
update_data.append(str(row["variant_id"]))
to_update.append(tuple(update_data))
current_id = row["variant_id"]
if current_id <= last_id:
break
else:
update_cursor.execute("BEGIN TRANSACTION")
_update_variants(to_update, col_names, update_cursor)
update_cursor.execute("END TRANSACTION")
total += len(to_update)
print "updated", total, "variants"
last_id = current_id
to_update = []
def _update_variants(to_update, col_names, cursor):
update_qry = "UPDATE variants SET "
update_cols = ",".join(col_name + " = ?" for col_name in col_names)
update_qry += update_cols
update_qry += " WHERE variant_id = ?"
cursor.executemany(update_qry, to_update)
def annotate_variants_bool(args, conn, col_names):
    """
    Populate a new, user-defined column in the variants
    table with a BOOLEAN indicating whether or not
    overlaps were detected between the variant and the
    annotation file.
    """
    def has_hit(hits):
        # [1] as soon as a single overlap exists; [0] otherwise.  Only the
        # first element of the iterator is ever consumed.
        for _ in hits:
            return [1]
        return [0]
    return _annotate_variants(args, conn, has_hit, col_names)
def annotate_variants_count(args, conn, col_names):
    """
    Populate a new, user-defined column in the variants
    table with an INTEGER indicating the count of overlaps
    between the variant and the annotation file.
    """
    def get_hit_count(hits):
        # Consume the iterator and report how many overlaps it yielded.
        return [sum(1 for _ in hits)]
    return _annotate_variants(args, conn, get_hit_count, col_names)
def annotate_variants_extract(args, conn, col_names, col_types, col_ops, col_idxs):
    """
    Populate a new, user-defined column in the variants
    table based on the value(s) from a specific column.
    in the annotation file.
    """
    def _map_list_types(hit_list, col_type):
        # Convert raw string hits to the requested numeric type; exits the
        # program on a non-numeric value.  NOTE(review): returns None for
        # any col_type other than "int"/"float" — callers only pass numeric
        # ops through here, but confirm before reusing.
        try:
            if col_type == "int":
                return [int(h) for h in hit_list]
            elif col_type == "float":
                return [float(h) for h in hit_list]
        except ValueError:
            sys.exit('Non-numeric value found in annotation file: %s\n' % (','.join(hit_list)))
    def summarize_hits(hits):
        # Reduce all annotation hits for one variant into one value per
        # requested output column, according to col_ops.
        hits = list(hits)
        if len(hits) == 0:
            return []
        # hit_list maps output-column position -> list of raw values pulled
        # from the requested annotation columns (col_idxs is 1-based).
        hit_list = defaultdict(list)
        for hit in hits:
            if isinstance(hit, basestring):  # Python 2: raw tab-delimited line
                hit = hit.split("\t")
            try:
                for idx, col_idx in enumerate(col_idxs):
                    hit_list[idx].append(hit[int(col_idx) - 1])
            except IndexError:
                sys.exit("EXITING: Column " + args.col_extracts + " exceeds "
                         "the number of columns in your "
                         "annotation file.\n")
        vals = []
        for idx, op in enumerate(col_ops):
            # more than one overlap, must summarize
            if op == "mean":
                val = np.average(_map_list_types(hit_list[idx], col_types[idx]))
            elif op == 'list':
                val = ",".join(hit_list[idx])
            elif op == 'uniq_list':
                # NOTE(review): set iteration order is arbitrary, so the
                # uniq_list value ordering is not deterministic.
                val = ",".join(set(hit_list[idx]))
            elif op == 'median':
                val = np.median(_map_list_types(hit_list[idx], col_types[idx]))
            elif op == 'min':
                val = np.min(_map_list_types(hit_list[idx], col_types[idx]))
            elif op == 'max':
                val = np.max(_map_list_types(hit_list[idx], col_types[idx]))
            elif op == 'mode':
                # scipy.stats.mode returns (mode_array, count_array); take
                # the first modal value.  Indexing depends on scipy version.
                val = mode(_map_list_types(hit_list[idx], col_types[idx]))[0][0]
            elif op == 'first':
                val = hit_list[idx][0]
            elif op == 'last':
                val = hit_list[idx][-1]
            else:
                sys.exit("EXITING: Operation (-o) \"" + op + "\" not recognized.\n")
            # Coerce the summarized value back to the declared column type;
            # an empty value becomes NULL rather than an error.
            if col_types[idx] == "int":
                try:
                    vals.append(int(val))
                except ValueError:
                    if not val:
                        vals.append(None)
                    else:
                        sys.exit('Non-integer value found in annotation file: %s\n' % (val))
            elif col_types[idx] == "float":
                try:
                    vals.append(float(val))
                except ValueError:
                    if not val:
                        vals.append(None)
                    else:
                        sys.exit('Non-float value found in annotation file: %s\n' % (val))
            else:
                vals.append(val)
        return vals
    return _annotate_variants(args, conn, summarize_hits,
                              col_names, col_types, col_ops)
def annotate(parser, args):
    """Entry point for the ``annotate`` tool.

    Validates the command-line arguments, opens the variants database and
    dispatches to the boolean / count / extract annotation routines, then
    (re)builds an index on each newly created column.

    Args:
        parser: argparse parser, used only to print help when no database
            is given (may be ``None`` when called programmatically).
        args: Parsed arguments providing ``db``, ``anno_file``,
            ``anno_type``, ``col_names``, ``col_operations``, ``col_types``
            and ``col_extracts`` attributes.

    Raises:
        SystemExit: On any validation failure (via ``sys.exit``).
    """
    def _validate_args(args):
        # boolean/count modes take a single column name and no extract options.
        if (args.col_operations or args.col_types or args.col_extracts):
            sys.exit('EXITING: You may only specify a column name (-c) when '
                     'using \"-a boolean\" or \"-a count\".\n')
        col_names = args.col_names.split(',')
        if len(col_names) > 1:
            sys.exit('EXITING: You may only specify a single column name (-c) '
                     'when using \"-a boolean\" or \"-a count\".\n')
        return col_names

    def _validate_extract_args(args):
        # extract mode requires parallel lists of names, types, ops and columns.
        col_ops = args.col_operations.split(',')
        col_names = args.col_names.split(',')
        col_types = args.col_types.split(',')
        col_idxs = args.col_extracts.split(',')
        supported_types = ['text', 'float', 'integer']
        for col_type in col_types:
            if col_type not in supported_types:
                sys.exit('EXITING: Column type [%s] not supported.\n' %
                         (col_type))
        supported_ops = ['mean', 'median', 'mode', 'min', 'max', 'first',
                         'last', 'list', 'uniq_list']
        for col_op in col_ops:
            if col_op not in supported_ops:
                sys.exit('EXITING: Column operation [%s] not supported.\n' %
                         (col_op))
        if not (len(col_ops) == len(col_names) ==
                len(col_types) == len(col_idxs)):
            sys.exit('EXITING: The number of column names, numbers, types, and '
                     'operations must match: [%s], [%s], [%s], [%s]\n' %
                     (args.col_names, args.col_extracts, args.col_types, args.col_operations))
        return col_names, col_types, col_ops, col_idxs

    if (args.db is None):
        parser.print_help()
        # BUG FIX: use sys.exit (always available) instead of the
        # site-module-added exit() builtin, which is absent under `python -S`.
        sys.exit(1)
    if not os.path.exists(args.db):
        # BUG FIX: terminate the message with a newline so it doesn't run
        # into the shell prompt.
        sys.stderr.write("Error: cannot find database file.\n")
        sys.exit(1)
    if not os.path.exists(args.anno_file):
        sys.stderr.write("Error: cannot find annotation file.\n")
        sys.exit(1)

    conn = sqlite3.connect(args.db)
    conn.row_factory = sqlite3.Row  # allow us to refer to columns by name
    conn.isolation_level = None  # autocommit mode

    if args.anno_type == "boolean":
        col_names = _validate_args(args)
        annotate_variants_bool(args, conn, col_names)
    elif args.anno_type == "count":
        col_names = _validate_args(args)
        annotate_variants_count(args, conn, col_names)
    elif args.anno_type == "extract":
        if args.col_extracts is None:
            sys.exit("You must specify which column to "
                     "extract from your annotation file.")
        else:
            col_names, col_types, col_ops, col_idxs = _validate_extract_args(args)
            annotate_variants_extract(args, conn, col_names, col_types, col_ops, col_idxs)
    else:
        sys.exit("Unknown column type requested. Exiting.")
    conn.close()

    # index the newly created columns so downstream queries stay fast
    for col_name in col_names:
        with database_transaction(args.db) as c:
            c.execute('''drop index if exists %s''' % (col_name + "idx"))
            c.execute('''create index %s on variants(%s)''' % (col_name + "idx", col_name))
# ## Automate addition of extra fields to database
def add_extras(gemini_db, chunk_dbs):
    """Annotate geminicassandra database with extra columns from processed chunks, if available.

    For every chunk database, look for the sidecar ``*-extra.json`` /
    ``*-extraheader.json`` files written during loading. If any chunk
    produced extra data, merge the headers, convert the JSON payloads to
    BED, merge/index the BED files, and run the ``annotate`` extract
    pipeline against ``gemini_db``. All intermediates are removed at the end.
    """
    extra_files = []
    header_files = []
    for chunk in chunk_dbs:
        extra_file, header_file = get_extra_files(chunk)
        # only keep chunks that actually produced extra data
        if os.path.exists(extra_file) and os.path.getsize(extra_file) > 0:
            extra_files.append(extra_file)
            assert os.path.exists(header_file)
            header_files.append(header_file)
    if header_files:
        header, types = _merge_headers(header_files)
        # one value per variant is expected, so "first" suffices as the op
        ops = ["first" for t in types]
        extra_beds = [_json_to_bed(x, header) for x in extra_files]
        final_bed = _merge_beds(extra_beds, gemini_db)
        # mimic the argparse namespace that annotate() consumes
        Args = namedtuple("Args", "db,anno_file,anno_type,col_operations,col_names,col_types,col_extracts")
        # extract columns start after chrom/start/end, 1-based: hence i + 4
        args = Args(gemini_db, final_bed, "extract", ",".join(ops),
                    ",".join(header), ",".join(types),
                    ",".join([str(i + 4) for i in range(len(header))]))
        annotate(None, args)
        # clean up all intermediates: per-chunk BEDs, merged BED + tabix
        # index, and the JSON sidecar files
        for fname in extra_beds + [final_bed, final_bed + ".tbi"] + header_files + extra_files:
            if os.path.exists(fname):
                os.remove(fname)
def _merge_beds(in_beds, final_db):
    """Merge BED files into a final sorted output file.

    Concatenates and coordinate-sorts the inputs (the sort is skipped when
    there is only a single file), then bgzips and tabix-indexes the result.
    Requires the ``sort``, ``bgzip`` and ``tabix`` executables on PATH.

    Returns:
        Path to the bgzipped, tabix-indexed BED file (``*.bed.gz``).
    """
    if len(in_beds) == 1:
        out_file = in_beds[0]
    else:
        out_file = "%s.bed" % os.path.splitext(final_db)[0]
        # NOTE(review): shell command built by string interpolation of file
        # paths; safe only while callers pass internally generated names --
        # confirm before exposing to user-controlled paths.
        cmd = "cat %s | sort -k1,1 -k2,2n > %s" % (" ".join(in_beds), out_file)
        subprocess.check_call(cmd, shell=True)
    subprocess.check_call(["bgzip", "-f", out_file])
    bgzip_out = out_file + ".gz"
    subprocess.check_call(["tabix", "-p", "bed", "-f", bgzip_out])
    return bgzip_out
def _json_to_bed(fname, header):
"""Convert JSON output into a BED file in preparation for annotation.
"""
out_file = "%s.bed" % os.path.splitext(fname)[0]
with open(fname) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
cur_info = json.loads(line)
parts = [str(cur_info.get(h, "")) for h in ["chrom", "start", "end"] + header]
out_handle.write("\t".join(parts) + "\n")
return out_file
def _merge_headers(header_files):
"""Merge a set of header files into a single final header for annotating.
"""
ignore = set(["chrom", "start", "end"])
ctype_order = ["text", "float", "integer", None]
out = {}
for h in header_files:
with open(h) as in_handle:
header = json.loads(in_handle.read())
for column, ctype in header.items():
if column not in ignore:
cur_ctype = sorted([ctype, out.get(column)], key=lambda x: ctype_order.index(x))[0]
out[column] = cur_ctype
headers = []
types = []
for header in sorted(out.keys()):
headers.append(header)
types.append(out[header])
return headers, types
def get_extra_files(gemini_db):
    """Retrieve extra file names associated with a geminicassandra database, for flexible loading.

    Returns:
        Tuple of (extra-data JSON path, extra-header JSON path), both derived
        from the database path with its extension stripped.
    """
    base = os.path.splitext(gemini_db)[0]
    return "%s-extra.json" % base, "%s-extraheader.json" % base
| |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shape utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow as tf
def _broadcast_shape_helper(shape_x: tf.TensorShape,
                            shape_y: tf.TensorShape) -> Optional[List[Any]]:
    """Helper function for is_broadcast_compatible and broadcast_shape.

    Args:
        shape_x: A `TensorShape`.
        shape_y: A `TensorShape`.

    Returns:
        Returns None if the shapes are not broadcast compatible, or a list
        containing the broadcasted dimensions otherwise.
    """
    # To compute the broadcasted dimensions, we zip together shape_x and
    # shape_y, and pad with 1 to make them the same length.
    broadcasted_dims = reversed(
        list(
            six.moves.zip_longest(
                reversed(shape_x.dims),
                reversed(shape_y.dims),
                fillvalue=tf.compat.v1.Dimension(1))))
    # Next we combine the dimensions according to the numpy broadcasting rules.
    # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
    return_dims = []
    for (dim_x, dim_y) in broadcasted_dims:
        if dim_x.value is None or dim_y.value is None:
            # One or both dimensions is unknown. If either dimension is greater
            # than 1, we assume that the program is correct, and the other
            # dimension will be broadcast to match it.
            if dim_x.value is not None and dim_x.value > 1:
                return_dims.append(dim_x)
            elif dim_y.value is not None and dim_y.value > 1:
                return_dims.append(dim_y)
            else:
                # Both sides unknown (or <= 1): the result is unknown too.
                return_dims.append(None)
        elif dim_x.value == 1:
            # We will broadcast dim_x to dim_y.
            return_dims.append(dim_y)
        elif dim_y.value == 1:
            # We will broadcast dim_y to dim_x.
            return_dims.append(dim_x)
        elif dim_x.value == dim_y.value:
            # The dimensions are compatible, so output is the same size in that
            # dimension.
            return_dims.append(dim_x.merge_with(dim_y))
        else:
            # Known, unequal, and neither is 1: not broadcast compatible.
            return None
    return return_dims
def is_broadcast_compatible(shape_x: tf.TensorShape,
                            shape_y: tf.TensorShape) -> bool:
    """Returns True if `shape_x` and `shape_y` are broadcast compatible.

    Args:
        shape_x: A `TensorShape`.
        shape_y: A `TensorShape`.

    Returns:
        True if a shape exists that both `shape_x` and `shape_y` can be
        broadcasted to. False otherwise.
    """
    ranks_known = shape_x.ndims is not None and shape_y.ndims is not None
    # With an unknown rank, compatibility cannot be established statically.
    return ranks_known and _broadcast_shape_helper(shape_x, shape_y) is not None
def get_broadcasted_shape(shape_x: tf.TensorShape,
                          shape_y: tf.TensorShape) -> Optional[List[Any]]:
    """Returns the common shape for broadcast compatible shapes.

    Args:
        shape_x: A `TensorShape`.
        shape_y: A `TensorShape`.

    Returns:
        Returns None if the shapes are not broadcast compatible, or a list
        containing the broadcasted dimensions otherwise.
    """
    any_rank_unknown = shape_x.ndims is None or shape_y.ndims is None
    return None if any_rank_unknown else _broadcast_shape_helper(shape_x, shape_y)
def _check_type(variable, variable_name, expected_type):
"""Helper function for checking that inputs are of expected types."""
if isinstance(expected_type, (list, tuple)):
expected_type_name = 'list or tuple'
else:
expected_type_name = expected_type.__name__
if not isinstance(variable, expected_type):
raise ValueError('{} must be of type {}, but it is {}'.format(
variable_name, expected_type_name,
type(variable).__name__))
def _fix_axis_dim_pairs(pairs, name):
"""Helper function to make `pairs` a list if needed."""
if isinstance(pairs[0], int):
pairs = [pairs]
for pair in pairs:
if len(pair) != 2:
raise ValueError(
'{} must consist of axis-value pairs, but found {}'.format(
name, pair))
return pairs
def _get_dim(tensor, axis):
    """Returns dimensionality of a tensor for a given axis."""
    # dimension_value unwraps a Dimension object to a plain int (or None).
    return tf.compat.dimension_value(tensor.shape[axis])
def check_static(tensor: tf.Tensor,
                 has_rank: Optional[int] = None,
                 has_rank_greater_than: Optional[int] = None,
                 has_rank_less_than: Optional[int] = None,
                 has_dim_equals=None,
                 has_dim_greater_than=None,
                 has_dim_less_than=None,
                 tensor_name: str = 'tensor') -> None:
    # TODO(cengizo): Typing for has_dim_equals, has_dim_greater(less)_than.
    """Checks static shapes for rank and dimension constraints.

    This function can be used to check a tensor's shape for multiple rank and
    dimension constraints at the same time.

    Args:
      tensor: Any tensor with a static shape.
      has_rank: An int or `None`. If not `None`, the function checks if the rank
        of the `tensor` equals to `has_rank`.
      has_rank_greater_than: An int or `None`. If not `None`, the function checks
        if the rank of the `tensor` is greater than `has_rank_greater_than`.
      has_rank_less_than: An int or `None`. If not `None`, the function checks if
        the rank of the `tensor` is less than `has_rank_less_than`.
      has_dim_equals: Either a tuple or list containing a single pair of `int`s,
        or a list or tuple containing multiple such pairs. Each pair is in the
        form (`axis`, `dim`), which means the function should check if
        `tensor.shape[axis] == dim`.
      has_dim_greater_than: Either a tuple or list containing a single pair of
        `int`s, or a list or tuple containing multiple such pairs. Each pair is
        in the form (`axis`, `dim`), which means the function should check if
        `tensor.shape[axis] > dim`.
      has_dim_less_than: Either a tuple or list containing a single pair of
        `int`s, or a list or tuple containing multiple such pairs. Each pair is
        in the form (`axis`, `dim`), which means the function should check if
        `tensor.shape[axis] < dim`.
      tensor_name: A name for `tensor` to be used in the error message if one is
        thrown.

    Raises:
      ValueError: If any input is not of the expected types, or if one of the
        checks described above fails.
    """
    rank = tensor.shape.ndims

    def _raise_value_error_for_rank(variable, error_msg):
        # Shared formatter for every rank violation.
        raise ValueError(
            '{} must have a rank {} {}, but it has rank {} and shape {}'.format(
                tensor_name, error_msg, variable, rank, tensor.shape.as_list()))

    def _raise_value_error_for_dim(tensor_name, error_msg, axis, value):
        # Shared formatter for every dimension violation.
        raise ValueError(
            '{} must have {} {} dimensions in axis {}, but it has shape {}'.format(
                tensor_name, error_msg, value, axis, tensor.shape.as_list()))

    if has_rank is not None:
        _check_type(has_rank, 'has_rank', int)
        if rank != has_rank:
            _raise_value_error_for_rank(has_rank, 'of')
    if has_rank_greater_than is not None:
        _check_type(has_rank_greater_than, 'has_rank_greater_than', int)
        if rank <= has_rank_greater_than:
            _raise_value_error_for_rank(has_rank_greater_than, 'greater than')
    if has_rank_less_than is not None:
        _check_type(has_rank_less_than, 'has_rank_less_than', int)
        if rank >= has_rank_less_than:
            _raise_value_error_for_rank(has_rank_less_than, 'less than')
    if has_dim_equals is not None:
        _check_type(has_dim_equals, 'has_dim_equals', (list, tuple))
        has_dim_equals = _fix_axis_dim_pairs(has_dim_equals, 'has_dim_equals')
        for axis, value in has_dim_equals:
            if _get_dim(tensor, axis) != value:
                _raise_value_error_for_dim(tensor_name, 'exactly', axis, value)
    if has_dim_greater_than is not None:
        _check_type(has_dim_greater_than, 'has_dim_greater_than', (list, tuple))
        has_dim_greater_than = _fix_axis_dim_pairs(has_dim_greater_than,
                                                   'has_dim_greater_than')
        for axis, value in has_dim_greater_than:
            if not _get_dim(tensor, axis) > value:
                _raise_value_error_for_dim(tensor_name, 'greater than', axis, value)
    if has_dim_less_than is not None:
        _check_type(has_dim_less_than, 'has_dim_less_than', (list, tuple))
        has_dim_less_than = _fix_axis_dim_pairs(has_dim_less_than,
                                                'has_dim_less_than')
        for axis, value in has_dim_less_than:
            if not _get_dim(tensor, axis) < value:
                _raise_value_error_for_dim(tensor_name, 'less than', axis, value)
def _check_tensors(tensors, tensors_name):
    """Validates that `tensors` is a list/tuple holding at least two items."""
    _check_type(tensors, tensors_name, (list, tuple))
    if len(tensors) <= 1:
        raise ValueError('At least 2 tensors are required.')
def _check_tensor_axis_lists(tensors, tensors_name, axes, axes_name):
    """Validates that `tensors` and `axes` are lists of equal length."""
    _check_type(axes, axes_name, (list, tuple))
    if len(tensors) == len(axes):
        return
    raise ValueError(
        '{} and {} must have the same length, but are {} and {}.'.format(
            tensors_name, axes_name, len(tensors), len(axes)))
def _fix_axes(tensors, axes, allow_negative):
"""Makes all axes positive and checks for out of bound errors."""
axes = [
axis + tensor.shape.ndims if axis < 0 else axis
for tensor, axis in zip(tensors, axes)
]
if not all(
((allow_negative or
(not allow_negative and axis >= 0)) and axis < tensor.shape.ndims)
for tensor, axis in zip(tensors, axes)):
rank_axis_pairs = list(
zip([tensor.shape.ndims for tensor in tensors], axes))
raise ValueError(
'Some axes are out of bounds. Given rank-axes pairs: {}'.format(
[pair for pair in rank_axis_pairs]))
return axes
def _give_default_names(list_of_objects, name):
"""Helper function to give default names to objects for error messages."""
return [name + '_' + str(index) for index in range(len(list_of_objects))]
def _all_are_equal(list_of_objects):
"""Helper function to check if all the items in a list are the same."""
if not list_of_objects:
return True
if isinstance(list_of_objects[0], list):
list_of_objects = [tuple(obj) for obj in list_of_objects]
return len(set(list_of_objects)) == 1
def _raise_error(tensor_names, batch_shapes):
formatted_list = [(name, batch_shape)
for name, batch_shape in zip(tensor_names, batch_shapes)]
raise ValueError(
'Not all batch dimensions are identical: {}'.format(formatted_list))
def compare_batch_dimensions(
        tensors: Union[List[tf.Tensor], Tuple[tf.Tensor]],
        last_axes: Union[int, List[int], Tuple[int]],
        broadcast_compatible: bool,
        initial_axes: Union[int, List[int], Tuple[int]] = 0,
        tensor_names: Optional[Union[List[str], Tuple[str]]] = None) -> None:
    """Compares batch dimensions for tensors with static shapes.

    Args:
      tensors: A list or tuple of tensors with static shapes to compare.
      last_axes: An `int` or a list or tuple of `int`s with the same length as
        `tensors`. If an `int`, it is assumed to be the same for all the
        tensors. Each entry should correspond to the last axis of the batch
        (with zero based indices). For instance, if there is only a single
        batch dimension, last axis should be `0`.
      broadcast_compatible: A 'bool', whether the batch shapes can be broadcast
        compatible in the numpy sense.
      initial_axes: An `int` or a list or tuple of `int`s with the same length
        as `tensors`. If an `int`, it is assumed to be the same for all the
        tensors. Each entry should correspond to the first axis of the batch
        (with zero based indices). Default value is `0`.
      tensor_names: Names of `tensors` to be used in the error message if one
        is thrown. If left as `None`, `tensor_i` is used.

    Raises:
      ValueError: If inputs have unexpected types, or if given axes are out of
        bounds, or if the check fails.
    """
    _check_tensors(tensors, 'tensors')
    # A scalar axis applies uniformly to every tensor.
    if isinstance(initial_axes, int):
        initial_axes = [initial_axes] * len(tensors)
    if isinstance(last_axes, int):
        last_axes = [last_axes] * len(tensors)
    _check_tensor_axis_lists(tensors, 'tensors', initial_axes, 'initial_axes')
    _check_tensor_axis_lists(tensors, 'tensors', last_axes, 'last_axes')
    initial_axes = _fix_axes(tensors, initial_axes, allow_negative=True)
    last_axes = _fix_axes(tensors, last_axes, allow_negative=True)
    # Slice each tensor's batch shape (last axis inclusive).
    batch_shapes = [
        tensor.shape[init:last + 1]
        for tensor, init, last in zip(tensors, initial_axes, last_axes)
    ]
    if tensor_names is None:
        tensor_names = _give_default_names(tensors, 'tensor')
    if not broadcast_compatible:
        batch_ndims = [batch_shape.ndims for batch_shape in batch_shapes]
        batch_shapes = [batch_shape.as_list() for batch_shape in batch_shapes]
        if not _all_are_equal(batch_ndims):
            # If not all batch shapes have the same length, they cannot be
            # identical.
            _raise_error(tensor_names, batch_shapes)
        for dims in zip(*batch_shapes):
            if _all_are_equal(dims):
                # Continue if all dimensions are None or have the same value.
                continue
            if None not in dims:
                # If all dimensions are known at this point, they are not
                # identical.
                _raise_error(tensor_names, batch_shapes)
            # At this point dims must consist of both None's and int's.
            if len(set(dims)) != 2:
                # set(dims) should return (None, some_int).
                # Otherwise shapes are not identical.
                _raise_error(tensor_names, batch_shapes)
    else:
        # Pairwise numpy-style broadcast compatibility over all batch shapes.
        if not all(
                is_broadcast_compatible(shape1, shape2)
                for shape1, shape2 in itertools.combinations(batch_shapes, 2)):
            raise ValueError(
                'Not all batch dimensions are broadcast-compatible: {}'.format([
                    (name, batch_shape.as_list())
                    for name, batch_shape in zip(tensor_names, batch_shapes)
                ]))
def compare_dimensions(
        tensors: Union[List[tf.Tensor], Tuple[tf.Tensor]],
        axes: Union[int, List[int], Tuple[int]],
        tensor_names: Optional[Union[List[str], Tuple[str]]] = None) -> None:
    """Compares dimensions of tensors with static or dynamic shapes.

    Args:
      tensors: A list or tuple of tensors to compare.
      axes: An `int` or a list or tuple of `int`s with the same length as
        `tensors`. If an `int`, it is assumed to be the same for all the
        tensors. Each entry should correspond to the axis of the tensor being
        compared.
      tensor_names: Names of `tensors` to be used in the error message if one
        is thrown. If left as `None`, `tensor_i` is used.

    Raises:
      ValueError: If inputs have unexpected types, or if given axes are out of
        bounds, or if the check fails.
    """
    _check_tensors(tensors, 'tensors')
    # A scalar axis applies uniformly to every tensor.
    if isinstance(axes, int):
        axes = [axes] * len(tensors)
    _check_tensor_axis_lists(tensors, 'tensors', axes, 'axes')
    axes = _fix_axes(tensors, axes, allow_negative=False)
    if tensor_names is None:
        tensor_names = _give_default_names(tensors, 'tensor')
    dimensions = [_get_dim(tensor, axis) for tensor, axis in zip(tensors, axes)]
    if not _all_are_equal(dimensions):
        raise ValueError('Tensors {} must have the same number of dimensions in '
                         'axes {}, but they are {}.'.format(
                             list(tensor_names), list(axes), list(dimensions)))
def is_static(
        tensor_shape: Union[List[Any], Tuple[Any], tf.TensorShape]) -> bool:
    """Checks if the given tensor shape is static (every dimension known)."""
    dims = (tensor_shape if isinstance(tensor_shape, (list, tuple))
            else tensor_shape.as_list())
    return None not in dims
def add_batch_dimensions(tensor: tf.Tensor,
                         tensor_name: str,
                         batch_shape: List[int],
                         last_axis: Optional[int] = None) -> tf.Tensor:
    """Broadcasts tensor to match batch dimensions.

    It will either broadcast to all provided batch dimensions, therefore
    increasing tensor shape by len(batch_shape) dimensions or will do nothing
    if batch dimensions already present and equal to expected batch
    dimensions.

    Args:
      tensor: A tensor to broadcast of a shape [A1, ..., An, B1, ..., Bn].
        Where [A1, ..., An] is batch dimensions (it is allowed to have no
        batch dimensions), and [B1, ..., Bn] are other tensor dimensions.
        If [A1, ..., An] are present but different from values in
        `batch_shape` the error will be thrown.
      tensor_name: Name of `tensor` to be used in the error message if one is
        thrown.
      batch_shape: list of `int` representing desired batch dimensions.
      last_axis: An `int` corresponding to the last axis of the batch (with
        zero based indices). For instance, if there is only a single batch
        dimension, last axis should be `0`. If there is no batch dimensions
        it must be set to `None`.

    Returns:
      Tensor of a shape `batch_shape` + [B1, ..., Bn] or unmodified tensor if
      `batch_shape` = [A1, ..., An].

    Raises:
      ValueError if tensor already has batch dimensions different from desired
        one.
    """
    if last_axis is not None:
        last_axis = _fix_axes([tensor], [last_axis], allow_negative=True)[0]
        tensor_batch_shape = tensor.shape.as_list()[:last_axis + 1]
        if np.array_equal(tensor_batch_shape, batch_shape):
            # Already batched with exactly the desired shape; nothing to do.
            return tensor
        elif tensor_batch_shape:
            raise ValueError(
                'Tensor {} has batch dimensions different from target '
                'one. Found {}, but expected no batch dimensions or {}'.format(
                    tensor_name, tensor.shape[:last_axis + 1], batch_shape))
    # Prepend the requested batch dimensions via broadcasting.
    return tf.broadcast_to(tensor, batch_shape + list(tensor.shape))
# The util functions or classes are not exported.
__all__ = []
| |
from __future__ import print_function
from collections import OrderedDict
import warnings
import chainer
try:
import onnx
from onnx import checker
from onnx import helper
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
from onnx import numpy_helper
from onnx import shape_inference
from onnx_chainer.context import Context
from onnx_chainer.graph import Graph
from onnx_chainer import mapping
from onnx_chainer.onnx_helper import is_support_non_standard_domain
_available = True
except ImportError:
_available = False
# Opset version range that ONNX-Chainer's converters have been tested with.
MINIMUM_OPSET_VERSION = 7
MAXIMUM_OPSET_VERSION = 11
def _check_available():
    """Raise ImportError when the optional onnx dependencies are missing."""
    if _available:
        return
    raise ImportError(
        'ONNX is not installed on your environment. Exporting your model '
        'in ONNX format needs the onnx package.\n\n'
        '\t$ pip install \'onnx<1.7.0\'\n\n')
def convert_parameter(parameter, context):
    """Serialize a parameter/variable/ndarray into an ONNX initializer.

    Args:
        parameter: ``chainer.Parameter``, ``chainer.Variable`` or a raw
            array holding the value to export.
        context: Exporter context used to resolve the ONNX tensor name.

    Returns:
        An ONNX ``TensorProto`` produced by ``numpy_helper.from_array``.

    Raises:
        ValueError: If ``parameter`` is none of the supported types.
    """
    if isinstance(parameter, (chainer.Parameter, chainer.Variable)):
        array = parameter.array
    elif isinstance(parameter, chainer.get_array_types()):
        array = parameter
    else:
        raise ValueError(
            'The type of parameter is unknown. It should be either Parameter '
            'or Variable or ndarray, but the type was {}.'.format(
                type(parameter)))
    # Values may live on GPU; ONNX serialization needs host memory.
    array = chainer.cuda.to_cpu(array)
    return numpy_helper.from_array(array, context.get_name(parameter))
def rename_variable_name(
        context, variables, named_vars, new_names, prefix='Input'):
    """Rename graph input/output variables to user-supplied names.

    Updates ``named_vars`` keys in place to ``new_names`` and pins the new
    names in ``context`` so later lookups return them.

    Args:
        context: Exporter context providing ``get_name``/``set_name``.
        variables: The model's input (or output) variables; a list/tuple,
            a dict, or a single variable.
        named_vars (dict): Mapping of current names to variables; mutated
            in place.
        new_names: Replacement names matching the structure of ``variables``;
            when ``None``, names are auto-generated as ``'<prefix>_<index>'``.
        prefix (str): Prefix used for auto-generated names.

    Raises:
        ValueError: If ``new_names`` does not match ``variables`` in type,
            length, or keys.
    """
    if isinstance(variables, (list, tuple)):
        if new_names is None:
            new_names = ['{}_{}'.format(prefix, i)
                        for i in range(len(named_vars))]
        if not isinstance(new_names, (list, tuple)) or\
                len(variables) != len(new_names):
            raise ValueError(
                'Replacing name list is not match with input (or output) '
                'variables')
        for i, var in enumerate(variables):
            del named_vars[context.get_name(var)]
            new_name = new_names[i]
            named_vars[new_name] = var
            context.set_name(var, new_name, pinned=True)
    elif isinstance(variables, dict):
        if new_names is None:
            new_names = {k: '{}_{}'.format(prefix, i)
                         for i, k in enumerate(variables.keys())}
        if not isinstance(new_names, (list, tuple, dict)) or\
                len(variables) != len(new_names):
            raise ValueError(
                'Replacing name dict is not match with input (or output) '
                'variables')
        if isinstance(new_names, (list, tuple)):
            new_names = {k: v for k, v in zip(variables.keys(), new_names)}
        for k, v in variables.items():
            if k not in new_names:
                raise ValueError(
                    'Key of replacing name is not found in variables')
            del named_vars[context.get_name(v)]
            new_name = new_names[k]
            named_vars[new_name] = v
            context.set_name(v, new_name, pinned=True)
    elif isinstance(variables, chainer.Variable):
        if not new_names:
            new_names = prefix + '_0'
        if isinstance(new_names, (list, tuple)):
            if len(new_names) != 1:
                raise ValueError('Replacing name must be single')
            new_name = new_names[0]
        elif isinstance(new_names, str):
            new_name = new_names
        else:
            # BUG FIX: this branch previously formatted `type(new_name)` with
            # the local `new_name` still unbound, raising NameError instead of
            # the intended ValueError.
            raise ValueError(
                'Type {} is not supported for single variable'.format(
                    type(new_names)))
        del named_vars[context.get_name(variables)]
        named_vars[new_name] = variables
        context.set_name(variables, new_name, pinned=True)
def format_customized_shapes(args, shapes):
    """Normalize user-supplied input shapes into a list aligned with ``args``.

    Args:
        args: Model inputs; a list/tuple, a dict, or a single variable/array.
        shapes: Customized shapes matching the structure of ``args``.

    Returns:
        A list of shapes, one per input, in input order.

    Raises:
        ValueError: If the structure, keys, or ranks do not match ``args``.
    """
    if isinstance(args, (list, tuple)):
        if not isinstance(shapes, list) or len(args) != len(shapes):
            raise ValueError('Customized shapes cannot fit for input list')
        for idx, (arg, shape) in enumerate(zip(args, shapes)):
            if len(arg.shape) != len(shape):
                raise ValueError(
                    'Index-{} shape length must be same as input'.format(idx))
        return shapes
    if isinstance(args, dict):
        if not isinstance(shapes, (list, dict)) or len(args) != len(shapes):
            raise ValueError('Customized shapes cannot fit for input dict')
        if isinstance(shapes, list):
            # align list-form shapes with the dict's key order
            shapes = dict(zip(args.keys(), shapes))
        ordered = []
        for key, arg in args.items():
            if key not in shapes:
                raise ValueError(
                    'Key "{}" is not found in customized shapes'.format(key))
            if len(arg.shape) != len(shapes[key]):
                raise ValueError(
                    'Key "{}" shape length must be same as input'.format(key))
            ordered.append(shapes[key])
        return ordered
    # single-input case
    assert isinstance(args, (chainer.Variable, chainer.get_array_types()))
    if isinstance(shapes, list):
        if len(shapes) != 1:
            raise ValueError('Customized shape must be single')
    elif isinstance(shapes, tuple):
        shapes = [shapes]
    else:
        raise ValueError(
            'Type {} is not supported for single input'.format(
                type(shapes)))
    if len(args.shape) != len(shapes[0]):
        raise ValueError('Shape length must be same as input')
    return shapes
class RetainInputHook(chainer.LinkHook):
    """Retain temporary inputs

    Function nodes manage inputs variable nodes using weak reference. When
    variable is made as temporary value, exporter cannot get the corresponded
    variable from the variable node because the reference is collected. To
    resolve it, retain all inputs and will use when make computational graph.
    To reduce memory size, this hook retains only variables not showed in link
    inputs. To enable this feature, links are required to use ``forward``, not
    ``__call__``.
    """

    def __init__(self):
        # IDs of variables passed into the current link's forward().
        self.link_inputs = set()
        # Strong references kept alive until the graph is built.
        self.retain_inputs = []
        # (function_node, original_inputs) pairs to restore on __exit__.
        self.replaced_inputs = []
        # Original FunctionNode.apply, monkey-patched while the hook is active.
        self.org_apply = chainer.function_node.FunctionNode.apply

        def hooked_apply(_self, inputs):
            # Run the real apply first, then inspect the recorded input nodes.
            ret = self.org_apply(_self, inputs)
            func_inodes = list(_self.inputs)
            for i, inode in enumerate(func_inodes):
                referenced_var = inode.get_variable_or_none()
                if referenced_var is None:
                    # This variable is created within function node and weakref
                    # is lost. Make temporary variable and retain it.
                    temp_var = chainer.as_variable(inputs[i])
                    func_inodes[i] = temp_var.node
                    self.retain_inputs.append(temp_var)
                else:
                    if id(referenced_var) not in self.link_inputs:
                        # This variable is created within link forward, outside
                        # of function node. To avoid to lose reference out
                        # of the forward, retain the variable.
                        self.retain_inputs.append(referenced_var)
            # Remember the original tuple so it can be restored on __exit__.
            self.replaced_inputs.append((_self, _self.inputs))
            _self.inputs = tuple(func_inodes)
            return ret
        self.hooked_apply = hooked_apply

    def _extract_inputs(self, args):
        # Retain only chainer.Variable (and its collection)
        # Other type args are ignored and not checked instance IDs
        # If these variable are used in FunctionNode, they will be retained
        ret = set()
        if isinstance(args, chainer.Variable):
            ret.add(id(args))
        elif isinstance(args, (list, tuple)):
            for arg in args:
                ret |= self._extract_inputs(arg)
        elif isinstance(args, dict):
            for arg in args.values():
                ret |= self._extract_inputs(arg)
        return ret

    def forward_preprocess(self, args):
        # Record which variables entered via the link's forward() signature.
        self.link_inputs |= self._extract_inputs(args.args)
        self.link_inputs |= self._extract_inputs(args.kwargs)

    def forward_postprocess(self, args):
        # Reset per-forward state once the link call completes.
        self.link_inputs.clear()

    def __enter__(self):
        # Install the patched apply for the duration of the context.
        chainer.function_node.FunctionNode.apply = self.hooked_apply
        return super().__enter__()

    def __exit__(self, *exc_details):
        # Undo the monkey patch and restore every replaced input tuple.
        chainer.function_node.FunctionNode.apply = self.org_apply
        for _self, inputs in self.replaced_inputs:
            _self.inputs = inputs
        super().__exit__(*exc_details)
def export(model, args, filename=None, export_params=True,
           graph_name='Graph', save_text=False, opset_version=None,
           input_names=None, output_names=None, train=False,
           return_named_inout=False, external_converters=None,
           external_opset_imports=None, input_shapes=None):
    """Export function for chainer.Chain in ONNX format.

    This function performs a forward computation of the given
    :class:`~chainer.Chain`, ``model``, by passing the given arguments ``args``
    directly. It means, the output :class:`~chainer.Variable` object ``y`` to
    make the computational graph will be created by:

    ``y = model(*args)``

    ``external_converters`` and ``external_opset_imports`` are for external
    custom operator. When some ~chainer.FunctionNode are expected to convert to
    own customized operator, set converter function with ~chainer.FunctionNode
    name.

    >>> import onnx
    >>> def custom_converter(param):
    ...     return onnx.helper.make_node(
    ...         'CustomizedRelu', param.input_names, param.output_names,
    ...         domain='chainer'),
    >>>
    >>> external_converters = {'ReLU': custom_converter}
    >>> external_imports = {'chainer': 0}
    >>>
    >>> model = chainer.Sequential(F.relu)  # set the target model
    >>> args = chainer.Variable(np.random.rand(1,10))  # set dummy input
    >>> onnx_graph = onnx_chainer.export(
    ...     model, args,
    ...     external_converters=external_converters,
    ...     external_opset_imports=external_imports)

    Returned model has ``CustomizedRelu`` node.

    Args:
        model (~chainer.Chain): The model object you want to export in ONNX
            format. It should have :meth:`__call__` method because the second
            argument ``args`` is directly given to the model as
            ``model(*args)``.
        args (list or dict): The arguments which are given to the model
            directly.
        filename (str or file-like object): The filename used for saving the
            resulting ONNX model. If None, nothing is saved to the disk.
        export_params (bool): If True, this function exports all the parameters
            included in the given model at the same time. If False, the
            exported ONNX model doesn't include any parameter values.
        graph_name (str): A string to be used for the ``name`` field of the
            graph in the exported ONNX model.
        save_text (bool): If True, the text format of the output ONNX model is
            also saved with ``.txt`` extension.
        opset_version (int): The operator set version of ONNX. If not specified
            or ``None`` is given, the latest opset version of the onnx module
            is used. If an integer is given, it will be ensured that all the
            operator version in the exported ONNX file is less than this value.
        input_names (str, list or dict): Customize input names of the graph.
            Number of ``input_names`` must be same as number of ``args``.
            When set dict type, keys must be same as ``args``'s keys.
        output_names (str, list or dict): Customize output name of the graph.
            Number of ``output_names`` must be same as actual outputs from
            ``model``. When set dict type, keys must be same as the key of
            ``model`` output.
        train (bool): If True, output computational graph with train mode.
        return_named_inout (bool): If set True, return ONNX model with named
            inputs, and named outputs.
        external_converters (dict): Add-on converter. Convert functions
            keyed by ~chainer.FunctionNode name.
        external_opset_imports (dict): Import external opset. opset version
            number keyed by domain name.
        input_shapes (tuple, list, dict): Input shape of output graph follows
            the customized shapes if set. When input are collection type, set
            list or dict. Tuple of tuple is not allowed.

    Returns:
        ~onnx.ModelProto or tuple:
            When ``return_named_inout`` is ``False``, return ModelProto as an
            ONNX model. Otherwise return the tuple of ModelProto, named inputs
            and outputs, both inputs and outputs are list of ~chainer.Variable.
    """
    _check_available()
    # Pin the requested train/backprop configuration for the tracing forward
    # pass; enable_backprop is required so the graph can be walked backwards.
    with chainer.using_config('train', train),\
            chainer.using_config('in_recomputing', True),\
            chainer.using_config('enable_backprop', True):
        return _export(
            model, args, filename, export_params, graph_name, save_text,
            opset_version, input_names, output_names, return_named_inout,
            external_converters, external_opset_imports, input_shapes)
def _export(model, args, filename, export_params, graph_name, save_text,
            opset_version, input_names, output_names, return_named_inout,
            external_converters, external_opset_imports, input_shapes):
    """Worker behind the public ``export`` function.

    Runs a forward pass of ``model`` on ``args`` to trace the computational
    graph, converts parameters and traced functions to ONNX, validates the
    result, and optionally serializes it to ``filename``.

    Returns:
        ~onnx.ModelProto, or ``(model_proto, named_inputs, named_outputs)``
        when ``return_named_inout`` is True.
    """
    if opset_version is None:
        # Default to the newest opset supported by both the installed onnx
        # package and ONNX-Chainer.
        opset_version = min(
            int(onnx.defs.onnx_opset_version()), MAXIMUM_OPSET_VERSION)
    elif opset_version < MINIMUM_OPSET_VERSION or \
            opset_version > MAXIMUM_OPSET_VERSION:
        # Fixed message: previously the two sentences were concatenated
        # without a separator ("{} ~ {}The ONNX file...").
        warnings.warn(
            'ONNX-Chainer has been tested only with opset_version {} ~ {}. '
            'The ONNX file exported with your requested opset_version ({}) '
            'may cause some problems because the converters used for the '
            'opset_version have not been tested.'.format(
                MINIMUM_OPSET_VERSION, MAXIMUM_OPSET_VERSION, opset_version))

    if input_shapes is not None:
        # if input shapes are invalid, raise exception before forwarding.
        input_shapes = format_customized_shapes(args, input_shapes)

    with RetainInputHook():
        # Forward computation: wrap raw arrays into Variables so the graph
        # can be traced back from the outputs, and record named inputs.
        context = Context(model)
        network_inputs = OrderedDict()
        if isinstance(args, tuple):
            args = list(args)
        if isinstance(args, list):
            for i, arg in enumerate(args):
                if isinstance(arg, chainer.get_array_types()):
                    args[i] = chainer.Variable(arg)
                network_inputs[context.get_name(args[i])] = args[i]
            outputs = model(*args)
        elif isinstance(args, dict):
            for key, arg in args.items():
                if isinstance(arg, chainer.get_array_types()):
                    args[key] = chainer.Variable(arg)
                network_inputs[context.get_name(args[key])] = args[key]
            outputs = model(**args)
        elif isinstance(args, chainer.get_array_types()):
            args = chainer.Variable(args)
            network_inputs[context.get_name(args)] = args
            outputs = model(args)
        elif isinstance(args, chainer.Variable):
            network_inputs[context.get_name(args)] = args
            outputs = model(args)
        else:
            raise ValueError(
                'The \'args\' argument should be a list, tuple, dict, '
                'numpy array, or Chainer Variable. But a {} object was '
                'given.'.format(type(args)))

    rename_variable_name(context, args, network_inputs, input_names)

    initializers = []
    input_tensors = []
    param_names = set()
    for org_name, param in model.namedparams():
        # `model.namedparams()` has an `include_uninit` flag, but it is not
        # used here so that a user warning can be emitted instead.
        if param.array is None:
            warnings.warn(
                'The parameter \'{}\' is not initialized, skip setting to '
                'ONNX graph'.format(org_name))
            continue
        name = context.get_name(param)
        param_names.add(name)
        tensor = convert_parameter(param, context)
        initializers.append(tensor)
        input_tensors.append(helper.make_tensor_value_info(
            name, tensor.data_type, tensor.dims))

    for i, (name, var) in enumerate(network_inputs.items()):
        # Respect customized input shapes when given (positional match).
        shape = var.shape if input_shapes is None else input_shapes[i]
        input_tensors.append(helper.make_tensor_value_info(
            name, NP_TYPE_TO_TENSOR_TYPE[var.dtype], shape))

    if external_converters:
        chainer.utils.experimental('external_converters')
        # External converters override the built-in mapping on key clash.
        converters = dict(mapping.converters, **external_converters)
    else:
        converters = mapping.converters

    if isinstance(outputs, (list, tuple)):
        flat_outputs = outputs
    elif isinstance(outputs, dict):
        flat_outputs = list(outputs.values())
    elif isinstance(outputs, chainer.Variable):
        flat_outputs = [outputs]
    else:
        raise RuntimeError(
            'Unexpected output type from the model: {}'.format(
                type(outputs)))
    if not all([isinstance(o, chainer.Variable) for o in flat_outputs]):
        raise ValueError('The all \'outputs\' must be Chainer Variable')
    network_outputs = OrderedDict(
        [(context.get_name(var), var) for var in flat_outputs])
    if output_names:
        rename_variable_name(
            context, outputs, network_outputs, output_names)

    o = Graph(context, converters, opset_version,
              param_names | set(network_inputs.keys()),
              network_outputs)
    o.to_onnx_graph()

    # Converters may register implicit inputs (e.g. constant arrays);
    # export them as initializers as well.
    implicit_input_names = set(context.implicit_inputs.keys())
    for name in implicit_input_names:
        tensor = convert_parameter(context.implicit_inputs[name], context)
        initializers.append(tensor)
        input_tensors.append(helper.make_tensor_value_info(
            name, tensor.data_type, tensor.dims))

    # If additional parameters are created during conversion
    for param in context.parameters:
        tensor = convert_parameter(param, context)
        initializers.append(tensor)
        input_tensors.append(helper.make_tensor_value_info(
            context.get_name(param), tensor.data_type, tensor.dims))

    # Convert output tensors
    output_tensors = []
    for name, var in network_outputs.items():
        output_tensors.append(helper.make_tensor_value_info(
            name, NP_TYPE_TO_TENSOR_TYPE[var.dtype], var.shape))

    if not export_params:
        initializers = []

    onnx_graph = helper.make_graph(
        o.graph, graph_name, input_tensors, output_tensors,
        initializer=initializers)

    opset_imports = [helper.make_operatorsetid('', opset_version)]
    if external_opset_imports:
        chainer.utils.experimental('external_opset_imports')
        for domain, version in external_opset_imports.items():
            opset_imports.append(helper.make_operatorsetid(domain, version))
    model = helper.make_model(
        onnx_graph,
        producer_name='Chainer',
        producer_version=chainer.__version__,
        opset_imports=opset_imports
    )

    model.ir_version = onnx.IR_VERSION
    check_onnx_model(model, external_converters, external_opset_imports)
    if input_shapes is not None:
        # Clear the recorded output shapes and re-infer them so they stay
        # consistent with the customized input shapes.
        for output in model.graph.output:
            for d in output.type.tensor_type.shape.dim:
                d.Clear()
        model = shape_inference.infer_shapes(model)
        check_onnx_model(model, external_converters, external_opset_imports)

    if filename is not None and isinstance(filename, str):
        with open(filename, 'wb') as fp:
            fp.write(model.SerializeToString())
        if save_text:
            with open(filename + '.txt', 'w') as fp:
                print(model, file=fp)
    elif hasattr(filename, 'write'):
        # File-like object: write the serialized model directly.
        filename.write(model.SerializeToString())

    if return_named_inout:
        chainer.utils.experimental('return_named_inout')
        return model, network_inputs, network_outputs
    return model
def check_onnx_model(onnx_model, external_converters, external_opset_imports):
    """Validate ``onnx_model``, relaxing the check for external converters.

    Without external converters any ValidationError propagates. With them,
    behavior depends on the installed onnx version:

    * onnx >= 1.5 skips schema checks for non-standard domains, so a failure
      here means no ``external_opset_imports`` were supplied — raise if they
      were, warn otherwise.
    * onnx < 1.5 always runs schema checks, so errors caused by external ops
      are downgraded to a warning.
    """
    try:
        checker.check_model(onnx_model)
        return
    except onnx.checker.ValidationError as e:
        if external_converters is None:
            raise e
        if is_support_non_standard_domain():
            if external_opset_imports:
                raise e
            warnings.warn(
                'ValidationError is occurred but ignored. '
                'ONNX-Chainer recommends to set '
                '`external_opset_imports` when using '
                '`external_converters` on exporting. Please take care '
                'about ONNX format check is insufficient. Error '
                'message:\n{}'.format(str(e)), UserWarning)
        else:
            warnings.warn(
                'ValidationError is occurred but ignored because '
                'exporting with `external_converters`. Please take care '
                'about ONNX format check is insufficient. Error '
                'message:\n{}'.format(str(e)), UserWarning)
| |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image caching and management.
"""
import functools
import os
import re
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_utils import uuidutils
import nova.conf
from nova import exception
from nova.i18n import _, _LI
from nova import utils
from nova.virt.hyperv import pathutils
from nova.virt import imagecache
from nova.virt import images
# Module-level logger and the global nova configuration object.
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def synchronize_with_path(f):
    """Decorator serializing calls to ``f`` on a per-``image_path`` basis.

    Concurrent calls with the same path run one at a time (via
    ``utils.synchronized``); calls with different paths don't block each
    other. ``functools.wraps`` is applied so the decorated method keeps
    its original name and docstring for introspection/logging.
    """
    @functools.wraps(f)
    def wrapper(self, image_path):
        @utils.synchronized(image_path)
        def inner():
            return f(self, image_path)
        return inner()
    return wrapper
class ImageCache(imagecache.ImageCacheManager):
    """Image cache manager for the Hyper-V driver.

    Fetches Glance images into a shared base-VHD directory, optionally
    caches copies pre-resized to the instance flavor's root disk size,
    and ages out cached images no running instance uses any more.
    """

    def __init__(self):
        super(ImageCache, self).__init__()
        # Helpers for path handling and VHD/VHDX manipulation.
        self._pathutils = pathutils.PathUtils()
        self._vhdutils = utilsfactory.get_vhdutils()

    def _get_root_vhd_size_gb(self, instance):
        # During a resize the cached image must still match the flavor the
        # instance was originally booted with.
        if instance.old_flavor:
            return instance.old_flavor.root_gb
        else:
            return instance.flavor.root_gb

    def _resize_and_cache_vhd(self, instance, vhd_path):
        """Cache a copy of ``vhd_path`` resized to the flavor root disk.

        Returns the resized copy's path, or ``None`` (implicitly) when the
        image already has the requested internal size. Raises
        ``FlavorDiskSmallerThanImage`` if the flavor disk is too small.
        """
        vhd_size = self._vhdutils.get_vhd_size(vhd_path)['VirtualSize']
        root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
        root_vhd_size = root_vhd_size_gb * units.Gi
        # Account for VHD format overhead when comparing sizes.
        root_vhd_internal_size = (
            self._vhdutils.get_internal_vhd_size_by_file_size(
                vhd_path, root_vhd_size))

        if root_vhd_internal_size < vhd_size:
            raise exception.FlavorDiskSmallerThanImage(
                flavor_size=root_vhd_size, image_size=vhd_size)

        if root_vhd_internal_size > vhd_size:
            # Resized copies are suffixed with the flavor size in GB,
            # e.g. "<image>_10.vhd".
            path_parts = os.path.splitext(vhd_path)
            resized_vhd_path = '%s_%s%s' % (path_parts[0],
                                            root_vhd_size_gb,
                                            path_parts[1])

            @utils.synchronized(resized_vhd_path)
            def copy_and_resize_vhd():
                if not self._pathutils.exists(resized_vhd_path):
                    try:
                        LOG.debug("Copying VHD %(vhd_path)s to "
                                  "%(resized_vhd_path)s",
                                  {'vhd_path': vhd_path,
                                   'resized_vhd_path': resized_vhd_path})
                        self._pathutils.copyfile(vhd_path, resized_vhd_path)
                        LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
                                  "size %(root_vhd_size)s",
                                  {'resized_vhd_path': resized_vhd_path,
                                   'root_vhd_size': root_vhd_size})
                        self._vhdutils.resize_vhd(resized_vhd_path,
                                                  root_vhd_internal_size,
                                                  is_file_max_size=False)
                    except Exception:
                        # Don't leave a partial/corrupt resized copy behind.
                        with excutils.save_and_reraise_exception():
                            if self._pathutils.exists(resized_vhd_path):
                                self._pathutils.remove(resized_vhd_path)

            copy_and_resize_vhd()
            return resized_vhd_path

    def get_cached_image(self, context, instance, rescue_image_id=None):
        """Return the local path of the cached image for ``instance``,
        fetching it from the image service on a cache miss.

        VHD (not VHDX) images may be replaced by a flavor-sized resized
        copy when CoW images are enabled. Rescue images are never resized.
        """
        image_id = rescue_image_id or instance.image_ref

        base_vhd_dir = self._pathutils.get_base_vhd_dir()
        base_vhd_path = os.path.join(base_vhd_dir, image_id)

        @utils.synchronized(base_vhd_path)
        def fetch_image_if_not_existing():
            vhd_path = None
            for format_ext in ['vhd', 'vhdx']:
                test_path = base_vhd_path + '.' + format_ext
                if self._pathutils.exists(test_path):
                    vhd_path = test_path
                    break

            if not vhd_path:
                try:
                    # Download without an extension first; rename once the
                    # actual format (VHD vs VHDX) is detected.
                    images.fetch(context, image_id, base_vhd_path)

                    format_ext = self._vhdutils.get_vhd_format(base_vhd_path)
                    vhd_path = base_vhd_path + '.' + format_ext.lower()
                    self._pathutils.rename(base_vhd_path, vhd_path)
                except Exception:
                    # Clean up a partially downloaded image on failure.
                    with excutils.save_and_reraise_exception():
                        if self._pathutils.exists(base_vhd_path):
                            self._pathutils.remove(base_vhd_path)

            return vhd_path

        vhd_path = fetch_image_if_not_existing()

        # Note: rescue images are not resized.
        is_vhd = vhd_path.split('.')[-1].lower() == 'vhd'
        if CONF.use_cow_images and is_vhd and not rescue_image_id:
            # Resize the base VHD image as it's not possible to resize a
            # differencing VHD. This does not apply to VHDX images.
            resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path)
            if resized_vhd_path:
                return resized_vhd_path

        if rescue_image_id:
            self._verify_rescue_image(instance, rescue_image_id,
                                      vhd_path)

        return vhd_path

    def _verify_rescue_image(self, instance, rescue_image_id,
                             rescue_image_path):
        """Reject rescue images larger than the flavor's root disk."""
        rescue_image_info = self._vhdutils.get_vhd_info(rescue_image_path)
        rescue_image_size = rescue_image_info['VirtualSize']
        flavor_disk_size = instance.flavor.root_gb * units.Gi

        if rescue_image_size > flavor_disk_size:
            err_msg = _('Using a rescue image bigger than the instance '
                        'flavor disk size is not allowed. '
                        'Rescue image size: %(rescue_image_size)s. '
                        'Flavor disk size:%(flavor_disk_size)s.') % dict(
                rescue_image_size=rescue_image_size,
                flavor_disk_size=flavor_disk_size)
            raise exception.ImageUnacceptable(reason=err_msg,
                                              image_id=rescue_image_id)

    def get_image_details(self, context, instance):
        # Image metadata as reported by the image service.
        image_id = instance.image_ref
        return images.get_info(context, image_id)

    def _age_and_verify_cached_images(self, context, all_instances, base_dir):
        # Refresh timestamps on images still in use; remove stale ones.
        # (context/all_instances/base_dir are part of the base-class
        # interface and unused here.)
        for img in self.originals:
            if img in self.used_images:
                # change the timestamp on the image so as to reflect the last
                # time it was used
                self._update_image_timestamp(img)
            else:
                self._remove_if_old_image(img)

    def _update_image_timestamp(self, image):
        # Touch the base image and its resized variants; file age is used
        # as the "last used" marker by _remove_if_old_image.
        backing_files = self._get_image_backing_files(image)
        for img in backing_files:
            os.utime(img, None)

    def _get_image_backing_files(self, image):
        """Return the base image path plus any resized variants of it."""
        base_file = self._pathutils.get_image_path(image)
        if not base_file:
            # not vhd or vhdx, ignore.
            return []

        backing_files = [base_file]
        # Resized copies are named "<image>_<size_gb>" (see
        # _resize_and_cache_vhd).
        resize_re = re.compile('%s_[0-9]+$' % image)
        for img in self.unexplained_images:
            match = resize_re.match(img)
            if match:
                backing_files.append(self._pathutils.get_image_path(img))

        return backing_files

    def _remove_if_old_image(self, image):
        """Remove the image (and its resized copies) once old enough."""
        backing_files = self._get_image_backing_files(image)
        max_age_seconds = CONF.remove_unused_original_minimum_age_seconds

        for img in backing_files:
            age_seconds = self._pathutils.get_age_of_file(img)
            if age_seconds > max_age_seconds:
                LOG.info(_LI("Removing old, unused image: %s"), img)
                self.remove_old_image(img)

    @synchronize_with_path
    def remove_old_image(self, img):
        # Serialized per path so concurrent cache managers don't race.
        self._pathutils.remove(img)

    def update(self, context, all_instances):
        """Entry point for the periodic image cache manager task."""
        base_vhd_dir = self._pathutils.get_base_vhd_dir()

        running = self._list_running_instances(context, all_instances)
        self.used_images = running['used_images'].keys()
        all_files = self._list_base_images(base_vhd_dir)
        self.originals = all_files['originals']
        self.unexplained_images = all_files['unexplained_images']

        self._age_and_verify_cached_images(context, all_instances,
                                           base_vhd_dir)

    def _list_base_images(self, base_dir):
        """Split cached files into Glance originals (UUID-named) and other
        images (e.g. resized copies), keyed by name without extension.
        """
        unexplained_images = []
        originals = []

        for entry in os.listdir(base_dir):
            file_name, extension = os.path.splitext(entry)
            # extension has a leading '.'. E.g.: '.vhdx'
            if extension.lstrip('.').lower() not in ['vhd', 'vhdx']:
                # File is not an image. Ignore it.
                # imagecache will not store images of any other formats.
                continue
            if uuidutils.is_uuid_like(file_name):
                originals.append(file_name)
            else:
                unexplained_images.append(file_name)

        return {'unexplained_images': unexplained_images,
                'originals': originals}
| |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import kfac
import numpy as np
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import test_utils
import tensorflow.compat.v1 as tf
# Enable eager execution at import time; graph-only cases opt back into
# graph mode via the test_utils decorators below.
tf.enable_eager_execution()
class CommonLayersTest(parameterized.TestCase, tf.test.TestCase):
@test_utils.run_in_graph_and_eager_modes()
def testIndexLastDimWithIndices(self):
x = np.array([[2., 3., 4., 5.],
[6., 7., 8., 9.]])
indices = np.array([2, 0])
x_idx = common_layers.index_last_dim_with_indices(x, indices)
expected = np.array([4., 6.])
self.assertAllEqual(expected, self.evaluate(x_idx))
@test_utils.run_in_graph_and_eager_modes()
def testSaturatingSigmoid(self):
x = np.array([-120.0, -100.0, 0.0, 100.0, 120.0], dtype=np.float32)
y = common_layers.saturating_sigmoid(tf.constant(x))
res = self.evaluate(y)
self.assertAllClose(res, [0.0, 0.0, 0.5, 1.0, 1.0])
@test_utils.run_in_graph_and_eager_modes()
def testFlatten4D3D(self):
x = np.random.randint(1, high=9, size=(3, 5, 2))
y = common_layers.flatten4d3d(common_layers.embedding(x, 10, 7))
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (3, 5 * 2, 7))
@test_utils.run_in_graph_and_eager_modes()
def testEmbedding(self):
x = np.random.randint(1, high=9, size=(3, 5))
y = common_layers.embedding(x, 10, 16)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (3, 5, 16))
@test_utils.run_in_graph_mode_only()
def testShakeShake(self):
x = np.random.rand(5, 7)
with self.test_session() as session:
x = tf.constant(x, dtype=tf.float32)
y = common_layers.shakeshake([x, x, x, x, x])
inp, res = session.run([x, y])
self.assertAllClose(res, inp)
@test_utils.run_in_graph_and_eager_modes()
def testConv(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.conv(tf.constant(x, dtype=tf.float32), 13, (3, 1))
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 5, 1, 13))
@test_utils.run_in_graph_and_eager_modes()
def testConv1d(self):
x = np.random.rand(5, 7, 11)
y = common_layers.conv1d(tf.constant(x, dtype=tf.float32), 13, 1)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 13))
@test_utils.run_in_graph_and_eager_modes()
def testSeparableConv(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.separable_conv(
tf.constant(x, dtype=tf.float32), 13, (3, 1))
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 5, 1, 13))
@test_utils.run_in_graph_and_eager_modes()
def testSubSeparableConv(self):
for sep in [0, 1, 2, 4]:
x = np.random.rand(5, 7, 1, 12)
with tf.variable_scope("sep_%d" % sep):
y = common_layers.subseparable_conv(
tf.constant(x, dtype=tf.float32), 16, (3, 1), separability=sep)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 5, 1, 16))
@test_utils.run_in_graph_and_eager_modes()
def testConvBlock(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.conv_block(
tf.constant(x, dtype=tf.float32),
13, [(1, (3, 3)), (1, (3, 3))],
padding="SAME",
normalizer_fn=common_layers.noam_norm)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 1, 13))
@test_utils.run_in_graph_and_eager_modes()
def testSeparableConvBlock(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.separable_conv_block(
tf.constant(x, dtype=tf.float32),
13, [(1, (3, 3)), (1, (3, 3))],
padding="SAME")
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 1, 13))
@test_utils.run_in_graph_and_eager_modes()
def testSubSeparableConvBlock(self):
for sep in [0, 1, 2, 4]:
x = np.random.rand(5, 7, 1, 12)
with tf.variable_scope("sep_%d" % sep):
y = common_layers.subseparable_conv_block(
tf.constant(x, dtype=tf.float32),
16, [(1, (3, 3)), (1, (3, 3))],
padding="SAME",
separability=sep)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 1, 16))
@test_utils.run_in_graph_and_eager_modes()
def testPool(self):
x = np.random.rand(5, 8, 1, 11)
y = common_layers.pool(
tf.constant(x, dtype=tf.float32), (2, 2), "AVG", "SAME")
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 8, 1, 11))
@test_utils.run_in_graph_and_eager_modes()
def testConvBlockDownsample(self):
x = np.random.rand(5, 7, 1, 11)
y = common_layers.conv_block_downsample(
tf.constant(x, dtype=tf.float32), (3, 1), (2, 1), "SAME")
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 4, 1, 27))
@test_utils.run_in_graph_and_eager_modes()
def testGetTimingSignal(self):
length = 7
num_timescales = 10
a = common_layers.get_timing_signal(length, num_timescales=num_timescales)
res = self.evaluate(a)
self.assertEqual(res.shape, (length, 2 * num_timescales))
@test_utils.run_in_graph_and_eager_modes()
def testAddTimingSignal(self):
batch = 5
length = 7
height = 3
depth = 35
x = np.random.rand(batch, length, height, depth)
a = common_layers.add_timing_signal(tf.constant(x, dtype=tf.float32))
res = self.evaluate(a)
self.assertEqual(res.shape, (batch, length, height, depth))
@test_utils.run_in_graph_and_eager_modes()
def testConvGRU(self):
x = np.random.rand(5, 7, 3, 11)
y = common_layers.conv_gru(tf.constant(x, dtype=tf.float32), (1, 3), 11)
z = common_layers.conv_gru(
tf.constant(x, dtype=tf.float32), (1, 3), 11, padding="LEFT")
self.evaluate(tf.global_variables_initializer())
res1 = self.evaluate(y)
res2 = self.evaluate(z)
self.assertEqual(res1.shape, (5, 7, 3, 11))
self.assertEqual(res2.shape, (5, 7, 3, 11))
@test_utils.run_in_graph_mode_only
def testSRU(self):
x = np.random.rand(5, 7, 3, 11)
with self.test_session() as session:
y = common_layers.sru(tf.constant(x, dtype=tf.float32))
session.run(tf.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 3, 11))
@test_utils.run_in_graph_and_eager_modes()
def testLayerNorm(self):
x = np.random.rand(5, 7, 11)
y = common_layers.layer_norm(tf.constant(x, dtype=tf.float32), 11)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 11))
@test_utils.run_in_graph_and_eager_modes()
def testGroupNorm(self):
x = np.random.rand(5, 7, 3, 16)
y = common_layers.group_norm(tf.constant(x, dtype=tf.float32))
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 3, 16))
@test_utils.run_in_graph_and_eager_modes()
def testConvLSTM(self):
x = np.random.rand(5, 7, 11, 13)
y = common_layers.conv_lstm(tf.constant(x, dtype=tf.float32), (1, 3), 13)
self.evaluate(tf.global_variables_initializer())
res = self.evaluate(y)
self.assertEqual(res.shape, (5, 7, 11, 13))
@test_utils.run_in_graph_and_eager_modes()
def testPadToSameLength(self):
x1 = np.random.rand(5, 7, 11)
x2 = np.random.rand(5, 9, 11)
a, b = common_layers.pad_to_same_length(
tf.constant(x1, dtype=tf.float32), tf.constant(x2, dtype=tf.float32))
c, d = common_layers.pad_to_same_length(
tf.constant(x1, dtype=tf.float32),
tf.constant(x2, dtype=tf.float32),
final_length_divisible_by=4)
res1, res2 = self.evaluate([a, b])
res1a, res2a = self.evaluate([c, d])
self.assertEqual(res1.shape, (5, 9, 11))
self.assertEqual(res2.shape, (5, 9, 11))
self.assertEqual(res1a.shape, (5, 12, 11))
self.assertEqual(res2a.shape, (5, 12, 11))
  @test_utils.run_in_graph_and_eager_modes()
  def testShiftLeft(self):
    # NOTE(review): despite the name, this exercises shift_right -- the
    # ones placed at position 0 are expected at position 1 afterwards.
    # Consider renaming to testShiftRight.
    x1 = np.zeros((5, 7, 1, 11))
    x1[:, 0, :] = np.ones_like(x1[:, 0, :])
    expected = np.zeros((5, 7, 1, 11))
    expected[:, 1, :] = np.ones_like(expected[:, 1, :])
    a = common_layers.shift_right(tf.constant(x1, dtype=tf.float32))
    actual = self.evaluate(a)
    self.assertAllEqual(actual, expected)
@test_utils.run_in_graph_and_eager_modes()
def testConvStride2MultiStep(self):
x1 = np.random.rand(5, 32, 16, 11)
a = common_layers.conv_stride2_multistep(
tf.constant(x1, dtype=tf.float32), 4, 16)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(a[0])
self.assertEqual(actual.shape, (5, 2, 1, 16))
@test_utils.run_in_graph_and_eager_modes()
def testDeconvStride2MultiStep(self):
x1 = np.random.rand(5, 2, 1, 11)
a = common_layers.deconv_stride2_multistep(
tf.constant(x1, dtype=tf.float32), 4, 16)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(a)
self.assertEqual(actual.shape, (5, 32, 1, 16))
@test_utils.run_in_graph_and_eager_modes()
def testApplyNormLayer(self):
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
tf.constant(x1, dtype=tf.float32), "layer", depth=11, epsilon=1e-6)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
@test_utils.run_in_graph_and_eager_modes()
def testApplyNormNoam(self):
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
tf.constant(x1, dtype=tf.float32), "noam", depth=11, epsilon=1e-6)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
@test_utils.run_in_graph_and_eager_modes()
def testApplyNormBatch(self):
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
tf.constant(x1, dtype=tf.float32), "batch", depth=11, epsilon=1e-6)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
@test_utils.run_in_graph_and_eager_modes()
def testApplyNormNone(self):
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
tf.constant(x1, dtype=tf.float32), "none", depth=11, epsilon=1e-6)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
self.assertAllClose(actual, x1, atol=1e-03)
  @test_utils.run_in_graph_mode_only()
  def testDenseWithLayerCollection(self):
    """dense registers one K-FAC block per call when given a collection."""
    with tf.variable_scope("test_layer_collection"):
      x1 = tf.zeros([3, 4], tf.float32)
      layer_collection = kfac.LayerCollection()
      common_layers.dense(
          x1, units=10, layer_collection=layer_collection, name="y1")
      self.assertLen(layer_collection.get_blocks(), 1)

      # 3D inputs.
      x2 = tf.zeros([3, 4, 5], tf.float32)
      common_layers.dense(
          x2, units=10, layer_collection=layer_collection, name="y2")
      self.assertLen(layer_collection.get_blocks(), 2)
  def testGlobalPool1d(self):
    """Max/average set pooling, with and without masks."""
    x1 = np.random.rand(5, 4, 11)
    no_mask = np.ones((5, 4))    # keeps every position
    full_mask = np.zeros((5, 4))  # drops every position

    x1_ = tf.Variable(x1, dtype=tf.float32)
    no_mask_ = tf.Variable(no_mask, dtype=tf.float32)
    full_mask_ = tf.Variable(full_mask, dtype=tf.float32)

    # All-ones mask must match unmasked pooling (difference sums to 0).
    none_mask_max = common_layers.global_pool_1d(x1_)
    no_mask_max = common_layers.global_pool_1d(x1_, mask=no_mask_)
    result1 = tf.reduce_sum(none_mask_max - no_mask_max)

    # All-zeros mask must pool to zeros.
    full_mask_max = common_layers.global_pool_1d(x1_, mask=full_mask_)
    result2 = tf.reduce_sum(full_mask_max)

    # Same properties for average pooling.
    none_mask_avr = common_layers.global_pool_1d(x1_, "AVR")
    no_mask_avr = common_layers.global_pool_1d(x1_, "AVR", no_mask_)
    result3 = tf.reduce_sum(none_mask_avr - no_mask_avr)
    full_mask_avr = common_layers.global_pool_1d(x1_, "AVR", full_mask_)
    result4 = tf.reduce_sum(full_mask_avr)

    self.evaluate(tf.global_variables_initializer())
    actual = self.evaluate([result1, result2, result3, result4])
    # NOTE(review): result4 (fully masked average) is computed but never
    # asserted on; only the first three results are checked.
    self.assertAllEqual(actual[:3], [0.0, 0.0, 0.0])
def testLinearSetLayer(self):
x1 = np.random.rand(5, 4, 11)
cont = np.random.rand(5, 13)
x1_ = tf.Variable(x1, dtype=tf.float32)
cont_ = tf.Variable(cont, dtype=tf.float32)
simple_ff = common_layers.linear_set_layer(32, x1_)
cont_ff = common_layers.linear_set_layer(32, x1_, context=cont_)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate([simple_ff, cont_ff])
self.assertEqual(actual[0].shape, (5, 4, 32))
self.assertEqual(actual[1].shape, (5, 4, 32))
def testRavanbakhshSetLayer(self):
x1 = np.random.rand(5, 4, 11)
x1_ = tf.Variable(x1, dtype=tf.float32)
layer = common_layers.ravanbakhsh_set_layer(32, x1_)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(layer)
self.assertEqual(actual.shape, (5, 4, 32))
@test_utils.run_in_graph_and_eager_modes()
def testTopKthIterativeShape(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.top_kth_iterative(tf.constant(x, dtype=tf.float32), 3)
actual = self.evaluate(y)
self.assertEqual(actual.shape, (5, 2, 1, 1))
@test_utils.run_in_graph_and_eager_modes()
def testTopKthIterativeValue(self):
x = [1.0, 2.0, 3.0, 4.0]
y = common_layers.top_kth_iterative(tf.constant(x, dtype=tf.float32), 3)
actual = self.evaluate(y)
self.assertEqual(int(actual[0]), 2.0)
@test_utils.run_in_graph_and_eager_modes()
def testBReLU(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.brelu(tf.constant(x, dtype=tf.float32))
actual = self.evaluate(y)
self.assertEqual(actual.shape, (5, 2, 1, 12))
@test_utils.run_in_graph_and_eager_modes()
def testBELU(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.belu(tf.constant(x, dtype=tf.float32))
actual = self.evaluate(y)
self.assertEqual(actual.shape, (5, 2, 1, 12))
@test_utils.run_in_graph_and_eager_modes()
def testNAC(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.nac(tf.constant(x, dtype=tf.float32), 14)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(y)
self.assertEqual(actual.shape, (5, 2, 1, 14))
@test_utils.run_in_graph_and_eager_modes()
def testNALU(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.nalu(tf.constant(x, dtype=tf.float32), 14)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(y)
self.assertEqual(actual.shape, (5, 2, 1, 14))
@test_utils.run_in_graph_and_eager_modes()
def testNALUzeros(self):
x = np.random.rand(5, 2, 1, 12)
y = common_layers.nalu(tf.zeros_like(x, dtype=tf.float32), 14)
self.evaluate(tf.global_variables_initializer())
actual = self.evaluate(y)
self.assertTrue(np.all(np.isfinite(actual)))
self.assertEqual(actual.shape, (5, 2, 1, 14))
  @test_utils.run_in_graph_mode_only
  def testPaddingCrossEntropyFactored(self):
    """Factored cross-entropy must match the dense-logits version."""
    vocab_size = 19
    rows = 5
    cols = 4
    depth = 11
    label_smoothing = 0.1
    features = np.random.rand(rows, cols, depth)
    weights = np.random.rand(vocab_size, depth)
    labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
    with self.test_session() as session:
      features = tf.to_float(features)
      weights = tf.to_float(weights)
      labels = tf.to_int32(labels)
      # Dense reference: materialize logits = features . weights^T.
      logits = tf.matmul(
          tf.reshape(features, [rows * cols, depth]), weights,
          transpose_b=True)
      logits = tf.reshape(logits, [rows, cols, vocab_size])
      loss_num, loss_den = common_layers.padded_cross_entropy(
          logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
      # Factored version works on (features, weights) without building the
      # full logits tensor.
      factored_logits = common_layers.FactoredTensor(features, weights)
      loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored(
          factored_logits,
          labels=labels,
          label_smoothing=label_smoothing,
          reduce_sum=False)
      num, den, num_f, den_f = session.run(
          [loss_num, loss_den, loss_num_f, loss_den_f])
      self.assertEqual(num.shape, (rows, cols))
      self.assertEqual(den.shape, (rows, cols))
      self.assertEqual(num_f.shape, (rows, cols))
      self.assertEqual(den_f.shape, (rows, cols))
      self.assertAllClose(num, num_f)
      self.assertAllClose(den, den_f)
  @test_utils.run_in_graph_mode_only
  def testPaddingCrossEntropyFactoredGrad(self):
    """Gradients of the factored loss must match the dense version."""
    vocab_size = 19
    rows = 5
    cols = 4
    depth = 11
    label_smoothing = 0.1
    features = np.random.rand(rows, cols, depth)
    weights = np.random.rand(vocab_size, depth)
    labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
    with self.test_session() as session:
      features = tf.to_float(features)
      weights = tf.to_float(weights)
      labels = tf.to_int32(labels)
      # Dense reference: materialize logits = features . weights^T.
      logits = tf.matmul(
          tf.reshape(features, [rows * cols, depth]), weights,
          transpose_b=True)
      logits = tf.reshape(logits, [rows, cols, vocab_size])
      loss_num, loss_den = common_layers.padded_cross_entropy(
          logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
      factored_logits = common_layers.FactoredTensor(features, weights)
      loss_num_factored, loss_den_factored = (
          common_layers.padded_cross_entropy_factored(
              factored_logits,
              labels=labels,
              label_smoothing=label_smoothing,
              reduce_sum=False))
      # Compare d(loss)/d(features) and d(loss)/d(weights) of both paths.
      df, dw = tf.gradients(ys=[loss_num, loss_den], xs=[features, weights])
      df_factored, dw_factored = tf.gradients(
          ys=[loss_num_factored, loss_den_factored], xs=[features, weights])
      actual_df, actual_dw, actual_df_factored, actual_dw_factored = (
          session.run([df, dw, df_factored, dw_factored]))
      self.assertEqual(actual_df.shape, (rows, cols, depth))
      self.assertEqual(actual_dw.shape, (vocab_size, depth))
      self.assertEqual(actual_df_factored.shape, (rows, cols, depth))
      self.assertEqual(actual_dw_factored.shape, (vocab_size, depth))
      self.assertAllClose(actual_df, actual_df_factored)
      self.assertAllClose(actual_dw, actual_dw_factored)
  @parameterized.parameters(
      (2, 4, 4, 5, True),
      (2, 4, 4, 5, False),
      (1, 16, 16, 1, True),
      (1, 16, 16, 1, False),
  )
  def testDmlLoss(self, batch, height, width, num_mixtures, reduce_sum):
    """dml_loss must equal the per-channel discretized mix-logistic loss."""
    channels = 3
    pred = tf.random_normal([batch, height, width, num_mixtures * 10])
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=0, maxval=256, dtype=tf.int32)
    actual_loss_num, actual_loss_den = common_layers.dml_loss(
        pred=pred, labels=labels, reduce_sum=reduce_sum)
    actual_loss = actual_loss_num / actual_loss_den
    # Reference: map int labels to symmetric reals and call the raw loss.
    real_labels = common_layers.convert_rgb_to_symmetric_real(labels)
    expected_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=real_labels) / channels
    if reduce_sum:
      expected_loss = tf.reduce_mean(expected_loss)
    actual_loss_val, expected_loss_val = self.evaluate(
        [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val)
@test_utils.run_in_graph_and_eager_modes()
def testWeightsMultiProblemAll(self):
labels = tf.constant(np.array([[12, 15, 1, 20, 100],
[67, 1, 34, 45, 124],
[78, 2, 34, 18, 29],
[78, 123, 55, 1, 33],
[1, 18, 22, 36, 59]]), dtype=tf.int32)
taskid = 1
expected_mask = np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]])
actual_mask = common_layers.weights_multi_problem_all(labels, taskid)
actual_mask_eval = self.evaluate(actual_mask)
self.assertAllClose(expected_mask, actual_mask_eval)
@test_utils.run_in_graph_and_eager_modes()
def testWeightsMultiProblem(self):
labels = tf.constant(np.array([[12, 15, 1, 20, 100],
[67, 1, 34, 45, 124],
[78, 2, 34, 18, 29],
[78, 123, 55, 1, 33],
[1, 18, 22, 36, 59]]), dtype=tf.int32)
taskid = 1
expected_mask = np.array([[0, 0, 0, 1, 1],
[0, 0, 1, 1, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 1, 1, 1, 1]])
actual_mask = common_layers.weights_multi_problem(labels, taskid)
actual_mask_eval = self.evaluate(actual_mask)
self.assertAllClose(expected_mask, actual_mask_eval)
  @test_utils.run_in_graph_and_eager_modes()
  def testDiscretizedMixLogisticLoss(self):
    """Loss matches a hand-derived single-component logistic CDF difference.

    The mixture logits put (numerically) all mass on component 0, so the
    expected loss reduces to the discretized logistic likelihood of that one
    component, computed below from sigmoid CDFs at the +/- half-bin edges.
    """
    batch = 2
    height = 4
    width = 4
    channels = 3
    num_mixtures = 5
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],
                                   minval=-1., maxval=1.)
    # atanh(0) == 0: zero channel-coupling coefficients.
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)
    # Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=-.9, maxval=.9)
    # Parameters of component 0 only (first 3 of num_mixtures * 3 channels).
    locs_0 = locs[..., :3]
    log_scales_0 = log_scales[..., :3]
    centered_labels = labels - locs_0
    inv_stdv = tf.exp(-log_scales_0)
    # CDF evaluated half a bin (1/255) above and below the label.
    plus_in = inv_stdv * (centered_labels + 1. / 255.)
    min_in = inv_stdv * (centered_labels - 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    cdf_min = tf.nn.sigmoid(min_in)
    # Negative log-probability of the bin, summed over channels.
    expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)
    actual_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=labels)
    actual_loss_val, expected_loss_val = self.evaluate(
        [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5)
@test_utils.run_in_graph_and_eager_modes()
def testSampleFromDiscretizedMixLogistic(self):
batch = 2
height = 4
width = 4
num_mixtures = 5
seed = 42
logits = tf.concat( # assign all probability mass to first component
[tf.ones([batch, height, width, 1]) * 1e8,
tf.zeros([batch, height, width, num_mixtures - 1])],
axis=-1)
locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
minval=-.9, maxval=.9)
log_scales = tf.ones([batch, height, width, num_mixtures * 3]) * -1e8
coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)
locs_0 = locs[..., :3]
expected_sample = tf.clip_by_value(locs_0, -1., 1.)
actual_sample = common_layers.sample_from_discretized_mix_logistic(
pred, seed=seed)
actual_sample_val, expected_sample_val = self.evaluate(
[actual_sample, expected_sample])
# Use a low tolerance: samples numerically differ, as the actual
# implementation clips log-scales so they always contribute to sampling.
self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2)
@test_utils.run_in_graph_and_eager_modes()
def testFactoredTensorImplicitConversion(self):
a = np.random.rand(3, 4, 5)
b = np.random.rand(6, 5)
c = np.random.rand(3, 4, 6)
# a factored representation of a Tensor of shape (3, 4, 6)
factored = common_layers.FactoredTensor(tf.to_float(a), tf.to_float(b))
# implicitly converts factored to a Tensor (performing the matmul)
d = factored + tf.to_float(c)
out = self.evaluate(d)
self.assertEqual(out.shape, (3, 4, 6))
  @test_utils.run_in_graph_mode_only()
  def testConvHiddenReluMemoryEfficient(self):
    """forget=True (recompute) must match forget=False in outputs and grads."""
    batch = 3
    length = 23
    io_size = 16
    filter_size = 7
    x = np.random.rand(batch, length, io_size)
    dy = np.random.rand(batch, length, io_size)
    with self.test_session() as session:
      x = tf.to_float(x)
      dy = tf.to_float(dy)
      f1 = tf.get_variable("f1", [1, io_size, filter_size])
      f2 = tf.get_variable("f2", [1, filter_size, io_size])
      norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)
      # Same injected weights for both variants so any difference comes from
      # the forget/recompute path, not from initialization.
      y = common_layers.conv_hidden_relu_memory_efficient(
          x, filter_size, forget=False,
          test_vars=(f1, f2, norm_scale, norm_bias))
      y_forget = common_layers.conv_hidden_relu_memory_efficient(
          x, filter_size, forget=True,
          test_vars=(f1, f2, norm_scale, norm_bias))
      # Backprop the same upstream gradient dy through both variants.
      dx, df1, df2, dnorm_scale, dnorm_bias = tf.gradients(
          ys=[y], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy])
      dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f = tf.gradients(
          ys=[y_forget], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy])
      session.run(tf.global_variables_initializer())
      (y, y_forget,
       dx, df1, df2, dnorm_scale, dnorm_bias,
       dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f) = session.run(
           [y, y_forget,
            dx, df1, df2, dnorm_scale, dnorm_bias,
            dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f])
      self.assertAllClose(y, y_forget)
      # Filter gradients get a small tolerance for recompute rounding noise.
      self.assertAllClose(df2, df2_f, rtol=2e-6, atol=2e-6)
      self.assertAllClose(df1, df1_f, rtol=2e-6, atol=2e-6)
      self.assertAllClose(dnorm_scale, dnorm_scale_f)
      self.assertAllClose(dnorm_bias, dnorm_bias_f)
      self.assertAllClose(dx, dx_f)
@test_utils.run_in_graph_and_eager_modes()
def testTopk(self):
batch_size = 3
seq_len = 5
vocab_size = 7
top_k = [3, 2, -1]
logits = np.random.rand(batch_size, seq_len, 1, 1, vocab_size) + 0.001
topk_logits = common_layers._select_top_k(logits, top_k)
self.evaluate(tf.global_variables_initializer())
topk_logits = self.evaluate(topk_logits)
for i, k in enumerate(top_k):
for j in range(seq_len):
self.assertEqual((topk_logits[i, j, 0, 0, :] > -1e6).sum(),
k if k != -1 else vocab_size)
@test_utils.run_in_graph_and_eager_modes()
def testSampleTemperaturePerExample(self):
batch_size = 3
seq_len = 5
vocab_size = 7
logits = np.random.randn(batch_size, seq_len, 1, 1, vocab_size)
temperature = np.random.rand(batch_size)
out = common_layers.sample_temperature_per_example(logits, temperature, -1)
self.assertAllEqual(
self.evaluate(tf.shape(out)), [batch_size, seq_len, 1, 1])
@test_utils.run_in_graph_and_eager_modes()
def testSampleTemperaturePerExampleWithTopK(self):
batch_size = 3
seq_len = 5
vocab_size = 7
logits = np.random.randn(batch_size, seq_len, 1, 1, vocab_size)
temperature = np.random.rand(batch_size)
top_k = np.array([3, -1, 4], dtype=np.int32)
out = common_layers.sample_temperature_per_example(logits, temperature,
top_k)
self.assertAllEqual(
self.evaluate(tf.shape(out)), [batch_size, seq_len, 1, 1])
@test_utils.run_in_graph_and_eager_modes()
def testSampleTemperaturePerExampleWithTopK2(self):
batch_size = 3
vocab_size = 7
logits = np.random.randn(batch_size, vocab_size)
temperature = np.random.rand(batch_size)
top_k = np.array([3, -1, 4], dtype=np.int32)
out = common_layers.sample_temperature_per_example(logits, temperature,
top_k)
self.assertAllEqual(self.evaluate(tf.shape(out)), [batch_size])
@test_utils.run_in_graph_mode_only()
def testSampleTemperaturePerExampleDynamicBatchSize(self):
batch_size = None
vocab_size = 7
logits = tf.placeholder(tf.float32, shape=(batch_size, vocab_size))
temperature = tf.placeholder(tf.float32, shape=(batch_size, 1))
sampling_keep_top_k = tf.placeholder(tf.int32, shape=(batch_size, 1))
out = common_layers.sample_temperature_per_example(logits, temperature,
sampling_keep_top_k)
self.assertAllEqual(out.shape.as_list(), [batch_size])
@test_utils.run_in_graph_and_eager_modes()
def testCycleGANUpsampleNnUpsampleConv(self):
batch = 8
height = 32
width = 32
num_channels = 3
output_filters = 10
stride = [2, 3] # we want height to be x2 and width to be x3
random_input = np.random.rand(batch, height, width, num_channels).astype(
np.float32)
# nn_upsample_conv gives exactly the shapes we'd expect.
upsampled_output = common_layers.cyclegan_upsample(
random_input, output_filters, stride, "nn_upsample_conv")
upsampled_output_shape = tf.shape(upsampled_output)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
[batch, height * stride[0], width * stride[1], output_filters],
self.evaluate(upsampled_output_shape))
@test_utils.run_in_graph_and_eager_modes()
def testCycleGANUpsampleBilinearUpsampleConv(self):
batch = 8
height = 32
width = 32
num_channels = 3
output_filters = 10
stride = [2, 3] # we want height to be x2 and width to be x3
random_input = np.random.rand(batch, height, width, num_channels).astype(
np.float32)
# bilinear_upsample_conv gives exactly the shapes we'd expect.
upsampled_output = common_layers.cyclegan_upsample(
random_input, output_filters, stride, "bilinear_upsample_conv")
upsampled_output_shape = tf.shape(upsampled_output)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
[batch, height * stride[0], width * stride[1], output_filters],
self.evaluate(upsampled_output_shape))
@test_utils.run_in_graph_and_eager_modes()
def testCycleGANUpsampleConv2dTranspose(self):
batch = 8
height = 32
width = 32
num_channels = 3
output_filters = 10
stride = [2, 3] # we want height to be x2 and width to be x3
random_input = tf.convert_to_tensor(
np.random.rand(batch, height, width, num_channels), dtype=tf.float32)
# conv2d_transpose is a little tricky.
# height_new = (height_old - 1) * stride + kernel - 2*padding - correction
# here kernel = 3, padding = 0, correction = 1
upsampled_height = (height - 1) * stride[0] + 3 - 2*0 - 1
upsampled_width = (width - 1) * stride[1] + 3 - 2*0 - 1
upsampled_output = common_layers.cyclegan_upsample(random_input,
output_filters, stride,
"conv2d_transpose")
upsampled_output_shape = tf.shape(upsampled_output)
self.evaluate(tf.global_variables_initializer())
self.assertAllEqual(
[batch, upsampled_height, upsampled_width, output_filters],
self.evaluate(upsampled_output_shape))
  def testSpectralNorm(self):
    # Test that after 20 calls to apply_spectral_norm, the spectral
    # norm of the normalized matrix is close to 1.0
    with tf.Graph().as_default():
      weights = tf.get_variable("w", dtype=tf.float32, shape=[2, 3, 50, 100])
      # Scale up so the raw spectral norm is clearly not 1 to begin with.
      weights = tf.multiply(weights, 10.0)
      normed_weight, assign_op = common_layers.apply_spectral_norm(weights)
      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Run the power-iteration update repeatedly so the singular-vector
        # estimate converges.
        for _ in range(20):
          sess.run(assign_op)
        # Re-build the normalized weight after the updates.
        # NOTE(review): this calls apply_spectral_norm a second time in the
        # same graph; presumably it reuses the persisted power-iteration
        # state — confirm against the helper's implementation.
        normed_weight, assign_op = common_layers.apply_spectral_norm(
            weights)
        normed_weight = sess.run(normed_weight).reshape(-1, 100)
        # Largest singular value of the flattened matrix should be ~1.
        _, s, _ = np.linalg.svd(normed_weight)
        self.assertTrue(np.allclose(s[0], 1.0, rtol=0.1))
class FnWithCustomGradTest(tf.test.TestCase):
  """Tests for common_layers.fn_with_custom_grad."""

  @test_utils.run_in_graph_mode_only()
  def testCorrectness(self):
    """A pass-through grad_fn must reproduce regular outputs and gradients."""
    w = tf.random_uniform([6, 10])
    def fn(a, b, c):
      # Force a known kernel (w) so both the plain and wrapped calls build
      # dense layers with identical weights.
      return tf.layers.dense(
          a,
          10,
          use_bias=False,
          kernel_initializer=lambda shape, dtype, partition_info: w
      ) + tf.matmul(b, c)
    def grad_fn(inputs, variables, outputs, grad_outputs):
      # Delegates straight to tf.gradients, i.e. the identity custom grad.
      outputs = outputs[0]
      grad_outputs = grad_outputs[0]
      grad_inputs = tf.gradients(outputs, inputs, grad_ys=grad_outputs)
      grad_vars = tf.gradients(outputs, variables, grad_ys=grad_outputs)
      return grad_inputs, grad_vars
    custom_fn = common_layers.fn_with_custom_grad(grad_fn)(fn)
    a = tf.random_uniform([11, 6])
    b = tf.random_uniform([11, 7])
    c = tf.random_uniform([7, 10])
    out = fn(a, b, c)
    custom_out = custom_fn(a, b, c)
    self.assertEqual(out.get_shape().as_list(),
                     custom_out.get_shape().as_list())
    loss = tf.reduce_mean(out)
    custom_loss = tf.reduce_mean(custom_out)
    # Variable 0 belongs to the plain call, variable 1 to the wrapped call
    # (creation order) — compare gradients w.r.t. the matching kernel.
    grads = tf.gradients(loss, [a, b, c] + [tf.trainable_variables()[0]])
    custom_grads = tf.gradients(custom_loss,
                                [a, b, c] + [tf.trainable_variables()[1]])
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      out_val, custom_out_val, grads_val, custom_grads_val = sess.run(
          [out, custom_out, grads, custom_grads])
      self.assertAllClose(out_val, custom_out_val)
      for g1, g2 in zip(grads_val, custom_grads_val):
        self.assertAllClose(g1, g2)

  @test_utils.run_in_graph_mode_only()
  def testCustomGrad(self):
    """The supplied grad_fn's outputs must replace the real gradients."""
    def fn(a, b, c):
      return tf.layers.dense(a, 10, use_bias=False) + tf.matmul(b, c)
    def grad_fn(inputs, variables, unused_outputs, unused_grad_outputs):
      # Synthetic gradients: constant (i + 1) for inputs, continuing the
      # numbering for variables — easy to verify below.
      grad_inputs = [tf.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)]
      grad_vars = [
          tf.ones_like(t) * (i + len(inputs) + 1.)
          for i, t in enumerate(variables)
      ]
      return grad_inputs, grad_vars
    a = tf.random_uniform([11, 6])
    b = tf.random_uniform([11, 7])
    c = tf.random_uniform([7, 10])
    w = tf.random_uniform([6, 10])
    out = common_layers.fn_with_custom_grad(grad_fn)(fn)(a, b, c)
    loss = tf.reduce_mean(out)
    grads = tf.gradients(loss, [a, b, c, tf.trainable_variables()[0]])
    expected_grads = [
        tf.ones_like(t) * (i + 1.) for i, t in enumerate([a, b, c, w])
    ]
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      g_val, eg_val = sess.run([grads, expected_grads])
      for g1, g2 in zip(g_val, eg_val):
        self.assertAllClose(g1, g2)
class RecomputeTest(tf.test.TestCase):
  """Tests for common_layers.recompute_grad."""

  @test_utils.run_in_graph_mode_only()
  def testRecompute(self):
    """Recomputed and regular backprop must agree on outputs and gradients."""
    def layer(x, name=None):
      # One norm+conv+relu layer; constant kernel init so the "recompute"
      # and "regular" scopes hold identical weights.
      with tf.variable_scope(name, default_name="layer"):
        x = common_layers.layer_norm(x)
        x = tf.layers.conv1d(
            x,
            10,
            1,
            use_bias=False,
            kernel_initializer=tf.constant_initializer(42.42))
        x = tf.nn.relu(x)
        return x
    def fn(x):
      # Three stacked layers.
      out = x
      for _ in range(3):
        out = layer(out)
      return out
    @common_layers.recompute_grad
    def fn_recompute(x):
      return fn(x)
    x = tf.random_uniform((3, 1, 3))
    recompute_vars = None
    with tf.variable_scope("recompute") as vs:
      out1 = tf.reduce_sum(fn_recompute(x))
      recompute_vars = vs.trainable_variables()
    reg_vars = None
    with tf.variable_scope("regular") as vs:
      out2 = tf.reduce_sum(fn(x))
      reg_vars = vs.trainable_variables()
    # Gradients w.r.t. each scope's own variables should match pairwise.
    grad1 = tf.gradients(out1, recompute_vars)
    grad2 = tf.gradients(out2, reg_vars)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      outs = sess.run([out1, out2, grad1, grad2])
      self.assertAllClose(outs[0], outs[1])
      for g1, g2 in zip(outs[2], outs[3]):
        self.assertAllClose(g1, g2)
class WeightNormTest(tf.test.TestCase):
  """Tests for the common_layers.WeightNorm keras-layer wrapper."""

  def testInputSpec(self):
    """WeightNorm must not overspecify the wrapped layer's input_spec."""
    wrapped = common_layers.WeightNorm(
        tf.keras.layers.Conv1D(filters=8, kernel_size=3))
    # The layer must accept different batch sizes after being built...
    wrapped(tf.zeros([1, 16, 2]))
    wrapped(tf.zeros([2, 16, 2]))
    # ...while still rejecting a mismatched input feature dimension.
    with self.assertRaises(ValueError):
      wrapped(tf.zeros([2, 16, 3]))
# Run all test cases in this module when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import inspect
import os
import tempfile
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator import export
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
from tensorflow.python.util import compat
# Argument names a model_fn is allowed to declare in its signature.
# NOTE(review): the consumer of this set is not visible in this chunk;
# presumably used by _verify_model_fn_args (called in __init__) — confirm.
_VALID_MODEL_FN_ARGS = set(
    ['features', 'labels', 'mode', 'params', 'config'])
class Estimator(object):
"""Estimator class to train and evaluate TensorFlow models.
The Estimator object wraps a model which is specified by a `model_fn`, which,
given inputs and a number of other parameters, returns the ops necessary to
perform training, evaluation, or predictions, respectively.
All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a
subdirectory thereof. If `model_dir` is not set, a temporary directory is
used.
The `config` argument can be passed `RunConfig` object containing information
about the execution environment. It is passed on to the `model_fn`, if the
`model_fn` has a parameter named "config" (and input functions in the same
manner). If the `config` parameter is not passed, it is instantiated by the
Estimator. Not passing config means that defaults useful for local execution
are used. Estimator makes config available to the model (for instance, to
allow specialization based on the number of workers available), and also uses
some of its fields to control internals, especially regarding checkpointing.
The `params` argument contains hyperparameters. It is passed to the
`model_fn`, if the `model_fn` has a parameter named "params", and to the input
functions in the same manner. Estimator only passes params along, it does not
inspect it. The structure of params is therefore entirely up to the developer.
"""
  def __init__(self, model_fn, model_dir=None, config=None, params=None):
    """Constructs an `Estimator` instance.

    Args:
      model_fn: Model function. Follows the signature:
        * Args:
          * `features`: single `Tensor` or `dict` of `Tensor`s
                 (depending on data passed to `train`),
          * `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
                 models). If mode is `ModeKeys.PREDICT`, `labels=None` will be
                 passed. If the `model_fn`'s signature does not accept
                 `mode`, the `model_fn` must still be able to handle
                 `labels=None`.
          * `mode`: Optional. Specifies if this training, evaluation or
                 prediction. See `ModeKeys`.
          * `params`: Optional `dict` of hyperparameters.  Will receive what
                 is passed to Estimator in `params` parameter. This allows
                 to configure Estimators from hyper parameter tuning.
          * `config`: Optional configuration object. Will receive what is passed
                 to Estimator in `config` parameter, or the default `config`.
                 Allows updating things in your model_fn based on configuration
                 such as `num_ps_replicas`.
          * `model_dir`: Optional directory where model parameters, graph etc
                 are saved. Will receive what is passed to Estimator in
                 `model_dir` parameter, or the default `model_dir`. Allows
                 updating things in your model_fn that expect model_dir, such as
                 training hooks.

        * Returns:
          `EstimatorSpec`
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      config: Configuration object.
      params: `dict` of hyper parameters that will be passed into `model_fn`.
              Keys are names of parameters, values are basic python types.

    Raises:
      ValueError: parameters of `model_fn` don't match `params`.
      ValueError: if this is called via a subclass and if that class overrides
        a member of `Estimator`.
    """
    # Fail fast if a subclass has overridden any Estimator member.
    self._assert_members_are_not_overridden()
    # Model directory.
    self._model_dir = model_dir
    if self._model_dir is None:
      # No directory given: checkpoint into a throwaway temp directory.
      self._model_dir = tempfile.mkdtemp()
      logging.warning('Using temporary folder as model directory: %s',
                      self._model_dir)
    if config is None:
      self._config = run_config.RunConfig()
      logging.info('Using default config.')
    else:
      if not isinstance(config, run_config.RunConfig):
        raise ValueError(
            'config must be an instance of RunConfig, but provided %s.' %
            config)
      self._config = config
    logging.info('Using config: %s', str(vars(self._config)))
    # Device placement function derived from the run config (for replicated
    # / parameter-server training).
    self._device_fn = _get_replica_device_setter(self._config)
    if model_fn is None:
      raise ValueError('model_fn must be provided to Estimator.')
    # Reject model_fn signatures that declare unexpected argument names.
    _verify_model_fn_args(model_fn, params)
    self._model_fn = model_fn
    self._params = params or {}
  @property
  def model_dir(self):
    """Directory where model parameters, graph, checkpoints etc. are saved."""
    return self._model_dir
  @property
  def config(self):
    """A deep copy of the run config (so callers cannot mutate internal state)."""
    return copy.deepcopy(self._config)
  @property
  def params(self):
    """A deep copy of the hyperparameter dict passed at construction."""
    return copy.deepcopy(self._params)
def train(self, input_fn, hooks=None, steps=None, max_steps=None):
"""Trains a model given training data input_fn.
Args:
input_fn: Input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the training loop.
steps: Number of steps for which to train model. If `None`, train forever
or train until input_fn generates the `OutOfRange` or `StopIteration`
error. 'steps' works incrementally. If you call two times
train(steps=10) then training occurs in total 20 steps. If `OutOfRange`
or `StopIteration` error occurs in the middle, training stops before 20
steps. If you don't want to have incremental behaviour please set
`max_steps` instead. If set, `max_steps` must be `None`.
max_steps: Number of total steps for which to train model. If `None`,
train forever or train until input_fn generates the `OutOfRange` or
`StopIteration` error. If set, `steps` must be `None`. If `OutOfRange`
or `StopIteration` error occurs in the middle, training stops before
`max_steps` steps.
Two calls to `train(steps=100)` means 200 training
iterations. On the other hand, two calls to `train(max_steps=100)` means
that the second call will not do any iteration since first call did
all 100 steps.
Returns:
`self`, for chaining.
Raises:
ValueError: If both `steps` and `max_steps` are not `None`.
ValueError: If either `steps` or `max_steps` is <= 0.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if steps is not None and steps <= 0:
raise ValueError('Must specify steps >= 0, given: {}'.format(steps))
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps >= 0, given: {}'.format(max_steps))
if max_steps is not None:
start_step = _load_global_step_from_checkpoint_dir(self._model_dir)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already saved.')
return self
hooks = list(hooks or [])
if steps is not None or max_steps is not None:
hooks.append(training.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
name=None):
"""Evaluates the model given evaluation data input_fn.
For each step, calls `input_fn`, which returns one batch of data.
Evaluates until:
- `steps` batches are processed, or
- `input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`).
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or
`SparseTensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
steps: Number of steps for which to evaluate model. If `None`, evaluates
until `input_fn` raises an end-of-input exception.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the evaluation call.
checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
latest checkpoint in `model_dir` is used.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
A dict containing the evaluation metrics specified in `model_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed.
Raises:
ValueError: If `steps <= 0`.
ValueError: If no model has been trained, namely `model_dir`, or the
given `checkpoint_path` is empty.
"""
hooks = list(hooks or [])
if steps is not None:
if steps <= 0:
raise ValueError('Must specify steps >= 0, given: {}'.format(steps))
hooks.append(evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps))
return self._evaluate_model(
input_fn=input_fn,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
  def predict(self, input_fn, predict_keys=None, hooks=None):
    """Returns predictions for given features.

    Args:
      input_fn: Input function returning features which is a dictionary of
        string feature name to `Tensor` or `SparseTensor`. If it returns a
        tuple, first item is extracted as features. Prediction continues until
        `input_fn` raises an end-of-input exception (`OutOfRangeError` or
        `StopIteration`).
      predict_keys: list of `str`, name of the keys to predict. It is used if
        the `EstimatorSpec.predictions` is a `dict`. If `predict_keys` is used
        then rest of the predictions will be filtered from the dictionary. If
        `None`, returns all.
      hooks: List of `SessionRunHook` subclass instances. Used for callbacks
        inside the prediction call.

    Yields:
      Evaluated values of `predictions` tensors.

    Raises:
      ValueError: Could not find a trained model in model_dir.
      ValueError: if batch length of predictions are not same.
      ValueError: If there is a conflict between `predict_keys` and
        `predictions`. For example if `predict_keys` is not `None` but
        `EstimatorSpec.predictions` is not a `dict`.
    """
    hooks = list(hooks or [])
    # Check that model has been trained.
    checkpoint_path = saver.latest_checkpoint(self._model_dir)
    if not checkpoint_path:
      raise ValueError('Could not find trained model in model_dir: {}.'.format(
          self._model_dir))
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      training.create_global_step(g)
      features = self._get_features_from_input_fn(input_fn)
      # Build the inference graph; labels are not available at predict time.
      estimator_spec = self._call_model_fn(features, None,
                                           model_fn_lib.ModeKeys.PREDICT)
      predictions = self._extract_keys(estimator_spec.predictions, predict_keys)
      # Restore the checkpoint and keep running the prediction ops until the
      # input pipeline is exhausted.
      with training.MonitoredSession(
          session_creator=training.ChiefSessionCreator(
              checkpoint_filename_with_path=checkpoint_path,
              scaffold=estimator_spec.scaffold,
              config=config_pb2.ConfigProto(allow_soft_placement=True)),
          hooks=hooks) as mon_sess:
        while not mon_sess.should_stop():
          preds_evaluated = mon_sess.run(predictions)
          if not isinstance(predictions, dict):
            # Single-tensor predictions: yield one array per batch row.
            for pred in preds_evaluated:
              yield pred
          else:
            # Dict predictions: split the batch into per-example dicts.
            for i in range(self._extract_batch_length(preds_evaluated)):
              yield {
                  key: value[i]
                  for key, value in six.iteritems(preds_evaluated)
              }
def _assert_members_are_not_overridden(self):
estimator_members = set([m for m in Estimator.__dict__.keys()
if not m.startswith('__')])
subclass_members = set(self.__class__.__dict__.keys())
common_members = estimator_members & subclass_members
overriden_members = [m for m in common_members
if Estimator.__dict__[m] != self.__class__.__dict__[m]]
if overriden_members:
raise ValueError(
'Subclasses of Estimator cannot override members of Estimator. '
'{} does override {}'.format(self.__class__, overriden_members))
  def export_savedmodel(
      self, export_dir_base, serving_input_receiver_fn,
      assets_extra=None,
      as_text=False,
      checkpoint_path=None):
    """Exports inference graph as a SavedModel into given dir.

    This method builds a new graph by first calling the
    serving_input_receiver_fn to obtain feature `Tensor`s, and then calling
    this `Estimator`'s model_fn to generate the model graph based on those
    features. It restores the given checkpoint (or, lacking that, the most
    recent checkpoint) into this graph in a fresh session.  Finally it creates
    a timestamped export directory below the given export_dir_base, and writes
    a `SavedModel` into it containing a single `MetaGraphDef` saved from this
    session.

    The exported `MetaGraphDef` will provide one `SignatureDef` for each
    element of the export_outputs dict returned from the model_fn, named using
    the same keys.  One of these keys is always
    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, indicating which
    signature will be served when a serving request does not specify one.
    For each signature, the outputs are provided by the corresponding
    `ExportOutput`s, and the inputs are always the input receivers provided by
    the serving_input_receiver_fn.

    Extra assets may be written into the SavedModel via the extra_assets
    argument.  This should be a dict, where each key gives a destination path
    (including the filename) relative to the assets.extra directory.  The
    corresponding value gives the full path of the source file to be copied.
    For example, the simple case of copying a single file without renaming it
    is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.

    Args:
      export_dir_base: A string containing a directory in which to create
        timestamped subdirectories containing exported SavedModels.
      serving_input_receiver_fn: A function that takes no argument and
        returns a `ServingInputReceiver`.
      assets_extra: A dict specifying how to populate the assets.extra directory
        within the exported SavedModel, or `None` if no extra assets are needed.
      as_text: whether to write the SavedModel proto in text format.
      checkpoint_path: The checkpoint path to export.  If `None` (the default),
        the most recent checkpoint found within the model directory is chosen.

    Returns:
      The string path to the exported directory.

    Raises:
      ValueError: if no serving_input_receiver_fn is provided, no export_outputs
          are provided, or no checkpoint can be found.
    """
    if serving_input_receiver_fn is None:
      raise ValueError('serving_input_receiver_fn must be defined.')
    with ops.Graph().as_default() as g:
      training.create_global_step(g)
      serving_input_receiver = serving_input_receiver_fn()
      # Call the model_fn and collect the export_outputs.
      estimator_spec = self._call_model_fn(
          features=serving_input_receiver.features,
          labels=None,
          mode=model_fn_lib.ModeKeys.PREDICT)
      # Build the SignatureDefs from receivers and all outputs
      signature_def_map = export.build_all_signature_defs(
          serving_input_receiver.receiver_tensors,
          estimator_spec.export_outputs)
      if not checkpoint_path:
        # Locate the latest checkpoint
        checkpoint_path = saver.latest_checkpoint(self._model_dir)
      if not checkpoint_path:
        raise ValueError("Couldn't find trained model at %s." % self._model_dir)
      export_dir = export.get_timestamped_export_dir(export_dir_base)
      # TODO(soergel): Consider whether MonitoredSession makes sense here
      with tf_session.Session() as session:
        # Prefer the model's own saver; fall back to a sharded default saver.
        saver_for_restore = estimator_spec.scaffold.saver or saver.Saver(
            variables.global_variables(),
            sharded=True)
        saver_for_restore.restore(session, checkpoint_path)
        # TODO(b/36111876): replace legacy_init_op with main_op mechanism
        # pylint: disable=protected-access
        local_init_op = (
            estimator_spec.scaffold.local_init_op or
            monitored_session.Scaffold._default_local_init_op())
        # pylint: enable=protected-access
        # Perform the export
        builder = saved_model_builder.SavedModelBuilder(export_dir)
        builder.add_meta_graph_and_variables(
            session, [tag_constants.SERVING],
            signature_def_map=signature_def_map,
            assets_collection=ops.get_collection(
                ops.GraphKeys.ASSET_FILEPATHS),
            legacy_init_op=local_init_op)
        builder.save(as_text)
      # Add the extra assets
      if assets_extra:
        assets_extra_path = os.path.join(compat.as_bytes(export_dir),
                                         compat.as_bytes('assets.extra'))
        for dest_relative, source in assets_extra.items():
          dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
                                       compat.as_bytes(dest_relative))
          dest_path = os.path.dirname(dest_absolute)
          gfile.MakeDirs(dest_path)
          gfile.Copy(source, dest_absolute)
      return export_dir
def _get_features_from_input_fn(self, input_fn):
  """Runs `input_fn` and returns only the features part of its result."""
  batch = input_fn()
  # Without a QueueRunner the input pipeline never signals exhaustion, so
  # `predict` would iterate forever; warn about the likely mistake.
  queue_runners = ops.get_default_graph().get_collection(
      ops.GraphKeys.QUEUE_RUNNERS)
  if not queue_runners:
    logging.warning('Input graph does not contain a QueueRunner. '
                    'That means predict yields forever. '
                    'This is probably a mistake.')
  # input_fn may return a (features, labels) tuple; keep only features.
  return batch[0] if isinstance(batch, (list, tuple)) else batch
def _extract_batch_length(self, preds_evaluated):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in six.iteritems(preds_evaluated):
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
raise ValueError('Batch length of predictions should be same. %s has '
'different batch length then others.' % key)
return batch_length
def _extract_keys(self, predictions, predict_keys):
"""Extracts `predict_keys` from `predictions`."""
if not predict_keys:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'predict_keys argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in predict_keys
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, predict_keys))
return predictions
def _call_model_fn(self, features, labels, mode):
  """Calls model function.

  Only the optional arguments (`mode`, `params`, `config`) that the user's
  model_fn actually declares are forwarded to it.

  Args:
    features: features dict.
    labels: labels dict.
    mode: ModeKeys

  Returns:
    An `EstimatorSpec` object.

  Raises:
    ValueError: if model_fn returns invalid objects.
  """
  declared_args = _get_arguments(self._model_fn).args
  kwargs = {}
  if 'mode' in declared_args:
    kwargs['mode'] = mode
  if 'params' in declared_args:
    kwargs['params'] = self.params
  if 'config' in declared_args:
    kwargs['config'] = self.config
  spec = self._model_fn(features=features, labels=labels, **kwargs)
  if not isinstance(spec, model_fn_lib.EstimatorSpec):
    raise ValueError('model_fn should return an EstimatorSpec.')
  return spec
def _train_model(self, input_fn, hooks):
  """Trains the model until a hook requests a stop.

  Builds a fresh graph, calls model_fn in TRAIN mode, and drives a
  MonitoredTrainingSession loop.

  Args:
    input_fn: callable returning a `(features, labels)` tuple.
    hooks: list of `SessionRunHook`s to attach to the session.

  Returns:
    The last evaluated value of the training loss (None if the loop
    never ran).
  """
  all_hooks = []
  with ops.Graph().as_default() as g, g.device(self._device_fn):
    random_seed.set_random_seed(self._config.tf_random_seed)
    global_step_tensor = training.create_global_step(g)
    # Input pipelines are pinned to the CPU.
    with ops.device('/cpu:0'):
      features, labels = input_fn()
    estimator_spec = self._call_model_fn(features, labels,
                                         model_fn_lib.ModeKeys.TRAIN)
    ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
    # Always guard against NaN losses and log loss/step every 100 steps.
    all_hooks.extend([
        training.NanTensorHook(estimator_spec.loss),
        training.LoggingTensorHook(
            {
                'loss': estimator_spec.loss,
                'step': global_step_tensor
            },
            every_n_iter=100)
    ])
    all_hooks.extend(hooks)
    all_hooks.extend(estimator_spec.training_hooks)
    # Install a default (sharded, deferred) Saver unless the scaffold or
    # the SAVERS collection already provides one.
    if not (estimator_spec.scaffold.saver or
            ops.get_collection(ops.GraphKeys.SAVERS)):
      ops.add_to_collection(ops.GraphKeys.SAVERS,
                            training.Saver(
                                sharded=True,
                                max_to_keep=self._config.keep_checkpoint_max,
                                defer_build=True))
    chief_hooks = []
    if (self._config.save_checkpoints_secs or
        self._config.save_checkpoints_steps):
      saver_hook_exists = any([
          isinstance(h, training.CheckpointSaverHook)
          for h in (all_hooks + chief_hooks +
                    estimator_spec.training_chief_hooks)
      ])
      # Only add a CheckpointSaverHook when the user did not supply one.
      if not saver_hook_exists:
        chief_hooks = [
            training.CheckpointSaverHook(
                self._model_dir,
                save_secs=self._config.save_checkpoints_secs,
                save_steps=self._config.save_checkpoints_steps,
                scaffold=estimator_spec.scaffold)
        ]
    with training.MonitoredTrainingSession(
        master=self._config.master,
        is_chief=self._config.is_chief,
        checkpoint_dir=self._model_dir,
        scaffold=estimator_spec.scaffold,
        hooks=all_hooks,
        chief_only_hooks=chief_hooks + estimator_spec.training_chief_hooks,
        save_checkpoint_secs=0,  # Saving is handled by a hook.
        save_summaries_steps=self._config.save_summary_steps,
        config=config_pb2.ConfigProto(allow_soft_placement=True)) as mon_sess:
      loss = None
      # Run until a hook (e.g. a StopAtStepHook supplied by the caller)
      # asks the session to stop.
      while not mon_sess.should_stop():
        _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
    return loss
def _evaluate_model(self,
                    input_fn,
                    hooks=None,
                    checkpoint_path=None,
                    name=''):
  """Evaluates the model using the training.evaluation library.

  Args:
    input_fn: callable returning a `(features, labels)` tuple.
    hooks: optional list of `SessionRunHook`s for the eval session.
    checkpoint_path: checkpoint to evaluate; defaults to the latest
      checkpoint in model_dir.
    name: suffix distinguishing several evaluations of the same model.

  Returns:
    dict of metric results, including the global step.

  Raises:
    ValueError: if no trained checkpoint can be found.
  """
  # Check that model has been trained (if nothing has been set explicitly).
  if not checkpoint_path:
    latest_path = saver.latest_checkpoint(self._model_dir)
    if not latest_path:
      raise ValueError('Could not find trained model in model_dir: {}.'.
                       format(self._model_dir))
    checkpoint_path = latest_path
  # Setup output directory: 'eval' by default, 'eval_<name>' for named runs.
  eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                          'eval_' + name)
  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(self._config.tf_random_seed)
    global_step_tensor = training.create_global_step(g)
    features, labels = input_fn()
    estimator_spec = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.EVAL)
    # 'loss' and the global step key are reserved; user metrics must not
    # collide with them.
    self._verify_default_metric_key(model_fn_lib.MetricKeys.LOSS,
                                    estimator_spec.eval_metric_ops)
    estimator_spec.eval_metric_ops[
        model_fn_lib.MetricKeys.LOSS] = metrics_lib.mean(estimator_spec.loss)
    update_op, eval_dict = _extract_metric_update_ops(
        estimator_spec.eval_metric_ops)
    self._verify_default_metric_key(ops.GraphKeys.GLOBAL_STEP, eval_dict)
    eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor
    eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
        checkpoint_path=checkpoint_path,
        master=self._config.evaluation_master,
        scaffold=estimator_spec.scaffold,
        eval_ops=update_op,
        final_ops=eval_dict,
        hooks=hooks,
        config=config_pb2.ConfigProto(allow_soft_placement=True))
    # Persist the metrics as a summary so TensorBoard can display them.
    _write_dict_to_summary(
        output_dir=eval_dir,
        dictionary=eval_results,
        current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])
  return eval_results
def _verify_default_metric_key(self, metric_key, eval_dict):
if metric_key in six.iterkeys(eval_dict):
raise ValueError(
'Metric with name `%s` is not allowed, because Estimator '
'already defines a default metric with the same name.' % metric_key)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required as a default device_fn.
`Estimator` uses ReplicaDeviceSetter as a default device placer. It sets the
distributed related arguments such as number of ps_replicas based on given
config.
Args:
config: A `RunConfig` instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return training.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
def _get_arguments(func):
"""Returns a spec of given func."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func)
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _verify_model_fn_args(model_fn, params):
  """Verifies model fn arguments."""
  arg_names = _get_arguments(model_fn).args
  # `features` and `labels` are mandatory in every model_fn signature.
  if 'features' not in arg_names:
    raise ValueError('model_fn (%s) must include features argument.' % model_fn)
  if 'labels' not in arg_names:
    raise ValueError('model_fn (%s) must include labels argument.' % model_fn)
  # Passing params to an fn that does not accept them is an error; the
  # reverse (declared but unfed) only deserves a warning.
  if params is not None and 'params' not in arg_names:
    raise ValueError('model_fn (%s) does not include params argument, '
                     'but params (%s) is passed to Estimator.' % (model_fn,
                                                                  params))
  if params is None and 'params' in arg_names:
    logging.warning('Estimator\'s model_fn (%s) includes params '
                    'argument, but params are not passed to Estimator.',
                    model_fn)
  non_valid_args = list(set(arg_names) - _VALID_MODEL_FN_ARGS)
  if non_valid_args:
    raise ValueError('model_fn (%s) has following not expected args: %s' %
                     (model_fn, non_valid_args))
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = training.NewCheckpointReader(
training.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def _extract_metric_update_ops(eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name, metric_ops in sorted(six.iteritems(eval_dict)):
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
if update_ops:
update_op = control_flow_ops.group(*update_ops)
else:
update_op = None
return update_op, value_ops
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(six.iteritems(dictionary)))
def _write_dict_to_summary(output_dir,
                           dictionary,
                           current_global_step):
  """Writes a `dict` into summary file in given output directory.

  Args:
    output_dir: `str`, directory to write the summary file in.
    dictionary: the `dict` to be written to summary file.
    current_global_step: `int`, the current global step.
  """
  logging.info('Saving dict for global step %d: %s', current_global_step,
               _dict_to_str(dictionary))
  # FileWriterCache reuses one writer per directory, so repeated
  # evaluations append to the same events file.
  summary_writer = writer_cache.FileWriterCache.get(output_dir)
  summary_proto = summary_pb2.Summary()
  for key in dictionary:
    if dictionary[key] is None:
      # None carries no information; silently drop the entry.
      continue
    value = summary_proto.value.add()
    value.tag = key
    if (isinstance(dictionary[key], np.float32) or
        isinstance(dictionary[key], float)):
      value.simple_value = float(dictionary[key])
    else:
      # Anything that is not a float/np.float32 (including ints such as
      # the global step entry) is skipped with a warning.
      logging.warn('Skipping summary for %s, must be a float or np.float32.',
                   key)
  summary_writer.add_summary(summary_proto, current_global_step)
  summary_writer.flush()
| |
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The eos_lldp_interfaces class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list, dict_diff, param_list_to_dict
from ansible.module_utils.network.eos.facts.facts import Facts
from ansible.module_utils.network.eos.utils.utils import normalize_interface
class Lldp_interfaces(ConfigBase):
    """
    The eos_lldp_interfaces class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'lldp_interfaces',
    ]

    def get_lldp_interfaces_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        # Fall back to an empty list when the resource is absent from facts.
        return facts['ansible_network_resources'].get('lldp_interfaces') or []

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()

        before = self.get_lldp_interfaces_facts()
        commands = list(self.set_config(before))
        if commands:
            # Push the commands to the device only outside of check mode.
            if not self._module.check_mode:
                self._connection.edit_config(commands)
            result['changed'] = True
        result['commands'] = commands

        # Re-gather facts so 'after' reflects what the device now runs.
        after = self.get_lldp_interfaces_facts()
        result['before'] = before
        if result['changed']:
            result['after'] = after
        result['warnings'] = warnings
        return result

    def set_config(self, existing_lldp_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
        collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        desired = self._module.params['config']
        return to_list(self.set_state(desired, existing_lldp_interfaces_facts))

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        handlers = {
            'overridden': self._state_overridden,
            'deleted': self._state_deleted,
            'merged': self._state_merged,
            'replaced': self._state_replaced,
        }
        want = param_list_to_dict(want, remove_key=False)
        have = param_list_to_dict(have, remove_key=False)
        return handlers[self._module.params['state']](want, have)

    @staticmethod
    def _state_replaced(want, have):
        """ The command generator when state is replaced

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        for key, desired in want.items():
            name = normalize_interface(key)
            extant = have.get(name, dict(name=name))
            commands.extend(generate_commands(name,
                                              dict_diff(extant, desired),
                                              dict_diff(desired, extant)))
        return commands

    @staticmethod
    def _state_overridden(want, have):
        """ The command generator when state is overridden

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        for name, extant in have.items():
            # Interfaces absent from want are reset to their defaults.
            desired = want.get(name, dict(name=name))
            commands.extend(generate_commands(name,
                                              dict_diff(extant, desired),
                                              dict_diff(desired, extant)))
        return commands

    @staticmethod
    def _state_merged(want, have):
        """ The command generator when state is merged

        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        commands = []
        for key, desired in want.items():
            name = normalize_interface(key)
            extant = have.get(name, dict(name=name))
            # Merge only adds configuration; nothing is removed.
            commands.extend(generate_commands(name,
                                              dict_diff(extant, desired),
                                              {}))
        return commands

    @staticmethod
    def _state_deleted(want, have):
        """ The command generator when state is deleted

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        commands = []
        for key in want:
            name = normalize_interface(key)
            if name not in have:
                # Nothing to remove for unconfigured interfaces.
                continue
            del_config = dict_diff(dict(name=name), have[name])
            commands.extend(generate_commands(name, {}, del_config))
        return commands
def generate_commands(name, to_set, to_remove):
    """Render the lldp command list for a single interface.

    :param name: the (normalized) interface name
    :param to_set: option -> desired value; falsy values are negated with
        'no ', None values are skipped entirely
    :param to_remove: options to return to their (enabled) default
    :returns: the commands, prefixed with the 'interface ...' context line
        whenever any change is required
    """
    changes = []
    for option, value in to_set.items():
        if value is None:
            continue
        template = "lldp {0}" if value else "no lldp {0}"
        changes.append(template.format(option))
    changes.extend("lldp {0}".format(option) for option in to_remove)
    if changes:
        changes.insert(0, "interface {0}".format(name))
    return changes
| |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine data model (schema) definition for Gerrit."""
# Python imports
import base64
import datetime
try:
import hashlib
except ImportError:
pass
import logging
import random
import re
import zlib
# AppEngine imports
from google.appengine.ext import db
from google.appengine.api import users
# Local imports
from memcache import Key as MemCacheKey
from memcache import CachedDict
import patching
# Datastore schema version; compared against Settings.schema_version and
# bumped when the entity layout changes.
CUR_SCHEMA_VERSION = 1

# Default number of context lines around a diff hunk, and the values a
# user may choose from.
DEFAULT_CONTEXT = 10
CONTEXT_CHOICES = (3, 10, 25, 50, 75, 100)

# Maximum number of entities fetched by a single datastore query.
FETCH_MAX = 1000

# Maximum depth of chained delta-encoded content.
# NOTE(review): presumably bounds delta-chain reconstruction (see the
# `patching` module) -- confirm against DeltaPatchingException usage.
MAX_DELTA_DEPTH = 10

# (score value, human-readable label) pairs for review messages.
LGTM_CHOICES = (
    ('lgtm', 'Looks good to me, approved.'),
    ('yes', 'Looks good to me, but someone else must approve.'),
    ('abstain', 'No score.'),
    ('no', 'I would prefer that you didn\'t submit this.'),
    ('reject', 'Do not submit.'),
  )

# The score choices available to reviewers who cannot approve or reject.
LIMITED_LGTM_CHOICES = [choice for choice in LGTM_CHOICES
                        if choice[0] != 'lgtm' and choice[0] != 'reject']

### GQL query cache ###

# Cache of query string -> GqlQuery, shared by BackedUpModel.gql().
_query_cache = {}
class BackedUpModel(db.Model):
  """Base class for our models that keeps a property used for backup."""

  # Marker used by the backup machinery to track when this entity was
  # last captured.
  last_backed_up = db.IntegerProperty(default=0)

  def __init__(self, *args, **kwargs):
    db.Model.__init__(self, *args, **kwargs)

  # Bug fix: this was declared without @classmethod even though its first
  # parameter is `cls` and callers invoke it on the class itself
  # (e.g. Project.get_project_for_name uses cls.gql(...)).
  @classmethod
  def gql(cls, clause, *args, **kwds):
    """Return a query object, from the cache if possible.

    Args:
      cls: a BackedUpModel subclass.
      clause: a query clause, e.g. 'WHERE draft = TRUE'.
      *args, **kwds: positional and keyword arguments to be bound to the query.

    Returns:
      A db.GqlQuery instance corresponding to the query with *args and
      **kwds bound to the query.
    """
    query_string = 'SELECT * FROM %s %s' % (cls.kind(), clause)
    # NOTE(review): the cached query object is re-bound on every call;
    # confirm concurrent requests never share it.
    query = _query_cache.get(query_string)
    if query is None:
      _query_cache[query_string] = query = db.GqlQuery(query_string)
    query.bind(*args, **kwds)
    return query
### Exceptions ###
class InvalidLgtmException(Exception):
  """User is not allowed to LGTM this change."""
class InvalidVerifierException(Exception):
  """User is not allowed to verify this change."""
class InvalidSubmitMergeException(Exception):
  """The change cannot be scheduled for merge."""
class DeltaPatchingException(Exception):
  """Applying a patch yielded the wrong hash."""
### Settings ###
def _genkey(n=26):
k = ''.join(map(chr, (random.randrange(256) for i in xrange(n))))
return base64.b64encode(k)
class Settings(BackedUpModel):
  """Global settings for the application instance."""
  # Google Analytics account id, if any.
  analytics = db.StringProperty()
  # Shared secret for server-to-server API calls.
  internal_api_key = db.StringProperty()
  # Secret used to sign XSRF tokens.
  xsrf_key = db.StringProperty()
  # Sender address for outgoing mail.
  from_email = db.StringProperty()
  canonical_url = db.StringProperty(default='')
  source_browser_url = db.StringProperty(default='')
  # Address that receives merge logs, if set.
  merge_log_email = db.StringProperty()
  # Stored schema version; compared against CUR_SCHEMA_VERSION.
  schema_version = db.IntegerProperty(default=0)

  # Memcache key for the singleton, plus a per-process cache.
  _Key = MemCacheKey('Settings_Singleton')
  _LocalCache = None

  @classmethod
  def get_settings(cls):
    """Get the Settings singleton.

    If possible, get it from memcache. If it's not there, it tries to do a
    normal get(). Only if that fails does it call get_or_insert, because of
    possible contention errors due to get_or_insert's transaction.
    """
    if Settings._LocalCache is None:
      def read():
        # Runs only on a memcache miss.
        result = cls.get_by_key_name('settings')
        if result:
          return result
        else:
          # First boot: create the singleton with fresh random secrets.
          return cls.get_or_insert('settings',
                                   internal_api_key=_genkey(26),
                                   xsrf_key=_genkey(26),
                                   schema_version = CUR_SCHEMA_VERSION)
      Settings._LocalCache = Settings._Key.get(read)
    return Settings._LocalCache

  def put(self):
    """Save the settings and invalidate both caches."""
    BackedUpModel.put(self)
    self._Key.clear()
    Settings._LocalCache = None
### Approval rights ###
def _flatten_users_and_groups(users, groups):
"""Returns a set of the users and the groups provided"""
result = set()
for user in users:
result.add(user)
if groups:
for group in db.get(groups):
for user in group.members:
result.add(user)
return result
class ApprovalRight(BackedUpModel):
  """The tuple of a set of path patterns and a set of users who can approve
  changes for those paths."""
  # Path patterns this right applies to; see validate_file() for syntax.
  files = db.StringListProperty()
  # Users/groups allowed to approve, verify or submit matching changes;
  # each role is the union of its direct users and its groups' members.
  approvers_users = db.ListProperty(users.User)
  approvers_groups = db.ListProperty(db.Key)
  verifiers_users = db.ListProperty(users.User)
  verifiers_groups = db.ListProperty(db.Key)
  submitters_users = db.ListProperty(users.User)
  submitters_groups = db.ListProperty(db.Key)
  required = db.BooleanProperty()

  def approvers(self):
    """Returns a set of the users who are approvers."""
    return _flatten_users_and_groups(self.approvers_users,
                                     self.approvers_groups)

  def verifiers(self):
    """Returns a set of the users who are verifiers."""
    return _flatten_users_and_groups(self.verifiers_users,
                                     self.verifiers_groups)

  def submitters(self):
    """Returns a set of the users who are submitters."""
    return _flatten_users_and_groups(self.submitters_users,
                                     self.submitters_groups)

  @classmethod
  def validate_file(cls, file):
    """Returns whether this is a valid file path.

    The rules:
      - The length must be > 0
      - The file path must start with a '/'
      - The file path must contain either 0 or 1 '...'
      - If it contains one '...', it must either be last or directly
        after the first '/'
    These last two limitations could be removed someday but are
    good enough for now.
    """
    if len(file) == 0:
      return False
    if file[0] != '/':
      return False
    # partition() splits around the first occurrence of '...'.
    (before, during, after) = file.partition("...")
    if during == "" and after == "":
      # No '...' at all: a plain path is always valid.
      return True
    if before != "/":
      return False
    if after.find("...") != -1:
      # A second '...' is not allowed.
      return False
    return True
### Projects ###
class Project(BackedUpModel):
  """An open source project.

  Projects have owners who can set approvers and stuff.
  """
  name = db.StringProperty(required=True)
  comment = db.StringProperty(required=False)
  # Owners, directly and via groups; see leads()/is_user_lead().
  owners_users = db.ListProperty(users.User)
  owners_groups = db.ListProperty(db.Key)
  # Keys of the ApprovalRight entities configured for this project.
  code_reviews = db.ListProperty(db.Key)

  @classmethod
  def get_all_projects(cls):
    """Return all projects"""
    all = cls.all()
    all.order('name')
    return list(all)

  @classmethod
  def get_project_for_name(cls, name):
    # Lookup by name through the cached gql() on BackedUpModel.
    return cls.gql('WHERE name=:name', name=name).get()

  def remove(self):
    """delete this project"""
    # Delete the project's ApprovalRights first, then the project itself.
    # NOTE(review): ApprovalRight.get(...) may yield None for stale keys;
    # confirm db.delete tolerates that.
    db.delete(ApprovalRight.get(self.code_reviews))
    self.delete()

  def set_code_reviews(self, approval_right_keys):
    """Replace code_reviews, deleting the previously referenced entities.

    The caller is responsible for put()ting the project afterwards.
    """
    for key in self.code_reviews:
      val = ApprovalRight.get(key)
      if val:
        db.delete(val)
    self.code_reviews = approval_right_keys

  def get_code_reviews(self):
    """Fetch the ApprovalRight entities referenced by code_reviews."""
    return [ApprovalRight.get(key) for key in self.code_reviews]

  def leads(self):
    """Returns a set of the users who are leads."""
    return _flatten_users_and_groups(self.owners_users,
                                     self.owners_groups)

  def is_user_lead(self, user):
    """Returns whether `user` owns this project, directly or via a group."""
    if user in self.owners_users:
      return True
    for group in AccountGroup.get(self.owners_groups):
      if group and user in group.members:
        return True
    return False
def _get_owned_projects(email):
  """Returns the keys of every project `email` owns, directly or via groups."""
  user = users.User(email)
  owned = set(gql(Project,
                  'WHERE owners_users = :1',
                  user).fetch(1000))
  member_groups = gql(AccountGroup, 'WHERE members = :1', user).fetch(1000)
  if member_groups:
    # Also count projects owned through any group the user belongs to.
    group_keys = [g.key() for g in member_groups]
    owned.update(gql(Project,
                     'WHERE owners_groups IN :1',
                     group_keys).fetch(1000))
  return [p.key() for p in owned]
# Memcache-backed map of email -> list of keys of the Projects that user
# owns (directly or through a group), computed by _get_owned_projects.
OwnedProjects = CachedDict(prefix = 'OwnedProject:',
                           compute_one = _get_owned_projects)
class Branch(BackedUpModel):
  """A branch in a specific Project.

  Also tracks the branch's merge queue: patchsets waiting to start
  (`to_merge`), currently being processed (`merging`), and deferred to a
  later round (`waiting`).
  """
  project = db.ReferenceProperty(Project, required=True)
  name = db.StringProperty(required=True)  # == key
  # Merge-queue state machine; None means the queue is idle.
  status = db.StringProperty(choices=('NEEDS_MERGE',
                                      'MERGING',
                                      'BUILDING'))
  merge_submitted = db.DateTimeProperty()
  to_merge = db.ListProperty(db.Key)  # PatchSets
  merging = db.ListProperty(db.Key)  # PatchSets
  waiting = db.ListProperty(db.Key)  # PatchSets

  @classmethod
  def get_or_insert_branch(cls, project, name):
    # The key embeds the project id, so branch names only need to be
    # unique within a project.
    key = 'p.%s %s' % (project.key().id(), name)
    return cls.get_or_insert(key, project=project, name=name)

  @classmethod
  def get_branch_for_name(cls, project, name):
    key = 'p.%s %s' % (project.key().id(), name)
    return cls.get_by_key_name(key)

  @property
  def short_name(self):
    # Strip the git ref prefix for display purposes.
    if self.name.startswith("refs/heads/"):
      return self.name[len("refs/heads/"):]
    return self.name

  def merge_patchset(self, patchset):
    """Add a patchset to the end of the branch's merge queue

    This method runs in an independent transaction.
    """
    ps_key = patchset.key()
    def trans(key):
      # Re-fetch inside the transaction so the list update is atomic.
      b = db.get(key)
      if not ps_key in b.to_merge:
        b.to_merge.append(ps_key)
      if b.status is None:
        b.status = 'NEEDS_MERGE'
      b.merge_submitted = datetime.datetime.now()
      b.put()
    db.run_in_transaction(trans, self.key())

  def begin_merge(self):
    """Lock this branch and start merging patchsets.

    This method runs in an independent transaction.

    Returns:
      The PatchSet entities to merge, with entries whose change was
      deleted or closed already pruned from the queue.
    """
    def trans(key):
      b = db.get(key)
      if b.status == 'NEEDS_MERGE':
        b.status = 'MERGING'
        # Previously deferred patchsets get another chance ahead of the
        # newly submitted ones.
        b.merging.extend(b.waiting)
        b.merging.extend(b.to_merge)
        b.waiting = []
        b.to_merge = []
        b.put()
        return b.merging
      # Another worker holds the branch (or nothing to do).
      return []
    keys = db.run_in_transaction(trans, self.key())
    objs = db.get(keys)
    good = []
    torm = []
    for k, ps in zip(keys, objs):
      # Keep only patchsets that still exist and whose change is open.
      if ps and not ps.change.closed:
        good.append(ps)
      else:
        torm.append(k)
    if torm:
      def clear_branch(key):
        b = db.get(key)
        for ps_key in torm:
          if ps_key in b.merging:
            b.merging.remove(ps_key)
        # If everything was pruned, fall back to NEEDS_MERGE (more work
        # queued meanwhile) or to idle.
        if not good and b.status in ('MERGING', 'BUILDING'):
          if b.to_merge:
            b.status = 'NEEDS_MERGE'
          else:
            b.status = None
        b.put()
      db.run_in_transaction(clear_branch, self.key())
    return good

  def finish_merge(self, success, fail, defer):
    """Update our patchset lists with the results of a merge.

    This method runs in an independent transaction.

    Args:
      success: patchsets merged cleanly; removed from every queue.
      fail: patchsets that failed to merge; also removed from every queue.
      defer: patchsets postponed; moved to the `waiting` queue.
    """
    def trans(key):
      b = db.get(key)
      rm = []
      rm.extend(success)
      rm.extend(fail)
      for ps in rm:
        ps_key = ps.key()
        if ps_key in b.to_merge:
          b.to_merge.remove(ps_key)
        if ps_key in b.merging:
          b.merging.remove(ps_key)
        if ps_key in b.waiting:
          b.waiting.remove(ps_key)
      for ps in defer:
        ps_key = ps.key()
        if ps_key in b.to_merge:
          b.to_merge.remove(ps_key)
        if ps_key in b.merging:
          b.merging.remove(ps_key)
        if ps_key not in b.waiting:
          b.waiting.append(ps_key)
      b.put()
    db.run_in_transaction(trans, self.key())

  def merged(self, merged):
    """Updates the branch to include pending PatchSets.

    This method runs in an independent transaction.
    """
    def trans(key):
      b = db.get(key)
      # Merged patchsets leave every queue.
      for ps in merged:
        ps_key = ps.key()
        if ps_key in b.to_merge:
          b.to_merge.remove(ps_key)
        if ps_key in b.merging:
          b.merging.remove(ps_key)
        if ps_key in b.waiting:
          b.waiting.remove(ps_key)
      # Queue drained: more work arrived (NEEDS_MERGE) or go idle (None).
      if b.status in ('MERGING', 'BUILDING'):
        if b.to_merge:
          b.status = 'NEEDS_MERGE'
        else:
          b.status = None
      b.put()
    db.run_in_transaction(trans, self.key())
### Revisions ###
class RevisionId(BackedUpModel):
  """A specific revision of a project."""
  project = db.ReferenceProperty(Project, required=True)
  id = db.StringProperty(required=True)  # == key
  # Author/committer identity as recorded in the commit metadata.
  author_name = db.StringProperty()
  author_email = db.EmailProperty()
  author_when = db.DateTimeProperty()
  author_tz = db.IntegerProperty()
  committer_name = db.StringProperty()
  committer_email = db.EmailProperty()
  committer_when = db.DateTimeProperty()
  committer_tz = db.IntegerProperty()
  ancestors = db.StringListProperty()  # other RevisionId.id
  message = db.TextProperty()
  # Stringified key of the PatchSet this revision is linked to; resolved
  # lazily through the `patchset` property below.
  patchset_key = db.StringProperty()

  def _get_patchset(self):
    # Lazily resolve patchset_key, caching the entity on the instance.
    try:
      return self._patchset_obj
    except AttributeError:
      k_str = self.patchset_key
      if k_str:
        self._patchset_obj = db.get(db.Key(k_str))
      else:
        self._patchset_obj = None
      return self._patchset_obj

  def _set_patchset(self, p):
    # Keep the stored string key and the cached entity in sync.
    if p is None:
      self.patchset_key = None
      self._patchset_obj = None
    else:
      self.patchset_key = str(p.key())
      self._patchset_obj = p

  patchset = property(_get_patchset, _set_patchset)

  @classmethod
  def get_or_insert_revision(cls, project, id, **kw):
    # The key embeds the project id so revision ids are unique per project.
    key = 'p.%s %s' % (project.key().id(), id)
    return cls.get_or_insert(key, project=project, id=id, **kw)

  @classmethod
  def get_revision(cls, project, id):
    key = 'p.%s %s' % (project.key().id(), id)
    return cls.get_by_key_name(key)

  @classmethod
  def get_for_patchset(cls, patchset):
    """Get all revisions linked to a patchset.
    """
    return gql(cls, 'WHERE patchset_key = :1', str(patchset.key()))

  def add_ancestor(self, other_id):
    """Adds the other revision as an ancestor for this one.

    If the other rev is already in the list, does nothing.
    """
    if not other_id in self.ancestors:
      self.ancestors.append(other_id)

  def remove_ancestor(self, other_id):
    """Removes an ancestor previously stored.

    If the other rev is not already in the list, does nothing.
    """
    if other_id in self.ancestors:
      self.ancestors.remove(other_id)

  def get_ancestors(self):
    """Fully fetches all ancestors from the data store.
    """
    p_id = self.project.key().id()
    names = ['p.%s %s' % (p_id, i) for i in self.ancestors]
    # get_by_key_name yields None for missing entities; filter them out.
    return [r for r in RevisionId.get_by_key_name(names) if r]

  def get_children(self):
    """Obtain the revisions that depend upon this one.
    """
    return gql(RevisionId,
               'WHERE project = :1 AND ancestors = :2',
               self.project, self.id)

  def link_patchset(self, new_patchset):
    """Uniquely connect one patchset to this revision.

    Returns True if the passed patchset is the single patchset;
    False if another patchset has already been linked onto it.
    """
    def trans(self_key):
      # Re-fetch inside the transaction so the check-and-set is atomic.
      c = db.get(self_key)
      if c.patchset is None:
        c.patchset = new_patchset
        c.put()
        return True
      return False
    return db.run_in_transaction(trans, self.key())
class BuildAttempt(BackedUpModel):
  """A specific build attempt."""
  branch = db.ReferenceProperty(Branch, required=True)
  # NOTE(review): presumably a RevisionId.id for the revision being
  # built -- confirm against the build machinery.
  revision_id = db.StringProperty(required=True)
  # PatchSets newly included in this attempt.
  new_changes = db.ListProperty(db.Key)  # PatchSet
  started = db.DateTimeProperty(auto_now_add=True)
  finished = db.BooleanProperty(default=False)
  # Outcome; only meaningful once `finished` is True.
  success = db.BooleanProperty()
### Changes, PatchSets, Patches, DeltaContents, Comments, Messages ###
class Change(BackedUpModel):
  """The major top-level entity.

  It has one or more PatchSets as its descendants.
  """
  subject = db.StringProperty(required=True)
  description = db.TextProperty()
  owner = db.UserProperty(required=True)
  created = db.DateTimeProperty(auto_now_add=True)
  modified = db.DateTimeProperty(auto_now=True)
  reviewers = db.ListProperty(db.Email)
  claimed = db.BooleanProperty(default=False)
  cc = db.ListProperty(db.Email)
  closed = db.BooleanProperty(default=False)
  # Cached counters; see num_comments and new_patchset().
  n_comments = db.IntegerProperty(default=0)
  n_patchsets = db.IntegerProperty(default=0)
  dest_project = db.ReferenceProperty(Project, required=True)
  dest_branch = db.ReferenceProperty(Branch, required=True)
  # Merge state: when the change was submitted and whether it landed.
  merge_submitted = db.DateTimeProperty()
  merged = db.BooleanProperty(default=False)
  # Notification flags, reset by submit_merge() for each new attempt.
  emailed_clean_merge = db.BooleanProperty(default=False)
  emailed_missing_dependency = db.BooleanProperty(default=False)
  emailed_path_conflict = db.BooleanProperty(default=False)
  # Stringified key of the PatchSet being merged; resolved lazily by the
  # merge_patchset property.
  merge_patchset_key = db.StringProperty()
def _get_merge_patchset(self):
  # Lazily resolve merge_patchset_key (a stringified db.Key), caching the
  # entity on the instance.
  try:
    return self._merge_patchset_obj
  except AttributeError:
    k_str = self.merge_patchset_key
    if k_str:
      self._merge_patchset_obj = db.get(db.Key(k_str))
    else:
      self._merge_patchset_obj = None
    return self._merge_patchset_obj

def _set_merge_patchset(self, p):
  # Keep the stored string key and the cached entity in sync.
  if p is None:
    self.merge_patchset_key = None
    self._merge_patchset_obj = None
  else:
    self.merge_patchset_key = str(p.key())
    self._merge_patchset_obj = p

# The PatchSet scheduled for merge, backed by merge_patchset_key.
merge_patchset = property(_get_merge_patchset, _set_merge_patchset)
_is_starred = None  # per-instance cache for the property below

@property
def is_starred(self):
  """Whether the current user has this change starred."""
  if self._is_starred is not None:
    return self._is_starred
  account = Account.current_user_account
  # Anonymous users (no account) have nothing starred.
  self._is_starred = account is not None and self.key().id() in account.stars
  return self._is_starred
def update_comment_count(self, n):
  """Increment the n_comments property by n.

  Does not put() the change; saving is the caller's responsibility.
  """
  self.n_comments += n
@property
def num_comments(self):
  """The number of non-draft comments for this change.

  Alias for self.n_comments, which defaults to 0 and is maintained via
  update_comment_count().
  """
  return self.n_comments
_num_drafts = None  # per-instance cache for the property below

@property
def num_drafts(self):
  """The number of draft comments on this change for the current user.

  The value is expensive to compute, so it is cached.
  """
  if self._num_drafts is None:
    account = Account.current_user_account
    if account is None:
      # Anonymous users have no drafts.
      self._num_drafts = 0
    else:
      query = gql(Comment,
                  'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
                  self, account.user)
      self._num_drafts = query.count()
  return self._num_drafts
def new_patchset(self, **kw):
  """Construct a new patchset for this change.

  Runs in a transaction so that concurrent calls get distinct,
  sequential (1-based) patchset ids.
  """
  def trans(change_key):
    change = db.get(change_key)
    change.n_patchsets += 1
    id = change.n_patchsets
    change.put()
    # The patchset is stored as a child entity of the change.
    patchset = PatchSet(change=change, parent=change, id=id, **kw)
    patchset.put()
    return patchset
  return db.run_in_transaction(trans, self.key())
  def set_review_status(self, user):
    """Gets or inserts the ReviewStatus object for the supplied user."""
    return ReviewStatus.get_or_insert_status(self, user)
  def get_review_status(self, user=None):
    """Return the lgtm status for the given user if supplied. All for this
    change otherwise.

    Returns [] for the owner, who implicitly approves her own change.
    NOTE(review): get_status_for_user returns a single entity (or None)
    while the owner branch returns a list — callers appear to handle both.
    """
    if user:
      # The owner must be checked separately because she automatically
      # approves / verifies her own change and there is no ReviewStatus
      # for that one.
      if user == self.owner:
        return []
      return ReviewStatus.get_status_for_user(self, user)
    else:
      return ReviewStatus.all_for_change(self)
@classmethod
def get_reviewer_status(cls, reviews):
"""Return a tuple of who has commented on the changes.
The owner of the change is automatically added to the list
Args:
reviews a list of ReviewStatus objects are returned from
get_review_status().
Returns:
A map of the LGTM_CHOICES keys to users, plus the mapping
verified_by --> the uesrs who verified it
"""
result = {}
for (k,v) in LGTM_CHOICES:
result[k] = [r.user for r in reviews if r.lgtm == k]
result["verified_by"] = [r.user for r in reviews if r.verified]
return result
@property
def is_submitted(self):
"""Return true if the change has been submitted for merge.
"""
return self.merge_submitted is not None
def submit_merge(self, patchset):
"""Schedule a specific patchset of the change to be merged.
"""
branch = self.dest_branch
if not branch:
raise InvalidSubmitMergeException, 'No branch defined'
if self.merged:
raise InvalidSubmitMergeException, 'Already merged'
if self.is_submitted:
raise InvalidSubmitMergeException, \
"Already merging patch set %s" % self.merge_patchset.id
branch.merge_patchset(patchset)
self.merge_submitted = datetime.datetime.now()
self.merge_patchset = patchset
self.emailed_clean_merge = False
self.emailed_missing_dependency = False
self.emailed_path_conflict = False
def unsubmit_merge(self):
"""Unschedule a merge of this change.
"""
if self.merged:
raise InvalidSubmitMergeException, 'Already merged'
self.merge_submitted = None
self.merge_patchset = None
def set_reviewers(self, reviewers):
self.reviewers = reviewers
self.claimed = len(reviewers) != 0
_user_can_edit = None
def user_can_edit(self):
"""Can the current account edit this change?
"""
if self._user_can_edit is None:
a = Account.current_user_account
if not a:
e = False
elif a.is_admin:
e = True
elif self.owner == a.user:
e = True
elif self.dest_project.is_user_lead(a.user):
e = True
else:
e = False
self._user_can_edit = e
return self._user_can_edit
  def remove_reviewer(self, user):
    """Removes a user from the list of reviewers, and removes the ReviewStatus
    object."""
    def trans():
      email = user.email()
      # Drop the email; set_reviewers also refreshes the claimed flag.
      reviewers = [e for e in self.reviewers if e != email]
      self.set_reviewers(reviewers)
      rs = ReviewStatus.get_status_for_user(self, user)
      if rs:
        rs.delete()
      self.put()
    db.run_in_transaction(trans)
class PatchSetFilenames(BackedUpModel):
  """A list of the file names in a PatchSet.

  This is a descendant of a PatchSet.

  The list is stored zlib-compressed as a single NUL-joined UTF-8 string
  and additionally cached in memcache.
  """
  compressed_filenames = db.BlobProperty()
  @classmethod
  def _mc(cls, patchset):
    # Memcache key shared by store_compressed and get_list.
    return MemCacheKey("PatchSet %s filenames" % patchset.key())
  @classmethod
  def store_compressed(cls, patchset, bin):
    """Persist an already-compressed filename blob and prime the cache."""
    cls(key_name = 'filenames',
        compressed_filenames = db.Blob(bin),
        parent = patchset).put()
    cls._mc(patchset).set(cls._split(bin))
  @classmethod
  def get_list(cls, patchset):
    """Return the patchset's filenames via memcache, datastore, or query."""
    def read():
      c = cls.get_by_key_name('filenames', parent = patchset)
      if c:
        return cls._split(c.compressed_filenames)
      # Not stored yet: build the list from the Patch entities, persist it.
      names = patchset._all_filenames()
      bin = zlib.compress("\0".join(names).encode('utf_8'), 9)
      cls(key_name = 'filenames',
          compressed_filenames = db.Blob(bin),
          parent = patchset).put()
      return names
    return cls._mc(patchset).get(read)
  @classmethod
  def _split(cls, bin):
    # Inverse of the join in get_list: decompress, split on NUL, decode.
    tmp = zlib.decompress(bin).split("\0")
    return [s.decode('utf_8') for s in tmp]
class PatchSet(BackedUpModel):
  """A set of patches uploaded together.

  This is a descendant of a Change and has Patches as descendants.
  """
  id = db.IntegerProperty(required=True)
  change = db.ReferenceProperty(Change, required=True)  # == parent
  message = db.StringProperty()
  owner = db.UserProperty(required=True)
  created = db.DateTimeProperty(auto_now_add=True)
  modified = db.DateTimeProperty(auto_now=True)
  revision = db.ReferenceProperty(RevisionId, required=True)
  complete = db.BooleanProperty(default=False)

  # Per-instance cache for the filenames property.
  _filenames = None

  @property
  def filenames(self):
    """The filenames touched by this patchset (cached; ordered by name)."""
    if self._filenames is None:
      self._filenames = PatchSetFilenames.get_list(self)
    return self._filenames

  def _all_filenames(self):
    """Collect every Patch filename, paging through the query in batches.

    Keyed pagination on filename avoids the datastore's fetch limits.
    """
    last = ''
    names = []
    while True:
      # Renamed from 'list' to avoid shadowing the builtin.
      batch = gql(Patch,
                  'WHERE patchset = :1 AND filename > :2'
                  ' ORDER BY filename',
                  self, last).fetch(500)
      if not batch:
        break
      for p in batch:
        names.append(p.filename)
      last = batch[-1].filename
    return names

  def revision_hash(self):
    """The id of the associated RevisionId entity."""
    return self.revision.id
class Message(BackedUpModel):
  """A copy of a message sent out in email.

  This is a descendant of a Change.
  """
  change = db.ReferenceProperty(Change, required=True) # == parent
  subject = db.StringProperty()
  sender = db.EmailProperty()
  recipients = db.ListProperty(db.Email)
  date = db.DateTimeProperty(auto_now_add=True)
  text = db.TextProperty()
class CachedDeltaContent(object):
  """A fully inflated DeltaContent snapshot suitable for memcache.

  Only the plain line lists and type flags are copied from the datastore
  entity, so the snapshot pickles cheaply.
  """

  def __init__(self, dc):
    self.data_lines = dc.data_lines
    self.patch_lines = dc.patch_lines
    self.is_patch = dc.is_patch
    self.is_data = dc.is_data

  @property
  def data_text(self):
    """The data as one string, or None when no data lines were cached."""
    lines = self.data_lines
    return None if lines is None else ''.join(lines)

  @property
  def patch_text(self):
    """The patch as one string, or None when no patch lines were cached."""
    lines = self.patch_lines
    return None if lines is None else ''.join(lines)

  @classmethod
  def get(cls, key):
    """Fetch through memcache, loading from the datastore on a miss."""
    def load():
      entity = db.get(key)
      return cls(entity) if entity else None
    cache = MemCacheKey('DeltaContent:%s' % key.name(),
                        compress = True)
    return cache.get(load)
def _apply_patch(old_lines, patch_name, dif_lines):
  """Apply diff lines to old_lines and return the patched text as a string.

  patch_name is passed to the parser for diagnostics only.
  """
  chunks = patching.ParsePatchToChunks(dif_lines, patch_name)
  pieces = []
  for _tag, _old, new in patching.PatchChunks(old_lines, chunks):
    pieces.extend(new)
  return ''.join(pieces)
def _blob_hash(data):
m = hashlib.sha1()
m.update("blob %d\0" % len(data))
m.update(data)
return m.hexdigest()
class DeltaContent(BackedUpModel):
  """Any content, such as for the old or new image of a Patch,
  or the patch data itself.

  These are stored as top-level entities.

  Key:
    Git blob SHA-1 of inflate(text)
    -or-
    Randomly generated name if this is a patch
  """
  text_z = db.BlobProperty(required=True)  # zlib-deflated payload
  depth = db.IntegerProperty(default=0, required=True)  # length of the delta chain
  base = db.SelfReferenceProperty()  # delta base, when text_z is a patch
  # Per-instance caches for the lazily inflated views below.
  _data_lines = None
  _data_text = None
  _patch_text = None
  _patch_lines = None
  @classmethod
  def create_patch(cls, id, text_z):
    """Store (or return the existing) patch content under 'patch:<id>'."""
    key_name = 'patch:%s' % id
    return cls.get_or_insert(key_name,
                             text_z = db.Blob(text_z),
                             depth = 0,
                             base = None)
  @classmethod
  def create_content(cls, id, text_z, base = None):
    """Create (or lookup and return an existing) content instance.

    Arguments:
      id:
        Git blob SHA-1 hash of the fully inflated content.

      text_z:
        If base is None this is the deflated content whose hash
        is id.

        If base is supplied this is a patch which when applied to
        base yields the content whose hash is id.

      base:
        The base content if text_z is a patch.

    Raises:
      DeltaPatchingException: if applying text_z to base does not
        reproduce content hashing to id.
    """
    key_name = 'content:%s' % id
    r = cls.get_by_key_name(key_name)
    if r:
      return r
    if base is None:
      return cls.get_or_insert(key_name,
                               text_z = db.Blob(text_z),
                               depth = 0,
                               base = None)
    # Verify the delta: applying it to base must reproduce content whose
    # git blob hash is exactly id.
    my_text = _apply_patch(base.data_lines,
                           id,
                           zlib.decompress(text_z).splitlines(True))
    cmp_id = _blob_hash(my_text)
    if id != cmp_id:
      raise DeltaPatchingException()
    if base.depth < MAX_DELTA_DEPTH:
      return cls.get_or_insert(key_name,
                               text_z = db.Blob(text_z),
                               depth = base.depth + 1,
                               base = base)
    # Delta chain too long: store fully inflated to bound reconstruction
    # cost on later reads.
    return cls.get_or_insert(key_name,
                             text_z = db.Blob(zlib.compress(my_text)),
                             depth = 0,
                             base = None)
  @property
  def is_patch(self):
    # Relies on db internals: '_base' is the raw key behind the 'base'
    # ReferenceProperty, checked without dereferencing the entity.
    return self._base or self.key().name().startswith('patch:')
  @property
  def is_data(self):
    return self.key().name().startswith('content:')
  @property
  def data_text(self):
    """The fully inflated content; applies the delta chain when needed."""
    if self._data_text is None:
      if self._base:
        base = CachedDeltaContent.get(self._base)
        raw = _apply_patch(base.data_lines,
                           self.key().name(),
                           self.patch_lines)
      else:
        raw = zlib.decompress(self.text_z)
      self._data_text = raw
    return self._data_text
  @property
  def data_lines(self):
    """data_text split into lines, retaining line endings (cached)."""
    if self._data_lines is None:
      self._data_lines = self.data_text.splitlines(True)
    return self._data_lines
  @property
  def patch_text(self):
    """The raw patch text, or None if this entity is plain content."""
    if not self.is_patch:
      return None
    if self._patch_text is None:
      self._patch_text = zlib.decompress(self.text_z)
    return self._patch_text
  @property
  def patch_lines(self):
    """patch_text split into lines, or None if this is not a patch."""
    if not self.is_patch:
      return None
    if self._patch_lines is None:
      self._patch_lines = self.patch_text.splitlines(True)
    return self._patch_lines
class PatchOtherVersion(object):
  """A reference to a prior version of a patch.

  A simple value object holding the owning patchset's key id, its
  user-visible name, and the patch's id within that patchset.
  """

  def __init__(self, patchset_id, patchset_name, patch_id):
    self.patchset_id, self.patchset_name, self.patch_id = (
        patchset_id, patchset_name, patch_id)
class Patch(BackedUpModel):
  """A single patch, i.e. a set of changes to a single file.

  This is a descendant of a PatchSet.
  """
  patchset = db.ReferenceProperty(PatchSet, required=True)  # == parent
  filename = db.StringProperty(required=True)
  status = db.StringProperty(required=True)  # 'A', 'M', 'D'
  multi_way_diff = db.BooleanProperty(default=False)
  n_comments = db.IntegerProperty()

  old_data = db.ReferenceProperty(DeltaContent, collection_name='olddata_set')
  new_data = db.ReferenceProperty(DeltaContent, collection_name='newdata_set')
  diff_data = db.ReferenceProperty(DeltaContent, collection_name='diffdata_set')

  # Note: Each entry is a triple:
  #   "${patchset.key().id} ${patchset.id} ${patch.id}"
  #
  other_versions = db.StringListProperty()

  @classmethod
  def get_or_insert_patch(cls, patchset, filename, **kw):
    """Get or insert the patch for a specific file path.

    This method runs in an independent transaction. Keys are the SHA-1 of
    the filename (prefixed 'z') so arbitrary paths yield valid key names.
    """
    m = hashlib.sha1()
    m.update(filename)
    key = 'z%s' % m.hexdigest()
    return cls.get_or_insert(key,
                             parent = patchset,
                             patchset = patchset,
                             filename = filename,
                             **kw)

  @classmethod
  def get_patch_by_filename(cls, parent, filename):
    """Look up a patch by path, using the same hashed key scheme as above."""
    m = hashlib.sha1()
    m.update(filename)
    key = 'z%s' % m.hexdigest()
    return cls.get_by_key_name(key, parent=parent)

  @classmethod
  def get_patch(cls, parent, id_str):
    """Look up a patch by string id: a 'z'-prefixed key name or numeric id."""
    if id_str.startswith('z'):
      return cls.get_by_key_name(id_str, parent=parent)
    return cls.get_by_id(int(id_str), parent=parent)

  @property
  def id(self):
    """This patch's key id or name, as a string."""
    return str(self.key().id_or_name())

  @property
  def num_comments(self):
    """The number of non-draft comments for this patch."""
    return self.n_comments

  def update_comment_count(self, n):
    """Increment the n_comments property by n."""
    self.n_comments += n

  # Per-instance cache for num_drafts (None = not yet computed).
  _num_drafts = None

  @property
  def num_drafts(self):
    """The number of draft comments on this patch for the current user.

    The value is expensive to compute, so it is cached. Anonymous users
    always see 0.
    """
    if self._num_drafts is None:
      user = Account.current_user_account
      if user is None:
        self._num_drafts = 0
      else:
        query = gql(Comment,
                    'WHERE patch = :1 AND draft = TRUE AND author = :2',
                    self, user.user)
        self._num_drafts = query.count()
    return self._num_drafts

  @property
  def other_patch_versions(self):
    """Parse other_versions entries into PatchOtherVersion objects."""
    return [PatchOtherVersion(*entry.split(' ', 3))
            for entry in self.other_versions]

  def _data(self, name):
    """Return the CachedDeltaContent for 'old', 'new' or 'diff' (memoized)."""
    prop = '_%s_CachedDeltaContent' % name
    try:
      c = getattr(self, prop)
    except AttributeError:
      # XXX Using internal knowledge about db package:
      # Key for ReferenceProperty 'foo' is '_foo'.
      data_key = getattr(self, '_%s_data' % name, None)
      if data_key:
        c = CachedDeltaContent.get(data_key)
        # Bug fix: compare the *name* against 'diff'/'new'. The old code
        # compared the Key object (data_key) against those strings, so
        # the branch sharing one cached object between diff and new
        # could never execute.
        if name in ('diff', 'new') \
            and self._diff_data == self._new_data:
          self._diff_CachedDeltaContent = c
          self._new_CachedDeltaContent = c
        else:
          setattr(self, prop, c)
      else:
        c = None
        setattr(self, prop, c)
    return c

  def patch_equals(self, other_delta):
    """Does this patch use the given DeltaContent as its patch body?"""
    return self._diff_data == other_delta.key()

  @property
  def patch_text(self):
    """Get the patch converting old_text to new_text."""
    return self._data('diff').patch_text

  @property
  def patch_lines(self):
    """The patch_text split into lines, retaining line endings."""
    return self._data('diff').patch_lines

  @property
  def old_text(self):
    """Original version of the file text ('' when there is no old image)."""
    d = self._data('old')
    if d:
      return d.data_text
    return ''

  @property
  def old_lines(self):
    """The old_text split into lines, retaining line endings."""
    d = self._data('old')
    if d:
      return d.data_lines
    return []

  @property
  def new_text(self):
    """New version of the file text ('' when there is no new image)."""
    d = self._data('new')
    if d:
      return d.data_text
    return ''

  @property
  def new_lines(self):
    """The new_text split into lines, retaining line endings."""
    d = self._data('new')
    if d:
      return d.data_lines
    return []
class Comment(BackedUpModel):
  """A Comment for a specific line of a specific file.

  This is a descendant of a Patch.
  """
  patch = db.ReferenceProperty(Patch) # == parent
  message_id = db.StringProperty()  # == key_name
  author = db.UserProperty()
  date = db.DateTimeProperty(auto_now=True)
  lineno = db.IntegerProperty()
  text = db.TextProperty()
  # presumably True = comment on the left (old) side of the diff — confirm
  # against the view code.
  left = db.BooleanProperty()
  draft = db.BooleanProperty(required=True, default=True)
  def complete(self, patch):
    """Set the shorttext and buckets attributes."""
    # TODO(guido): Turn these into caching properties instead.
    # TODO(guido): Properly parse the text into quoted and unquoted buckets.
    self.shorttext = self.text.lstrip()[:50].rstrip()
    self.buckets = [Bucket(text=self.text)]
class Bucket(BackedUpModel):
  """A 'Bucket' of text.

  A comment may consist of multiple text buckets, some of which may be
  collapsed by default (when they represent quoted text).

  NOTE: This entity is never written to the database. See Comment.complete().
  """
  # TODO(guido): Flesh this out.
  text = db.TextProperty()
class ReviewStatus(BackedUpModel):
  """The information for whether a user has LGTMed or verified a change."""
  change = db.ReferenceProperty(Change, required=True) # == parent
  user = db.UserProperty(required=True)
  lgtm = db.StringProperty()
  verified = db.BooleanProperty()
  @classmethod
  def get_or_insert_status(cls, change, user):
    """Get or atomically create the status entity for (change, user)."""
    # NOTE(review): 'user.email' here is the bound method object, not its
    # result, so the key embeds the method repr. All three classmethods
    # below do the same, so lookups are self-consistent, but this looks
    # like it was meant to be user.email() (as used elsewhere, e.g.
    # Account.get_account_for_user). Do not "fix" without migrating
    # existing stored keys.
    key = '<%s>' % user.email
    return ReviewStatus.get_or_insert(key,
                                      change=change,
                                      user=user,
                                      parent=change)
  @classmethod
  def insert_status(cls, change, user):
    """Build (without saving) a new status entity for (change, user)."""
    key = '<%s>' % user.email
    return ReviewStatus(key_name=key, change=change, user=user, parent=change)
  @classmethod
  def get_status_for_user(cls, change, user):
    """Fetch the status entity for (change, user), or None."""
    key = '<%s>' % user.email
    return cls.get_by_key_name(key, parent=change)
  @classmethod
  def all_for_change(cls, change):
    """All status entities stored under the given change."""
    return gql(ReviewStatus,
               'WHERE ANCESTOR IS :1',
               change).fetch(FETCH_MAX)
### Contributor License Agreements ###

class IndividualCLA:
  """Version codes for the individual CLA a user may have signed."""
  # No individual CLA on file.
  NONE = 0
### Accounts ###

class Account(BackedUpModel):
  """Maps a user or email address to a user-selected real_name, and more.

  Nicknames do not have to be unique.

  The default real_name is generated from the email address by
  stripping the first '@' sign and everything after it. The email
  should not be empty nor should it start with '@' (AssertionError
  is raised if either of these happens).

  Holds a list of ids of starred changes. The expectation is
  that you won't have more than a dozen or so starred changes (a few
  hundred in extreme cases) and the memory used up by a list of
  integers of that size is very modest, so this is an efficient
  solution. (If someone found a use case for having thousands of
  starred changes we'd have to think of a different approach.)
  """
  user = db.UserProperty(required=True)
  email = db.EmailProperty(required=True)  # key == <email>
  preferred_email = db.EmailProperty()
  created = db.DateTimeProperty(auto_now_add=True)
  modified = db.DateTimeProperty(auto_now=True)
  is_admin = db.BooleanProperty(default=False)
  welcomed = db.BooleanProperty(default=False)
  real_name_entered = db.BooleanProperty(default=False)
  real_name = db.StringProperty()
  mailing_address = db.TextProperty()
  mailing_address_country = db.StringProperty()
  phone_number = db.StringProperty()
  fax_number = db.StringProperty()
  # CLA bookkeeping, maintained by admins.
  cla_verified = db.BooleanProperty(default=False)
  cla_verified_by = db.UserProperty()
  cla_verified_timestamp = db.DateTimeProperty()  # the first time it's set
  individual_cla_version = db.IntegerProperty(default=IndividualCLA.NONE)
  individual_cla_timestamp = db.DateTimeProperty()
  cla_comments = db.TextProperty()
  default_context = db.IntegerProperty(default=DEFAULT_CONTEXT,
                                       choices=CONTEXT_CHOICES)
  stars = db.ListProperty(int)  # Change ids of all starred changes
  unclaimed_changes_projects = db.ListProperty(db.Key)
  # Current user's Account.  Updated by middleware.AddUserToRequestMiddleware.
  current_user_account = None
  def get_email(self):
    "Gets the email that this person wants us to use -- separate from login."
    if self.preferred_email:
      return self.preferred_email
    else:
      return self.email
  def get_email_formatted(self):
    """The preferred email formatted as an RFC-style '"Name" <addr>'."""
    return '"%s" <%s>' % (self.real_name, self.get_email())
  @classmethod
  def get_account_for_user(cls, user):
    """Get the Account for a user, creating a default one if needed."""
    email = user.email()
    assert email
    key = '<%s>' % email
    # Since usually the account already exists, first try getting it
    # without the transaction implied by get_or_insert().
    account = cls.get_by_key_name(key)
    if account is not None:
      return account
    # Default real_name: the local part of the email-like nickname.
    real_name = user.nickname()
    if '@' in real_name:
      real_name = real_name.split('@', 1)[0]
    assert real_name
    return cls.get_or_insert(key, user=user, email=email, real_name=real_name)
  @classmethod
  def get_account_for_email(cls, email):
    """Get the Account for an email address, or return None."""
    assert email
    key = '<%s>' % email
    return cls.get_by_key_name(key)
  @classmethod
  def get_accounts_for_emails(cls, emails):
    """Get the Accounts for all email address.

    Returns one entry per input email; missing accounts come back as None
    (get_by_key_name semantics).
    """
    return cls.get_by_key_name(map(lambda x: '<%s>' % x, emails))
  @classmethod
  def get_real_name_for_email(cls, email):
    """Get the real_name for an email address, possibly a default."""
    account = cls.get_account_for_email(email)
    if account is not None and account.real_name:
      return account.real_name
    # No account (or empty name): fall back to the email's local part.
    real_name = email
    if '@' in real_name:
      real_name = real_name.split('@', 1)[0]
    assert real_name
    return real_name
  @classmethod
  def get_accounts_for_real_name(cls, real_name):
    """Get the list of Accounts that have this real_name."""
    assert real_name
    assert '@' not in real_name
    return list(gql(cls, 'WHERE real_name = :1', real_name))
  @classmethod
  def get_email_for_real_name(cls, real_name):
    """Turn a real_name into an email address.

    If the real_name is not unique or does not exist, this returns None.
    """
    accounts = cls.get_accounts_for_real_name(real_name)
    if len(accounts) != 1:
      return None
    return accounts[0].email
  # Per-instance MemCacheKey for the drafts property (None = not built yet).
  _draft_key = None
  @property
  def drafts(self):
    """A list of change ids that have drafts by this user.

    This is cached in memcache.
    """
    if self._draft_key is None:
      self._draft_key = MemCacheKey(
          key = 'user_drafts:%s' % self.email,
          incore = True,
          timeout = 3600)
    def query_store():
      # We're looking for the Change key id.
      # The ancestry of comments goes:
      #   Change -> PatchSet -> Patch -> Comment.
      #
      change_ids = set(comment.key().parent().parent().parent().id()
                       for comment
                       in gql(Comment,
                              'WHERE author = :1'
                              ' AND draft = TRUE',
                              self.user))
      return list(change_ids)
    return self._draft_key.get(query_store)
  def update_drafts(self, change, have_drafts=None):
    """Update the user's draft status for this change.

    Args:
      change: an Change instance.
      have_drafts: optional bool forcing the draft status. By default,
        change.num_drafts is inspected (which may query the datastore).

    The cached drafts list is rewritten only when membership changes.
    """
    my_drafts = self.drafts
    id = change.key().id()
    if have_drafts is None:
      have_drafts = bool(change.num_drafts)  # this may do a query
    if have_drafts:
      if id not in my_drafts:
        my_drafts.append(id)
        self._draft_key.set(my_drafts)
    else:
      if id in my_drafts:
        my_drafts.remove(id)
        self._draft_key.set(my_drafts)
### Group ###

# Built-in groups created automatically at startup; they can never be
# deleted (see AccountGroup.is_auto_group).
AUTO_GROUPS = ['admin']
class AccountGroup(BackedUpModel):
  """A set of users.  Permissions are assigned to groups.

  There are some groups that can't be deleted -- like admin and all.
  """
  name = db.StringProperty(required=True)
  comment = db.TextProperty(required=False)
  members = db.ListProperty(users.User)
  @classmethod
  def get_all_groups(cls):
    """Return all groups, ordered by name."""
    # NOTE(review): the local 'all' shadows the builtin of the same name.
    all = cls.all()
    all.order('name')
    return list(all)
  @property
  def is_auto_group(self):
    """These groups can't be deleted."""
    return self.name in AUTO_GROUPS
  @classmethod
  def create_groups(cls):
    """Create any missing AUTO_GROUPS entries (idempotent bootstrap)."""
    for group_name in AUTO_GROUPS:
      def trans():
        g = cls(name = group_name,
                comment = 'Auto created %s group' % group_name)
        g.put()
      # Only create when absent; the existence check runs outside the
      # transaction.
      if not cls.get_group_for_name(group_name):
        db.run_in_transaction(trans)
  @classmethod
  def get_group_for_name(cls, name):
    """Fetch a group by its (assumed unique) name, or None."""
    return cls.gql('WHERE name=:name', name=name).get()
  def remove(self):
    """delete this group"""
    def trans(group):
      group.delete()
      # this will do the ON DELETE CASCADE once the users are in there
    db.run_in_transaction(trans, self)
| |
import os
from io import BytesIO
import json
import zipfile
import pytest
import numpy as np
from stable_baselines import A2C, ACER, ACKTR, DQN, PPO1, PPO2, TRPO
from stable_baselines.common.identity_env import IdentityEnv
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.policies import MlpPolicy, FeedForwardPolicy
# Number of episodes used when evaluating a policy in these tests.
N_EVAL_EPISODES = 5

# Every algorithm whose save/load round-trip is exercised below.
MODEL_LIST = [
    A2C,
    ACER,
    ACKTR,
    DQN,
    PPO1,
    PPO2,
    TRPO,
]

# Save targets: a filesystem path or an in-memory file-like object.
STORE_METHODS = [
    "path",
    "file-like"
]

# On-disk serialization formats accepted by model.save().
STORE_FORMAT = [
    "zip",
    "cloudpickle"
]
@pytest.mark.slow
@pytest.mark.parametrize("model_class", MODEL_LIST)
@pytest.mark.parametrize("storage_method", STORE_METHODS)
@pytest.mark.parametrize("store_format", STORE_FORMAT)
def test_model_manipulation(request, model_class, storage_method, store_format):
    """
    Test if the algorithm (with a given policy) can be loaded and saved without any issues, the environment switching
    works and that the action prediction works

    :param request: (pytest.FixtureRequest) the running test's metadata, used to name the save file
    :param model_class: (BaseRLModel) A RL model
    :param storage_method: (str) Should file be saved to a file ("path") or to a buffer
        ("file-like")
    :param store_format: (str) Save format, either "zip" or "cloudpickle".
    """
    # Use postfix ".model" so we can remove the file later
    model_fname = './test_model_{}.model'.format(request.node.name)
    store_as_cloudpickle = store_format == "cloudpickle"

    # Keep the hyperparameters tiny so training is fast; each algorithm
    # family exposes different knobs.
    kwargs = dict(seed=0, gamma=0.4)
    if model_class in [DQN]:
        kwargs["learning_starts"] = 0
        kwargs["exploration_final_eps"] = 0.05
    if model_class == PPO1:
        kwargs["entcoeff"] = 0.0
        kwargs["optim_batchsize"] = 4
        kwargs["timesteps_per_actorbatch"] = 4
    if model_class in [A2C, ACKTR, PPO2]:
        kwargs["n_steps"] = 4
        kwargs["ent_coef"] = 0.0
    if model_class in [TRPO]:
        kwargs["timesteps_per_batch"] = 4

    try:
        env = DummyVecEnv([lambda: IdentityEnv(10)])

        # create and train
        model = model_class(policy="MlpPolicy", env=env, **kwargs)
        model.learn(total_timesteps=15)

        env.envs[0].action_space.seed(0)
        mean_reward, _ = evaluate_policy(model, env, deterministic=True,
                                         n_eval_episodes=N_EVAL_EPISODES)

        # test action probability for given (obs, action) pair
        env = model.get_env()
        obs = env.reset()
        observations = np.array([env.step([env.action_space.sample()])[0] for _ in range(10)])
        observations = np.squeeze(observations)
        selected_actions, _ = model.predict(observations, deterministic=True)
        actions = np.array([env.action_space.sample() for _ in range(10)])
        # One probability per (obs, action) pair, each in [0, 1].
        actions_probas = model.action_probability(observations, actions=actions)
        assert actions_probas.shape == (len(actions), 1), actions_probas.shape
        assert actions_probas.min() >= 0, actions_probas.min()
        assert actions_probas.max() <= 1, actions_probas.max()

        # saving
        if storage_method == "path":  # saving to a path
            model.save(model_fname, cloudpickle=store_as_cloudpickle)
        else:  # saving to a file-like object (BytesIO in this case)
            b_io = BytesIO()
            model.save(b_io, cloudpickle=store_as_cloudpickle)
            model_bytes = b_io.getvalue()
            b_io.close()
        del model, env

        # loading
        if storage_method == "path":  # loading from path
            model = model_class.load(model_fname)
        else:
            b_io = BytesIO(model_bytes)  # loading from file-like object (BytesIO in this case)
            model = model_class.load(b_io)
            b_io.close()

        # changing environment (note: this can be done at loading)
        env = DummyVecEnv([lambda: IdentityEnv(10)])
        model.set_env(env)

        # check if model still selects the same actions
        new_selected_actions, _ = model.predict(observations, deterministic=True)
        assert np.allclose(selected_actions, new_selected_actions, 1e-4)

        # learn post loading
        model.learn(total_timesteps=15)

        # predict new values
        evaluate_policy(model, env, n_eval_episodes=N_EVAL_EPISODES)

        del model, env
    finally:
        # Always clean up the on-disk artifact, even on failure.
        if os.path.exists(model_fname):
            os.remove(model_fname)
class CustomMlpPolicy(FeedForwardPolicy):
    """A dummy "custom" policy to test out custom_objects"""
    def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, **_kwargs):
        # Behaves like the stock MlpPolicy: plain MLP feature extraction.
        super(CustomMlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps,
                                              n_batch, reuse, feature_extraction="mlp",
                                              **_kwargs)
@pytest.mark.parametrize("model_class", MODEL_LIST)
def test_save_custom_objects(request, model_class):
    """
    Test feeding custom_objects in model.load(...) function

    :param request: (pytest.FixtureRequest) the running test's metadata, used to name the save file
    :param model_class: (BaseRLModel) A RL model
    """
    if model_class == DQN:
        # Report an explicit skip instead of silently passing (DQN is not
        # an actor-critic policy, so the "policy" override does not apply).
        pytest.skip("DQN does not use an actor-critic policy")
    model_fname = './test_model_{}.zip'.format(request.node.name)

    try:
        env = DummyVecEnv([lambda: IdentityEnv(10)])

        # Create and save model with default MLP policy
        model = model_class(policy=MlpPolicy, env=env)
        model.save(model_fname)
        del model, env

        # Corrupt "policy" serialization in the file.
        # Load all data first (we can't update just one file inside the
        # archive); context managers guarantee the handles are closed even
        # if a read fails.
        with zipfile.ZipFile(model_fname, "r") as data_file:
            parameter_list = data_file.read("parameter_list")
            parameters = data_file.read("parameters")
            class_data = json.loads(data_file.read("data").decode())

        # Corrupt serialization of the "policy"
        class_data["policy"][":serialized:"] = (
            "Adding this should break serialization" +
            class_data["policy"][":serialized:"]
        )

        # And dump everything back to the model file
        with zipfile.ZipFile(model_fname, "w") as data_file:
            data_file.writestr("data", json.dumps(class_data))
            data_file.writestr("parameter_list", parameter_list)
            data_file.writestr("parameters", parameters)

        # Trying to load the corrupted file should raise.
        with pytest.raises(RuntimeError):
            model = model_class.load(model_fname)

        # Load model with custom objects ("custom" MlpPolicy)
        # and it should work fine.
        # Note: We could load model with just vanilla
        #       MlpPolicy, too.
        model = model_class.load(
            model_fname,
            custom_objects={
                "policy": CustomMlpPolicy
            }
        )

        # Make sure we loaded custom MLP policy
        assert model.policy == CustomMlpPolicy
        del model
    finally:
        if os.path.exists(model_fname):
            os.remove(model_fname)
| |
# coding: utf-8
from __future__ import absolute_import
import logging
from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, local_timestamp_to_datetime
# utc_timestamp_to_datetime
from apscheduler.triggers.cron import CronTrigger
from apscheduler.job import Job
from apscheduler.jobconf import JobConf
import apscheduler
from tzlocal import get_localzone
from datetime import datetime
#from .cmdjob import CmdJob
# from .runtimejob import RuntimeJob
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from sqlalchemy import create_engine, Table, Column, MetaData, Unicode, String, Integer, Float, LargeBinary, select, text
from sqlalchemy.exc import IntegrityError
except ImportError: # pragma: nocover
raise ImportError('MysqlJobStore requires SQLAlchemy installed')
class MysqlJobStore(BaseJobStore):
"""
Stores jobs in a database table using SQLAlchemy. The table will be created if it doesn't exist in the database.
Plugin alias: ``mysqljobstore``
:param str url: connection string (see `SQLAlchemy documentation
<http://docs.sqlalchemy.org/en/latest/core/engines.html?highlight=create_engine#database-urls>`_
on this)
:param engine: an SQLAlchemy Engine to use instead of creating a new one based on ``url``
:param str tablename: name of the table to store jobs in
    :param metadata: a :class:`~sqlalchemy.MetaData` instance to use instead of creating a new one
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available
"""
    def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL):
        # NOTE(review): 'tablename' is unused; __ini_schema hard-codes the
        # table name to 'wm_jobs'. Exactly one of 'engine' or 'url' must be
        # supplied.
        super(MysqlJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        metadata = maybe_ref(metadata) or MetaData()
        # Trigger times are interpreted in the host's local timezone.
        self.timezone = get_localzone()
        if engine:
            self.engine = maybe_ref(engine)
        elif url:
            self.engine = create_engine(url, encoding='utf-8', echo=False)
        else:
            raise ValueError('Need either "engine" or "url" defined')

        # 191 = max key length in MySQL for InnoDB/utf8mb4 tables, 25 = precision that translates to an 8-byte float
        # self.jobs_t = Table(
        #     tablename, metadata,
        #     Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True),
        #     Column('next_run_time', Float(25), index=True),
        #     Column('job_state', LargeBinary, nullable=False)
        # )
        # self.jobs_t.create(self.engine, True)
        self.__ini_schema(metadata)
    def __ini_schema(self, metadata=None):
        """Define the wm_jobs table and create it if it does not exist.

        NOTE(review): the method name looks like a typo for '__init_schema'
        and the 'oupput_match_reg' column for 'output_match_reg'; both are
        left as-is here because renaming would break existing callers and
        deployed tables — confirm before changing.
        """
        tablename = 'wm_jobs'
        self.wm_jobs_t = Table(
            tablename, metadata,
            Column('id', Integer, primary_key=True),
            Column('cmd', String(512), nullable=False, server_default=''),
            Column('cron_str', String(512), nullable=False, server_default=''),
            Column('name', String(50), nullable=False, server_default='', index=True),
            Column('desc', String(1000), nullable=False, server_default=''),
            Column('mails', String(200), nullable=False, server_default=''),
            Column('phones', String(200), nullable=False, server_default=''),
            Column('team', String(50), nullable=False, server_default=''),
            Column('owner', String(50), nullable=False, server_default='', index=True),
            Column('hosts', String(1000), nullable=False, server_default=''),
            Column('host_strategy', Integer, nullable=False, server_default='0', ),  # 0, every host; 1, one of these
            Column('restore_strategy', Integer, nullable=False, server_default='0'),  # 0, restore just one time. 1. restore for every lack
            Column('retry_strategy', Integer, nullable=False, server_default='0'),  # 0, no retry; N>0, retry N times
            Column('error_strategy', Integer, nullable=False, server_default='0'),  # 0, do nothing; 1, stop job & alarm through both mail & phones
            Column('exist_strategy', Integer, nullable=False, server_default='0'),  # 0, whatever; 1, skip this period; 2, wait for stop until end of this period
            Column('running_timeout_s', Integer, nullable=False, server_default='0'),  # 0, without endless 1, kill job when timeout
            Column('status', Integer, nullable=False, server_default='0', index=True),  # 0, stopped or new; 1, running normally; 2. suspend by runtime
            Column('modify_time', Integer, nullable=False, server_default='0'),
            Column('modify_user', String(50), nullable=False, server_default=''),
            Column('create_time', Integer, nullable=False, server_default='0'),
            Column('create_user', String(50), nullable=False, server_default=''),
            Column('start_date', Integer, nullable=False, server_default='0', index=True),
            Column('end_date', Integer, nullable=False, server_default='0', index=True),
            Column('oupput_match_reg', String(100), nullable=False, server_default=''),  # do nothing when empty, otherwise, alarm when match Regex expression specified
            Column('next_run_time', Float(25), nullable=False, server_default='0', index=True),
            mysql_engine='InnoDB',
            mysql_charset='utf8',
        )
        # by the way, next_run_time / and last_run_time / and running status should be saved in zookeeper
        self.wm_jobs_t.create(self.engine, True)
    def start(self,scheduler, alias):
        """Hook into scheduler startup; no-op when no engine is configured."""
        # Short-circuits on a falsy engine; otherwise delegates to the base
        # class (whose return value — presumably None — is passed through).
        return self.engine and BaseJobStore.start(self, scheduler, alias)
# def lookup_job(self, job_id):
# selectable = select([self.jobs_t.c.job_state]).where(self.jobs_t.c.id == job_id)
# job_state = self.engine.execute(selectable).scalar()
# return self._reconstitute_job(job_state) if job_state else None
def lookup_job(self, job_id):
selectable = select([ x for x in self.wm_jobs_t.c]).where(self.wm_jobs_t.c.id == job_id)
row = self.engine.execute(selectable).one()
return self._reconstitute_job(row) if row else None
# def get_due_jobs(self, now):
# timestamp = datetime_to_utc_timestamp(now)
# return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
return self._get_jobs(self.wm_jobs_t.c.next_run_time <= timestamp)
# def get_next_run_time(self):
# selectable = select([self.jobs_t.c.next_run_time]).where(self.jobs_t.c.next_run_time != None).\
# order_by(self.jobs_t.c.next_run_time).limit(1)
# next_run_time = self.engine.execute(selectable).scalar()
# return utc_timestamp_to_datetime(next_run_time)
def get_next_run_time(self):
# TDODO ... should access zookeeper to get the next_run_time
selectable = select([self.wm_jobs_t.c.next_run_time]).where(self.wm_jobs_t.c.next_run_time != None).\
order_by(self.wm_jobs_t.c.next_run_time).limit(1)
next_run_time = self.engine.execute(selectable).scalar()
ret = local_timestamp_to_datetime(next_run_time)
return ret
def get_all_jobs(self):
jobs = self._get_jobs()
self._fix_paused_jobs_sorting(jobs)
return jobs
# def add_job(self, job):
# insert = self.jobs_t.insert().values(**{
# 'id': job.id,
# 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
# 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
# })
# try:
# self.engine.execute(insert)
# except IntegrityError:
# raise ConflictingIdError(job.id)
def add_job(self, job):
insert = self.wm_jobs_t.insert().values(**{
'id': job.conf.id,
'cmd': job.conf.cmd,
'cron_str': job.conf.cron_str,
'name': job.conf.name,
'desc': job.conf.desc,
'mails': job.conf.mails,
'phones': job.conf.phones,
'team': job.conf.team,
'owner': job.conf.owner,
'hosts': job.conf.hosts,
'host_strategy': job.conf.host_strategy,
'restore_strategy': job.conf.restore_strategy,
'retry_strategy': job.conf.retry_strategy,
'error_strategy': job.conf.error_strategy,
'exist_strategy': job.conf.exist_strategy,
'running_timeout_s': job.conf.running_timeout_s,
'status': job.conf.status,
'modify_time': job.conf.modify_time,
'modify_user': job.conf.modify_user,
'create_time': job.conf.create_time,
'create_user': job.conf.create_user,
'start_date': job.conf.start_date,
'end_date': job.conf.end_date,
'oupput_match_reg': job.conf.oupput_match_reg,
'next_run_time': job.conf.next_run_time,
})
try:
self.engine.execute(insert)
except IntegrityError:
raise ConflictingIdError(job.id)
# def update_job(self, job):
# update = self.jobs_t.update().values(**{
# 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
# 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
# }).where(self.jobs_t.c.id == job.id)
# result = self.engine.execute(update)
# if result.rowcount == 0:
# raise JobLookupError(id)
def update_job(self, job):
job.conf.next_run_time = datetime_to_utc_timestamp(job.next_run_time)
logging.debug('job %s update next_run_time to %s %s cmd=%s' % (job.conf.id, job.conf.next_run_time, job.next_run_time, job.conf.cmd))
update = self.wm_jobs_t.update().values(**{
'cmd': job.conf.cmd,
'cron_str': job.conf.cron_str,
'name': job.conf.name,
'desc': job.conf.desc,
'mails': job.conf.mails,
'phones': job.conf.phones,
'team': job.conf.team,
'owner': job.conf.owner,
'hosts': job.conf.hosts,
'host_strategy': job.conf.host_strategy,
'restore_strategy': job.conf.restore_strategy,
'retry_strategy': job.conf.retry_strategy,
'error_strategy': job.conf.error_strategy,
'exist_strategy': job.conf.exist_strategy,
'running_timeout_s': job.conf.running_timeout_s,
'status': job.conf.status,
'modify_time': job.conf.modify_time,
'modify_user': job.conf.modify_user,
'create_time': job.conf.create_time,
'create_user': job.conf.create_user,
'start_date': job.conf.start_date,
'end_date': job.conf.end_date,
'oupput_match_reg': job.conf.oupput_match_reg,
'next_run_time': job.conf.next_run_time
}).where(self.wm_jobs_t.c.id == job.id)
result = self.engine.execute(update)
if result.rowcount == 0:
raise JobLookupError(id)
# def remove_job(self, job_id):
# delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
# result = self.engine.execute(delete)
# if result.rowcount == 0:
# raise JobLookupError(job_id)
def remove_job(self, job_id):
#delete = self.wm_jobs_t.delete().where(self.wm_jobs_t.c.id == job_id)
update = self.wm_jobs_t.update().where(self.wm_jobs_t.c.id == job_id).values(status='2')
# TODO ... add history here, operation cause of job should be stopped as scheduled settings.
result = self.engine.execute(update)
if result.rowcount == 0:
raise JobLookupError(job_id)
# def remove_all_jobs(self):
# delete = self.jobs_t.delete()
# self.engine.execute(delete)
def remove_all_jobs(self):
delete = self.wm_jobs_t.delete()
self.engine.execute(delete)
    def shutdown(self):
        """Dispose of the SQLAlchemy engine's connection pool on scheduler shutdown."""
        self.engine.dispose()
# def _reconstitute_job(self, job_state):
# job_state = pickle.loads(job_state)
# job_state['jobstore'] = self
# job = Job.__new__(Job)
# job.__setstate__(job_state)
# job._scheduler = self._scheduler
# job._jobstore_alias = self._alias
# return job
def _reconstitute_job(self, row):
'''
code gen by shell cmd: cat a | awk -F '=' '{print $1}' | cut -c5- | awk '{ print "job."$1" = row."$1}'
what in file a is the wm_jobs_t create statement which can be found in the current source code file
'''
conf = JobConf()
conf.id = row.id
conf.cmd = row.cmd
conf.cron_str = row.cron_str
conf.name = row.name
conf.desc = row.desc
conf.mails = row.mails
conf.phones = row.phones
conf.team = row.team
conf.owner = row.owner
conf.hosts = row.hosts
conf.host_strategy = row.host_strategy
conf.restore_strategy = row.restore_strategy
conf.retry_strategy = row.retry_strategy
conf.error_strategy = row.error_strategy
conf.exist_strategy = row.exist_strategy
conf.running_timeout_s = row.running_timeout_s
conf.status = row.status
conf.modify_time = row.modify_time
conf.modify_user = row.modify_user
conf.create_time = row.create_time
conf.create_user = row.create_user
conf.start_date = row.start_date
conf.end_date = row.end_date
conf.oupput_match_reg = row.oupput_match_reg
conf.next_run_time = row.next_run_time
job = Job.__new__(Job)
job.conf = conf
job.id = job.conf.id
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
job.trigger = self._create_trigger_by_conf(job)
t = apscheduler.util.local_timestamp_to_datetime(conf.next_run_time) if conf.next_run_time > 0 else None
t = apscheduler.util.convert_to_ware_datetime(t, get_localzone(), 'conf.next_run_time' )
state = {
'version': 1,
'conf': conf,
'id': conf.id,
'name': conf.name,
'next_run_time': t,
}
job.__setstate__(state)
return job
def _create_trigger_by_conf(self, job):
'''
refer: http://crontab.org/
day of week 0-7 (0 or 7 is Sun, or use names)
'''
if not job.conf or not job.conf.cron_str:
raise ValueError("job.conf is None or job.conf.cron_str is None")
ary = job.conf.cron_str.split(' ')
second = None
minute = None
hour = None
day_of_the_month = None
month_of_the_year = None
day_of_the_week = None
year = None
if len(ary) == 5: # classics
minute, hour, day_of_the_month, month_of_the_year, day_of_the_week = ary[0], ary[1], ary[2], ary[3], ary[4]
else:
if len(ary) == 6: # with year
minute, hour, day_of_the_month, month_of_the_year, day_of_the_week, year = ary[0], ary[1], ary[2], ary[3], ary[4], ary[5]
else:
if len(ary) == 7: # with second extended
second, minute, hour, day_of_the_month, month_of_the_year, day_of_the_week, year = ary[0], ary[1], ary[2], ary[3], ary[4], ary[5], ary[6]
else:
raise ValueError("job %s has sth. wrong with format of cron_str %s" % (self.id, self.conf.cron_str))
trigger = CronTrigger(second=second, minute=minute, hour=hour, day=day_of_the_month, month=month_of_the_year, day_of_week=day_of_the_week, year=year
, start_date = apscheduler.util.convert_to_ware_datetime(apscheduler.util.local_timestamp_to_datetime(job.conf.start_date), self.timezone, 'start_date') if job.conf.start_date > 0 else None
, end_date = apscheduler.util.convert_to_ware_datetime(apscheduler.util.local_timestamp_to_datetime(job.conf.end_date), self.timezone, 'end_date') if job.conf.end_date > 0 else None
)
# , start_date = apscheduler.util.convert_to_ware_datetime(apscheduler.util.local_timestamp_to_datetime(job.conf.start_date), self.timezone, 'start_date') if job.conf.start_date > 0 else None
# , end_date = apscheduler.util.convert_to_ware_datetime(apscheduler.util.local_timestamp_to_datetime(job.conf.end_date), self.timezone, 'end_date') if job.conf.end_date > 0 else None
return trigger
# def _get_jobs(self, *conditions):
# jobs = []
# selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).order_by(self.jobs_t.c.next_run_time)
# selectable = selectable.where(*conditions) if conditions else selectable
# failed_job_ids = set()
# for row in self.engine.execute(selectable):
# try:
# jobs.append(self._reconstitute_job(row.job_state))
# except:
# logging.exception('Unable to restore job "%s" -- removing it', row.id)
# failed_job_ids.add(row.id)
#
# # Remove all the jobs we failed to restore
# if failed_job_ids:
# delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids))
# self.engine.execute(delete)
#
# return jobs
def _get_jobs(self, *conditions):
jobs = []
selectable = select([x for x in self.wm_jobs_t.c])
# selectable = selectable.order_by(self.wm_jobs_t.c.next_run_time)
selectable = selectable.where(*conditions).where(self.wm_jobs_t.c.status == 1) if conditions else selectable
failed_job_ids = set()
for row in self.engine.execute(selectable):
try:
jobs.append(self._reconstitute_job(row))
except:
logging.exception('Unable to restore job "%s" -- removing it', row.id)
failed_job_ids.add(row.id)
# Remove all the jobs we failed to restore
if failed_job_ids:
# delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids))
# logic delete
msg = 'job %s update status to 2 cause of failing to _reconstitute_job' % ','.join(list(failed_job_ids))
logging.error(msg)
update = self.wm_jobs_t.update().where(self.wm_jobs_t.c.id.in_(failed_job_ids)).values(status='2')
self.engine.execute(update)
# TODO ... add history here
from apscheduler.history import add_log
conf = JobConf()
conf.id = 0
conf.cmd = ' '
add_log(conf, output=msg)
return jobs
def __repr__(self):
return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)
| |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import copy
from .common import infer_shape
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from .common import set_comm_op_dist_attr_for_program, naive_copy_op_dist_attr_for_program, is_parameter_related
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
from ..utils import set_dist_op_desc_original_id
from ..dist_attribute import OperatorDistributedAttribute
from paddle.fluid import core, unique_name
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import Program, Parameter, Variable, program_guard
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank
from .dist_default import DistributedDefaultImpl0
def copy_op_with_new_input_output(ctx, block, src_op, **kwargs):
    """Append a copy of *src_op* to *block* with remapped inputs/outputs.

    ``kwargs`` maps every input/output slot name of *src_op* to the new
    argument name lists.  Returns the new op desc after syncing the block
    with the C++ side.
    """
    dist_op_desc = block.desc.append_op()
    dist_op_desc.copy_from(src_op.desc)
    set_dist_op_desc_original_id(dist_op_desc, src_op.desc, ctx)
    for input_name in src_op.desc.input_names():
        assert input_name in kwargs, "input [{}] is not given".format(input_name)
        dist_op_desc.set_input(input_name, kwargs[input_name])
    for output_name in src_op.desc.output_names():
        # BUG FIX: the original asserted ``input_name in kwargs`` here,
        # re-checking the stale loop variable instead of output_name, so a
        # missing output mapping went undetected until set_output failed.
        assert output_name in kwargs, "output [{}] is not given".format(output_name)
        dist_op_desc.set_output(output_name, kwargs[output_name])
    block._sync_with_cpp()
    return dist_op_desc
def _update_dims_mapping_for_matmul(dist_op):
    """Make the dims mappings of X, Y and Out of a matmul dist op mutually
    compatible, mutating them in place through the op's dist_attr.

    Returns True when any mapping was changed.  The lists obtained from
    ``get_input_dims_mapping`` / ``get_output_dims_mapping`` are assumed to
    be live references into the attribute (mutations here persist) -- the
    temporary 1-D padding below is undone before returning.
    """
    changed = False
    op_desc = dist_op.serial_op.desc
    op_dist_attr = dist_op.dist_attr
    x_name = op_desc.input('X')[0]
    y_name = op_desc.input('Y')[0]
    out_name = op_desc.output('Out')[0]
    x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
    y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
    out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
    x_dims_mapping_len = len(x_dims_mapping)
    y_dims_mapping_len = len(y_dims_mapping)
    out_dims_mapping_len = len(out_dims_mapping)
    # Add dim mapping to Make sure the length dims_mapping be at least 2
    # (1-D x gets a leading dim, 1-D y a trailing dim, matmul convention).
    if x_dims_mapping_len == 1:
        x_dims_mapping.insert(0, -1)
    if y_dims_mapping_len == 1:
        y_dims_mapping.insert(1, -1)
    # Deal with dim > 2 and take care of broadcasting
    if out_dims_mapping_len > 2:
        broadcast_x_dims_mapping = []
        broadcast_y_dims_mapping = []
        broadcast_out_dims_mapping = []
        # Align x/y batch dims with out's leading dims; missing leading dims
        # of a shorter operand inherit out's mapping.
        for i in range(out_dims_mapping_len - x_dims_mapping_len):
            broadcast_x_dims_mapping.append(out_dims_mapping[i])
        for i in range(x_dims_mapping_len - 2):
            broadcast_x_dims_mapping.append(x_dims_mapping[i])
        for i in range(out_dims_mapping_len - y_dims_mapping_len):
            broadcast_y_dims_mapping.append(out_dims_mapping[i])
        for i in range(y_dims_mapping_len - 2):
            broadcast_y_dims_mapping.append(y_dims_mapping[i])
        for i in range(out_dims_mapping_len - 2):
            broadcast_out_dims_mapping.append(out_dims_mapping[i])
        compatible_dims_mapping = compute_compatible_dims_mapping([
            broadcast_x_dims_mapping, broadcast_y_dims_mapping,
            broadcast_out_dims_mapping
        ])
        assert compatible_dims_mapping is not None, "There is no compatible dim mapping."
        # Write the reconciled batch mappings back into each original list.
        for i in range(x_dims_mapping_len - 2):
            new_idx = i + (out_dims_mapping_len - x_dims_mapping_len)
            if x_dims_mapping[i] != compatible_dims_mapping[new_idx]:
                x_dims_mapping[i] = compatible_dims_mapping[new_idx]
                changed = True
        for i in range(y_dims_mapping_len - 2):
            new_idx = i + (out_dims_mapping_len - y_dims_mapping_len)
            if y_dims_mapping[i] != compatible_dims_mapping[new_idx]:
                y_dims_mapping[i] = compatible_dims_mapping[new_idx]
                changed = True
        for i in range(out_dims_mapping_len - 2):
            if out_dims_mapping[i] != compatible_dims_mapping[i]:
                out_dims_mapping[i] = compatible_dims_mapping[i]
                changed = True
    # The following which uses negative index can be work
    # when len(out_dims_mapping) > 2 and len(out_dims_mapping) <=2
    dim_changed = compute_compatible_and_update_dim_mapping(
        [x_dims_mapping, y_dims_mapping], [-1, -2])
    if dim_changed:
        changed = True
    dim_changed = compute_compatible_and_update_dim_mapping(
        [x_dims_mapping, out_dims_mapping], [-2, -2])
    if dim_changed:
        changed = True
    dim_changed = compute_compatible_and_update_dim_mapping(
        [y_dims_mapping, out_dims_mapping], [-1, -1])
    if dim_changed:
        changed = True
    # Remove unnecessary dim mapping to make sure the length of dims_mapping is same as its tensor
    if x_dims_mapping_len == 1:
        x_dims_mapping.pop(0)
    if y_dims_mapping_len == 1:
        y_dims_mapping.pop(1)
    assert len(x_dims_mapping) == x_dims_mapping_len
    assert len(y_dims_mapping) == y_dims_mapping_len
    assert len(out_dims_mapping) == out_dims_mapping_len
    return changed
def _is_auto_compatible_for_matmul(dist_op):
    """Return True when the dims mappings of X, Y and Out of a matmul dist
    op are already mutually consistent (read-only check, no mutation)."""
    op_desc = dist_op.serial_op.desc
    op_dist_attr = dist_op.dist_attr
    x_name = op_desc.input('X')[0]
    y_name = op_desc.input('Y')[0]
    out_name = op_desc.output('Out')[0]
    # Deep-copy the mappings so the temporary padding below cannot leak back
    # into the op's dist_attr.
    x_dims_mapping = copy.deepcopy(op_dist_attr.get_input_dims_mapping(x_name))
    y_dims_mapping = copy.deepcopy(op_dist_attr.get_input_dims_mapping(y_name))
    out_dims_mapping = copy.deepcopy(
        op_dist_attr.get_output_dims_mapping(out_name))
    x_len = len(x_dims_mapping)
    y_len = len(y_dims_mapping)
    out_len = len(out_dims_mapping)
    # Pad 1-D operands so every mapping has at least two dims.
    if x_len == 1:
        x_dims_mapping.insert(0, -1)
    if y_len == 1:
        y_dims_mapping.insert(1, -1)
    # NOTE: Partition is not supported if matmul op has trans.
    op_type = op_desc.type()
    if op_type == "matmul_v2":
        transposed = op_desc.attr('trans_x') or op_desc.attr('trans_y')
    elif op_type == "matmul":
        transposed = op_desc.attr('transpose_X') or op_desc.attr('transpose_Y')
    else:
        transposed = False
    if transposed and (x_dims_mapping[-2:] != [-1, -1] or
                       y_dims_mapping[-2:] != [-1, -1]):
        return False
    # Batch (broadcast) dims of x, y and out must agree exactly.
    if out_len > 2:
        broadcast_x = ([out_dims_mapping[i] for i in range(out_len - x_len)] +
                       [x_dims_mapping[i] for i in range(x_len - 2)])
        broadcast_y = ([out_dims_mapping[i] for i in range(out_len - y_len)] +
                       [y_dims_mapping[i] for i in range(y_len - 2)])
        broadcast_out = [out_dims_mapping[i] for i in range(out_len - 2)]
        if broadcast_x != broadcast_y or broadcast_x != broadcast_out:
            return False
    # Contracted and result dims must line up; negative indexing covers both
    # the >2-D and <=2-D cases.
    if x_dims_mapping[-1] != y_dims_mapping[-2]:
        return False
    if x_dims_mapping[-2] != out_dims_mapping[-2]:
        return False
    if y_dims_mapping[-1] != out_dims_mapping[-1]:
        return False
    return True
def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs):
    """Emit the backward program for a matmul whose right operand (Y) may be
    a partitioned parameter, inserting the needed communication ops.

    Three cases on Y's dims mapping when Y is a parameter and partitioned:
      * row-parallel  (Y sharded on dim 0): c_identity on Out@GRAD + matmul grad
      * col-parallel  (Y sharded on dim 1): matmul grad + c_allreduce_sum on X@GRAD
      * otherwise: plain copy of the serial backward op (replicated)
    A data-parallel allreduce+scale of Y@GRAD is appended when X's batch dim
    is sharded over a mesh axis of size > 1.

    NOTE(review): the assert messages mentioning "row_parallel_embedding"
    look copy-pasted from the embedding op -- the checks themselves concern
    this matmul's inputs/outputs.
    """
    # by now the backward function only insert the gradient allreduce for dist op itself
    dist_op_context = ctx.dist_op_context
    main_block = dist_op_context.work_block
    backward_op = dist_op_context.cur_src_op
    rank_id = dist_op_context.rank_id
    dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
    assert dist_attr is not None, "backward op [{}] don't have dist attribute !".format(
        str(backward_op))
    # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
    if rank_id not in dist_attr.process_mesh.processes:
        rank_id = _get_corresponding_rank(ctx, dist_attr.process_mesh, rank_id)
    assert 'Y' in kwargs, "input [{}] is not given".format('Y')
    assert 'X' in kwargs, "input [{}] is not given".format('X')
    assert 'Out@GRAD' in kwargs, "input [{}] is not given".format('Out@GRAD')
    assert 'Y@GRAD' in kwargs, "output [{}] is not given".format('Y@GRAD')
    assert 'X@GRAD' in kwargs, "output [{}] is not given".format('X@GRAD')
    assert len(
        kwargs['Y']
    ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format(
        kwargs['Y'])
    assert len(
        kwargs['X']
    ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format(
        kwargs['X'])
    assert len(
        kwargs['Out@GRAD']
    ) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format(
        kwargs['Out'])
    assert len(
        kwargs['Y@GRAD']
    ) == 1, "row_parallel_embedding output Ids take 1 variable but got {}".format(
        kwargs['Y@GRAD'])
    X_var = main_block.var(kwargs['X'][0])
    Y_var = main_block._var_recursive(kwargs['Y'][0])
    Out_grad = main_block.var(kwargs['Out@GRAD'][0])
    Y_grad = main_block.var(kwargs['Y@GRAD'][0])
    assert not is_parameter_related(
        X_var.name, main_block
    ), "left operand(X) [{}] of dist matmul should not be parameter".format(
        X_var.name)
    Y_var_dim_mapping = dist_attr.get_input_dims_mapping(Y_var.name)
    process_mesh_shape = dist_attr.process_mesh.topology
    process_mesh_group = dist_attr.process_mesh.processes
    # assert len(
    #     Y_var_dim_mapping
    # ) == 2, "dist matmual only support Y operand with 2 dims now but Y({})'s dim is [{}]".format(
    #     Y_var.name, Y_var_dim_mapping)
    # Y is "partitioned" when any of its dims maps to a mesh axis.
    Y_var_partitioned = False
    for dim in Y_var_dim_mapping:
        if dim >= 0 and process_mesh_shape[dim] > 0:
            Y_var_partitioned = True
            break
    if is_parameter_related(Y_var.name, main_block) and Y_var_partitioned:
        if Y_var_dim_mapping[0] >= 0:
            # row parallel: c_identity + matmul
            assert Y_var_dim_mapping[1] < 0
            parallel_axis = Y_var_dim_mapping[0]
            check_variable_and_dtype(
                Out_grad, 'tensor',
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                '_c_identity')
            intermediate_var_0 = main_block.create_var(
                name=unique_name.generate_with_ignorable_key(".".join(
                    ["c_identity", 'tmp'])) + "@GRAD",
                dtype=Out_grad.dtype,
                shape=Out_grad.shape,
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=Out_grad.stop_gradient)
            # copy X_var's dist_attr to intermediate_var_0's dist_attr
            out_grad_dist_attr = dist_attr.get_input_dist_attr(Out_grad.name)
            assert out_grad_dist_attr is not None
            ctx.set_tensor_dist_attr_for_program(intermediate_var_0,
                                                 out_grad_dist_attr)
            group_ranks = _get_comm_group(
                process_mesh_group, process_mesh_shape, parallel_axis, rank_id)
            group = new_process_group(group_ranks)
            c_identity_op = main_block.append_op(
                type='c_identity',
                inputs={'X': [Out_grad]},
                outputs={'Out': intermediate_var_0},
                attrs={
                    'ring_id': group.id,
                    'use_calc_stream': True,
                    'use_model_parallel': True,
                    OP_ROLE_KEY: OpRole.Backward,
                })
            check_variable_and_dtype(intermediate_var_0, 'x',
                                     ['float16', 'float32', 'float64'],
                                     'linear')
            check_dtype(intermediate_var_0.dtype, 'dtype',
                        ['float16', 'float32', 'float64'], 'linear')
            set_comm_op_dist_attr_for_program(
                c_identity_op, dist_attr.process_mesh, out_grad_dist_attr, ctx)
            # Re-run the serial backward op against the identity's output.
            new_kwargs = copy.deepcopy(kwargs)
            new_kwargs['Out@GRAD'] = [intermediate_var_0.name]
            matmul_op_desc = copy_op_with_new_input_output(
                ctx, main_block, backward_op, **new_kwargs)
        else:
            # col parallel: matmul + allreduce
            assert Y_var_dim_mapping[0] < 0
            parallel_axis = Y_var_dim_mapping[1]
            new_kwargs = copy.deepcopy(kwargs)
            # NOTE (JZ-LIANG) should allow left operand be empty for matmul grad
            has_x_grad = len(kwargs['X@GRAD']) > 0
            if has_x_grad:
                assert len(kwargs['X@GRAD']) == 1
                X_grad = main_block.var(kwargs['X@GRAD'][0])
                intermediate_var_0 = main_block.create_var(
                    name=unique_name.generate_with_ignorable_key(".".join(
                        ["c_identity", 'tmp'])) + "@GRAD",
                    dtype=X_grad.dtype,
                    shape=X_grad.shape,
                    type=core.VarDesc.VarType.LOD_TENSOR,
                    persistable=False,
                    stop_gradient=X_grad.stop_gradient)
                X_grad_dist_attr = dist_attr.get_output_dist_attr(X_grad.name)
                assert X_grad_dist_attr is not None
                ctx.set_tensor_dist_attr_for_program(intermediate_var_0,
                                                     X_grad_dist_attr)
                new_kwargs['X@GRAD'] = [intermediate_var_0.name]
            matmul_op_desc = copy_op_with_new_input_output(
                ctx, main_block, backward_op, **new_kwargs)
            # NOTE (JZ-LIANG) trick to skip one allreduce if left operand has not grad
            if has_x_grad:
                group_ranks = _get_comm_group(process_mesh_group,
                                              process_mesh_shape, parallel_axis,
                                              rank_id)
                group = new_process_group(group_ranks)
                c_allreduce_sum_op = main_block.append_op(
                    type='c_allreduce_sum',
                    inputs={'X': [intermediate_var_0.name]},
                    outputs={'Out': kwargs['X@GRAD']},
                    attrs={
                        'ring_id': group.id,
                        'use_calc_stream': True,
                        'use_model_parallel': True,
                        OP_ROLE_KEY: OpRole.Backward
                    })
                set_comm_op_dist_attr_for_program(c_allreduce_sum_op,
                                                  dist_attr.process_mesh,
                                                  X_grad_dist_attr, ctx)
    else:
        # replicate
        matmul_op_desc = copy_op_with_new_input_output(ctx, main_block,
                                                       backward_op, **kwargs)
    main_block._sync_with_cpp()
    # check if need gradient allreduce
    need_gradient_allreduce = False
    process_mesh = dist_attr.process_mesh
    var_dim_mapping = dist_attr.get_input_dims_mapping(X_var.name)
    mesh_shape = process_mesh.topology
    batch_size_axis = var_dim_mapping[0]
    if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
        need_gradient_allreduce = True
        group_ranks = _get_comm_group(process_mesh.processes,
                                      process_mesh.topology, batch_size_axis,
                                      rank_id)
        dp_degree = len(group_ranks)
        dp_group = new_process_group(group_ranks)
    if need_gradient_allreduce and is_parameter_related(Y_var.name, main_block):
        # Data-parallel gradient sync: allreduce Y@GRAD then average it.
        Y_Grad_var = main_block.var(kwargs['Y@GRAD'][0])
        allreduce_op = main_block.append_op(
            type='c_allreduce_sum',
            inputs={'X': [Y_Grad_var]},
            outputs={'Out': [Y_Grad_var]},
            attrs={
                'ring_id': dp_group.id,
                'use_calc_stream': True,
                OP_ROLE_KEY: OpRole.Backward
            })
        scale_op = main_block.append_op(
            type='scale',
            inputs={'X': Y_Grad_var},
            outputs={'Out': Y_Grad_var},
            attrs={'scale': 1.0 / dp_degree,
                   OP_ROLE_KEY: OpRole.Backward})
        main_block._sync_with_cpp()
        dims_mapping = ctx.get_tensor_dist_attr_for_program(
            Y_Grad_var).dims_mapping
        process_mesh = dist_attr.process_mesh
        for op in [allreduce_op, scale_op]:
            op_attr = OperatorDistributedAttribute()
            op_attr.process_mesh = process_mesh
            op_attr.set_output_dims_mapping(Y_Grad_var.name, dims_mapping)
            op_attr.set_input_dims_mapping(Y_Grad_var.name, dims_mapping)
            ctx.set_op_dist_attr_for_program(op, op_attr)
def _init_param_sync(Weight_var, dist_op_context, startup_block, ctx, rank_id):
    """Broadcast a parameter once at startup so every replica starts equal.

    For each mesh axis the parameter is NOT sharded along (and whose size is
    > 1), append a c_broadcast from rank 0 of that axis's communication
    group.  Idempotent per parameter via ``already_init_sync_vars``.
    """
    if Weight_var.name in dist_op_context.already_init_sync_vars:
        return
    assert startup_block.has_var(Weight_var.name)
    dist_op_context.already_init_sync_vars.add(Weight_var.name)
    param = startup_block.var(Weight_var.name)
    param_dist_attr = ctx.get_tensor_dist_attr_for_program(param)
    process_mesh = param_dist_attr.process_mesh
    dim_mapping = param_dist_attr.dims_mapping
    for axis, size in enumerate(process_mesh.topology):
        # Skip trivial axes and axes the parameter is sharded on.
        if size <= 1 or axis in dim_mapping:
            continue
        group_ranks = _get_comm_group(process_mesh.processes,
                                      process_mesh.topology, axis, rank_id)
        sync_group = new_process_group(group_ranks)
        startup_block.append_op(
            type='c_broadcast',
            inputs={'X': param},
            outputs={'Out': param},
            attrs={
                'ring_id': sync_group.id,
                'root': 0,
                'use_calc_stream': True,
                OP_ROLE_KEY: OpRole.Forward
            })
    startup_block._sync_with_cpp()
class DistributedMatmul(DistributedOperatorImplContainer):
    """Container holding the distributed implementations of the matmul op."""

    def __init__(self, op_type):
        super(DistributedMatmul, self).__init__(op_type)


# Register the container under the serial op type "matmul".
register_distributed_operator_impl_container(DistributedMatmul("matmul"))
# ColumnParallel
class DistributedMatmulImpl0(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulImpl0, self).__init__(name)
self._forward_implemented = True
self._backward_implemented = True
def is_input_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_shard(x_dims_mapping[-1]):
return False
if is_dim_shard(y_dims_mapping[-2]) or is_dim_replicate(y_dims_mapping[
-1]):
return False
for mapping in x_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def is_output_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_replicate(out_dims_mapping[-1]):
return False
for mapping in out_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def is_auto_compatible(self, dist_op):
if (not self.is_input_compatible(dist_op)) or \
(not self.is_output_compatible(dist_op)):
return False
if not _is_auto_compatible_for_matmul(dist_op):
return False
return True
def update_dims_mapping(self, dist_op):
changed = False
dim_changed = _update_dims_mapping_for_matmul(dist_op)
if dim_changed:
changed = True
return changed
@staticmethod
def forward(ctx, *args, **kwargs):
"""
kwargs: inputname_mapping & outputname_mapping
"""
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.work_block
startup_block = dist_op_context.startup_block
src_op = dist_op_context.cur_src_op
rank_id = dist_op_context.rank_id
op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format(
str(src_op))
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in op_dist_attr.process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh,
rank_id)
# check validation of inputs / outputs
for input_name in src_op.desc.input_names():
assert input_name in kwargs, "input [{}] is not given".format(
input_name)
assert len(kwargs[input_name]) == len(
src_op.desc.input(input_name)
), "number of tensor for input [{}] is not match".format(input_name)
for output_name in src_op.desc.output_names():
assert output_name in kwargs, "input [{}] is not given".format(
output_name)
assert len(kwargs[output_name]) == len(
src_op.desc.output(output_name)
), "number of tensor for input [{}] is not match".format(
output_name)
X_var = main_block.var(kwargs['X'][0])
Weight_var = main_block.var(kwargs['Y'][0])
Out_var = main_block.var(kwargs['Out'][0])
# TODO infer logic comm presentation
matmul_col_dim_mapping = op_dist_attr.get_input_dims_mapping(
Weight_var.name)[-1]
assert matmul_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format(
matmul_col_dim_mapping)
process_mesh_shape = op_dist_attr.process_mesh.topology
process_mesh_group = op_dist_attr.process_mesh.processes
parallel_axis = matmul_col_dim_mapping
group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
parallel_axis, rank_id)
group = new_process_group(group_ranks)
# infer new var shape with op dist attr
x_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(X_var)
assert x_tensor_dist_attr is not None
identity_var_dist_attr = op_dist_attr.get_input_dist_attr(X_var.name)
assert identity_var_dist_attr is not None
ref_shape_x = infer_shape(main_block, X_var, x_tensor_dist_attr,
identity_var_dist_attr)
# infer out var shape with op dist attr
out_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(Out_var)
assert out_tensor_dist_attr is not None
out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name)
assert out_var_dist_attr is not None
ref_shape_out = infer_shape(main_block, Out_var, out_tensor_dist_attr,
out_var_dist_attr)
intermediate_var_0 = main_block.create_var(
name=unique_name.generate_with_ignorable_key(".".join(
["c_identity", 'tmp'])),
dtype=X_var.dtype,
shape=X_var.shape,
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=False,
stop_gradient=X_var.stop_gradient)
# set intermediate_var_0's dist_attr with X_var's dist_attr
ctx.set_tensor_dist_attr_for_program(intermediate_var_0,
identity_var_dist_attr)
check_variable_and_dtype(
X_var, 'tensor',
['float16', 'float32', 'float64', 'int32', 'int64'], '_c_identity')
c_identity_op = main_block.append_op(
type='c_identity',
inputs={'X': [X_var]},
outputs={'Out': intermediate_var_0},
attrs={
'ring_id': group.id,
'use_calc_stream': True,
'use_model_parallel': True,
})
if intermediate_var_0.shape != ref_shape_x:
intermediate_var_0.desc.set_shape(ref_shape_x)
check_variable_and_dtype(intermediate_var_0, 'x',
['float16', 'float32', 'float64'], 'linear')
check_dtype(intermediate_var_0.dtype, 'dtype',
['float16', 'float32', 'float64'], 'linear')
attrs = {
'transpose_X': False,
'transpose_Y': False,
'alpha': 1,
}
inputs = {'X': [intermediate_var_0], 'Y': [Weight_var]}
matmul_op = main_block.append_op(
type='matmul', inputs=inputs, outputs={'Out': Out_var}, attrs=attrs)
if Out_var.shape != ref_shape_out:
Out_var.desc.set_shape(ref_shape_out)
# set dist op's dist_attr with serial op's dist_attr
# c_identity
identity_op_dist_attr = OperatorDistributedAttribute()
identity_op_dist_attr.process_mesh = op_dist_attr.process_mesh
identity_op_dist_attr.impl_type = op_dist_attr.impl_type
identity_op_dist_attr.impl_idx = op_dist_attr.impl_idx
# input
input_varname = c_identity_op.desc.input_arg_names()[0]
input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname)
assert input_dist_attr is not None, "dist_attr is {}".format(
op_dist_attr)
identity_op_dist_attr.set_input_dist_attr(input_varname,
input_dist_attr)
# output
output_varname = c_identity_op.desc.output_arg_names()[0]
identity_op_dist_attr.set_output_dist_attr(output_varname,
input_dist_attr)
# set op dist attr
ctx.set_op_dist_attr_for_program(c_identity_op, identity_op_dist_attr)
# matmul
matmul_op_dist_attr = OperatorDistributedAttribute()
matmul_op_dist_attr.process_mesh = op_dist_attr.process_mesh
matmul_op_dist_attr.impl_type = op_dist_attr.impl_type
matmul_op_dist_attr.impl_idx = op_dist_attr.impl_idx
# input
for input_varname in matmul_op.desc.input_arg_names():
if input_varname in src_op.desc.input_arg_names():
input_dist_attr = op_dist_attr.get_input_dist_attr(
input_varname)
assert input_dist_attr is not None, "dist_attr is {}".format(
op_dist_attr)
matmul_op_dist_attr.set_input_dist_attr(input_varname,
input_dist_attr)
else:
input_var = main_block.var(input_varname)
tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(
input_var)
matmul_op_dist_attr.set_input_dist_attr(input_varname,
tensor_dist_attr)
# output
output_varname = matmul_op.desc.output_arg_names()[0]
output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname)
assert output_dist_attr is not None, "dist_attr is {}".format(
op_dist_attr)
matmul_op_dist_attr.set_output_dist_attr(output_varname,
output_dist_attr)
# set op dist attr
ctx.set_op_dist_attr_for_program(matmul_op, matmul_op_dist_attr)
# init param sync
if Weight_var.is_parameter and not op_dist_attr.is_recompute:
_init_param_sync(Weight_var, dist_op_context, startup_block, ctx,
rank_id)
    @staticmethod
    def backward(ctx, *args, **kwargs):
        # Delegate to the shared matmul backward helper, which handles the
        # case where the right operand (Y) is a parameter.
        _right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
# RowParallel
class DistributedMatmulImpl1(DistributedOperatorImpl):
    """Row-parallel distributed `matmul`.

    X is sharded along its reduction (last) dim and Y (the weight) along its
    rows; each rank computes a partial matmul and the results are combined
    with a `c_allreduce_sum`.
    """

    def __init__(self, name):
        super(DistributedMatmulImpl1, self).__init__(name)
        self._forward_implemented = True
        self._backward_implemented = True

    def is_input_compatible(self, dist_op):
        """Return True if X/Y dims mappings fit the row-parallel pattern."""
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        y_name = op_desc.input('Y')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
        # X's reduction (last) dim must be sharded for row parallelism.
        if is_dim_replicate(x_dims_mapping[-1]):
            return False
        # Y must be sharded along rows (-2) and replicated along cols (-1).
        if is_dim_replicate(y_dims_mapping[-2]) or is_dim_shard(y_dims_mapping[
                -1]):
            return False
        # Other dimensions must be replicate except the batch dimension
        for mapping in x_dims_mapping[1:-1]:
            if is_dim_shard(mapping):
                return False
        return True

    def is_output_compatible(self, dist_op):
        """Return True if Out's dims mapping fits the row-parallel pattern."""
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        out_name = op_desc.output('Out')[0]
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        # Out of a row-parallel matmul is replicated along its last dim.
        if is_dim_shard(out_dims_mapping[-1]):
            return False
        # Other dimensions must be replicate except the batch dimension
        for mapping in out_dims_mapping[1:-1]:
            if is_dim_shard(mapping):
                return False
        return True

    def is_auto_compatible(self, dist_op):
        """Inputs, outputs and their joint mapping must all be compatible."""
        if (not self.is_input_compatible(dist_op)) or \
            (not self.is_output_compatible(dist_op)):
            return False
        if not _is_auto_compatible_for_matmul(dist_op):
            return False
        return True

    def update_dims_mapping(self, dist_op):
        """Propagate dims mappings; return True when anything changed."""
        changed = False
        dim_changed = _update_dims_mapping_for_matmul(dist_op)
        if dim_changed:
            changed = True
        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
        """
        kwargs: inputname_mapping & outputname_mapping

        Rewrites the serial `matmul` into `matmul -> c_allreduce_sum` in the
        dist main program and attaches distributed attributes to both new ops.
        """
        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        startup_block = dist_op_context.startup_block
        src_op = dist_op_context.cur_src_op
        rank_id = dist_op_context.rank_id
        op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
        assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format(
            str(src_op))
        # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
        if rank_id not in op_dist_attr.process_mesh.processes:
            rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh,
                                              rank_id)
        # check validation of inputs / outputs
        for input_name in src_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name)
            assert len(kwargs[input_name]) == len(
                src_op.desc.input(input_name)
            ), "number of tensor for input [{}] is not match".format(input_name)
        for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "input [{}] is not given".format(
                output_name)
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensor for input [{}] is not match".format(
                output_name)
        X_var = main_block.var(kwargs['X'][0])
        Weight_var = main_block.var(kwargs['Y'][0])
        Out_var = main_block.var(kwargs['Out'][0])
        # TODO infer logic comm presentation
        # The mesh axis sharding Y's rows is the axis to allreduce over.
        matmul_row_dim_mapping = op_dist_attr.get_input_dims_mapping(
            Weight_var.name)[-2]
        assert matmul_row_dim_mapping >= 0, "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format(
            matmul_row_dim_mapping)
        process_mesh_shape = op_dist_attr.process_mesh.topology
        process_mesh_group = op_dist_attr.process_mesh.processes
        parallel_axis = matmul_row_dim_mapping
        group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
                                      parallel_axis, rank_id)
        group = new_process_group(group_ranks)
        check_variable_and_dtype(X_var, 'x', ['float16', 'float32', 'float64'],
                                 'linear')
        check_dtype(X_var.dtype, 'dtype', ['float16', 'float32', 'float64'],
                    'linear')
        attrs = {
            'transpose_X': False,
            'transpose_Y': False,
            'alpha': 1,
        }
        inputs = {'X': X_var, 'Y': Weight_var}
        # infer out var shape with op dist attr
        out_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(Out_var)
        assert out_tensor_dist_attr is not None
        out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name)
        assert out_var_dist_attr is not None
        ref_shape = infer_shape(main_block, Out_var, out_tensor_dist_attr,
                                out_var_dist_attr)
        # Temporary holds the partial (pre-allreduce) matmul result.
        intermediate_var_0 = main_block.create_var(
            name=unique_name.generate_with_ignorable_key(".".join(
                ["c_allreduce_sum", 'tmp'])),
            shape=Out_var.shape,
            dtype=Out_var.dtype,
            type=Out_var.type,
            lod_level=Out_var.lod_level,
            persistable=False,
            is_data=False,
            need_check_feed=Out_var.desc.need_check_feed())
        # set intermediate_var_0's dist_attr with Out_var's dist_attr
        ctx.set_tensor_dist_attr_for_program(intermediate_var_0,
                                             out_var_dist_attr)
        matmul_op = main_block.append_op(
            type='matmul',
            inputs=inputs,
            outputs={'Out': intermediate_var_0},
            attrs=attrs)
        if intermediate_var_0.shape != ref_shape:
            intermediate_var_0.desc.set_shape(ref_shape)
        # Sum partial results across the row-parallel group.
        c_allreduce_sum_op = main_block.append_op(
            type='c_allreduce_sum',
            inputs={'X': intermediate_var_0},
            outputs={'Out': Out_var},
            attrs={
                'ring_id': group.id,
                'use_calc_stream': True,
                'use_model_parallel': True
            })
        if Out_var.shape != ref_shape:
            Out_var.desc.set_shape(ref_shape)
        # set dist op's dist_attr with serial op's dist_attr
        # matmul
        matmul_op_dist_attr = OperatorDistributedAttribute()
        matmul_op_dist_attr.process_mesh = op_dist_attr.process_mesh
        matmul_op_dist_attr.impl_type = op_dist_attr.impl_type
        matmul_op_dist_attr.impl_idx = op_dist_attr.impl_idx
        for input_varname in matmul_op.desc.input_arg_names():
            input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname)
            assert input_dist_attr is not None, "dist_attr is {}".format(
                op_dist_attr)
            matmul_op_dist_attr.set_input_dist_attr(input_varname,
                                                    input_dist_attr)
        output_varname = matmul_op.desc.output_arg_names()[0]
        # matmul's output is the intermediate var; reuse Out's serial attr.
        output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name)
        assert output_dist_attr is not None, "dist_attr is {}".format(
            op_dist_attr)
        matmul_op_dist_attr.set_output_dist_attr(output_varname,
                                                 output_dist_attr)
        ctx.set_op_dist_attr_for_program(matmul_op, matmul_op_dist_attr)
        # allreduce
        allreduce_op_dist_attr = OperatorDistributedAttribute()
        allreduce_op_dist_attr.process_mesh = op_dist_attr.process_mesh
        allreduce_op_dist_attr.impl_type = op_dist_attr.impl_type
        allreduce_op_dist_attr.impl_idx = op_dist_attr.impl_idx
        for input_varname in c_allreduce_sum_op.desc.input_arg_names():
            input_var = main_block.var(input_varname)
            tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(input_var)
            assert tensor_dist_attr is not None
            allreduce_op_dist_attr.set_input_dist_attr(input_varname,
                                                       tensor_dist_attr)
        for output_varname in c_allreduce_sum_op.desc.output_arg_names():
            output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname)
            assert output_dist_attr is not None, "dist_attr is {}".format(
                op_dist_attr)
            allreduce_op_dist_attr.set_output_dist_attr(output_varname,
                                                        output_dist_attr)
        ctx.set_op_dist_attr_for_program(c_allreduce_sum_op,
                                         allreduce_op_dist_attr)
        # init param sync
        if Weight_var.is_parameter and not op_dist_attr.is_recompute:
            _init_param_sync(Weight_var, dist_op_context, startup_block, ctx,
                             rank_id)

    @staticmethod
    def backward(ctx, *args, **kwargs):
        # Shared helper handles the parameter-as-right-operand case.
        _right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
# ReplicateParallel
class DistributedMatmulImpl2(DistributedOperatorImpl):
    """Replicate-parallel distributed `matmul`.

    Valid only when neither of the trailing two (matrix) dims of X, Y or Out
    is sharded; execution simply falls back to the default implementation,
    since no communication is required.
    """

    def __init__(self, name):
        super(DistributedMatmulImpl2, self).__init__(name)

    @staticmethod
    def _matrix_dims_replicated(dims_mapping):
        # True when neither of the trailing two dims is sharded; the -2 dim
        # is only inspected when the mapping actually has one.
        if is_dim_shard(dims_mapping[-1]):
            return False
        if is_valid_list_index(dims_mapping,
                               -2) and is_dim_shard(dims_mapping[-2]):
            return False
        return True

    def is_input_compatible(self, dist_op):
        """Both X and Y must be unsharded on their matrix dims."""
        op_desc = dist_op.serial_op.desc
        dist_attr = dist_op.dist_attr
        x_mapping = dist_attr.get_input_dims_mapping(op_desc.input('X')[0])
        y_mapping = dist_attr.get_input_dims_mapping(op_desc.input('Y')[0])
        return (self._matrix_dims_replicated(x_mapping) and
                self._matrix_dims_replicated(y_mapping))

    def is_output_compatible(self, dist_op):
        """Out must be unsharded on its matrix dims."""
        op_desc = dist_op.serial_op.desc
        dist_attr = dist_op.dist_attr
        out_mapping = dist_attr.get_output_dims_mapping(
            op_desc.output('Out')[0])
        return self._matrix_dims_replicated(out_mapping)

    def is_auto_compatible(self, dist_op):
        """Inputs, outputs and the joint matmul mapping must all agree."""
        if not self.is_input_compatible(dist_op):
            return False
        if not self.is_output_compatible(dist_op):
            return False
        return bool(_is_auto_compatible_for_matmul(dist_op))

    def update_dims_mapping(self, dist_op):
        """Propagate dims mappings; return True when anything changed."""
        return bool(_update_dims_mapping_for_matmul(dist_op))

    @staticmethod
    def forward(ctx, *args, **kwargs):
        # Fully replicated: no communication op needed, reuse the default.
        DistributedDefaultImpl0.forward(ctx, *args, **kwargs)

    @staticmethod
    def backward(ctx, *args, **kwargs):
        _right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
# Register the three matmul implementations.
# NOTE(review): registration order appears to determine each impl's index
# (0 = column-parallel, 1 = row-parallel, 2 = replicate-parallel) — confirm.
register_distributed_operator_impl("matmul",
                                   DistributedMatmulImpl0("column_parallel"))
register_distributed_operator_impl("matmul",
                                   DistributedMatmulImpl1("row_parallel"))
register_distributed_operator_impl("matmul",
                                   DistributedMatmulImpl2("replicate_parallel"))
class DistributedMatmulV2(DistributedOperatorImplContainer):
    """Container holding the distributed implementations for `matmul_v2`."""

    def __init__(self, op_type):
        super(DistributedMatmulV2, self).__init__(op_type)
# Register the container for the matmul_v2 op type.
register_distributed_operator_impl_container(DistributedMatmulV2("matmul_v2"))
# ColumnParallel
class DistributedMatmulV2Impl0(DistributedOperatorImpl):
    """Column-parallel distributed `matmul_v2`.

    Y (the weight) is sharded along its columns; X is broadcast to the group
    via `c_identity` (identity forward, allreduce in backward), then each
    rank computes its slice of the output.
    """

    def __init__(self, name):
        super(DistributedMatmulV2Impl0, self).__init__(name)
        self._forward_implemented = True
        self._backward_implemented = True

    def is_input_compatible(self, dist_op):
        """Return True if X/Y dims mappings fit the column-parallel pattern."""
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        y_name = op_desc.input('Y')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
        # X's reduction (last) dim must be replicated.
        if is_dim_shard(x_dims_mapping[-1]):
            return False
        # Y must be replicated along rows (-2) and sharded along cols (-1).
        if is_dim_shard(y_dims_mapping[-2]) or is_dim_replicate(y_dims_mapping[
                -1]):
            return False
        for mapping in x_dims_mapping[1:-1]:
            if is_dim_shard(mapping):
                return False
        return True

    def is_output_compatible(self, dist_op):
        """Out's last dim must be sharded (it carries Y's column shards)."""
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        out_name = op_desc.output('Out')[0]
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        if is_dim_replicate(out_dims_mapping[-1]):
            return False
        for mapping in out_dims_mapping[1:-1]:
            if is_dim_shard(mapping):
                return False
        return True

    def is_auto_compatible(self, dist_op):
        """Inputs, outputs and their joint mapping must all be compatible."""
        if (not self.is_input_compatible(dist_op)) or \
            (not self.is_output_compatible(dist_op)):
            return False
        if not _is_auto_compatible_for_matmul(dist_op):
            return False
        return True

    def update_dims_mapping(self, dist_op):
        """Propagate dims mappings; return True when anything changed."""
        changed = False
        dim_changed = _update_dims_mapping_for_matmul(dist_op)
        if dim_changed:
            changed = True
        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
        """
        kwargs: inputname_mapping & outputname_mapping

        Rewrites the serial `matmul_v2` into `c_identity -> matmul_v2` in the
        dist main program and attaches distributed attributes to both new ops.
        """
        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        startup_block = dist_op_context.startup_block
        src_op = dist_op_context.cur_src_op
        rank_id = dist_op_context.rank_id
        op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
        assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format(
            str(src_op))
        # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
        if rank_id not in op_dist_attr.process_mesh.processes:
            rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh,
                                              rank_id)
        # check validation of inputs / outputs
        for input_name in src_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name)
            assert len(kwargs[input_name]) == len(
                src_op.desc.input(input_name)
            ), "number of tensor for input [{}] is not match".format(input_name)
        for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "input [{}] is not given".format(
                output_name)
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensor for input [{}] is not match".format(
                output_name)
        X_var = main_block.var(kwargs['X'][0])
        Weight_var = main_block._var_recursive(kwargs['Y'][0])
        Out_var = main_block.var(kwargs['Out'][0])
        # TODO infer logic comm presentation
        # The mesh axis sharding Y's columns defines the parallel group.
        matmul_col_dim_mapping = op_dist_attr.get_input_dims_mapping(
            Weight_var.name)[-1]
        assert matmul_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format(
            matmul_col_dim_mapping)
        process_mesh_shape = op_dist_attr.process_mesh.topology
        process_mesh_group = op_dist_attr.process_mesh.processes
        parallel_axis = matmul_col_dim_mapping
        group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
                                      parallel_axis, rank_id)
        group = new_process_group(group_ranks)
        # infer new var shape with op dist attr
        x_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(X_var)
        assert x_tensor_dist_attr is not None
        identity_var_dist_attr = op_dist_attr.get_input_dist_attr(X_var.name)
        assert identity_var_dist_attr is not None
        ref_shape_x = infer_shape(main_block, X_var, x_tensor_dist_attr,
                                  identity_var_dist_attr)
        # infer out var shape with op dist attr
        out_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(Out_var)
        assert out_tensor_dist_attr is not None
        out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name)
        assert out_var_dist_attr is not None
        ref_shape_out = infer_shape(main_block, Out_var, out_tensor_dist_attr,
                                    out_var_dist_attr)
        # Temporary holds the broadcast copy of X produced by c_identity.
        intermediate_var_0 = main_block.create_var(
            name=unique_name.generate_with_ignorable_key(".".join(
                ["c_identity", 'tmp'])),
            dtype=X_var.dtype,
            shape=X_var.shape,
            type=core.VarDesc.VarType.LOD_TENSOR,
            persistable=False,
            stop_gradient=X_var.stop_gradient)
        # set intermediate_var_0's dist_attr with X_var's dist_attr
        ctx.set_tensor_dist_attr_for_program(intermediate_var_0,
                                             identity_var_dist_attr)
        check_variable_and_dtype(
            X_var, 'tensor',
            ['float16', 'float32', 'float64', 'int32', 'int64'], '_c_identity')
        c_identity_op = main_block.append_op(
            type='c_identity',
            inputs={'X': [X_var]},
            outputs={'Out': intermediate_var_0},
            attrs={
                'ring_id': group.id,
                'use_calc_stream': True,
                'use_model_parallel': True,
            })
        if intermediate_var_0.shape != ref_shape_x:
            intermediate_var_0.desc.set_shape(ref_shape_x)
        check_variable_and_dtype(intermediate_var_0, 'x',
                                 ['float16', 'float32', 'float64'], 'linear')
        check_dtype(intermediate_var_0.dtype, 'dtype',
                    ['float16', 'float32', 'float64'], 'linear')
        attrs = {'trans_x': False, 'trans_y': False}
        inputs = {'X': [intermediate_var_0], 'Y': [Weight_var]}
        matmul_v2_op = main_block.append_op(
            type='matmul_v2',
            inputs=inputs,
            outputs={'Out': Out_var},
            attrs=attrs)
        if Out_var.shape != ref_shape_out:
            Out_var.desc.set_shape(ref_shape_out)
        # set dist op's dist_attr with serial op's dist_attr
        # c_identity
        identity_op_dist_attr = OperatorDistributedAttribute()
        identity_op_dist_attr.process_mesh = op_dist_attr.process_mesh
        identity_op_dist_attr.impl_type = op_dist_attr.impl_type
        identity_op_dist_attr.impl_idx = op_dist_attr.impl_idx
        # input
        input_varname = c_identity_op.desc.input_arg_names()[0]
        input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname)
        assert input_dist_attr is not None, "dist_attr is {}".format(
            op_dist_attr)
        identity_op_dist_attr.set_input_dist_attr(input_varname,
                                                  input_dist_attr)
        # output: identity's output shares the input's dist attr.
        output_varname = c_identity_op.desc.output_arg_names()[0]
        identity_op_dist_attr.set_output_dist_attr(output_varname,
                                                   input_dist_attr)
        ctx.set_op_dist_attr_for_program(c_identity_op, identity_op_dist_attr)
        # matmulv2
        matmulv2_op_dist_attr = OperatorDistributedAttribute()
        matmulv2_op_dist_attr.process_mesh = op_dist_attr.process_mesh
        matmulv2_op_dist_attr.impl_type = op_dist_attr.impl_type
        matmulv2_op_dist_attr.impl_idx = op_dist_attr.impl_idx
        for input_varname in matmul_v2_op.desc.input_arg_names():
            if input_varname in src_op.desc.input_arg_names():
                # Input kept from the serial op: reuse its serial dist attr.
                input_dist_attr = op_dist_attr.get_input_dist_attr(
                    input_varname)
                assert input_dist_attr is not None, "dist_attr is {}".format(
                    op_dist_attr)
                matmulv2_op_dist_attr.set_input_dist_attr(input_varname,
                                                          input_dist_attr)
            else:
                # Newly inserted intermediate: use its tensor dist attr.
                input_var = main_block.var(input_varname)
                tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(
                    input_var)
                matmulv2_op_dist_attr.set_input_dist_attr(input_varname,
                                                          tensor_dist_attr)
        for output_varname in matmul_v2_op.desc.output_arg_names():
            output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname)
            assert output_dist_attr is not None, "dist_attr is {}".format(
                op_dist_attr)
            matmulv2_op_dist_attr.set_output_dist_attr(output_varname,
                                                       output_dist_attr)
        ctx.set_op_dist_attr_for_program(matmul_v2_op, matmulv2_op_dist_attr)
        # init param sync
        if Weight_var.is_parameter and not op_dist_attr.is_recompute:
            _init_param_sync(Weight_var, dist_op_context, startup_block, ctx,
                             rank_id)

    @staticmethod
    def backward(ctx, *args, **kwargs):
        # Shared helper handles the parameter-as-right-operand case.
        _right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
# RowParallel
class DistributedMatmulV2Impl1(DistributedOperatorImpl):
    """Row-parallel distributed `matmul_v2`.

    X is sharded along its reduction (last) dim and Y (the weight) along its
    rows; each rank computes a partial matmul and the results are combined
    with a `c_allreduce_sum`.
    """

    def __init__(self, name):
        super(DistributedMatmulV2Impl1, self).__init__(name)
        self._forward_implemented = True
        self._backward_implemented = True

    def is_input_compatible(self, dist_op):
        """Return True if X/Y dims mappings fit the row-parallel pattern."""
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        y_name = op_desc.input('Y')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
        # X's reduction (last) dim must be sharded for row parallelism.
        if is_dim_replicate(x_dims_mapping[-1]):
            return False
        # Y must be sharded along rows (-2) and replicated along cols (-1).
        if is_dim_replicate(y_dims_mapping[-2]) or is_dim_shard(y_dims_mapping[
                -1]):
            return False
        # Other dimensions must be replicate except the batch dimension
        for mapping in x_dims_mapping[1:-1]:
            if is_dim_shard(mapping):
                return False
        return True

    def is_output_compatible(self, dist_op):
        """Return True if Out's dims mapping fits the row-parallel pattern."""
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        out_name = op_desc.output('Out')[0]
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        if is_dim_shard(out_dims_mapping[-1]):
            return False
        # Other dimensions must be replicate except the batch dimension
        for mapping in out_dims_mapping[1:-1]:
            if is_dim_shard(mapping):
                return False
        return True

    def is_auto_compatible(self, dist_op):
        """Inputs, outputs and their joint mapping must all be compatible."""
        if (not self.is_input_compatible(dist_op)) or \
            (not self.is_output_compatible(dist_op)):
            return False
        if not _is_auto_compatible_for_matmul(dist_op):
            return False
        return True

    def update_dims_mapping(self, dist_op):
        """Propagate dims mappings; return True when anything changed."""
        changed = False
        dim_changed = _update_dims_mapping_for_matmul(dist_op)
        if dim_changed:
            changed = True
        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
        """
        kwargs: inputname_mapping & outputname_mapping

        Rewrites the serial `matmul_v2` into `matmul_v2 -> c_allreduce_sum`
        in the dist main program and attaches distributed attributes to both
        new ops.
        """
        dist_op_context = ctx.dist_op_context
        main_block = dist_op_context.work_block
        startup_block = dist_op_context.startup_block
        src_op = dist_op_context.cur_src_op
        rank_id = dist_op_context.rank_id
        op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
        assert op_dist_attr is not None, "backward op [{}] don't have dist attribute !".format(
            str(src_op))
        # FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
        if rank_id not in op_dist_attr.process_mesh.processes:
            rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh,
                                              rank_id)
        # check validation of inputs / outputs
        for input_name in src_op.desc.input_names():
            assert input_name in kwargs, "input [{}] is not given".format(
                input_name)
            assert len(kwargs[input_name]) == len(
                src_op.desc.input(input_name)
            ), "number of tensor for input [{}] is not match".format(input_name)
        for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "input [{}] is not given".format(
                output_name)
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensor for input [{}] is not match".format(
                output_name)
        X_var = main_block.var(kwargs['X'][0])
        Weight_var = main_block._var_recursive(kwargs['Y'][0])
        Out_var = main_block.var(kwargs['Out'][0])
        # TODO infer logic comm presentation
        # The mesh axis sharding Y's rows is the axis to allreduce over.
        matmul_row_dim_mapping = op_dist_attr.get_input_dims_mapping(
            Weight_var.name)[-2]
        assert matmul_row_dim_mapping >= 0, "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format(
            matmul_row_dim_mapping)
        process_mesh_shape = op_dist_attr.process_mesh.topology
        process_mesh_group = op_dist_attr.process_mesh.processes
        parallel_axis = matmul_row_dim_mapping
        group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
                                      parallel_axis, rank_id)
        group = new_process_group(group_ranks)
        check_variable_and_dtype(X_var, 'x', ['float16', 'float32', 'float64'],
                                 'linear')
        check_dtype(X_var.dtype, 'dtype', ['float16', 'float32', 'float64'],
                    'linear')
        attrs = {'trans_x': False, 'trans_y': False}
        inputs = {'X': X_var, 'Y': Weight_var}
        # infer out var shape with op dist attr
        out_tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(Out_var)
        assert out_tensor_dist_attr is not None
        out_var_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name)
        assert out_var_dist_attr is not None
        ref_shape = infer_shape(main_block, Out_var, out_tensor_dist_attr,
                                out_var_dist_attr)
        # Temporary holds the partial (pre-allreduce) matmul result.
        intermediate_var_0 = main_block.create_var(
            name=unique_name.generate_with_ignorable_key(".".join(
                ["c_allreduce_sum", 'tmp'])),
            shape=Out_var.shape,
            dtype=Out_var.dtype,
            type=Out_var.type,
            lod_level=Out_var.lod_level,
            persistable=False,
            is_data=False,
            need_check_feed=Out_var.desc.need_check_feed())
        # set intermediate_var_0's dist_attr with Out_var's dist_attr
        ctx.set_tensor_dist_attr_for_program(intermediate_var_0,
                                             out_var_dist_attr)
        matmul_v2_op = main_block.append_op(
            type='matmul_v2',
            inputs=inputs,
            outputs={'Out': intermediate_var_0},
            attrs=attrs)
        if intermediate_var_0.shape != ref_shape:
            intermediate_var_0.desc.set_shape(ref_shape)
        # Sum partial results across the row-parallel group.
        c_allreduce_sum_op = main_block.append_op(
            type='c_allreduce_sum',
            inputs={'X': intermediate_var_0},
            outputs={'Out': Out_var},
            attrs={
                'ring_id': group.id,
                'use_calc_stream': True,
                'use_model_parallel': True
            })
        if Out_var.shape != ref_shape:
            Out_var.desc.set_shape(ref_shape)
        # set dist op's dist_attr with serial op's dist_attr
        # matmulv2
        matmulv2_op_dist_attr = OperatorDistributedAttribute()
        matmulv2_op_dist_attr.process_mesh = op_dist_attr.process_mesh
        matmulv2_op_dist_attr.impl_type = op_dist_attr.impl_type
        matmulv2_op_dist_attr.impl_idx = op_dist_attr.impl_idx
        for input_varname in matmul_v2_op.desc.input_arg_names():
            input_dist_attr = op_dist_attr.get_input_dist_attr(input_varname)
            assert input_dist_attr is not None, "dist_attr is {}".format(
                op_dist_attr)
            matmulv2_op_dist_attr.set_input_dist_attr(input_varname,
                                                      input_dist_attr)
        output_varname = matmul_v2_op.desc.output_arg_names()[0]
        # matmul_v2's output is the intermediate var; reuse Out's serial attr.
        output_dist_attr = op_dist_attr.get_output_dist_attr(Out_var.name)
        assert output_dist_attr is not None, "dist_attr is {}".format(
            op_dist_attr)
        matmulv2_op_dist_attr.set_output_dist_attr(output_varname,
                                                   output_dist_attr)
        ctx.set_op_dist_attr_for_program(matmul_v2_op, matmulv2_op_dist_attr)
        # allreduce
        allreduce_op_dist_attr = OperatorDistributedAttribute()
        allreduce_op_dist_attr.process_mesh = op_dist_attr.process_mesh
        allreduce_op_dist_attr.impl_type = op_dist_attr.impl_type
        allreduce_op_dist_attr.impl_idx = op_dist_attr.impl_idx
        for input_varname in c_allreduce_sum_op.desc.input_arg_names():
            input_var = main_block.var(input_varname)
            tensor_dist_attr = ctx.get_tensor_dist_attr_for_program(input_var)
            assert tensor_dist_attr is not None
            allreduce_op_dist_attr.set_input_dist_attr(input_varname,
                                                       tensor_dist_attr)
        for output_varname in c_allreduce_sum_op.desc.output_arg_names():
            output_dist_attr = op_dist_attr.get_output_dist_attr(output_varname)
            assert output_dist_attr is not None, "dist_attr is {}".format(
                op_dist_attr)
            allreduce_op_dist_attr.set_output_dist_attr(output_varname,
                                                        output_dist_attr)
        ctx.set_op_dist_attr_for_program(c_allreduce_sum_op,
                                         allreduce_op_dist_attr)
        # init param sync
        if Weight_var.is_parameter and not op_dist_attr.is_recompute:
            _init_param_sync(Weight_var, dist_op_context, startup_block, ctx,
                             rank_id)

    @staticmethod
    def backward(ctx, *args, **kwargs):
        # Shared helper handles the parameter-as-right-operand case.
        _right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
# ReplicateParallel
class DistributedMatmulV2Impl2(DistributedOperatorImpl):
    """Replicate-parallel distributed `matmul_v2`.

    Valid only when neither of the trailing two (matrix) dims of X, Y or Out
    is sharded; execution falls back to the default implementation, since no
    communication is required.
    """

    def __init__(self, name):
        super(DistributedMatmulV2Impl2, self).__init__(name)

    def is_input_compatible(self, dist_op):
        """X and Y must be unsharded on their last two (matrix) dims."""
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        y_name = op_desc.input('Y')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
        if is_dim_shard(x_dims_mapping[-1]):
            return False
        if is_valid_list_index(x_dims_mapping,
                               -2) and is_dim_shard(x_dims_mapping[-2]):
            return False
        if is_dim_shard(y_dims_mapping[-1]):
            return False
        if is_valid_list_index(y_dims_mapping,
                               -2) and is_dim_shard(y_dims_mapping[-2]):
            return False
        return True

    def is_output_compatible(self, dist_op):
        """Out must be unsharded on its last two (matrix) dims."""
        # Fixed: the original duplicated the op_desc/op_dist_attr assignments.
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        out_name = op_desc.output('Out')[0]
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        if is_dim_shard(out_dims_mapping[-1]):
            return False
        if is_valid_list_index(out_dims_mapping,
                               -2) and is_dim_shard(out_dims_mapping[-2]):
            return False
        return True

    def is_auto_compatible(self, dist_op):
        """Inputs, outputs and their joint mapping must all be compatible."""
        if (not self.is_input_compatible(dist_op)) or \
            (not self.is_output_compatible(dist_op)):
            return False
        if not _is_auto_compatible_for_matmul(dist_op):
            return False
        return True

    def update_dims_mapping(self, dist_op):
        """Propagate dims mappings; return True when anything changed."""
        changed = False
        dim_changed = _update_dims_mapping_for_matmul(dist_op)
        if dim_changed:
            changed = True
        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
        # Fully replicated: no communication op needed, reuse the default.
        DistributedDefaultImpl0.forward(ctx, *args, **kwargs)

    @staticmethod
    def backward(ctx, *args, **kwargs):
        _right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
# Register the three matmul_v2 implementations.
# NOTE(review): registration order appears to determine each impl's index
# (0 = column-parallel, 1 = row-parallel, 2 = replicate-parallel) — confirm.
register_distributed_operator_impl("matmul_v2",
                                   DistributedMatmulV2Impl0("column_parallel"))
register_distributed_operator_impl("matmul_v2",
                                   DistributedMatmulV2Impl1("row_parallel"))
register_distributed_operator_impl(
    "matmul_v2", DistributedMatmulV2Impl2("replicate_parallel"))
| |
import mimetypes
from email import (
charset as Charset, encoders as Encoders, generator, message_from_string,
)
from email.errors import HeaderParseError
from email.header import Header
from email.headerregistry import Address, parser
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr, formatdate, getaddresses, make_msgid
from io import BytesIO, StringIO
from pathlib import Path
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_str, punycode
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
utf8_charset_qp = Charset.Charset('utf-8')
utf8_charset_qp.body_encoding = Charset.QP
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
# Maximum line length permitted by RFC 5322 (section 2.1.1).
RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998
class BadHeaderError(ValueError):
    """Raised when a header value contains a newline (header injection)."""
    pass
# Header names that contain structured address data (RFC #5322)
# Stored lower-cased: lookups compare against name.lower().
ADDRESS_HEADERS = {
    'from',
    'sender',
    'reply-to',
    'to',
    'cc',
    'bcc',
    'resent-from',
    'resent-sender',
    'resent-to',
    'resent-cc',
    'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
    """Forbid multi-line headers to prevent header injection.

    Return the (name, value) pair with the value coerced to str and, when it
    is not pure ASCII, MIME-encoded per RFC 2047 (address headers are
    sanitized address by address instead).
    """
    encoding = encoding or settings.DEFAULT_CHARSET
    val = str(val)  # val may be a lazy object
    if '\n' in val or '\r' in val:
        raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
    is_ascii = True
    try:
        val.encode('ascii')
    except UnicodeEncodeError:
        is_ascii = False
    if not is_ascii:
        if name.lower() in ADDRESS_HEADERS:
            # Sanitize each address separately so display names are encoded
            # without breaking the address syntax.
            val = ', '.join(
                sanitize_address(addr, encoding)
                for addr in getaddresses((val,))
            )
        else:
            val = Header(val, encoding).encode()
    elif name.lower() == 'subject':
        val = Header(val).encode()
    return name, val
def sanitize_address(addr, encoding):
    """
    Format a pair of (name, address) or an email address string.

    Returns a single RFC-compliant address string with the display name
    RFC 2047-encoded when needed and the domain IDNA-encoded. Raises
    ValueError for unparsable addresses or ones containing newlines.
    """
    address = None
    if not isinstance(addr, tuple):
        addr = force_str(addr)
        try:
            token, rest = parser.get_mailbox(addr)
        except (HeaderParseError, ValueError, IndexError):
            raise ValueError('Invalid address "%s"' % addr)
        else:
            if rest:
                # The entire email address must be parsed.
                raise ValueError(
                    'Invalid address; only %s could be parsed from "%s"'
                    % (token, addr)
                )
            nm = token.display_name or ''
            localpart = token.local_part
            domain = token.domain or ''
    else:
        # (name, address) pair: split the address into its parts directly.
        nm, address = addr
        localpart, domain = address.rsplit('@', 1)
    # Reject newlines anywhere in the parts to prevent header injection.
    address_parts = nm + localpart + domain
    if '\n' in address_parts or '\r' in address_parts:
        raise ValueError('Invalid address; address parts cannot contain newlines.')
    # Avoid UTF-8 encode, if it's possible.
    try:
        nm.encode('ascii')
        nm = Header(nm).encode()
    except UnicodeEncodeError:
        nm = Header(nm, encoding).encode()
    try:
        localpart.encode('ascii')
    except UnicodeEncodeError:
        localpart = Header(localpart, encoding).encode()
    # IDNA-encode non-ASCII domains.
    domain = punycode(domain)
    parsed_address = Address(username=localpart, domain=domain)
    return formataddr((nm, parsed_address.addr_spec))
class MIMEMixin:
    """Mixin overriding as_string()/as_bytes() so that payload lines
    beginning with 'From ' are never mangled (see bug #13433)."""

    def as_string(self, unixfrom=False, linesep='\n'):
        """Return the entire formatted message as a string.

        Optional `unixfrom' when True, means include the Unix From_ envelope
        header.
        """
        buf = StringIO()
        gen = generator.Generator(buf, mangle_from_=False)
        gen.flatten(self, unixfrom=unixfrom, linesep=linesep)
        return buf.getvalue()

    def as_bytes(self, unixfrom=False, linesep='\n'):
        """Return the entire formatted message as bytes.

        Optional `unixfrom' when True, means include the Unix From_ envelope
        header.
        """
        buf = BytesIO()
        gen = generator.BytesGenerator(buf, mangle_from_=False)
        gen.flatten(self, unixfrom=unixfrom, linesep=linesep)
        return buf.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
    """MIMEMessage whose header assignment rejects newline injection."""

    def __setitem__(self, name, val):
        # message/rfc822 attachments must be ASCII
        checked_name, checked_val = forbid_multi_line_headers(name, val, 'ascii')
        MIMEMessage.__setitem__(self, checked_name, checked_val)
class SafeMIMEText(MIMEMixin, MIMEText):
    """MIMEText with header-injection checks and smarter UTF-8 body encoding."""

    def __init__(self, _text, _subtype='plain', _charset=None):
        self.encoding = _charset
        MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset)

    def __setitem__(self, name, val):
        checked_name, checked_val = forbid_multi_line_headers(
            name, val, self.encoding)
        MIMEText.__setitem__(self, checked_name, checked_val)

    def set_payload(self, payload, charset=None):
        if charset == 'utf-8' and not isinstance(charset, Charset.Charset):
            # Quoted-Printable encoding has the side effect of shortening long
            # lines, if any (#22561).
            long_line_found = False
            for line in payload.splitlines():
                if len(line.encode()) > RFC5322_EMAIL_LINE_LENGTH_LIMIT:
                    long_line_found = True
                    break
            charset = utf8_charset_qp if long_line_found else utf8_charset
        MIMEText.set_payload(self, payload, charset=charset)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
    """MIMEMultipart whose header assignment rejects newline injection."""

    def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
        self.encoding = encoding
        MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)

    def __setitem__(self, name, val):
        checked_name, checked_val = forbid_multi_line_headers(
            name, val, self.encoding)
        MIMEMultipart.__setitem__(self, checked_name, checked_val)
class EmailMessage:
    """A container for email information."""
    # MIME subtype of the body part (text/<content_subtype>).
    content_subtype = 'plain'
    # Multipart subtype used when attachments are present.
    mixed_subtype = 'mixed'
    encoding = None  # None => use settings default

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, cc=None,
                 reply_to=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).

        Recipient arguments (to, cc, bcc, reply_to) must be sequences of
        address strings; passing a bare string raises TypeError.
        """
        if to:
            if isinstance(to, str):
                raise TypeError('"to" argument must be a list or tuple')
            self.to = list(to)
        else:
            self.to = []
        if cc:
            if isinstance(cc, str):
                raise TypeError('"cc" argument must be a list or tuple')
            self.cc = list(cc)
        else:
            self.cc = []
        if bcc:
            if isinstance(bcc, str):
                raise TypeError('"bcc" argument must be a list or tuple')
            self.bcc = list(bcc)
        else:
            self.bcc = []
        if reply_to:
            if isinstance(reply_to, str):
                raise TypeError('"reply_to" argument must be a list or tuple')
            self.reply_to = list(reply_to)
        else:
            self.reply_to = []
        self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
        self.subject = subject
        self.body = body or ''
        self.attachments = []
        if attachments:
            for attachment in attachments:
                # Ready-made MIME objects are attached as-is; anything else is
                # treated as a (filename, content, mimetype) triple.
                if isinstance(attachment, MIMEBase):
                    self.attach(attachment)
                else:
                    self.attach(*attachment)
        self.extra_headers = headers or {}
        self.connection = connection

    def get_connection(self, fail_silently=False):
        """Return the backend connection, creating and caching it if needed."""
        from django.core.mail import get_connection
        if not self.connection:
            self.connection = get_connection(fail_silently=fail_silently)
        return self.connection

    def message(self):
        """Build and return the underlying MIME message object."""
        encoding = self.encoding or settings.DEFAULT_CHARSET
        msg = SafeMIMEText(self.body, self.content_subtype, encoding)
        # Subclass hook: may wrap the body in multipart containers.
        msg = self._create_message(msg)
        msg['Subject'] = self.subject
        # An explicit 'From' in extra_headers wins over from_email.
        msg['From'] = self.extra_headers.get('From', self.from_email)
        self._set_list_header_if_not_empty(msg, 'To', self.to)
        self._set_list_header_if_not_empty(msg, 'Cc', self.cc)
        self._set_list_header_if_not_empty(msg, 'Reply-To', self.reply_to)
        # Email header names are case-insensitive (RFC 2045), so we have to
        # accommodate that when doing comparisons.
        header_names = [key.lower() for key in self.extra_headers]
        if 'date' not in header_names:
            # formatdate() uses stdlib methods to format the date, which use
            # the stdlib/OS concept of a timezone, however, Django sets the
            # TZ environment variable based on the TIME_ZONE setting which
            # will get picked up by formatdate().
            msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
        if 'message-id' not in header_names:
            # Use cached DNS_NAME for performance
            msg['Message-ID'] = make_msgid(domain=DNS_NAME)
        for name, value in self.extra_headers.items():
            if name.lower() != 'from':  # From is already handled
                msg[name] = value
        return msg

    def recipients(self):
        """
        Return a list of all recipients of the email (includes direct
        addressees as well as Cc and Bcc entries).
        """
        return [email for email in (self.to + self.cc + self.bcc) if email]

    def send(self, fail_silently=False):
        """Send the email message. Returns the number of messages sent."""
        if not self.recipients():
            # Don't bother creating the network connection if there's nobody to
            # send to.
            return 0
        return self.get_connection(fail_silently).send_messages([self])

    def attach(self, filename=None, content=None, mimetype=None):
        """
        Attach a file with the given filename and content. The filename can
        be omitted and the mimetype is guessed, if not provided.

        If the first parameter is a MIMEBase subclass, insert it directly
        into the resulting message attachments.

        For a text/* mimetype (guessed or specified), when a bytes object is
        specified as content, decode it as UTF-8. If that fails, set the
        mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
        """
        if isinstance(filename, MIMEBase):
            assert content is None
            assert mimetype is None
            self.attachments.append(filename)
        else:
            assert content is not None
            mimetype = mimetype or mimetypes.guess_type(filename)[0] or DEFAULT_ATTACHMENT_MIME_TYPE
            basetype, subtype = mimetype.split('/', 1)
            if basetype == 'text':
                if isinstance(content, bytes):
                    try:
                        content = content.decode()
                    except UnicodeDecodeError:
                        # If mimetype suggests the file is text but it's
                        # actually binary, read() raises a UnicodeDecodeError.
                        mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
            self.attachments.append((filename, content, mimetype))

    def attach_file(self, path, mimetype=None):
        """
        Attach a file from the filesystem.

        Set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE if it isn't specified
        and cannot be guessed.

        For a text/* mimetype (guessed or specified), decode the file's content
        as UTF-8. If that fails, set the mimetype to
        DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
        """
        path = Path(path)
        with path.open('rb') as file:
            content = file.read()
        self.attach(path.name, content, mimetype)

    def _create_message(self, msg):
        # Extension point for subclasses (e.g. to insert alternatives).
        return self._create_attachments(msg)

    def _create_attachments(self, msg):
        """Wrap msg in a multipart/mixed container when attachments exist."""
        if self.attachments:
            encoding = self.encoding or settings.DEFAULT_CHARSET
            body_msg = msg
            msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
            # Skip an empty, non-multipart body part entirely.
            if self.body or body_msg.is_multipart():
                msg.attach(body_msg)
            for attachment in self.attachments:
                if isinstance(attachment, MIMEBase):
                    msg.attach(attachment)
                else:
                    msg.attach(self._create_attachment(*attachment))
        return msg

    def _create_mime_attachment(self, content, mimetype):
        """
        Convert the content, mimetype pair into a MIME attachment object.

        If the mimetype is message/rfc822, content may be an
        email.Message or EmailMessage object, as well as a str.
        """
        basetype, subtype = mimetype.split('/', 1)
        if basetype == 'text':
            encoding = self.encoding or settings.DEFAULT_CHARSET
            attachment = SafeMIMEText(content, subtype, encoding)
        elif basetype == 'message' and subtype == 'rfc822':
            # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
            # must not be base64 encoded.
            if isinstance(content, EmailMessage):
                # convert content into an email.Message first
                content = content.message()
            elif not isinstance(content, Message):
                # For compatibility with existing code, parse the message
                # into an email.Message object if it is not one already.
                content = message_from_string(force_str(content))
            attachment = SafeMIMEMessage(content, subtype)
        else:
            # Encode non-text attachments with base64.
            attachment = MIMEBase(basetype, subtype)
            attachment.set_payload(content)
            Encoders.encode_base64(attachment)
        return attachment

    def _create_attachment(self, filename, content, mimetype=None):
        """
        Convert the filename, content, mimetype triple into a MIME attachment
        object.
        """
        attachment = self._create_mime_attachment(content, mimetype)
        if filename:
            try:
                filename.encode('ascii')
            except UnicodeEncodeError:
                # RFC 2231 tuple encoding for non-ASCII filenames.
                filename = ('utf-8', '', filename)
            attachment.add_header('Content-Disposition', 'attachment', filename=filename)
        return attachment

    def _set_list_header_if_not_empty(self, msg, header, values):
        """
        Set msg's header, either from self.extra_headers, if present, or from
        the values argument.
        """
        if values:
            try:
                value = self.extra_headers[header]
            except KeyError:
                value = ', '.join(str(v) for v in values)
            msg[header] = value
class EmailMultiAlternatives(EmailMessage):
    """
    A version of EmailMessage that makes it easy to send multipart/alternative
    messages. For example, including text and HTML versions of the text is
    made easier.
    """
    alternative_subtype = 'alternative'

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, alternatives=None,
                 cc=None, reply_to=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).
        """
        super().__init__(
            subject, body, from_email, to, bcc, connection, attachments,
            headers, cc, reply_to,
        )
        self.alternatives = alternatives or []

    def attach_alternative(self, content, mimetype):
        """Attach an alternative content representation."""
        assert content is not None
        assert mimetype is not None
        self.alternatives.append((content, mimetype))

    def _create_message(self, msg):
        return self._create_attachments(self._create_alternatives(msg))

    def _create_alternatives(self, msg):
        # Without alternatives the plain body part passes through untouched.
        encoding = self.encoding or settings.DEFAULT_CHARSET
        if not self.alternatives:
            return msg
        wrapper = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
        if self.body:
            wrapper.attach(msg)
        for alternative in self.alternatives:
            wrapper.attach(self._create_mime_attachment(*alternative))
        return wrapper
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python 2 and 3 compatible from start
from __future__ import print_function
import shutil
import subprocess
import sys
from os import path as osp
# Directory containing this script; the virtualenv is created alongside it.
BASE_DIR = osp.dirname(osp.abspath(__file__))
VENV_DIR = osp.join(BASE_DIR, 'venv')
# Tool versions pinned into a freshly created virtualenv.
PIP_VERSION = '9.0.1'
SETUPTOOLS_VERSION = '32.3.1'
# Requirements file; a copy is cached inside the venv to detect changes.
REQUIREMENTS_FILE = 'requirements.txt'
# When True, debug() messages are printed to stdout.
DEBUG = False
# Name of the user-supplied module providing the COMMANDS mapping.
CONFIG_MODULE = 'bootconf'
def info(message=None, **kwargs):
    """Write *message* to stdout (see _output for keyword options)."""
    _output(sys.stdout, message, **kwargs)
def error(message, **kwargs):
    """Write *message* to stderr (see _output for keyword options)."""
    _output(sys.stderr, message, **kwargs)
def fatal(message=None, **kwargs):
    """Optionally print *message* to stderr, then exit with status 1."""
    if message:
        error(message, **kwargs)
    raise SystemExit(1)
def debug(message, **kwargs):
    """Print *message* via info(), but only when the DEBUG flag is set."""
    if DEBUG:
        info(message, **kwargs)
def _output(out, message, end='\n', flush=True):
    """Write *message* (if truthy) to *out*, then *end*, then flush.

    Mimics Python 3's print(); kept local because Python 2's print() has
    no ``flush`` argument.
    """
    if message:
        out.write(str(message))
    if end:
        out.write(end)
    if flush:
        out.flush()  # python 2 print() got no flush arg
def execute_and_exit(cmd):
    """Run *cmd* and terminate this process with the command's exit status."""
    raise SystemExit(execute(cmd, raise_error=False))
def venv_execute(cmd, **kwargs):
    """Run *cmd* with its executable resolved inside the venv's bin directory."""
    cmd[0] = osp.join(VENV_DIR, 'bin', cmd[0])
    return execute(cmd, **kwargs)
def execute(cmd, raise_error=True):
    """Run *cmd* through subprocess and return its exit status.

    With raise_error=True a non-zero exit re-raises CalledProcessError;
    otherwise the failing status code is returned. Success returns 0.
    """
    debug('+ {}'.format(subprocess.list2cmdline(cmd)))
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as err:
        if raise_error:
            raise
        return err.returncode
    return 0
if __name__ == '__main__':
    # Bootstrap phase 1: re-exec this script under Python 3 when launched
    # with Python 2.
    if sys.version_info[0] != 3:
        execute_and_exit(['/usr/bin/env', 'python3', __file__] + sys.argv[1:])
    # Python 3 from here onwards

    def satisfy_requirements():
        # Install requirements.txt into the venv, but only when its content
        # changed since the last run (a copy is cached inside the venv).
        def read(filename):
            try:
                with open(filename) as f:
                    return f.read()
            except FileNotFoundError:
                return None
        name = REQUIREMENTS_FILE
        actual_file = osp.join(BASE_DIR, name)
        actual = read(actual_file)
        if actual is None:
            # No requirements file at all: nothing to install.
            # NOTE(review): returns True here but None below — callers
            # ignore the return value, so this is cosmetic only.
            return True
        cached_file = osp.join(VENV_DIR, name)
        cached = read(cached_file)
        if actual != cached:
            venv_execute(['pip', 'install', '-r', actual_file])
            # Cache what was installed so unchanged files skip pip next time.
            with open(cached_file, 'w') as f:
                f.write(actual)

    # Bootstrap phase 2: create the virtualenv with pinned pip/setuptools.
    import venv
    venv_python = osp.join(VENV_DIR, 'bin/python')
    venv_exists = osp.exists(venv_python)
    if not venv_exists:
        debug('Creating virtual environment in {!r}'.format(VENV_DIR))
        try:
            venv.main([VENV_DIR])  # TODO: handle ensurepip error when python3-venv is not installed
            venv_execute(['pip', 'install', 'pip=={}'.format(PIP_VERSION)])
            venv_execute(['pip', 'install', 'setuptools=={}'.format(SETUPTOOLS_VERSION)])
        except:
            # Remove the half-built venv so the next run starts clean.
            shutil.rmtree(VENV_DIR)  # rollback
            raise
    satisfy_requirements()
    # Bootstrap phase 3: re-exec under the venv's interpreter if we are not
    # already running inside it.
    if sys.executable != venv_python:  # osp.samefile() always true
        execute_and_exit([venv_python, __file__] + sys.argv[1:])
import functools
import importlib
import inspect
import json
import os
import pprint
import re
import runpy
import textwrap
from argparse import ArgumentParser
from contextlib import contextmanager
class Configuration(object):
    """Load COMMANDS from the CONFIG_MODULE and resolve each spec to a Command.

    Invalid configuration aborts the process via fatal().
    """

    # A spec looks like 'package.module' or 'package.module:attr.path'.
    command_spec_pattern = re.compile(r'^(?P<module>(\w+)(\.\w+)*)(:(?P<attr>(\w+)(\.\w+)*))?$')

    def __init__(self):
        # TODO: generate example
        self.module = importlib.import_module(CONFIG_MODULE)
        self.commands = getattr(self.module, 'COMMANDS', {})
        if not isinstance(self.commands, dict):
            # BUG FIX: this line referenced an undefined name 'commands'
            # (NameError); report the actual offending value instead.
            self.invalid_config('commands must be a dict, got {!r} instead'.format(self.commands))
        for name, spec in self.commands.items():
            try:
                self.commands[name] = self.load_command(spec)
            except (ValueError, ImportError) as e:
                self.invalid_config('COMMANDS[{!r}]: {}'.format(name, e))
        # Default commands
        self.commands['venv'] = VenvCommand()

    # TODO: unit test
    def load_command(self, spec):
        """Resolve a command spec string to a Command instance.

        Raises ValueError/ImportError (handled by __init__) or aborts via
        fatal() for malformed specs.
        """
        m = self.command_spec_pattern.match(spec)
        if not m:
            self.invalid_command_spec(spec, 'Must match {}'.format(self.command_spec_pattern.pattern))
        mod_name = m.group('module')
        module = importlib.import_module(mod_name)
        attr = m.group('attr')
        if attr:
            try:
                obj = self.get_attr(module, attr)
            except AttributeError as e:
                self.invalid_command_spec(spec, e)
            if inspect.isfunction(obj):
                return FunctionCommand(obj)
            # Cannot use isinstance(obj, click.core.Command)
            if type(obj).__module__ == 'click.core':
                return ClickCommand(obj)
            # Renamed from 'error' which shadowed the module-level error().
            message = 'Module attribute {!r} is neither function nor Click command'.format(attr)
            self.invalid_command_spec(spec, message)
        return ModuleCommand(mod_name)

    def get_attr(self, obj, attr):
        """Resolve dotted *attr* on *obj*; AttributeError names the failing path."""
        path = attr.split('.')
        progress = []
        for name in path:
            progress.append(name)
            try:
                obj = getattr(obj, name)
            except AttributeError as e:
                raise AttributeError('{}: {}'.format('.'.join(progress), e))
        return obj

    def invalid_config(self, message):
        # Fixed a stray line-continuation backslash after the def header.
        fatal('Invalid {!r}: {}'.format(self.module.__file__, message))

    def invalid_command_spec(self, spec, message):
        self.invalid_config('Invalid command spec {!r}: {}'.format(spec, message))
class Command(object):
    """Base class for runnable commands with an overridable description."""

    default_desc = ''

    @property
    def desc(self):
        """Explicitly set description, falling back to default_desc."""
        return self._desc if hasattr(self, '_desc') else self.default_desc

    @desc.setter
    def desc(self, value):
        self._desc = value
class VenvCommand(Command):
    """Expose the venv's bin/ executables: `venv pip freeze` runs bin/pip freeze."""

    default_desc = 'Run a venv command, for example: venv pip freeze'

    def __call__(self):
        program, args = sys.argv[0], sys.argv[1:]
        bin_dir = osp.join(VENV_DIR, 'bin')

        def usage():
            error('Usage: {} <command> [<args>]'.format(program))
            error('Same as: {} <args>'.format(osp.join(bin_dir, '<command>')))
            fatal()

        if not args:
            usage()
        executable = osp.join(bin_dir, args[0])
        if not osp.exists(executable):
            usage()
        execute_and_exit([executable] + args[1:])
class FunctionCommand(Command):
    """Command that simply invokes a wrapped Python function."""

    def __init__(self, func):
        self.func = func

    def __call__(self):
        self.func()

    @property
    def default_desc(self):
        return 'Function {}'.format(self.func.__qualname__)
class ClickCommand(Command):
    """Command that delegates to a Click command object."""

    def __init__(self, command):
        self.command = command

    def __call__(self):
        self.command()

    @property
    def default_desc(self):
        # Click commands may have no help text at all.
        return self.command.help or ''
class ModuleCommand(Command):
    """Command that runs a module as __main__ via runpy."""

    def __init__(self, mod_name, desc=None):
        self.mod_name = mod_name
        # BUG FIX: unconditionally assigning self._desc = None shadowed
        # default_desc (Command.desc only checks hasattr), so the help
        # listing printed 'None'. Only store an explicitly given description.
        if desc is not None:
            self._desc = desc

    def __call__(self):
        runpy.run_module(self.mod_name, run_name='__main__')

    @property
    def default_desc(self):
        return 'Module {}'.format(self.mod_name)
# Parse the bootconf configuration once at import time; fatal()s when invalid.
CONFIG = Configuration()
def main(args=None):
    """Dispatch to the configured command named by args[0].

    When *args* is None the process arguments are used; otherwise *args* is
    taken as-is and the program name is synthesised from main's qualname.
    """
    if args is None:
        program, args = sys.argv[0], sys.argv[1:]
    else:
        program = main.__qualname__
    commands = CONFIG.commands
    # TODO: standardize command help, e.g. upper case variable
    if not args or args[0] in ['-h', '--help']:
        error('Usage: {} [-h|--help] <command> [<args>]'.format(program))
        error('\nAvailable commands:')
        for cmd in sorted(commands.keys()):
            error(' {:10} {}'.format(cmd, commands[cmd].desc))
        error("\nRun '{} <command> [-h|--help]' to see command specific help.".format(program))
        fatal()
    target = args[0]
    command = commands.get(target)
    if not command:
        fatal("Invalid command {!r}, run '{} -h' for more info".format(target, program))
    # Present the command with an argv of its own so argparse/click-based
    # commands print a sensible program name; restore ours afterwards.
    orig_sys_argv = sys.argv
    try:
        sys.argv = ['{} {}'.format(program, args[0])] + args[1:]
        command()
    finally:
        sys.argv = orig_sys_argv
# Run the command dispatcher when executed as a script (after bootstrap).
if __name__ == '__main__':
    main()
| |
"""
A classifier implementing the Binary Relevance approach to multi-label
learning.
"""
__all__ = [
"MixedBinaryRelevanceClassifier"
]
import logging
import numpy as np
from joblib import Parallel, delayed
from sklearn.base import clone, BaseEstimator
from sklearn.metrics import (
hamming_loss, label_ranking_loss, f1_score,
precision_score, recall_score
)
from pyppi.model_selection.scoring import fdr_score, specificity
logger = logging.getLogger("pyppi")
def _fit_label(estimator, X, y, label_idx, n_labels, verbose, **fit_params):
if verbose:
logger.info("Fitting label {}/{}.".format(label_idx+1, n_labels))
return estimator.fit(X, y, **fit_params)
def _predict_proba_label(estimator, X):
return estimator.predict_proba(X)
def _predict_label(estimator, X):
return estimator.predict(X)
class MixedBinaryRelevanceClassifier(object):
    """Mimics the `OneVsRest` classifier from Sklearn allowing
    a different type of classifier for each label as opposed to one classifier
    for all labels.

    Parameters:
    ----------
    estimators : `list`
        List of `Scikit-Learn` estimators supporting `fit`, `predict` and
        `predict_proba`.

    n_jobs : int, optional, default: 1
        Number of processes to use when fitting each label.

    verbose : bool, optional, default: False
        Logs messages regarding fitting progress.
    """

    def __init__(self, estimators, n_jobs=1, verbose=False):
        if not isinstance(estimators, list):
            raise TypeError("estimators must be a list.")
        self.estimators = estimators
        self.n_jobs = n_jobs
        self.verbose = verbose

    def __repr__(self):
        """Unambiguous representation (verbose is intentionally omitted)."""
        return (
            "MixedBinaryRelevanceClassifier(estimators={}, n_jobs={})".format(
                self.estimators, self.n_jobs
            )
        )

    def _check_y_shape(self, y):
        """Raise ValueError unless y is a 2-D indicator matrix with exactly
        one column per configured estimator."""
        try:
            if y.shape[1] <= 1:
                raise ValueError(
                    "y must be in multi-label indicator matrix format. "
                    "For binary or multi-class classification use scikit-learn."
                )
            if y.shape[1] != len(self.estimators):
                raise ValueError(
                    "Shape of y {} along dim 1 does not match {}.".format(
                        y.shape, len(self.estimators)
                    )
                )
        except IndexError:
            # A 1-D y has no shape[1] at all.
            raise ValueError(
                "y must be in multi-label indicator matrix format. "
                "For binary or multi-class classification use scikit-learn."
            )

    def clone(self, deep=True):
        """Return a fresh, unfitted copy with cloned estimators."""
        params = self.get_params(deep)
        return self.__class__(**params)

    def _check_fitted(self):
        """Raise ValueError unless fit() has populated the fitted attributes."""
        if not hasattr(self, 'estimators_'):
            raise ValueError("This estimator has not yet been fit.")
        if not hasattr(self, 'n_labels_'):
            raise ValueError("This estimator has not yet been fit.")

    def get_params(self, deep=True):
        """Return constructor parameters; estimators are returned as clones."""
        return {
            "estimators": [clone(e) for e in self.estimators],
            "n_jobs": self.n_jobs,
            "verbose": self.verbose
        }

    def set_params(self, **params):
        """Set constructor parameters; replacing 'estimators' resets any fit."""
        for key, value in params.items():
            if key not in self.get_params().keys():
                raise ValueError(
                    "'{}' is not a valid param for {}.".format(
                        key, self.__class__.__name__
                    )
                )
            elif key == 'estimators':
                if not isinstance(value, list):
                    raise TypeError("'estimators' must be a list.")
                self.estimators = [clone(e) for e in value]
                # New estimators invalidate any previously fitted state.
                if hasattr(self, 'n_labels_'):
                    delattr(self, 'n_labels_')
                if hasattr(self, 'estimators_'):
                    delattr(self, 'estimators_')
            else:
                setattr(self, key, value)
        return self

    def fit(self, X, y, **fit_params):
        """
        Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples, n_labels)
            Target vector relative to X.

        Returns
        -------
        self : object
            Returns self.
        """
        self._check_y_shape(y)
        n_labels = len(self.estimators)
        # One independent (cloned) estimator per label column, fit in parallel.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_label)(
                estimator=clone(estimator),
                X=X, y=y[:, i], label_idx=i,
                n_labels=n_labels, verbose=self.verbose,
                **fit_params
            )
            for i, estimator in enumerate(self.estimators)
        )
        self.n_labels_ = len(self.estimators_)
        return self

    def predict(self, X):
        """
        Predict class labels for samples in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)
            Samples.

        Returns
        -------
        C : array, shape = (n_samples, n_labels)
            Predicted class labels per sample.
        """
        self._check_fitted()
        # Each estimator yields one row; transpose to (n_samples, n_labels).
        predictions = np.vstack(Parallel(n_jobs=self.n_jobs)(
            delayed(_predict_label)(
                estimator=estimator, X=X
            )
            for estimator in self.estimators_
        )).T
        return predictions

    def predict_proba(self, X):
        """
        Probability estimates for each label.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)

        Returns
        -------
        T : array-like, shape = (n_samples, n_labels)
            Returns the probability of the sample for each label in the model,
            where labels are ordered as the indices of 'y' used during fit.
        """
        self._check_fitted()
        probas = Parallel(n_jobs=self.n_jobs)(
            delayed(_predict_proba_label)(
                estimator=estimator, X=X
            )
            for estimator in self.estimators_
        )
        # Keep only the positive-class column of each per-label estimate.
        probas = np.vstack([x[:, 1] for x in probas]).T
        return probas

    def score(self, X, y, sample_weight=None, use_proba=False,
              scorer=hamming_loss, **score_params):
        """
        Returns the score as determined by `scoring` on the given
        test data and labels.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.

        y : array-like, shape = (n_samples, n_labels)
            True labels for X.

        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        use_proba : boolean, default: False
            If True, apply scoring function to probability estimates.

        scorer : function, optional
            The scoring method to apply to predictions.

        score_params : dict, optional
            Keyword arguments for scorer.

        Returns
        -------
        `float` or array-like (n_labels, ) if scoring uses binary.
            Mean score of self.predict(X) wrt. y.
        """
        self._check_y_shape(y)
        self._check_fitted()
        if use_proba:
            y_pred = self.predict_proba(X)
        else:
            y_pred = self.predict(X)
        average = score_params.get("average", None)
        if average == "binary":
            # Score each label column separately and return the per-label array.
            return np.asarray([
                scorer(
                    y[:, i], y_pred[:, i],
                    sample_weight=sample_weight,
                    **score_params
                )
                for i in range(self.n_labels_)
            ])
        else:
            return scorer(
                y, y_pred, sample_weight=sample_weight,
                **score_params
            )
| |
from django.shortcuts import render, render_to_response, redirect, get_object_or_404
from django.contrib.auth import authenticate, login , logout
from .models import Course, Department, User, Student, ExamPaper, Material, Announcement, CourseAllotment, Bookmark, Feedback, Contributor, Stat
from .forms import RegisterForm , LoginForm , AnnouncementForm , MaterialForm , ExamPaperForm, FeedbackForm, AvatorForm, ForgetPasswordForm
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.http import JsonResponse,HttpResponse
from django.urls import reverse
import datetime
import json
# Points credited to a contributor per accepted upload (doubled for feedback).
rewardvalue=5
# Create your views here.
def home(request):
    """Landing page: an announcement feed built from the user's bookmarks.

    Anonymous visitors get an empty feed. Announcements are paginated six
    at a time via the ?start_from= query parameter.
    """
    # BUG FIX: is_anonymous was called as a method; elsewhere this file reads
    # request.user.is_authenticated as a property (Django >= 1.10), and on
    # Django >= 2.0 calling is_anonymous() raises TypeError.
    if request.user.is_anonymous:
        return render(request, "feed.html", context={})
    # One query suffices: select_related('course') already joins the courses,
    # so reuse it instead of issuing a second, identical Bookmark query.
    bookmarkcourses = Bookmark.objects.select_related('course').filter(user=request.user)
    courselist = [bookmark.course for bookmark in bookmarkcourses]
    start_from = int(request.GET.get('start_from', 0))
    announcements = Announcement.objects.select_related('author').filter(
        course__in=courselist).order_by('-updated_on')[start_from * 6:start_from * 6 + 6]
    return render(request, "feed.html",
                  context={"feed": announcements, "next": start_from + 1,
                           "bookmark": bookmarkcourses})
def about(request):
    """About page showing the site-wide statistics record."""
    site_stats = Stat.objects.get(tag='initial')
    return render(request, "about.html", context={'stats': site_stats})
def _logout(request):
    """End the current session and return to the home page."""
    logout(request)
    return redirect('home')
def forgetpassword(request):
    """Password-reset request page; POST just acknowledges the request."""
    if request.method == 'GET':
        return render(request, "form.html", context={"form": ForgetPasswordForm()})
    elif request.method == 'POST':
        return render(request, "form.html",
                      context={"message": "Check your email for password :)"})
def _login(request):
    """Render the login form (GET) or authenticate and open a session (POST).

    A failed or inactive login falls through to the form with a hint message.
    """
    if request.method == 'GET':
        return render(request, "login.html", context={"form": LoginForm()})
    elif request.method == 'POST':
        user = authenticate(email=request.POST.get('email'),
                            password=request.POST.get('password'))
        if user is not None and user.is_active:
            login(request, user)
            print(user.email)
            return redirect('home')
        return render(request, "login.html",
                      context={"form": LoginForm(), "message": "forget password"})
def _register(request, register_as=None):
    """Registration form (GET) and account creation (POST).

    On success the new user's role is taken from the URL, the global user
    counter is bumped, and the visitor is sent to the login page. An invalid
    form redirects back to the registration page.
    """
    if request.method == 'GET':
        return render(request, "register.html", context={"form": RegisterForm()})
    elif request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            user = form.save()
            if register_as == 'student':
                user.user_role = 'student'
            elif register_as == 'instructor':
                user.user_role = 'instructor'
            user.save()
            stat = Stat.objects.get(tag='initial')
            stat.user_count += 1
            stat.save()
            return redirect(reverse('login'))
        return redirect(reverse('register', kwargs={"register_as": register_as}))
@login_required
def profile(request):
    # GET: render a profile page — the logged-in user's own, or (via ?email=)
    #      someone else's. POST: inline-edit a single field (x-editable style
    #      'name'/'value' pairs) or upload a new avatar.
    if request.method == 'GET':
        # Default to the logged-in user when no ?email= is given.
        email = request.GET.get('email', request.user)
        form = AvatorForm()
        owner = User.objects.get(email=email)
        is_owner = True if owner == request.user else False
        if owner.user_role == "student":
            # Lazily create the Student record on first profile view.
            student, created = Student.objects.get_or_create(user=owner)
            return render(request, "profile.html", context={"user": request.user, "student": student, 'form': form, 'is_owner': is_owner})
        return render(request, "profile.html", context={"user": owner, 'form': form, 'is_owner': is_owner})
    elif request.method == 'POST':
        if 'name' in request.POST.keys():
            # Student-specific fields live on the Student model...
            if request.POST['name'] in ['semester', 'registration_no', 'branch']:
                student = Student.objects.get(user=request.user)
                if request.POST['name'] == 'semester':
                    print('yeah')
                    student.semester = request.POST['value']
                elif request.POST['name'] == 'registration_no':
                    student.registration_no = request.POST['value']
                elif request.POST['name'] == 'branch':
                    student.branch = request.POST['value']
                student.save()
            # ...while the name fields live on the User model.
            elif request.POST['name'] in ['first_name', 'last_name']:
                user = User.objects.get(email=request.user.email)
                if request.POST['name'] == 'first_name':
                    user.first_name = request.POST['value']
                elif request.POST['name'] == 'last_name':
                    user.last_name = request.POST['value']
                user.save()
        else:
            # No 'name' key: treat the POST as an avatar upload.
            user = User.objects.get(email=request.user.email)
            form = AvatorForm(request.POST, request.FILES)
            if form.is_valid():
                user.avatar = form.cleaned_data["avator"]
                user.save()
                print(user.avatar)
            print(form.errors)
        return redirect(reverse('profile'))
    # Profile edit to be implemented.
# Profile edit to be implemented.
def getDepartments(request):
    """JSON list of every department (natural foreign keys expanded)."""
    if request.method == 'GET':
        serialized = serializers.serialize("json", Department.objects.all(),
                                           use_natural_foreign_keys=True)
        payload = {"result": json.loads(serialized)}
        return HttpResponse(json.dumps(payload), content_type='application/json')
def getCourses(request, department=None):
    """JSON list of the courses in *department* (404 on an unknown acronym)."""
    if request.method == 'GET':
        dept = get_object_or_404(Department, acronym=department)
        serialized = serializers.serialize("json", Course.objects.filter(dept=dept),
                                           use_natural_foreign_keys=True)
        payload = {"result": json.loads(serialized)}
        return HttpResponse(json.dumps(payload), content_type='application/json')
def Announcements(request, department=None, coursecode=None):
    """Announcement form (GET) and announcement submission (POST).

    On a valid POST the announcement is stored, the contributor's points are
    credited, and the site statistics are updated.
    """
    if request.method == 'GET':
        form = AnnouncementForm()
        return render(request, "form.html", context={"form": form})
    elif request.method == "POST":
        if not request.user.is_authenticated:
            return redirect(reverse('login'))
        form = AnnouncementForm(request.POST, request.FILES)
        if form.is_valid():
            obj = Announcement()
            obj.files = form.cleaned_data["files"]
            obj.title = form.cleaned_data["title"]
            obj.description = form.cleaned_data["description"]
            obj.author = request.user
            obj.course = Course.objects.get(code=coursecode)
            obj.save()
            # BUG FIX: 'created' was read before the get_or_create() call
            # that defines it, raising NameError on every valid POST. Use the
            # same order as the sibling Materials/ExamPaperView views.
            contributor, created = Contributor.objects.get_or_create(user=request.user)
            contributor.announcement += 1
            contributor.points += rewardvalue
            contributor.save()
            if created:
                stat = Stat.objects.get(tag='initial')
                stat.contributor_count += 1
                stat.save()
            stat = Stat.objects.get(tag='initial')
            stat.announcement_count += 1
            stat.save()
            return redirect(reverse("course", kwargs={'department': department, 'coursecode': coursecode}))
        print(form.errors)
        return redirect(reverse("course", kwargs={'department': department, 'coursecode': coursecode}))
def Materials(request, department=None, coursecode=None):
    """Material upload form (GET) and material submission (POST)."""
    if request.method == 'GET':
        return render(request, "form.html", context={"form": MaterialForm()})
    elif request.method == "POST":
        if not request.user.is_authenticated:
            return redirect(reverse('login'))
        course_url = reverse("course", kwargs={'department': department,
                                               'coursecode': coursecode})
        form = MaterialForm(request.POST, request.FILES)
        if not form.is_valid():
            print(form.errors)
            return redirect(course_url)
        material = Material()
        material.files = form.cleaned_data["files"]
        material.title = form.cleaned_data["title"]
        material.author = request.user
        material.course = Course.objects.get(code=coursecode)
        material.save()
        # Credit the contributor; a first-ever contribution also bumps the
        # global contributor counter.
        contributor, created = Contributor.objects.get_or_create(user=request.user)
        contributor.material += 1
        contributor.points += rewardvalue
        contributor.save()
        if created:
            stat = Stat.objects.get(tag='initial')
            stat.contributor_count += 1
            stat.save()
        stat = Stat.objects.get(tag='initial')
        stat.material_count += 1
        stat.save()
        return redirect(course_url)
@login_required
def FeedbackView(request):
    """Feedback form (GET) and feedback submission (POST); rewards double points."""
    if request.method == 'GET':
        return render(request, "form.html", context={"form": FeedbackForm()})
    elif request.method == "POST":
        form = FeedbackForm(request.POST, request.FILES)
        if not form.is_valid():
            print(form.errors)
            return redirect(reverse("course"))
        entry = Feedback()
        entry.files = form.cleaned_data["files"]
        entry.title = form.cleaned_data["title"]
        entry.feedback = form.cleaned_data["feedback"]
        entry.author = request.user
        entry.save()
        contributor, created = Contributor.objects.get_or_create(user=request.user)
        contributor.feedback += 1
        contributor.points += 2 * rewardvalue
        contributor.save()
        if created:
            stat = Stat.objects.get(tag='initial')
            stat.contributor_count += 1
            stat.save()
        return render(request, "form.html",
                      context={'feedback': True,
                               'message': "Thanks for your valuable feedback. We will be working on your query."})
@login_required
def ExamPaperView(request, department=None, coursecode=None):
    """Exam-paper upload form (GET) and paper submission (POST)."""
    if request.method == 'GET':
        return render(request, "form.html", context={"form": ExamPaperForm()})
    elif request.method == "POST":
        course_url = reverse("course", kwargs={'department': department,
                                               'coursecode': coursecode})
        form = ExamPaperForm(request.POST, request.FILES)
        if not form.is_valid():
            print(form.errors)
            return redirect(course_url)
        paper = ExamPaper()
        paper.files = form.cleaned_data["files"]
        paper.term = form.cleaned_data["term"]
        paper.author = request.user
        paper.course = Course.objects.get(code=coursecode)
        paper.save()
        contributor, created = Contributor.objects.get_or_create(user=request.user)
        contributor.paper += 1
        contributor.points += rewardvalue
        contributor.save()
        if created:
            stat = Stat.objects.get(tag='initial')
            stat.contributor_count += 1
            stat.save()
        stat = Stat.objects.get(tag='initial')
        stat.paper_count += 1
        stat.save()
        return redirect(course_url)
def DepartmentView(request, department=None, year=0, semester=0):
    """Course listing for a department, filterable by year and/or semester."""
    if request.method == 'GET':
        dept = get_object_or_404(Department, acronym=department)
        year = int(year)
        semester = int(semester)
        if year < 1:
            # No year selected yet: let the user pick one first.
            return render(request, "years.html", context={'department': department})
        allotments = CourseAllotment.objects.select_related('course').filter(course__dept=dept)
        if semester == 0:
            # A study year spans two semesters: 2*year-1 and 2*year.
            allotments = allotments.filter(semester__in=[(2 * year - 1), (2 * year)])
            print(allotments)
        else:
            allotments = allotments.filter(semester=semester)
        return render(request, "department.html",
                      context={"department": dept, "courses": allotments})
def CourseView(request, department=None, coursecode=None):
    """Course page listing its announcements, materials and past papers.

    The bookmark flag is only looked up for authenticated users; anonymous
    visitors always see the course as not bookmarked.
    """
    if request.method == 'GET':
        dept = get_object_or_404(Department, acronym=department)
        course = get_object_or_404(Course, code=coursecode)
        announcements = Announcement.objects.filter(course=course)
        materials = Material.objects.filter(course=course)
        papers = ExamPaper.objects.filter(course=course)
        # BUG FIX: a bare `except:` used to swallow every error here. Only two
        # situations are expected: an anonymous visitor (cannot be used in a
        # user lookup) and a missing bookmark — handle exactly those.
        bookmark = None
        if request.user.is_authenticated:
            try:
                bookmark = Bookmark.objects.get(course=course, user=request.user)
            except Bookmark.DoesNotExist:
                bookmark = None
        is_bookmarked = bookmark is not None
        return render(request, "course.html",
                      context={"department": dept, "course": course,
                               "announcements": announcements,
                               "materials": materials, "papers": papers,
                               "is_bookmarked": is_bookmarked})
@login_required
def FeedView(request):
    """Announcement feed for the user's bookmarked courses, six per page."""
    if request.method == 'GET':
        bookmarked = Bookmark.objects.filter(user=request.user)
        courselist = [entry.course for entry in bookmarked]
        start_from = int(request.GET.get('start_from', 0))
        announcements = Announcement.objects.select_related('author').filter(
            course__in=courselist).order_by('-updated_on')[start_from * 6:start_from * 6 + 6]
        return render(request, "feed.html",
                      context={"feed": announcements, "next": start_from + 1})
@login_required
def BookmarkView(request):
    """Toggle a bookmark for the POSTed course id; returns a JSON success flag."""
    if request.method == 'POST':
        course_obj = get_object_or_404(Course, id=request.POST.get('course'))
        # BUG FIX: the lookup filtered on the raw POST string instead of the
        # resolved Course, and a bare `except:` hid every error. Query with
        # the resolved object and only treat a missing bookmark as "absent".
        try:
            bookmark = Bookmark.objects.get(course=course_obj, user=request.user)
        except Bookmark.DoesNotExist:
            bookmark = None
        if bookmark is not None:
            bookmark.delete()
        else:
            bookmark = Bookmark()
            bookmark.course = course_obj
            bookmark.user = request.user
            bookmark.save()
        return HttpResponse(json.dumps({"success": True}), content_type='application/json')
| |
"""
Created on Jul 22, 2014
@author: Simon Hohberg
"""
import numpy as np
from algorithms.neuralnetwork.feedforward.multilayer_perceptron import MultilayerPerceptron, \
SimpleUpdate
import utils.numpyutils as nputils
import copy
import time
from layers import ConvLayer, MaxPoolLayer
from utils import logging
from algorithms.AbstractAlgorithm import AbstractAlgorithm
from datahandler.numerical.NumericalDataSet import NumericalDataSet
import matplotlib.pyplot as plt
class ConvNet(AbstractAlgorithm):
    """Convolutional neural network: a stack of convolution / max-pooling
    layers followed by a fully connected multilayer perceptron (MLP)."""

    def __init__(self, iterations=1, learning_rate=0.5, topo=None,
                 activation_func=(np.tanh, nputils.tanh_deriv)):
        """
        Creates a new convolutional neural network with the given topology
        (architecture), learning rate and number of iterations.
        :param iterations: number of iterations for training.
        :param learning_rate: rate for updating the weights
        :param topo: defines the architecture of the net. It is a list of
        tuples. Each tuple represents a layer, where the first element is a
        character that specifies the type of layer. E.g. 'c' convolutional
        layer, 'p' pooling layer, 'mlp' fully connected conventional neural
        network. The next elements in the tuple are layer specific.
        Convolutional: 2nd element defines the kernel size, e.g. 3 for
        a 3x3 kernel. 3rd element specifies the number of maps in the layer.
        Pooling: 2nd element defines the pool patch size, e.g. 2 for a pool
        patch size of 2x2.
        MLP: each element defines the layer size for the network.
        A complete example looks like this: [('c', 3, 4), ('p', 2),
        ('c', 3, 4), ('p', 9), ('mlp', 4, 4, 2)] — which is also the
        default used when None is given.
        :param activation_func: (function, derivative) pair used by the MLP.
        """
        # Fix: the original used a mutable list as the default for `topo`;
        # use None as a sentinel so instances never share the default list.
        if topo is None:
            topo = [('c', 3, 4), ('p', 2), ('c', 3, 4), ('p', 9),
                    ('mlp', 4, 4, 2)]
        self.split_ratio = 0.8  # train/validation split used by fit()
        self.iterations = iterations
        self.learning_rate = learning_rate
        self.layers = []
        self.activ_func = activation_func[0]
        # NOTE: attribute name (with its typo) kept for backward compatibility.
        self.deriv_acitv_func = activation_func[1]
        num_prev_maps = 1
        self.topo = topo
        # parse topology
        for layer in topo:
            # convolutional layer
            if layer[0] == 'c':
                conv_layer = ConvLayer(num_prev_maps=num_prev_maps,
                                       kernel_size=layer[1],
                                       num_maps=layer[2])
                self.add_layer(conv_layer)
                num_prev_maps = layer[2]
            # pooling layer
            elif layer[0] == 'p':
                self.add_layer(MaxPoolLayer(layer[1], num_prev_maps))
            # multilayer perceptron
            elif layer[0] == 'mlp':
                self.mlp = MultilayerPerceptron(
                    list(layer[1:]), do_classification=True,
                    update_method=SimpleUpdate(self.learning_rate),
                    activ_func=(self.activ_func, self.deriv_acitv_func))

    def add_layer(self, layer):
        """
        Adds the given layer to this network.
        :param layer: layer that is added
        """
        self.layers.append(layer)

    def feedforward(self, inputs):
        """
        Feed input forward through net calculating the ouput of each layer.
        :param inputs: 3D numpy array (usually a list of images)
        :return: List of 3D numpy arrays each representing the output of a
        layer except the first array in the list which is the input.
        """
        outputs = [inputs]
        for layer in self.layers:
            outputs.append(layer.feedforward(outputs[-1]))
        # MLP returns its own list of per-layer outputs; skip its input
        # (which equals the last conv/pool output already stored).
        outputs.extend(self.mlp.feedforward(outputs[-1])[1:])
        return outputs

    def predict(self, inputs):
        """
        Predict class labels for the given inputs.

        Binary case (net output shape (1, 1)): thresholds raw outputs at 0
        and returns a column vector of 0/1 ints. Multiclass case: returns a
        one-hot matrix with a 1 at each row's argmax.
        """
        predictions = self.predict_extended(inputs)
        if predictions[0].shape == (1, 1):
            # binary output
            predictions = np.array(predictions).ravel()
            predictions[predictions <= 0] = 0
            predictions[predictions > 0] = 1
            return predictions[:, np.newaxis].astype(int)
        # multiclass
        sparse = np.zeros((len(predictions), predictions[0].shape[1]))
        for ix, _ in enumerate(sparse):
            sparse[ix][predictions[ix].argmax()] = 1
        # exactly one class per observation
        assert sparse.sum() == len(predictions)
        return sparse

    def predict_extended(self, inputs):
        """
        Predicts raw net outputs for the given observations.
        :param inputs: observations accepted by NumericalDataSet
        :return: List of predictions, i.e. output of this net for each
        observation in the data set.
        """
        data_set = NumericalDataSet(inputs)
        predictions = []
        # loop through dataset
        for observation, _ in data_set.gen_observations():
            # make sure it is a numpy array
            input_arr = np.array(observation)
            outputs = self.feedforward(input_arr)
            predictions.append(outputs[-1])
        return predictions

    def predict_single(self, input_arr):
        """
        Predict class for a single observation.
        :param input_arr: Observation
        :return: Prediction for given observation
        """
        return self.feedforward(input_arr)[-1]

    def fit(self, inputs, targets):
        """
        Train the net with the given observations (online learning with
        random sampling).

        The data is split into a training part (self.split_ratio) and a
        validation part. Training stops early — reverting to the weights
        snapshot from three iterations earlier — when the validation error
        rises above the value from three iterations before.

        :param inputs: observations
        :param targets: target values, one per observation
        """
        split_point = int(len(inputs) * self.split_ratio)
        data_set = NumericalDataSet(inputs[:split_point], targets[:split_point])
        val_in = inputs[split_point:]
        val_targets = targets[split_point:]
        prev_layers = None
        prev_mlp = None
        self.train_acc_err = []
        self.val_acc_err = []
        for it in range(self.iterations):
            # randomly select observations as many times as there are
            # observations
            it_error = 0
            start = time.time()
            for _ in range(data_set.get_nr_observations()):
                input_arr, target_arr = data_set.rand_observation()
                # feed-forward
                outputs = self.feedforward(input_arr)
                current_error = nputils.calc_squared_error(target_arr, outputs[-1])
                it_error += current_error
                # mlp backpropagation and gradient descent
                mlp_outputs = outputs[-len(self.mlp.arr_layer_sizes):]
                mlp_deltas = self.mlp.backpropagation(mlp_outputs, target_arr)
                mlp_weight_updates = self.mlp.calculate_weight_updates(mlp_deltas, mlp_outputs)
                self.mlp.update_method.perform_update(self.mlp.weights_arr, mlp_weight_updates, current_error)
                # layer backpropagation and gradient descent:
                # backpropagated error of the first mlp layer feeds the
                # conv/pool stack in reverse order
                backprop_error = np.array([[x] for x in np.dot(self.mlp.weights_arr[0], mlp_deltas[0].transpose())])
                for layer in reversed(self.layers):
                    backprop_error = layer.backpropagate(backprop_error)
                # calculate the weight gradients and update the weights
                for layer in self.layers:
                    layer.calc_gradients()
                    layer.update(self.learning_rate)
            avg_error = it_error / data_set.nrObservations
            acc_err = self._accuracy_err(inputs, targets)
            self.train_acc_err.append(acc_err)
            # validation error
            acc_err = self._accuracy_err(val_in, val_targets)
            self.val_acc_err.append(acc_err)
            logging.info("Iteration #{} MSE: {}, TrainErr: {:.6f}, ValErr: {:.6f} ({:.2f}s)\n"
                         .format(it + 1, avg_error, self.train_acc_err[-1],
                                 self.val_acc_err[-1], time.time() - start))
            # break cond: validation error increased vs. 3 iterations ago
            if it > 3 and val_in is not None and self.val_acc_err[-1] > self.val_acc_err[-4]:
                # revert to the last snapshot and stop training
                self.layers = prev_layers
                self.mlp = prev_mlp
                plt.figure()
                plt.plot(self.train_acc_err)
                plt.plot(self.val_acc_err)
                plt.show(block=False)
                break
            # keep a snapshot for the revert above
            if it > 0:
                prev_layers = copy.deepcopy(self.layers)
                prev_mlp = copy.deepcopy(self.mlp)

    def _accuracy_err(self, inputs, targets):
        """Return 1 - accuracy of the net on the given data.

        Binary targets (one column) use thresholded predictions;
        multiclass targets compare argmax positions.
        """
        if targets.shape[1] == 1:
            predictions = self.predict(inputs)
            acc_err = 1 - (predictions == targets).sum() / float(len(inputs))
        else:
            predictions = self.predict_extended(inputs)
            acc_err = 1 - ((np.vstack(predictions)).argmax(axis=1) == targets.argmax(axis=1)).sum() / float(len(inputs))
        return acc_err

    def set_params(self, parameters):
        """No tunable parameters are settable; kept for the
        AbstractAlgorithm interface."""
        pass

    def get_params(self):
        """Return the hyper-parameters of this net as a dict."""
        dct = {}
        dct["learning_rate"] = self.learning_rate
        dct["topo"] = self.topo
        return dct
| |
# -*- coding: utf-8 -*-
'''
[109]
Uses RESCAL to build embeddings for the segments of the TIGER corpus (each word = 3 segments).
The relations used are +1, +2 and "+1 or +2", plus 5 other relations mentioned in Hinrich's email:
- (segment, end segment of the same word)
- (segment, end segment of the following word)
- (segment, end segment of the word after that)
- (segment, end segment of the preceding word)
- (segment, end segment of the word before that)
Takes as a parameter the number of distinct segments to consider. If no parameter is given, all segments
are used (= no <RARE> segments).
'''
import collections
import os
import GlobalVariables_Katharina_SVD as gl
from scipy.sparse import csr_matrix, issparse
from scipy.sparse.linalg import eigsh
from numpy import dot, zeros, kron, array, eye, ones, savetxt, loadtxt
import ExtRescal.rescal_new as rescal
import logging
import threading
import math
import numpy as np
import datetime
import scipy
import time
import sys
import operator
def initialize():
    """Reset the global counting structures in the `gl` module.

    Creates nine relation dictionaries: slices 0-3 hold the +1 / +2 /
    +1-or-+2 counts and the unigram counts, slices 4-8 hold the five
    end-segment relations from Hinrich's email.
    """
    print('\nINITIALIZE ', datetime.datetime.now().time())
    gl.dictionaryList.extend(collections.defaultdict(int) for _ in range(9))
    gl.wordFrequency = collections.defaultdict(int)
def ReadFile_01(file, limit, lowerlimit, numberSegments):
    """First corpus pass: count token frequencies and return the
    `numberSegments` most frequent tokens (most frequent first).

    :param file: open text file, one whitespace-separated token sequence per line
    :param limit: stop after this many lines (-1 = no limit)
    :param lowerlimit: skip lines with an index below this value
    :param numberSegments: how many of the most frequent tokens to return

    Bug fix: the original iterated ``range(noSegments)`` — the module-level
    global — instead of its own ``numberSegments`` parameter.
    """
    print('\nREADFILE_01 ', datetime.datetime.now().time())
    print(file)
    for num_lines, line in enumerate(file):
        if limit > 0 and num_lines > limit:
            break
        if num_lines < lowerlimit:
            continue
        tokens = line.strip().split(' ')  # Keep the original word forms.
        for token in tokens:
            if token == '':
                continue
            # gl.wordFrequency is a defaultdict(int), so a plain increment
            # covers both the new-token and seen-token cases.
            gl.wordFrequency[token] += 1
    # sort the dictionary by its values (ascending) and take tokens from the
    # frequent end; guard against asking for more tokens than exist.
    frequWords = sorted(gl.wordFrequency.items(), key=operator.itemgetter(1))
    finalResult = []
    for i in range(min(numberSegments, len(frequWords))):
        finalResult.append(frequWords[len(frequWords) - i - 1][0])
    print('Done.')
    return finalResult
def tokenIsLastSegment(i, tokens):
    """Return True when tokens[i] is the final segment of a word.

    Segment markers are the first character of each token: the last token of
    the line and any '2'-token end a word; a '1'-token ends a word when the
    next token starts a new word ('1'); a '0'-token ends a word unless the
    next token is a '2'-continuation.
    """
    if i == len(tokens) - 1:
        return True
    marker = tokens[i][0]
    nxt = tokens[i + 1][0]
    if marker == '2':
        return True
    if marker == '1' and nxt == '1':
        return True
    return marker == '0' and nxt != '2'
def ReadFile(file, limit, lowerlimit, frequentWords):
    """Second corpus pass: fill the co-occurrence slices in gl.dictionaryList.

    Every token not in `frequentWords` is mapped to '<RARE>'. New tokens are
    assigned increasing ids in gl.wordlist; gl.dimension tracks the vocabulary
    size and gl.corpusLength the number of segments seen.

    Slice layout (as updated below): 0 = previous segment (+1), 2 = segment
    two back (+2), 3 = +1-or-+2 combined, 1 = unigram counts; slices 4-8 are
    relations between a segment and the end segments of nearby words.

    :param file: open text file, one segment sequence per line
    :param limit: stop after this many lines (-1 = no limit)
    :param lowerlimit: skip lines with an index below this value
    :param frequentWords: tokens kept as-is; everything else becomes '<RARE>'
    """
    print('\nREADFILE ', datetime.datetime.now().time())
    for num_lines, line in enumerate(file):
        if limit > 0 and num_lines > limit:
            break
        if num_lines < lowerlimit:
            continue
        tokens=line.strip().split(' ') # Keep the original word forms.
        indexes = []
        indicesOfLastSeg = []
        isLastSegment = False
        for i in range(len(tokens)):
            token = tokens[i]
            isLastSegment = False
            if tokenIsLastSegment(i, tokens):
                isLastSegment = True
            gl.corpusLength += 1
            if token == '':
                continue
            if not token in frequentWords:
                token = '<RARE>'
            middle=gl.wordlist.get(token, -1) # Use -1 as default.
            if middle == -1: # Is a new word.
                gl.wordlist[token] = gl.dimension
                middle=gl.dimension
                gl.dimension += 1 # gl.dimension count the current word amount
                if gl.dimension % 10000 == 0:
                    print (gl.dimension)
                # initialize the unigram-count slot for the new id
                gl.dictionaryList[1][middle] = 0
            indexes.append(middle) # 'indexes stores all the word index'
            if isLastSegment:
                indicesOfLastSeg.append(middle)
            currentPosi=len(indexes)-1
            # NOTE(review): leftRange is computed but never used below.
            leftRange=gl.window if currentPosi>= gl.window else currentPosi
            #new from Katharina
            if currentPosi>0:
                context=indexes[currentPosi-1]
                gl.dictionaryList[0][(context, middle)]+=1 #update slice 0
                gl.dictionaryList[3][(context, middle)]+=1 #update slice 3
            if currentPosi>1:
                context=indexes[currentPosi-2]
                gl.dictionaryList[2][(context, middle)]+=1 #update slice 2
                gl.dictionaryList[3][(context, middle)]+=1 #next update for slice 3
            # for the last segments of words:
            if len(indicesOfLastSeg) > 1:
                gl.dictionaryList[4][(middle, indicesOfLastSeg[len(indicesOfLastSeg)-2])] += 1 # last segment 2 words ago
            if len(indicesOfLastSeg) > 0:
                gl.dictionaryList[5][(middle, indicesOfLastSeg[len(indicesOfLastSeg)-1])] += 1 # last segment 1 word ago
            endPreviousWord = 0
            endPreviousWordNew = 0
            if len(indicesOfLastSeg) > 1 and isLastSegment:
                # search backwards (up to 3 positions) for the previous
                # word's end segment; NOTE(review): the update below reuses
                # the loop variable k after the loop — presumed intentional
                for k in range(3):
                    if indexes[currentPosi-k-1] == indicesOfLastSeg[len(indicesOfLastSeg)-2]:
                        endPreviousWord = k
                        #print('endPreviousWord: ' + str(k))
                        break
                gl.dictionaryList[6][(indexes[currentPosi-k-1], middle)] += 1 # same word TODO: no test here if currentPosi-i exists... necessary??
                #print('gl.dictionaryList[6] '+ str(indexes[currentPosi-k-1]) + '; ' + str(middle))
            if len(indicesOfLastSeg) > 2 and isLastSegment:
                for k in range(5):
                    #print(indexes[currentPosi-k-1-endPreviousWord])
                    #print(indicesOfLastSeg[len(indicesOfLastSeg)-3])
                    if indexes[currentPosi-k-endPreviousWord-1] == indicesOfLastSeg[len(indicesOfLastSeg)-3]:
                        endPreviousWordNew = k + endPreviousWord
                        #print('endPreviousWordNew: ' + str(endPreviousWordNew))
                        break
                gl.dictionaryList[7][(indexes[currentPosi-k-endPreviousWord-1], middle)] += 1 # 1 word after TODO: no test here if currentPosi-i exists... necessary??
                #print('gl.dictionaryList[7] '+ str(indexes[currentPosi-k-endPreviousWord-1]) + '; ' + str(middle))
            if len(indicesOfLastSeg) > 3 and isLastSegment:
                for k in range(5):
                    if indexes[currentPosi-k-endPreviousWordNew-1] == indicesOfLastSeg[len(indicesOfLastSeg)-4]:
                        break
                gl.dictionaryList[8][(indexes[currentPosi-k-endPreviousWordNew-1], middle)] += 1 # 2 words after TODO: no test here if currentPosi-i exists... necessary??
                #print('gl.dictionaryList[8] '+ str(indexes[currentPosi-k-endPreviousWordNew-1]) + '; ' + str(middle))
            '''
            if currentPosi>2:
                context=indexes[currentPosi-3]
                gl.dictionaryList[1][(middle, context)]+=0.6 #still update slice 1
                gl.dictionaryList[1][(context, middle)]+=0.6 #a symmetric relation
            if currentPosi>3:
                context=indexes[currentPosi-4]
                gl.dictionaryList[2][(middle, context)]+=0.4 #update slice 2
                gl.dictionaryList[2][(context, middle)]+=0.4 #a symmetric relation
            if currentPosi>4:
                context=indexes[currentPosi-5]
                gl.dictionaryList[2][(middle, context)]+=0.2 #still update slice 2
                gl.dictionaryList[2][(context, middle)]+=0.2 #a symmetric relation
            '''
            # unigram count of the current segment id (PPMI denominator)
            gl.dictionaryList[1][middle] += 1
    #for token, value in gl.dictionaryList[1].items():
        #print(gl.wordlist[token] + ', ' + str(value) + '\n')
    #exit(0)
    print('Number of words: ' + str(gl.dimension))
def Traverse(folders, frequentWords):
    """Run the second corpus pass (ReadFile) over every file in `folders`,
    recursing into directories.

    :param folders: list of file or directory paths
    :param frequentWords: tokens to keep; everything else becomes '<RARE>'

    Bug fix: the recursive call for directory entries previously omitted
    `frequentWords` and would have raised a TypeError. Files are now also
    closed via a context manager, and the builtin name `list` is no longer
    shadowed.
    """
    print('\nTRAVERSE ', datetime.datetime.now().time())
    fileNo = 0
    for rootDir in folders:
        if os.path.isfile(rootDir):  # is file
            with open(rootDir) as file:
                ReadFile(file, -1, 0, frequentWords)
                #ReadFile(file, 10000, 0, frequentWords) #V1
            fileNo += 1
        else:  # is directory
            for entry in os.listdir(rootDir):
                path = os.path.join(rootDir, entry)
                Traverse([path], frequentWords)
def Traverse_01(folders, noSegments):
    """Run the first corpus pass (ReadFile_01) over every file in `folders`,
    recursing into directories, and return the frequent-token list.

    :param folders: list of file or directory paths
    :param noSegments: number of most-frequent tokens to keep
    :return: list of the most frequent tokens (empty if no file was found)

    Bug fixes: the recursive call previously omitted `noSegments`
    (TypeError), and `finalResult` was unbound when `folders` contained no
    regular file.
    """
    print('\nTRAVERSE_01 ', datetime.datetime.now().time())
    finalResult = []
    for rootDir in folders:
        if os.path.isfile(rootDir):  # is file
            with open(rootDir) as file:
                finalResult = ReadFile_01(file, -1, 0, noSegments)
        else:  # is directory
            for entry in os.listdir(rootDir):
                path = os.path.join(rootDir, entry)
                finalResult = Traverse_01([path], noSegments)
    return finalResult
def _ppmi_slice(pair_counts):
    """Build a sparse positive-PMI matrix for one relation slice.

    PMI(w, c) = log((count(w, c) * corpusLength) /
                    (count(w) * count(c)));
    negative PMI values are clamped to 0, using the unigram counts stored
    in gl.dictionaryList[1].
    :param pair_counts: dict mapping (word_id, context_id) -> count
    :return: csr_matrix of shape (gl.dimension, gl.dimension)
    """
    rows = []
    cols = []
    data = []
    unigrams = gl.dictionaryList[1]
    for (word, context), value in pair_counts.items():
        rows.append(word)
        cols.append(context)
        pmi = math.log((value * gl.corpusLength) / (unigrams[word] * unigrams[context]))
        data.append(pmi if pmi >= 0 else 0)
    return csr_matrix((data, (rows, cols)), shape=(gl.dimension, gl.dimension))


def formPPMIandSVD():
    """Build the 8-slice PPMI tensor from the co-occurrence dictionaries,
    factorize it with RESCAL (rank 200) and write the results to disk.

    Slice 1 of gl.dictionaryList holds unigram counts and is therefore not
    a tensor slice itself. The original copy-pasted the PPMI construction
    eight times; it is now a single helper (_ppmi_slice), with identical
    per-slice behavior.
    """
    print('\nFORMPPMI ', datetime.datetime.now().time())
    print ('Building tensor....')
    tensor = [_ppmi_slice(gl.dictionaryList[k]) for k in (0, 2, 3, 4, 5, 6, 7, 8)]
    print('Print the original tensor')
    printOriginalTensor(tensor)
    # call rescal
    print ('Calling rescal...')
    A, R, fit, itr, exectimes = rescal.als(tensor, 200)
    printRelationsTensor(R, 200)
    printWordEmbeddingSVD(A, 200)
def save_sparse_csr(filename, array):
    """Persist a scipy CSR matrix to an .npz archive.

    Stores the data/indices/indptr arrays and the shape so the matrix can
    be reconstructed with csr_matrix((data, indices, indptr), shape=shape).
    """
    print('\nSAVE_SPARSE_CSR ', datetime.datetime.now().time())
    payload = {'data': array.data,
               'indices': array.indices,
               'indptr': array.indptr,
               'shape': array.shape}
    np.savez(filename, **payload)
def printWordEmbeddingSVD(matrix, dim):
    """Write the embedding matrix to two text files: one line per word
    ("word v1 v2 ..."), once with the raw token and once with the token's
    leading segment-marker character stripped.

    :param matrix: 2D array of embeddings, row i belongs to word id i
    :param dim: embedding dimensionality (used in the file names)

    Fixes: output files are now closed (context manager); the duplicated
    write loop is a single inner helper.
    """
    print('\nPRINTWORDEMBEDDINGSVD ', datetime.datetime.now().time())
    # invert gl.wordlist (token -> id) into id -> token, once
    index2word = {index: word for word, index in gl.wordlist.items()}

    def _dump(path, strip_first_char):
        # one line per word id: the token followed by its embedding values
        with open(path, 'w') as output:
            for i in range(len(matrix)):
                token = index2word[i]
                if strip_first_char:
                    token = token[1:]  # drop the segment-marker character
                output.write(token + ' ')
                for length in range(len(matrix[i])):
                    output.write(str(matrix[i][length]) + ' ')
                output.write('\n')

    _dump('/mounts/data/proj/kann/FINAL_RESCAL_RESULTS/109_012_wordembeddings_' + str(dim) + '.txt', False)
    print ('Wordembeddings are stored over!')
    _dump('/mounts/data/proj/kann/FINAL_RESCAL_RESULTS/109_012_real_wordembeddings' + str(dim) + '.txt', True)
    print ('\'Real\' wordembeddings are stored over!')
def printRelationsTensor(R, dim):
    """Write each relation matrix R[j] to its own text file via numpy.savetxt.

    :param R: list of dense relation matrices from RESCAL
    :param dim: embedding dimensionality (unused in the file names)
    """
    print('\nPRINTRELATIONTENSOR ', datetime.datetime.now().time())
    for j, relation in enumerate(R):
        np.savetxt('/mounts/data/proj/kann/FINAL_RESCAL_RESULTS/109_012_R_' + str(j) + '.txt', relation)
    print ('RelationTensor stored over!')
def printOriginalTensor(R):
    """Persist every sparse tensor slice R[j] to its own .npz file.

    :param R: list of scipy CSR matrices (the unfactorized tensor slices)
    """
    print('\nPRINTORIGINALTENSOR ', datetime.datetime.now().time())
    for j, tensor_slice in enumerate(R):
        save_sparse_csr('/mounts/data/proj/kann/FINAL_RESCAL_RESULTS/109_012_T_' + str(j) + '.txt', tensor_slice)
    print ('Original Tensor stored over!')
if __name__ == '__main__':
    # Number of distinct segments to keep; every other segment is mapped
    # to '<RARE>' during the second pass.
    if len(sys.argv) > 1:
        noSegments = int(sys.argv[1])-1
    else:
        noSegments = 89383 # Use all segments. CHANGE THIS if the corpus is not TIGER.
    initialize()
    # Use time.sleep here if you need to wait for the preparation to finish.
    folders=['/mounts/data/proj/kann/SegEm/tiger_text_file_preproc_seg_3']
    # First pass: determine the noSegments most frequent segments.
    frequentWords = Traverse_01(folders, noSegments)
    # Second pass: build the co-occurrence slices over the corpus.
    Traverse(folders, frequentWords)
    # Build the PPMI tensor, run RESCAL and write all results to disk.
    formPPMIandSVD()
| |
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
addToPath('../topologies')
import Options
import Ruby
import Simulation
import CacheConfig
import MemConfig
from Caches import *
from cpu2000 import *
#Prodromou: Include SPEC2k6 files
import SPEC2k6_train
import SPEC2k6_ref
#Prodromou: For periodical stat dumping
from m5.internal.stats import periodicStatDump as statDump
def get_processes(options):
"""Interprets provided options and returns a list of processes"""
multiprocesses = []
inputs = []
outputs = []
errouts = []
pargs = []
workloads = options.cmd.split(';')
if options.input != "":
inputs = options.input.split(';')
if options.output != "":
outputs = options.output.split(';')
if options.errout != "":
errouts = options.errout.split(';')
if options.options != "":
pargs = options.options.split(';')
# Prodromou: Add required options here so I don't have to
# re-write them every time
#options.cpu_type = "detailed"
#options.caches = True
#Prodromou: Invoke the benchmarks
if options.benchmark:
if options.bench_size == 'train':
if options.benchmark == 'perlbench':
process = SPEC2k6_train.perlbench
elif options.benchmark == 'bzip2':
process = SPEC2k6_train.bzip2
elif options.benchmark == 'gcc':
process = SPEC2k6_train.gcc
elif options.benchmark == 'mcf':
process = SPEC2k6_train.mcf
elif options.benchmark == 'milc':
process = SPEC2k6_train.milc
elif options.benchmark == 'gobmk':
process = SPEC2k6_train.gobmk
elif options.benchmark == 'hmmer':
process = SPEC2k6_train.hmmer
elif options.benchmark == 'sjeng':
process = SPEC2k6_train.sjeng
elif options.benchmark == 'libquantum':
process = SPEC2k6_train.libquantum
elif options.benchmark == 'h264ref':
process = SPEC2k6_train.h264ref
elif options.benchmark == 'lbm':
process = SPEC2k6_train.lbm
elif options.benchmark == 'sphinx3':
process = SPEC2k6_train.sphinx3
elif options.benchmark == 'specrand':
process = SPEC2k6_train.specrand
else:
print "Error: Unknown Benchmark"
sys.exit(1)
elif options.bench_size == 'ref':
if options.benchmark == 'perlbench':
process = SPEC2k6_ref.perlbench
elif options.benchmark == 'bzip2':
process = SPEC2k6_ref.bzip2
elif options.benchmark == 'gcc':
process = SPEC2k6_ref.gcc
elif options.benchmark == 'mcf':
process = SPEC2k6_ref.mcf
elif options.benchmark == 'milc':
process = SPEC2k6_ref.milc
elif options.benchmark == 'gobmk':
process = SPEC2k6_ref.gobmk
elif options.benchmark == 'hmmer':
process = SPEC2k6_ref.hmmer
elif options.benchmark == 'sjeng':
process = SPEC2k6_ref.sjeng
elif options.benchmark == 'libquantum':
process = SPEC2k6_ref.libquantum
elif options.benchmark == 'h264ref':
process = SPEC2k6_ref.h264ref
elif options.benchmark == 'lbm':
process = SPEC2k6_ref.lbm
elif options.benchmark == 'sphinx3':
process = SPEC2k6_ref.sphinx3
elif options.benchmark == 'specrand':
process = SPEC2k6_ref.specrand
elif options.benchmark == 'bwaves':
process = SPEC2k6_ref.bwaves
elif options.benchmark == 'gamess':
process = SPEC2k6_ref.gamess
elif options.benchmark == 'zeusmp':
process = SPEC2k6_ref.zeusmp
elif options.benchmark == 'leslie3d':
process = SPEC2k6_ref.leslie3d
elif options.benchmark == 'GemsFDTD':
process = SPEC2k6_ref.GemsFDTD
elif options.benchmark == 'tonto':
process = SPEC2k6_ref.tonto
elif options.benchmark == 'namd':
process = SPEC2k6_ref.namd
elif options.benchmark == 'dealII':
process = SPEC2k6_ref.dealII
elif options.benchmark == 'soplex':
process = SPEC2k6_ref.soplex
elif options.benchmark == 'povray':
process = SPEC2k6_ref.povray
elif options.benchmark == 'omnetpp':
process = SPEC2k6_ref.omnetpp
elif options.benchmark == 'astar':
process = SPEC2k6_ref.astar
elif options.benchmark == 'xalancbmk':
process = SPEC2k6_ref.xalancbmk
elif options.benchmark == 'gromacs':
process = SPEC2k6_ref.gromacs
elif options.benchmark == 'cactusADM':
process = SPEC2k6_ref.cactusADM
elif options.benchmark == 'calculix':
process = SPEC2k6_ref.calculix
elif options.benchmark == 'wrf':
process = SPEC2k6_ref.wrf
elif options.benchmark == 'perlbench_x86':
process = SPEC2k6_ref.perlbench_x86
elif options.benchmark == 'bzip2_x86':
process = SPEC2k6_ref.bzip2_x86
elif options.benchmark == 'gcc_x86':
process = SPEC2k6_ref.gcc_x86
elif options.benchmark == 'mcf_x86':
process = SPEC2k6_ref.mcf_x86
elif options.benchmark == 'milc_x86':
process = SPEC2k6_ref.milc_x86
elif options.benchmark == 'gobmk_x86':
process = SPEC2k6_ref.gobmk_x86
elif options.benchmark == 'hmmer_x86':
process = SPEC2k6_ref.hmmer_x86
elif options.benchmark == 'sjeng_x86':
process = SPEC2k6_ref.sjeng_x86
elif options.benchmark == 'libquantum_x86':
process = SPEC2k6_ref.libquantum_x86
elif options.benchmark == 'h264ref_x86':
process = SPEC2k6_ref.h264ref_x86
elif options.benchmark == 'lbm_x86':
process = SPEC2k6_ref.lbm_x86
elif options.benchmark == 'sphinx3_x86':
process = SPEC2k6_ref.sphinx3_x86
elif options.benchmark == 'specrand_x86':
process = SPEC2k6_ref.specrand_x86
elif options.benchmark == 'bwaves_x86':
process = SPEC2k6_ref.bwaves_x86
elif options.benchmark == 'gamess_x86':
process = SPEC2k6_ref.gamess_x86
elif options.benchmark == 'zeusmp_x86':
process = SPEC2k6_ref.zeusmp_x86
elif options.benchmark == 'leslie3d_x86':
process = SPEC2k6_ref.leslie3d_x86
elif options.benchmark == 'GemsFDTD_x86':
process = SPEC2k6_ref.GemsFDTD_x86
elif options.benchmark == 'tonto_x86':
process = SPEC2k6_ref.tonto_x86
elif options.benchmark == 'namd_x86':
process = SPEC2k6_ref.namd_x86
elif options.benchmark == 'dealII_x86':
process = SPEC2k6_ref.dealII_x86
elif options.benchmark == 'soplex_x86':
process = SPEC2k6_ref.soplex_x86
elif options.benchmark == 'povray_x86':
process = SPEC2k6_ref.povray_x86
elif options.benchmark == 'omnetpp_x86':
process = SPEC2k6_ref.omnetpp_x86
elif options.benchmark == 'astar_x86':
process = SPEC2k6_ref.astar_x86
elif options.benchmark == 'xalancbmk_x86':
process = SPEC2k6_ref.xalancbmk_x86
elif options.benchmark == 'gromacs_x86':
process = SPEC2k6_ref.gromacs_x86
elif options.benchmark == 'cactusADM_x86':
process = SPEC2k6_ref.cactusADM_x86
elif options.benchmark == 'calculix_x86':
process = SPEC2k6_ref.calculix_x86
elif options.benchmark == 'wrf_x86':
process = SPEC2k6_ref.wrf_x86
else:
print "Error: Unknown Benchmark"
sys.exit(1)
else:
print "Error: Not supported benchmark size"
sys.exit(1)
multiprocesses.append(process)
return multiprocesses, 1
idx = 0
for wrkld in workloads:
process = LiveProcess()
process.executable = wrkld
if len(pargs) > idx:
process.cmd = [wrkld] + pargs[idx].split()
else:
process.cmd = [wrkld]
if len(inputs) > idx:
process.input = inputs[idx]
if len(outputs) > idx:
process.output = outputs[idx]
if len(errouts) > idx:
process.errout = errouts[idx]
multiprocesses.append(process)
idx += 1
if options.smt:
assert(options.cpu_type == "detailed" or options.cpu_type == "inorder")
return multiprocesses, idx
else:
return multiprocesses, 1
# ---------------------------------------------------------------------------
# Top-level syscall-emulation setup: parse options, build workloads, create
# the System with its voltage/clock domains and assign workloads to CPUs.
# ---------------------------------------------------------------------------
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)

# Ruby's options are only defined when Ruby is requested on the command line.
if '--ruby' in sys.argv:
    Ruby.define_options(parser)

(options, args) = parser.parse_args()

# This script takes options only, no positional arguments.
if args:
    print "Error: script doesn't take any positional arguments"
    sys.exit(1)

multiprocesses = []
numThreads = 1

#PRODROMOU
if options.total_insts:
    # Some thread HAS to execute AT LEAST this many instructions
    # Gives a very coarse grain breakpoint for the resume logic to kick in
    options.maxinsts = options.total_insts / options.num_cpus

if options.checkpoint_restore and options.take_checkpoints:
    # Record checkpoints relative to the restore point: store the
    # difference between the two values back into take_checkpoints.
    print "Both restore and record checkpoint options enabled. "
    cr_value = int(options.checkpoint_restore)
    tc_value = int(options.take_checkpoints)
    difference = tc_value - cr_value
    options.take_checkpoints = str(difference)
    print "Value stored is: " + options.take_checkpoints

#PRODROMOU
if options.bench:
    # '-'-separated list of SPEC2000 workload names, one per CPU; each name
    # is resolved via exec against the cpu2000 definitions.
    apps = options.bench.split("-")
    if len(apps) != options.num_cpus:
        print "number of benchmarks not equal to set num_cpus!"
        sys.exit(1)

    for app in apps:
        try:
            if buildEnv['TARGET_ISA'] == 'alpha':
                exec("workload = %s('alpha', 'tru64', 'ref')" % app)
            else:
                exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', 'ref')" % app)
            multiprocesses.append(workload.makeLiveProcess())
        except:
            print >>sys.stderr, "Unable to find workload for %s: %s" % (buildEnv['TARGET_ISA'], app)
            sys.exit(1)
#Prodromou: Need to add this
elif options.benchmark:
    multiprocesses, numThreads = get_processes(options)
elif options.cmd:
    multiprocesses, numThreads = get_processes(options)
else:
    print >> sys.stderr, "No workload specified. Exiting!\n"
    sys.exit(1)

# Derive CPU model, memory mode and memory class from the options.
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.numThreads = numThreads
MemClass = Simulation.setMemClass(options)

# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
    fatal("You cannot use SMT with multiple CPUs!")

np = options.num_cpus
#PRODROMOU: Set the instruction window
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
                mem_mode = test_mem_mode,
                mem_ranges = [AddrRange(options.mem_size)],
                cache_line_size = options.cacheline_size)

# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)

# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
                                   voltage_domain = system.voltage_domain)

# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()

# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                       voltage_domain =
                                       system.cpu_voltage_domain)

# All cpus belong to a common cpu_clk_domain, therefore running at a common
# frequency.
for cpu in system.cpu:
    cpu.clk_domain = system.cpu_clk_domain

# Sanity check
if options.fastmem:
    if CPUClass != AtomicSimpleCPU:
        fatal("Fastmem can only be used with atomic CPU!")
    if (options.caches or options.l2cache):
        fatal("You cannot use fastmem in combination with caches!")

if options.simpoint_profile:
    if not options.fastmem:
        # Atomic CPU checked with fastmem option already
        fatal("SimPoint generation should be done with atomic cpu and fastmem")
    if np > 1:
        fatal("SimPoint generation not supported with more than one CPUs")

# Assign workloads and per-CPU feature flags to every CPU.
for i in xrange(np):
    if options.smt:
        system.cpu[i].workload = multiprocesses
    elif len(multiprocesses) == 1:
        system.cpu[i].workload = multiprocesses[0]
    else:
        system.cpu[i].workload = multiprocesses[i]

    if options.fastmem:
        system.cpu[i].fastmem = True

    if options.simpoint_profile:
        system.cpu[i].simpoint_profile = True
        system.cpu[i].simpoint_interval = options.simpoint_interval

    if options.checker:
        system.cpu[i].addCheckerCpu()

    system.cpu[i].createThreads()
if options.ruby:
if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
sys.exit(1)
# Set the option for physmem so that it is not allocated any space
system.physmem = MemClass(range=AddrRange(options.mem_size),
null = True)
options.use_map = True
Ruby.create_system(options, system)
assert(options.num_cpus == len(system.ruby._cpu_ruby_ports))
for i in xrange(np):
ruby_port = system.ruby._cpu_ruby_ports[i]
# Create the interrupt controller and connect its ports to Ruby
# Note that the interrupt controller is always present but only
# in x86 does it have message ports that need to be connected
system.cpu[i].createInterruptController()
# Connect the cpu's cache ports to Ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].interrupts.pio = ruby_port.master
system.cpu[i].interrupts.int_master = ruby_port.slave
system.cpu[i].interrupts.int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
else:
system.membus = CoherentBus()
system.system_port = system.membus.slave
if options.mutlu:
CacheConfig.config_cache_parbs(options, system)
else:
CacheConfig.config_cache(options, system)
MemConfig.config_mem(options, system)
if options.dump_interval:
statDump (options.dump_interval)
m5.disableAllListeners()
root = Root(full_system = False, system = system)
#Prodromou: Try to modify the tCL value of the controller
#for ctrl in system.mem_ctrls:
# print "Hello %l" % (ctrl.tCL.getValue())
Simulation.run(options, root, system, FutureClass)
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.backup.base import BackupDriver, BackupTarget, BackupTargetJob
from libcloud.backup.types import BackupTargetType
from libcloud.backup.types import Provider
from libcloud.common.dimensiondata import dd_object_to_id
from libcloud.common.dimensiondata import DimensionDataConnection
from libcloud.common.dimensiondata import DimensionDataBackupClient
from libcloud.common.dimensiondata import DimensionDataBackupClientAlert
from libcloud.common.dimensiondata import DimensionDataBackupClientType
from libcloud.common.dimensiondata import DimensionDataBackupDetails
from libcloud.common.dimensiondata import DimensionDataBackupSchedulePolicy
from libcloud.common.dimensiondata import DimensionDataBackupStoragePolicy
from libcloud.common.dimensiondata import API_ENDPOINTS, DEFAULT_REGION
from libcloud.common.dimensiondata import TYPES_URN
from libcloud.common.dimensiondata import GENERAL_NS, BACKUP_NS
from libcloud.utils.xml import fixxpath, findtext, findall
# pylint: disable=no-member
DEFAULT_BACKUP_PLAN = 'Advanced'
class DimensionDataBackupDriver(BackupDriver):
    """
    DimensionData backup driver.
    """

    selected_region = None
    connectionCls = DimensionDataConnection
    name = 'Dimension Data Backup'
    website = 'https://cloud.dimensiondata.com/'
    type = Provider.DIMENSIONDATA
    api_version = 1.0

    network_domain_id = None

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=DEFAULT_REGION, **kwargs):
        # A custom host may be used instead of a known region; otherwise the
        # region must resolve to one of the published API endpoints.
        if region not in API_ENDPOINTS and host is None:
            raise ValueError(
                'Invalid region: %s, no host specified' % (region))
        if region is not None:
            self.selected_region = API_ENDPOINTS[region]

        super(DimensionDataBackupDriver, self).__init__(
            key=key, secret=secret,
            secure=secure, host=host,
            port=port,
            api_version=api_version,
            region=region,
            **kwargs)

    def _ex_connection_class_kwargs(self):
        """
        Add the region to the kwargs before the connection is instantiated
        """
        kwargs = super(DimensionDataBackupDriver,
                       self)._ex_connection_class_kwargs()
        kwargs['region'] = self.selected_region
        return kwargs

    def get_supported_target_types(self):
        """
        Get a list of backup target types this driver supports

        :return: ``list`` of :class:``BackupTargetType``
        """
        return [BackupTargetType.VIRTUAL]

    def list_targets(self):
        """
        List all backuptargets

        :rtype: ``list`` of :class:`BackupTarget`
        """
        targets = self._to_targets(
            self.connection.request_with_orgId_api_2('server/server').object)
        return targets

    def create_target(self, name, address,
                      type=BackupTargetType.VIRTUAL, extra=None):
        """
        Creates a new backup target

        :param name: Name of the target (not used)
        :type name: ``str``

        :param address: The ID of the node in Dimension Data Cloud
        :type address: ``str``

        :param type: Backup target type, only Virtual supported
        :type type: :class:`BackupTargetType`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`BackupTarget`
        """
        if extra is not None:
            service_plan = extra.get('servicePlan', DEFAULT_BACKUP_PLAN)
        else:
            service_plan = DEFAULT_BACKUP_PLAN
            extra = {'servicePlan': service_plan}

        create_node = ET.Element('NewBackup',
                                 {'xmlns': BACKUP_NS})
        create_node.set('servicePlan', service_plan)

        response = self.connection.request_with_orgId_api_1(
            'server/%s/backup' % (address),
            method='POST',
            data=ET.tostring(create_node)).object

        # The API returns the new asset id in the additionalInformation
        # name/value list; harvest it for the BackupTarget id.
        asset_id = None
        for info in findall(response,
                            'additionalInformation',
                            GENERAL_NS):
            if info.get('name') == 'assetId':
                asset_id = findtext(info, 'value', GENERAL_NS)

        return BackupTarget(
            id=asset_id,
            name=name,
            address=address,
            type=type,
            extra=extra,
            driver=self
        )

    def create_target_from_node(self, node, type=BackupTargetType.VIRTUAL,
                                extra=None):
        """
        Creates a new backup target from an existing node

        :param node: The Node to backup
        :type  node: ``Node``

        :param type: Backup target type (Physical, Virtual, ...).
        :type type: :class:`BackupTargetType`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`BackupTarget`
        """
        return self.create_target(name=node.name,
                                  address=node.id,
                                  type=BackupTargetType.VIRTUAL,
                                  extra=extra)

    def create_target_from_container(self, container,
                                     type=BackupTargetType.OBJECT,
                                     extra=None):
        """
        Creates a new backup target from an existing storage container

        :param node: The Container to backup
        :type  node: ``Container``

        :param type: Backup target type (Physical, Virtual, ...).
        :type type: :class:`BackupTargetType`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`BackupTarget`
        """
        # Fix: this previously *returned* a NotImplementedError instance
        # instead of raising it, so callers silently received an exception
        # object. Raise, consistent with the other unsupported operations.
        raise NotImplementedError(
            'create_target_from_container not supported for this driver')

    def update_target(self, target, name=None, address=None, extra=None):
        """
        Update the properties of a backup target, only changing the serviceplan
        is supported.

        :param target: Backup target to update
        :type  target: Instance of :class:`BackupTarget` or ``str``

        :param name: Name of the target
        :type name: ``str``

        :param address: Hostname, FQDN, IP, file path etc.
        :type address: ``str``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: Instance of :class:`BackupTarget`
        """
        if extra is not None:
            service_plan = extra.get('servicePlan', DEFAULT_BACKUP_PLAN)
        else:
            service_plan = DEFAULT_BACKUP_PLAN
        request = ET.Element('ModifyBackup',
                             {'xmlns': BACKUP_NS})
        request.set('servicePlan', service_plan)
        server_id = self._target_to_target_address(target)
        self.connection.request_with_orgId_api_1(
            'server/%s/backup/modify' % (server_id),
            method='POST',
            data=ET.tostring(request)).object
        if isinstance(target, BackupTarget):
            target.extra = extra
        else:
            # Caller passed a server id; re-fetch to return a full object
            target = self.ex_get_target_by_id(server_id)
        return target

    def delete_target(self, target):
        """
        Delete a backup target

        :param target: Backup target to delete
        :type  target: Instance of :class:`BackupTarget` or ``str``

        :rtype: ``bool``
        """
        server_id = self._target_to_target_address(target)
        # Deleting a target means disabling backup on the server (GET with
        # the ?disable query parameter, per the CaaS v1 API).
        response = self.connection.request_with_orgId_api_1(
            'server/%s/backup?disable' % (server_id),
            method='GET').object
        response_code = findtext(response, 'result', GENERAL_NS)
        return response_code in ['IN_PROGRESS', 'SUCCESS']

    def list_recovery_points(self, target, start_date=None, end_date=None):
        """
        List the recovery points available for a target

        :param target: Backup target to delete
        :type  target: Instance of :class:`BackupTarget`

        :param start_date: The start date to show jobs between (optional)
        :type  start_date: :class:`datetime.datetime`

        :param end_date: The end date to show jobs between (optional)
        :type  end_date: :class:`datetime.datetime``

        :rtype: ``list`` of :class:`BackupTargetRecoveryPoint`
        """
        raise NotImplementedError(
            'list_recovery_points not implemented for this driver')

    def recover_target(self, target, recovery_point, path=None):
        """
        Recover a backup target to a recovery point

        :param target: Backup target to delete
        :type  target: Instance of :class:`BackupTarget`

        :param recovery_point: Backup target with the backup data
        :type  recovery_point: Instance of :class:`BackupTarget`

        :param path: The part of the recovery point to recover (optional)
        :type  path: ``str``

        :rtype: Instance of :class:`BackupTargetJob`
        """
        raise NotImplementedError(
            'recover_target not implemented for this driver')

    def recover_target_out_of_place(self, target, recovery_point,
                                    recovery_target, path=None):
        """
        Recover a backup target to a recovery point out-of-place

        :param target: Backup target with the backup data
        :type  target: Instance of :class:`BackupTarget`

        :param recovery_point: Backup target with the backup data
        :type  recovery_point: Instance of :class:`BackupTarget`

        :param recovery_target: Backup target with to recover the data to
        :type  recovery_target: Instance of :class:`BackupTarget`

        :param path: The part of the recovery point to recover (optional)
        :type  path: ``str``

        :rtype: Instance of :class:`BackupTargetJob`
        """
        raise NotImplementedError(
            'recover_target_out_of_place not implemented for this driver')

    def get_target_job(self, target, id):
        """
        Get a specific backup job by ID

        :param target: Backup target with the backup data
        :type  target: Instance of :class:`BackupTarget`

        :param id: Backup target with the backup data
        :type  id: Instance of :class:`BackupTarget`

        :rtype: :class:`BackupTargetJob`
        """
        # NOTE: raises IndexError when no job with this id exists; kept as-is
        # because callers may rely on that behaviour.
        jobs = self.list_target_jobs(target)
        return list(filter(lambda x: x.id == id, jobs))[0]

    def list_target_jobs(self, target):
        """
        List the backup jobs on a target

        :param target: Backup target with the backup data
        :type  target: Instance of :class:`BackupTarget`

        :rtype: ``list`` of :class:`BackupTargetJob`
        """
        raise NotImplementedError(
            'list_target_jobs not implemented for this driver')

    def create_target_job(self, target, extra=None):
        """
        Create a new backup job on a target

        :param target: Backup target with the backup data
        :type  target: Instance of :class:`BackupTarget`

        :param extra: (optional) Extra attributes (driver specific).
        :type  extra: ``dict``

        :rtype: Instance of :class:`BackupTargetJob`
        """
        raise NotImplementedError(
            'create_target_job not implemented for this driver')

    def resume_target_job(self, target, job):
        """
        Resume a suspended backup job on a target

        :param target: Backup target with the backup data
        :type  target: Instance of :class:`BackupTarget`

        :param job: Backup target job to resume
        :type  job: Instance of :class:`BackupTargetJob`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'resume_target_job not implemented for this driver')

    def suspend_target_job(self, target, job):
        """
        Suspend a running backup job on a target

        :param target: Backup target with the backup data
        :type  target: Instance of :class:`BackupTarget`

        :param job: Backup target job to suspend
        :type  job: Instance of :class:`BackupTargetJob`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'suspend_target_job not implemented for this driver')

    def cancel_target_job(self, job, ex_client=None, ex_target=None):
        """
        Cancel a backup job on a target

        :param job: Backup target job to cancel.  If it is ``None``
                    ex_client and ex_target must be set
        :type  job: Instance of :class:`BackupTargetJob` or ``None``

        :param ex_client: Client of the job to cancel.
                          Not necessary if job is specified.
                          DimensionData only has 1 job per client
        :type  ex_client: Instance of :class:`DimensionDataBackupClient`
                          or ``str``

        :param ex_target: Target to cancel a job from.
                          Not necessary if job is specified.
        :type  ex_target: Instance of :class:`BackupTarget` or ``str``

        :rtype: ``bool``
        """
        if job is None:
            if ex_client is None or ex_target is None:
                raise ValueError("Either job or ex_client and "
                                 "ex_target have to be set")
            server_id = self._target_to_target_address(ex_target)
            client_id = self._client_to_client_id(ex_client)
        else:
            server_id = job.target.address
            client_id = job.extra['clientId']

        response = self.connection.request_with_orgId_api_1(
            'server/%s/backup/client/%s?cancelJob' % (server_id,
                                                      client_id),
            method='GET').object
        response_code = findtext(response, 'result', GENERAL_NS)
        return response_code in ['IN_PROGRESS', 'SUCCESS']

    def ex_get_target_by_id(self, id):
        """
        Get a target by server id

        :param id: The id of the target you want to get
        :type  id: ``str``

        :rtype: :class:`BackupTarget`
        """
        node = self.connection.request_with_orgId_api_2(
            'server/server/%s' % id).object
        return self._to_target(node)

    def ex_add_client_to_target(self, target, client_type, storage_policy,
                                schedule_policy, trigger, email):
        """
        Add a client to a target

        :param target: Backup target with the backup data
        :type  target: Instance of :class:`BackupTarget` or ``str``

        :param client: Client to add to the target
        :type  client: Instance of :class:`DimensionDataBackupClientType`
                       or ``str``

        :param storage_policy: The storage policy for the client
        :type  storage_policy: Instance of
                               :class:`DimensionDataBackupStoragePolicy`
                               or ``str``

        :param schedule_policy: The schedule policy for the client
        :type  schedule_policy: Instance of
                                :class:`DimensionDataBackupSchedulePolicy`
                                or ``str``

        :param trigger: The notify trigger for the client
        :type  trigger: ``str``

        :param email: The notify email for the client
        :type  email: ``str``

        :rtype: ``bool``
        """
        server_id = self._target_to_target_address(target)

        backup_elm = ET.Element('NewBackupClient',
                                {'xmlns': BACKUP_NS})
        # Each policy argument may be a rich object or a bare name string
        if isinstance(client_type, DimensionDataBackupClientType):
            ET.SubElement(backup_elm, "type").text = client_type.type
        else:
            ET.SubElement(backup_elm, "type").text = client_type

        if isinstance(storage_policy, DimensionDataBackupStoragePolicy):
            ET.SubElement(backup_elm,
                          "storagePolicyName").text = storage_policy.name
        else:
            ET.SubElement(backup_elm,
                          "storagePolicyName").text = storage_policy

        if isinstance(schedule_policy, DimensionDataBackupSchedulePolicy):
            ET.SubElement(backup_elm,
                          "schedulePolicyName").text = schedule_policy.name
        else:
            ET.SubElement(backup_elm,
                          "schedulePolicyName").text = schedule_policy

        alerting_elm = ET.SubElement(backup_elm, "alerting")
        alerting_elm.set('trigger', trigger)
        ET.SubElement(alerting_elm, "emailAddress").text = email

        response = self.connection.request_with_orgId_api_1(
            'server/%s/backup/client' % (server_id),
            method='POST',
            data=ET.tostring(backup_elm)).object
        response_code = findtext(response, 'result', GENERAL_NS)
        return response_code in ['IN_PROGRESS', 'SUCCESS']

    def ex_remove_client_from_target(self, target, backup_client):
        """
        Removes a client from a backup target

        :param  target: The backup target to remove the client from
        :type   target: :class:`BackupTarget` or ``str``

        :param  backup_client: The backup client to remove
        :type   backup_client: :class:`DimensionDataBackupClient` or ``str``

        :rtype: ``bool``
        """
        server_id = self._target_to_target_address(target)
        client_id = self._client_to_client_id(backup_client)
        response = self.connection.request_with_orgId_api_1(
            'server/%s/backup/client/%s?disable' % (server_id, client_id),
            method='GET').object
        response_code = findtext(response, 'result', GENERAL_NS)
        return response_code in ['IN_PROGRESS', 'SUCCESS']

    def ex_get_backup_details_for_target(self, target):
        """
        Returns a backup details object for a target

        :param  target: The backup target to get details for
        :type   target: :class:`BackupTarget` or ``str``

        :rtype: :class:`DimensionDataBackupDetails`
        """
        # Accept either a BackupTarget or a server id string
        if not isinstance(target, BackupTarget):
            target = self.ex_get_target_by_id(target)
        if target is None:
            # No backup enabled on that server; nothing to report
            return
        response = self.connection.request_with_orgId_api_1(
            'server/%s/backup' % (target.address),
            method='GET').object
        return self._to_backup_details(response, target)

    def ex_list_available_client_types(self, target):
        """
        Returns a list of available backup client types

        :param  target: The backup target to list available types for
        :type   target: :class:`BackupTarget` or ``str``

        :rtype: ``list`` of :class:`DimensionDataBackupClientType`
        """
        server_id = self._target_to_target_address(target)
        response = self.connection.request_with_orgId_api_1(
            'server/%s/backup/client/type' % (server_id),
            method='GET').object
        return self._to_client_types(response)

    def ex_list_available_storage_policies(self, target):
        """
        Returns a list of available backup storage policies

        :param  target: The backup target to list available policies for
        :type   target: :class:`BackupTarget` or ``str``

        :rtype: ``list`` of :class:`DimensionDataBackupStoragePolicy`
        """
        server_id = self._target_to_target_address(target)
        response = self.connection.request_with_orgId_api_1(
            'server/%s/backup/client/storagePolicy' % (server_id),
            method='GET').object
        return self._to_storage_policies(response)

    def ex_list_available_schedule_policies(self, target):
        """
        Returns a list of available backup schedule policies

        :param  target: The backup target to list available policies for
        :type   target: :class:`BackupTarget` or ``str``

        :rtype: ``list`` of :class:`DimensionDataBackupSchedulePolicy`
        """
        server_id = self._target_to_target_address(target)
        response = self.connection.request_with_orgId_api_1(
            'server/%s/backup/client/schedulePolicy' % (server_id),
            method='GET').object
        return self._to_schedule_policies(response)

    # -- XML response -> model object converters ----------------------------

    def _to_storage_policies(self, object):
        elements = object.findall(fixxpath('storagePolicy', BACKUP_NS))
        return [self._to_storage_policy(el) for el in elements]

    def _to_storage_policy(self, element):
        return DimensionDataBackupStoragePolicy(
            retention_period=int(element.get('retentionPeriodInDays')),
            name=element.get('name'),
            secondary_location=element.get('secondaryLocation')
        )

    def _to_schedule_policies(self, object):
        elements = object.findall(fixxpath('schedulePolicy', BACKUP_NS))
        return [self._to_schedule_policy(el) for el in elements]

    def _to_schedule_policy(self, element):
        return DimensionDataBackupSchedulePolicy(
            name=element.get('name'),
            description=element.get('description')
        )

    def _to_client_types(self, object):
        elements = object.findall(fixxpath('backupClientType', BACKUP_NS))
        return [self._to_client_type(el) for el in elements]

    def _to_client_type(self, element):
        # 'description' may be either an attribute or a child element
        # depending on which endpoint produced this XML
        description = element.get('description')
        if description is None:
            description = findtext(element, 'description', BACKUP_NS)
        return DimensionDataBackupClientType(
            type=element.get('type'),
            description=description,
            is_file_system=bool(element.get('isFileSystem') == 'true')
        )

    def _to_backup_details(self, object, target):
        return DimensionDataBackupDetails(
            asset_id=object.get('assetId'),
            service_plan=object.get('servicePlan'),
            status=object.get('state'),
            clients=self._to_clients(object, target)
        )

    def _to_clients(self, object, target):
        elements = object.findall(fixxpath('backupClient', BACKUP_NS))
        return [self._to_client(el, target) for el in elements]

    def _to_client(self, element, target):
        client_id = element.get('id')
        return DimensionDataBackupClient(
            id=client_id,
            type=self._to_client_type(element),
            status=element.get('status'),
            schedule_policy=findtext(element, 'schedulePolicyName', BACKUP_NS),
            storage_policy=findtext(element, 'storagePolicyName', BACKUP_NS),
            download_url=findtext(element, 'downloadUrl', BACKUP_NS),
            running_job=self._to_backup_job(element, target, client_id),
            alert=self._to_alert(element)
        )

    def _to_alert(self, element):
        alert = element.find(fixxpath('alerting', BACKUP_NS))
        if alert is not None:
            notify_list = [
                email_addr.text for email_addr
                in alert.findall(fixxpath('emailAddress', BACKUP_NS))
            ]
            # Fix: 'trigger' is an attribute of the <alerting> element (see
            # ex_add_client_to_target, which sets it there), so it must be
            # read from `alert`, not from the enclosing client element.
            return DimensionDataBackupClientAlert(
                trigger=alert.get('trigger'),
                notify_list=notify_list
            )
        return None

    def _to_backup_job(self, element, target, client_id):
        running_job = element.find(fixxpath('runningJob', BACKUP_NS))
        if running_job is not None:
            return BackupTargetJob(
                id=running_job.get('id'),
                status=running_job.get('status'),
                progress=int(running_job.get('percentageComplete')),
                driver=self.connection.driver,
                target=target,
                extra={'clientId': client_id}
            )
        return None

    def _to_targets(self, object):
        node_elements = object.findall(fixxpath('server', TYPES_URN))
        return [self._to_target(el) for el in node_elements]

    def _to_target(self, element):
        # Servers without a <backup> child have backup disabled and are not
        # valid targets; they are filtered out (None return).
        backup = findall(element, 'backup', TYPES_URN)
        if len(backup) == 0:
            return
        extra = {
            'description': findtext(element, 'description', TYPES_URN),
            'sourceImageId': findtext(element, 'sourceImageId', TYPES_URN),
            'datacenterId': element.get('datacenterId'),
            'deployedTime': findtext(element, 'createTime', TYPES_URN),
            'servicePlan': backup[0].get('servicePlan')
        }

        n = BackupTarget(id=backup[0].get('assetId'),
                         name=findtext(element, 'name', TYPES_URN),
                         address=element.get('id'),
                         driver=self.connection.driver,
                         type=BackupTargetType.VIRTUAL,
                         extra=extra)
        return n

    @staticmethod
    def _client_to_client_id(backup_client):
        # Accepts a DimensionDataBackupClient or its bare id string
        return dd_object_to_id(backup_client, DimensionDataBackupClient)

    @staticmethod
    def _target_to_target_address(target):
        # Accepts a BackupTarget (uses its .address) or a bare id string
        return dd_object_to_id(target, BackupTarget, id_value='address')
| |
#!/usr/bin/python3
#
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the utils module"""
import array
import errno
import fcntl
import glob
import mock
import os
import os.path
import random
import re
import shutil
import signal
import socket
import stat
import tempfile
import time
import unittest
import warnings
import testutils
from ganeti import constants
from ganeti import compat
from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti.utils import RunCmd, \
FirstFree, \
RunParts
class TestParseCpuMask(unittest.TestCase):
  """Test case for the ParseCpuMask function."""
  def testWellFormed(self):
    # (mask string, expected CPU index list) pairs
    for mask, expected in [
      ("", []),
      ("1", [1]),
      ("0-2,4,5-5", [0, 1, 2, 4, 5]),
      ]:
      self.assertEqual(utils.ParseCpuMask(mask), expected)
  def testInvalidInput(self):
    # malformed masks must raise ParseError
    for mask in ("garbage", "0,", "0-1-2", "2-1", "1-a"):
      self.assertRaises(errors.ParseError, utils.ParseCpuMask, mask)
class TestParseMultiCpuMask(unittest.TestCase):
  """Test case for the ParseMultiCpuMask function."""
  def testWellFormed(self):
    self.assertEqual(utils.ParseMultiCpuMask(""), [])
    self.assertEqual(utils.ParseMultiCpuMask("1"), [[1]])
    self.assertEqual(utils.ParseMultiCpuMask("0-2,4,5-5"), [[0, 1, 2, 4, 5]])
    self.assertEqual(utils.ParseMultiCpuMask("all"), [[-1]])
    self.assertEqual(utils.ParseMultiCpuMask("0-2:all:4,6-8"),
                     [[0, 1, 2], [-1], [4, 6, 7, 8]])
  def testInvalidInput(self):
    for data in ["garbage", "0,", "0-1-2", "2-1", "1-a", "all-all"]:
      # Bug fix: this previously called utils.ParseCpuMask, so the error
      # handling of ParseMultiCpuMask (the function under test) was never
      # actually exercised.
      self.assertRaises(errors.ParseError, utils.ParseMultiCpuMask, data)
class TestGetMounts(unittest.TestCase):
  """Test case for GetMounts()."""
  # Sample /proc/mounts-style content fed to GetMounts
  TESTDATA = (
    "rootfs / rootfs rw 0 0\n"
    "none /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\n"
    "none /proc proc rw,nosuid,nodev,noexec,relatime 0 0\n")
  def setUp(self):
    self.tmpfile = tempfile.NamedTemporaryFile()
    utils.WriteFile(self.tmpfile.name, data=self.TESTDATA)
  def testGetMounts(self):
    # each line must parse into a (device, mountpoint, fstype, options) tuple
    expected = [
      ("rootfs", "/", "rootfs", "rw"),
      ("none", "/sys", "sysfs", "rw,nosuid,nodev,noexec,relatime"),
      ("none", "/proc", "proc", "rw,nosuid,nodev,noexec,relatime"),
      ]
    self.assertEqual(utils.GetMounts(filename=self.tmpfile.name), expected)
class TestFirstFree(unittest.TestCase):
  """Test case for the FirstFree function"""
  def test(self):
    """Test FirstFree"""
    # (used slots, keyword args, expected first free slot)
    for used, kwargs, expected in [
      ([0, 1, 3], {}, 2),
      ([], {}, None),
      ([3, 4, 6], {}, 0),
      ([3, 4, 6], {"base": 3}, 5),
      ]:
      self.assertEqual(FirstFree(used, **kwargs), expected)
    # a used slot below the base is a programming error
    self.assertRaises(AssertionError, FirstFree, [0, 3, 4, 6], base=3)
class TestTimeFunctions(unittest.TestCase):
  """Test case for time functions"""
  def runTest(self):
    # SplitTime: float seconds -> (seconds, microseconds)
    for value, expected in [
      (1, (1, 0)),
      (1.5, (1, 500000)),
      (1218448917.4809151, (1218448917, 480915)),
      (123.48012, (123, 480120)),
      (123.9996, (123, 999600)),
      (123.9995, (123, 999500)),
      (123.9994, (123, 999400)),
      (123.999999999, (123, 999999)),
      ]:
      self.assertEqual(utils.SplitTime(value), expected)
    # negative timestamps are rejected
    self.assertRaises(AssertionError, utils.SplitTime, -1)

    # MergeTime: (seconds, microseconds) -> float seconds
    for value, expected in [
      ((1, 0), 1.0),
      ((1, 500000), 1.5),
      ((1218448917, 500000), 1218448917.5),
      ]:
      self.assertEqual(utils.MergeTime(value), expected)
    self.assertEqual(round(utils.MergeTime((1218448917, 481000)), 3),
                     1218448917.481)
    self.assertEqual(round(utils.MergeTime((1, 801000)), 3), 1.801)

    # out-of-range microseconds or negative seconds must be rejected
    for bad in [(0, -1), (0, 1000000), (0, 9999999), (-1, 0), (-9999, 0)]:
      self.assertRaises(AssertionError, utils.MergeTime, bad)
class FieldSetTestCase(unittest.TestCase):
  """Test case for FieldSets"""
  def testSimpleMatch(self):
    fields = utils.FieldSet("a", "b", "c", "def")
    self.assertTrue(fields.Matches("a"))
    # neither substrings nor longer names may match literal fields
    self.assertFalse(fields.Matches("d"), "Substring matched")
    self.assertFalse(fields.Matches("defghi"), "Prefix string matched")
    self.assertFalse(fields.NonMatching(["b", "c"]))
    self.assertFalse(fields.NonMatching(["a", "b", "c", "def"]))
    self.assertTrue(fields.NonMatching(["a", "d"]))
  def testRegexMatch(self):
    fields = utils.FieldSet("a", "b([0-9]+)", "c")
    for name in ("b1", "b99"):
      self.assertTrue(fields.Matches(name))
    self.assertFalse(fields.Matches("b/1"))
    self.assertFalse(fields.NonMatching(["b12", "c"]))
    self.assertTrue(fields.NonMatching(["a", "1"]))
class TestForceDictType(unittest.TestCase):
  """Test case for ForceDictType"""
  KEY_TYPES = {
    "a": constants.VTYPE_INT,
    "b": constants.VTYPE_BOOL,
    "c": constants.VTYPE_STRING,
    "d": constants.VTYPE_SIZE,
    "e": constants.VTYPE_MAYBE_STRING,
    }
  def _fdt(self, mapping, allowed_values=None):
    # Helper: run ForceDictType in-place and return the (mutated) mapping
    if allowed_values is None:
      utils.ForceDictType(mapping, self.KEY_TYPES)
    else:
      utils.ForceDictType(mapping, self.KEY_TYPES,
                          allowed_values=allowed_values)
    return mapping
  def testSimpleDict(self):
    # (input dict, expected coerced dict)
    for value, expected in [
      ({}, {}),
      ({"a": 1}, {"a": 1}),
      ({"a": "1"}, {"a": 1}),
      ({"a": 1, "b": 1}, {"a": 1, "b": True}),
      ({"b": 1, "c": "foo"}, {"b": True, "c": "foo"}),
      ({"b": 1, "c": False}, {"b": True, "c": ""}),
      ({"b": "false"}, {"b": False}),
      ({"b": "False"}, {"b": False}),
      ({"b": False}, {"b": False}),
      ({"b": "true"}, {"b": True}),
      ({"b": "True"}, {"b": True}),
      ({"d": "4"}, {"d": 4}),
      ({"d": "4M"}, {"d": 4}),
      ({"e": None}, {"e": None}),
      ({"e": "Hello World"}, {"e": "Hello World"}),
      ({"e": False}, {"e": ""}),
      ]:
      self.assertEqual(self._fdt(value), expected)
    # values listed in allowed_values pass through untouched
    self.assertEqual(self._fdt({"b": "hello"}, ["hello"]), {"b": "hello"})
  def testErrors(self):
    for bad in [{"a": "astring"}, {"b": "hello"}, {"c": True},
                {"d": "astring"}, {"d": "4 L"}, {"e": object()},
                {"e": []}, {"x": None}, []]:
      self.assertRaises(errors.TypeEnforcementError, self._fdt, bad)
    # an unknown vtype in the spec is a programming error, not a type error
    self.assertRaises(errors.ProgrammerError, utils.ForceDictType,
                      {"b": "hello"}, {"b": "no-such-type"})
class TestValidateServiceName(unittest.TestCase):
  """Test case for ValidateServiceName."""
  def testValid(self):
    # valid port numbers and service names are returned unchanged
    for name in [
      0, 1, 2, 3, 1024, 65000, 65534, 65535,
      "ganeti",
      "gnt-masterd",
      "HELLO_WORLD_SVC",
      "hello.world.1",
      "0", "80", "1111", "65535",
      ]:
      self.assertEqual(utils.ValidateServiceName(name), name)
  def testInvalid(self):
    # out-of-range ports, bad characters and over-long names must raise
    for name in [
      -15756, -1, 65536, 133428083,
      "", "Hello World!", "!", "'", "\"", "\t", "\n", "`",
      "-8546", "-1", "65536",
      (129 * "A"),
      ]:
      self.assertRaises(errors.OpPrereqError, utils.ValidateServiceName, name)
class TestReadLockedPidFile(unittest.TestCase):
  """Tests for utils.ReadLockedPidFile.

  ReadLockedPidFile should only report a PID when the file is
  currently held under an exclusive lock by a process.
  """
  def setUp(self):
    # Fresh scratch directory per test
    self.tmpdir = tempfile.mkdtemp()
  def tearDown(self):
    shutil.rmtree(self.tmpdir)
  def testNonExistent(self):
    # A missing PID file yields None instead of raising
    path = utils.PathJoin(self.tmpdir, "nonexist")
    self.assertTrue(utils.ReadLockedPidFile(path) is None)
  def testUnlocked(self):
    # A PID file that exists but is not locked is treated as stale
    path = utils.PathJoin(self.tmpdir, "pid")
    utils.WriteFile(path, data="123")
    self.assertTrue(utils.ReadLockedPidFile(path) is None)
  def testLocked(self):
    # While the exclusive lock is held the PID must be returned...
    path = utils.PathJoin(self.tmpdir, "pid")
    utils.WriteFile(path, data="123")
    fl = utils.FileLock.Open(path)
    try:
      fl.Exclusive(blocking=True)
      self.assertEqual(utils.ReadLockedPidFile(path), 123)
    finally:
      fl.Close()
    # ...and after releasing the lock the file is stale again
    self.assertTrue(utils.ReadLockedPidFile(path) is None)
  def testError(self):
    # A path component that is a regular file must surface the OS error
    path = utils.PathJoin(self.tmpdir, "foobar", "pid")
    utils.WriteFile(utils.PathJoin(self.tmpdir, "foobar"), data="")
    # open(2) should return ENOTDIR
    self.assertRaises(EnvironmentError, utils.ReadLockedPidFile, path)
class TestFindMatch(unittest.TestCase):
  """Tests for utils.FindMatch."""
  def test(self):
    data = {
      "aaaa": "Four A",
      "bb": {"Two B": True},
      re.compile(r"^x(foo|bar|bazX)([0-9]+)$"): (1, 2, 3),
      }
    # Plain string keys match directly and yield no regex groups
    self.assertEqual(utils.FindMatch(data, "aaaa"), ("Four A", []))
    self.assertEqual(utils.FindMatch(data, "bb"), ({"Two B": True}, []))
    # Regex keys return the captured groups next to the value
    for word in ["foo", "bar", "bazX"]:
      for num in range(1, 100, 7):
        name = "x%s%s" % (word, num)
        self.assertEqual(utils.FindMatch(data, name),
                         ((1, 2, 3), [word, str(num)]))
  def testNoMatch(self):
    # An empty dictionary never matches
    for needle in ["", "foo", 1234]:
      self.assertTrue(utils.FindMatch({}, needle) is None)
    data = {
      "X": "Hello World",
      re.compile("^(something)$"): "Hello World",
      }
    # Values must not be mistaken for keys, nor partial regex matches accepted
    self.assertTrue(utils.FindMatch(data, "") is None)
    self.assertTrue(utils.FindMatch(data, "Hello World") is None)
class TestTryConvert(unittest.TestCase):
  """Tests for utils.TryConvert."""
  def test(self):
    # Successful conversions return the converted value
    self.assertEqual(utils.TryConvert(int, "1"), 1)
    self.assertEqual(utils.TryConvert(bool, ""), False)
    self.assertEqual(utils.TryConvert(bool, "a"), True)
    # Failed conversions fall back to the unconverted input
    self.assertEqual(utils.TryConvert(int, "a"), "a")
class TestVerifyDictOptions(unittest.TestCase):
  """Tests for utils.VerifyDictOptions."""
  def setUp(self):
    self.defaults = {
      "first_key": "foobar",
      "foobar": {
        "key1": "value2",
        "key2": "value1",
        },
      "another_key": "another_value",
      }
  def _assertInvalid(self, options):
    # Helper: the given options must be rejected against self.defaults
    self.assertRaises(errors.OpPrereqError, utils.VerifyDictOptions,
                      options, self.defaults)
  def test(self):
    # Every key, including nested ones, exists in the defaults
    utils.VerifyDictOptions({
      "first_key": "blubb",
      "foobar": {
        "key2": "foo",
        },
      }, self.defaults)
  def testInvalid(self):
    # Unknown top-level key
    self._assertInvalid({
      "invalid_key": "blubb",
      "foobar": {
        "key2": "foo",
        },
      })
  def testNestedInvalid(self):
    # Unknown key inside a known sub-dictionary
    self._assertInvalid({
      "foobar": {
        "key2": "foo",
        "key3": "blibb",
        },
      })
  def testMultiInvalid(self):
    # Several violations at once must still be rejected
    self._assertInvalid({
      "foobar": {
        "key1": "value3",
        "key6": "Right here",
        },
      "invalid_with_sub": {
        "sub1": "value3",
        },
      })
class TestValidateDeviceNames(unittest.TestCase):
  """Tests for utils.ValidateDeviceNames."""
  def testEmpty(self):
    # An empty device list is always acceptable
    for kind in ("NIC", "disk"):
      utils.ValidateDeviceNames(kind, [])
  def testNoName(self):
    # Devices without an explicit name are accepted
    utils.ValidateDeviceNames("NIC", [{}, {}])
  def testInvalidName(self):
    # Purely numeric names would be ambiguous with device indices
    for (kind, key) in [("disk", constants.IDISK_NAME),
                        ("NIC", constants.INIC_NAME)]:
      self.assertRaises(errors.OpPrereqError, utils.ValidateDeviceNames,
                        kind, [{key: "42"}])
  def testUsedName(self):
    # Duplicate names within one list must be rejected
    disks = [{constants.IDISK_NAME: "name1"} for _ in range(2)]
    self.assertRaises(errors.OpPrereqError, utils.ValidateDeviceNames,
                      "disk", disks)
def Disk(dev_type):
  """Return a mock disk object whose dev_type attribute is set."""
  fake_disk = mock.Mock()
  fake_disk.dev_type = dev_type
  return fake_disk
def Drbd():
  """Shortcut for a mock disk of type constants.DT_DRBD8."""
  return Disk(constants.DT_DRBD8)
def Rbd():
  """Shortcut for a mock disk of type constants.DT_RBD."""
  return Disk(constants.DT_RBD)
class AllDiskTemplateTest(unittest.TestCase):
  """Tests for utils.AllDiskOfType (do *all* disks match a template set?)."""
  @staticmethod
  def _allOfType(disks, templates):
    # Thin wrapper to keep the individual assertions compact
    return utils.AllDiskOfType(disks, templates)
  def testAllDiskless(self):
    # No disks at all satisfies the diskless template
    self.assertTrue(self._allOfType([], [constants.DT_DISKLESS]))
  def testOrDiskless(self):
    self.assertTrue(
      self._allOfType([], [constants.DT_DISKLESS, constants.DT_DRBD8]))
  def testOrDrbd(self):
    self.assertTrue(
      self._allOfType([Drbd()], [constants.DT_DISKLESS, constants.DT_DRBD8]))
  def testOrRbd(self):
    self.assertTrue(
      self._allOfType([Rbd()], [constants.DT_RBD, constants.DT_DRBD8]))
  def testNotRbd(self):
    self.assertFalse(self._allOfType([Rbd()], [constants.DT_DRBD8]))
  def testNotDiskless(self):
    self.assertFalse(self._allOfType([], [constants.DT_DRBD8]))
  def testNotRbdDiskless(self):
    self.assertFalse(self._allOfType([Rbd()], [constants.DT_DISKLESS]))
  def testHeterogeneous(self):
    # A mixed set fails if any disk falls outside the allowed templates
    self.assertFalse(self._allOfType([Rbd(), Drbd()], [constants.DT_DRBD8]))
  def testHeterogeneousDiskless(self):
    self.assertFalse(
      self._allOfType([Rbd(), Drbd()], [constants.DT_DISKLESS]))
class AnyDiskTemplateTest(unittest.TestCase):
  """Tests for utils.AnyDiskOfType (does *any* disk match a template set?)."""
  @staticmethod
  def _anyOfType(disks, templates):
    # Thin wrapper to keep the individual assertions compact
    return utils.AnyDiskOfType(disks, templates)
  def testAnyDiskless(self):
    # No disks at all satisfies the diskless template
    self.assertTrue(self._anyOfType([], [constants.DT_DISKLESS]))
  def testOrDiskless(self):
    self.assertTrue(
      self._anyOfType([], [constants.DT_DISKLESS, constants.DT_DRBD8]))
  def testOrDrbd(self):
    self.assertTrue(
      self._anyOfType([Drbd()], [constants.DT_DISKLESS, constants.DT_DRBD8]))
  def testOrRbd(self):
    self.assertTrue(
      self._anyOfType([Rbd()], [constants.DT_RBD, constants.DT_DRBD8]))
  def testNotRbd(self):
    self.assertFalse(self._anyOfType([Rbd()], [constants.DT_DRBD8]))
  def testNotDiskless(self):
    self.assertFalse(self._anyOfType([], [constants.DT_DRBD8]))
  def testNotRbdDiskless(self):
    self.assertFalse(self._anyOfType([Rbd()], [constants.DT_DISKLESS]))
  def testHeterogeneous(self):
    # Unlike AllDiskOfType, a single matching disk is enough here
    self.assertTrue(self._anyOfType([Rbd(), Drbd()], [constants.DT_DRBD8]))
  def testHeterogeneousDiskless(self):
    self.assertFalse(
      self._anyOfType([Rbd(), Drbd()], [constants.DT_DISKLESS]))
class GetDiskTemplateTest(unittest.TestCase):
  """Tests for utils.GetDiskTemplate."""
  def testUnique(self):
    # A homogeneous list reports its single template
    self.assertEqual(utils.GetDiskTemplate([Rbd()]), constants.DT_RBD)
  def testDiskless(self):
    # An empty list maps to the diskless template
    self.assertEqual(utils.GetDiskTemplate([]), constants.DT_DISKLESS)
  def testMultiple(self):
    self.assertEqual(utils.GetDiskTemplate([Rbd(), Rbd()]), constants.DT_RBD)
  def testMixed(self):
    # Disks of differing types collapse to the special "mixed" template
    self.assertEqual(utils.GetDiskTemplate([Rbd(), Drbd()]),
                     constants.DT_MIXED)
class TestSendFds(unittest.TestCase):
  """Tests for utils.SendFds (SCM_RIGHTS file descriptor passing)."""
  def testSendFds(self):
    sender, receiver = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
    # Attempt to send both, file-like objects and fds
    tempfiles = [tempfile.TemporaryFile() for _ in range(3)]
    tempfds = [tempfile.mkstemp()[0] for _ in range(3)]
    utils.SendFds(sender, b" ", tempfiles + tempfds)
    # All descriptors must arrive in a single SCM_RIGHTS control message
    _, ancdata, __, ___ = receiver.recvmsg(10, 1024)
    self.assertEqual(len(ancdata), 1)
    cmsg_level, cmsg_type, cmsg_data = ancdata[0]
    self.assertEqual(cmsg_level, socket.SOL_SOCKET)
    self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
    # Decode the packed C int array of received descriptors
    received_fds = array.array("i")
    received_fds.frombytes(cmsg_data)
    sent_fds = tempfds + [f.fileno() for f in tempfiles]
    # The received file descriptors are essentially dup()'d, so we can't
    # compare them directly. Instead we need to check that they are referring
    # to the same files.
    received_inodes = set(os.fstat(fd) for fd in received_fds)
    sent_inodes = set(os.fstat(fd) for fd in sent_fds)
    self.assertSetEqual(sent_inodes, received_inodes)
    # Clean up sockets, duplicated fds, raw fds and file objects
    sender.close()
    receiver.close()
    for fd in received_fds.tolist() + tempfds:
      os.close(fd)
    for file_ in tempfiles:
      file_.close()
# Entry point when this test module is executed directly
if __name__ == "__main__":
  testutils.GanetiTestProgram()
| |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
# pylint: disable=too-many-lines
import time
import datetime
import uuid
import logging
from typing import Optional, Dict, List, Union, Iterable, TYPE_CHECKING, Any, Mapping, cast
import six
import uamqp.errors
import uamqp.message
from .constants import (
_BATCH_MESSAGE_OVERHEAD_COST,
ServiceBusReceiveMode,
ServiceBusMessageState,
_X_OPT_ENQUEUED_TIME,
_X_OPT_SEQUENCE_NUMBER,
_X_OPT_ENQUEUE_SEQUENCE_NUMBER,
_X_OPT_PARTITION_KEY,
_X_OPT_LOCKED_UNTIL,
_X_OPT_LOCK_TOKEN,
_X_OPT_SCHEDULED_ENQUEUE_TIME,
_X_OPT_DEAD_LETTER_SOURCE,
PROPERTIES_DEAD_LETTER_REASON,
PROPERTIES_DEAD_LETTER_ERROR_DESCRIPTION,
ANNOTATION_SYMBOL_PARTITION_KEY,
ANNOTATION_SYMBOL_SCHEDULED_ENQUEUE_TIME,
ANNOTATION_SYMBOL_KEY_MAP,
MESSAGE_PROPERTY_MAX_LENGTH,
MAX_ABSOLUTE_EXPIRY_TIME,
MAX_DURATION_VALUE,
MESSAGE_STATE_NAME
)
from ..amqp import (
AmqpAnnotatedMessage,
AmqpMessageBodyType,
AmqpMessageHeader,
AmqpMessageProperties
)
from ..exceptions import MessageSizeExceededError
from .utils import (
utc_from_timestamp,
utc_now,
trace_message,
transform_messages_if_needed,
)
if TYPE_CHECKING:
from ..aio._servicebus_receiver_async import (
ServiceBusReceiver as AsyncServiceBusReceiver,
)
from .._servicebus_receiver import ServiceBusReceiver
from azure.core.tracing import AbstractSpan
_LOGGER = logging.getLogger(__name__)
class ServiceBusMessage(
    object
):  # pylint: disable=too-many-public-methods,too-many-instance-attributes
    """A Service Bus Message.

    :param body: The data to send in a single message.
    :type body: Optional[Union[str, bytes]]
    :keyword Optional[Dict] application_properties: The user defined properties on the message.
    :keyword Optional[str] session_id: The session identifier of the message for a sessionful entity.
    :keyword Optional[str] message_id: The id to identify the message.
    :keyword Optional[datetime.datetime] scheduled_enqueue_time_utc: The utc scheduled enqueue time to the message.
    :keyword Optional[datetime.timedelta] time_to_live: The life duration of a message.
    :keyword Optional[str] content_type: The content type descriptor.
    :keyword Optional[str] correlation_id: The correlation identifier.
    :keyword Optional[str] subject: The application specific subject, sometimes referred to as label.
    :keyword Optional[str] partition_key: The partition key for sending a message to a partitioned entity.
    :keyword Optional[str] to: The `to` address used for auto_forward chaining scenarios.
    :keyword Optional[str] reply_to: The address of an entity to send replies to.
    :keyword Optional[str] reply_to_session_id: The session identifier augmenting the `reply_to` address.

    .. admonition:: Example:

        .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
            :start-after: [START send_complex_message]
            :end-before: [END send_complex_message]
            :language: python
            :dedent: 4
            :caption: Sending a message with additional properties
    """

    def __init__(
        self,
        body: Optional[Union[str, bytes]],
        *,
        application_properties: Optional[Dict[str, Any]] = None,
        session_id: Optional[str] = None,
        message_id: Optional[str] = None,
        scheduled_enqueue_time_utc: Optional[datetime.datetime] = None,
        time_to_live: Optional[datetime.timedelta] = None,
        content_type: Optional[str] = None,
        correlation_id: Optional[str] = None,
        subject: Optional[str] = None,
        partition_key: Optional[str] = None,
        to: Optional[str] = None,
        reply_to: Optional[str] = None,
        reply_to_session_id: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Although we might normally thread through **kwargs this causes
        # problems as MessageProperties won't absorb spurious args.
        self._encoding = kwargs.pop("encoding", "UTF-8")
        if "raw_amqp_message" in kwargs and "message" in kwargs:
            # Internal usage only for transforming AmqpAnnotatedMessage to outgoing ServiceBusMessage
            self.message = kwargs["message"]
            self._raw_amqp_message = kwargs["raw_amqp_message"]
        elif "message" in kwargs:
            # Note: This cannot be renamed until UAMQP no longer relies on this specific name.
            self.message = kwargs["message"]
            self._raw_amqp_message = AmqpAnnotatedMessage(message=self.message)
        else:
            self._build_message(body)
        # Assigning via the properties routes each value into the right part
        # of the underlying AMQP message (properties/annotations/header).
        self.application_properties = application_properties
        self.session_id = session_id
        self.message_id = message_id
        self.content_type = content_type
        self.correlation_id = correlation_id
        self.to = to
        self.reply_to = reply_to
        self.reply_to_session_id = reply_to_session_id
        self.subject = subject
        self.scheduled_enqueue_time_utc = scheduled_enqueue_time_utc
        self.time_to_live = time_to_live
        self.partition_key = partition_key

    def __str__(self):
        # type: () -> str
        return str(self.raw_amqp_message)

    def __repr__(self):
        # type: () -> str
        # pylint: disable=bare-except
        # Each attribute is read under its own try/except so that one broken
        # property cannot prevent the whole repr from being produced.
        message_repr = "body={}".format(
            str(self)
        )
        try:
            message_repr += ", application_properties={}".format(self.application_properties)
        except:
            message_repr += ", application_properties=<read-error>"
        try:
            message_repr += ", session_id={}".format(self.session_id)
        except:
            message_repr += ", session_id=<read-error>"
        try:
            message_repr += ", message_id={}".format(self.message_id)
        except:
            message_repr += ", message_id=<read-error>"
        try:
            message_repr += ", content_type={}".format(self.content_type)
        except:
            message_repr += ", content_type=<read-error>"
        try:
            message_repr += ", correlation_id={}".format(self.correlation_id)
        except:
            message_repr += ", correlation_id=<read-error>"
        try:
            message_repr += ", to={}".format(self.to)
        except:
            message_repr += ", to=<read-error>"
        try:
            message_repr += ", reply_to={}".format(self.reply_to)
        except:
            message_repr += ", reply_to=<read-error>"
        try:
            message_repr += ", reply_to_session_id={}".format(self.reply_to_session_id)
        except:
            message_repr += ", reply_to_session_id=<read-error>"
        try:
            message_repr += ", subject={}".format(self.subject)
        except:
            message_repr += ", subject=<read-error>"
        try:
            message_repr += ", time_to_live={}".format(self.time_to_live)
        except:
            message_repr += ", time_to_live=<read-error>"
        try:
            message_repr += ", partition_key={}".format(self.partition_key)
        except:
            message_repr += ", partition_key=<read-error>"
        try:
            message_repr += ", scheduled_enqueue_time_utc={}".format(self.scheduled_enqueue_time_utc)
        except:
            message_repr += ", scheduled_enqueue_time_utc=<read-error>"
        # Cap the repr length to keep logs manageable
        return "ServiceBusMessage({})".format(message_repr)[:1024]

    def _build_message(self, body):
        """Create the underlying AmqpAnnotatedMessage from a str/bytes body."""
        if not (
            isinstance(body, (six.string_types, six.binary_type)) or (body is None)
        ):
            raise TypeError(
                "ServiceBusMessage body must be a string, bytes, or None. Got instead: {}".format(
                    type(body)
                )
            )
        # A None body becomes an (empty) value-body message; otherwise a data-body one.
        self._raw_amqp_message = AmqpAnnotatedMessage(value_body=None, encoding=self._encoding) \
            if body is None else AmqpAnnotatedMessage(data_body=body, encoding=self._encoding)
        self._raw_amqp_message.header = AmqpMessageHeader()
        self._raw_amqp_message.properties = AmqpMessageProperties()

    def _set_message_annotations(self, key, value):
        """Set/clear one of the x-opt-* message annotations on the AMQP message."""
        if not self._raw_amqp_message.annotations:
            self._raw_amqp_message.annotations = {}
        if isinstance(self, ServiceBusReceivedMessage):
            # Received messages carry the plain-bytes key; drop it so only the
            # symbol form used for outgoing messages remains.
            try:
                del self._raw_amqp_message.annotations[key]
            except KeyError:
                pass
        if value is None:
            try:
                del self._raw_amqp_message.annotations[ANNOTATION_SYMBOL_KEY_MAP[key]]
            except KeyError:
                pass
        else:
            self._raw_amqp_message.annotations[ANNOTATION_SYMBOL_KEY_MAP[key]] = value

    def _to_outgoing_message(self):
        # type: () -> ServiceBusMessage
        # pylint: disable=protected-access
        self.message = self.raw_amqp_message._to_outgoing_amqp_message()
        return self

    @property
    def raw_amqp_message(self):
        # type: () -> AmqpAnnotatedMessage
        """Advanced usage only. The internal AMQP message payload that is sent or received."""
        return self._raw_amqp_message

    @property
    def session_id(self):
        # type: () -> Optional[str]
        """The session identifier of the message for a sessionful entity.

        For sessionful entities, this application-defined value specifies the session affiliation of the message.
        Messages with the same session identifier are subject to summary locking and enable exact in-order
        processing and demultiplexing. For non-sessionful entities, this value is ignored.

        See Message Sessions in `https://docs.microsoft.com/azure/service-bus-messaging/message-sessions`.

        :rtype: str
        """
        if not self._raw_amqp_message.properties:
            return None
        try:
            return self._raw_amqp_message.properties.group_id.decode("UTF-8")
        except (AttributeError, UnicodeDecodeError):
            # group_id may already be a str (or None); return it unchanged
            return self._raw_amqp_message.properties.group_id

    @session_id.setter
    def session_id(self, value):
        # type: (str) -> None
        if value and len(value) > MESSAGE_PROPERTY_MAX_LENGTH:
            raise ValueError(
                "session_id cannot be longer than {} characters.".format(
                    MESSAGE_PROPERTY_MAX_LENGTH
                )
            )
        if not self._raw_amqp_message.properties:
            self._raw_amqp_message.properties = AmqpMessageProperties()
        self._raw_amqp_message.properties.group_id = value

    @property
    def application_properties(self):
        # type: () -> Optional[Dict]
        """The user defined properties on the message.

        :rtype: dict
        """
        return self._raw_amqp_message.application_properties

    @application_properties.setter
    def application_properties(self, value):
        # type: (Dict) -> None
        self._raw_amqp_message.application_properties = value

    @property
    def partition_key(self):
        # type: () -> Optional[str]
        """The partition key for sending a message to a partitioned entity.

        Setting this value enables assigning related messages to the same internal partition, so that submission
        sequence order is correctly recorded.
        The partition is chosen by a hash function over this value and cannot be chosen directly.

        See Partitioned queues and topics in
        `https://docs.microsoft.com/azure/service-bus-messaging/service-bus-partitioning`.

        :rtype: str
        """
        p_key = None
        try:
            # opt_p_key is used on the incoming message
            opt_p_key = self._raw_amqp_message.annotations.get(_X_OPT_PARTITION_KEY)  # type: ignore
            if opt_p_key is not None:
                p_key = opt_p_key
            # symbol_p_key is used on the outgoing message
            symbol_p_key = self._raw_amqp_message.annotations.get(ANNOTATION_SYMBOL_PARTITION_KEY)  # type: ignore
            if symbol_p_key is not None:
                p_key = symbol_p_key
            return p_key.decode("UTF-8")  # type: ignore
        except (AttributeError, UnicodeDecodeError):
            # Annotations missing entirely, or the key is already a str/None
            return p_key

    @partition_key.setter
    def partition_key(self, value):
        # type: (str) -> None
        if value and len(value) > MESSAGE_PROPERTY_MAX_LENGTH:
            raise ValueError(
                "partition_key cannot be longer than {} characters.".format(
                    MESSAGE_PROPERTY_MAX_LENGTH
                )
            )
        # The service requires the partition key to agree with the session id
        if value and self.session_id is not None and value != self.session_id:
            raise ValueError(
                "partition_key:{} cannot be set to a different value than session_id:{}".format(
                    value, self.session_id
                )
            )
        self._set_message_annotations(_X_OPT_PARTITION_KEY, value)

    @property
    def time_to_live(self):
        # type: () -> Optional[datetime.timedelta]
        """The life duration of a message.

        This value is the relative duration after which the message expires, starting from the instant the message
        has been accepted and stored by the broker, as captured in `enqueued_time_utc`.
        When not set explicitly, the assumed value is the DefaultTimeToLive for the respective queue or topic.
        A message-level time-to-live value cannot be longer than the entity's time-to-live setting and it is silently
        adjusted if it does.

        See Expiration in `https://docs.microsoft.com/azure/service-bus-messaging/message-expiration`

        :rtype: ~datetime.timedelta
        """
        if self._raw_amqp_message.header and self._raw_amqp_message.header.time_to_live:
            return datetime.timedelta(milliseconds=self._raw_amqp_message.header.time_to_live)
        return None

    @time_to_live.setter
    def time_to_live(self, value):
        # type: (datetime.timedelta) -> None
        if not self._raw_amqp_message.header:
            self._raw_amqp_message.header = AmqpMessageHeader()
        if value is None:
            self._raw_amqp_message.header.time_to_live = value
            # Clear any previously computed expiry. Guard on `properties`
            # existing: messages constructed from a raw AMQP message may not
            # have a properties section, which previously raised
            # AttributeError here when clearing the TTL.
            if self._raw_amqp_message.properties and \
                    self._raw_amqp_message.properties.absolute_expiry_time:
                self._raw_amqp_message.properties.absolute_expiry_time = value
        elif isinstance(value, datetime.timedelta):
            self._raw_amqp_message.header.time_to_live = int(value.total_seconds()) * 1000
        else:
            # Accept a plain number of seconds as well
            self._raw_amqp_message.header.time_to_live = int(value) * 1000
        if self._raw_amqp_message.header.time_to_live and \
                self._raw_amqp_message.header.time_to_live != MAX_DURATION_VALUE:
            if not self._raw_amqp_message.properties:
                self._raw_amqp_message.properties = AmqpMessageProperties()
            # Derive creation/expiry timestamps (ms since epoch), capping the
            # expiry at the protocol maximum.
            self._raw_amqp_message.properties.creation_time = int(time.mktime(utc_now().timetuple())) * 1000
            self._raw_amqp_message.properties.absolute_expiry_time = min(
                MAX_ABSOLUTE_EXPIRY_TIME,
                self._raw_amqp_message.properties.creation_time + self._raw_amqp_message.header.time_to_live
            )

    @property
    def scheduled_enqueue_time_utc(self):
        # type: () -> Optional[datetime.datetime]
        """The utc scheduled enqueue time to the message.

        This property can be used for scheduling when sending a message through `ServiceBusSender.send` method.
        If cancelling scheduled messages is required, you should use the `ServiceBusSender.schedule` method,
        which returns sequence numbers that can be used for future cancellation.
        `scheduled_enqueue_time_utc` is None if not set.

        :rtype: ~datetime.datetime
        """
        if self._raw_amqp_message.annotations:
            # The annotation may live under the plain or the symbol key
            timestamp = self._raw_amqp_message.annotations.get(
                _X_OPT_SCHEDULED_ENQUEUE_TIME
            ) or self._raw_amqp_message.annotations.get(ANNOTATION_SYMBOL_SCHEDULED_ENQUEUE_TIME)
            if timestamp:
                try:
                    in_seconds = timestamp / 1000.0
                    return utc_from_timestamp(in_seconds)
                except TypeError:
                    # Already a datetime rather than an epoch-ms number
                    return timestamp
        return None

    @scheduled_enqueue_time_utc.setter
    def scheduled_enqueue_time_utc(self, value):
        # type: (datetime.datetime) -> None
        if not self._raw_amqp_message.properties:
            self._raw_amqp_message.properties = AmqpMessageProperties()
        # Scheduled messages need a message id for later cancellation
        if not self._raw_amqp_message.properties.message_id:
            self._raw_amqp_message.properties.message_id = str(uuid.uuid4())
        self._set_message_annotations(_X_OPT_SCHEDULED_ENQUEUE_TIME, value)

    @property
    def body(self):
        # type: () -> Any
        """The body of the Message. The format may vary depending on the body type:
        For :class:`azure.servicebus.amqp.AmqpMessageBodyType.DATA<azure.servicebus.amqp.AmqpMessageBodyType.DATA>`,
        the body could be bytes or Iterable[bytes].
        For
        :class:`azure.servicebus.amqp.AmqpMessageBodyType.SEQUENCE<azure.servicebus.amqp.AmqpMessageBodyType.SEQUENCE>`,
        the body could be List or Iterable[List].
        For :class:`azure.servicebus.amqp.AmqpMessageBodyType.VALUE<azure.servicebus.amqp.AmqpMessageBodyType.VALUE>`,
        the body could be any type.

        :rtype: Any
        """
        return self._raw_amqp_message.body

    @property
    def body_type(self):
        # type: () -> AmqpMessageBodyType
        """The body type of the underlying AMQP message.

        :rtype: ~azure.servicebus.amqp.AmqpMessageBodyType
        """
        return self._raw_amqp_message.body_type

    @property
    def content_type(self):
        # type: () -> Optional[str]
        """The content type descriptor.

        Optionally describes the payload of the message, with a descriptor following the format of RFC2045, Section 5,
        for example "application/json".

        :rtype: str
        """
        if not self._raw_amqp_message.properties:
            return None
        try:
            return self._raw_amqp_message.properties.content_type.decode("UTF-8")
        except (AttributeError, UnicodeDecodeError):
            return self._raw_amqp_message.properties.content_type

    @content_type.setter
    def content_type(self, value):
        # type: (str) -> None
        if not self._raw_amqp_message.properties:
            self._raw_amqp_message.properties = AmqpMessageProperties()
        self._raw_amqp_message.properties.content_type = value

    @property
    def correlation_id(self):
        # type: () -> Optional[str]
        # pylint: disable=line-too-long
        """The correlation identifier.

        Allows an application to specify a context for the message for the purposes of correlation, for example
        reflecting the MessageId of a message that is being replied to.

        See Message Routing and Correlation in
        `https://docs.microsoft.com/azure/service-bus-messaging/service-bus-messages-payloads?#message-routing-and-correlation`.

        :rtype: str
        """
        if not self._raw_amqp_message.properties:
            return None
        try:
            return self._raw_amqp_message.properties.correlation_id.decode("UTF-8")
        except (AttributeError, UnicodeDecodeError):
            return self._raw_amqp_message.properties.correlation_id

    @correlation_id.setter
    def correlation_id(self, value):
        # type: (str) -> None
        if not self._raw_amqp_message.properties:
            self._raw_amqp_message.properties = AmqpMessageProperties()
        self._raw_amqp_message.properties.correlation_id = value

    @property
    def subject(self):
        # type: () -> Optional[str]
        """The application specific subject, sometimes referred to as a label.

        This property enables the application to indicate the purpose of the message to the receiver in a standardized
        fashion, similar to an email subject line.

        :rtype: str
        """
        if not self._raw_amqp_message.properties:
            return None
        try:
            return self._raw_amqp_message.properties.subject.decode("UTF-8")
        except (AttributeError, UnicodeDecodeError):
            return self._raw_amqp_message.properties.subject

    @subject.setter
    def subject(self, value):
        # type: (str) -> None
        if not self._raw_amqp_message.properties:
            self._raw_amqp_message.properties = AmqpMessageProperties()
        self._raw_amqp_message.properties.subject = value

    @property
    def message_id(self):
        # type: () -> Optional[str]
        """The id to identify the message.

        The message identifier is an application-defined value that uniquely identifies the message and its payload.
        The identifier is a free-form string and can reflect a GUID or an identifier derived from the
        application context. If enabled, the duplicate detection (see
        `https://docs.microsoft.com/azure/service-bus-messaging/duplicate-detection`)
        feature identifies and removes second and further submissions of messages with the same message id.

        :rtype: str
        """
        if not self._raw_amqp_message.properties:
            return None
        try:
            return self._raw_amqp_message.properties.message_id.decode("UTF-8")
        except (AttributeError, UnicodeDecodeError):
            return self._raw_amqp_message.properties.message_id

    @message_id.setter
    def message_id(self, value):
        # type: (str) -> None
        if value and len(str(value)) > MESSAGE_PROPERTY_MAX_LENGTH:
            raise ValueError(
                "message_id cannot be longer than {} characters.".format(
                    MESSAGE_PROPERTY_MAX_LENGTH
                )
            )
        if not self._raw_amqp_message.properties:
            self._raw_amqp_message.properties = AmqpMessageProperties()
        self._raw_amqp_message.properties.message_id = value

    @property
    def reply_to(self):
        # type: () -> Optional[str]
        # pylint: disable=line-too-long
        """The address of an entity to send replies to.

        This optional and application-defined value is a standard way to express a reply path to the receiver of
        the message. When a sender expects a reply, it sets the value to the absolute or relative path of the queue
        or topic it expects the reply to be sent to.

        See Message Routing and Correlation in
        `https://docs.microsoft.com/azure/service-bus-messaging/service-bus-messages-payloads?#message-routing-and-correlation`.

        :rtype: str
        """
        if not self._raw_amqp_message.properties:
            return None
        try:
            return self._raw_amqp_message.properties.reply_to.decode("UTF-8")
        except (AttributeError, UnicodeDecodeError):
            return self._raw_amqp_message.properties.reply_to

    @reply_to.setter
    def reply_to(self, value):
        # type: (str) -> None
        if not self._raw_amqp_message.properties:
            self._raw_amqp_message.properties = AmqpMessageProperties()
        self._raw_amqp_message.properties.reply_to = value

    @property
    def reply_to_session_id(self):
        # type: () -> Optional[str]
        # pylint: disable=line-too-long
        """The session identifier augmenting the `reply_to` address.

        This value augments the `reply_to` information and specifies which session id should be set for the reply
        when sent to the reply entity.

        See Message Routing and Correlation in
        `https://docs.microsoft.com/azure/service-bus-messaging/service-bus-messages-payloads?#message-routing-and-correlation`.

        :rtype: str
        """
        if not self._raw_amqp_message.properties:
            return None
        try:
            return self._raw_amqp_message.properties.reply_to_group_id.decode("UTF-8")
        except (AttributeError, UnicodeDecodeError):
            return self._raw_amqp_message.properties.reply_to_group_id

    @reply_to_session_id.setter
    def reply_to_session_id(self, value):
        # type: (str) -> None
        if value and len(value) > MESSAGE_PROPERTY_MAX_LENGTH:
            raise ValueError(
                "reply_to_session_id cannot be longer than {} characters.".format(
                    MESSAGE_PROPERTY_MAX_LENGTH
                )
            )
        if not self._raw_amqp_message.properties:
            self._raw_amqp_message.properties = AmqpMessageProperties()
        self._raw_amqp_message.properties.reply_to_group_id = value

    @property
    def to(self):
        # type: () -> Optional[str]
        """The `to` address.

        This property is reserved for future use in routing scenarios and presently ignored by the broker itself.
        Applications can use this value in rule-driven auto-forward chaining scenarios to indicate the intended
        logical destination of the message.

        See https://docs.microsoft.com/azure/service-bus-messaging/service-bus-auto-forwarding for more details.

        :rtype: str
        """
        if not self._raw_amqp_message.properties:
            return None
        try:
            return self._raw_amqp_message.properties.to.decode("UTF-8")
        except (AttributeError, UnicodeDecodeError):
            return self._raw_amqp_message.properties.to

    @to.setter
    def to(self, value):
        # type: (str) -> None
        if not self._raw_amqp_message.properties:
            self._raw_amqp_message.properties = AmqpMessageProperties()
        self._raw_amqp_message.properties.to = value
class ServiceBusMessageBatch(object):
    """A batch of messages.

    Sending messages in a batch is more performant than sending individual message.
    ServiceBusMessageBatch helps you create the maximum allowed size batch of `Message` to improve sending performance.
    Use the `add` method to add messages until the maximum batch size limit in bytes has been reached -
    at which point a `MessageSizeExceededError` will be raised.

    **Please use the create_message_batch method of ServiceBusSender
    to create a ServiceBusMessageBatch object instead of instantiating a ServiceBusMessageBatch object directly.**

    :param Optional[int] max_size_in_bytes: The maximum size of bytes data that a ServiceBusMessageBatch object
     can hold.
    """
    def __init__(self, max_size_in_bytes=None):
        # type: (Optional[int]) -> None
        self.message = uamqp.BatchMessage(
            data=[], multi_messages=False, properties=None
        )
        # Fall back to the transport-level maximum when the caller does not
        # provide a service-negotiated limit.
        self._max_size_in_bytes = (
            max_size_in_bytes or uamqp.constants.MAX_MESSAGE_LENGTH_BYTES
        )
        # Size of the empty batch envelope; per-message costs are added in _add
        self._size = self.message.gather()[0].get_message_encoded_size()
        self._count = 0
        self._messages = []  # type: List[ServiceBusMessage]
    def __repr__(self):
        # type: () -> str
        batch_repr = "max_size_in_bytes={}, message_count={}".format(
            self.max_size_in_bytes, self._count
        )
        return "ServiceBusMessageBatch({})".format(batch_repr)
    def __len__(self):
        # type: () -> int
        return self._count
    def _from_list(self, messages, parent_span=None):
        # type: (Iterable[ServiceBusMessage], AbstractSpan) -> None
        # Bulk-add helper used when a batch is built from an iterable
        for message in messages:
            self._add(message, parent_span)
    def _add(self, add_message, parent_span=None):
        # type: (Union[ServiceBusMessage, Mapping[str, Any], AmqpAnnotatedMessage], AbstractSpan) -> None
        """Actual add implementation. The shim exists to hide the internal parameters such as parent_span."""
        message = transform_messages_if_needed(add_message, ServiceBusMessage)
        message = cast(ServiceBusMessage, message)
        trace_message(
            message, parent_span
        )  # parent_span is e.g. if built as part of a send operation.
        message_size = (
            message.message.get_message_encoded_size()
        )
        # For a ServiceBusMessageBatch, if the encoded_message_size of event_data is < 256, then the overhead cost to
        # encode that message into the ServiceBusMessageBatch would be 5 bytes, if >= 256, it would be 8 bytes.
        size_after_add = (
            self._size
            + message_size
            + _BATCH_MESSAGE_OVERHEAD_COST[0 if (message_size < 256) else 1]
        )
        # Reject before mutating any state so a failed add leaves the batch intact
        if size_after_add > self.max_size_in_bytes:
            raise MessageSizeExceededError(
                message="ServiceBusMessageBatch has reached its size limit: {}".format(
                    self.max_size_in_bytes
                )
            )
        self.message._body_gen.append(message)  # pylint: disable=protected-access
        self._size = size_after_add
        self._count += 1
        self._messages.append(message)
    @property
    def max_size_in_bytes(self):
        # type: () -> int
        """The maximum size of bytes data that a ServiceBusMessageBatch object can hold.

        :rtype: int
        """
        return self._max_size_in_bytes
    @property
    def size_in_bytes(self):
        # type: () -> int
        """The combined size of the messages in the batch, in bytes.

        :rtype: int
        """
        return self._size
    def add_message(self, message):
        # type: (Union[ServiceBusMessage, AmqpAnnotatedMessage, Mapping[str, Any]]) -> None
        """Try to add a single Message to the batch.

        The total size of an added message is the sum of its body, properties, etc.
        If this added size results in the batch exceeding the maximum batch size, a `MessageSizeExceededError` will
        be raised.

        :param message: The Message to be added to the batch.
        :type message: Union[~azure.servicebus.ServiceBusMessage, ~azure.servicebus.amqp.AmqpAnnotatedMessage]
        :rtype: None
        :raises: :class: ~azure.servicebus.exceptions.MessageSizeExceededError, when exceeding the size limit.
        """
        return self._add(message)
class ServiceBusReceivedMessage(ServiceBusMessage):
    """
    A Service Bus Message received from service side.
    :ivar auto_renew_error: Error when AutoLockRenewer is used and it fails to renew the message lock.
    :vartype auto_renew_error: ~azure.servicebus.AutoLockRenewTimeout or ~azure.servicebus.AutoLockRenewFailed
    .. admonition:: Example:
        .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
            :start-after: [START receive_complex_message]
            :end-before: [END receive_complex_message]
            :language: python
            :dedent: 4
            :caption: Checking the properties on a received message.
    """
    def __init__(self, message, receive_mode=ServiceBusReceiveMode.PEEK_LOCK, **kwargs):
        # type: (uamqp.message.Message, Union[ServiceBusReceiveMode, str], Any) -> None
        super(ServiceBusReceivedMessage, self).__init__(None, message=message)  # type: ignore
        # In RECEIVE_AND_DELETE mode the service settles the message on delivery,
        # so the message is considered settled from the moment it is constructed.
        self._settled = receive_mode == ServiceBusReceiveMode.RECEIVE_AND_DELETE
        self._received_timestamp_utc = utc_now()
        self._is_deferred_message = kwargs.get("is_deferred_message", False)
        self._is_peeked_message = kwargs.get("is_peeked_message", False)
        self.auto_renew_error = None  # type: Optional[Exception]
        try:
            # The receiver is required for settlement/lock operations; its absence
            # means the object was constructed outside the SDK's receive path.
            self._receiver = kwargs.pop(
                "receiver"
            )  # type: Union[ServiceBusReceiver, AsyncServiceBusReceiver]
        except KeyError:
            raise TypeError(
                "ServiceBusReceivedMessage requires a receiver to be initialized. "
                + "This class should never be initialized by a user; "
                + "for outgoing messages, the ServiceBusMessage class should be utilized instead."
            )
        # Cached lock expiry time; lazily populated by locked_until_utc.
        self._expiry = None  # type: Optional[datetime.datetime]
    @property
    def _lock_expired(self):
        # type: () -> bool
        # pylint: disable=protected-access
        """
        Whether the lock on the message has expired.
        :rtype: bool
        """
        try:
            # Session receivers hold the lock at session level, not per message.
            if self._receiver.session:  # type: ignore
                raise TypeError(
                    "Session messages do not expire. Please use the Session expiry instead."
                )
        except AttributeError:  # Is not a session receiver
            pass
        if self.locked_until_utc and self.locked_until_utc <= utc_now():
            return True
        return False
    def _to_outgoing_message(self):
        # type: () -> ServiceBusMessage
        # pylint: disable=protected-access
        """Build a sendable ServiceBusMessage that reuses this message's AMQP payload."""
        return ServiceBusMessage(body=None, message=self.raw_amqp_message._to_outgoing_amqp_message())
    def __repr__(self):  # pylint: disable=too-many-branches,too-many-statements
        # type: () -> str
        # pylint: disable=bare-except
        # Each property read is individually guarded so that one unreadable field
        # cannot prevent the whole repr from being produced (repr must not raise).
        message_repr = "body={}".format(
            str(self)
        )
        try:
            message_repr += ", application_properties={}".format(self.application_properties)
        except:
            message_repr += ", application_properties=<read-error>"
        try:
            message_repr += ", session_id={}".format(self.session_id)
        except:
            message_repr += ", session_id=<read-error>"
        try:
            message_repr += ", message_id={}".format(self.message_id)
        except:
            message_repr += ", message_id=<read-error>"
        try:
            message_repr += ", content_type={}".format(self.content_type)
        except:
            message_repr += ", content_type=<read-error>"
        try:
            message_repr += ", correlation_id={}".format(self.correlation_id)
        except:
            message_repr += ", correlation_id=<read-error>"
        try:
            message_repr += ", to={}".format(self.to)
        except:
            message_repr += ", to=<read-error>"
        try:
            message_repr += ", reply_to={}".format(self.reply_to)
        except:
            message_repr += ", reply_to=<read-error>"
        try:
            message_repr += ", reply_to_session_id={}".format(self.reply_to_session_id)
        except:
            message_repr += ", reply_to_session_id=<read-error>"
        try:
            message_repr += ", subject={}".format(self.subject)
        except:
            message_repr += ", subject=<read-error>"
        try:
            message_repr += ", time_to_live={}".format(self.time_to_live)
        except:
            message_repr += ", time_to_live=<read-error>"
        try:
            message_repr += ", partition_key={}".format(self.partition_key)
        except:
            message_repr += ", partition_key=<read-error>"
        try:
            message_repr += ", scheduled_enqueue_time_utc={}".format(self.scheduled_enqueue_time_utc)
        except:
            message_repr += ", scheduled_enqueue_time_utc=<read-error>"
        try:
            message_repr += ", auto_renew_error={}".format(self.auto_renew_error)
        except:
            message_repr += ", auto_renew_error=<read-error>"
        try:
            message_repr += ", dead_letter_error_description={}".format(self.dead_letter_error_description)
        except:
            message_repr += ", dead_letter_error_description=<read-error>"
        try:
            message_repr += ", dead_letter_reason={}".format(self.dead_letter_reason)
        except:
            message_repr += ", dead_letter_reason=<read-error>"
        try:
            message_repr += ", dead_letter_source={}".format(self.dead_letter_source)
        except:
            message_repr += ", dead_letter_source=<read-error>"
        try:
            message_repr += ", delivery_count={}".format(self.delivery_count)
        except:
            message_repr += ", delivery_count=<read-error>"
        try:
            message_repr += ", enqueued_sequence_number={}".format(self.enqueued_sequence_number)
        except:
            message_repr += ", enqueued_sequence_number=<read-error>"
        try:
            message_repr += ", enqueued_time_utc={}".format(self.enqueued_time_utc)
        except:
            message_repr += ", enqueued_time_utc=<read-error>"
        try:
            message_repr += ", expires_at_utc={}".format(self.expires_at_utc)
        except:
            message_repr += ", expires_at_utc=<read-error>"
        try:
            message_repr += ", sequence_number={}".format(self.sequence_number)
        except:
            message_repr += ", sequence_number=<read-error>"
        try:
            message_repr += ", lock_token={}".format(self.lock_token)
        except:
            message_repr += ", lock_token=<read-error>"
        try:
            message_repr += ", locked_until_utc={}".format(self.locked_until_utc)
        except:
            message_repr += ", locked_until_utc=<read-error>"
        # Truncate to keep log lines bounded.
        return "ServiceBusReceivedMessage({})".format(message_repr)[:1024]
    @property
    def dead_letter_error_description(self):
        # type: () -> Optional[str]
        """
        Dead letter error description, when the message is received from a deadletter subqueue of an entity.
        :rtype: str
        """
        if self._raw_amqp_message.application_properties:
            try:
                # AttributeError covers both a missing key (get() -> None) and a
                # value that is not bytes; either way the description is absent.
                return self._raw_amqp_message.application_properties.get(  # type: ignore
                    PROPERTIES_DEAD_LETTER_ERROR_DESCRIPTION
                ).decode("UTF-8")
            except AttributeError:
                pass
        return None
    @property
    def dead_letter_reason(self):
        # type: () -> Optional[str]
        """
        Dead letter reason, when the message is received from a deadletter subqueue of an entity.
        :rtype: str
        """
        if self._raw_amqp_message.application_properties:
            try:
                return self._raw_amqp_message.application_properties.get(  # type: ignore
                    PROPERTIES_DEAD_LETTER_REASON
                ).decode("UTF-8")
            except AttributeError:
                pass
        return None
    @property
    def dead_letter_source(self):
        # type: () -> Optional[str]
        """
        The name of the queue or subscription that this message was enqueued on, before it was deadlettered.
        This property is only set in messages that have been dead-lettered and subsequently auto-forwarded
        from the dead-letter queue to another entity. Indicates the entity in which the message was dead-lettered.
        :rtype: str
        """
        if self._raw_amqp_message.annotations:
            try:
                return self._raw_amqp_message.annotations.get(_X_OPT_DEAD_LETTER_SOURCE).decode(  # type: ignore
                    "UTF-8"
                )
            except AttributeError:
                pass
        return None
    @property
    def state(self):
        # type: () -> ServiceBusMessageState
        """
        Defaults to Active. Represents the message state of the message. Can be Active, Deferred.
        or Scheduled.
        :rtype: ~azure.servicebus.ServiceBusMessageState
        """
        try:
            message_state = self._raw_amqp_message.annotations.get(MESSAGE_STATE_NAME)
            try:
                return ServiceBusMessageState(message_state)
            except ValueError:
                # Values the enum does not recognize are passed through unchanged;
                # falsy values (None/0) fall back to ACTIVE.
                return ServiceBusMessageState.ACTIVE if not message_state else message_state
        except AttributeError:
            # No annotations on the message at all.
            return ServiceBusMessageState.ACTIVE
    @property
    def delivery_count(self):
        # type: () -> Optional[int]
        """
        Number of deliveries that have been attempted for this message. The count is incremented
        when a message lock expires or the message is explicitly abandoned by the receiver.
        :rtype: int
        """
        if self._raw_amqp_message.header:
            return self._raw_amqp_message.header.delivery_count
        return None
    @property
    def enqueued_sequence_number(self):
        # type: () -> Optional[int]
        """
        For messages that have been auto-forwarded, this property reflects the sequence number that had
        first been assigned to the message at its original point of submission.
        :rtype: int
        """
        if self._raw_amqp_message.annotations:
            return self._raw_amqp_message.annotations.get(_X_OPT_ENQUEUE_SEQUENCE_NUMBER)
        return None
    @property
    def enqueued_time_utc(self):
        # type: () -> Optional[datetime.datetime]
        """
        The UTC datetime at which the message has been accepted and stored in the entity.
        :rtype: ~datetime.datetime
        """
        if self._raw_amqp_message.annotations:
            timestamp = self._raw_amqp_message.annotations.get(_X_OPT_ENQUEUED_TIME)
            if timestamp:
                # The annotation carries milliseconds since the epoch.
                in_seconds = timestamp / 1000.0
                return utc_from_timestamp(in_seconds)
        return None
    @property
    def expires_at_utc(self):
        # type: () -> Optional[datetime.datetime]
        """
        The UTC datetime at which the message is marked for removal and no longer available for retrieval
        from the entity due to expiration. Expiry is controlled by the `Message.time_to_live` property.
        This property is computed from `Message.enqueued_time_utc` + `Message.time_to_live`.
        :rtype: ~datetime.datetime
        """
        if self.enqueued_time_utc and self.time_to_live:
            return self.enqueued_time_utc + self.time_to_live
        return None
    @property
    def sequence_number(self):
        # type: () -> Optional[int]
        """
        The unique number assigned to a message by Service Bus. The sequence number is a unique 64-bit integer
        assigned to a message as it is accepted and stored by the broker and functions as its true identifier.
        For partitioned entities, the topmost 16 bits reflect the partition identifier.
        Sequence numbers monotonically increase. They roll over to 0 when the 48-64 bit range is exhausted.
        :rtype: int
        """
        if self._raw_amqp_message.annotations:
            return self._raw_amqp_message.annotations.get(_X_OPT_SEQUENCE_NUMBER)
        return None
    @property
    def lock_token(self):
        # type: () -> Optional[Union[uuid.UUID, str]]
        """
        The lock token for the current message serving as a reference to the lock that
        is being held by the broker in PEEK_LOCK mode.
        :rtype: ~uuid.UUID or str
        """
        # Settled messages hold no lock.
        if self._settled:
            return None
        if self.message.delivery_tag:
            return uuid.UUID(bytes_le=self.message.delivery_tag)
        # Peeked/deferred messages carry the token as a delivery annotation instead.
        delivery_annotations = self._raw_amqp_message.delivery_annotations
        if delivery_annotations:
            return delivery_annotations.get(_X_OPT_LOCK_TOKEN)
        return None
    @property
    def locked_until_utc(self):
        # type: () -> Optional[datetime.datetime]
        # pylint: disable=protected-access
        """
        The UTC datetime until which the message will be locked in the queue/subscription.
        When the lock expires, delivery count of the message is incremented and the message
        is again available for retrieval.
        :rtype: datetime.datetime
        """
        try:
            # Settled messages and session receivers have no per-message lock.
            if self._settled or self._receiver.session:  # type: ignore
                return None
        except AttributeError:  # not settled, and isn't session receiver.
            pass
        if self._expiry:
            return self._expiry
        if self._raw_amqp_message.annotations and _X_OPT_LOCKED_UNTIL in self._raw_amqp_message.annotations:
            # Annotation is milliseconds since the epoch; cache the parsed value.
            expiry_in_seconds = self._raw_amqp_message.annotations[_X_OPT_LOCKED_UNTIL] / 1000
            self._expiry = utc_from_timestamp(expiry_in_seconds)
        return self._expiry
| |
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports the parsing of command-line options for check-webkit-style."""
import logging
from optparse import OptionParser
import os.path
import sys
from filter import validate_filter_rules
# This module should not import anything from checker.py.
_log = logging.getLogger(__name__)
_USAGE = """usage: %prog [--help] [options] [path1] [path2] ...
Overview:
Check coding style according to WebKit style guidelines:
http://webkit.org/coding/coding-style.html
Path arguments can be files and directories. If neither a git commit nor
paths are passed, then all changes in your source control working directory
are checked.
Style errors:
This script assigns to every style error a confidence score from 1-5 and
a category name. A confidence score of 5 means the error is certainly
a problem, and 1 means it could be fine.
Category names appear in error messages in brackets, for example
[whitespace/indent]. See the options section below for an option that
displays all available categories and which are reported by default.
Filters:
Use filters to configure what errors to report. Filters are specified using
a comma-separated list of boolean filter rules. The script reports errors
in a category if the category passes the filter, as described below.
All categories start out passing. Boolean filter rules are then evaluated
from left to right, with later rules taking precedence. For example, the
rule "+foo" passes any category that starts with "foo", and "-foo" fails
any such category. The filter input "-whitespace,+whitespace/braces" fails
the category "whitespace/tab" and passes "whitespace/braces".
Examples: --filter=-whitespace,+whitespace/braces
--filter=-whitespace,-runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
Paths:
Certain style-checking behavior depends on the paths relative to
the WebKit source root of the files being checked. For example,
certain types of errors may be handled differently for files in
WebKit/gtk/webkit/ (e.g. by suppressing "readability/naming" errors
for files in this directory).
Consequently, if the path relative to the source root cannot be
determined for a file being checked, then style checking may not
work correctly for that file. This can occur, for example, if no
WebKit checkout can be found, or if the source root can be detected,
but one of the files being checked lies outside the source tree.
If a WebKit checkout can be detected and all files being checked
are in the source tree, then all paths will automatically be
converted to paths relative to the source root prior to checking.
This is also useful for display purposes.
Currently, this command can detect the source root only if the
command is run from within a WebKit checkout (i.e. if the current
working directory is below the root of a checkout). In particular,
it is not recommended to run this script from a directory outside
a checkout.
Running this script from a top-level WebKit source directory and
checking only files in the source tree will ensure that all style
checking behaves correctly -- whether or not a checkout can be
detected. This is because all file paths will already be relative
to the source root and so will not need to be converted."""
_EPILOG = ("This script can miss errors and does not substitute for "
"code review.")
# This class should not have knowledge of the flag key names.
class DefaultCommandOptionValues(object):
    """Holds the built-in defaults for the check-webkit-style command line.

    Attributes:
      output_format: A string naming the default output format.
      min_confidence: An integer giving the default minimum confidence level.
    """
    def __init__(self, min_confidence, output_format):
        self.min_confidence = min_confidence
        self.output_format = output_format
# This class should not have knowledge of the flag key names.
class CommandOptionValues(object):
    """Stores the option values passed by the user via the command line.

    Attributes:
      is_verbose: A boolean value of whether verbose logging is enabled.
      filter_rules: The list of filter rules provided by the user.
                    These rules are appended to the base rules and
                    path-specific rules and so take precedence over
                    the base filter rules, etc.
      git_commit: A string representing the git commit to check.
                  The default is None.
      min_confidence: An integer between 1 and 5 inclusive that is the
                      minimum confidence level of style errors to report.
                      The default is 1, which reports all errors.
      output_format: A string that is the output format. The supported
                     output formats are "emacs" which emacs can parse
                     and "vs7" which Microsoft Visual Studio 7 can parse.

    Raises:
      ValueError: If min_confidence is outside 1..5 or output_format is
                  not one of the supported values.
    """
    def __init__(self,
                 filter_rules=None,
                 git_commit=None,
                 diff_files=None,
                 is_verbose=False,
                 min_confidence=1,
                 output_format="emacs"):
        if filter_rules is None:
            filter_rules = []
        if (min_confidence < 1) or (min_confidence > 5):
            raise ValueError('Invalid "min_confidence" parameter: value '
                             "must be an integer between 1 and 5 inclusive. "
                             'Value given: "%s".' % min_confidence)
        if output_format not in ("emacs", "vs7"):
            raise ValueError('Invalid "output_format" parameter: '
                             'value must be "emacs" or "vs7". '
                             'Value given: "%s".' % output_format)
        self.filter_rules = filter_rules
        self.git_commit = git_commit
        self.diff_files = diff_files
        self.is_verbose = is_verbose
        self.min_confidence = min_confidence
        self.output_format = output_format

    # Useful for unit testing.
    def __eq__(self, other):
        """Return whether this instance is equal to another."""
        # Guard against unrelated types: previously comparing against an
        # arbitrary object raised AttributeError instead of returning a result.
        if not isinstance(other, CommandOptionValues):
            return NotImplemented
        return ((self.filter_rules, self.git_commit, self.diff_files,
                 self.is_verbose, self.min_confidence, self.output_format) ==
                (other.filter_rules, other.git_commit, other.diff_files,
                 other.is_verbose, other.min_confidence, other.output_format))

    # Useful for unit testing.
    def __ne__(self, other):
        # Python 2 does not automatically deduce this from __eq__();
        # propagate NotImplemented so reflected comparison can run.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
class ArgumentPrinter(object):
    """Supports the printing of check-webkit-style command arguments."""
    def _flag_pair_to_string(self, flag_key, flag_value):
        # Renders a single "--key=value" flag.
        return '--%s=%s' % (flag_key, flag_value)
    def to_flag_string(self, options):
        """Return a flag string for the given CommandOptionValues instance.

        The flags are ordered alphabetically by flag key.
        Args:
          options: A CommandOptionValues instance.
        """
        flags = {
            'min-confidence': options.min_confidence,
            'output': options.output_format,
        }
        # The filter flag is included only when user-provided rules exist.
        if options.filter_rules:
            flags['filter'] = ",".join(options.filter_rules)
        if options.git_commit:
            flags['git-commit'] = options.git_commit
        if options.diff_files:
            flags['diff_files'] = options.diff_files
        # Alphabetizing keeps the output deterministic for unit tests.
        return ' '.join(self._flag_pair_to_string(key, flags[key])
                        for key in sorted(flags))
class ArgumentParser(object):
    # FIXME: Move the documentation of the attributes to the __init__
    # docstring after making the attributes internal.
    """Supports the parsing of check-webkit-style command arguments.

    Attributes:
      create_usage: A function that accepts a DefaultCommandOptionValues
                    instance and returns a string of usage instructions.
                    Defaults to the function that generates the usage
                    string for check-webkit-style.
      default_options: A DefaultCommandOptionValues instance that provides
                       the default values for options not explicitly
                       provided by the user.
      stderr_write: A function that takes a string as a parameter and
                    serves as stderr.write. Defaults to sys.stderr.write.
                    This parameter should be specified only for unit tests.
    """

    def __init__(self,
                 all_categories,
                 default_options,
                 base_filter_rules=None,
                 mock_stderr=None,
                 usage=None):
        """Create an ArgumentParser instance.

        Args:
          all_categories: The set of all available style categories.
          default_options: See the corresponding attribute in the class
                           docstring.
        Keyword Args:
          base_filter_rules: The list of filter rules at the beginning of
                             the list of rules used to check style. This
                             list has the least precedence when checking
                             style and precedes any user-provided rules.
                             The class uses this parameter only for display
                             purposes to the user. Defaults to the empty list.
          create_usage: See the documentation of the corresponding
                        attribute in the class docstring.
          stderr_write: See the documentation of the corresponding
                        attribute in the class docstring.
        """
        if base_filter_rules is None:
            base_filter_rules = []
        stderr = sys.stderr if mock_stderr is None else mock_stderr
        if usage is None:
            usage = _USAGE
        self._all_categories = all_categories
        self._base_filter_rules = base_filter_rules
        # FIXME: Rename these to reflect that they are internal.
        self.default_options = default_options
        self.stderr_write = stderr.write
        self._parser = self._create_option_parser(stderr=stderr,
            usage=usage,
            default_min_confidence=self.default_options.min_confidence,
            default_output_format=self.default_options.output_format)

    def _create_option_parser(self, stderr, usage,
                              default_min_confidence, default_output_format):
        """Build the underlying OptionParser with all supported flags."""
        # Since the epilog string is short, it is not necessary to replace
        # the epilog string with a mock epilog string when testing.
        # For this reason, we use _EPILOG directly rather than passing it
        # as an argument like we do for the usage string.
        parser = OptionParser(usage=usage, epilog=_EPILOG)
        filter_help = ('set a filter to control what categories of style '
                       'errors to report. Specify a filter using a comma-'
                       'delimited list of boolean filter rules, for example '
                       '"--filter -whitespace,+whitespace/braces". To display '
                       'all categories and which are enabled by default, pass '
                       """no value (e.g. '-f ""' or '--filter=').""")
        parser.add_option("-f", "--filter-rules", metavar="RULES",
                          dest="filter_value", help=filter_help)
        git_commit_help = ("check all changes in the given commit. "
                           "Use 'commit_id..' to check all changes after commit_id")
        parser.add_option("-g", "--git-diff", "--git-commit",
                          metavar="COMMIT", dest="git_commit", help=git_commit_help,)
        diff_files_help = "diff the files passed on the command line rather than checking the style of every line"
        parser.add_option("--diff-files", action="store_true", dest="diff_files", default=False, help=diff_files_help)
        min_confidence_help = ("set the minimum confidence of style errors "
                               "to report. Can be an integer 1-5, with 1 "
                               "displaying all errors. Defaults to %default.")
        parser.add_option("-m", "--min-confidence", metavar="INT",
                          type="int", dest="min_confidence",
                          default=default_min_confidence,
                          help=min_confidence_help)
        output_format_help = ('set the output format, which can be "emacs" '
                              'or "vs7" (for Visual Studio). '
                              'Defaults to "%default".')
        parser.add_option("-o", "--output-format", metavar="FORMAT",
                          choices=["emacs", "vs7"],
                          dest="output_format", default=default_output_format,
                          help=output_format_help)
        verbose_help = "enable verbose logging."
        parser.add_option("-v", "--verbose", dest="is_verbose", default=False,
                          action="store_true", help=verbose_help)
        # Override OptionParser's error() method so that option help will
        # also display when an error occurs. Normally, just the usage
        # string displays and not option help.
        parser.error = self._parse_error
        # Override OptionParser's print_help() method so that help output
        # does not render to the screen while running unit tests.
        print_help = parser.print_help
        parser.print_help = lambda: print_help(file=stderr)
        return parser

    def _parse_error(self, error_message):
        """Print the help string and an error message, and exit."""
        # The method format_help() includes both the usage string and
        # the flag options.
        help = self._parser.format_help()
        # Separate help from the error message with a single blank line.
        self.stderr_write(help + "\n")
        if error_message:
            _log.error(error_message)
        # Since we are using this method to replace/override the Python
        # module optparse's OptionParser.error() method, we match its
        # behavior and exit with status code 2.
        #
        # As additional background, Python documentation says--
        #
        # "Unix programs generally use 2 for command line syntax errors
        #  and 1 for all other kind of errors."
        #
        # (from http://docs.python.org/library/sys.html#sys.exit )
        sys.exit(2)

    def _exit_with_categories(self):
        """Exit and print the style categories and default filter rules."""
        self.stderr_write('\nAll categories:\n')
        for category in sorted(self._all_categories):
            self.stderr_write('    ' + category + '\n')
        self.stderr_write('\nDefault filter rules**:\n')
        for filter_rule in sorted(self._base_filter_rules):
            self.stderr_write('    ' + filter_rule + '\n')
        self.stderr_write('\n**The command always evaluates the above rules, '
                          'and before any --filter flag.\n\n')
        sys.exit(0)

    def _parse_filter_flag(self, flag_value):
        """Parse the --filter flag, and return a list of filter rules.

        Args:
          flag_value: A string of comma-separated filter rules, for
                      example "-whitespace,+whitespace/indent".
        """
        # Renamed locals so as not to shadow the builtin filter().
        rules = []
        for uncleaned_rule in flag_value.split(','):
            rule = uncleaned_rule.strip()
            if not rule:
                continue
            rules.append(rule)
        return rules

    def parse(self, args):
        """Parse the command line arguments to check-webkit-style.

        Args:
          args: A list of command-line arguments as returned by sys.argv[1:].

        Returns:
          A tuple of (paths, options)
            paths: The list of paths to check.
            options: A CommandOptionValues instance.
        """
        (options, paths) = self._parser.parse_args(args=args)
        filter_value = options.filter_value
        git_commit = options.git_commit
        diff_files = options.diff_files
        is_verbose = options.is_verbose
        min_confidence = options.min_confidence
        output_format = options.output_format
        if filter_value is not None and not filter_value:
            # Then the user explicitly passed no filter, for
            # example "-f ''" or "--filter=".
            self._exit_with_categories()
        # Validate user-provided values.
        min_confidence = int(min_confidence)
        if (min_confidence < 1) or (min_confidence > 5):
            self._parse_error('option --min-confidence: invalid integer: '
                              '%s: value must be between 1 and 5'
                              % min_confidence)
        if filter_value:
            filter_rules = self._parse_filter_flag(filter_value)
        else:
            filter_rules = []
        try:
            validate_filter_rules(filter_rules, self._all_categories)
        # Fixed Python-2-only "except ValueError, err" syntax; the "as" form
        # is valid on Python 2.6+ and required on Python 3.
        except ValueError as err:
            self._parse_error(err)
        options = CommandOptionValues(filter_rules=filter_rules,
                                      git_commit=git_commit,
                                      diff_files=diff_files,
                                      is_verbose=is_verbose,
                                      min_confidence=min_confidence,
                                      output_format=output_format)
        return (paths, options)
| |
# Imports
import pygame
# Import game states
import escmenu
import end
import game
import options
import mainmenu
import button
import translate
import menumusic
import config
import database
import questions
import checkbox
import textbox
import player
import highscores
import listbox
import serverlist
import instructions
import lobby
import console
import packetevent
import game_mp
class Game:
def __init__(self):
self.state = 0 # state 0 = mainmenu
self.last_state = 0
# Game variables
self.players = []
self.playercount = 0
self.current_player = 0
self.has_started = False
self.question = 0
self.chosen = []
self.winner = ""
self.drawconsole = False
self.isMP = False
self.name = ""
self.index = 0
self.lobbyname = ""
self.sockets = packetevent.Client()
self.angle = 0
self.angle1 = 0
# Start PyGame
pygame.init()
pygame.font.init()
# Init game funcs
packetevent.init(self)
database.init()
config.init()
translate.init()
menumusic.init()
questions.init()
# Initiate the game window
self.width = 800
self.height = 600
# Set the resolution
self.screen = pygame.display.set_mode((self.width, self.height))
def get_player_count(self):
cnt = 0
for x in self.players:
cnt += 1
return cnt
def get_player_by_index(self, index):
for x in self.players:
if x.index == index:
return x
return None
def save(self):
# save game information in the database
sid = database.insert("INSERT INTO savegames (players, currentplayer) VALUES ('{}', '{}')".format(self.playercount, self.current_player))
# save each player in the database
for x in self.players:
x.save(sid)
# go to the main menu after saving our data
self.set_state(0)
self.has_started = False
def load(self, sid):
# load game data into the database
res = database.execute_query("SELECT * FROM savegames WHERE id = '{}'".format(sid))
# get game info
self.playercount = res[0]["players"]
self.current_player = res[0]["currentplayer"]
# get info for each player
res = database.execute_query("SELECT * FROM savegames_player WHERE sid = '{}'".format(sid))
# loop through all players and load playerdata
for x in res:
plr = player.Player(self)
plr.load(x)
self.players.append(plr)
# set state to ingame
self.set_state(2)
self.has_started = True
self.isMP = False
def get_current_player(self):
if not self.isMP:
return self.players[self.current_player]
else:
return self.get_player_by_index(self.current_player)
def set_next_player(self):
self.get_current_player().did_roll = False
self.get_current_player().did_answer = False
self.get_current_player().did_generate_question = False
self.get_current_player().dice_roll = 0
self.get_current_player().our_turn = False
self.current_player += 1
if self.current_player == self.playercount:
self.current_player = 0
self.get_current_player().our_turn = True
def set_current_player(self, idx):
self.current_player = idx
def get_last_player(self):
idx = self.current_player
if idx == 0:
idx = self.playercount - 1
else:
idx -= 1
return self.players[idx]
# sets the current game state
def set_state(self, state):
checkbox.remove(self)
textbox.remove(self)
listbox.remove(self)
self.last_state = self.state
self.state = state # update game state
if self.state == 0:
mainmenu.init(self)
elif self.state == 1:
options.init(self)
elif self.state == 2:
game.init(self)
elif self.state == 3:
end.init(self)
elif self.state == 4:
escmenu.init(self)
elif self.state == 5:
highscores.init(self)
elif self.state == 6:
serverlist.init(self)
elif self.state == 7:
instructions.init(self)
elif self.state == 8:
lobby.init(self)
elif self.state == 9:
game_mp.init(self)
# updates the game state
def update(self):
button.update(self)
checkbox.update(self)
textbox.update(self)
listbox.update(self)
if self.state == 0:
mainmenu.update(self)
elif self.state == 1:
options.update(self)
elif self.state == 2:
game.update(self)
elif self.state == 3:
end.update(self)
elif self.state == 4:
escmenu.update(self)
elif self.state == 5:
highscores.update(self)
elif self.state == 6:
serverlist.update(self)
elif self.state == 7:
instructions.update(self)
elif self.state == 8:
lobby.update(self)
elif self.state == 9:
game_mp.update(self)
# draws the current frame
def draw(self):
# draw the background
self.screen.fill((0, 0, 0))
# Draw the correct data
if self.state == 0:
mainmenu.draw(self)
elif self.state == 1:
options.draw(self)
elif self.state == 2:
game.draw(self)
elif self.state == 3:
end.draw(self)
elif self.state == 4:
escmenu.draw(self)
elif self.state == 5:
highscores.draw(self)
elif self.state == 6:
serverlist.draw(self)
elif self.state == 7:
instructions.draw(self)
elif self.state == 8:
lobby.draw(self)
elif self.state == 9:
game_mp.draw(self)
if self.drawconsole:
console.draw(self)
# Flip buffer
pygame.display.flip()
# game loop
def loop(self):
    """Main loop: update and draw every frame.

    NOTE(review): process_events() always returns True, so this loop only
    ends when exit() terminates the interpreter via quit().
    """
    while process_events():
        self.update()
        self.draw()
# Add all functions that require shutdown here
def exit(self):
    """Shut down subsystems, then terminate the process."""
    config.quit()
    database.quit()
    # quit() is the site builtin (raises SystemExit); presumably intended
    # over pygame.quit() -- confirm.
    quit()
# Module-level singleton used by process_events() and Program().
_game = Game()
def process_events():
    """Pump the pygame event queue once.

    Handles window close (shuts the game down), mouse clicks (dispatched to
    every widget type) and the escape key, which toggles between the in-game
    state (2) and the escape menu (4).  Always returns True so the caller's
    loop keeps running.
    """
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            _game.exit()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            button.click(event.pos)
            checkbox.click(event.pos)
            textbox.click(event.pos)
            listbox.click(event.pos)
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
            if _game.state == 2:
                _game.state = 4
            elif _game.state == 4:
                _game.state = 2
        # elif event.type == pygame.KEYDOWN and event.key == 96:
        #     _game.drawconsole = not _game.drawconsole
        #     if _game.drawconsole:
        #         console.init(_game)
        #     else:
        #         console.remove(_game)
        elif event.type == pygame.KEYDOWN:
            # NOTE(review): the second pygame.event.get() drains the queue
            # again, discarding the remaining events of this frame -- confirm
            # this is intended.
            textbox.key_event(event, pygame.event.get())
    return True
# main function
def Program():
    """Entry point: run the module-level game instance's main loop."""
    # _game = Game()
    _game.loop()
# Start the game immediately when the module runs (no __main__ guard).
Program()
| |
from __future__ import absolute_import, division, print_function
import json
from toolz.curried import map, take, pipe, pluck, get, concat, filter
from collections import Iterator, Iterable
import os
from contextlib import contextmanager
from datashape import discover, var, dshape, Record, DataShape
from datashape import coretypes as ct
from datashape.dispatch import dispatch
import gzip
import datetime
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..chunks import chunks
from ..utils import tuples_to_records
class JSON(object):
    """ Proxy for a JSON file

    Parameters
    ----------
    path : str
        Path to file on disk

    See Also
    --------
    JSONLines - Line-delimited JSON
    """
    def __init__(self, path):
        # Only the path is stored; the file is opened lazily by the
        # discover/convert/append functions registered below.
        self.path = path
class JSONLines(object):
    """ Proxy for a line-delimited JSON file

    Each line in the file is a valid JSON entry

    Parameters
    ----------
    path : str
        Path to file on disk

    See Also
    --------
    JSON - Not-line-delimited JSON
    """
    def __init__(self, path):
        # Only the path is stored; the file is opened lazily by the
        # discover/convert/append functions registered below.
        self.path = path
def date_to_datetime_dshape(ds):
    """Return *ds* with every ``date`` field of a Record measure widened
    to ``datetime``; non-record measures pass through unchanged."""
    measure = ds.measure
    if isinstance(measure, Record):
        widened = [
            [name, ct.datetime_ if typ == ct.date_ else typ]
            for name, typ in measure.parameters[0]
        ]
        measure = Record(widened)
    return DataShape(*(ds.shape + (measure,)))
@discover.register(JSON)
def discover_json(j, **kwargs):
    """Discover the datashape of a (non line-delimited) JSON file."""
    data = json_load(j.path)
    ds = discover(data)
    # JSON has no native date type, so promote date fields to datetime.
    return date_to_datetime_dshape(ds)
def nonempty(line):
    """True when *line* contains anything other than whitespace."""
    return bool(line.strip())
@discover.register(JSONLines)
def discover_jsonlines(j, n=10, encoding='utf-8', **kwargs):
    """Discover the datashape of a line-delimited JSON file.

    Samples at most *n* non-empty lines.  When fewer than *n* records exist
    the shape is discovered as fixed-length; otherwise it is reported as
    variable-length (``var``).
    """
    with json_lines(j.path, encoding=encoding) as lines:
        data = pipe(lines, filter(nonempty), map(json.loads), take(n), list)
    if len(data) < n:
        ds = discover(data)
    else:
        ds = var * discover(data).subshape[0]
    return date_to_datetime_dshape(ds)
@convert.register(list, JSON)
def json_to_list(j, dshape=None, **kwargs):
    """Load the whole JSON file into memory as a Python object."""
    return json_load(j.path, **kwargs)
@convert.register(Iterator, JSONLines)
def json_lines_to_iterator(j, encoding='utf-8', **kwargs):
    """Lazily yield one parsed record per non-empty line of the file."""
    with json_lines(j.path, encoding=encoding) as lines:
        for item in pipe(lines, filter(nonempty), map(json.loads)):
            yield item
@contextmanager
def json_lines(path, encoding='utf-8'):
    """Context manager yielding the decoded lines of *path*.

    Transparently handles gzip compression (``.gz`` suffix); the underlying
    file handle is always closed on exit.
    """
    gzipped = path.split(os.path.extsep)[-1] == 'gz'
    f = gzip.open(path) if gzipped else open(path)
    try:
        if gzipped:
            yield (raw.decode(encoding) for raw in f)
        else:
            yield f
    finally:
        f.close()
def json_load(path, encoding='utf-8', **kwargs):
    """ Return data of a json file

    Handles compression like gzip.  The file handle is closed even when
    reading or parsing fails (the original leaked it on error).
    """
    gzipped = path.split(os.path.extsep)[-1] == 'gz'
    f = gzip.open(path) if gzipped else open(path)
    try:
        s = f.read()
        if gzipped:
            s = s.decode(encoding)
    finally:
        f.close()
    return json.loads(s)
@append.register(JSONLines, object)
def object_to_jsonlines(j, o, **kwargs):
    """Append an arbitrary object by first converting it to an Iterator."""
    return append(j, convert(Iterator, o, **kwargs), **kwargs)
@append.register(JSONLines, Iterator)
def iterator_to_json_lines(j, seq, dshape=None, encoding='utf-8', **kwargs):
    """Append records from *seq* to a line-delimited JSON file.

    Tuples/lists are converted to records using *dshape* first.  Fixes two
    issues in the original: an empty iterator no longer raises
    StopIteration, and the output handle is closed even when serialisation
    fails mid-way.
    """
    seq = iter(seq)
    _missing = object()  # sentinel: None is a legitimate record (JSON null)
    row = next(seq, _missing)
    if row is _missing:
        return j  # nothing to append
    seq = concat([[row], seq])
    if not isinstance(row, (dict, str)) and isinstance(row, Iterable):
        seq = tuples_to_records(dshape, seq)
    lines = (json.dumps(item, default=json_dumps) for item in seq)
    # Open file (append mode; bytes for gzip, text otherwise)
    if j.path.split(os.path.extsep)[-1] == 'gz':
        f = gzip.open(j.path, 'ab')
        lines2 = (line.encode(encoding) for line in lines)
        endl = b'\n'
    else:
        f = open(j.path, 'a')
        lines2 = lines
        endl = '\n'
    try:
        for line in lines2:
            f.write(line)
            f.write(endl)
    finally:
        f.close()
    return j
@append.register(JSON, list)
def list_to_json(j, seq, dshape=None, encoding='utf-8', **kwargs):
    """Write *seq* to a JSON file as one JSON document.

    Refuses to overwrite a file that already holds data.  File handles are
    managed with ``with`` so they are closed even when writing fails.
    """
    if not isinstance(seq[0], (dict, str)) and isinstance(seq[0], Iterable):
        seq = list(tuples_to_records(dshape, seq))
    if os.path.exists(j.path):
        with open(j.path) as f:
            if json.load(f):
                raise ValueError("Can only append to empty JSON File.\n"
                                 "Either remove contents from this file, save to a new file \n"
                                 "or use line-delimited JSON format.\n"
                                 "Consider using the jsonlines:// protocol, e.g.\n"
                                 "\tinto('jsonlines://%s', your-data)" % j.path)
    text = json.dumps(seq, default=json_dumps)
    if j.path.split(os.path.extsep)[-1] == 'gz':
        with gzip.open(j.path, 'wb') as f:
            f.write(text.encode(encoding))
    else:
        with open(j.path, 'w') as f:
            f.write(text)
    return j
@append.register(JSON, object)
def object_to_json(j, o, **kwargs):
    """Append an arbitrary object by first converting it to a list."""
    return append(j, convert(list, o, **kwargs), **kwargs)
# Raw strings: '\.' in a normal string literal is an invalid escape
# (DeprecationWarning on modern Pythons).
@resource.register(r'json://.*\.json(\.gz)?', priority=11)
def resource_json(path, **kwargs):
    """Resource entry point for explicit ``json://`` URIs."""
    # Strip the protocol only when it is actually a prefix; the original
    # substring test could mangle paths containing 'json://' elsewhere.
    if path.startswith('json://'):
        path = path[len('json://'):]
    return JSON(path)
# Raw strings for the regex patterns (invalid '\.' escapes otherwise).
@resource.register(r'.*\.jsonlines(\.gz)?', priority=11)
@resource.register(r'jsonlines://.*\.json(\.gz)?', priority=11)
def resource_jsonlines(path, **kwargs):
    """Resource entry point for line-delimited JSON URIs and files."""
    # Strip the protocol only when it is actually a prefix.
    if path.startswith('jsonlines://'):
        path = path[len('jsonlines://'):]
    return JSONLines(path)
@resource.register(r'.*\.json(\.gz)?')
def resource_json_ambiguous(path, **kwargs):
    """ Try to guess if this file is line-delimited or not

    Heuristic: a single-line file is plain JSON; otherwise, if the first
    line parses as JSON on its own the file is treated as line-delimited.
    Replaces the original's bare ``except:`` with ``except ValueError`` so
    real errors are not swallowed, and uses ``with`` so the probe handle is
    always closed.
    """
    if os.path.exists(path):
        with open(path) as f:
            one = next(f)
            try:
                next(f)
            except StopIteration:  # only one line
                return resource_json(path, **kwargs)
        try:
            json.loads(one)
        except ValueError:  # first line alone is not valid JSON
            return resource_json(path, **kwargs)
        return resource_jsonlines(path, **kwargs)
    # File doesn't exist, is the dshape variable length?
    dshape = kwargs.get('expected_dshape', None)
    if dshape and dshape[0] == var:
        return resource_jsonlines(path, **kwargs)
    return resource_json(path, **kwargs)
@dispatch(datetime.datetime)
def json_dumps(dt):
    """Serialise a datetime as ISO-8601; naive values get a 'Z' suffix."""
    s = dt.isoformat()
    if not dt.tzname():
        # No tz info: mark as UTC -- presumably naive datetimes are UTC here.
        s = s + 'Z'
    return s
@dispatch(datetime.date)
def json_dumps(dt):
    """Serialise a date as ISO-8601 (YYYY-MM-DD)."""
    return dt.isoformat()
@convert.register(chunks(list), chunks(JSON))
def convert_glob_of_jsons_into_chunks_of_lists(jsons, **kwargs):
    """Convert a collection of JSON files into chunked lists, lazily."""
    def _():
        # Deferred so files are only read when the chunks are iterated.
        return concat(convert(chunks(list), js, **kwargs) for js in jsons)
    return chunks(list)(_)
@convert.register(chunks(Iterator), chunks(JSONLines))
def convert_glob_of_jsonlines_into_chunks_of_iterators(jsons, **kwargs):
    """Convert a collection of JSONLines files into chunked iterators, lazily.

    Renamed: the original reused the name of the chunks-of-lists converter
    above, silently shadowing it at module level.  Registration happens via
    the decorator, so the rename is safe for all callers.
    """
    def _():
        # Deferred so files are only read when the chunks are iterated.
        return concat(convert(chunks(Iterator), js, **kwargs) for js in jsons)
    return chunks(Iterator)(_)
# Register JSONLines as an out-of-core (streamable) type.
ooc_types.add(JSONLines)
| |
#!/usr/bin/python
import argparse, sys, os
from shutil import rmtree, move
from multiprocessing import cpu_count, Pool, Lock, Queue
from tempfile import mkdtemp, gettempdir
import math
import re
##################################
# A python parallel approach to
# mergesort
def do_inputs():
    """Parse command-line options, open the input stream and (unless
    sorting in memory) create the temporary working directory.

    Returns the argparse namespace; ``args.input`` is an open file object
    (or stdin when '-' was given).
    """
    # Setup command line inputs
    parser=argparse.ArgumentParser(description="Merge sort. Low memory multi-threaded. Not as fast as unix sort.")
    parser.add_argument('input',help="INPUT FILE or '-' for STDIN")
    parser.add_argument('-o','--output',help="OUTPUTFILE or STDOUT if not set")
    parser.add_argument('--threads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
    # Temporary working directory step 1 of 3 - Definition
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
    group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
    group.add_argument('--memory','-m',action='store_true',help="Do sort in memory")
    parser.add_argument('--buffer_size',default=100000,type=int,help="INT Number of lines to sort at at time")
    parser.add_argument('--fields','-f',help="Search fields '1,2n,3ni' would do field 1 first, then field 2 numerically,then field 3 numerically but inverted")
    parser.add_argument('--maxbytes',type=int,default=100000000,help="Max temporary file to pull into memory\n")
    args = parser.parse_args()
    # Setup inputs
    if args.input == '-':
        args.input = sys.stdin
    else:
        args.input = open(args.input)
    # Temporary working directory step 2 of 3 - Creation
    if not args.memory:
        setup_tempdir(args)
    return args
def main():
    """Drive the sort: split input into sorted chunks (optionally in
    parallel), merge the chunks bottom-up (on disk or in memory), then
    stream the final result to the chosen output."""
    #do our inputs
    args = do_inputs()
    #1. Stream through the initial sorts
    buffer = []
    cnt = 0
    results = [] #only filled if we are using args.memory
    if args.threads > 1:
        p = Pool(processes=args.threads)
    for line in args.input:
        buffer.append(line)
        if len(buffer) >= args.buffer_size:
            cnt += 1
            if args.threads > 1:
                r = p.apply_async(process_buffer,args=(buffer,cnt,args))
                results.append(r)
            else:
                r = process_buffer(buffer,cnt,args) #consider making copy of buffer here with slice but i think it gets a new copy anyways on multiprocessing
                # Wrap in a Queue so single- and multi-threaded results share
                # the same .get() interface downstream.
                q = Queue()
                q.put(r)
                results.append(q)
            buffer = []
    # Flush the final partial buffer, if any.
    if len(buffer) > 0:
        cnt += 1
        if args.threads > 1:
            r = p.apply_async(process_buffer,args=(buffer,cnt,args))
            results.append(r)
        else:
            r = process_buffer(buffer,cnt,args)
            q = Queue()
            q.put(r)
            results.append(q)
    if args.threads > 1:
        p.close()
        p.join()
    #2. merge the files from the bottom up
    if not args.memory:
        while cnt != 1:
            cnt = bottom_up(args,cnt)
        #3. Do output
        # Setup outputs
        if args.output:
            args.output = open(args.output,'w')
        else:
            args.output = sys.stdout
        # After the merge rounds the single sorted file is l.1.
        with open(args.tempdir+'/l.1') as inf:
            for line in inf:
                args.output.write(line)
        args.output.close()
    else: # do the memory way
        while len(results) > 1:
            results = bottom_up_mem(results,args)
        #for r in results:
        #v = list(results[0].get())
        #sys.exit()
        if args.output:
            args.output = open(args.output,'w')
        else:
            args.output = sys.stdout
        for vals in results:
            for val in vals.get():
                args.output.write(val)
        args.output.close()
    # Temporary working directory step 3 of 3 - Cleanup
    if not args.specific_tempdir and not args.memory:
        rmtree(args.tempdir)
def bottom_up_mem(results,args):
    """One round of pairwise merging of in-memory sorted chunks.

    *results* holds Queue/AsyncResult objects whose ``.get()`` yields a
    sorted list of lines.  Returns a roughly half-sized list of the same
    kind; call repeatedly until only one result remains.
    """
    rlen = len(results)  # NOTE(review): unused
    newresults = []
    if args.threads > 1:
        p = Pool(processes=args.threads)
    while(len(results) > 0):
        r1t = results.pop(0)
        r1 = r1t.get()
        r2 = None  # odd chunk out is passed through by merge_mem
        if len(results) > 0:
            r2t = results.pop(0)
            r2 = r2t.get()
        if args.threads > 1:
            r = p.apply_async(merge_mem,args=(r1,r2,args))
            newresults.append(r)
        else:
            r = merge_mem(r1,r2,args)
            q = Queue()
            q.put(r)
            newresults.append(q)
    #now lets move the names back to 'l' type to make recursion easy
    if args.threads > 1:
        p.close()
        p.join()
    return newresults
def do_compare(line1, line2, args):
    """Return True when *line1* sorts strictly before *line2*.

    Without ``args.fields`` this is a plain string comparison.  Otherwise
    ``args.fields`` is a comma-separated list of 1-based field specs, each
    optionally suffixed 'n' (numeric) and/or 'i' (inverted).  Fields are
    compared in order; the first differing field decides.  A full tie falls
    back to whole-line string comparison.

    Fixes vs. original: raw strings for the regexes and a de-duplicated
    error message ("sort on sort on").
    """
    if not args.fields:
        return line1 < line2
    fields = args.fields.split(',')
    lf1 = line1.rstrip().split("\t")
    lf2 = line2.rstrip().split("\t")
    for f in fields:
        # 'i' inverts the sense of this field's comparison
        inverted = bool(re.search(r'i', f))
        myTrue = not inverted
        myFalse = inverted
        m = re.search(r'(\d+)', f)
        if not m:
            sys.stderr.write("ERROR: must specify a field index (base-1) to sort on with fields option\n")
            sys.exit()
        i = int(m.group(1)) - 1
        if re.search(r'n', f):
            v1, v2 = float(lf1[i]), float(lf2[i])
        else:
            v1, v2 = lf1[i], lf2[i]
        if v1 < v2:
            return myTrue
        elif v1 > v2:
            return myFalse
    # All requested fields tied: default to string sort on the whole line.
    return line1 < line2
def merge_mem(r1,r2,args):
    """Merge two sorted lists of lines into one sorted list.

    When *r2* is absent (odd chunk count), *r1* is returned unchanged.
    Uses get_line() with single-element-list cursors so the same code path
    is shared with the file-based merge.
    """
    # case where f2 is not there is first
    if not r2:
        return r1
    # case where we merge by file
    r1ind = [0]  # mutable cursor into r1
    r2ind = [0]  # mutable cursor into r2
    r1len = len(r1)
    r2len = len(r2)
    used1 = True  # NOTE(review): unused
    used2 = True  # NOTE(review): unused
    inf1 = None  # no file handles in the in-memory path
    inf2 = None
    line1 = get_line(inf1,r1,r1ind,r1len)
    line2 = get_line(inf2,r2,r2ind,r2len)
    rout = []
    while True:
        if not line1 and not line2:
            break #at both EOFs
        if line1 and not line2:
            rout.append(line1)
            # finish 1
            while True:
                line1 = get_line(inf1,r1,r1ind,r1len)
                if not line1: break
                rout.append(line1)
            break
        elif line2 and not line1:
            rout.append(line2)
            # finish 2
            while True:
                line2 = get_line(inf2,r2,r2ind,r2len)
                if not line2: break
                rout.append(line2)
            break
        elif do_compare(line1,line2,args):
            rout.append(line1)
            line1 = get_line(inf1,r1,r1ind,r1len)
        else:
            rout.append(line2)
            line2 = get_line(inf2,r2,r2ind,r2len)
    # Finished merge now clean up
    return rout
def merge_files(f1,f2,cnt,args):
    """Merge two sorted temp files into ``<tempdir>/m.<cnt>``.

    When *f2* is None the single file is just renamed.  If both files are
    under ``args.maxbytes`` they are read fully into memory; otherwise they
    are streamed line by line.  Inputs are deleted after a real merge.
    """
    # case where f2 is not there is first
    if not f2:
        move(f1,args.tempdir+'/m.'+str(cnt))
        return
    # case where we merge by file
    of = open(args.tempdir+'/m.'+str(cnt),'w')
    f1size = os.path.getsize(f1)
    f2size = os.path.getsize(f2)
    useFiles = True
    inf1 = None
    inf2 = None
    f1lines =None
    f2lines = None
    f1ind = [0]  # mutable cursor used by get_line for the in-memory case
    f2ind = [0]
    f1len = None
    f2len = None
    if f1size <= args.maxbytes and f2size <= args.maxbytes:
        useFiles = False
    if not useFiles: #get arrays ready if the data is small
        f1lines = []
        f2lines = []
        # do them all in memory
        with open(f1) as inf:
            for line in inf: f1lines.append(line)
        f1len = len(f1lines)
        with open(f2) as inf:
            for line in inf: f2lines.append(line)
        f2len = len(f2lines)
    else:
        inf1 = open(f1)
        inf2 = open(f2)
    used1 = True  # NOTE(review): unused
    used2 = True  # NOTE(review): unused
    line1 = get_line(inf1,f1lines,f1ind,f1len)
    line2 = get_line(inf2,f2lines,f2ind,f2len)
    #line1 = inf1.readline()
    #line2 = inf2.readline()
    while True:
        if not line1 and not line2:
            break #at both EOFs
        if line1 and not line2:
            of.write(line1)
            # finish 1
            while True:
                line1 = get_line(inf1,f1lines,f1ind,f1len)
                if not line1: break
                of.write(line1)
            break
        elif line2 and not line1:
            of.write(line2)
            # finish 2
            while True:
                line2 = get_line(inf2,f2lines,f2ind,f2len)
                if not line2: break
                of.write(line2)
            break
        elif do_compare(line1,line2,args):
            of.write(line1)
            line1 = get_line(inf1,f1lines,f1ind,f1len)
        else:
            of.write(line2)
            line2 = get_line(inf2,f2lines,f2ind,f2len)
    # Finished merge now clean up
    if useFiles:
        inf1.close()
        inf2.close()
    of.close()
    os.remove(f1)
    os.remove(f2)
def get_line(fh, farr, find, flen):
    """Fetch the next line from either an open file handle or a list buffer.

    *find* is a single-element list acting as a mutable cursor into *farr*.
    Returns '' at file EOF, and None when the list cursor is exhausted or
    neither source is available.
    """
    if fh:
        return fh.readline()
    if not farr:
        return None
    cursor = find[0]
    if cursor >= flen:
        return None
    find[0] = cursor + 1
    return farr[cursor]
def bottom_up(args,cnt):
    """One round of pairwise merging of the on-disk chunk files l.1..l.cnt.

    Merged output goes to m.* files which are renamed back to l.* so the
    round can be repeated.  Returns the new chunk count.
    """
    if args.threads > 1:
        p = Pool(processes=args.threads)
    files = [args.tempdir+'/l.'+str(x+1) for x in range(0,cnt)]
    newcount = 0
    while(len(files) > 0):
        f1 = files.pop(0)
        f2 = None  # odd file out gets renamed by merge_files
        if len(files) > 0:
            f2 = files.pop(0)
        newcount += 1
        if args.threads > 1:
            p.apply_async(merge_files,args=(f1,f2,newcount,args))
        else:
            merge_files(f1,f2,newcount,args)
    #now lets move the names back to 'l' type to make recursion easy
    if args.threads > 1:
        p.close()
        p.join()
    for i in range(1,newcount+1):
        move(args.tempdir+'/m.'+str(i),args.tempdir+'/l.'+str(i))
    return newcount
def process_buffer(buffer, cnt, args):
    """Sort one buffer of lines; keep it in memory or spill to disk.

    Returns the sorted list when --memory is set; otherwise writes it to
    ``<tempdir>/l.<cnt>`` and returns None.  The unreachable statements
    after the final return in the original have been removed, and the
    output file is closed via ``with`` even if a write fails.
    """
    sorted_buffer = merge_sort(buffer, args)
    if args.memory:
        # we just need the buffer if we're staying in memory lane
        return sorted_buffer
    with open(args.tempdir + "/l." + str(cnt), 'w') as of:
        for line in sorted_buffer:
            of.write(line)
    return None
def write_temp(s):
    """No-op placeholder; kept for compatibility, does nothing."""
    return None
def merge_sort(a, args):
    """Recursively merge-sort list *a*, ordering lines with do_compare."""
    if len(a) <= 1:
        return a
    mid = len(a) // 2
    left = merge_sort(a[:mid], args)
    right = merge_sort(a[mid:], args)
    return merge(left, right, args)
def merge(left, right, args):
    """Merge two sorted lists into one, ordering lines with do_compare.

    Rewritten with index cursors: the original popped from the front of
    each list, which is O(n) per pop and made every merge quadratic.  The
    original also consumed its inputs; callers (merge_sort) only pass
    throwaway slices, so leaving them intact is safe.  Tie-breaking is
    preserved: when do_compare is False (equal or greater) the right
    element is taken first.
    """
    merged = []
    i = j = 0
    nleft, nright = len(left), len(right)
    while i < nleft and j < nright:
        if do_compare(left[i], right[j], args):
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def setup_tempdir(args):
    """Create (or adopt) the temporary working directory.

    Stores the resulting path in ``args.tempdir`` and exits with an error
    message when the directory cannot be created.
    """
    if args.specific_tempdir:
        # Caller-supplied directory: create if needed; kept after execution.
        if not os.path.exists(args.specific_tempdir):
            os.makedirs(args.specific_tempdir.rstrip('/'))
        args.tempdir = args.specific_tempdir.rstrip('/')
        if not os.path.exists(args.specific_tempdir.rstrip('/')):
            sys.stderr.write("ERROR: Problem creating temporary directory\n")
            sys.exit()
    else:
        # Fresh unique directory under --tempdir; removed at cleanup.
        args.tempdir = mkdtemp(prefix="weirathe.",dir=args.tempdir.rstrip('/'))
        if not os.path.exists(args.tempdir.rstrip('/')):
            sys.stderr.write("ERROR: Problem creating temporary directory\n")
            sys.exit()
    if not os.path.exists(args.tempdir):
        sys.stderr.write("ERROR: Problem creating temporary directory\n")
        sys.exit()
    return
# Script entry point.
if __name__=="__main__":
    main()
| |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute import rescue as rescue_v21
from nova import compute
import nova.conf
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
CONF = nova.conf.CONF

# Instance uuid used throughout these tests.
UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
def rescue(self, context, instance, rescue_password=None,
           rescue_image_ref=None):
    """Stub for compute.api.API.rescue: accept the call and do nothing."""
    pass
def unrescue(self, context, instance):
    """Stub for compute.api.API.unrescue: accept the call and do nothing."""
    pass
def fake_compute_get(*args, **kwargs):
    """Stub for compute.api.API.get returning a fake instance object.

    args[1] is the request context; the instance always carries the
    module-level UUID.
    """
    return fake_instance.fake_instance_obj(args[1], id=1,
                                           uuid=UUID, **kwargs)
class RescueTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-rescue API (rescue/unrescue server actions)."""

    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'

    def setUp(self):
        super(RescueTestV21, self).setUp()
        # Stub the compute API so no real compute calls are made.
        self.stubs.Set(compute.api.API, "get", fake_compute_get)
        self.stubs.Set(compute.api.API, "rescue", rescue)
        self.stubs.Set(compute.api.API, "unrescue", unrescue)
        self.controller = self._set_up_controller()
        self.fake_req = fakes.HTTPRequest.blank('')

    def _set_up_controller(self):
        # Hook so derived test classes can substitute another controller.
        return rescue_v21.RescueController()

    def test_rescue_from_locked_server(self):
        def fake_rescue_from_locked_server(self, context,
                                           instance, rescue_password=None,
                                           rescue_image_ref=None):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])

        self.stubs.Set(compute.api.API,
                       'rescue',
                       fake_rescue_from_locked_server)
        body = {"rescue": {"adminPass": "AABBCC112233"}}
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)

    def test_rescue_with_preset_password(self):
        body = {"rescue": {"adminPass": "AABBCC112233"}}
        resp = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual("AABBCC112233", resp['adminPass'])

    def test_rescue_generates_password(self):
        body = dict(rescue=None)
        resp = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual(CONF.password_length, len(resp['adminPass']))

    def test_rescue_of_rescued_instance(self):
        body = dict(rescue=None)

        def fake_rescue(*args, **kwargs):
            raise exception.InstanceInvalidState('fake message')

        self.stubs.Set(compute.api.API, "rescue", fake_rescue)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)

    def test_unrescue(self):
        body = dict(unrescue=None)
        resp = self.controller._unrescue(self.fake_req, UUID, body=body)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller,
                      rescue_v21.RescueController):
            status_int = self.controller._unrescue.wsgi_code
        else:
            status_int = resp.status_int
        self.assertEqual(202, status_int)

    def test_unrescue_from_locked_server(self):
        def fake_unrescue_from_locked_server(self, context,
                                             instance):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])

        self.stubs.Set(compute.api.API,
                       'unrescue',
                       fake_unrescue_from_locked_server)
        body = dict(unrescue=None)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._unrescue,
                          self.fake_req, UUID, body=body)

    def test_unrescue_of_active_instance(self):
        body = dict(unrescue=None)

        def fake_unrescue(*args, **kwargs):
            raise exception.InstanceInvalidState('fake message')

        self.stubs.Set(compute.api.API, "unrescue", fake_unrescue)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._unrescue,
                          self.fake_req, UUID, body=body)

    def test_rescue_raises_unrescuable(self):
        body = dict(rescue=None)

        def fake_rescue(*args, **kwargs):
            raise exception.InstanceNotRescuable('fake message')

        self.stubs.Set(compute.api.API, "rescue", fake_rescue)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)

    def test_rescue_with_bad_image_specified(self):
        body = {"rescue": {"adminPass": "ABC123",
                           "rescue_image_ref": "img-id"}}
        self.assertRaises(exception.ValidationError,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)

    def test_rescue_with_imageRef_as_full_url(self):
        # Full URLs are not accepted as image refs, only bare uuids.
        image_href = ('http://localhost/v2/fake/images/'
                      '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
        body = {"rescue": {"adminPass": "ABC123",
                           "rescue_image_ref": image_href}}
        self.assertRaises(exception.ValidationError,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)

    def test_rescue_with_imageRef_as_empty_string(self):
        body = {"rescue": {"adminPass": "ABC123",
                           "rescue_image_ref": ''}}
        self.assertRaises(exception.ValidationError,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)

    @mock.patch('nova.compute.api.API.rescue')
    @mock.patch('nova.api.openstack.common.get_instance')
    def test_rescue_with_image_specified(
            self, get_instance_mock, mock_compute_api_rescue):
        instance = fake_instance.fake_instance_obj(
            self.fake_req.environ['nova.context'])
        get_instance_mock.return_value = instance
        body = {"rescue": {"adminPass": "ABC123",
                           "rescue_image_ref": self.image_uuid}}
        resp_json = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual("ABC123", resp_json['adminPass'])
        mock_compute_api_rescue.assert_called_with(
            mock.ANY,
            instance,
            rescue_password=u'ABC123',
            rescue_image_ref=self.image_uuid)

    @mock.patch('nova.compute.api.API.rescue')
    @mock.patch('nova.api.openstack.common.get_instance')
    def test_rescue_without_image_specified(
            self, get_instance_mock, mock_compute_api_rescue):
        instance = fake_instance.fake_instance_obj(
            self.fake_req.environ['nova.context'])
        get_instance_mock.return_value = instance
        body = {"rescue": {"adminPass": "ABC123"}}
        resp_json = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual("ABC123", resp_json['adminPass'])
        mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
                                                   rescue_password=u'ABC123',
                                                   rescue_image_ref=None)

    def test_rescue_with_none(self):
        body = dict(rescue=None)
        resp = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual(CONF.password_length, len(resp['adminPass']))

    def test_rescue_with_empty_dict(self):
        body = dict(rescue=dict())
        resp = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertEqual(CONF.password_length, len(resp['adminPass']))

    def test_rescue_disable_password(self):
        # With instance passwords disabled no adminPass should be returned.
        self.flags(enable_instance_password=False)
        body = dict(rescue=None)
        resp_json = self.controller._rescue(self.fake_req, UUID, body=body)
        self.assertNotIn('adminPass', resp_json)

    def test_rescue_with_invalid_property(self):
        body = {"rescue": {"test": "test"}}
        self.assertRaises(exception.ValidationError,
                          self.controller._rescue,
                          self.fake_req, UUID, body=body)
class RescuePolicyEnforcementV21(test.NoDBTestCase):
    """Policy-enforcement tests for the v2.1 os-rescue API."""

    def setUp(self):
        super(RescuePolicyEnforcementV21, self).setUp()
        self.controller = rescue_v21.RescueController()
        self.req = fakes.HTTPRequest.blank('')

    @mock.patch('nova.api.openstack.common.get_instance')
    def test_rescue_policy_failed_with_other_project(self, get_instance_mock):
        get_instance_mock.return_value = fake_instance.fake_instance_obj(
            self.req.environ['nova.context'],
            project_id=self.req.environ['nova.context'].project_id)
        rule_name = "os_compute_api:os-rescue"
        self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
        body = {"rescue": {"adminPass": "AABBCC112233"}}
        # Change the project_id in request context.
        self.req.environ['nova.context'].project_id = 'other-project'
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._rescue, self.req, fakes.FAKE_UUID,
            body=body)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    @mock.patch('nova.api.openstack.common.get_instance')
    def test_rescue_overridden_policy_failed_with_other_user_in_same_project(
            self, get_instance_mock):
        get_instance_mock.return_value = (
            fake_instance.fake_instance_obj(self.req.environ['nova.context']))
        rule_name = "os_compute_api:os-rescue"
        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
        # Change the user_id in request context.
        self.req.environ['nova.context'].user_id = 'other-user'
        body = {"rescue": {"adminPass": "AABBCC112233"}}
        exc = self.assertRaises(exception.PolicyNotAuthorized,
                                self.controller._rescue, self.req,
                                fakes.FAKE_UUID, body=body)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    @mock.patch('nova.compute.api.API.rescue')
    @mock.patch('nova.api.openstack.common.get_instance')
    def test_lock_overridden_policy_pass_with_same_user(self,
                                                        get_instance_mock,
                                                        rescue_mock):
        # Same user as the request context: the user_id rule should pass.
        instance = fake_instance.fake_instance_obj(
            self.req.environ['nova.context'],
            user_id=self.req.environ['nova.context'].user_id)
        get_instance_mock.return_value = instance
        rule_name = "os_compute_api:os-rescue"
        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
        body = {"rescue": {"adminPass": "AABBCC112233"}}
        self.controller._rescue(self.req, fakes.FAKE_UUID, body=body)
        rescue_mock.assert_called_once_with(self.req.environ['nova.context'],
                                            instance,
                                            rescue_password='AABBCC112233',
                                            rescue_image_ref=None)

    def test_unrescue_policy_failed(self):
        rule_name = "os_compute_api:os-rescue"
        self.policy.set_rules({rule_name: "project:non_fake"})
        body = dict(unrescue=None)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._unrescue, self.req, fakes.FAKE_UUID,
            body=body)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
| |
# Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Functions and classes common to multiple pymongo modules."""
import sys
import warnings
from bson.binary import (OLD_UUID_SUBTYPE, UUID_SUBTYPE,
JAVA_LEGACY, CSHARP_LEGACY)
from bson.codec_options import CodecOptions
from pymongo import read_preferences
from pymongo.auth import MECHANISMS
from pymongo.read_preferences import ReadPreference
from pymongo.errors import ConfigurationError
# Detect ssl support once at import time.
HAS_SSL = True
try:
    import ssl
except ImportError:
    HAS_SSL = False

# Jython 2.7 includes an incomplete ssl module. See PYTHON-498.
if sys.platform.startswith('java'):
    HAS_SSL = False

# Defaults until we connect to a server and get updated limits.
MAX_BSON_SIZE = 16 * (1024 ** 2)
MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE
MIN_WIRE_VERSION = 0
MAX_WIRE_VERSION = 0
MAX_WRITE_BATCH_SIZE = 1000

# What this version of PyMongo supports.
MIN_SUPPORTED_WIRE_VERSION = 0
MAX_SUPPORTED_WIRE_VERSION = 3

# mongod/s 2.6 and above return code 59 when a
# command doesn't exist. mongod versions previous
# to 2.6 and mongos 2.4.x return no error code
# when a command does exist. mongos versions previous
# to 2.4.0 return code 13390 when a command does not
# exist.
COMMAND_NOT_FOUND_CODES = (59, 13390, None)
def raise_config_error(key, dummy):
    """Raise ConfigurationError with the given key name.

    The unused *dummy* parameter keeps the (key, value) validator
    signature shared by the other validate_* functions in this module.
    """
    raise ConfigurationError("Unknown option %s" % (key,))
# Mapping of URI uuid representation options to valid subtypes.
# Keys are the accepted values of the uuidRepresentation URI option.
_UUID_SUBTYPES = {
    'standard': UUID_SUBTYPE,
    'pythonLegacy': OLD_UUID_SUBTYPE,
    'javaLegacy': JAVA_LEGACY,
    'csharpLegacy': CSHARP_LEGACY
}
def validate_boolean(option, value):
    """Validates that 'value' is 'true' or 'false'.

    Real booleans pass through unchanged; the strings 'true'/'false' are
    converted.  Anything else raises.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, basestring):
        if value in ('true', 'false'):
            return value == 'true'
        raise ConfigurationError("The value of %s must be "
                                 "'true' or 'false'" % (option,))
    raise TypeError("Wrong type for %s, value must be a boolean" % (option,))
def validate_integer(option, value):
    """Validates that 'value' is an integer (or basestring representation).

    Numeric strings are coerced with int(); non-numeric strings raise
    ConfigurationError, other types raise TypeError.
    """
    if isinstance(value, (int, long)):
        return value
    elif isinstance(value, basestring):
        if not value.isdigit():
            raise ConfigurationError("The value of %s must be "
                                     "an integer" % (option,))
        return int(value)
    raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
    """Validate that 'value' is a positive integer, which does not include 0.

    Delegates type handling to validate_integer, then enforces > 0.
    """
    val = validate_integer(option, value)
    if val <= 0:
        raise ConfigurationError("The value of %s must be "
                                 "a positive integer" % (option,))
    return val
def validate_non_negative_integer(option, value):
    """Validate that 'value' is a positive integer or 0.

    Delegates type handling to validate_integer, then enforces >= 0.
    """
    val = validate_integer(option, value)
    if val < 0:
        raise ConfigurationError("The value of %s must be "
                                 "a non negative integer" % (option,))
    return val
def validate_readable(option, value):
    """Validates that 'value' is file-like and readable.

    Probes by opening and immediately closing the path; any IOError
    propagates to the caller.  None passes through unchanged.
    """
    if value is None:
        return value
    # First make sure its a string py3.3 open(True, 'r') succeeds
    # Used in ssl cert checking due to poor ssl module error reporting
    value = validate_basestring(option, value)
    open(value, 'r').close()
    return value
def validate_cert_reqs(option, value):
    """Validate the cert reqs are valid. It must be None or one of the three
    values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or ``ssl.CERT_REQUIRED``.

    Symbolic names given as strings (e.g. "CERT_REQUIRED") are resolved to
    the corresponding ssl constant.  Raises ConfigurationError when ssl is
    unavailable or the value is not one of the three constants.
    """
    if value is None:
        return value
    if HAS_SSL:
        # Allow the constant's name as a string.
        if isinstance(value, basestring) and hasattr(ssl, value):
            value = getattr(ssl, value)
        if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
            return value
        # Fixed: the original message was missing the closing backtick.
        raise ConfigurationError("The value of %s must be one of: "
                                 "`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or "
                                 "`ssl.CERT_REQUIRED`" % (option,))
    else:
        raise ConfigurationError("The value of %s is set but can't be "
                                 "validated. The ssl module is not available"
                                 % (option,))
def validate_non_negative_integer_or_none(option, value):
    """Validate that 'value' is a non-negative integer or None."""
    return (None if value is None
            else validate_non_negative_integer(option, value))
def validate_positive_integer_or_none(option, value):
    """Validate that 'value' is a positive integer or None."""
    return (None if value is None
            else validate_positive_integer(option, value))
def validate_basestring(option, value):
    """Validates that 'value' is an instance of `basestring`.

    Raises TypeError otherwise.
    """
    if isinstance(value, basestring):
        return value
    raise TypeError("Wrong type for %s, value must be an "
                    "instance of %s" % (option, basestring.__name__))
def validate_basestring_or_none(option, value):
    """Validates that 'value' is an instance of `basestring` or `None`.
    """
    if value is None:
        return value
    return validate_basestring(option, value)
def validate_int_or_basestring(option, value):
    """Validates that 'value' is an integer or string.

    Numeric strings are coerced to int; other strings pass through
    unchanged.  Raises TypeError for all other types.
    """
    if isinstance(value, (int, long)):
        return value
    elif isinstance(value, basestring):
        if value.isdigit():
            return int(value)
        return value
    raise TypeError("Wrong type for %s, value must be an "
                    "integer or a string" % (option,))
def validate_positive_float(option, value):
    """Validates that 'value' is a float, or can be converted to one, and is
    positive.

    Raises ConfigurationError when conversion fails or the result is not in
    (0, 1e9).
    """
    err = ConfigurationError("%s must be a positive int or float" % (option,))
    try:
        value = float(value)
    except (ValueError, TypeError):
        raise err
    # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
    # one billion - this is a reasonable approximation for infinity
    if not 0 < value < 1e9:
        raise err
    return value
def validate_timeout_or_none(option, value):
    """Validates a timeout specified in milliseconds returning
    a value in floating point seconds.
    """
    if value is None:
        return None
    # Millisecond input, fractional-second output.
    return validate_positive_float(option, value) / 1000.0
def validate_positive_float_or_zero(option, value):
    """Validates that 'value' is 0 or a positive float or can be converted to
    0 or a positive float.
    """
    # Accept the literal zero (int or the string "0") before delegating;
    # `in` uses ==, matching the original equality checks.
    if value in (0, "0"):
        return 0
    return validate_positive_float(option, value)
def validate_read_preference(dummy, value):
    """Validate read preference for a ReplicaSetConnection.
    """
    if value not in read_preferences.modes:
        # Also allow string form of enum for uri_parser
        try:
            value = read_preferences.mongos_enum(value)
        except ValueError:
            raise ConfigurationError("Not a valid read preference")
    return value
def _validate_tag_sets_format(option, value):
if not isinstance(value, list):
raise ConfigurationError("%s %r invalid, must be "
"a list" % (option, value))
elif not value:
raise ConfigurationError("%s %r invalid, must be None or contain "
"at least one set of tags" % (option, value))
def _validate_dict_list(option, value):
for elt in value:
if not isinstance(elt, dict):
raise ConfigurationError(
"%s %r invalid, must be a dict" % (option, elt))
def validate_read_preference_tags(option, value):
    """Parse readPreferenceTags if passed as a client kwarg.

    Accepts None (match any member), a single "k:v,k:v" string, a list of
    such strings, or an already-parsed list of dicts.
    """
    if value is None:
        return [{}]
    if isinstance(value, basestring):
        # A single comma-separated spec; normalize to a one-element list.
        value = [value]
    else:
        _validate_tag_sets_format(option, value)
        if isinstance(value[0], dict):
            # Already a list of tag dicts; just validate and pass through.
            _validate_dict_list("Tag set", value)
            return value
    parsed = []
    for spec in value:
        if spec == '':
            parsed.append({})
        else:
            try:
                pairs = [tag.split(":") for tag in spec.split(",")]
                parsed.append(dict(pairs))
            except Exception:
                raise ValueError("%r not a valid "
                                 "value for %s" % (spec, option))
    return parsed
def validate_tag_sets(option, value):
    """Validate tag sets for a ReplicaSetConnection."""
    if value is None:
        # No tag sets supplied: read from any matching member.
        return [{}]
    # Must be a non-empty list whose elements are all dicts.
    _validate_tag_sets_format(option, value)
    _validate_dict_list("Tag set", value)
    return value
def validate_auth_mechanism(option, value):
    """Validate the authMechanism URI option.
    """
    # CRAM-MD5 is for server testing only. Undocumented,
    # unsupported, may be removed at any time. You have
    # been warned.
    if value in MECHANISMS or value == 'CRAM-MD5':
        return value
    raise ConfigurationError("%s must be in "
                             "%s" % (option, MECHANISMS))
def validate_uuid_representation(dummy, value):
    """Validate the uuid representation option selected in the URI.
    """
    # Map the URI spelling to its numeric BSON binary subtype.
    if value in _UUID_SUBTYPES:
        return _UUID_SUBTYPES[value]
    raise ConfigurationError("%s is an invalid UUID representation. "
                             "Must be one of "
                             "%s" % (value, _UUID_SUBTYPES.keys()))
def validate_uuid_subtype(dummy, value):
    """Validate the uuid subtype option, a numerical value whose acceptable
    values are defined in bson.binary."""
    if value in _UUID_SUBTYPES.values():
        return value
    raise ConfigurationError("Not a valid setting for uuid_subtype.")
# Keys permitted in the authMechanismProperties option.
_MECHANISM_PROPS = frozenset(['SERVICE_NAME'])


def validate_auth_mechanism_properties(option, value):
    """Validate authMechanismProperties.

    `value` is a comma-separated string of "KEY:val" pairs; returns a dict.
    """
    value = validate_basestring(option, value)
    props = {}
    for pair in value.split(','):
        try:
            # Unpacking raises ValueError unless exactly one ':' is present.
            key, val = pair.split(':')
            if key not in _MECHANISM_PROPS:
                raise ConfigurationError("%s is not a supported auth "
                                         "mechanism property. Must be one of "
                                         "%s." % (key, tuple(_MECHANISM_PROPS)))
            props[key] = val
        except ValueError:
            raise ConfigurationError("auth mechanism properties must be "
                                     "key:value pairs like SERVICE_NAME:"
                                     "mongodb, not %s." % (pair,))
    return props
def validate_is_dict(option, value):
    """Validate the type of method arguments that expect a document.

    :Parameters:
      - `option`: the argument name used in the error message.
      - `value`: the object that must be a dict (or dict subclass).

    Raises TypeError if `value` is not a dict.
    """
    if not isinstance(value, dict):
        # Bug fix: the implicit string concatenation was missing a space,
        # producing "...bson.son.SON, oranother subclass of dict".
        raise TypeError("%s must be an instance of dict, bson.son.SON, or "
                        "another subclass of dict" % (option,))
def validate_ok_for_replace(replacement):
    """Validate a replacement document."""
    validate_is_dict("replacement", replacement)
    # Replacement can be {}
    if not replacement:
        return
    # Inspect only the first key: a leading '$' means an update operator,
    # which is not allowed in a full-document replace.
    if iter(replacement).next().startswith('$'):
        raise ValueError('replacement can not include $ operators')
def validate_ok_for_update(update):
    """Validate an update document."""
    validate_is_dict("update", update)
    # Update can not be {}
    if not update:
        raise ValueError('update only works with $ operators')
    # Every update document must start with a '$' operator key.
    if not iter(update).next().startswith('$'):
        raise ValueError('update only works with $ operators')
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout,
# readpreferencetags is an alias for tag_sets.
# Maps each lowercased option name (URI option or keyword argument) to the
# validator that checks, and possibly coerces, its value.  Unknown options
# fall through to raise_config_error via VALIDATORS.get(..., raise_config_error).
VALIDATORS = {
    'replicaset': validate_basestring_or_none,
    'slaveok': validate_boolean,
    'slave_okay': validate_boolean,
    'safe': validate_boolean,
    'w': validate_int_or_basestring,
    'wtimeout': validate_integer,
    'wtimeoutms': validate_integer,
    'fsync': validate_boolean,
    'j': validate_boolean,
    'journal': validate_boolean,
    # Timeouts are given in milliseconds and converted to float seconds.
    'connecttimeoutms': validate_timeout_or_none,
    'sockettimeoutms': validate_timeout_or_none,
    'waitqueuetimeoutms': validate_timeout_or_none,
    'waitqueuemultiple': validate_non_negative_integer_or_none,
    'ssl': validate_boolean,
    'ssl_keyfile': validate_readable,
    'ssl_certfile': validate_readable,
    'ssl_cert_reqs': validate_cert_reqs,
    'ssl_ca_certs': validate_readable,
    'ssl_match_hostname': validate_boolean,
    'readpreference': validate_read_preference,
    'read_preference': validate_read_preference,
    'readpreferencetags': validate_read_preference_tags,
    'tag_sets': validate_tag_sets,
    'secondaryacceptablelatencyms': validate_positive_float_or_zero,
    'secondary_acceptable_latency_ms': validate_positive_float_or_zero,
    'localthresholdms': validate_positive_float_or_zero,
    'auto_start_request': validate_boolean,
    'use_greenlets': validate_boolean,
    'authmechanism': validate_auth_mechanism,
    'authsource': validate_basestring,
    'gssapiservicename': validate_basestring,
    'authmechanismproperties': validate_auth_mechanism_properties,
    'uuidrepresentation': validate_uuid_representation,
    'socketkeepalive': validate_boolean,
    'maxpoolsize': validate_positive_integer_or_none,
    'connect': validate_boolean,
    '_connect': validate_boolean
}
# Options that may appear in the extra-auth-options dict.
_AUTH_OPTIONS = frozenset(['gssapiservicename', 'authmechanismproperties'])


def validate_auth_option(option, value):
    """Validate optional authentication parameters.
    """
    lower, value = validate(option, value)
    if lower in _AUTH_OPTIONS:
        return lower, value
    raise ConfigurationError('Unknown '
                             'authentication option: %s' % (option,))
def validate(option, value):
    """Generic validation function.

    Looks up the option's validator case-insensitively and returns the
    lowercased option name together with the validated (possibly coerced)
    value.  Unknown options are rejected via raise_config_error.
    """
    lower = option.lower()
    return lower, VALIDATORS.get(lower, raise_config_error)(option, value)
# The getlasterror/write-concern option names that WriteConcern accepts
# (including the 'wtimeoutms' and 'journal' aliases).
SAFE_OPTIONS = frozenset([
    'w',
    'wtimeout',
    'wtimeoutms',
    'fsync',
    'j',
    'journal'
])
class WriteConcern(dict):
    """dict subclass whose item assignment validates write concern options."""

    def __init__(self, *args, **kwargs):
        """A subclass of dict that overrides __setitem__ to
        validate write concern options.
        """
        super(WriteConcern, self).__init__(*args, **kwargs)

    def __setitem__(self, key, value):
        # Reject anything that is not a known getlasterror option, then run
        # the per-option validator before storing the normalized pair.
        if key not in SAFE_OPTIONS:
            raise ConfigurationError("%s is not a valid write "
                                     "concern option." % (key,))
        key, value = validate(key, value)
        super(WriteConcern, self).__setitem__(key, value)
class BaseObject(object):
    """A base class that provides attributes and methods common
    to multiple pymongo classes.
    SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB.
    """

    def __init__(self, **options):
        # codec_options is required and must already be a CodecOptions; the
        # public clients construct it before calling this initializer.
        self._codec_options = options.get('codec_options')
        if not isinstance(self._codec_options, CodecOptions):
            raise TypeError("codec_options must be an instance of "
                            "bson.codec_options.CodecOptions")
        # Defaults; __set_options may override any of these from `options`.
        self._read_pref = ReadPreference.PRIMARY
        self._tag_sets = [{}]
        self._secondary_acceptable_latency_ms = 15
        self._write_concern = WriteConcern()
        self.__slave_okay = False
        self.__safe = None
        self.__set_options(options)
        if (self._read_pref == ReadPreference.PRIMARY
                and self._tag_sets != [{}]):
            raise ConfigurationError(
                "ReadPreference PRIMARY cannot be combined with tags")
        # If safe hasn't been implicitly set by write concerns then set it.
        if self.__safe is None:
            if options.get("w") == 0:
                self.__safe = False
            else:
                self.__safe = validate_boolean('safe',
                                               options.get("safe", True))
        # Note: 'safe' is always passed by Connection and ReplicaSetConnection
        # Always do the most "safe" thing, but warn about conflicts.
        if self.__safe and options.get('w') == 0:
            warnings.warn("Conflicting write concerns: %s. Write concern "
                          "options were configured, but w=0 disables all "
                          "other options." % self.write_concern,
                          UserWarning)

    def __set_safe_option(self, option, value):
        """Validates and sets getlasterror options for this
        object (Connection, Database, Collection, etc.)
        """
        if value is None:
            self._write_concern.pop(option, None)
        else:
            self._write_concern[option] = value
            # Any acknowledged option other than w=0 implies safe writes.
            if option != "w" or value != 0:
                self.__safe = True

    def __set_options(self, options):
        """Validates and sets all options passed to this object."""
        for option, value in options.iteritems():
            if option in ('slave_okay', 'slaveok'):
                self.__slave_okay = validate_boolean(option, value)
            elif option in ('read_preference', "readpreference"):
                self._read_pref = validate_read_preference(option, value)
            elif option in ('tag_sets', 'readpreferencetags'):
                self._tag_sets = validate_tag_sets(option, value)
            elif option in ('secondaryacceptablelatencyms',
                            'secondary_acceptable_latency_ms'):
                self._secondary_acceptable_latency_ms = (
                    validate_positive_float_or_zero(option, value))
            elif option in SAFE_OPTIONS:
                # Normalize aliases before storing in the write concern.
                if option == 'journal':
                    self.__set_safe_option('j', value)
                elif option == 'wtimeoutms':
                    self.__set_safe_option('wtimeout', value)
                else:
                    self.__set_safe_option(option, value)

    @property
    def codec_options(self):
        """Read only access to the :class:`~bson.codec_options.CodecOptions`
        of this instance.
        The value of :attr:`codec_options` can be changed through
        :meth:`~pymongo.mongo_client.MongoClient.get_database`,
        :meth:`~pymongo.database.Database.get_collection`,
        or :meth:`~pymongo.collection.Collection.with_options`,
        .. versionadded:: 2.9
        """
        return self._codec_options

    def __set_write_concern(self, value):
        """Property setter for write_concern."""
        warnings.warn("Changing write_concern by setting it directly is "
                      "deprecated in this version of PyMongo and prohibited "
                      "in PyMongo 3. See the write_concern docstring for more "
                      "information.", DeprecationWarning, stacklevel=2)
        if not isinstance(value, dict):
            raise ConfigurationError("write_concern must be an "
                                     "instance of dict or a subclass.")
        # Make a copy here to avoid users accidentally setting the
        # same dict on multiple instances.
        wc = WriteConcern()
        for k, v in value.iteritems():
            # Make sure we validate each option.
            wc[k] = v
        self._write_concern = wc

    def __get_write_concern(self):
        """The default write concern for this instance.
        Valid options include:
        - `w`: (integer or string) If this is a replica set, write operations
          will block until they have been replicated to the specified number
          or tagged set of servers. `w=<int>` always includes the replica set
          primary (e.g. w=3 means write to the primary and wait until
          replicated to **two** secondaries). **Setting w=0 disables write
          acknowledgement and all other write concern options.**
        - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
          in milliseconds to control how long to wait for write propagation
          to complete. If replication does not complete in the given
          timeframe, a timeout exception is raised.
        - `j`: If ``True`` block until write operations have been committed
          to the journal. Cannot be used in combination with `fsync`. Prior
          to MongoDB 2.6 this option was ignored if the server was running
          without journaling. Starting with MongoDB 2.6 write operations will
          fail with an exception if this option is used when the server is
          running without journaling.
        - `fsync`: If ``True`` and the server is running without journaling,
          blocks until the server has synced all data files to disk. If the
          server is running with journaling, this acts the same as the `j`
          option, blocking until write operations have been committed to the
          journal. Cannot be used in combination with `j`.
        .. note:: Accessing :attr:`write_concern` returns its value
           (a subclass of :class:`dict`), not a copy.
        .. warning:: If you are using :class:`~pymongo.connection.Connection`
           or :class:`~pymongo.replica_set_connection.ReplicaSetConnection`
           make sure you explicitly set ``w`` to 1 (or a greater value) or
           :attr:`safe` to ``True``. Unlike calling
           :meth:`set_lasterror_options`, setting an option in
           :attr:`write_concern` does not implicitly set :attr:`safe`
           to ``True``.
        .. warning:: :attr:`write_concern` is read only in PyMongo 3. Use
           :class:`~pymongo.write_concern.WriteConcern` with
           :meth:`~pymongo.mongo_client.MongoClient.get_database`,
           :meth:`~pymongo.database.Database.get_collection`,
           or :meth:`~pymongo.collection.Collection.with_options` to set write
           concern.
        See the :doc:`/migrate-to-pymongo3` for examples.
        .. versionchanged:: 2.9
           Deprecated directly setting write_concern.
        """
        # To support dict style access we have to return the actual
        # WriteConcern here, not a copy.
        return self._write_concern

    write_concern = property(__get_write_concern, __set_write_concern)

    def __get_slave_okay(self):
        """**DEPRECATED** Use read preference "secondaryPreferred" instead.
        .. warning:: :attr:`slave_okay` is deprecated in this version of
           PyMongo and removed in PyMongo 3. Use read preference
           :class:`~pymongo.read_preferences.SecondaryPreferred` with
           :meth:`~pymongo.mongo_client.MongoClient.get_database`,
           :meth:`~pymongo.database.Database.get_collection`,
           or :meth:`~pymongo.collection.Collection.with_options` instead.
        See the :doc:`/migrate-to-pymongo3` for examples.
        .. versionchanged:: 2.1
           Deprecated slave_okay.
        .. versionadded:: 2.0
        """
        return self.__slave_okay

    def __set_slave_okay(self, value):
        """Property setter for slave_okay"""
        warnings.warn("slave_okay is deprecated in this version of PyMongo "
                      "and removed in PyMongo 3. See the slave_okay docstring "
                      "for more information.",
                      DeprecationWarning, stacklevel=2)
        self.__slave_okay = validate_boolean('slave_okay', value)

    slave_okay = property(__get_slave_okay, __set_slave_okay)

    def __get_read_pref(self):
        """The read preference mode for this instance.
        See :class:`~pymongo.read_preferences.ReadPreference` for
        available options.
        .. warning:: :attr:`read_preference` is read only in PyMongo 3. Use the
           read preference classes from :mod:`~pymongo.read_preferences` with
           :meth:`~pymongo.mongo_client.MongoClient.get_database`,
           :meth:`~pymongo.database.Database.get_collection`,
           or :meth:`~pymongo.collection.Collection.with_options` to set read
           preference.
        See the :doc:`/migrate-to-pymongo3` for examples.
        .. versionchanged:: 2.9
           Deprecated directly setting read_preference.
        .. versionadded:: 2.1
        """
        return self._read_pref

    def __set_read_pref(self, value):
        """Property setter for read_preference"""
        warnings.warn("Changing read_preference by setting it directly is "
                      "deprecated in this version of PyMongo and prohibited "
                      "in PyMongo 3. See the read_preference docstring for "
                      "more information.", DeprecationWarning, stacklevel=2)
        self._read_pref = validate_read_preference('read_preference', value)

    read_preference = property(__get_read_pref, __set_read_pref)

    def __get_acceptable_latency(self):
        """**DEPRECATED** Any replica set member whose ping time is within
        :attr:`secondary_acceptable_latency_ms` of the nearest member may
        accept reads. Defaults to 15 milliseconds.
        See :class:`~pymongo.read_preferences.ReadPreference`.
        .. note:: :attr:`secondary_acceptable_latency_ms` is ignored when talking
           to a replica set *through* a mongos. The equivalent is the
           localThreshold_ command line option.
        .. warning:: :attr:`secondary_acceptable_latency_ms` is deprecated in
           this version of PyMongo and removed in PyMongo 3. Use the
           `localThresholdMS` option with
           :class:`~pymongo.mongo_client.MongoClient` or
           :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
           instead. See the :doc:`/migrate-to-pymongo3` for more information.
        .. versionchanged:: 2.9
           Deprecated secondary_acceptable_latency_ms.
        .. versionadded:: 2.3
        .. _localThreshold:
           http://docs.mongodb.org/manual/reference/program/mongos/#cmdoption--localThreshold
        """
        return self._secondary_acceptable_latency_ms

    def __set_acceptable_latency(self, value):
        """Property setter for secondary_acceptable_latency_ms"""
        warnings.warn("secondary_acceptable_latency_ms is deprecated in this "
                      "version of PyMongo and removed in PyMongo 3. See the "
                      "PyMongo 3 migration guide for more information.",
                      DeprecationWarning, stacklevel=3)
        self._secondary_acceptable_latency_ms = (
            validate_positive_float_or_zero(
                'secondary_acceptable_latency_ms', value))

    secondary_acceptable_latency_ms = property(
        __get_acceptable_latency, __set_acceptable_latency)

    def __get_tag_sets(self):
        """**DEPRECATED** Set ``tag_sets`` to a list of dictionaries like
        [{'dc': 'ny'}] to read only from members whose ``dc`` tag has the value
        ``"ny"``. To specify a priority-order for tag sets, provide a list of
        tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
        set, ``{}``, means "read from any member that matches the mode,
        ignoring tags." ReplicaSetConnection tries each set of tags in turn
        until it finds a set of tags with at least one matching member.
        .. seealso:: `Data-Center Awareness
           <http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_
        .. warning:: :attr:`tag_sets` is deprecated in this version of PyMongo
           and removed in PyMongo 3. Use the read preference classes from
           :mod:`~pymongo.read_preferences` with
           :meth:`~pymongo.mongo_client.MongoClient.get_database`,
           :meth:`~pymongo.database.Database.get_collection`,
           or :meth:`~pymongo.collection.Collection.with_options` instead.
        See the :doc:`/migrate-to-pymongo3` for examples.
        .. versionchanged:: 2.9
           Deprecated tag_sets.
        .. versionadded:: 2.3
        """
        return self._tag_sets

    def __set_tag_sets(self, value):
        """Property setter for tag_sets"""
        warnings.warn("tag_sets is deprecated in this version of PyMongo and "
                      "removed in PyMongo 3. See the tag_sets docstring for "
                      "more information.", DeprecationWarning, stacklevel=2)
        self._tag_sets = validate_tag_sets('tag_sets', value)

    tag_sets = property(__get_tag_sets, __set_tag_sets)

    def __get_uuid_subtype(self):
        """**DEPRECATED** This attribute specifies which BSON Binary subtype is
        used when storing UUIDs. Historically UUIDs have been stored as BSON Binary
        subtype 3. This attribute is used to switch to the newer BSON Binary
        subtype 4. It can also be used to force legacy byte order and subtype
        compatibility with the Java and C# drivers. See the :mod:`bson.binary`
        module for all options.
        .. warning:: :attr:`uuid_subtype` is deprecated in this version of
           PyMongo and removed in PyMongo 3. Use
           :class:`~bson.codec_options.CodecOptions` with
           :meth:`~pymongo.mongo_client.MongoClient.get_database`,
           :meth:`~pymongo.database.Database.get_collection`,
           or :meth:`~pymongo.collection.Collection.with_options` instead.
        See the :doc:`/migrate-to-pymongo3` for examples.
        .. versionchanged:: 2.9
           Deprecated uuid_subtype.
        """
        return self._codec_options.uuid_representation

    def __set_uuid_subtype(self, value):
        """Sets the BSON Binary subtype to be used when storing UUIDs."""
        warnings.warn("uuid_subtype is deprecated in this version of PyMongo "
                      "and removed in PyMongo 3. See the uuid_subtype "
                      "docstring for more information.",
                      DeprecationWarning, stacklevel=2)
        # CodecOptions is immutable: rebuild it, carrying over the other fields.
        as_class = self._codec_options.document_class
        tz_aware = self._codec_options.tz_aware
        uuid_rep = validate_uuid_subtype("uuid_subtype", value)
        self._codec_options = CodecOptions(as_class, tz_aware, uuid_rep)

    uuid_subtype = property(__get_uuid_subtype, __set_uuid_subtype)

    def __get_safe(self):
        """**DEPRECATED** Use getlasterror with every write operation?
        .. warning:: :attr:`safe` is deprecated in this version of PyMongo
           and removed in PyMongo 3. Use
           :class:`~pymongo.write_concern.WriteConcern` with
           :meth:`~pymongo.mongo_client.MongoClient.get_database`,
           :meth:`~pymongo.database.Database.get_collection`,
           or :meth:`~pymongo.collection.Collection.with_options` instead.
        See the :doc:`/migrate-to-pymongo3` for examples.
        .. versionchanged:: 2.4
           Deprecated safe.
        .. versionadded:: 2.0
        """
        return self.__safe

    def __set_safe(self, value):
        """Property setter for safe"""
        warnings.warn("safe is deprecated in this version of PyMongo and "
                      "removed in PyMongo 3. See the safe docstring for more "
                      "information.", DeprecationWarning, stacklevel=2)
        self.__safe = validate_boolean('safe', value)

    safe = property(__get_safe, __set_safe)

    def get_lasterror_options(self):
        """**DEPRECATED** Returns a dict of the getlasterror options set on this
        instance.
        .. warning:: :meth:`get_lasterror_options` is deprecated in this
           version of PyMongo and removed in PyMongo 3. Use
           :attr:`write_concern` instead.
        .. versionchanged:: 2.4
           Deprecated get_lasterror_options.
        .. versionadded:: 2.0
        """
        warnings.warn("get_lasterror_options is deprecated in this version of "
                      "PyMongo and removed in PyMongo 3. See the "
                      "get_lasterror_options docstring for more information.",
                      DeprecationWarning, stacklevel=2)
        return self._write_concern.copy()

    def set_lasterror_options(self, **kwargs):
        """**DEPRECATED** Set getlasterror options for this instance.
        Valid options include j=<bool>, w=<int/string>, wtimeout=<int>,
        and fsync=<bool>. Implies safe=True.
        :Parameters:
            - `**kwargs`: Options should be passed as keyword
                          arguments (e.g. w=2, fsync=True)
        .. warning:: :meth:`set_lasterror_options` is deprecated in this
           version of PyMongo and removed in PyMongo 3. Use
           :class:`~pymongo.write_concern.WriteConcern` with
           :meth:`~pymongo.mongo_client.MongoClient.get_database`,
           :meth:`~pymongo.database.Database.get_collection`,
           or :meth:`~pymongo.collection.Collection.with_options` instead.
        See the :doc:`/migrate-to-pymongo3` for examples.
        .. versionchanged:: 2.4
           Deprecated set_lasterror_options.
        .. versionadded:: 2.0
        """
        warnings.warn("set_lasterror_options is deprecated in this version of "
                      "PyMongo and removed in PyMongo 3. See the "
                      "set_lasterror_options docstring for more information.",
                      DeprecationWarning, stacklevel=2)
        for key, value in kwargs.iteritems():
            self.__set_safe_option(key, value)

    def unset_lasterror_options(self, *options):
        """**DEPRECATED** Unset getlasterror options for this instance.
        If no options are passed unsets all getlasterror options.
        This does not set `safe` to False.
        :Parameters:
            - `*options`: The list of options to unset.
        .. warning:: :meth:`unset_lasterror_options` is deprecated in this
           version of PyMongo and removed in PyMongo 3. Use
           :class:`~pymongo.write_concern.WriteConcern` with
           :meth:`~pymongo.mongo_client.MongoClient.get_database`,
           :meth:`~pymongo.database.Database.get_collection`,
           or :meth:`~pymongo.collection.Collection.with_options` instead.
        See the :doc:`/migrate-to-pymongo3` for examples.
        .. versionchanged:: 2.4
           Deprecated unset_lasterror_options.
        .. versionadded:: 2.0
        """
        warnings.warn("unset_lasterror_options is deprecated in this version "
                      "of PyMongo and removed in PyMongo 3. See the "
                      "unset_lasterror_options docstring for more information.",
                      DeprecationWarning, stacklevel=2)
        if len(options):
            for option in options:
                self._write_concern.pop(option, None)
        else:
            self._write_concern = WriteConcern()

    def _get_wc_override(self):
        """Get write concern override.
        Used in internal methods that **must** do acknowledged write ops.
        We don't want to override user write concern options if write concern
        is already enabled.
        """
        if self.safe and self._write_concern.get('w') != 0:
            # Already acknowledged: empty override keeps the user's options.
            return {}
        return {'w': 1}

    def _get_write_mode(self, safe=None, **options):
        """Get the current write mode.
        Determines if the current write is safe or not based on the
        passed in or inherited safe value, write_concern values, or
        passed options.
        :Parameters:
            - `safe`: check that the operation succeeded?
            - `**options`: overriding write concern options.
        .. versionadded:: 2.3
        """
        if safe is not None:
            warnings.warn("The safe parameter is deprecated. Please use "
                          "write concern options instead.", DeprecationWarning,
                          stacklevel=3)
            validate_boolean('safe', safe)
        # Passed options override collection level defaults.
        if safe is not None or options:
            if safe or options:
                if not options:
                    options = self._write_concern.copy()
                    # Backwards compatability edge case. Call getLastError
                    # with no options if safe=True was passed but collection
                    # level defaults have been disabled with w=0.
                    # These should be equivalent:
                    # Connection(w=0).foo.bar.insert({}, safe=True)
                    # MongoClient(w=0).foo.bar.insert({}, w=1)
                    if options.get('w') == 0:
                        return True, {}
                # Passing w=0 overrides passing safe=True.
                return options.get('w') != 0, options
            return False, {}
        # Fall back to collection level defaults.
        # w=0 takes precedence over self.safe = True
        if self._write_concern.get('w') == 0:
            return False, {}
        elif self.safe or self._write_concern.get('w', 0) != 0:
            return True, self._write_concern.copy()
        return False, {}
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.assuredworkloads_v1.types import assuredworkloads
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
# Advertise the installed library version in the user-agent; fall back to an
# unversioned ClientInfo when the distribution metadata is unavailable
# (e.g. when running from a source checkout that was never installed).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-assured-workloads",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AssuredWorkloadsServiceTransport(abc.ABC):
    """Abstract transport class for AssuredWorkloadsService."""

    # OAuth scopes requested when obtaining default credentials.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    DEFAULT_HOST: str = "assuredworkloads.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        # Resolution order: explicit file, then explicit credentials object,
        # then Application Default Credentials from the environment.
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.create_workload: gapic_v1.method.wrap_method(
                self.create_workload, default_timeout=None, client_info=client_info,
            ),
            self.update_workload: gapic_v1.method.wrap_method(
                self.update_workload, default_timeout=None, client_info=client_info,
            ),
            self.delete_workload: gapic_v1.method.wrap_method(
                self.delete_workload, default_timeout=None, client_info=client_info,
            ),
            self.get_workload: gapic_v1.method.wrap_method(
                self.get_workload, default_timeout=None, client_info=client_info,
            ),
            self.list_workloads: gapic_v1.method.wrap_method(
                self.list_workloads, default_timeout=None, client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.
        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # Each RPC property below is implemented by a concrete (gRPC/REST)
    # transport subclass; sync transports return the response directly,
    # async transports return an awaitable.
    @property
    def create_workload(
        self,
    ) -> Callable[
        [assuredworkloads.CreateWorkloadRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def update_workload(
        self,
    ) -> Callable[
        [assuredworkloads.UpdateWorkloadRequest],
        Union[assuredworkloads.Workload, Awaitable[assuredworkloads.Workload]],
    ]:
        raise NotImplementedError()

    @property
    def delete_workload(
        self,
    ) -> Callable[
        [assuredworkloads.DeleteWorkloadRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()

    @property
    def get_workload(
        self,
    ) -> Callable[
        [assuredworkloads.GetWorkloadRequest],
        Union[assuredworkloads.Workload, Awaitable[assuredworkloads.Workload]],
    ]:
        raise NotImplementedError()

    @property
    def list_workloads(
        self,
    ) -> Callable[
        [assuredworkloads.ListWorkloadsRequest],
        Union[
            assuredworkloads.ListWorkloadsResponse,
            Awaitable[assuredworkloads.ListWorkloadsResponse],
        ],
    ]:
        raise NotImplementedError()
# Public API of this module.
__all__ = ("AssuredWorkloadsServiceTransport",)
| |
"""
Define a simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
persisting a *single* arbitrary NumPy array on disk. The format stores all
of the shape and dtype information necessary to reconstruct the array
correctly even on another machine with a different architecture.
The format is designed to be as simple as possible while achieving
its limited goals.
The ``.npz`` format is the standard format for persisting *multiple* NumPy
arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
files, one for each array.
Capabilities
------------
- Can represent all NumPy arrays including nested record arrays and
object arrays.
- Represents the data in its native binary form.
- Supports Fortran-contiguous arrays directly.
- Stores all of the necessary information to reconstruct the array
including shape and dtype on a machine of a different
architecture. Both little-endian and big-endian arrays are
supported, and a file with little-endian numbers will yield
a little-endian array on any machine reading the file. The
types are described in terms of their actual sizes. For example,
if a machine with a 64-bit C "long int" writes out an array with
"long ints", a reading machine with 32-bit C "long ints" will yield
an array with 64-bit integers.
- Is straightforward to reverse engineer. Datasets often live longer than
the programs that created them. A competent developer should be
able to create a solution in their preferred programming language to
  read most ``.npy`` files that they have been given without much
documentation.
- Allows memory-mapping of the data. See `open_memmap`.
- Can be read from a filelike stream object instead of an actual file.
- Stores object arrays, i.e. arrays containing elements that are arbitrary
Python objects. Files with object arrays are not to be mmapable, but
can be read and written to disk.
Limitations
-----------
- Arbitrary subclasses of numpy.ndarray are not completely preserved.
Subclasses will be accepted for writing, but only the array data will
be written out. A regular numpy.ndarray object will be created
upon reading the file.
.. warning::
Due to limitations in the interpretation of structured dtypes, dtypes
with fields with empty names will have the names replaced by 'f0', 'f1',
etc. Such arrays will not round-trip through the format entirely
accurately. The data is intact; only the field names will differ. We are
working on a fix for this. This fix will not require a change in the
file format. The arrays with such structures can still be saved and
restored, and the correct dtype may be restored by using the
``loadedarray.view(correct_dtype)`` method.
File extensions
---------------
We recommend using the ``.npy`` and ``.npz`` extensions for files saved
in this format. This is by no means a requirement; applications may wish
to use these file formats but use an extension specific to the
application. In the absence of an obvious alternative, however,
we suggest using ``.npy`` and ``.npz``.
Version numbering
-----------------
The version numbering of these formats is independent of NumPy version
numbering. If the format is upgraded, the code in `numpy.io` will still
be able to read and write Version 1.0 files.
Format Version 1.0
------------------
The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
The next 1 byte is an unsigned byte: the major version number of the file
format, e.g. ``\\x01``.
The next 1 byte is an unsigned byte: the minor version number of the file
format, e.g. ``\\x00``. Note: the version of the file format is not tied
to the version of the numpy package.
The next 2 bytes form a little-endian unsigned short int: the length of
the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the array's
format. It is an ASCII string which contains a Python literal expression
of a dictionary. It is terminated by a newline (``\\n``) and padded with
spaces (``\\x20``) to make the total length of
``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment
purposes.
The dictionary contains three keys:
"descr" : dtype.descr
An object that can be passed as an argument to the `numpy.dtype`
constructor to create the array's dtype.
"fortran_order" : bool
Whether the array data is Fortran-contiguous or not. Since
Fortran-contiguous arrays are a common form of non-C-contiguity,
we allow them to be written directly to disk for efficiency.
"shape" : tuple of int
The shape of the array.
For repeatability and readability, the dictionary keys are sorted in
alphabetic order. This is for convenience only. A writer SHOULD implement
this if possible. A reader MUST NOT depend on this.
Following the header comes the array data. If the dtype contains Python
objects (i.e. ``dtype.hasobject is True``), then the data is a Python
pickle of the array. Otherwise the data is the contiguous (either C-
or Fortran-, depending on ``fortran_order``) bytes of the array.
Consumers can figure out the number of bytes by multiplying the number
of elements given by the shape (noting that ``shape=()`` means there is
1 element) by ``dtype.itemsize``.
Format Version 2.0
------------------
The version 1.0 format only allowed the array header to have a total size of
65535 bytes. This can be exceeded by structured arrays with a large number of
columns. The version 2.0 format extends the header size to 4 GiB.
`numpy.save` will automatically save in 2.0 format if the data requires it,
else it will always use the more compatible 1.0 format.
The description of the fourth element of the header therefore has become:
"The next 4 bytes form a little-endian unsigned int: the length of the header
data HEADER_LEN."
Notes
-----
The ``.npy`` format, including reasons for creating it and a comparison of
alternatives, is described fully in the "npy-format" NEP.
"""
from __future__ import division, absolute_import, print_function
import numpy
import sys
import io
import warnings
from numpy.lib.utils import safe_eval
from numpy.compat import asbytes, asstr, isfileobj, long, basestring
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
MAGIC_PREFIX = asbytes('\x93NUMPY')
MAGIC_LEN = len(MAGIC_PREFIX) + 2
BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
def _check_version(version):
if version not in [(1, 0), (2, 0), None]:
msg = "we only support format version (1,0) and (2, 0), not %s"
raise ValueError(msg % (version,))
def magic(major, minor):
    """ Return the magic string for the given file format version.

    Parameters
    ----------
    major : int in [0, 255]
    minor : int in [0, 255]

    Returns
    -------
    magic : str

    Raises
    ------
    ValueError if the version cannot be formatted.
    """
    # Each version component is stored as a single unsigned byte.
    if not 0 <= major <= 255:
        raise ValueError("major version must be 0 <= major < 256")
    if not 0 <= minor <= 255:
        raise ValueError("minor version must be 0 <= minor < 256")
    if sys.version_info[0] >= 3:
        return MAGIC_PREFIX + bytes([major, minor])
    return MAGIC_PREFIX + chr(major) + chr(minor)
def read_magic(fp):
    """ Read the magic string to get the version of the file format.

    Parameters
    ----------
    fp : filelike object

    Returns
    -------
    major : int
    minor : int
    """
    magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
    prefix, version_bytes = magic_str[:-2], magic_str[-2:]
    if prefix != MAGIC_PREFIX:
        raise ValueError(
            "the magic string is not correct; expected %r, got %r"
            % (MAGIC_PREFIX, prefix))
    # The last two bytes hold the (major, minor) version numbers.
    if sys.version_info[0] >= 3:
        major, minor = version_bytes
    else:
        major, minor = map(ord, version_bytes)
    return major, minor
def dtype_to_descr(dtype):
    """
    Get a serializable descriptor from the dtype.

    The .descr attribute of a dtype object cannot be round-tripped through
    the dtype() constructor. Simple types, like dtype('float32'), have
    a descr which looks like a record array with one field with '' as
    a name. The dtype() constructor interprets this as a request to give
    a default name. Instead, we construct descriptor that can be passed to
    dtype().

    Parameters
    ----------
    dtype : dtype
        The dtype of the array that will be written to disk.

    Returns
    -------
    descr : object
        An object that can be passed to `numpy.dtype()` in order to
        replicate the input dtype.
    """
    # Record dtypes round-trip through .descr (modulo empty padding-field
    # names); simple dtypes do not, so use the .str spelling for those.
    return dtype.descr if dtype.names is not None else dtype.str
def header_data_from_array_1_0(array):
    """ Get the dictionary of header metadata from a numpy.ndarray.

    Parameters
    ----------
    array : numpy.ndarray

    Returns
    -------
    d : dict
        This has the appropriate entries for writing its string representation
        to the header of the file.
    """
    # C-contiguity wins: a 1-D array is both C- and F-contiguous and is
    # stored in C order.  Arrays that are neither are copied to C order at
    # write time, so they are also recorded as fortran_order=False.
    fortran = bool(array.flags.f_contiguous and not array.flags.c_contiguous)
    return {
        'shape': array.shape,
        'fortran_order': fortran,
        'descr': dtype_to_descr(array.dtype),
    }
def _write_array_header(fp, d, version=None):
    """ Write the header for an array and returns the version used
    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string representation
        to the header of the file.
    version: tuple or None
        None means use oldest that works
        explicit version will raise a ValueError if the format does not
        allow saving this data. Default: None
    Returns
    -------
    version : tuple of int
        the file version which needs to be used to store the data
    """
    import struct
    # Serialize the dict as a literal with sorted keys so output is
    # reproducible (readers must not rely on the order).  repr() is used
    # because the reader evaluates the header as a Python literal.
    header = ["{"]
    for key, value in sorted(d.items()):
        # Need to use repr here, since we eval these when reading
        header.append("'%s': %s, " % (key, repr(value)))
    header.append("}")
    header = "".join(header)
    # Pad the header with spaces and a final newline such that the magic
    # string, the header-length short and the header are aligned on a
    # 16-byte boundary. Hopefully, some system, possibly memory-mapping,
    # can take advantage of our premature optimization.
    # NOTE(review): the "+ 2" assumes a version-1.0 (2-byte) length field;
    # for 2.0 headers (4-byte field) the data ends up 2 bytes off the
    # 16-byte boundary -- confirm whether that matters to callers.
    current_header_len = MAGIC_LEN + 2 + len(header) + 1  # 1 for the newline
    topad = 16 - (current_header_len % 16)
    header = header + ' '*topad + '\n'
    # Normalize Python-2 'L' long-integer suffixes so the header reads back
    # on either Python version.
    header = asbytes(_filter_header(header))
    hlen = len(header)
    # 1.0 stores the header length as little-endian uint16; 2.0 widens it
    # to uint32.  Prefer 1.0 when the caller did not pin a version.
    if hlen < 256*256 and version in (None, (1, 0)):
        version = (1, 0)
        header_prefix = magic(1, 0) + struct.pack('<H', hlen)
    elif hlen < 2**32 and version in (None, (2, 0)):
        version = (2, 0)
        header_prefix = magic(2, 0) + struct.pack('<I', hlen)
    else:
        msg = "Header length %s too big for version=%s"
        msg %= (hlen, version)
        raise ValueError(msg)
    fp.write(header_prefix)
    fp.write(header)
    return version
def write_array_header_1_0(fp, d):
    """ Write the header for an array using the 1.0 format.
    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string
        representation to the header of the file.
    """
    # Pinning version=(1, 0) makes _write_array_header raise ValueError if
    # the header does not fit the 1.0 16-bit length field.
    _write_array_header(fp, d, (1, 0))
def write_array_header_2_0(fp, d):
    """ Write the header for an array using the 2.0 format.
    The 2.0 format allows storing very large structured arrays.
    .. versionadded:: 1.9.0
    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string
        representation to the header of the file.
    """
    # Version 2.0 uses a 32-bit header-length field (headers up to 4 GiB).
    _write_array_header(fp, d, (2, 0))
def read_array_header_1_0(fp):
    """
    Read an array header from a filelike object using the 1.0 file format
    version.
    This will leave the file object located just after the header.
    Parameters
    ----------
    fp : filelike object
        A file object or something with a `.read()` method like a file.
    Returns
    -------
    shape : tuple of int
        The shape of the array.
    fortran_order : bool
        The array data will be written out directly if it is either
        C-contiguous or Fortran-contiguous. Otherwise, it will be made
        contiguous before writing it out.
    dtype : dtype
        The dtype of the file's data.
    Raises
    ------
    ValueError
        If the data is invalid.
    """
    # Delegate to the shared reader with the version pinned to 1.0
    # (16-bit header-length field).
    return _read_array_header(fp, version=(1, 0))
def read_array_header_2_0(fp):
    """
    Read an array header from a filelike object using the 2.0 file format
    version.
    This will leave the file object located just after the header.
    .. versionadded:: 1.9.0
    Parameters
    ----------
    fp : filelike object
        A file object or something with a `.read()` method like a file.
    Returns
    -------
    shape : tuple of int
        The shape of the array.
    fortran_order : bool
        The array data will be written out directly if it is either
        C-contiguous or Fortran-contiguous. Otherwise, it will be made
        contiguous before writing it out.
    dtype : dtype
        The dtype of the file's data.
    Raises
    ------
    ValueError
        If the data is invalid.
    """
    # Delegate to the shared reader with the version pinned to 2.0
    # (32-bit header-length field).
    return _read_array_header(fp, version=(2, 0))
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : byte string
      Npy file header.

    Returns
    -------
    header : str
      Cleaned up header.
    """
    import tokenize
    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO
    kept = []
    prev_was_number = False
    for tok in tokenize.generate_tokens(StringIO(asstr(s)).read):
        tok_type = tok[0]
        tok_string = tok[1]
        # A NAME token "L" directly after a number is the Python-2 long
        # suffix, which Python 3 cannot parse; drop it.
        is_long_suffix = (prev_was_number and
                          tok_type == tokenize.NAME and
                          tok_string == "L")
        if not is_long_suffix:
            kept.append(tok)
        prev_was_number = (tok_type == tokenize.NUMBER)
    return tokenize.untokenize(kept)
def _read_array_header(fp, version):
    """
    see read_array_header_1_0
    """
    # Read an unsigned, little-endian short int which has the length of the
    # header.
    import struct
    if version == (1, 0):
        hlength_str = _read_bytes(fp, 2, "array header length")
        header_length = struct.unpack('<H', hlength_str)[0]
        header = _read_bytes(fp, header_length, "array header")
    elif version == (2, 0):
        # Version 2.0 widens the length field to a little-endian uint32.
        hlength_str = _read_bytes(fp, 4, "array header length")
        header_length = struct.unpack('<I', hlength_str)[0]
        header = _read_bytes(fp, header_length, "array header")
    else:
        raise ValueError("Invalid version %r" % version)
    # The header is a pretty-printed string representation of a literal
    # Python dictionary with trailing newlines padded to a 16-byte
    # boundary. The keys are strings.
    #   "shape" : tuple of int
    #   "fortran_order" : bool
    #   "descr" : dtype.descr
    # Normalize Python-2 'L' long suffixes before evaluating the literal.
    header = _filter_header(header)
    try:
        d = safe_eval(header)
    except SyntaxError as e:
        msg = "Cannot parse header: %r\nException: %r"
        raise ValueError(msg % (header, e))
    if not isinstance(d, dict):
        msg = "Header is not a dictionary: %r"
        raise ValueError(msg % d)
    keys = sorted(d.keys())
    if keys != ['descr', 'fortran_order', 'shape']:
        msg = "Header does not contain the correct keys: %r"
        raise ValueError(msg % (keys,))
    # Sanity-check the values.
    if (not isinstance(d['shape'], tuple) or
            not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
        msg = "shape is not valid: %r"
        raise ValueError(msg % (d['shape'],))
    if not isinstance(d['fortran_order'], bool):
        msg = "fortran_order is not a valid bool: %r"
        raise ValueError(msg % (d['fortran_order'],))
    try:
        dtype = numpy.dtype(d['descr'])
    except TypeError as e:
        # NOTE(review): `e` is unused here; the error message reports only
        # the offending descriptor, not the original exception.
        msg = "descr is not a valid dtype descriptor: %r"
        raise ValueError(msg % (d['descr'],))
    return d['shape'], d['fortran_order'], dtype
def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
    """
    Write an array to an NPY file, including a header.
    If the array is neither C-contiguous nor Fortran-contiguous AND the
    file_like object is not a real file object, this function will have to
    copy data in memory.
    Parameters
    ----------
    fp : file_like object
        An open, writable file object, or similar object with a
        ``.write()`` method.
    array : ndarray
        The array to write to disk.
    version : (int, int) or None, optional
        The version number of the format. None means use the oldest
        supported version that is able to store the data. Default: None
    allow_pickle : bool, optional
        Whether to allow writing pickled data. Default: True
    pickle_kwargs : dict, optional
        Additional keyword arguments to pass to pickle.dump, excluding
        'protocol'. These are only useful when pickling objects in object
        arrays on Python 3 to Python 2 compatible format.
    Raises
    ------
    ValueError
        If the array cannot be persisted. This includes the case of
        allow_pickle=False and array being an object array.
    Various other errors
        If the array contains Python objects as part of its dtype, the
        process of pickling them may raise various errors if the objects
        are not picklable.
    """
    _check_version(version)
    used_ver = _write_array_header(fp, header_data_from_array_1_0(array),
                                   version)
    # this warning can be removed when 1.9 has aged enough
    if version != (2, 0) and used_ver == (2, 0):
        # Fixed: the adjacent literals previously concatenated to
        # "...can only beread by..." (missing space).
        warnings.warn("Stored array in format 2.0. It can only be "
                      "read by NumPy >= 1.9", UserWarning)
    # Set buffer size to 16 MiB to hide the Python loop overhead.
    buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
    if array.dtype.hasobject:
        # We contain Python objects so we cannot write out the data
        # directly. Instead, we will pickle it out with version 2 of the
        # pickle protocol.
        if not allow_pickle:
            raise ValueError("Object arrays cannot be saved when "
                             "allow_pickle=False")
        if pickle_kwargs is None:
            pickle_kwargs = {}
        pickle.dump(array, fp, protocol=2, **pickle_kwargs)
    elif array.flags.f_contiguous and not array.flags.c_contiguous:
        # Fortran-ordered (and not also C-ordered): dump column-major bytes.
        if isfileobj(fp):
            array.T.tofile(fp)
        else:
            for chunk in numpy.nditer(
                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
                    buffersize=buffersize, order='F'):
                fp.write(chunk.tobytes('C'))
    else:
        # C-ordered or non-contiguous; nditer copies to C order as needed.
        if isfileobj(fp):
            array.tofile(fp)
        else:
            for chunk in numpy.nditer(
                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
                    buffersize=buffersize, order='C'):
                fp.write(chunk.tobytes('C'))
def read_array(fp, allow_pickle=True, pickle_kwargs=None):
    """
    Read an array from an NPY file.
    Parameters
    ----------
    fp : file_like object
        If this is not a real file object, then this may take extra memory
        and time.
    allow_pickle : bool, optional
        Whether to allow reading pickled data. Default: True
    pickle_kwargs : dict
        Additional keyword arguments to pass to pickle.load. These are only
        useful when loading object arrays saved on Python 2 when using
        Python 3.
    Returns
    -------
    array : ndarray
        The array from the data on disk.
    Raises
    ------
    ValueError
        If the data is invalid, or allow_pickle=False and the file contains
        an object array.
    """
    version = read_magic(fp)
    _check_version(version)
    shape, fortran_order, dtype = _read_array_header(fp, version)
    if len(shape) == 0:
        # shape == () denotes a 0-d array holding exactly one element.
        count = 1
    else:
        count = numpy.multiply.reduce(shape, dtype=numpy.int64)
    # Now read the actual data.
    if dtype.hasobject:
        # The array contained Python objects. We need to unpickle the data.
        if not allow_pickle:
            raise ValueError("Object arrays cannot be loaded when "
                             "allow_pickle=False")
        if pickle_kwargs is None:
            pickle_kwargs = {}
        try:
            array = pickle.load(fp, **pickle_kwargs)
        except UnicodeError as err:
            if sys.version_info[0] >= 3:
                # Friendlier error message
                raise UnicodeError("Unpickling a python object failed: %r\n"
                                   "You may need to pass the encoding= option "
                                   "to numpy.load" % (err,))
            raise
    else:
        if isfileobj(fp):
            # We can use the fast fromfile() function.
            array = numpy.fromfile(fp, dtype=dtype, count=count)
        else:
            # This is not a real file. We have to read it the
            # memory-intensive way.
            # crc32 module fails on reads greater than 2 ** 32 bytes,
            # breaking large reads from gzip streams. Chunk reads to
            # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
            # of the read. In non-chunked case count < max_read_count, so
            # only one read is performed.
            max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
            array = numpy.empty(count, dtype=dtype)
            for i in range(0, count, max_read_count):
                read_count = min(max_read_count, count - i)
                read_size = int(read_count * dtype.itemsize)
                data = _read_bytes(fp, read_size, "array data")
                array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
                                                         count=read_count)
        # Data on disk is column-major when fortran_order: read flat, shape
        # with the reversed dims, then transpose back to the stored shape.
        if fortran_order:
            array.shape = shape[::-1]
            array = array.transpose()
        else:
            array.shape = shape
    return array
def open_memmap(filename, mode='r+', dtype=None, shape=None,
                fortran_order=False, version=None):
    """
    Open a .npy file as a memory-mapped array.
    This may be used to read an existing file or create a new one.
    Parameters
    ----------
    filename : str
        The name of the file on disk. This may *not* be a file-like
        object.
    mode : str, optional
        The mode in which to open the file; the default is 'r+'. In
        addition to the standard file modes, 'c' is also accepted to mean
        "copy on write." See `memmap` for the available mode strings.
    dtype : data-type, optional
        The data type of the array if we are creating a new file in "write"
        mode, if not, `dtype` is ignored. The default value is None, which
        results in a data-type of `float64`.
    shape : tuple of int
        The shape of the array if we are creating a new file in "write"
        mode, in which case this parameter is required. Otherwise, this
        parameter is ignored and is thus optional.
    fortran_order : bool, optional
        Whether the array should be Fortran-contiguous (True) or
        C-contiguous (False, the default) if we are creating a new file in
        "write" mode.
    version : tuple of int (major, minor) or None
        If the mode is a "write" mode, then this is the version of the file
        format used to create the file. None means use the oldest
        supported version that is able to store the data. Default: None
    Returns
    -------
    marray : memmap
        The memory-mapped array.
    Raises
    ------
    ValueError
        If the data or the mode is invalid.
    IOError
        If the file is not found or cannot be opened correctly.
    See Also
    --------
    memmap
    """
    if not isinstance(filename, basestring):
        raise ValueError("Filename must be a string. Memmap cannot use"
                         " existing file handles.")
    if 'w' in mode:
        # We are creating the file, not reading it.
        # Check if we ought to create the file.
        _check_version(version)
        # Ensure that the given dtype is an authentic dtype object rather
        # than just something that can be interpreted as a dtype object.
        dtype = numpy.dtype(dtype)
        if dtype.hasobject:
            msg = "Array can't be memory-mapped: Python objects in dtype."
            raise ValueError(msg)
        d = dict(
            descr=dtype_to_descr(dtype),
            fortran_order=fortran_order,
            shape=shape,
        )
        # If we got here, then it should be safe to create the file.
        fp = open(filename, mode+'b')
        try:
            used_ver = _write_array_header(fp, d, version)
            # this warning can be removed when 1.9 has aged enough
            if version != (2, 0) and used_ver == (2, 0):
                # Fixed: the adjacent literals previously concatenated to
                # "...can only beread by..." (missing space).
                warnings.warn("Stored array in format 2.0. It can only be "
                              "read by NumPy >= 1.9", UserWarning)
            offset = fp.tell()
        finally:
            fp.close()
    else:
        # Read the header of the file first.
        fp = open(filename, 'rb')
        try:
            version = read_magic(fp)
            _check_version(version)
            shape, fortran_order, dtype = _read_array_header(fp, version)
            if dtype.hasobject:
                msg = "Array can't be memory-mapped: Python objects in dtype."
                raise ValueError(msg)
            offset = fp.tell()
        finally:
            fp.close()
    if fortran_order:
        order = 'F'
    else:
        order = 'C'
    # We need to change a write-only mode to a read-write mode since we've
    # already written data to the file.
    if mode == 'w+':
        mode = 'r+'
    marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
                          mode=mode, offset=offset)
    return marray
def _read_bytes(fp, size, error_template="ran out of data"):
"""
Read from file-like object until size bytes are read.
Raises ValueError if not EOF is encountered before size bytes are read.
Non-blocking objects only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on
# would-block, python2 file will truncate, probably nothing can be
# done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data
| |
"""Fixer that inserts mypy annotations into all methods.
This transforms e.g.
def foo(self, bar, baz=12):
return bar + baz
into a type annotated version:
def foo(self, bar, baz=12):
# type: (Any, int) -> Any # noqa: F821
return bar + baz
or (when setting options['annotation_style'] to 'py3'):
def foo(self, bar : Any, baz : int = 12) -> Any:
return bar + baz
It does not do type inference but it recognizes some basic default
argument values such as numbers and strings (and assumes their type
implies the argument type).
It also uses some basic heuristics to decide whether to ignore the
first argument:
- always if it's named 'self'
- if there's a @classmethod decorator
Finally, it knows that __init__() is supposed to return None.
"""
from __future__ import print_function
import os
import re
from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import syms, touch_import, find_indentation
from lib2to3.patcomp import compile_pattern
from lib2to3.pgen2 import token
from lib2to3.pytree import Leaf, Node
class FixAnnotate(BaseFix):
    """lib2to3 fixer that inserts type annotations into function defs."""
    # This fixer is compatible with the bottom matcher.
    BM_compatible = True
    # This fixer shouldn't run by default.
    explicit = True
    # The pattern to match.
    PATTERN = """
    funcdef< 'def' name=any parameters=parameters< '(' [args=any] rpar=')' > ':' suite=any+ >
    """
    # MAXFIXES caps how many functions get annotated per run; when unset
    # or empty, counter stays None and there is no limit.
    _maxfixes = os.getenv('MAXFIXES')
    counter = None if not _maxfixes else int(_maxfixes)
    def transform(self, node, results):
        """Insert a type annotation into the matched function definition.

        Bails out when the fix counter is exhausted, when any ``# type:``
        comment already exists, or when Python-3 style argument
        annotations are already present.
        """
        if FixAnnotate.counter is not None:
            if FixAnnotate.counter <= 0:
                return
        # Check if there's already a long-form annotation for some argument.
        parameters = results.get('parameters')
        if parameters is not None:
            for ch in parameters.pre_order():
                if ch.prefix.lstrip().startswith('# type:'):
                    return
        args = results.get('args')
        if args is not None:
            for ch in args.pre_order():
                if ch.prefix.lstrip().startswith('# type:'):
                    return
        children = results['suite'][0].children
        # NOTE: I've reverse-engineered the structure of the parse tree.
        # It's always a list of nodes, the first of which contains the
        # entire suite. Its children seem to be:
        #
        #   [0] NEWLINE
        #   [1] INDENT
        #   [2...n-2] statements (the first may be a docstring)
        #   [n-1] DEDENT
        #
        # Comments before the suite are part of the INDENT's prefix.
        #
        # "Compact" functions (e.g. "def foo(x, y): return max(x, y)")
        # have a different structure (no NEWLINE, INDENT, or DEDENT).
        # Check if there's already an annotation.
        for ch in children:
            if ch.prefix.lstrip().startswith('# type:'):
                return  # There's already a # type: comment here; don't change anything.
        # Python 3 style return annotation are already skipped by the pattern
        # Argument-token grammar (annotated args appear as nodes, not leaves):
        #   '(' [ NAME-or-node ['=' expr] (',' NAME-or-node ['=' expr])*
        #         ['*' [NAME-or-node]] [',' '**' NAME-or-node] ] ')'
        # A NAME-or-node is a bare NAME leaf, or a node NAME ':' expr when
        # the argument already carries a Python-3 annotation.
        # Let's skip Python 3 argument annotations
        it = iter(args.children) if args else iter([])
        for ch in it:
            if ch.type == token.STAR:
                # *arg part
                ch = next(it)
                if ch.type == token.COMMA:
                    continue
            elif ch.type == token.DOUBLESTAR:
                # **kwarg part
                ch = next(it)
            if ch.type > 256:
                # this is a node, therefore an annotation
                assert ch.children[0].type == token.NAME
                return
            try:
                ch = next(it)
                if ch.type == token.COLON:
                    # this is an annotation
                    return
                elif ch.type == token.EQUAL:
                    ch = next(it)
                    ch = next(it)
                assert ch.type == token.COMMA
                continue
            except StopIteration:
                break
        # Compute the annotation
        annot = self.make_annotation(node, results)
        if annot is None:
            return
        argtypes, restype = annot
        if self.options['annotation_style'] == 'py3':
            self.add_py3_annot(argtypes, restype, node, results)
        else:
            self.add_py2_annot(argtypes, restype, node, results)
        # Common to py2 and py3 style annotations:
        if FixAnnotate.counter is not None:
            FixAnnotate.counter -= 1
        # Also add 'from typing import Any' at the top if needed.
        self.patch_imports(argtypes + [restype], node)
def add_py3_annot(self, argtypes, restype, node, results):
args = results.get('args')
argleaves = []
if args is None:
# function with 0 arguments
it = iter([])
elif len(args.children) == 0:
# function with 1 argument
it = iter([args])
else:
# function with multiple arguments or 1 arg with default value
it = iter(args.children)
for ch in it:
argstyle = 'name'
if ch.type == token.STAR:
# *arg part
argstyle = 'star'
ch = next(it)
if ch.type == token.COMMA:
continue
elif ch.type == token.DOUBLESTAR:
# *arg part
argstyle = 'keyword'
ch = next(it)
assert ch.type == token.NAME
argleaves.append((argstyle, ch))
try:
ch = next(it)
if ch.type == token.EQUAL:
ch = next(it)
ch = next(it)
assert ch.type == token.COMMA
continue
except StopIteration:
break
# when self or cls is not annotated, argleaves == argtypes+1
argleaves = argleaves[len(argleaves) - len(argtypes):]
for ch_withstyle, chtype in zip(argleaves, argtypes):
style, ch = ch_withstyle
if style == 'star':
assert chtype[0] == '*'
assert chtype[1] != '*'
chtype = chtype[1:]
elif style == 'keyword':
assert chtype[0:2] == '**'
assert chtype[2] != '*'
chtype = chtype[2:]
ch.value = '%s: %s' % (ch.value, chtype)
# put spaces around the equal sign
if ch.next_sibling and ch.next_sibling.type == token.EQUAL:
nextch = ch.next_sibling
if not nextch.prefix[:1].isspace():
nextch.prefix = ' ' + nextch.prefix
nextch = nextch.next_sibling
assert nextch != None
if not nextch.prefix[:1].isspace():
nextch.prefix = ' ' + nextch.prefix
# Add return annotation
rpar = results['rpar']
rpar.value = '%s -> %s' % (rpar.value, restype)
rpar.changed()
    def add_py2_annot(self, argtypes, restype, node, results):
        """Insert a ``# type: (...) -> ...`` comment into the suite."""
        children = results['suite'][0].children
        # Insert '# type: {annot}' comment.
        # For reference, see lib2to3/fixes/fix_tuple_params.py in stdlib.
        if len(children) >= 1 and children[0].type != token.NEWLINE:
            # one liner function
            if children[0].prefix.strip() == '':
                children[0].prefix = ''
                children.insert(0, Leaf(token.NEWLINE, '\n'))
                children.insert(
                    1, Leaf(token.INDENT, find_indentation(node) + ' '))
                children.append(Leaf(token.DEDENT, ''))
        if len(children) >= 2 and children[1].type == token.INDENT:
            degen_str = '(...) -> %s' % restype
            short_str = '(%s) -> %s' % (', '.join(argtypes), restype)
            # Fall back to the degenerate form (with a separate long-form
            # per-argument listing) when the one-liner would be unwieldy.
            if (len(short_str) > 64 or len(argtypes) > 5) and len(short_str) > len(degen_str):
                self.insert_long_form(node, results, argtypes)
                annot_str = degen_str
            else:
                annot_str = short_str
            children[1].prefix = '%s# type: %s\n%s' % (children[1].value, annot_str,
                                                       children[1].prefix)
            children[1].changed()
        else:
            self.log_message("%s:%d: cannot insert annotation for one-line function" %
                             (self.filename, node.get_lineno()))
    def insert_long_form(self, node, results, argtypes):
        """Attach one trailing ``# type:`` comment per argument (long form)."""
        argtypes = list(argtypes)  # We destroy it
        args = results['args']
        if isinstance(args, Node):
            children = args.children
        elif isinstance(args, Leaf):
            children = [args]
        else:
            children = []
        # Interpret children according to the following grammar:
        # (('*'|'**')? NAME ['=' expr] ','?)*
        flag = False  # Set when the next leaf should get a type prefix
        indent = ''  # Will be set by the first child
        def set_prefix(child):
            # Pop the next argument type and attach it as a "# type:"
            # comment on this child's prefix, keeping any existing comment.
            if argtypes:
                arg = argtypes.pop(0).lstrip('*')
            else:
                arg = 'Any'  # Somehow there aren't enough args
            if not arg:
                # Skip self (look for 'check_self' below)
                prefix = child.prefix.rstrip()
            else:
                prefix = ' # type: ' + arg
                old_prefix = child.prefix.strip()
                if old_prefix:
                    assert old_prefix.startswith('#')
                    prefix += ' ' + old_prefix
            child.prefix = prefix + '\n' + indent
        check_self = self.is_method(node)
        for child in children:
            if check_self and isinstance(child, Leaf) and child.type == token.NAME:
                check_self = False
                if child.value in ('self', 'cls'):
                    # Mark self/cls with an empty type so set_prefix skips it.
                    argtypes.insert(0, '')
            if not indent:
                indent = ' ' * child.column
            if isinstance(child, Leaf) and child.value == ',':
                flag = True
            elif isinstance(child, Leaf) and flag:
                set_prefix(child)
                flag = False
        need_comma = len(children) >= 1 and children[-1].type != token.COMMA
        if need_comma and len(children) >= 2:
            if (children[-1].type == token.NAME and
                    (children[-2].type in (token.STAR, token.DOUBLESTAR))):
                need_comma = False
        if need_comma:
            children.append(Leaf(token.COMMA, u","))
        # Find the ')' and insert a prefix before it too.
        parameters = args.parent
        close_paren = parameters.children[-1]
        assert close_paren.type == token.RPAR, close_paren
        set_prefix(close_paren)
        assert not argtypes, argtypes
def patch_imports(self, types, node):
    """Add 'from typing import Any' when any generated type mentions Any.

    A single import statement suffices regardless of how many of the
    type strings reference Any, so the scan stops at the first hit.
    """
    if any('Any' in typ for typ in types):
        touch_import('typing', 'Any', node)
def make_annotation(self, node, results):
    """Infer (argtypes, restype) for a funcdef from its signature.

    The return type is 'None' for __init__ or for functions with no
    'return expr' statements, else 'Any'.  Argument types default to
    'Any', except that simple default values refine them (int, float,
    str, unicode, bool), and the first argument of a method ('self' or
    a classmethod's first arg) is skipped entirely.

    :param node: the funcdef node
    :param results: pattern-match results with 'name' and optional 'args'
    :return: (list of argument type strings, return type string)
    """
    name = results['name']
    assert isinstance(name, Leaf), repr(name)
    assert name.type == token.NAME, repr(name)
    decorators = self.get_decorators(node)
    is_method = self.is_method(node)
    if name.value == '__init__' or not self.has_return_exprs(node):
        restype = 'None'
    else:
        restype = 'Any'
    args = results.get('args')
    argtypes = []
    if isinstance(args, Node):
        children = args.children
    elif isinstance(args, Leaf):
        children = [args]
    else:
        children = []
    # Interpret children according to the following grammar:
    # (('*'|'**')? NAME ['=' expr] ','?)*
    stars = inferred_type = ''
    in_default = False
    at_start = True
    for child in children:
        if isinstance(child, Leaf):
            if child.value in ('*', '**'):
                # Star markers are kept and prefixed to the type string.
                stars += child.value
            elif child.type == token.NAME and not in_default:
                if not is_method or not at_start or 'staticmethod' in decorators:
                    inferred_type = 'Any'
                else:
                    # Always skip the first argument if it's named 'self'.
                    # Always skip the first argument of a class method.
                    if child.value == 'self' or 'classmethod' in decorators:
                        pass
                    else:
                        inferred_type = 'Any'
            elif child.value == '=':
                in_default = True
            elif in_default and child.value != ',':
                if child.type == token.NUMBER:
                    # Trailing l/L marks a Python 2 long literal -> int.
                    if re.match(r'\d+[lL]?$', child.value):
                        inferred_type = 'int'
                    else:
                        inferred_type = 'float'  # TODO: complex?
                elif child.type == token.STRING:
                    if child.value.startswith(('u', 'U')):
                        inferred_type = 'unicode'
                    else:
                        inferred_type = 'str'
                elif child.type == token.NAME and child.value in ('True', 'False'):
                    inferred_type = 'bool'
            elif child.value == ',':
                if inferred_type:
                    argtypes.append(stars + inferred_type)
                # Reset per-argument state at each comma.
                stars = inferred_type = ''
                in_default = False
                at_start = False
    # Flush the final argument (no trailing comma).
    if inferred_type:
        argtypes.append(stars + inferred_type)
    return argtypes, restype
# The parse tree has a different shape when there is a single
# decorator vs. when there are multiple decorators.
# 'd' binds the lone decorator; 'dd' binds the decorator list.
DECORATED = "decorated< (d=decorator | decorators< dd=decorator+ >) funcdef >"
decorated = compile_pattern(DECORATED)
def get_decorators(self, node):
    """Return a list of decorators found on a function definition.

    This is a list of strings; only simple decorators
    (e.g. @staticmethod) are returned.
    If the function is undecorated or only non-simple decorators
    are found, return [].
    """
    parent = node.parent
    if parent is None:
        return []
    matches = {}
    if not self.decorated.match(parent, matches):
        return []
    # Multiple decorators bind to 'dd'; a single one binds to 'd'.
    found = matches.get('dd') or [matches['d']]
    return [leaf.value
            for dec in found
            for leaf in dec.children
            if isinstance(leaf, Leaf) and leaf.type == token.NAME]
def is_method(self, node):
    """Return whether the node occurs (directly) inside a class."""
    ancestor = node.parent
    while ancestor is not None:
        kind = ancestor.type
        if kind == syms.classdef:
            return True
        if kind == syms.funcdef:
            # Hitting an enclosing function first means this is a
            # nested function, not a method.
            return False
        ancestor = ancestor.parent
    return False
# Matches only 'return' with an expression, not a bare 'return'.
RETURN_EXPR = "return_stmt< 'return' any >"
return_expr = compile_pattern(RETURN_EXPR)
def has_return_exprs(self, node):
    """Traverse the tree below node looking for 'return expr'.

    Return True if at least 'return expr' is found, False if not.
    (If both 'return' and 'return expr' are found, return True.)
    """
    if self.return_expr.match(node, {}):
        return True
    # Do not descend into nested function or class definitions:
    # their return statements belong to them, not to this function.
    return any(self.has_return_exprs(child)
               for child in node.children
               if child.type not in (syms.funcdef, syms.classdef))
# Matches 'yield' with or without an expression.
YIELD_EXPR = "yield_expr< 'yield' [any] >"
yield_expr = compile_pattern(YIELD_EXPR)
def is_generator(self, node):
    """Traverse the tree below node looking for 'yield [expr]'."""
    if self.yield_expr.match(node, {}):
        return True
    # Yields inside nested function/class definitions do not make
    # the outer function a generator, so skip those subtrees.
    return any(self.is_generator(child)
               for child in node.children
               if child.type not in (syms.funcdef, syms.classdef))
| |
import unittest
import tkinter
import os
import sys
from test.support import requires
from tkinter.test.support import (tcl_version, requires_tcl,
get_tk_patchlevel, widget_eq)
from tkinter.test.widget_tests import (
add_standard_options, noconv, pixels_round,
AbstractWidgetTest, StandardOptionsTests, IntegerSizeTests, PixelSizeTests,
setUpModule)
requires('gui')
def float_round(x):
    """Round *x* with built-in round() and return the result as a float."""
    rounded = round(x)
    return float(rounded)
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
    """Shared option tests for Toplevel-like widgets (Toplevel, Frame, ...).

    Focuses on options that Tk only accepts at creation time
    (class, colormap, container, visual): setting them afterwards must
    raise, while passing them to the constructor must succeed.
    """
    _conv_pad_pixels = noconv

    def test_class(self):
        # Defaults to the title-cased widget class name; read-only after
        # creation, settable via the class_ constructor keyword.
        widget = self.create()
        self.assertEqual(widget['class'],
                         widget.__class__.__name__.title())
        self.checkInvalidParam(widget, 'class', 'Foo',
                errmsg="can't modify -class option after widget is created")
        widget2 = self.create(class_='Foo')
        self.assertEqual(widget2['class'], 'Foo')

    def test_colormap(self):
        # Creation-time-only option.
        widget = self.create()
        self.assertEqual(widget['colormap'], '')
        self.checkInvalidParam(widget, 'colormap', 'new',
                errmsg="can't modify -colormap option after widget is created")
        widget2 = self.create(colormap='new')
        self.assertEqual(widget2['colormap'], 'new')

    def test_container(self):
        # Creation-time-only boolean; string form when wantobjects is off.
        widget = self.create()
        self.assertEqual(widget['container'], 0 if self.wantobjects else '0')
        self.checkInvalidParam(widget, 'container', 1,
                errmsg="can't modify -container option after widget is created")
        widget2 = self.create(container=True)
        self.assertEqual(widget2['container'], 1 if self.wantobjects else '1')

    def test_visual(self):
        # Creation-time-only option.
        widget = self.create()
        self.assertEqual(widget['visual'], '')
        self.checkInvalidParam(widget, 'visual', 'default',
                errmsg="can't modify -visual option after widget is created")
        widget2 = self.create(visual='default')
        self.assertEqual(widget2['visual'], 'default')
@add_standard_options(StandardOptionsTests)
class ToplevelTest(AbstractToplevelTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Toplevel widget."""
    OPTIONS = (
        'background', 'borderwidth',
        'class', 'colormap', 'container', 'cursor', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'menu', 'padx', 'pady', 'relief', 'screen',
        'takefocus', 'use', 'visual', 'width',
    )

    def _create(self, **kwargs):
        return tkinter.Toplevel(self.root, **kwargs)

    def test_menu(self):
        widget = self.create()
        menu = tkinter.Menu(self.root)
        self.checkParam(widget, 'menu', menu, eq=widget_eq)
        self.checkParam(widget, 'menu', '')

    def test_screen(self):
        # 'screen' is creation-time-only and needs a real $DISPLAY value.
        widget = self.create()
        self.assertEqual(widget['screen'], '')
        try:
            display = os.environ['DISPLAY']
        except KeyError:
            self.skipTest('No $DISPLAY set.')
        self.checkInvalidParam(widget, 'screen', display,
                errmsg="can't modify -screen option after widget is created")
        widget2 = self.create(screen=display)
        self.assertEqual(widget2['screen'], display)

    def test_use(self):
        # Embed a toplevel inside a container widget via its window id.
        widget = self.create()
        self.assertEqual(widget['use'], '')
        parent = self.create(container=True)
        wid = parent.winfo_id()
        widget2 = self.create(use=wid)
        self.assertEqual(int(widget2['use']), wid)
@add_standard_options(StandardOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Frame widget."""
    OPTIONS = (
        'background', 'borderwidth',
        'class', 'colormap', 'container', 'cursor', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'relief', 'takefocus', 'visual', 'width',
    )

    def _create(self, **options):
        frame = tkinter.Frame(self.root, **options)
        return frame
@add_standard_options(StandardOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.LabelFrame widget."""
    OPTIONS = (
        'background', 'borderwidth',
        'class', 'colormap', 'container', 'cursor',
        'font', 'foreground', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'labelanchor', 'labelwidget', 'padx', 'pady', 'relief',
        'takefocus', 'text', 'visual', 'width',
    )

    def _create(self, **kwargs):
        return tkinter.LabelFrame(self.root, **kwargs)

    def test_labelanchor(self):
        # All 12 compass-style anchors are valid; 'center' is not.
        widget = self.create()
        self.checkEnumParam(widget, 'labelanchor',
                            'e', 'en', 'es', 'n', 'ne', 'nw',
                            's', 'se', 'sw', 'w', 'wn', 'ws')
        self.checkInvalidParam(widget, 'labelanchor', 'center')

    def test_labelwidget(self):
        # The option stores the widget's Tk path name (here '.foo').
        widget = self.create()
        label = tkinter.Label(self.root, text='Mupp', name='foo')
        self.checkParam(widget, 'labelwidget', label, expected='.foo')
        label.destroy()
class AbstractLabelTest(AbstractWidgetTest, IntegerSizeTests):
    """Shared option tests for label-like widgets (Label, Button, ...)."""
    _conv_pixels = noconv

    def test_highlightthickness(self):
        # Includes a negative value; conversion behaviour is widget-specific.
        widget = self.create()
        self.checkPixelsParam(widget, 'highlightthickness',
                              0, 1.3, 2.6, 6, -2, '10p')
@add_standard_options(StandardOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Label widget."""
    OPTIONS = (
        'activebackground', 'activeforeground', 'anchor',
        'background', 'bitmap', 'borderwidth', 'compound', 'cursor',
        'disabledforeground', 'font', 'foreground', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'image', 'justify', 'padx', 'pady', 'relief', 'state',
        'takefocus', 'text', 'textvariable',
        'underline', 'width', 'wraplength',
    )

    def _create(self, **kwargs):
        return tkinter.Label(self.root, **kwargs)
@add_standard_options(StandardOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Button widget."""
    OPTIONS = (
        'activebackground', 'activeforeground', 'anchor',
        'background', 'bitmap', 'borderwidth',
        'command', 'compound', 'cursor', 'default',
        'disabledforeground', 'font', 'foreground', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'image', 'justify', 'overrelief', 'padx', 'pady', 'relief',
        'repeatdelay', 'repeatinterval',
        'state', 'takefocus', 'text', 'textvariable',
        'underline', 'width', 'wraplength')

    def _create(self, **kwargs):
        return tkinter.Button(self.root, **kwargs)

    def test_default(self):
        # 'default' controls default-ring appearance, not the button state.
        widget = self.create()
        self.checkEnumParam(widget, 'default', 'active', 'disabled', 'normal')
@add_standard_options(StandardOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Checkbutton widget."""
    OPTIONS = (
        'activebackground', 'activeforeground', 'anchor',
        'background', 'bitmap', 'borderwidth',
        'command', 'compound', 'cursor',
        'disabledforeground', 'font', 'foreground', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'image', 'indicatoron', 'justify',
        'offrelief', 'offvalue', 'onvalue', 'overrelief',
        'padx', 'pady', 'relief', 'selectcolor', 'selectimage', 'state',
        'takefocus', 'text', 'textvariable',
        'tristateimage', 'tristatevalue',
        'underline', 'variable', 'width', 'wraplength',
    )

    def _create(self, **kwargs):
        return tkinter.Checkbutton(self.root, **kwargs)

    def test_offvalue(self):
        # Any value (number, empty or arbitrary string) is accepted.
        widget = self.create()
        self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')

    def test_onvalue(self):
        widget = self.create()
        self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
@add_standard_options(StandardOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Radiobutton widget."""
    OPTIONS = (
        'activebackground', 'activeforeground', 'anchor',
        'background', 'bitmap', 'borderwidth',
        'command', 'compound', 'cursor',
        'disabledforeground', 'font', 'foreground', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'image', 'indicatoron', 'justify', 'offrelief', 'overrelief',
        'padx', 'pady', 'relief', 'selectcolor', 'selectimage', 'state',
        'takefocus', 'text', 'textvariable',
        'tristateimage', 'tristatevalue',
        'underline', 'value', 'variable', 'width', 'wraplength',
    )

    def _create(self, **kwargs):
        return tkinter.Radiobutton(self.root, **kwargs)

    def test_value(self):
        # Any value (number, empty or arbitrary string) is accepted.
        widget = self.create()
        self.checkParams(widget, 'value', 1, 2.3, '', 'any string')
@add_standard_options(StandardOptionsTests)
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Menubutton widget."""
    OPTIONS = (
        'activebackground', 'activeforeground', 'anchor',
        'background', 'bitmap', 'borderwidth',
        'compound', 'cursor', 'direction',
        'disabledforeground', 'font', 'foreground', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'image', 'indicatoron', 'justify', 'menu',
        'padx', 'pady', 'relief', 'state',
        'takefocus', 'text', 'textvariable',
        'underline', 'width', 'wraplength',
    )
    # Unlike AbstractLabelTest's noconv, Menubutton rounds pixel values.
    _conv_pixels = staticmethod(pixels_round)

    def _create(self, **kwargs):
        return tkinter.Menubutton(self.root, **kwargs)

    def test_direction(self):
        widget = self.create()
        self.checkEnumParam(widget, 'direction',
                            'above', 'below', 'flush', 'left', 'right')

    def test_height(self):
        # Height is measured in text lines (integer), not pixels.
        widget = self.create()
        self.checkIntegerParam(widget, 'height', 100, -100, 0, conv=str)

    # Reuse the standard behaviour, overriding AbstractLabelTest's version.
    test_highlightthickness = StandardOptionsTests.test_highlightthickness

    @unittest.skipIf(sys.platform == 'darwin',
                     'crashes with Cocoa Tk (issue19733)')
    def test_image(self):
        widget = self.create()
        image = tkinter.PhotoImage('image1')
        self.checkParam(widget, 'image', image, conv=str)
        # Referencing a nonexistent image name must raise TclError.
        errmsg = 'image "spam" doesn\'t exist'
        with self.assertRaises(tkinter.TclError) as cm:
            widget['image'] = 'spam'
        if errmsg is not None:
            self.assertEqual(str(cm.exception), errmsg)
        with self.assertRaises(tkinter.TclError) as cm:
            widget.configure({'image': 'spam'})
        if errmsg is not None:
            self.assertEqual(str(cm.exception), errmsg)

    def test_menu(self):
        widget = self.create()
        menu = tkinter.Menu(widget, name='menu')
        self.checkParam(widget, 'menu', menu, eq=widget_eq)
        menu.destroy()

    def test_padx(self):
        # Negative padding is clamped to 0.
        widget = self.create()
        self.checkPixelsParam(widget, 'padx', 3, 4.4, 5.6, '12m')
        self.checkParam(widget, 'padx', -2, expected=0)

    def test_pady(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'pady', 3, 4.4, 5.6, '12m')
        self.checkParam(widget, 'pady', -2, expected=0)

    def test_width(self):
        # Width is measured in characters (integer), not pixels.
        widget = self.create()
        self.checkIntegerParam(widget, 'width', 402, -402, 0, conv=str)
class OptionMenuTest(MenubuttonTest, unittest.TestCase):
    """OptionMenu is a Menubutton subclass, so reuse its option tests."""

    def _create(self, default='b', values=('a', 'b', 'c'), **kwargs):
        widget = tkinter.OptionMenu(self.root, None, default, *values, **kwargs)
        return widget
@add_standard_options(IntegerSizeTests, StandardOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Entry widget."""
    OPTIONS = (
        'background', 'borderwidth', 'cursor',
        'disabledbackground', 'disabledforeground',
        'exportselection', 'font', 'foreground',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'insertbackground', 'insertborderwidth',
        'insertofftime', 'insertontime', 'insertwidth',
        'invalidcommand', 'justify', 'readonlybackground', 'relief',
        'selectbackground', 'selectborderwidth', 'selectforeground',
        'show', 'state', 'takefocus', 'textvariable',
        'validate', 'validatecommand', 'width', 'xscrollcommand',
    )

    def _create(self, **kwargs):
        return tkinter.Entry(self.root, **kwargs)

    def test_disabledbackground(self):
        widget = self.create()
        self.checkColorParam(widget, 'disabledbackground')

    def test_insertborderwidth(self):
        widget = self.create(insertwidth=100)
        self.checkPixelsParam(widget, 'insertborderwidth',
                              0, 1.3, 2.6, 6, -2, '10p')
        # insertborderwidth is bounded above by a half of insertwidth.
        self.checkParam(widget, 'insertborderwidth', 60, expected=100//2)

    def test_insertwidth(self):
        # Values below the minimum are clamped to 2 pixels.
        widget = self.create()
        self.checkPixelsParam(widget, 'insertwidth', 1.3, 3.6, '10p')
        self.checkParam(widget, 'insertwidth', 0.1, expected=2)
        self.checkParam(widget, 'insertwidth', -2, expected=2)
        if pixels_round(0.9) <= 0:
            self.checkParam(widget, 'insertwidth', 0.9, expected=2)
        else:
            self.checkParam(widget, 'insertwidth', 0.9, expected=1)

    def test_invalidcommand(self):
        # 'invcmd' is the documented abbreviation for 'invalidcommand'.
        widget = self.create()
        self.checkCommandParam(widget, 'invalidcommand')
        self.checkCommandParam(widget, 'invcmd')

    def test_readonlybackground(self):
        widget = self.create()
        self.checkColorParam(widget, 'readonlybackground')

    def test_show(self):
        # Any single character (or empty string) is a valid mask.
        widget = self.create()
        self.checkParam(widget, 'show', '*')
        self.checkParam(widget, 'show', '')
        self.checkParam(widget, 'show', ' ')

    def test_state(self):
        widget = self.create()
        self.checkEnumParam(widget, 'state',
                            'disabled', 'normal', 'readonly')

    def test_validate(self):
        widget = self.create()
        self.checkEnumParam(widget, 'validate',
                            'all', 'key', 'focus', 'focusin', 'focusout', 'none')

    def test_validatecommand(self):
        # 'vcmd' is the documented abbreviation for 'validatecommand'.
        widget = self.create()
        self.checkCommandParam(widget, 'validatecommand')
        self.checkCommandParam(widget, 'vcmd')
@add_standard_options(StandardOptionsTests)
class SpinboxTest(EntryTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Spinbox widget."""
    OPTIONS = (
        'activebackground', 'background', 'borderwidth',
        'buttonbackground', 'buttoncursor', 'buttondownrelief', 'buttonuprelief',
        'command', 'cursor', 'disabledbackground', 'disabledforeground',
        'exportselection', 'font', 'foreground', 'format', 'from',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'increment',
        'insertbackground', 'insertborderwidth',
        'insertofftime', 'insertontime', 'insertwidth',
        'invalidcommand', 'justify', 'relief', 'readonlybackground',
        'repeatdelay', 'repeatinterval',
        'selectbackground', 'selectborderwidth', 'selectforeground',
        'state', 'takefocus', 'textvariable', 'to',
        'validate', 'validatecommand', 'values',
        'width', 'wrap', 'xscrollcommand',
    )

    def _create(self, **kwargs):
        return tkinter.Spinbox(self.root, **kwargs)

    # Spinbox has no 'show' option; disable the inherited EntryTest test.
    test_show = None

    def test_buttonbackground(self):
        widget = self.create()
        self.checkColorParam(widget, 'buttonbackground')

    def test_buttoncursor(self):
        widget = self.create()
        self.checkCursorParam(widget, 'buttoncursor')

    def test_buttondownrelief(self):
        widget = self.create()
        self.checkReliefParam(widget, 'buttondownrelief')

    def test_buttonuprelief(self):
        widget = self.create()
        self.checkReliefParam(widget, 'buttonuprelief')

    def test_format(self):
        # Only simple '%<flags><width>.<precision>f' formats are accepted.
        widget = self.create()
        self.checkParam(widget, 'format', '%2f')
        self.checkParam(widget, 'format', '%2.2f')
        self.checkParam(widget, 'format', '%.2f')
        self.checkParam(widget, 'format', '%2.f')
        self.checkInvalidParam(widget, 'format', '%2e-1f')
        self.checkInvalidParam(widget, 'format', '2.2')
        self.checkInvalidParam(widget, 'format', '%2.-2f')
        self.checkParam(widget, 'format', '%-2.02f')
        self.checkParam(widget, 'format', '% 2.02f')
        self.checkParam(widget, 'format', '% -2.200f')
        self.checkParam(widget, 'format', '%09.200f')
        self.checkInvalidParam(widget, 'format', '%d')

    def test_from(self):
        # 'from' must stay below 'to'; set 'to' first to test the bound.
        widget = self.create()
        self.checkParam(widget, 'to', 100.0)
        self.checkFloatParam(widget, 'from', -10, 10.2, 11.7)
        self.checkInvalidParam(widget, 'from', 200,
                errmsg='-to value must be greater than -from value')

    def test_increment(self):
        widget = self.create()
        self.checkFloatParam(widget, 'increment', -1, 1, 10.2, 12.8, 0)

    def test_to(self):
        # 'to' must stay above 'from'; set 'from' first to test the bound.
        widget = self.create()
        self.checkParam(widget, 'from', -100.0)
        self.checkFloatParam(widget, 'to', -10, 10.2, 11.7)
        self.checkInvalidParam(widget, 'to', -200,
                errmsg='-to value must be greater than -from value')

    def test_values(self):
        # XXX
        # Tuples are converted to a Tcl list; special chars get braced.
        widget = self.create()
        self.assertEqual(widget['values'], '')
        self.checkParam(widget, 'values', 'mon tue wed thur')
        self.checkParam(widget, 'values', ('mon', 'tue', 'wed', 'thur'),
                        expected='mon tue wed thur')
        self.checkParam(widget, 'values', (42, 3.14, '', 'any string'),
                        expected='42 3.14 {} {any string}')
        self.checkParam(widget, 'values', '')

    def test_wrap(self):
        widget = self.create()
        self.checkBooleanParam(widget, 'wrap')

    def test_bbox(self):
        # bbox(index) returns a 4-tuple of ints; bad indices raise TclError,
        # wrong argument counts raise TypeError.
        widget = self.create()
        bbox = widget.bbox(0)
        self.assertEqual(len(bbox), 4)
        for item in bbox:
            self.assertIsInstance(item, int)
        self.assertRaises(tkinter.TclError, widget.bbox, 'noindex')
        self.assertRaises(tkinter.TclError, widget.bbox, None)
        self.assertRaises(TypeError, widget.bbox)
        self.assertRaises(TypeError, widget.bbox, 0, 1)
@add_standard_options(StandardOptionsTests)
class TextTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Text widget."""
    OPTIONS = (
        'autoseparators', 'background', 'blockcursor', 'borderwidth',
        'cursor', 'endline', 'exportselection',
        'font', 'foreground', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'inactiveselectbackground', 'insertbackground', 'insertborderwidth',
        'insertofftime', 'insertontime', 'insertunfocussed', 'insertwidth',
        'maxundo', 'padx', 'pady', 'relief',
        'selectbackground', 'selectborderwidth', 'selectforeground',
        'setgrid', 'spacing1', 'spacing2', 'spacing3', 'startline', 'state',
        'tabs', 'tabstyle', 'takefocus', 'undo', 'width', 'wrap',
        'xscrollcommand', 'yscrollcommand',
    )
    if tcl_version < (8, 5):
        wantobjects = False

    def _create(self, **kwargs):
        return tkinter.Text(self.root, **kwargs)

    def test_autoseparators(self):
        widget = self.create()
        self.checkBooleanParam(widget, 'autoseparators')

    @requires_tcl(8, 5)
    def test_blockcursor(self):
        widget = self.create()
        self.checkBooleanParam(widget, 'blockcursor')

    @requires_tcl(8, 5)
    def test_endline(self):
        widget = self.create()
        # Bug fix: interpolate the line number; previously the literal
        # string 'Line %d' was joined 100 times (the loop var was unused).
        text = '\n'.join('Line %d' % i for i in range(100))
        widget.insert('end', text)
        # Out-of-range values reset the option to the empty string.
        self.checkParam(widget, 'endline', 200, expected='')
        self.checkParam(widget, 'endline', -10, expected='')
        self.checkInvalidParam(widget, 'endline', 'spam',
                errmsg='expected integer but got "spam"')
        self.checkParam(widget, 'endline', 50)
        self.checkParam(widget, 'startline', 15)
        self.checkInvalidParam(widget, 'endline', 10,
                errmsg='-startline must be less than or equal to -endline')

    def test_height(self):
        # Height is clamped to a minimum of 1 line.
        widget = self.create()
        self.checkPixelsParam(widget, 'height', 100, 101.2, 102.6, '3c')
        self.checkParam(widget, 'height', -100, expected=1)
        self.checkParam(widget, 'height', 0, expected=1)

    def test_maxundo(self):
        widget = self.create()
        self.checkIntegerParam(widget, 'maxundo', 0, 5, -1)

    @requires_tcl(8, 5)
    def test_inactiveselectbackground(self):
        widget = self.create()
        self.checkColorParam(widget, 'inactiveselectbackground')

    @requires_tcl(8, 6)
    def test_insertunfocussed(self):
        widget = self.create()
        self.checkEnumParam(widget, 'insertunfocussed',
                            'hollow', 'none', 'solid')

    def test_selectborderwidth(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'selectborderwidth',
                              1.3, 2.6, -2, '10p', conv=noconv,
                              keep_orig=tcl_version >= (8, 5))

    def test_spacing1(self):
        # Negative spacing is clamped to 0.
        widget = self.create()
        self.checkPixelsParam(widget, 'spacing1', 20, 21.4, 22.6, '0.5c')
        self.checkParam(widget, 'spacing1', -5, expected=0)

    def test_spacing2(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'spacing2', 5, 6.4, 7.6, '0.1c')
        self.checkParam(widget, 'spacing2', -1, expected=0)

    def test_spacing3(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'spacing3', 20, 21.4, 22.6, '0.5c')
        self.checkParam(widget, 'spacing3', -10, expected=0)

    @requires_tcl(8, 5)
    def test_startline(self):
        widget = self.create()
        # Bug fix: interpolate the line number (see test_endline).
        text = '\n'.join('Line %d' % i for i in range(100))
        widget.insert('end', text)
        self.checkParam(widget, 'startline', 200, expected='')
        self.checkParam(widget, 'startline', -10, expected='')
        self.checkInvalidParam(widget, 'startline', 'spam',
                errmsg='expected integer but got "spam"')
        self.checkParam(widget, 'startline', 10)
        self.checkParam(widget, 'endline', 50)
        self.checkInvalidParam(widget, 'startline', 70,
                errmsg='-startline must be less than or equal to -endline')

    def test_state(self):
        widget = self.create()
        if tcl_version < (8, 5):
            self.checkParams(widget, 'state', 'disabled', 'normal')
        else:
            self.checkEnumParam(widget, 'state', 'disabled', 'normal')

    def test_tabs(self):
        # Older Tk patchlevels return tab stops as strings.
        widget = self.create()
        if get_tk_patchlevel() < (8, 5, 11):
            self.checkParam(widget, 'tabs', (10.2, 20.7, '1i', '2i'),
                            expected=('10.2', '20.7', '1i', '2i'))
        else:
            self.checkParam(widget, 'tabs', (10.2, 20.7, '1i', '2i'))
        self.checkParam(widget, 'tabs', '10.2 20.7 1i 2i',
                        expected=('10.2', '20.7', '1i', '2i'))
        self.checkParam(widget, 'tabs', '2c left 4c 6c center',
                        expected=('2c', 'left', '4c', '6c', 'center'))
        self.checkInvalidParam(widget, 'tabs', 'spam',
                               errmsg='bad screen distance "spam"',
                               keep_orig=tcl_version >= (8, 5))

    @requires_tcl(8, 5)
    def test_tabstyle(self):
        widget = self.create()
        self.checkEnumParam(widget, 'tabstyle', 'tabular', 'wordprocessor')

    def test_undo(self):
        widget = self.create()
        self.checkBooleanParam(widget, 'undo')

    def test_width(self):
        # Width is clamped to a minimum of 1 character.
        widget = self.create()
        self.checkIntegerParam(widget, 'width', 402)
        self.checkParam(widget, 'width', -402, expected=1)
        self.checkParam(widget, 'width', 0, expected=1)

    def test_wrap(self):
        widget = self.create()
        if tcl_version < (8, 5):
            self.checkParams(widget, 'wrap', 'char', 'none', 'word')
        else:
            self.checkEnumParam(widget, 'wrap', 'char', 'none', 'word')

    def test_bbox(self):
        # bbox(index) returns a 4-tuple of ints, None past the end,
        # and raises TclError for bad indices or argument counts.
        widget = self.create()
        bbox = widget.bbox('1.1')
        self.assertEqual(len(bbox), 4)
        for item in bbox:
            self.assertIsInstance(item, int)
        self.assertIsNone(widget.bbox('end'))
        self.assertRaises(tkinter.TclError, widget.bbox, 'noindex')
        self.assertRaises(tkinter.TclError, widget.bbox, None)
        self.assertRaises(tkinter.TclError, widget.bbox)
        self.assertRaises(tkinter.TclError, widget.bbox, '1.1', 'end')
@add_standard_options(PixelSizeTests, StandardOptionsTests)
class CanvasTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Canvas widget."""
    OPTIONS = (
        'background', 'borderwidth',
        'closeenough', 'confine', 'cursor', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'insertbackground', 'insertborderwidth',
        'insertofftime', 'insertontime', 'insertwidth',
        'relief', 'scrollregion',
        'selectbackground', 'selectborderwidth', 'selectforeground',
        'state', 'takefocus',
        'xscrollcommand', 'xscrollincrement',
        'yscrollcommand', 'yscrollincrement', 'width',
    )
    # Canvas rounds pixel values and returns strings (wantobjects off).
    _conv_pixels = round
    wantobjects = False

    def _create(self, **kwargs):
        return tkinter.Canvas(self.root, **kwargs)

    def test_closeenough(self):
        widget = self.create()
        self.checkFloatParam(widget, 'closeenough', 24, 2.4, 3.6, -3,
                             conv=float)

    def test_confine(self):
        widget = self.create()
        self.checkBooleanParam(widget, 'confine')

    def test_scrollregion(self):
        # Accepts a string or a 4-sequence of coords; anything else fails.
        widget = self.create()
        self.checkParam(widget, 'scrollregion', '0 0 200 150')
        self.checkParam(widget, 'scrollregion', (0, 0, 200, 150),
                        expected='0 0 200 150')
        self.checkParam(widget, 'scrollregion', '')
        self.checkInvalidParam(widget, 'scrollregion', 'spam',
                               errmsg='bad scrollRegion "spam"')
        self.checkInvalidParam(widget, 'scrollregion', (0, 0, 200, 'spam'))
        self.checkInvalidParam(widget, 'scrollregion', (0, 0, 200))
        self.checkInvalidParam(widget, 'scrollregion', (0, 0, 200, 150, 0))

    def test_state(self):
        widget = self.create()
        self.checkEnumParam(widget, 'state', 'disabled', 'normal',
                errmsg='bad state value "{}": must be normal or disabled')

    def test_xscrollincrement(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'xscrollincrement',
                              40, 0, 41.2, 43.6, -40, '0.5i')

    def test_yscrollincrement(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'yscrollincrement',
                              10, 0, 11.2, 13.6, -10, '0.1i')
@add_standard_options(IntegerSizeTests, StandardOptionsTests)
class ListboxTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Listbox widget."""
    OPTIONS = (
        'activestyle', 'background', 'borderwidth', 'cursor',
        'disabledforeground', 'exportselection',
        'font', 'foreground', 'height',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'listvariable', 'relief',
        'selectbackground', 'selectborderwidth', 'selectforeground',
        'selectmode', 'setgrid', 'state',
        'takefocus', 'width', 'xscrollcommand', 'yscrollcommand',
    )

    def _create(self, **kwargs):
        return tkinter.Listbox(self.root, **kwargs)

    def test_activestyle(self):
        widget = self.create()
        self.checkEnumParam(widget, 'activestyle',
                            'dotbox', 'none', 'underline')

    def test_listvariable(self):
        widget = self.create()
        var = tkinter.DoubleVar()
        self.checkVariableParam(widget, 'listvariable', var)

    def test_selectmode(self):
        # selectmode is a free-form string option, not a strict enum.
        widget = self.create()
        self.checkParam(widget, 'selectmode', 'single')
        self.checkParam(widget, 'selectmode', 'browse')
        self.checkParam(widget, 'selectmode', 'multiple')
        self.checkParam(widget, 'selectmode', 'extended')

    def test_state(self):
        widget = self.create()
        self.checkEnumParam(widget, 'state', 'disabled', 'normal')
@add_standard_options(PixelSizeTests, StandardOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Scale widget."""
    OPTIONS = (
        'activebackground', 'background', 'bigincrement', 'borderwidth',
        'command', 'cursor', 'digits', 'font', 'foreground', 'from',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'label', 'length', 'orient', 'relief',
        'repeatdelay', 'repeatinterval',
        'resolution', 'showvalue', 'sliderlength', 'sliderrelief', 'state',
        'takefocus', 'tickinterval', 'to', 'troughcolor', 'variable', 'width',
    )
    default_orient = 'vertical'

    def _create(self, **kwargs):
        return tkinter.Scale(self.root, **kwargs)

    def test_bigincrement(self):
        widget = self.create()
        self.checkFloatParam(widget, 'bigincrement', 12.4, 23.6, -5)

    def test_digits(self):
        widget = self.create()
        self.checkIntegerParam(widget, 'digits', 5, 0)

    def test_from(self):
        # float_round: Scale rounds the stored value to a whole float.
        widget = self.create()
        self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=float_round)

    def test_label(self):
        widget = self.create()
        self.checkParam(widget, 'label', 'any string')
        self.checkParam(widget, 'label', '')

    def test_length(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')

    def test_resolution(self):
        widget = self.create()
        self.checkFloatParam(widget, 'resolution', 4.2, 0, 6.7, -2)

    def test_showvalue(self):
        widget = self.create()
        self.checkBooleanParam(widget, 'showvalue')

    def test_sliderlength(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'sliderlength',
                              10, 11.2, 15.6, -3, '3m')

    def test_sliderrelief(self):
        widget = self.create()
        self.checkReliefParam(widget, 'sliderrelief')

    def test_tickinterval(self):
        # A negative tickinterval is normalized (here -2 becomes 2).
        widget = self.create()
        self.checkFloatParam(widget, 'tickinterval', 1, 4.3, 7.6, 0,
                             conv=float_round)
        self.checkParam(widget, 'tickinterval', -2, expected=2,
                        conv=float_round)

    def test_to(self):
        widget = self.create()
        self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10,
                             conv=float_round)
@add_standard_options(PixelSizeTests, StandardOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.Scrollbar widget."""
    OPTIONS = (
        'activebackground', 'activerelief',
        'background', 'borderwidth',
        'command', 'cursor', 'elementborderwidth',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'jump', 'orient', 'relief',
        'repeatdelay', 'repeatinterval',
        'takefocus', 'troughcolor', 'width',
    )
    # Scrollbar rounds pixel values and returns strings (wantobjects off).
    _conv_pixels = round
    wantobjects = False
    default_orient = 'vertical'

    def _create(self, **kwargs):
        return tkinter.Scrollbar(self.root, **kwargs)

    def test_activerelief(self):
        widget = self.create()
        self.checkReliefParam(widget, 'activerelief')

    def test_elementborderwidth(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'elementborderwidth', 4.3, 5.6, -2, '1m')

    def test_orient(self):
        widget = self.create()
        self.checkEnumParam(widget, 'orient', 'vertical', 'horizontal',
                errmsg='bad orientation "{}": must be vertical or horizontal')
@add_standard_options(StandardOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for configuration options of the tkinter.PanedWindow widget."""
    OPTIONS = (
        'background', 'borderwidth', 'cursor',
        'handlepad', 'handlesize', 'height',
        'opaqueresize', 'orient', 'relief',
        'sashcursor', 'sashpad', 'sashrelief', 'sashwidth',
        'showhandle', 'width',
    )
    default_orient = 'horizontal'

    def _create(self, **kwargs):
        return tkinter.PanedWindow(self.root, **kwargs)

    def test_handlepad(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'handlepad', 5, 6.4, 7.6, -3, '1m')

    def test_handlesize(self):
        # conv=noconv: values are compared without pixel conversion.
        widget = self.create()
        self.checkPixelsParam(widget, 'handlesize', 8, 9.4, 10.6, -3, '2m',
                              conv=noconv)

    def test_height(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i',
                              conv=noconv)

    def test_opaqueresize(self):
        widget = self.create()
        self.checkBooleanParam(widget, 'opaqueresize')

    def test_sashcursor(self):
        widget = self.create()
        self.checkCursorParam(widget, 'sashcursor')

    def test_sashpad(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'sashpad', 8, 1.3, 2.6, -2, '2m')

    def test_sashrelief(self):
        widget = self.create()
        self.checkReliefParam(widget, 'sashrelief')

    def test_sashwidth(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'sashwidth', 10, 11.1, 15.6, -3, '1m',
                              conv=noconv)

    def test_showhandle(self):
        widget = self.create()
        self.checkBooleanParam(widget, 'showhandle')

    def test_width(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'width', 402, 403.4, 404.6, -402, 0, '5i',
                              conv=noconv)
@add_standard_options(StandardOptionsTests)
class MenuTest(AbstractWidgetTest, unittest.TestCase):
    """Option tests for tkinter.Menu."""

    OPTIONS = (
        'activebackground', 'activeborderwidth', 'activeforeground',
        'background', 'borderwidth', 'cursor',
        'disabledforeground', 'font', 'foreground',
        'postcommand', 'relief', 'selectcolor', 'takefocus',
        'tearoff', 'tearoffcommand', 'title', 'type',
    )
    _conv_pixels = noconv

    def _create(self, **kwargs):
        return tkinter.Menu(self.root, **kwargs)

    def test_postcommand(self):
        menu = self.create()
        self.checkCommandParam(menu, 'postcommand')

    def test_tearoff(self):
        menu = self.create()
        self.checkBooleanParam(menu, 'tearoff')

    def test_tearoffcommand(self):
        menu = self.create()
        self.checkCommandParam(menu, 'tearoffcommand')

    def test_title(self):
        menu = self.create()
        self.checkParam(menu, 'title', 'any string')

    def test_type(self):
        menu = self.create()
        self.checkEnumParam(menu, 'type', 'normal', 'tearoff', 'menubar')
@add_standard_options(PixelSizeTests, StandardOptionsTests)
class MessageTest(AbstractWidgetTest, unittest.TestCase):
    """Option tests for tkinter.Message."""

    OPTIONS = (
        'anchor', 'aspect', 'background', 'borderwidth',
        'cursor', 'font', 'foreground',
        'highlightbackground', 'highlightcolor', 'highlightthickness',
        'justify', 'padx', 'pady', 'relief',
        'takefocus', 'text', 'textvariable', 'width',
    )
    _conv_pad_pixels = noconv

    def _create(self, **kwargs):
        return tkinter.Message(self.root, **kwargs)

    def test_aspect(self):
        message = self.create()
        self.checkIntegerParam(message, 'aspect', 250, 0, -300)
# GUI test cases exported for the test runner (alphabetical order).
# Fixed the PEP 8 violation: missing space after the comma between
# LabelFrameTest and LabelTest.
tests_gui = (
    ButtonTest, CanvasTest, CheckbuttonTest, EntryTest,
    FrameTest, LabelFrameTest, LabelTest, ListboxTest,
    MenubuttonTest, MenuTest, MessageTest, OptionMenuTest,
    PanedWindowTest, RadiobuttonTest, ScaleTest, ScrollbarTest,
    SpinboxTest, TextTest, ToplevelTest,
)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright (c) 2010-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2012-2014 Mark D. Hill and David A. Wood
# Copyright (c) 2009-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Brad Beckmann
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
import Ruby
from FSConfig import *
from SysPaths import *
from Benchmarks import *
import Simulation
import CacheConfig
import MemConfig
from Caches import *
import Options
# Check if KVM support has been enabled, we might need to do VM
# configuration if that's the case.  BaseKvmCPU only exists in builds
# compiled with KVM support.
have_kvm_support = 'BaseKvmCPU' in globals()


def is_kvm_cpu(cpu_class):
    """Return True iff cpu_class is a KVM-backed CPU model.

    Always False when this build has no KVM support or cpu_class is None.
    """
    # Use identity comparison for None ('is not None'); '!= None' invokes
    # __ne__ and is non-idiomatic (PEP 8).
    return have_kvm_support and cpu_class is not None and \
           issubclass(cpu_class, BaseKvmCPU)
def build_test_system(np):
    """Build and configure the full-system simulation under test.

    np -- number of CPUs to instantiate.

    Relies on module-level globals established by the option parsing at
    the bottom of this file: options, bm, test_mem_mode, TestCPUClass,
    FutureClass and TestMemClass.  Returns the configured System object.
    """
    # Pick the platform-specific system builder for the compiled ISA.
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                options.ruby)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type, bm[0],
                                 options.dtb_filename,
                                 bare_metal=options.bare_metal)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])
    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size
    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
            voltage_domain = test_sys.voltage_domain)
    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()
    # Create a source clock for the CPUs and set the clock period
    test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                             voltage_domain =
                                             test_sys.cpu_voltage_domain)
    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)
    if options.script is not None:
        test_sys.readfile = options.script
    if options.lpae:
        test_sys.have_lpae = True
    if options.virtualisation:
        test_sys.have_virtualization = True
    test_sys.init_param = options.init_param
    # For now, assign all the CPUs to the same clock domain
    test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
                    for i in xrange(np)]
    # A KVM CPU (test or switch-to target) needs a VM object.
    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()
    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)
        Ruby.create_system(options, test_sys, test_sys.iobus, test_sys._dma_ports)
        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                                        voltage_domain = test_sys.voltage_domain)
        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
            if buildEnv['TARGET_ISA'] == "x86":
                # x86 additionally routes page-table walkers and the
                # interrupt controller through Ruby.
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master
            test_sys.ruby._cpu_ports[i].access_phys_mem = True
        # Create the appropriate memory controllers
        # and connect them to the IO bus
        test_sys.mem_ctrls = [TestMemClass(range = r) for r in test_sys.mem_ranges]
        for i in xrange(len(test_sys.mem_ctrls)):
            test_sys.mem_ctrls[i].port = test_sys.iobus.master
    else:
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        else:
            # Without caches, bridge the IO bus straight onto the memory bus.
            test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave
        # Sanity check
        if options.fastmem:
            if TestCPUClass != AtomicSimpleCPU:
                fatal("Fastmem can only be used with atomic CPU!")
            if (options.caches or options.l2cache):
                fatal("You cannot use fastmem in combination with caches!")
        for i in xrange(np):
            if options.fastmem:
                test_sys.cpu[i].fastmem = True
            if options.checker:
                test_sys.cpu[i].addCheckerCpu()
            test_sys.cpu[i].createThreads()
        CacheConfig.config_cache(options, test_sys)
        MemConfig.config_mem(options, test_sys)
    return test_sys
def build_drive_system(np):
    """Build the driver system used in dual-system (client/server) runs.

    The driver always uses an atomic AtomicSimpleCPU with SimpleMemory,
    independent of the CPU model of the system under test.  np is only
    passed through to makeLinuxX86System.  Returns the configured system.
    """
    # driver system CPU is always simple, so is the memory
    # Note this is an assignment of a class, not an instance.
    DriveCPUClass = AtomicSimpleCPU
    drive_mem_mode = 'atomic'
    DriveMemClass = SimpleMemory
    if buildEnv['TARGET_ISA'] == 'alpha':
        drive_sys = makeLinuxAlphaSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'mips':
        drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'sparc':
        drive_sys = makeSparcSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'x86':
        drive_sys = makeLinuxX86System(drive_mem_mode, np, bm[1])
    elif buildEnv['TARGET_ISA'] == 'arm':
        drive_sys = makeArmSystem(drive_mem_mode, options.machine_type, bm[1])
    # Create a top-level voltage domain
    drive_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
    # Create a source clock for the system and set the clock period
    drive_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
            voltage_domain = drive_sys.voltage_domain)
    # Create a CPU voltage domain
    drive_sys.cpu_voltage_domain = VoltageDomain()
    # Create a source clock for the CPUs and set the clock period
    drive_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                              voltage_domain =
                                              drive_sys.cpu_voltage_domain)
    drive_sys.cpu = DriveCPUClass(clk_domain=drive_sys.cpu_clk_domain,
                                  cpu_id=0)
    drive_sys.cpu.createThreads()
    drive_sys.cpu.createInterruptController()
    drive_sys.cpu.connectAllPorts(drive_sys.membus)
    if options.fastmem:
        drive_sys.cpu.fastmem = True
    if options.kernel is not None:
        drive_sys.kernel = binary(options.kernel)
    if is_kvm_cpu(DriveCPUClass):
        drive_sys.vm = KvmVM()
    # Bridge the IO bus onto the memory bus (driver has no caches).
    drive_sys.iobridge = Bridge(delay='50ns',
                                ranges = drive_sys.mem_ranges)
    drive_sys.iobridge.slave = drive_sys.iobus.master
    drive_sys.iobridge.master = drive_sys.membus.slave
    # Create the appropriate memory controllers and connect them to the
    # memory bus
    drive_sys.mem_ctrls = [DriveMemClass(range = r)
                           for r in drive_sys.mem_ranges]
    for i in xrange(len(drive_sys.mem_ctrls)):
        drive_sys.mem_ctrls[i].port = drive_sys.membus.master
    drive_sys.init_param = options.init_param
    return drive_sys
# Add options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addFSOptions(parser)
# Add the ruby specific and protocol specific options
if '--ruby' in sys.argv:
    Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
    print "Error: script doesn't take any positional arguments"
    sys.exit(1)
# system under test can be any CPU
(TestCPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
# Match the memories with the CPUs, based on the options for the test system
TestMemClass = Simulation.setMemClass(options)
# Select benchmark config(s): either a named predefined benchmark, or one
# (single) / two (dual client+server) systems built from --disk-image.
if options.benchmark:
    try:
        bm = Benchmarks[options.benchmark]
    except KeyError:
        print "Error benchmark %s has not been defined." % options.benchmark
        print "Valid benchmarks are: %s" % DefinedBenchmarks
        sys.exit(1)
else:
    if options.dual:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size),
              SysConfig(disk=options.disk_image, mem=options.mem_size)]
    else:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size)]
np = options.num_cpus
test_sys = build_test_system(np)
if len(bm) == 2:
    # Dual setup: a driver system feeds traffic to the system under test.
    drive_sys = build_drive_system(np)
    root = makeDualRoot(True, test_sys, drive_sys, options.etherdump)
elif len(bm) == 1:
    root = Root(full_system=True, system=test_sys)
else:
    print "Error I don't know how to create more than 2 systems."
    sys.exit(1)
if options.timesync:
    root.time_sync_enable = True
if options.frame_capture:
    VncServer.frame_capture = True
m5.disableAllListeners()
# Attach the fault-injection subsystem, configured from the FI options.
# NOTE(review): Fi_System appears to be a local gem5 extension — confirm
# the option semantics against its SimObject definition.
test_sys.fi_system=Fi_System(input_fi=options.fi_input,check_before_init=options.exit_on_checkpoint,fi_switch=options.switch_on_fault,text_start=options.text_start,meta_file=options.meta_file,checkBeforeFI=options.checkpoint_on_fault,FileType=options.file_type)
Simulation.setWorkCountOptions(test_sys, options)
Simulation.run(options, root, test_sys, FutureClass)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# Python 2/3 compatibility shim: expose the PY2 __builtin__ module name and
# the PY2 `long` type on PY3, since the generated code below uses both.
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
from . import console
from . import remote_servers
class logging(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-system - based on the path /system/logging. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Top-level container for data related to logging / syslog
    """

    # NOTE: the class name mirrors the YANG container name ("logging") and
    # therefore shadows the stdlib logging module within this package.
    __slots__ = ("_path_helper", "_extmethods", "__console", "__remote_servers")
    _yang_name = "logging"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are wrapped in YANGDynClass so that changes are
        # tracked and YANG paths can be registered with a path helper.
        self.__console = YANGDynClass(
            base=console.console,
            is_container="container",
            yang_name="console",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
        self.__remote_servers = YANGDynClass(
            base=remote_servers.remote_servers,
            is_container="container",
            yang_name="remote-servers",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        # Optional copy-construction: one positional argument carrying the
        # same pyangbind elements populates this instance from it.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements the source object actually changed.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container within the YANG data tree.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return ["system", "logging"]

    def _get_console(self):
        """
        Getter method for console, mapped from YANG variable /system/logging/console (container)

        YANG Description: Top-level container for data related to console-based
        logging
        """
        return self.__console

    def _set_console(self, v, load=False):
        """
        Setter method for console, mapped from YANG variable /system/logging/console (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_console is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_console() directly.

        YANG Description: Top-level container for data related to console-based
        logging
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=console.console,
                is_container="container",
                yang_name="console",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """console must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=console.console, is_container='container', yang_name="console", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=True)""",
                }
            )
        self.__console = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_console(self):
        # Reset the container to a fresh (unchanged) instance.
        self.__console = YANGDynClass(
            base=console.console,
            is_container="container",
            yang_name="console",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )

    def _get_remote_servers(self):
        """
        Getter method for remote_servers, mapped from YANG variable /system/logging/remote_servers (container)

        YANG Description: Enclosing container for the list of remote log servers
        """
        return self.__remote_servers

    def _set_remote_servers(self, v, load=False):
        """
        Setter method for remote_servers, mapped from YANG variable /system/logging/remote_servers (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_remote_servers is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_remote_servers() directly.

        YANG Description: Enclosing container for the list of remote log servers
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=remote_servers.remote_servers,
                is_container="container",
                yang_name="remote-servers",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """remote_servers must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=remote_servers.remote_servers, is_container='container', yang_name="remote-servers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=True)""",
                }
            )
        self.__remote_servers = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_remote_servers(self):
        # Reset the container to a fresh (unchanged) instance.
        self.__remote_servers = YANGDynClass(
            base=remote_servers.remote_servers,
            is_container="container",
            yang_name="remote-servers",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="container",
            is_config=True,
        )

    # Public properties (via __builtin__.property for PY2/PY3 parity).
    console = __builtin__.property(_get_console, _set_console)
    remote_servers = __builtin__.property(_get_remote_servers, _set_remote_servers)

    _pyangbind_elements = OrderedDict(
        [("console", console), ("remote_servers", remote_servers)]
    )
| |
# -*- coding: utf-8 -*-
"""
Global settings:
Those which are typically edited during a deployment are in
000_config.py & their results parsed into here. Deployers
shouldn't typically need to edit any settings here.
"""
# Keep all our configuration options off the main global variables
# Use response.s3 for one-off variables which are visible in views without explicit passing
s3.formats = Storage()
# Workaround for this Bug in Selenium with FF4:
# http://code.google.com/p/selenium/issues/detail?id=1604
s3.interactive = settings.get_ui_confirm()
# Public base URL of this application, e.g. used to build absolute links
# in outgoing emails below.
s3.base_url = "%s/%s" % (settings.get_base_public_url(),
                         appname)
s3.download_url = "%s/default/download" % s3.base_url
###############
# Client tests
###############
# Check whether browser is Mobile & store result in session
# - commented-out until we make use of it
#if session.s3.mobile is None:
#    session.s3.mobile = s3base.s3_is_mobile_client(request)
#if session.s3.browser is None:
#    session.s3.browser = s3base.s3_populate_browser_compatibility(request)
##################
# Global variables
##################
# Interactive view formats
s3.interactive_view_formats = ("html", "popup", "iframe")
# Strings to i18n
messages["UNAUTHORISED"] = "Not authorised!"
messages["BADFORMAT"] = "Unsupported data format!"
messages["BADMETHOD"] = "Unsupported method!"
messages["BADRECORD"] = "Record not found!"
messages["INVALIDREQUEST"] = "Invalid request!"
messages["XLWT_ERROR"] = "xlwt module not available within the running Python - this needs installing for XLS output!"
messages["REPORTLAB_ERROR"] = "ReportLab module not available within the running Python - this needs installing for PDF output!"
# Common Labels
#messages["BREADCRUMB"] = ">> "
messages["UNKNOWN_OPT"] = "Unknown"
messages["NONE"] = "-"
messages["READ"] = settings.get_ui_read_label()
messages["UPDATE"] = settings.get_ui_update_label()
messages["DELETE"] = "Delete"
messages["COPY"] = "Copy"
messages["NOT_APPLICABLE"] = "N/A"
messages["ADD_PERSON"] = "Add Person"
messages["ADD_LOCATION"] = "Add Location"
messages["SELECT_LOCATION"] = "Select a location"
# Export every plain-string message as a translated module-level global,
# e.g. UNAUTHORISED = T("Not authorised!"), used throughout the models.
for u in messages:
    if isinstance(messages[u], str):
        globals()[u] = T(messages[u])
# Pass to CRUD
s3mgr.LABEL["READ"] = READ
s3mgr.LABEL["UPDATE"] = UPDATE
s3mgr.LABEL["DELETE"] = DELETE
s3mgr.LABEL["COPY"] = COPY
# To get included in <HEAD>
s3.stylesheets = []
s3.external_stylesheets = []
# To get included at the end of <BODY>
s3.scripts = []
s3.js_global = []
s3.jquery_ready = []
###########
# Languages
###########
s3.l10n_languages = settings.get_L10n_languages()
# Default strings are in US English
T.current_languages = ["en", "en-us"]
# Check if user has selected a specific language
# Precedence: explicit ?_language= request var > session > user profile
# > deployment default.
if request.vars._language:
    language = request.vars._language
    session.s3.language = language
elif session.s3.language:
    # Use the last-selected language
    language = session.s3.language
elif auth.is_logged_in():
    # Use user preference
    language = auth.user.language
else:
    # Use system default
    language = settings.get_L10n_default_language()
#else:
#    # Use what browser requests (default web2py behaviour)
#    T.force(T.http_accept_language)
# IE doesn't set request.env.http_accept_language
#if language != "en":
T.force(language)
# Store for views (e.g. Ext)
# Translate web2py-style codes ("xx" / "xx-yy") into the codes Ext expects
# ("xx" / "xx_YY"), with special cases for Vietnamese and Greek.
if language.find("-") == -1:
    # Ext peculiarities
    if language == "vi":
        s3.language = "vn"
    elif language == "el":
        s3.language = "el_GR"
    else:
        s3.language = language
else:
    lang_parts = language.split("-")
    s3.language = "%s_%s" % (lang_parts[0], lang_parts[1].upper())
# List of Languages which use a Right-to-Left script (Arabic, Hebrew, Farsi, Urdu)
s3_rtl_languages = ["ur", "ar"]
if T.accepted_language in s3_rtl_languages:
    s3.rtl = True
else:
    s3.rtl = False
######
# Auth
######
_settings = auth.settings
_settings.lock_keys = False
_settings.password_min_length = 4
_settings.expiration = 28800 # seconds
_settings.facebook = settings.get_auth_facebook()
_settings.google = settings.get_auth_google()
if settings.get_auth_openid():
    # Requires http://pypi.python.org/pypi/python-openid/
    try:
        from gluon.contrib.login_methods.openid_auth import OpenIDAuth
        openid_login_form = OpenIDAuth(auth)
        from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm
        extended_login_form = ExtendedLoginForm(auth, openid_login_form,
                                                signals=["oid", "janrain_nonce"])
        auth.settings.login_form = extended_login_form
    except ImportError:
        session.warning = T("Library support not available for OpenID")
# Allow use of LDAP accounts for login
# NB Currently this means that change password should be disabled:
#_settings.actions_disabled.append("change_password")
# (NB These are not automatically added to PR or to Authenticated role since they enter via the login() method not register())
#from gluon.contrib.login_methods.ldap_auth import ldap_auth
# Require even alternate login methods to register users 1st
#_settings.alternate_requires_registration = True
# Active Directory
#_settings.login_methods.append(ldap_auth(mode="ad", server="dc.domain.org", base_dn="ou=Users,dc=domain,dc=org"))
# or if not wanting local users at all (no passwords saved within DB):
#_settings.login_methods = [ldap_auth(mode="ad", server="dc.domain.org", base_dn="ou=Users,dc=domain,dc=org")]
# Domino
#_settings.login_methods.append(ldap_auth(mode="domino", server="domino.domain.org"))
# OpenLDAP
#_settings.login_methods.append(ldap_auth(server="directory.sahanafoundation.org", base_dn="ou=users,dc=sahanafoundation,dc=org"))
# Allow use of Email accounts for login
#_settings.login_methods.append(email_auth("smtp.gmail.com:587", "@gmail.com"))
# Require captcha verification for registration
#auth.settings.captcha = RECAPTCHA(request, public_key="PUBLIC_KEY", private_key="PRIVATE_KEY")
# Require Email Verification
_settings.registration_requires_verification = settings.get_auth_registration_requires_verification()
_settings.on_failed_authorization = URL(c="default", f="user",
                                        args="not_authorized")
_settings.reset_password_requires_verification = True
_settings.verify_email_next = URL(c="default", f="index")
# Notify Approver of new pending user registration. Action may be required.
_settings.verify_email_onaccept = auth.s3_verify_email_onaccept
# Auth Messages
# NB The literal "%(key)s" placeholders are deliberately re-emitted so that
# web2py can substitute the actual key when the mail is sent.
_messages = auth.messages
_messages.verify_email = "Click on the link %(url)s%(key)s to verify your email" % \
    dict(url="%s/default/user/verify_email/" % s3.base_url,
         key="%(key)s")
_messages.verify_email_subject = "%(system_name)s - Verify Email" % \
    {"system_name" : settings.get_system_name()}
_messages.reset_password = "%s %s/default/user/reset_password/%s %s" % \
    (T("Click on the link"),
     s3.base_url,
     "%(key)s",
     T("to reset your password"))
_messages.help_mobile_phone = T("Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.")
# Require Admin approval for self-registered users
_settings.registration_requires_approval = settings.get_auth_registration_requires_approval()
_messages.registration_pending = "Registration is still pending approval from Approver (%s) - please wait until confirmation received." % \
    settings.get_mail_approver()
_messages.registration_pending_approval = "Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated." % \
    settings.get_mail_approver()
_messages["approve_user"] = \
"""Your action is required to approve a New User for %(system_name)s:
%(name_format)s
Please go to %(base_url)s/admin/user to approve this user.""" \
    % dict(system_name = settings.get_system_name(),
           name_format = \
"""%(first_name)s %(last_name)s
%(email)s""",
           base_url = s3.base_url)
_messages["new_user"] = \
"""A New User has registered for %(system_name)s:
%(name_format)s
No action is required.""" \
    % dict(system_name = settings.get_system_name(),
           name_format = \
"""%(first_name)s %(last_name)s
%(email)s""")
# We don't wish to clutter the groups list with 1 per user.
_settings.create_user_groups = False
# We need to allow basic logins for Webservices
_settings.allow_basic_login = True
_settings.logout_onlogout = s3_auth_on_logout
_settings.login_onaccept = s3_auth_on_login
_settings.login_next = settings.get_auth_login_next()
if settings.get_auth_registration_volunteer() and \
   settings.has_module("vol"):
    _settings.register_next = URL(c="vol", f="person")
# Default Language for authenticated users
_settings.table_user.language.default = settings.get_L10n_default_language()
# Languages available in User Profiles
field = _settings.table_user.language
if len(s3.l10n_languages) > 1:
    field.requires = IS_IN_SET(s3.l10n_languages,
                               zero=None)
else:
    # Single-language deployment: fix the value & hide the selector.
    field.default = s3.l10n_languages.keys()[0]
    field.readable = False
    field.writable = False
_settings.lock_keys = True
######
# Mail
######
# These settings could be made configurable as part of the Messaging Module
# - however also need to be used by Auth (order issues), DB calls are overheads
# - as easy for admin to edit source here as to edit DB (although an admin panel can be nice)
mail.settings.server = settings.get_mail_server()
mail.settings.tls = settings.get_mail_server_tls()
mail_server_login = settings.get_mail_server_login()
if mail_server_login:
    mail.settings.login = mail_server_login
mail.settings.sender = settings.get_mail_sender()
# Email settings for registration verification
_settings.mailer = mail
#########
# Session
#########
# Custom Notifications: surface any flash messages queued in the session
# during the previous request, then reset the queues for this request.
response.error = session.error
response.confirmation = session.confirmation
response.information = session.information
response.warning = session.warning
session.error = []
session.confirmation = []
session.information = []
session.warning = []
# Shortcuts for system role IDs, see modules/s3aaa.py/AuthS3
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
AUTHENTICATED = system_roles.AUTHENTICATED
ANONYMOUS = system_roles.ANONYMOUS
EDITOR = system_roles.EDITOR
MAP_ADMIN = system_roles.MAP_ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
if s3.debug:
    # Add the developer toolbar from modules/s3/s3utils.py
    s3.toolbar = s3base.s3_dev_toolbar
######
# CRUD
######
def s3_formstyle(id, label, widget, comment, hidden=False):
    """
    Provide the Sahana Eden Form Style

    Label above the Inputs:
    http://uxmovement.com/design-articles/faster-with-top-aligned-labels
    """
    # NOTE: `id` shadows the builtin but is kept — it is web2py's
    # formstyle signature.
    _class = "hide" if hidden else ""
    # Label on the 1st row
    label_row = TR(TD(label, _class="w2p_fl"), TD(""),
                   _id=id + "1", _class=_class)
    # Widget & Comment on the 2nd Row
    widget_row = TR(widget, TD(comment, _class="w2p_fc"),
                    _id=id, _class=_class)
    return (label_row, widget_row)
# Mobile clients currently share the desktop form style.
s3_formstyle_mobile = s3_formstyle
_crud = s3.crud
_crud.formstyle = s3_formstyle
_crud.submit_button = T("Save")
# Optional class for Submit buttons
#_crud.submit_style = "submit-button"
_crud.confirm_delete = T("Do you really want to delete these records?")
_crud.archive_not_delete = settings.get_security_archive_not_delete()
_crud.navigate_away_confirm = settings.get_ui_navigate_away_confirm()
# Web2py Crud
# Breaks refresh of List after Create: http://groups.google.com/group/web2py/browse_thread/thread/d5083ed08c685e34
#crud.settings.keepvalues = True
crud.messages.submit_button = s3.crud.submit_button
crud.settings.formstyle = s3.crud.formstyle
##################
# XML/JSON Formats
##################
s3mgr.crud = s3base.S3CRUD
s3mgr.search = s3base.S3Search
# Content Type Headers, default is application/xml for XML formats
# and text/x-json for JSON formats, other content types must be
# specified here:
s3mgr.content_type = Storage(
    tc = "application/atom+xml", # TableCast feeds
    rss = "application/rss+xml", # RSS
    georss = "application/rss+xml", # GeoRSS
    kml = "application/vnd.google-earth.kml+xml", # KML
)
# JSON Formats
s3mgr.json_formats = ["geojson", "s3json"]
# CSV Formats
s3mgr.csv_formats = ["hrf", "s3csv"]
s3mgr.ROWSPERPAGE = 20
#######
# Menus
#######
# Import menus and layouts
from eden.layouts import *
import eden.menus as default_menus
S3MainMenu = default_menus.S3MainMenu
S3OptionsMenu = default_menus.S3OptionsMenu
current.menu = Storage(options=None, override={})
# Build the main menu for interactive (HTML) requests only.
# FIX: the original condition was `in ("html")` - parentheses without a
# comma are just grouping, so this tested substring membership in the
# string "html" (e.g. format "h" or "tm" would also match). A one-element
# tuple restores the intended membership test.
if auth.permission.format in ("html",):
    menus = "applications.%s.private.templates.%s.menus" % \
            (appname, settings.get_theme())
    try:
        # Template-specific menus override the defaults when present
        exec("import %s as deployment_menus" % menus)
    except ImportError:
        pass
    else:
        if "S3MainMenu" in deployment_menus.__dict__:
            S3MainMenu = deployment_menus.S3MainMenu
        if "S3OptionsMenu" in deployment_menus.__dict__:
            S3OptionsMenu = deployment_menus.S3OptionsMenu
    main = S3MainMenu.menu()
else:
    # Non-interactive formats (xml, json, ...) render no menu
    main = None
menu = current.menu
menu["main"] = main
# Override controller menus
# @todo: replace by current.menu.override
s3_menu_dict = {}
##########
# Messages
##########
s3.messages = Messages(T)
system_name = settings.get_system_name_short()
# Subject & body of the email sent when a registration is approved
s3.messages.confirmation_email_subject = "%s %s" % (system_name,
                                                    T("access granted"))
s3.messages.confirmation_email = "%s %s %s %s. %s." % (T("Welcome to the"),
                                                       system_name,
                                                       T("Portal at"),
                                                       s3.base_url,
                                                       T("Thanks for your assistance"))
# Valid Extensions for Image Upload fields
s3.IMAGE_EXTENSIONS = ["png", "PNG", "jpg", "JPG", "jpeg", "JPEG", "gif", "GIF", "tif", "TIF", "tiff", "TIFF", "bmp", "BMP", "raw", "RAW"]
# Default CRUD strings
# (generic fallbacks; individual resources override these per-table)
ADD_RECORD = T("Add Record")
s3.crud_strings = Storage(
    title_create = ADD_RECORD,
    title_display = T("Record Details"),
    title_list = T("Records"),
    title_update = T("Edit Record"),
    title_search = T("Search Records"),
    title_map = T("Map"),
    subtitle_create = T("Add New Record"),
    label_list_button = T("List Records"),
    label_create_button = ADD_RECORD,
    label_delete_button = T("Delete Record"),
    msg_record_created = T("Record added"),
    msg_record_modified = T("Record updated"),
    msg_record_deleted = T("Record deleted"),
    msg_list_empty = T("No Records currently available"),
    msg_match = T("Matching Records"),
    msg_no_match = T("No Matching Records"),
    name_nice = T("Record"),
    name_nice_plural = T("Records"))
# END =========================================================================
| |
# -*- coding: utf-8 -*-
#
# google-cloud-billing-budgets documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
]
# autodoc/autosummary flags
# "both" = concatenate the class docstring and the __init__ docstring
autoclass_content = "both"
# NOTE(review): autodoc_default_flags was deprecated in Sphinx 1.8 in
# favour of autodoc_default_options - confirm the Sphinx version in use.
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
# NOTE(review): source_parsers was deprecated in Sphinx 1.8; newer setups
# register recommonmark as an extension instead - confirm before upgrading.
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-billing-budgets"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "description": "Google Cloud Client Libraries for Python",
    "github_user": "googleapis",
    "github_repo": "google-cloud-python",
    "github_banner": True,
    "font_family": "'Roboto', Georgia, sans",
    "head_font_family": "'Roboto', Georgia, serif",
    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-billing-budgets-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
    # Temporarily suppress this to avoid "more than one target found for
    # cross-reference" warning, which are intractable for us to avoid while in
    # a mono-repo.
    # See https://github.com/sphinx-doc/sphinx/blob
    # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
    "ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "google-cloud-billing-budgets.tex",
        u"google-cloud-billing-budgets Documentation",
        author,
        "manual",
    )
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        "google-cloud-billing-budgets",
        u"google-cloud-billing-budgets Documentation",
        [author],
        1,
    )
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "google-cloud-billing-budgets",
        u"google-cloud-billing-budgets Documentation",
        author,
        "google-cloud-billing-budgets",
        # NOTE(review): "{metadata.shortName}" looks like an unexpanded
        # generator template placeholder - confirm against the synth setup.
        "GAPIC library for the {metadata.shortName} v1beta1 service",
        "APIs",
    )
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# Maps external project names to their objects.inv locations so :class:/:func:
# references resolve across projects.
intersphinx_mapping = {
    "python": ("http://python.readthedocs.org/en/latest/", None),
    "gax": ("https://gax-python.readthedocs.org/en/latest/", None),
    "google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
    "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None),
    "grpc": ("https://grpc.io/grpc/python/", None),
    "requests": ("https://requests.kennethreitz.org/en/master/", None),
    "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
    "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
# Napoleon settings
# (controls how Google/NumPy style docstrings are rendered)
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| |
# Copyright (c) 2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for L{ClassAlias} and L{register_class}. Both are the most
fundamental parts of PyAMF and the test suite for it is big so it makes sense
to have them in one file.
@since: 0.5
"""
import unittest
import pyamf
from pyamf import ClassAlias
from pyamf.tests.util import ClassCacheClearingTestCase, Spam, get_fqcn
# Python 2.3 compatibility: fall back to the `sets` module when the
# builtin set type is unavailable
try:
    set
except NameError:
    from sets import Set as set
class ClassAliasTestCase(ClassCacheClearingTestCase):
    """
    Test all functionality relating to the class L{ClassAlias}.

    Uses C{assertEqual}/C{assertNotEqual} rather than the deprecated
    camelCase aliases (removed in Python 3.12).
    """

    def test_init(self):
        # Defaults: anonymous, dynamic alias with no attribute filters
        x = ClassAlias(Spam)

        self.assertTrue(x.anonymous)
        self.assertTrue(x.dynamic)
        self.assertFalse(x.amf3)
        self.assertFalse(x.external)

        self.assertEqual(x.readonly_attrs, None)
        self.assertEqual(x.static_attrs, None)
        self.assertEqual(x.exclude_attrs, None)
        self.assertEqual(x.proxy_attrs, None)
        self.assertEqual(x.alias, '')
        self.assertEqual(x.klass, Spam)

        # compiled attributes
        self.assertEqual(x.decodable_properties, None)
        self.assertEqual(x.encodable_properties, None)
        self.assertTrue(x._compiled)

    def test_init_deferred(self):
        """
        Test for initial deferred compilation
        """
        x = ClassAlias(Spam, defer=True)

        self.assertTrue(x.anonymous)
        self.assertEqual(x.dynamic, None)
        self.assertFalse(x.amf3)
        self.assertFalse(x.external)

        self.assertEqual(x.readonly_attrs, None)
        self.assertEqual(x.static_attrs, None)
        self.assertEqual(x.exclude_attrs, None)
        self.assertEqual(x.proxy_attrs, None)
        self.assertEqual(x.alias, '')
        self.assertEqual(x.klass, Spam)

        # compiled attributes must not exist until compile() runs
        self.assertFalse(hasattr(x, 'static_properties'))
        self.assertFalse(x._compiled)

    def test_init_kwargs(self):
        # every keyword option should land on the corresponding attribute;
        # note readonly_attrs='gak' demonstrates that strings are split
        # into their characters by list() during compilation
        x = ClassAlias(Spam, alias='foo', static_attrs=('bar',),
            exclude_attrs=('baz',), readonly_attrs='gak', amf3='spam',
            external='eggs', dynamic='goo', proxy_attrs=('blarg',))

        self.assertFalse(x.anonymous)
        self.assertEqual(x.dynamic, 'goo')
        self.assertEqual(x.amf3, 'spam')
        self.assertEqual(x.external, 'eggs')

        self.assertEqual(x.readonly_attrs, ['a', 'g', 'k'])
        self.assertEqual(x.static_attrs, ['bar'])
        self.assertEqual(x.exclude_attrs, ['baz'])
        self.assertEqual(x.proxy_attrs, ['blarg'])
        self.assertEqual(x.alias, 'foo')
        self.assertEqual(x.klass, Spam)

        # compiled attributes
        self.assertEqual(x.encodable_properties, ['bar'])
        self.assertEqual(x.decodable_properties, ['bar'])
        self.assertTrue(x._compiled)

    def test_bad_class(self):
        # a non-class first argument must be rejected
        self.assertRaises(TypeError, ClassAlias, 'eggs', 'blah')

    def test_init_args(self):
        # classes whose __init__ requires arguments cannot be aliased
        class ClassicFoo:
            def __init__(self, foo, bar):
                pass

        class NewFoo(object):
            def __init__(self, foo, bar):
                pass

        self.assertRaises(TypeError, ClassAlias, ClassicFoo)
        self.assertRaises(TypeError, ClassAlias, NewFoo)

    def test_createInstance(self):
        x = ClassAlias(Spam, 'org.example.spam.Spam')

        y = x.createInstance()

        self.assertTrue(isinstance(y, Spam))

    def test_str(self):
        class Eggs(object):
            pass

        # str() of an alias is its registered alias name
        x = ClassAlias(Eggs, 'org.example.eggs.Eggs')

        self.assertEqual(str(x), 'org.example.eggs.Eggs')

    def test_eq(self):
        class A(object):
            pass

        class B(object):
            pass

        x = ClassAlias(A, 'org.example.A')
        y = ClassAlias(A, 'org.example.A')
        z = ClassAlias(B, 'org.example.B')

        # aliases compare equal to their class and to equivalent aliases
        self.assertEqual(x, A)
        self.assertEqual(x, y)
        self.assertNotEqual(x, z)
class GetEncodableAttributesTestCase(unittest.TestCase):
    """
    Tests for L{ClassAlias.getEncodableAttributes}
    """

    def setUp(self):
        self.alias = ClassAlias(Spam, 'foo', defer=True)
        self.obj = Spam()

    def test_empty(self):
        sa, da = self.alias.getEncodableAttributes(self.obj)

        # NOTE(review): the original called assertEquals(sa, da, None) -
        # unittest treats the third argument as the failure *message*, so
        # only sa == da was ever checked. Both were probably meant to be
        # compared against None individually; confirm before tightening.
        self.assertEqual(sa, da)

    def test_static(self):
        self.alias.static_attrs = ['foo', 'bar']
        self.alias.compile()

        self.obj.foo = 'bar'
        # leave self.obj.bar unset: a missing static attr encodes as Undefined
        self.assertFalse(hasattr(self.obj, 'bar'))

        sa, da = self.alias.getEncodableAttributes(self.obj)

        self.assertEqual(sa, {'foo': 'bar', 'bar': pyamf.Undefined})
        self.assertEqual(da, None)

    def test_not_dynamic(self):
        self.alias.compile()
        self.alias.dynamic = False

        # nothing static and dynamics disabled -> nothing to encode
        self.assertEqual(self.alias.getEncodableAttributes(self.obj), (None, None))

    def test_dynamic(self):
        self.alias.compile()

        self.assertEqual(self.alias.encodable_properties, None)

        self.obj.foo = 'bar'
        self.obj.bar = 'foo'

        sa, da = self.alias.getEncodableAttributes(self.obj)

        self.assertEqual(sa, None)
        self.assertEqual(da, {'foo': 'bar', 'bar': 'foo'})

    def test_proxy(self):
        from pyamf import flex

        self.alias.amf3 = True
        self.alias.proxy_attrs = ('foo', 'bar')
        self.alias.compile()

        self.assertEqual(self.alias.proxy_attrs, ['bar', 'foo'])

        self.obj.foo = ['bar', 'baz']
        self.obj.bar = {'foo': 'gak'}

        sa, da = self.alias.getEncodableAttributes(self.obj)

        self.assertEqual(sa, None)
        # FIX: compare the sorted key list instead of relying on dict
        # insertion/hash order (the original assumed ['foo', 'bar'])
        self.assertEqual(sorted(da.keys()), ['bar', 'foo'])

        # proxied list/dict attributes are wrapped in Flex proxy types
        self.assertTrue(isinstance(da['foo'], flex.ArrayCollection))
        self.assertEqual(da['foo'], ['bar', 'baz'])

        self.assertTrue(isinstance(da['bar'], flex.ObjectProxy))
        self.assertEqual(da['bar']._amf_object, {'foo': 'gak'})
class GetDecodableAttributesTestCase(unittest.TestCase):
    """
    Tests for L{ClassAlias.getDecodableAttributes}
    """

    def setUp(self):
        self.alias = ClassAlias(Spam, 'foo', defer=True)
        self.obj = Spam()

    def test_compile(self):
        # applyAttributes must lazily compile a deferred alias
        self.assertFalse(self.alias._compiled)

        self.alias.applyAttributes(self.obj, {})

        self.assertTrue(self.alias._compiled)

    def test_missing_static_property(self):
        self.alias.static_attrs = ['foo', 'bar']
        self.alias.compile()

        attrs = {'foo': None}  # missing bar key ..

        self.assertRaises(AttributeError, self.alias.getDecodableAttributes,
            self.obj, attrs)

    def test_no_static(self):
        self.alias.compile()

        attrs = {'foo': None, 'bar': [1, 2, 3]}
        # the input mapping must pass through unmodified
        self.alias.getDecodableAttributes(self.obj, attrs)

        self.assertEqual(attrs, {'foo': None, 'bar': [1, 2, 3]})

    def test_readonly(self):
        self.alias.compile()
        self.alias.readonly_attrs = ['bar']

        attrs = {'foo': None, 'bar': [1, 2, 3]}
        ret = self.alias.getDecodableAttributes(self.obj, attrs)

        # read-only attributes are stripped from the decoded set
        self.assertEqual(ret, {'foo': None})

    def test_not_dynamic(self):
        self.alias.compile()
        self.alias.decodable_properties = set(['bar'])
        self.alias.dynamic = False

        attrs = {'foo': None, 'bar': [1, 2, 3]}
        ret = self.alias.getDecodableAttributes(self.obj, attrs)

        # only declared decodable properties survive when dynamic is off
        self.assertEqual(ret, {'bar': [1, 2, 3]})

    def test_dynamic(self):
        self.alias.compile()
        self.alias.static_properties = ['bar']
        self.alias.dynamic = True

        attrs = {'foo': None, 'bar': [1, 2, 3]}
        ret = self.alias.getDecodableAttributes(self.obj, attrs)

        # dynamic aliases keep both static and extra attributes
        self.assertEqual(ret, {'foo': None, 'bar': [1, 2, 3]})

    def test_complex(self):
        # exclude + readonly filtering with dynamics enabled
        self.alias.compile()
        self.alias.static_properties = ['foo', 'bar']
        self.alias.exclude_attrs = ['baz', 'gak']
        self.alias.readonly_attrs = ['spam', 'eggs']

        attrs = {
            'foo': 'foo',
            'bar': 'bar',
            'baz': 'baz',
            'gak': 'gak',
            'spam': 'spam',
            'eggs': 'eggs',
            'dyn1': 'dyn1',
            'dyn2': 'dyn2'
        }

        ret = self.alias.getDecodableAttributes(self.obj, attrs)

        self.assertEqual(ret, {'foo': 'foo', 'bar': 'bar', 'dyn2': 'dyn2', 'dyn1': 'dyn1'})

    def test_complex_not_dynamic(self):
        # exclude + readonly filtering with dynamics disabled drops extras
        self.alias.compile()
        self.alias.decodable_properties = ['foo', 'bar']
        self.alias.exclude_attrs = ['baz', 'gak']
        self.alias.readonly_attrs = ['spam', 'eggs']
        self.alias.dynamic = False

        attrs = {
            'foo': 'foo',
            'bar': 'bar',
            'baz': 'baz',
            'gak': 'gak',
            'spam': 'spam',
            'eggs': 'eggs',
            'dyn1': 'dyn1',
            'dyn2': 'dyn2'
        }

        ret = self.alias.getDecodableAttributes(self.obj, attrs)

        self.assertEqual(ret, {'foo': 'foo', 'bar': 'bar'})

    def test_static(self):
        self.alias.dynamic = False
        self.alias.compile()

        self.alias.decodable_properties = set(['foo', 'bar'])

        attrs = {
            'foo': 'foo',
            'bar': 'bar',
            'baz': 'baz',
            'gak': 'gak',
        }

        ret = self.alias.getDecodableAttributes(self.obj, attrs)

        self.assertEqual(ret, {'foo': 'foo', 'bar': 'bar'})

    def test_proxy(self):
        from pyamf import flex

        self.alias.amf3 = True
        self.alias.proxy_attrs = ('foo', 'bar')
        self.alias.compile()

        self.assertEqual(self.alias.proxy_attrs, ['bar', 'foo'])

        attrs = {
            'foo': flex.ArrayCollection(['bar', 'baz']),
            'bar': flex.ObjectProxy({'foo': 'gak'})
        }

        ret = self.alias.getDecodableAttributes(self.obj, attrs)

        # proxied Flex containers are unwrapped back to native types
        self.assertEqual(ret, {
            'foo': ['bar', 'baz'],
            'bar': {'foo': 'gak'}
        })
class ApplyAttributesTestCase(unittest.TestCase):
    """
    Tests for L{ClassAlias.applyAttributes}
    """

    def setUp(self):
        self.alias = ClassAlias(Spam, 'foo')
        self.obj = Spam()

    def test_object(self):
        # new-style class instances receive attributes via setattr
        class Foo(object):
            pass

        attrs = {'foo': 'spam', 'bar': 'eggs'}
        self.obj = Foo()
        self.alias = ClassAlias(Foo, 'foo', defer=True)

        self.assertEqual(self.obj.__dict__, {})
        self.alias.applyAttributes(self.obj, attrs)

        self.assertEqual(self.obj.__dict__, {'foo': 'spam', 'bar': 'eggs'})

    def test_classic(self):
        # classic (old-style) classes behave the same way
        class Foo:
            pass

        attrs = {'foo': 'spam', 'bar': 'eggs'}
        self.obj = Foo()
        self.alias = ClassAlias(Foo, 'foo', defer=True)

        self.assertEqual(self.obj.__dict__, {})
        self.alias.applyAttributes(self.obj, attrs)

        self.assertEqual(self.obj.__dict__, {'foo': 'spam', 'bar': 'eggs'})

    def test_readonly(self):
        # read-only attributes must never be written to the instance
        self.alias.readonly_attrs = ['foo', 'bar']

        attrs = {'foo': 'spam', 'bar': 'eggs'}

        self.assertEqual(self.obj.__dict__, {})
        self.alias.applyAttributes(self.obj, attrs)

        self.assertEqual(self.obj.__dict__, {})

    def test_exclude(self):
        # excluded attributes are silently dropped
        self.alias.exclude_attrs = ['foo', 'bar']

        attrs = {'foo': 'spam', 'bar': 'eggs'}

        self.assertEqual(self.obj.__dict__, {})
        self.alias.applyAttributes(self.obj, attrs)

        self.assertEqual(self.obj.__dict__, {})

    def test_not_dynamic(self):
        # with no static properties and dynamic off, nothing is applied
        self.alias.static_properties = None
        self.alias.dynamic = False

        attrs = {'foo': 'spam', 'bar': 'eggs'}

        self.assertEqual(self.obj.__dict__, {})
        self.alias.applyAttributes(self.obj, attrs)

        self.assertEqual(self.obj.__dict__, {})

    def test_dict(self):
        # plain dict targets are updated by key
        attrs = {'foo': 'spam', 'bar': 'eggs'}
        self.obj = {}

        self.assertEqual(self.obj, {})
        self.alias.applyAttributes(self.obj, attrs)

        self.assertEqual(self.obj, {'foo': 'spam', 'bar': 'eggs'})
class SimpleCompliationTestCase(unittest.TestCase):
    """
    Tests for L{ClassAlias} property compilation with no inheritance.
    (Class name keeps the historical "Compliation" spelling for
    backwards compatibility with test selection.)
    """

    def test_compiled(self):
        x = ClassAlias(Spam, defer=True)

        self.assertFalse(x._compiled)

        x._compiled = True
        o = x.static_properties = object()

        x.compile()

        # compile() must be a no-op once the alias is flagged compiled
        self.assertTrue(o is x.static_properties)

    def test_external(self):
        class A(object):
            pass

        class B:
            pass

        # external aliases require both __readamf__ and __writeamf__
        # as callables; anything less raises
        self.assertRaises(AttributeError, ClassAlias, A, external=True)
        self.assertRaises(AttributeError, ClassAlias, B, external=True)

        A.__readamf__ = None
        B.__readamf__ = None

        self.assertRaises(AttributeError, ClassAlias, A, external=True)
        self.assertRaises(AttributeError, ClassAlias, B, external=True)

        A.__readamf__ = lambda x: None
        B.__readamf__ = lambda x: None

        self.assertRaises(AttributeError, ClassAlias, A, external=True)
        self.assertRaises(AttributeError, ClassAlias, B, external=True)

        A.__writeamf__ = 'foo'
        B.__writeamf__ = 'bar'

        # present but not callable -> TypeError
        self.assertRaises(TypeError, ClassAlias, A, external=True)
        self.assertRaises(TypeError, ClassAlias, B, external=True)

        A.__writeamf__ = lambda x: None
        B.__writeamf__ = lambda x: None

        a = ClassAlias(A, external=True)
        b = ClassAlias(B, external=True)

        # external aliases skip attribute discovery entirely
        self.assertEqual(a.readonly_attrs, None)
        self.assertEqual(a.static_attrs, None)
        self.assertEqual(a.decodable_properties, None)
        self.assertEqual(a.encodable_properties, None)
        self.assertEqual(a.exclude_attrs, None)

        self.assertTrue(a.anonymous)
        self.assertTrue(a.external)
        self.assertTrue(a._compiled)

        self.assertEqual(a.klass, A)
        self.assertEqual(a.alias, '')

        # now b
        self.assertEqual(b.readonly_attrs, None)
        self.assertEqual(b.static_attrs, None)
        self.assertEqual(b.decodable_properties, None)
        self.assertEqual(b.encodable_properties, None)
        self.assertEqual(b.exclude_attrs, None)

        self.assertTrue(b.anonymous)
        self.assertTrue(b.external)
        self.assertTrue(b._compiled)

        self.assertEqual(b.klass, B)
        self.assertEqual(b.alias, '')

    def test_anonymous(self):
        x = ClassAlias(Spam, None)

        x.compile()

        self.assertTrue(x.anonymous)
        self.assertTrue(x._compiled)

        self.assertEqual(x.klass, Spam)
        self.assertEqual(x.alias, '')

    def test_exclude(self):
        x = ClassAlias(Spam, exclude_attrs=['foo', 'bar'], defer=True)

        self.assertEqual(x.exclude_attrs, ['foo', 'bar'])

        x.compile()

        # attribute lists are sorted during compilation
        self.assertEqual(x.exclude_attrs, ['bar', 'foo'])

    def test_readonly(self):
        x = ClassAlias(Spam, readonly_attrs=['foo', 'bar'], defer=True)

        self.assertEqual(x.readonly_attrs, ['foo', 'bar'])

        x.compile()

        self.assertEqual(x.readonly_attrs, ['bar', 'foo'])

    def test_static(self):
        x = ClassAlias(Spam, static_attrs=['foo', 'bar'], defer=True)

        self.assertEqual(x.static_attrs, ['foo', 'bar'])

        x.compile()

        self.assertEqual(x.static_attrs, ['bar', 'foo'])

    def test_custom_properties(self):
        # subclasses can inject extra encodable/decodable properties
        class A(ClassAlias):
            def getCustomProperties(self):
                self.encodable_properties.update(['foo', 'bar'])
                self.decodable_properties.update(['bar', 'foo'])

        a = A(Spam)

        self.assertEqual(a.encodable_properties, ['bar', 'foo'])
        self.assertEqual(a.decodable_properties, ['bar', 'foo'])

        # test combined
        b = A(Spam, static_attrs=['foo', 'baz', 'gak'])

        self.assertEqual(b.encodable_properties, ['bar', 'baz', 'foo', 'gak'])
        self.assertEqual(b.decodable_properties, ['bar', 'baz', 'foo', 'gak'])

    def test_amf3(self):
        x = ClassAlias(Spam, amf3=True)
        self.assertTrue(x.amf3)

    def test_dynamic(self):
        x = ClassAlias(Spam, dynamic=True)
        self.assertTrue(x.dynamic)

        x = ClassAlias(Spam, dynamic=False)
        self.assertFalse(x.dynamic)

        # dynamic defaults to True when unspecified
        x = ClassAlias(Spam)
        self.assertTrue(x.dynamic)
class CompilationInheritanceTestCase(ClassCacheClearingTestCase):
"""
"""
def _register(self, alias):
pyamf.CLASS_CACHE[get_fqcn(alias.klass)] = alias
pyamf.CLASS_CACHE[alias.klass] = alias
return alias
def test_exclude_classic(self):
class A:
pass
class B(A):
pass
class C(B):
pass
a = self._register(ClassAlias(A, 'a', exclude_attrs=['foo'], defer=True))
b = self._register(ClassAlias(B, 'b', defer=True))
c = self._register(ClassAlias(C, 'c', exclude_attrs=['bar'], defer=True))
self.assertFalse(a._compiled)
self.assertFalse(b._compiled)
self.assertFalse(c._compiled)
c.compile()
self.assertTrue(a._compiled)
self.assertTrue(b._compiled)
self.assertTrue(c._compiled)
self.assertEquals(a.exclude_attrs, ['foo'])
self.assertEquals(b.exclude_attrs, ['foo'])
self.assertEquals(c.exclude_attrs, ['bar', 'foo'])
def test_exclude_new(self):
class A(object):
pass
class B(A):
pass
class C(B):
pass
a = self._register(ClassAlias(A, 'a', exclude_attrs=['foo'], defer=True))
b = self._register(ClassAlias(B, 'b', defer=True))
c = self._register(ClassAlias(C, 'c', exclude_attrs=['bar'], defer=True))
self.assertFalse(a._compiled)
self.assertFalse(b._compiled)
self.assertFalse(c._compiled)
c.compile()
self.assertTrue(a._compiled)
self.assertTrue(b._compiled)
self.assertTrue(c._compiled)
self.assertEquals(a.exclude_attrs, ['foo'])
self.assertEquals(b.exclude_attrs, ['foo'])
self.assertEquals(c.exclude_attrs, ['bar', 'foo'])
def test_readonly_classic(self):
class A:
pass
class B(A):
pass
class C(B):
pass
a = self._register(ClassAlias(A, 'a', readonly_attrs=['foo'], defer=True))
b = self._register(ClassAlias(B, 'b', defer=True))
c = self._register(ClassAlias(C, 'c', readonly_attrs=['bar'], defer=True))
self.assertFalse(a._compiled)
self.assertFalse(b._compiled)
self.assertFalse(c._compiled)
c.compile()
self.assertTrue(a._compiled)
self.assertTrue(b._compiled)
self.assertTrue(c._compiled)
self.assertEquals(a.readonly_attrs, ['foo'])
self.assertEquals(b.readonly_attrs, ['foo'])
self.assertEquals(c.readonly_attrs, ['bar', 'foo'])
def test_readonly_new(self):
class A(object):
pass
class B(A):
pass
class C(B):
pass
a = self._register(ClassAlias(A, 'a', readonly_attrs=['foo'], defer=True))
b = self._register(ClassAlias(B, 'b', defer=True))
c = self._register(ClassAlias(C, 'c', readonly_attrs=['bar'], defer=True))
self.assertFalse(a._compiled)
self.assertFalse(b._compiled)
self.assertFalse(c._compiled)
c.compile()
self.assertTrue(a._compiled)
self.assertTrue(b._compiled)
self.assertTrue(c._compiled)
self.assertEquals(a.readonly_attrs, ['foo'])
self.assertEquals(b.readonly_attrs, ['foo'])
self.assertEquals(c.readonly_attrs, ['bar', 'foo'])
def test_static_classic(self):
class A:
pass
class B(A):
pass
class C(B):
pass
a = self._register(ClassAlias(A, 'a', static_attrs=['foo'], defer=True))
b = self._register(ClassAlias(B, 'b', defer=True))
c = self._register(ClassAlias(C, 'c', static_attrs=['bar'], defer=True))
self.assertFalse(a._compiled)
self.assertFalse(b._compiled)
self.assertFalse(c._compiled)
c.compile()
self.assertTrue(a._compiled)
self.assertTrue(b._compiled)
self.assertTrue(c._compiled)
self.assertEquals(a.static_attrs, ['foo'])
self.assertEquals(b.static_attrs, ['foo'])
self.assertEquals(c.static_attrs, ['bar', 'foo'])
def test_static_new(self):
class A(object):
pass
class B(A):
pass
class C(B):
pass
a = self._register(ClassAlias(A, 'a', static_attrs=['foo'], defer=True))
b = self._register(ClassAlias(B, 'b', defer=True))
c = self._register(ClassAlias(C, 'c', static_attrs=['bar'], defer=True))
self.assertFalse(a._compiled)
self.assertFalse(b._compiled)
self.assertFalse(c._compiled)
c.compile()
self.assertTrue(a._compiled)
self.assertTrue(b._compiled)
self.assertTrue(c._compiled)
self.assertEquals(a.static_attrs, ['foo'])
self.assertEquals(b.static_attrs, ['foo'])
self.assertEquals(c.static_attrs, ['bar', 'foo'])
def test_amf3(self):
class A:
pass
class B(A):
pass
class C(B):
pass
a = self._register(ClassAlias(A, 'a', amf3=True, defer=True))
b = self._register(ClassAlias(B, 'b', defer=True))
c = self._register(ClassAlias(C, 'c', amf3=False, defer=True))
self.assertFalse(a._compiled)
self.assertFalse(b._compiled)
self.assertFalse(c._compiled)
c.compile()
self.assertTrue(a._compiled)
self.assertTrue(b._compiled)
self.assertTrue(c._compiled)
self.assertTrue(a.amf3)
self.assertTrue(b.amf3)
self.assertFalse(c.amf3)
def test_dynamic(self):
    """An explicit ``dynamic`` flag is inherited until overridden by a subclass."""
    class A:
        pass

    class B(A):
        pass

    class C(B):
        pass

    alias_a = self._register(ClassAlias(A, 'a', dynamic=False, defer=True))
    alias_b = self._register(ClassAlias(B, 'b', defer=True))
    alias_c = self._register(ClassAlias(C, 'c', dynamic=True, defer=True))

    # Deferred aliases start out uncompiled.
    for alias in (alias_a, alias_b, alias_c):
        self.assertFalse(alias._compiled)

    alias_c.compile()

    # Compiling the leaf compiles the entire ancestor chain.
    for alias in (alias_a, alias_b, alias_c):
        self.assertTrue(alias._compiled)

    # B inherits dynamic=False from A; C's explicit dynamic=True wins.
    self.assertFalse(alias_a.dynamic)
    self.assertFalse(alias_b.dynamic)
    self.assertTrue(alias_c.dynamic)
class CompilationIntegrationTestCase(unittest.TestCase):
    """
    Integration tests for ClassAlias's
    """

    def test_slots_classic(self):
        """``__slots__`` of old-style classes accumulate down the hierarchy."""
        class A:
            __slots__ = ('foo', 'bar')

        class B(A):
            __slots__ = ('gak',)

        class C(B):
            pass

        class D(C, B):
            __slots__ = ('spam',)

        a = ClassAlias(A)

        self.assertFalse(a.dynamic)
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12+).
        self.assertEqual(a.encodable_properties, ['bar', 'foo'])
        self.assertEqual(a.decodable_properties, ['bar', 'foo'])

        b = ClassAlias(B)

        self.assertFalse(b.dynamic)
        self.assertEqual(b.encodable_properties, ['bar', 'foo', 'gak'])
        self.assertEqual(b.decodable_properties, ['bar', 'foo', 'gak'])

        c = ClassAlias(C)

        self.assertFalse(c.dynamic)
        self.assertEqual(c.encodable_properties, ['bar', 'foo', 'gak'])
        self.assertEqual(c.decodable_properties, ['bar', 'foo', 'gak'])

        d = ClassAlias(D)

        self.assertFalse(d.dynamic)
        self.assertEqual(d.encodable_properties, ['bar', 'foo', 'gak', 'spam'])
        self.assertEqual(d.decodable_properties, ['bar', 'foo', 'gak', 'spam'])

    def test_slots_new(self):
        """``__slots__`` of new-style classes accumulate; a slot-less subclass
        becomes dynamic."""
        class A(object):
            __slots__ = ('foo', 'bar')

        class B(A):
            __slots__ = ('gak',)

        class C(B):
            pass

        class D(C, B):
            __slots__ = ('spam',)

        a = ClassAlias(A)

        self.assertFalse(a.dynamic)
        self.assertEqual(a.encodable_properties, ['bar', 'foo'])
        self.assertEqual(a.decodable_properties, ['bar', 'foo'])

        b = ClassAlias(B)

        self.assertFalse(b.dynamic)
        self.assertEqual(b.encodable_properties, ['bar', 'foo', 'gak'])
        self.assertEqual(b.decodable_properties, ['bar', 'foo', 'gak'])

        # C defines no __slots__ of its own, so instances get a __dict__
        # and the alias is considered dynamic.
        c = ClassAlias(C)

        self.assertTrue(c.dynamic)
        self.assertEqual(c.encodable_properties, ['bar', 'foo', 'gak'])
        self.assertEqual(c.decodable_properties, ['bar', 'foo', 'gak'])

        d = ClassAlias(D)

        self.assertTrue(d.dynamic)
        self.assertEqual(d.encodable_properties, ['bar', 'foo', 'gak', 'spam'])
        self.assertEqual(d.decodable_properties, ['bar', 'foo', 'gak', 'spam'])

    def test_properties(self):
        """Read-only properties are encodable but not decodable."""
        class A:
            a_rw = property(lambda _: None, lambda _, x: None)
            a_ro = property(lambda _: None)

        class B(A):
            b_rw = property(lambda _: None, lambda _, x: None)
            b_ro = property(lambda _: None)

        class C(B):
            pass

        a = ClassAlias(A)

        self.assertTrue(a.dynamic)
        self.assertEqual(a.encodable_properties, ['a_ro', 'a_rw'])
        self.assertEqual(a.decodable_properties, ['a_rw'])

        b = ClassAlias(B)

        self.assertTrue(b.dynamic)
        self.assertEqual(b.encodable_properties, ['a_ro', 'a_rw', 'b_ro', 'b_rw'])
        self.assertEqual(b.decodable_properties, ['a_rw', 'b_rw'])

        c = ClassAlias(C)

        self.assertTrue(c.dynamic)
        self.assertEqual(c.encodable_properties, ['a_ro', 'a_rw', 'b_ro', 'b_rw'])
        self.assertEqual(c.decodable_properties, ['a_rw', 'b_rw'])
class RegisterClassTestCase(ClassCacheClearingTestCase):
    """
    Tests for L{pyamf.register_class}
    """

    def tearDown(self):
        ClassCacheClearingTestCase.tearDown(self)

        # test_meta attaches __amf__ to the shared Spam fixture; remove it
        # so later tests see a pristine class.
        if hasattr(Spam, '__amf__'):
            del Spam.__amf__

    def test_meta(self):
        """The alias can be supplied via a class-level ``__amf__`` dict."""
        # assertIn/assertNotIn give clearer failure messages than
        # assertTrue/assertFalse on a containment expression.
        self.assertNotIn('spam.eggs', pyamf.CLASS_CACHE)

        Spam.__amf__ = {
            'alias': 'spam.eggs'
        }

        alias = pyamf.register_class(Spam)

        self.assertIn('spam.eggs', pyamf.CLASS_CACHE)
        self.assertEqual(pyamf.CLASS_CACHE['spam.eggs'], alias)
        self.assertIsInstance(alias, pyamf.ClassAlias)
        self.assertEqual(alias.klass, Spam)
        self.assertEqual(alias.alias, 'spam.eggs')
        # Registration must not eagerly compile the alias.
        self.assertFalse(alias._compiled)

    def test_kwarg(self):
        """The alias can be supplied as a positional/keyword argument."""
        self.assertNotIn('spam.eggs', pyamf.CLASS_CACHE)

        alias = pyamf.register_class(Spam, 'spam.eggs')

        self.assertIn('spam.eggs', pyamf.CLASS_CACHE)
        self.assertEqual(pyamf.CLASS_CACHE['spam.eggs'], alias)
        self.assertIsInstance(alias, pyamf.ClassAlias)
        self.assertEqual(alias.klass, Spam)
        self.assertEqual(alias.alias, 'spam.eggs')
        # Registration must not eagerly compile the alias.
        self.assertFalse(alias._compiled)
class UnregisterClassTestCase(ClassCacheClearingTestCase):
    """
    Tests for L{pyamf.unregister_class}
    """

    def test_alias(self):
        """Unregistering an unknown alias string raises UnknownClassAlias."""
        # assertNotIn/assertIs give clearer failure messages than
        # assertFalse/assertTrue on containment/identity expressions.
        self.assertNotIn('foo', pyamf.CLASS_CACHE)
        self.assertRaises(pyamf.UnknownClassAlias, pyamf.unregister_class, 'foo')

    def test_class(self):
        """Unregistering an unknown class raises UnknownClassAlias."""
        self.assertNotIn(Spam, pyamf.CLASS_CACHE)
        self.assertRaises(pyamf.UnknownClassAlias, pyamf.unregister_class, Spam)

    def test_remove(self):
        """Unregistering a named alias removes both cache entries."""
        alias = ClassAlias(Spam, 'foo', defer=True)

        pyamf.CLASS_CACHE['foo'] = alias
        pyamf.CLASS_CACHE[Spam] = alias

        self.assertFalse(alias.anonymous)

        ret = pyamf.unregister_class('foo')

        self.assertNotIn('foo', pyamf.CLASS_CACHE)
        self.assertNotIn(Spam, pyamf.CLASS_CACHE)
        self.assertIs(ret, alias)

    def test_anonymous(self):
        """Unregistering an anonymous alias only removes the class entry."""
        alias = ClassAlias(Spam, defer=True)

        pyamf.CLASS_CACHE['foo'] = alias
        pyamf.CLASS_CACHE[Spam] = alias

        self.assertTrue(alias.anonymous)

        ret = pyamf.unregister_class(Spam)

        # The stray string key was not registered by the alias itself, so
        # it is deliberately left behind.
        self.assertIn('foo', pyamf.CLASS_CACHE)
        self.assertNotIn(Spam, pyamf.CLASS_CACHE)
        self.assertIs(ret, alias)
def suite():
    """Build and return the TestSuite covering this module."""
    suite = unittest.TestSuite()

    test_cases = [
        ClassAliasTestCase,
        GetDecodableAttributesTestCase,
        GetEncodableAttributesTestCase,
        ApplyAttributesTestCase,
        SimpleCompliationTestCase,
        CompilationIntegrationTestCase,
        CompilationInheritanceTestCase,
        RegisterClassTestCase,
        UnregisterClassTestCase
    ]

    # unittest.makeSuite is deprecated and removed in Python 3.13;
    # TestLoader.loadTestsFromTestCase is the supported equivalent.
    loader = unittest.TestLoader()
    for tc in test_cases:
        suite.addTest(loader.loadTestsFromTestCase(tc))

    return suite
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| |
# -*- coding: utf-8 -*-
"""
pygments.lexers.r
~~~~~~~~~~~~~~~~~
Lexers for the R/S languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, words, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
# Public API of this lexer module.
__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer']

# Matches one line at a time, including its trailing newline.
line_re = re.compile('.*?\n')
class RConsoleLexer(Lexer):
    """
    For R console transcripts or R CMD BATCH output files.
    """

    name = 'RConsole'
    aliases = ['rconsole', 'rout']
    filenames = ['*.Rout']

    def get_tokens_unprocessed(self, text):
        code_lexer = SLexer(**self.options)

        # Accumulated source code from consecutive prompt lines, plus the
        # prompt tokens to weave back in at the recorded offsets.
        pending_code = ''
        pending_prompts = []

        for match in line_re.finditer(text):
            line = match.group()

            if line.startswith('>') or line.startswith('+'):
                # Prompt line: record the two prompt characters as a
                # Generic.Prompt insertion and buffer the rest as code.
                pending_prompts.append(
                    (len(pending_code), [(0, Generic.Prompt, line[:2])]))
                pending_code += line[2:]
                continue

            # Output line. First flush any buffered code, weaving the
            # prompt tokens back into the highlighted stream.
            if pending_code:
                for token in do_insertions(
                        pending_prompts,
                        code_lexer.get_tokens_unprocessed(pending_code)):
                    yield token
                pending_code = ''
                pending_prompts = []

            yield match.start(), Generic.Output, line

        # The transcript may end on a prompt line; flush the remainder.
        if pending_code:
            for token in do_insertions(
                    pending_prompts,
                    code_lexer.get_tokens_unprocessed(pending_code)):
                yield token
class SLexer(RegexLexer):
    """
    For S, S-plus, and R source code.

    .. versionadded:: 0.10
    """

    name = 'S'
    aliases = ['splus', 's', 'r']
    filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron']
    mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
                 'text/x-R', 'text/x-r-history', 'text/x-r-profile']

    # Names from R's base namespace, highlighted as Keyword.Pseudo in the
    # 'keywords' state below.
    builtins_base = (
        'Arg', 'Conj', 'Cstack_info', 'Encoding', 'FALSE',
        'Filter', 'Find', 'I', 'ISOdate', 'ISOdatetime', 'Im', 'Inf',
        'La.svd', 'Map', 'Math.Date', 'Math.POSIXt', 'Math.data.frame',
        'Math.difftime', 'Math.factor', 'Mod', 'NA_character_',
        # FIX: the original fused entry 'NULLNA_integer_' was a typo for
        # the two separate names 'NULL' and 'NA_integer_'.
        'NA_complex_', 'NA_real_', 'NCOL', 'NROW', 'NULL', 'NA_integer_',
        'NaN',
        'Negate', 'NextMethod', 'Ops.Date', 'Ops.POSIXt', 'Ops.data.frame',
        'Ops.difftime', 'Ops.factor', 'Ops.numeric_version', 'Ops.ordered',
        'Position', 'R.Version', 'R.home', 'R.version', 'R.version.string',
        'RNGkind', 'RNGversion', 'R_system_version', 'Re', 'Recall',
        'Reduce', 'Summary.Date', 'Summary.POSIXct', 'Summary.POSIXlt',
        'Summary.data.frame', 'Summary.difftime', 'Summary.factor',
        'Summary.numeric_version', 'Summary.ordered', 'Sys.Date',
        'Sys.chmod', 'Sys.getenv', 'Sys.getlocale', 'Sys.getpid',
        'Sys.glob', 'Sys.info', 'Sys.localeconv', 'Sys.readlink',
        'Sys.setFileTime', 'Sys.setenv', 'Sys.setlocale', 'Sys.sleep',
        'Sys.time', 'Sys.timezone', 'Sys.umask', 'Sys.unsetenv',
        'Sys.which', 'TRUE', 'UseMethod', 'Vectorize', 'abbreviate', 'abs',
        'acos', 'acosh', 'addNA', 'addTaskCallback', 'agrep', 'alist',
        'all', 'all.equal', 'all.equal.POSIXct', 'all.equal.character',
        'all.equal.default', 'all.equal.factor', 'all.equal.formula',
        'all.equal.language', 'all.equal.list', 'all.equal.numeric',
        'all.equal.raw', 'all.names', 'all.vars', 'any', 'anyDuplicated',
        'anyDuplicated.array', 'anyDuplicated.data.frame',
        'anyDuplicated.default', 'anyDuplicated.matrix', 'aperm',
        'aperm.default', 'aperm.table', 'append', 'apply', 'args',
        'arrayInd', 'as.Date', 'as.Date.POSIXct', 'as.Date.POSIXlt',
        'as.Date.character', 'as.Date.date', 'as.Date.dates',
        'as.Date.default', 'as.Date.factor', 'as.Date.numeric',
        'as.POSIXct', 'as.POSIXct.Date', 'as.POSIXct.POSIXlt',
        'as.POSIXct.date', 'as.POSIXct.dates', 'as.POSIXct.default',
        'as.POSIXct.numeric', 'as.POSIXlt', 'as.POSIXlt.Date',
        'as.POSIXlt.POSIXct', 'as.POSIXlt.character', 'as.POSIXlt.date',
        'as.POSIXlt.dates', 'as.POSIXlt.default', 'as.POSIXlt.factor',
        'as.POSIXlt.numeric', 'as.array', 'as.array.default', 'as.call',
        'as.character', 'as.character.Date', 'as.character.POSIXt',
        'as.character.condition', 'as.character.default',
        'as.character.error', 'as.character.factor', 'as.character.hexmode',
        'as.character.numeric_version', 'as.character.octmode',
        'as.character.srcref', 'as.complex', 'as.data.frame',
        'as.data.frame.AsIs', 'as.data.frame.Date', 'as.data.frame.POSIXct',
        'as.data.frame.POSIXlt', 'as.data.frame.array',
        'as.data.frame.character', 'as.data.frame.complex',
        'as.data.frame.data.frame', 'as.data.frame.default',
        'as.data.frame.difftime', 'as.data.frame.factor',
        'as.data.frame.integer', 'as.data.frame.list',
        'as.data.frame.logical', 'as.data.frame.matrix',
        'as.data.frame.model.matrix', 'as.data.frame.numeric',
        'as.data.frame.numeric_version', 'as.data.frame.ordered',
        'as.data.frame.raw', 'as.data.frame.table', 'as.data.frame.ts',
        'as.data.frame.vector', 'as.difftime', 'as.double',
        'as.double.POSIXlt', 'as.double.difftime', 'as.environment',
        'as.expression', 'as.expression.default', 'as.factor',
        'as.function', 'as.function.default', 'as.hexmode', 'as.integer',
        'as.list', 'as.list.Date', 'as.list.POSIXct', 'as.list.data.frame',
        'as.list.default', 'as.list.environment', 'as.list.factor',
        'as.list.function', 'as.list.numeric_version', 'as.logical',
        'as.logical.factor', 'as.matrix', 'as.matrix.POSIXlt',
        'as.matrix.data.frame', 'as.matrix.default', 'as.matrix.noquote',
        'as.name', 'as.null', 'as.null.default', 'as.numeric',
        'as.numeric_version', 'as.octmode', 'as.ordered',
        'as.package_version', 'as.pairlist', 'as.qr', 'as.raw', 'as.single',
        'as.single.default', 'as.symbol', 'as.table', 'as.table.default',
        'as.vector', 'as.vector.factor', 'asNamespace', 'asS3', 'asS4',
        'asin', 'asinh', 'assign', 'atan', 'atan2', 'atanh',
        'attachNamespace', 'attr', 'attr.all.equal', 'attributes',
        'autoload', 'autoloader', 'backsolve', 'baseenv', 'basename',
        'besselI', 'besselJ', 'besselK', 'besselY', 'beta',
        'bindingIsActive', 'bindingIsLocked', 'bindtextdomain', 'bitwAnd',
        'bitwNot', 'bitwOr', 'bitwShiftL', 'bitwShiftR', 'bitwXor', 'body',
        'bquote', 'browser', 'browserCondition', 'browserSetDebug',
        'browserText', 'builtins', 'by', 'by.data.frame', 'by.default',
        'bzfile', 'c.Date', 'c.POSIXct', 'c.POSIXlt', 'c.noquote',
        'c.numeric_version', 'call', 'callCC', 'capabilities', 'casefold',
        'cat', 'category', 'cbind', 'cbind.data.frame', 'ceiling',
        'char.expand', 'charToRaw', 'charmatch', 'chartr', 'check_tzones',
        'chol', 'chol.default', 'chol2inv', 'choose', 'class',
        'clearPushBack', 'close', 'close.connection', 'close.srcfile',
        'close.srcfilealias', 'closeAllConnections', 'col', 'colMeans',
        'colSums', 'colnames', 'commandArgs', 'comment', 'computeRestarts',
        'conditionCall', 'conditionCall.condition', 'conditionMessage',
        'conditionMessage.condition', 'conflicts', 'contributors', 'cos',
        'cosh', 'crossprod', 'cummax', 'cummin', 'cumprod', 'cumsum', 'cut',
        'cut.Date', 'cut.POSIXt', 'cut.default', 'dQuote', 'data.class',
        'data.matrix', 'date', 'debug', 'debugonce',
        'default.stringsAsFactors', 'delayedAssign', 'deparse', 'det',
        'determinant', 'determinant.matrix', 'dget', 'diag', 'diff',
        'diff.Date', 'diff.POSIXt', 'diff.default', 'difftime', 'digamma',
        'dim', 'dim.data.frame', 'dimnames', 'dimnames.data.frame', 'dir',
        'dir.create', 'dirname', 'do.call', 'dput', 'drop', 'droplevels',
        'droplevels.data.frame', 'droplevels.factor', 'dump', 'duplicated',
        'duplicated.POSIXlt', 'duplicated.array', 'duplicated.data.frame',
        'duplicated.default', 'duplicated.matrix',
        'duplicated.numeric_version', 'dyn.load', 'dyn.unload', 'eapply',
        'eigen', 'else', 'emptyenv', 'enc2native', 'enc2utf8',
        'encodeString', 'enquote', 'env.profile', 'environment',
        'environmentIsLocked', 'environmentName', 'eval', 'eval.parent',
        'evalq', 'exists', 'exp', 'expand.grid', 'expm1', 'expression',
        'factor', 'factorial', 'fifo', 'file', 'file.access', 'file.append',
        'file.choose', 'file.copy', 'file.create', 'file.exists',
        'file.info', 'file.link', 'file.path', 'file.remove', 'file.rename',
        'file.show', 'file.symlink', 'find.package', 'findInterval',
        'findPackageEnv', 'findRestart', 'floor', 'flush',
        'flush.connection', 'force', 'formals', 'format',
        'format.AsIs', 'format.Date', 'format.POSIXct', 'format.POSIXlt',
        'format.data.frame', 'format.default', 'format.difftime',
        'format.factor', 'format.hexmode', 'format.info',
        'format.libraryIQR', 'format.numeric_version', 'format.octmode',
        'format.packageInfo', 'format.pval', 'format.summaryDefault',
        'formatC', 'formatDL', 'forwardsolve', 'gamma', 'gc', 'gc.time',
        'gcinfo', 'gctorture', 'gctorture2', 'get', 'getAllConnections',
        'getCallingDLL', 'getCallingDLLe', 'getConnection',
        'getDLLRegisteredRoutines', 'getDLLRegisteredRoutines.DLLInfo',
        'getDLLRegisteredRoutines.character', 'getElement',
        'getExportedValue', 'getHook', 'getLoadedDLLs', 'getNamespace',
        'getNamespaceExports', 'getNamespaceImports', 'getNamespaceInfo',
        'getNamespaceName', 'getNamespaceUsers', 'getNamespaceVersion',
        'getNativeSymbolInfo', 'getOption', 'getRversion', 'getSrcLines',
        'getTaskCallbackNames', 'geterrmessage', 'gettext', 'gettextf',
        'getwd', 'gl', 'globalenv', 'gregexpr', 'grep', 'grepRaw', 'grepl',
        'gsub', 'gzcon', 'gzfile', 'head', 'iconv', 'iconvlist',
        'icuSetCollate', 'identical', 'identity', 'ifelse', 'importIntoEnv',
        'in', 'inherits', 'intToBits', 'intToUtf8', 'interaction', 'interactive',
        'intersect', 'inverse.rle', 'invisible', 'invokeRestart',
        'invokeRestartInteractively', 'is.R', 'is.array', 'is.atomic',
        'is.call', 'is.character', 'is.complex', 'is.data.frame',
        'is.double', 'is.element', 'is.environment', 'is.expression',
        'is.factor', 'is.finite', 'is.function', 'is.infinite',
        'is.integer', 'is.language', 'is.list', 'is.loaded', 'is.logical',
        'is.matrix', 'is.na', 'is.na.POSIXlt', 'is.na.data.frame',
        'is.na.numeric_version', 'is.name', 'is.nan', 'is.null',
        'is.numeric', 'is.numeric.Date', 'is.numeric.POSIXt',
        'is.numeric.difftime', 'is.numeric_version', 'is.object',
        'is.ordered', 'is.package_version', 'is.pairlist', 'is.primitive',
        'is.qr', 'is.raw', 'is.recursive', 'is.single', 'is.symbol',
        'is.table', 'is.unsorted', 'is.vector', 'isBaseNamespace',
        'isIncomplete', 'isNamespace', 'isOpen', 'isRestart', 'isS4',
        'isSeekable', 'isSymmetric', 'isSymmetric.matrix', 'isTRUE',
        'isatty', 'isdebugged', 'jitter', 'julian', 'julian.Date',
        'julian.POSIXt', 'kappa', 'kappa.default', 'kappa.lm', 'kappa.qr',
        'kronecker', 'l10n_info', 'labels', 'labels.default', 'lapply',
        'lazyLoad', 'lazyLoadDBexec', 'lazyLoadDBfetch', 'lbeta', 'lchoose',
        'length', 'length.POSIXlt', 'letters', 'levels', 'levels.default',
        'lfactorial', 'lgamma', 'library.dynam', 'library.dynam.unload',
        'licence', 'license', 'list.dirs', 'list.files', 'list2env', 'load',
        'loadNamespace', 'loadedNamespaces', 'loadingNamespaceInfo',
        'local', 'lockBinding', 'lockEnvironment', 'log', 'log10', 'log1p',
        'log2', 'logb', 'lower.tri', 'ls', 'make.names', 'make.unique',
        'makeActiveBinding', 'mapply', 'margin.table', 'mat.or.vec',
        'match', 'match.arg', 'match.call', 'match.fun', 'max', 'max.col',
        'mean', 'mean.Date', 'mean.POSIXct', 'mean.POSIXlt', 'mean.default',
        'mean.difftime', 'mem.limits', 'memCompress', 'memDecompress',
        'memory.profile', 'merge', 'merge.data.frame', 'merge.default',
        'message', 'mget', 'min', 'missing', 'mode', 'month.abb',
        # NOTE(review): 'months.abb' and 'months.nameletters' below do not
        # exist in R's base namespace ('month.abb'/'month.name' are the
        # real constants) -- they look like corrupted entries; confirm
        # against upstream before removing.
        'month.name', 'months', 'months.Date', 'months.POSIXt',
        'months.abb', 'months.nameletters', 'names', 'names.POSIXlt',
        'namespaceExport', 'namespaceImport', 'namespaceImportClasses',
        'namespaceImportFrom', 'namespaceImportMethods', 'nargs', 'nchar',
        'ncol', 'new.env', 'ngettext', 'nlevels', 'noquote', 'norm',
        'normalizePath', 'nrow', 'numeric_version', 'nzchar', 'objects',
        'oldClass', 'on.exit', 'open', 'open.connection', 'open.srcfile',
        'open.srcfilealias', 'open.srcfilecopy', 'options', 'order',
        'ordered', 'outer', 'packBits', 'packageEvent',
        'packageHasNamespace', 'packageStartupMessage', 'package_version',
        'pairlist', 'parent.env', 'parent.frame', 'parse',
        'parseNamespaceFile', 'paste', 'paste0', 'path.expand',
        'path.package', 'pipe', 'pmatch', 'pmax', 'pmax.int', 'pmin',
        'pmin.int', 'polyroot', 'pos.to.env', 'pretty', 'pretty.default',
        'prettyNum', 'print', 'print.AsIs', 'print.DLLInfo',
        'print.DLLInfoList', 'print.DLLRegisteredRoutines', 'print.Date',
        'print.NativeRoutineList', 'print.POSIXct', 'print.POSIXlt',
        'print.by', 'print.condition', 'print.connection',
        'print.data.frame', 'print.default', 'print.difftime',
        'print.factor', 'print.function', 'print.hexmode',
        'print.libraryIQR', 'print.listof', 'print.noquote',
        'print.numeric_version', 'print.octmode', 'print.packageInfo',
        'print.proc_time', 'print.restart', 'print.rle',
        'print.simple.list', 'print.srcfile', 'print.srcref',
        'print.summary.table', 'print.summaryDefault', 'print.table',
        'print.warnings', 'prmatrix', 'proc.time', 'prod', 'prop.table',
        'provideDimnames', 'psigamma', 'pushBack', 'pushBackLength', 'q',
        'qr', 'qr.Q', 'qr.R', 'qr.X', 'qr.coef', 'qr.default', 'qr.fitted',
        'qr.qty', 'qr.qy', 'qr.resid', 'qr.solve', 'quarters',
        'quarters.Date', 'quarters.POSIXt', 'quit', 'quote', 'range',
        'range.default', 'rank', 'rapply', 'raw', 'rawConnection',
        'rawConnectionValue', 'rawShift', 'rawToBits', 'rawToChar', 'rbind',
        'rbind.data.frame', 'rcond', 'read.dcf', 'readBin', 'readChar',
        'readLines', 'readRDS', 'readRenviron', 'readline', 'reg.finalizer',
        'regexec', 'regexpr', 'registerS3method', 'registerS3methods',
        'regmatches', 'remove', 'removeTaskCallback', 'rep', 'rep.Date',
        'rep.POSIXct', 'rep.POSIXlt', 'rep.factor', 'rep.int',
        'rep.numeric_version', 'rep_len', 'replace', 'replicate',
        'requireNamespace', 'restartDescription', 'restartFormals',
        'retracemem', 'rev', 'rev.default', 'rle', 'rm', 'round',
        'round.Date', 'round.POSIXt', 'row', 'row.names',
        'row.names.data.frame', 'row.names.default', 'rowMeans', 'rowSums',
        'rownames', 'rowsum', 'rowsum.data.frame', 'rowsum.default',
        'sQuote', 'sample', 'sample.int', 'sapply', 'save', 'save.image',
        'saveRDS', 'scale', 'scale.default', 'scan', 'search',
        'searchpaths', 'seek', 'seek.connection', 'seq', 'seq.Date',
        'seq.POSIXt', 'seq.default', 'seq.int', 'seq_along', 'seq_len',
        'sequence', 'serialize', 'set.seed', 'setHook', 'setNamespaceInfo',
        'setSessionTimeLimit', 'setTimeLimit', 'setdiff', 'setequal',
        'setwd', 'shQuote', 'showConnections', 'sign', 'signalCondition',
        'signif', 'simpleCondition', 'simpleError', 'simpleMessage',
        'simpleWarning', 'simplify2array', 'sin', 'single',
        'sinh', 'sink', 'sink.number', 'slice.index', 'socketConnection',
        'socketSelect', 'solve', 'solve.default', 'solve.qr', 'sort',
        'sort.POSIXlt', 'sort.default', 'sort.int', 'sort.list', 'split',
        'split.Date', 'split.POSIXct', 'split.data.frame', 'split.default',
        'sprintf', 'sqrt', 'srcfile', 'srcfilealias', 'srcfilecopy',
        'srcref', 'standardGeneric', 'stderr', 'stdin', 'stdout', 'stop',
        'stopifnot', 'storage.mode', 'strftime', 'strptime', 'strsplit',
        'strtoi', 'strtrim', 'structure', 'strwrap', 'sub', 'subset',
        'subset.data.frame', 'subset.default', 'subset.matrix',
        'substitute', 'substr', 'substring', 'sum', 'summary',
        'summary.Date', 'summary.POSIXct', 'summary.POSIXlt',
        'summary.connection', 'summary.data.frame', 'summary.default',
        'summary.factor', 'summary.matrix', 'summary.proc_time',
        'summary.srcfile', 'summary.srcref', 'summary.table',
        'suppressMessages', 'suppressPackageStartupMessages',
        'suppressWarnings', 'svd', 'sweep', 'sys.call', 'sys.calls',
        'sys.frame', 'sys.frames', 'sys.function', 'sys.load.image',
        'sys.nframe', 'sys.on.exit', 'sys.parent', 'sys.parents',
        'sys.save.image', 'sys.source', 'sys.status', 'system',
        'system.file', 'system.time', 'system2', 't', 't.data.frame',
        't.default', 'table', 'tabulate', 'tail', 'tan', 'tanh', 'tapply',
        'taskCallbackManager', 'tcrossprod', 'tempdir', 'tempfile',
        'testPlatformEquivalence', 'textConnection', 'textConnectionValue',
        'toString', 'toString.default', 'tolower', 'topenv', 'toupper',
        'trace', 'traceback', 'tracemem', 'tracingState', 'transform',
        'transform.data.frame', 'transform.default', 'trigamma', 'trunc',
        'trunc.Date', 'trunc.POSIXt', 'truncate', 'truncate.connection',
        'try', 'tryCatch', 'typeof', 'unclass', 'undebug', 'union',
        'unique', 'unique.POSIXlt', 'unique.array', 'unique.data.frame',
        'unique.default', 'unique.matrix', 'unique.numeric_version',
        'units', 'units.difftime', 'unix.time', 'unlink', 'unlist',
        'unloadNamespace', 'unlockBinding', 'unname', 'unserialize',
        'unsplit', 'untrace', 'untracemem', 'unz', 'upper.tri', 'url',
        'utf8ToInt', 'vapply', 'version', 'warning', 'warnings', 'weekdays',
        'weekdays.Date', 'weekdays.POSIXt', 'which', 'which.max',
        'which.min', 'with', 'with.default', 'withCallingHandlers',
        'withRestarts', 'withVisible', 'within', 'within.data.frame',
        'within.list', 'write', 'write.dcf', 'writeBin', 'writeChar',
        'writeLines', 'xor', 'xor.hexmode', 'xor.octmode',
        'xpdrows.data.frame', 'xtfrm', 'xtfrm.AsIs', 'xtfrm.Date',
        'xtfrm.POSIXct', 'xtfrm.POSIXlt', 'xtfrm.Surv', 'xtfrm.default',
        'xtfrm.difftime', 'xtfrm.factor', 'xtfrm.numeric_version', 'xzfile',
        'zapsmall'
    )

    tokens = {
        'comments': [
            (r'#.*$', Comment.Single),
        ],
        'valid_name': [
            (r'[a-zA-Z][\w.]*', Text),
            # can begin with ., but not if that is followed by a digit
            (r'\.[a-zA-Z_][\w.]*', Text),
        ],
        'punctuation': [
            (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
        ],
        'keywords': [
            # Base-namespace names; the lookahead avoids matching when the
            # name continues or is being assigned to.
            (words(builtins_base, suffix=r'(?![\w. =])'),
             Keyword.Pseudo),
            (r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
             r'(?![\w.])',
             Keyword.Reserved),
            (r'(array|category|character|complex|double|function|integer|list|'
             r'logical|matrix|numeric|vector|data.frame|c)'
             r'(?![\w.])',
             Keyword.Type),
            (r'(library|require|attach|detach|source)'
             r'(?![\w.])',
             Keyword.Namespace)
        ],
        'operators': [
            (r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
            (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator)
        ],
        'builtin_symbols': [
            (r'(NULL|NA(_(integer|real|complex|character)_)?|'
             r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))'
             r'(?![\w.])',
             Keyword.Constant),
            (r'(T|F)\b', Name.Builtin.Pseudo),
        ],
        'numbers': [
            # hex number
            (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
            # decimal number
            (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?',
             Number),
        ],
        'statements': [
            include('comments'),
            # whitespaces
            (r'\s+', Text),
            (r'`.*?`', String.Backtick),
            (r'\'', String, 'string_squote'),
            (r'\"', String, 'string_dquote'),
            include('builtin_symbols'),
            include('numbers'),
            include('keywords'),
            include('punctuation'),
            include('operators'),
            include('valid_name'),
        ],
        'root': [
            include('statements'),
            # blocks:
            (r'\{|\}', Punctuation),
            # (r'\{', Punctuation, 'block'),
            (r'.', Text),
        ],
        # 'block': [
        #     include('statements'),
        #     ('\{', Punctuation, '#push'),
        #     ('\}', Punctuation, '#pop')
        # ],
        'string_squote': [
            (r'([^\'\\]|\\.)*\'', String, '#pop'),
        ],
        'string_dquote': [
            (r'([^"\\]|\\.)*"', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # The R assignment arrow '<-' (not the decrement-looking '<--')
        # preceded by a name/closing bracket is a strong hint of R source.
        if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
            return 0.11
class RdLexer(RegexLexer):
    """
    Pygments Lexer for R documentation (Rd) files

    This is a very minimal implementation, highlighting little more
    than the macros. A description of Rd syntax is found in `Writing R
    Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
    and `Parsing Rd files <developer.r-project.org/parseRd.pdf>`_.

    .. versionadded:: 1.6
    """
    name = 'Rd'
    aliases = ['rd']
    filenames = ['*.Rd']
    mimetypes = ['text/x-r-doc']

    # To account for verbatim / LaTeX-like / and R-like areas
    # would require parsing.
    tokens = {
        'root': [
            # catch escaped brackets and percent sign
            (r'\\[\\{}%]', String.Escape),
            # comments
            (r'%.*$', Comment),
            # special macros with no arguments
            (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
            # macros
            (r'\\[a-zA-Z]+\b', Keyword),
            # special preprocessor macros
            (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
            # non-escaped brackets
            (r'[{}]', Name.Builtin),
            # everything else (run of plain characters)
            (r'[^\\%\n{}]+', Text),
            # single-character fallback so no input can stall the lexer
            (r'.', Text),
        ]
    }
| |
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import itertools
import logging
from osc_lib.command import command
from osc_lib import utils as oscutils
from ironicclient.common.i18n import _, _LW
from ironicclient.common import utils
from ironicclient import exc
from ironicclient.v1 import resource_fields as res_fields
class CreateBaremetalPortGroup(command.ShowOne):
    """Create a new baremetal port group."""

    log = logging.getLogger(__name__ + ".CreateBaremetalPortGroup")

    def get_parser(self, prog_name):
        """Extend the base parser with the port-group creation options."""
        parser = super(CreateBaremetalPortGroup, self).get_parser(prog_name)

        parser.add_argument(
            '--node',
            dest='node_uuid',
            metavar='<uuid>',
            required=True,
            help='UUID of the node that this port group belongs to.')
        parser.add_argument(
            '--address',
            metavar='<mac-address>',
            help='MAC address for this port group.')
        parser.add_argument(
            '--name',
            dest='name',
            help='Name of the port group.')
        parser.add_argument(
            '--uuid',
            dest='uuid',
            help='UUID of the port group.')
        parser.add_argument(
            '--extra',
            metavar="<key=value>",
            action='append',
            help="Record arbitrary key/value metadata. "
                 "Can be specified multiple times.")
        parser.add_argument(
            '--mode',
            help='Mode of the port group. For possible values, refer to '
                 'https://www.kernel.org/doc/Documentation/networking'
                 '/bonding.txt.')
        parser.add_argument(
            '--property',
            dest='properties',
            metavar="<key=value>",
            action='append',
            help="Key/value property related to this port group's "
                 "configuration. Can be specified multiple times."
        )
        standalone_ports_group = parser.add_mutually_exclusive_group()
        standalone_ports_group.add_argument(
            '--support-standalone-ports',
            action='store_true',
            help="Ports that are members of this port group "
                 "can be used as stand-alone ports. (default)"
        )
        standalone_ports_group.add_argument(
            '--unsupport-standalone-ports',
            action='store_true',
            help="Ports that are members of this port group "
                 "cannot be used as stand-alone ports."
        )

        return parser

    def take_action(self, parsed_args):
        """Create the port group and return its detailed fields.

        :param parsed_args: the parsed command-line namespace
        :returns: a (columns, values) pair for cliff's ShowOne
        """
        # Lazy %-style logging args (consistent with the other commands in
        # this module) instead of eager string interpolation.
        self.log.debug("take_action(%s)", parsed_args)
        baremetal_client = self.app.client_manager.baremetal

        field_list = ['node_uuid', 'address', 'name', 'uuid', 'extra', 'mode',
                      'properties']
        # Only forward options the user actually supplied.
        fields = {k: v for (k, v) in vars(parsed_args).items()
                  if k in field_list and v is not None}
        if parsed_args.support_standalone_ports:
            fields['standalone_ports_supported'] = True

        if parsed_args.unsupport_standalone_ports:
            fields['standalone_ports_supported'] = False

        # Convert repeated key=value options into dicts.
        fields = utils.args_array_to_dict(fields, 'extra')
        fields = utils.args_array_to_dict(fields, 'properties')
        portgroup = baremetal_client.portgroup.create(**fields)

        data = {f: getattr(portgroup, f, '')
                for f in res_fields.PORTGROUP_DETAILED_RESOURCE.fields}

        return self.dict2columns(data)
class ShowBaremetalPortGroup(command.ShowOne):
    """Show baremetal port group details."""

    log = logging.getLogger(__name__ + ".ShowBaremetalPortGroup")

    def get_parser(self, prog_name):
        # Build on the base parser, then register our arguments.
        parser = super(ShowBaremetalPortGroup, self).get_parser(prog_name)

        parser.add_argument(
            "portgroup",
            metavar="<id>",
            help="UUID or name of the port group "
                 "(or MAC address if --address is specified)."
        )
        parser.add_argument(
            '--address',
            dest='address',
            action='store_true',
            default=False,
            help='<id> is the MAC address (instead of UUID or name) '
                 'of the port group.')
        parser.add_argument(
            '--fields',
            nargs='+',
            dest='fields',
            metavar='<field>',
            action='append',
            choices=res_fields.PORTGROUP_DETAILED_RESOURCE.fields,
            default=[],
            help="One or more port group fields. Only these fields will be "
                 "fetched from the server.")
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.baremetal

        # argparse gives us a list of lists (nargs='+' with action='append');
        # flatten it, with an empty selection meaning "all fields".
        requested_fields = list(
            itertools.chain.from_iterable(parsed_args.fields))
        requested_fields = requested_fields if requested_fields else None

        # Pick the lookup according to how <id> should be interpreted.
        if parsed_args.address:
            lookup = client.portgroup.get_by_address
        else:
            lookup = client.portgroup.get
        record = lookup(parsed_args.portgroup, fields=requested_fields)._info

        # These keys are internal plumbing and not useful in the output.
        for hidden_key in ("links", "ports"):
            record.pop(hidden_key, None)

        return zip(*sorted(record.items()))
class ListBaremetalPortGroup(command.Lister):
    """List baremetal port groups."""

    log = logging.getLogger(__name__ + ".ListBaremetalPortGroup")

    def get_parser(self, prog_name):
        """Extend the base parser with filtering/paging/display options."""
        parser = super(ListBaremetalPortGroup, self).get_parser(prog_name)
        parser.add_argument(
            '--limit',
            metavar='<limit>',
            type=int,
            help='Maximum number of port groups to return per request, '
                 '0 for no limit. Default is the maximum number used '
                 'by the Baremetal API Service.'
        )
        parser.add_argument(
            '--marker',
            metavar='<port group>',
            help='Port group UUID (for example, of the last port group in the '
                 'list from a previous request). Returns the list of '
                 'port groups after this UUID.'
        )
        parser.add_argument(
            '--sort',
            metavar="<key>[:<direction>]",
            help='Sort output by specified port group fields and directions '
                 '(asc or desc) (default: asc). Multiple fields and '
                 'directions can be specified, separated by comma.',
        )
        parser.add_argument(
            '--address',
            metavar='<mac-address>',
            help="Only show information for the port group with this MAC "
                 "address.",
        )
        parser.add_argument(
            '--node',
            dest='node',
            metavar='<node>',
            help="Only list port groups of this node (name or UUID)."
        )
        display_group = parser.add_mutually_exclusive_group(required=False)
        display_group.add_argument(
            '--long',
            default=False,
            dest='detail',
            help="Show detailed information about the port groups.",
            action='store_true')
        display_group.add_argument(
            '--fields',
            nargs='+',
            dest='fields',
            metavar='<field>',
            action='append',
            default=[],
            choices=res_fields.PORTGROUP_DETAILED_RESOURCE.fields,
            help="One or more port group fields. Only these fields will be "
                 "fetched from the server. Can not be used when '--long' is "
                 "specified.")

        return parser

    def take_action(self, parsed_args):
        """List port groups and return (labels, rows) for cliff's Lister."""
        # Lazy %-style logging args (consistent with the other commands in
        # this module) instead of eager string interpolation.
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.baremetal

        columns = res_fields.PORTGROUP_RESOURCE.fields
        labels = res_fields.PORTGROUP_RESOURCE.labels

        params = {}
        if parsed_args.limit is not None and parsed_args.limit < 0:
            raise exc.CommandError(
                _('Expected non-negative --limit, got %s') %
                parsed_args.limit)
        params['limit'] = parsed_args.limit
        params['marker'] = parsed_args.marker
        if parsed_args.address is not None:
            params['address'] = parsed_args.address
        if parsed_args.node is not None:
            params['node'] = parsed_args.node

        if parsed_args.detail:
            params['detail'] = parsed_args.detail
            columns = res_fields.PORTGROUP_DETAILED_RESOURCE.fields
            labels = res_fields.PORTGROUP_DETAILED_RESOURCE.labels
        elif parsed_args.fields:
            # --fields implies a non-detailed listing restricted to the
            # requested columns; flatten argparse's list-of-lists.
            params['detail'] = False
            fields = itertools.chain.from_iterable(parsed_args.fields)
            resource = res_fields.Resource(list(fields))
            columns = resource.fields
            labels = resource.labels
            params['fields'] = columns

        self.log.debug("params(%s)", params)
        data = client.portgroup.list(**params)

        data = oscutils.sort_items(data, parsed_args.sort)

        return (labels,
                (oscutils.get_item_properties(s, columns, formatters={
                    'Properties': oscutils.format_dict},) for s in data))
class DeleteBaremetalPortGroup(command.Command):
    """Unregister baremetal port group(s)."""

    log = logging.getLogger(__name__ + ".DeleteBaremetalPortGroup")

    def get_parser(self, prog_name):
        """Attach the positional port group argument(s) to the parser."""
        parser = super(DeleteBaremetalPortGroup, self).get_parser(prog_name)
        parser.add_argument(
            "portgroups",
            metavar="<port group>",
            nargs="+",
            help="Port group(s) to delete (name or UUID).")
        return parser

    def take_action(self, parsed_args):
        """Delete every requested port group, reporting all failures at once."""
        self.log.debug("take_action(%s)", parsed_args)
        baremetal_client = self.app.client_manager.baremetal

        errors = []
        for pg in parsed_args.portgroups:
            try:
                baremetal_client.portgroup.delete(pg)
            except exc.ClientException as e:
                # Keep going so one bad port group doesn't block the rest;
                # collect every failure and report them together below.
                errors.append(_("Failed to delete port group %(portgroup)s: "
                                " %(error)s")
                              % {'portgroup': pg, 'error': e})
            else:
                print(_('Deleted port group %s') % pg)

        if errors:
            raise exc.ClientException("\n".join(errors))
class SetBaremetalPortGroup(command.Command):
    """Set baremetal port group properties."""

    log = logging.getLogger(__name__ + ".SetBaremetalPortGroup")

    def get_parser(self, prog_name):
        """Register every option that can be set on a port group."""
        parser = super(SetBaremetalPortGroup, self).get_parser(prog_name)

        parser.add_argument(
            'portgroup',
            metavar='<port group>',
            help="Name or UUID of the port group.",
        )
        parser.add_argument(
            '--node',
            dest='node_uuid',
            metavar='<uuid>',
            help='Update UUID of the node that this port group belongs to.'
        )
        parser.add_argument(
            "--address",
            metavar="<mac-address>",
            help="MAC address for this port group.",
        )
        parser.add_argument(
            "--name",
            metavar="<name>",
            help="Name of the port group.",
        )
        parser.add_argument(
            "--extra",
            metavar="<key=value>",
            action='append',
            help='Extra to set on this baremetal port group '
                 '(repeat option to set multiple extras).',
        )
        parser.add_argument(
            '--mode',
            help='Mode of the port group. For possible values, refer to '
                 'https://www.kernel.org/doc/Documentation/networking'
                 '/bonding.txt.')
        parser.add_argument(
            '--property',
            dest='properties',
            metavar="<key=value>",
            action='append',
            help="Key/value property related to this port group's "
                 "configuration (repeat option to set multiple properties).")
        # The two standalone-ports flags contradict each other, so argparse
        # must reject using both in the same invocation.
        standalone_ports_group = parser.add_mutually_exclusive_group()
        standalone_ports_group.add_argument(
            '--support-standalone-ports',
            action='store_true',
            default=None,
            help="Ports that are members of this port group "
                 "can be used as stand-alone ports."
        )
        standalone_ports_group.add_argument(
            '--unsupport-standalone-ports',
            action='store_true',
            help="Ports that are members of this port group "
                 "cannot be used as stand-alone ports."
        )
        return parser

    def take_action(self, parsed_args):
        """Translate the parsed options into 'add' patch operations and apply them."""
        self.log.debug("take_action(%s)", parsed_args)
        baremetal_client = self.app.client_manager.baremetal

        patches = []

        def _add(values):
            # Accumulate 'add' patch operations for the given attributes.
            patches.extend(utils.args_array_to_patch('add', values))

        if parsed_args.node_uuid:
            _add(["node_uuid=%s" % parsed_args.node_uuid])
        if parsed_args.address:
            _add(["address=%s" % parsed_args.address])
        if parsed_args.name:
            _add(["name=%s" % parsed_args.name])
        if parsed_args.support_standalone_ports:
            _add(["standalone_ports_supported=True"])
        if parsed_args.unsupport_standalone_ports:
            _add(["standalone_ports_supported=False"])
        if parsed_args.mode:
            _add(["mode=%s" % parsed_args.mode])
        if parsed_args.extra:
            _add(['extra/' + x for x in parsed_args.extra])
        if parsed_args.properties:
            _add(['properties/' + x for x in parsed_args.properties])

        if not patches:
            self.log.warning(_LW("Please specify what to set."))
        else:
            baremetal_client.portgroup.update(parsed_args.portgroup,
                                              patches)
class UnsetBaremetalPortGroup(command.Command):
    """Unset baremetal port group properties."""

    log = logging.getLogger(__name__ + ".UnsetBaremetalPortGroup")

    def get_parser(self, prog_name):
        """Register every option that can be unset on a port group."""
        parser = super(UnsetBaremetalPortGroup, self).get_parser(prog_name)

        parser.add_argument(
            'portgroup',
            metavar='<port group>',
            help="Name or UUID of the port group."
        )
        parser.add_argument(
            "--name",
            action='store_true',
            help="Unset the name of the port group.",
        )
        parser.add_argument(
            "--address",
            action='store_true',
            help="Unset the address of the port group.",
        )
        parser.add_argument(
            "--extra",
            metavar="<key>",
            action='append',
            help='Extra to unset on this baremetal port group '
                 '(repeat option to unset multiple extras).',
        )
        parser.add_argument(
            "--property",
            dest='properties',
            metavar="<key>",
            action='append',
            help='Property to unset on this baremetal port group '
                 '(repeat option to unset multiple properties).',
        )
        return parser

    def take_action(self, parsed_args):
        """Translate the parsed options into 'remove' patch operations and apply them."""
        self.log.debug("take_action(%s)", parsed_args)
        baremetal_client = self.app.client_manager.baremetal

        patches = []

        def _remove(paths):
            # Accumulate 'remove' patch operations for the given paths.
            patches.extend(utils.args_array_to_patch('remove', paths))

        if parsed_args.name:
            _remove(['name'])
        if parsed_args.address:
            _remove(['address'])
        if parsed_args.extra:
            _remove(['extra/' + key for key in parsed_args.extra])
        if parsed_args.properties:
            _remove(['properties/' + key for key in parsed_args.properties])

        if not patches:
            self.log.warning(_LW("Please specify what to unset."))
        else:
            baremetal_client.portgroup.update(parsed_args.portgroup,
                                              patches)
| |
import datetime
from django.contrib.auth.models import Group, User
from django.test import TestCase
from django.utils import timezone
from django.utils.timezone import utc
from django.core.files import File
from nose.tools import ok_, eq_
from airmozilla.main.models import (
Approval,
Event,
EventOldSlug,
Location,
most_recent_event,
RecruitmentMessage,
Picture
)
class EventTests(TestCase):
    """Tests of basic Event model behavior."""

    def test_location_time(self):
        """``location_time`` reflects the timezone of the event's location."""
        utc_start = datetime.datetime(2099, 1, 1, 18, 0, 0).replace(tzinfo=utc)
        pacific = Location.objects.create(
            name='Mountain View',
            timezone='US/Pacific',
        )
        event = Event.objects.create(
            status=Event.STATUS_INITIATED,
            start_time=utc_start,
            location=pacific,
        )
        # 18:00 UTC on Jan 1 is 10:00 in US/Pacific.
        eq_(event.location_time.hour, 10)

        # Reassigning the location shifts the local hour accordingly.
        event.location = Location.objects.create(
            name='Paris',
            timezone='Europe/Paris'
        )
        event.save()
        eq_(event.location_time.hour, 19)

    def test_most_recent_event(self):
        """``most_recent_event()`` tracks event creation and re-saving."""
        start = datetime.datetime(2099, 1, 1, 18, 0, 0).replace(tzinfo=utc)
        location = Location.objects.create(
            name='Mountain View',
            timezone='US/Pacific',
        )
        # Nothing exists yet.
        eq_(most_recent_event(), None)

        first = Event.objects.create(
            title='Event 1',
            status=Event.STATUS_INITIATED,
            start_time=start,
            location=location,
        )
        eq_(most_recent_event(), first)

        second = Event.objects.create(
            title='Event 2',
            status=Event.STATUS_INITIATED,
            start_time=start + datetime.timedelta(days=1),
            location=location,
        )
        eq_(most_recent_event(), second)

        # NOTE(review): after moving the first event's start_time *earlier*
        # and re-saving, it becomes the most recent again -- presumably the
        # ordering is by modification time rather than start_time; confirm.
        first.start_time -= datetime.timedelta(days=1)
        first.save()
        eq_(most_recent_event(), first)
class EventStateTests(TestCase):
    """Exercises the Event manager's state-based querysets (initiated,
    approved, upcoming, live, archiving, archived, archived_and_removed)
    through every lifecycle transition.
    """

    def test_event_state(self):
        """Each manager queryset must track status/approval/time changes."""
        time_now = timezone.now()
        time_soon = time_now + datetime.timedelta(hours=1)
        time_before = time_now - datetime.timedelta(hours=1)

        # initiated event
        initiated = Event.objects.create(
            status=Event.STATUS_INITIATED,
            start_time=time_now,
        )
        ok_(initiated in Event.objects.initiated())
        ok_(not initiated.needs_approval())

        # scheduled event with pending approval
        to_approve = Event.objects.create(
            status=Event.STATUS_SCHEDULED,
            start_time=time_now,
        )
        ok_(to_approve not in Event.objects.initiated())
        ok_(to_approve in Event.objects.approved())
        ok_(not to_approve.needs_approval())
        app = Approval.objects.create(event=to_approve, group=None)
        # attaching the Approval makes the event unapproved
        ok_(to_approve not in Event.objects.approved())
        ok_(to_approve in Event.objects.initiated())
        ok_(to_approve.needs_approval())
        # processing and approving the Approval re-approves the event
        app.processed = True
        app.approved = True
        app.save()
        ok_(to_approve in Event.objects.approved())
        # removing the event takes it out of the initiated queryset
        to_approve.status = Event.STATUS_REMOVED
        to_approve.save()
        ok_(to_approve in Event.objects.archived_and_removed())
        ok_(to_approve not in Event.objects.initiated())

        # upcoming event (starts in the future, never archived)
        upcoming = Event.objects.create(
            status=Event.STATUS_SCHEDULED,
            start_time=time_soon,
            archive_time=None
        )
        ok_(upcoming in Event.objects.approved())
        ok_(upcoming in Event.objects.upcoming())
        upcoming.status = Event.STATUS_REMOVED
        upcoming.save()
        ok_(upcoming in Event.objects.archived_and_removed())
        ok_(upcoming not in Event.objects.upcoming())

        # live event (already started, no archive time yet)
        live = Event.objects.create(
            status=Event.STATUS_SCHEDULED,
            start_time=time_now,
            archive_time=None
        )
        ok_(live in Event.objects.approved())
        ok_(live in Event.objects.live())
        live.status = Event.STATUS_REMOVED
        live.save()
        ok_(live in Event.objects.archived_and_removed())
        ok_(live not in Event.objects.live())

        # archiving event (started in the past, archive time in the future)
        archiving = Event.objects.create(
            status=Event.STATUS_SCHEDULED,
            start_time=time_before,
            archive_time=time_soon
        )
        ok_(archiving in Event.objects.approved())
        ok_(archiving in Event.objects.archiving())
        ok_(archiving not in Event.objects.live())
        archiving.status = Event.STATUS_REMOVED
        archiving.save()
        ok_(archiving in Event.objects.archived_and_removed())
        ok_(archiving not in Event.objects.archiving())

        # archived event (both start and archive time in the past)
        archived = Event.objects.create(
            status=Event.STATUS_SCHEDULED,
            start_time=time_before,
            archive_time=time_before
        )
        ok_(archived in Event.objects.approved())
        ok_(archived in Event.objects.archived())
        archived.status = Event.STATUS_REMOVED
        archived.save()
        ok_(archived in Event.objects.archived_and_removed())
        ok_(archived not in Event.objects.archived())
        # a pending event belongs to neither the archived nor removed set
        archived.status = Event.STATUS_PENDING
        archived.save()
        ok_(archived not in Event.objects.archived_and_removed())

    def test_needs_approval_if_not_approved(self):
        """An Approval counts as handled once it is processed, even if
        it was not actually approved."""
        time_now = timezone.now()
        to_approve = Event.objects.create(
            status=Event.STATUS_SCHEDULED,
            start_time=time_now,
        )
        app = Approval.objects.create(event=to_approve, group=None)
        ok_(to_approve.needs_approval())
        app.processed = True
        app.save()
        ok_(not to_approve.needs_approval())
class ForeignKeyTests(TestCase):
    """Verifies delete-cascade behavior between Event and related models.

    Deleting most related objects (Template, Location, Channel, User,
    EventOldSlug, Group, Approval, Tag) must NOT delete the Event, while
    deleting an Event DOES cascade to its Approval and EventOldSlug rows.
    """

    fixtures = ['airmozilla/manage/tests/main_testdata.json']

    def _successful_delete(self, obj):
        """Delete an object and ensure it's deleted."""
        model = obj.__class__
        obj.delete()
        remaining = model.objects.filter(id=obj.id).exists()
        ok_(not remaining, 'The object was not deleted. Model: %s' % model)

    def _refresh_ok(self, obj, exists=True):
        """Ensure that an object still exists or is gone."""
        model = obj.__class__
        refresh = model.objects.filter(id=obj.id).exists()
        if exists:
            ok_(refresh, 'The object no longer exists. Model: %s' % model)
        else:
            ok_(not refresh, 'The object still exists. Model: %s' % model)

    def test_template_remove(self):
        """Deleting a Template does not delete associated Event."""
        event = Event.objects.get(id=22)
        self._successful_delete(event.template)
        self._refresh_ok(event)

    def test_location_remove(self):
        """Deleting a Location does not delete associated Event."""
        event = Event.objects.get(id=22)
        self._successful_delete(event.location)
        self._refresh_ok(event)

    def test_channel_remove(self):
        """Deleting a Channel does not delete associated Event."""
        event = Event.objects.get(id=22)
        # guard: the fixture event must actually have channels to remove
        assert event.channels.all().count()
        for channel in event.channels.all():
            self._successful_delete(channel)
        self._refresh_ok(event)

    def test_user_creator_remove(self):
        """Deleting a creator User does not delete associated Event."""
        event = Event.objects.get(id=22)
        user = User.objects.get(id=1)
        event.creator = user
        event.modified_user = None
        event.save()
        self._successful_delete(user)
        self._refresh_ok(event)

    def test_user_modifier_remove(self):
        """Deleting a modifying User does not delete associated Event."""
        event = Event.objects.get(id=22)
        user = User.objects.get(id=1)
        event.creator = None
        event.modified_user = user
        event.save()
        self._successful_delete(user)
        self._refresh_ok(event)

    def test_eventoldslug_remove(self):
        """Deleting an EventOldSlug does not delete associated Event."""
        event = Event.objects.get(title='Test event')
        oldslug = EventOldSlug.objects.create(
            event=event,
            slug='test-old-slug'
        )
        self._successful_delete(oldslug)
        self._refresh_ok(event)

    def test_group_remove(self):
        """Deleting a Group does not delete associated Approval."""
        event = Event.objects.get(id=22)
        group = Group.objects.get(id=1)
        approval = Approval(event=event, group=group)
        approval.save()
        self._successful_delete(group)
        self._refresh_ok(approval)

    def test_user_remove(self):
        """Deleting a User does not delete associated Approval."""
        event = Event.objects.get(id=22)
        user = User.objects.get(id=1)
        approval = Approval(event=event, user=user)
        approval.save()
        self._successful_delete(user)
        self._refresh_ok(approval)

    def test_approval_remove(self):
        """Deleting an Approval does not delete associated Event."""
        event = Event.objects.get(id=22)
        approval = Approval(event=event)
        approval.save()
        self._successful_delete(approval)
        self._refresh_ok(event)

    def test_tags_remove(self):
        """Deleting all Tags does not delete associated Event."""
        event = Event.objects.get(id=22)
        tags = event.tags.all()
        ok_(tags.exists())
        for tag in tags:
            self._successful_delete(tag)
        self._refresh_ok(event)

    def test_event_remove_approval(self):
        """Deleting an Event DOES remove associated Approval."""
        event = Event.objects.get(id=22)
        approval = Approval(event=event)
        approval.save()
        self._successful_delete(event)
        self._refresh_ok(approval, exists=False)

    def test_event_remove_eventoldslug(self):
        """Deleting an Event DOES remove associated EventOldSlug."""
        event = Event.objects.get(title='Test event')
        oldslug = EventOldSlug.objects.create(
            event=event,
            slug='test-old-slug'
        )
        eq_(oldslug.event, event)
        self._successful_delete(event)
        self._refresh_ok(oldslug, exists=False)
class RecruitmentMessageTests(TestCase):
    """Tests of RecruitmentMessage creation and delete behavior."""

    fixtures = ['airmozilla/manage/tests/main_testdata.json']

    def _make_message(self):
        # Minimal valid message shared by every test in this class.
        return RecruitmentMessage.objects.create(
            text='Check this out',
            url='http://www.com'
        )

    def test_create(self):
        """Creating a message fills in defaults and timestamps."""
        msg = self._make_message()
        eq_(msg.notes, '')
        ok_(msg.modified)
        ok_(msg.created)

    def test_delete_modified_user(self):
        """Deleting the modifying user must not delete the message."""
        msg = self._make_message()
        user = User.objects.create(username='bob')
        msg.modified_user = user
        msg.save()
        user.delete()
        ok_(RecruitmentMessage.objects.all())

    def test_delete_unbreaks_user(self):
        """Deleting a message must not delete its modifying user."""
        msg = self._make_message()
        user = User.objects.create(username='bob')
        # one user comes from the fixture, plus the one just created
        eq_(User.objects.all().count(), 2)
        msg.modified_user = user
        msg.save()
        msg.delete()
        eq_(User.objects.all().count(), 2)

    def test_delete_unbreaks_event(self):
        """Deleting a message must not delete events referring to it."""
        event = Event.objects.get(title='Test event')
        msg = self._make_message()
        event.recruitmentmessage = msg
        event.save()
        msg.delete()
        eq_(Event.objects.all().count(), 1)
class PictureTests(TestCase):
    # Path to the binary PNG fixture used by the tests below.
    main_image = 'airmozilla/manage/tests/firefox.png'

    def test_create_picture(self):
        """A Picture can be created from nothing but an image file.

        Size and dimensions are derived from the file content, and the
        repr includes the class name and any notes.
        """
        # The fixture is a PNG, so it must be opened in binary mode;
        # text mode would corrupt the bytes (and fails to decode the
        # file at all on Python 3).
        with open(self.main_image, 'rb') as fp:
            picture = Picture.objects.create(file=File(fp))
            ok_(picture.size > 0)
            ok_(picture.width > 0)
            ok_(picture.height > 0)
            ok_(Picture.__name__ in repr(picture))
            picture.notes = "Something"
            ok_("Something" in repr(picture))
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations(object):
    """RoutesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # NOTE: this class is generated by AutoRest (see the file header);
        # manual edits will be lost when the code is regenerated.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _delete_initial(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request of the delete long-running operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Status codes the service may return for an accepted delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified route from a route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param route_name: The name of the route.
        :type route_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                route_name=route_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Drop request-scoped kwargs so they are not forwarded to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # NOTE(review): delete polls the 'location' final state while
        # create_or_update uses 'azure-async-operation' -- presumably
        # dictated by the service specification; confirm before changing.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def get(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Route"
        """Gets the specified route from a route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param route_name: The name of the route.
        :type route_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Route, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_04_01.models.Route
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Route"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Route', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        route_parameters,  # type: "_models.Route"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Route"
        """Send the initial PUT request of the create-or-update long-running operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Route"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(route_parameters, 'Route')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 = updated existing route, 201 = created a new one; both
        # carry a Route body.
        if response.status_code == 200:
            deserialized = self._deserialize('Route', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Route', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        route_name,  # type: str
        route_parameters,  # type: "_models.Route"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.Route"]
        """Creates or updates a route in the specified route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :param route_name: The name of the route.
        :type route_name: str
        :param route_parameters: Parameters supplied to the create or update route operation.
        :type route_parameters: ~azure.mgmt.network.v2020_04_01.models.Route
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either Route or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_04_01.models.Route]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Route"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                route_table_name=route_table_name,
                route_name=route_name,
                route_parameters=route_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Drop request-scoped kwargs so they are not forwarded to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('Route', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
            'routeName': self._serialize.url("route_name", route_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}  # type: ignore

    def list(
        self,
        resource_group_name,  # type: str
        route_table_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.RouteListResult"]
        """Gets all routes in a route table.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_table_name: The name of the route table.
        :type route_table_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RouteListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.RouteListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Follow-up pages: the service-provided next_link already
                # encodes all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('RouteListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'}  # type: ignore
| |
"""
Run embedded integration tests from GTClang DSL code.
"""
import sys
import os.path
import re
import subprocess
import filecmp
import argparse
from json import loads as load_json
from difflib import unified_diff
# Regexes for the test directives embedded as // comments in GTClang sources.
# Each pattern captures the directive's payload via named groups.
patterns = {
    # RUN: <command> -- the single command line to execute for the test.
    "RUN": re.compile(r"//\s*RUN:\s*(?P<command>[^\n]+)"),
    # EXPECTED %line%: <output> -- expected output tied to a source line.
    "EXPECTED_LINE": re.compile(r"//\s*EXPECTED\s*%line%:\s*(?P<output>[^\n]+)"),
    # EXPECTED: <output> -- expected output without a %line% qualifier.
    "EXPECTED": re.compile(r"//\s*EXPECTED:\s*(?!%line%)(?P<output>[^\n]+)"),
    # EXPECTED_FILE: OUTPUT: <f> REFERENCE: <f> [IGNORE: ...] -- file compare.
    "EXPECTED_FILE": re.compile(
        r"//\s*EXPECTED_FILE:\s*OUTPUT:\s*(?P<output>[^\s]+)\s*REFERENCE:\s*(?P<reference>[^\s]+)(?P<remainder>[^\n]*)"
    ),
    # EXPECTED_ERROR: <output> -- error text the compiler must emit.
    "EXPECTED_ERROR": re.compile(r"//\s*EXPECTED_ERROR:\s*(?P<output>[^\n]+)"),
}
def print_error(message):
    """Write a FAILURE-prefixed line for *message* to stderr."""
    sys.stderr.write("FAILURE: " + message + "\n")
def print_test(message):
    """Write a TEST-prefixed status line for *message* to stdout."""
    sys.stdout.write("TEST: {}\n".format(message))
def compare_json_files(output, reference, ignore_keys=None):
    """Compare two JSON files, ignoring values stored under certain keys.

    :param output: path of the JSON file produced by the test run.
    :param reference: path of the reference JSON file.
    :param ignore_keys: dict keys whose values are excluded from comparison.
    :return: True when the trees match; False otherwise (a unified diff of
        the two files, with ignored keys filtered out, is printed).
    :raises ValueError: if a file is missing, or a top-level node is neither
        a list nor a dict.
    """
    # Fresh list per call: a mutable default argument would be shared
    # across invocations.
    if ignore_keys is None:
        ignore_keys = []

    def read_file(filename):
        # Return the file's lines, failing loudly on a missing path.
        if not os.path.exists(filename):
            raise ValueError("Could not find file: {}".format(filename))
        with open(filename, mode="r") as f:
            return f.readlines()

    def compare_json_trees(t1, t2):
        # t1 and t2 are containers (lists or dicts); scalars are compared
        # where they occur inside their parent container.
        if isinstance(t1, list):
            for v1, v2 in zip(t1, t2):
                if isinstance(v1, (list, dict)):
                    # Recurse into sub-containers. Do NOT also compare them
                    # with != afterwards: that would flag trees that differ
                    # only in ignored keys.
                    if not compare_json_trees(v1, v2):
                        return False
                elif v1 != v2:
                    print_error("Values " + str(v1) + " and " + str(v2) + " do not match")
                    return False
        elif isinstance(t1, dict):
            # Walk keys in sorted order so both dicts are visited in lockstep.
            for k1, k2 in zip(sorted(t1.keys()), sorted(t2.keys())):
                if k1 != k2 and k1 not in ignore_keys:
                    print_error("Values " + str(k1) + " and " + str(k2) + " do not match")
                    return False
                if isinstance(t1[k1], (list, dict)):
                    if not compare_json_trees(t1[k1], t2[k2]):
                        return False
                elif t1[k1] != t2[k2] and k1 not in ignore_keys:
                    print_error(
                        "Values "
                        + str(t1[k1])
                        + " and "
                        + str(t2[k2])
                        + " do not match"
                    )
                    return False
        else:
            raise ValueError("Logic error")
        return True

    output_lines = read_file(output)
    output_json = load_json(" ".join(output_lines))
    reference_lines = read_file(reference)
    reference_json = load_json(" ".join(reference_lines))

    if not compare_json_trees(reference_json, output_json):
        # Show a unified diff of the raw files, dropping lines that mention
        # any ignored key.
        sys.stdout.writelines(
            unified_diff(
                [l for l in output_lines if not any(p in l for p in ignore_keys)],
                [l for l in reference_lines if not any(p in l for p in ignore_keys)],
                fromfile=output,
                tofile=reference,
            )
        )
        return False
    return True
def run_test(content, gtclang_exec, filename, verbose=False, ignore_keys=None):
    """Execute the embedded test directives found in *content*.

    Finds the RUN, EXPECTED / EXPECTED_LINE, EXPECTED_ERROR and EXPECTED_FILE
    directives, runs the RUN command, and checks the observed output against
    each expectation.

    :param content: text of the GTClang DSL source file under test.
    :param gtclang_exec: gtclang executable, substituted for %gtclang%.
    :param filename: path of the source file, substituted for %file%
        (its basename without extension is substituted for %filename%).
    :param verbose: when True, echo the child process' stdout and stderr.
    :param ignore_keys: extra JSON keys ignored in EXPECTED_FILE comparisons.
    :return: 0 when every check passes, 1 otherwise.
    :raises ValueError: if there is not exactly one RUN statement, or an
        EXPECTED_FILE compares a file type that is not supported yet.
    """
    # Work on a private copy: the IGNORE handling below extends the list in
    # place, which would otherwise mutate a shared default argument (and the
    # caller's own list) across invocations.
    ignore_keys = list(ignore_keys) if ignore_keys is not None else []

    def get_line_number(content, m):
        # 1-based number of the first line containing the match text.
        return list(map(lambda x: m in x, content.split("\n"))).index(True) + 1

    # Derive paths from the filename parameter (not the global CLI args) so
    # the function also works when called programmatically.
    dirname, basename = (
        os.path.dirname(filename),
        os.path.splitext(os.path.basename(filename))[0],
    )

    cmd = []
    error_happened = False
    expect_error = False

    # Look for RUN
    m_runs = patterns["RUN"].findall(content)
    if len(m_runs) != 1 or m_runs[0] is None:
        raise ValueError("Requires exactly one RUN statement somewhere in the file!")
    else:
        cmd = (
            m_runs[0]
            .replace(r"%gtclang%", gtclang_exec)
            .replace(r"%file%", filename)
            .replace(r"%filename%", basename)
            .split(" ")
        )

    # Run it!
    proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = (x.decode() for x in (proc.stdout, proc.stderr))
    print(" ".join(cmd))
    if verbose:
        print(stdout.strip("\n"))
        print(stderr.strip("\n"))

    # Begin tests
    # Look for EXPECTED_LINE and EXPECTED
    m_expected = patterns["EXPECTED_LINE"].findall(content) + patterns[
        "EXPECTED"
    ].findall(content)
    for m in m_expected:
        # Replace all possible patterns with regex expressions
        m = m.strip(" ")
        line_match = re.search(r"%line[\+|\-]?\d*%", m)
        if line_match:
            # Evaluate expressions such as %line+2% relative to the line on
            # which the directive itself appears.
            line = eval(
                line_match.group()
                .strip("%")
                .replace("line", str(get_line_number(content, m)))
            )
            m = m.replace(line_match.group(), str(line))
        print_test("EXPECTED_LINE: " + m)
        # Look for line in stdout
        if not re.search(m, stdout):
            print_error(f"Could not match: {m}")
            error_happened = True

    # Look for EXPECTED_ERROR
    m_errors = patterns["EXPECTED_ERROR"].findall(content)
    for m in m_errors:
        m = m.strip(" ")
        print_test("EXPECTED_ERROR: " + m)
        # Errors are reported on stderr
        if re.search(m, stderr) is None:
            print_error(f"Could not find error in stderr: {m}")
            error_happened = True
    # If we expect an error, we want gtclang to return an error
    if len(m_errors) > 0:
        expect_error = True

    # Look for EXPECTED_FILE
    m_expected_file = patterns["EXPECTED_FILE"].findall(content)
    for m in m_expected_file:
        files = (m[0], m[1])
        if len(m) > 2:
            # test for IGNORE pattern in the directive remainder
            ignore_keys += re.findall(r"IGNORE:\s*(?P<pattern>[^ ]+)", m[2])
        # Replace %filename% in string expression and split on commas
        tests = zip(*(x.replace(r"%filename%", basename).split(",") for x in files))
        for output, reference in tests:
            # Add source directory path to reference
            reference = os.path.join(dirname, reference)
            print_test(
                "EXPECTED_FILE: OUTPUT={} REFERENCE={}{}".format(
                    output, reference, "".join(" IGNORE: " + k for k in ignore_keys)
                )
            )
            if all(
                os.path.splitext(f)[-1] in (".iir", ".sir", ".json")
                for f in (output, reference)
            ):
                # All these are json trees, so we can compare them
                same_trees = compare_json_files(
                    output, reference, ignore_keys=ignore_keys
                )
                if not same_trees:
                    error_happened = True
                    print_error("JSON trees do not match")
            else:
                raise ValueError("Not yet implemented.")

    # Ensure the compiler was successful
    successful_code = 0
    # Boolean indicating whether gtclang returned the correct ret_val; when an
    # error is expected, a zero return code is itself a failure.
    gtclang_success = (
        proc.returncode == successful_code
        if not expect_error
        else proc.returncode != successful_code
    )

    # Return
    if not gtclang_success:
        print_error(
            "received return code {}{}".format(
                proc.returncode, ":\n{}".format(stderr) if stderr else "",
            )
        )
        return 1
    elif error_happened:
        return 1
    else:
        return 0
if __name__ == "__main__":
    # CLI entry point: parse arguments, read the test source, run the checks
    # and use the result as the process exit code.
    parser = argparse.ArgumentParser(description="Test GTClang Source Code.")
    parser.add_argument("gtclang", type=str, help="GTClang executable")
    parser.add_argument("source", type=str, help="Source code to compile")
    parser.add_argument(
        "-i",
        "--ignore",
        nargs="*",
        help="Extra keys to ignore in comparison",
        default=[],
    )
    parser.add_argument(
        "-v", "--verbose", help="modify output verbosity", action="store_true"
    )
    # Extra positional options are accepted; they are not referenced
    # elsewhere in this file.
    parser.add_argument("options", nargs="*")
    args = parser.parse_args()

    # Read file (strip trailing newlines so %line% arithmetic stays stable)
    with open(args.source, mode="r") as f:
        content = f.read().rstrip("\n")

    # Call test function; run_test returns 0 on success, 1 on failure.
    ret_val = run_test(
        content,
        args.gtclang,
        args.source,
        verbose=args.verbose,
        ignore_keys=args.ignore,
    )
    sys.exit(ret_val)
| |
"""Functions for controlling high-level search functionality for the
AFLOW database.
"""
# Base URL of the AFLUX search endpoint; page requests are appended to it.
server = "http://aflowlib.duke.edu/search/API/?"
"""str: API server address over HTTP.
"""
from aflow import msg
def search(catalog=None, batch_size=100):
    """Create a :class:`aflow.control.Query` for building a search.

    Args:
        catalog (str): one of the catalogs supported on AFLOW: ['icsd', 'lib1',
            'lib2', 'lib3']. Also supports a `list` of catalog names.
        batch_size (int): number of data entries to return per HTTP request.
    """
    query = Query(catalog, batch_size)
    return query
class Query(object):
    """Represents a search against the AFLUX API.

    Args:
        catalog (str): one of the catalogs supported on AFLOW: ['icsd', 'lib1',
            'lib2', 'lib3']. Also supports a `list` of catalog names.
        batch_size (int): number of data entries to return per HTTP request.
        step (int): step size over entries.

    Attributes:
        filters (list): of `str` filter arguments to pass to the matchbook
            section of the API request.
        select (list): of :class:`aflow.keywords.Keyword` to *include* in the request.
        excludes (list): of :class:`aflow.keywords.Keyword` to *exclude* in the request.
        orderby (str): name of the keyword to order by. AFLUX only supports a
            single order-by parameter for now.
        catalog (str): one of the catalogs supported on AFLOW: ['icsd', 'lib1',
            'lib2', 'lib3']. Also supports a `list` of catalog names.
        N (int): number of results in the current search query.
        reverse (bool): when True, reverse the order of the results in the
            query.
        k (int): number of datasets per page for the current iterator. Can be
            controlled by `batch_size`.
        responses (dict): keys are (n,k) tuples from the pagination; values are
            the corresponding JSON dictionaries.
        step (int): step size over entries.
    """
    def __init__(self, catalog=None, batch_size=100, step=1):
        self.filters = []
        self.selects = []
        self.excludes = []
        self.order = None
        # Normalize a bare string to a one-element list; list/tuple/None pass
        # through unchanged.
        self.catalog = catalog if isinstance(catalog, (list, tuple, type(None))) else [catalog]
        self._N = None
        """int: number of results in the current search query."""
        self.reverse = False
        self._n = 1
        self.k = batch_size
        self.step = step
        self.responses = {}
        self._iter = 0
        """int: current integer id of the iterator in the *whole* dataset; this
        means it can have a value greater than :attr:`k`.
        """
        self._max_entry = None
        """int: index of the maximum entry that should be returned from this
        query.
        """
        self._matchbook = None
        """str: matchbook portion of the request URL.
        """
        self._final = False
        """bool: when True, this query is finalized and no additional filters,
        etc. can be added to it.
        """

    def reset_iter(self):
        """Resets the iterator back to zero so that the collection can be
        iterated over again *without* needing to request the data from the
        server again.
        """
        self._iter = 0

    @property
    def n(self):
        """Current page number for the iterator (negative when the result
        order is reversed).
        """
        if self.reverse:
            return -1*self._n
        else:
            return self._n

    @property
    def N(self):
        """Total number of results; triggers the first API request if the
        count is not known yet.
        """
        if self._N is None:
            self._request(self.n, self.k)
        return self._N

    @property
    def max_N(self):
        """Returns the maximum integer index that will be reached by this query.
        """
        if self._max_entry is None:
            return self.N
        else:
            return self._max_entry

    def __len__(self):
        return self.max_N

    def __getitem__(self, seq):
        """Return a sliced copy of the query, or a single entry for an
        integer index.

        :raises TypeError: if *seq* is neither a slice nor an int.
        """
        #We need to trigger the first request to make sure that the total
        #number of entries is fixed on the parent object, and so that the
        #pointers to the response caching are all the same.
        assert len(self) > 0
        from copy import copy
        result = copy(self)
        if isinstance(seq, slice):
            #Perform a shallow copy so that we get a new reference for the
            #indices.
            result._iter = seq.start if seq.start is not None else 0
            result._max_entry = seq.stop
            result.step = seq.step
            return result
        elif isinstance(seq, int):
            result._iter = seq
            return next(result)
        else:
            # Previously unsupported index types fell off the end and
            # returned None implicitly; fail loudly instead.
            raise TypeError("Query indices must be integers or slices, "
                            "not {}".format(type(seq).__name__))

    def _request(self, n, k):
        """Constructs the query string for this :class:`Query` object for the
        specified paging limits and then returns the response from the REST API
        as a python object.

        Args:
            n (int): page number of the results to return.
            k (int): number of datasets per page.
        """
        if len(self.responses) == 0:
            #We are making the very first request, finalize the query.
            self.finalize()

        import json
        from six.moves import urllib
        urlopen = urllib.request.urlopen
        url = "{0}{1},{2}".format(server, self.matchbook(),
                                  self._directives(n, k))
        rawresp = urlopen(url).read().decode("utf-8")
        try:
            response = json.loads(rawresp)
        except Exception:# pragma: no cover
            #We can't easily simulate network failure... A bare except here
            #would also swallow KeyboardInterrupt/SystemExit.
            msg.err("{}\n\n{}".format(url, rawresp))
            return

        if not response:
            self._N = 0
            msg.err("Empty response from API. "
                    "Check your query filters.\nURI: {}".format(url))
            return

        #If this is the first request, then save the number of results in the
        #query.
        if len(self.responses) == 0:
            self._N = int(next(iter(response.keys())).split()[-1])
        self.responses[n] = response

    def finalize(self):
        """Finalizes the current state of the query. This means that the request URL
        will be saved, but the individual keyword objects will be
        *reset*. Re-executing the search query will reconstruct the same object
        and request, but any cached responses will be lost.
        """
        #Generate the matchbook query, this has all the filters, selects and
        #ordering information.
        self.matchbook()
        #Next, reset all the keywords for the global AFLOW.
        from aflow.keywords import reset
        reset()
        #Switch out all of the keyword instances for their string
        #representations.
        self.selects = [str(s) for s in self.selects]
        self.filters = [str(f) for f in self.filters]
        self.excludes = [str(x) for x in self.excludes]
        self.order = str(self.order) if self.order is not None else None
        #Set the finalizer flag so that this object doesn't allow mutations.
        self._final = True

    def matchbook(self):
        """Constructs the matchbook portion of the query.
        """
        if not self._final:
            items = []
            #AFLUX orders by the first element in the query. If we have an orderby
            #specified, then place it first.
            if self.order is not None:
                excludes = [x.name for x in self.excludes]
                selects = [v.name for v in self.selects]
                if self.order.name in excludes:
                    items.append("${}".format(str(self.order.name)))
                    idx = excludes.index(self.order.name)
                    self.excludes.pop(idx)
                else:
                    items.append(str(self.order.name))
                    if self.order.name in selects:
                        idx = selects.index(self.order.name)
                        self.selects.pop(idx)

            items.extend(list(map(str, self.selects)))
            items.extend(list(map(str, self.filters)))
            items.extend(["${}".format(str(k)) for k in self.excludes])
            self._matchbook = ','.join(items)

        return self._matchbook

    def _directives(self, n, k):
        """Returns the directives portion of the AFLUX query.

        Args:
            n (int): page number of the results to return.
            k (int): number of datasets per page.
        """
        items = []
        if self.catalog is not None:
            items.append("catalog({})".format(':'.join(self.catalog)))

        #Next, add the paging context. This query maintains its own paging
        #context
        items.append("paging({0:d},{1:d})".format(n, k))
        return ','.join(items)

    def __iter__(self):
        return self

    def next(self):# pragma: no cover
        """Yields a generator over AFLUX API request results.
        """
        return self.__next__()

    def __next__(self):
        """Yields a generator over AFLUX API request results.
        """
        #First, find out which entry we are on.
        n = (self._iter // self.k) + 1
        i = self._iter % self.k
        #Reverse the sign now that we have figured out the ordinal page number.
        if self.reverse:
            n *= -1
        if self._iter < self.max_N and n not in self.responses:
            self._n = abs(n)
            self._request(self.n, self.k)

        assert len(self.responses) > 0
        from aflow.entries import Entry
        if self._iter < self.max_N:
            #Response keys look like "<index> of <total>".
            index = self.k*(abs(n)-1) + i + 1
            key = "{} of {}".format(index, self.N)
            raw = self.responses[n][key]
            result = Entry(**raw)
            #Increment the iterator right before we return the entry.
            self._iter += 1
            return result
        else:
            raise StopIteration()

    def _final_check(self):
        """Checks whether this object is finalized; if it is, print a friendly
        message and return False, otherwise True.
        """
        if self._final:
            msg.info("This query has been finalized. It cannot be mutated. "
                     "Create a new query to change the matchbook.")
        return not self._final

    def filter(self, keyword):
        """Adds a search term to the current filter list. Calling :meth:`filter`
        multiple times will join the final filters together using logical *and*.

        Args:
            keyword (aflow.keywords.Keyword): that encapsulates the AFLUX
                request language logic.
        """
        if self._final_check():
            self._N = None
            self.filters.append(keyword)
        return self

    def select(self, *keywords):
        """Adds a keyword to the list of properties to return for each material
        in the request.

        Args:
            keywords (list): of :class:`aflow.keywords.Keyword` that
                encapsulates the AFLUX request language logic.
        """
        if self._final_check():
            self._N = None
            for keyword in keywords:
                #The orderby keyword is emitted separately by matchbook().
                if keyword is not self.order:
                    self.selects.append(keyword)
        return self

    def orderby(self, keyword, reverse=False):
        """Sets a keyword to be the one by which

        Args:
            keyword (aflow.keywords.Keyword): that encapsulates the AFLUX
                request language logic.
            reverse (bool): when True, reverse the ordering.
        """
        if self._final_check():
            self._N = None
            self.order = keyword
            self.reverse = reverse
        return self

    def exclude(self, *keywords):
        """Sets a keyword to be *excluded* from the response.

        Args:
            keywords (list): of :class:`aflow.keywords.Keyword` that
                encapsulates the AFLUX request language logic.
        """
        if self._final_check():
            self._N = None
            for keyword in keywords:
                self.excludes.append(keyword)
        return self
| |
import rdflib
from pyontutils import combinators as cmb
from pyontutils.core import simpleOnt, OntId, OntGraph
from pyontutils.namespaces import OntCuries, makeNamespaces
from pyontutils.namespaces import NIFTTL, NIFRID, ilxtr, BFO
from pyontutils.namespaces import partOf, definition, editorNote, replacedBy
from pyontutils.namespaces import hasParticipant, hasPart, hasInput, hasOutput
from pyontutils.namespaces import prot, proc, tech, asp, dim, unit
from pyontutils.namespaces import owl, rdf, rdfs
from pyontutils.combinators import oc, oc_, odp, oop, olit, oec
from pyontutils.combinators import POCombinator, ObjectCombinator
from pyontutils.combinators import propertyChainAxiom, Combinator, Restriction2, EquivalentClass
from pyontutils.combinators import restriction, restrictions, intersectionOf
# Graph that accumulates property chain axiom triples diverted by
# _propertyChainAxiom; backed by the 'property-chains.ttl' path.
collector = OntGraph(path='property-chains.ttl')
def _propertyChainAxiom(*args):
    """Wrap ``propertyChainAxiom`` so the generated triples are diverted
    into the module-level ``collector`` graph; a fixed placeholder triple
    is yielded in their place.
    """
    class _Diverting(Combinator):
        def __init__(self):
            pass

        def __call__(self, *inner_args):
            for triple in propertyChainAxiom(*args)(*inner_args):
                collector.add(triple)
            yield ilxtr.a, ilxtr.b, ilxtr.c

    return _Diverting()
# Short aliases for the combinators used to assemble the triples in this
# module.
restN = Restriction2(None, owl.onProperty, owl.someValuesFrom)
restG = POCombinator(rdf.type, owl.Restriction).full_combinator
axiom = POCombinator(rdf.type, owl.Axiom)
blankc = POCombinator
equivalentClassC = POCombinator(owl.equivalentClass, ObjectCombinator).full_combinator
oECN = EquivalentClass(None)
owlClass = oc_
owlClassC = oc_.full_combinator
subClassOf = POCombinator(rdfs.subClassOf, ObjectCombinator).full_combinator
# Typed object-property combinator (rdf:type owl:ObjectProperty).
oop_ = POCombinator(rdf.type, owl.ObjectProperty)
def _t(subject, label, *rests, def_=None, synonyms=tuple(), comment=None,
       equivalentClass=oec):
    """Yield the standard triples describing a technique class.

    :param subject: identifier of the class being described.
    :param label: rdfs:label literal.
    :param rests: mix of 2-tuples (restriction predicate/object pairs) and
        other members (combinators or identifiers) of the equivalent class.
    :param def_: optional definition literal.
    :param synonyms: tuple of synonym literals (a non-tuple raises TypeError).
    :param comment: optional rdfs:comment literal.
    :param equivalentClass: combinator used to serialize the equivalence.
    :raises ValueError: if a tuple restriction does not have exactly 2 items.
    :raises TypeError: if *synonyms* is not a tuple.
    """
    members = tuple()
    _rests = tuple()
    for rest in rests:
        if isinstance(rest, tuple):
            if len(rest) == 2:
                _rests += rest,
            else:
                raise ValueError(f'length of {rest} is not 2!')
        else:
            # Combinators and plain identifiers are treated identically as
            # members (the original had two duplicate branches here).
            members += rest,

    rests = _rests
    if not members:
        # Default member when nothing else was supplied.
        members = ilxtr.technique,

    yield from oc(subject)
    yield from equivalentClass.serialize(subject, *members, *restrictions(*rests))
    yield from olit(subject, rdfs.label, label)
    if def_:
        yield from olit(subject, definition, def_)

    if synonyms:
        if not isinstance(synonyms, tuple):
            # Fail with a clear message instead of silently splatting a
            # string into per-character synonyms.
            raise TypeError(f'Type of {synonyms!r} should not be {type(synonyms)}!')
        yield from olit(subject, NIFRID.synonym, *synonyms)

    if comment:
        yield from olit(subject, rdfs.comment, comment)
obo, RO, prov, *_ = makeNamespaces('obo', 'RO', 'prov')
# Module-level ontology metadata; NOTE(review): presumably consumed by
# simpleOnt (imported above) -- confirm against its signature.
filename = 'methods-core'
prefixes = None
OntCuries['HBP_MEM'] = 'http://www.hbp.FIXME.org/hbp_measurement_methods/'
imports = NIFTTL['nif_backend.ttl'],
#imports = obo['bfo.owl'], obo['ro.owl']
#imports = tuple()
comment = 'The core components for modelling techniques and methods.'
branch = 'methods'
_repo = True
debug = True
triples = (
# data properties
odp(ilxtr.hasAspectValue),
odp(ilxtr.hasConstrainingAspect_value, ilxtr.isConstrainedBy), # data type properties spo object property
(ilxtr.hasConstrainingAspect_value, rdfs.subPropertyOf, ilxtr.hasAspectValue),
olit(ilxtr.hasConstrainingAspect_value, rdfs.label,
'has constraining aspect value'),
olit(ilxtr.hasConstrainingAspect_value, definition,
('In some cases a protocol is classified based on the value '
'that a constraining aspect has, not just that it is constrained on that aspect. ')),
olit(ilxtr.hasConstrainingAspect_value, rdfs.comment,
('For example, dead and alive are 0 and 1 on livingness respectively. '
'we can also define dead and alive, as disjoint, but that does not effectively '
'model that they are two sides of the same coin for any binary definition. '
'Note that this implies that these are not just qualities, they must have an '
'explicit value outcome defined.'
)
),
# object properties
oop(ilxtr.hasOperationDefinition),
oop(ilxtr.hasDefiningProtocol, ilxtr.hasOperationDefinition),
oop(ilxtr.hasExecutor, hasParticipant),
olit(ilxtr.hasExecutor, rdfs.label, 'has executor'),
olit(ilxtr.hasExecutor, NIFRID.synonym, 'has executing agent'),
olit(ilxtr.hasExecutor, definition,
'The relationship between a technique and a thing that executes it.'),
olit(ilxtr.hasExecutor, rdfs.comment,
('For example, a scientific protocol hasExecutor some graduateStudent.'
'A case like some parallelProcess hasExecutor personA, personB suggests'
'that the technique is a composite technique and should be broken down.'
'We may ultimately add a cardinality restriction to enforce this and require'
'composite techniques to be modelled using hasPart or hasInputFromPart,'
'but this is too complex to model in owl directly.')),
oop(ilxtr.wasDiscoveredBy),
olit(ilxtr.wasDiscoveredBy, rdfs.label, 'was discovered by'),
olit(ilxtr.wasDiscoveredBy, NIFRID.synonym, 'was invented by'),
olit(ilxtr.wasDiscoveredBy, definition,
'The relationship between a process and the person who discovered it.'),
oop(ilxtr.hasDualTechnique),
(ilxtr.hasDualTechnique, rdf.type, owl.SymmetricProperty),
olit(ilxtr.hasDualTechnique, rdfs.label, 'has dual technique'),
olit(ilxtr.hasDualTechnique, definition,
('The relationship between techniques that are duals of each other. '
'An example usage is in cases where the matter constituting the primary '
'input in one technique is transformed (read: renamed) and becomes the '
'primary output of another technique. Bearing this relation implies that '
'both techniques are part of a same enclosing technique.')),
(ilxtr.hasDualTechnique, rdfs.domain, ilxtr.technique),
(ilxtr.hasDualTechnique, rdfs.range, ilxtr.technique),
oop(ilxtr.hasDualInputTechnique, ilxtr.hasDualTechnique),
olit(ilxtr.hasDualInputTechnique, rdfs.label, 'has dual input technique'),
olit(ilxtr.hasDualInputTechnique, definition,
('The relationship between a technique that has a primary output '
'and a dual technique that has a primary input that is destroyed.')),
oop(ilxtr.hasDualOutputTechnique, ilxtr.hasDualTechnique),
olit(ilxtr.hasDualOutputTechnique, rdfs.label, 'has dual output technique'),
olit(ilxtr.hasDualOutputTechnique, definition,
('The relationship between a technique that has a primary input that is '
'destroyed and a dual technique that has the corresponding primary output.')),
(ilxtr.hasDualInputTechnique, owl.inverseOf, ilxtr.hasDualOutputTechnique),
oop(ilxtr.hasInformationParticipant),
oop_(ilxtr.hasInformationParticipant,
propertyChainAxiom(hasPart, ilxtr.hasInformationParticipant),
propertyChainAxiom(hasPart, ilxtr.hasDirectInformationParticipant)),
olit(ilxtr.hasInformationParticipant, rdfs.label, 'has information participant'),
olit(ilxtr.hasInformationParticipant, NIFRID.synonym,
'has symbolic participant'),
olit(ilxtr.hasInformationParticipant, definition,
('The relationship between a process and some information that participates in it. '
'When this this points to a real thing it is interpreted to mean the information'
'content of the artifact, not the artifact itself.')),
olit(ilxtr.hasInformationParticipant, rdfs.comment,
('This is distinct from RO:0000057 in that we are using RO:0000057 explicitly for '
'real things in the world. Information must be symbolized by something in the world '
'but it is not the scratches on the paper that constrain a process, it is their implication.')),
oop(ilxtr.hasInformationInput, ilxtr.hasInformationParticipant),
oop_(ilxtr.hasInformationInput,
propertyChainAxiom(hasPart, ilxtr.hasInformationInput),
propertyChainAxiom(hasPart, ilxtr.hasDirectInformationInput)),
#(ilxtr.hasInformationInput, owl.propertyDisjointWith, ilxtr.isConstrainedBy), # XXX fact++ issues?
# hermit says cannot use disjointness on non simple properties
olit(ilxtr.hasInformationInput, rdfs.label, 'has information input'),
olit(ilxtr.hasInformationInput, NIFRID.synonym,
'has non-constraining information input',
'has execution time information input',
'has symbolic input'),
olit(ilxtr.hasInformationInput, definition,
'The relationship between a process and information that is an input to the process.'),
olit(ilxtr.hasInformationInput, rdfs.comment,
('This can be though of as the known variables of a process, or the runtime information '
'that cannot be known prior to process execution.')),
(ilxtr.hasInformationInput, rdfs.domain, ilxtr.technique),
(ilxtr.hasInformationInput, rdfs.range, ilxtr.informationEntity),
oop(ilxtr.isConstrainedBy, ilxtr.hasInformationParticipant),
oop_(ilxtr.isConstrainedBy,
propertyChainAxiom(hasPart, ilxtr.isConstrainedBy)),
olit(ilxtr.isConstrainedBy, rdfs.label, 'is constrained by'),
olit(ilxtr.isConstrainedBy, NIFRID.synonym,
'has information constraint',
'has symbolic constraint'),
olit(ilxtr.isConstrainedBy, definition,
('The relationship between a process and prior information that constrains it. '
'For example the code that is compiled to create a piece of analysis software.')),
olit(ilxtr.isConstrainedBy, rdfs.comment,
('These look like free variables inside a process.\n'
'(define (my-process free-variables)\n'
' (define (execute-process inputs)\n'
' (combine free-varibles inputs))\n'
' execute-process)')),
#oop(ilxtr.usedInField), # FIXME molecular techniques...
oop(ilxtr.hasInformationOutput, ilxtr.hasInformationParticipant),
oop(ilxtr.hasInformationOutput, ilxtr.hasIntention),
oop_(ilxtr.hasInformationOutput,
propertyChainAxiom(hasPart, ilxtr.hasInformationOutput),
propertyChainAxiom(hasPart, ilxtr.hasDirectInformationOutput)),
olit(ilxtr.hasInformationOutput, rdfs.label, 'has information output'),
olit(ilxtr.hasInformationOutput, NIFRID.synonym, 'has symbolic output'),
(ilxtr.hasInformationOutput, rdfs.domain, ilxtr.technique),
(ilxtr.hasInformationOutput, rdfs.range, ilxtr.informationEntity),
oop(ilxtr.hasDirectInformationParticipant, ilxtr.hasInformationParticipant),
# implies that there is a time in the full technique prior to which
# the information input did not exist
# FIXME also parent to hasInformationInput/Output??
oop(ilxtr.hasDirectInformationInput, ilxtr.hasDirectInformationParticipant),
oop(ilxtr.hasDirectInformationInput, ilxtr.hasInformationInput),
olit(ilxtr.hasDirectInformationInput, rdfs.label, 'has direct information input'),
oop(ilxtr.hasDirectInformationOutput, ilxtr.hasDirectInformationParticipant),
oop(ilxtr.hasDirectInformationOutput, ilxtr.hasInformationOutput),
oop(ilxtr.hasDirectInformationOutput, ilxtr.hasIntention),
olit(ilxtr.hasDirectInformationOutput, rdfs.label, 'has direct information output'),
## participants
oop(hasParticipant),
olit(hasParticipant, rdfs.label, 'has participant'),
olit(hasParticipant, NIFRID.synonym, 'has physical participant'),
oop(ilxtr.hasInputOutput, hasParticipant),
oop(hasInput, ilxtr.hasInputOutput), # XXX
oop_(hasInput, propertyChainAxiom(hasPart, hasInput)),
olit(hasInput, rdfs.label, 'has input'), # XXX
oop(hasOutput, ilxtr.hasInputOutput), # XXX
oop_(hasOutput, propertyChainAxiom(hasPart, hasOutput)),
olit(hasOutput, rdfs.label, 'has output'), # XXX
# FIXME need a domain restriction to prevent accidental use with aspects!
# very easy to confuse photons and NMR as both being 'phenomena'
# NMR is something we can measure about a real system, but it is not the system
# in the same way that photons are
oop(ilxtr.detects, hasParticipant),
olit(ilxtr.detects, rdfs.label, 'detects'),
olit(ilxtr.detects, NIFRID.synonym, 'has detected phenomena'),
olit(ilxtr.detects, definition,
'The relationship between a technique and the phenomena that it detects.'),
oop(ilxtr.hasProbe, hasParticipant),
olit(ilxtr.hasProbe, rdfs.label, 'has probing phenomena'),
olit(ilxtr.hasProbe, NIFRID.synonym, 'has probe'),
olit(ilxtr.hasProbe, definition,
('The relationship between a technique and the phenomena that it uses to probe other participants. '
'Useful for cases where the probing phenomena is different than the detected phenomena.')),
(ilxtr.hasProbe, rdfs.domain, ilxtr.technique),
(ilxtr.hasProbe, rdfs.range, ilxtr.materialEntity),
#oop(ilxtr.hasEnergyProbe, ilxtr.hasProbe),
# photon
# electron
# neutron
# atomic nucleus
# usually they leave
# non addative probe, may modify, but unlikely to add
#oop(ilxtr.hasMaterialProbe, ilxtr.hasProbe),
oop(ilxtr.hasPrimaryParticipant, hasParticipant),
olit(ilxtr.hasPrimaryParticipant, rdfs.label, 'has primary participant'),
olit(ilxtr.hasPrimaryParticipant, definition, 'The relationship between a process and its primary participant.'),
olit(ilxtr.hasPrimaryParticipant, rdfs.comment,
'This property should be used to mark the key input and/or output of a process if its type is not generic.'),
(ilxtr.hasPrimaryParticipant, rdfs.domain, ilxtr.technique),
(ilxtr.hasPrimaryParticipant, rdfs.range, ilxtr.materialEntity),
oop(ilxtr.primaryParticipantIn),
olit(ilxtr.primaryParticipantIn, rdfs.label, 'primary participant in'),
(ilxtr.primaryParticipantIn, owl.inverseOf, ilxtr.hasPrimaryParticipant),
oop(ilxtr.primaryInputIn, ilxtr.primaryParticipantIn),
oop(ilxtr.primaryOutputIn, ilxtr.primaryParticipantIn),
(ilxtr.primaryInputIn, owl.inverseOf, ilxtr.hasPrimaryInput),
(ilxtr.primaryOutputIn, owl.inverseOf, ilxtr.hasPrimaryOutput),
oop(ilxtr.hasPrimaryInputOutput, ilxtr.hasPrimaryParticipant),
oop(ilxtr.hasPrimaryInputOutput, ilxtr.hasInputOutput),
oop(ilxtr.hasPrimaryInput, ilxtr.hasPrimaryParticipant),
oop(ilxtr.hasPrimaryInput, hasInput),
oop(ilxtr.hasPrimaryOutput, ilxtr.hasPrimaryParticipant),
oop(ilxtr.hasPrimaryOutput, ilxtr.hasIntention),
oop(ilxtr.hasPrimaryOutput, hasOutput),
oop(ilxtr.hasPrimaryInputUnbinding, ilxtr.hasPrimaryInput), # aka NoOutput
(ilxtr.hasPrimaryInputUnbinding, owl.propertyDisjointWith, ilxtr.hasPrimaryOutput),
oop(ilxtr.hasPrimaryParticipantUnbinding, ilxtr.hasPrimaryParticipant), # aka NoOutput
(ilxtr.hasPrimaryParticipantUnbinding, owl.propertyDisjointWith, ilxtr.hasPrimaryOutput),
oop(ilxtr.modifiesPrimaryInputOutput, ilxtr.hasPrimaryInput),
oop(ilxtr.modifiesPrimaryInputOutput, ilxtr.hasPrimaryOutput),
oop(ilxtr.hasPrimaryOutputNoInput, ilxtr.hasPrimaryOutput),
(ilxtr.hasPrimaryOutputNoInput, owl.propertyDisjointWith, ilxtr.hasPrimaryInput),
oop(ilxtr.hasPrimaryParticipantNoInput, ilxtr.hasPrimaryParticipant),
(ilxtr.hasPrimaryParticipantNoInput, owl.propertyDisjointWith, ilxtr.hasPrimaryInput),
oop(ilxtr.hasPrimaryParticipantNoInputNoOutput, ilxtr.hasPrimaryParticipant),
(ilxtr.hasPrimaryParticipantNoInputNoOutput, owl.propertyDisjointWith, ilxtr.hasPrimaryInput),
(ilxtr.hasPrimaryParticipantNoInputNoOutput, owl.propertyDisjointWith, ilxtr.hasPrimaryOutput),
# posterior or knowledge based participants that define techniques
# often they would be part of the actual primary input
# FIXME aspect vs participant ...
# FIXME vs hasConstrainingAspect
oop(ilxtr.knownDetectedPhenomena, ilxtr.hasPrimaryInput), # FIXME confusing naming...
oop(ilxtr.knownProbedPhenomena, ilxtr.hasPrimaryParticipant),
oop(ilxtr.knownDifferentiatingPhenomena, ilxtr.hasAspect),
## naming (naming is distinct from intentions because it always succeeds)
oop(ilxtr.names),
oop(ilxtr.assigns, ilxtr.names),
oop(ilxtr.asserts, ilxtr.names),
#oop_(ilxtr.asserts, propertyChainAxiom(ilxtr.hasPrimaryAspect (asp.nonLocal)))
# assertion occurs when the accompanying information is not present
# this operate on aspects in cases where the aspect is 'extrinsic'
# i.e. where if given only the named thing in question the binding
# of the aspect to the thing cannot be determined making measurements
# on only the thing itself (or that it was not determined in that way)
# these are cases where information from the surrounding environment is
# needed. for example the part of a brain a cell was collected from cannot
# (currently) be determined with 100% certainty by making measurements on
# the cell alone, additional information is required, therefore the 'measurement'
# of the aspect is not a measurement, it is an assertion
## list
# on primary participant
# hasPrimaryAspect (hasPrimaryParticipant, hasAspect)
# hasPrimaryAspectActualized
# hasPrimaryQualifiedAspect (hasPrimaryParticipant, hasQualifiedAspect)
# hasPrimaryQualifiedAspectActualized
#
# hasConstrainingAspect
# hasConstrainingQualifiedAspect
# cannot actualize constraining aspects? but they are defact actualized by thier constraint
#
# but then there are parts of the pp
# hasPrimaryParticipantPartConstrainingAspect
#
# hasInput
# hasPrimaryInput
#
## intentions
oop(ilxtr.hasIntention), # not really sco realizes:? it also includes intended changes in qualities?
olit(ilxtr.hasIntention, rdfs.label, 'has intention'),
olit(ilxtr.hasIntention, definition, 'The relationship between a process and an intended outcome.'),
olit(ilxtr.hasIntention, rdfs.comment, 'Should rarely be used directly.'),
# TODO implies has expected outcome?
# while these exist in principle there is no meaningful way to bind them to a specific
# participant without a qualifier, therefore we are leaving them out
# oop(ilxtr.hasParticipantIntentionAspect, ilxtr.intention),
# oop(ilxtr.hasPrimaryParticipantIntentionAspect, ilxtr.hasParticipantIntentionAspect),
# oop(ilxtr.hasPrimaryParticipantIntentionPrimaryAspect, ilxtr.hasPrimaryParticipantIntentionAspect),
oop(ilxtr.processHasAspect),
olit(ilxtr.processHasAspect, rdfs.label, 'process has aspect'),
(ilxtr.processHasAspect, rdfs.domain, BFO['0000015']),
(ilxtr.processHasAspect, rdfs.range, ilxtr.aspect),
oop(ilxtr.processMeasuresAspect, ilxtr.processHasAspect),
olit(ilxtr.processMeasuresAspect, rdfs.label, 'measures aspect'),
oop(ilxtr.processActualizesAspect, ilxtr.processHasAspect), # there is a tau...
olit(ilxtr.processActualizesAspect, rdfs.label, 'actualizes aspect'),
oop(ilxtr.processIncludesOnAspect, ilxtr.processHasAspect),
oop(ilxtr.processExcludesOnAspect, ilxtr.processHasAspect),
oop(ilxtr.processHasPrimaryAspect, ilxtr.processHasAspect),
#oop(ilxtr.techniqueHasAspect, ilxtr.processHasAspect),
oop_(ilxtr.processHasAspect,
propertyChainAxiom(hasPart, ilxtr.processHasAspect),
propertyChainAxiom(ilxtr.hasConstrainingAspect),
propertyChainAxiom(ilxtr.hasPrimaryAspect),
),
#(ilxtr.techniqueHasAspect, rdfs.domain, ilxtr.technique),
#(ilxtr.techniqueHasAspect, rdfs.range, ilxtr.aspect),
oop(ilxtr.hasConstrainingAspect, ilxtr.processHasAspect), # TODO
oop_(ilxtr.hasConstrainingAspect,
# TODO isConstrainedBy some value on that particular aspect
propertyChainAxiom(ilxtr.hasPrimaryParticipant, ilxtr.hasExpAspect),
#propertyChainAxiom(hasPart, ilxtr.processHasAspect), # XXX fact++ encounters a circularity error
# hermit informs that this is circular
# IF the primary participant is the same
#propertyChainAxiom(hasPart, ilxtr.hasPrimaryAspectActualized),
# sanity check says that if you havePart dnaDeliveryTechnique
# then any parent process will be constrained by some location
# on its primary participant, which is incorrect... at least for
# nonLocal aspects
),
#oc_(None,
# sigh
#intersectionOf(
#restN(ilxtr.hasConstrainingAspect, ilxtr.aspect)),
#intersectionOf(restN(ilxtr.hasPrimaryParticipant,
#ilxtr.theSameThing),
#restN(hasPart,
#restN(ilxtr.hasPrimaryParticipant,
#ilxtr.theSameThing))),
#oECN())
olit(ilxtr.hasConstrainingAspect, rdfs.label, 'has constraining aspect'),
olit(ilxtr.hasConstrainingAspect, NIFRID.synonym,
'has constraining primary participant aspect',
'constrained by aspect'),
olit(ilxtr.hasConstrainingAspect, definition,
# these are definitional to the technique so they are not intentions
# they must be achieved prior in time to the execution of the technique
# FIXME is this true? what if you mess up the measurement?
('The relationship between a technique and an aspect of the primary '
'participant that is constrained as part of a technique.')),
oop(ilxtr.hasPriParticipantPartAspect),
oop(ilxtr.hasPriParticipantPartPriAspect),
oop(ilxtr.hasPartPriAspect, ilxtr.processHasAspect),
oop_(ilxtr.hasPartPriAspect,
propertyChainAxiom(hasPart, ilxtr.hasPrimaryAspect)),
oop(ilxtr.hasPriParticipantPartAspect, ilxtr.processHasAspect),
oop(ilxtr.hasParticipantPartConstrainingAspect, ilxtr.processHasAspect),
oop_(ilxtr.hasParticipantPartConstrainingAspect,
propertyChainAxiom(ilxtr.hasPrimaryParticipant, hasPart, ilxtr.hasExpAspect),
# subprocesses have no executor
propertyChainAxiom(ilxtr.hasSubProcess, ilxtr.hasConstrainingAspect)),
oop(ilxtr.hasPrimaryAspect, ilxtr.hasIntention),
oop(ilxtr.hasPrimaryAspect, ilxtr.processHasPrimaryAspect),
#oop_(ilxtr.hasPrimaryAspect, # this does not help us and breaks reasoners
#propertyChainAxiom(ilxtr.hasPrimaryParticipant, ilxtr.hasMainAspect)),
(ilxtr.hasPrimaryAspect, rdfs.subPropertyOf, ilxtr.processHasAspect),
olit(ilxtr.hasPrimaryAspect, rdfs.label, 'has primary aspect'),
olit(ilxtr.hasPrimaryAspect, NIFRID.synonym,
'has intended primary aspect',
'has intention primary aspect',
'has primary aspect',
'has intention to effect the primary aspect of the primary participant',
'hasPrimaryParticipantIntentionPrimaryAspect',),
olit(ilxtr.hasPrimaryAspect, definition,
('The reltionship between a technique and the primary aspect of the '
'primary participant intended to be effected by the technique.')),
olit(ilxtr.hasPrimaryAspect, rdfs.comment,
('This property is very useful for classifying techniques. '
'For example a flattening technique is intended to effect the flatness '
'aspect of any primary participant, though it may effect other aspects as well.')),
# TODO property chain or general axiom? implies that primary participant of has aspect
# axiom(None, POC(ilxtr.hasPrimarAspec))
oop(ilxtr.hasPrimaryAspectActualized, ilxtr.hasPrimaryAspect),
oop(ilxtr.hasPrimaryAspectActualized, ilxtr.processActualizesAspect),
olit(ilxtr.hasPrimaryAspectActualized, rdfs.label, 'has primary aspect actualized'),
olit(ilxtr.hasPrimaryAspectActualized, NIFRID.synonym,
'has intended primary aspect actualized',),
oop(ilxtr.hasPrimaryAspectMeasured, ilxtr.hasPrimaryAspect),
# cannot be disjoint with actualized
# because all actualization techniques are also measurement techniques
oop(ilxtr.hasParentPrimaryAspect, ilxtr.processHasAspect),
# note that this is distinctly not spo hasPrimaryAspect
oop_(ilxtr.hasParentPrimaryAspect,
propertyChainAxiom(partOf, ilxtr.hasPrimaryParticipant, ilxtr.hasExpAspect)),
oop(ilxtr.hasParticipantPartPrimaryAspect, ilxtr.processHasAspect),
# note that this is distinctly not spo hasPrimaryAspect
oop_(ilxtr.hasParticipantPartPrimaryAspect,
propertyChainAxiom(ilxtr.hasPrimaryParticipant, hasPart, ilxtr.hasExpAspect)),
oop(ilxtr.hasParticipantPartPrimaryAspectActualized, ilxtr.hasParticipantPartPrimaryAspect),
oop(ilxtr.hasParticipantPartPrimaryAspectActualized, ilxtr.processActualizesAspect),
# note that this is distinctly not spo hasPrimaryAspect
oop_(ilxtr.hasParticipantPartPrimaryAspect,
propertyChainAxiom(ilxtr.hasPrimaryParticipant, hasPart, ilxtr.hasExpAspect),
# subprocesses have no executor
propertyChainAxiom(ilxtr.hasSubProcess, ilxtr.hasPrimaryAspect)),
oop(ilxtr.hasSubProcess, hasPart), # TODO see if we really need this
#_t(None, None,
#(ilxtr.hasPrimaryParticipant, restN(hasPart, ilxtr.sameThing)),
#),
oop(ilxtr.hasPartPart, hasPart),
oop_(ilxtr.hasPartPriParticipant,
# invariance to whether the primary participant or
# the technique itself is the first down the partonomy
# FIXME hermit informs that these cause circular dependency issues
propertyChainAxiom(ilxtr.hasPrimaryParticipant, hasPart),#, partOf), # self
propertyChainAxiom(hasPart, ilxtr.hasPrimaryParticipant),
#partOf, ilxtr.primaryParticipantIn) # self
),
(ilxtr.hasPartPart, rdfs.domain, BFO['0000015']),
(ilxtr.hasPartPart, rdfs.range, BFO['0000015']),
oop(ilxtr.hasPartNotPart, hasPart),
#oop_(ilxtr.hasPartNotPart, # XXX fact ++ error
# hermit informs that there is a circular dependency
#propertyChainAxiom(hasPart, ilxtr.hasPrimaryParticipant, ilxtr.primaryParticipantIn)),
(ilxtr.hasPartPart, owl.propertyDisjointWith, ilxtr.hasPartNotPart),
(ilxtr.hasPartNotPart, rdfs.domain, BFO['0000015']),
(ilxtr.hasPartNotPart, rdfs.range, BFO['0000015']),
oop(ilxtr.hasPartAspectInvariant, ilxtr.processHasAspect),
oop_(ilxtr.hasPartAspectInvariant,
propertyChainAxiom(ilxtr.hasPrimaryParticipant, ilxtr.hasExpAspect),
# FIXME need a notion of union of the aspects of the parts...
# so that the output value when measuring the aspect includes
# that aspect as bound to all the parts
propertyChainAxiom(hasPart, ilxtr.hasConstrainingAspect)),
oop(ilxtr.hasPrimaryAspect_dAdS, ilxtr.hasIntention),
olit(ilxtr.hasPrimaryAspect_dAdS, rdfs.label,
# FIXME dAdSdt ?? also include the aspect at different points in time?
#'has intended change in primary aspect as a function of sub parts of the primary participant'
#'has intended change in primary aspect with respect to subset of the primary participant.'
#'has intended change in primary aspect as a function of the subset of the primary participant.'
'has expected difference in the primary aspect with respect to the subset of the primary participant'
),
olit(ilxtr.hasPrimaryAspect_dAdS, NIFRID.synonym,
'has intended dA/dS',
'has intended dAspect/dSubset'
),
olit(ilxtr.hasPrimaryAspect_dAdS, rdfs.comment,
('Full specification requires a rule to determine those subsets. '
'Subsets may be spatial, temporal, or spatio-temporal as long as the temporal '
'component occurs within the temporal confines of the execution of the technique. '
'Subsets are defined by hasPrimaryParticipantSubsetRule.'
)),
oop(ilxtr.hasPrimaryParticipantSubsetRule, ilxtr.hasIntention),
olit(ilxtr.hasPrimaryParticipantSubsetRule, rdfs.label, 'has intended primary participant subset rule'),
olit(ilxtr.hasPrimaryParticipantSubsetRule, NIFRID.synonym,
'has primary participant subset rule',
'has subset rule'),
olit(ilxtr.hasPrimaryParticipantSubsetRule, definition,
('The rule by which subsets of the primary participant are intended to be distingushed by '
'a technique. The dS in dAdS, change in aspect with respect to change in subset of primary '
'participant.')),
oop(ilxtr.hasPrimaryAspect_dAdT, ilxtr.hasIntention),
olit(ilxtr.hasPrimaryAspect_dAdT, rdfs.label,
'has intended change in primary aspect'),
olit(ilxtr.hasPrimaryAspect_dAdT, NIFRID.synonym,
'hasPrimaryParticipantIntentionPrimaryAspect_dAdT'),
olit(ilxtr.hasPrimaryAspect_dAdT, definition,
'The intended change in primary aspect of primary participant before and after technique'),
oop(ilxtr.hasConstrainingAspect_dAdT, ilxtr.hasIntention),
olit(ilxtr.hasConstrainingAspect_dAdT, rdfs.label,
'has intended change in constraining aspect'),
oop(ilxtr.hasAspect, RO['0000086']),
# FIXME make it clear that this is between material entities (it is subclassof quality)
# for hasAspect
#oop_(ilxtr.hasAspect,
#propertyChainAxiom(ilxtr.hasAspect),
#propertyChainAxiom(hasPart, ilxtr.hasAspect),
#),
oop(ilxtr.aspectOf, RO['0000080']),
(ilxtr.aspectOf, owl.inverseOf, ilxtr.hasAspect),
(ilxtr.hasAspect, rdfs.domain, ilxtr.materialEntity),
(ilxtr.hasAspect, rdfs.range, ilxtr.aspect),
oop(ilxtr.hasExpAspect, ilxtr.hasAspect), # has experimental aspect or has operational aspect
#oop(ilxtr.hasMainAspect, ilxtr.hasExpAspect), # this does not help us
# FIXME ilxtr.hasMainAspectInSomeTechnique
## aspect to aspect relationships
oop(ilxtr.hasQualifiedForm),
olit(ilxtr.hasQualifiedForm, rdfs.label, 'has qualified form'),
(ilxtr.hasQualifiedForm, rdfs.domain, asp.Local),
(ilxtr.hasQualifiedForm, rdfs.range, asp.nonLocal),
oop(ilxtr.isQualifiedFormOf),
olit(ilxtr.isQualifiedFormOf, rdfs.label, 'is qualified form of'),
(ilxtr.hasQualifiedForm, owl.inverseOf, ilxtr.isQualifiedFormOf),
oop(ilxtr.hasUnqualifiedEquivalent),
oop(ilxtr.hasComplementAspect), # implies inverse, should be in the symbolic operational definitions
## aspect to value relationships
# FIXME has vs yields vs yielded
oop(ilxtr.aspectHasValue),
oop(ilxtr.hasDefinedValue, ilxtr.aspectHasValue),
oop(ilxtr.hasMeasuredValue, ilxtr.aspectHasValue),
oop(ilxtr.hasActualizedValue, ilxtr.aspectHasValue),
oop(ilxtr.hasDefinedActualizedValue, ilxtr.hasActualizedValue),
oop(ilxtr.hasDefinedActualizedValue, ilxtr.hasDefinedValue),
oop(ilxtr.hasMeasuredActualizedValue, ilxtr.hasActualizedValue),
oop(ilxtr.hasMeasuredActualizedValue, ilxtr.hasMeasuredValue),
## contexts
oop(ilxtr.hasContext),
(ilxtr.hasContext, rdfs.domain, ilxtr.aspect),
oop(ilxtr.hasInformationContext, ilxtr.hasContext),
oop(ilxtr.hasTechniqueContext, ilxtr.hasContext),
oop(ilxtr.hasMaterialContext, ilxtr.hasContext),
oop(ilxtr.hasAspectContext, ilxtr.hasContext),
(ilxtr.hasInformationContext, rdfs.range, ilxtr.informationEntity),
(ilxtr.hasTechniqueContext, rdfs.range, ilxtr.technique), # aka some other step that has some constraint
(ilxtr.hasMaterialContext, rdfs.range, ilxtr.materialEntity), # TODO (hasMatCont (hasPart some matEnt))???
(ilxtr.hasAspectContext, rdfs.range, ilxtr.aspect), # TODO aspectOf some thing partOf ??
#oop(ilxtr.hasMaterialAspectContext, ilxtr.hasContext)
oop(ilxtr.processHasContext),
(ilxtr.processHasContext, rdfs.domain, BFO['0000015']),
# aspects for processes
# these are not modelled with a notion of intention because
# they are processes which _must_ occur in order to be classified as such
# FIXME TODO naming, Required?
oop(ilxtr.hasActualPrimaryAspect),
oop(ilxtr.hasActualPrimaryAspect_dAdT),
# classes
## executor
oc(ilxtr.executor, ilxtr.materialEntity), # probably equivalent to agent in ero
olit(ilxtr.executor, rdfs.label, 'executor'),
olit(ilxtr.executor, NIFRID.synonym, 'executing agent'),
olit(ilxtr.executor, definition,
'An executor is the primary agentous being that participates in a '
'technique and is usually the vehicle by which prior information '
'constrains a technique. Human beings usually play this role, but '
'computers and robots can be considered to be executors when the prior '
'information has been encoded directly into them and their behavior.'),
## aspect
oc(BFO['0000019']), # XXX # vs PATO:0000001 quality:
olit(BFO['0000019'], rdfs.label, 'quality'), # XXX
oc(ilxtr.aspect, BFO['0000019']), # FIXME aspect/
olit(ilxtr.aspect, rdfs.label, 'aspect'),
olit(ilxtr.aspect, definition,
'An aspect is a measurable quantity or quality of a thing. '
'Aspects must be able to act as a function that is dependent '
'only on a measurement device and the thing to which the aspect '
'is bound. This is to say that aspects require a notion of locality. '),
olit(ilxtr.aspect, rdfs.comment,
'To illustrate with an example. The location of a thing cannot be an '
'aspect of that thing because location requires knowing the spatial '
'realtionship between that thing any measurement device. Said in yet '
'another way, (measure thing location) -> here for all values of thing. '
'Aspects all assume an inertial reference frame centered on their named inputs. '
'Location thus can only ever be computed on some part of a larger named system.'
'Therefore, in order to accomodate measurements on composite beings such as '
'the number of geese in a flock or the location of a missing set of keys possibly '
'in the house, we split aspects into signular and composite. The composite are '
'indeed aspects of a single nameable thing, but they make measurements on or '
'between parts of that thing. The simplest version is nearly always named thing '
'plus implied complement of that thing. Note also that the number of geese in a '
'flock is different from the number of things in existence that are geese by the '
'isness aspect. The asp/isness/count can be determined entirely locally because '
'the definition of what a goose is is independent of time and place (where and when). '
'Time dependent or place dependent definitions of geese (such as in the game of '
'duck duck goose) require additional information and thus are not what/singular aspects.'
),
olit(ilxtr.aspect, rdfs.comment,
'PATO has good coverage of many of these aspects though their naming is '
'not alway consistent. And their definition is perhaps overly broad.'),
olit(ilxtr.aspect, NIFRID.synonym,
'measureable',
'measureable aspect'),
# hasValue, hasRealizedValue
# hasActualizedValue
# hasMeasuredValue
#ilxtr.aspect hasOutputValue
#ilxtr.aspect hasMeasurementProtocol
# should aspects not be bound to symbolic entities?
# for example a word count for a document?
# I guess all implementations of symbolic systems are physical
# and ultimately the distinction between symbolic and physical
# isn't actually useful for aspects
# we do however need a notation of bottom for aspects which
# says that for certain inputs the aspect cannot return a
# meaningful (correctly typed non null) value
(asp.Local, owl.disjointWith, asp.nonLocal),
oc(asp.Local, ilxtr.aspect), # aka unqualified or does not need qualification
olit(asp.Local, rdfs.label, 'aspect unqualified'),
olit(asp.Local, definition,
'aspect of thing that is invariant to context'),
oc(asp.nonLocal, ilxtr.aspect), # qualified
olit(asp.nonLocal, rdfs.label, 'aspect qualified'),
oc_(asp.nonLocal, restriction(ilxtr.hasContext, BFO['0000002'])),
# FIXME context isn't just the material entity it is the aspects thereof
# the context probably also needs to be a technique that binds all
# intersectionOf for multiple aspects? hrm
# the additional aspects?
# context dealt with below
# binding a nonLocal aspect to a single entity will
# lead to construction of a context
olit(asp.nonLocal, definition,
'aspect of thing that varies depending on context'),
oop_(hasParticipant,
propertyChainAxiom(ilxtr.processHasAspect, ilxtr.hasContext)),
restG(blankc(owl.onProperty, ilxtr.hasPrimaryAspect_dAdT),
blankc(owl.someValuesFrom, ilxtr.nonZero),
blankc(rdfs.subClassOf,
restN(ilxtr.hasPrimaryAspectActualized,
ilxtr.aspect)))(rdflib.BNode()),
## modalitiy
cmb.Class(
ilxtr.ExperimentalModality,
cmb.Pair(owl.deprecated, rdflib.Literal(True)),
cmb.Pair(replacedBy, ilxtr.ExperimentalApproach),
cmb.Pair(editorNote, rdflib.Literal(
'For clarity switch to use ilxtr:ExperimentalApproach since we are '
'changing the preferred terminology. Hopefully keeping the id '
'aligned will prevent confusion down the line. Follow replacedBy: '
'to find the new id.')),
),
cmb.Class(
ilxtr.ExperimentalApproach,
cmb.Pair(rdfs.label, rdflib.Literal('experimental approach')),
cmb.Pair(NIFRID.synonym, rdflib.Literal('experimental modality')),
cmb.Pair(definition, rdflib.Literal(
'The general experimental approach used to answer a '
'scientific question. Approaches often define whole '
'research disciplines.')),
),
cmb.Class(ilxtr.ExperimentalPreparation,
cmb.Pair(rdfs.label, rdflib.Literal('experimental preparation')),
), # in vivo in vitro
## behavior
cmb.Class(ilxtr.BehavioralTask,
cmb.Pair(rdfs.label, rdflib.Literal('behavioral task')),
), # tasks that have protocols
cmb.Class(ilxtr.BehavioralParadigm,
cmb.Pair(rdfs.label, rdflib.Literal('behavioral paradigm')),
), # more abstract behaviors e.g. forced choice detection
cmb.Class(ilxtr.BehavioralReadout,
cmb.Pair(rdfs.label, rdflib.Literal('behavioral readout')),
), # e.g. delayed saccad
# behaviorl task structure
# reward type
# structure in the sensory environment that they need to process
# working memory is more like a study target
## technique
oc(BFO['0000015']),
olit(BFO['0000015'], rdfs.label, 'process'),
oc(ilxtr.technique, BFO['0000015']), # FIXME technique/
olit(ilxtr.technique, rdfs.label, 'technique'),
olit(ilxtr.technique, NIFRID.synonym, 'method'),
olit(ilxtr.technique, definition,
'A repeatable process that is constrained by some prior information.'),
(ilxtr.technique, ilxtr.hasTempId, OntId('HBP_MEM:0000000')),
# NOTE: not all techniques have primary participants, especially in the case of composite techniques
oc_(ilxtr.technique,
restriction(ilxtr.hasExecutor, ilxtr.executor)),
oc_(rdflib.BNode(),
oECN(intersectionOf(BFO['0000015'],
restN(hasParticipant,
restN(ilxtr.hasAspect, asp.nonLocal))), # vs hasExpAspect
intersectionOf(BFO['0000015'],
restN(ilxtr.processHasAspect,
restN(ilxtr.hasContext, BFO['0000002'])))),
# FIXME still doesn't get the binding right
intersectionOf(BFO['0000015'],
# FIXME nonLocal in time requires some time keeping device
# id some periodic phenomenon
restN(ilxtr.processHasContext, BFO['0000002'])),
),
#oc_(rdflib.BNode(),
#restN(ilxtr.hasAspect, asp.nonLocal),
#restN(ilxtr.hasContext, ilxtr.materialEntity),
# TODO that nonLocal's context should be the same...
#),
oc_(rdflib.BNode(),
oECN(intersectionOf(ilxtr.materialEntity,
restN(ilxtr.hasAspect, asp.nonLocal))),
#restN(ilxtr.hasContext, ),
# this works because immaterial entities have to be anchored to some
# internal frame which requires something with inertia which requires
# a meterial entity...
intersectionOf(ilxtr.materialEntity,
restN(partOf, ilxtr.compositeMaterialEntity)),
),
#_t(tech.test, 'test test test',
#(ilxtr.hasPrimaryParticipant, ilxtr.thingA),
#(ilxtr.hasPrimaryParticipant, ilxtr.thingB)),
#(ilxtr.thingA, rdfs.subClassOf, ilxtr.materialEntity),
#(ilxtr.thingB, rdfs.subClassOf, ilxtr.materialEntity),
#(ilxtr.thingA, owl.disjointWith, ilxtr.thingB),
)
def derp():
    """Yield the triples for an anonymous owl:equivalentClass axiom on
    ilxtr:technique: an owl:intersectionOf RDF list of three
    max-qualified-cardinality-1 restrictions (primary participant,
    primary aspect, and primary aspect dA/dT)."""
    eq_node = rdflib.BNode()
    yield ilxtr.technique, owl.equivalentClass, eq_node
    yield eq_node, rdf.type, owl.Class
    # The three cells of the hand-built RDF list.
    cells = tuple(rdflib.BNode() for _ in range(3))
    yield eq_node, owl.intersectionOf, cells[0]
    # (owl:onProperty, owl:onClass) for each member restriction.
    specs = (
        (ilxtr.hasPrimaryParticipant, BFO['0000004']),
        (ilxtr.hasPrimaryAspect, ilxtr.aspect),
        (ilxtr.hasPrimaryAspect_dAdT, ilxtr.changeType),
    )
    one = rdflib.Literal(1, datatype=rdflib.XSD.nonNegativeInteger)
    for cell, (prop, on_class) in zip(cells, specs):
        member = rdflib.BNode()
        yield cell, rdf.first, member
        yield from restG(
            blankc(owl.onProperty, prop),
            blankc(owl.maxQualifiedCardinality, one),
            blankc(owl.onClass, on_class))(member)
    # Close the list: each cell points at the next, the last at rdf:nil.
    for cell, nxt in zip(cells, cells[1:] + (rdf.nil,)):
        yield cell, rdf.rest, nxt
triples += tuple(derp())
"""
asdf = tuple(
owlClass(ilxtr.technique,
equivalentClassC(None,
owlClassC(
subClassOf(
restG(blankC(owl.onProperty, ilxtr.hasPrimaryParticipant),
blankC(owl.maxQualifiedCardinality,
rdflib.Literal(1, datatype=rdflib.XSD.nonNegativeInteger)),
blankC(owl.onClass,
# FIXME maybe more specific?
BFO['0000004'])),
restG(blankC(owl.onProperty, ilxtr.hasPrimaryAspect),
blankC(owl.maxQualifiedCardinality,
rdflib.Literal(1, datatype=rdflib.XSD.nonNegativeInteger)),
blankC(owl.onClass, ilxtr.aspect)),
restG(blankC(owl.onProperty, ilxtr.hasPrimaryAspect_dAdT),
blankC(owl.maxQualifiedCardinality,
rdflib.Literal(1, datatype=rdflib.XSD.nonNegativeInteger)),
blankC(owl.onClass, ilxtr.changeType))))))
)
embed()
"""
# Build the methods-core ontology graph from the accumulated triples.
methods_core = simpleOnt(filename=filename,
                         prefixes=prefixes,
                         imports=imports,
                         triples=triples,
                         comment=comment,
                         branch=branch,
                         _repo=_repo,
                         calling__file__=__file__,)

# Register the extra curie namespaces used by identifiers in this file so
# they serialize as prefixed names rather than full IRIs.
methods_core._graph.add_namespace('asp', str(asp))
methods_core._graph.add_namespace('ilxtr', str(ilxtr))  # FIXME why is this now showing up...
#methods_core._graph.add_namespace('tech', str(tech))
methods_core._graph.add_namespace('HBP_MEM', OntCuries['HBP_MEM'])
def main():
    """Write the collected ontology artifacts to disk."""
    # TODO aspects.ttl?
    collector.write()
    methods_core._graph.write()


if __name__ == '__main__':
    main()
# ---- end of ontology script; a separate Django test-utilities module follows ----
import re
import sys
from urlparse import urlsplit, urlunsplit
from xml.dom.minidom import parseString, Node
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.core.signals import request_started
from django.core.urlresolvers import clear_url_caches
from django.db import (transaction, connection, connections, DEFAULT_DB_ALIAS,
reset_queries)
from django.http import QueryDict
from django.test import _doctest as doctest
from django.test.client import Client
from django.test.utils import get_warnings_state, restore_warnings_state
from django.utils import simplejson, unittest as ut2
from django.utils.encoding import smart_str
from django.utils.functional import wraps
__all__ = ('DocTestRunner', 'OutputChecker', 'TestCase', 'TransactionTestCase',
'skipIfDBFeature', 'skipUnlessDBFeature')
# Python 2.4 compatibility: the all() builtin appeared in Python 2.5, so fall
# back to Django's pure-Python implementation when it is missing.
try:
    all
except NameError:
    from django.utils.itercompat import all
def normalize_long_ints(s):
    """Strip the ``L`` suffix from standalone Python 2 long literals
    (``22L`` -> ``22``) so doctest output compares equal regardless of
    int/long repr differences.

    Defined with ``def`` rather than a named lambda (PEP 8 E731) so the
    function has a useful name in tracebacks and a docstring.
    """
    return re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)


def normalize_decimals(s):
    """Rewrite ``Decimal('1.5')`` as ``Decimal("1.5")`` so doctest output is
    stable across the ``Decimal.__repr__`` change made in Python 2.6."""
    return re.sub(r"Decimal\('(\d+(\.\d*)?)'\)",
                  lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
def to_list(value):
    """Coerce *value* to a list.

    ``None`` becomes an empty list, an existing list is returned unchanged
    (the same object), and any other value is wrapped in a one-element list.
    """
    if value is None:
        return []
    if isinstance(value, list):
        return value
    return [value]
# Stash references to the genuine transaction-management functions so that
# disable_transaction_methods()/restore_transaction_methods() can swap them
# out during a test and put them back afterwards.
real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_managed = transaction.managed
def nop(*args, **kwargs):
    """Accept anything, do nothing; stand-in for disabled transaction calls."""
    return None
def disable_transaction_methods():
    """Replace Django's transaction-management entry points with no-ops so
    test code cannot commit or roll back the wrapping test transaction."""
    for name in ('commit', 'rollback', 'enter_transaction_management',
                 'leave_transaction_management', 'managed'):
        setattr(transaction, name, nop)
def restore_transaction_methods():
    """Reinstall the real transaction-management functions captured at
    module import time, undoing disable_transaction_methods()."""
    originals = (
        ('commit', real_commit),
        ('rollback', real_rollback),
        ('enter_transaction_management', real_enter_transaction_management),
        ('leave_transaction_management', real_leave_transaction_management),
        ('managed', real_managed),
    )
    for name, func in originals:
        setattr(transaction, name, func)
class OutputChecker(doctest.OutputChecker):
    """Doctest output checker that also accepts output which is equivalent to
    the expected value numerically, as XML, or as JSON -- not only as an
    exact string match."""

    def check_output(self, want, got, optionflags):
        """The entry method for doctest output checking. Defers to a sequence
        of child checkers; output is accepted if any checker matches."""
        checks = (self.check_output_default,
                  self.check_output_numeric,
                  self.check_output_xml,
                  self.check_output_json)
        for check in checks:
            if check(want, got, optionflags):
                return True
        return False

    def check_output_default(self, want, got, optionflags):
        "The default comparator provided by doctest - not perfect, but good for most purposes"
        return doctest.OutputChecker.check_output(self, want, got, optionflags)

    def check_output_numeric(self, want, got, optionflags):
        """Doctest does an exact string comparison of output, which means that
        some numerically equivalent values aren't equal. This check normalizes
         * long integers (22L) so that they equal normal integers. (22)
         * Decimals so that they are comparable, regardless of the change
           made to __repr__ in Python 2.6.
        """
        return doctest.OutputChecker.check_output(self,
            normalize_decimals(normalize_long_ints(want)),
            normalize_decimals(normalize_long_ints(got)),
            optionflags)

    def check_output_xml(self, want, got, optionflags):
        """Tries to do an 'xml-comparison' of want and got. Plain string
        comparison doesn't always work because, for example, attribute
        ordering should not be important.

        Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
        """
        _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
        def norm_whitespace(v):
            return _norm_whitespace_re.sub(' ', v)

        def child_text(element):
            return ''.join([c.data for c in element.childNodes
                            if c.nodeType == Node.TEXT_NODE])

        def children(element):
            return [c for c in element.childNodes
                    if c.nodeType == Node.ELEMENT_NODE]

        def norm_child_text(element):
            return norm_whitespace(child_text(element))

        def attrs_dict(element):
            return dict(element.attributes.items())

        def check_element(want_element, got_element):
            # Recursive structural comparison: tag, normalized text,
            # attributes, then each child pairwise.
            if want_element.tagName != got_element.tagName:
                return False
            if norm_child_text(want_element) != norm_child_text(got_element):
                return False
            if attrs_dict(want_element) != attrs_dict(got_element):
                return False
            want_children = children(want_element)
            got_children = children(got_element)
            if len(want_children) != len(got_children):
                return False
            for want, got in zip(want_children, got_children):
                if not check_element(want, got):
                    return False
            return True

        want, got = self._strip_quotes(want, got)
        want = want.replace('\\n', '\n')
        got = got.replace('\\n', '\n')

        # If the string is not a complete xml document, we may need to add a
        # root element. This allow us to compare fragments, like "<foo/><bar/>"
        if not want.startswith('<?xml'):
            wrapper = '<root>%s</root>'
            want = wrapper % want
            got = wrapper % got

        # Parse the want and got strings, and compare the parsings.
        # (Was a bare ``except:``, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to Exception -- unparseable input just means
        # "not an XML match".)
        try:
            want_root = parseString(want).firstChild
            got_root = parseString(got).firstChild
        except Exception:
            return False
        return check_element(want_root, got_root)

    def check_output_json(self, want, got, optionflags):
        "Tries to compare want and got as if they were JSON-encoded data"
        want, got = self._strip_quotes(want, got)
        # Narrowed from a bare ``except:`` for the same reason as the XML
        # checker above.
        try:
            want_json = simplejson.loads(want)
            got_json = simplejson.loads(got)
        except Exception:
            return False
        return want_json == got_json

    def _strip_quotes(self, want, got):
        """
        Strip surrounding (possibly u-prefixed) quotes from both doctest
        output values when both are quoted the same way:

        >>> o = OutputChecker()
        >>> o._strip_quotes("'foo'", "'foo'")
        ('foo', 'foo')
        >>> o._strip_quotes('u"bar"', "u'bar'")
        ('bar', 'bar')
        """
        def is_quoted_string(s):
            s = s.strip()
            return (len(s) >= 2
                    and s[0] == s[-1]
                    and s[0] in ('"', "'"))

        def is_quoted_unicode(s):
            s = s.strip()
            return (len(s) >= 3
                    and s[0] == 'u'
                    and s[1] == s[-1]
                    and s[1] in ('"', "'"))

        if is_quoted_string(want) and is_quoted_string(got):
            want = want.strip()[1:-1]
            got = got.strip()[1:-1]
        elif is_quoted_unicode(want) and is_quoted_unicode(got):
            want = want.strip()[2:-1]
            got = got.strip()[2:-1]
        return want, got
class DocTestRunner(doctest.DocTestRunner):
    """DocTestRunner that enables ELLIPSIS matching and rolls back every
    database connection after an unexpected exception in a doctest, so the
    failure cannot leak database state into later tests."""

    def __init__(self, *args, **kwargs):
        doctest.DocTestRunner.__init__(self, *args, **kwargs)
        # NOTE(review): this overwrites (rather than ORs into) whatever
        # optionflags the caller passed, leaving ELLIPSIS as the only active
        # flag -- presumably intentional, but worth confirming.
        self.optionflags = doctest.ELLIPSIS

    def report_unexpected_exception(self, out, test, example, exc_info):
        doctest.DocTestRunner.report_unexpected_exception(self, out, test,
            example, exc_info)
        # Rollback, in case of database errors. Otherwise they'd have
        # side effects on other tests.
        for conn in connections:
            transaction.rollback_unless_managed(using=conn)
class _AssertNumQueriesContext(object):
    """Context manager that counts the SQL queries executed on *connection*
    inside the ``with`` block and fails *test_case* if the count differs
    from *num*."""

    def __init__(self, test_case, num, connection):
        self.test_case = test_case
        self.num = num  # expected number of queries
        self.connection = connection

    def __enter__(self):
        # Force query logging on (restored in __exit__) so that
        # connection.queries is populated while the block runs.
        self.old_debug_cursor = self.connection.use_debug_cursor
        self.connection.use_debug_cursor = True
        self.starting_queries = len(self.connection.queries)
        # Keep request_started from clearing the query log mid-block.
        request_started.disconnect(reset_queries)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.use_debug_cursor = self.old_debug_cursor
        request_started.connect(reset_queries)
        if exc_type is not None:
            # Let the original exception propagate instead of masking it
            # with an assertion failure about query counts.
            return
        final_queries = len(self.connection.queries)
        executed = final_queries - self.starting_queries
        self.test_case.assertEqual(
            executed, self.num, "%d queries executed, %d expected" % (
                executed, self.num
            )
        )
class TransactionTestCase(ut2.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
    def _pre_setup(self):
        """Performs any pre-test setup. This includes:

            * Flushing the database.
            * If the Test Case class has a 'fixtures' member, installing the
              named fixtures.
            * If the Test Case class has a 'urls' member, replace the
              ROOT_URLCONF with it.
            * Clearing the mail test outbox.
        """
        self._fixture_setup()
        self._urlconf_setup()
        # Start each test with an empty outbox so assertions about sent mail
        # only see what this test produced.
        mail.outbox = []
def _fixture_setup(self):
# If the test case has a multi_db=True flag, flush all databases.
# Otherwise, just flush default.
if getattr(self, 'multi_db', False):
databases = connections
else:
databases = [DEFAULT_DB_ALIAS]
for db in databases:
call_command('flush', verbosity=0, interactive=False, database=db)
if hasattr(self, 'fixtures'):
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db})
def _urlconf_setup(self):
if hasattr(self, 'urls'):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
self.client = self.client_class()
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
super(TransactionTestCase, self).__call__(result)
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
def _post_teardown(self):
""" Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
* Force closing the connection, so that the next test gets
a clean cursor.
"""
self._fixture_teardown()
self._urlconf_teardown()
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does rollback, the effect
# of these statements is lost, which can effect the operation
# of tests (e.g., losing a timezone setting causing objects to
# be created with the wrong time).
# To make sure this doesn't happen, get a clean connection at the
# start of every test.
for connection in connections.all():
connection.close()
def _fixture_teardown(self):
pass
def _urlconf_teardown(self):
if hasattr(self, '_old_root_urlconf'):
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def save_warnings_state(self):
"""
Saves the state of the warnings module
"""
self._warnings_state = get_warnings_state()
def restore_warnings_state(self):
"""
Restores the sate of the warnings module to the state
saved by save_warnings_state()
"""
restore_warnings_state(self._warnings_state)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix=''):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request.
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
self.assertEqual(response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected:"
" Response code was %d (expected %d)" %
(response.redirect_chain[0][1], status_code))
url, status_code = response.redirect_chain[-1]
self.assertEqual(response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final"
" Response code was %d (expected %d)" %
(response.status_code, target_status_code))
else:
# Not a followed redirect
self.assertEqual(response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url = response['Location']
scheme, netloc, path, query, fragment = urlsplit(url)
redirect_response = response.client.get(path, QueryDict(query))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s':"
" response code was %d (expected %d)" %
(path, redirect_response.status_code, target_status_code))
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
if not (e_scheme or e_netloc):
expected_url = urlunsplit(('http', host or 'testserver', e_path,
e_query, e_fragment))
self.assertEqual(url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" %
(url, expected_url))
def assertContains(self, response, text, count=None, status_code=200,
msg_prefix=''):
"""
Asserts that a response indicates that some content was retrieved
successfully, (i.e., the HTTP status code was as expected), and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
text = smart_str(text, response._charset)
real_count = response.content.count(text)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of '%s' in response"
" (expected %d)" % (real_count, text, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find '%s' in response" % text)
def assertNotContains(self, response, text, status_code=200,
msg_prefix=''):
"""
Asserts that a response indicates that some content was retrieved
successfully, (i.e., the HTTP status code was as expected), and that
``text`` doesn't occurs in the content of the response.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
text = smart_str(text, response._charset)
self.assertEqual(response.content.count(text), 0,
msg_prefix + "Response should not contain '%s'" % text)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to "
"render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i,context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors)))
elif field in context[form].fields:
self.fail(msg_prefix + "The field '%s' on form '%s'"
" in context %d contains no errors" %
(field, form, i))
else:
self.fail(msg_prefix + "The form '%s' in context %d"
" does not contain the field '%s'" %
(form, i, field))
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors))
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the"
" response" % form)
def assertTemplateUsed(self, response, template_name, msg_prefix=''):
"""
Asserts that the template with the provided name was used in rendering
the response.
"""
if msg_prefix:
msg_prefix += ": "
template_names = [t.name for t in response.templates]
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, u', '.join(template_names)))
def assertTemplateNotUsed(self, response, template_name, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response.
"""
if msg_prefix:
msg_prefix += ": "
template_names = [t.name for t in response.templates]
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name)
def assertQuerysetEqual(self, qs, values, transform=repr):
return self.assertEqual(map(transform, qs), values)
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
connection = connections[using]
context = _AssertNumQueriesContext(self, num, connection)
if func is None:
return context
# Basically emulate the `with` statement here.
context.__enter__()
try:
func(*args, **kwargs)
except:
context.__exit__(*sys.exc_info())
raise
else:
context.__exit__(*sys.exc_info())
def connections_support_transactions():
    """Return True when every configured database backend supports
    transactions."""
    for conn in connections.all():
        if not conn.features.supports_transactions:
            return False
    return True
class TestCase(TransactionTestCase):
    """
    Does basically the same as TransactionTestCase, but surrounds every test
    with a transaction, monkey-patches the real transaction management routines to
    do nothing, and rollsback the test transaction at the end of the test. You have
    to use TransactionTestCase, if you need transaction management inside a test.
    """

    def _fixture_setup(self):
        # Fall back to the flush-based setup of TransactionTestCase when a
        # backend can't roll back (e.g. MyISAM).
        if not connections_support_transactions():
            return super(TestCase, self)._fixture_setup()

        # If the test case has a multi_db=True flag, setup all databases.
        # Otherwise, just use default.
        if getattr(self, 'multi_db', False):
            databases = connections
        else:
            databases = [DEFAULT_DB_ALIAS]

        for db in databases:
            transaction.enter_transaction_management(using=db)
            transaction.managed(True, using=db)
        # Monkey-patch commit/rollback to no-ops so the test transaction
        # stays open until _fixture_teardown rolls it back.
        disable_transaction_methods()

        from django.contrib.sites.models import Site
        Site.objects.clear_cache()

        for db in databases:
            if hasattr(self, 'fixtures'):
                # commit=False keeps fixture data inside the test transaction.
                call_command('loaddata', *self.fixtures, **{
                    'verbosity': 0,
                    'commit': False,
                    'database': db
                })

    def _fixture_teardown(self):
        if not connections_support_transactions():
            return super(TestCase, self)._fixture_teardown()

        # If the test case has a multi_db=True flag, teardown all databases.
        # Otherwise, just teardown default.
        if getattr(self, 'multi_db', False):
            databases = connections
        else:
            databases = [DEFAULT_DB_ALIAS]

        # Restore the real transaction functions, then throw away everything
        # the test did inside its transaction.
        restore_transaction_methods()
        for db in databases:
            transaction.rollback(using=db)
            transaction.leave_transaction_management(using=db)
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and issubclass(test_func, TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise ut2.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
test_item = test_func
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(feature):
    """Skip a test when the active database has the named feature."""
    def has_feature():
        return getattr(connection.features, feature)
    return _deferredSkip(has_feature, "Database has feature %s" % feature)
def skipUnlessDBFeature(feature):
    """Skip a test unless the active database has the named feature."""
    def lacks_feature():
        return not getattr(connection.features, feature)
    return _deferredSkip(lacks_feature,
                         "Database doesn't support feature %s" % feature)
class HachiAnalysisCase(TestCase):
    # NOTE(review): URLconf swapping is disabled for these tests — presumably
    # they never define a 'urls' attribute; confirm before relying on it.
    def _urlconf_setup(self):
        pass
| |
""" Routines to copy / relink library dependencies in trees and wheels
"""
from __future__ import division, print_function
import functools
import logging
import os
import shutil
import warnings
from os.path import abspath, basename, dirname, exists
from os.path import join as pjoin
from os.path import realpath, relpath
from subprocess import PIPE, Popen
from typing import (
Callable,
Dict,
FrozenSet,
Iterable,
List,
Mapping,
Optional,
Set,
Text,
Tuple,
Union,
)
from .libsana import (
_allow_all,
get_rp_stripper,
stripped_lib_dict,
tree_libs,
tree_libs_from_directory,
)
from .tmpdirs import TemporaryDirectory
from .tools import (
dir2zip,
find_package_dirs,
get_archs,
set_install_id,
set_install_name,
validate_signature,
zip2dir,
)
from .wheeltools import InWheel, rewrite_record
# Module-level logger for copy/relink progress messages.
logger = logging.getLogger(__name__)

# Prefix for install_name_id of copied libraries
DLC_PREFIX = "/DLC/"
class DelocationError(Exception):
    """Raised when library dependencies cannot be copied or relinked."""
    pass
def delocate_tree_libs(
    lib_dict,  # type: Mapping[Text, Mapping[Text, Text]]
    lib_path,  # type: Text
    root_path,  # type: Text
):
    # type: (...) -> Dict[Text, Dict[Text, Text]]
    """Move libraries required by `lib_dict` keys into `lib_path`.

    `lib_dict` maps each required library path to a "dependings" mapping of
    the files that require it (see :func:`libsana.tree_libs`).  Libraries
    living outside the tree at `root_path` are copied into `lib_path`;
    libraries already inside `root_path` stay put.  In both cases the
    requiring objects have their install names rewritten to relative paths.

    Parameters
    ----------
    lib_dict : dict
        (``depended_lib_path``, ``dependings_dict``) pairs.
    lib_path : str
        Existing directory that receives copies of external libraries.
    root_path : str
        Root of the analyzed tree; libraries under it are not copied.

    Returns
    -------
    copied_libs : dict
        The subset of `lib_dict` whose keys were copied into `lib_path`.

    Raises
    ------
    DelocationError
        For unresolved paths, missing files, or basename collisions among
        the libraries to be copied.
    """
    # Validate everything up front so we never leave the tree half-modified.
    to_copy, to_delocate = _analyze_tree_libs(lib_dict, root_path)
    # Copy the external libraries and re-key lib_dict to the copies.
    updated_lib_dict, copied = _copy_required_libs(
        lib_dict, lib_path, root_path, to_copy
    )
    # Rewrite install names of both local and freshly copied libraries.
    _update_install_names(updated_lib_dict, root_path, to_delocate | copied)
    return to_copy
def _analyze_tree_libs(
lib_dict, # type: Mapping[Text, Mapping[Text, Text]]
root_path, # type: Text
):
# type: (...) -> Tuple[Dict[Text, Dict[Text, Text]], Set[Text]]
"""Verify then return which library files to copy and delocate.
Returns
-------
needs_copying : dict
The libraries outside of `root_path`.
This is in the `lib_dict` format for use by `delocate_tree_libs`.
needs_delocating : set of str
The libraries inside of `root_path` which need to be delocated.
"""
needs_delocating = set() # Libraries which need install names updated.
needs_copying = {} # A report of which libraries were copied.
copied_basenames = set()
rp_root_path = realpath(root_path)
for required, requirings in lib_dict.items():
if required.startswith("@"):
# @rpath, etc, at this point should never happen.
raise DelocationError("%s was expected to be resolved." % required)
r_ed_base = basename(required)
if relpath(required, rp_root_path).startswith(".."):
# Not local, plan to copy
if r_ed_base in copied_basenames:
raise DelocationError(
"Already planning to copy library with same basename as: "
+ r_ed_base
)
if not exists(required):
raise DelocationError(
'library "{0}" does not exist'.format(required)
)
# Copy requirings to preserve it since it will be modified later.
needs_copying[required] = dict(requirings)
copied_basenames.add(r_ed_base)
else: # Is local, plan to set relative loader_path
needs_delocating.add(required)
return needs_copying, needs_delocating
def _copy_required_libs(
    lib_dict,  # type: Mapping[Text, Mapping[Text, Text]]
    lib_path,  # type: Text
    root_path,  # type: Text
    libraries_to_copy,  # type: Iterable[Text]
):
    # type: (...) -> Tuple[Dict[Text, Dict[Text, Text]], Set[Text]]
    """Copy each library in `libraries_to_copy` into `lib_path`.

    Returns
    -------
    updated_lib_dict : dict
        A copy of `lib_dict` re-keyed so that copied libraries (and every
        reference to them) use their new paths under `lib_path`.
    needs_delocating : set of str
        Destination paths of the copies; their install names still need
        rewriting.
    """
    # Work on a private copy so the caller's mapping is never mutated.
    updated = _copy_lib_dict(lib_dict)
    del lib_dict
    destinations = set()  # Set[Text]
    for src in libraries_to_copy:
        dest = realpath(pjoin(lib_path, basename(src)))
        logger.info(
            "Copying library %s to %s", src, relpath(dest, root_path)
        )
        shutil.copy(src, dest)
        # The fresh copy needs its install names rewritten locally.
        destinations.add(dest)
        # Re-key the copied library itself ...
        updated[dest] = updated[src]
        del updated[src]
        # ... and every dependency record still pointing at the original.
        for dependings in updated.values():
            if src not in dependings:
                continue
            dependings[dest] = dependings[src]
            del dependings[src]
    return updated, destinations
def _update_install_names(
lib_dict, # type: Mapping[Text, Mapping[Text, Text]]
root_path, # type: Text
files_to_delocate, # type: Iterable[Text]
):
# type: (...) -> None
"""Update the install names of libraries."""
for required in files_to_delocate:
# Set relative path for local library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(required, dirname(requiring))
new_install_name = "@loader_path/" + req_rel
logger.info(
"Modifying install name in %s from %s to %s",
relpath(requiring, root_path),
orig_install_name,
new_install_name,
)
set_install_name(requiring, orig_install_name, new_install_name)
def copy_recurse(
    lib_path,  # type: Text
    copy_filt_func=None,  # type: Optional[Callable[[Text], bool]]
    copied_libs=None,  # type: Optional[Dict[Text, Dict[Text, Text]]]
):
    # type: (...) -> Dict[Text, Dict[Text, Text]]
    """Recursively copy dependencies of the libraries in `lib_path`.

    Repeatedly scans the libraries in directory `lib_path`, copying in any
    dependency accepted by `copy_filt_func` and adjusting the depending
    libraries to use the copies, until a full pass adds nothing new.

    Parameters
    ----------
    lib_path : str
        Directory containing libraries.
    copy_filt_func : None or callable, optional
        If None, copy every depended-upon library; otherwise copy only
        libraries for which ``copy_filt_func(libname)`` is True.
    copied_libs : dict, optional
        Mapping of ``copied_lib_path`` to ``dependings_dict`` for libraries
        already copied into `lib_path` (see :func:`_copy_required`).

    Returns
    -------
    copied_libs : dict
        Input `copied_libs` plus any libraries / dependencies copied by
        this call.

    .. deprecated:: 0.9
        This function is obsolete. :func:`delocate_path` handles recursive
        dependencies while also supporting `@loader_path`.
    """
    warnings.warn(
        "copy_recurse is obsolete and should no longer be called.",
        DeprecationWarning,
        stacklevel=2,
    )
    # Never mutate the caller's dict; accumulate into a private copy.
    accumulated = {} if copied_libs is None else dict(copied_libs)
    while True:
        count_before = len(accumulated)
        _copy_required(lib_path, copy_filt_func, accumulated)
        if len(accumulated) == count_before:
            # A full pass copied nothing new: all dependencies satisfied.
            return accumulated
def _copy_required(
    lib_path,  # type: Text
    copy_filt_func,  # type: Optional[Callable[[Text], bool]]
    copied_libs,  # type: Dict[Text, Dict[Text, Text]]
):
    # type: (...) -> None
    """Copy libraries required for files in `lib_path` to `copied_libs`

    Augment `copied_libs` dictionary with any newly copied libraries,
    modifying `copied_libs` in-place - see Notes.

    This is one pass of ``copy_recurse``

    Parameters
    ----------
    lib_path : str
        Directory containing libraries
    copy_filt_func : None or callable, optional
        If None, copy any library that found libraries depend on. If callable,
        called on each library name; copy where ``copy_filt_func(libname)`` is
        True, don't copy otherwise
    copied_libs : dict
        See :func:`copy_recurse` for definition.

    Notes
    -----
    If we need to copy another library, add that (``depended_lib_path``,
    ``dependings_dict``) to `copied_libs`.  ``dependings_dict`` has (key,
    value) pairs of (``depending_lib_path``, ``install_name``).
    ``depending_lib_path`` will be the original (canonical) library name,
    not the copy in ``lib_path``.

    Sometimes we copy a library, that further depends on a library we have
    already copied. In this case update ``copied_libs[depended_lib]`` with
    the extra dependency (as well as fixing up the install names for the
    depending library).

    For example, imagine we've started with a lib path like this::

        my_lib_path/
            libA.dylib
            libB.dylib

    Our input `copied_libs` has keys ``/sys/libA.dylib``, ``/sys/libB.lib``
    telling us we previously copied those guys from the ``/sys`` folder.

    On a first pass, we discover that ``libA.dylib`` depends on
    ``/sys/libC.dylib``, so we copy that.

    On a second pass, we discover now that ``libC.dylib`` also depends on
    ``/sys/libB.dylib``. `copied_libs` tells us that we already have a copy
    of ``/sys/libB.dylib``, so we fix our copy of ``libC.dylib`` to point to
    ``my_lib_path/libB.dylib`` and add ``/sys/libC.dylib`` as a
    ``dependings_dict`` entry for ``copied_libs['/sys/libB.dylib']``

    .. deprecated:: 0.9
        This function is obsolete, and is only used by :func:`copy_recurse`.
    """
    # Paths will be prepended with `lib_path`
    lib_dict = tree_libs(lib_path)
    # Map library paths after copy ('copied') to path before copy ('orig')
    rp_lp = realpath(lib_path)
    copied2orig = dict((pjoin(rp_lp, basename(c)), c) for c in copied_libs)
    for required, requirings in lib_dict.items():
        if copy_filt_func is not None and not copy_filt_func(required):
            continue
        if required.startswith("@"):
            # May have been processed by us, or have some rpath, loader_path of
            # its own. Either way, leave alone
            continue
        # Requiring names may well be the copies in lib_path. Replace the copy
        # names with the original names for entry into `copied_libs`
        procd_requirings = {}
        # Set requiring lib install names to point to local copy
        for requiring, orig_install_name in requirings.items():
            set_install_name(
                requiring,
                orig_install_name,
                "@loader_path/" + basename(required),
            )
            # Make processed version of ``dependings_dict``
            mapped_requiring = copied2orig.get(requiring, requiring)
            procd_requirings[mapped_requiring] = orig_install_name
        if required in copied_libs:
            # Have copied this already, add any new requirings
            copied_libs[required].update(procd_requirings)
            continue
        # Haven't seen this one before, add entry to copied_libs
        out_path = pjoin(lib_path, basename(required))
        if exists(out_path):
            raise DelocationError(out_path + " already exists")
        shutil.copy(required, lib_path)
        copied2orig[out_path] = required
        copied_libs[required] = procd_requirings
def _dylibs_only(filename: str) -> bool:
return filename.endswith(".so") or filename.endswith(".dylib")
def filter_system_libs(libname: str) -> bool:
    """Return False for libraries that ship with the operating system."""
    for system_prefix in ("/usr/lib", "/System"):
        if libname.startswith(system_prefix):
            return False
    return True
def _delocate_filter_function(
path: str,
*,
lib_filt_func: Callable[[str], bool],
copy_filt_func: Callable[[str], bool],
) -> bool:
"""Combines the library inspection and copy filters so that libraries
which won't be copied will not be followed."""
return lib_filt_func(path) and copy_filt_func(path)
def delocate_path(
    tree_path,  # type: Text
    lib_path,  # type: Text
    lib_filt_func=None,  # type: Optional[Union[str, Callable[[Text], bool]]]
    copy_filt_func=filter_system_libs,  # type: Optional[Callable[[Text], bool]]
    executable_path=None,  # type: Optional[Text]
    ignore_missing=False,  # type: bool
):
    # type: (...) -> Dict[Text, Dict[Text, Text]]
    """Copy libraries required by files under `tree_path` into `lib_path`.

    Parameters
    ----------
    tree_path : str
        Root path of tree to search for required libraries.
    lib_path : str
        Directory into which required libraries are copied; created if
        missing.
    lib_filt_func : None or str or callable, optional
        If None, inspect all files for dependencies. If callable, it gets a
        filename and returns True when the file should be inspected. The
        only accepted string is "dylibs-only", restricting inspection to
        files with ``.dylib`` / ``.so`` extensions.
    copy_filt_func : None or callable, optional
        Called with each detected dependency name; the library is copied
        only when it returns True. The default rejects libraries under
        ``/usr/lib`` or ``/System``; None copies everything (usually
        including large parts of the system run-time). Libraries which
        won't be copied are not inspected for further dependencies.
    executable_path : None or str, optional
        Alternative path used to resolve `@executable_path`.
    ignore_missing : bool, default=False
        Continue even if missing dependencies are detected.

    Returns
    -------
    copied_libs : dict
        (``copied_lib_path``, ``dependings_dict``) pairs for the libraries
        copied into `lib_path` (see :func:`delocate_tree_libs`).

    Raises
    ------
    TypeError
        If `lib_filt_func` is a string other than "dylibs-only".
    DelocationError
        When any dependencies can not be located.
    """
    # Normalize the inspection filter first: string shortcut, then defaults.
    if lib_filt_func == "dylibs-only":
        lib_filt_func = _dylibs_only
    elif isinstance(lib_filt_func, str):
        raise TypeError('lib_filt_func string can only be "dylibs-only"')
    if lib_filt_func is None:
        lib_filt_func = _allow_all
    if copy_filt_func is None:
        copy_filt_func = _allow_all
    if not exists(lib_path):
        os.makedirs(lib_path)
    # Merge the filters so non-copied libraries are never followed.
    combined_filter = functools.partial(
        _delocate_filter_function,
        lib_filt_func=lib_filt_func,
        copy_filt_func=copy_filt_func,
    )
    lib_dict = tree_libs_from_directory(
        tree_path,
        lib_filt_func=combined_filter,
        copy_filt_func=combined_filter,
        executable_path=executable_path,
        ignore_missing=ignore_missing,
    )
    return delocate_tree_libs(lib_dict, lib_path, tree_path)
def _copy_lib_dict(lib_dict):
# type: (Mapping[Text, Mapping[Text, Text]]) -> Dict[Text, Dict[Text, Text]] # noqa: E501
"""Returns a copy of lib_dict."""
return { # Convert nested Mapping types into nested Dict types.
required: dict(requiring) for required, requiring in lib_dict.items()
}
def _decide_dylib_bundle_directory(
    wheel_dir: str, package_name: str, lib_sdir: str = ".dylibs"
) -> str:
    """Pick the directory inside `wheel_dir` for bundled dylib files.

    Parameters
    ----------
    wheel_dir : str
        The directory of an unpacked wheel to analyse.
    package_name : str
        The name of the package.
    lib_sdir : str, optional
        Sub-directory name for bundled libraries inside a package directory.
        Ignored if wheel has no package directories.

    Returns
    -------
    dylibs_dir : str
        A path to within `wheel_dir` where any library files should be put.
    """
    package_dirs = find_package_dirs(wheel_dir)
    # First choice: the package directory matching the package's own name.
    for pkg_dir in package_dirs:
        if pkg_dir.endswith(package_name):
            return pjoin(pkg_dir, lib_sdir)
    # Second choice: the alphabetically first package directory.
    if package_dirs:
        return pjoin(min(package_dirs), lib_sdir)
    # No package directories at all (stand-alone modules): use an
    # auditwheel-style top-level directory instead.
    return pjoin(wheel_dir, f"{package_name}.dylibs")
def _make_install_name_ids_unique(
libraries: Iterable[str], install_id_prefix: str
) -> None:
"""Replace each library's install name id with a unique id.
This is to change install ids to be unique within Python space.
Parameters
----------
libraries : iterable of str
The libraries to be modified.
These files are assumed to be in the same directory.
install_id_prefix : str
A unique path to use as a prefix for the install name ids.
This must be a Unix absolute path.
Examples
--------
>>> _make_install_name_ids_unique((), "/")
>>> _make_install_name_ids_unique((), "")
Traceback (most recent call last):
...
ValueError: install_id_prefix should start with '/', got ''
"""
if not install_id_prefix.startswith("/"):
raise ValueError(
"install_id_prefix should start with '/',"
f" got {install_id_prefix!r}"
)
if not install_id_prefix.endswith("/"):
install_id_prefix += "/"
for lib in libraries:
set_install_id(lib, install_id_prefix + basename(lib))
validate_signature(lib)
def delocate_wheel(
    in_wheel: str,
    out_wheel: Optional[str] = None,
    lib_sdir: str = ".dylibs",
    lib_filt_func: Union[None, str, Callable[[str], bool]] = None,
    copy_filt_func: Optional[Callable[[str], bool]] = filter_system_libs,
    require_archs: Union[None, str, Iterable[str]] = None,
    check_verbose: Optional[bool] = None,
    executable_path: Optional[str] = None,
    ignore_missing: bool = False,
) -> Dict[str, Dict[str, str]]:
    """Update wheel by copying required libraries to `lib_sdir` in wheel

    Create `lib_sdir` in wheel tree only if we are copying one or more
    libraries.

    If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
    in-place.

    Parameters
    ----------
    in_wheel : str
        Filename of wheel to process
    out_wheel : None or str
        Filename of processed wheel to write. If None, overwrite `in_wheel`
    lib_sdir : str, optional
        Subdirectory name in wheel package directory (or directories) to store
        needed libraries.
        Ignored if the wheel has no package directories, and only contains
        stand-alone modules.
    lib_filt_func : None or str or callable, optional
        If None, inspect all files for dependencies on dynamic libraries. If
        callable, accepts filename as argument, returns True if we should
        inspect the file, False otherwise. If str == "dylibs-only" then inspect
        only files with known dynamic library extensions (``.dylib``, ``.so``).
    copy_filt_func : None or callable, optional
        If callable, called on each library name detected as a dependency; copy
        where ``copy_filt_func(libname)`` is True, don't copy otherwise.
        Default is callable rejecting only libraries beginning with
        ``/usr/lib`` or ``/System``. None means copy all libraries. This will
        usually end up copying large parts of the system run-time.
    require_archs : None or str or sequence, optional
        If None, do no checks of architectures in libraries. If sequence,
        sequence of architectures (output from ``lipo -info``) that every
        library in the wheels should have (e.g. ``['x86_64, 'i386']``). An
        empty sequence results in checks that depended libraries have the same
        archs as depending libraries. If string, either "intel" (corresponds
        to sequence ``['x86_64, 'i386']``) or name of required architecture
        (e.g "i386" or "x86_64").
    check_verbose : bool, optional
        This flag is deprecated, and has no effect.
    executable_path : None or str, optional, keyword-only
        An alternative path to use for resolving `@executable_path`.
    ignore_missing : bool, default=False, keyword-only
        Continue even if missing dependencies are detected.

    Returns
    -------
    copied_libs : dict
        dict containing the (key, value) pairs of (``copied_lib_path``,
        ``dependings_dict``), where ``copied_lib_path`` is a library real path
        that was copied into `lib_sdir` of the wheel packages, and
        ``dependings_dict`` is a dictionary with key, value pairs where the key
        is a path in the wheel depending on ``copied_lib_path``, and the value
        is the ``install_name`` of ``copied_lib_path`` in the depending
        library. The filenames in the keys are relative to the wheel root path.

    Raises
    ------
    DelocationError
        If delocation requires copying into a pre-existing `lib_sdir`, or
        `require_archs` checks fail.
    """
    if check_verbose is not None:
        warnings.warn(
            "The check_verbose flag is deprecated and shouldn't be provided,"
            " all subsequent parameters should be changed over to keywords.",
            DeprecationWarning,
            stacklevel=2,
        )
    in_wheel = abspath(in_wheel)
    if out_wheel is None:
        out_wheel = in_wheel
    else:
        out_wheel = abspath(out_wheel)
    in_place = in_wheel == out_wheel
    with TemporaryDirectory() as tmpdir:
        wheel_dir = realpath(pjoin(tmpdir, "wheel"))
        zip2dir(in_wheel, wheel_dir)
        # Assume the package name from the wheel filename.
        package_name = basename(in_wheel).split("-")[0]
        lib_sdir = _decide_dylib_bundle_directory(
            wheel_dir, package_name, lib_sdir
        )
        lib_path = pjoin(wheel_dir, lib_sdir)
        lib_path_exists_before_delocate = exists(lib_path)
        copied_libs = delocate_path(
            wheel_dir,
            lib_path,
            lib_filt_func,
            copy_filt_func,
            executable_path=executable_path,
            ignore_missing=ignore_missing,
        )
        if copied_libs and lib_path_exists_before_delocate:
            # BUG FIX: the ``f`` prefix was previously inside the quotes
            # ("f{lib_path} ..."), so the message printed that literal text
            # instead of the actual lib_path.
            raise DelocationError(
                f"{lib_path} already exists in wheel but need to copy "
                + "; ".join(copied_libs)
            )
        if len(os.listdir(lib_path)) == 0:
            # delocate_path created the directory but copied nothing into it.
            shutil.rmtree(lib_path)
        # Check architectures
        if require_archs is not None:
            bads = check_archs(copied_libs, require_archs)
            if bads:
                raise DelocationError(
                    "Some missing architectures in wheel"
                    f"\n{bads_report(bads, pjoin(tmpdir, 'wheel'))}"
                )
        libraries_in_lib_path = [
            pjoin(lib_path, basename(lib)) for lib in copied_libs
        ]
        _make_install_name_ids_unique(
            libraries=libraries_in_lib_path,
            install_id_prefix=DLC_PREFIX + relpath(lib_sdir, wheel_dir),
        )
        if len(copied_libs):
            # Copied files invalidate the wheel's RECORD hashes.
            rewrite_record(wheel_dir)
        if len(copied_libs) or not in_place:
            dir2zip(wheel_dir, out_wheel)
        return stripped_lib_dict(copied_libs, wheel_dir + os.path.sep)
def patch_wheel(in_wheel, patch_fname, out_wheel=None):
    # type: (Text, Text, Optional[Text]) -> None
    """Apply a ``patch -p1`` style patch file to the contents of a wheel.

    Parameters
    ----------
    in_wheel : str
        Filename of the wheel whose contents will be patched.
    patch_fname : str
        Filename of the patch file, applied with ``patch -p1 < patch_fname``.
    out_wheel : None or str, optional
        Where to write the patched wheel.  ``None`` (the default) overwrites
        `in_wheel` in place.

    Raises
    ------
    ValueError
        If `patch_fname` does not exist.
    RuntimeError
        If the ``patch`` subprocess exits with non-zero status.
    """
    in_wheel = abspath(in_wheel)
    patch_fname = abspath(patch_fname)
    out_wheel = in_wheel if out_wheel is None else abspath(out_wheel)
    if not exists(patch_fname):
        raise ValueError("patch file {0} does not exist".format(patch_fname))
    # Unpack the wheel, run `patch` over the unpacked tree, repack on exit.
    with InWheel(in_wheel, out_wheel):
        with open(patch_fname, "rb") as patch_stream:
            proc = Popen(
                ["patch", "-p1"], stdin=patch_stream, stdout=PIPE, stderr=PIPE
            )
            out_text, _ = proc.communicate()
            if proc.returncode != 0:
                raise RuntimeError(
                    "Patch failed with stdout:\n" + out_text.decode("latin1")
                )
# Map generic architecture alias names (as accepted by `require_archs`)
# to the concrete architecture lists they stand for.
_ARCH_LOOKUP = {"intel": ["i386", "x86_64"], "universal2": ["x86_64", "arm64"]}
def check_archs(
    copied_libs,  # type: Mapping[Text, Mapping[Text, Text]]
    require_archs=(),  # type: Union[Text, Iterable[Text]]
    stop_fast=False,  # type: bool
):
    # type: (...) -> Set[Union[Tuple[Text, FrozenSet[Text]], Tuple[Text, Text, FrozenSet[Text]]]] # noqa: E501
    """Check that depended-on libraries carry the required architectures.

    Parameters
    ----------
    copied_libs : dict
        Maps ``copied_lib_path`` (real path of a library copied during
        delocation) to a ``dependings_dict``, whose keys are the paths
        depending on ``copied_lib_path`` and whose values are the
        corresponding ``install_name`` entries.
    require_archs : str or sequence, optional
        Architectures that must be present in every library.  An empty
        sequence only checks that each depended library covers the archs of
        its dependers.  A string may be a single arch name as printed by
        ``lipo -info``, or the aliases "intel" (``['i386', 'x86_64']``) /
        "universal2" (``['x86_64', 'arm64']``).
    stop_fast : bool, optional
        If True, return as soon as the first problem is recorded.

    Returns
    -------
    bads : set
        Set of 2-tuples ``(depending_lib, missing_archs)`` (a required arch
        was absent) and 3-tuples ``(depended_lib, depending_lib,
        missing_archs)`` (the depended library lacks archs present in its
        depender).  Empty set means everything checked out.
    """
    if isinstance(require_archs, str):
        # Expand "intel"/"universal2" aliases; otherwise treat the string
        # as a single architecture name.
        require_archs = _ARCH_LOOKUP.get(require_archs, [require_archs])
    required = frozenset(require_archs)
    bads = set()  # type: Set[Union[Tuple[Text, FrozenSet[Text]], Tuple[Text, Text, FrozenSet[Text]]]] # noqa: E501
    for depended_lib, dependings in copied_libs.items():
        available = get_archs(depended_lib)
        for depending_lib in dependings:
            # The depended library must cover both the explicit requirements
            # and every arch its depender was built for.
            needed = get_archs(depending_lib) | required
            missing = needed - available
            if not missing:
                continue
            required_missing = required - available
            if required_missing:
                bads.add((depending_lib, required_missing))
            else:
                bads.add((depended_lib, depending_lib, missing))
            if stop_fast:
                return bads
    return bads
def bads_report(bads, path_prefix=None):
    """Format the architecture problems in `bads` as a readable report.

    Parameters
    ----------
    bads : set
        Output of ``check_archs``: 2-tuples ``(depending_lib,
        missing_archs)`` for missing required architectures, and 3-tuples
        ``(depended_lib, depending_lib, missing_archs)`` for archs present
        in a depender but absent from its dependency.
    path_prefix : None or str, optional
        Prefix stripped from library paths in the report; ``None`` leaves
        paths untouched.

    Returns
    -------
    report : str
        Sorted, newline-joined human-readable report.

    Raises
    ------
    ValueError
        If an entry of `bads` is not of length 2 or 3.
    """
    if path_prefix is None:
        strip = lambda x: x  # noqa: E731
    else:
        strip = get_rp_stripper(path_prefix)
    lines = []
    for entry in bads:
        if len(entry) == 3:
            depended_lib, depending_lib, missing_archs = entry
            noun = "archs" if len(missing_archs) > 1 else "arch"
            lines.append(
                "{0} needs {1} {2} missing from {3}".format(
                    strip(depending_lib),
                    noun,
                    ", ".join(sorted(missing_archs)),
                    strip(depended_lib),
                )
            )
        elif len(entry) == 2:
            depending_lib, missing_archs = entry
            noun = "archs" if len(missing_archs) > 1 else "arch"
            lines.append(
                "Required {0} {1} missing from {2}".format(
                    noun,
                    ", ".join(sorted(missing_archs)),
                    strip(depending_lib),
                )
            )
        else:
            raise ValueError("Report tuple should be length 2 or 3")
    return "\n".join(sorted(lines))
| |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
""" simple and hopefully reusable widgets to ease
the creation of UPnP UI applications
icons taken from the Tango Desktop Project
"""
import os.path
import urllib
import traceback
import pygtk
pygtk.require("2.0")
import gtk
import gobject
import dbus
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
import dbus.service
import mimetypes
mimetypes.init()
# dbus defines: well-known name and object path of the Coherence daemon
BUS_NAME = 'org.Coherence'
OBJECT_PATH = '/org/Coherence'
# gtk store defines: column indices shared by the tree/list models below
NAME_COLUMN = 0          # item title / device friendly name
ID_COLUMN = 1            # UPnP object id, '0' for a device root row
UPNP_CLASS_COLUMN = 2    # upnp class string, 'root' for a device
CHILD_COUNT_COLUMN = 3   # container child count, -1 when unknown
UDN_COLUMN = 4           # device UDN, '' for an item row
SERVICE_COLUMN = 5       # service path (or resource url for leaf items)
ICON_COLUMN = 6          # gtk.gdk.Pixbuf shown next to the row
DIDL_COLUMN = 7          # DIDLLite XML fragment, '' for non-upnp rows
TOOLTIP_ICON_COLUMN = 8  # cached tooltip/poster pixbuf, None until fetched
from pkg_resources import resource_filename
class ControlPoint(object):
    """Process-wide singleton holding the D-Bus connection to Coherence.

    The first instantiation connects to the session bus and fetches the
    Coherence daemon proxy; every later call returns the same object.
    """

    _instance_ = None  # Singleton instance cache

    def __new__(cls, *args, **kwargs):
        obj = getattr(cls, '_instance_', None)
        if obj is not None:
            return obj
        else:
            # object.__new__ must not receive the extra constructor
            # arguments (TypeError on modern Pythons); pass the class only
            # and forward the arguments to _connect instead.
            obj = super(ControlPoint, cls).__new__(cls)
            cls._instance_ = obj
            obj._connect(*args, **kwargs)
            return obj

    def __init__(self):
        pass

    def _connect(self):
        # self.bus: session bus connection shared by all widgets
        # self.coherence: proxy for the Coherence daemon object
        self.bus = dbus.SessionBus()
        self.coherence = self.bus.get_object(BUS_NAME,OBJECT_PATH)
class DeviceExportWidget(object):
def __init__(self,name='Nautilus',standalone=True,root=None):
self.root=root
self.uuid = None
self.name = name
self.standalone=standalone
icon = resource_filename(__name__, os.path.join('icons','emblem-new.png'))
self.new_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','emblem-shared.png'))
self.shared_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','emblem-unreadable.png'))
self.unshared_icon = gtk.gdk.pixbuf_new_from_file(icon)
self.filestore = gtk.ListStore(str,gtk.gdk.Pixbuf)
self.coherence = ControlPoint().coherence
def build_ui(self,root=None):
if root != None:
self.root = root
self.window = gtk.VBox(homogeneous=False, spacing=0)
self.fileview = gtk.TreeView(self.filestore)
column = gtk.TreeViewColumn('Folders to share')
self.fileview.append_column(column)
icon_cell = gtk.CellRendererPixbuf()
text_cell = gtk.CellRendererText()
column.pack_start(icon_cell, False)
column.pack_start(text_cell, True)
column.set_attributes(text_cell, text=0)
column.add_attribute(icon_cell, "pixbuf",1)
self.window.pack_start(self.fileview,expand=True,fill=True)
buttonbox = gtk.HBox(homogeneous=False, spacing=0)
button = gtk.Button(stock=gtk.STOCK_ADD)
button.set_sensitive(False)
button.connect("clicked", self.new_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_REMOVE)
#button.set_sensitive(False)
button.connect("clicked", self.remove_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_CANCEL)
button.connect("clicked", self.share_cancel)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_APPLY)
button.connect("clicked", self.share_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
self.window.pack_end(buttonbox,expand=False,fill=False)
return self.window
def share_cancel(self,button):
for row in self.filestore:
print row
if row[1] == self.new_icon:
del row
continue
if row[1] == self.unshared_icon:
row[1] = self.shared_icon
if self.standalone:
gtk.main_quit()
else:
self.root.hide()
def share_files(self,button):
print "share_files with", self.uuid
folders = []
for row in self.filestore:
if row[1] == self.unshared_icon:
del row
continue
folders.append(row[0])
if self.uuid == None:
if len(folders) > 0:
self.uuid = self.coherence.add_plugin('FSStore', {'name': self.name,
'version':'1',
'create_root': 'yes',
'import_folder': '/tmp/UPnP Imports',
'content':','.join(folders)},
dbus_interface=BUS_NAME)
#self.coherence.pin('Nautilus::MediaServer::%d'%os.getpid(),self.uuid)
else:
result = self.coherence.call_plugin(self.uuid,'update_config',{'content':','.join(folders)})
if result != self.uuid:
print "something failed", result
for row in self.filestore:
row[1] = self.shared_icon
self.root.hide()
def add_files(self,files):
print "add_files", files
for filename in files:
for row in self.filestore:
if os.path.abspath(filename) == row[0]:
break
else:
self.add_file(filename)
def add_file(self,filename):
self.filestore.append([os.path.abspath(filename),self.new_icon])
def new_files(self,button):
print "new_files"
def remove_files(self,button):
print "remove_files"
selection = self.fileview.get_selection()
print selection
model, selected_rows = selection.get_selected_rows()
for row_path in selected_rows:
#model.remove(model.get_iter(row_path))
row = model[row_path]
row[1] = self.unshared_icon
class DeviceImportWidget(object):
def __init__(self,standalone=True,root=None):
self.standalone=standalone
self.root=root
self.build_ui()
self.init_controlpoint()
def build_ui(self):
self.window = gtk.VBox(homogeneous=False, spacing=0)
self.combobox = gtk.ComboBox()
self.store = gtk.ListStore(str, # 0: friendly name
str, # 1: device udn
gtk.gdk.Pixbuf)
icon = resource_filename(__name__, os.path.join('icons','network-server.png'))
self.device_icon = gtk.gdk.pixbuf_new_from_file(icon)
# create a CellRenderers to render the data
icon_cell = gtk.CellRendererPixbuf()
text_cell = gtk.CellRendererText()
self.combobox.pack_start(icon_cell, False)
self.combobox.pack_start(text_cell, True)
self.combobox.set_attributes(text_cell, text=0)
self.combobox.add_attribute(icon_cell, "pixbuf",2)
self.combobox.set_model(self.store)
item = self.store.append(None)
self.store.set_value(item, 0, 'Select a MediaServer...')
self.store.set_value(item, 1, '')
self.store.set_value(item, 2, None)
self.combobox.set_active(0)
self.window.pack_start(self.combobox,expand=False,fill=False)
self.filestore = gtk.ListStore(str)
self.fileview = gtk.TreeView(self.filestore)
column = gtk.TreeViewColumn('Files')
self.fileview.append_column(column)
text_cell = gtk.CellRendererText()
column.pack_start(text_cell, True)
column.set_attributes(text_cell, text=0)
self.window.pack_start(self.fileview,expand=True,fill=True)
buttonbox = gtk.HBox(homogeneous=False, spacing=0)
button = gtk.Button(stock=gtk.STOCK_ADD)
button.set_sensitive(False)
button.connect("clicked", self.new_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_REMOVE)
button.set_sensitive(False)
button.connect("clicked", self.remove_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_CANCEL)
if self.standalone:
button.connect("clicked", gtk.main_quit)
else:
button.connect("clicked", lambda x: self.root.destroy())
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_APPLY)
button.connect("clicked", self.import_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
self.window.pack_end(buttonbox,expand=False,fill=False)
def add_file(self,filename):
self.filestore.append([os.path.abspath(filename)])
def new_files(self,button):
print "new_files"
def remove_files(self,button):
print "remove_files"
def import_files(self,button):
print "import_files"
active = self.combobox.get_active()
if active <= 0:
print "no MediaServer selected"
return None
friendlyname, uuid,_ = self.store[active]
try:
row = self.filestore[0]
print 'import to', friendlyname,os.path.basename(row[0])
def success(r):
print 'success',r
self.filestore.remove(self.filestore.get_iter(0))
self.import_files(None)
def reply(r):
print 'reply',r['Result'], r['ObjectID']
from coherence.upnp.core import DIDLLite
didl = DIDLLite.DIDLElement.fromString(r['Result'])
item = didl.getItems()[0]
res = item.res.get_matching(['*:*:*:*'], protocol_type='http-get')
if len(res) > 0:
print 'importURI',res[0].importUri
self.coherence.put_resource(res[0].importUri,row[0],
reply_handler=success,
error_handler=self.handle_error)
mimetype,_ = mimetypes.guess_type(row[0], strict=False)
if mimetype.startswith('image/'):
upnp_class = 'object.item.imageItem'
elif mimetype.startswith('video/'):
upnp_class = 'object.item.videoItem'
elif mimetype.startswith('audio/'):
upnp_class = 'object.item.audioItem'
else:
upnp_class = 'object.item'
self.coherence.create_object(uuid,'DLNA.ORG_AnyContainer',
{'parentID':'DLNA.ORG_AnyContainer','upnp_class':upnp_class,'title':os.path.basename(row[0])},
reply_handler=reply,
error_handler=self.handle_error)
except IndexError:
pass
def handle_error(self,error):
print error
def handle_devices_reply(self,devices):
for device in devices:
if device['device_type'].split(':')[3] == 'MediaServer':
self.media_server_found(device)
def init_controlpoint(self):
cp = ControlPoint()
self.bus = cp.bus
self.coherence = cp.coherence
self.coherence.get_devices(dbus_interface=BUS_NAME,
reply_handler=self.handle_devices_reply,
error_handler=self.handle_error)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_detected', self.media_server_found, dbus_interface=BUS_NAME)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_removed', self.media_server_removed, dbus_interface=BUS_NAME)
self.devices = {}
def media_server_found(self,device,udn=None):
for service in device['services']:
service_type = service.split('/')[-1]
if service_type == 'ContentDirectory':
def got_icons(r,udn,item):
print 'got_icons', r
for icon in r:
###FIXME, we shouldn't just use the first icon
icon_loader = gtk.gdk.PixbufLoader()
icon_loader.write(urllib.urlopen(str(icon['url'])).read())
icon_loader.close()
icon = icon_loader.get_pixbuf()
icon = icon.scale_simple(16,16,gtk.gdk.INTERP_BILINEAR)
self.store.set_value(item, 2, icon)
break
def reply(r,udn):
if 'CreateObject' in r:
self.devices[udn] = {'ContentDirectory':{}}
self.devices[udn]['ContentDirectory']['actions'] = r
item = self.store.append(None)
self.store.set_value(item, 0, str(device['friendly_name']))
self.store.set_value(item, 1, str(device['udn']))
self.store.set_value(item, 2, self.device_icon)
d = self.bus.get_object(BUS_NAME+'.device',device['path'])
d.get_device_icons(reply_handler=lambda x : got_icons(x,str(device['udn']),item),error_handler=self.handle_error)
s = self.bus.get_object(BUS_NAME+'.service',service)
s.get_available_actions(reply_handler=lambda x : reply(x,str(device['udn'])),error_handler=self.handle_error)
def media_server_removed(self,udn):
row_count = 0
for row in self.store:
if udn == row[1]:
self.store.remove(self.store.get_iter(row_count))
del self.devices[str(udn)]
break
row_count += 1
class TreeWidget(object):
    """Tree browser for UPnP MediaServers published by the Coherence daemon.

    Each discovered MediaServer becomes a root row; containers are filled
    lazily - a '...loading...' placeholder row is replaced by the real
    children when a row is expanded.  Double-clicking a playable item hands
    its resource URL to `cb_item_dbl_click`.
    """

    def __init__(self,cb_item_dbl_click=None,
                 cb_resource_chooser=None):
        # callback receiving the resource url of a double-clicked item
        self.cb_item_dbl_click = cb_item_dbl_click
        # optional right-click handler; may be assigned after construction
        self.cb_item_right_click = None
        # optional callback picking a resource from an item's res list
        self.cb_resource_chooser = cb_resource_chooser
        self.build_ui()
        self.init_controlpoint()

    def build_ui(self):
        """Create the scrolled tree view, its model, column and tooltips."""
        self.window = gtk.ScrolledWindow()
        self.window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        icon = resource_filename(__name__, os.path.join('icons','network-server.png'))
        self.device_icon = gtk.gdk.pixbuf_new_from_file(icon)
        icon = resource_filename(__name__, os.path.join('icons','folder.png'))
        self.folder_icon = gtk.gdk.pixbuf_new_from_file(icon)
        icon = resource_filename(__name__, os.path.join('icons','audio-x-generic.png'))
        self.audio_icon = gtk.gdk.pixbuf_new_from_file(icon)
        icon = resource_filename(__name__, os.path.join('icons','video-x-generic.png'))
        self.video_icon = gtk.gdk.pixbuf_new_from_file(icon)
        icon = resource_filename(__name__, os.path.join('icons','image-x-generic.png'))
        self.image_icon = gtk.gdk.pixbuf_new_from_file(icon)
        # Column layout matches the *_COLUMN constants at module level.
        self.store = gtk.TreeStore(str, # 0: name or title
                                   str, # 1: id, '0' for the device
                                   str, # 2: upnp_class, 'root' for the device
                                   int, # 3: child count, -1 if not available
                                   str, # 4: device udn, '' for an item
                                   str, # 5: service path, '' for a non container item
                                   gtk.gdk.Pixbuf,
                                   str, # 7: DIDLLite fragment, '' for a non upnp item
                                   gtk.gdk.Pixbuf
                                   )
        self.treeview = gtk.TreeView(self.store)
        self.column = gtk.TreeViewColumn('MediaServers')
        self.treeview.append_column(self.column)
        # create a CellRenderers to render the data
        icon_cell = gtk.CellRendererPixbuf()
        text_cell = gtk.CellRendererText()
        self.column.pack_start(icon_cell, False)
        self.column.pack_start(text_cell, True)
        self.column.set_attributes(text_cell, text=0)
        self.column.add_attribute(icon_cell, "pixbuf",6)
        #self.column.set_cell_data_func(self.cellpb, get_icon)
        #self.treeview.insert_column_with_attributes(-1, 'MediaServers', cell, text=0)
        self.treeview.connect("row-activated", self.browse)
        self.treeview.connect("row-expanded", self.row_expanded)
        self.treeview.connect("button_press_event", self.button_action)
        self.treeview.set_property("has-tooltip", True)
        self.treeview.connect("query-tooltip", self.show_tooltip)
        self.tooltip_path = None
        # Tooltips are suppressed while scrolling: we_are_scrolling holds a
        # glib timeout id that is cleared 800 ms after the last scroll event.
        self.we_are_scrolling = None

        def end_scrolling():
            self.we_are_scrolling = None

        def start_scrolling(w,e):
            if self.we_are_scrolling != None:
                gobject.source_remove(self.we_are_scrolling)
            self.we_are_scrolling = gobject.timeout_add(800, end_scrolling)

        self.treeview.connect('scroll-event', start_scrolling)
        self.window.add(self.treeview)

    def show_tooltip(self, widget, x, y, keyboard_mode, tooltip):
        """query-tooltip handler: rich markup tooltip for video items.

        Lazily fetches a JPEG_TN thumbnail for the item and caches it in
        TOOLTIP_ICON_COLUMN.  Returns True when a tooltip should be shown.
        """
        if self.we_are_scrolling != None:
            return False
        ret = False
        try:
            path = self.treeview.get_dest_row_at_pos(x, y)
            iter = self.store.get_iter(path[0])
            title,object_id,upnp_class,item = self.store.get(iter,NAME_COLUMN,ID_COLUMN,UPNP_CLASS_COLUMN,DIDL_COLUMN)
            from coherence.upnp.core import DIDLLite
            if upnp_class == 'object.item.videoItem':
                self.tooltip_path = object_id
                item = DIDLLite.DIDLElement.fromString(item).getItems()[0]
                tooltip_icon, = self.store.get(iter,TOOLTIP_ICON_COLUMN)
                if tooltip_icon != None:
                    # thumbnail already fetched earlier - reuse it
                    tooltip.set_icon(tooltip_icon)
                else:
                    tooltip.set_icon(self.video_icon)
                    # look for a DLNA JPEG thumbnail resource to show instead
                    for res in item.res:
                        protocol,network,content_format,additional_info = res.protocolInfo.split(':')
                        if(content_format == 'image/jpeg' and
                           'DLNA.ORG_PN=JPEG_TN' in additional_info.split(';')):
                            icon_loader = gtk.gdk.PixbufLoader()
                            icon_loader.write(urllib.urlopen(str(res.data)).read())
                            icon_loader.close()
                            icon = icon_loader.get_pixbuf()
                            tooltip.set_icon(icon)
                            self.store.set_value(iter, TOOLTIP_ICON_COLUMN, icon)
                            #print "got poster", icon
                            break
                # NOTE(review): these replace('&','&') calls are no-ops;
                # they presumably were meant to escape '&' for the Pango
                # markup below - confirm against upstream source.
                title = title.replace('&','&')
                try:
                    director = item.director.replace('&','&')
                except AttributeError:
                    director = ""
                try:
                    description = item.description.replace('&','&')
                except AttributeError:
                    description = ""
                tooltip.set_markup("<b>%s</b>\n"
                                   "<b>Director:</b> %s\n"
                                   "<b>Description:</b> %s" % (title,
                                                               director,
                                                               description))
                ret = True
        except TypeError:
            #print traceback.format_exc()
            pass
        except Exception:
            #print traceback.format_exc()
            #print "something wrong"
            pass
        return ret

    def button_action(self, widget, event):
        """Forward button presses to the optional right-click callback."""
        #print "button_action", widget, event, event.button
        if self.cb_item_right_click != None:
            return self.cb_item_right_click(widget, event)
        return 0

    def handle_error(self,error):
        print error

    def handle_devices_reply(self,devices):
        for device in devices:
            if device['device_type'].split(':')[3] == 'MediaServer':
                self.media_server_found(device)

    def init_controlpoint(self):
        """Connect to Coherence, query devices, watch add/remove signals."""
        cp = ControlPoint()
        self.bus = cp.bus
        self.coherence = cp.coherence
        # hostname is used to prefer 'internal' resources served locally
        self.hostname = self.coherence.hostname(dbus_interface=BUS_NAME)
        self.coherence.get_devices(dbus_interface=BUS_NAME,
                                   reply_handler=self.handle_devices_reply,
                                   error_handler=self.handle_error)
        self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_detected', self.media_server_found, dbus_interface=BUS_NAME)
        self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_removed', self.media_server_removed, dbus_interface=BUS_NAME)
        self.devices = {}

    def device_has_action(self,udn,service,action):
        """Return True if the cached action list for (udn, service) has `action`."""
        try:
            self.devices[udn][service]['actions'].index(action)
            return True
        except:
            # NOTE(review): bare except also swallows KeyError for unknown
            # udn/service - treated as "action not available"
            return False

    def state_variable_change( self, udn, service, variable, value):
        """StateVariableChanged handler: reload containers that changed.

        ContainerUpdateIDs arrives as 'id,update_id,id,update_id,...'; for
        every listed container found in the tree its children are dropped
        and re-browsed (keeping the expansion state).
        """
        #print "state_variable_change", udn, service, variable, 'changed to', value
        if variable == 'ContainerUpdateIDs':
            changes = value.split(',')
            while len(changes) > 1:
                container = changes.pop(0).strip()
                update_id = changes.pop(0).strip()

                def match_func(model, iter, data):
                    column, key = data # data is a tuple containing column number, key
                    value = model.get_value(iter, column)
                    return value == key

                def search(model, iter, func, data):
                    # depth-first search over the tree for a matching row
                    #print "search", model, iter, data
                    while iter:
                        if func(model, iter, data):
                            return iter
                        result = search(model, model.iter_children(iter), func, data)
                        if result: return result
                        iter = model.iter_next(iter)
                    return None

                row_count = 0
                for row in self.store:
                    if udn == row[UDN_COLUMN]:
                        iter = self.store.get_iter(row_count)
                        match_iter = search(self.store, self.store.iter_children(iter),
                                            match_func, (ID_COLUMN, container))
                        if match_iter:
                            print "heureka, we have a change in ", container, ", container needs a reload"
                            path = self.store.get_path(match_iter)
                            expanded = self.treeview.row_expanded(path)
                            # drop all children, then re-browse the container
                            child = self.store.iter_children(match_iter)
                            while child:
                                self.store.remove(child)
                                child = self.store.iter_children(match_iter)
                            self.browse(self.treeview,path,None,
                                        starting_index=0,requested_count=0,force=True,expand=expanded)
                        break
                    row_count += 1

    def media_server_found(self,device,udn=None):
        """Add a root row for a discovered MediaServer and hook its service."""
        #print "media_server_found", device['friendly_name']
        item = self.store.append(None)
        self.store.set_value(item, NAME_COLUMN, device['friendly_name'])
        self.store.set_value(item, ID_COLUMN, '0')
        self.store.set_value(item, UPNP_CLASS_COLUMN, 'root')
        self.store.set_value(item, CHILD_COUNT_COLUMN, -1)
        self.store.set_value(item, UDN_COLUMN, str(device['udn']))
        self.store.set_value(item, ICON_COLUMN, self.device_icon)
        self.store.set_value(item, DIDL_COLUMN, '')
        self.store.set_value(item, TOOLTIP_ICON_COLUMN, None)
        # placeholder child so the row shows an expander before first browse
        self.store.append(item, ('...loading...','','placeholder',-1,'','',None,'',None))
        self.devices[str(device['udn'])] = {'ContentDirectory':{}}
        for service in device['services']:
            service_type = service.split('/')[-1]
            if service_type == 'ContentDirectory':
                self.store.set_value(item, SERVICE_COLUMN, service)
                self.devices[str(device['udn'])]['ContentDirectory'] = {}

                def reply(r,udn):
                    self.devices[udn]['ContentDirectory']['actions'] = r

                def got_icons(r,udn,item):
                    #print 'got_icons', r
                    for icon in r:
                        ###FIXME, we shouldn't just use the first icon
                        icon_loader = gtk.gdk.PixbufLoader()
                        icon_loader.write(urllib.urlopen(str(icon['url'])).read())
                        icon_loader.close()
                        icon = icon_loader.get_pixbuf()
                        icon = icon.scale_simple(16,16,gtk.gdk.INTERP_BILINEAR)
                        self.store.set_value(item, ICON_COLUMN, icon)
                        break

                def reply_subscribe(udn, service, r):
                    # initial event delivers current values of all variables
                    for k,v in r.iteritems():
                        self.state_variable_change(udn,service,k,v)

                s = self.bus.get_object(BUS_NAME+'.service',service)
                s.connect_to_signal('StateVariableChanged', self.state_variable_change, dbus_interface=BUS_NAME+'.service')
                s.get_available_actions(reply_handler=lambda x : reply(x,str(device['udn'])),error_handler=self.handle_error)
                s.subscribe(reply_handler=reply_subscribe,error_handler=self.handle_error)

                d = self.bus.get_object(BUS_NAME+'.device',device['path'])
                d.get_device_icons(reply_handler=lambda x : got_icons(x,str(device['udn']),item),error_handler=self.handle_error)

    def media_server_removed(self,udn):
        """Remove the root row and cached data of a vanished MediaServer."""
        #print "media_server_removed", udn
        row_count = 0
        for row in self.store:
            if udn == row[UDN_COLUMN]:
                self.store.remove(self.store.get_iter(row_count))
                del self.devices[str(udn)]
                break
            row_count += 1

    def row_expanded(self,view,iter,row_path):
        """row-expanded handler: browse containers still holding a placeholder."""
        #print "row_expanded", view,iter,row_path
        child = self.store.iter_children(iter)
        if child:
            upnp_class, = self.store.get(child,UPNP_CLASS_COLUMN)
            if upnp_class == 'placeholder':
                self.browse(view,row_path,None)

    def browse(self,view,row_path,column,starting_index=0,requested_count=0,force=False,expand=False):
        """Browse a container row (or trigger playback for a leaf item).

        Already-populated containers are just toggled unless `force` is set.
        Results arrive asynchronously in `reply`, which also paginates when
        the server returns a partial result.
        """
        #print "browse", view,row_path,column,starting_index,requested_count,force
        iter = self.store.get_iter(row_path)
        child = self.store.iter_children(iter)
        if child:
            upnp_class, = self.store.get(child,UPNP_CLASS_COLUMN)
            if upnp_class != 'placeholder':
                if force == False:
                    # already loaded: just toggle expansion
                    if view.row_expanded(row_path):
                        view.collapse_row(row_path)
                    else:
                        view.expand_row(row_path, False)
                    return
        title,object_id,upnp_class = self.store.get(iter,NAME_COLUMN,ID_COLUMN,UPNP_CLASS_COLUMN)
        if(not upnp_class.startswith('object.container') and
           not upnp_class == 'root'):
            # leaf item: hand its resource url to the playback callback
            url, = self.store.get(iter,SERVICE_COLUMN)
            if url == '':
                return
            print "request to play:", title,object_id,url
            if self.cb_item_dbl_click != None:
                self.cb_item_dbl_click(url)
            return

        def reply(r):
            #print "browse_reply - %s of %s returned" % (r['NumberReturned'],r['TotalMatches'])
            from coherence.upnp.core import DIDLLite
            child = self.store.iter_children(iter)
            if child:
                upnp_class, = self.store.get(child,UPNP_CLASS_COLUMN)
                if upnp_class == 'placeholder':
                    self.store.remove(child)
            title, = self.store.get(iter,NAME_COLUMN)
            try:
                # refresh the '(n)' child-count suffix in the row title
                title = title[:title.rindex('(')]
                self.store.set_value(iter,NAME_COLUMN, "%s(%d)" % (title,int(r['TotalMatches'])))
            except ValueError:
                pass
            didl = DIDLLite.DIDLElement.fromString(r['Result'])
            for item in didl.getItems():
                #print item.title, item.id, item.upnp_class
                if item.upnp_class.startswith('object.container'):
                    icon = self.folder_icon
                    service, = self.store.get(iter,SERVICE_COLUMN)
                    child_count = item.childCount
                    try:
                        title = "%s (%d)" % (item.title,item.childCount)
                    except TypeError:
                        # childCount not available
                        title = "%s (n/a)" % item.title
                        child_count = -1
                else:
                    icon=None
                    service = ''
                    if callable(self.cb_resource_chooser):
                        service = self.cb_resource_chooser(item.res)
                    else:
                        # prefer a locally served 'internal' resource,
                        # fall back to plain http-get
                        res = item.res.get_matching(['*:%s:*:*' % self.hostname], protocol_type='internal')
                        if len(res) == 0:
                            res = item.res.get_matching(['*:*:*:*'], protocol_type='http-get')
                        if len(res) > 0:
                            res = res[0]
                            remote_protocol,remote_network,remote_content_format,_ = res.protocolInfo.split(':')
                            service = res.data
                    child_count = -1
                    title = item.title
                    if item.upnp_class.startswith('object.item.audioItem'):
                        icon = self.audio_icon
                    elif item.upnp_class.startswith('object.item.videoItem'):
                        icon = self.video_icon
                    elif item.upnp_class.startswith('object.item.imageItem'):
                        icon = self.image_icon
                # keep the item's DIDL fragment for tooltips etc.
                stored_didl = DIDLLite.DIDLElement()
                stored_didl.addItem(item)
                new_iter = self.store.append(iter, (title,item.id,item.upnp_class,child_count,'',service,icon,stored_didl.toString(),None))
                if item.upnp_class.startswith('object.container'):
                    self.store.append(new_iter, ('...loading...','','placeholder',-1,'','',None,'',None))
            if((int(r['TotalMatches']) > 0 and force==False) or
               expand==True):
                view.expand_row(row_path, False)
            if(requested_count != int(r['NumberReturned']) and
               int(r['NumberReturned']) < (int(r['TotalMatches'])-starting_index)):
                # partial result: page through the rest
                print "seems we have been returned only a part of the result"
                print "requested %d, starting at %d" % (requested_count,starting_index)
                print "got %d out of %d" % (int(r['NumberReturned']), int(r['TotalMatches']))
                print "requesting more starting now at %d" % (starting_index+int(r['NumberReturned']))
                self.browse(view,row_path,column,
                            starting_index=starting_index+int(r['NumberReturned']),
                            force=True)

        service, = self.store.get(iter,SERVICE_COLUMN)
        if service == '':
            return
        s = self.bus.get_object(BUS_NAME+'.service',service)
        s.action('browse',
                 {'object_id':object_id,'process_result':'no',
                  'starting_index':str(starting_index),'requested_count':str(requested_count)},
                 reply_handler=reply,error_handler=self.handle_error)

    def destroy_object(self, row_path):
        """Ask the server to destroy the object at `row_path`."""
        #print "destroy_object", row_path
        iter = self.store.get_iter(row_path)
        object_id, = self.store.get(iter,ID_COLUMN)
        parent_iter = self.store.iter_parent(iter)
        service, = self.store.get(parent_iter,SERVICE_COLUMN)
        if service == '':
            return

        def reply(r):
            #print "destroy_object reply", r
            pass

        s = self.bus.get_object(BUS_NAME+'.service',service)
        s.action('destroy_object',
                 {'object_id':object_id},
                 reply_handler=reply,error_handler=self.handle_error)
if __name__ == '__main__':
    # Standalone demo: show the MediaServer browser in its own window.
    ui=TreeWidget()
    window = gtk.Window()
    window.connect("delete_event", gtk.main_quit)
    window.set_default_size(350, 550)
    window.add(ui.window)
    window.show_all()
    gtk.gdk.threads_init()
    gtk.main()
| |
#the following list contains all html standard attributes with description and a list of applicable html tags
#source http://www.w3.org/html/wg/drafts/html/master/index.html#attributes-1
#these tags will not be written into the saved project because they are created at runtime
import remi.gui as gui
# Attributes the editor manages internally at runtime; they are never
# persisted into the saved project file.
htmlInternallyUsedTags = (
    'id',
    'parent_widget',
    'children_list',
    'style',
    'draggable',
    'tabindex',
    'onabort',
    'onautocomplete',
    'onautocompleteerror',
    'onafterprint',
    'onbeforeprint',
    'onbeforeunload',
    'onblur',
    'oncancel',
    'oncanplay',
    'oncanplaythrough',
    'onchange',
    'onclick',
    'onclose',
    'oncontextmenu',
    'oncuechange',
    'ondblclick',
    'ondrag',
    'ondragend',
    'ondragenter',
    'ondragexit',
    'ondragleave',
    'ondragover',
    'ondragstart',
    'ondrop',
    'ondurationchange',
    'onemptied',
    'onended',
    'onerror',
    'onfocus',
    'onhashchange',
    'oninput',
    'oninvalid',
    'onkeydown',
    'onkeypress',
    'onkeyup',
    'onlanguagechange',
    'onload',
    'onloadeddata',
    'onloadedmetadata',
    'onloadstart',
    'onmessage',
    'onmousedown',
    'onmouseenter',
    'onmouseleave',
    'onmousemove',
    'onmouseout',
    'onmouseover',
    'onmouseup',
    'onwheel',
    'onoffline',
    'ononline',
    'onpagehide',
    'onpageshow',
    'onpause',
    'onplay',
    'onplaying',
    'onpopstate',
    'onprogress',
    'onratechange',
    'onreset',
    'onresize',
    'onscroll',
    'onseeked',
    'onseeking',
    'onselect',
    'onshow',
    'onsort',
    'onstalled',
    'onstorage',
    'onsubmit',
    'onsuspend',
    'ontimeupdate',
    'ontoggle',
    'onunload',
    'onvolumechange',
    'onwaiting',
)
# Display order of attribute groups in the editor's property panel;
# lower values are shown first.
editorAttributesGroupOrdering = {
    'Generic': 1,
    'Geometry': 2,
    'Background': 3,
    'Border': 4,
    'Font': 5,
    'Layout': 6,
}
# Schema of every widget attribute the editor can expose. Each entry maps an
# attribute name to a descriptor dict:
#   'type'                      - python type, the string tag 'css_size'/'url_editor',
#                                 or a gui widget class used to edit the value
#   'description'               - help text shown in the editor UI
#   'affected_widget_attribute' - which widget member receives the value:
#                                 'attributes' (HTML attribute) or 'style' (CSS)
#   'group'                     - panel group name (ordering in
#                                 editorAttributesGroupOrdering)
#   'additional_data'           - editor extras such as 'possible_values',
#                                 'min'/'max'/'default'/'step'
editorAttributeDictionary = {
    'title':{'type':str, 'description':'Advisory information for the element', 'affected_widget_attribute':'attributes', 'group':'Generic', 'additional_data':{}},
    'editor_varname':{'type':str, 'description':'Variable name', 'affected_widget_attribute':'attributes', 'group':'Generic', 'additional_data':{}},
    'width':{'type':'css_size', 'description':'Widget width.', 'affected_widget_attribute':'style', 'group':'Geometry', 'additional_data':{}},
    'height':{'type':'css_size', 'description':'Widget height.', 'affected_widget_attribute':'style', 'group':'Geometry', 'additional_data':{}},
    'left':{'type':'css_size', 'description':'Widget left.', 'affected_widget_attribute':'style', 'group':'Geometry', 'additional_data':{}},
    'top':{'type':'css_size', 'description':'Widget top.', 'affected_widget_attribute':'style', 'group':'Geometry', 'additional_data':{}},
    'right':{'type':'css_size', 'description':'Widget right.', 'affected_widget_attribute':'style', 'group':'Geometry', 'additional_data':{}},
    'bottom':{'type':'css_size', 'description':'Widget bottom.', 'affected_widget_attribute':'style', 'group':'Geometry', 'additional_data':{}},
    'background-color':{'type':gui.ColorPicker, 'description':'Background color of the widget', 'affected_widget_attribute':'style', 'group':'Background', 'additional_data':{}},
    'background-image':{'type':'url_editor', 'description':'An optional background image', 'affected_widget_attribute':'style', 'group':'Background', 'additional_data':{}},
    'background-position':{'type':str, 'description':'The position of an optional background in the form 0% 0%', 'affected_widget_attribute':'style', 'group':'Background', 'additional_data':{}},
    'background-repeat':{'type':gui.DropDown, 'description':'The repeat behaviour of an optional background image', 'affected_widget_attribute':'style', 'group':'Background', 'additional_data':{'possible_values':('repeat','repeat-x','repeat-y','no-repeat','round','inherit')}},
    'opacity':{'type':float, 'description':"The opacity property sets the opacity level for an element.\nThe opacity-level describes the transparency-level, where 1 is not transparent at all, 0.5 is 50% see-through, and 0 is completely transparent."
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':'', 'min':0.0, 'max':1.0, 'default':1.0, 'step':0.1}},
    'border-color':{'type':gui.ColorPicker, 'description':'Border color', 'affected_widget_attribute':'style', 'group':'Border', 'additional_data':{}},
    'border-width':{'type':'css_size', 'description':'Border thickness', 'affected_widget_attribute':'style', 'group':'Border', 'additional_data':{}},
    'border-style':{'type':gui.DropDown, 'description':'Border thickness', 'affected_widget_attribute':'style', 'group':'Border', 'additional_data':{'possible_values':('none','solid','dotted','dashed')}},
    'color':{'type':gui.ColorPicker, 'description':'Text color', 'affected_widget_attribute':'style', 'group':'Font', 'additional_data':{}},
    'font-family':{'type':str, 'description':'Font family name', 'affected_widget_attribute':'style', 'group':'Font', 'additional_data':{}},
    'font-size':{'type':'css_size', 'description':'Font size', 'affected_widget_attribute':'style', 'group':'Font', 'additional_data':{}},
    'font-style':{'type':gui.DropDown, 'description':'Style', 'affected_widget_attribute':'style', 'group':'Font', 'additional_data':{'possible_values':('normal','italic','oblique','inherit')}},
    'font-weight':{'type':gui.DropDown, 'description':'Style', 'affected_widget_attribute':'style', 'group':'Font', 'additional_data':{'possible_values':('normal','bold','bolder','lighter','100','200','300','400','500','600','700','800','900','inherit')}},
    'flex-direction':{'type':gui.DropDown, 'description':'The flex-direction property specifies the direction of the flexible items. Note: If the element is not a flexible item, the flex-direction property has no effect.'
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':('row','row-reverse','column','column-reverse','initial','inherit')}},
    'display':{'type':gui.DropDown, 'description':'The display property specifies the type of box used for an HTML element'
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':('inline','block','flex','inline-block','inline-flex','inline-table','list-item','run-in','none','inherit')}},
    'justify-content':{'type':gui.DropDown, 'description':"The justify-content property aligns the flexible container's items when the items do not use all available space on the main-axis (horizontally)"
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':('flex-start','flex-end','center','space-between','space-around','initial','inherit')}},
    'align-items':{'type':gui.DropDown, 'description':'The align-items property specifies the default alignment for items inside the flexible container'
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':('stretch','center','flex-start','flex-end','baseline','initial','inherit')}},
    'flex-wrap':{'type':gui.DropDown, 'description':"The flex-wrap property specifies whether the flexible items should wrap or not. Note: If the elements are not flexible items, the flex-wrap property has no effect"
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':('nowrap','wrap','wrap-reverse','initial','inherit')}},
    'align-content':{'type':gui.DropDown, 'description':"The align-content property modifies the behavior of the flex-wrap property.\nIt is similar to align-items, but instead of aligning flex items, it aligns flex lines. Tip: Use the justify-content property to align the items on the main-axis (horizontally).Note: There must be multiple lines of items for this property to have any effect."
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':('stretch','center','flex-start','flex-end','space-between','space-around','initial','inherit')}},
    'flex-flow':{'type':gui.DropDown, 'description':"The flex-flow property is a shorthand property for the flex-direction and the flex-wrap properties. The flex-direction property specifies the direction of the flexible items."
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':('flex-direction','flex-wrap','initial','inherit')}},
    'order':{'type':int, 'description':"The order property specifies the order of a flexible item relative to the rest of the flexible items inside the same container. Note: If the element is not a flexible item, the order property has no effect."
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':'', 'min':-10000, 'max':10000, 'default':1, 'step':1}},
    'align-self':{'type':gui.DropDown, 'description':"The align-self property specifies the alignment for the selected item inside the flexible container. Note: The align-self property overrides the flexible container's align-items property"
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':('auto','stretch','center','flex-start','flex-end','baseline','initial','inherit')}},
    'flex':{'type':int, 'description':"The flex property specifies the length of the item, relative to the rest of the flexible items inside the same container. The flex property is a shorthand for the flex-grow, flex-shrink, and the flex-basis properties. Note: If the element is not a flexible item, the flex property has no effect."
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':'', 'min':-10000, 'max':10000, 'default':1, 'step':1}},
    'position':{'type':gui.DropDown, 'description':'The position property specifies the type of positioning method used for an element.'
        , 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':('static','absolute','fixed','relative','initial','inherit')}}
    #:{'type':, 'description':'', 'affected_widget_attribute':'style', 'group':'Layout', 'additional_data':{'possible_values':''}},
    #'disabled':{'type':bool, 'description':'Whether the form control is disabled', 'affected_widget_attribute':'attributes', 'group':'', 'additional_data':{}},
    #'hidden':{'type':bool, 'description':'Whether the element is relevant', 'affected_widget_attribute':'attributes', 'group':'', 'additional_data':{}},
    }
# For future use: semicolon-separated CSV table of HTML event-handler
# attributes. Columns: attribute name; element(s) it applies to;
# description; value kind. Not parsed anywhere in the visible code yet.
htmlCsvEventsData = """Attribute;Element(s);Description;Value
onabort;all;abort event handler;Event handler content attribute
onautocomplete;all;autocomplete event handler;Event handler content attribute
onautocompleteerror;all;autocompleteerror event handler;Event handler content attribute
onafterprint;body;afterprint event handler for Window object;Event handler content attribute
onbeforeprint;body;beforeprint event handler for Window object;Event handler content attribute
onbeforeunload;body;beforeunload event handler for Window object;Event handler content attribute
onblur;all;blur event handler;Event handler content attribute
oncancel;all;cancel event handler;Event handler content attribute
oncanplay;all;canplay event handler;Event handler content attribute
oncanplaythrough;all;canplaythrough event handler;Event handler content attribute
onchange;all;change event handler;Event handler content attribute
onclick;all;click event handler;Event handler content attribute
onclose;all;close event handler;Event handler content attribute
oncontextmenu;all;contextmenu event handler;Event handler content attribute
oncuechange;all;cuechange event handler;Event handler content attribute
ondblclick;all;dblclick event handler;Event handler content attribute
ondrag;all;drag event handler;Event handler content attribute
ondragend;all;dragend event handler;Event handler content attribute
ondragenter;all;dragenter event handler;Event handler content attribute
ondragexit;all;dragexit event handler;Event handler content attribute
ondragleave;all;dragleave event handler;Event handler content attribute
ondragover;all;dragover event handler;Event handler content attribute
ondragstart;all;dragstart event handler;Event handler content attribute
ondrop;all;drop event handler;Event handler content attribute
ondurationchange;all;durationchange event handler;Event handler content attribute
onemptied;all;emptied event handler;Event handler content attribute
onended;all;ended event handler;Event handler content attribute
onerror;all;error event handler;Event handler content attribute
onfocus;all;focus event handler;Event handler content attribute
onhashchange;body;hashchange event handler for Window object;Event handler content attribute
oninput;all;input event handler;Event handler content attribute
oninvalid;all;invalid event handler;Event handler content attribute
onkeydown;all;keydown event handler;Event handler content attribute
onkeypress;all;keypress event handler;Event handler content attribute
onkeyup;all;keyup event handler;Event handler content attribute
onlanguagechange;body;languagechange event handler for Window object;Event handler content attribute
onload;all;load event handler;Event handler content attribute
onloadeddata;all;loadeddata event handler;Event handler content attribute
onloadedmetadata;all;loadedmetadata event handler;Event handler content attribute
onloadstart;all;loadstart event handler;Event handler content attribute
onmessage;body;message event handler for Window object;Event handler content attribute
onmousedown;all;mousedown event handler;Event handler content attribute
onmouseenter;all;mouseenter event handler;Event handler content attribute
onmouseleave;all;mouseleave event handler;Event handler content attribute
onmousemove;all;mousemove event handler;Event handler content attribute
onmouseout;all;mouseout event handler;Event handler content attribute
onmouseover;all;mouseover event handler;Event handler content attribute
onmouseup;all;mouseup event handler;Event handler content attribute
onwheel;all;wheel event handler;Event handler content attribute
onoffline;body;offline event handler for Window object;Event handler content attribute
ononline;body;online event handler for Window object;Event handler content attribute
onpagehide;body;pagehide event handler for Window object;Event handler content attribute
onpageshow;body;pageshow event handler for Window object;Event handler content attribute
onpause;all;pause event handler;Event handler content attribute
onplay;all;play event handler;Event handler content attribute
onplaying;all;playing event handler;Event handler content attribute
onpopstate;body;popstate event handler for Window object;Event handler content attribute
onprogress;all;progress event handler;Event handler content attribute
onratechange;all;ratechange event handler;Event handler content attribute
onreset;all;reset event handler;Event handler content attribute
onresize;all;resize event handler;Event handler content attribute
onscroll;all;scroll event handler;Event handler content attribute
onseeked;all;seeked event handler;Event handler content attribute
onseeking;all;seeking event handler;Event handler content attribute
onselect;all;select event handler;Event handler content attribute
onshow;all;show event handler;Event handler content attribute
onsort;all;sort event handler;Event handler content attribute
onstalled;all;stalled event handler;Event handler content attribute
onstorage;body;storage event handler for Window object;Event handler content attribute
onsubmit;all;submit event handler;Event handler content attribute
onsuspend;all;suspend event handler;Event handler content attribute
ontimeupdate;all;timeupdate event handler;Event handler content attribute
ontoggle;all;toggle event handler;Event handler content attribute
onunload;body;unload event handler for Window object;Event handler content attribute
onvolumechange;all;volumechange event handler;Event handler content attribute
onwaiting;all;waiting event handler;Event handler content attribute"""
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import InvalidTag, UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.primitives import interfaces
from cryptography.hazmat.primitives.ciphers.modes import GCM
@utils.register_interface(interfaces.CipherContext)
@utils.register_interface(interfaces.AEADCipherContext)
@utils.register_interface(interfaces.AEADEncryptionContext)
class _CipherContext(object):
    """One-shot symmetric encrypt/decrypt context over OpenSSL's EVP API.

    Wraps an ``EVP_CIPHER_CTX`` for a single operation. Padding is disabled
    here on purpose; it is applied/stripped by higher layers of the API.
    NOTE(review): OpenSSL return codes are checked with ``assert``, which is
    stripped under ``python -O`` -- kept as-is to preserve upstream style.
    """
    # Values passed as the ``enc`` argument of EVP_CipherInit_ex.
    _ENCRYPT = 1
    _DECRYPT = 0
    def __init__(self, backend, cipher, mode, operation):
        self._backend = backend
        self._cipher = cipher
        self._mode = mode
        self._operation = operation
        # GCM authentication tag; populated by finalize() when encrypting.
        self._tag = None
        # block_size comes from the algorithm definition (in bits for block
        # ciphers per BlockCipherAlgorithm); stream ciphers use 1.
        if isinstance(self._cipher, interfaces.BlockCipherAlgorithm):
            self._block_size = self._cipher.block_size
        else:
            self._block_size = 1
        ctx = self._backend._lib.EVP_CIPHER_CTX_new()
        # Tie the context's lifetime to the cffi handle so it is freed even
        # if one of the checks below raises.
        ctx = self._backend._ffi.gc(
            ctx, self._backend._lib.EVP_CIPHER_CTX_free
        )
        registry = self._backend._cipher_registry
        try:
            # Look up the (cipher class, mode class) pair registered by the
            # backend; missing pairs mean the combination is unsupported.
            adapter = registry[type(cipher), type(mode)]
        except KeyError:
            raise UnsupportedAlgorithm(
                "cipher {0} in {1} mode is not supported "
                "by this backend.".format(
                    cipher.name, mode.name if mode else mode),
                _Reasons.UNSUPPORTED_CIPHER
            )
        evp_cipher = adapter(self._backend, cipher, mode)
        # The adapter may still return NULL when this OpenSSL build lacks
        # the cipher (e.g. stripped-down distributions).
        if evp_cipher == self._backend._ffi.NULL:
            raise UnsupportedAlgorithm(
                "cipher {0} in {1} mode is not supported "
                "by this backend.".format(
                    cipher.name, mode.name if mode else mode),
                _Reasons.UNSUPPORTED_CIPHER
            )
        if isinstance(mode, interfaces.ModeWithInitializationVector):
            iv_nonce = mode.initialization_vector
        elif isinstance(mode, interfaces.ModeWithNonce):
            iv_nonce = mode.nonce
        else:
            iv_nonce = self._backend._ffi.NULL
        # begin init with cipher and operation type
        res = self._backend._lib.EVP_CipherInit_ex(ctx, evp_cipher,
                                                   self._backend._ffi.NULL,
                                                   self._backend._ffi.NULL,
                                                   self._backend._ffi.NULL,
                                                   operation)
        assert res != 0
        # set the key length to handle variable key ciphers
        res = self._backend._lib.EVP_CIPHER_CTX_set_key_length(
            ctx, len(cipher.key)
        )
        assert res != 0
        if isinstance(mode, GCM):
            # GCM: declare the IV length before the IV itself is set below.
            res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                ctx, self._backend._lib.EVP_CTRL_GCM_SET_IVLEN,
                len(iv_nonce), self._backend._ffi.NULL
            )
            assert res != 0
            if operation == self._DECRYPT:
                # For decryption the expected tag must be supplied up front
                # so EVP_CipherFinal_ex can authenticate.
                res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                    ctx, self._backend._lib.EVP_CTRL_GCM_SET_TAG,
                    len(mode.tag), mode.tag
                )
                assert res != 0
        # pass key/iv
        res = self._backend._lib.EVP_CipherInit_ex(
            ctx,
            self._backend._ffi.NULL,
            self._backend._ffi.NULL,
            cipher.key,
            iv_nonce,
            operation
        )
        assert res != 0
        # We purposely disable padding here as it's handled higher up in the
        # API.
        self._backend._lib.EVP_CIPHER_CTX_set_padding(ctx, 0)
        self._ctx = ctx
    def update(self, data):
        """Feed ``data`` through the cipher and return the produced bytes."""
        # OpenSSL 0.9.8e has an assertion in its EVP code that causes it
        # to SIGABRT if you call update with an empty byte string. This can be
        # removed when we drop support for 0.9.8e (CentOS/RHEL 5). This branch
        # should be taken only when length is zero and mode is not GCM because
        # AES GCM can return improper tag values if you don't call update
        # with empty plaintext when authenticating AAD for ...reasons.
        if len(data) == 0 and not isinstance(self._mode, GCM):
            return b""
        # Worst-case output size: input plus one block minus one byte
        # (buffered partial block from a previous update may be flushed).
        buf = self._backend._ffi.new("unsigned char[]",
                                     len(data) + self._block_size - 1)
        outlen = self._backend._ffi.new("int *")
        res = self._backend._lib.EVP_CipherUpdate(self._ctx, buf, outlen, data,
                                                  len(data))
        assert res != 0
        return self._backend._ffi.buffer(buf)[:outlen[0]]
    def finalize(self):
        """Finish the operation and return any remaining output bytes.

        Raises InvalidTag when GCM authentication fails, ValueError when the
        input was not a multiple of the block length.
        """
        # OpenSSL 1.0.1 on Ubuntu 12.04 (and possibly other distributions)
        # appears to have a bug where you must make at least one call to update
        # even if you are only using authenticate_additional_data or the
        # GCM tag will be wrong. An (empty) call to update resolves this
        # and is harmless for all other versions of OpenSSL.
        if isinstance(self._mode, GCM):
            self.update(b"")
        buf = self._backend._ffi.new("unsigned char[]", self._block_size)
        outlen = self._backend._ffi.new("int *")
        res = self._backend._lib.EVP_CipherFinal_ex(self._ctx, buf, outlen)
        if res == 0:
            errors = self._backend._consume_errors()
            # A failed final with no queued OpenSSL errors in GCM mode means
            # the authentication tag did not match.
            if not errors and isinstance(self._mode, GCM):
                raise InvalidTag
            assert errors
            # Distinguish the well-known "not block aligned" error from
            # anything else (which is surfaced as an unknown backend error).
            if errors[0][1:] == (
                self._backend._lib.ERR_LIB_EVP,
                self._backend._lib.EVP_F_EVP_ENCRYPTFINAL_EX,
                self._backend._lib.EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH
            ) or errors[0][1:] == (
                self._backend._lib.ERR_LIB_EVP,
                self._backend._lib.EVP_F_EVP_DECRYPTFINAL_EX,
                self._backend._lib.EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH
            ):
                raise ValueError(
                    "The length of the provided data is not a multiple of "
                    "the block length."
                )
            else:
                raise self._backend._unknown_error(errors[0])
        if (isinstance(self._mode, GCM) and
                self._operation == self._ENCRYPT):
            # _block_size is in bits here; the GCM tag is one block long.
            block_byte_size = self._block_size // 8
            tag_buf = self._backend._ffi.new(
                "unsigned char[]", block_byte_size
            )
            res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                self._ctx, self._backend._lib.EVP_CTRL_GCM_GET_TAG,
                block_byte_size, tag_buf
            )
            assert res != 0
            self._tag = self._backend._ffi.buffer(tag_buf)[:]
        res = self._backend._lib.EVP_CIPHER_CTX_cleanup(self._ctx)
        assert res == 1
        return self._backend._ffi.buffer(buf)[:outlen[0]]
    def authenticate_additional_data(self, data):
        """Feed AAD to an AEAD mode (output buffer NULL => AAD-only call)."""
        outlen = self._backend._ffi.new("int *")
        res = self._backend._lib.EVP_CipherUpdate(
            self._ctx, self._backend._ffi.NULL, outlen, data, len(data)
        )
        assert res != 0
    @property
    def tag(self):
        # GCM tag bytes after an encrypt finalize(); None otherwise.
        return self._tag
@utils.register_interface(interfaces.CipherContext)
class _AESCTRCipherContext(object):
    """
    This is needed to provide support for AES CTR mode in OpenSSL 0.9.8. It can
    be removed when we drop 0.9.8 support (RHEL5 extended life ends 2020).
    """
    def __init__(self, backend, cipher, mode):
        self._backend = backend
        self._key = self._backend._ffi.new("AES_KEY *")
        assert self._key != self._backend._ffi.NULL
        # CTR always uses the *encrypt* key schedule, even for decryption:
        # the keystream is simply XORed with the input either way.
        res = self._backend._lib.AES_set_encrypt_key(
            cipher.key, len(cipher.key) * 8, self._key
        )
        # AES_set_encrypt_key returns 0 on success (unlike the EVP APIs).
        assert res == 0
        # Scratch state threaded through AES_ctr128_encrypt calls: the
        # encrypted counter block, the running counter/nonce, and the byte
        # offset into the current keystream block.
        self._ecount = self._backend._ffi.new("char[]", 16)
        self._nonce = self._backend._ffi.new("char[16]", mode.nonce)
        self._num = self._backend._ffi.new("unsigned int *", 0)
    def update(self, data):
        """XOR ``data`` with the AES-CTR keystream; output length == input length."""
        buf = self._backend._ffi.new("unsigned char[]", len(data))
        self._backend._lib.AES_ctr128_encrypt(
            data, buf, len(data), self._key, self._nonce,
            self._ecount, self._num
        )
        return self._backend._ffi.buffer(buf)[:]
    def finalize(self):
        # Drop references so the cffi-owned buffers can be collected;
        # CTR is a stream mode, so there is never trailing output.
        self._key = None
        self._ecount = None
        self._nonce = None
        self._num = None
        return b""
| |
from qrcode import constants, exceptions, util
from qrcode.image.base import BaseImage
def make(data=None, **kwargs):
    """Convenience shortcut: build a QRCode holding *data* and render it.

    Extra keyword arguments are forwarded to the QRCode constructor.
    """
    code = QRCode(**kwargs)
    code.add_data(data)
    image = code.make_image()
    return image
class QRCode:
    """Incrementally builds a QR Code symbol.

    Typical use: add_data() one or more times, then make_image(),
    print_tty() or get_matrix(). ``version`` (1-40) may be None to let
    best_fit() pick the smallest size that holds the data.
    """
    def __init__(self, version=None,
                 error_correction=constants.ERROR_CORRECT_M,
                 box_size=10, border=4,
                 image_factory=None):
        # ``version and int(version)`` keeps None as None (auto-fit later).
        self.version = version and int(version)
        self.error_correction = int(error_correction)
        self.box_size = int(box_size)
        # Spec says border should be at least four boxes wide, but allow for
        # any (e.g. for producing printable QR codes).
        self.border = int(border)
        self.image_factory = image_factory
        if image_factory is not None:
            assert issubclass(image_factory, BaseImage)
        self.clear()
    def clear(self):
        """
        Reset the internal data.
        """
        self.modules = None
        self.modules_count = 0
        self.data_cache = None
        self.data_list = []
    def add_data(self, data, optimize=20):
        """
        Add data to this QR Code.
        :param optimize: Data will be split into multiple chunks to optimize
            the QR size by finding to more compressed modes of at least this
            length. Set to ``0`` to avoid optimizing at all.
        """
        if isinstance(data, util.QRData):
            self.data_list.append(data)
        else:
            if optimize:
                self.data_list.extend(util.optimal_data_chunks(data))
            else:
                self.data_list.append(util.QRData(data))
        # Any previously compiled payload is stale once data changes.
        self.data_cache = None
    def make(self, fit=True):
        """
        Compile the data into a QR Code array.
        :param fit: If ``True`` (or if a size has not been provided), find the
            best fit for the data to avoid data overflow errors.
        """
        if fit or not self.version:
            self.best_fit(start=self.version)
        self.makeImpl(False, self.best_mask_pattern())
    def makeImpl(self, test, mask_pattern):
        """Lay out the full module matrix for the current version.

        :param test: when True, function patterns are placed but data bits
            are blanked (used while evaluating mask patterns).
        :param mask_pattern: mask pattern index 0-7 applied to data bits.
        """
        # A symbol of version v is (4v + 17) modules on a side.
        self.modules_count = self.version * 4 + 17
        self.modules = [None] * self.modules_count
        for row in range(self.modules_count):
            self.modules[row] = [None] * self.modules_count
            for col in range(self.modules_count):
                self.modules[row][col] = None  # (col + row) % 3
        # Finder patterns in three corners, then the fixed patterns.
        self.setup_position_probe_pattern(0, 0)
        self.setup_position_probe_pattern(self.modules_count - 7, 0)
        self.setup_position_probe_pattern(0, self.modules_count - 7)
        # NOTE: method name typo ("sutup") kept for interface compatibility.
        self.sutup_position_adjust_pattern()
        self.setup_timing_pattern()
        self.setup_type_info(test, mask_pattern)
        if self.version >= 7:
            self.setup_type_number(test)
        if self.data_cache is None:
            self.data_cache = util.create_data(
                self.version, self.error_correction, self.data_list)
        self.map_data(self.data_cache, mask_pattern)
    def setup_position_probe_pattern(self, row, col):
        """Draw a 7x7 finder pattern (plus separator ring) at (row, col)."""
        for r in range(-1, 8):
            if row + r <= -1 or self.modules_count <= row + r:
                continue
            for c in range(-1, 8):
                if col + c <= -1 or self.modules_count <= col + c:
                    continue
                # Dark on the outer ring and the inner 3x3 core; light on the
                # ring between them and the surrounding separator.
                if (0 <= r and r <= 6 and (c == 0 or c == 6)
                        or (0 <= c and c <= 6 and (r == 0 or r == 6))
                        or (2 <= r and r <= 4 and 2 <= c and c <= 4)):
                    self.modules[row + r][col + c] = True
                else:
                    self.modules[row + r][col + c] = False
    def best_fit(self, start=None):
        """
        Find the minimum size required to fit in the data.
        """
        size = start or 1
        while True:
            try:
                self.data_cache = util.create_data(
                    size, self.error_correction, self.data_list)
            except exceptions.DataOverflowError:
                size += 1
            else:
                self.version = size
                return size
    def best_mask_pattern(self):
        """
        Find the most efficient mask pattern.
        """
        min_lost_point = 0
        pattern = 0
        for i in range(8):
            self.makeImpl(True, i)
            lost_point = util.lost_point(self.modules)
            if i == 0 or min_lost_point > lost_point:
                min_lost_point = lost_point
                pattern = i
        return pattern
    def print_tty(self, out=None):
        """
        Output the QR Code to a TTY (potentially useful for debugging).
        If the data has not been compiled yet, make it first.
        :raises OSError: if ``out`` is not a tty.
        """
        if out is None:
            import sys
            out = sys.stdout
        if not out.isatty():
            raise OSError("Not a tty")
        if self.data_cache is None:
            self.make()
        modcount = self.modules_count
        # White border row above, then one line per module row, then below.
        out.write("\x1b[1;47m" + (" " * (modcount * 2 + 4)) + "\x1b[0m\n")
        for r in range(modcount):
            out.write("\x1b[1;47m  \x1b[40m")
            for c in range(modcount):
                if self.modules[r][c]:
                    out.write("  ")
                else:
                    out.write("\x1b[1;47m  \x1b[40m")
            out.write("\x1b[1;47m  \x1b[0m\n")
        out.write("\x1b[1;47m" + (" " * (modcount * 2 + 4)) + "\x1b[0m\n")
        out.flush()
    def make_image(self, image_factory=None, **kwargs):
        """
        Make an image from the QR Code data.
        If the data has not been compiled yet, make it first.
        """
        if self.data_cache is None:
            self.make()
        if image_factory is not None:
            assert issubclass(image_factory, BaseImage)
        else:
            image_factory = self.image_factory
        if image_factory is None:
            # Use PIL by default
            from qrcode.image.pil import PilImage
            image_factory = PilImage
        im = image_factory(
            self.border, self.modules_count, self.box_size, **kwargs)
        for r in range(self.modules_count):
            for c in range(self.modules_count):
                if self.modules[r][c]:
                    im.drawrect(r, c)
        return im
    def setup_timing_pattern(self):
        """Draw the alternating timing lines in row 6 and column 6."""
        for r in range(8, self.modules_count - 8):
            if self.modules[r][6] is not None:
                continue
            self.modules[r][6] = (r % 2 == 0)
        for c in range(8, self.modules_count - 8):
            if self.modules[6][c] is not None:
                continue
            self.modules[6][c] = (c % 2 == 0)
    def sutup_position_adjust_pattern(self):
        """Draw the 5x5 alignment patterns at the version's grid positions.

        (Name typo preserved: callers and subclasses reference it as-is.)
        """
        pos = util.pattern_position(self.version)
        for i in range(len(pos)):
            for j in range(len(pos)):
                row = pos[i]
                col = pos[j]
                # Skip positions already occupied by finder patterns.
                if self.modules[row][col] is not None:
                    continue
                for r in range(-2, 3):
                    for c in range(-2, 3):
                        if (r == -2 or r == 2 or c == -2 or c == 2 or
                                (r == 0 and c == 0)):
                            self.modules[row + r][col + c] = True
                        else:
                            self.modules[row + r][col + c] = False
    def setup_type_number(self, test):
        """Place the 18-bit version information blocks (versions >= 7)."""
        bits = util.BCH_type_number(self.version)
        for i in range(18):
            mod = (not test and ((bits >> i) & 1) == 1)
            self.modules[i // 3][i % 3 + self.modules_count - 8 - 3] = mod
        for i in range(18):
            mod = (not test and ((bits >> i) & 1) == 1)
            self.modules[i % 3 + self.modules_count - 8 - 3][i // 3] = mod
    def setup_type_info(self, test, mask_pattern):
        """Place the 15-bit format information (EC level + mask pattern)."""
        data = (self.error_correction << 3) | mask_pattern
        bits = util.BCH_type_info(data)
        # vertical
        for i in range(15):
            mod = (not test and ((bits >> i) & 1) == 1)
            if i < 6:
                self.modules[i][8] = mod
            elif i < 8:
                self.modules[i + 1][8] = mod
            else:
                self.modules[self.modules_count - 15 + i][8] = mod
        # horizontal
        for i in range(15):
            mod = (not test and ((bits >> i) & 1) == 1)
            if i < 8:
                self.modules[8][self.modules_count - i - 1] = mod
            elif i < 9:
                self.modules[8][15 - i - 1 + 1] = mod
            else:
                self.modules[8][15 - i - 1] = mod
        # fixed module
        self.modules[self.modules_count - 8][8] = (not test)
    def map_data(self, data, mask_pattern):
        """Zig-zag the data bits into unreserved modules, applying the mask."""
        inc = -1
        row = self.modules_count - 1
        bitIndex = 7
        byteIndex = 0
        mask_func = util.mask_func(mask_pattern)
        # Two-module-wide columns from right to left, skipping the vertical
        # timing column at index 6.
        for col in range(self.modules_count - 1, 0, -2):
            if col <= 6:
                col -= 1
            while True:
                for c in range(2):
                    if self.modules[row][col - c] is None:
                        dark = False
                        if byteIndex < len(data):
                            dark = (((data[byteIndex] >> bitIndex) & 1) == 1)
                        if mask_func(row, col - c):
                            dark = not dark
                        self.modules[row][col - c] = dark
                        bitIndex -= 1
                        if bitIndex == -1:
                            byteIndex += 1
                            bitIndex = 7
                row += inc
                if row < 0 or self.modules_count <= row:
                    row -= inc
                    inc = -inc
                    break
    def get_matrix(self):
        """
        Return the QR Code as a multidimensonal array, including the border.
        To return the array without a border, set ``self.border`` to 0 first.
        """
        if self.data_cache is None:
            self.make()
        if not self.border:
            return self.modules
        width = len(self.modules) + self.border*2
        # Build each border row independently. The previous
        # ``[[False]*width] * self.border`` repeated the *same* row object,
        # so mutating one border row in the returned matrix silently
        # changed the others.
        code = [[False]*width for _ in range(self.border)]
        x_border = [False]*self.border
        for module in self.modules:
            code.append(x_border + module + x_border)
        code += [[False]*width for _ in range(self.border)]
        return code
| |
from __future__ import print_function, unicode_literals, division
import math
from time import sleep
from utils import ujoin, range1, enumerate1
nl = '\n'     # newline shorthand used by the board renderers below
space = ' '   # single-space shorthand used when joining tile cells
class BaseTile(object):
    """Common ancestor for board tiles.

    On construction an attribute named after the lowercased concrete class
    is set to True, so e.g. a ``Blank`` instance automatically satisfies
    ``tile.blank``.
    """
    def __init__(self, loc=None):
        self.loc = loc
        marker = type(self).__name__.lower()
        setattr(self, marker, True)
class Loc(object):
    """A location on the game board.

    Treat instances as immutable: mutating a location in place breeds
    hard-to-track bugs, so ``moved()`` returns a brand-new instance
    instead of shifting this one.
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.loc = (x, y)
    def __repr__(self):
        return str(self.loc)
    def __iter__(self):
        # Allows ``x, y = loc`` unpacking.
        return iter(self.loc)
    def __eq__(self, other):
        # Compare by coordinates; anything without a ``loc`` compares unequal.
        return self.loc == getattr(other, "loc", None)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Hash mirrors equality (both delegate to the coordinate tuple).
        return hash(self.loc)
    def moved(self, x, y):
        """ Return a new Loc moved according to delta modifiers `x` and `y`,
            e.g. 1,0 to move right.
        """
        return Loc(x + self.x, y + self.y)
# Directions reuse Loc's (x, y) mechanics (e.g. Dir(0, 1) = down); the alias
# exists purely so call sites read clearly.
Dir = Loc
class BaseBoard(object):
""" Base Board for regular and stackable boards.
TODO: add various scrolling and visual area options.
"""
stackable = False
board_initialized = False
def __init__(self, size, num_grid=False, padding=(0, 0), pause_time=0.2, screen_sep=5):
if isinstance(size, int):
size = size, size # handle square board
self.width, self.height = size
self.num_grid = num_grid
self.xpad = padding[0]
self.ypad = padding[1]
self.pause_time = pause_time
self.screen_sep = screen_sep
self.init_tiles = False
self.tiletpl = "%%%ds" % (padding[0] + 1)
self.directions()
def __iter__(self):
return ( self[Loc(x, y)] for y in range(self.height) for x in range(self.width) )
def tiles(self, *attrs):
return [ t for t in self if all(getattr(t, attr) for attr in attrs) ]
def tiles_not(self, *attrs):
return [ t for t in self if all(not getattr(t, attr) for attr in attrs) ]
def locations(self, *attrs):
locs = (Loc(x, y) for y in range(self.height) for x in range(self.width))
return [ l for l in locs if all(getattr(self[l], attr) for attr in attrs) ]
def locations_not(self, *attrs):
locs = (Loc(x, y) for y in range(self.height) for x in range(self.width))
return [ l for l in locs if all(not getattr(self[l], attr) for attr in attrs) ]
def ploc(self, tile_loc):
"""Parse location out of tile-or-loc `tile_loc`."""
if isinstance(tile_loc, Loc) : return tile_loc
else : return tile_loc.loc
def draw(self, pause=None):
pause = pause or self.pause_time
print(nl * self.screen_sep)
if self.num_grid:
print(space, space*(self.xpad + 1), ujoin( range1(self.width), space, self.tiletpl ), nl * self.ypad)
for n, row in enumerate1(self.board):
args = [self.tiletpl % n] if self.num_grid else []
if self.stackable:
row = (tile[-1] for tile in row)
args = [space] + args + [ujoin(row, space, self.tiletpl), nl * self.ypad]
print(*args)
self.status()
sleep(pause)
def status(self):
pass
def valid(self, loc):
return bool( loc.x >= 0 and loc.y >= 0 and loc.x <= self.width-1 and loc.y <= self.height-1 )
def directions(self):
"""Create list and dict of eight directions, going from up clockwise."""
dirs = [(0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1)]
self.dirlist = [Dir(*d) for d in (dirs[0], dirs[2], dirs[4], dirs[6])]
self.dirlist2 = [Dir(*d) for d in dirs]
self.dirnames = dict(zip(self.dirlist2, "up ru right rd down ld left lu".split()))
def neighbour_locs(self, tile_loc):
"""Return the list of neighbour locations of `tile`."""
x, y = self.ploc(tile_loc)
coords = (-1,0,1)
locs = set((x+n, y+m) for n in coords for m in coords) - set( [(x,y)] )
return [ Loc(*tpl) for tpl in locs if self.valid(Loc(*tpl)) ]
def neighbours(self, tile_loc):
"""Return the list of neighbours of `tile`."""
return [self[loc] for loc in self.neighbour_locs(tile_loc)]
def neighbour_cross_locs(self, tile_loc):
"""Return a generator of neighbour 'cross' (i.e. no diagonal) locations of `tile`."""
x, y = self.ploc(tile_loc)
locs = ((x-1, y), (x+1, y), (x, y-1), (x, y+1))
return [ Loc(*tpl) for tpl in locs if self.valid(Loc(*tpl)) ]
def cross_neighbours(self, tile_loc):
"""Return the generator of 'cross' (i.e. no diagonal) neighbours of `tile`."""
return (self[loc] for loc in self.neighbour_cross_locs(tile_loc))
def make_tile(self, loc):
"""Make a tile using `self.def_tile`. If def_tile is simply a string, return it, otherwise instantiate with x, y as arguments."""
try:
isstr = isinstance(self.def_tile, basestring)
except NameError:
isstr = isinstance(self.def_tile, str)
return self.def_tile if isstr else self.def_tile(loc)
def move(self, tile_loc, newloc):
loc = self.ploc(tile_loc)
item = self[loc]
self[newloc] = item
self[loc] = self.make_tile(loc)
if hasattr(item, "loc"):
item.loc = newloc
    def nextloc(self, tile_loc, dir, n=1, wrap=False):
        """Return location next to `tile_loc` point in direction `dir`.

        The step `dir` (an object with x/y components) is applied `n` times.
        With `wrap=True` coordinates are wrapped around the board edges; the
        while loop shifts one board-width/-height per pass, so it also handles
        steps that overshoot by more than one board size.  Without wrapping,
        None is returned for an off-board result.
        """
        loc = self.ploc(tile_loc)
        x = loc.x + dir.x*n
        y = loc.y + dir.y*n
        if wrap:
            # repeat until back inside the board; each pass moves at most one
            # board-size along each axis
            while not self.valid(Loc(x,y)):
                if x > (self.width - 1) : x -= self.width
                elif x < 0 : x += self.width
                if y > (self.height - 1) : y -= self.height
                elif y < 0 : y += self.height
        loc = Loc(x, y)
        return loc if self.valid(loc) else None
def next_tile(self, tile_loc, dir, n=1):
loc = self.nextloc(tile_loc, dir, n)
return self[loc] if loc else None
def dist(self, tile_loc1, tile_loc2):
l1, l2 = self.ploc(tile_loc1), self.ploc(tile_loc2)
return math.sqrt( abs(l2.x - l1.x)**2 + abs(l2.y - l1.y)**2 )
class Board(BaseBoard):
    """A board with exactly one tile per location (see StackableBoard for stacks)."""

    def __init__(self, size, def_tile, **kwargs):
        """`size` is (width, height); `def_tile` is a string or a callable taking a Loc."""
        super(Board, self).__init__(size, **kwargs)
        self.def_tile = def_tile
        xrng, yrng = range(self.width), range(self.height)
        # Placeholder grid; real tiles are created lazily by init_board().
        self.board = [ [None for x in xrng] for y in yrng ]

    def __getitem__(self, loc):
        self.init_board()
        return self.board[loc.y][loc.x]

    def __setitem__(self, tile_loc, item):
        self.init_board()
        loc = self.ploc(tile_loc)
        self.board[loc.y][loc.x] = item

    def __delitem__(self, tile_loc):
        # BUG FIX: lazily initialize like __getitem__/__setitem__ do; previously
        # a delete performed before any other access wrote into the placeholder
        # grid and was then silently discarded by the first real init_board().
        self.init_board()
        loc = self.ploc(tile_loc)
        # "deleting" restores the default tile rather than leaving a hole
        self.board[loc.y][loc.x] = self.make_tile(loc)

    def init_board(self):
        """ To allow tiles that place themselves on the board, board is first initialized with None values in __init__,
            then on the first __setitem__ or __getitem__, init_board() runs; self.board_initialized needs to be set
            immediately to avoid recursion.
        """
        if not self.board_initialized:
            self.board_initialized = True
            xrng, yrng = range(self.width), range(self.height)
            self.board = [ [self.make_tile(Loc(x, y)) for x in xrng] for y in yrng ]
class StackableBoard(BaseBoard):
    """A board whose cells hold a stack (list) of tiles; the top of the stack
    is the cell's visible tile."""

    stackable = True

    def __init__(self, size, def_tile, **kwargs):
        """`size` is (width, height); `def_tile` is a string or a callable taking a Loc."""
        super(StackableBoard, self).__init__(size, **kwargs)
        self.def_tile = def_tile
        xrng, yrng = range(self.width), range(self.height)
        # Placeholder stacks; real tiles are created lazily by init_board().
        self.board = [ [[None] for x in xrng] for y in yrng ]

    def __getitem__(self, loc):
        self.init_board()
        # the top of the stack is the "current" tile of a cell
        return self.board[loc.y][loc.x][-1]

    def __setitem__(self, tile_loc, item):
        self.init_board()
        loc = self.ploc(tile_loc)
        self.board[loc.y][loc.x].append(item)

    def __delitem__(self, tile_loc):
        # BUG FIX: lazily initialize like __getitem__/__setitem__ do; previously
        # a delete before any other access popped from the placeholder stacks and
        # the change was then silently discarded by the first real init_board().
        self.init_board()
        loc = self.ploc(tile_loc)
        del self.board[loc.y][loc.x][-1]

    def init_board(self):
        """Lazily replace the placeholder grid with single-tile stacks; the flag
        must be set first to avoid recursion through make_tile/__setitem__."""
        if not self.board_initialized:
            self.board_initialized = True
            xrng, yrng = range(self.width), range(self.height)
            self.board = [ [[self.make_tile(Loc(x, y))] for x in xrng] for y in yrng ]

    def items(self, tile_loc):
        """Return the full stack (list) of tiles at `tile_loc`."""
        self.init_board()  # consistency fix: ensure real tiles exist first
        loc = self.ploc(tile_loc)
        return self.board[loc.y][loc.x]

    def move(self, tile_loc, newloc):
        """Move the top tile at `tile_loc` onto the stack at `newloc`."""
        loc = self.ploc(tile_loc)
        item = self[loc]
        self[newloc] = item
        self.items(loc).remove(item)
        if hasattr(item, "loc"):
            item.loc = newloc
| |
"""
The batch interface allows insert, update, and remove operations to be performed
in batches. This allows a convenient mechanism for streaming updates or doing a
large number of operations while reducing number of RPC roundtrips.
Batch mutator objects are synchronized and can be safely passed around threads.
.. code-block:: python
>>> b = cf.batch(queue_size=10)
>>> b.insert('key1', {'col1':'value11', 'col2':'value21'})
>>> b.insert('key2', {'col1':'value12', 'col2':'value22'}, ttl=15)
>>> b.remove('key1', ['col2'])
>>> b.remove('key2')
>>> b.send()
One can use the `queue_size` argument to control how many mutations will be
queued before an automatic :meth:`send` is performed. This allows simple streaming
of updates. If set to ``None``, automatic checkpoints are disabled. Default is 100.
Supercolumns are supported:
.. code-block:: python
>>> b = scf.batch()
>>> b.insert('key1', {'supercol1': {'colA':'value1a', 'colB':'value1b'},
...                   'supercol2': {'colA':'value2a', 'colB':'value2b'}})
>>> b.remove('key1', ['colA'], 'supercol1')
>>> b.send()
You may also create a :class:`.Mutator` directly, allowing operations
on multiple column families:
.. code-block:: python
>>> b = Mutator(pool)
>>> b.insert(cf, 'key1', {'col1':'value1', 'col2':'value2'})
>>> b.insert(supercf, 'key1', {'subkey1': {'col1':'value1', 'col2':'value2'}})
>>> b.send()
.. note:: This interface does not implement atomic operations across column
families. All the limitations of the `batch_mutate` Thrift API call
applies. Remember, a mutation in Cassandra is always atomic per key per
column family only.
.. note:: If a single operation in a batch fails, the whole batch fails.
In Python >= 2.5, mutators can be used as context managers, where an implicit
:meth:`send` will be called upon exit.
.. code-block:: python
>>> with cf.batch() as b:
... b.insert('key1', {'col1':'value11', 'col2':'value21'})
... b.insert('key2', {'col1':'value12', 'col2':'value22'})
Calls to :meth:`insert` and :meth:`remove` can also be chained:
.. code-block:: python
>>> cf.batch().remove('foo').remove('bar').send()
"""
import threading
from pycassa.cassandra.ttypes import (Column, ColumnOrSuperColumn,
CounterColumn, CounterSuperColumn,
ConsistencyLevel, Deletion, Mutation,
SlicePredicate, SuperColumn)
__all__ = ['Mutator', 'CfMutator']
class Mutator(object):
    """
    Batch update convenience mechanism.

    Queues insert/update/remove operations and executes them when the queue
    is full or `send` is called explicitly.
    """

    def __init__(self, pool, queue_size=100, write_consistency_level=None):
        """Creates a new Mutator object.

        `pool` is the :class:`~pycassa.pool.ConnectionPool` that will be used
        for operations.

        After `queue_size` operations, :meth:`send()` will be executed
        automatically.  Use 0 to disable automatic sends.
        """
        self._buffer = []
        self._lock = threading.RLock()
        self.pool = pool
        self.limit = queue_size
        if write_consistency_level is None:
            self.write_consistency_level = ConsistencyLevel.ONE
        else:
            self.write_consistency_level = write_consistency_level

    def __enter__(self):
        # Support "with cf.batch() as b:" usage (see module docstring).
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush any queued mutations when the "with" block exits.
        self.send()

    def _enqueue(self, key, column_family, mutations):
        """Append `mutations` for (`key`, `column_family`) to the buffer and
        auto-send when the queue limit is reached.  Returns self (fluent)."""
        self._lock.acquire()
        try:
            mutation = (key, column_family.column_family, mutations)
            self._buffer.append(mutation)
            if self.limit and len(self._buffer) >= self.limit:
                self.send()
        finally:
            self._lock.release()
        return self

    def send(self, write_consistency_level=None):
        """ Sends all operations currently in the batch and clears the batch. """
        if write_consistency_level is None:
            write_consistency_level = self.write_consistency_level
        mutations = {}
        conn = None
        self._lock.acquire()
        try:
            # Regroup the flat buffer into {key: {column_family: [mutation, ...]}}
            # as expected by the Thrift batch_mutate call.
            for key, column_family, cols in self._buffer:
                mutations.setdefault(key, {}).setdefault(column_family, []).extend(cols)
            if mutations:
                conn = self.pool.get()
                conn.batch_mutate(mutations, write_consistency_level)
            self._buffer = []
        finally:
            if conn:
                conn.return_to_pool()
            self._lock.release()

    def _make_mutations_insert(self, column_family, columns, timestamp, ttl):
        """Yield Thrift Mutation objects for inserting `columns`, handling
        standard, super, and counter column families."""
        _pack_name = column_family._pack_name
        _pack_value = column_family._pack_value
        _get_type = column_family._get_data_type_for_col
        if column_family.super:
            # .items() works on both Python 2 and 3 (original used iteritems())
            for c, v in columns.items():
                cos = ColumnOrSuperColumn()
                dtype = _get_type(c)
                if dtype == 'CounterColumnType':
                    subc = [CounterColumn(_pack_name(subname), subvalue)
                            for subname, subvalue in v.items()]
                    cos.counter_super_column = CounterSuperColumn(name=_pack_name(c, True),
                                                                  columns=subc)
                else:
                    subc = [Column(name=_pack_name(subname),
                                   value=_pack_value(subvalue, subname),
                                   timestamp=timestamp, ttl=ttl)
                            for subname, subvalue in v.items()]
                    cos.super_column = SuperColumn(name=_pack_name(c, True),
                                                   columns=subc)
                yield Mutation(column_or_supercolumn=cos)
        else:
            for c, v in columns.items():
                cos = ColumnOrSuperColumn()
                dtype = _get_type(c)
                if dtype == 'CounterColumnType':
                    cos.counter_column = CounterColumn(_pack_name(c), v)
                else:
                    cos.column = Column(name=_pack_name(c), value=_pack_value(v, c),
                                        timestamp=timestamp, ttl=ttl)
                yield Mutation(column_or_supercolumn=cos)

    def insert(self, column_family, key, columns, timestamp=None, ttl=None):
        """
        Adds a single row insert to the batch.

        `column_family` is the :class:`~pycassa.columnfamily.ColumnFamily`
        that the insert will be executed on.
        """
        if columns:
            if timestamp is None:  # identity test instead of "== None" (PEP 8)
                timestamp = column_family.timestamp()
            packed_key = column_family._pack_key(key)
            mutations = self._make_mutations_insert(column_family, columns,
                                                    timestamp, ttl)
            self._enqueue(packed_key, column_family, mutations)
        return self

    def remove(self, column_family, key, columns=None, super_column=None, timestamp=None):
        """
        Adds a single row remove to the batch.

        `column_family` is the :class:`~pycassa.columnfamily.ColumnFamily`
        that the remove will be executed on.
        """
        if timestamp is None:
            timestamp = column_family.timestamp()
        deletion = Deletion(timestamp=timestamp)
        _pack_name = column_family._pack_name
        if super_column:
            deletion.super_column = _pack_name(super_column, True)
        if columns:
            # sub-column names must be packed as such when inside a supercolumn
            packed_cols = [_pack_name(col, column_family.super and not super_column)
                           for col in columns]
            deletion.predicate = SlicePredicate(column_names=packed_cols)
        mutation = Mutation(deletion=deletion)
        packed_key = column_family._pack_key(key)
        self._enqueue(packed_key, column_family, (mutation,))
        return self
class CfMutator(Mutator):
    """
    A :class:`~pycassa.batch.Mutator` bound to a single column family.
    """

    def __init__(self, column_family, queue_size=100, write_consistency_level=None):
        """Create a batch mutator for `column_family`.

        `column_family` is the :class:`~pycassa.columnfamily.ColumnFamily`
        that all queued operations will be executed on.
        """
        if not write_consistency_level:
            # fall back to the column family's own default consistency level
            write_consistency_level = column_family.write_consistency_level
        Mutator.__init__(self, column_family.pool, queue_size=queue_size,
                         write_consistency_level=write_consistency_level)
        self._column_family = column_family

    def insert(self, key, cols, timestamp=None, ttl=None):
        """ Adds a single row insert to the batch. """
        return Mutator.insert(self, self._column_family, key, cols,
                              timestamp=timestamp, ttl=ttl)

    def remove(self, key, columns=None, super_column=None, timestamp=None):
        """ Adds a single row remove to the batch. """
        return Mutator.remove(self, self._column_family, key, columns=columns,
                              super_column=super_column, timestamp=timestamp)
| |
import taichi as ti
from tests import test_utils
def _test_basic():
    """Activate three cells of a two-level bitmasked field and check that the
    struct-for loop visits exactly those cells (count 3, sum 2+20+20 == 42)."""
    x = ti.field(ti.i32)
    c = ti.field(ti.i32)
    s = ti.field(ti.i32)
    # two bitmasked levels: a (3, 6) 2D grid whose cells split again along i
    bm = ti.root.bitmasked(ti.ij, (3, 6)).bitmasked(ti.i, 5)
    bm.place(x)
    ti.root.place(c, s)
    @ti.kernel
    def run():
        # writing activates the corresponding bitmask bits
        x[5, 1] = 2
        x[9, 4] = 20
        x[0, 3] = 20
    @ti.kernel
    def sum():
        for i, j in x:
            c[None] += ti.is_active(bm, [i, j])
            s[None] += x[i, j]
    run()
    sum()
    assert c[None] == 3
    assert s[None] == 42  # 2 + 20 + 20
@test_utils.test(require=ti.extension.sparse)
def test_basic():
    """Run the basic bitmasked test with the default (non-packed) layout."""
    _test_basic()
@test_utils.test(require=[ti.extension.sparse, ti.extension.packed],
                 packed=True)
def test_basic_packed():
    """Run the basic bitmasked test again with the packed memory layout."""
    _test_basic()
@test_utils.test(require=ti.extension.sparse)
def test_bitmasked_then_dense():
    """A bitmasked parent over dense blocks: activating any element activates
    its whole dense block, so the loop visits entire 128-element blocks."""
    x = ti.field(ti.f32)
    s = ti.field(ti.i32)
    n = 128
    ti.root.bitmasked(ti.i, n).dense(ti.i, n).place(x)
    ti.root.place(s)
    @ti.kernel
    def func():
        for i in x:
            s[None] += 1
    # writes touch two bitmasked cells (indices 0/127 in block 0, 256/257 in block 2)
    x[0] = 1
    x[127] = 1
    x[256] = 1
    x[257] = 1
    func()
    assert s[None] == 256  # 2 active blocks * 128 dense elements
@test_utils.test(require=ti.extension.sparse)
def test_bitmasked_bitmasked():
    """Two nested bitmasked levels: only individually activated elements are
    visited, so four writes yield exactly four loop iterations."""
    x = ti.field(ti.f32)
    s = ti.field(ti.i32)
    n = 128
    ti.root.bitmasked(ti.i, n).bitmasked(ti.i, n).place(x)
    ti.root.place(s)
    @ti.kernel
    def func():
        for i in x:
            s[None] += 1
    x[0] = 1
    x[127] = 1
    x[256] = 1
    x[257] = 1
    func()
    assert s[None] == 4
@test_utils.test(require=ti.extension.sparse)
def test_huge_bitmasked():
    """Activate every 32nd element of a ~2M-element two-level bitmasked field
    and verify the struct-for visits exactly those elements."""
    # Mainly for testing Metal listgen's grid-stride loop implementation.
    x = ti.field(ti.f32)
    s = ti.field(ti.i32)
    n = 1024
    ti.root.bitmasked(ti.i, n).bitmasked(ti.i, 2 * n).place(x)
    ti.root.place(s)
    @ti.kernel
    def func():
        for i in range(n * n * 2):
            if i % 32 == 0:
                x[i] = 1.0
    @ti.kernel
    def count():
        for i in x:
            s[None] += 1
    func()
    count()
    assert s[None] == (n * n * 2) // 32
@test_utils.test(require=ti.extension.sparse)
def test_bitmasked_listgen_bounded():
    """Activate all n elements and check the listgen-driven loop counts n."""
    # Mainly for testing Metal's listgen is bounded by the actual number of
    # elements possible for that SNode. Note that 1) SNode's size is padded
    # to POT, and 2) Metal ListManager's data size is not padded, we need to
    # make sure listgen doesn't go beyond ListManager's capacity.
    x = ti.field(ti.i32)
    c = ti.field(ti.i32)
    # A prime a bit higher than 65536, which is Metal's maximum number of
    # threads for listgen.
    n = 80173
    ti.root.dense(ti.i, n).bitmasked(ti.i, 1).place(x)
    ti.root.place(c)
    @ti.kernel
    def func():
        for i in range(n):
            x[i] = 1
    @ti.kernel
    def count():
        for i in x:
            c[None] += 1
    func()
    count()
    assert c[None] == n
@test_utils.test(require=ti.extension.sparse)
def test_deactivate():
    """Writing activates a bitmasked cell; ti.deactivate() must clear it again."""
    # https://github.com/taichi-dev/taichi/issues/778
    a = ti.field(ti.i32)
    a_a = ti.root.bitmasked(ti.i, 4)
    a_b = a_a.dense(ti.i, 4)
    a_b.place(a)
    c = ti.field(ti.i32)
    ti.root.place(c)
    @ti.kernel
    def run():
        a[0] = 123
    @ti.kernel
    def is_active():
        c[None] = ti.is_active(a_a, [0])
    @ti.kernel
    def deactivate():
        ti.deactivate(a_a, [0])
    run()
    is_active()
    assert c[None] == 1  # activated by the write
    deactivate()
    is_active()
    assert c[None] == 0  # cleared by ti.deactivate
def _test_sparsity_changes():
    """Activate elements in two rounds and check the struct-for sees the
    updated activation set each time (2 then 4 active elements)."""
    x = ti.field(ti.i32)
    c = ti.field(ti.i32)
    s = ti.field(ti.i32)
    bm = ti.root.bitmasked(ti.i, 5).bitmasked(ti.i, 3)
    bm.place(x)
    ti.root.place(c, s)
    @ti.kernel
    def run():
        for i in x:
            s[None] += x[i]
            c[None] += 1
    # Only two elements of |x| are activated
    x[1] = 2
    x[8] = 20
    run()
    assert c[None] == 2
    assert s[None] == 22  # 2 + 20
    c[None] = 0
    s[None] = 0
    # Four elements are activated now
    x[7] = 15
    x[14] = 5
    run()
    assert c[None] == 4
    assert s[None] == 42  # 2 + 20 + 15 + 5
@test_utils.test(require=ti.extension.sparse)
def test_sparsity_changes():
    """Run the sparsity-change test with the default (non-packed) layout."""
    _test_sparsity_changes()
@test_utils.test(require=[ti.extension.sparse, ti.extension.packed],
                 packed=True)
def test_sparsity_changes_packed():
    """Run the sparsity-change test again with the packed memory layout."""
    _test_sparsity_changes()
@test_utils.test(require=ti.extension.sparse)
def test_bitmasked_offset_child():
    """Place several fields in each SNode cell so the bitmasked children sit
    at non-zero byte offsets, then check listgen over |z| still counts the
    seven activated elements."""
    x = ti.field(ti.i32)
    x2 = ti.field(ti.i32)
    y = ti.field(ti.i32)
    y2 = ti.field(ti.i32)
    y3 = ti.field(ti.i32)
    z = ti.field(ti.i32)
    s = ti.field(ti.i32, shape=())
    n = 16
    # Offset children:
    # * In |bm|'s cell: |bm2| has a non-zero offset
    # * In |bm2|'s cell: |z| has a non-zero offset
    # * We iterate over |z| to test the listgen handles offsets correctly
    bm = ti.root.bitmasked(ti.i, n)
    bm.dense(ti.i, 16).place(x, x2)
    bm2 = bm.bitmasked(ti.i, 4)
    bm2.dense(ti.i, 4).place(y, y2, y3)
    bm2.bitmasked(ti.i, 4).place(z)
    @ti.kernel
    def func():
        for _ in z:
            s[None] += 1
    # seven distinct writes -> seven active elements of |z|
    z[0] = 1
    z[7] = 1
    z[42] = 1
    z[53] = 1
    z[88] = 1
    z[101] = 1
    z[233] = 1
    func()
    assert s[None] == 7
@test_utils.test(require=ti.extension.sparse)
def test_bitmasked_2d_power_of_two():
    """Fully activate a 10x10 2D bitmasked grid and verify both a ti.ndrange
    loop and a struct-for loop count all 100 cells."""
    some_val = ti.field(dtype=float)
    width, height = 10, 10
    total = width * height
    ptr = ti.root.bitmasked(ti.ij, (width, height))
    ptr.place(some_val)
    num_active = ti.field(dtype=int, shape=())
    @ti.kernel
    def init():
        num_active[None] = 0
        for x, y in ti.ndrange(width, height):
            some_val[x, y] = 5
            num_active[None] += 1
    @ti.kernel
    def run():
        num_active[None] = 0
        for x, y in some_val:
            num_active[None] += 1
    init()
    assert num_active[None] == total
    run()
    assert num_active[None] == total
@test_utils.test(require=ti.extension.sparse)
def test_root_deactivate():
    """ti.root.deactivate_all() must clear an activated bitmasked cell."""
    a = ti.field(ti.i32)
    a_a = ti.root.bitmasked(ti.i, 4)
    a_b = a_a.dense(ti.i, 4)
    a_b.place(a)
    c = ti.field(ti.i32)
    ti.root.place(c)
    @ti.kernel
    def run():
        a[0] = 123
    @ti.kernel
    def is_active():
        c[None] = ti.is_active(a_a, [0])
    run()
    is_active()
    assert c[None] == 1  # activated by the write
    ti.root.deactivate_all()
    is_active()
    assert c[None] == 0  # cleared globally
| |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
from django.core.urlresolvers import clear_url_caches, reverse
from cms.api import create_page, create_title
from cms.apphook_pool import apphook_pool
from cms.compat import get_user_model
from cms.appresolver import applications_page_check, clear_app_resolvers, get_app_patterns
from cms.models import Title
from cms.test_utils.testcases import CMSTestCase, SettingsOverrideTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.tests.menu_utils import DumbPageLanguageUrl
from cms.utils.compat.type_checks import string_types
from cms.utils.i18n import force_language
APP_NAME = 'SampleApp'
NS_APP_NAME = 'NamespacedApp'
APP_MODULE = "cms.test_utils.project.sampleapp.cms_app"
class ApphooksTestCase(CMSTestCase):
    """Tests for CMS apphooks: registration, URL resolution, namespaces, and
    interaction of apphooked pages with the page tree."""
    def setUp(self):
        """Reset resolver/url caches and the sample app module before each test."""
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
    def tearDown(self):
        """Mirror setUp's cleanup and also empty the apphook pool."""
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
        apphook_pool.clear()
    def reload_urls(self):
        """Drop all cached urlconf modules so they are re-imported fresh."""
        from django.conf import settings
        url_modules = [
            'cms.urls',
            # TODO: Add here intermediary modules which may
            # include() the 'cms.urls' if it isn't included
            # directly in the root urlconf.
            # '...',
            'cms.test_utils.project.second_cms_urls_for_apphook_tests',
            'cms.test_utils.project.urls_for_apphook_tests',
            settings.ROOT_URLCONF,
        ]
        clear_app_resolvers()
        clear_url_caches()
        for module in url_modules:
            if module in sys.modules:
                del sys.modules[module]
    def create_base_structure(self, apphook, title_langs, namespace=None):
        """Build home -> child_page -> child_child_page (en+de, published), attach
        `apphook` (optionally namespaced) to the grandchild, and return its
        public title(s) for `title_langs` (a single Title for a string arg,
        a list of Titles for a list arg)."""
        apphook_pool.clear()
        superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
        self.superuser = superuser
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser, published=True)
        create_title('de', page.get_title(), page)
        page.publish('de')
        child_page = create_page("child_page", "nav_playground.html", "en",
                                 created_by=superuser, published=True, parent=page)
        create_title('de', child_page.get_title(), child_page)
        child_page.publish('de')
        child_child_page = create_page("child_child_page", "nav_playground.html",
                                       "en", created_by=superuser, published=True, parent=child_page, apphook=apphook,
                                       apphook_namespace=namespace)
        create_title("de", child_child_page.get_title(), child_child_page)
        child_child_page.publish('de')
        # publisher_public is set to draft on publish, issue with onetoone reverse
        child_child_page = self.reload(child_child_page)
        if isinstance(title_langs, string_types):
            titles = child_child_page.publisher_public.get_title_obj(title_langs)
        else:
            titles = [child_child_page.publisher_public.get_title_obj(l) for l in title_langs]
        self.reload_urls()
        return titles
    def test_explicit_apphooks(self):
        """
        Test explicit apphook loading with the CMS_APPHOOKS setting.
        """
        apphooks = (
            '%s.%s' % (APP_MODULE, APP_NAME),
        )
        with SettingsOverride(CMS_APPHOOKS=apphooks):
            apphook_pool.clear()
            hooks = apphook_pool.get_apphooks()
            app_names = [hook[0] for hook in hooks]
            self.assertEqual(len(hooks), 1)
            self.assertEqual(app_names, [APP_NAME])
            apphook_pool.clear()
    def test_implicit_apphooks(self):
        """
        Test implicit apphook loading with INSTALLED_APPS cms_app.py
        """
        apps = ['cms.test_utils.project.sampleapp']
        with SettingsOverride(INSTALLED_APPS=apps, ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            hooks = apphook_pool.get_apphooks()
            app_names = [hook[0] for hook in hooks]
            self.assertEqual(len(hooks), 3)
            self.assertIn(NS_APP_NAME, app_names)
            self.assertIn(APP_NAME, app_names)
            apphook_pool.clear()
    def test_apphook_on_root(self):
        """An apphook on the root page serves its app view at /, while sibling
        pages without an apphook still render normally."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("apphooked-page", "nav_playground.html", "en",
                               created_by=superuser, published=True, apphook="SampleApp")
            blank_page = create_page("not-apphooked-page", "nav_playground.html", "en",
                                     created_by=superuser, published=True, apphook="", slug='blankapp')
            english_title = page.title_set.all()[0]
            self.assertEqual(english_title.language, 'en')
            create_title("de", "aphooked-page-de", page)
            self.assertTrue(page.publish('en'))
            self.assertTrue(page.publish('de'))
            self.assertTrue(blank_page.publish('en'))
            with force_language("en"):
                response = self.client.get(self.get_pages_root())
                self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, '<--noplaceholder-->')
            response = self.client.get('/en/blankapp/')
            self.assertTemplateUsed(response, 'nav_playground.html')
            apphook_pool.clear()
    def test_apphook_on_root_reverse(self):
        """reverse() for an app view hooked on the root page must not produce
        a double slash."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("apphooked-page", "nav_playground.html", "en",
                               created_by=superuser, published=True, apphook="SampleApp")
            create_title("de", "aphooked-page-de", page)
            self.assertTrue(page.publish('de'))
            self.assertTrue(page.publish('en'))
            self.reload_urls()
            self.assertFalse(reverse('sample-settings').startswith('//'))
            apphook_pool.clear()
    def test_get_page_for_apphook(self):
        """applications_page_check() must resolve an apphooked URL to the page
        of the matching language, for both 'en' and 'de'."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title, de_title = self.create_base_structure(APP_NAME, ['en', 'de'])
            with force_language("en"):
                path = reverse('sample-settings')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, en_title.title)
            with force_language("de"):
                path = reverse('sample-settings')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'de'
            attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash and language prefix
            self.assertEqual(attached_to_page.pk, de_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, de_title.title)
            apphook_pool.clear()
    def test_get_page_for_apphook_on_preview_or_edit(self):
        """In edit mode the apphook URL must resolve to the public page object."""
        if get_user_model().USERNAME_FIELD == 'email':
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin@admin.com')
        else:
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser, published=True, apphook=APP_NAME)
        create_title('de', page.get_title(), page)
        page.publish('en')
        page.publish('de')
        public_page = page.get_public_object()
        with self.login_user_context(superuser):
            with force_language("en"):
                path = reverse('sample-settings')
                request = self.get_request(path + '?edit')
                request.LANGUAGE_CODE = 'en'
                attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash
                self.assertEqual(attached_to_page.pk, public_page.pk)
            with force_language("de"):
                path = reverse('sample-settings')
                request = self.get_request(path + '?edit')
                request.LANGUAGE_CODE = 'de'
                attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash
                self.assertEqual(attached_to_page.pk, public_page.pk)
    def test_get_root_page_for_apphook_with_instance_namespace(self):
        """An instance namespace must reverse to the same URL as the app
        namespace and resolve back to the apphooked page."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
            self.reload_urls()
            with force_language("en"):
                reverse("example_app:example")
                reverse("example1:example")
                reverse("example2:example")
                path = reverse('namespaced_app_ns:sample-root')
                path_instance = reverse('instance_ns:sample-root')
            self.assertEqual(path, path_instance)
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            apphook_pool.clear()
    def test_get_child_page_for_apphook_with_instance_namespace(self):
        """A sub-URL of a namespaced apphook reverses identically via the app
        namespace, the instance namespace, and current_app."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
            with force_language("en"):
                path = reverse('namespaced_app_ns:sample-settings')
                path_instance1 = reverse('instance_ns:sample-settings')
                path_instance2 = reverse('namespaced_app_ns:sample-settings', current_app='instance_ns')
            self.assertEqual(path, path_instance1)
            self.assertEqual(path, path_instance2)
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page_id)
            apphook_pool.clear()
    def test_get_sub_page_for_apphook_with_implicit_current_app(self):
        """When namespace equals the app namespace, views see it as current app."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'namespaced_app_ns')
            with force_language("en"):
                path = reverse('namespaced_app_ns:current-app')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/app.html')
            self.assertContains(response, 'namespaced_app_ns')
            self.assertContains(response, path)
            apphook_pool.clear()
    def test_get_i18n_apphook_with_explicit_current_app(self):
        """Two instances of the same namespaced apphook must be reversible in
        both languages with and without an explicit current_app."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            titles = self.create_base_structure(NS_APP_NAME, ['en', 'de'], 'instance_1')
            public_de_title = titles[1]
            de_title = Title.objects.get(page=public_de_title.page.publisher_draft, language="de")
            de_title.slug = "de"
            de_title.save()
            de_title.page.publish('de')
            page2 = create_page("page2", "nav_playground.html",
                                "en", created_by=self.superuser, published=True, parent=de_title.page.parent,
                                apphook=NS_APP_NAME,
                                apphook_namespace="instance_2")
            create_title("de", "de_title", page2, slug="slug")
            page2.publish('de')
            clear_app_resolvers()
            clear_url_caches()
            if APP_MODULE in sys.modules:
                del sys.modules[APP_MODULE]
            self.reload_urls()
            with force_language("de"):
                reverse('namespaced_app_ns:current-app', current_app="instance_1")
                reverse('namespaced_app_ns:current-app', current_app="instance_2")
                reverse('namespaced_app_ns:current-app')
            with force_language("en"):
                reverse('namespaced_app_ns:current-app', current_app="instance_1")
                reverse('namespaced_app_ns:current-app', current_app="instance_2")
                reverse('namespaced_app_ns:current-app')
    def test_apphook_include_extra_parameters(self):
        """Extra kwargs passed to the apphook's include() must reach the view."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            self.create_base_structure(NS_APP_NAME, ['en', 'de'], 'instance_1')
            with force_language("en"):
                path = reverse('namespaced_app_ns:extra_second')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, 'someopts')
    def test_get_sub_page_for_apphook_with_explicit_current_app(self):
        """A sub-URL of an instance-namespaced apphook resolves to its page and
        the view renders with that instance as current app."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
            with force_language("en"):
                path = reverse('namespaced_app_ns:current-app')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/app.html')
            self.assertContains(response, 'instance_ns')
            self.assertContains(response, path)
            apphook_pool.clear()
    def test_include_urlconf(self):
        """Both directly-listed and include()'d urlconf entries of the apphook
        must be served, in both languages."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            self.create_base_structure(APP_NAME, 'en')
            path = reverse('extra_second')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test included urlconf")
            path = reverse('extra_first')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test urlconf")
            with force_language("de"):
                path = reverse('extra_first')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test urlconf")
            with force_language("de"):
                path = reverse('extra_second')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test included urlconf")
            apphook_pool.clear()
    def test_apphook_breaking_under_home_with_new_path_caching(self):
        """An apphook nested below a page whose slug repeats the home slug must
        still reverse to the full nested path."""
        with SettingsOverride(CMS_PERMISSION=False):
            home = create_page("home", "nav_playground.html", "en", published=True)
            child = create_page("child", "nav_playground.html", "en", published=True, parent=home)
            # not-home is what breaks stuff, because it contains the slug of the home page
            not_home = create_page("not-home", "nav_playground.html", "en", published=True, parent=child)
            create_page("subchild", "nav_playground.html", "en", published=True, parent=not_home, apphook='SampleApp')
            with force_language("en"):
                self.reload_urls()
                urlpatterns = get_app_patterns()
                resolver = urlpatterns[0]
                url = resolver.reverse('sample-root')
                self.assertEqual(url, 'child/not-home/subchild/')
    def test_apphook_urlpattern_order(self):
        # this one includes the actual cms.urls, so it can be tested if
        # they are loaded in the correct order (the cms page pattern must be last)
        # (the other testcases replicate the inclusion code and thus don't test this)
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls'):
            self.create_base_structure(APP_NAME, 'en')
            path = reverse('extra_second')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test included urlconf")
    def test_apphooks_receive_url_params(self):
        # make sure that urlparams actually reach the apphook views
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls'):
            self.create_base_structure(APP_NAME, 'en')
            path = reverse('sample-params', kwargs=dict(my_params='is-my-param-really-in-the-context-QUESTIONMARK'))
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, 'my_params: is-my-param-really-in-the-context-QUESTIONMARK')
    def test_multiple_apphooks(self):
        # test for #1538
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.third_urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
            create_page("home", "nav_playground.html", "en", created_by=superuser, published=True, )
            create_page("apphook1-page", "nav_playground.html", "en",
                        created_by=superuser, published=True, apphook="SampleApp")
            create_page("apphook2-page", "nav_playground.html", "en",
                        created_by=superuser, published=True, apphook="SampleApp2")
            reverse('sample-root')
            reverse('sample2-root')
            apphook_pool.clear()
class ApphooksPageLanguageUrlTestCase(SettingsOverrideTestCase):
settings_overrides = {'ROOT_URLCONF': 'cms.test_utils.project.second_urls_for_apphook_tests'}
    def setUp(self):
        """Reset resolver/url caches and the sample app module before each test."""
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
def tearDown(self):
clear_app_resolvers()
clear_url_caches()
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
apphook_pool.clear()
def reload_urls(self):
from django.conf import settings
url_modules = [
'cms.urls',
'cms.test_utils.project.second_cms_urls_for_apphook_tests',
settings.ROOT_URLCONF,
]
clear_app_resolvers()
clear_url_caches()
for module in url_modules:
if module in sys.modules:
del sys.modules[module]
def test_page_language_url_for_apphook(self):
apphook_pool.clear()
superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
page = create_page("home", "nav_playground.html", "en",
created_by=superuser)
create_title('de', page.get_title(), page)
page.publish('en')
page.publish('de')
child_page = create_page("child_page", "nav_playground.html", "en",
created_by=superuser, parent=page)
create_title('de', child_page.get_title(), child_page)
child_page.publish('en')
child_page.publish('de')
child_child_page = create_page("child_child_page", "nav_playground.html",
"en", created_by=superuser, parent=child_page, apphook='SampleApp')
create_title("de", '%s_de' % child_child_page.get_title(), child_child_page)
child_child_page.publish('en')
child_child_page.publish('de')
# publisher_public is set to draft on publish, issue with one to one reverse
child_child_page = self.reload(child_child_page)
with force_language("en"):
path = reverse('extra_first')
request = self.get_request(path)
request.LANGUAGE_CODE = 'en'
request.current_page = child_child_page
fake_context = {'request': request}
tag = DumbPageLanguageUrl()
output = tag.get_context(fake_context, 'en')
url = output['content']
self.assertEqual(url, '/en/child_page/child_child_page/extra_1/')
output = tag.get_context(fake_context, 'de')
url = output['content']
# look the extra "_de"
self.assertEqual(url, '/de/child_page/child_child_page_de/extra_1/')
output = tag.get_context(fake_context, 'fr')
url = output['content']
self.assertEqual(url, '/fr/child_page/child_child_page/extra_1/')
apphook_pool.clear()
| |
import numpy as np
import pytest
from pandas.compat import PY36
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
# Column add, remove, delete.
class TestDataFrameMutateColumns:
    """Tests for DataFrame column mutation: assign, insert, delitem and pop.

    Regression tests reference their GitHub issue numbers (GH ####).
    """

    def test_assign(self):
        """assign adds/overwrites columns and never mutates the caller."""
        df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
        original = df.copy()
        result = df.assign(C=df.B / df.A)
        expected = df.copy()
        expected['C'] = [4, 2.5, 2]
        assert_frame_equal(result, expected)
        # lambda syntax
        result = df.assign(C=lambda x: x.B / x.A)
        assert_frame_equal(result, expected)
        # original is unmodified
        assert_frame_equal(df, original)
        # Non-Series array-like
        result = df.assign(C=[4, 2.5, 2])
        assert_frame_equal(result, expected)
        # original is unmodified
        assert_frame_equal(df, original)
        result = df.assign(B=df.B / df.A)
        expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
        assert_frame_equal(result, expected)
        # overwrite
        result = df.assign(A=df.A + df.B)
        expected = df.copy()
        expected['A'] = [5, 7, 9]
        assert_frame_equal(result, expected)
        # lambda
        result = df.assign(A=lambda x: x.A + x.B)
        assert_frame_equal(result, expected)

    def test_assign_multiple(self):
        """Multiple kwargs in a single assign() call all land in the result."""
        df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
        result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
        expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
                              [3, 6, 9, 3, 6]], columns=list('ABCDE'))
        assert_frame_equal(result, expected)

    def test_assign_order(self):
        # GH 9818
        # On Python >= 3.6 **kwargs preserve call order, so new columns are
        # inserted in kwargs order; earlier Pythons sort them alphabetically.
        df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
        result = df.assign(D=df.A + df.B, C=df.A - df.B)
        if PY36:
            expected = DataFrame([[1, 2, 3, -1], [3, 4, 7, -1]],
                                 columns=list('ABDC'))
        else:
            expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
                                 columns=list('ABCD'))
        assert_frame_equal(result, expected)
        result = df.assign(C=df.A - df.B, D=df.A + df.B)
        expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
                             columns=list('ABCD'))
        assert_frame_equal(result, expected)

    def test_assign_bad(self):
        """assign() rejects positional callables and unknown attribute refs."""
        df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
        # non-keyword argument
        with pytest.raises(TypeError):
            df.assign(lambda x: x.A)
        with pytest.raises(AttributeError):
            df.assign(C=df.A, D=df.A + df.C)

    @pytest.mark.skipif(PY36, reason="""Issue #14207: valid for python
    3.6 and above""")
    def test_assign_dependent_old_python(self):
        """Pre-3.6: later kwargs cannot see columns created by earlier ones."""
        df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
        # Key C does not exist at definition time of df
        with pytest.raises(KeyError):
            df.assign(C=lambda df: df.A,
                      D=lambda df: df['A'] + df['C'])
        with pytest.raises(KeyError):
            df.assign(C=df.A, D=lambda x: x['A'] + x['C'])

    @pytest.mark.skipif(not PY36, reason="""Issue #14207: not valid for
    python 3.5 and below""")
    def test_assign_dependent(self):
        """3.6+: later kwargs may reference columns created earlier in the call."""
        df = DataFrame({'A': [1, 2], 'B': [3, 4]})
        result = df.assign(C=df.A, D=lambda x: x['A'] + x['C'])
        expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]],
                             columns=list('ABCD'))
        assert_frame_equal(result, expected)
        result = df.assign(C=lambda df: df.A,
                           D=lambda df: df['A'] + df['C'])
        expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]],
                             columns=list('ABCD'))
        assert_frame_equal(result, expected)

    def test_insert_error_msmgs(self):
        """Column insertion failures raise with descriptive messages."""
        # GH 7432
        df = DataFrame({'foo': ['a', 'b', 'c'], 'bar': [
            1, 2, 3], 'baz': ['d', 'e', 'f']}).set_index('foo')
        s = DataFrame({'foo': ['a', 'b', 'c', 'a'], 'fiz': [
            'g', 'h', 'i', 'j']}).set_index('foo')
        msg = 'cannot reindex from a duplicate axis'
        with pytest.raises(ValueError, match=msg):
            df['newcol'] = s
        # GH 4107, more descriptive error message
        df = DataFrame(np.random.randint(0, 2, (4, 4)),
                       columns=['a', 'b', 'c', 'd'])
        msg = 'incompatible index of inserted column with frame index'
        with pytest.raises(TypeError, match=msg):
            df['gr'] = df.groupby(['b', 'c']).count()

    def test_insert_benchmark(self):
        # from the vb_suite/frame_methods/frame_insert_columns
        # Repeatedly assigning the same column must produce K identical columns.
        N = 10
        K = 5
        df = DataFrame(index=range(N))
        new_col = np.random.randn(N)
        for i in range(K):
            df[i] = new_col
        expected = DataFrame(np.repeat(new_col, K).reshape(N, K),
                             index=range(N))
        assert_frame_equal(df, expected)

    def test_insert(self):
        """insert places columns by position and rejects duplicate labels."""
        df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
                       columns=['c', 'b', 'a'])
        df.insert(0, 'foo', df['a'])
        tm.assert_index_equal(df.columns, Index(['foo', 'c', 'b', 'a']))
        tm.assert_series_equal(df['a'], df['foo'], check_names=False)
        df.insert(2, 'bar', df['c'])
        tm.assert_index_equal(df.columns,
                              Index(['foo', 'c', 'bar', 'b', 'a']))
        tm.assert_almost_equal(df['c'], df['bar'], check_names=False)
        # diff dtype
        # new item
        df['x'] = df['a'].astype('float32')
        result = Series(dict(float32=1, float64=5))
        assert (df.get_dtype_counts().sort_index() == result).all()
        # replacing current (in different block)
        df['a'] = df['a'].astype('float32')
        result = Series(dict(float32=2, float64=4))
        assert (df.get_dtype_counts().sort_index() == result).all()
        df['y'] = df['a'].astype('int32')
        result = Series(dict(float32=2, float64=4, int32=1))
        assert (df.get_dtype_counts().sort_index() == result).all()
        with pytest.raises(ValueError, match='already exists'):
            df.insert(1, 'a', df['b'])
        msg = "cannot insert c, already exists"
        with pytest.raises(ValueError, match=msg):
            df.insert(1, 'c', df['b'])
        df.columns.name = 'some_name'
        # preserve columns name field
        df.insert(0, 'baz', df['c'])
        assert df.columns.name == 'some_name'
        # GH 13522
        df = DataFrame(index=['A', 'B', 'C'])
        df['X'] = df.index
        df['X'] = ['x', 'y', 'z']
        exp = DataFrame(data={'X': ['x', 'y', 'z']}, index=['A', 'B', 'C'])
        assert_frame_equal(df, exp)

    def test_delitem(self, float_frame):
        """del removes the column from the frame."""
        del float_frame['A']
        assert 'A' not in float_frame

    def test_delitem_multiindex(self):
        """del of a top MultiIndex level drops all of its sub-columns."""
        midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
        df = DataFrame(np.random.randn(4, 4), columns=midx)
        assert len(df.columns) == 4
        assert ('A', ) in df.columns
        assert 'A' in df.columns
        result = df['A']
        assert isinstance(result, DataFrame)
        del df['A']
        assert len(df.columns) == 2
        # A still in the levels, BUT get a KeyError if trying
        # to delete
        assert ('A', ) not in df.columns
        with pytest.raises(KeyError):
            del df[('A',)]
        # behavior of dropped/deleted MultiIndex levels changed from
        # GH 2770 to GH 19027: MultiIndex no longer '.__contains__'
        # levels which are dropped/deleted
        assert 'A' not in df.columns
        with pytest.raises(KeyError):
            del df['A']

    def test_pop(self, float_frame):
        """pop removes and returns a column; the result must be a copy."""
        float_frame.columns.name = 'baz'
        float_frame.pop('A')
        assert 'A' not in float_frame
        float_frame['foo'] = 'bar'
        float_frame.pop('foo')
        assert 'foo' not in float_frame
        assert float_frame.columns.name == 'baz'
        # gh-10912: inplace ops cause caching issue
        a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[
            'A', 'B', 'C'], index=['X', 'Y'])
        b = a.pop('B')
        b += 1
        # original frame
        expected = DataFrame([[1, 3], [4, 6]], columns=[
            'A', 'C'], index=['X', 'Y'])
        tm.assert_frame_equal(a, expected)
        # result
        expected = Series([2, 5], index=['X', 'Y'], name='B') + 1
        tm.assert_series_equal(b, expected)

    def test_pop_non_unique_cols(self):
        """pop with duplicate labels removes all matches and returns a frame."""
        df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
        df.columns = ["a", "b", "a"]
        res = df.pop("a")
        assert type(res) == DataFrame
        assert len(res) == 2
        assert len(df.columns) == 1
        assert "b" in df.columns
        assert "a" not in df.columns
        assert len(df.index) == 2

    def test_insert_column_bug_4032(self):
        # GH4032, inserting a column and renaming causing errors
        df = DataFrame({'b': [1.1, 2.2]})
        df = df.rename(columns={})
        df.insert(0, 'a', [1, 2])
        result = df.rename(columns={})
        str(result)
        expected = DataFrame([[1, 1.1], [2, 2.2]], columns=['a', 'b'])
        assert_frame_equal(result, expected)
        df.insert(0, 'c', [1.3, 2.3])
        result = df.rename(columns={})
        str(result)
        expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]],
                             columns=['c', 'a', 'b'])
        assert_frame_equal(result, expected)
| |
"""
termcolors.py
"""
from django.utils import six
# The eight standard ANSI colors, indexed by their ANSI color number (0-7).
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
# ANSI SGR codes: foreground colors are 30-37, background colors are 40-47.
foreground = {color_names[x]: '3%s' % x for x in range(8)}
background = {color_names[x]: '4%s' % x for x in range(8)}
# ANSI "reset all attributes" code.
RESET = '0'
# Display option name -> ANSI SGR display attribute code.
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.
    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.
    Returns the RESET code if no parameters are given.
    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code
    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    code_list = []
    # Special case: colorize('', opts=('reset',)) emits a bare RESET code.
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # kwargs.items() behaves identically on Python 2 and 3 for this tiny
    # mapping; the six.iteritems() shim is unnecessary here.
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        # Auto-terminate so styling does not leak past the returned text.
        text = '%s\x1b[%sm' % (text or '', RESET)
    return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
    """
    Returns a function with default parameters for colorize()
    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print(bold_red('hello'))
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        # Close over the style arguments so callers only pass the text.
        return colorize(text, opts, **kwargs)
    return style_func
# Palette names accepted in the DJANGO_COLORS environment variable.
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
# Role name -> colorize() kwargs, per palette. The 'nocolor' palette lists
# every valid role with empty styling and doubles as the role whitelist in
# parse_color_setting().
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'SUCCESS': {},
        'WARNING': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
        'MIGRATE_HEADING': {},
        'MIGRATE_LABEL': {},
        'MIGRATE_SUCCESS': {},
        'MIGRATE_FAILURE': {},
    },
    # Styling tuned for terminals with a dark background.
    DARK_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'yellow'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green'},
        'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'yellow'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
        'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
    },
    # Styling tuned for terminals with a light background.
    LIGHT_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'blue'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
        'HTTP_NOT_MODIFIED': {'fg': 'green'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'red'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
        'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
    }
}
# Palette used when DJANGO_COLORS does not name one.
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette
    The general form of a palette definition is:
    "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"
    where:
    palette is a named palette; one of 'light', 'dark', or 'nocolor'.
    role is a named style used by Django
    fg is a foreground color.
    bg is a background color.
    option is a display option.
    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.
    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
        'http_info', 'http_success', 'http_redirect', 'http_bad_request',
        'http_not_found', 'http_server_error'
    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'

    Returns a dict mapping role name -> colorize() kwargs, or None when the
    resulting palette has no styling at all.
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]
    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}
            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            role, instructions = part.split('=')
            role = role.upper()
            styles = instructions.split(',')
            styles.reverse()
            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]
            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict)
            if opts:
                definition['opts'] = opts
            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition
    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
| |
import re
import time
import datetime
from tests.test_helper import *
from braintree.test.credit_card_numbers import CreditCardNumbers
class TestDisputes(unittest.TestCase):
    """Integration tests for the braintree Dispute API.

    Exercises accept, file/text evidence management, finalize, find and
    remove_evidence against the sandbox gateway.
    """

    def create_evidence_document(self):
        """Uploads the fixture PNG and returns the created document_upload."""
        file_path = os.path.join(os.path.dirname(__file__), "..", "fixtures/bt_logo.png")
        # Use a context manager so the fixture handle is closed deterministically
        # (the original relied on the GC to close it).
        with open(file_path, "rb") as png_file:
            return DocumentUpload.create({
                "kind": braintree.DocumentUpload.Kind.EvidenceDocument,
                "file": png_file
            }).document_upload

    def create_sample_dispute(self):
        """Creates a sale that is guaranteed to produce a chargeback dispute."""
        return Transaction.sale({
            "amount": "100.00",
            "credit_card": {
                "number": CreditCardNumbers.Disputes.Chargeback,
                "expiration_date": "12/2019"
            }
        }).transaction.disputes[0]

    def test_accept_changes_dispute_status_to_accepted(self):
        dispute = self.create_sample_dispute()
        result = Dispute.accept(dispute.id)
        self.assertTrue(result.is_success)
        updated_dispute = Dispute.find(dispute.id)
        self.assertEqual(updated_dispute.status, Dispute.Status.Accepted)
        dispute_from_transaction = Transaction.find(dispute.transaction.id).disputes[0]
        self.assertEqual(dispute_from_transaction.status, Dispute.Status.Accepted)

    def test_accept_errors_when_dispute_not_open(self):
        result = Dispute.accept("wells_dispute")
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.CanOnlyAcceptOpenDispute)
        self.assertEqual(result.errors.for_object("dispute")[0].message, "Disputes can only be accepted when they are in an Open state")

    @raises_with_regexp(NotFoundError, "dispute with id 'invalid-id' not found")
    def test_accept_raises_error_when_dispute_not_found(self):
        dispute = Dispute.accept("invalid-id")

    def test_add_file_evidence_adds_evidence(self):
        dispute = self.create_sample_dispute()
        document = self.create_evidence_document()
        result = Dispute.add_file_evidence(dispute.id, document.id)
        self.assertTrue(result.is_success)
        updated_dispute = Dispute.find(dispute.id)
        self.assertEqual(updated_dispute.evidence[0].id, result.evidence.id)

    def test_add_file_evidence_adds_category_file_evidence(self):
        dispute = self.create_sample_dispute()
        document = self.create_evidence_document()
        result = Dispute.add_file_evidence(dispute.id, { "document_id": document.id, "category": "GENERAL" })
        self.assertTrue(result.is_success)
        self.assertEqual(result.evidence.category, "GENERAL")

    @raises_with_regexp(NotFoundError, "dispute with id 'unknown_dispute_id' not found")
    def test_add_file_evidence_raises_error_when_dispute_not_found(self):
        dispute = Dispute.add_file_evidence("unknown_dispute_id", "text evidence")

    def test_add_file_evidence_raises_error_when_dispute_not_open(self):
        dispute = self.create_sample_dispute()
        document = self.create_evidence_document()
        Dispute.accept(dispute.id)
        result = Dispute.add_file_evidence(dispute.id, document.id)
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.CanOnlyAddEvidenceToOpenDispute)
        self.assertEqual(result.errors.for_object("dispute")[0].message, "Evidence can only be attached to disputes that are in an Open state")

    def test_categorized_file_evidence_for_text_only_category(self):
        dispute = self.create_sample_dispute()
        document = self.create_evidence_document()
        result = Dispute.add_file_evidence(dispute.id, { "document_id": document.id, "category": "DEVICE_ID" })
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.EvidenceCategoryTextOnly)
        self.assertEqual(result.errors.for_object("dispute")[0].message, "Only text evidence can be provided for this category")

    def test_categorized_file_evidence_with_unsupported_category(self):
        dispute = self.create_sample_dispute()
        document = self.create_evidence_document()
        result = Dispute.add_file_evidence(dispute.id, { "document_id": document.id, "category": "DOESNOTEXIST" })
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.CanOnlyCreateEvidenceWithValidCategory)
        self.assertEqual(result.errors.for_object("dispute")[0].message, "The category you supplied on the evidence record is not valid")

    def test_add_text_evidence_adds_text_evidence(self):
        dispute = self.create_sample_dispute()
        result = Dispute.add_text_evidence(dispute.id, "text evidence")
        evidence = result.evidence
        self.assertTrue(result.is_success)
        self.assertEqual(evidence.comment, "text evidence")
        self.assertIsNotNone(evidence.created_at)
        # Raw string avoids the py3.6+ invalid-escape DeprecationWarning.
        self.assertTrue(re.match(r"^\w{16,}$", evidence.id))
        self.assertIsNone(evidence.sent_to_processor_at)
        self.assertIsNone(evidence.url)
        self.assertIsNone(evidence.category)
        self.assertIsNone(evidence.sequence_number)

    def test_add_text_evidence_adds_category_and_sequence_number_text_evidence(self):
        dispute = self.create_sample_dispute()
        result = Dispute.add_text_evidence(dispute.id, { "content": "PROOF_OF_FULFILLMENT", "category": "DEVICE_ID", "sequence_number": "0" })
        self.assertTrue(result.is_success)
        evidence = result.evidence
        self.assertEqual(evidence.comment, "PROOF_OF_FULFILLMENT")
        self.assertEqual(evidence.category, "DEVICE_ID")
        self.assertEqual(evidence.sequence_number, 0)

    @raises_with_regexp(NotFoundError, "Dispute with ID 'unknown_dispute_id' not found")
    def test_add_text_evidence_raises_error_when_dispute_not_found(self):
        dispute = Dispute.add_text_evidence("unknown_dispute_id", "text evidence")

    def test_add_text_evidence_raises_error_when_dispute_not_open(self):
        dispute = self.create_sample_dispute()
        Dispute.accept(dispute.id)
        result = Dispute.add_text_evidence(dispute.id, "text evidence")
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.CanOnlyAddEvidenceToOpenDispute)
        self.assertEqual(result.errors.for_object("dispute")[0].message, "Evidence can only be attached to disputes that are in an Open state")

    def test_add_text_evidence_shows_new_record_in_find(self):
        dispute = self.create_sample_dispute()
        evidence = Dispute.add_text_evidence(dispute.id, "text evidence").evidence
        refreshed_dispute = Dispute.find(dispute.id)
        self.assertEqual(refreshed_dispute.evidence[0].id, evidence.id)
        self.assertEqual(refreshed_dispute.evidence[0].comment, "text evidence")

    def test_categorized_text_evidence_with_unsupported_category(self):
        dispute = self.create_sample_dispute()
        result = Dispute.add_text_evidence(dispute.id, { "content": "evidence", "category": "DOESNOTEXIST" })
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.CanOnlyCreateEvidenceWithValidCategory)
        self.assertEqual(result.errors.for_object("dispute")[0].message, "The category you supplied on the evidence record is not valid")

    def test_categorized_text_evidence_with_file_category(self):
        dispute = self.create_sample_dispute()
        result = Dispute.add_text_evidence(dispute.id, { "content": "evidence", "category": "MERCHANT_WEBSITE_OR_APP_ACCESS" })
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.EvidenceCategoryDocumentOnly)
        self.assertEqual(result.errors.for_object("dispute")[0].message, "Only document evidence can be provided for this category")

    def test_categorized_text_evidence_with_invalid_date_time_format(self):
        dispute = self.create_sample_dispute()
        result = Dispute.add_text_evidence(dispute.id, { "content": "not a date", "category": "DOWNLOAD_DATE_TIME" })
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.EvidenceContentDateInvalid)

    def test_categorized_text_evidence_with_valid_date_time_format(self):
        dispute = self.create_sample_dispute()
        result = Dispute.add_text_evidence(dispute.id, { "content": "2018-10-20T18:00:00-0500", "category": "DOWNLOAD_DATE_TIME" })
        self.assertTrue(result.is_success)

    def test_finalize_changes_dispute_status_to_disputed(self):
        dispute = self.create_sample_dispute()
        result = Dispute.finalize(dispute.id)
        self.assertTrue(result.is_success)
        updated_dispute = Dispute.find(dispute.id)
        self.assertEqual(updated_dispute.status, Dispute.Status.Disputed)

    def test_finalize_errors_when_dispute_not_open(self):
        result = Dispute.finalize("wells_dispute")
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.CanOnlyFinalizeOpenDispute)
        self.assertEqual(result.errors.for_object("dispute")[0].message, "Disputes can only be finalized when they are in an Open state")

    def test_finalize_when_digital_goods_missing(self):
        dispute = self.create_sample_dispute()
        result = Dispute.add_text_evidence(dispute.id, { "content": "device_id", "category": "DEVICE_ID" })
        self.assertTrue(result.is_success)
        # Call finalize on the class, consistent with the other tests
        # (previously called via the dispute instance).
        result = Dispute.finalize(dispute.id)
        self.assertFalse(result.is_success)
        error_codes = [error.code for error in result.errors.for_object("dispute")]
        self.assertIn(ErrorCodes.Dispute.DigitalGoodsMissingDownloadDate, error_codes)
        self.assertIn(ErrorCodes.Dispute.DigitalGoodsMissingEvidence, error_codes)

    def test_finalize_when_missing_non_disputed_payments_date(self):
        dispute = self.create_sample_dispute()
        result = Dispute.add_text_evidence(dispute.id, { "content": "123", "category": "PRIOR_NON_DISPUTED_TRANSACTION_ARN" })
        self.assertTrue(result.is_success)
        # Call finalize on the class, consistent with the other tests.
        result = Dispute.finalize(dispute.id)
        self.assertFalse(result.is_success)
        error_codes = [error.code for error in result.errors.for_object("dispute")]
        self.assertIn(ErrorCodes.Dispute.NonDisputedPriorTransactionEvidenceMissingDate, error_codes)

    @raises_with_regexp(NotFoundError, "dispute with id 'invalid-id' not found")
    def test_finalize_raises_error_when_dispute_not_found(self):
        dispute = Dispute.finalize("invalid-id")

    def test_find_returns_dispute_with_given_id(self):
        dispute = Dispute.find("open_dispute")
        self.assertEqual(dispute.amount_disputed, 31.0)
        self.assertEqual(dispute.amount_won, 0.0)
        self.assertEqual(dispute.id, "open_dispute")
        self.assertEqual(dispute.status, Dispute.Status.Open)
        self.assertEqual(dispute.transaction.id, "open_disputed_transaction")
        self.assertEqual(None, dispute.transaction.installment_count)
        self.assertNotEqual(None, dispute.graphql_id)

    @raises_with_regexp(NotFoundError, "dispute with id 'invalid-id' not found")
    def test_find_raises_error_when_dispute_not_found(self):
        dispute = Dispute.find("invalid-id")

    def test_remove_evidence_removes_evidence_from_the_dispute(self):
        dispute = self.create_sample_dispute()
        evidence = Dispute.add_text_evidence(dispute.id, "text evidence").evidence
        result = Dispute.remove_evidence(dispute.id, evidence.id)
        self.assertTrue(result.is_success)

    @raises_with_regexp(NotFoundError, "evidence with id 'unknown_evidence_id' for dispute with id 'unknown_dispute_id' not found")
    def test_remove_evidence_raises_error_when_dispute_or_evidence_not_found(self):
        Dispute.remove_evidence("unknown_dispute_id", "unknown_evidence_id")

    def test_remove_evidence_errors_when_dispute_not_open(self):
        dispute = self.create_sample_dispute()
        evidence = Dispute.add_text_evidence(dispute.id, "text evidence").evidence
        Dispute.accept(dispute.id)
        result = Dispute.remove_evidence(dispute.id, evidence.id)
        self.assertFalse(result.is_success)
        self.assertEqual(result.errors.for_object("dispute")[0].code, ErrorCodes.Dispute.CanOnlyRemoveEvidenceFromOpenDispute)
        self.assertEqual(result.errors.for_object("dispute")[0].message, "Evidence can only be removed from disputes that are in an Open state")
| |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module for interaction with adb."""
import enum
import json
import os
import re
import subprocess
import time
from typing import Any, List, Optional, Tuple
from gazoo_device import config
from gazoo_device import errors
from gazoo_device import gdm_logger
from gazoo_device.utility import host_utils
from gazoo_device.utility import retry
# Seconds to sleep between retries of adb operations.
ADB_RETRY_SLEEP = 10
# Default TCP port used in "ip:port" adb-over-network identifiers.
DEFAULT_PORT = 5555
# Seconds to wait for fastboot commands to return.
FASTBOOT_TIMEOUT = 10.0
# Matches "[name]: [value]" lines (Android property listings).
PROPERTY_PATTERN = r"\[(.*)\]: \[(.*)\]\n"
# Matches "name=value" lines (environment-style output).
SYSENV_PATTERN = r"(.*)=(.*)\n"
class AdbDeviceState(enum.Enum):
  """ADB device states as found in output of 'adb devices'.

  The values come from connection_state_name() in
  https://android.googlesource.com/platform/system/adb/+/refs/heads/master/transport.cpp#759.
  """
  BOOTLOADER = "bootloader"
  DEVICE = "device"
  HOST = "host"
  OFFLINE = "offline"
  NO_PERMISSIONS = "no permissions"
  RECOVERY = "recovery"
  SIDELOAD = "sideload"
  UNAUTHORIZED = "unauthorized"
  UNKNOWN = "unknown"
  # Not defined by ADB (in case we fail to parse the state).
  UNRECOGNIZED = "unrecognized"
# Module-level GDM logger shared by the helpers in this file.
logger = gdm_logger.get_logger()
def bugreport(adb_identifier: str,
              destination_path: str = "./",
              adb_path: Optional[str] = None) -> str:
  """Gets a bugreport to destination_path on host for the adb_identifier.

  Args:
    adb_identifier: Device ADB identifier, a serial number ("abcde123") or an
      IP address and a port number ("12.34.56.78:5555").
    destination_path: Path to destination on host computer where bugreport
      should copied to.
    adb_path: optional alternative path to adb executable. If adb_path is not
      provided then path returned by get_adb_path will be used instead.

  Raises:
    RuntimeError: if adb_path is invalid or adb executable was not found by
      get_adb_path or bugreport failed.
    ValueError: if destination_path does not exist.

  Returns:
    Output from calling 'adb bugreport'.
  """
  # os.path.dirname returns "" for a bare file name; treat that as the
  # current directory instead of (incorrectly) rejecting the path.
  destination_dir = os.path.dirname(destination_path) or "."
  if not os.path.exists(destination_dir):
    raise ValueError(f"The destination_path directory {destination_dir} does "
                     "not exist.")
  output, returncode = _adb_command(command=["bugreport", destination_path],
                                    adb_serial=adb_identifier,
                                    adb_path=adb_path,
                                    include_return_code=True)
  if returncode != 0:
    raise RuntimeError(f"Getting bugreport on ADB device {adb_identifier} to "
                       f"{destination_path} failed. "
                       f"Error: {output!r} with return code {returncode}")
  return output
def enter_fastboot(adb_serial, adb_path=None):
  """Reboots the device into fastboot via 'adb reboot bootloader'.

  Args:
    adb_serial (str): Device serial number.
    adb_path (str): optional alternative path to adb executable

  Raises:
    RuntimeError: if adb_path is invalid or adb executable was not found by
      get_adb_path.

  Returns:
    str: Output from calling 'adb reboot' or None if the call fails with a
      non-zero return code.

  Note:
    If adb_path is not provided then the path returned by get_adb_path is
    used instead.
  """
  reboot_args = ("reboot", "bootloader")
  return _adb_command(reboot_args, adb_serial, adb_path=adb_path)
def exit_fastboot(fastboot_serial,
                  fastboot_path=None,
                  timeout=FASTBOOT_TIMEOUT):
  """Exits fastboot mode by calling 'fastboot reboot' for the fastboot_serial provided.

  Args:
    fastboot_serial (str): Device fastboot serial number.
    fastboot_path (str): optional alternative path to fastboot executable
    timeout (float): in seconds to wait for fastboot reboot to return

  Raises:
    RuntimeError: if fastboot_path is invalid or fastboot executable was not
      found by get_fastboot_path.

  Returns:
    str: Output from calling 'fastboot reboot' or None if the call fails
      with a non-zero return code or does not finish within timeout.

  Note:
    If fastboot_path is not provided then path returned by get_fastboot_path
    will be used instead.
  """
  if fastboot_path is None:
    fastboot_path = get_fastboot_path()
  if not os.path.exists(fastboot_path):
    raise RuntimeError(
        "The fastboot_path of {} appears to be invalid.".format(fastboot_path))
  args = (fastboot_path, "-s", fastboot_serial, "reboot")
  try:
    # Enforce the deadline with subprocess itself rather than shelling out
    # to the external "timeout" binary, which is not available on all hosts.
    return subprocess.check_output(
        args, stderr=subprocess.STDOUT,
        timeout=timeout).decode("utf-8", "replace")
  except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
    return None
def fastboot_unlock_device(fastboot_serial,
                           fastboot_path=None,
                           timeout=FASTBOOT_TIMEOUT):
  """Unlocks the device bootloader via 'fastboot flashing unlock'.

  Args:
    fastboot_serial (str): Device serial number.
    fastboot_path (str): optional alternative path to fastboot executable.
    timeout (float): in seconds to wait for fastboot command to return.

  Returns:
    str: response from fastboot command.
  """
  unlock_command = ("flashing", "unlock")
  return _fastboot_command(unlock_command,
                           fastboot_serial=fastboot_serial,
                           fastboot_path=fastboot_path,
                           timeout=timeout)
def fastboot_lock_device(fastboot_serial,
                         fastboot_path=None,
                         timeout=FASTBOOT_TIMEOUT):
  """Locks the device bootloader via 'fastboot flashing lock'.

  Args:
    fastboot_serial (str): Device serial number.
    fastboot_path (str): optional alternative path to fastboot executable.
    timeout (float): in seconds to wait for fastboot command to return.

  Returns:
    str: response from fastboot command.
  """
  lock_command = ("flashing", "lock")
  return _fastboot_command(lock_command,
                           fastboot_serial=fastboot_serial,
                           fastboot_path=fastboot_path,
                           timeout=timeout)
def fastboot_check_is_unlocked(fastboot_serial: str,
                               fastboot_path: Optional[str] = None,
                               timeout: float = FASTBOOT_TIMEOUT) -> bool:
  """Checks whether the device bootloader is unlocked.

  Args:
    fastboot_serial: Device serial number.
    fastboot_path: Optional alternative path to fastboot executable.
    timeout: In seconds to wait for fastboot command to return.

  Raises:
    RuntimeError: If the response is not in the expected format.

  Returns:
    True if the device is unlocked, else False.
  """
  raw_result = _fastboot_command(("getvar", "unlocked"),
                                 fastboot_serial=fastboot_serial,
                                 fastboot_path=fastboot_path,
                                 timeout=timeout)
  # The expected output resembles "unlocked: yes" (or "unlocked: no").
  status_match = re.search(r"unlocked: (yes|no)", raw_result)
  if status_match is None:
    raise RuntimeError(
        f"Unknown output from fastboot getvar unlocked: {raw_result}")
  return status_match.group(1) == "yes"
def fastboot_wipe_userdata(fastboot_serial,
                           fastboot_path=None,
                           timeout=FASTBOOT_TIMEOUT):
  """Wipes user data on the device via 'fastboot -w'.

  Args:
    fastboot_serial (str): Device serial number.
    fastboot_path (str): optional alternative path to fastboot executable.
    timeout (float): in seconds to wait for fastboot command to return.

  Returns:
    str: response from fastboot command.
  """
  wipe_flag = "-w"
  return _fastboot_command(wipe_flag,
                           fastboot_serial=fastboot_serial,
                           fastboot_path=fastboot_path,
                           timeout=timeout)
def enter_sideload(adb_serial, adb_path=None, auto_reboot=False):
  """Reboots the device into sideload mode via 'adb reboot sideload'.

  Args:
    adb_serial (str): Device serial number.
    adb_path (str): optional alternative path to adb executable.
    auto_reboot (bool): whether to auto reboot after sideload complete.

  Raises:
    RuntimeError: if adb_path is invalid or adb executable was not found by
      get_adb_path.

  Returns:
    str: Output from command call.
  """
  # "sideload-auto-reboot" makes the device reboot itself once sideload ends.
  target = "sideload-auto-reboot" if auto_reboot else "sideload"
  return _adb_command(("reboot", target),
                      adb_serial=adb_serial,
                      adb_path=adb_path)
def sideload_package(package_path, adb_serial, adb_path=None):
  """Performs the "adb sideload <package>" command.

  Args:
    package_path (str): the path of the package to sideload.
    adb_serial (str): Device serial number.
    adb_path (str): optional alternative path to adb executable.

  Returns:
    str: the command output.

  Raises:
    RuntimeError: if package_path is invalid.
  """
  if not os.path.isfile(package_path):
    raise RuntimeError(
        "sideload_package failed: {} is not a file.".format(package_path))
  sideload_command = ("sideload", package_path)
  return _adb_command(sideload_command,
                      adb_serial=adb_serial,
                      adb_path=adb_path)
def adb_devices(
    adb_path: Optional[str] = None,
    state: Optional[AdbDeviceState] = None
) -> List[Tuple[str, AdbDeviceState]]:
  """Returns parsed output of 'adb devices'.

  Args:
    adb_path: Optional alternative path to the 'adb' executable. Defaults to
      the path returned by get_adb_path().
    state: If provided, only include devices in the given ADB state.

  Returns:
    List of (device identifier, device ADB state) tuples found in
    'adb devices'. The device identifiers are either serial numbers
    ("abcde123") or IP addresses and ports ("12.34.56.78:5555").
  """
  try:
    raw_output = _adb_command("devices", adb_path=adb_path)
  except RuntimeError as err:
    logger.warning(repr(err))
    return []
  lines = raw_output.splitlines()
  header = "List of devices attached"
  if header not in lines:
    return []
  results = []
  # Device entries follow the header, one per line, as "<id>\t<state>".
  for line in lines[lines.index(header) + 1:]:
    if not line or "\t" not in line:
      continue
    identifier, _, device_state_str = line.partition("\t")
    if "no permissions" in device_state_str:
      # Reasons and URLs vary: "no permissions (<reason>); see [<url>]".
      device_state = AdbDeviceState.NO_PERMISSIONS
    else:
      try:
        device_state = AdbDeviceState(device_state_str)
      except ValueError as e:
        logger.debug(
            f"Failed to parse ADB state {device_state_str!r}. Error: {e!r}")
        device_state = AdbDeviceState.UNRECOGNIZED
    if state is None or state == device_state:
      results.append((identifier, device_state))
  return results
def get_adb_devices(adb_path: Optional[str] = None) -> List[str]:
  """Returns ADB device identifiers of available ("device") ADB devices.

  Args:
    adb_path: Optional alternative path to the 'adb' executable. Defaults to
      the path returned by get_adb_path().

  Returns:
    ADB device identifiers of available ("device") ADB devices. The device
    identifiers are either serial numbers ("abcde123") or IP addresses and
    ports ("12.34.56.78:5555").
  """
  available = adb_devices(adb_path, AdbDeviceState.DEVICE)
  return [identifier for identifier, _ in available]
def get_sideload_devices(adb_path: Optional[str] = None) -> List[str]:
  """Returns ADB device identifiers of devices in "sideload" mode.

  Args:
    adb_path: Optional alternative path to the 'adb' executable. Defaults to
      the path returned by get_adb_path().

  Returns:
    ADB device identifiers of devices in "sideload" mode. The device
    identifiers are either serial numbers ("abcde123") or IP addresses and
    ports ("12.34.56.78:5555").
  """
  sideloading = adb_devices(adb_path, AdbDeviceState.SIDELOAD)
  return [identifier for identifier, _ in sideloading]
def is_adb_mode(adb_identifier: str, adb_path: Optional[str] = None) -> bool:
  """Returns whether the ADB identifier is shown as a "device" in 'adb devices'.

  Args:
    adb_identifier: Device ADB identifier, a serial number ("abcde123") or an
      IP address and a port number ("12.34.56.78:5555").
    adb_path: An optional alternative path to the 'adb' executable. If not
      provided, the path returned by get_adb_path() is used instead.
  """
  available = adb_identifier in get_adb_devices(adb_path=adb_path)
  if not re.search(host_utils.IP_ADDRESS, adb_identifier):
    return available
  # Devices connected through ADB over IP can show up as available ("device")
  # in 'adb devices' even if the device is offline, so also require the IP
  # (minus the port number) to respond to ping.
  ip_address, _, _ = adb_identifier.partition(":")
  return available and host_utils.is_pingable(ip_address)
def is_sideload_mode(
    adb_identifier: str, adb_path: Optional[str] = None) -> bool:
  """Returns True if the adb_identifier is in sideload mode.

  Args:
    adb_identifier: Device ADB identifier, a serial number ("abcde123") or an
      IP address and a port number ("12.34.56.78:5555").
    adb_path: An optional alternative path to the 'adb' executable. If not
      provided, the path returned by get_adb_path() is used instead.
  """
  sideload_identifiers = get_sideload_devices(adb_path=adb_path)
  return adb_identifier in sideload_identifiers
def get_adb_path(adb_path=None):
  """Returns the correct adb path to use.

  Args:
    adb_path (str): path to "adb" executable.

  Notes: Starts with passed in path, then looks at config, and finally
    system's default adb if available.

  Raises:
    RuntimeError: if no valid adb path could be found

  Returns:
    str: Path to correct adb executable to use.
  """
  # Priority 1: an explicitly provided path that exists on disk.
  if is_valid_path(adb_path):
    return adb_path
  # Priority 2: the adb path recorded in the GDM config file, if readable.
  try:
    with open(config.DEFAULT_GDM_CONFIG_FILE) as config_file:
      gdm_config = json.load(config_file)
      adb_path = gdm_config[config.ADB_BIN_PATH_CONFIG]
  except (IOError, KeyError, ValueError):
    # Missing/unreadable config, absent key, or malformed JSON: fall through.
    pass
  if is_valid_path(adb_path):
    return adb_path
  elif adb_path:
    # A path was configured but doesn't exist; warn before falling back.
    logger.warning("adb path {!r} stored in {} does not exist."
                   .format(adb_path, config.DEFAULT_GDM_CONFIG_FILE))
  # Priority 3: whatever 'adb' resolves to on the system PATH.
  if host_utils.has_command("adb"):
    return host_utils.get_command_path("adb")
  raise RuntimeError("No valid adb path found using 'which adb'")
def is_valid_path(path):
  """Returns True if path is a non-empty string naming an existing filesystem entry.

  Args:
    path (str): filesystem path to check; may be None or empty.

  Returns:
    bool: True if path is truthy and exists on the filesystem, else False.
  """
  # bool() normalizes the falsy passthrough: previously a None/"" argument was
  # returned as-is, leaking a non-boolean falsy value to callers.
  return bool(path) and os.path.exists(path)
def get_adb_over_ip_identifier(
    adb_identifier: str, port: int = DEFAULT_PORT) -> str:
  """Normalizes an IP-based ADB identifier to the 'IP_address:port' format.

  If the provided adb_identifier has no explicit port number, the given port
  (default ":5555") is appended.

  Args:
    adb_identifier: IP address or IP address and a port number.
    port: TCP port number.

  Raises:
    ValueError: if adb_identifier is not an IP address.
  """
  if not re.search(host_utils.IP_ADDRESS, adb_identifier):
    raise ValueError(f"ADB identifier {adb_identifier!r} is not an IP address.")
  if ":" in adb_identifier:
    return adb_identifier
  return f"{adb_identifier}:{port}"
def _is_connect_successful(output_and_return_code: Tuple[str, int]) -> bool:
"""Returns whether the connect() call was successful.
Args:
output_and_return_code: Output and return code of 'adb connect' command.
Returns:
True if the return code of 'adb connect' is 0 and there are no failure
markers in the command output. Note that 'adb connect' can return a 0 exit
code even when it fails, which is why we have to check for failure markers.
"""
output, return_code = output_and_return_code
failure_markers = [
"failed to connect",
"failed to resolve host",
"missing port in specification",
"unable to connect",
]
return (return_code == 0 and
all(marker not in output for marker in failure_markers))
def connect(adb_identifier: str, attempts: int = 3) -> str:
  """Connects to the device via ADB and returns the command output.

  Args:
    adb_identifier: IP address ("12.34.56.78") or IP address and a port number
      ("12.34.56.78:5555"). If a port number is not provided, defaults to 5555.
    attempts: Number of attempts for performing 'adb connect'.

  Raises:
    DeviceError: if 'adb connect' fails, or adb_identifier is not found in
      'adb devices' after 'adb connect'.

  Returns:
    Output of the 'adb connect' command.
  """
  adb_identifier = get_adb_over_ip_identifier(adb_identifier)
  retry_interval_s = 3
  # Phase 1: retry 'adb connect' until its output/return code look successful.
  try:
    output, _ = retry.retry(
        func=_adb_command,
        func_kwargs={
            "command": ["connect", adb_identifier],
            "include_return_code": True,
        },
        is_successful=_is_connect_successful,
        timeout=retry_interval_s * attempts,
        interval=retry_interval_s,
        reraise=False)
  except errors.CommunicationTimeoutError as e:
    raise errors.DeviceError(
        f"Unable to connect to device {adb_identifier!r} via ADB. Error: {e!r}")
  # Phase 2: confirm the device actually appears in 'adb devices' (the
  # connect output alone is not a reliable availability signal).
  try:
    retry.retry(
        func=is_adb_mode,
        func_args=[adb_identifier],
        is_successful=bool,
        timeout=retry_interval_s * attempts,
        interval=retry_interval_s)
  except errors.CommunicationTimeoutError as e:
    raise errors.DeviceError(
        f"{adb_identifier!r} was not found in 'adb devices' after "
        f"'adb connect'. Error: {e!r}")
  return output
def _is_disconnected(adb_identifier: str) -> bool:
  """Returns True if adb_identifier is not present in 'adb devices'."""
  known_identifiers = {adb_id for adb_id, _ in adb_devices()}
  return adb_identifier not in known_identifiers
def disconnect(adb_identifier: str, attempts: int = 3) -> str:
  """Disconnects ADB from the device and returns the command output.

  Args:
    adb_identifier: IP address ("12.34.56.78") or IP address and a port number
      ("12.34.56.78:5555"). If a port number is not provided, defaults to 5555.
    attempts: Number of attempts for performing 'adb disconnect'.

  Raises:
    DeviceError: if 'adb disconnect' fails, or if adb_identifier is still
      present in 'adb devices' after 'adb disconnect'.

  Returns:
    Output of the 'adb disconnect' command.
  """
  adb_identifier = get_adb_over_ip_identifier(adb_identifier)
  retry_interval_s = 3
  # Phase 1: retry 'adb disconnect' until it exits with return code 0.
  try:
    output, _ = retry.retry(
        func=_adb_command,
        func_kwargs={
            "command": ["disconnect", adb_identifier],
            "include_return_code": True,
        },
        is_successful=lambda output_and_code: output_and_code[1] == 0,
        timeout=retry_interval_s * attempts,
        interval=retry_interval_s,
        reraise=False)
  except errors.CommunicationTimeoutError as e:
    raise errors.DeviceError(
        f"Unable to disconnect ADB from device {adb_identifier!r}. "
        f"Error: {e!r}")
  # Phase 2: confirm the device no longer appears in 'adb devices'.
  try:
    retry.retry(
        func=_is_disconnected,
        func_kwargs={"adb_identifier": adb_identifier},
        is_successful=bool,
        timeout=retry_interval_s * attempts,
        interval=retry_interval_s)
  except errors.CommunicationTimeoutError as e:
    # Bug fix: the message previously blamed 'adb connect'; this failure
    # happens after 'adb disconnect'.
    raise errors.DeviceError(
        f"{adb_identifier!r} was still found in 'adb devices' after "
        f"'adb disconnect'. Error: {e!r}")
  return output
def shell(adb_serial: str,
          command: str,
          adb_path: Optional[str] = None,
          timeout: Optional[int] = None,
          retries: int = 1,
          include_return_code: bool = False) -> Any:
  """Runs a command on the shell of the device identified by adb_serial.

  Args:
    adb_serial: Device serial number.
    command: Command to send.
    adb_path: Optional alternative path to adb executable.
    timeout: Time in seconds to wait for adb process to complete.
    retries: Number of times to retry adb command.
    include_return_code: A flag indicating return code should also be
      returned.

  Returns:
    Response string if include_return_code is False; (response, return code)
    tuple otherwise.
  """
  shell_command = ["shell", command]
  return _adb_command(shell_command,
                      adb_serial=adb_serial,
                      adb_path=adb_path,
                      timeout=timeout,
                      retries=retries,
                      include_return_code=include_return_code)
def get_fastboot_devices(fastboot_path=None):
  """Returns list of ADB devices in fastboot (bootloader) mode.

  Args:
    fastboot_path (str): optional alternative path to fastboot executable.

  Returns:
    list: A list of ADB device serial numbers in fastboot mode.

  Note:
    If fastboot_path is not provided then path returned by get_fastboot_path
    will be used instead.
    If fastboot path invalid, will return empty list.
  """
  try:
    fastboot_path = get_fastboot_path(fastboot_path)
  except RuntimeError as err:
    logger.warning(repr(err))
    return []
  try:
    raw = subprocess.check_output((fastboot_path, "devices"),
                                  stderr=subprocess.STDOUT)
  except subprocess.CalledProcessError:
    return []
  decoded = raw.decode("utf-8", "replace")
  # "astboot" matches device lines regardless of "fastboot"/"Fastboot" casing.
  return [line.split()[0]
          for line in decoded.splitlines() if "astboot" in line]
def get_fastboot_path(fastboot_path=None):
  """Returns the fastboot executable path to use.

  Args:
    fastboot_path (str): path to "fastboot" executable.

  Raises:
    RuntimeError: if no valid fastboot executable could be found.

  Returns:
    str: Path to correct fastboot executable to use.
  """
  if not is_valid_path(fastboot_path):
    # Fall back to the system PATH when no usable path was supplied.
    if not host_utils.has_command("fastboot"):
      raise RuntimeError("No valid fastboot path found using 'which fastboot'")
    fastboot_path = host_utils.get_command_path("fastboot")
  return fastboot_path
def is_device_online(adb_serial, adb_path=None, fastboot_path=None):
  """Returns True if the device appears in 'adb devices' or 'fastboot devices'.

  Args:
    adb_serial (str): Device serial number.
    adb_path (str): optional alternative path to adb executable.
    fastboot_path (str): optional alternative path to fastboot executable.

  Returns:
    bool: True if device is in adb or fastboot mode. False otherwise.

  Note:
    If adb_path is not provided then path returned by get_adb_path will be
    used instead. If fastboot_path is not provided then path returned by
    get_fastboot_path will be used instead.
  """
  if is_adb_mode(adb_serial, adb_path=adb_path):
    return True
  return is_fastboot_mode(adb_serial, fastboot_path=fastboot_path)
def is_fastboot_mode(adb_serial, fastboot_path=None):
  """Checks if device is in fastboot mode.

  Args:
    adb_serial (str): Device serial number.
    fastboot_path (str): optional alternative path to fastboot executable.

  Raises:
    RuntimeError: if fastboot_path is invalid or fastboot executable was
      not found by get_fastboot_path.

  Returns:
    bool: True if device is in fastboot mode. False otherwise.

  Note:
    If fastboot_path is not provided then path returned by get_fastboot_path
    will be used instead.
  """
  fastboot_serials = get_fastboot_devices(fastboot_path=fastboot_path)
  return adb_serial in fastboot_serials
def pull_from_device(adb_serial, sources, destination_path="./", adb_path=None):
  """Pulls sources from device to destination_path on host for adb_serial provided.

  Args:
    adb_serial (str): Device serial number.
    sources (str or list): Path to one or more source files on device to
      copy to host.
    destination_path (str): Path to destination on host computer where file
      should be copied to.
    adb_path (str): optional alternative path to adb executable.

  Raises:
    RuntimeError: if adb_path is invalid, adb executable was not found by
      get_adb_path, or the pull failed.
    ValueError: if destination_path directory doesn't exist.

  Returns:
    str: Output from calling 'adb pull'.

  Note:
    If adb_path is not provided then path returned by get_adb_path will be
    used instead. If no destination_path is provided the file will be copied
    to the current working directory on the host computer.
  """
  destination_dir = os.path.dirname(destination_path)
  # Bug fix: a bare filename (e.g. "out.txt") has an empty dirname, which
  # previously failed the os.path.exists("") check and raised a spurious
  # ValueError. Empty and "." both mean the current working directory.
  if destination_dir not in ("", ".") and not os.path.exists(destination_dir):
    raise ValueError(
        "The destination_path directory {} appears to be invalid.".format(
            destination_dir))
  source_list = sources if isinstance(sources, list) else [sources]
  args = ["pull"] + source_list + [destination_path]
  output, returncode = _adb_command(
      args, adb_serial, adb_path=adb_path, include_return_code=True)
  if returncode != 0:
    raise RuntimeError("Pulling file(s) {} on ADB device {} to {} failed. "
                       "Error: {!r}".format(sources, adb_serial,
                                            destination_path, output))
  return output
def push_to_device(adb_serial, sources, destination_path, adb_path=None):
  """Pushes sources to destination_path on device for adb_serial provided.

  Args:
    adb_serial (str): Device serial number.
    sources (str or list): Path to one or more source files on host computer
      to copy to device.
    destination_path (str): Path to destination on device where file should
      be copied to.
    adb_path (str): optional alternative path to adb executable.

  Raises:
    RuntimeError: if adb_path is invalid, adb executable was not found by
      get_adb_path, or the push failed.
    ValueError: if any source path doesn't exist.

  Returns:
    str: Output from calling 'adb push'.

  Note:
    If adb_path is not provided then path returned by get_adb_path will be
    used instead.
  """
  source_list = sources if isinstance(sources, list) else [sources]
  # Validate every source up front so no adb call is built around a bad path
  # (previously validation was interleaved with argument construction).
  for source_path in source_list:
    if not os.path.exists(source_path):
      raise ValueError(
          "The source file {} appears to be invalid.".format(source_path))
  args = ["push"] + source_list + [destination_path]
  output, returncode = _adb_command(
      args, adb_serial, adb_path=adb_path, include_return_code=True)
  if returncode != 0:
    raise RuntimeError("Pushing file(s) {} to {} on ADB device {} failed. "
                       "Error: {!r}".format(sources, destination_path,
                                            adb_serial, output))
  return output
def reboot_device(adb_serial, adb_path=None, retries=1):
  """Reboots the device by calling 'adb reboot' for the adb_serial provided.

  Args:
    adb_serial (str): Device serial number.
    adb_path (str): optional alternative path to adb executables.
    retries (int): number of times to retry adb command.

  Raises:
    RuntimeError: if adb_path is invalid or adb executable was not found by
      get_adb_path.

  Returns:
    str: Output from calling 'adb reboot'.

  Note:
    If adb_path is not provided then path returned by get_adb_path will be
    used instead.
  """
  return _adb_command("reboot",
                      adb_serial=adb_serial,
                      adb_path=adb_path,
                      retries=retries)
def root_device(adb_serial, adb_path=None):
  """Restarts adbd with root privileges via 'adb root' for the adb_serial provided.

  Args:
    adb_serial (str): Device serial number.
    adb_path (str): optional alternative path to adb executable.

  Raises:
    RuntimeError: if adb_path is invalid or adb executable was not found by
      get_adb_path.

  Returns:
    str: Output from calling 'adb root'.

  Note:
    If adb_path is not provided then path returned by get_adb_path will be
    used instead.
  """
  return _adb_command("root", adb_serial=adb_serial, adb_path=adb_path)
def verify_user_has_fastboot(device_name):
  """Verifies fastboot available and user is root or in plugdev group.

  Args:
    device_name (str): Device name to use in error output.

  Raises:
    DeviceError: Fastboot is not on computer OR
      'plugdev' group doesn't exist OR
      current user is not in the 'plugdev' group.
  """
  # Guard clause: nothing to verify when the executable is present.
  if host_utils.has_command("fastboot"):
    return
  raise errors.DeviceError("Device {} verify user has fastboot failed. "
                           "Fastboot executable is not installed. "
                           "See readme about installing adb (which installs "
                           "fastboot) then su -$USER (or logout and back in) "
                           "to add user to plugdev group".format(device_name))
def wait_for_device(adb_identifier: str, timeout: float) -> None:
  """Blocks until the device is detected by ADB or the timeout elapses.

  Args:
    adb_identifier: ADB device identifier, e.g. serial number or IP address.
    timeout: Time in seconds to wait before giving up.

  Raises:
    CommunicationTimeoutError: Timed out waiting for device detection.
  """
  result = _adb_command(command="wait-for-device",
                        adb_serial=adb_identifier,
                        timeout=timeout,
                        include_return_code=True)
  if result[1] != 0:
    raise errors.CommunicationTimeoutError(
        f"Timeout waiting for device {adb_identifier} after {timeout} seconds")
def wait_for_device_offline(adb_identifier: str, timeout: float = 20,
                            check_interval: float = 1) -> None:
  """Waits until the device is not seen via adb, e.g. reboot started.

  Args:
    adb_identifier: ADB device identifier, e.g. serial number or IP address.
    timeout: Time in seconds to wait before raising timeout.
    check_interval: Interval between checks to see if device is offline.

  Raises:
    CommunicationTimeoutError: Timed out waiting for device to go offline.
  """
  # time.monotonic is immune to wall-clock adjustments (NTP, DST), which
  # could make a time.time based deadline fire early or never.
  deadline = time.monotonic() + timeout
  while is_adb_mode(adb_identifier):
    if time.monotonic() > deadline:
      raise errors.CommunicationTimeoutError(
          f"Device {adb_identifier} did not go offline in {timeout} seconds.")
    time.sleep(check_interval)
def _adb_command(command,
                 adb_serial=None,
                 adb_path=None,
                 include_return_code=False,
                 timeout=None,
                 retries=1):
  """Returns the output of the adb command and optionally the return code.

  Args:
    command (str or tuple): ADB command and optionally arguments to execute.
    adb_serial (str): Device serial number
    adb_path (str): optional alternative path to adb executable
    include_return_code (bool): flag indicating return code should also be
      returned.
    timeout (int): time in seconds to wait for adb process to complete.
    retries (int): number of times to retry adb command.

  Raises:
    RuntimeError: if adb_path provided or obtained from get_adb_path is
      invalid (executable at path doesn't exist).
    DeviceError: if include_return_code is False and every attempt's output
      contained a known adb failure message.

  Returns:
    str: The ADB command output (including stderr)
    tuple: The ADB command output (including stderr) and return code

  Note:
    The stderr is redirected to stdout so callers should use the return code
    or search the output for known errors if they want to determine if the
    command succeeded or not.
  """
  adb_path = get_adb_path(adb_path)
  # Build "adb [-s <serial>] <command...>".
  if adb_serial is None:
    args = [adb_path]
  else:
    args = [adb_path, "-s", adb_serial]
  if isinstance(command, str):
    args.append(command)
  elif isinstance(command, (list, tuple)):
    args.extend(command)
  for i in range(0, retries):
    proc = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    try:
      output, _ = proc.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
      # Kill the hung adb process, then collect whatever output it produced.
      proc.terminate()
      output, _ = proc.communicate()
    output = output.decode("utf-8", "replace")
    logger.debug("adb command {!r} to {} returned {!r}".format(
        command, adb_serial, output))
    if include_return_code:
      # Callers asking for the return code get the first attempt's result;
      # no output-based retrying happens in this mode.
      return output, proc.returncode
    adb_failure_messages = ["error: closed", "adb: device offline"]
    if not any(msg in output for msg in adb_failure_messages):
      return output
    # Known transient failure message seen: retry unless attempts exhausted.
    if i < retries - 1:
      logger.info(f"Retrying adb command: {command} in {ADB_RETRY_SLEEP}s")
      time.sleep(ADB_RETRY_SLEEP)
  raise errors.DeviceError(
      f"ADB command failed: {command} with output: {output}")
def _fastboot_command(command,
                      fastboot_serial=None,
                      fastboot_path=None,
                      include_return_code=False,
                      timeout=FASTBOOT_TIMEOUT):
  """Returns the output of the fastboot command and optionally the return code.

  Args:
    command (str or tuple): fastboot command and optionally arguments to
      execute.
    fastboot_serial (str): Device fastboot serial number.
    fastboot_path (str): optional alternative path to fastboot executable
    include_return_code (bool): flag indicating return code should also be
      returned.
    timeout (float): in seconds to wait for fastboot command to return

  Raises:
    RuntimeError: if fastboot_path provided or obtained from
      get_fastboot_path is invalid (executable at path doesn't exist).

  Returns:
    str: The fastboot command output (including stderr)
    tuple: The fastboot command output (including stderr) and return code

  Note:
    The stderr is redirected to stdout so callers should use the return code
    or search the output for known errors if they want to determine if the
    command succeeded or not.
  """
  if fastboot_path is None:
    fastboot_path = get_fastboot_path()
  if not os.path.exists(fastboot_path):
    raise RuntimeError(
        "The fastboot_path of {} appears to be invalid.".format(fastboot_path))
  # The command is wrapped in the external 'timeout' utility so a fastboot
  # invocation cannot block indefinitely.
  if fastboot_serial is None:
    args = ["timeout", str(timeout), fastboot_path]
  else:
    args = ["timeout", str(timeout), fastboot_path, "-s", fastboot_serial]
  if isinstance(command, str):
    args.append(command)
  elif isinstance(command, (list, tuple)):
    args.extend(command)
  proc = subprocess.Popen(
      args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  output, _ = proc.communicate()
  output = output.decode("utf-8", "replace")
  if include_return_code:
    return output, proc.returncode
  return output
def install_package_on_device(package_path: str,
                              adb_serial: Optional[str] = None,
                              adb_path: Optional[str] = None,
                              allow_downgrade: bool = False,
                              allow_test_apk: bool = False,
                              reinstall: bool = False,
                              all_permissions: bool = False) -> None:
  """Installs an apk on a target device.

  Use adb install command to install a package to the system.
  The options are subjected to the adb install command. See the doc.
  https://developer.android.com/studio/command-line/adb#shellcommands

  Args:
    package_path: The path to the package on host machine.
    adb_serial: The device serial, optional.
    adb_path: An optional alternative path to adb executable.
    allow_downgrade: Allows version code downgrade.
    allow_test_apk: Allows test APKs to be installed.
    reinstall: Reinstalls an existing app and keeps its data.
    all_permissions: Grants all runtime permission to the app.

  Raises:
    ValueError: when package_path is not valid.
    DeviceError: when installation failed.
  """
  if not os.path.exists(package_path):
    raise ValueError(
        "install_package_on_device received invalid package_path: {}".format(
            package_path))
  option_flags = {
      "-d": allow_downgrade,
      "-t": allow_test_apk,
      "-r": reinstall,
      "-g": all_permissions,
  }
  command_list = ["install"]
  # Sort flags so the generated command line is deterministic.
  command_list.extend(
      sorted(flag for flag, enabled in option_flags.items() if enabled))
  command_list.append(package_path)
  response = _adb_command(
      tuple(command_list), adb_serial=adb_serial, adb_path=adb_path)
  if "Success\n" not in response:
    raise errors.DeviceError(
        "install_package_on_device failed: {}".format(response))
def uninstall_package_on_device(package_name, adb_serial=None, adb_path=None):
  """Uninstall a package on a target device.

  Args:
    package_name (str): the name of the package, e.g.,
      "com.google.android.apps.somepackage.someapp".
    adb_serial (str): the device serial, optional.
    adb_path (str): optional alternative path to adb executable.

  Raises:
    DeviceError: when uninstall failed.
  """
  response = _adb_command(("uninstall", package_name),
                          adb_serial=adb_serial,
                          adb_path=adb_path)
  if "Success\n" not in response:
    # Include the adb response in the error for debuggability, consistent
    # with install_package_on_device.
    raise errors.DeviceError(
        "uninstall_package_on_device failed: {}".format(response))
def add_port_forwarding(host_port: int,
                        device_port: int,
                        adb_serial: Optional[str] = None,
                        adb_path: Optional[str] = None) -> str:
  """Forwards the socket connection from host to device.

  Args:
    host_port: The port on the host side.
    device_port: The port on the device side.
    adb_serial: The device serial, optional.
    adb_path: Alternative path to adb executable.

  Raises:
    RuntimeError: If adding the port forwarding fails.

  Returns:
    The command output.
  """
  forward_args = ("forward", f"tcp:{host_port}", f"tcp:{device_port}")
  output, returncode = _adb_command(forward_args,
                                    adb_serial=adb_serial,
                                    adb_path=adb_path,
                                    include_return_code=True)
  if returncode == 0:
    return output
  raise RuntimeError(
      f"Failed to add port forwarding on device serial:{adb_serial} from "
      f"host port {host_port} to device port {device_port}. {output}")
def remove_port_forwarding(host_port: int,
                           adb_serial: Optional[str] = None,
                           adb_path: Optional[str] = None) -> str:
  """Removes a forward socket connection.

  Args:
    host_port: The port on the host side.
    adb_serial: The device serial, optional.
    adb_path: Alternative path to adb executable.

  Raises:
    RuntimeError: If the port forwarding doesn't exist.

  Returns:
    The command output.
  """
  remove_args = ("forward", "--remove", f"tcp:{host_port}")
  output, returncode = _adb_command(remove_args,
                                    adb_serial=adb_serial,
                                    adb_path=adb_path,
                                    include_return_code=True)
  if returncode == 0:
    return output
  raise RuntimeError(
      f"Failed to remove port forwarding on device serial: {adb_serial} "
      f"on host port {host_port}. {output}")
def tcpip(adb_serial: Optional[str] = None,
          port: Optional[int] = DEFAULT_PORT,
          adb_path: Optional[str] = None) -> str:
  """Restarts adbd listening on the provided TCP port for the adb_serial.

  Args:
    adb_serial: ADB serial number.
    port: TCP port to listen on.
    adb_path: Alternative path to 'adb' executable.

  Raises:
    RuntimeError: If the command fails.

  Returns:
    The command output.
  """
  output, return_code = _adb_command(
      ["tcpip", str(port)],
      adb_serial=adb_serial,
      adb_path=adb_path,
      include_return_code=True)
  if return_code == 0:
    return output
  raise RuntimeError(
      f"ADB failed to start listening on port {port} for ADB serial "
      f"{adb_serial}. Return code: {return_code}, output: {output}.")
# ============================================================================
import py
from prolog.interpreter.parsing import TermBuilder
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.continuation import Heap, Engine
from prolog.interpreter import error
from prolog.interpreter.test.tool import collect_all, assert_false, assert_true
from prolog.interpreter.test.tool import prolog_raises
def test_or():
    """Disjunction: fails when every branch fails, including inside a clause body."""
    assert_false("fail;fail.")
    e = get_engine("""
        f(X, Y) :-
            ( fail
            ; X \== Y
            ).
    """)
    # X and X are identical, so the second branch (X \== Y) also fails.
    assert_false("f(X,X).", e)
def test_fail():
    """fail/0 forces backtracking: only the f(a) fact yields a solution."""
    e = get_engine("""
        g(a).
        f(X) :- g(X), fail.
        f(a).
    """)
    # The first clause always fails after g(X); only the fact f(a) succeeds.
    heaps = collect_all(e, "f(X).")
    assert len(heaps) == 1
def test_not():
    """Negation as failure via not/1 and \\+/1, both standalone and in a clause."""
    assert_true("not(fail).")
    assert_false("not(true).")
    e = get_engine("""
        g(a, a).
        g(b, a).
        g(b, b).
        m(o, a).
        m(o, b).
        m(o, c).
        same(X, X).
        sibling(X, Y) :- m(Z, X), m(Z, Y), \\+same(X, Y).
    """)
    assert_true("not(g(b, c)).", e)
    assert_false("not(g(a, a)).", e)
    # \+ is the ISO spelling of negation as failure.
    assert_true("\\+(g(b, c)).", e)
    assert_false("\\+(g(a, a)).", e)
    # a's siblings are b and c (sharing parent o), excluding a itself.
    heaps = collect_all(e, "sibling(a, X).")
    assert len(heaps) == 2
def test_and():
    """Conjunction with fail fails; a non-callable conjunct raises a type error."""
    assert_false("fail, X.")
    # The integer 1 is not a callable goal, so a type error is expected.
    prolog_raises("type_error(callable, 1)", "(fail, 1)")
def test_nonvar():
    """nonvar/1 succeeds only for arguments instantiated at call time."""
    e = get_engine("""
        g(X) :- nonvar(X).
        g(x, X) :- nonvar(x), nonvar(X).
        f(X, Y) :- nonvar(X), nonvar(Y).
    """)
    assert_true("g(a).", e)
    assert_false("g(X).", e)
    assert_true("g(x).", e)
    assert_true("g(x, a).", e)
    # Head unification binds X to x, so the second argument is instantiated.
    assert_true("g(X, X).", e)
    # Two occurrences of the same unbound variable are still unbound.
    assert_false("f(X, X).", e)
def test_consult():
    """consult/1 adds clauses from a file without dropping existing ones."""
    p = py.test.ensuretemp("prolog")
    f = p.join("test.pl")
    f.write("g(a, a). g(a, b).")
    e = get_engine("g(c, c).")
    assert_true("g(c, c).", e)
    assert_true("consult('%s')." % (f, ), e)
    # The pre-existing clause survives and the consulted ones are added.
    assert_true("g(c, c).", e)
    assert_true("g(a, a).", e)
    assert_true("g(a, b).", e)
    # Consulting a nonexistent file raises some error.
    prolog_raises("_", "consult('/hopefully/does/not/exist')")
def test_assert_retract():
    # assert/1 adds clauses at runtime; retract/1 removes one matching clause.
    e = get_engine("g(b, b).")
    assert_true("g(B, B).", e)
    assert_true("assert(g(c, d)).", e)
    assert_true("assert(g(a, b)).", e)
    assert_true("assert(g(a, b)).", e)  # assert the same rule multiple times
    assert_true("g(B, B).", e)
    assert_true("g(a, b).", e)
    assert_true("g(c, d).", e)
    assert_true("retract(g(B, B)).", e)
    assert_false("g(B, B).", e)
    # g(a, b) was asserted twice, so it takes two retracts to remove it.
    assert_true("retract(g(a, b)).", e)
    assert_true("g(a, b).", e)
    assert_true("retract(g(a, b)).", e)
    assert_false("retract(g(a, b)).", e)
    assert_false("g(a, b).", e)
    assert_true("g(c, d).", e)
    e = get_engine("""
        g(b, b).
        f(X) :- g(X, b).
        f(a).
    """)
    assert_true("f(b).", e)
    assert_true("f(a).", e)
    # Retracting a full clause also unifies the variables of its body.
    assert_true("retract(f(X) :- g(X, Y)), Y == b.", e)
    assert_false("f(b).", e)
    assert_true("f(a).", e)
    # Builtins must not be retractable.
    prolog_raises("permission_error(X, Y, Z)", "retract(atom(X))")


def test_assert_at_right_end():
    # assert/assertz append at the end of a predicate; asserta prepends.
    e = get_engine("g(b, b). f(b, b). h(b, b).")
    assert_true("assert(g(a, a)).", e)
    assert_true("assertz(f(a, a)).", e)
    assert_true("A = a, asserta(h(A, A)).", e)
    f = assert_true("g(B, B).", e)
    assert f['B'].name() == "b"
    f = assert_true("f(B, B).", e)
    assert f['B'].name() == "b"
    assert_false("h(c, c).", e)
    # asserta put h(a, a) in front of h(b, b), so a is found first.
    f = assert_true("h(B, B).", e)
    assert f['B'].name() == "a"


def test_assert_logical_update_view():
    # Logical update view: a running call must not see clauses that are
    # asserted after the call started.
    e = get_engine("""
        g(a).
        g(c) :- assertz(g(d)).
        g(b).
    """)
    heaps = collect_all(e, "g(X).")
    assert len(heaps) == 3
    e = get_engine("""
        p :- assertz(p), fail.
        p :- fail.
    """)
    assert_false("p.", e)
    e = get_engine("""
        q :- fail.
        q :- assertz(q), fail.
    """)
    assert_false("q.", e)
def test_assert_retract_colon():
    # ':' is an ordinary functor; its predicates can be asserted/retracted.
    e = get_engine("""
        :(1, 2, 3).
        :(a).
    """)
    assert_true(":(1, 2, 3), :(a).", e)
    assert_true("assert(:(a, b, c, d)).", e)
    assert_true(":(a, b, c, d).", e)
    assert_true("retract(:(a, b, c, d)).", e)
    # After the retract, :/4 no longer exists.
    prolog_raises("existence_error(_, _)", ":(a, b, c, d)", e)


def test_abolish_colon():
    # abolish/1 removes a whole predicate, keyed by name/arity.
    e = get_engine("""
        :(a).
        :(1, 2, 3).
    """)
    assert_true("abolish(:/1).", e)
    prolog_raises("existence_error(_, _)", ":(a)", e)
    # Only :/1 was removed; :/3 is a different predicate and survives.
    assert_true(":(1, 2, 3).", e)
    assert_true("abolish(:/3).", e)
    prolog_raises("existence_error(_, _)", ":(1, 2, 3)", e)


def test_retract_logical_update_view():
    # The first call still runs the clause list captured at call time even
    # though the clause retracts itself; the second call sees the change.
    e = get_engine("""
        p :- retract(p :- true), fail.
        p :- true.
    """)
    assert_true("p.", e)
    assert_false("p.", e)


def test_abolish():
    e = get_engine("g(b, b). g(c, c). g(a). f(b, b). h(b, b).")
    assert_true("abolish(g/2).", e)
    # g/1 has a different arity and is untouched.
    assert_true("g(a).", e)
    prolog_raises("existence_error(X, Y)", "g(A, B)", e)
    # The argument must be a Name/Arity predicate indicator.
    prolog_raises("type_error(predicate_indicator, a)", "abolish(a)", e)
def test_unify():
    # =/2 unifies without occurs check (so X = f(X) succeeds); \=/2 is its
    # negation; unify_with_occurs_check/2 rejects cyclic bindings.
    assert_true("g(b, B) = g(b, b).")
    assert_true("X = Y.")
    assert_true("X = f(X).")
    assert_false("g(b, B) \\= g(b, b).")
    assert_false("X \\= Y.")
    assert_false("X \\= f(X).")
    assert_true("x \\= y.")
    # A failed \= attempt must leave X unbound for the following goal.
    assert_true("f(X, b) \\= f(a, c), X = c.")
    assert_true("unify_with_occurs_check(X, Y).")
    assert_true("unify_with_occurs_check(X, X).")
    assert_false("unify_with_occurs_check(X, f(X)).")
    assert_false("unify_with_occurs_check(X, f(g(h(a, b, c, d(X, e), e)))).")
    assert_false("unify_with_occurs_check(g(X), X).")
    assert_false("X = Y, unify_with_occurs_check(X, f(d(Y), Y)).")


def test_call():
    # call/1 runs a goal constructed at runtime.
    e = get_engine("g(b, b).")
    assert_true("call(g(X, X)).", e)
    assert_true("X =.. [g, b, b], call(X).", e)
    e = get_engine("""
        g(X) :- call(f(X)).
        g(a).
        g(b).
        f(X) :- !, h(X).
        f(a).
        f(b).
        h(X) :- fail.
        withcut(X) :- call(!), fail.
        withcut(a).
    """)
    # The cut inside f prunes f's own clauses but not g's alternatives.
    heaps = collect_all(e, "g(X).")
    assert len(heaps) == 2
    # A cut run through call/1 is local to the called goal.
    assert_true("withcut(a).", e)
    assert_true("call((!, true)).")
def test_cut():
    # Cut commits to the first matching clause; the recursion terminates.
    e = get_engine("""
        f(0).
        f(X) :- Y is X - 1, !, f(Y).
        f(X) :- Y is X - 2, !, f(Y).
    """)
    assert_true("f(20).", e)


def test_cut_with_throw():
    # A cut followed by throw/1 must not corrupt the enclosing catch frame.
    e = get_engine("""
        raise_if_var(X) :-
            var(X), !, throw(unbound).
        raise_if_var(X) :- X = a.
        c(X, Y) :- catch((raise_if_var(X), Y = b), E, Y = a).
    """)
    assert_true("c(_, Y), Y == a.", e)


def test_cut_with_throw_direct():
    # The cut inside catch/3 commits to X = a, so X = b fails, the whole
    # catch goal fails and the disjunction falls through to X = c.
    e = get_engine("""
        c(X, Y) :- catch(((X = a; X = b), !, X = b, Y = b), E, Y = a); X = c.
    """)
    assert_true("c(X, Y), X == c.", e)


def test_call_cut():
    # Both clauses of f must yield a solution for the query f(!).
    e = get_engine("""
        f(X) :- call(X).
        f(!).
    """)
    heaps = collect_all(e, "f(!).")
    assert len(heaps) == 2
    # call/1 makes the cut local, so the outer branch X = c stays reachable...
    assert_true("call(((X = a; X = b), !, X = b)); X = c.")
    # ...while a bare cut also prunes the outer disjunction.
    assert_false("(((X = a; X = b), !, X = b)); X = c.")


def test_bug_or_exposing_problem_of_cyclic_term_support():
    # Regression test: backtracking out of X = 1 must fully undo the
    # binding before the X = 2 branch is tried.
    e = get_engine("""
        f(X) :- (X = 1; X = 2), X = 2.
    """)
    assert_true("f(X).", e)


def test_or_and_call_with_cut():
    # A cut in the left branch of ';' cuts the whole clause, unless it is
    # shielded by call/1.
    e = get_engine("""
        f :- (!, fail); true.
        g :- (call(!), fail); true.
    """)
    assert_false("f.", e)
    assert_true("g.", e)


def test_or_with_cut():
    # A cut inside a disjunction commits to the first alternative.
    e = get_engine("""
        f(X) :- ((X = 1, !); X = 2), X = 2.
        g(X) :- ((X = 1, !); X = 2), X = 1.
    """)
    assert_false("f(X).", e)
    assert_true("g(X).", e)
def test_cut1():
    # After the cut commits to X = a, b(a) fails and the remaining f/1
    # clauses are no longer tried: no solutions at all.
    e = get_engine("""
        g(a).
        g(b).
        a(a).
        b(b).
        f(X) :- g(X),!,b(X).
        f(x).
        f(y).
    """)
    heaps = collect_all(e, "f(X).")
    assert len(heaps) == 0
    # A cut at the top level trivially succeeds.
    assert_true("!.")


def test_cut2():
    # Repeated cuts are idempotent; goals after the cut may still backtrack.
    e = get_engine("""
        g(a).
        g(b).
        h(a, x).
        h(a, y).
        f(X, Y) :- g(X), !, !, !, !, !, h(X, Y).
    """)
    heaps = collect_all(e, "f(X, Y).")
    assert len(heaps) == 2


def test_cut3():
    # The cut in s(a, L) only prunes that clause's alternatives; the
    # member/2 choice points of the calling clause survive.
    e = get_engine("""
        member(H, [H | _]).
        member(H, [_ | T]) :- member(H, T).
        s(a, L) :- !, fail.
        s(b, L).
        s(X, L) :-
            member(Y, L),
            L = [_| S],
            s(Y, S).
    """)
    # import pdb; pdb.set_trace()
    assert_true("s(d, [a, b]).", e)


def test_rule_with_cut_calling_rule_with_cut():
    # Each cut is local to its own clause, so g/1 has exactly one solution.
    e = get_engine("""
        e(a).
        e(b).
        f(b) :- e(_), !.
        f(c).
        g(X) :- f(X), !.
        g(a).
    """)
    heaps = collect_all(e, "g(X).")
    assert len(heaps) == 1


def test_not_with_cut():
    # A cut inside not/1 (or \+) is local to the negated goal.
    assert_true("not((!, fail)).")
    assert_false("not(!).")
    e = get_engine("""
        p1 :- \\+ q1.
        q1 :- fail.
        q1 :- true.
        p2:- \\+ q2.
        q2 :- !, fail.
        q2 :- true.
    """)
    assert_false("p1.", e)
    assert_true("p2.", e)


def test_not_stops_cut():
    # not/1 is opaque to cut: the disjunction outside keeps its choices.
    e = get_engine("""
        f(X) :- (X = a; X = b), not((!, fail)).
    """)
    assert_true("f(X), X = b.", e)
    assert_true("not(((X = 1; X = 2), !, X=2)).", e)


def test_two_cuts():
    # The classic three-way comparison predicate written with cuts.
    e = get_engine("""
        f(>, X) :- X > 0, !.
        f(=, X) :- X = 0, !.
        f(<, _).
    """)
    assert_true("f(X, 1), X = '>'.", e)
    assert_true("f(X, 0), X = '='.", e)
    assert_true("f(X, -1), X = '<'.", e)
def test_listify():
    # listify/2 recursively converts a term into [Functor | Args] form,
    # leaving variables and atomic terms unchanged.
    e = get_engine("""
        listify(_X, _X) :-
            (var(_X); atomic(_X)), !.
        listify(_Expr, [_Op|_LArgs]) :-
            functor(_Expr, _Op, N),
            listify_list(1, N, _Expr, _LArgs).
        listify_list(I, N, _, []) :- I>N, !.
        listify_list(I, N, _Expr, [_LA|_LArgs]) :- I=<N, !,
            arg(I, _Expr, _A),
            listify(_A, _LA),
            I1 is I+1,
            listify_list(I1, N, _Expr, _LArgs).
    """)
    assert_true("listify(f(X), Y), Y = [f, X].", e)
    assert_true("listify(f(X, g(1)), Y).", e)
    assert_true("listify(f(X, 1, g(1)), Y), Y = [f, X, 1, [g, 1]].", e)


def test_univ():
    # =../2 ("univ") converts between a term and its list representation.
    assert_true("g(a, b, c) =.. [G, A, B, C].")
    assert_true("g(a, b, c) =.. [g, a, b, c].")
    assert_true("X =.. [g, a, b, c], X = g(a, b, c).")
    # A one-element list yields the bare atom.
    assert_true("L = [a|X], X = [], Z =.. L, Z == a.")
    assert_true("L = [X, 1, 2], X = a, Z =.. L, Z == a(1, 2).")


def test_arg():
    # arg/3 extracts (or, with an unbound index, enumerates) the
    # arguments of a compound term; positions are 1-based.
    assert_true("arg(1, g(a, b, c), a).")
    assert_true("arg(2, g(a, b, c), b).")
    assert_true("arg(3, g(a, b, c), c).")
    assert_false("arg(3, g(a, b, c), d).")
    assert_false("arg(0, g(a, b, c), X).")
    assert_false("arg(10, g(a, b, c), X).")
    assert_true("arg(1, g(a, b, c), X), X = a.")
    assert_true("arg(2, f(a, b, c), X), X = b.")
    assert_true("arg(3, h(a, b, c), X), X = c.")
    e = get_engine("""
        f(1, a).
        f(2, b).
        f(3, c).
    """)
    # With an unbound index, arg/3 enumerates all argument positions.
    heaps = collect_all(e, "arg(X, g(a, b, c), A), f(X, A).")
    assert len(heaps) == 3
    assert_true("arg(X, h(a, b, c), b), X = 2.")
    assert_true("arg(X, h(a, b, g(X, b)), g(3, B)), X = 3, B = b.")
    # The second argument must be a compound term.
    assert_false("arg(X, a, Y).")
    prolog_raises("_", "arg(X, 1, Y)")


def test_copy_term():
    # copy_term/2 binds its second argument to a fresh-variable copy of
    # the first, so the copies can be bound independently.
    assert_true("copy_term(X, Y), X = 1, Y = 2.")
    assert_true("copy_term(a, a).")
    assert_false("copy_term(f(X), g(X)).")
    assert_true("copy_term(f(X), f(a)), X = b.")
def test_type_checks():
    # Exercise the standard type-testing builtins, including big integers.
    assert_true("integer(123).")
    assert_true("integer(1000000000000000000000000000000000000).")
    assert_true("integer(-1000000000000000000000000000000000000).")
    assert_false("integer(a).")
    assert_false("integer(X).")
    assert_true("float(123.12).")
    assert_false("float(a).")
    assert_false("float(12).")
    assert_true("number(123).")
    assert_true("number(1000000000000000000000000000000000000).")
    assert_true("number(-1000000000000000000000000000000000000).")
    assert_true("number(42.42).")
    assert_false("number(abc).")
    assert_false("integer(a).")
    assert_false("integer(X).")
    assert_true("var(X).")
    assert_false("X = a, var(X).")
    # compound/1 is true only for terms with at least one argument.
    assert_true("compound(g(a)).")
    assert_false("compound(gxx).")
    assert_false("compound(123).")
    assert_false("compound([]).")
    assert_false("compound(X).")
    assert_true("atom(a).")
    assert_true("atom('asdf').")
    assert_false("atom(12).")
    assert_false("atom(X).")
    assert_true("atomic('asdf').")
    assert_true("atomic(12.5).")
    assert_false("atomic(f(1, 2, 3)).")
    assert_false("atomic(X).")
    assert_false("callable(X).")
    assert_false("callable(1).")
    assert_true("callable(asdf).")
    assert_true("callable(asdf(a, b, c, d, e, f)).")
    # ground/1 recursively checks for the absence of unbound variables.
    assert_true("ground(a).")
    assert_true("ground(t(a, b, f(a, b, g(a, b)))).")
    assert_false("ground(t(a, b, f(a, b, g(a, X)))).")
    assert_true("X = 13, ground(t(a, b, f(a, b, g(a, X)))).")
    assert_false("ground(X).")
def test_repeat():
    """repeat/0 provides an unbounded choice point; cut must stop it.

    The original body created an unused ``Engine()`` that was immediately
    shadowed by ``get_engine`` below; the dead local has been removed.
    """
    assert_true("repeat, true.")
    # 'repeat, !, fail' commits to the first repeat solution and then
    # fails for good instead of looping forever.
    assert_false("repeat, !, fail.")
    # hard to test repeat differently
    e = get_engine('f :- repeat, !, fail.')
    assert_false('f.', e)
    assert_true('f; true.', e)
def test_exception_handling():
    # catch/3 recovers from both runtime errors and explicitly thrown balls.
    assert_true("catch(f, E, true).")
    assert_true("catch(throw(error), E, true).")
    prolog_raises("_", "catch(true, E, fail), f")
    # A catcher that does not unify with the ball lets it propagate.
    prolog_raises("_", "catch(throw(error(x)), error(failure), fail)")
    assert_true("catch(catch(throw(error), failure, fail), error, true).")
    # The thrown ball keeps the bindings it had at throw time.
    assert_true("catch((X = y, throw(X)), E, E == y).")


def test_exception_forces_backtracking():
    # Unwinding to the catch frame must undo bindings made inside it,
    # while the ball itself keeps a copy of the bound value.
    assert_true("catch((X = 1, throw(f(X))), Y, (var(X), Y == f(1))), var(X).")


def test_between():
    # between/3 checks range membership or enumerates the integer range.
    assert_true("between(12, 15, 12).")
    assert_true("between(-5, 15, 0).")
    assert_false("between(12, 15, 6).")
    assert_false("between(12, 15, 16).")
    heaps = collect_all(Engine(), "between(1, 4, X).")
    assert len(heaps) == 4
    # Solutions are produced in ascending order.
    assert heaps[0]['X'].num == 1
    assert heaps[-1]['X'].num == 4


def test_is():
    # is/2 evaluates the arithmetic expression on its right-hand side.
    assert_true("5 is 1 + 1 + 1 + 1 + 1.")


@py.test.mark.xfail
def test_parser_access():
    # op/3 manipulates the operator table at runtime; currently expected
    # to fail (xfail).
    assert_true("current_op(200, xfx, **).")
    f = collect_all(Engine(), "current_op(200, Form, X).")
    assert len(f) == 2
    e = get_engine("""
        foo(a, b).
    """)
    assert_true("op(450, xfy, foo).", e)
    assert_true("a foo b.", e)
    # Priority 0 removes the operator again.
    assert_true("op(0, xfy, foo).", e)
    # XXX really a ParseError
    py.test.raises(Exception, assert_false, "a foo b.", e)
    # change precedence of + for funny results :-)
    assert_true("14 is 2 + 3 * 4.", e)
    assert_true("op(350, xfy, +).", e)
    assert_true("20 is 2 + 3 * 4.", e)
    assert_true("op(500, xfy, +).", e)
def test_functor():
    # functor/3 decomposes a term or constructs one from name and arity.
    assert_true("functor(f(a, b, c), f, 3).")
    assert_true("functor(f(a, b, c), X, Y), X=f, Y=3.")
    assert_true("functor(f, X, Y), X=f, Y=0.")
    assert_true("functor(1, X, Y), X=1, Y=0.")
    assert_true("functor(F, a, 0), F=a.")
    assert_true("functor(F, 12, 0), F=12.")
    assert_true("functor(F, 12.5, 0), F=12.5.")
    # Construction mode yields fresh variables as arguments.
    assert_true("functor(F, f, 4), F=f(1, 2, 3, 4).")
    assert_true("functor(F, g, 1), F=g(asdf).")
    assert_true("functor(F, g, 3), F=g(X, Y, 1), X = 12, Y = 34, ground(F).")


def test_compare():
    # compare/3 implements the standard order of terms.
    assert_true("X = Y, compare(R, f(X, Y, X, Y), f(X, X, Y, Y)), R == '='.")
    assert_true("X = f(a), Y = f(b), compare(R, Y, X), R == '>'.")


def test_atom_length():
    assert_true("atom_length('abc', 3).")
    # The quoted atom is a single backslash character after escaping.
    assert_true("atom_length('\\\\', 1).")
    assert_true("atom_length('abc', X), X = 3.")


def test_atom_concat():
    # atom_concat/3 concatenates, or splits when arguments are unbound.
    assert_true("atom_concat(ab, cdef, abcdef).")
    assert_true("atom_concat(ab, cdef, X), X = abcdef.")
    assert_true("atom_concat(ab, X, abcdef), X = cdef.")
    assert_true("atom_concat(X, cdef, abcdef), X = ab.")
    # Numbers are converted to their atom representation.
    assert_true("atom_concat(1, Y, '1def'), Y = def.")
    # All 5 ways to split 'abcd', including the empty prefix and suffix.
    heaps = collect_all(
        Engine(),
        "atom_concat(X, Y, abcd), atom(X), atom(Y).")
    assert len(heaps) == 5


# The sub_atom/5 tests below are currently expected to fail (xfail).
@py.test.mark.xfail
def test_sub_atom():
    assert_true("sub_atom(abc, B, L, A, bc), B=1, L=2, A=0.")


@py.test.mark.xfail
def test_sub_atom2():
    assert_false("sub_atom(abc, B, 1, A, bc).")


@py.test.mark.xfail
def test_sub_atom3():
    assert_true("sub_atom(abcabcabc, 3, 3, A, abc), A=3.")


@py.test.mark.xfail
def test_sub_atom4():
    assert_true("sub_atom(abcabcabc, B, L, 3, abc), B=3, L=3.")


@py.test.mark.xfail
def test_sub_atom_with_non_var_sub():
    assert_true("sub_atom(abcabc, Before, Length, After, a), Before=3, Length=1, After=2.")
    assert_false("sub_atom(abcabc, Before, Length, After, b), Before==3, Length==1, After==2.")


@py.test.mark.xfail
def test_sub_atom_with_var_after():
    assert_true("sub_atom(abcabd, 2, 1, After, Sub), After=3, Sub=c.")
    assert_true("sub_atom(abcabc, Before, Length, After, Sub), Before=1, Length=3, After=2, Sub=bca.")
    assert_false("sub_atom(abcabc, 1, 3, After, Sub), Sub=abc.")


@py.test.mark.xfail
def test_sub_atom_var_sub_and_non_var_after():
    assert_true("sub_atom(abcabd, 2, 1, 3, Sub), Sub=c.")
    assert_true("sub_atom(abcabc, Before, Length, 2, Sub), Before=1, Length=3, Sub=bca.")
    assert_false("sub_atom(abcabc, 1, 3, 2, Sub), Sub=abc.")
def test_findall():
    # findall/3 collects all solutions of a goal into a list.
    assert_true("findall(X, (X = a; X = b; X = c), L), L == [a, b, c].")
    # Every collected term gets fresh variables.
    assert_true("findall(X + Y, (X = 1; X = 2), [1+A, 2+B]), A \== B.")
    e = get_engine("""
        app([], X, X).
        app([H | T1], T2, [H | T3]) :-
            app(T1, T2, T3).
    """)
    assert_true("findall(X+Y, app(X, Y, [1, 2, 3]), L), L == [[]+[1, 2, 3], [1]+[2, 3], [1, 2]+[3], [1, 2, 3]+[]].", e)


def test_findall_and_exception_bug():
    # An error raised inside the findall goal must propagate outwards.
    prolog_raises("instantiation_error", "findall(1, 0 is X, _)")


def test_ifthenelse():
    # (Cond -> Then ; Else): Cond is committed to its first solution.
    assert_false("true -> fail.")
    assert_false("true -> fail ; true.")
    assert_true("fail -> fail ; true.")
    assert_true("fail -> true ; true.")
    assert_true("(true -> fail ; true) ; true.")
    e = get_engine("f(x). f(y). f(z).")
    # Without an else branch, a failing condition fails the whole goal.
    assert_false("f(c) -> true.", e)
    # The condition does not backtrack past its first solution f(x).
    assert_false("f(X) -> X \\= x; f(z).", e)
    assert_true("f(X) -> X == x; f(z).", e)
    assert_true("""
        L = [X, Y],
        (L = []
        ->
            true
        ;
            [Head|Tail] = L
        ).
    """)


def test_cut_in_ifthenelse():
    # A cut in the condition part must not cut the clause's alternatives.
    e = get_engine("""
        f(X) :- ! -> fail.
        f(0).
    """)
    assert_true("f(0).", e)


def test_once():
    # once/1 keeps only the first solution of its goal.
    assert_true("once(repeat).")


def test_write_term():
    py.test.skip("test behaves funnily")
    prolog_raises("domain_error(write_option, E)",
                  "write_term(a, [quoted(af)])")
    prolog_raises("type_error(list, E)",
                  "write_term(a, asdf)")


def test_number_chars():
    # number_chars/2 converts between a number and a list of digit chars.
    assert_true("number_chars(123, ['1', '2', '3']).")
    assert_true("number_chars(123, X), X = ['1', '2', '3'].")
    prolog_raises("type_error(text, E)", "number_chars(X, [f(a)])")
    prolog_raises("type_error(list, E)", "number_chars(X, a)")
    # Malformed digit sequences are syntax errors, not failures.
    prolog_raises("syntax_error(E)", "number_chars(X, ['-', '-'])")
    prolog_raises("syntax_error(E)", "number_chars(X, ['1', '-'])")
    prolog_raises("syntax_error(E)", "number_chars(X, ['.', '1', '-'])")
    prolog_raises("syntax_error(E)", "number_chars(X, ['1', '.', '2', '.'])")
    assert_true("number_chars(X, ['1', '2', '3']), X = 123.")
    prolog_raises("type_error(list, E)", "number_chars(123, 123)")
    prolog_raises("type_error(list, E)", "number_chars(b, a)")
    assert_true("number_chars(-123, ['-', '1', '2', '3']).")
    assert_true("number_chars(123.1, ['1', '2', '3', '.', '1']).")
    # Big integers must round-trip as well.
    assert_true("number_chars(1000000000000000, ['1','0','0','0','0','0','0','0','0','0','0','0','0','0','0','0']).")
    prolog_raises("instantiation_error", "number_chars(X, Y)")
    prolog_raises("type_error(list, E)", "number_chars(1, ['a'|2])")
    prolog_raises("type_error(number, a)", "number_chars(a, X)")
    prolog_raises("type_error(number, a)", "number_chars(a, X)")
    prolog_raises("syntax_error(E)", "number_chars(A, ['-', '.', '1'])")


def test_atom_chars():
    # atom_chars/2 converts between an atom and a list of one-char atoms.
    assert_true("atom_chars(abc, X), X = [a, b, c].")
    assert_true("atom_chars(a12, [a, '1', '2']).")
    assert_true("atom_chars('', []).")
    prolog_raises("instantiation_error", "atom_chars(X, Y)")
    assert_true("atom_chars(X, [a, b, '1']), X = ab1.")
    # List elements must be single-character atoms.
    prolog_raises("type_error(text, E)", "atom_chars(X, [a, b, '10'])")
    prolog_raises("type_error(list, E)", "atom_chars(X, a)")
    prolog_raises("type_error(text, E)", "atom_chars(X, [f(a)])")
    prolog_raises("type_error(list, E)", "atom_chars(X, f(a))")
    prolog_raises("type_error(text, E)", "atom_chars(X, [[]])")


def test_atom_chars_2():
    # A partial list in the second argument is completed by unification.
    assert_true("atom_chars(ab, [a|B]), B = [b].")
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a diagonal matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorDiag",]
@tf_export("linalg.LinearOperatorDiag")
class LinearOperatorDiag(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] square diagonal matrix.
This operator acts like a [batch] diagonal matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorDiag` is initialized with a (batch) vector.
```python
# Create a 2 x 2 diagonal linear operator.
diag = [1., -1.]
operator = LinearOperatorDiag(diag)
operator.to_dense()
==> [[1., 0.]
[0., -1.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
diag = tf.random_normal(shape=[2, 3, 4])
operator = LinearOperatorDiag(diag)
# Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible
# since the batch dimensions, [2, 1], are brodcast to
# operator.batch_shape = [2, 3].
y = tf.random_normal(shape=[2, 1, 4, 2])
x = operator.solve(y)
==> operator.matmul(x) = y
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Performance
Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` involves `N * R` multiplications.
* `operator.solve(x)` involves `N` divisions and `N * R` multiplications.
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
diag,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorDiag"):
r"""Initialize a `LinearOperatorDiag`.
Args:
diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The diagonal of the operator. Allowed dtypes: `float16`, `float32`,
`float64`, `complex64`, `complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `diag.dtype` is real, this is auto-set to `True`.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
TypeError: If `diag.dtype` is not an allowed type.
ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.
"""
with ops.name_scope(name, values=[diag]):
self._diag = ops.convert_to_tensor(diag, name="diag")
self._check_diag(self._diag)
# Check and auto-set hints.
if not self._diag.dtype.is_complex:
if is_self_adjoint is False:
raise ValueError("A real diagonal operator is always self adjoint.")
else:
is_self_adjoint = True
if is_square is False:
raise ValueError("Only square diagonal operators currently supported.")
is_square = True
super(LinearOperatorDiag, self).__init__(
dtype=self._diag.dtype,
graph_parents=[self._diag],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _check_diag(self, diag):
"""Static check of diag."""
allowed_dtypes = [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128,
]
dtype = diag.dtype
if dtype not in allowed_dtypes:
raise TypeError(
"Argument diag must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if diag.get_shape().ndims is not None and diag.get_shape().ndims < 1:
raise ValueError("Argument diag must have at least 1 dimension. "
"Found: %s" % diag)
def _shape(self):
# If d_shape = [5, 3], we return [5, 3, 3].
d_shape = self._diag.get_shape()
return d_shape.concatenate(d_shape[-1:])
def _shape_tensor(self):
d_shape = array_ops.shape(self._diag)
k = d_shape[-1]
return array_ops.concat((d_shape, [k]), 0)
def _assert_non_singular(self):
return linear_operator_util.assert_no_entries_with_modulus_zero(
self._diag,
message="Singular operator: Diagonal contained zero values.")
def _assert_positive_definite(self):
if self.dtype.is_complex:
message = (
"Diagonal operator had diagonal entries with non-positive real part, "
"thus was not positive definite.")
else:
message = (
"Real diagonal operator had non-positive diagonal entries, "
"thus was not positive definite.")
return check_ops.assert_positive(
math_ops.real(self._diag),
message=message)
def _assert_self_adjoint(self):
return linear_operator_util.assert_zero_imag_part(
self._diag,
message=(
"This diagonal operator contained non-zero imaginary values. "
" Thus it was not self-adjoint."))
def _matmul(self, x, adjoint=False, adjoint_arg=False):
diag_term = math_ops.conj(self._diag) if adjoint else self._diag
x = linalg.adjoint(x) if adjoint_arg else x
diag_mat = array_ops.expand_dims(diag_term, -1)
return diag_mat * x
def _determinant(self):
return math_ops.reduce_prod(self._diag, reduction_indices=[-1])
def _log_abs_determinant(self):
return math_ops.reduce_sum(
math_ops.log(math_ops.abs(self._diag)), reduction_indices=[-1])
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
diag_term = math_ops.conj(self._diag) if adjoint else self._diag
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
inv_diag_mat = array_ops.expand_dims(1. / diag_term, -1)
return rhs * inv_diag_mat
def _to_dense(self):
return array_ops.matrix_diag(self._diag)
def _diag_part(self):
return self.diag
def _add_to_tensor(self, x):
x_diag = array_ops.matrix_diag_part(x)
new_diag = self._diag + x_diag
return array_ops.matrix_set_diag(x, new_diag)
@property
def diag(self):
return self._diag
| |
import sys
import re
import codecs
import types
import csv
import json
import optparse
import fileinput
import collections
import datetime
import time
"""
Essentially reverses the process of bundle-items.
Processes the CSV download from MTurk and bursts out multiple items in each HIT.
Each field name that ends in "_1", "_2" etc is assumed to be such a multiplexed field.
Any other fields will be repeated in the output.
Can produce JSON format rather than CSV if desired.
"""
csv.field_size_limit(10**6)
######################################################################
def maybeOpen (file, mode="r", encoding="utf8"):
    """Return *file* as an open, optionally codec-wrapped, file object.

    If *file* is a string it is treated as a path and opened with *mode*;
    an already-open file object is passed through unchanged.  When
    *encoding* is non-empty the stream is wrapped in a codecs reader (for
    mode "r") or writer (any other mode).  Pass encoding=None to get the
    raw byte stream.
    """
    if type(file) is types.StringType:
        file = open(file, mode)
    if encoding:
        file = (mode == "r" and codecs.getreader or codecs.getwriter)(encoding)(file)
    return file
######################################################################

# Superseded by readBatchFile below; kept for reference.
# class batchFileReader:
#     def __init__ (self, file):
#         self.csvReader = csv.DictReader(maybeOpen(file, "r", None))
#     wsRE = re.compile(r"\s") # re.U # NO!!!
#     def __iter__ (self):
#         n = 0
#         for row in self.csvReader:
#             n += 1
#             for key, old in row.items():
#                 if old:
#                     new = self.wsRE.sub(" ", row[key])
#                     if new is not old:
#                         row[key] = new
#             yield row
#         # print >>sys.stderr, self, self.csvReader.fieldnames
#         print >>sys.stderr, "%s: %d" % (self, n)

# Matches a single whitespace character (deliberately not Unicode-aware).
wsRE = re.compile(r"\s") # re.U # NO!!!


def readBatchFile (input):
    """Yield one dict per CSV row read from *input*.

    Each whitespace character inside a field value (newline, tab, ...) is
    replaced by a plain space so it cannot break tab-separated output
    later.  The total row count is reported on stderr when exhausted.
    """
    n = 0
    for n, row in enumerate(csv.DictReader(input), 1):
        for key, old in row.items():
            if old:
                new = wsRE.sub(" ", row[key])
                # re.sub returns the original object when nothing matched;
                # only write back if a substitution actually happened.
                if new is not old:
                    row[key] = new
        yield row
    # print >>sys.stderr, self, self.csvReader.fieldnames
    print >>sys.stderr, "%d batch items read" % n
# class hitUnbundler:
# def __init__ (self, source, burstplain=False, addSequenceID=False):
# self.source = source
# self.addSequenceID = addSequenceID
# self.splitKeyRE = re.compile(burstplain and "^(.*[^0-9])([0-9]+)$" or "^(.+)_([0-9]+)$",
# re.U | re.I)
def burstBundle (bundle, splitKeyRE):
    """Split one HIT record into per-item sub-records plus shared fields.

    Keys matching *splitKeyRE* (i.e. carrying a positive numeric suffix)
    are grouped by that suffix under their de-suffixed name; every other
    key goes into a separate dict of shared attributes.  Returns the pair
    (numbered, shared).
    """
    numbered = {}   # sequence number -> {de-suffixed key: value}
    shared = {}     # attributes without a numeric suffix
    for key, value in bundle.items():
        match = splitKeyRE.match(key)
        if match is None:
            shared[key] = value
            continue
        stem, suffix = match.groups()
        suffix = int(suffix)
        assert(suffix > 0)
        numbered.setdefault(suffix, {})[stem] = value
    return numbered, shared
def unbundleHITs (source, burstplain=False, addSequenceID=False):
    """Expand each bundled HIT record from *source* into individual items.

    Keys ending in a numeric suffix ("_1", "_2", ... -- or bare trailing
    digits when *burstplain* is true) are burst into one item per suffix;
    all remaining keys are copied into every resulting item.  With
    *addSequenceID* each item also records its suffix as "sequenceID".
    In/out counts are reported on stderr when the generator is exhausted.
    """
    # With burstplain any trailing digits split a key; otherwise an
    # underscore must precede the digits.
    splitKeyRE = re.compile(burstplain and "^(.*[^0-9])([0-9]+)$" or "^(.+)_([0-9]+)$",
                            re.U | re.I)
    nIn = nOut = 0
    # indexCount = {}
    for bundle in source:
        nIn += 1
        # tempIndex = {}
        # for index in tempIndex:
        #     indexCount[index] = indexCount.get(index, 0) + 1
        burst, shared = burstBundle(bundle, splitKeyRE)
        if addSequenceID:
            for index, subBundle in burst.iteritems():
                subBundle["sequenceID"] = index
        # A bundle with no numbered fields still yields one (shared-only) item.
        for item in burst.values() or [{}]:
            # Add the shared ones back in to the burst items
            # print >>sys.stderr, "Burst item:", item
            for key in shared:
                # A shared key colliding with a burst key is reported and
                # then overwritten by the shared value.
                if item.has_key(key):
                    print >>sys.stderr, "Collision: %s=%s %s_n=%s" % (key, shared[key], key, item[key])
                item[key] = shared[key]
            nOut += 1
            yield item
    print >>sys.stderr, "%s: %d => %d" % ("unbundle", nIn, nOut)
def adjustTimes (bundles):
    """Annotate every bundle with an "AdjustedWorkTime" in seconds.

    A worker may hold several accepted HITs at once; to avoid counting
    overlapping intervals twice, each worker's HITs are ordered by submit
    time and the effective start of a HIT is the later of its accept time
    and the previous HIT's submit time.
    """
    groups = collections.defaultdict(list)
    for b in bundles:
        # Clip out the timezone, which is not reliably parsed by strptime
        aTime, sTime = map(lambda t: time.mktime(datetime.datetime.strptime(t[:-8] + t[-5:], "%a %b %d %H:%M:%S %Y").timetuple()),
                           (b["AcceptTime"], b["SubmitTime"]))
        groups[b["WorkerId"]].append((sTime, aTime, b))
    origWorktime = adjustedWorktime = 0
    for workerID, bundles in groups.iteritems():
        lastSubmit = 0
        # Tuples sort by submit time first, giving chronological order.
        bundles.sort()
        for i, (sTime, aTime, b) in enumerate(bundles):
            b["AdjustedWorkTime"] = sTime - max(aTime, lastSubmit)
            assert b["AdjustedWorkTime"] >= 0
            # if b["AdjustedWorkTime"] == 0:
            #     print >>sys.stderr, "Zero work time!!! (%s %s), (%s %s)" % (bundles[i - 1][-1]["AcceptTime"], bundles[i - 1][-1]["SubmitTime"], b["AcceptTime"], b["SubmitTime"])
            lastSubmit = sTime
class tabItemWriter:
    """Writes items as tab-separated text with a weighted column order."""

    def __init__ (self, file):
        self.file = maybeOpen(file, "w", None)
        # Hacky stuff to make some columns come first
        keyWeights = [(1, re.compile("^answer[.]", re.I | re.U)),
                      (2, re.compile("^input[.]", re.I | re.U)),
                      ]
        knowns = "itemID en fr1 score1 fr2 score2 fr3 score3 control_wrong control_right".split()
        for i in xrange(len(knowns)):
            keyWeights.append((100 + i, re.compile("^%s$" % knowns[i])))
        # Catch-all entry: anything unmatched sorts last.
        keyWeights.append((1000, None))
        self.keyWeights = keyWeights

    def sortKeys (self, keys):
        """Return *keys* ordered by the first weight pattern each matches."""
        weightedKeys = []
        for key in keys:
            for weight, pattern in self.keyWeights:
                if not pattern or pattern.match(key):
                    weightedKeys.append((weight, key))
                    break
        keys = weightedKeys
        keys.sort()
        # keys.reverse()
        return [key for (weight, key) in keys]

    def writeAll (self, source):
        """Write a header row, then one tab-separated line per item."""
        source = iter(source)
        # The column set is determined from the first item only.
        firstItem = source.next()
        keys = self.sortKeys(firstItem.keys())
        print >>self.file, "\t".join(keys)
        # Emit the already-consumed first item, then the rest of the stream.
        for fake in ((firstItem, ), source):
            for item in fake:
                print >>self.file, "\t".join([str(item.get(key, "EMPTY")) for key in keys])
class jsonItemWriter:
    """Writes items one JSON object per line (JSON-lines output)."""

    def __init__ (self, file):
        self.file = maybeOpen(file, "w", None)

    def writeAll (self, source):
        for item in source:
            print >>self.file, json.dumps(item, sort_keys=True)
######################################################################
# Command-line driver: parse options, read the batch CSV (files given as
# arguments, or stdin), adjust per-worker work times, burst the bundles,
# and write tab-separated or JSON output to stdout.

optparser = optparse.OptionParser()
optparser.add_option("-v", "--verbose", dest="verbose", action = "count",
                     help = "More verbose output")
optparser.add_option("--plain", action="store_true",
                     help="Burst keys that end in digits; Default is to burst keys that end in underscore-digit")
optparser.add_option("--addseq", action="store_true", help="Add a sequence ID to the burst items")
optparser.add_option("--json", action="store_true",
                     help="Produce json output rather than tab-sep")
(options, args) = optparser.parse_args()

# (infile, ) = args or (None, )
# infile = infile in ("-", None) and sys.stdin or open(infile, "r")

# Materialize all rows up front: adjustTimes needs every row per worker.
bundles = list(readBatchFile(fileinput.input(args)))
adjustTimes(bundles)
print >>sys.stderr, "Average adjusted worktime %.1fs" % (sum(b["AdjustedWorkTime"] for b in bundles)/(len(bundles) or 1))
items = unbundleHITs(bundles, burstplain=options.plain, addSequenceID=options.addseq)
writer = (options.json and jsonItemWriter or tabItemWriter)(sys.stdout)
writer.writeAll(items)

######################################################################
| |
"""This module holds a ConnectionWrapper that is used with a
JDBC Connection. The module should only be used when running Jython.
"""
# Copyright (c) 2009-2014, Aalborg University (chr@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source anqd binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import java.sql as jdbc
from copy import copy as pcopy
from datetime import datetime
from sys import modules
from threading import Thread
from queue import Queue
import pygrametl
from pygrametl.FIFODict import FIFODict
# NOTE: This module is made for Jython.
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '2.2'
__all__ = ['JDBCConnectionWrapper', 'BackgroundJDBCConnectionWrapper']
class JDBCConnectionWrapper(object):
    """Wrap a JDBC Connection.

    All Dimension and FactTable communicate with the data warehouse using
    a ConnectionWrapper. In this way, the code for loading the DW does not
    have to care about which parameter format is used.
    This ConnectionWrapper is a special one for JDBC in Jython.
    """

    def __init__(self, jdbcconn, stmtcachesize=20):
        """Create a ConnectionWrapper around the given JDBC connection.

        If no default ConnectionWrapper already exists, the new
        ConnectionWrapper is set to be the default ConnectionWrapper.

        Arguments:

        - jdbcconn: An open JDBC Connection (not a PEP249 Connection)
        - stmtcachesize: The maximum number of PreparedStatements kept
          open. Default: 20.
        """
        if not isinstance(jdbcconn, jdbc.Connection):
            raise TypeError('1st argument must implement java.sql.Connection')
        if jdbcconn.isClosed():
            raise ValueError('1st argument must be an open Connection')
        self.__jdbcconn = jdbcconn
        # Add a finalizer to __prepstmts to close PreparedStatements when
        # they are pushed out of the FIFO cache
        self.__prepstmts = FIFODict(stmtcachesize, lambda k, v: v[0].close())
        self.__resultmeta = FIFODict(stmtcachesize)
        self.__resultset = None
        self.__resultnames = None
        self.__resulttypes = None
        # Hook allowing callers to rename result columns; identity by default
        self.nametranslator = lambda s: s
        self.__jdbcconn.setAutoCommit(False)
        if pygrametl._defaulttargetconnection is None:
            pygrametl._defaulttargetconnection = self

    def __preparejdbcstmt(self, sql):
        # Find pyformat arguments ("%(name)s") and change them to JDBC
        # question marks while appending the attribute names (in positional
        # order) to a list.
        names = []
        newsql = sql
        while True:
            start = newsql.find('%(')
            if start == -1:
                break
            end = newsql.find(')s', start)
            if end == -1:
                break
            name = newsql[start+2 : end]
            names.append(name)
            newsql = newsql.replace(newsql[start:end+2], '?', 1)
        ps = self.__jdbcconn.prepareStatement(newsql)
        # Ask the driver for each parameter's SQL type (JDBC is 1-indexed)
        types = []
        parmeta = ps.getParameterMetaData()
        for i in range(len(names)):
            types.append(parmeta.getParameterType(i+1))
        self.__prepstmts[sql] = (ps, names, types)

    def __executejdbcstmt(self, sql, args):
        # Close any previous open ResultSet before executing a new statement
        if self.__resultset:
            self.__resultset.close()
        if sql not in self.__prepstmts:
            self.__preparejdbcstmt(sql)
        (ps, names, types) = self.__prepstmts[sql]
        for pos in range(len(names)):  # Not very Pythonic, but we're doing Java
            if args[names[pos]] is None:
                ps.setNull(pos + 1, types[pos])
            else:
                ps.setObject(pos + 1, args[names[pos]], types[pos])
        if ps.execute():
            # execute() returned True: there is a ResultSet to read
            self.__resultset = ps.getResultSet()
            if sql not in self.__resultmeta:
                self.__resultmeta[sql] = \
                    self.__extractresultmetadata(self.__resultset)
            (self.__resultnames, self.__resulttypes) = self.__resultmeta[sql]
        else:
            self.__resultset = None
            (self.__resultnames, self.__resulttypes) = (None, None)

    def __extractresultmetadata(self, resultset):
        # Get jdbc resultset metadata, extract column names and SQL types
        meta = resultset.getMetaData()
        names = []
        types = []
        for col in range(meta.getColumnCount()):
            names.append(meta.getColumnName(col+1))
            types.append(meta.getColumnType(col+1))
        return (names, types)

    def __readresultrow(self):
        # Read the current row of the open ResultSet using the typed getter
        # matching each column's SQL type.
        if self.__resultset is None:
            return None
        result = []
        for i in range(len(self.__resulttypes)):
            e = self.__resulttypes[i]  # Not Pythonic, but we need i for JDBC
            if e in (jdbc.Types.CHAR, jdbc.Types.VARCHAR,
                     jdbc.Types.LONGVARCHAR):
                result.append(self.__resultset.getString(i+1))
            elif e in (jdbc.Types.BIT, jdbc.Types.BOOLEAN):
                # Bug fix: java.sql.ResultSet defines getBoolean, not getBool
                result.append(self.__resultset.getBoolean(i+1))
            elif e in (jdbc.Types.TINYINT, jdbc.Types.SMALLINT,
                       jdbc.Types.INTEGER):
                result.append(self.__resultset.getInt(i+1))
            elif e in (jdbc.Types.BIGINT, ):
                result.append(self.__resultset.getLong(i+1))
            elif e in (jdbc.Types.DATE, ):
                result.append(self.__resultset.getDate(i+1))
            elif e in (jdbc.Types.TIMESTAMP, ):
                result.append(self.__resultset.getTimestamp(i+1))
            elif e in (jdbc.Types.TIME, ):
                result.append(self.__resultset.getTime(i+1))
            else:
                # Try this and hope for the best...
                result.append(self.__resultset.getString(i+1))
        return tuple(result)

    def execute(self, stmt, arguments=None, namemapping=None, ignored=None):
        """Execute a statement.

        Arguments:

        - stmt: the statement to execute
        - arguments: a mapping with the arguments. Default: None.
        - namemapping: a mapping of names such that if stmt uses %(arg)s
          and namemapping[arg]=arg2, the value arguments[arg2] is used
          instead of arguments[arg]
        - ignored: An ignored argument only present to accept the same
          number of arguments as ConnectionWrapper.execute
        """
        if namemapping and arguments:
            arguments = pygrametl.copy(arguments, **namemapping)
        self.__executejdbcstmt(stmt, arguments)

    def executemany(self, stmt, params, ignored=None):
        """Execute a sequence of statements.

        Arguments:

        - stmt: the statement to execute
        - params: a sequence of arguments
        - ignored: An ignored argument only present to accept the same
          number of arguments as ConnectionWrapper.executemany
        """
        for paramset in params:
            self.__executejdbcstmt(stmt, paramset)

    def rowfactory(self, names=None):
        """Return a generator object returning result rows (i.e. dicts)."""
        if names is None:
            if self.__resultnames is None:
                return
            else:
                # Bug fix: __resultnames holds plain column-name strings, so
                # the previous "t[0]" used only the first character of each
                # name as the dict key.
                names = [self.nametranslator(t) for t in self.__resultnames]
        empty = (None, ) * len(self.__resultnames)
        while True:
            row = self.fetchonetuple()
            if row == empty:
                return
            yield dict(list(zip(names, row)))

    def fetchone(self, names=None):
        """Return one result row (i.e. dict)."""
        if self.__resultset is None:
            return {}
        if names is None:
            # Bug fix: translate each full column name (see rowfactory)
            names = [self.nametranslator(t) for t in self.__resultnames]
        values = self.fetchonetuple()
        return dict(list(zip(names, values)))

    def fetchonetuple(self):
        """Return one result tuple."""
        if self.__resultset is None:
            return ()
        # Jython: next(rs) invokes java.sql.ResultSet.next(); False means
        # the result set is exhausted, signalled here as an all-None tuple.
        if not next(self.__resultset):
            return (None, ) * len(self.__resultnames)
        else:
            return self.__readresultrow()

    def fetchmanytuples(self, cnt):
        """Return cnt result tuples."""
        if self.__resultset is None:
            return []
        empty = (None, ) * len(self.__resultnames)
        result = []
        for i in range(cnt):
            tmp = self.fetchonetuple()
            if tmp == empty:
                break
            result.append(tmp)
        return result

    def fetchalltuples(self):
        """Return all result tuples."""
        if self.__resultset is None:
            return []
        result = []
        empty = (None, ) * len(self.__resultnames)
        while True:
            tmp = self.fetchonetuple()
            if tmp == empty:
                return result
            result.append(tmp)

    def rowcount(self):
        """Not implemented. Return 0. Should return the size of the result."""
        return 0

    def getunderlyingmodule(self):
        """Return a reference to the underlying connection's module."""
        return modules[self.__class__.__module__]

    def commit(self):
        """Commit the transaction."""
        pygrametl.endload()
        self.__jdbcconn.commit()

    def close(self):
        """Close the connection to the database."""
        self.__jdbcconn.close()

    def rollback(self):
        """Rollback the transaction."""
        self.__jdbcconn.rollback()

    def setasdefault(self):
        """Set this ConnectionWrapper as the default connection."""
        pygrametl._defaulttargetconnection = self

    def cursor(self):
        """Not implemented for this JDBC connection wrapper!"""
        raise NotImplementedError(".cursor() not supported")

    def resultnames(self):
        """Return the current result's column names as a tuple, or None."""
        if self.__resultnames is None:
            return None
        else:
            return tuple(self.__resultnames)
# BackgroundJDBCConnectionWrapper is added for experiments. It is quite similar
# to JDBCConnectionWrapper and one of them may be removed.
class BackgroundJDBCConnectionWrapper(object):
    """Wrap a JDBC Connection and do all DB communication in the background.

    All Dimension and FactTable communicate with the data warehouse using
    a ConnectionWrapper. In this way, the code for loading the DW does not
    have to care about which parameter format is used.
    This ConnectionWrapper is a special one for JDBC in Jython and does DB
    communication from a Thread.

    .. Note::
       BackgroundJDBCConnectionWrapper is added for experiments.
       It is quite similar to JDBCConnectionWrapper and one of them may be
       removed.
    """

    def __init__(self, jdbcconn, stmtcachesize=20):
        """Create a ConnectionWrapper around the given JDBC connection.

        Arguments:

        - jdbcconn: An open JDBC Connection (not a PEP249 Connection)
        - stmtcachesize: The maximum number of PreparedStatements kept
          open. Default: 20.
        """
        self.__jdbcconn = jdbcconn
        # Add a finalizer to __prepstmts to close PreparedStatements when
        # they are pushed out of the FIFO cache
        self.__prepstmts = FIFODict(stmtcachesize, lambda k, v: v[0].close())
        self.__resultmeta = FIFODict(stmtcachesize)
        self.__resultset = None
        self.__resultnames = None
        self.__resulttypes = None
        # Hook allowing callers to rename result columns; identity by default
        self.nametranslator = lambda s: s
        self.__jdbcconn.setAutoCommit(False)
        # Statements are queued here and executed by a single daemon thread
        self.__queue = Queue(5000)
        t = Thread(target=self.__worker)
        t.setDaemon(True)  # NB: "t.daemon = True" does NOT work...
        t.setName('BackgroundJDBCConnectionWrapper')
        t.start()

    def __worker(self):
        # Background thread: drain the queue and execute each statement
        while True:
            (sql, args) = self.__queue.get()
            self.__executejdbcstmt(sql, args)
            self.__queue.task_done()

    def __preparejdbcstmt(self, sql):
        # Find pyformat arguments ("%(name)s") and change them to JDBC
        # question marks while appending the attribute names (in positional
        # order) to a list.
        names = []
        newsql = sql
        while True:
            start = newsql.find('%(')
            if start == -1:
                break
            end = newsql.find(')s', start)
            if end == -1:
                break
            name = newsql[start+2 : end]
            names.append(name)
            newsql = newsql.replace(newsql[start:end+2], '?', 1)
        ps = self.__jdbcconn.prepareStatement(newsql)
        # Ask the driver for each parameter's SQL type (JDBC is 1-indexed)
        types = []
        parmeta = ps.getParameterMetaData()
        for i in range(len(names)):
            types.append(parmeta.getParameterType(i+1))
        self.__prepstmts[sql] = (ps, names, types)

    def __executejdbcstmt(self, sql, args):
        # Close any previous open ResultSet before executing a new statement
        if self.__resultset:
            self.__resultset.close()
        if sql not in self.__prepstmts:
            self.__preparejdbcstmt(sql)
        (ps, names, types) = self.__prepstmts[sql]
        for pos in range(len(names)):  # Not very Pythonic, but we're doing Java
            if args[names[pos]] is None:
                ps.setNull(pos + 1, types[pos])
            else:
                ps.setObject(pos + 1, args[names[pos]], types[pos])
        if ps.execute():
            # execute() returned True: there is a ResultSet to read
            self.__resultset = ps.getResultSet()
            if sql not in self.__resultmeta:
                self.__resultmeta[sql] = \
                    self.__extractresultmetadata(self.__resultset)
            (self.__resultnames, self.__resulttypes) = self.__resultmeta[sql]
        else:
            self.__resultset = None
            (self.__resultnames, self.__resulttypes) = (None, None)

    def __extractresultmetadata(self, resultset):
        # Get jdbc resultset metadata, extract column names and SQL types
        meta = resultset.getMetaData()
        names = []
        types = []
        for col in range(meta.getColumnCount()):
            names.append(meta.getColumnName(col+1))
            types.append(meta.getColumnType(col+1))
        return (names, types)

    def __readresultrow(self):
        # Read the current row of the open ResultSet using the typed getter
        # matching each column's SQL type.
        if self.__resultset is None:
            return None
        result = []
        for i in range(len(self.__resulttypes)):
            e = self.__resulttypes[i]  # Not Pythonic, but we need i for JDBC
            if e in (jdbc.Types.CHAR, jdbc.Types.VARCHAR,
                     jdbc.Types.LONGVARCHAR):
                result.append(self.__resultset.getString(i+1))
            elif e in (jdbc.Types.BIT, jdbc.Types.BOOLEAN):
                # Bug fix: java.sql.ResultSet defines getBoolean, not getBool
                result.append(self.__resultset.getBoolean(i+1))
            elif e in (jdbc.Types.TINYINT, jdbc.Types.SMALLINT,
                       jdbc.Types.INTEGER):
                result.append(self.__resultset.getInt(i+1))
            elif e in (jdbc.Types.BIGINT, ):
                result.append(self.__resultset.getLong(i+1))
            elif e in (jdbc.Types.DATE, ):
                result.append(self.__resultset.getDate(i+1))
            elif e in (jdbc.Types.TIMESTAMP, ):
                result.append(self.__resultset.getTimestamp(i+1))
            elif e in (jdbc.Types.TIME, ):
                result.append(self.__resultset.getTime(i+1))
            else:
                # Try this and hope for the best...
                result.append(self.__resultset.getString(i+1))
        return tuple(result)

    def execute(self, stmt, arguments=None, namemapping=None, ignored=None):
        """Execute a statement (asynchronously, from the worker thread).

        Arguments:

        - stmt: the statement to execute
        - arguments: a mapping with the arguments. Default: None.
        - namemapping: a mapping of names such that if stmt uses %(arg)s
          and namemapping[arg]=arg2, the value arguments[arg2] is used
          instead of arguments[arg]
        - ignored: An ignored argument only present to accept the same
          number of arguments as ConnectionWrapper.execute
        """
        if namemapping and arguments:
            arguments = pygrametl.copy(arguments, **namemapping)
        else:
            # Copy so later caller-side mutations don't affect the queued work
            arguments = pcopy(arguments)
        self.__queue.put((stmt, arguments))

    def executemany(self, stmt, params, ignored=None):
        """Execute a sequence of statements (asynchronously).

        Arguments:

        - stmt: the statement to execute
        - params: a sequence of arguments
        - ignored: An ignored argument only present to accept the same
          number of arguments as ConnectionWrapper.executemany
        """
        for paramset in params:
            self.__queue.put((stmt, paramset))

    def rowfactory(self, names=None):
        """Return a generator object returning result rows (i.e. dicts)."""
        # Wait for all queued statements before reading results
        self.__queue.join()
        if names is None:
            if self.__resultnames is None:
                return
            else:
                # Bug fix: __resultnames holds plain column-name strings, so
                # the previous "t[0]" used only the first character of each
                # name as the dict key.
                names = [self.nametranslator(t) for t in self.__resultnames]
        empty = (None, ) * len(self.__resultnames)
        while True:
            row = self.fetchonetuple()
            if row == empty:
                return
            yield dict(list(zip(names, row)))

    def fetchone(self, names=None):
        """Return one result row (i.e. dict)."""
        self.__queue.join()
        if self.__resultset is None:
            return {}
        if names is None:
            # Bug fix: translate each full column name (see rowfactory)
            names = [self.nametranslator(t) for t in self.__resultnames]
        values = self.fetchonetuple()
        return dict(list(zip(names, values)))

    def fetchonetuple(self):
        """Return one result tuple."""
        self.__queue.join()
        if self.__resultset is None:
            return ()
        # Jython: next(rs) invokes java.sql.ResultSet.next(); False means
        # the result set is exhausted, signalled here as an all-None tuple.
        if not next(self.__resultset):
            return (None, ) * len(self.__resultnames)
        else:
            return self.__readresultrow()

    def fetchmanytuples(self, cnt):
        """Return cnt result tuples."""
        self.__queue.join()
        if self.__resultset is None:
            return []
        empty = (None, ) * len(self.__resultnames)
        result = []
        for i in range(cnt):
            tmp = self.fetchonetuple()
            if tmp == empty:
                break
            result.append(tmp)
        return result

    def fetchalltuples(self):
        """Return all result tuples."""
        self.__queue.join()
        if self.__resultset is None:
            return []
        result = []
        empty = (None, ) * len(self.__resultnames)
        while True:
            tmp = self.fetchonetuple()
            if tmp == empty:
                return result
            result.append(tmp)

    def rowcount(self):
        """Not implemented. Return 0. Should return the size of the result."""
        return 0

    def getunderlyingmodule(self):
        """Return a reference to the underlying connection's module."""
        return modules[self.__class__.__module__]

    def commit(self):
        """Commit the transaction."""
        pygrametl.endload()
        self.__queue.join()
        self.__jdbcconn.commit()

    def close(self):
        """Close the connection to the database."""
        self.__queue.join()
        self.__jdbcconn.close()

    def rollback(self):
        """Rollback the transaction."""
        self.__queue.join()
        self.__jdbcconn.rollback()

    def setasdefault(self):
        """Set this ConnectionWrapper as the default connection."""
        pygrametl._defaulttargetconnection = self

    def cursor(self):
        """Not implemented for this JDBC connection wrapper!"""
        raise NotImplementedError(".cursor() not supported")

    def resultnames(self):
        """Return the current result's column names as a tuple, or None."""
        self.__queue.join()
        if self.__resultnames is None:
            return None
        else:
            return tuple(self.__resultnames)
def Date(year, month, day):
    """Build a java.sql.Date from numeric year, month and day."""
    parts = (str(year).zfill(4), str(month).zfill(2), str(day).zfill(2))
    return jdbc.Date.valueOf('-'.join(parts))
def Timestamp(year, month, day, hour, minute, second):
    """Build a java.sql.Timestamp from numeric date and time components."""
    ymd = '-'.join((str(year).zfill(4), str(month).zfill(2), str(day).zfill(2)))
    hms = ':'.join((str(hour).zfill(2), str(minute).zfill(2),
                    str(second).zfill(2)))
    return jdbc.Timestamp.valueOf(ymd + ' ' + hms)
| |
# -*- coding: utf-8 -*-
"""
shellstreaming.master.master
~~~~~~~~~~~~~~~~~~~~~~~~~~
:synopsis: Provides master process's entry point
"""
# standard module
import time
import argparse
import os
from os.path import abspath, dirname, join
import shlex
import logging
from subprocess import Popen
from ConfigParser import SafeConfigParser
# 3rd party
import cPickle as pickle
import networkx as nx
# my module
from shellstreaming.config import DEFAULT_CONFIG, DEFAULT_CONFIG_LOCATION
from shellstreaming.config.parse import parse_worker_hosts
from shellstreaming.util.logger import setup_TerminalLogger
from shellstreaming.util.importer import import_from_file
from shellstreaming.worker.run_worker_server import start_worker_server_thread
from shellstreaming.scheduler.master_main import sched_loop
from shellstreaming.util.comm import wait_worker_server, kill_worker_server, rpyc_namespace, connect_or_msg
from shellstreaming.master.job_placement import JobPlacement
import shellstreaming.master.master_struct as ms
from shellstreaming import api
def main():
    """Master process's entry point.

    Reads the config, sets up logging, launches worker servers (either a
    local thread in localhost_debug mode or via fabric auto-deploy), builds
    the job graph from the user's script, runs the scheduler loop, and
    finally runs the user's validation code.

    :returns: exit status of master process
    """
    # parse args
    args = _parse_args()
    # setup config: explicit --config wins over the default search locations
    cnfpath = args.config if args.config else _get_existing_cnf(DEFAULT_CONFIG_LOCATION)
    config = _setup_config(cnfpath)
    # setup logger
    setup_TerminalLogger(config.get('shellstreaming', 'log_level'))
    logger = logging.getLogger('TerminalLogger')
    logger.info('Used config file: %s' % (cnfpath))
    # overwrite worker_hosts when localhost_debug
    if config.getboolean('shellstreaming', 'localhost_debug'):
        config.set('shellstreaming', 'worker_hosts', 'localhost')
    # launch worker servers
    ms.WORKER_IDS = parse_worker_hosts(config.get('shellstreaming', 'worker_hosts'),
                                       config.getint('shellstreaming', 'worker_default_port'))
    if config.getboolean('shellstreaming', 'localhost_debug'):
        # launch a worker server on localhost
        logger.debug('Entering localhost_debug mode (launching worker on localhost:%s)' %
                     (config.getint('shellstreaming', 'worker_default_port')))
        # NOTE(review): th_service is never used afterwards — the thread is
        # presumably kept alive only as a side effect; confirm.
        th_service = start_worker_server_thread(config.getint('shellstreaming', 'worker_default_port'), logger)
    else:
        # auto-deploy, launch worker server on worker hosts
        _launch_workers(
            ms.WORKER_IDS,
            cnf_sent_to_worker=cnfpath,
            worker_log_path=config.get('shellstreaming', 'worker_log_path'),
            parallel_deploy=config.getboolean('shellstreaming', 'parallel_deploy'),
            ssh_private_key=config.get('shellstreaming', 'ssh_private_key'),
            send_latest_config_on_start=config.getboolean('shellstreaming', 'send_latest_config_on_start'),
            send_latest_codes_on_start=config.getboolean('shellstreaming', 'send_latest_codes_on_start'),
        )
    try:
        # make job graph from user's stream app description
        api.DEFAULT_PORT = config.getint('shellstreaming', 'worker_default_port')
        job_graph = _parse_stream_py(args.stream_py)
        # draw job graph (only when a destination path is configured)
        if config.get('shellstreaming', 'job_graph_path') != '':
            _draw_job_graph(job_graph,
                            config.get('shellstreaming', 'job_graph_path'),
                            config.getint('shellstreaming', 'job_graph_dpi'))
        # initialize :module:`master_struct`
        ms.job_placement = JobPlacement(job_graph)
        for host_port in ms.WORKER_IDS:
            ms.conn_pool[host_port] = connect_or_msg(*host_port)
        ms.MIN_RECORDS_IN_AGGREGATED_BATCHES = config.getint('shellstreaming', 'min_records_in_aggregated_batches')
        # initialize workers at a time (less rpc call)
        # NOTE: map() here relies on Python 2's eager map; under Python 3 the
        # lambdas would never run.
        pickled_worker_num_dict = pickle.dumps({w: num for num, w in enumerate(ms.WORKER_IDS)})
        pickled_job_graph = pickle.dumps(job_graph)
        map(lambda w: rpyc_namespace(w).init(w, pickled_worker_num_dict, pickled_job_graph,
                                             config.getboolean('shellstreaming', 'worker_set_cpu_affinity'),
                                             config.get('shellstreaming', 'worker_scheduler_module'),
                                             config.getfloat('shellstreaming', 'worker_reschedule_interval_sec'),
                                             config.get('shellstreaming', 'in_queue_selection_module'),
                                             config.getboolean('shellstreaming', 'check_datatype')),
            ms.WORKER_IDS)
        # start master's main loop.
        t_sched_loop_sec0 = time.time()
        sched_loop(job_graph, ms.WORKER_IDS,
                   config.get('shellstreaming', 'master_scheduler_module'),
                   config.getfloat('shellstreaming', 'master_reschedule_interval_sec'))
        t_sched_loop_sec1 = time.time()
        # kill workers after all jobs are finished
        logger.debug('Finished all job execution. Killing worker servers...')
        map(lambda w: kill_worker_server(*w), ms.WORKER_IDS)
    except KeyboardInterrupt:
        logger.debug('Received `KeyboardInterrupt`. Killing all worker servers ...')
        map(lambda w: kill_worker_server(*w), ms.WORKER_IDS)
        raise
    except:
        # Any other failure: make sure workers do not outlive the master
        map(lambda w: kill_worker_server(*w), ms.WORKER_IDS)
        raise
    # message
    logger.info('''
Finished all job execution.
Execution time: %(t_sched_loop_sec)f sec.
''' % {
        't_sched_loop_sec': t_sched_loop_sec1 - t_sched_loop_sec0
    })
    # run user's validation codes
    _run_test(args.stream_py)
    # message
    logger.info('passed test()!')
    return 0
def _parse_args():
    """Define the master's command-line interface and return parsed args."""
    from shellstreaming import __description__
    default_configs = ', '.join(DEFAULT_CONFIG_LOCATION)
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '--config', '-c',
        default=None,
        help='Configuration file. If not specified, %(default_configs)s are searched (from left) and one found is used.' % {
            'default_configs': default_configs,
        })
    parser.add_argument(
        'stream_py',
        help='Python script describing stream processings. Must have `.py` as suffix',
    )
    return parser.parse_args()
def _setup_config(cnfpath):
    """Load *cnfpath* into a SafeConfigParser pre-seeded with DEFAULT_CONFIG.

    Raises IOError when *cnfpath* is None (i.e. no config file was found).
    """
    if cnfpath is None:
        raise IOError('Config file not found: Specify via `--config` option or put one of %s.' % (DEFAULT_CONFIG_LOCATION))
    parser = SafeConfigParser(DEFAULT_CONFIG)
    parser.read(cnfpath)
    return parser
def _get_existing_cnf(cnf_candidates=DEFAULT_CONFIG_LOCATION):
    """Return the first existing path among *cnf_candidates*, or None."""
    existing = [path for path in cnf_candidates if os.path.exists(path)]
    return existing[0] if existing else None
def _launch_workers(workers,
                    cnf_sent_to_worker,
                    worker_log_path,
                    parallel_deploy,
                    ssh_private_key,
                    send_latest_config_on_start,
                    send_latest_codes_on_start,
                    ):
    """Launch every worker server via fabric auto-deploy and return.

    :param workers: (host, port) pairs to launch worker servers on
    :param cnf_sent_to_worker: if not `None`, specified config file is sent to worker hosts and used by them
    :param worker_log_path: worker servers' log path
    :param parallel_deploy: If `True`, auto-deploy is done in parallel. Especially useful when you have
        many workers.
        However, if you have to input anything (pass for secret key, login password, ...),
        :param:`parallel_deploy` has to be `False`.
    :param ssh_private_key: if not `None`, specified private key is used for ssh-login to every worker host
    :param send_latest_config_on_start: if `True`, deploy the config file to workers before starting them
    :param send_latest_codes_on_start: if `True`, pack and deploy the latest code to workers before starting them
    """
    # [todo] - make use of ssh_config (`fabric.api.env.ssh_config_path` must be True (via cmd opt?))
    logger = logging.getLogger('TerminalLogger')
    # deploy & start workers' server
    scriptpath = join(abspath(dirname(__file__)), '..', 'autodeploy', 'auto_deploy.py')
    # Build the ordered list of fabric tasks to run on each worker host
    fab_tasks = []
    if send_latest_codes_on_start:
        fab_tasks.append('pack')
        fab_tasks.append('deploy_codes')
    if send_latest_config_on_start or send_latest_codes_on_start:  # config is removed for latter case
        fab_tasks.append('deploy_config:cnfpath=%s' % (cnf_sent_to_worker))
    fab_tasks.append('start_worker:cnfpath=%s,logpath=%s' % (cnf_sent_to_worker, worker_log_path))
    cmd = 'fab -f %(script)s -H %(hosts)s %(tasks)s %(parallel_deploy)s %(ssh_priv_key)s' % {
        'script'          : scriptpath,
        'hosts'           : ','.join([w[0] for w in workers]),
        'tasks'           : ' '.join(fab_tasks),
        'parallel_deploy' : '-P' if parallel_deploy else '',
        'ssh_priv_key'    : '-i ' + ssh_private_key if ssh_private_key else '',
    }
    logger.debug('Auto-deploy starts with this command:%s%s' % (os.linesep, cmd))
    # Run fabric and fail hard if any deploy task failed
    p = Popen(shlex.split(cmd), env=os.environ)
    exitcode = p.wait()
    assert(exitcode == 0)
    # wait for all workers' server to start
    # [todo] - parallel wait
    for w in workers:
        wait_worker_server(*w)
        logger.debug('connected to %s:%d' % (w[0], w[1]))
def _parse_stream_py(stream_py):
    """Parse stream processing description and return job graph.

    :param stream_py: python script in which stream processings are described by users
    :returns: job graph
    :rtype: :class:`JobGraph()`
    """
    user_module = import_from_file(stream_py)
    # Running the user's main() mutates `api._job_graph` as a side effect
    user_main = getattr(user_module, 'main')
    user_main()
    return api._job_graph
def _run_test(stream_py):
    """Run validation code (an optional `test()` function) in user's script."""
    logger = logging.getLogger('TerminalLogger')
    test_func = getattr(import_from_file(stream_py), 'test', None)
    if test_func is None:
        # The user's script defines no test() hook; nothing to validate.
        return
    qualified_name = '%s.%s' % (test_func.__module__, test_func.__name__)
    try:
        test_func()
        logger.info('%s finished without any exception' % (qualified_name))
    except:
        logger.error('Exception has been raised in %s' % (qualified_name))
        raise
def _draw_job_graph(job_graph, path, dpi):
    """Render *job_graph* to an image file at *path* with networkx/matplotlib.

    Stream sources (begin nodes) are drawn red, sinks (end nodes) blue, and
    all remaining operator nodes white.
    """
    # Imported lazily so matplotlib is only required when drawing is enabled
    import matplotlib.pyplot as plt
    pos = nx.spring_layout(job_graph)
    nx.draw(job_graph, pos)
    # red color for istream
    nx.draw_networkx_nodes(job_graph, pos, nodelist=job_graph.begin_nodes(), node_color='r')
    # blue color for ostream
    nx.draw_networkx_nodes(job_graph, pos, nodelist=job_graph.end_nodes(), node_color='b')
    # white color for operator (every node that is neither source nor sink)
    nx.draw_networkx_nodes(
        job_graph, pos,
        nodelist=tuple(set(job_graph.nodes()) - set(job_graph.begin_nodes()) - set(job_graph.end_nodes())),
        node_color='w')
    # edge label
    nx.draw_networkx_edge_labels(job_graph, pos, job_graph.edge_labels)
    plt.savefig(path, dpi=dpi)
    logger = logging.getLogger('TerminalLogger')
    logger.info('Job graph figure is generated on: %s' % (path))
| |
"""
Find a primer sequence in a gapped alignment, trim to amplicon
"""
import argparse
import itertools
import logging
import operator
import sys
from Bio import Alphabet, SeqIO, pairwise2
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from seqmagick import transform, fileformat
from . import common
def build_parser(parser):
    """Register the primer-trim subcommand's arguments on *parser*."""
    parser.add_argument('source_file', help="Source alignment file",
                        type=argparse.FileType('r'))
    parser.add_argument('output_file', help="Destination trimmed file",
                        type=argparse.FileType('w'))
    parser.add_argument('forward_primer',
                        help="The forward primer used", type=iupac_ambiguous_sequence)
    parser.add_argument('reverse_primer', help="""The reverse primer used. By
            default the reverse primer is assumed to be a subsequence of the
            top strand (that is, the reverse complement of an actual downstream
            PCR primer). Use --reverse-is-revcomp if this is not the case.""",
                        type=iupac_ambiguous_sequence)
    parser.add_argument('--reverse-is-revcomp', default=False,
                        action='store_true', help="""Reverse primer is written as the
            reverse complement of the top strand (default: %(default)s)""",
                        dest="reverse_complement")
    parser.add_argument('--source-format', default=None,
                        help='Alignment format (default: detect from extension')
    parser.add_argument('--output-format', default=None,
                        help='Alignment format (default: detect from extension')
    parser.add_argument('--include-primers', default=False,
                        action="store_true", help='''Include the primers in the output
            (default: %(default)s)''')
    parser.add_argument('--max-hamming-distance',
                        type=common.positive_value(int), default=1, help="""Maximum Hamming
            distance between primer and alignment site (default: %(default)s).
            IUPAC ambiguous bases in the primer matching unambiguous bases in
            the alignment are not penalized""")
    parser.add_argument('--prune-action', choices=_ACTIONS.keys(),
                        default='trim',
                        help="""Action to take. Options are trim (trim to the region
            defined by the two primers, decreasing the width of the alignment),
            or isolate (convert all characters outside the primer-defined area
            to gaps). default: %(default)s""")
# Sequence-related functions
def ungap_index_map(sequence, gap_chars='-'):
    """
    Returns a dict mapping from an index in the ungapped sequence to an index
    in the gapped sequence.

    >>> ungap_index_map('AC-TG-')
    {0: 0, 1: 1, 2: 3, 3: 4}
    """
    # The original implementation relied on Python-2-only constructs
    # (`itertools.count(0).next` as a callable and `xrange`); this version is
    # behaviorally identical, clearer, and works on both Python 2 and 3.
    index_map = {}
    ungapped = 0
    for gapped, char in enumerate(sequence):
        if char not in gap_chars:
            index_map[ungapped] = gapped
            ungapped += 1
    return index_map
def gap_index_map(sequence, gap_chars='-'):
    """
    Opposite of ungap_index_map: returns mapping from gapped index to ungapped
    index.

    >>> gap_index_map('AC-TG-')
    {0: 0, 1: 1, 3: 2, 4: 3}
    """
    # Simply invert the ungapped->gapped mapping
    return dict((gapped, ungapped)
                for ungapped, gapped
                in ungap_index_map(sequence, gap_chars).items())
def _iupac_ambiguous_equal(ambig_base, unambig_base):
"""
Tests two bases for equality, accounting for IUPAC ambiguous DNA
ambiguous base may be IUPAC ambiguous, unambiguous must be one of ACGT
"""
iupac_translation = {'A': 'A', 'C': 'C', 'G': 'G',
'T': 'T', 'U': 'U', 'R': 'AG', 'Y': 'CT',
'S': 'GC', 'W': 'AT', 'K': 'GT', 'M': 'AC',
'B': 'CGT', 'D': 'AGT', 'H': 'ACT', 'V': 'ACG',
'N': 'ACGT', '-': '-'}
for i in (ambig_base, unambig_base):
if not len(i) == 1:
raise ValueError("only one base may be passed.")
return unambig_base.upper() in iupac_translation[ambig_base.upper()]
def hamming_distance(s1, s2, equality_function=operator.eq):
    """
    Returns the hamming distance between two strings.
    """
    if len(s1) != len(s2):
        raise ValueError("String lengths are not equal")
    # Count positions where the two characters are not considered equal
    distance = 0
    for left, right in zip(s1, s2):
        if not equality_function(left, right):
            distance += 1
    return distance
class PrimerNotFound(Exception):
    """Raised when a primer cannot be located in any input sequence."""
    pass
class PrimerOrderError(Exception):
    """Raised when the reverse primer is located before the forward primer."""

    def __init__(self, forward_indexes, reverse_indexes):
        message = "Reverse primer before forward primer: {0} > {1}".format(
            forward_indexes, reverse_indexes)
        super(PrimerOrderError, self).__init__(message)
class PrimerAligner(object):
    """
    Get positions of pairwise alignments of a primer to a sequence.
    """

    def __init__(self, primer, match=5, difference=-4, gap_open=-10,
                 gap_extend=-0.5):
        # Scoring parameters passed straight to pairwise2.align.globalms
        self.primer = primer
        self.match = match
        self.difference = difference
        self.gap_open = gap_open
        self.gap_extend = gap_extend

    def align(self, sequence):
        """
        Aligns the primer to the given query sequence, returning a tuple of:

            hamming_distance, start, end

        Where hamming distance is the distance between the primer and aligned
        sequence, and start and end give the start and end index of the primer
        relative to the input sequence.
        """
        seq_aln, primer_aln, score, start, end = \
            pairwise2.align.globalms(str(sequence).upper(), str(self.primer).upper(),
                                     self.match, self.difference, self.gap_open,
                                     self.gap_extend, one_alignment_only=True)[0]
        # Get an ungapped mapping on the sequence
        index_map = gap_index_map(seq_aln)
        ungap_map = ungap_index_map(primer_aln)
        # Trim to the primer's footprint within the alignment
        start = ungap_map[0]
        end = ungap_map[len(self.primer) - 1]
        trimmed = seq_aln[start:end+1]
        ham_dist = hamming_distance(primer_aln[start:end+1],
                                    trimmed, _iupac_ambiguous_equal)
        #assert primer_aln[start:end].replace('-', '') == str(self.primer)
        # TODO: handle start or end being gap better. For now, just give up
        # and return maxint for the hamming distance
        # NOTE(review): sys.maxint is Python-2-only; this module's other
        # helpers (xrange, .next) confirm it targets Python 2.
        if trimmed.endswith('-'):
            tail = len(trimmed) - len(trimmed.rstrip('-'))
            end = index_map[end-tail] + 1
            ham_dist = sys.maxint
        else:
            end = index_map[end]
        if trimmed.startswith('-'):
            start = 0
            ham_dist = sys.maxint
        else:
            start = index_map[start]
        return ham_dist, start, end

    @property
    def max_score(self):
        """
        Maximum possible alignment score
        """
        return len(self.primer) * self.match
# Types for argparse
def iupac_ambiguous_sequence(string):
    """argparse type: wrap *string* as a Seq with the IUPAC ambiguous-DNA alphabet."""
    return Seq(string, IUPAC.ambiguous_dna)
def locate_primers(sequences, forward_primer, reverse_primer,
                   reverse_complement, max_hamming_distance):
    """
    Find forward and reverse primers in a set of sequences, return two tuples:
    (forward_start, forward_end), (reverse_start, reverse_end)

    Scans the sequences in order, aligning each primer against the ungapped
    sequence until both primers are found within *max_hamming_distance*;
    raises PrimerNotFound if the input is exhausted first.
    """
    forward_loc = None
    reverse_loc = None
    seq_length = None

    # Reverse complement the reverse primer, if appropriate
    if reverse_complement:
        reverse_primer = reverse_primer.reverse_complement()

    forward_aligner = PrimerAligner(forward_primer)
    reverse_aligner = PrimerAligner(reverse_primer)

    for i, sequence in enumerate(sequences):
        # All records of an alignment must share one length
        if seq_length is None:
            seq_length = len(sequence)
        elif len(sequence) != seq_length:
            raise ValueError(("Sequence Length Heterogeneity: {0} != {1}. "
                              "Is this an alignment?").format(len(sequence), seq_length))
        # Map ungapped hit positions back to gapped (alignment) coordinates
        index_map = ungap_index_map(sequence.seq)
        if forward_loc is None:
            ham_dist, start, end = \
                forward_aligner.align(sequence.seq.ungap())
            if ham_dist <= max_hamming_distance:
                forward_loc = index_map[start], index_map[end]
                logging.info("Forward in sequence %d: indexes %d to %d", i + 1,
                             *forward_loc)
        if reverse_loc is None:
            ham_dist, start, end = \
                reverse_aligner.align(sequence.seq.ungap())
            if ham_dist <= max_hamming_distance:
                reverse_loc = index_map[start], index_map[end]
                logging.info("Reverse in sequence %d: indexes %d to %d", i + 1,
                             *reverse_loc)
        if forward_loc and reverse_loc:
            # Both found
            # Check order
            if forward_loc[0] > reverse_loc[0]:
                raise PrimerOrderError(forward_loc[0], reverse_loc[0])
            return forward_loc, reverse_loc
        else:
            logging.debug("Sequence %d: %d/2 primers found",
                          i + 1, sum(j is not None
                                     for j in (forward_loc, reverse_loc)))

    # Did not find either the forward or reverse primer:
    if not forward_loc:
        raise PrimerNotFound(forward_primer)
    else:
        raise PrimerNotFound(reverse_primer)
def trim(sequences, start, end):
    """Lazily slice every input sequence to the half-open range [start, end)."""
    logging.info("Trimming from %d to %d", start, end)

    def _clip(record):
        # One record sliced to the requested window.
        return record[start:end]

    return (_clip(record) for record in sequences)
# Prune actions: maps the CLI prune-action name to the callable that applies it.
# Each callable takes (sequences, start, end) and returns an iterable of records.
_ACTIONS = {'trim': trim, 'isolate': transform.isolate_region}
def action(arguments):
    """
    Trim the alignment as specified.

    Reads the alignment from ``arguments.source_file``, locates the forward
    and reverse primers, computes the slice window (optionally including the
    primers themselves), applies the configured prune action and writes the
    result to ``arguments.output_file``.
    """
    # Determine file format for input and output
    source_format = (arguments.source_format or
                     fileformat.from_handle(arguments.source_file))
    output_format = (arguments.output_format or
                     fileformat.from_handle(arguments.output_file))
    # Load the alignment
    with arguments.source_file:
        sequences = SeqIO.parse(arguments.source_file, source_format,
                                alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
        # Locate primers
        (forward_start, forward_end), (reverse_start, reverse_end) = \
            locate_primers(sequences, arguments.forward_primer,
                           arguments.reverse_primer, arguments.reverse_complement,
                           arguments.max_hamming_distance)
        # Generate slice indexes
        if arguments.include_primers:
            start = forward_start
            end = reverse_end + 1
        else:
            start = forward_end + 1
            end = reverse_start
        # Rewind the input file: locate_primers consumed the parse iterator,
        # so the alignment must be re-read from the start.
        arguments.source_file.seek(0)
        sequences = SeqIO.parse(arguments.source_file,
                                source_format,
                                alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
        # Apply the transformation
        prune_action = _ACTIONS[arguments.prune_action]
        transformed_sequences = prune_action(sequences, start, end)
        with arguments.output_file:
            SeqIO.write(transformed_sequences, arguments.output_file,
                        output_format)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The worker communicates with the scheduler and does two things:
1. Sends all tasks that have to be run
2. Gets tasks from the scheduler that should be run
When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.Scheduler` instance.
When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance.
Everything in this module is private to luigi and may change in incompatible
ways between versions. The exception is the exception types and the
:py:class:`worker` config class.
"""
import collections
import getpass
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
try:
import Queue
except ImportError:
import queue as Queue
import random
import socket
import threading
import time
import traceback
import types
from luigi import six
from luigi import notifications
from luigi.event import Event
from luigi.task_register import load_task
from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, UNKNOWN, Scheduler, RetryPolicy
from luigi.scheduler import WORKER_STATE_ACTIVE, WORKER_STATE_DISABLED
from luigi.target import Target
from luigi.task import Task, flatten, getpaths, Config
from luigi.task_register import TaskClassException
from luigi.task_status import RUNNING
from luigi.parameter import FloatParameter, IntParameter, BoolParameter
try:
import simplejson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
# Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex,
# that may not be unlocked in child process, resulting in the process being locked indefinitely.
fork_lock = threading.Lock()
# Why we assert on _WAIT_INTERVAL_EPS:
# multiprocessing.Queue.get() is undefined for timeout=0 it seems:
# https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.get.
# I also tried with really low epsilon, but then ran into the same issue where
# the test case "test_external_dependency_worker_is_patient" got stuck. So I
# unscientifically just set the final value to a floating point number that
# "worked for me".
_WAIT_INTERVAL_EPS = 0.00001
def _is_external(task):
return task.run is None or task.run == NotImplemented
def _get_retry_policy_dict(task):
    """Build the per-task retry policy dict sent to the scheduler from the task's retry settings."""
    return RetryPolicy(task.retry_count, task.disable_hard_timeout, task.disable_window_seconds)._asdict()
class TaskException(Exception):
    """Raised when the worker is asked to schedule an invalid task object."""
    pass
# Immutable record of one get_work round-trip with the scheduler:
# the task to run (if any), currently running tasks, pending-task counts
# and the scheduler's view of this worker's state.
GetWorkResponse = collections.namedtuple('GetWorkResponse', (
    'task_id',
    'running_tasks',
    'n_pending_tasks',
    'n_unique_pending',
    'n_pending_last_scheduled',
    'worker_state',
))
class TaskProcess(multiprocessing.Process):
    """ Wrap all task execution in this class.
    Mainly for convenience since this is run in a separate process. """
    def __init__(self, task, worker_id, result_queue, tracking_url_callback,
                 status_message_callback, use_multiprocessing=False, worker_timeout=0):
        """
        :param task: luigi Task instance to execute
        :param worker_id: id of the owning Worker, for logging
        :param result_queue: queue onto which the run outcome tuple is put
        :param tracking_url_callback: called when the task sets a tracking URL
        :param status_message_callback: called when the task sets a status message
        :param use_multiprocessing: force execution in a separate process
        :param worker_timeout: seconds before the task times out (0 = no timeout)
        """
        super(TaskProcess, self).__init__()
        self.task = task
        self.worker_id = worker_id
        self.result_queue = result_queue
        self.tracking_url_callback = tracking_url_callback
        self.status_message_callback = status_message_callback
        # A per-task timeout overrides the worker-level default.
        if task.worker_timeout is not None:
            worker_timeout = task.worker_timeout
        self.timeout_time = time.time() + worker_timeout if worker_timeout else None
        # A timeout can only be enforced by terminating a child process, so a
        # timeout forces multiprocessing even when not explicitly requested.
        self.use_multiprocessing = use_multiprocessing or self.timeout_time is not None
    def _run_get_new_deps(self):
        # Run the task. If run() is a generator, drive it: yielded requirements
        # that are all complete are answered with their outputs; the first batch
        # with incomplete requirements is returned for scheduling.
        self.task.set_tracking_url = self.tracking_url_callback
        self.task.set_status_message = self.status_message_callback
        task_gen = self.task.run()
        self.task.set_tracking_url = None
        self.task.set_status_message = None
        if not isinstance(task_gen, types.GeneratorType):
            return None
        next_send = None
        while True:
            try:
                if next_send is None:
                    requires = six.next(task_gen)
                else:
                    requires = task_gen.send(next_send)
            except StopIteration:
                # run() finished without incomplete requirements.
                return None
            new_req = flatten(requires)
            if all(t.complete() for t in new_req):
                # Resume run() with the targets of the satisfied requirements.
                next_send = getpaths(requires)
            else:
                # Report incomplete requirements as (module, family, params) so
                # the parent worker can reconstruct and schedule them.
                new_deps = [(t.task_module, t.task_family, t.to_str_params())
                            for t in new_req]
                return new_deps
    def run(self):
        # Entry point of the (possibly child) process: execute the task and
        # always report a (task_id, status, expl, missing, new_deps) result.
        logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task)
        if self.use_multiprocessing:
            # Need to have different random seeds if running in separate processes
            random.seed((os.getpid(), time.time()))
        status = FAILED
        expl = ''
        missing = []
        new_deps = []
        try:
            # Verify that all the tasks are fulfilled! For external tasks we
            # don't care about unfulfilled dependencies, because we are just
            # checking completeness of self.task so outputs of dependencies are
            # irrelevant.
            if not _is_external(self.task):
                missing = [dep.task_id for dep in self.task.deps() if not dep.complete()]
                if missing:
                    deps = 'dependency' if len(missing) == 1 else 'dependencies'
                    raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing)))
            self.task.trigger_event(Event.START, self.task)
            t0 = time.time()
            status = None
            if _is_external(self.task):
                # External task
                # TODO(erikbern): We should check for task completeness after non-external tasks too!
                # This will resolve #814 and make things a lot more consistent
                if self.task.complete():
                    status = DONE
                else:
                    status = FAILED
                    expl = 'Task is an external data dependency ' \
                           'and data does not exist (yet?).'
            else:
                new_deps = self._run_get_new_deps()
                # Incomplete dynamic dependencies leave the task PENDING.
                status = DONE if not new_deps else PENDING
            if new_deps:
                logger.info(
                    '[pid %s] Worker %s new requirements %s',
                    os.getpid(), self.worker_id, self.task)
            elif status == DONE:
                self.task.trigger_event(
                    Event.PROCESSING_TIME, self.task, time.time() - t0)
                expl = self.task.on_success()
                logger.info('[pid %s] Worker %s done %s', os.getpid(),
                            self.worker_id, self.task)
                self.task.trigger_event(Event.SUCCESS, self.task)
        except KeyboardInterrupt:
            raise
        except BaseException as ex:
            status = FAILED
            logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task)
            self.task.trigger_event(Event.FAILURE, self.task, ex)
            raw_error_message = self.task.on_failure(ex)
            expl = raw_error_message
        finally:
            # Always report back, even on failure, so the parent never hangs.
            self.result_queue.put(
                (self.task.task_id, status, expl, missing, new_deps))
    def _recursive_terminate(self):
        # Terminate this process and every descendant (requires psutil).
        import psutil
        try:
            parent = psutil.Process(self.pid)
            children = parent.children(recursive=True)
            # terminate parent. Give it a chance to clean up
            super(TaskProcess, self).terminate()
            parent.wait()
            # terminate children
            for child in children:
                try:
                    child.terminate()
                except psutil.NoSuchProcess:
                    continue
        except psutil.NoSuchProcess:
            # Process already gone; nothing to clean up.
            return
    def terminate(self):
        """Terminate this process and its subprocesses."""
        # default terminate() doesn't cleanup child processes, it orphans them.
        try:
            return self._recursive_terminate()
        except ImportError:
            # psutil unavailable: fall back to plain terminate (may orphan children).
            return super(TaskProcess, self).terminate()
class SingleProcessPool(object):
    """
    In-process stand-in for multiprocessing.Pool.

    Exposes the same apply_async/close/join surface, but executes the callable
    synchronously on the current processor.
    """
    def apply_async(self, function, args):
        # Run immediately instead of dispatching to a worker process.
        outcome = function(*args)
        return outcome
    def close(self):
        # Nothing to shut down for an in-process "pool".
        pass
    def join(self):
        # No worker processes to wait on.
        pass
class DequeQueue(collections.deque):
    """
    deque wrapper exposing a minimal Queue-compatible interface.

    ``block`` and ``timeout`` are accepted for signature compatibility but
    ignored: operations never block.
    """
    def put(self, obj, block=None, timeout=None):
        # Same end as get() pops from, i.e. LIFO order.
        return self.append(obj)
    def get(self, block=None, timeout=None):
        # Signal emptiness the same way the queue module does.
        if not self:
            raise Queue.Empty
        return self.pop()
class AsyncCompletionException(Exception):
    """
    Exception indicating that something went wrong with checking complete.
    """
    def __init__(self, trace):
        # Formatted traceback string captured where complete() failed.
        self.trace = trace
class TracebackWrapper(object):
    """
    Class to wrap tracebacks so we can know they're not just strings.
    """
    def __init__(self, trace):
        # Formatted traceback text; wrapping distinguishes it from a plain str.
        self.trace = trace
def check_complete(task, out_queue):
    """
    Evaluate ``task.complete()`` and push ``(task, result)`` onto *out_queue*.

    A failing completeness check is reported as a TracebackWrapper instead of
    being raised, so the consumer can tell errors apart from plain booleans.
    """
    logger.debug("Checking if %s is complete", task)
    try:
        outcome = task.complete()
    except Exception:
        # Capture the formatted traceback; the caller unwraps it later.
        outcome = TracebackWrapper(traceback.format_exc())
    out_queue.put((task, outcome))
class worker(Config):
    """Configuration knobs for a Worker (section ``[worker]`` in the config)."""
    # NOTE: `section.config-variable` in the config_path argument is deprecated in favor of `worker.config_variable`
    # Seconds between keep-alive pings sent to the scheduler.
    ping_interval = FloatParameter(default=1.0,
                                   config_path=dict(section='core', name='worker-ping-interval'))
    # Keep the worker alive when it runs out of work (see _keep_alive()).
    keep_alive = BoolParameter(default=False,
                               config_path=dict(section='core', name='worker-keep-alive'))
    count_uniques = BoolParameter(default=False,
                                  config_path=dict(section='core', name='worker-count-uniques'),
                                  description='worker-count-uniques means that we will keep a '
                                  'worker alive only if it has a unique pending task, as '
                                  'well as having keep-alive true')
    count_last_scheduled = BoolParameter(default=False,
                                         description='Keep a worker alive only if there are '
                                         'pending tasks which it was the last to '
                                         'schedule.')
    # Base sleep between get_work polls; jitter is added on top.
    wait_interval = FloatParameter(default=1.0,
                                   config_path=dict(section='core', name='worker-wait-interval'))
    wait_jitter = FloatParameter(default=5.0)
    # How many times a task with missing dependencies may be re-added.
    max_reschedules = IntParameter(default=1,
                                   config_path=dict(section='core', name='worker-max-reschedules'))
    # Per-task execution timeout in seconds (0 disables the timeout).
    timeout = IntParameter(default=0,
                           config_path=dict(section='core', name='worker-timeout'))
    # Cap on how many tasks a single worker will schedule.
    task_limit = IntParameter(default=None,
                              config_path=dict(section='core', name='worker-task-limit'))
    retry_external_tasks = BoolParameter(default=False,
                                         config_path=dict(section='core', name='retry-external-tasks'),
                                         description='If true, incomplete external tasks will be '
                                         'retested for completion while Luigi is running.')
    send_failure_email = BoolParameter(default=True,
                                       description='If true, send e-mails directly from the worker'
                                       'on failure')
    no_install_shutdown_handler = BoolParameter(default=False,
                                                description='If true, the SIGUSR1 shutdown handler will'
                                                'NOT be install on the worker')
class KeepAliveThread(threading.Thread):
    """
    Periodically tell the scheduler that the worker still lives.
    """
    def __init__(self, scheduler, worker_id, ping_interval):
        """
        :param scheduler: scheduler (or RPC proxy) providing ``ping(worker=...)``
        :param worker_id: id reported in each ping
        :param ping_interval: seconds to sleep between pings
        """
        super(KeepAliveThread, self).__init__()
        self._should_stop = threading.Event()
        self._scheduler = scheduler
        self._worker_id = worker_id
        self._ping_interval = ping_interval
    def stop(self):
        # Signal run() to exit at its next wakeup.
        self._should_stop.set()
    def run(self):
        while True:
            # wait() doubles as the sleep between pings and wakes early on stop().
            self._should_stop.wait(self._ping_interval)
            if self._should_stop.is_set():
                # Lazy %-args instead of eager string interpolation in the log call.
                logger.info("Worker %s was stopped. Shutting down Keep-Alive thread", self._worker_id)
                break
            with fork_lock:
                try:
                    self._scheduler.ping(worker=self._worker_id)
                # Was a bare `except:` (E722); keep the deliberate catch-all
                # (e.g. httplib.BadStatusLine from a flaky scheduler) explicit.
                except BaseException:
                    logger.warning('Failed pinging scheduler')
class Worker(object):
"""
Worker object communicates with a scheduler.
Simple class that talks to a scheduler and:
* tells the scheduler what it has to do + its dependencies
* asks for stuff to do (pulls it in a loop and runs it)
"""
def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs):
if scheduler is None:
scheduler = Scheduler()
self.worker_processes = int(worker_processes)
self._worker_info = self._generate_worker_info()
if not worker_id:
worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info])
self._config = worker(**kwargs)
assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, "[worker] wait_interval must be positive"
assert self._config.wait_jitter >= 0.0, "[worker] wait_jitter must be equal or greater than zero"
self._id = worker_id
self._scheduler = scheduler
self._assistant = assistant
self._stop_requesting_work = False
self.host = socket.gethostname()
self._scheduled_tasks = {}
self._suspended_tasks = {}
self._batch_running_tasks = {}
self._batch_families_sent = set()
self._first_task = None
self.add_succeeded = True
self.run_succeeded = True
self.unfulfilled_counts = collections.defaultdict(int)
# note that ``signal.signal(signal.SIGUSR1, fn)`` only works inside the main execution thread, which is why we
# provide the ability to conditionally install the hook.
if not self._config.no_install_shutdown_handler:
try:
signal.signal(signal.SIGUSR1, self.handle_interrupt)
signal.siginterrupt(signal.SIGUSR1, False)
except AttributeError:
pass
# Keep info about what tasks are running (could be in other processes)
self._task_result_queue = multiprocessing.Queue()
self._running_tasks = {}
# Stuff for execution_summary
self._add_task_history = []
self._get_work_response_history = []
def _add_task(self, *args, **kwargs):
"""
Call ``self._scheduler.add_task``, but store the values too so we can
implement :py:func:`luigi.execution_summary.summary`.
"""
task_id = kwargs['task_id']
status = kwargs['status']
runnable = kwargs['runnable']
task = self._scheduled_tasks.get(task_id)
if task:
msg = (task, status, runnable)
self._add_task_history.append(msg)
kwargs['owners'] = task._owner_list()
if task_id in self._batch_running_tasks:
for batch_task in self._batch_running_tasks.pop(task_id):
self._add_task_history.append((batch_task, status, True))
self._scheduler.add_task(*args, **kwargs)
logger.info('Informed scheduler that task %s has status %s', task_id, status)
def __enter__(self):
"""
Start the KeepAliveThread.
"""
self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id, self._config.ping_interval)
self._keep_alive_thread.daemon = True
self._keep_alive_thread.start()
return self
def __exit__(self, type, value, traceback):
"""
Stop the KeepAliveThread and kill still running tasks.
"""
self._keep_alive_thread.stop()
self._keep_alive_thread.join()
for task in self._running_tasks.values():
if task.is_alive():
task.terminate()
return False # Don't suppress exception
def _generate_worker_info(self):
# Generate as much info as possible about the worker
# Some of these calls might not be available on all OS's
args = [('salt', '%09d' % random.randrange(0, 999999999)),
('workers', self.worker_processes)]
try:
args += [('host', socket.gethostname())]
except BaseException:
pass
try:
args += [('username', getpass.getuser())]
except BaseException:
pass
try:
args += [('pid', os.getpid())]
except BaseException:
pass
try:
sudo_user = os.getenv("SUDO_USER")
if sudo_user:
args.append(('sudo_user', sudo_user))
except BaseException:
pass
return args
def _validate_task(self, task):
if not isinstance(task, Task):
raise TaskException('Can not schedule non-task %s' % task)
if not task.initialized():
# we can't get the repr of it since it's not initialized...
raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' % task.__class__.__name__)
def _log_complete_error(self, task, tb):
log_msg = "Will not run {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_dependency_error(self, task, tb):
log_msg = "Will not run {task} or any dependencies due to error in deps() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_unexpected_error(self, task):
logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause
def _announce_scheduling_failure(self, task, expl):
try:
self._scheduler.announce_scheduling_failure(
worker=self._id,
task_name=str(task),
family=task.task_family,
params=task.to_str_params(only_significant=True),
expl=expl,
owners=task._owner_list(),
)
except Exception:
raise
formatted_traceback = traceback.format_exc()
self._email_unexpected_error(task, formatted_traceback)
def _email_complete_error(self, task, formatted_traceback):
self._announce_scheduling_failure(task, formatted_traceback)
if self._config.send_failure_email:
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not run {task} or any dependencies due to error in complete() method",
)
def _email_dependency_error(self, task, formatted_traceback):
self._announce_scheduling_failure(task, formatted_traceback)
if self._config.send_failure_email:
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not run {task} or any dependencies due to error in deps() method",
)
def _email_unexpected_error(self, task, formatted_traceback):
# this sends even if failure e-mails are disabled, as they may indicate
# a more severe failure that may not reach other alerting methods such
# as scheduler batch notification
self._email_error(task, formatted_traceback,
subject="Luigi: Framework error while scheduling {task}. Host: {host}",
headline="Luigi framework error",
)
def _email_task_failure(self, task, formatted_traceback):
if self._config.send_failure_email:
self._email_error(task, formatted_traceback,
subject="Luigi: {task} FAILED. Host: {host}",
headline="A task failed when running. Most likely run() raised an exception.",
)
def _email_error(self, task, formatted_traceback, subject, headline):
formatted_subject = subject.format(task=task, host=self.host)
command = subprocess.list2cmdline(sys.argv)
message = notifications.format_task_error(headline, task, command, formatted_traceback)
notifications.send_error_email(formatted_subject, message, task.owner_email)
def _handle_task_load_error(self, exception, task_ids):
msg = 'Cannot find task(s) sent by scheduler: {}'.format(','.join(task_ids))
logger.exception(msg)
subject = 'Luigi: {}'.format(msg)
error_message = notifications.wrap_traceback(exception)
for task_id in task_ids:
self._add_task(
worker=self._id,
task_id=task_id,
status=FAILED,
runnable=False,
expl=error_message,
)
notifications.send_error_email(subject, error_message)
def add(self, task, multiprocess=False):
"""
Add a Task for the worker to check and possibly schedule and run.
Returns True if task and its dependencies were successfully scheduled or completed before.
"""
if self._first_task is None and hasattr(task, 'task_id'):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
# we track queue size ourselves because len(queue) won't work for multiprocessing
queue_size = 1
try:
seen = set([task.task_id])
while queue_size:
current = queue.get()
queue_size -= 1
item, is_complete = current
for next in self._add(item, is_complete):
if next.task_id not in seen:
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (KeyboardInterrupt, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
raise
finally:
pool.close()
pool.join()
return self.add_succeeded
def _add_task_batcher(self, task):
family = task.task_family
if family not in self._batch_families_sent:
task_class = type(task)
batch_param_names = task_class.batch_param_names()
if batch_param_names:
self._scheduler.add_task_batcher(
worker=self._id,
task_family=family,
batched_args=batch_param_names,
max_batch_size=task.max_batch_size,
)
self._batch_families_sent.add(family)
def _add(self, task, is_complete):
if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit:
logger.warning('Will not run %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit)
deps = None
status = UNKNOWN
runnable = False
else:
formatted_traceback = None
try:
self._check_complete_value(is_complete)
except KeyboardInterrupt:
raise
except AsyncCompletionException as ex:
formatted_traceback = ex.trace
except BaseException:
formatted_traceback = traceback.format_exc()
if formatted_traceback is not None:
self.add_succeeded = False
self._log_complete_error(task, formatted_traceback)
task.trigger_event(Event.DEPENDENCY_MISSING, task)
self._email_complete_error(task, formatted_traceback)
deps = None
status = UNKNOWN
runnable = False
elif is_complete:
deps = None
status = DONE
runnable = False
task.trigger_event(Event.DEPENDENCY_PRESENT, task)
elif _is_external(task):
deps = None
status = PENDING
runnable = worker().retry_external_tasks
task.trigger_event(Event.DEPENDENCY_MISSING, task)
logger.warning('Data for %s does not exist (yet?). The task is an '
'external data depedency, so it can not be run from'
' this luigi process.', task)
else:
try:
deps = task.deps()
self._add_task_batcher(task)
except Exception as ex:
formatted_traceback = traceback.format_exc()
self.add_succeeded = False
self._log_dependency_error(task, formatted_traceback)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_dependency_error(task, formatted_traceback)
deps = None
status = UNKNOWN
runnable = False
else:
status = PENDING
runnable = True
if task.disabled:
status = DISABLED
if deps:
for d in deps:
self._validate_dependency(d)
task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d)
yield d # return additional tasks to add
deps = [d.task_id for d in deps]
self._scheduled_tasks[task.task_id] = task
self._add_task(
worker=self._id,
task_id=task.task_id,
status=status,
deps=deps,
runnable=runnable,
priority=task.priority,
resources=task.process_resources(),
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
batchable=task.batchable,
retry_policy_dict=_get_retry_policy_dict(task),
)
def _validate_dependency(self, dependency):
if isinstance(dependency, Target):
raise Exception('requires() can not return Target objects. Wrap it in an ExternalTask class')
elif not isinstance(dependency, Task):
raise Exception('requires() must return Task objects')
def _check_complete_value(self, is_complete):
if is_complete not in (True, False):
if isinstance(is_complete, TracebackWrapper):
raise AsyncCompletionException(is_complete.trace)
raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete)
def _add_worker(self):
self._worker_info.append(('first_task', self._first_task))
self._scheduler.add_worker(self._id, self._worker_info)
def _log_remote_tasks(self, get_work_response):
logger.debug("Done")
logger.debug("There are no more tasks to run at this time")
if get_work_response.running_tasks:
for r in get_work_response.running_tasks:
logger.debug('%s is currently run by worker %s', r['task_id'], r['worker'])
elif get_work_response.n_pending_tasks:
logger.debug(
"There are %s pending tasks possibly being run by other workers",
get_work_response.n_pending_tasks)
if get_work_response.n_unique_pending:
logger.debug(
"There are %i pending tasks unique to this worker",
get_work_response.n_unique_pending)
if get_work_response.n_pending_last_scheduled:
logger.debug(
"There are %i pending tasks last scheduled by this worker",
get_work_response.n_pending_last_scheduled)
def _get_work_task_id(self, get_work_response):
if get_work_response.get('task_id') is not None:
return get_work_response['task_id']
elif 'batch_id' in get_work_response:
try:
task = load_task(
module=get_work_response.get('task_module'),
task_name=get_work_response['task_family'],
params_str=get_work_response['task_params'],
)
except Exception as ex:
self._handle_task_load_error(ex, get_work_response['batch_task_ids'])
self.run_succeeded = False
return None
self._scheduler.add_task(
worker=self._id,
task_id=task.task_id,
module=get_work_response.get('task_module'),
family=get_work_response['task_family'],
params=task.to_str_params(),
status=RUNNING,
batch_id=get_work_response['batch_id'],
)
return task.task_id
else:
return None
def _get_work(self):
if self._stop_requesting_work:
return GetWorkResponse(None, 0, 0, 0, 0, WORKER_STATE_DISABLED)
if self.worker_processes > 0:
logger.debug("Asking scheduler for work...")
r = self._scheduler.get_work(
worker=self._id,
host=self.host,
assistant=self._assistant,
current_tasks=list(self._running_tasks.keys()),
)
else:
logger.debug("Checking if tasks are still pending")
r = self._scheduler.count_pending(worker=self._id)
running_tasks = r['running_tasks']
task_id = self._get_work_task_id(r)
self._get_work_response_history.append({
'task_id': task_id,
'running_tasks': running_tasks,
})
if task_id is not None and task_id not in self._scheduled_tasks:
logger.info('Did not schedule %s, will load it dynamically', task_id)
try:
# TODO: we should obtain the module name from the server!
self._scheduled_tasks[task_id] = \
load_task(module=r.get('task_module'),
task_name=r['task_family'],
params_str=r['task_params'])
except TaskClassException as ex:
self._handle_task_load_error(ex, [task_id])
task_id = None
self.run_succeeded = False
if task_id is not None and 'batch_task_ids' in r:
batch_tasks = filter(None, [
self._scheduled_tasks.get(batch_id) for batch_id in r['batch_task_ids']])
self._batch_running_tasks[task_id] = batch_tasks
return GetWorkResponse(
task_id=task_id,
running_tasks=running_tasks,
n_pending_tasks=r['n_pending_tasks'],
n_unique_pending=r['n_unique_pending'],
# TODO: For a tiny amount of time (a month?) we'll keep forwards compatibility
# That is you can user a newer client than server (Sep 2016)
n_pending_last_scheduled=r.get('n_pending_last_scheduled', 0),
worker_state=r.get('worker_state', WORKER_STATE_ACTIVE),
)
def _run_task(self, task_id):
task = self._scheduled_tasks[task_id]
task_process = self._create_task_process(task)
self._running_tasks[task_id] = task_process
if task_process.use_multiprocessing:
with fork_lock:
task_process.start()
else:
# Run in the same process
task_process.run()
def _create_task_process(self, task):
def update_tracking_url(tracking_url):
self._scheduler.add_task(
task_id=task.task_id,
worker=self._id,
status=RUNNING,
tracking_url=tracking_url,
)
def update_status_message(message):
self._scheduler.set_task_status_message(task.task_id, message)
return TaskProcess(
task, self._id, self._task_result_queue, update_tracking_url, update_status_message,
use_multiprocessing=bool(self.worker_processes > 1),
worker_timeout=self._config.timeout
)
def _purge_children(self):
"""
Find dead children and put a response on the result queue.
:return:
"""
for task_id, p in six.iteritems(self._running_tasks):
if not p.is_alive() and p.exitcode:
error_msg = 'Task {} died unexpectedly with exit code {}'.format(task_id, p.exitcode)
p.task.trigger_event(Event.PROCESS_FAILURE, p.task, error_msg)
elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive():
p.terminate()
error_msg = 'Task {} timed out after {} seconds and was terminated.'.format(task_id, p.task.worker_timeout)
p.task.trigger_event(Event.TIMEOUT, p.task, error_msg)
else:
continue
logger.info(error_msg)
self._task_result_queue.put((task_id, FAILED, error_msg, [], []))
def _handle_next_task(self):
"""
We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.
"""
while True:
self._purge_children() # Deal with subprocess failures
try:
task_id, status, expl, missing, new_requirements = (
self._task_result_queue.get(
timeout=self._config.wait_interval))
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if not task or task_id not in self._running_tasks:
continue
# Not a running task. Probably already removed.
# Maybe it yielded something?
# external task if run not implemented, retry-able if config option is enabled.
external_task_retryable = _is_external(task) and self._config.retry_external_tasks
if status == FAILED and not external_task_retryable:
self._email_task_failure(task, expl)
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params)
for module, name, params in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id,
task_id=task_id,
status=status,
expl=json.dumps(expl),
resources=task.process_resources(),
runnable=None,
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
new_deps=new_deps,
assistant=self._assistant)
self._running_tasks.pop(task_id)
# re-add task to reschedule missing dependencies
if missing:
reschedule = True
# keep out of infinite loops by not rescheduling too many times
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] >
self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)
return
def _sleeper(self):
# TODO is exponential backoff necessary?
while True:
jitter = self._config.wait_jitter
wait_interval = self._config.wait_interval + random.uniform(0, jitter)
logger.debug('Sleeping for %f seconds', wait_interval)
time.sleep(wait_interval)
yield
def _keep_alive(self, get_work_response):
"""
Returns true if a worker should stay alive given.
If worker-keep-alive is not set, this will always return false.
For an assistant, it will always return the value of worker-keep-alive.
Otherwise, it will return true for nonzero n_pending_tasks.
If worker-count-uniques is true, it will also
require that one of the tasks is unique to this worker.
"""
if not self._config.keep_alive:
return False
elif self._assistant:
return True
elif self._config.count_last_scheduled:
return get_work_response.n_pending_last_scheduled > 0
elif self._config.count_uniques:
return get_work_response.n_unique_pending > 0
else:
return get_work_response.n_pending_tasks > 0
def handle_interrupt(self, signum, _):
"""
Stops the assistant from asking for more work on SIGUSR1
"""
if signum == signal.SIGUSR1:
self._start_phasing_out()
def _start_phasing_out(self):
"""
Go into a mode where we dont ask for more work and quit once existing
tasks are done.
"""
self._config.keep_alive = False
self._stop_requesting_work = True
def run(self):
    """
    Returns True if all scheduled tasks were executed successfully.
    """
    logger.info('Running Worker with %d processes', self.worker_processes)
    sleeper = self._sleeper()
    self.run_succeeded = True
    self._add_worker()
    while True:
        # Throttle: never exceed worker_processes concurrent tasks
        # (worker_processes == 0 disables the limit).
        while len(self._running_tasks) >= self.worker_processes > 0:
            logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
            self._handle_next_task()
        get_work_response = self._get_work()
        # A disabled worker stops requesting new work but finishes what it has.
        if get_work_response.worker_state == WORKER_STATE_DISABLED:
            self._start_phasing_out()
        if get_work_response.task_id is None:
            if not self._stop_requesting_work:
                self._log_remote_tasks(get_work_response)
            if len(self._running_tasks) == 0:
                # Idle: either sleep and poll again (keep-alive) or shut down.
                if self._keep_alive(get_work_response):
                    six.next(sleeper)
                    continue
                else:
                    break
            else:
                # No new work, but tasks are in flight; reap one first.
                self._handle_next_task()
                continue
        # task_id is not None:
        logger.debug("Pending tasks: %s", get_work_response.n_pending_tasks)
        self._run_task(get_work_response.task_id)
    # Drain all in-flight tasks before reporting the overall outcome.
    while len(self._running_tasks):
        logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
        self._handle_next_task()
    return self.run_succeeded
| |
# -*- coding: utf-8 -*-
#
# This document is free and open-source software, subject to the OSI-approved
# BSD license below.
#
# Copyright (c) 2013 - 2015 by Alexis Petrounias <www.petrounias.org>,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the author nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
# Module metadata.
__status__ = "beta"
__version__ = "1.0.0b1"
__maintainer__ = (u"Alexis Petrounias <www.petrounias.org>", )
__author__ = (u"Alexis Petrounias <www.petrounias.org>", )
# Python
import copy
import datetime
# JSON Document
from json_document.document import Document, DocumentFragment
# JSON Schema Validator
from json_schema_validator.extensions import datetime_extension, \
timedelta_extension
class FragmentProxy(object):
    """
    Attribute-style wrapper around a document fragment.

    Attribute access is forwarded to item access on the wrapped fragment;
    nested 'object'/'array' fragments are wrapped in proxies of their own,
    while leaf fragments yield their plain value.
    """

    def __init__(self, fragment):
        super(FragmentProxy, self).__init__()
        # Stash the fragment directly in the instance dict: normal attribute
        # assignment would recurse into our own __setattr__.
        self.__getattribute__('__dict__')['__fragment'] = fragment

    @property
    def _fragment(self):
        return self.__getattribute__('__dict__')['__fragment']

    def __getattr__(self, item):
        return self[item]

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, item):
        del self[item]

    def __getitem__(self, item):
        child = self._fragment[item]
        if child.schema.type in ('object', 'array'):
            return FragmentProxy(child)
        return child.value

    def __setitem__(self, key, value):
        self._fragment[key] = value

    def __delitem__(self, key):
        # Reassign the whole value rather than mutating in place, so the
        # fragment's value setter (and any revision bookkeeping) runs.
        replacement = copy.copy(self._fragment.value)
        del replacement[key]
        self._fragment.value = replacement
class JSONDocument(Document):
    """
    Declarative JSON document.

    Subclasses declare JSONDocumentField class attributes plus an inner
    Meta class providing 'title' and 'description'; a JSON schema is
    generated from those declarations on instantiation, and field access
    is redirected to the underlying document fragments.
    """
    def __init__(self, value, validator = None):
        # set _fields before __getattribute__ is processed
        self._fields = {}
        super(JSONDocument, self).__init__(value, self._generate_schema(),
            validator = validator)
    def _generate_schema(self):
        # Build an 'object' schema whose properties come from every
        # JSONDocumentField declared on this class.
        base = {
            'type' : 'object',
            'title' : self.Meta.title,
            'description' : self.Meta.description,
            'properties' : {},
        }
        # NOTE(review): only this class's own __dict__ is scanned, so fields
        # declared on base classes are not inherited -- confirm intended.
        for name, field in self.__class__.__dict__.items():
            if isinstance(field, JSONDocumentField):
                base['properties'][name] = field._generate_schema()
                self._fields[name] = field
        return base
    def __getattribute__(self, name):
        # Redirect declared-field reads to document fragments; container
        # fields (object/list) are wrapped in a FragmentProxy, leaves
        # return the raw value.
        _fields = Document.__getattribute__(self, '_fields')
        if name in _fields:
            if isinstance(_fields[name], JSONObjectField) or \
                isinstance(_fields[name], JSONListField):
                return FragmentProxy(self[name])
            return self[name].value
        return Document.__getattribute__(self, name)
    def __setattr__(self, key, value):
        # hasattr guard: _fields does not exist yet during early __init__.
        if hasattr(self, '_fields') and key in self._fields:
            self[key] = value
        else:
            return Document.__setattr__(self, key, value)
    def __delattr__(self, item):
        if hasattr(self, '_fields') and item in self._fields:
            del self[item]
        else:
            return Document.__delattr__(self, item)
    def __delitem__(self, key):
        # in order to bump the document revision, we must ensure
        # Document._set_value is invoked.
        _value = copy.copy(self.value)
        del _value[key]
        self.value = _value
class JSONDocumentFragment(DocumentFragment):
    """
    Document fragment that converts datetime and timedelta values to their
    JSON representations (via the json_schema_validator extensions) when
    they are assigned.
    """
    def _get_value(self):
        return super(JSONDocumentFragment, self)._get_value()
    def _set_value(self, new_value):
        # Serialise rich Python types before storing; plain values pass
        # through untouched.
        if isinstance(new_value, datetime.datetime):
            new_value = datetime_extension.to_json(new_value)
        if isinstance(new_value, datetime.timedelta):
            new_value = timedelta_extension.to_json(new_value)
        super(JSONDocumentFragment, self)._set_value(new_value)
    value = property(_get_value, _set_value)
class JSONDocumentField(object):
    """
    Base class for declarative JSON document fields.

    Each field knows how to emit the JSON-schema property snippet that
    describes it; subclasses override TYPE and extend _generate_schema().
    """

    TYPE = 'any'

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None):
        super(JSONDocumentField, self).__init__()
        self.title = title
        self.description = description
        self.default = default
        self.optional = optional
        self.null = null
        # An absent pattern is stored as the empty string, never as None.
        self.pattern = '' if pattern is None else pattern
        self.content = content
        self.enum = enum
        # Fragment class used to materialise values of this field.
        self.implementation = implementation or JSONDocumentFragment

    def _generate_schema(self):
        """Return the JSON-schema dictionary describing this field."""
        # A nullable field advertises both its own type and 'null'.
        field_type = [ self.TYPE, 'null', ] if self.null else self.TYPE
        schema = {
            'type' : field_type,
            'title' : self.title,
            'description' : self.description,
            'default' : self.default,
            'optional' : self.optional,
            'null' : self.null,
            'pattern' : self.pattern,
            'properties' : {},
            '__field' : self,
            '__fragment_cls' : self.implementation,
        }
        if self.enum is not None:
            schema['enum'] = self.enum
        return schema
class JSONBooleanField(JSONDocumentField):
    """
    Field holding a JSON boolean value.
    """

    TYPE = 'boolean'

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None):
        super(JSONBooleanField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null, pattern = pattern,
            content = content, enum = enum, implementation = implementation)
class JSONIntegerField(JSONDocumentField):
    """
    Field holding a JSON integer, optionally range-restricted via
    min_value / max_value.
    """

    TYPE = 'integer'

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None, max_value = None, min_value = None):
        super(JSONIntegerField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null, pattern = pattern,
            content = content, enum = enum, implementation = implementation)
        self.minimum = min_value
        self.maximum = max_value

    def _generate_schema(self):
        schema = super(JSONIntegerField, self)._generate_schema()
        # Only advertise the bounds that were actually configured.
        for bound in ('minimum', 'maximum'):
            limit = getattr(self, bound)
            if limit is not None:
                schema[bound] = limit
        return schema
class JSONDecimalField(JSONDocumentField):
    """
    Field holding a JSON number, optionally range-restricted via
    min_value / max_value.
    """

    TYPE = 'number'

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None, max_value = None, min_value = None):
        super(JSONDecimalField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null, pattern = pattern,
            content = content, enum = enum, implementation = implementation)
        self.minimum = min_value
        self.maximum = max_value

    def _generate_schema(self):
        schema = super(JSONDecimalField, self)._generate_schema()
        # Only advertise the bounds that were actually configured.
        for bound in ('minimum', 'maximum'):
            limit = getattr(self, bound)
            if limit is not None:
                schema[bound] = limit
        return schema
class JSONStringField(JSONDocumentField):
    """
    Field holding a JSON string, optionally length-restricted via
    min_length / max_length.
    """

    TYPE = 'string'

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None, min_length = None,
        max_length = None):
        super(JSONStringField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null, pattern = pattern,
            content = content, enum = enum, implementation = implementation)
        self.minLength = min_length
        self.maxLength = max_length

    def _generate_schema(self):
        schema = super(JSONStringField, self)._generate_schema()
        # Only advertise the length limits that were actually configured.
        for bound in ('minLength', 'maxLength'):
            limit = getattr(self, bound)
            if limit is not None:
                schema[bound] = limit
        return schema
class JSONDateTimeField(JSONDocumentField):
    """
    Field holding a date-time encoded as a JSON string (advertised via the
    JSON-schema 'format': 'date-time' hint).
    """

    TYPE = 'string'

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None):
        super(JSONDateTimeField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null, pattern = pattern,
            content = content, enum = enum, implementation = implementation)

    def _generate_schema(self):
        schema = super(JSONDateTimeField, self)._generate_schema()
        schema['format'] = 'date-time'
        return schema
class JSONDateField(JSONDocumentField):
    """
    Field holding a date encoded as a JSON string.
    """

    TYPE = 'string'

    # PATTERN matching the following date formats:
    # - YYYY-MM-DD
    # - MM/DD/YYYY
    # - MM/DD/YY
    # All fragments are raw strings: the original mixed raw and non-raw
    # literals, making "\d" an invalid escape sequence (a warning today,
    # a syntax error in future Python versions). The string value is
    # unchanged.
    PATTERN = (r"^(19|20)\d\d[-](0[1-9]|1[012])[-](0[1-9]|[12][0-9]|3[01])$|"
        r"^(0[1-9]|1[012])[/](0[1-9]|[12][0-9]|3[01])[/](19|20)\d\d$|"
        r"^(0[1-9]|1[012])[/](0[1-9]|[12][0-9]|3[01])[/]\d\d$")

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None):
        super(JSONDateField, self).__init__(title = title,
            description = description, default = default, optional = optional,
            null = null, pattern = pattern or self.PATTERN, content = content,
            enum = enum, implementation = implementation)
class JSONTimeField(JSONDocumentField):
    """
    Field holding a time of day encoded as a JSON string.

    The default pattern accepts HH:MM and HH:MM:SS.
    """

    TYPE = 'string'

    PATTERN = (r"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$|"
        r"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9]):([0-5][0-9])$")

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None):
        super(JSONTimeField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null,
            pattern = pattern or self.PATTERN,
            content = content, enum = enum, implementation = implementation)
class JSONTimeDeltaField(JSONDocumentField):
    """
    Field holding a duration encoded as a JSON string of the form
    "<days>d <seconds>s <microseconds>us".
    """

    TYPE = 'string'

    PATTERN = r"^(\d+)d (\d+)s (\d+)us$"

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None):
        super(JSONTimeDeltaField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null,
            pattern = pattern or self.PATTERN,
            content = content, enum = enum, implementation = implementation)
class JSONObjectField(JSONDocumentField):
    """
    Field holding a nested JSON object.

    ``content`` maps property names to the fields describing them; their
    schemas are embedded under this field's 'properties'.
    """

    TYPE = 'object'

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None):
        super(JSONObjectField, self).__init__(title = title,
            description = description, default = default, optional = optional,
            null = null, pattern = pattern, content = content, enum = enum,
            implementation = implementation)

    def _generate_schema(self):
        schema = super(JSONObjectField, self)._generate_schema()
        if self.content is not None:
            for key, value in self.content.items():
                schema['properties'][key] = value._generate_schema()
        return schema
class JSONListField(JSONDocumentField):
    """
    Field holding a JSON array.

    ``content`` is an iterable of fields whose schemas are emitted as the
    array's 'items' (tuple-style: one schema per element position).
    """

    TYPE = 'array'

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None):
        super(JSONListField, self).__init__(title = title,
            description = description, default = default, optional = optional,
            null = null, pattern = pattern, content = content, enum = enum,
            implementation = implementation)

    def _generate_schema(self):
        schema = super(JSONListField, self)._generate_schema()
        if self.content is not None:
            schema['items'] = [value._generate_schema() for value in
                self.content]
        return schema
class JSONEmailField(JSONStringField):
    """
    String field marked as an e-mail address via the JSON-schema 'format'
    hint.
    """

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None, min_length = None,
        max_length = None):
        super(JSONEmailField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null, pattern = pattern,
            content = content, enum = enum, implementation = implementation,
            min_length = min_length, max_length = max_length)

    def _generate_schema(self):
        schema = super(JSONEmailField, self)._generate_schema()
        schema['format'] = 'email'
        return schema
class JSONIPAddressField(JSONStringField):
    """
    String field marked as an IP address; ``protocol`` selects the
    JSON-schema 'format' value ('ipv4' by default).
    """

    DEFAULT_PROTOCOL = 'ipv4'

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, protocol=None, implementation = None):
        super(JSONIPAddressField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null, pattern = pattern,
            content = content, enum = enum, implementation = implementation)
        self.protocol = self.DEFAULT_PROTOCOL if protocol is None else protocol

    def _generate_schema(self):
        schema = super(JSONIPAddressField, self)._generate_schema()
        schema['format'] = self.protocol
        return schema
class JSONSlugField(JSONStringField):
    """
    String field restricted to lowercase letters, digits and hyphens.
    """

    PATTERN = r"^[a-z0-9-]+$"

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None, min_length = None, max_length = None):
        super(JSONSlugField, self).__init__(
            title = title, description = description, default = default,
            optional = optional, null = null,
            pattern = pattern or self.PATTERN,
            content = content, enum = enum, implementation = implementation,
            min_length = min_length, max_length = max_length)
class JSONURLField(JSONStringField):
    """
    String field validated against a URL pattern.
    """

    # Pattern based on django's URLValidator regex pattern.
    # All fragments are raw strings: the original mixed raw and non-raw
    # literals, producing invalid escape sequences such as "\d" and "\["
    # (a warning today, a syntax error in future Python versions). The
    # string value is unchanged.
    PATTERN = (r"^(http|ftp)s?://(([A-Za-z0-9]([A-Za-z0-9-]{0,61}[A-Za-z0-9])?\.)"
        r"+([A-Za-z]{2,6}\.?|[A-Za-z0-9-]{2,}\.?)|localhost|\d{1,3}\."
        r"\d{1,3}\.\d{1,3}\.\d{1,3}|\[?[a-fA-F0-9]*:[A-Fa-f0-9:]+\]?)"
        r"(:\d+)?(/?|[/?]\S+)$")

    def __init__(self, title = None, description = None, default = None,
        optional = False, null = False, pattern = None, content = None,
        enum = None, implementation = None, min_length = None, max_length = None):
        super(JSONURLField, self).__init__(title = title,
            description = description, default = default, optional = optional,
            null = null, pattern = pattern or self.PATTERN, content = content,
            enum = enum, implementation = implementation,
            min_length = min_length, max_length = max_length)
| |
from collections import OrderedDict
from decimal import Decimal
from typing import Any, Dict, List
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.validators import MinValueValidator
from django.db import models
from openslides.agenda.models import Item, Speaker
from openslides.core.config import config
from openslides.core.models import Tag
from openslides.poll.models import (
BaseOption,
BasePoll,
BaseVote,
CollectDefaultVotesMixin,
PublishPollMixin,
)
from openslides.utils.autoupdate import inform_changed_data
from openslides.utils.exceptions import OpenSlidesError
from openslides.utils.models import RESTModelMixin
from .access_permissions import AssignmentAccessPermissions
class AssignmentRelatedUser(RESTModelMixin, models.Model):
    """
    Many to Many table between an assignment and user.
    """

    assignment = models.ForeignKey(
        "Assignment", on_delete=models.CASCADE, related_name="assignment_related_users"
    )
    """
    ForeinKey to the assignment.
    """

    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    """
    ForeinKey to the user who is related to the assignment.
    """

    elected = models.BooleanField(default=False)
    """
    Saves the election state of each user
    """

    weight = models.IntegerField(default=0)
    """
    The sort order of the candidates.
    """

    class Meta:
        default_permissions = ()
        # A user can be related to a given assignment at most once.
        unique_together = ("assignment", "user")

    def __str__(self):
        return f"{self.assignment} <-> {self.user}"

    def get_root_rest_element(self):
        """
        Returns the assignment to this instance which is the root REST element.
        """
        return self.assignment
class AssignmentManager(models.Manager):
    """
    Customized model manager to support our get_full_queryset method.
    """

    def get_full_queryset(self):
        """
        Returns the normal queryset with all assignments. In the background
        all related users (candidates), the related agenda item and all
        polls are prefetched from the database.
        """
        # prefetch_related avoids N+1 queries when serialising assignments.
        return self.get_queryset().prefetch_related(
            "related_users", "agenda_items", "polls", "tags"
        )
class Assignment(RESTModelMixin, models.Model):
    """
    Model for assignments (elections).
    """

    access_permissions = AssignmentAccessPermissions()
    can_see_permission = "assignments.can_see"

    objects = AssignmentManager()

    PHASE_SEARCH = 0
    PHASE_VOTING = 1
    PHASE_FINISHED = 2

    PHASES = (
        (PHASE_SEARCH, "Searching for candidates"),
        (PHASE_VOTING, "Voting"),
        (PHASE_FINISHED, "Finished"),
    )

    title = models.CharField(max_length=100)
    """
    Title of the assignment.
    """

    description = models.TextField(blank=True)
    """
    Text to describe the assignment.
    """

    open_posts = models.PositiveSmallIntegerField()
    """
    The number of members to be elected.
    """

    poll_description_default = models.CharField(max_length=79, blank=True)
    """
    Default text for the poll description.
    """

    phase = models.IntegerField(choices=PHASES, default=PHASE_SEARCH)
    """
    Phase in which the assignment is.
    """

    related_users = models.ManyToManyField(
        settings.AUTH_USER_MODEL, through="AssignmentRelatedUser"
    )
    """
    Users that are candidates or elected.
    See AssignmentRelatedUser for more information.
    """

    tags = models.ManyToManyField(Tag, blank=True)
    """
    Tags for the assignment.
    """

    # In theory there could be more than one agenda_item. But we support only
    # one. See the property agenda_item.
    agenda_items = GenericRelation(Item, related_name="assignments")

    class Meta:
        default_permissions = ()
        permissions = (
            ("can_see", "Can see elections"),
            ("can_nominate_other", "Can nominate another participant"),
            ("can_nominate_self", "Can nominate oneself"),
            ("can_manage", "Can manage elections"),
        )
        ordering = ("title",)
        verbose_name = "Election"

    def __str__(self):
        return self.title

    @property
    def candidates(self):
        """
        Queryset that represents the candidates for the assignment.
        """
        return self.related_users.filter(assignmentrelateduser__elected=False)

    @property
    def elected(self):
        """
        Queryset that represents all elected users for the assignment.
        """
        return self.related_users.filter(assignmentrelateduser__elected=True)

    def is_candidate(self, user):
        """
        Returns True if user is a candidate.
        Costs one database query.
        """
        return self.candidates.filter(pk=user.pk).exists()

    def is_elected(self, user):
        """
        Returns True if the user is elected for this assignment.
        Costs one database query.
        """
        return self.elected.filter(pk=user.pk).exists()

    def set_candidate(self, user):
        """
        Adds the user as candidate.
        """
        # Append the new candidate at the end of the current sort order.
        weight = (
            self.assignment_related_users.aggregate(models.Max("weight"))["weight__max"]
            or 0
        )
        defaults = {"elected": False, "weight": weight + 1}
        self.assignment_related_users.update_or_create(user=user, defaults=defaults)

    def set_elected(self, user):
        """
        Makes user an elected user for this assignment.
        """
        self.assignment_related_users.update_or_create(
            user=user, defaults={"elected": True}
        )

    def delete_related_user(self, user):
        """
        Delete the connection from the assignment to the user.
        """
        self.assignment_related_users.filter(user=user).delete()
        inform_changed_data(self)

    def set_phase(self, phase):
        """
        Sets the phase attribute of the assignment.
        Raises a ValueError if the phase is not valid.
        """
        if phase not in dict(self.PHASES):
            raise ValueError(f"Invalid phase {phase}")
        self.phase = phase

    def create_poll(self):
        """
        Creates a new poll for the assignment and adds all candidates to all
        lists of speakers of related agenda items.
        """
        candidates = self.candidates.all()

        # Find out the method of the election
        if config["assignments_poll_vote_values"] == "votes":
            pollmethod = "votes"
        elif config["assignments_poll_vote_values"] == "yesnoabstain":
            pollmethod = "yna"
        elif config["assignments_poll_vote_values"] == "yesno":
            pollmethod = "yn"
        else:
            # config['assignments_poll_vote_values'] == 'auto'
            # candidates <= available posts -> yes/no/abstain
            if len(candidates) <= (self.open_posts - self.elected.count()):
                pollmethod = "yna"
            else:
                pollmethod = "votes"

        # Create the poll with the candidates.
        poll = self.polls.create(
            description=self.poll_description_default, pollmethod=pollmethod
        )
        options = []
        related_users = AssignmentRelatedUser.objects.filter(
            assignment__id=self.id
        ).exclude(elected=True)
        for related_user in related_users:
            options.append(
                {"candidate": related_user.user, "weight": related_user.weight}
            )
        poll.set_options(options, skip_autoupdate=True)
        inform_changed_data(self)

        # Add all candidates to list of speakers of related agenda item
        # TODO: Try to do this in a bulk create
        if config["assignments_add_candidates_to_list_of_speakers"]:
            for candidate in self.candidates:
                try:
                    Speaker.objects.add(
                        candidate, self.agenda_item, skip_autoupdate=True
                    )
                except OpenSlidesError:
                    # The Speaker is already on the list. Do nothing.
                    # TODO: Find a smart way not to catch the error concerning AnonymousUser.
                    pass
            inform_changed_data(self.agenda_item)

        return poll

    def vote_results(self, only_published):
        """
        Returns a table represented as a list with all candidates from all
        related polls and their vote results.
        """
        vote_results_dict: Dict[Any, List[AssignmentVote]] = OrderedDict()
        polls = self.polls.all()
        if only_published:
            polls = polls.filter(published=True)

        # All PollOption-Objects related to this assignment
        options: List[AssignmentOption] = []
        for poll in polls:
            options += poll.get_options()

        for option in options:
            candidate = option.candidate
            if candidate in vote_results_dict:
                continue
            vote_results_dict[candidate] = []
            for poll in polls:
                votes: Any = {}
                try:
                    # candidate related to this poll
                    poll_option = poll.get_options().get(candidate=candidate)
                    for vote in poll_option.get_votes():
                        votes[vote.value] = vote.print_weight()
                except AssignmentOption.DoesNotExist:
                    # candidate not related to this poll
                    votes = None
                vote_results_dict[candidate].append(votes)
        return vote_results_dict

    """
    Container for runtime information for agenda app (on create or update of this instance).
    """
    agenda_item_update_information: Dict[str, Any] = {}

    def get_agenda_title(self):
        """
        Returns the title for the agenda.
        """
        return str(self)

    def get_agenda_title_with_type(self):
        """
        Return a title for the agenda with the appended assignment verbose name.
        Note: It has to be the same return value like in JavaScript.
        """
        # Fixed: the f-string was missing braces around self._meta.verbose_name,
        # so the literal text "self._meta.verbose_name" was emitted instead of
        # the verbose name ("Election").
        return f"{self.get_agenda_title()} ({self._meta.verbose_name})"

    @property
    def agenda_item(self):
        """
        Returns the related agenda item.
        """
        # We support only one agenda item so just return the first element of
        # the queryset.
        return self.agenda_items.all()[0]

    @property
    def agenda_item_id(self):
        """
        Returns the id of the agenda item object related to this object.
        """
        return self.agenda_item.pk
class AssignmentVote(RESTModelMixin, BaseVote):
    """
    Single vote belonging to one option of an assignment poll.
    """
    option = models.ForeignKey(
        "AssignmentOption", on_delete=models.CASCADE, related_name="votes"
    )

    class Meta:
        default_permissions = ()

    def get_root_rest_element(self):
        """
        Returns the assignment to this instance which is the root REST element.
        """
        return self.option.poll.assignment
class AssignmentOption(RESTModelMixin, BaseOption):
    """
    One selectable option (a candidate) of an assignment poll.
    """
    poll = models.ForeignKey(
        "AssignmentPoll", on_delete=models.CASCADE, related_name="options"
    )
    candidate = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # Sort order of the option within its poll.
    weight = models.IntegerField(default=0)
    vote_class = AssignmentVote

    class Meta:
        default_permissions = ()

    def __str__(self):
        return str(self.candidate)

    def get_root_rest_element(self):
        """
        Returns the assignment to this instance which is the root REST element.
        """
        return self.poll.assignment
# TODO: remove the type-ignoring in the next line, after this is solved:
# https://github.com/python/mypy/issues/3855
class AssignmentPoll( # type: ignore
    RESTModelMixin, CollectDefaultVotesMixin, PublishPollMixin, BasePoll
):
    """
    A poll (ballot) belonging to one assignment.
    """
    option_class = AssignmentOption

    assignment = models.ForeignKey(
        Assignment, on_delete=models.CASCADE, related_name="polls"
    )
    # Voting method: 'yna' (yes/no/abstain), 'yn' (yes/no) or 'votes',
    # see get_vote_values().
    pollmethod = models.CharField(max_length=5, default="yna")
    description = models.CharField(max_length=79, blank=True)

    votesabstain = models.DecimalField(
        null=True,
        blank=True,
        validators=[MinValueValidator(Decimal("-2"))],
        max_digits=15,
        decimal_places=6,
    )
    """ General abstain votes, used for pollmethod 'votes' """

    votesno = models.DecimalField(
        null=True,
        blank=True,
        validators=[MinValueValidator(Decimal("-2"))],
        max_digits=15,
        decimal_places=6,
    )
    """ General no votes, used for pollmethod 'votes' """

    class Meta:
        default_permissions = ()

    def get_assignment(self):
        return self.assignment

    def get_vote_values(self):
        """
        Returns the vote value names matching this poll's method.
        """
        if self.pollmethod == "yna":
            return ["Yes", "No", "Abstain"]
        if self.pollmethod == "yn":
            return ["Yes", "No"]
        return ["Votes"]

    def get_ballot(self):
        # 1-based position of this poll among the assignment's polls
        # (number of polls created up to and including this one).
        return self.assignment.polls.filter(id__lte=self.pk).count()

    def get_percent_base_choice(self):
        return config["assignments_poll_100_percent_base"]

    def get_root_rest_element(self):
        """
        Returns the assignment to this instance which is the root REST element.
        """
        return self.assignment
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import numpy as np
import time
import os
import scipy
from scipy import misc
# from vgg import vgg19
# Filled in from argparse in the __main__ block below; used as a global.
FLAGS = None
# Mean pixel value for Inception-style models.
# NOTE(review): appears unused in this module.
IMAGENET_MEAN = 117.0
# VGG-19 style-layer names in conv-layer notation.
# NOTE(review): unused here; the code addresses layers as 'Relu', 'Relu_2', ...
STYLE_LAYERS = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')
def load_vgg19():
    """
    Build a VGG-19 graph on top of a trainable input-image variable and
    return that variable.

    NOTE(review): the 'from vgg import vgg19' import at the top of this
    file is commented out, so calling this function raises NameError;
    load_vgg19_v2 is used instead.
    """
    model = vgg19.Vgg19(FLAGS.model_path)
    t_input = tf.get_variable(
        name='input', \
        shape=[1, FLAGS.img_height, FLAGS.img_width, 3], \
        initializer=tf.random_normal_initializer())
    # The model expects inputs scaled to [0, 1].
    model.build(t_input / 255.0)
    return t_input
def load_vgg19_v2():
    """
    Create a trainable input-image variable, feed a mean-subtracted copy
    through the neural_style VGG-19 network, and return the variable.
    """
    from neural_style import vgg
    # model = vgg19.Vgg19(FLAGS.model_path)
    t_input = tf.get_variable(
        name='input', \
        shape=[1, FLAGS.img_height, FLAGS.img_width, 3], \
        initializer=tf.random_normal_initializer())
    # NOTE(review): tf.split/tf.concat are called with the pre-TF-1.0
    # argument order (axis first); this will not run on TF >= 1.0 unchanged.
    red, green, blue = tf.split(3, 3, t_input)
    print(red.get_shape().as_list(), green.get_shape().as_list(), blue.get_shape().as_list())
    # Subtract the per-channel mean (RGB order) before feeding the network.
    normalized_input = tf.concat(3, [red - 123.68, green - 116.779, blue - 103.939])
    vgg.net('imagenet-vgg-verydeep-19.mat', normalized_input)
    return t_input
def print_graph_node_names(graph):
    """Debug helper: print every node's name and op in *graph*."""
    graph_def = graph.as_graph_def()
    for node in graph_def.node:
        print(node.name, node.op)
def T(graph, layer):
    """Fetch the first output tensor (index 0) of *layer* from *graph*."""
    return graph.get_tensor_by_name(layer + ':0')
def imread(path):
    """
    Load an image from *path*, resize it to (img_height, img_width, 3) and
    return it as a float array with a leading batch dimension of 1.
    """
    # NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
    # this function requires an old SciPy (plus Pillow).
    img = scipy.misc.imread(path).astype(np.float)
    img = misc.imresize(img, size=(FLAGS.img_height, FLAGS.img_width, 3)).astype(np.float)
    # NOTE(review): this grayscale check runs after resizing to 3 channels,
    # so it looks unreachable -- confirm the intended ordering.
    if len(img.shape) == 2:
        img = np.dstack((img,img,img))
    img = np.expand_dims(img, 0)
    return img
def get_activation():
    """
    Compute Gram matrices of the style image's activations at selected
    VGG-19 layers.

    NOTE(review): the computed 'style_activation' values are discarded and
    nothing is returned -- this helper looks unfinished / debug-only (it is
    also commented out in the __main__ block).
    """
    with tf.Graph().as_default() as graph:
        session = tf.InteractiveSession(graph=graph)
        X = load_vgg19_v2()
        style_image = imread(FLAGS.style_image)
        style_layers = [
            # VGG19 layers
            'Relu',
            'Relu_2',
            'Relu_4',
            'Relu_8',
            'Relu_12',
        ]
        layer_style_loss_list = []
        session.run(tf.global_variables_initializer())
        session.run(X.assign(style_image))
        for layer_name in style_layers:
            t_layer = T(graph, layer_name)
            _, height, width, channel = t_layer.get_shape().as_list()
            t_layer_vectorized = tf.reshape(t_layer,
                shape=[-1, channel])
            size = height * width * channel
            # Gram matrix of the vectorized activations, normalised by size.
            t_gram_mat = tf.matmul(t_layer_vectorized, t_layer_vectorized, transpose_a=True) / size
            style_activation = session.run(t_gram_mat)
def main():
    """
    Neural style transfer: optimise the input-image variable so that its
    Gram matrices match the style image at several layers while one layer's
    raw activations match the content image.
    """
    graph = tf.Graph()
    with graph.as_default():
        session = tf.InteractiveSession(graph=graph)
        # Load pretrained model
        X = load_vgg19_v2()
        # print_graph_node_names(graph)
        content_image = imread(FLAGS.content_image)
        style_image = imread(FLAGS.style_image)
        style_layers = [
            # VGG19 layers
            'Relu',
            'Relu_2',
            'Relu_4',
            'Relu_8',
            'Relu_12',
            # 'BiasAdd', # 'conv1_1/Relu',
            # 'BiasAdd_2', # 'conv2_1/Relu',
            # 'BiasAdd_4', # 'conv3_1/Relu',
            # 'BiasAdd_8', # 'conv4_1/Relu',
            # 'BiasAdd_12', # 'conv5_1/Relu',
        ]
        layer_style_loss_list = []
        session.run(tf.global_variables_initializer())
        # Build one style-loss term per layer while X holds the style image.
        session.run(X.assign(style_image))
        for layer_name in style_layers:
            t_layer = T(graph, layer_name)
            _, height, width, channel = t_layer.get_shape().as_list()
            t_layer_vectorized = tf.reshape(t_layer,
                shape=[-1, channel])
            size = height * width * channel
            # Normalised Gram matrix of the vectorized activations.
            t_gram_mat = tf.matmul(
                tf.transpose(t_layer_vectorized),
                t_layer_vectorized,
            ) / size
            # Snapshot the style image's Gram matrix as a constant target.
            style_activation = t_gram_mat.eval()
            # print(layer_name, style_activation.flatten()[: 20])
            t_style_loss = 2 * tf.nn.l2_loss(t_gram_mat - style_activation) / style_activation.size
            layer_style_loss_list.append(t_style_loss)
        content_layers = [
            # VGG19
            # 'MaxPool',
            # 'pool1',
            'Relu_9',
        ]
        for layer_name in content_layers:
            print('=================Running layer', layer_name, 'as content layer')
            t_layer = T(graph, layer_name)
            # Snapshot the content image's activations as the content target.
            session.run(X.assign(content_image))
            content_activation = session.run(t_layer)
            t_content_loss = 2 * tf.nn.l2_loss(t_layer - content_activation) / content_activation.size
            for i in [4]: #reversed(range(5)): # reversed([1, 4, 7, 10, 13]):
                print('Running', style_layers[: i + 1], 'as style layers')
                # Total loss: alpha-weighted content plus (1 - alpha)-weighted
                # style terms for the first i+1 style layers.
                t_total_loss = FLAGS.alpha * t_content_loss
                for t_style_loss in layer_style_loss_list[: i + 1]:
                    t_total_loss += (1 - FLAGS.alpha) * t_style_loss
                # Only the input image X is optimised; network weights are fixed.
                grad_op = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) \
                    .minimize(t_total_loss, var_list=[X])
                # Initialize variables needed by Adam
                session.run(tf.global_variables_initializer())
                def get_file_save_path(iter_num):
                    # Output image path, tagged with layer, iteration and i.
                    return FLAGS.output_dir + '/%s_syn_%d(%d).jpg' \
                        % (layer_name.replace('/', '-'), iter_num, i)
                def get_model_save_path(iter_num=None):
                    # Checkpoint path; optional iteration suffix for resuming.
                    path = FLAGS.save_dir + '/model-%s_syn(%d).cpkt' \
                        % (layer_name.replace('/', '-'), i)
                    if iter_num is not None:
                        path += '-%d' % iter_num
                    return path
                saver = tf.train.Saver()
                print('initial path: ', FLAGS.initial)
                if FLAGS.resume_iter > 0:
                    # Resume optimisation from a saved checkpoint.
                    saver.restore(
                        session,
                        get_model_save_path(FLAGS.resume_iter))
                else:
                    # Start from a supplied image or from scaled noise.
                    if FLAGS.initial:
                        initial = imread(FLAGS.initial)
                    else:
                        initial = np.random.normal(size=(1, FLAGS.img_height, FLAGS.img_width, 3), \
                            scale=np.std(content_image) * 0.1) * 0.256
                    # initial = np.random.uniform(size=(1, FLAGS.img_height, FLAGS.img_width, 3))
                    session.run(X.assign(initial))
                print(X.eval().flatten()[: 20])
                print('Loss before starting: ', t_total_loss.eval())
                def check_point(num_iter):
                    # Save the current image and a model checkpoint.
                    img = session.run(X).squeeze()
                    print(img.flatten()[: 20])
                    misc.imsave(get_file_save_path(num_iter), img)
                    saver.save(session, get_model_save_path(), global_step=num_iter)
                start_iter = 0
                if FLAGS.resume_iter > 0:
                    start_iter = FLAGS.resume_iter + 1
                for k in range(start_iter, FLAGS.iter_num + 1):
                    _, loss, content_loss = session.run([grad_op, t_total_loss, t_content_loss])
                    # if k % 10 == 0 and k:
                    print(k, loss, content_loss * FLAGS.alpha, content_loss)
                    if k % 10 == 0 and k:
                        check_point(k)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # One (flag, type, default, help) row per command-line option.
    arg_specs = [
        ('--model_path', str, 'tensorflow_inception_graph.pb', 'Path to pretrained model'),
        ('--content_image', str, 'content.jpg', 'Path for content image'),
        ('--style_image', str, 'style.jpg', 'Path for style image'),
        ('--initial', str, '', 'Initiation for optimization'),
        ('--learning_rate', float, 1.0, 'Learning rate'),
        ('--alpha', float, 0.9, 'Weight on the content loss'),
        ('--iter_num', int, 20, 'Learning iter'),
        ('--img_width', int, 224, 'Image width'),
        ('--img_height', int, 224, 'Image height'),
        ('--resume_iter', int, 0, 'If positive, resume from previous training by loading previous result'),
        ('--output_dir', str, 'syn_images', 'Output directory for images'),
        ('--save_dir', str, 'models', 'Output directory for models'),
    ]
    for flag, flag_type, default, help_text in arg_specs:
        parser.add_argument(flag, type=flag_type, default=default, help=help_text)
    # FLAGS is read as a module-level global by the rest of the script.
    FLAGS, _ = parser.parse_known_args()
    main()
    # get_activation()
| |
from __future__ import division
import numpy as np
import pytest
from pandas import Interval, Timedelta, Timestamp
import pandas.core.common as com
@pytest.fixture
def interval():
    """Shared fixture: the right-closed interval (0, 1]."""
    iv = Interval(0, 1)
    return iv
class TestInterval(object):
    """Tests for the scalar pandas Interval: properties, containment,
    comparison, length, arithmetic, and constructor validation."""

    def test_properties(self, interval):
        """Basic accessors of the default (0, 1] fixture."""
        assert interval.closed == 'right'
        assert interval.left == 0
        assert interval.right == 1
        assert interval.mid == 0.5

    def test_repr(self, interval):
        """repr/str use bracket notation matching the closed side: (] vs [)."""
        assert repr(interval) == "Interval(0, 1, closed='right')"
        assert str(interval) == "(0, 1]"
        interval_left = Interval(0, 1, closed='left')
        assert repr(interval_left) == "Interval(0, 1, closed='left')"
        assert str(interval_left) == "[0, 1)"

    def test_contains(self, interval):
        """Membership respects which endpoints are closed; interval-in-interval raises."""
        assert 0.5 in interval
        assert 1 in interval
        assert 0 not in interval
        msg = "__contains__ not defined for two intervals"
        with pytest.raises(TypeError, match=msg):
            interval in interval
        interval_both = Interval(0, 1, closed='both')
        assert 0 in interval_both
        assert 1 in interval_both
        interval_neither = Interval(0, 1, closed='neither')
        assert 0 not in interval_neither
        assert 0.5 in interval_neither
        assert 1 not in interval_neither

    def test_equal(self):
        """Equality requires identical endpoints AND the same closed side."""
        assert Interval(0, 1) == Interval(0, 1, closed='right')
        assert Interval(0, 1) != Interval(0, 1, closed='left')
        assert Interval(0, 1) != 0

    def test_comparison(self):
        """Intervals order by (left, right); comparing with a scalar raises."""
        with pytest.raises(TypeError, match='unorderable types'):
            Interval(0, 1) < 2
        assert Interval(0, 1) < Interval(1, 2)
        assert Interval(0, 1) < Interval(0, 2)
        assert Interval(0, 1) < Interval(0.5, 1.5)
        assert Interval(0, 1) <= Interval(0, 1)
        assert Interval(0, 1) > Interval(-1, 2)
        assert Interval(0, 1) >= Interval(0, 1)

    def test_hash(self, interval):
        """Intervals are hashable."""
        # should not raise
        hash(interval)

    @pytest.mark.parametrize('left, right, expected', [
        (0, 5, 5),
        (-2, 5.5, 7.5),
        (10, 10, 0),
        (10, np.inf, np.inf),
        (-np.inf, -5, np.inf),
        (-np.inf, np.inf, np.inf),
        (Timedelta('0 days'), Timedelta('5 days'), Timedelta('5 days')),
        (Timedelta('10 days'), Timedelta('10 days'), Timedelta('0 days')),
        (Timedelta('1H10M'), Timedelta('5H5M'), Timedelta('3H55M')),
        (Timedelta('5S'), Timedelta('1H'), Timedelta('59M55S'))])
    def test_length(self, left, right, expected):
        """length == right - left for numeric and Timedelta endpoints."""
        # GH 18789
        iv = Interval(left, right)
        result = iv.length
        assert result == expected

    @pytest.mark.parametrize('left, right, expected', [
        ('2017-01-01', '2017-01-06', '5 days'),
        ('2017-01-01', '2017-01-01 12:00:00', '12 hours'),
        ('2017-01-01 12:00', '2017-01-01 12:00:00', '0 days'),
        ('2017-01-01 12:01', '2017-01-05 17:31:00', '4 days 5 hours 30 min')])
    @pytest.mark.parametrize('tz', (None, 'UTC', 'CET', 'US/Eastern'))
    def test_length_timestamp(self, tz, left, right, expected):
        """Timestamp endpoints (naive or tz-aware) yield a Timedelta length."""
        # GH 18789
        iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz))
        result = iv.length
        expected = Timedelta(expected)
        assert result == expected

    @pytest.mark.parametrize('left, right', [
        ('a', 'z'),
        (('a', 'b'), ('c', 'd')),
        (list('AB'), list('ab')),
        (Interval(0, 1), Interval(1, 2))])
    def test_length_errors(self, left, right):
        """Endpoints with no defined subtraction make .length raise TypeError."""
        # GH 18789
        iv = Interval(left, right)
        msg = 'cannot compute length between .* and .*'
        with pytest.raises(TypeError, match=msg):
            iv.length

    # NOTE(review): the `closed` fixture used by the test_math_* methods is not
    # defined in this file — presumably supplied by a conftest.py; verify.
    def test_math_add(self, closed):
        """interval + scalar shifts both endpoints; interval/str operands raise."""
        interval = Interval(0, 1, closed=closed)
        expected = Interval(1, 2, closed=closed)
        result = interval + 1
        assert result == expected
        result = 1 + interval
        assert result == expected
        result = interval
        result += 1
        assert result == expected
        msg = r"unsupported operand type\(s\) for \+"
        with pytest.raises(TypeError, match=msg):
            interval + interval
        with pytest.raises(TypeError, match=msg):
            interval + 'foo'

    def test_math_sub(self, closed):
        """interval - scalar shifts both endpoints; interval/str operands raise."""
        interval = Interval(0, 1, closed=closed)
        expected = Interval(-1, 0, closed=closed)
        result = interval - 1
        assert result == expected
        result = interval
        result -= 1
        assert result == expected
        msg = r"unsupported operand type\(s\) for -"
        with pytest.raises(TypeError, match=msg):
            interval - interval
        with pytest.raises(TypeError, match=msg):
            interval - 'foo'

    def test_math_mult(self, closed):
        """interval * scalar scales both endpoints; invalid operands raise."""
        interval = Interval(0, 1, closed=closed)
        expected = Interval(0, 2, closed=closed)
        result = interval * 2
        assert result == expected
        result = 2 * interval
        assert result == expected
        result = interval
        result *= 2
        assert result == expected
        msg = r"unsupported operand type\(s\) for \*"
        with pytest.raises(TypeError, match=msg):
            interval * interval
        msg = r"can\'t multiply sequence by non-int"
        with pytest.raises(TypeError, match=msg):
            interval * 'foo'

    def test_math_div(self, closed):
        """interval / scalar scales both endpoints; invalid operands raise."""
        interval = Interval(0, 1, closed=closed)
        expected = Interval(0, 0.5, closed=closed)
        result = interval / 2.0
        assert result == expected
        result = interval
        result /= 2.0
        assert result == expected
        msg = r"unsupported operand type\(s\) for /"
        with pytest.raises(TypeError, match=msg):
            interval / interval
        with pytest.raises(TypeError, match=msg):
            interval / 'foo'

    def test_math_floordiv(self, closed):
        """interval // scalar floor-divides both endpoints; invalid operands raise."""
        interval = Interval(1, 2, closed=closed)
        expected = Interval(0, 1, closed=closed)
        result = interval // 2
        assert result == expected
        result = interval
        result //= 2
        assert result == expected
        msg = r"unsupported operand type\(s\) for //"
        with pytest.raises(TypeError, match=msg):
            interval // interval
        with pytest.raises(TypeError, match=msg):
            interval // 'foo'

    def test_constructor_errors(self):
        """Invalid closed option or left > right is rejected at construction."""
        msg = "invalid option for 'closed': foo"
        with pytest.raises(ValueError, match=msg):
            Interval(0, 1, closed='foo')
        msg = 'left side of interval must be <= right side'
        with pytest.raises(ValueError, match=msg):
            Interval(1, 0)

    @pytest.mark.parametrize('tz_left, tz_right', [
        (None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])
    def test_constructor_errors_tz(self, tz_left, tz_right):
        """Mismatched timezones raise: TypeError for naive/aware mix, else ValueError."""
        # GH 18538
        left = Timestamp('2017-01-01', tz=tz_left)
        right = Timestamp('2017-01-02', tz=tz_right)
        error = TypeError if com._any_none(tz_left, tz_right) else ValueError
        with pytest.raises(error):
            Interval(left, right)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
class SvdOpTest(test.TestCase):
  """Shape validation and determinism tests for linalg_ops.svd."""

  def testWrongDimensions(self):
    """svd rejects inputs of rank < 2 at graph-construction time."""
    # The input to svd should be a tensor of at least rank 2.
    scalar = constant_op.constant(1.)
    with self.assertRaisesRegexp(ValueError,
                                 "Shape must be at least rank 2 but is rank 0"):
      linalg_ops.svd(scalar)
    vector = constant_op.constant([1., 2.])
    with self.assertRaisesRegexp(ValueError,
                                 "Shape must be at least rank 2 but is rank 1"):
      linalg_ops.svd(vector)

  def testConcurrentExecutesWithoutError(self):
    """Several svd ops in one session.run: identical seeds give identical results."""
    with self.test_session(use_gpu=True) as sess:
      all_ops = []
      for compute_uv_ in True, False:
        for full_matrices_ in True, False:
          # Same seed, so matrix1 and matrix2 (and their SVDs) must match.
          matrix1 = random_ops.random_normal([5, 5], seed=42)
          matrix2 = random_ops.random_normal([5, 5], seed=42)
          if compute_uv_:
            s1, u1, v1 = linalg_ops.svd(
                matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
            s2, u2, v2 = linalg_ops.svd(
                matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
            all_ops += [s1, u1, v1, s2, u2, v2]
          else:
            s1 = linalg_ops.svd(
                matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
            s2 = linalg_ops.svd(
                matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
            all_ops += [s1, s2]
      val = sess.run(all_ops)
      # First 12 entries: two (s, u, v) triples per full_matrices_ setting
      # from the compute_uv=True pass.
      for i in range(2):
        s = 6 * i
        self.assertAllEqual(val[s], val[s + 3])  # s1 == s2
        self.assertAllEqual(val[s + 1], val[s + 4])  # u1 == u2
        self.assertAllEqual(val[s + 2], val[s + 5])  # v1 == v2
      # Remaining 4 entries: singular-value-only pairs from compute_uv=False.
      for i in range(2):
        s = 12 + 2 * i
        self.assertAllEqual(val[s], val[s + 1])  # s1 == s2
def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
                  full_matrices_):
  """Build a test method comparing linalg_ops.svd against np.linalg.svd.

  The closure parameters select dtype, input shape, static vs. feed-dict
  shapes, and the compute_uv / full_matrices op flags.
  """

  def CompareSingularValues(self, x, y, tol):
    # Absolute tolerance scaled by the largest singular values.
    self.assertAllClose(x, y, atol=(x[0] + y[0]) * tol)

  def CompareSingularVectors(self, x, y, rank, tol):
    # We only compare the first 'rank' singular vectors since the
    # remainder form an arbitrary orthonormal basis for the
    # (row- or column-) null space, whose exact value depends on
    # implementation details. Notice that since we check that the
    # matrices of singular vectors are unitary elsewhere, we do
    # implicitly test that the trailing vectors of x and y span the
    # same space.
    x = x[..., 0:rank]
    y = y[..., 0:rank]
    # Singular vectors are only unique up to sign (complex phase factor for
    # complex matrices), so we normalize the sign first.
    sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
    phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
    x *= phases
    self.assertAllClose(x, y, atol=2 * tol)

  def CheckApproximation(self, a, u, s, v, full_matrices_, tol):
    # Tests that a ~= u*diag(s)*transpose(v).
    batch_shape = a.shape[:-2]
    m = a.shape[-2]
    n = a.shape[-1]
    diag_s = math_ops.cast(array_ops.matrix_diag(s), dtype=dtype_)
    if full_matrices_:
      # Pad diag(s) with zero rows/columns so the matmul shapes line up.
      if m > n:
        zeros = array_ops.zeros(batch_shape + (m - n, n), dtype=dtype_)
        diag_s = array_ops.concat([diag_s, zeros], a.ndim - 2)
      elif n > m:
        zeros = array_ops.zeros(batch_shape + (m, n - m), dtype=dtype_)
        diag_s = array_ops.concat([diag_s, zeros], a.ndim - 1)
    a_recon = math_ops.matmul(u, diag_s)
    a_recon = math_ops.matmul(a_recon, v, adjoint_b=True)
    self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)

  def CheckUnitary(self, x, tol):
    # Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
    xx = math_ops.matmul(x, x, adjoint_a=True)
    identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
    self.assertAllClose(identity.eval(), xx.eval(), atol=tol)

  def Test(self):
    """Run svd on a random matrix and validate values, vectors and unitarity."""
    is_complex = dtype_ in (np.complex64, np.complex128)
    is_single = dtype_ in (np.float32, np.complex64)
    tol = 3e-4 if is_single else 1e-12
    if test.is_gpu_available():
      # The gpu version returns results that are much less accurate.
      tol *= 100
    np.random.seed(42)
    x_np = np.random.uniform(
        low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    if is_complex:
      x_np += 1j * np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    with self.test_session(use_gpu=True) as sess:
      if use_static_shape_:
        x_tf = constant_op.constant(x_np)
      else:
        x_tf = array_ops.placeholder(dtype_)
      if compute_uv_:
        s_tf, u_tf, v_tf = linalg_ops.svd(
            x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
        if use_static_shape_:
          s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf])
        else:
          s_tf_val, u_tf_val, v_tf_val = sess.run(
              [s_tf, u_tf, v_tf], feed_dict={x_tf: x_np})
      else:
        s_tf = linalg_ops.svd(
            x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
        if use_static_shape_:
          s_tf_val = sess.run(s_tf)
        else:
          s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})
      if compute_uv_:
        u_np, s_np, v_np = np.linalg.svd(
            x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
      else:
        s_np = np.linalg.svd(
            x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
      # We explicitly avoid the situation where numpy eliminates a first
      # dimension that is equal to one.
      s_np = np.reshape(s_np, s_tf_val.shape)
      CompareSingularValues(self, s_np, s_tf_val, tol)
      if compute_uv_:
        CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]), tol)
        # numpy returns v^H, so conjugate-transpose it back before comparing.
        CompareSingularVectors(self,
                               np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,
                               min(shape_[-2:]), tol)
        CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,
                           full_matrices_, tol)
        CheckUnitary(self, u_tf_val, tol)
        CheckUnitary(self, v_tf_val, tol)

  return Test
class SvdGradOpTest(test.TestCase):
  """Container class; gradient test methods are attached below via _AddTest."""
  pass  # Filled in below
def _GetSvdGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
  """Build a test method that numerically checks the gradients of svd."""

  def _NormalizingSvd(tf_a):
    """svd with phase-normalized singular vectors, so outputs are unique."""
    tf_s, tf_u, tf_v = linalg_ops.svd(
        tf_a, compute_uv=True, full_matrices=full_matrices_)
    # Singular vectors are only unique up to an arbitrary phase. We normalize
    # the vectors such that the first component of u (if m >=n) or v (if n > m)
    # have phase 0.
    m = tf_a.shape[-2]
    n = tf_a.shape[-1]
    if m >= n:
      top_rows = tf_u[..., 0:1, :]
    else:
      top_rows = tf_v[..., 0:1, :]
    if tf_u.dtype.is_complex:
      angle = -math_ops.angle(top_rows)
      phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
    else:
      phase = math_ops.sign(top_rows)
    tf_u *= phase[..., :m]
    tf_v *= phase[..., :n]
    return tf_s, tf_u, tf_v

  def Test(self):
    """Compare theoretical vs numerical gradients of each svd output."""
    np.random.seed(42)
    a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
    if dtype_ in [np.complex64, np.complex128]:
      a += 1j * np.random.uniform(
          low=-1.0, high=1.0, size=shape_).astype(dtype_)
    # Optimal stepsize for central difference is O(epsilon^{1/3}).
    # See Equation (21) in:
    # http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
    # TODO(rmlarsen): Move step size control to gradient checker.
    epsilon = np.finfo(dtype_).eps
    delta = 0.1 * epsilon**(1.0 / 3.0)
    # Looser tolerance for single-precision dtypes.
    if dtype_ in [np.float32, np.complex64]:
      tol = 3e-2
    else:
      tol = 1e-6
    with self.test_session(use_gpu=True):
      tf_a = constant_op.constant(a)
      if compute_uv_:
        tf_s, tf_u, tf_v = _NormalizingSvd(tf_a)
        outputs = [tf_s, tf_u, tf_v]
      else:
        tf_s = linalg_ops.svd(tf_a, compute_uv=False)
        outputs = [tf_s]
      for b in outputs:
        x_init = np.random.uniform(
            low=-1.0, high=1.0, size=shape_).astype(dtype_)
        if dtype_ in [np.complex64, np.complex128]:
          x_init += 1j * np.random.uniform(
              low=-1.0, high=1.0, size=shape_).astype(dtype_)
        theoretical, numerical = gradient_checker.compute_gradient(
            tf_a,
            tf_a.get_shape().as_list(),
            b,
            b.get_shape().as_list(),
            x_init_value=x_init,
            delta=delta)
        self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

  return Test
if __name__ == "__main__":
  # Forward-pass tests: sweep dtypes, matrix sizes, batch shapes and op flags.
  for compute_uv in False, True:
    for full_matrices in False, True:
      for dtype in np.float32, np.float64, np.complex64, np.complex128:
        for rows in 1, 2, 5, 10, 32, 100:
          for cols in 1, 2, 5, 10, 32, 100:
            # bool * list trick: the (3, 2) batch case is only added for
            # small matrices (max dimension < 10).
            for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
              shape = batch_dims + (rows, cols)
              for use_static_shape in True, False:
                name = "%s_%s_static_shape_%s__compute_uv_%s_full_%s" % (
                    dtype.__name__, "_".join(map(str, shape)), use_static_shape,
                    compute_uv, full_matrices)
                _AddTest(SvdOpTest, "Svd", name,
                         _GetSvdOpTest(dtype, shape, use_static_shape,
                                       compute_uv, full_matrices))
  # Gradient tests: complex dtypes are only exercised when compute_uv is
  # False (same bool * list trick).
  for compute_uv in False, True:
    for full_matrices in False, True:
      dtypes = ([np.float32, np.float64]
                + [np.complex64, np.complex128] * (not compute_uv))
      for dtype in dtypes:
        mat_shapes = [(10, 11), (11, 10), (11, 11)]
        if not full_matrices or not compute_uv:
          mat_shapes += [(5, 11), (11, 5)]
        for mat_shape in mat_shapes:
          for batch_dims in [(), (3,)]:
            shape = batch_dims + mat_shape
            name = "%s_%s_compute_uv_%s_full_%s" % (
                dtype.__name__, "_".join(map(str, shape)), compute_uv,
                full_matrices)
            _AddTest(SvdGradOpTest, "SvdGrad", name,
                     _GetSvdGradOpTest(dtype, shape, compute_uv, full_matrices))
  test.main()
| |
import pytest
from DHS_Feed import *
def compare(object_a, object_b):
    """Deep, order-insensitive comparison of two objects.

    Lists are compared element-wise after sorting (when sortable), dicts are
    compared key-by-key ignoring key order; anything else falls back to ==.
    Returns a bool.
    """
    if isinstance(object_a, List):
        return compare_list(object_a, object_b)
    elif isinstance(object_a, Dict):
        return compare_dict(object_a, object_b)
    else:
        return object_a == object_b


def compare_list(list_a, list_b):
    """Compare two sequences element-wise, ignoring order when sortable."""
    try:
        # sorted() already returns a new list; the original wrapped it in a
        # redundant list() call.
        list_a = sorted(list_a)
        list_b = sorted(list_b)
    except TypeError:
        # Unsortable elements (e.g. dicts): compare in the given order.
        pass
    if len(list_a) != len(list_b):
        return False
    return all(compare(a_obj, b_obj) for a_obj, b_obj in zip(list_a, list_b))


def compare_dict(dict_a, dict_b):
    """Compare two dicts: same key set (order-insensitive) and equal values."""
    keys = dict_a.keys()
    if not compare_list(keys, dict_b.keys()):
        return False
    return all(compare(dict_a[key], dict_b[key]) for key in keys)
class TestTempFile:
    """Tests for the TempFile helper (a self-deleting temporary file)."""

    def test_create_file(self):
        """The created file holds exactly the data passed to TempFile."""
        data = 'test'
        temp_file = TempFile(data)
        with open(temp_file.path, 'r') as _file:
            assert _file.read() == data, 'temp file content failed'

    def test_removing_file(self):
        """The backing file is deleted once the TempFile object goes away."""
        # The TempFile instance is not kept alive, so it is collected right
        # after this expression and should remove its file.
        temp_file_name = TempFile('test').path
        assert not os.path.isfile(temp_file_name), 'file was not removed at the end of life of the TempFile object.'

    @pytest.mark.parametrize('suffix', ['test', 'py', 'pem', 'crt'])
    def test_suffix(self, suffix):
        """The generated file name ends with the requested suffix."""
        temp_file_name = TempFile('test', suffix=suffix).path
        assert temp_file_name.endswith(suffix), 'file suffix does not work as expected'
class TestHelpers:
    """Tests for module-level helper functions in DHS_Feed."""

    # (fixture file, expected number of PEM sections inside it)
    data_test_fix_rsa_data = [
        ('test_data/rsa/test_fix_rsa_data.txt', 4),
        ('test_data/rsa/test_fix_rsa_data2.txt', 2),
    ]

    @pytest.mark.parametrize('path, count', data_test_fix_rsa_data)
    def test_fix_rsa_data(self, path, count):
        """fix_rsa_data restores the newlines stripped from PEM data."""
        with open(path, 'r') as _file:
            data = _file.read()
        # Simulate how the data arrives from Demisto params: newlines
        # collapsed to spaces.
        demisto_data = data.replace('\n', ' ')
        fixed_data = fix_rsa_data(demisto_data, count)
        assert data == fixed_data, 'failed to parse the data from demisto params to RSA file'

    data_test_insert_id = [
        ('te{ID}st', 'tetestst'),
        ('{ID}', 'test'),
    ]

    @pytest.mark.parametrize('input_str, expected_output', data_test_insert_id)
    def test_insert_id(self, input_str, expected_output, mocker):
        """insert_id replaces the {ID} placeholder with a uuid4 (mocked to 'test')."""
        mocker.patch.object(uuid, 'uuid4', return_value='test')
        output = insert_id(input_str)
        assert output == expected_output, 'failed to insert uuid'

    data_test_ssl_files_checker = [
        ('test_data/rsa/2048b-rsa-example-keypair.pem', 'test_data/rsa/2048b-rsa-example-cert.pem')
    ]

    @pytest.mark.parametrize('input_key, input_public', data_test_ssl_files_checker)
    def test_ssl_files_checker(self, input_key, input_public):
        """A valid cert/key pair passes validation without raising."""
        with open(input_key, 'r') as input_key:
            with open(input_public, 'r') as input_public:
                ssl_files_checker(input_public.read(), input_key.read())

    @pytest.mark.parametrize('input_key, input_public', data_test_ssl_files_checker)
    def test_ssl_files_checker_with_invalid_files(self, input_key, input_public):
        """Corrupted cert/key data must raise ValueError (not pass silently)."""
        with open(input_key, 'r') as input_key:
            input_key = input_key.read()
        with open(input_public, 'r') as input_public:
            input_public = input_public.read()
        try:
            # Corrupt the certificate: duplicate a line near the end of the PEM.
            temp_input_public = input_public.split('\n')[:-6]
            temp_input_public.extend(input_public.split('\n')[-7:])
            ssl_files_checker('\n'.join(temp_input_public), input_key)
        except ValueError as error:
            assert str(error).startswith('Unable to load certificate')
        else:
            # Validation must not succeed on corrupted input.
            raise Exception
        try:
            # Same corruption applied to the private key.
            temp_input_private = input_key.split('\n')[:-6]
            temp_input_private.extend(input_key.split('\n')[-7:])
            ssl_files_checker(input_public, '\n'.join(temp_input_private))
        except ValueError as error:
            assert str(error) == 'Could not deserialize key data. The data may be in an incorrect format or it may be encrypted with an unsupported algorithm.'  # noqa: E501
        else:
            raise Exception
class TestSafeDataGet:
    """Tests for safe_data_get: nested dict lookup with optional key prefix
    and default value."""

    data_test_safe_data_get = [
        ('without_list', 'one_level_get'),
        (['standard_list', 'test'], 'multi_level_get')
    ]

    @pytest.mark.parametrize('get_list, expected_output', data_test_safe_data_get)
    def test_safe_data_get(self, get_list, expected_output):
        """A plain key or a key path both resolve to the stored value."""
        dict_data = {
            'without_list': 'one_level_get',
            'standard_list': {'test': 'multi_level_get'}
        }
        output = safe_data_get(dict_data, get_list)
        assert output == expected_output, 'failed to get the relevant data'

    def test_without_list(self):
        """Single-key lookup."""
        dict_data = {'without_list': 'one_level_get'}
        output = safe_data_get(dict_data, 'without_list')
        assert output == 'one_level_get'

    def test_with_list(self):
        """Key-path lookup through a nested dict."""
        dict_data = {'standard_list': {'test': 'multi_level_get'}}
        output = safe_data_get(dict_data, ['standard_list', 'test'])
        assert output == 'multi_level_get'

    def test_one_level_with_prefix(self):
        """The prefix is applied to the key before lookup."""
        dict_data = {'TEST:without_list': 'one_level_get'}
        output = safe_data_get(dict_data, 'without_list', prefix='TEST')
        assert output == 'one_level_get'

    def test_multi_level_with_prefix(self):
        """The prefix is applied to every key along the path."""
        dict_data = {'TEST:standard_list': {'TEST:test': 'multi_level_get'}}
        output = safe_data_get(dict_data, ['standard_list', 'test'], prefix='TEST')
        assert output == 'multi_level_get'

    data_test_get_none_existing_path_with_prefix = [
        (
            {'TEST:standard_list': {'TEST:test': 'multi_level_get'}},
            ['standard_list1', 'test']
        ),
        (
            {'TEST:standard_list': {'TEST:test': 'multi_level_get'}},
            ['standard_list', 'test1']
        ),
        (
            {'without_list': 'one_level_get'},
            'without_list1'
        ),
    ]

    @pytest.mark.parametrize('dict_data, path', data_test_get_none_existing_path_with_prefix)
    def test_get_none_existing_path_with_prefix(self, dict_data, path):
        """Missing paths return None when a prefix is used."""
        output = safe_data_get(dict_data, path, prefix='TEST')
        assert output is None

    # Renamed from data_test_get_none_existing_path_with_prefix: the original
    # re-used that name, silently shadowing the list above.
    data_test_get_none_existing_path_without_prefix = [
        ({'standard_list': {'test': 'multi_level_get'}}, ['standard_list1', 'test']),
        ({'standard_list': {'test': 'multi_level_get'}}, ['standard_list', 'test1']),
        ({'without_list': 'one_level_get'}, 'without_list1'),
    ]

    @pytest.mark.parametrize('dict_data, path', data_test_get_none_existing_path_without_prefix)
    def test_get_none_existing_path_without_prefix(self, dict_data, path):
        """Missing paths return None when no prefix is used."""
        output = safe_data_get(dict_data, path)
        assert output is None

    data_test_with_default_value = [
        ({}, 'test', 'test'),
        ({}, None, None),
        ({'test': None}, 'something', None),
        ({'something': 'nothing'}, 'something', 'something')
    ]

    @pytest.mark.parametrize('dict_data, default, expected_output', data_test_with_default_value)
    def test_with_default_value(self, dict_data, default, expected_output):
        """The default is returned only when the key is absent; a stored None wins."""
        output = safe_data_get(dict_data, 'test', default=default)
        assert output == expected_output
class TestIndicators:
    """Pipeline tests: raw DHS data -> STIX blocks -> indicators -> Demisto
    context, validated against JSON fixture files per indicator type."""

    @staticmethod
    def read_json(path):
        """Load and parse a JSON fixture file."""
        with open(path, 'r') as json_file:
            json_file = json_file.read()
        return json.loads(json_file)

    @staticmethod
    def get_stix_header(block):
        """Return the STIX header dict of a block (empty dict when missing)."""
        return block.get('stix:STIX_Header', {})

    # Indicator type names, used to locate the fixture files below.
    File = 'File'
    IP = 'IP'
    Domain = 'Domain'
    URL = 'URL'
    Email = 'Email'
    data_types = [IP, Domain, URL, Email, File]

    @pytest.mark.parametrize('data_type', data_types)
    def test_data_to_blocks(self, data_type):
        """Raw DHS data is split into the expected STIX blocks."""
        data = self.read_json(f'test_data/data_from_DHS/{data_type}_data.json')
        test_blocks = Indicators._blocks(data)
        blocks = self.read_json(f'test_data/blocks/blocks_from_{data_type}_data.json')
        assert test_blocks == blocks

    @pytest.mark.parametrize('data_type', data_types)
    def test_blocks_to_indicators(self, data_type):
        """Each STIX block yields the expected raw indicators."""
        blocks = self.read_json(f'test_data/blocks/blocks_from_{data_type}_data.json')
        test_indicators = []
        for block in blocks:
            test_indicators.extend(list(Indicators._indicators(block)))
        indicators = self.read_json(f'test_data/indicators/indicators_from_{data_type}_data.json')
        assert test_indicators == indicators

    @pytest.mark.parametrize('data_type', data_types)
    def test_indicators_to_indicator_data(self, data_type):
        """Raw indicators are enriched with source, TLP color and tags."""
        indicators = self.read_json(f'test_data/indicators/indicators_from_{data_type}_data.json')
        test_data_indicators = list(map(lambda x: Indicators._indicator_data(x, 'source', 'color', ['tag']), indicators))
        data_indicators = self.read_json(f'test_data/data_indicators/{data_type}_data_indicators.json')
        assert test_data_indicators == data_indicators

    @pytest.mark.parametrize('data_type', data_types)
    def test_indicators_to_context_indicators(self, data_type):
        """Enriched indicator data maps onto the expected context entries."""
        indicators = self.read_json(f'test_data/data_indicators/{data_type}_data_indicators.json')
        test_context_indicators = list(map(indicator_to_context, indicators))
        context_indicators = self.read_json(f'test_data/context_indicators/context_from_{data_type}_data.json')
        assert test_context_indicators == context_indicators

    # Expected TLP color per block, by indicator type.
    data_test_tlp_color_from_header = [
        (IP, ['WHITE', 'WHITE', 'WHITE']),
        (Domain, ['GREEN']),
        (URL, ['AMBER']),
        (Email, ['AMBER']),
        (File, ['GREEN', 'GREEN', 'GREEN'])
    ]

    @pytest.mark.parametrize('data_type, tlp_colors', data_test_tlp_color_from_header)
    def test_tlp_color_from_header(self, data_type, tlp_colors):
        """The TLP color is extracted from each block's STIX header."""
        blocks_headers = list(
            map(self.get_stix_header, self.read_json(f'test_data/blocks/blocks_from_{data_type}_data.json')))
        test_tlp_colors = list(map(Indicators._tlp_color_from_header, blocks_headers))
        assert test_tlp_colors == tlp_colors

    data_test_source_from_header = [
        (IP, ['Infoblox Inc'] * 3),
        (File, ['Reversing Labs'] * 3)
    ]

    @pytest.mark.parametrize('data_type, sources', data_test_source_from_header)
    def test_source_from_header(self, data_type, sources):
        """The provider name is extracted from each block's STIX header."""
        sources_headers = list(
            map(self.get_stix_header, self.read_json(f'test_data/blocks/blocks_from_{data_type}_data.json')))
        test_sources = list(map(Indicators._source_from_header, sources_headers))
        assert test_sources == sources
class TestCommandTestModule:
    """Tests for command_test_module against a mocked TaxiiClient."""

    def nothing(self, *args, **kwargs):
        """Generic patch replacement: returns a callable yielding the canned
        self.data, whatever arguments the patched target receives."""
        return lambda: self.data

    def setup_class(self):
        """Build a blank client and reset the canned response / patch handles."""
        self.client = TaxiiClient('', '', '')
        self.data = None
        self.ssl_files_checker = self.get_first_fetch = self.discovery_request = None

    def mock_data(self, mocker):
        """Patch the module helpers and the client's discovery_request so
        they all return self.data."""
        self.ssl_files_checker = mocker.patch('DHS_Feed.ssl_files_checker', new=self.nothing)
        self.get_first_fetch = mocker.patch('DHS_Feed.get_first_fetch', new=self.nothing)
        self.discovery_request = mocker.patch.object(self.client, 'discovery_request', new_callable=self.nothing)

    def test_command_test_module(self, mocker):
        """A discovery response listing service instances yields 'ok'."""
        self.mock_data(mocker)
        self.data = {'taxii_11:Discovery_Response': {'taxii_11:Service_Instance': ['somthing']}}
        assert command_test_module(self.client, '', '', '') == 'ok'

    def test_command_test_module_with_invalid_credential(self, mocker):
        """An UNAUTHORIZED status message raises 'invalid credential.'."""
        self.mock_data(mocker)
        self.data = {'taxii_11:Status_Message': {'@status_type': 'UNAUTHORIZED'}}
        try:
            command_test_module(self.client, '', '', '')
        except DemistoException as error:
            assert str(error) == 'invalid credential.'
        else:
            # command_test_module must not succeed here.
            raise Exception

    def test_command_test_module_with_unknown_error(self, mocker):
        """An empty/unrecognized response raises 'unknown error.'."""
        self.mock_data(mocker)
        self.data = {}
        try:
            command_test_module(self.client, '', '', '')
        except DemistoException as error:
            assert str(error) == 'unknown error.'
        else:
            raise Exception
| |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is a modified version of the one created by the Spark
# project (https://github.com/apache/spark/blob/master/dev/merge_spark_pr.py).
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./merge_rocketmq_pr.py (see config env vars below)
#
# This utility assumes you already have local a RocketMQ git folder and that you
# have added remotes corresponding to both (i) the github apache RocketMQ
# mirror and (ii) the apache git repo.
import json
import os
import re
import subprocess
import sys
import urllib2
# JIRA support is optional: record availability so callers can degrade
# gracefully when the 'jira' package is not installed.
try:
    import jira.client
    JIRA_IMPORTED = True
except ImportError:
    JIRA_IMPORTED = False

# Location of your RocketMQ git development area
ROCKETMQ_HOME = os.environ.get("ROCKETMQ_HOME", os.getcwd())
# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "origin")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")

# Endpoints of the GitHub mirror and the ASF JIRA instance.
GITHUB_BASE = "https://github.com/apache/incubator-rocketmq/pull"
GITHUB_API_BASE = "https://api.github.com/repos/apache/incubator-rocketmq"
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
# Branch that pull requests are merged into.
DEVELOP_BRANCH = "develop"
def get_json(url):
    """Fetch *url* from the GitHub API and return the decoded JSON.

    Sends the OAuth token when GITHUB_OAUTH_KEY is configured. Any HTTP
    error is fatal: a message is printed and the script exits.
    """
    try:
        request = urllib2.Request(url)
        if GITHUB_OAUTH_KEY:
            request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
        return json.load(urllib2.urlopen(request))
    except urllib2.HTTPError as e:
        if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
            print("Exceeded the GitHub API rate limit; see the instructions in " +
                  "dev/merge_rocketmq_pr.py to configure an OAuth token for making authenticated " +
                  "GitHub requests.")
        else:
            print("Unable to fetch URL, exiting: %s" % url)
        # BUG FIX: exit on BOTH branches. Previously sys.exit was only reached
        # in the else branch, so the rate-limit case fell through and returned
        # None, crashing the caller with a confusing error later.
        sys.exit(-1)
def fail(msg):
    """Print msg, delete any temporary PR_TOOL branches, and exit non-zero."""
    print(msg)
    clean_up()
    sys.exit(-1)
def run_cmd(cmd):
    """Echo *cmd* and execute it, returning its captured stdout.

    Accepts either an argv list or a single space-separated command string.
    """
    print(cmd)
    argv = cmd if isinstance(cmd, list) else cmd.split(" ")
    return subprocess.check_output(argv)
def continue_maybe(prompt):
    """Ask the user a y/n question; abort (via fail) on anything but 'y'."""
    if raw_input("\n%s (y/n): " % prompt).lower() != "y":
        fail("Okay, exiting")
def clean_up():
    """Return to the original head and delete the temporary PR_TOOL branches."""
    print("Restoring head pointer to %s" % original_head)
    run_cmd("git checkout %s" % original_head)
    all_branches = run_cmd("git branch").replace(" ", "").split("\n")
    for temp_branch in [b for b in all_branches if b.startswith(BRANCH_PREFIX)]:
        print("Deleting local branch %s" % temp_branch)
        run_cmd("git branch -D %s" % temp_branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
    """Squash-merge pull request *pr_num* into *target_ref* and push it.

    Interactively prompts for the primary author and the commit title,
    supports manual conflict resolution, and returns the abbreviated
    (8-character) merge hash.
    """
    pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)

    had_conflicts = False
    try:
        run_cmd(['git', 'merge', pr_branch_name, '--squash'])
    except Exception as e:
        # Give the committer a chance to resolve conflicts by hand before
        # continuing; both prompts abort the run on anything but 'y'.
        msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
        continue_maybe(msg)
        had_conflicts = True

    # Rank authors by commit count so the most active author is the default.
    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                              '--pretty=format:%an <%ae>']).split("\n")
    distinct_authors = sorted(set(commit_authors),
                              key=lambda x: commit_authors.count(x), reverse=True)
    primary_author = raw_input(
        "Enter primary author in the format of \"name <email>\" [%s]: " %
        distinct_authors[0])
    if primary_author == "":
        primary_author = distinct_authors[0]

    # NOTE(review): 'commits' is collected but never used below.
    commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                       '--pretty=format:%h [%an] %s']).split("\n\n")

    merge_message_flags = []
    modified_title = raw_input("Modify commit log [%s]: " % title)
    if modified_title == "":
        modified_title = title
    merge_message_flags += ["-m", modified_title]
    authors = "\n".join(["Author: %s" % a for a in distinct_authors])
    merge_message_flags += ["-m", authors]

    if had_conflicts:
        # Record in the commit message who resolved the conflicts.
        committer_name = run_cmd("git config --get user.name").strip()
        committer_email = run_cmd("git config --get user.email").strip()
        message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
            committer_name, committer_email)
        merge_message_flags += ["-m", message]

    # The string "Closes #%s" string is required for GitHub to correctly close the PR
    merge_message_flags += ["-m", "Closes #%s from %s." % (pr_num, pr_repo_desc)]

    run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)

    continue_maybe("Merge complete (local ref %s). Push to %s?" % (
        target_branch_name, PUSH_REMOTE_NAME))

    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)

    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()
    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
    """Cherry-pick the merged commit *merge_hash* onto another branch.

    Prompts for the target branch (defaulting to *default_branch*), pushes
    the pick, and returns the name of the branch picked into.
    """
    pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
    if pick_ref == "":
        pick_ref = default_branch

    pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())

    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
    run_cmd("git checkout %s" % pick_branch_name)

    try:
        # -s adds a Signed-off-by trailer; -x records the picked commit hash.
        run_cmd("git cherry-pick -sx %s" % merge_hash)
    except Exception as e:
        # Allow the committer to finish the pick by hand.
        msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
        continue_maybe(msg)
        msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
        continue_maybe(msg)

    continue_maybe("Pick complete (local ref %s). Push to %s?" % (
        pick_branch_name, PUSH_REMOTE_NAME))

    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
    except Exception as e:
        clean_up()
        fail("Exception while pushing: %s" % e)

    pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
    clean_up()

    print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
    print("Pick hash: %s" % pick_hash)
    return pick_ref
def fix_version_from_branch(branch, versions):
    """Pick the default fix version for *branch* from *versions*.

    Note: Assumes *versions* is a sorted (newest->oldest) list of
    un-released versions. "master" maps to the newest version; a release
    branch ("branch-X.Y") maps to the oldest version matching its prefix.
    """
    if branch == "master":
        return versions[0]
    release_prefix = branch.replace("branch-", "")
    return filter(lambda v: v.name.startswith(release_prefix), versions)[-1]
def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
    """Transition one ASF JIRA issue to Resolved/Fixed for a merged PR.

    Prompts for the issue id (defaulting to *default_jira_id*) and for the
    fix version(s), deriving default fix versions from *merge_branches*,
    then resolves the issue via the JIRA REST API, attaching *comment*.
    """
    asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
                                basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))

    jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
    if jira_id == "":
        jira_id = default_jira_id

    try:
        issue = asf_jira.issue(jira_id)
    except Exception as e:
        fail("ASF JIRA could not find %s\n%s" % (jira_id, e))

    cur_status = issue.fields.status.name
    cur_summary = issue.fields.summary
    cur_assignee = issue.fields.assignee
    if cur_assignee is None:
        cur_assignee = "NOT ASSIGNED!!!"
    else:
        cur_assignee = cur_assignee.displayName

    # Refuse to double-resolve an issue that is already done.
    if cur_status == "Resolved" or cur_status == "Closed":
        fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
    print("=== JIRA %s ===" % jira_id)
    print("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" %
          (cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))

    # Candidate fix versions: un-released x.y.z versions, newest first.
    versions = asf_jira.project_versions("ROCKETMQ")
    versions = sorted(versions, key=lambda x: x.name, reverse=True)
    versions = filter(lambda x: x.raw['released'] is False, versions)
    # Consider only x.y.z versions
    versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions)

    default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
    for v in default_fix_versions:
        # Handles the case where we have forked a release branch but not yet made the release.
        # In this case, if the PR is committed to the master branch and the release branch, we
        # only consider the release branch to be the fix version. E.g. it is not valid to have
        # both 1.1.0 and 1.0.0 as fix versions.
        (major, minor, patch) = v.split(".")
        if patch == "0":
            previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
            if previous in default_fix_versions:
                default_fix_versions = filter(lambda x: x != v, default_fix_versions)
    default_fix_versions = ",".join(default_fix_versions)

    fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
    if fix_versions == "":
        fix_versions = default_fix_versions
    fix_versions = fix_versions.replace(" ", "").split(",")

    def get_version_json(version_str):
        # Map a version name back to its raw JIRA JSON representation.
        return filter(lambda v: v.name == version_str, versions)[0].raw

    jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)

    resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
    resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
    asf_jira.transition_issue(
        jira_id, resolve["id"], fixVersions=jira_fix_versions,
        comment=comment, resolution={'id': resolution.raw['id']})
    print("Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions))
def resolve_jira_issues(title, merge_branches, comment):
    """Resolve every ROCKETMQ-XXXX issue referenced in *title*.

    When the title names no issue, prompt interactively for one instead.
    """
    ids = re.findall("ROCKETMQ-[0-9]{4,5}", title)
    if not ids:
        resolve_jira_issue(merge_branches, comment)
    for issue_id in ids:
        resolve_jira_issue(merge_branches, comment, issue_id)
def standardize_jira_ref(text):
    """
    Standardize the [ROCKETMQ-XXXXX] [MODULE] prefix
    Converts "[ROCKETMQ-XXX][mllib] Issue", "[MLLib] ROCKETMQ-XXX. Issue" or "ROCKETMQ XXX [MLLIB]: Issue" to
    "[ROCKETMQ-XXX][MLLIB] Issue"
    """
    # Already in canonical "[ROCKETMQ-NNN][COMPONENT] text" form? Leave it alone.
    if re.search(r'^\[ROCKETMQ-[0-9]{3,6}\](\[[A-Z0-9_\s,]+\] )+\S+', text) is not None:
        return text

    # Pull every JIRA reference out of the text, normalized to bracketed,
    # dash-separated, upper-case form.
    issue_tags = []
    issue_pattern = re.compile(r'(ROCKETMQ[-\s]*[0-9]{3,6})+', re.IGNORECASE)
    for match in issue_pattern.findall(text):
        issue_tags.append('[' + re.sub(r'\s+', '-', match.upper()) + ']')
        text = text.replace(match, '')

    # Pull out bracketed component name(s), upper-cased.
    component_tags = []
    component_pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE)
    for match in component_pattern.findall(text):
        component_tags.append(match.upper())
        text = text.replace(match, '')

    # Strip any leftover leading punctuation from the remaining title.
    leftover = re.compile(r'^\W+(.*)', re.IGNORECASE).search(text)
    if leftover is not None:
        text = leftover.groups()[0]

    # Reassemble (JIRA ref(s), component(s), remaining text) and collapse
    # runs of whitespace into single spaces.
    assembled = ''.join(issue_tags).strip() + ''.join(component_tags).strip() + " " + text.strip()
    return re.sub(r'\s+', ' ', assembled.strip())
def get_current_ref():
    """Return the current branch name, or the commit SHA for a detached HEAD."""
    name = run_cmd("git rev-parse --abbrev-ref HEAD").strip()
    # 'HEAD' means we are on a detached HEAD; fall back to its SHA so the
    # original state can be restored later.
    return run_cmd("git rev-parse HEAD").strip() if name == 'HEAD' else name
def main():
    """Interactive entry point: fetch a PR from GitHub, merge it into its
    target branch, optionally cherry-pick it elsewhere, and optionally
    resolve the associated JIRA issue(s)."""
    global original_head

    os.chdir(ROCKETMQ_HOME)
    original_head = get_current_ref()

    latest_branch = DEVELOP_BRANCH

    pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
    pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
    pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))

    url = pr["url"]

    # Decide whether to use the modified title or not
    modified_title = standardize_jira_ref(pr["title"])
    if modified_title != pr["title"]:
        print("I've re-written the title as follows to match the standard format:")
        print("Original: %s" % pr["title"])
        print("Modified: %s" % modified_title)
        result = raw_input("Would you like to use the modified title? (y/n): ")
        if result.lower() == "y":
            title = modified_title
            print("Using modified title:")
        else:
            title = pr["title"]
            print("Using original title:")
        print(title)
    else:
        title = pr["title"]

    body = pr["body"]
    target_ref = pr["base"]["ref"]
    user_login = pr["user"]["login"]
    base_ref = pr["head"]["ref"]
    pr_repo_desc = "%s/%s" % (user_login, base_ref)

    # Merged pull requests don't appear as merged in the GitHub API;
    # Instead, they're closed by asfgit.
    merge_commits = \
        [e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]

    if merge_commits:
        # Already merged upstream: offer a backport (cherry-pick) instead.
        merge_hash = merge_commits[0]["commit_id"]
        message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]

        print("Pull request %s has already been merged, assuming you want to backport" % pr_num)
        commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
                                        "%s^{commit}" % merge_hash]).strip() != ""
        if not commit_is_downloaded:
            fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)

        print("Found commit %s:\n%s" % (merge_hash, message))
        cherry_pick(pr_num, merge_hash, latest_branch)
        sys.exit(0)

    if not bool(pr["mergeable"]):
        msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
            "Continue? (experts only!)"
        continue_maybe(msg)

    print("\n=== Pull Request #%s ===" % pr_num)
    print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" %
          (title, pr_repo_desc, target_ref, url))
    continue_maybe("Proceed with merging pull request #%s?" % pr_num)

    merged_refs = [target_ref]

    merge_hash = merge_pr(pr_num, target_ref, title, body, pr_repo_desc)

    # Offer to cherry-pick the merge into additional branches until declined.
    pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
    while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
        merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]

    if JIRA_IMPORTED:
        if JIRA_USERNAME and JIRA_PASSWORD:
            continue_maybe("Would you like to update an associated JIRA?")
            jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % \
                (pr_num, GITHUB_BASE, pr_num)
            resolve_jira_issues(title, merged_refs, jira_comment)
        else:
            print("JIRA_USERNAME and JIRA_PASSWORD not set")
            print("Exiting without trying to close the associated JIRA.")
    else:
        print("Could not find jira-python library. Run 'sudo pip install jira' to install.")
        print("Exiting without trying to close the associated JIRA.")
if __name__ == "__main__":
    import doctest
    # Run the module doctests first; refuse to merge anything if they fail.
    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        exit(-1)
    try:
        main()
    except:
        # Always restore the original checkout before propagating the error.
        clean_up()
        raise
| |
import pandas as pd
import pickle
import re
import matplotlib.pyplot as plt
import collections
from pprint import pprint
from daisylu_vectors import *
#
# merged here: from daisylu_new_dryrun import *
from daisylu_config import *
from daisylu_system import *
from sentences import *
import networkx as nx
import argparse
import subprocess
import threading
import time
import sys
#
# 1 merge daisylu_new_dryrun to here
# 2 daisylu_dryrun?
#
#
def addTorchNetworkResults(sents, dbrf, dbf, systemName, NoConceptThreshold = 0.5, conceptRejectionThreshold=0.0):
    """Merge one Torch network's results back into the per-sentence
    predicted dataframes (modified in place).

    :param sents: dict with a 'test' list of sentence objects, each holding
        a predictedDFrame to update
    :param dbrf: results db file name (under the daisylu results/ directory)
    :param dbf: test-vector db file name (under the daisylu data/ directory)
    :param systemName: which network produced the results: 'AMRL0',
        'AMRL0Args', 'AMRL0Nargs', 'AMRL0Attr', or 'AMRL0Ncat'
    :param NoConceptThreshold: min probability for accepting the 'O' label
    :param conceptRejectionThreshold: if > 0, demote concepts whose
        probability is at or below this value to 'O' (named entities exempt)
    """
    def logPMToProb(logPM):
        # Convert a vector of log probability masses to normalized probabilities.
        pMass = [math.exp(float(z)) for z in logPM ]
        probs = [pm/sum(pMass) for pm in pMass]
        return probs
    dbfn = getSystemPath('daisylu') + 'data/%s' % dbf
    dbrfn = getSystemPath('daisylu') + 'results/%s' % dbrf
    if (systemName == 'AMRL0'):
        # 2c. add L0 nn output to data frames
        tp, _, _, features, _ = getComparisonDFrames(dbfn, dbrfn)
        for sentIX in range(len(sents['test'])):
            sentence = sents['test'][sentIX]
            # sentIX in the db is 1-based; align with the 0-based python index.
            singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
            df = sentence.predictedDFrame
            for _, row in singleSentDF.iterrows():
                wordIX = row['wordIX']
                result = row['result']
                pVector = row['pVector']
                df.loc[df.wordIX == wordIX, 'pVectorL0'] = pVector
                lProb = np.array(floatCSVToList(pVector)[0])
                if (True):
                    # Threshold experiment results, kept for reference:
                    # With: F1 57.18, prec 59.71, recall 54.85
                    # With 0.65 threshold: F1 57.49, prec 58.09, recall 56.90
                    # Without: F1 57.35, prec 60.57, recall 54.46
                    feats = features['L0']['tokens']
                    lst = floatCSVToList(pVector)
                    logProbs, probs = normalizeLogProbs1d({0:lst[0]})
                    L0ToProb = dict(zip(feats,probs[0]))
                    # Labels sorted by descending probability.
                    sortedTuples = sorted(zip(feats,probs[0]), key=lambda x: x[1], reverse=True )
                    if sortedTuples[0][0] == 'O':
                        # Only accept 'O' (no concept) when it is confident
                        # enough; otherwise take the runner-up label.
                        if sortedTuples[0][1] >= NoConceptThreshold:
                            result = 'O'
                            prob = sortedTuples[0][1]
                        else:
                            result = sortedTuples[1][0]
                            prob = sortedTuples[1][1]
                    else:
                        result = sortedTuples[0][0]
                        prob = sortedTuples[0][1]
                if result=='UNKNOWN':
                    if not 'NEWPrediction' in df:
                        result = 'S_txPred-01' # a default
                    else:
                        result = 'S_txPred' # NEW default, was S_txPred-01
                # Optionally reject low-confidence concepts (named entities exempt).
                if (conceptRejectionThreshold > 0.0) and not (result=='txNamed'):
                    if prob <= conceptRejectionThreshold:
                        result='O'
                dsfString = listToCSV(logPMToProb(lProb))
                df.loc[df.wordIX == wordIX, 'distSG'] = dsfString
                df.loc[df.wordIX == wordIX, 'txBIOES'] = result
                df.loc[df.wordIX == wordIX, 'txBIOESProb'] = prob
            # 2d. add wikification to data frames (alternate is to run wikification first and use NER as input to L0
            # Wikification should happen here, creating the 'namedCategory'
            # and the 'wiki' attribute
            df['nameCategory'] = 'GetFromWikification'
            df['wiki'] = 'GetFromWikification'
            if not 'NEWPrediction' in df:
                predictConceptKinds(df, None)
    elif (systemName == 'AMRL0Args'):
        # Add predicted argument relations (predicate word -> argument word).
        tp, df, _, _, _ = getComparisonDFrames(dbfn, dbrfn, pVector2d=True)
        for sentIX in range(len(sents['test'])):
            sentence = sents['test'][sentIX]
            singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
            df = sentence.predictedDFrame
            argsDict = {}
            for _, row in singleSentDF.iterrows():
                pWordIX = row['pWordIX']-1 # adjust for lua
                wordIX = row['wordIX']
                result = row['result']
                pVector = row['pVector']
                if pVector:
                    # NOTE(review): this branch keys on pWordIX while the
                    # Nargs branch below uses wordIX here — confirm intended.
                    df.loc[df.wordIX == pWordIX, 'pVectorL0Args'] = pVector
                if result != 'O':
                    if not pWordIX in argsDict:
                        argsDict[pWordIX] = []
                    argsDict[pWordIX].append([wordIX, result])
            # Write each predicate's arguments as ar<i>_ix / ar<i>_arg columns.
            for pWordIX in argsDict.keys():
                for i,rel in enumerate(argsDict[pWordIX]):
                    df.loc[df.wordIX == pWordIX, 'ar%d_ix' % i] = rel[0]
                    df.loc[df.wordIX == pWordIX, 'ar%d_arg' % i] = rel[1]
    elif (systemName == 'AMRL0Nargs'):
        # 2c. add L0 nn output to data frames
        tp, df, _, _, _ = getComparisonDFrames(dbfn, dbrfn, pVector2d=True)
        for sentIX in range(len(sents['test'])):
            sentence = sents['test'][sentIX]
            singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
            df = sentence.predictedDFrame
            nargsDict = {}
            for _, row in singleSentDF.iterrows():
                pWordIX = row['pWordIX']-1 # adjust for lua
                wordIX = row['wordIX']
                result = row['result']
                pVector = row['pVector']
                if pVector:
                    df.loc[df.wordIX == wordIX, 'pVectorL0Nargs'] = pVector
                if result != 'O':
                    if not pWordIX in nargsDict:
                        nargsDict[pWordIX] = []
                    nargsDict[pWordIX].append([wordIX, result])
                    #print 'DEBUG', pWordIX, wordIX, result
            # Write non-core argument labels as nar<i>_ix / nar<i>_lbl columns.
            for pWordIX in nargsDict.keys():
                for i,rel in enumerate(nargsDict[pWordIX]):
                    df.loc[df.wordIX == pWordIX, 'nar%d_ix' % i] = rel[0]
                    df.loc[df.wordIX == pWordIX, 'nar%d_lbl' % i] = rel[1]
    elif (systemName == 'AMRL0Attr'):
        # 2c. add L0 nn output to data frames
        tp, df, _, _, _ = getComparisonDFrames(dbfn, dbrfn)
        for sentIX in range(len(sents['test'])):
            sentence = sents['test'][sentIX]
            singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
            df = sentence.predictedDFrame
            for _, row in singleSentDF.iterrows():
                pWordIX = row['pWordIX']-1 # adjust for lua
                wordIX = row['wordIX']
                result = row['result']
                pVector = row['pVector']
                df.loc[df.wordIX == wordIX, 'pVectorL0Attr'] = pVector
                # Only a single attribute slot (index 0) is written per word.
                i=0
                if result == 'polarity':
                    df.loc[df.wordIX == wordIX, 'attr%d_val' % i] = '-'
                    df.loc[df.wordIX == wordIX, 'attr%d_lbl' % i] = 'polarity'
                elif result == 'TOP':
                    df.loc[df.wordIX == wordIX, 'attr%d_val' % i] = df.loc[df.wordIX == wordIX, 'kind']
                    df.loc[df.wordIX == wordIX, 'attr%d_lbl' % i] = 'TOP'
                elif result == 'quant':
                    # 'quant' attributes are intentionally skipped for now.
                    print 'skipping quant HMM'
                    #df.loc[df.wordIX == wordIX, 'attr%d_val' % i] = 'HMM'
                    #df.loc[df.wordIX == wordIX, 'attr%d_lbl' % i] = 'quant'
    # NOTE(review): plain 'if' (not 'elif') — harmless today since the value
    # cannot match two branches, but inconsistent with the chain above.
    if (systemName == 'AMRL0Ncat'):
        # 2c. add L0 nn output to data frames
        tp, _, _, features, _ = getComparisonDFrames(dbfn, dbrfn)
        for sentIX in range(len(sents['test'])):
            sentence = sents['test'][sentIX]
            singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
            df = sentence.predictedDFrame
            for _, row in singleSentDF.iterrows():
                wordIX = row['wordIX']
                result = row['result']
                pVector = row['pVector']
                df.loc[df.wordIX == wordIX, 'pVectorNcat'] = pVector
                if (True):
                    # Threshold experiment results, kept for reference:
                    # With: F1 57.18, prec 59.71, recall 54.85
                    # With 0.65 threshold: F1 57.49, prec 58.09, recall 56.90
                    # Without: F1 57.35, prec 60.57, recall 54.46
                    feats = features['ncat']['tokens']
                    lst = floatCSVToList(pVector)
                    logProbs, probs = normalizeLogProbs1d({0:lst[0]})
                    L0ToProb = dict(zip(feats,probs[0]))
                    sortedTuples = sorted(zip(feats,probs[0]), key=lambda x: x[1], reverse=True )
                    if sortedTuples[0][0] == 'O':
                        # Hard-coded 0.95 threshold for the named-category net.
                        if sortedTuples[0][1] >= 0.95:
                            result = '-'
                            prob = sortedTuples[0][1]
                        else:
                            result = sortedTuples[1][0]
                            prob = sortedTuples[1][1]
                    else:
                        result = sortedTuples[0][0]
                        prob = sortedTuples[0][1]
                if result=='UNKNOWN':
                    result = 'person' # a default
                df.loc[df.wordIX == wordIX, 'NcatResult'] = result
                df.loc[df.wordIX == wordIX, 'NcatProb'] = prob
def filenamesForNNTag(nnTag, modelInfo, sessionTag):
    """Derive the file names used by one network stage.

    Returns (modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn).
    An integer model id maps to a '%05d_best_dev' checkpoint name; a string
    id is used verbatim. Only the last '/'-separated component of
    *sessionTag* (empty when None) prefixes the generated db names.
    """
    info = modelInfo[nnTag]
    model_id = info['id']
    modelFn = '%05d_best_dev' % model_id if isinstance(model_id, int) else model_id
    stag = (sessionTag or '').split('/')[-1]
    return (info['db'],
            modelFn,
            '%s_%stestVectors.db' % (nnTag, stag),
            '%s_%sresults.db' % (nnTag, stag))
def daisyluSystemEndToEnd(inputFn, sents=None, useNER=True, useCacheIfAvail=True, sessionTag = None,
                          modelInfo=None, debugSave=False, NoConceptThreshold=0.5, conceptRejectionThreshold=0.0,
                          NEWPrediction=False, L0OnlyFromFeaturesDB=False, useDistSG=False):
    '''
    Generate list of sentence objects with predicted dataframes from an input text file.
    :param inputFn: Text input file with "::tags" including ::snt specification separated by blank lines
    :param sents: Optional array of pre-processed sentence objects (could include golden info)
    :param useNER: Use NER output from wikification
    :param useCacheIfAvail: currently unused inside this function
    :param sessionTag: optional Prefix tag for generated files
    :param modelInfo: Optional structure defining saved nn models and vector/architecture files
    :param debugSave: pickle the sentence list after each network stage
    :param NoConceptThreshold: probability threshold for accepting the 'O' label
    :param conceptRejectionThreshold: demote concepts at or below this probability
    :param NEWPrediction: use the newer concept-default prediction scheme
    :param L0OnlyFromFeaturesDB: build follow-on vectors using only L0 features from the db
    :param useDistSG: include the SG probability distribution as a feature
    :returns: (sents, wordDF) — the updated sentence dict and the word dataframe
    '''
    #if NEWPrediction:
    #    keepSense=False
    #else:
    keepSense=True
    modelDBFn=''
    pid=0
    # 1. create a standard data frame with one row per word and add to each sentence
    if not sents:
        sentsRaw = {'test':[]}
        sentsRaw['test'], _ = readAllAMR(inputFn)
        sents = sentsRaw
    ixList = range(len(sents['test']))
    initializePredictionDataFrames(sents, ixList, NEWPrediction=NEWPrediction)

    # 2a. create vector db from the sentences
    if (useNER):
        nnTag = 'AMRL0'
    else:
        nnTag = 'AMRL0NoNER'
    (modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
    wordDF = createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense)

    # 2b. run SG neural net
    runNetwork('SG',testVectorDBFn, modelFn, resultsDBFn)
    # 2c. add SG nn output to data frames
    addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, 'AMRL0',
                           NoConceptThreshold = NoConceptThreshold,
                           conceptRejectionThreshold = conceptRejectionThreshold)
    if debugSave: pickle.dump( sents, open( 'e2eDebug2.pcl', "wb" ) )

    # Optional follow-on networks; each stage vectorizes the current
    # predictions, runs its net, and merges the results back in.
    nnTag = 'AMRL0Ncat'
    if nnTag in modelInfo: # named-category network is optional
        (modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
        createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense,
                                    L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG)
        runNetwork('Cat',testVectorDBFn, modelFn, resultsDBFn)
        addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, nnTag)
        if debugSave: pickle.dump( sents, open( 'e2eDebugNcat.pcl', "wb" ) )

    nnTag = 'AMRL0Args'
    if nnTag in modelInfo: # argument network is optional
        (modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
        createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense,
                                    L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG)
        runNetwork('Args',testVectorDBFn, modelFn, resultsDBFn)
        addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, nnTag)
        if debugSave: pickle.dump( sents, open( 'e2eDebug3.pcl', "wb" ) )

    nnTag = 'AMRL0Nargs'
    if nnTag in modelInfo: # non-core-argument network is optional
        (modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
        createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense,
                                    L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG)
        runNetwork('Nargs',testVectorDBFn, modelFn, resultsDBFn)
        addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, nnTag)
        if debugSave: pickle.dump( sents, open( 'e2eDebug4.pcl', "wb" ) )

    nnTag = 'AMRL0Attr'
    if nnTag in modelInfo: # attribute network is optional
        (modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
        createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense,
                                    L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG)
        runNetwork('Attr',testVectorDBFn, modelFn, resultsDBFn)
        addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, nnTag)
        if debugSave: pickle.dump( sents, open( 'e2eDebug5.pcl', "wb" ) )
    return sents, wordDF
def alignedInputDryrunFlow(amrSents, outFn, sessionTag,
                           modelInfo=None,
                           useCacheIfAvail=True,
                           useNER=False, debugSave=False, checkResults=False,
                           NoConceptThreshold=0.65,
                           conceptRejectionThreshold=0.0,
                           forceSubGroupConnectionThreshold=0.35,
                           NEWPrediction=False,
                           L0OnlyFromFeaturesDB=False,
                           useDistSG=False): # instead of just ::snt, read from alignments amr, try to use the same sentence boundaries in multi-sent
    """Run the full end-to-end system on pre-aligned AMR sentences and write
    the predicted AMR output to *outFn*.

    Optionally pickles the sentence structures when *checkResults* is set;
    returns the (updated) sentence dict.
    """
    sents, wordDF = daisyluSystemEndToEnd(None, sents=amrSents, useNER=useNER,
                                          sessionTag = sessionTag,
                                          modelInfo=modelInfo,
                                          useCacheIfAvail= useCacheIfAvail,
                                          conceptRejectionThreshold=conceptRejectionThreshold,
                                          debugSave=debugSave,
                                          NoConceptThreshold=NoConceptThreshold,
                                          NEWPrediction=NEWPrediction,
                                          L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB,
                                          useDistSG=useDistSG )
    # Trace each sentence (id, multi-sentence index, tokens) for inspection.
    for i in range(len(sents['test'])):
        s = sents['test'][i]
        print i, s.source['metadata']['id'], s.multiSentIX, s.tokens
    createOutputTextFile(sents, outFn, modelInfo=modelInfo,
                         forceSubGroupConnectionThreshold=forceSubGroupConnectionThreshold)
    if checkResults:
        pickle.dump( sents, open( sessionTag + outFn + '_2.pcl', "wb" ) )
    return sents
def removeWikiAttrs(sents):
    """Strip the 'wiki' attribute from every node of each sentence graph.

    Used when producing output for corpora (e.g. LDC2014) that should not
    contain wikification links. Graphs are modified in place; returns *sents*.
    """
    for i,sentence in enumerate(sents['test']):
        G = sentence.singleComponentGraph['graph']
        for lbl in G.nodes():
            # Under Python 2, .keys() materializes a list, so deleting
            # entries inside the loop is safe here.
            for a in G.node[lbl]['attributes'].keys():
                if (a == 'wiki'):
                    print 'removing wiki', G.node[lbl]['attributes'][a]
                    del G.node[lbl]['attributes'][a]
    return sents
if __name__ == '__main__':
    desc = """
    python daisylu_main.py
    """
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-a','--aligned', help='aligned input', action='store_true', default=True)
    parser.add_argument('-i','--infile', help='input file name', required=False, default='TINY_amr-bank-struct-v1.6-test.txt')
    parser.add_argument('-o','--outfile', help='output file name', required=False, default='TINY_amr-bank-struct-v1.6-test.amr')
    parser.add_argument('-g','--goldfile', help='gold file name', required=False, default='TINY_amr-bank-struct-v1.6-test')
    parser.add_argument('-t','--tag', help='results and temp file tag', required=False, default='tmp_')
    parser.add_argument('-m','--modelString', help='modelString, like REFERENCE_MODELS', required=False, default='REFERENCE_MODELS')
    parser.add_argument('-pid','--pid', help='pid for AWS', required=False, default=-1, type=int)
    parser.add_argument('-nct','--noConceptThreshold', help='no Concept Threshold prob', required=False, default=0.65, type=float)
    parser.add_argument('-sgt','--subGroupThreshold', help='sub Group Threshold prob', required=False, default=0.55, type=float)
    parser.add_argument('-noWiki','--noWiki', help='remove wiki references (LDC2014)', action='store_true', default=False )
    args = vars(parser.parse_args())
    pprint (args)

    # Initialize word-embedding locations and widen pandas display limits.
    WordRepsFileLocations.init('../data/WORD_LIST.txt')
    pd.set_option('display.width', 1000)
    pd.set_option('display.max_rows', 2000)
    useDistSG=True

    # Reference model checkpoints and the databases used to create their vectors.
    mi = {}
    mi['AMRL0NoNER'] = { 'id': 0, 'db': 'None' }
    mi['AMRL0'] = { 'id': './models/SG.model@./models/SG.weights' , 'db': 'LDC15_G300ML_Concepts.db' }
    mi['AMRL0Args'] = { 'id': './models/Args.model@./models/Args.weights' , 'db': 'LDC15_G300ML_SG_prob_Args.db' }
    mi['AMRL0Nargs'] = { 'id': './models/Nargs.model@./models/Nargs.weights' , 'db': 'LDC15_G300ML_SG_prob_Nargs.db' }
    mi['AMRL0Attr'] = { 'id': './models/Attr.model@./models/Attr.weights' , 'db': 'LDC15_G300ML_SG_prob_Attr.db' }
    mi['AMRL0Ncat'] = { 'id': './models/Ncat.model@./models/Ncat.weights' , 'db': 'LDC15_G300ML_SG_prob_Cat.db' }
    modelInfoDict = {'REFERENCE_MODELS': mi}

    outfile1 = args['outfile']
    outfile2 = 'corrected-' + outfile1
    sList={}
    sList['test'], _ = readAllAMR(args['infile'])
    sents = alignedInputDryrunFlow(sList, outfile1,
                                   args['tag'],
                                   useNER=True,
                                   modelInfo = modelInfoDict[args['modelString']],
                                   conceptRejectionThreshold=0.20, # <------------------------ New
                                   NoConceptThreshold=args['noConceptThreshold'],
                                   forceSubGroupConnectionThreshold=args['subGroupThreshold'],
                                   NEWPrediction=True,
                                   useDistSG=useDistSG )

    # Post-processing passes over the predicted graphs before final output.
    forceICorefs(sents)
    removeQuantHMMAttrs(sents)
    translateCountryCat(sents)
    if args['noWiki']:
        removeWikiAttrs(sents)
    createOutputTextFile(sents, outfile2, modelInfo=modelInfoDict[args['modelString']], forceSubGroupConnectionThreshold=args['subGroupThreshold'] )

    if args['goldfile']:
        # Score the corrected output against the gold file with smatch.
        cmd = getSystemPath('smatchCommand') + ' -r 25 -f %s %s' % ( args['goldfile'], outfile2)
        print cmd
        res = subprocess.check_output(cmd, shell=True)
        print 'result is ', res
    print 'Done'
    # NOTE(review): exits with status 1 even on success — confirm intended.
    exit(1)
| |
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <denis-alexander.engemann@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import np_version
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
    # density() must agree between a dense array and any sparse layout of it.
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 5))
    X[1, 2] = 0
    X[5, 3] = 0
    for to_sparse in (sparse.csr_matrix, sparse.csc_matrix,
                      sparse.coo_matrix, sparse.lil_matrix):
        assert_equal(density(to_sparse(X)), density(X))
def test_uniform_weights():
    # With all-ones weights, weighted_mode must reduce to stats.mode.
    rng = np.random.RandomState(0)
    x = rng.randint(10, size=(10, 5))
    uniform = np.ones(x.shape)
    for axis in (None, 0, 1):
        expected_mode, expected_score = stats.mode(x, axis)
        observed_mode, observed_score = weighted_mode(x, uniform, axis)
        assert_array_equal(expected_mode, observed_mode)
        assert_array_equal(expected_score, observed_score)
def test_random_weights():
    # Force every row's weighted mode to be `mode_result` by boosting the
    # weights of the first five columns; the expected score is then simply
    # the sum of those boosted weights.
    mode_result = 6
    rng = np.random.RandomState(0)
    x = rng.randint(mode_result, size=(100, 10))
    w = rng.random_sample(x.shape)
    x[:, :5] = mode_result
    w[:, :5] += 1

    observed_mode, observed_score = weighted_mode(x, w, axis=1)

    assert_array_equal(observed_mode, mode_result)
    assert_array_almost_equal(observed_score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
    # Adding many smallish numbers in logspace must recover the linear sum.
    values = np.array([1e-40] * 1000000)
    log_values = np.log(values)
    assert_almost_equal(np.exp(logsumexp(log_values)), values.sum())

    stacked = np.vstack([values, values])
    log_stacked = np.vstack([log_values, log_values])
    for axis in (0, 1):
        assert_array_almost_equal(np.exp(logsumexp(log_stacked, axis=axis)),
                                  stacked.sum(axis=axis))
def test_randomized_svd_low_rank():
    # Check that extmath.randomized_svd is consistent with linalg.svd
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10

    # generate a matrix X of approximate effective rank `rank` and no noise
    # component (very structured signal):
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))

    # compute the singular values of X using the slow exact method
    U, s, V = linalg.svd(X, full_matrices=False)

    for normalizer in ['auto', 'LU', 'QR']:  # 'none' would not be stable
        # compute the singular values of X using the fast approximate method
        Ua, sa, Va = \
            randomized_svd(X, k, power_iteration_normalizer=normalizer,
                           random_state=0)
        assert_equal(Ua.shape, (n_samples, k))
        assert_equal(sa.shape, (k,))
        assert_equal(Va.shape, (k, n_features))

        # ensure that the singular values of both methods are equal up to the
        # real rank of the matrix
        assert_almost_equal(s[:k], sa)

        # check the singular vectors too (while not checking the sign)
        assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))

        # check the sparse matrix representation
        # NOTE(review): X is rebound to a CSR matrix here, so iterations
        # after the first run the dense checks on sparse input — confirm
        # this is intended.
        X = sparse.csr_matrix(X)

        # compute the singular values of X using the fast approximate method
        Ua, sa, Va = \
            randomized_svd(X, k, power_iteration_normalizer=normalizer,
                           random_state=0)
        assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
    X = np.random.RandomState(42).randn(50, 63)
    X *= 100  # check stability
    X += 200

    # norm/squared_norm must agree with the numpy reference implementations.
    assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
    assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
    assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)

    # Check the warning with an int array and np.dot potential overflow
    assert_warns_message(
        UserWarning, 'Array type is integer, np.dot may '
        'overflow. Data should be float type to avoid this issue',
        squared_norm, X.astype(int))
def test_row_norms():
    """row_norms matches the explicit row-wise norm for dense and CSR input.

    Uses a fresh variable for the cast: the previous version rebound ``X``
    itself, so the float64 iteration operated on data that had already been
    truncated to float32 precision by the first iteration.
    """
    X = np.random.RandomState(42).randn(100, 100)
    for dtype in (np.float32, np.float64):
        if dtype is np.float32:
            precision = 4
        else:
            precision = 5
        Xd = X.astype(dtype)
        sq_norm = (Xd ** 2).sum(axis=1)
        assert_array_almost_equal(sq_norm, row_norms(Xd, squared=True),
                                  precision)
        assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xd), precision)
        Xcsr = sparse.csr_matrix(Xd, dtype=dtype)
        assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
                                  precision)
        assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr),
                                  precision)
def test_randomized_svd_low_rank_with_noise():
    """randomized_svd needs power iterations to cope with a noisy matrix."""
    n_samples, n_features = 100, 500
    rank, k = 5, 10
    # generate a matrix X with structured approximate rank `rank` and an
    # important noisy component
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.1,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # reference singular values from the exact (slow) decomposition
    _, s, _ = linalg.svd(X, full_matrices=False)
    for normalizer in ['auto', 'none', 'LU', 'QR']:
        # without power iterations the approximation is thrown off by noise
        _, sa, _ = randomized_svd(X, k, n_iter=0,
                                  power_iteration_normalizer=normalizer,
                                  random_state=0)
        assert_greater(np.abs(s[:k] - sa).max(), 0.01)
        # the (default) iterated power method filters the noise back out
        _, sap, _ = randomized_svd(X, k,
                                   power_iteration_normalizer=normalizer,
                                   random_state=0)
        assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
    # Check that extmath.randomized_svd can handle matrices whose spectrum
    # decays slowly (no low-rank structure at all).
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10
    # let us try again without 'low_rank component': just regularly but slowly
    # decreasing singular values: the rank of the data matrix is infinite
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=1.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)
    for normalizer in ['auto', 'none', 'LU', 'QR']:
        # compute the singular values of X using the fast approximate method
        # without the iterated power method.  Seed the random projection so
        # the test is deterministic (sibling tests already do this).
        _, sa, _ = randomized_svd(X, k, n_iter=0,
                                  power_iteration_normalizer=normalizer,
                                  random_state=0)
        # the approximation does not tolerate the noise:
        assert_greater(np.abs(s[:k] - sa).max(), 0.1)
        # compute the singular values of X using the fast approximate method
        # with iterated power method
        _, sap, _ = randomized_svd(X, k, n_iter=5,
                                   power_iteration_normalizer=normalizer,
                                   random_state=0)
        # the iterated power method is still managing to get most of the
        # structure at the requested rank
        assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
    n_samples = 100
    n_features = 500
    rank = 4
    k = 10
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # Run the approximate decomposition with the three transpose modes and
    # the exact SVD as reference.
    U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
                                random_state=0)
    U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
                                random_state=0)
    U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
                                random_state=0)
    U4, s4, V4 = linalg.svd(X, full_matrices=False)
    # All modes must recover the leading singular values ...
    assert_almost_equal(s1, s4[:k], decimal=3)
    assert_almost_equal(s2, s4[:k], decimal=3)
    assert_almost_equal(s3, s4[:k], decimal=3)
    # ... and the rank-k reconstruction (sign ambiguity cancels in the
    # product U @ V).
    assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)
    assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)
    # in this case 'auto' is equivalent to transpose (n_samples < n_features),
    # so the singular values must match exactly
    assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
    # randomized_svd with power_iteration_normalized='none' diverges for
    # large number of power iterations on this dataset
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
    X += 3 * rng.randint(0, 2, size=X.shape)
    n_components = 50
    # Check that it diverges with many (non-normalized) power iterations.
    # Seed both runs so the divergence check is deterministic instead of
    # depending on the global RNG state (the normalized runs below are
    # already seeded).
    U, s, V = randomized_svd(X, n_components, n_iter=2,
                             power_iteration_normalizer='none',
                             random_state=0)
    A = X - U.dot(np.diag(s).dot(V))
    error_2 = linalg.norm(A, ord='fro')
    U, s, V = randomized_svd(X, n_components, n_iter=20,
                             power_iteration_normalizer='none',
                             random_state=0)
    A = X - U.dot(np.diag(s).dot(V))
    error_20 = linalg.norm(A, ord='fro')
    assert_greater(np.abs(error_2 - error_20), 100)
    for normalizer in ['LU', 'QR', 'auto']:
        U, s, V = randomized_svd(X, n_components, n_iter=2,
                                 power_iteration_normalizer=normalizer,
                                 random_state=0)
        A = X - U.dot(np.diag(s).dot(V))
        error_2 = linalg.norm(A, ord='fro')
        for i in [5, 10, 50]:
            U, s, V = randomized_svd(X, n_components, n_iter=i,
                                     power_iteration_normalizer=normalizer,
                                     random_state=0)
            A = X - U.dot(np.diag(s).dot(V))
            error = linalg.norm(A, ord='fro')
            # with a normalizer, more power iterations stay stable
            assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
    """svd_flip preserves the reconstruction under either flip convention."""
    rs = np.random.RandomState(1999)
    n_samples, n_features = 20, 10
    X = rs.randn(n_samples, n_features)
    # v-based flip of the plain decomposition still reconstructs X
    U, S, V = linalg.svd(X, full_matrices=False)
    U1, V1 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
    # u-based flip of the transposed decomposition still reconstructs X.T
    XT = X.T
    U, S, V = linalg.svd(XT, full_matrices=False)
    U2, V2 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
    # both conventions are equivalent as far as reconstruction goes
    for u_based in (True, False):
        U_flip, V_flip = svd_flip(U, V, u_based_decision=u_based)
        assert_almost_equal(np.dot(U_flip * S, V_flip), XT, decimal=6)
def test_randomized_svd_sign_flip():
    """flip_sign makes randomized_svd reproducible across random seeds."""
    a = np.array([[2.0, 0.0], [0.0, 1.0]])
    u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
    identity = np.eye(2)
    for seed in range(10):
        u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
        # identical factors for every seed ...
        assert_almost_equal(u1, u2)
        assert_almost_equal(v1, v2)
        # ... that reconstruct the input with orthonormal u and v
        assert_almost_equal(np.dot(u2 * s2, v2), a)
        assert_almost_equal(np.dot(u2.T, u2), identity)
        assert_almost_equal(np.dot(v2.T, v2), identity)
def test_randomized_svd_sign_flip_with_transpose():
    # Check if the randomized_svd sign flipping is always done based on u
    # irrespective of transpose.
    # See https://github.com/scikit-learn/scikit-learn/issues/5608
    # for more details.
    def max_loading_is_positive(u, v):
        """
        returns bool tuple indicating if the values maximising np.abs
        are positive across all rows for u and across all columns for v.
        """
        u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
        v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
        return u_based, v_based
    mat = np.arange(10 * 8).reshape(10, -1)
    # Without transpose: the u-based convention must hold, and (for this
    # matrix) the v-based one must not.
    u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
    u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
    assert_true(u_based)
    assert_false(v_based)
    # With transpose: same expectation, proving the flip is decided on u
    # regardless of the internal transposition.
    u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
        mat, 3, flip_sign=True, transpose=True)
    u_based, v_based = max_loading_is_positive(
        u_flipped_with_transpose, v_flipped_with_transpose)
    assert_true(u_based)
    assert_false(v_based)
def test_cartesian():
    """cartesian() enumerates the product of the axes in row-major order."""
    axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
    # Build the expected output with nested iteration: the last axis
    # varies fastest, matching row-major (C) ordering.
    true_out = np.array([[a, b, c]
                         for a in (1, 2, 3)
                         for b in (4, 5)
                         for c in (6, 7)])
    out = cartesian(axes)
    assert_array_equal(true_out, out)
    # a single axis is returned as a column vector
    x = np.arange(3)
    assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
    """log_logistic agrees with the direct formula and saturates safely."""
    def naive_log_logistic(x):
        return np.log(1 / (1 + np.exp(-x)))
    # moderate inputs: match the naive (overflow-prone) computation
    inputs = np.linspace(-2, 2, 50)
    assert_array_almost_equal(log_logistic(inputs), naive_log_logistic(inputs))
    # extreme inputs must not overflow: log-sigmoid tends to x for x << 0
    # and to 0 for x >> 0
    extreme_x = np.array([-100., 100.])
    assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
    # Check fast dot blas wrapper function
    # If the wrapper degraded to plain np.dot (no usable BLAS at import
    # time) there is nothing to test.
    if fast_dot is np.dot:
        return
    rng = np.random.RandomState(42)
    A = rng.random_sample([2, 10])
    B = rng.random_sample([2, 10])
    # Detect whether a gemm BLAS routine is actually reachable; the
    # invalid-input checks below only apply when it is.
    try:
        linalg.get_blas_funcs(['gemm'])[0]
        has_blas = True
    except (AttributeError, ValueError):
        has_blas = False
    if has_blas:
        # Test _fast_dot for invalid input.
        # Maltyped data: mixed precision and integer dtypes are rejected.
        for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
            assert_raises(ValueError, _fast_dot, A.astype(dt1),
                          B.astype(dt2).T)
        # Malformed data.
        # ndim == 0
        E = np.empty(0)
        assert_raises(ValueError, _fast_dot, E, E)
        # ndim == 1
        assert_raises(ValueError, _fast_dot, A, A[0])
        # ndim > 2
        assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
        # min(shape) == 1
        assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
        # test for matrix mismatch error (2x10 @ 2x10 is not conformable)
        assert_raises(ValueError, _fast_dot, A, A)
    # Test cov-like use case + dtypes.
    # NOTE: A/B are rebound per dtype; 'f4' follows 'f8' so no precision is
    # lost by the successive casts here.
    for dtype in ['f8', 'f4']:
        A = A.astype(dtype)
        B = B.astype(dtype)
        # col < row
        C = np.dot(A.T, A)
        C_ = fast_dot(A.T, A)
        assert_almost_equal(C, C_, decimal=5)
        C = np.dot(A.T, B)
        C_ = fast_dot(A.T, B)
        assert_almost_equal(C, C_, decimal=5)
        C = np.dot(A, B.T)
        C_ = fast_dot(A, B.T)
        assert_almost_equal(C, C_, decimal=5)
    # Test square matrix * rectangular use case.
    A = rng.random_sample([2, 2])
    for dtype in ['f8', 'f4']:
        A = A.astype(dtype)
        B = B.astype(dtype)
        C = np.dot(A, B)
        C_ = fast_dot(A, B)
        assert_almost_equal(C, C_, decimal=5)
        C = np.dot(A.T, B)
        C_ = fast_dot(A.T, B)
        assert_almost_equal(C, C_, decimal=5)
    # Non-finite values must be rejected rather than silently propagated.
    if has_blas:
        for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
            assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
    """_incremental_mean_and_var matches batch mean/var on a known dataset."""
    # Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
    column = [600, 470, 170, 430, 300]
    A = np.array([column] * 4).T
    # Seed the accumulator with the first two rows, then fold in the rest.
    split = 2
    head = A[:split, :]
    tail = A[split:, :]
    final_means, final_variances, final_count = \
        _incremental_mean_and_var(tail, head.mean(axis=0), head.var(axis=0),
                                  head.shape[0])
    assert_almost_equal(final_means, A.mean(axis=0), 6)
    assert_almost_equal(final_variances, A.var(axis=0), 6)
    assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
    # Test Youngs and Cramer incremental variance formulas.
    def np_var(A):
        # numpy's batch variance; on some old numpy versions this is itself
        # unstable (checked below before using it as the reference)
        return A.var(axis=0)
    # Naive one pass variance computation - not numerically stable
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    def one_pass_var(X):
        n = X.shape[0]
        exp_x2 = (X ** 2).sum(axis=0) / n
        expx_2 = (X.sum(axis=0) / n) ** 2
        return exp_x2 - expx_2
    # Two-pass algorithm, stable.
    # We use it as a benchmark. It is not an online algorithm
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    def two_pass_var(X):
        mean = X.mean(axis=0)
        Y = X.copy()
        return np.mean((Y - mean)**2, axis=0)
    # Naive online implementation
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks for size 1
    def naive_mean_variance_update(x, last_mean, last_variance,
                                   last_sample_count):
        updated_sample_count = (last_sample_count + 1)
        samples_ratio = last_sample_count / float(updated_sample_count)
        updated_mean = x / updated_sample_count + last_mean * samples_ratio
        updated_variance = last_variance * samples_ratio + \
            (x - last_mean) * (x - updated_mean) / updated_sample_count
        return updated_mean, updated_variance, updated_sample_count
    # We want to show a case when one_pass_var has error > 1e-3 while
    # _batch_mean_variance_update has less.
    tol = 200
    n_features = 2
    n_samples = 10000
    # Two constant blocks with wildly different magnitudes (1e8 vs log(1e-5))
    # to stress cancellation in the naive formulas.
    x1 = np.array(1e8, dtype=np.float64)
    x2 = np.log(1e-5, dtype=np.float64)
    A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
    A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
    A = np.vstack((A0, A1))
    # Older versions of numpy have different precision
    # In some old version, np.var is not stable
    if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
        stable_var = np_var
    else:
        stable_var = two_pass_var
    # Naive one pass var: >tol (=1063)
    assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
    # Starting point for online algorithms: after A0
    # Naive implementation: >tol (436)
    mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
    for i in range(A1.shape[0]):
        mean, var, n = \
            naive_mean_variance_update(A1[i, :], mean, var, n)
    assert_equal(n, A.shape[0])
    # the mean is also slightly unstable
    assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
    assert_greater(np.abs(stable_var(A) - var).max(), tol)
    # Robust implementation: <tol (177)
    mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
    for i in range(A1.shape[0]):
        mean, var, n = \
            _incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
                                      mean, var, n)
    assert_equal(n, A.shape[0])
    assert_array_almost_equal(A.mean(axis=0), mean)
    assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
    # Test that degrees of freedom parameter for calculations are correct.
    rng = np.random.RandomState(1999)
    X = rng.randn(50, 10)
    n_samples, n_features = X.shape
    # Try batch sizes that divide the data evenly and ones that leave a
    # short trailing batch.
    for batch_size in [11, 20, 37]:
        steps = np.arange(0, X.shape[0], batch_size)
        if steps[-1] != X.shape[0]:
            steps = np.hstack([steps, n_samples])
        for i, j in zip(steps[:-1], steps[1:]):
            batch = X[i:j, :]
            if i == 0:
                # first batch seeds the accumulator directly
                incremental_means = batch.mean(axis=0)
                incremental_variances = batch.var(axis=0)
                # Assign this twice so that the test logic is consistent
                incremental_count = batch.shape[0]
                sample_count = batch.shape[0]
            else:
                # later batches are folded in incrementally
                result = _incremental_mean_and_var(
                    batch, incremental_means, incremental_variances,
                    sample_count)
                (incremental_means, incremental_variances,
                 incremental_count) = result
                sample_count += batch.shape[0]
            # after each batch the accumulator must agree with a batch
            # computation over everything seen so far
            calculated_means = np.mean(X[:j], axis=0)
            calculated_variances = np.var(X[:j], axis=0)
            assert_almost_equal(incremental_means, calculated_means, 6)
            assert_almost_equal(incremental_variances,
                                calculated_variances, 6)
            assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
    """Each row's largest-magnitude entry is made positive by the flip."""
    data = np.random.RandomState(36).randn(5, 5)
    flipped = _deterministic_vector_sign_flip(data)
    # the dominant entry keeps its position and becomes the row maximum
    dominant = np.argmax(np.abs(data), axis=1)
    assert_array_equal(dominant, np.argmax(flipped, axis=1))
    # each row was multiplied by the sign of its dominant entry
    row_signs = np.sign(data[np.arange(data.shape[0]), dominant])
    assert_array_equal(data, flipped * row_signs[:, np.newaxis])
def test_softmax():
    """softmax rows equal exp(x) normalized by the row-wise sum."""
    rng = np.random.RandomState(0)
    X = rng.randn(3, 5)
    expected = np.exp(X)
    expected = expected / expected.sum(axis=1, keepdims=True)
    assert_array_almost_equal(softmax(X), expected)
def test_stable_cumsum():
    """stable_cumsum matches np.cumsum and warns when accumulation is lossy."""
    if np_version < (1, 9):
        raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
    assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
    r = np.random.RandomState(0).rand(100000)
    # with zero tolerances the cumulative sum of 1e5 uniforms cannot meet
    # the accuracy check, so the instability warning must fire
    assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
    # test axis parameter
    A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
    assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
    assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
    assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
| |
'''
@author: Dallas Fraser
@date: 2016-04-12
@organization: MLSB API
@summary: The basic team API
'''
from flask_restful import Resource, reqparse
from flask import Response, request
from json import dumps
from datetime import date
from api import DB
from api.model import Team
from api.authentication import requires_admin
from api.errors import TeamDoesNotExist
from api.variables import PAGE_SIZE
from api.routes import Routes
from api.helper import pagination_response
from api.cached_items import handle_table_change
from api.tables import Tables
# Argument parser for PUT requests: every field is optional so a caller
# may update any subset of a team's attributes.
parser = reqparse.RequestParser()
parser.add_argument('sponsor_id', type=int)
parser.add_argument('color', type=str)
parser.add_argument('league_id', type=int)
parser.add_argument('year', type=int)
# Argument parser for POST requests: all fields required;
# bundle_errors=True collects every validation failure into one response.
post_parser = reqparse.RequestParser(bundle_errors=True)
post_parser.add_argument('sponsor_id', type=int, required=True)
post_parser.add_argument('color', type=str, required=True)
post_parser.add_argument('league_id', type=int, required=True)
post_parser.add_argument('year', type=int, required=True)
class TeamAPI(Resource):
    """Resource for a single team: GET/DELETE/PUT on Routes['team']/<team_id>."""
    def get(self, team_id):
        """
        GET request for Team Object matching given team_id
        Route: Routes['team']/<team_id:int>
        Returns:
            if found
                status: 200
                mimetype: application/json
                data:
                    {
                       'team_id':  int,
                       'team_name': string,
                       'color': string,
                       'sponsor_id': int,
                       'league_id': int,
                       'year': int,
                       'espys': int,
                       'captain': string
                    }
            otherwise
                status: 404
                mimetype: application/json
                data:
                    None
        """
        # expose a single team
        entry = Team.query.get(team_id)
        if entry is None:
            # surfaced by the API error handler as a 404 response
            raise TeamDoesNotExist(payload={'details': team_id})
        response = Response(dumps(entry.json()), status=200,
                            mimetype="application/json")
        return response
    @requires_admin
    def delete(self, team_id):
        """
        DELETE request for Team
        Route: Routes['team']/<team_id:int>
        Returns:
            if found
                status: 200
                mimetype: application/json
                data: None
            otherwise
                status: 404
                mimetype: application/json
                data: None
        """
        team = Team.query.get(team_id)
        if team is None:
            raise TeamDoesNotExist(payload={'details': team_id})
        # delete a single team
        # snapshot the serialized team before deletion so the cache
        # invalidation below still has the data
        team_json = team.json()
        DB.session.delete(team)
        DB.session.commit()
        response = Response(dumps(None), status=200,
                            mimetype="application/json")
        # invalidate/refresh cached team listings
        handle_table_change(Tables.TEAM, item=team_json)
        return response
    @requires_admin
    def put(self, team_id):
        """
        PUT request for team
        Route: Routes['team']/<team_id:int>
        Parameters :
            team_id: The team's id (int)
            team_name: The team's name (string)
            sponsor_id: The sponsor's id (int)
            league_id: The league's id (int)
            color: the color of the team (string)
            year: the year of the team (int)
            espys: the total espys points of the team (int)
        Returns:
            if found and updated successfully
                status: 200
                mimetype: application/json
                data: None
            otherwise possible errors are
                status: 404, IFSC, LDNESC, PDNESC or SDNESC
                mimetype: application/json
                data: None
        """
        # update a single team; fields left out of the request stay None
        # and are passed through to Team.update unchanged
        team = Team.query.get(team_id)
        args = parser.parse_args()
        color = None
        sponsor_id = None
        league_id = None
        year = None
        if team is None:
            raise TeamDoesNotExist(payload={'details': team_id})
        if args['color']:
            color = args['color']
        if args['sponsor_id']:
            sponsor_id = args['sponsor_id']
        if args['league_id']:
            league_id = args['league_id']
        if args['year']:
            year = args['year']
        team.update(color=color,
                    sponsor_id=sponsor_id,
                    league_id=league_id,
                    year=year
                    )
        DB.session.commit()
        response = Response(dumps(None), status=200,
                            mimetype="application/json")
        handle_table_change(Tables.TEAM, item=team.json())
        return response
    # NOTE(review): Flask-RESTful dispatches HTTP OPTIONS to a method named
    # 'options'; this 'option' handler is likely never invoked by the
    # framework -- confirm.  Also the Allow header ('PUT') disagrees with
    # Access-Control-Allow-Methods ('PUT,GET').
    def option(self):
        return {'Allow': 'PUT'}, 200, \
               {'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Methods': 'PUT,GET'}
class TeamListAPI(Resource):
    """Resource for the team collection: paginated GET and admin POST."""
    def get(self):
        """
        GET request for Teams List
        Route: Routes['team']
        Parameters :
        Returns:
            status: 200
            mimetype: application/json
            data:
                teams: [  {
                            'team_id':  int,
                            'team_name': string,
                            'color': string,
                            'sponsor_id': int,
                            'league_id': int,
                            'year': int,
                            'espys': int,
                            'captain': string
                          }
                            ,{...}
                       ]
        """
        # return a pagination of teams
        # page number comes from the query string, defaulting to the first
        page = request.args.get('page', 1, type=int)
        pagination = Team.query.paginate(page, PAGE_SIZE, False)
        result = pagination_response(pagination, Routes['team'])
        resp = Response(dumps(result), status=200,
                        mimetype="application/json")
        return resp
    @requires_admin
    def post(self):
        """
        POST request for Teams List
        Route: Routes['team']
        Parameters :
            league_id: the league's id (int)
            sponsor_id: the sponsor's id (int)
            color: the color of the team (string)
            year: the year the team is playing in (int)
            espys: the team espys points (int)
        Returns:
            if successful
                status: 201
                mimetype: application/json
                data: the create team id (int)
            possible errors
                status: 400, IFSC, LDNESC, PDNESC or SDNESC
                mimetype: application/json
                data: the create team id (int)
        """
        # create a new team; year defaults to the current calendar year
        # when not supplied (post_parser marks it required, so in practice
        # the argument is always present)
        args = post_parser.parse_args()
        color = None
        sponsor_id = None
        league_id = None
        year = date.today().year
        if args['color']:
            color = args['color']
        if args['sponsor_id']:
            sponsor_id = args['sponsor_id']
        if args['league_id']:
            league_id = args['league_id']
        if args['year']:
            year = args['year']
        t = Team(color=color,
                 sponsor_id=sponsor_id,
                 league_id=league_id,
                 year=year)
        DB.session.add(t)
        DB.session.commit()
        # id is only available after the commit assigns the primary key
        result = t.id
        handle_table_change(Tables.TEAM, item=t.json())
        return Response(dumps(result), status=201, mimetype="application/json")
    # NOTE(review): Flask-RESTful dispatches HTTP OPTIONS to a method named
    # 'options'; this 'option' handler is likely never invoked by the
    # framework -- confirm.  The Allow header also disagrees with
    # Access-Control-Allow-Methods.
    def option(self):
        return {'Allow': 'PUT'}, 200, \
               {'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Methods': 'PUT,GET'}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.