gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""Extension to execute code outside the Python shell window.
This adds the following commands:
- Check module does a full syntax check of the current module.
It also runs the tabnanny to catch any inconsistent tabs.
- Run module executes the module's code in the __main__ namespace. The window
must have been saved previously. The module is added to sys.modules, and is
also added to the __main__ namespace.
XXX GvR Redesign this interface (yet again) as follows:
- Present a dialog box for ``Run Module''
- Allow specify command line arguments in the dialog box
"""
import os
import re
import string
import tabnanny
import tokenize
import tkMessageBox
import PyShell
from configHandler import idleConf
# Characters that may appear in a Python identifier; used by
# colorize_syntax_error() to extend the ERROR tag back to the start of the
# word under the reported syntax-error position.
IDENTCHARS = string.ascii_letters + string.digits + "_"

# User-facing explanation shown when tabnanny reports mixed tabs/spaces.
indent_message = """Error: Inconsistent indentation detected!
This means that either:
1) your indentation is outright incorrect (easy to fix), or
2) your indentation mixes tabs and spaces in a way that depends on \
how many spaces a tab is worth.
To fix case 2, change all tabs to spaces by using Select All followed \
by Untabify Region (both in the Edit menu)."""
class ScriptBinding:
    """IDLE extension binding Check Module / Run Module to an editor window.

    Check Module runs tabnanny and a full compile of the buffer; Run Module
    additionally executes the compiled code in the shell's __main__.
    """

    menudefs = [
        ('run', [None,
                 ('Check Module', '<<check-module>>'),
                 ('Run Module', '<<run-module>>'), ]), ]

    def __init__(self, editwin):
        self.editwin = editwin
        # Provide instance variables referenced by Debugger
        # XXX This should be done differently
        self.flist = self.editwin.flist
        self.root = self.flist.root

    def check_module_event(self, event):
        # <<check-module>> handler: save (or prompt to save) the buffer,
        # then run tabnanny followed by a syntax check.
        filename = self.getfilename()
        if not filename:
            return
        if not self.tabnanny(filename):
            return
        self.checksyntax(filename)

    def tabnanny(self, filename):
        """Run tabnanny over the file; return True when indentation is clean.

        On failure, move the cursor to the offending line, show an error box,
        and return False.
        """
        # NOTE(review): the file handle is never closed here (no finally /
        # with) -- relies on refcounting to close it.
        f = open(filename, 'r')
        try:
            tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
        except tokenize.TokenError, msg:
            msgtxt, (lineno, start) = msg
            self.editwin.gotoline(lineno)
            self.errorbox("Tabnanny Tokenizing Error",
                          "Token Error: %s" % msgtxt)
            return False
        except tabnanny.NannyNag, nag:
            # The error messages from tabnanny are too confusing...
            # show our own canned explanation instead.
            self.editwin.gotoline(nag.get_lineno())
            self.errorbox("Tab/space error", indent_message)
            return False
        return True

    def checksyntax(self, filename):
        """Compile the file; return the code object, or False on error.

        Compilation warnings are redirected to the shell window; on a syntax
        error the offending span is tagged ERROR in the editor and an error
        box is shown.
        """
        self.shell = shell = self.flist.open_shell()
        saved_stream = shell.get_warning_stream()
        shell.set_warning_stream(shell.stderr)
        f = open(filename, 'r')
        source = f.read()
        f.close()
        # Normalize CRLF line endings; compile() requires plain '\n'.
        if '\r' in source:
            source = re.sub(r"\r\n", "\n", source)
        # compile() also requires the source to end with a newline.
        if source and source[-1] != '\n':
            source = source + '\n'
        text = self.editwin.text
        text.tag_remove("ERROR", "1.0", "end")
        try:
            try:
                # If successful, return the compiled code
                return compile(source, filename, "exec")
            except (SyntaxError, OverflowError), err:
                try:
                    msg, (errorfilename, lineno, offset, line) = err
                    if not errorfilename:
                        err.args = msg, (filename, lineno, offset, line)
                        err.filename = filename
                    self.colorize_syntax_error(msg, lineno, offset)
                except:
                    # Error tuple didn't have the expected shape; fall back
                    # to the raw exception text.
                    msg = "*** " + str(err)
                self.errorbox("Syntax error",
                              "There's an error in your program:\n" + msg)
                return False
        finally:
            # Always restore the shell's original warning stream.
            shell.set_warning_stream(saved_stream)

    def colorize_syntax_error(self, msg, lineno, offset):
        # Tag the reported error position in the editor and move the insert
        # cursor next to it.
        text = self.editwin.text
        pos = "0.0 + %d lines + %d chars" % (lineno-1, offset-1)
        text.tag_add("ERROR", pos)
        char = text.get(pos)
        if char and char in IDENTCHARS:
            # Extend the highlight back to the start of the identifier.
            text.tag_add("ERROR", pos + " wordstart", pos)
        if '\n' == text.get(pos):   # error at line end
            text.mark_set("insert", pos)
        else:
            text.mark_set("insert", pos + "+1c")
        text.see(pos)

    def run_module_event(self, event):
        """Run the module after setting up the environment.

        First check the syntax. If OK, make sure the shell is active and
        then transfer the arguments, set the run environment's working
        directory to the directory of the module being executed and also
        add that directory to its sys.path if not already included.
        """
        filename = self.getfilename()
        if not filename:
            return
        code = self.checksyntax(filename)
        if not code:
            return
        # checksyntax() opened the shell and stored it on self.shell.
        shell = self.shell
        interp = shell.interp
        if PyShell.use_subprocess:
            shell.restart_shell()
        dirname = os.path.dirname(filename)
        # XXX Too often this discards arguments the user just set...
        interp.runcommand("""if 1:
            _filename = %r
            import sys as _sys
            from os.path import basename as _basename
            if (not _sys.argv or
                _basename(_sys.argv[0]) != _basename(_filename)):
                _sys.argv = [_filename]
            import os as _os
            _os.chdir(%r)
            del _filename, _sys, _basename, _os
            \n""" % (filename, dirname))
        interp.prepend_syspath(filename)
        # XXX KBK 03Jul04 When run w/o subprocess, runtime warnings still
        # go to __stderr__. With subprocess, they go to the shell.
        # Need to change streams in PyShell.ModifiedInterpreter.
        interp.runcode(code)

    def getfilename(self):
        """Get source filename. If not saved, offer to save (or create) file

        The debugger requires a source file. Make sure there is one, and that
        the current version of the source buffer has been saved. If the user
        declines to save or cancels the Save As dialog, return None.
        If the user has configured IDLE for Autosave, the file will be
        silently saved if it already exists and is dirty.
        """
        filename = self.editwin.io.filename
        if not self.editwin.get_saved():
            autosave = idleConf.GetOption('main', 'General',
                                          'autosave', type='bool')
            if autosave and filename:
                # Autosave enabled and file exists: save without prompting.
                self.editwin.io.save(None)
            else:
                reply = self.ask_save_dialog()
                self.editwin.text.focus_set()
                if reply == "ok":
                    self.editwin.io.save(None)
                    filename = self.editwin.io.filename
                else:
                    filename = None
        return filename

    def ask_save_dialog(self):
        # OK/Cancel dialog; returns the string "ok" when the user accepts.
        msg = "Source Must Be Saved\n" + 5*' ' + "OK to Save?"
        mb = tkMessageBox.Message(title="Save Before Run or Check",
                                  message=msg,
                                  icon=tkMessageBox.QUESTION,
                                  type=tkMessageBox.OKCANCEL,
                                  default=tkMessageBox.OK,
                                  master=self.editwin.text)
        return mb.show()

    def errorbox(self, title, message):
        # XXX This should really be a function of EditorWindow...
        tkMessageBox.showerror(title, message, master=self.editwin.text)
        self.editwin.text.focus_set()
| |
#!/usr/bin/env python3
# coding=utf-8
"""
Multiple helper class for plugins creation. All plugins implemented are required to at least extend BasePlugin.
"""
# noinspection PyProtectedMember
from argparse import _SubParsersAction, ArgumentParser
from abc import abstractmethod, ABCMeta
import logging
import os
import shutil
from lib.installer import Installer
from lib.installer.context_managers import ExtensionPatcherManager
from lib.parsers.configuration import get_global_conf
from lib.trigger import RawTrigger
__author__ = "Benjamin Schubert, benjamin.schubert@epfl.ch"
class BasePlugin(metaclass=ABCMeta):
    """
    The base plugin architecture. Used for plugin discovery and calls.

    Every hook below is a no-op; subclasses override only the ones they need.
    """

    # Short description shown in command-line help for the plugin.
    help = None

    def configure(self, *args, **kwargs) -> None:
        """
        Function called on configure, after the base is done

        :param args: additional arguments
        :param kwargs: additional keyword arguments
        """
        pass

    @classmethod
    def register_for_install(cls, *args, **kwargs) -> None:
        """
        Function called before parsing arguments for install, used to enable a plugin for installation

        :param args: additional arguments
        :param kwargs: additional keyword arguments
        """
        pass

    @classmethod
    def register_for_trigger(cls, parser, subparser, *args, **kwargs) -> None:
        """
        Function called before parsing arguments for trigger, used to enable a plugin when triggering

        :param parser: the argument parser on which to register
        :param subparser: the subparser group on which to register
        :param args: additional arguments
        :param kwargs: additional keyword arguments

        NOTE(review): subclasses override this with narrower signatures
        (subparser-only in MainPlugin/MetaPlugin, parser-only in
        AnalysisPlugin) -- confirm call sites pass matching arguments.
        """
        pass

    def pre_trigger_run(self, trigger: RawTrigger, *args, **kwargs) -> None:
        """
        Function called before running the trigger.

        :param trigger: the trigger instance to run
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        """
        pass

    def post_trigger_run(self, trigger: RawTrigger, error: int, *args, **kwargs) -> None:
        """
        Function called after the trigger ran, with the trigger's exit status.

        :param trigger: the trigger that has run
        :param error: the error code returned by the trigger
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        """
        pass

    def post_trigger_clean(self, trigger: RawTrigger, *args, **kwargs) -> None:
        """
        Function called before the trigger exits. This is always run, whatever happens.

        :param trigger: the trigger that has run
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        """
        pass
class MainPlugin(BasePlugin, metaclass=ABCMeta):
    """
    Implements the minimum for a main plugin for trigger runs. For a plugin to be used in this case, it must inherit
    this one
    """

    @property
    @abstractmethod
    def extension(self) -> str:
        """
        The extension to identify this plugin. Will be appended if needed on the plugin-specific binary
        """

    @classmethod
    def register_for_trigger(cls, subparser: _SubParsersAction, *args, **kwargs) -> ArgumentParser:
        """
        Registers the plugin as an option to run with for trigger

        :param subparser: the subparser on which to register
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        :return: the parser created by registering, to allow subclasses to register options when running
        """
        parser = subparser.add_parser(cls.__name__.lower(), help=cls.help)
        parser.set_defaults(main_plugin=cls())  # pylint: disable=abstract-class-instantiated
        return parser

    @abstractmethod
    def check_trigger_success(self, trigger: RawTrigger, error: int, *args, **kwargs) -> int:
        """
        Main plugins are responsible of determining if the run was successful or not. This function gets called to do
        this

        :param trigger: the trigger that has run
        :param error: the error code returned by the trigger
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        :return: 0|constants.PLUGIN_ERROR on success|error
        """

    # noinspection PyUnusedLocal
    # pylint: disable=unused-argument
    def create_executable(self, installer: Installer, extension: str=None, version_number: int=None, force: bool=False,
                          *args, **kwargs) -> int:
        """
        Creates a special executable to run for this plugin if needed. If a patch is supplied by the form
        "program-name-version-extension.patch", it will automatically get used to create a new version

        :param installer: the installer instance that is used
        :param extension: the extension to add to the binary, usually the plugin name
        :param version_number: if multiple version are required for a plugin, this will get appended to it
        :param force: force creation even if no patch is provided
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        :return: None|0 if nothing happened or installation is successful
        """
        extension = extension or self.extension
        executable_suffix = "{}-{}".format(extension, version_number) if version_number else extension
        # First pass: rebuild any library that has a matching extension patch.
        # Once one library is rebuilt, `force` cascades to everything after it.
        for lib in installer.conf.getlist("libraries"):
            lib_installer = Installer.factory(installer.conf.get_library(lib), False)
            with ExtensionPatcherManager(lib_installer, extension) as lib_patcher:
                if lib_patcher.is_patched or force:
                    lib_installer.configure()
                    lib_installer.make()
                    lib_installer.install()
                    force = True
        with ExtensionPatcherManager(installer, extension) as patcher:
            if not patcher.is_patched and not force:
                # logging.verbose is not a stdlib level; presumably added by
                # the project's logging setup -- TODO confirm.
                logging.verbose("No need to create special executable for {}".format(extension))
                # NOTE(review): bare return yields None here while the success
                # path returns 0, matching the documented "None|0" contract.
                return
            installer.make()
            executable = os.path.join(installer.working_dir, installer.conf.get("bitcode_file"))
            destination = "{}-{}".format(installer.conf.get_executable(), executable_suffix)
            logging.verbose("Copying {} to {}".format(executable, os.path.join(installer.install_dir, destination)))
            shutil.copy(executable, os.path.join(installer.install_dir, destination))
        # Second pass: rebuild the libraries again after the patched build
        # -- presumably to restore their unpatched state once the patcher
        # context has reverted the patches; TODO confirm.
        for lib in installer.conf.getlist("libraries"):
            lib_installer = Installer.factory(installer.conf.get_library(lib), False)
            if force:
                lib_installer.configure()
                lib_installer.make()
                lib_installer.install()
        return 0
class AnalysisPlugin(BasePlugin, metaclass=ABCMeta):
    """
    Add-on plugin that runs on top of a main plugin to provide extra
    functionality such as analysis or benchmarking.
    """

    @classmethod
    @abstractmethod
    def options(cls) -> list:
        """
        Dashed command-line options that enable this plugin. Beware of
        conflicts with options declared by other plugins.

        :return: the options to enable the plugin
        """

    @classmethod
    def register_for_trigger(cls, parser: ArgumentParser, *args, **kwargs) -> None:
        """
        Make this plugin selectable on the trigger command line.

        :param parser: the parser on which to register
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        """
        flags = cls.options()
        # Each selected plugin class is accumulated into args.analysis_plugins.
        parser.add_argument(*flags, action="append_const",
                            dest="analysis_plugins", const=cls, help=cls.help)
class InstallPlugin(BasePlugin, metaclass=ABCMeta):
    """
    Implements available actions to a plugin that is meant to run after an installation, for example to do one time
    analysis on the binaries
    """

    @classmethod
    @abstractmethod
    def options(cls) -> list:
        """
        A list of dashed options to use for enabling this plugin

        :return: the command line flags that enable this plugin
        """

    @abstractmethod
    def post_install_run(self, *args, **kwargs) -> None:
        """
        Called once the binary was compiled, main entry point for this type of plugins

        :param args: additional arguments
        :param kwargs: additional keyword arguments
        """

    @classmethod
    def register_for_install(cls, parser: ArgumentParser, *args, **kwargs) -> None:
        """
        Registers the plugin as being available to run after installation

        :param parser: the parser on which to register
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        """
        # NOTE(review): dest="analysis_plugins" is identical to
        # AnalysisPlugin's destination -- looks like a copy-paste; confirm
        # install plugins are really meant to accumulate in
        # args.analysis_plugins.
        parser.add_argument(*cls.options(), action="append_const", dest="analysis_plugins", const=cls, help=cls.help)

    @staticmethod
    def post_install_clean(*args, **kwargs) -> None:
        """
        Called after installation is done.

        :param args: additional arguments
        :param kwargs: additional keyword arguments
        """
        pass
class MetaPlugin(BasePlugin, metaclass=ABCMeta):
    """
    Orchestration plugin: chooses which other plugins run and can act on the
    combined results afterwards (for example comparing two implementations
    for speed).
    """

    @classmethod
    def register_for_trigger(cls, subparser: _SubParsersAction, *args, **kwargs):
        """
        Register this plugin as a runnable trigger command.

        :param subparser: the subparser on which to register
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        :return: the parser created, so subclasses can add their own options
        """
        command_name = cls.__name__.lower()
        parser = subparser.add_parser(command_name, help=cls.help)
        # pylint: disable=abstract-class-instantiated
        parser.set_defaults(main_plugin=cls())
        return parser

    @abstractmethod
    def before_run(self, *args, **kwargs) -> dict:
        """
        Called once before the complete run. Must return a dictionary:
            {
                "main_plugins": list of MainPlugins to run,
                "analysis_plugins": list of analysis plugins to use,
            }

        :param args: additional arguments
        :param kwargs: additional keyword arguments
        :return: the plugin selection dictionary described above
        """

    @abstractmethod
    def after_run(self, *args, **kwargs) -> int:
        """
        Called once all plugins have run.

        :param args: additional arguments
        :param kwargs: additional keyword arguments
        :return: 0 on success, a positive integer on failure
        """
def create_big_file(size: int=1) -> str:
    """
    Create (once) a large workload file and return its path.

    The file contains ``size`` chunks of one MiB of "0" characters. If a file
    for this size already exists it is reused as-is.

    :param size: the number of 1 MiB chunks to write, increasing the file size
    :return: the file path
    """
    workload_dir = get_global_conf().getdir("trigger", "workloads")
    return_file = os.path.join(workload_dir, "{}-{}.tar".format("workloads", size))
    if os.path.exists(return_file):
        return return_file

    os.makedirs(os.path.dirname(return_file), exist_ok=True)
    # Write to a temporary name and rename atomically: a crash mid-write must
    # not leave a truncated file that later calls would treat as complete.
    tmp_file = return_file + ".tmp"
    with open(tmp_file, "w") as big_file:
        for _ in range(size):
            big_file.write("0" * (1024 ** 2))
    os.replace(tmp_file, return_file)
    return return_file
| |
#!/usr/bin/env python
"""Data master specific classes."""
import socket
import threading
import urlparse
import urllib3
from urllib3 import connectionpool
import logging
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib import utils
from grr.server.data_server import constants
from grr.server.data_server import rebalance
from grr.server.data_server import utils as sutils
class DataMasterError(Exception):
  """Raised when some critical error happens in the data master."""
class DataServer(object):
  """DataServer objects for each data server."""

  def __init__(self, location, index):
    """Build server info from a location URL and its configuration index.

    Args:
      location: server URL string; scheme defaults to "http".
      index: position of this server in Dataserver.server_list.
    """
    # Parse location.
    loc = urlparse.urlparse(location, scheme="http")
    offline = rdfvalue.DataServerState.Status.OFFLINE
    state = rdfvalue.DataServerState(size=0, load=0, status=offline)
    self.server_info = rdfvalue.DataServerInformation(index=index,
                                                      address=loc.hostname,
                                                      port=loc.port,
                                                      state=state)
    self.registered = False
    self.removed = False
    logging.info("Configured DataServer on %s:%d", self.Address(), self.Port())

  def SetInitialInterval(self, num_servers):
    # Give this server its bootstrap share of the key range.
    self.server_info.interval = sutils.CreateStartInterval(self.Index(),
                                                           num_servers)

  def IsRegistered(self):
    return self.registered

  def Matches(self, addr, port):
    """Check whether (addr, port) identifies this server.

    addr may be a list of addresses or a single hostname/IP; single values
    are compared after DNS resolution.
    """
    if isinstance(addr, list):
      if self.Address() not in addr:
        return False
    else:
      # Handle hostnames and IPs
      if socket.gethostbyname(self.Address()) != socket.gethostbyname(addr):
        return False
    return self.Port() == port

  def Register(self):
    """Once the server is registered, it is allowed to use the database."""
    self.registered = True

  def Deregister(self):
    self.registered = False

  def Port(self):
    return self.server_info.port

  def Address(self):
    return self.server_info.address

  def Index(self):
    return self.server_info.index

  def SetIndex(self, newindex):
    self.server_info.index = newindex

  def Size(self):
    return self.server_info.state.size

  def Load(self):
    return self.server_info.state.load

  def Interval(self):
    return self.server_info.interval

  def SetInterval(self, start, end):
    self.server_info.interval.start = start
    self.server_info.interval.end = end

  def GetInfo(self):
    return self.server_info

  def UpdateState(self, newstate):
    """Update state of server."""
    self.server_info.state = newstate

  def Remove(self):
    # Mark only; removal from the master's lists happens in DataMaster.
    self.removed = True

  def WasRemoved(self):
    return self.removed
class DataMaster(object):
  """DataMaster information."""

  def __init__(self, myport, service):
    """Load configuration, build/validate the mapping, register the master.

    Args:
      myport: port this process listens on; must match the first entry of
        Dataserver.server_list since the master must be first.
      service: storage backend used to load/save the server mapping.

    Raises:
      DataMasterError: on empty/mismatched configuration, or when this
        process is not the first server in the list.
    """
    self.service = service
    stores = config_lib.CONFIG["Dataserver.server_list"]
    if not stores:
      logging.error("Dataserver.server_list is empty: no data servers will"
                    " be available")
      raise DataMasterError("Dataserver.server_list is empty")
    self.servers = [DataServer(loc, idx) for idx, loc in enumerate(stores)]
    self.registered_count = 0
    # Load server mapping.
    self.mapping = self.service.LoadServerMapping()
    if not self.mapping:
      # Bootstrap mapping.
      # Each server information is linked to its corresponding object.
      # Updating the data server object will reflect immediately on
      # the mapping.
      for server in self.servers:
        server.SetInitialInterval(len(self.servers))
      servers_info = [server.server_info for server in self.servers]
      self.mapping = rdfvalue.DataServerMapping(version=0,
                                                num_servers=len(self.servers),
                                                servers=servers_info)
      self.service.SaveServerMapping(self.mapping, create_pathing=True)
    else:
      # Check mapping and configuration matching.
      if len(self.mapping.servers) != len(self.servers):
        raise DataMasterError("Server mapping does not correspond "
                              "to the configuration.")
      # NOTE(review): _EnsureServerInMapping returns False on an
      # address/port mismatch, but the result is ignored here, so a
      # mismatched entry is silently left unlinked -- confirm intended.
      for server in self.servers:
        self._EnsureServerInMapping(server)
    # Create locks.
    self.server_lock = threading.Lock()
    # Register the master.
    self.myself = self.servers[0]
    if self.myself.Port() == myport:
      self._DoRegisterServer(self.myself)
    else:
      logging.warning("First server in Dataserver.server_list is not the "
                      "master. Found port '%i' but my port is '%i'. If you"
                      " really are running master, you may want to specify"
                      " flag --port %i."
                      % (self.myself.Port(), myport, myport))
      raise DataMasterError("First server in Dataserver.server_list must be "
                            "the master.")
    # Start database measuring thread.
    sleep = config_lib.CONFIG["Dataserver.stats_frequency"]
    self.periodic_thread = utils.InterruptableThread(
        target=self._PeriodicThread, sleep_time=sleep)
    self.periodic_thread.start()
    # Holds current rebalance operation.
    self.rebalance = None
    self.rebalance_pool = []

  def LoadMapping(self):
    # Return the live mapping object (shared with the server objects).
    return self.mapping

  def _PeriodicThread(self):
    """Periodically update our state and store the mappings."""
    ok = rdfvalue.DataServerState.Status.AVAILABLE
    num_components, avg_component = self.service.GetComponentInformation()
    # load is hard-coded to 0 -- presumably load tracking is not implemented
    # for the master; TODO confirm.
    state = rdfvalue.DataServerState(size=self.service.Size(),
                                     load=0,
                                     status=ok,
                                     num_components=num_components,
                                     avg_component=avg_component)
    self.myself.UpdateState(state)
    self.service.SaveServerMapping(self.mapping)

  def _EnsureServerInMapping(self, server):
    """Ensure that the data server exists on the mapping."""
    index = server.Index()
    server_info = self.mapping.servers[index]
    # NOTE(review): returns False on mismatch and None on success; the only
    # caller ignores the result -- confirm this should not raise instead.
    if server_info.address != server.Address():
      return False
    if server_info.port != server.Port():
      return False
    # Change underlying server information.
    server.server_info = server_info

  def RegisterServer(self, addr, port):
    """Register incoming data server. Return server object."""
    for server in self.servers:
      if server == self.myself:
        # The master registered itself in __init__.
        continue
      if server.Matches(addr, port):
        with self.server_lock:
          if server.IsRegistered():
            # Duplicate registration attempt is refused.
            return None
          else:
            self._DoRegisterServer(server)
            return server
    return None

  def HasServer(self, addr, port):
    """Checks if a given server is already in the set."""
    for server in self.servers:
      if server.Matches(addr, port):
        return server
    return None

  def _DoRegisterServer(self, server):
    # Caller is expected to hold server_lock (except during __init__).
    self.registered_count += 1
    server.Register()
    logging.info("Registered server %s:%d", server.Address(), server.Port())
    if self.AllRegistered():
      logging.info("All data servers have registered!")

  def DeregisterServer(self, server):
    """Deregister a data server."""
    with self.server_lock:
      server.Deregister()
      self.registered_count -= 1

  def AllRegistered(self):
    """Check if all servers have registered."""
    return self.registered_count == len(self.servers)

  def Stop(self):
    # Persist the mapping and stop the periodic stats thread.
    self.service.SaveServerMapping(self.mapping)
    self.periodic_thread.Stop()

  def SetRebalancing(self, reb):
    """Sets a new rebalance operation and starts communication with servers."""
    self.rebalance = reb
    self.rebalance_pool = []
    try:
      # One HTTP connection pool per data server for the whole operation.
      for serv in self.servers:
        pool = connectionpool.HTTPConnectionPool(serv.Address(),
                                                 port=serv.Port())
        self.rebalance_pool.append(pool)
    except urllib3.exceptions.MaxRetryError:
      self.CancelRebalancing()
      return False
    return True

  def CancelRebalancing(self):
    # Drop the operation and close every pool opened by SetRebalancing.
    self.rebalance = None
    for pool in self.rebalance_pool:
      pool.close()
    self.rebalance_pool = []

  def IsRebalancing(self):
    # Returns the rebalance object itself (truthy when active), not a bool.
    return self.rebalance

  def AddServer(self, addr, port):
    """Add new server to the group."""
    server = DataServer("http://%s:%d" % (addr, port), len(self.servers))
    self.servers.append(server)
    # New server starts with an empty interval at the end of the range.
    server.SetInterval(constants.MAX_RANGE, constants.MAX_RANGE)
    self.mapping.servers.Append(server.GetInfo())
    self.mapping.num_servers += 1
    # At this point, the new server is now part of the group.
    return server

  def RemoveServer(self, removed_server):
    """Remove a server. Returns None if server interval is not empty."""
    interval = removed_server.Interval()
    # Interval range must be 0.
    if interval.start != interval.end:
      return None
    # Update ids of other servers.
    newserverlist = []
    for serv in self.servers:
      if serv == removed_server:
        continue
      if serv.Index() > removed_server.Index():
        # Shift indexes down to keep them dense.
        serv.SetIndex(serv.Index() - 1)
      newserverlist.append(serv.GetInfo())
    # Change list of servers.
    self.mapping.servers = newserverlist
    self.mapping.num_servers -= 1
    self.servers.pop(removed_server.Index())
    self.DeregisterServer(removed_server)
    removed_server.Remove()
    return removed_server

  def SyncMapping(self, skip=None):
    """Syncs mapping with other servers.

    Args:
      skip: optional collection of servers to leave out of the sync.

    Returns:
      True on success, False when any server fails to sync.
    """
    pools = []
    try:
      # Update my state.
      self._PeriodicThread()
      # servers[0] is this master itself, so start at 1.
      for serv in self.servers[1:]:
        if skip and serv in skip:
          continue
        pool = connectionpool.HTTPConnectionPool(serv.Address(),
                                                 port=serv.Port())
        pools.append((serv, pool))
      body = self.mapping.SerializeToString()
      headers = {"Content-Length": len(body)}
      for serv, pool in pools:
        res = pool.urlopen("POST", "/servers/sync", headers=headers,
                           body=body)
        if res.status != constants.RESPONSE_OK:
          logging.warning("Could not sync with server %s:%d", serv.Address(),
                          serv.Port())
          return False
        # Each server replies with its current state.
        state = rdfvalue.DataServerState()
        state.ParseFromString(res.data)
        serv.UpdateState(state)
    except urllib3.exceptions.MaxRetryError:
      return False
    finally:
      for _, pool in pools:
        pool.close()
    return True

  def FetchRebalanceInformation(self):
    """Asks data servers for number of changes for rebalancing."""
    body = self.rebalance.SerializeToString()
    size = len(body)
    headers = {"Content-Length": size}
    for pool in self.rebalance_pool:
      try:
        res = pool.urlopen("POST", "/rebalance/statistics", headers=headers,
                           body=body)
        if res.status != constants.RESPONSE_OK:
          self.CancelRebalancing()
          return False
        reb = rdfvalue.DataServerRebalance()
        reb.ParseFromString(res.data)
        # Each server reports how much data it would move; collect the
        # first entry into our own rebalance object.
        ls = list(reb.moving)
        if ls:
          logging.warning("Moving %d", ls[0])
          self.rebalance.moving.Append(ls[0])
        else:
          self.CancelRebalancing()
          return False
      except urllib3.exceptions.MaxRetryError:
        self.CancelRebalancing()
        return False
    return True

  def CopyRebalanceFiles(self):
    """Tell servers to copy files to the corresponding servers."""
    body = self.rebalance.SerializeToString()
    size = len(body)
    headers = {"Content-Length": size}
    for pool in self.rebalance_pool:
      try:
        res = pool.urlopen("POST", "/rebalance/copy", headers=headers,
                           body=body)
        if res.status != constants.RESPONSE_OK:
          self.CancelRebalancing()
          return False
      except urllib3.exceptions.MaxRetryError:
        self.CancelRebalancing()
        return False
    return True

  def RebalanceCommit(self):
    """Tell servers to commit rebalance changes.

    Returns:
      The updated mapping on success, None on failure.
    """
    # Save rebalance information to a file, so we can recover later.
    rebalance.SaveCommitInformation(self.rebalance)
    body = self.rebalance.SerializeToString()
    size = len(body)
    headers = {"Content-Length": size}
    for i, pool in enumerate(self.rebalance_pool):
      try:
        res = pool.urlopen("POST", "/rebalance/perform", headers=headers,
                           body=body)
        if res.status != constants.RESPONSE_OK:
          logging.error("Server %d failed to perform transaction %s", i,
                        self.rebalance.id)
          self.CancelRebalancing()
          return None
        stat = rdfvalue.DataServerState()
        stat.ParseFromString(res.data)
        data_server = self.servers[i]
        data_server.UpdateState(stat)
      except urllib3.exceptions.MaxRetryError:
        self.CancelRebalancing()
        return None
    # Update server intervals.
    mapping = self.rebalance.mapping
    for i, serv in enumerate(list(self.mapping.servers)):
      serv.interval = mapping.servers[i].interval
    self.rebalance.mapping = self.mapping
    self.service.SaveServerMapping(self.mapping)
    # We can finally delete the temporary file, since we have succeeded.
    rebalance.DeleteCommitInformation(self.rebalance)
    rebalance.RemoveDirectory(self.rebalance)
    self.CancelRebalancing()
    return self.mapping
| |
"""
Provides tools for initiating webservices session
"""
from xml.etree import ElementTree as xml
import datetime
import logging
from xml.etree.ElementTree import XMLParser
import os
import yaml
from suds.client import Client
# Module-level logger; a NullHandler keeps importing applications that have
# not configured logging from seeing "no handlers could be found" warnings.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.NullHandler())
def element_to_dict(element):
    """Map parallel <AttrName>/<AttrValue> text pairs under *element* to a dict.

    Pairs are matched positionally; if the counts differ, extras are silently
    dropped (zip semantics).

    :param element: an ElementTree element containing AttrName/AttrValue nodes
    :return: dict of attribute name text -> attribute value text
    """
    # .iter() replaces the long-deprecated .getiterator(), which was removed
    # in Python 3.9.
    names = element.iter('AttrName')
    values = element.iter('AttrValue')
    return {name.text: value.text for name, value in zip(names, values)}
def get_session_from_yaml(yaml_file, session_name):
    """Build a SoapSession from credentials stored in a YAML file.

    :param yaml_file: path to a YAML file keyed by session name
    :param session_name: top-level key holding user/password/host/port
    :return: a SoapSession built from the named entry
    :raises NameError: if the file does not exist (kept for backward
        compatibility with existing callers)
    """
    if not os.path.exists(yaml_file):
        raise NameError("File Does not exist {}".format(yaml_file))
    # `with` closes the handle deterministically; safe_load avoids
    # yaml.load's arbitrary object construction on untrusted files.
    with open(yaml_file) as f:
        session_data = yaml.safe_load(f)[session_name]
    return SoapSession(session_data['user'], session_data['password'],
                       session_data['host'], session_data['port'])
def extract_fields(objXML, attribute_only=False):
    """Parse a UDS XML reply into a list of dictionaries.

    :param objXML: XML string returned by the webservice
    :param attribute_only: when True, return a single dict mapping each child
        tag of the first element to its attribute dict, instead of parsing
        UDSObject AttrName/AttrValue pairs
    :return: list of dictionaries extracted from the XML
    """
    # The previous unused XMLParser(encoding='utf-8') local was dropped;
    # the explicit utf-8 encode below already fixes the parse encoding.
    element_list = xml.fromstring(objXML.encode('utf-8'))
    obj_list = []
    if attribute_only:
        # list(elem) replaces elem.getchildren(), removed in Python 3.9.
        obj_list.append({child.tag: child.attrib
                         for child in list(element_list[0])})
    else:
        for e in element_list.iter('UDSObject'):
            element_dict = element_to_dict(e)
            # By convention the first child of a UDSObject is its handle.
            element_dict['handle'] = e[0].text
            obj_list.append(element_dict)
    return obj_list
class SoapResponse(object):
    """Wrapper around a raw webservice reply.

    Renders itself as text via repr()/str() and can convert the underlying
    XML into a list of dictionaries with to_dict().
    """

    # Class-path string used to detect a raw suds reply object.
    _REPLY_CLASS = "<class 'suds.sudsobject.reply'>"

    def __init__(self, xmlresponse, attribute_only=False):
        self.response = xmlresponse
        self.attribute_only = attribute_only

    def __repr__(self):
        if not self.response:
            return ''
        if str(self.response.__class__) == self._REPLY_CLASS:
            return self.response.__repr__()
        return self.response

    def __str__(self):
        if not self.response:
            return ""
        if str(self.response.__class__) == self._REPLY_CLASS:
            return self.response.__str__()
        return self.response

    def to_dict(self):
        """
        converts raw xml to dictionary
        :return:
        """
        # NOTE: an empty response yields {} while a parsed one yields a list;
        # callers rely on truthiness rather than the exact type.
        if not self.response:
            return {}
        return extract_fields(objXML=self.response,
                              attribute_only=self.attribute_only)
class SoapService(object):
    """Callable proxy for one named method on the session's SOAP client.

    Any list passed positionally or by keyword is converted to the service's
    ArrayOfString type before the underlying call is made; the session id is
    always supplied as the first argument.
    """

    def __init__(self, session, service_name):
        self.s = session
        self.name = service_name

    def __call__(self, *args, **kwargs):
        method = getattr(self.s.client.service, self.name)
        converted = [self.createArrayOfString(a) if isinstance(a, list) else a
                     for a in args]
        for key in kwargs:
            if isinstance(kwargs[key], list):
                kwargs[key] = self.createArrayOfString(kwargs[key])
        return SoapResponse(method(self.s.sid, *converted, **kwargs))

    def createArrayOfString(self, iterable):
        # Empty/None inputs become an empty ArrayOfString.
        array_of_string = self.s.client.factory.create('ArrayOfString')
        array_of_string.string = iterable if iterable else []
        return array_of_string
def login(host, username, password, port=8080):
    """Open a SoapSession with the given credentials and wrap it in a SoapAPI.

    :param host: webservice host name
    :param username: account user name
    :param password: account password
    :param port: webservice port, defaults to 8080
    :return: a ready-to-use SoapAPI instance
    """
    session = SoapSession(username, password, host, port)
    return SoapAPI(session)
class SoapSession(object):
    """This builds a session object used to login to ca webservices."""

    def __init__(self, username, password, host, port=8080, auto_renew=True):
        """
        :param username: account user name
        :param password: account password
        :param host: webservice host name
        :param port: webservice port, defaults to 8080
        :param auto_renew: whether the session should renew itself
        """
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self._sid = None
        self.url = self.get_url()
        self.client = Client(self.url)
        self.last_login = None
        self.auto_renew = auto_renew

    def __repr__(self):
        return "{}@{}".format(self.username, self.host)

    @property
    def sid(self):
        # Refresh lazily on every access; login() is a cheap no-op while the
        # session is still fresh.
        self._sid = self.login()
        return self._sid

    def login(self):
        """Return a valid session id, renewing it when the old one expired.

        :raises ValueError: when username or password is missing
        """
        if not (self.username and self.password):
            raise ValueError('Invalid username or password')
        if self.session_is_expired():
            newsid = self.client.service.login(self.username, self.password)
            self.last_login = datetime.datetime.now()
            return newsid
        else:
            return self._sid

    def session_is_expired(self):
        """
        Takes subtracts last time logged in from current date and
        if the difference in minutes is greater than 90 minutes
        the session has expired.
        :return:
        """
        if not self.last_login:
            return True
        session_limit = 90
        session_duration = datetime.datetime.now() - self.last_login
        # BUG FIX: timedelta.seconds only holds the within-a-day remainder,
        # so a session idle for e.g. 24h01m looked fresh; total_seconds()
        # counts the full elapsed time.
        return session_duration.total_seconds() / 60 > session_limit

    def get_url(self):
        """Build the WSDL endpoint URL from host and port.

        :raises ValueError: when host or port is missing
        """
        url = "http://{}:{}/axis/services/USD_R11_WebService?wsdl"
        # BUG FIX: the original tested `host or port`, which only raised when
        # *both* were missing and otherwise produced "http://None:8080/...".
        if not (self.host and self.port):
            raise ValueError("No valid host name supplied")
        return url.format(self.host, self.port)
class SoapAPI(object):
    """
    Web Services Interface

    Thin wrapper around a suds SOAP client for the USD R11 web service.
    ``cl`` is the suds client and ``sid`` the session id taken from an
    authenticated session object. Any attribute not defined on this class
    is resolved through __getattr__ into a SoapService proxy — that is how
    calls such as doSelect, doQuery, getListValues, freeListHandles,
    getObjectTypeInformation and removeMemberFromGroup below are reached.
    """
    def __init__(self, session):
        # Cache the suds client interface and session id from the session.
        self.cl = session.client
        self.sid = session.sid
        self._session = session
    def __getattr__(self, service_name):
        # Unknown attributes become SoapService proxies bound to this
        # session (SoapService is defined elsewhere in this module).
        return SoapService(self._session, service_name)
    #def searchObjects(self, objType, searchCriteria, maxRows=-1, returnAttributes=None):
    #    returnAttributes = returnAttributes if returnAttributes else []
    #    return self.doSelect(objType, searchCriteria, maxRows, attributes=returnAttributes).to_dict()
    def searchObjects(self, objType, searchCriteria, maxRows=-1,returnAttributes=None):
        """Search objects of *objType* matching *searchCriteria*.

        When more than 250 rows are requested, pages through the result
        list in chunks of 250 via doQuery/getListValues and frees the list
        handle afterwards; otherwise issues a single doSelect. Returns a
        list of dicts.
        """
        returnAttributes = returnAttributes if returnAttributes else []
        if (maxRows > 250):
            return_obj = []
            query = self.cl.service.doQuery(self.sid, objType, searchCriteria)
            for x in range(0,query.listLength+1,250):
                # NOTE(review): 'max' shadows the builtin max() in this loop.
                max = x+249
                if (max > query.listLength-1):
                    max = query.listLength-1
                if (x < query.listLength):
                    return_obj = return_obj+self.getListValues(query.listHandle,x,max,returnAttributes).to_dict()
            self.freeListHandles(query.listHandle)
            return return_obj
        else:
            return self.doSelect(objType, searchCriteria, maxRows, attributes=returnAttributes).to_dict()
    def updateObject(self, obj_handle, attribute_changes, return_attributes=None):
        """Apply *attribute_changes* (name/value pairs) to the object at
        *obj_handle*; returns the raw SOAP response."""
        attribute_changes = self.createArrayOfString(attribute_changes)
        return_attributes = self.createArrayOfString(return_attributes)
        return self.cl.service.updateObject(self.sid, obj_handle, attribute_changes, return_attributes)
    def createRequest(self, creator_handle, attrvals, return_attributes=None,string_template="",attributes=None,
                      reqHandle="", reqNumber=""):
        """Create a new request on behalf of *creator_handle*.

        *attrvals*, *return_attributes* and *attributes* are wrapped in
        the SOAP ArrayOfString type; the remaining parameters are passed
        straight through to the createRequest SOAP call.
        """
        attrvals = self.createArrayOfString(attrvals)
        return_attributes = self.createArrayOfString(return_attributes)
        attributes = self.createArrayOfString(attributes)
        results=self.cl.service.createRequest(self.sid, creator_handle, attrvals,return_attributes,string_template,attributes,
                                      reqHandle,reqNumber)
        return results
    def listAttributes(self, obj_name, convert_to_dict=True):
        """Describe the schema of *obj_name*.

        Returns attributes extracted from the XML response when
        *convert_to_dict* is true, otherwise the raw XML response.
        """
        objresults = self.getObjectTypeInformation(obj_name)
        if convert_to_dict:
            # extract_fields is a module-level helper defined elsewhere.
            attributes = extract_fields(objXML=objresults.response, attribute_only=convert_to_dict)
        else:
            return objresults.response
        return attributes
    def extractHandle(self, suds_xml):
        """Pull the object handle text out of a raw SOAP XML response
        (first grandchild of the root element; `xml` is an ElementTree-like
        module bound at module level — presumably; verify)."""
        return xml.fromstring(suds_xml)[0][0].text
    def isMember(self, user, group):
        """Return the (possibly empty) group-membership query result;
        truthy when *user* belongs to *group*."""
        ismember = self.doSelect("grpmem", "member = U'{}' and group = U'{}'".format(user, group), )
        if ismember:
            logger.debug("User: {} is in group: {}".format(user, group))
        else:
            logger.debug("User {} Not in group: {}".format(user, group))
        return ismember
    def addToGroup(self, user, group):
        """Add *user* to *group* unless already a member."""
        logger.info("Adding user: {} to group {}".format(user, group))
        if not self.isMember(user, group):
            self.cl.service.addMemberToGroup(self.sid, "cnt:{}".format(user), "cnt:{}".format(group))
    def createArrayOfString(self, iterable):
        """Wrap *iterable* (None becomes []) in the SOAP ArrayOfString type."""
        array_of_string = self.cl.factory.create('ArrayOfString')
        iterable = iterable if iterable else []
        array_of_string.string = iterable
        return array_of_string
    def removeFromGroup(self, user, group):
        """Remove *user* from *group* if currently a member."""
        logger.info("removing user: {} from group {}".format(user, group))
        if self.isMember(user, group):
            self.removeMemberFromGroup("cnt:{}".format(user), "cnt:{}".format(group))
    def updateRequest(self, ref_num, **kwargs):
        """Update request *ref_num*; keyword arguments become the
        attribute name/value pairs handed to updateObject."""
        logger.info("updating Request {}".format(ref_num))
        request_handle = self.searchObjects('cr', "ref_num='{}'".format(ref_num), maxRows=1)[0]['handle']
        update_attributes = list(kwargs.items())
        results = self.updateObject(request_handle, update_attributes)
        logger.info("Update Successful")
        return results
    def tansferRequest(self, ref_num, group=None, assignee=None, message=""):
        """Transfer request *ref_num* to a *group* and/or *assignee*.

        NOTE(review): method name is misspelled ('tansfer'); kept as-is
        for caller compatibility.
        """
        request_handle = self.searchObjects('cr', "ref_num='{}'".format(ref_num), maxRows=1)[0]['handle']
        user_handle = self.cl.service.getHandleForUserid(self.sid, self._session.username)
        assignee_handle = "cnt:{}".format(assignee) if assignee else ""
        # NOTE(review): this condition tests assignee_handle, not group, so
        # group_handle stays empty when only a group is supplied even though
        # setGroup is 1 below — looks like a bug; verify intended behavior.
        group_handle = "cnt:{}".format(group) if assignee_handle else ""
        setAssignee = 1 if assignee else 0
        setGroup = 1 if group else 0
        if group or assignee:
            self.cl.service.transfer(self.sid, user_handle, request_handle,
                                     message, setAssignee, assignee_handle,
                                     setGroup, group_handle, 0, "")
        else:
            logger.exception("No Group or assignee specified")
# This module is import-only; nothing runs when executed directly.
if __name__ == '__main__':
    pass
| |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2012-2020 Peter Williams <peter@newton.cx> and collaborators.
# Licensed under the MIT License.
"""pwkit.colormaps -- tools to convert arrays of real-valued data to other
formats (usually, RGB24) for visualization.
TODO: "heated body" map.
The main interface is the `factory_map` dictionary from colormap names to
factory functions. `base_factory_names` lists the names of a set of color
maps. Additional ones are available with the suffixes "_reverse" and "_sqrt"
that apply the relevant transforms.
The factory functions return another function, the "mapper". Each mapper takes
a single argument, an array of values between 0 and 1, and returns the mapped
colors. If the input array has shape S, the returned value has a shape (S +
(3, )), with mapped[...,0] being the R values, between 0 and 1, etc.
Example:
data = np.array ([<things between 0 and 1>])
mapper = factory_map['cubehelix_blue']()
rgb = mapper (data)
green_values = rgb[:,1]
last_rgb = rgb[-1]
The basic colormap names are:
moreland_bluered
Divergent colormap from intense blue (at 0) to intense red (at 1),
passing through white
cubehelix_dagreen
From black to white through rainbow colors
cubehelix_blue
From black to white, with blue hues
pkgw
From black to red, through purplish
black_to_white, black_to_red, black_to_green, black_to_blue
From black to the named colors.
white_to_black, white_to_red, white_to_green, white_to_blue
From white to the named colors.
The mappers can also take keyword arguments, including at least "transform",
which specifies simple transforms that can be applied to the colormaps. These
are (in terms of symbolic constants and literal string values):
'none' - No transform (the default)
'reverse' - x -> 1 - x (reverses the colormap)
'sqrt' - x -> sqrt(x)
For each transform other than "none", *factory_map* contains an entry with an
underscore and the transform name applied (e.g., "pkgw_reverse") that has that
transform applied.
The initial inspiration was an implementation of the ideas in "Diverging Color
Maps for Scientific Visualization (Expanded)", Kenneth Moreland,
http://www.cs.unm.edu/~kmorel/documents/ColorMaps/index.html
I've realized that I'm not too fond of the white mid-values in these color
maps in many cases. So I also added an implementation of the "cube helix"
color map, described by D. A. Green in
"A colour scheme for the display of astronomical intensity images"
http://adsabs.harvard.edu/abs/2011BASI...39..289G
(D. A. Green, 2011 Bull. Ast. Soc. of India, 39 289)
I made up the pkgw map myself (who'd have guessed?).
"""
from __future__ import absolute_import, division, print_function
__all__ = 'base_factory_names factory_map'.split()
import numpy as np, six
# Names of the base colormap factories defined below; factory_map is built
# from this list, and "_reverse"/"_sqrt" variants are added per name.
base_factory_names = ('moreland_bluered cubehelix_dagreen cubehelix_blue '
                      'pkgw black_to_white black_to_red '
                      'black_to_green black_to_blue '
                      'white_to_black white_to_red '
                      'white_to_green white_to_blue').split()
# Symbolic channel indices for the various 3-component colorspaces used
# throughout this module (all just 0, 1, 2).
R, G, B = range(3)
X, Y, Z = range(3)
L, A, B = range(3) # fortunately this B and RGB's B agree...
M, S, H = range(3)
# Default number of sample points when discretizing a colormap for the
# spline fit in approx_colormap.
DEFAULT_SAMPLE_POINTS = 512
# I don't quite understand where this value comes from, given the various
# Wikipedia values for D65, but this works.
CIELAB_D65 = np.asarray([0.9505, 1., 1.0890])
# Conversion matrices are stored so that row-vector colors can be
# right-multiplied, i.e. np.dot(color, matrix) — see linsrgb_to_xyz.
# from Moreland:
_linsrgb_to_xyz = np.asarray([[0.4124, 0.2126, 0.0193],
                              [0.3576, 0.7152, 0.1192],
                              [0.1805, 0.0722, 0.9505]])
# from Wikipedia, SRGB:
_xyz_to_linsrgb = np.asarray([[3.2406, -0.9689, 0.0557],
                              [-1.5372, 1.8758, -0.2040],
                              [-0.4986, 0.0415, 1.0570]])
# Interpolation utilities.
def approx_colormap(samples, transform='none', fitfactor=1.):
    """Build a fast colormap evaluator from a sampled colormap.

    samples   - Shape (4, n): samples[0] holds the normalized sample
                positions (hopefully uniform on [0, 1]); samples[1:4]
                hold the three color channels at those positions.
    transform - One of 'none', 'reverse', or 'sqrt'.
    fitfactor - Tightness of the spline interpolation.

    Returns a function mapping an array of shape S to colors of shape
    S + (3,), evaluated by per-channel spline interpolation.
    """
    import scipy.interpolate as SI

    positions = samples[0]

    if transform == 'reverse':
        # Flipping the sample columns reverses the map direction.
        samples = samples[:, ::-1]
    elif transform == 'sqrt':
        positions = np.sqrt(positions)
    elif transform != 'none':
        raise ValueError('unknown transformation: ' + str(transform))

    smoothing = fitfactor / samples.shape[1]
    # One smoothing spline per color channel (rows 1..3 of *samples*).
    channel_splines = [SI.splrep(positions, samples[chan + 1], s=smoothing)
                       for chan in range(3)]

    def colormap(values):
        values = np.asarray(values)
        mapped = np.empty(values.shape + (3,))
        flat = values.flatten()
        # A reshape of the freshly-allocated output is a view, so writing
        # through it fills *mapped*.
        flat_mapped = mapped.reshape(flat.shape + (3,))
        for chan, spline in enumerate(channel_splines):
            flat_mapped[:, chan] = SI.splev(flat, spline)
        return mapped

    return colormap
# Colorspace utilities based on the Moreland paper.
def srgb_to_linsrgb(srgb):
    """Convert sRGB values to physically linear RGB.

    The transform is applied componentwise, so *srgb* may have any
    shape; values should lie in [0, 1].
    """
    # Piecewise sRGB decoding: linear ramp near zero, gamma curve above
    # the 0.04045 threshold.
    linear_part = srgb / 12.92
    gamma_part = ((srgb + 0.055) / 1.055) ** 2.4
    return np.where(srgb > 0.04045, gamma_part, linear_part)
def linsrgb_to_srgb(linsrgb):
    """Convert physically linear RGB values to sRGB.

    Componentwise (any shape); values should lie in [0, 1]. Inverse of
    srgb_to_linsrgb.
    """
    # From Wikipedia, but easy analogue to the decoding transform.
    gamma_part = 1.055 * linsrgb ** (1. / 2.4) - 0.055
    linear_part = linsrgb * 12.92
    return np.where(linsrgb > 0.0031308, gamma_part, linear_part)
def linsrgb_to_xyz(linsrgb):
    """Convert linearized sRGB (cf. srgb_to_linsrgb) to CIE XYZ.

    *linsrgb* has shape (..., 3) with values in [0, 1]; the result has
    the same shape. White maps to [0.9505, 1., 1.089].
    """
    # Row-vector colors times the (pre-transposed) conversion matrix.
    xyz = np.dot(linsrgb, _linsrgb_to_xyz)
    return xyz
def xyz_to_linsrgb(xyz):
    """Convert CIE XYZ values to linearized sRGB (cf. srgb_to_linsrgb).

    *xyz* has shape (..., 3); the result has the same shape.
    """
    # Inverse matrix of linsrgb_to_xyz, same row-vector convention.
    linsrgb = np.dot(xyz, _xyz_to_linsrgb)
    return linsrgb
def xyz_to_cielab(xyz, refwhite):
    """Convert CIE XYZ color values to CIE L*a*b*.

    *xyz* has shape (..., 3); *refwhite* is the XYZ reference white of
    shape (3,). Returns an array shaped like *xyz*.
    """
    ratio = xyz / refwhite
    # Piecewise "f" from the CIELAB definition: cube root for large
    # ratios, a linear ramp below the 0.008856 threshold.
    f = np.where(ratio > 0.008856,
                 ratio ** 0.333333333333333,
                 7.787037 * ratio + 16. / 116)
    cielab = np.empty_like(xyz)
    cielab[..., L] = 116 * f[..., Y] - 16
    cielab[..., A] = 500 * (f[..., X] - f[..., Y])
    cielab[..., B] = 200 * (f[..., Y] - f[..., Z])
    return cielab
def cielab_to_xyz(cielab, refwhite):
    """Convert CIE L*a*b* color values back to CIE XYZ.

    *cielab* has shape (..., 3); *refwhite* is the XYZ reference white
    of shape (3,). Returns an array shaped like *cielab*.
    """
    def finv(t):
        # Inverse of the CIELAB "f": cube for large t, linear ramp below.
        return np.where(t > 0.206897, t ** 3, 0.128419 * t - 0.0177129)

    lterm = 1. / 116 * (cielab[..., L] + 16)
    xyz = np.empty_like(cielab)
    xyz[..., X] = finv(lterm + 0.002 * cielab[..., A])
    xyz[..., Y] = finv(lterm)
    xyz[..., Z] = finv(lterm - 0.005 * cielab[..., B])
    return xyz * refwhite
def cielab_to_msh(cielab):
    """Convert CIE L*a*b* to Moreland's Msh colorspace
    (magnitude, saturation-like angle, hue angle).

    *cielab* has shape (..., 3); the result has the same shape.
    """
    magnitude = np.sqrt((cielab ** 2).sum(axis=-1))
    msh = np.empty_like(cielab)
    msh[..., M] = magnitude
    msh[..., S] = np.arccos(cielab[..., L] / magnitude)
    msh[..., H] = np.arctan2(cielab[..., B], cielab[..., A])
    return msh
def msh_to_cielab(msh):
    """Convert Moreland's Msh colorspace back to CIE L*a*b*.

    *msh* has shape (..., 3); the result has the same shape. Inverse of
    cielab_to_msh (spherical-to-Cartesian).
    """
    radius = msh[..., M]
    sin_s = np.sin(msh[..., S])
    cielab = np.empty_like(msh)
    cielab[..., L] = radius * np.cos(msh[..., S])
    cielab[..., A] = radius * sin_s * np.cos(msh[..., H])
    cielab[..., B] = radius * sin_s * np.sin(msh[..., H])
    return cielab
def srgb_to_msh(srgb, refwhite):
    """Convert sRGB to Moreland's Msh, via linear RGB, XYZ, and CIE L*a*b*.

    *srgb* has shape (..., 3); *refwhite* is the CIE L*a*b* reference
    white of shape (3,). The result has the same shape as *srgb*.
    """
    linear = srgb_to_linsrgb(srgb)
    cielab = xyz_to_cielab(linsrgb_to_xyz(linear), refwhite)
    return cielab_to_msh(cielab)
def msh_to_srgb(msh, refwhite):
    """Convert Moreland's Msh color space to sRGB, via CIE L*a*b* and XYZ.

    *msh* has shape (..., 3); *refwhite* is the CIE L*a*b* reference
    white of shape (3,). The result has the same shape as *msh*.
    """
    xyz = cielab_to_xyz(msh_to_cielab(msh), refwhite)
    return linsrgb_to_srgb(xyz_to_linsrgb(xyz))
# The Moreland divergent colormap generation algorithm.
def moreland_adjusthue(msh, m_unsat):
    """Moreland's AdjustHue procedure: pick a hue for the unsaturated
    companion of an Msh color.

    *msh* is one Msh color of shape (3,); *m_unsat* is a scalar
    magnitude. Returns the adjusted hue (h) value.
    """
    if msh[M] >= m_unsat:
        # Already at least as bright as the unsaturated target; "best we
        # can do" is the unchanged hue.
        return msh[H]

    spin = msh[S] * np.sqrt(m_unsat ** 2 - msh[M] ** 2) / (msh[M] * np.sin(msh[S]))

    # "Spin away from purple."
    if msh[H] > -np.pi / 3:
        return msh[H] + spin
    return msh[H] - spin
def moreland_interpolate_sampled(srgb1, srgb2, refwhite=CIELAB_D65,
                                 nsamples=DEFAULT_SAMPLE_POINTS):
    """Adapted from Moreland's InterpolateColor. This uses the full
    transformations to compute a color mapping at a set of sampled points.

    srgb1, srgb2 - Endpoint colors as sRGB triples.
    refwhite     - CIE L*a*b* reference white, shape (3,).
    nsamples     - Sample count per interpolation segment.

    Returns shape (4, m): row 0 is sample positions in [0, 1], rows 1-3
    the sRGB values there. m is nsamples, or 2*nsamples-1 when a white
    midpoint control color is inserted.
    """
    # Bug fix: `np.float` was removed in NumPy 1.20; the builtin `float`
    # is the equivalent dtype and works on all NumPy versions.
    msh1, msh2 = srgb_to_msh(np.asarray([srgb1, srgb2], dtype=float), refwhite)
    # Wrap the hue difference into (-pi, pi] before taking its size.
    raddiff = msh1[H] - msh2[H]
    while raddiff > np.pi:
        raddiff -= 2 * np.pi
    while raddiff < -np.pi:
        raddiff += 2 * np.pi
    raddiff = np.abs(raddiff)
    x = np.linspace(0, 1, nsamples).reshape((nsamples, 1))
    x = np.repeat(x, 3, 1)
    if msh1[S] <= 0.05 or msh2[S] <= 0.05 or raddiff < np.pi/3:
        # Colors are too close together to comfortably put white in between.
        # Our interpolation won't have a control point, and won't actually be
        # divergent.
        if msh1[S] < 0.05 and msh2[S] > 0.05:
            msh1[H] = moreland_adjusthue(msh1, msh1[M])
        elif msh2[S] < 0.05 and msh1[S] > 0.05:
            msh2[H] = moreland_adjusthue(msh2, msh2[M])
        samples = np.empty((4, nsamples))
        msh = (1 - x) * msh1 + x * msh2
        samples[0] = x[:,0]
        samples[1:4] = msh_to_srgb(msh, refwhite).T
    else:
        # Colors are not too close together -- we can add a white control
        # point in the middle, and do two interpolations joined piecewise. We
        # then use 2*nsamples-1 (not actually nsamples -- shhh) samples for
        # the spline fit
        msh3 = msh2
        msh2a = np.asarray([np.max([msh1[M], msh3[M], 88]), 0, 0])
        msh2b = msh2a.copy()
        if msh1[S] < 0.05 and msh2a[S] > 0.05:
            msh1[H] = moreland_adjusthue(msh2a, msh1[M])
        elif msh2a[S] < 0.05 and msh1[S] > 0.05:
            msh2a[H] = moreland_adjusthue(msh1, msh2a[M])
        if msh2b[S] < 0.05 and msh3[S] > 0.05:
            msh2b[H] = moreland_adjusthue(msh3, msh2b[M])
        elif msh3[S] < 0.05 and msh2b[S] > 0.05:
            msh3[H] = moreland_adjusthue(msh2b, msh3[M])
        samples = np.empty((4, 2*nsamples-1))
        # First half: endpoint 1 to the white control point.
        msh = (1 - x) * msh1 + x * msh2a
        samples[0,:nsamples] = 0.5 * x[:,0]
        samples[1:4,:nsamples] = msh_to_srgb(msh, refwhite).T
        # Second half: white control point to endpoint 2 (shares the
        # midpoint column at index nsamples-1).
        msh = (1 - x) * msh2b + x * msh3
        samples[0,nsamples-1:] = 0.5 * x[:,0] + 0.5
        samples[1:4,nsamples-1:] = msh_to_srgb(msh, refwhite).T
    return samples
def moreland_bluered(transform='none'):
    """Divergent colormap from intense blue (at 0) to intense red (at 1),
    passing through white (Moreland's scheme)."""
    blue = [0.2305, 0.2969, 0.7500]
    red = [0.7031, 0.0156, 0.1484]
    return approx_colormap(moreland_interpolate_sampled(blue, red),
                           transform=transform)
# D. A. Green's "cube helix" colormap
def cubehelix_create(start, rotations, hue, gamma):
    """Return a mapper implementing D. A. Green's "cube helix" colormap.

    start/rotations control the helix through RGB space, *hue* the
    saturation amplitude, *gamma* the intensity curve. The returned
    function maps an array of shape S in [0, 1] to colors of shape
    S + (3,).
    """
    def colormap(values):
        values = np.asarray(values)
        flat = values.flatten()
        gv = flat ** gamma
        # Helix amplitude vanishes at both ends, so 0 -> black, 1 -> white.
        amp = 0.5 * hue * gv * (1 - gv)
        angle = 2 * np.pi * (0.3333333 * start + rotations * flat)
        c = np.cos(angle)
        s = np.sin(angle)
        channels = np.empty(flat.shape + (3,))
        channels[:, 0] = gv + amp * (-0.14861 * c + 1.78277 * s)
        channels[:, 1] = gv + amp * (-0.29227 * c - 0.90649 * s)
        channels[:, 2] = gv + amp * 1.97294 * c
        return channels.reshape(values.shape + (3,))

    return colormap
def cubehelix_sample(start, rotations, hue, gamma, nsamples=DEFAULT_SAMPLE_POINTS):
    """Sample a cube-helix colormap at *nsamples* uniform points.

    Returns shape (4, nsamples): row 0 holds the sample positions,
    rows 1-3 the RGB values there (the layout approx_colormap expects).
    """
    mapper = cubehelix_create(start, rotations, hue, gamma)
    positions = np.linspace(0, 1, nsamples)
    samples = np.empty((4, nsamples,))
    samples[0] = positions
    samples[1:] = mapper(positions).T
    return samples
def cubehelix_dagreen(transform='none'):
    """Cube-helix map from black to white through rainbow colors
    (D. A. Green's original parameters)."""
    return approx_colormap(cubehelix_sample(0.5, -1.5, 1.0, 1),
                           transform=transform)
def cubehelix_blue(transform='none'):
    """Cube-helix map from black to white with blue hues."""
    return approx_colormap(cubehelix_sample(0.5, -0.6, 1.2, 1),
                           transform=transform)
# Something quick I came up with based on the Moreland work, scaling from
# black to a bright-ish red.
def pkgw(transform='none', nsamples=DEFAULT_SAMPLE_POINTS):
    """The author's black-to-red (through purplish) colormap, defined by
    linear ramps in Msh space and converted to sRGB samples."""
    positions = np.linspace(0, 1, nsamples)
    msh = np.empty((nsamples, 3))
    msh[:, M] = 1. + 85 * positions
    msh[:, S] = 0.3 * positions + 0.7
    msh[:, H] = 2.9 * positions - 2.1
    samples = np.empty((4, nsamples))
    samples[0] = positions
    samples[1:4] = msh_to_srgb(msh, CIELAB_D65).T
    return approx_colormap(samples, transform=transform)
# Simple maps linear in RGB
def rgblinear_create(factor_r, factor_g, factor_b,
                     zero_r, zero_g, zero_b,
                     transform='none'):
    """Build a colormap that is linear in each RGB channel.

    Channel i is value * factor_i + zero_i, after applying *transform*
    ('none', 'reverse', or 'sqrt') to the input values.
    """
    if transform == 'reverse':
        pretransform = lambda v: 1 - v
    elif transform == 'sqrt':
        pretransform = np.sqrt
    elif transform == 'none':
        pretransform = lambda v: v
    else:
        raise ValueError('unknown transformation: ' + str(transform))

    factors = (factor_r, factor_g, factor_b)
    zeros = (zero_r, zero_g, zero_b)

    def colormap(values):
        values = pretransform(np.asarray(values))
        flat = values.flatten()
        mapped = np.empty(flat.shape + (3,))
        for chan in range(3):
            mapped[:, chan] = flat * factors[chan] + zeros[chan]
        return mapped.reshape(values.shape + (3,))

    return colormap
def black_to_white(transform='none'):
    """Linear ramp from black to white."""
    return rgblinear_create(1, 1, 1, 0, 0, 0, transform)


def black_to_red(transform='none'):
    """Linear ramp from black to red."""
    return rgblinear_create(1, 0, 0, 0, 0, 0, transform)


def black_to_green(transform='none'):
    """Linear ramp from black to green."""
    return rgblinear_create(0, 1, 0, 0, 0, 0, transform)


def black_to_blue(transform='none'):
    """Linear ramp from black to blue."""
    return rgblinear_create(0, 0, 1, 0, 0, 0, transform)


def white_to_black(transform='none'):
    """Linear ramp from white to black."""
    return rgblinear_create(-1, -1, -1, 1, 1, 1, transform)


def white_to_red(transform='none'):
    """Linear ramp from white to red."""
    return rgblinear_create(0, -1, -1, 1, 1, 1, transform)


def white_to_green(transform='none'):
    """Linear ramp from white to green."""
    return rgblinear_create(-1, 0, -1, 1, 1, 1, transform)


def white_to_blue(transform='none'):
    """Linear ramp from white to blue."""
    return rgblinear_create(-1, -1, 0, 1, 1, 1, transform)
# Useful for introspection
#
# Factories return a function that maps values between 0 and 1 into RGB values
# between 0 and 1, and accept a keyword argument 'transform' that can perform
# primitive transforms on the direction and scaling of the colormap.
# Built from the base names; transformed variants are added below.
factory_map = dict((n, globals()[n]) for n in base_factory_names)
def _make_transformed(factory, transform):
# We have to create these helper functions in a separate function because
# otherwise some all of the new dict entries end up referencing the
# finally-created subfunction.
def newfactory():
return factory(transform=transform)
return newfactory
def _fill_transforms():
    # Register "<name>_reverse" and "<name>_sqrt" variants of every base
    # colormap factory in factory_map.
    for transform in('reverse', 'sqrt'):
        for n in base_factory_names:
            factory = globals()[n]
            factory_map[n + '_' + transform] = \
                _make_transformed(factory, transform)
# Populate the transformed entries at import time.
_fill_transforms()
# Infrastructure for quickly rendering color maps.
def showdemo(factoryname, **kwargs):
    """Render the named colormap as a horizontal gradient in a GTK window.

    *factoryname* is a key of factory_map; extra keyword arguments are
    passed to the factory (e.g. transform='sqrt'). Blocks in Gtk.main()
    until the window is closed.
    """
    import gi
    gi.require_version('Gdk', '3.0')
    gi.require_version('Gtk', '3.0')
    # NOTE(review): GLib, GObject and Gdk appear unused below.
    from gi.repository import GLib, GObject, Gdk, Gtk
    import cairo
    # NOTE(review): local W, H shadow the module-level channel constants
    # (H in particular).
    W, H = 512, 100
    colormap = factory_map[factoryname](**kwargs)
    # Build a W-wide ramp of values in [0, 1], replicated over H rows.
    array = np.linspace(0, 1, W)
    array = array.reshape((W, 1))
    array = np.repeat(array, H, 1).T
    mapped = colormap(array)
    # Pack RGB floats into 32-bit ARGB pixels, alpha forced opaque.
    argb = np.empty((H, W), dtype=np.uint32)
    argb.fill(0xFF000000)
    argb |= (mapped[:,:,0] * 0xFF).astype(np.uint32) << 16
    argb |= (mapped[:,:,1] * 0xFF).astype(np.uint32) << 8
    argb |= (mapped[:,:,2] * 0xFF).astype(np.uint32)
    surf = cairo.ImageSurface.create_for_data(argb, cairo.FORMAT_ARGB32, W, H, W * 4)
    def draw(widget, ctxt):
        # Blit the precomputed surface without tiling or smoothing.
        ctxt.set_source_surface(surf, 0, 0)
        pat = ctxt.get_source()
        pat.set_extend(cairo.EXTEND_NONE)
        pat.set_filter(cairo.FILTER_NEAREST)
        ctxt.paint()
        return True
    da = Gtk.DrawingArea()
    da.connect('draw', draw)
    win = Gtk.Window(type=Gtk.WindowType.TOPLEVEL)
    win.set_title('Colormap Demo - ' + factoryname)
    win.set_default_size(W, H)
    win.connect('destroy', Gtk.main_quit)
    win.add(da)
    win.show_all()
    Gtk.main()
def printmaps():
    """Print the names of all registered colormaps, one per line."""
    print('Available color maps:')
    for name in sorted(six.iterkeys(factory_map)):
        print('\t' + name)
if __name__ == '__main__':
    import sys
    # With no argument, list the available maps; otherwise open a GTK
    # window rendering the named map.
    if len(sys.argv) < 2:
        printmaps()
    else:
        showdemo(sys.argv[1])
| |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Performs per host Linux Bridge configuration for Neutron.
# Based on the structure of the OpenVSwitch agent in the
# Neutron OpenVSwitch Plugin.
# @author: Sumit Naiksatam, Cisco Systems, Inc.
import os
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from neutron.agent import l2population_rpc as l2pop_rpc
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as logging_config
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.rpc import dispatcher
from neutron.plugins.common import constants as p_const
from neutron.plugins.linuxbridge.common import config # noqa
from neutron.plugins.linuxbridge.common import constants as lconst
LOG = logging.getLogger(__name__)
# Neutron bridge devices are named "brq" + first 11 chars of the network id.
BRIDGE_NAME_PREFIX = "brq"
TAP_INTERFACE_PREFIX = "tap"
# sysfs root for network-device introspection.
BRIDGE_FS = "/sys/devices/virtual/net/"
BRIDGE_NAME_PLACEHOLDER = "bridge_name"
# Template for the directory listing the interfaces enslaved to a bridge.
BRIDGE_INTERFACES_FS = BRIDGE_FS + BRIDGE_NAME_PLACEHOLDER + "/brif/"
DEVICE_NAME_PLACEHOLDER = "device_name"
# Path that exists only while a device is enslaved to some bridge.
BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + DEVICE_NAME_PLACEHOLDER + "/brport"
VXLAN_INTERFACE_PREFIX = "vxlan-"
class NetworkSegment:
    """Value object describing how a network is realized on this host.

    Bundles the network type, physical network name, and segmentation
    id, as cached in LinuxBridgeManager.network_map.
    """

    def __init__(self, network_type, physical_network, segmentation_id):
        self.segmentation_id = segmentation_id
        self.physical_network = physical_network
        self.network_type = network_type
class LinuxBridgeManager:
    def __init__(self, interface_mappings, root_helper):
        # Maps physical network names to host NIC names; used to pick the
        # interface backing flat/vlan networks.
        self.interface_mappings = interface_mappings
        self.root_helper = root_helper
        self.ip = ip_lib.IPWrapper(self.root_helper)
        # VXLAN related parameters:
        self.local_ip = cfg.CONF.VXLAN.local_ip
        self.vxlan_mode = lconst.VXLAN_NONE
        if cfg.CONF.VXLAN.enable_vxlan:
            # Resolve the device owning local_ip; vxlan interfaces are
            # created on it (see ensure_vxlan). check_vxlan_support is
            # defined elsewhere in this class, outside this view.
            self.local_int = self.get_interface_by_ip(self.local_ip)
            if self.local_int:
                self.check_vxlan_support()
            else:
                LOG.warning(_('VXLAN is enabled, a valid local_ip '
                              'must be provided'))
        # Store network mapping to segments
        self.network_map = {}
def interface_exists_on_bridge(self, bridge, interface):
directory = '/sys/class/net/%s/brif' % bridge
for filename in os.listdir(directory):
if filename == interface:
return True
return False
def get_bridge_name(self, network_id):
if not network_id:
LOG.warning(_("Invalid Network ID, will lead to incorrect bridge"
"name"))
bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11]
return bridge_name
def get_subinterface_name(self, physical_interface, vlan_id):
if not vlan_id:
LOG.warning(_("Invalid VLAN ID, will lead to incorrect "
"subinterface name"))
subinterface_name = '%s.%s' % (physical_interface, vlan_id)
return subinterface_name
def get_tap_device_name(self, interface_id):
if not interface_id:
LOG.warning(_("Invalid Interface ID, will lead to incorrect "
"tap device name"))
tap_device_name = TAP_INTERFACE_PREFIX + interface_id[0:11]
return tap_device_name
def get_vxlan_device_name(self, segmentation_id):
if 0 <= int(segmentation_id) <= constants.MAX_VXLAN_VNI:
return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
else:
LOG.warning(_("Invalid Segmentation ID: %s, will lead to "
"incorrect vxlan device name"), segmentation_id)
def get_all_neutron_bridges(self):
neutron_bridge_list = []
bridge_list = os.listdir(BRIDGE_FS)
for bridge in bridge_list:
if bridge.startswith(BRIDGE_NAME_PREFIX):
neutron_bridge_list.append(bridge)
return neutron_bridge_list
def get_interfaces_on_bridge(self, bridge_name):
if ip_lib.device_exists(bridge_name, root_helper=self.root_helper):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
return os.listdir(bridge_interface_path)
else:
return []
def get_tap_devices_count(self, bridge_name):
bridge_interface_path = BRIDGE_INTERFACES_FS.replace(
BRIDGE_NAME_PLACEHOLDER, bridge_name)
try:
if_list = os.listdir(bridge_interface_path)
return len([interface for interface in if_list if
interface.startswith(TAP_INTERFACE_PREFIX)])
except OSError:
return 0
def get_interface_by_ip(self, ip):
for device in self.ip.get_devices():
if device.addr.list(to=ip):
return device.name
def get_bridge_for_tap_device(self, tap_device_name):
bridges = self.get_all_neutron_bridges()
for bridge in bridges:
interfaces = self.get_interfaces_on_bridge(bridge)
if tap_device_name in interfaces:
return bridge
return None
def is_device_on_bridge(self, device_name):
if not device_name:
return False
else:
bridge_port_path = BRIDGE_PORT_FS_FOR_DEVICE.replace(
DEVICE_NAME_PLACEHOLDER, device_name)
return os.path.exists(bridge_port_path)
    def ensure_vlan_bridge(self, network_id, physical_interface, vlan_id):
        """Create a vlan and bridge unless they already exist."""
        interface = self.ensure_vlan(physical_interface, vlan_id)
        bridge_name = self.get_bridge_name(network_id)
        # Migrate the subinterface's IPs/gateway onto the bridge.
        ips, gateway = self.get_interface_details(interface)
        # Returns the vlan subinterface name only on success
        # (ensure_bridge returns None when a setup command fails).
        if self.ensure_bridge(bridge_name, interface, ips, gateway):
            return interface
def ensure_vxlan_bridge(self, network_id, segmentation_id):
"""Create a vxlan and bridge unless they already exist."""
interface = self.ensure_vxlan(segmentation_id)
if not interface:
LOG.error(_("Failed creating vxlan interface for "
"%(segmentation_id)s"),
{segmentation_id: segmentation_id})
return
bridge_name = self.get_bridge_name(network_id)
self.ensure_bridge(bridge_name, interface)
return interface
    def get_interface_details(self, interface):
        """Return (global-scope IP list, default gateway) for *interface*,
        as consumed by ensure_bridge/update_interface_ip_details."""
        device = self.ip.device(interface)
        ips = device.addr.list(scope='global')
        # Update default gateway if necessary
        gateway = device.route.get_gateway(scope='global')
        return ips, gateway
    def ensure_flat_bridge(self, network_id, physical_interface):
        """Create a non-vlan bridge unless it already exists."""
        bridge_name = self.get_bridge_name(network_id)
        ips, gateway = self.get_interface_details(physical_interface)
        # Returns the physical interface name only on success
        # (ensure_bridge returns None when a setup command fails).
        if self.ensure_bridge(bridge_name, physical_interface, ips, gateway):
            return physical_interface
def ensure_local_bridge(self, network_id):
"""Create a local bridge unless it already exists."""
bridge_name = self.get_bridge_name(network_id)
return self.ensure_bridge(bridge_name)
    def ensure_vlan(self, physical_interface, vlan_id):
        """Create a vlan unless it already exists."""
        interface = self.get_subinterface_name(physical_interface, vlan_id)
        if not ip_lib.device_exists(interface, root_helper=self.root_helper):
            LOG.debug(_("Creating subinterface %(interface)s for "
                        "VLAN %(vlan_id)s on interface "
                        "%(physical_interface)s"),
                      {'interface': interface, 'vlan_id': vlan_id,
                       'physical_interface': physical_interface})
            # A truthy return from utils.execute is treated as command
            # failure here (presumably non-empty output means the ip
            # command complained); each step bails out returning None.
            if utils.execute(['ip', 'link', 'add', 'link',
                              physical_interface,
                              'name', interface, 'type', 'vlan', 'id',
                              vlan_id], root_helper=self.root_helper):
                return
            if utils.execute(['ip', 'link', 'set',
                              interface, 'up'], root_helper=self.root_helper):
                return
            LOG.debug(_("Done creating subinterface %s"), interface)
        # Returns the subinterface name whether it was created or already
        # present.
        return interface
    def ensure_vxlan(self, segmentation_id):
        """Create a vxlan unless it already exists."""
        interface = self.get_vxlan_device_name(segmentation_id)
        if not ip_lib.device_exists(interface, root_helper=self.root_helper):
            LOG.debug(_("Creating vxlan interface %(interface)s for "
                        "VNI %(segmentation_id)s"),
                      {'interface': interface,
                       'segmentation_id': segmentation_id})
            # The vxlan endpoint rides on the device owning local_ip
            # (resolved in __init__).
            args = {'dev': self.local_int}
            if self.vxlan_mode == lconst.VXLAN_MCAST:
                args['group'] = cfg.CONF.VXLAN.vxlan_group
            # Optional tunnel attributes from configuration.
            if cfg.CONF.VXLAN.ttl:
                args['ttl'] = cfg.CONF.VXLAN.ttl
            if cfg.CONF.VXLAN.tos:
                args['tos'] = cfg.CONF.VXLAN.tos
            if cfg.CONF.VXLAN.l2_population:
                # ARP proxying is enabled when l2population drives FDB
                # entries.
                args['proxy'] = True
            int_vxlan = self.ip.add_vxlan(interface, segmentation_id, **args)
            int_vxlan.link.set_up()
            LOG.debug(_("Done creating vxlan interface %s"), interface)
        return interface
    def update_interface_ip_details(self, destination, source, ips,
                                    gateway):
        """Move IP addresses and the default gateway from *source* onto
        *destination* (typically physical interface -> bridge).

        *ips* is a list of dicts with 'ip_version', 'cidr', 'broadcast';
        *gateway* is a dict with 'gateway' and optional 'metric'.
        """
        if ips or gateway:
            dst_device = self.ip.device(destination)
            src_device = self.ip.device(source)
        # Append IP's to bridge if necessary. Addresses are added to the
        # destination before being removed from the source — presumably to
        # minimize the connectivity gap; verify.
        if ips:
            for ip in ips:
                dst_device.addr.add(ip_version=ip['ip_version'],
                                    cidr=ip['cidr'],
                                    broadcast=ip['broadcast'])
        if gateway:
            # Ensure that the gateway can be updated by changing the metric
            metric = 100
            if 'metric' in gateway:
                metric = gateway['metric'] - 1
            dst_device.route.add_gateway(gateway=gateway['gateway'],
                                         metric=metric)
            src_device.route.delete_gateway(gateway=gateway['gateway'])
        # Remove IP's from interface
        if ips:
            for ip in ips:
                src_device.addr.delete(ip_version=ip['ip_version'],
                                       cidr=ip['cidr'])
def _bridge_exists_and_ensure_up(self, bridge_name):
"""Check if the bridge exists and make sure it is up."""
br = ip_lib.IPDevice(bridge_name, self.root_helper)
try:
# If the device doesn't exist this will throw a RuntimeError
br.link.set_up()
except RuntimeError:
return False
return True
    def ensure_bridge(self, bridge_name, interface=None, ips=None,
                      gateway=None):
        """Create a bridge unless it already exists.

        Optionally enslaves *interface* to it, migrating *ips* and the
        default *gateway* from the interface onto the bridge. Returns the
        bridge name on success, None when a setup command fails.
        """
        # _bridge_exists_and_ensure_up instead of device_exists is used here
        # because there are cases where the bridge exists but it's not UP,
        # for example:
        # 1) A greenthread was executing this function and had not yet executed
        # "ip link set bridge_name up" before eventlet switched to this
        # thread running the same function
        # 2) The Nova VIF driver was running concurrently and had just created
        # the bridge, but had not yet put it UP
        if not self._bridge_exists_and_ensure_up(bridge_name):
            LOG.debug(_("Starting bridge %(bridge_name)s for subinterface "
                        "%(interface)s"),
                      {'bridge_name': bridge_name, 'interface': interface})
            # A truthy return from utils.execute is treated as command
            # failure; each step bails out returning None.
            if utils.execute(['brctl', 'addbr', bridge_name],
                             root_helper=self.root_helper):
                return
            # Zero forwarding delay and no STP: these bridges carry a
            # single uplink, so spanning tree is unnecessary.
            if utils.execute(['brctl', 'setfd', bridge_name,
                              str(0)], root_helper=self.root_helper):
                return
            if utils.execute(['brctl', 'stp', bridge_name,
                              'off'], root_helper=self.root_helper):
                return
            if utils.execute(['ip', 'link', 'set', bridge_name,
                              'up'], root_helper=self.root_helper):
                return
            LOG.debug(_("Done starting bridge %(bridge_name)s for "
                        "subinterface %(interface)s"),
                      {'bridge_name': bridge_name, 'interface': interface})
        if not interface:
            return bridge_name
        # Update IP info if necessary
        self.update_interface_ip_details(bridge_name, interface, ips, gateway)
        # Check if the interface is part of the bridge
        if not self.interface_exists_on_bridge(bridge_name, interface):
            try:
                # Check if the interface is not enslaved in another bridge
                if self.is_device_on_bridge(interface):
                    bridge = self.get_bridge_for_tap_device(interface)
                    utils.execute(['brctl', 'delif', bridge, interface],
                                  root_helper=self.root_helper)
                utils.execute(['brctl', 'addif', bridge_name, interface],
                              root_helper=self.root_helper)
            except Exception as e:
                LOG.error(_("Unable to add %(interface)s to %(bridge_name)s! "
                            "Exception: %(e)s"),
                          {'interface': interface, 'bridge_name': bridge_name,
                           'e': e})
                return
        return bridge_name
def ensure_physical_in_bridge(self, network_id,
network_type,
physical_network,
segmentation_id):
if network_type == p_const.TYPE_VXLAN:
if self.vxlan_mode == lconst.VXLAN_NONE:
LOG.error(_("Unable to add vxlan interface for network %s"),
network_id)
return
return self.ensure_vxlan_bridge(network_id, segmentation_id)
physical_interface = self.interface_mappings.get(physical_network)
if not physical_interface:
LOG.error(_("No mapping for physical network %s"),
physical_network)
return
if network_type == p_const.TYPE_FLAT:
return self.ensure_flat_bridge(network_id, physical_interface)
elif network_type == p_const.TYPE_VLAN:
return self.ensure_vlan_bridge(network_id, physical_interface,
segmentation_id)
else:
LOG.error(_("Unknown network_type %(network_type)s for network "
"%(network_id)s."), {network_type: network_type,
network_id: network_id})
    def add_tap_interface(self, network_id, network_type, physical_network,
                          segmentation_id, tap_device_name):
        """Add tap interface.

        If a VIF has been plugged into a network, this function will
        add the corresponding tap device to the relevant bridge.
        Returns True when the tap device is (or already was) attached,
        False when the device is missing or the bridge setup fails.
        """
        if not ip_lib.device_exists(tap_device_name,
                                    root_helper=self.root_helper):
            # The VIF's tap device has not been created (yet) on this host.
            LOG.debug(_("Tap device: %s does not exist on "
                        "this host, skipped"), tap_device_name)
            return False
        bridge_name = self.get_bridge_name(network_id)
        if network_type == p_const.TYPE_LOCAL:
            # Local networks need only an isolated bridge, no uplink.
            self.ensure_local_bridge(network_id)
        elif not self.ensure_physical_in_bridge(network_id,
                                                network_type,
                                                physical_network,
                                                segmentation_id):
            return False
        # Check if device needs to be added to bridge
        tap_device_in_bridge = self.get_bridge_for_tap_device(tap_device_name)
        if not tap_device_in_bridge:
            data = {'tap_device_name': tap_device_name,
                    'bridge_name': bridge_name}
            msg = _("Adding device %(tap_device_name)s to bridge "
                    "%(bridge_name)s") % data
            LOG.debug(msg)
            # utils.execute returning truthy output indicates failure here.
            if utils.execute(['brctl', 'addif', bridge_name, tap_device_name],
                             root_helper=self.root_helper):
                return False
        else:
            data = {'tap_device_name': tap_device_name,
                    'bridge_name': bridge_name}
            msg = _("%(tap_device_name)s already exists on bridge "
                    "%(bridge_name)s") % data
            LOG.debug(msg)
        return True
def add_interface(self, network_id, network_type, physical_network,
segmentation_id, port_id):
self.network_map[network_id] = NetworkSegment(network_type,
physical_network,
segmentation_id)
tap_device_name = self.get_tap_device_name(port_id)
return self.add_tap_interface(network_id, network_type,
physical_network, segmentation_id,
tap_device_name)
    def delete_vlan_bridge(self, bridge_name):
        """Tear down a bridge, detaching ports and restoring IP config."""
        if ip_lib.device_exists(bridge_name, root_helper=self.root_helper):
            interfaces_on_bridge = self.get_interfaces_on_bridge(bridge_name)
            for interface in interfaces_on_bridge:
                self.remove_interface(bridge_name, interface)
                if interface.startswith(VXLAN_INTERFACE_PREFIX):
                    # vxlan devices are simply deleted; nothing to restore.
                    self.delete_vxlan(interface)
                    continue
                for physical_interface in self.interface_mappings.itervalues():
                    if (interface.startswith(physical_interface)):
                        ips, gateway = self.get_interface_details(bridge_name)
                        if ips:
                            # This is a flat network or a VLAN interface that
                            # was setup outside of neutron => return IP's from
                            # bridge to interface
                            self.update_interface_ip_details(interface,
                                                             bridge_name,
                                                             ips, gateway)
                        elif physical_interface != interface:
                            # A vlan subinterface created by this agent.
                            self.delete_vlan(interface)

            LOG.debug(_("Deleting bridge %s"), bridge_name)
            if utils.execute(['ip', 'link', 'set', bridge_name, 'down'],
                             root_helper=self.root_helper):
                return
            if utils.execute(['brctl', 'delbr', bridge_name],
                             root_helper=self.root_helper):
                return
            LOG.debug(_("Done deleting bridge %s"), bridge_name)
        else:
            LOG.error(_("Cannot delete bridge %s, does not exist"),
                      bridge_name)
def remove_empty_bridges(self):
for network_id in self.network_map.keys():
bridge_name = self.get_bridge_name(network_id)
if not self.get_tap_devices_count(bridge_name):
self.delete_vlan_bridge(bridge_name)
del self.network_map[network_id]
    def remove_interface(self, bridge_name, interface_name):
        """Detach interface_name from bridge_name.

        Returns True when the device ends up off the bridge (including
        when it was never attached), False on command failure or when
        the bridge itself does not exist.
        """
        if ip_lib.device_exists(bridge_name, root_helper=self.root_helper):
            if not self.is_device_on_bridge(interface_name):
                # Nothing to do; treat as success.
                return True
            LOG.debug(_("Removing device %(interface_name)s from bridge "
                        "%(bridge_name)s"),
                      {'interface_name': interface_name,
                       'bridge_name': bridge_name})
            # Truthy output from utils.execute signals failure here.
            if utils.execute(['brctl', 'delif', bridge_name, interface_name],
                             root_helper=self.root_helper):
                return False
            LOG.debug(_("Done removing device %(interface_name)s from bridge "
                        "%(bridge_name)s"),
                      {'interface_name': interface_name,
                       'bridge_name': bridge_name})
            return True
        else:
            LOG.debug(_("Cannot remove device %(interface_name)s bridge "
                        "%(bridge_name)s does not exist"),
                      {'interface_name': interface_name,
                       'bridge_name': bridge_name})
            return False
def delete_vlan(self, interface):
if ip_lib.device_exists(interface, root_helper=self.root_helper):
LOG.debug(_("Deleting subinterface %s for vlan"), interface)
if utils.execute(['ip', 'link', 'set', interface, 'down'],
root_helper=self.root_helper):
return
if utils.execute(['ip', 'link', 'delete', interface],
root_helper=self.root_helper):
return
LOG.debug(_("Done deleting subinterface %s"), interface)
def delete_vxlan(self, interface):
if ip_lib.device_exists(interface, root_helper=self.root_helper):
LOG.debug(_("Deleting vxlan interface %s for vlan"),
interface)
int_vxlan = self.ip.device(interface)
int_vxlan.link.set_down()
int_vxlan.link.delete()
LOG.debug(_("Done deleting vxlan interface %s"), interface)
def update_devices(self, registered_devices):
devices = self.get_tap_devices()
if devices == registered_devices:
return
added = devices - registered_devices
removed = registered_devices - devices
return {'current': devices,
'added': added,
'removed': removed}
def get_tap_devices(self):
devices = set()
for device in os.listdir(BRIDGE_FS):
if device.startswith(TAP_INTERFACE_PREFIX):
devices.add(device)
return devices
    def vxlan_ucast_supported(self):
        """Return True if VXLAN UCAST (unicast fdb) mode is usable.

        Requires l2_population and 'bridge fdb append' support; verified
        by creating a throwaway vxlan device and appending a test entry.
        """
        if not cfg.CONF.VXLAN.l2_population:
            return False
        if not ip_lib.iproute_arg_supported(
                ['bridge', 'fdb'], 'append', self.root_helper):
            LOG.warning(_('Option "%(option)s" must be supported by command '
                          '"%(command)s" to enable %(mode)s mode') %
                        {'option': 'append',
                         'command': 'bridge fdb',
                         'mode': 'VXLAN UCAST'})
            return False
        # Pick a segmentation ID whose vxlan device does not exist yet so
        # the probe cannot clash with a real network.
        for segmentation_id in range(1, constants.MAX_VXLAN_VNI + 1):
            if not ip_lib.device_exists(
                    self.get_vxlan_device_name(segmentation_id),
                    root_helper=self.root_helper):
                break
        else:
            # for/else: every candidate ID already has a device.
            LOG.error(_('No valid Segmentation ID to perform UCAST test.'))
            return False

        test_iface = self.ensure_vxlan(segmentation_id)
        try:
            utils.execute(
                cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0],
                     'dev', test_iface, 'dst', '1.1.1.1'],
                root_helper=self.root_helper)
            return True
        except RuntimeError:
            return False
        finally:
            # Always remove the probe device.
            self.delete_vxlan(test_iface)
def vxlan_mcast_supported(self):
if not cfg.CONF.VXLAN.vxlan_group:
LOG.warning(_('VXLAN muticast group must be provided in '
'vxlan_group option to enable VXLAN MCAST mode'))
return False
if not ip_lib.iproute_arg_supported(
['ip', 'link', 'add', 'type', 'vxlan'],
'proxy', self.root_helper):
LOG.warning(_('Option "%(option)s" must be supported by command '
'"%(command)s" to enable %(mode)s mode') %
{'option': 'proxy',
'command': 'ip link add type vxlan',
'mode': 'VXLAN MCAST'})
return False
return True
def vxlan_module_supported(self):
try:
utils.execute(cmd=['modinfo', 'vxlan'])
return True
except RuntimeError:
return False
    def check_vxlan_support(self):
        """Select the best available VXLAN mode for this host.

        Sets self.vxlan_mode to UCAST or MCAST, and raises
        VxlanNetworkUnsupported when neither mode nor the kernel
        module is available.
        """
        self.vxlan_mode = lconst.VXLAN_NONE
        if not self.vxlan_module_supported():
            LOG.error(_('Linux kernel vxlan module and iproute2 3.8 or above '
                        'are required to enable VXLAN.'))
            raise exceptions.VxlanNetworkUnsupported()

        # Prefer unicast (l2population) mode over multicast.
        if self.vxlan_ucast_supported():
            self.vxlan_mode = lconst.VXLAN_UCAST
        elif self.vxlan_mcast_supported():
            self.vxlan_mode = lconst.VXLAN_MCAST
        else:
            raise exceptions.VxlanNetworkUnsupported()
        LOG.debug(_('Using %s VXLAN mode'), self.vxlan_mode)
def fdb_ip_entry_exists(self, mac, ip, interface):
entries = utils.execute(['ip', 'neigh', 'show', 'to', ip,
'dev', interface],
root_helper=self.root_helper)
return mac in entries
def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None):
entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface],
root_helper=self.root_helper)
if not agent_ip:
return mac in entries
return (agent_ip in entries and mac in entries)
def add_fdb_ip_entry(self, mac, ip, interface):
utils.execute(['ip', 'neigh', 'replace', ip, 'lladdr', mac,
'dev', interface, 'nud', 'permanent'],
root_helper=self.root_helper,
check_exit_code=False)
def remove_fdb_ip_entry(self, mac, ip, interface):
utils.execute(['ip', 'neigh', 'del', ip, 'lladdr', mac,
'dev', interface],
root_helper=self.root_helper,
check_exit_code=False)
def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"):
utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface,
'dst', agent_ip],
root_helper=self.root_helper,
check_exit_code=False)
def remove_fdb_bridge_entry(self, mac, agent_ip, interface):
utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface,
'dst', agent_ip],
root_helper=self.root_helper,
check_exit_code=False)
def add_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.add_fdb_ip_entry(mac, ip, interface)
self.add_fdb_bridge_entry(mac, agent_ip, interface)
elif self.vxlan_mode == lconst.VXLAN_UCAST:
if self.fdb_bridge_entry_exists(mac, interface):
self.add_fdb_bridge_entry(mac, agent_ip, interface,
"append")
else:
self.add_fdb_bridge_entry(mac, agent_ip, interface)
def remove_fdb_entries(self, agent_ip, ports, interface):
for mac, ip in ports:
if mac != constants.FLOODING_ENTRY[0]:
self.remove_fdb_ip_entry(mac, ip, interface)
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
elif self.vxlan_mode == lconst.VXLAN_UCAST:
self.remove_fdb_bridge_entry(mac, agent_ip, interface)
class LinuxBridgeRpcCallbacks(rpc_compat.RpcCallback,
                              sg_rpc.SecurityGroupAgentRpcCallbackMixin,
                              l2pop_rpc.L2populationRpcCallBackMixin):
    """RPC callbacks the plugin invokes on the linuxbridge agent."""

    # Set RPC API version to 1.0 by default.
    # history
    #   1.1 Support Security Group RPC
    RPC_API_VERSION = '1.1'

    def __init__(self, context, agent):
        super(LinuxBridgeRpcCallbacks, self).__init__()
        self.context = context
        self.agent = agent
        # The agent itself implements the security group agent RPC API.
        self.sg_agent = agent

    def network_delete(self, context, **kwargs):
        """Delete the local bridge backing a deleted network."""
        LOG.debug(_("network_delete received"))
        network_id = kwargs.get('network_id')
        bridge_name = self.agent.br_mgr.get_bridge_name(network_id)
        LOG.debug(_("Delete %s"), bridge_name)
        self.agent.br_mgr.delete_vlan_bridge(bridge_name)

    def port_update(self, context, **kwargs):
        """Re-wire a port's tap device after a port update."""
        LOG.debug(_("port_update received"))
        # Check port exists on node
        port = kwargs.get('port')
        tap_device_name = self.agent.br_mgr.get_tap_device_name(port['id'])
        devices = self.agent.br_mgr.get_tap_devices()
        if tap_device_name not in devices:
            return

        if 'security_groups' in port:
            self.sg_agent.refresh_firewall()
        try:
            if port['admin_state_up']:
                network_type = kwargs.get('network_type')
                if network_type:
                    segmentation_id = kwargs.get('segmentation_id')
                else:
                    # compatibility with pre-Havana RPC vlan_id encoding
                    vlan_id = kwargs.get('vlan_id')
                    (network_type,
                     segmentation_id) = lconst.interpret_vlan_id(vlan_id)
                physical_network = kwargs.get('physical_network')
                # create the networking for the port
                if self.agent.br_mgr.add_interface(port['network_id'],
                                                   network_type,
                                                   physical_network,
                                                   segmentation_id,
                                                   port['id']):
                    # update plugin about port status
                    self.agent.plugin_rpc.update_device_up(self.context,
                                                           tap_device_name,
                                                           self.agent.agent_id,
                                                           cfg.CONF.host)
                else:
                    self.agent.plugin_rpc.update_device_down(
                        self.context,
                        tap_device_name,
                        self.agent.agent_id,
                        cfg.CONF.host
                    )
            else:
                bridge_name = self.agent.br_mgr.get_bridge_name(
                    port['network_id'])
                self.agent.br_mgr.remove_interface(bridge_name,
                                                   tap_device_name)
                # update plugin about port status
                self.agent.plugin_rpc.update_device_down(self.context,
                                                         tap_device_name,
                                                         self.agent.agent_id,
                                                         cfg.CONF.host)
        except rpc_compat.MessagingTimeout:
            LOG.error(_("RPC timeout while updating port %s"), port['id'])

    def fdb_add(self, context, fdb_entries):
        """Add forwarding entries received via l2population."""
        LOG.debug(_("fdb_add received"))
        for network_id, values in fdb_entries.items():
            segment = self.agent.br_mgr.network_map.get(network_id)
            # Bug fix: skip unknown or non-VXLAN networks with 'continue'
            # instead of 'return', so entries for the remaining networks
            # in this message are still processed.
            if not segment:
                continue
            if segment.network_type != p_const.TYPE_VXLAN:
                continue
            interface = self.agent.br_mgr.get_vxlan_device_name(
                segment.segmentation_id)
            agent_ports = values.get('ports')
            for agent_ip, ports in agent_ports.items():
                if agent_ip == self.agent.br_mgr.local_ip:
                    continue

                self.agent.br_mgr.add_fdb_entries(agent_ip,
                                                  ports,
                                                  interface)

    def fdb_remove(self, context, fdb_entries):
        """Remove forwarding entries received via l2population."""
        LOG.debug(_("fdb_remove received"))
        for network_id, values in fdb_entries.items():
            segment = self.agent.br_mgr.network_map.get(network_id)
            # Bug fix: 'continue' instead of 'return' (see fdb_add).
            if not segment:
                continue
            if segment.network_type != p_const.TYPE_VXLAN:
                continue
            interface = self.agent.br_mgr.get_vxlan_device_name(
                segment.segmentation_id)
            agent_ports = values.get('ports')
            for agent_ip, ports in agent_ports.items():
                if agent_ip == self.agent.br_mgr.local_ip:
                    continue

                self.agent.br_mgr.remove_fdb_entries(agent_ip,
                                                     ports,
                                                     interface)

    def _fdb_chg_ip(self, context, fdb_entries):
        """Update neigh entries after a port's IP address change."""
        LOG.debug(_("update chg_ip received"))
        for network_id, agent_ports in fdb_entries.items():
            segment = self.agent.br_mgr.network_map.get(network_id)
            # Bug fix: 'continue' instead of 'return' (see fdb_add).
            if not segment:
                continue
            if segment.network_type != p_const.TYPE_VXLAN:
                continue
            interface = self.agent.br_mgr.get_vxlan_device_name(
                segment.segmentation_id)
            for agent_ip, state in agent_ports.items():
                if agent_ip == self.agent.br_mgr.local_ip:
                    continue

                after = state.get('after')
                for mac, ip in after:
                    self.agent.br_mgr.add_fdb_ip_entry(mac, ip, interface)

                before = state.get('before')
                for mac, ip in before:
                    self.agent.br_mgr.remove_fdb_ip_entry(mac, ip, interface)

    def fdb_update(self, context, fdb_entries):
        """Dispatch each fdb update action to its _fdb_<action> handler."""
        LOG.debug(_("fdb_update received"))
        for action, values in fdb_entries.items():
            method = '_fdb_' + action
            if not hasattr(self, method):
                raise NotImplementedError()

            getattr(self, method)(context, values)

    def create_rpc_dispatcher(self):
        '''Get the rpc dispatcher for this manager.

        If a manager would like to set an rpc API version, or support more than
        one class as the target of rpc messages, override this method.
        '''
        return dispatcher.RpcDispatcher([self])
class LinuxBridgePluginApi(agent_rpc.PluginApi,
                           sg_rpc.SecurityGroupServerRpcApiMixin):
    """Agent-side plugin RPC API with security-group calls mixed in."""
    pass
class LinuxBridgeNeutronAgentRPC(sg_rpc.SecurityGroupAgentRpcMixin):
    """Main linuxbridge agent: wires tap devices and syncs via RPC."""

    def __init__(self, interface_mappings, polling_interval,
                 root_helper):
        self.polling_interval = polling_interval
        self.root_helper = root_helper
        self.setup_linux_bridge(interface_mappings)
        configurations = {'interface_mappings': interface_mappings}
        if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE:
            configurations['tunneling_ip'] = self.br_mgr.local_ip
            configurations['tunnel_types'] = [p_const.TYPE_VXLAN]
            configurations['l2_population'] = cfg.CONF.VXLAN.l2_population
        self.agent_state = {
            'binary': 'neutron-linuxbridge-agent',
            'host': cfg.CONF.host,
            'topic': constants.L2_AGENT_TOPIC,
            'configurations': configurations,
            'agent_type': constants.AGENT_TYPE_LINUXBRIDGE,
            'start_flag': True}

        self.setup_rpc(interface_mappings.values())
        self.init_firewall()

    def _report_state(self):
        """Periodic state report to the plugin (best effort)."""
        try:
            devices = len(self.br_mgr.get_tap_devices())
            self.agent_state.get('configurations')['devices'] = devices
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            # start_flag is only meaningful on the first report.
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_("Failed reporting state!"))

    def setup_rpc(self, physical_interfaces):
        """Build the agent ID and wire up all RPC consumers."""
        if physical_interfaces:
            mac = utils.get_interface_mac(physical_interfaces[0])
        else:
            # No physical interface mapped: derive the ID from any device.
            devices = ip_lib.IPWrapper(self.root_helper).get_devices(True)
            if devices:
                mac = utils.get_interface_mac(devices[0].name)
            else:
                LOG.error(_("Unable to obtain MAC address for unique ID. "
                            "Agent terminated!"))
                # Idiom fix: use sys.exit instead of the interactive-only
                # site builtin exit().
                sys.exit(1)
        self.agent_id = '%s%s' % ('lb', (mac.replace(":", "")))
        LOG.info(_("RPC agent_id: %s"), self.agent_id)

        self.topic = topics.AGENT
        self.plugin_rpc = LinuxBridgePluginApi(topics.PLUGIN)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # RPC network init
        self.context = context.get_admin_context_without_session()
        # Handle updates from service
        self.callbacks = LinuxBridgeRpcCallbacks(self.context,
                                                 self)
        self.dispatcher = self.callbacks.create_rpc_dispatcher()
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        if cfg.CONF.VXLAN.l2_population:
            consumers.append([topics.L2POPULATION,
                              topics.UPDATE, cfg.CONF.host])
        self.connection = agent_rpc.create_consumers(self.dispatcher,
                                                     self.topic,
                                                     consumers)
        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def setup_linux_bridge(self, interface_mappings):
        """Create the bridge manager used for all device operations."""
        self.br_mgr = LinuxBridgeManager(interface_mappings, self.root_helper)

    def remove_port_binding(self, network_id, interface_id):
        """Detach a port's tap device from its network bridge."""
        bridge_name = self.br_mgr.get_bridge_name(network_id)
        tap_device_name = self.br_mgr.get_tap_device_name(interface_id)
        return self.br_mgr.remove_interface(bridge_name, tap_device_name)

    def process_network_devices(self, device_info):
        """Handle added/removed devices; return True if a resync is needed."""
        resync_a = False
        resync_b = False
        if 'added' in device_info:
            resync_a = self.treat_devices_added(device_info['added'])
        if 'removed' in device_info:
            resync_b = self.treat_devices_removed(device_info['removed'])
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)

    def treat_devices_added(self, devices):
        """Wire up newly detected tap devices; return True to resync."""
        resync = False
        self.prepare_devices_filter(devices)
        for device in devices:
            LOG.debug(_("Port %s added"), device)
            try:
                details = self.plugin_rpc.get_device_details(self.context,
                                                             device,
                                                             self.agent_id)
            except Exception as e:
                LOG.debug(_("Unable to get port details for "
                            "%(device)s: %(e)s"),
                          {'device': device, 'e': e})
                resync = True
                continue
            if 'port_id' in details:
                LOG.info(_("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': details})
                if details['admin_state_up']:
                    # create the networking for the port
                    network_type = details.get('network_type')
                    if network_type:
                        segmentation_id = details.get('segmentation_id')
                    else:
                        # compatibility with pre-Havana RPC vlan_id encoding
                        vlan_id = details.get('vlan_id')
                        (network_type,
                         segmentation_id) = lconst.interpret_vlan_id(vlan_id)
                    if self.br_mgr.add_interface(details['network_id'],
                                                 network_type,
                                                 details['physical_network'],
                                                 segmentation_id,
                                                 details['port_id']):
                        # update plugin about port status
                        self.plugin_rpc.update_device_up(self.context,
                                                         device,
                                                         self.agent_id,
                                                         cfg.CONF.host)
                    else:
                        self.plugin_rpc.update_device_down(self.context,
                                                           device,
                                                           self.agent_id,
                                                           cfg.CONF.host)
                else:
                    self.remove_port_binding(details['network_id'],
                                             details['port_id'])
            else:
                LOG.info(_("Device %s not defined on plugin"), device)
        return resync

    def treat_devices_removed(self, devices):
        """Report removed devices and drop empty bridges; True to resync."""
        resync = False
        self.remove_devices_filter(devices)
        for device in devices:
            LOG.info(_("Attachment %s removed"), device)
            details = None
            try:
                details = self.plugin_rpc.update_device_down(self.context,
                                                             device,
                                                             self.agent_id,
                                                             cfg.CONF.host)
            except Exception as e:
                LOG.debug(_("port_removed failed for %(device)s: %(e)s"),
                          {'device': device, 'e': e})
                resync = True
            if details and details['exists']:
                LOG.info(_("Port %s updated."), device)
            else:
                LOG.debug(_("Device %s not defined on plugin"), device)
        self.br_mgr.remove_empty_bridges()
        return resync

    def daemon_loop(self):
        """Main polling loop: detect device deltas and apply them."""
        sync = True
        devices = set()

        LOG.info(_("LinuxBridge Agent RPC Daemon Started!"))

        while True:
            start = time.time()
            if sync:
                LOG.info(_("Agent out of sync with plugin!"))
                devices.clear()
                sync = False
            device_info = {}
            try:
                device_info = self.br_mgr.update_devices(devices)
            except Exception:
                LOG.exception(_("Update devices failed"))
                sync = True
            try:
                # notify plugin about device deltas
                if device_info:
                    LOG.debug(_("Agent loop has new devices!"))
                    # If treat devices fails - indicates must resync with
                    # plugin
                    sync = self.process_network_devices(device_info)
                    devices = device_info['current']
            except Exception:
                LOG.exception(_("Error in agent loop. Devices info: %s"),
                              device_info)
                sync = True
            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug(_("Loop iteration exceeded interval "
                            "(%(polling_interval)s vs. %(elapsed)s)!"),
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})
def main():
    """Entry point: load config, build the agent, run its polling loop."""
    cfg.CONF(project='neutron')
    logging_config.setup_logging(cfg.CONF)

    try:
        interface_mappings = q_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        # Misconfiguration is fatal; nothing useful can run without it.
        LOG.error(_("Parsing physical_interface_mappings failed: %s."
                    " Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_("Interface mappings: %s"), interface_mappings)

    polling_interval = cfg.CONF.AGENT.polling_interval
    root_helper = cfg.CONF.AGENT.root_helper
    agent = LinuxBridgeNeutronAgentRPC(interface_mappings,
                                       polling_interval,
                                       root_helper)
    LOG.info(_("Agent initialized successfully, now running... "))
    # daemon_loop() normally never returns.
    agent.daemon_loop()
    sys.exit(0)
# Allow running the agent directly as a script.
if __name__ == "__main__":
    main()
| |
from __future__ import print_function, division
import numpy as np
from astropy import log as logger
from ..grid.amr_grid import Grid, Level, AMRGrid
def parse_multi_tuple(string):
    """Parse whitespace-separated tuples into a tuple of tuples.

    e.g. ``"(1,2,3) (4,5,6)"`` -> ``((1, 2, 3), (4, 5, 6))``.
    """
    import ast
    string = string.replace(' ', '')
    string = string.replace(')(', '),(')
    # Security/idiom fix: literal_eval only accepts Python literals, so it
    # is safe on file-supplied header data, unlike eval().
    return ast.literal_eval(string)
class Star(object):
    """A star particle parsed from one line of a StarParticles file."""

    def __init__(self, line):
        columns = line.split()
        # Columns of interest: mass, position (x, y, z), radius,
        # accretion rate, and burn state.
        selected = [0, 1, 2, 3, 11, 14, 15]
        (self.m, self.x, self.y, self.z,
         self.r, self.mdot, self.burnstate) = [float(columns[i])
                                               for i in selected]
class OrionGrid(Grid):
    """A single Orion grid patch, extending the generic ``Grid``."""
    def __init__(self):
        # Index-space bounds and index types along each axis; filled in
        # while the multigrid header is parsed.
        self.imin, self.imax, self.itype = None, None, None
        self.jmin, self.jmax, self.jtype = None, None, None
        self.kmin, self.kmax, self.ktype = None, None, None
        Grid.__init__(self)
    def read_data(self, filename, offset, quantity_indices, verbose=False):
        """Read the selected quantity arrays for this patch.

        ``offset`` is the byte position of this grid's record header in
        ``filename``; ``quantity_indices`` maps quantity name to its
        component index within the record.
        """
        if verbose:
            logger.info("Reading %s" % filename)
        gridsize = self.nx * self.ny * self.nz
        # NOTE(review): the file handle is never closed explicitly;
        # presumably relies on GC — consider a context manager.
        f = open(filename, 'rb')
        f.seek(offset)
        header = f.readline().strip()
        # Parse a (size, (byte order list)) group from the record header.
        p1 = header.find('((') + 2
        p2 = header.find(',', p1)
        n_bytes = int(header[p1:p2])
        p3 = header.find('(', p2) + 1
        p4 = header.find('))', p3)
        bytes = [int(x) for x in header[p3:p4].split()]
        # NOTE(review): the values parsed above are immediately overwritten
        # by the second group below; presumably the header contains two
        # such groups and only the second is relevant — verify.
        p5 = header.find('(', p4) + 1
        p6 = header.find(',', p5)
        n_bytes = int(header[p5:p6])
        p7 = header.find('(', p6) + 1
        p8 = header.find('))', p7)
        bytes = [int(x) for x in header[p7:p8].split()]
        # Byte order 1..n maps to big-endian ('>'), n..1 to little-endian.
        if bytes == range(1, n_bytes + 1):
            endian = '>'
        elif bytes == range(n_bytes, 0, -1):
            endian = '<'
        else:
            raise Exception("Unexpected bytes: %s" % str(bytes))
        n_components = int(header.split()[-1])
        pos = f.tell()
        for quantity in quantity_indices:
            # Components are stored contiguously after the header, each
            # occupying n_bytes * gridsize bytes.
            f.seek(pos + quantity_indices[quantity] * n_bytes * gridsize)
            array = np.fromstring(f.read(n_bytes * gridsize),
                                  dtype='%sf%i' % (endian, n_bytes))
            self.quantities[quantity] = array.reshape(self.nz, self.ny, self.nx)
class OrionLevel(Level):
    """An AMR level carrying Orion-specific index bounds and periodicity."""

    def __init__(self):
        # All metadata is filled in later, while the Header is parsed.
        self.idxlo = None
        self.idxhi = None
        self.periodicity = None
        self.number = None

        Level.__init__(self)
class OrionAMRGrid(AMRGrid):
    """An AMR hierarchy read from an Orion plotfile directory."""

    def __init__(self, dirname, quantities, verbose=False, max_level=None):
        """Parse ``<dirname>/Header`` and the per-level multigrid files.

        ``quantities`` may be a single name, a list of names, or 'all'.
        ``max_level`` limits how many levels are read (default: all).
        """
        self.xmin, self.xmax = None, None
        self.ymin, self.ymax = None, None
        self.zmin, self.zmax = None, None
        AMRGrid.__init__(self)
        # Open file
        f = open('%s/Header' % dirname, 'rb')
        # Read version number
        version = f.readline().strip()
        # Read number of components
        n_quantities = int(f.readline().strip())
        # Read in component names
        available_quantities = [f.readline().strip()
                                for i in range(n_quantities)]
        # If a single quantity is requested as a string, make it into a list
        if isinstance(quantities, basestring):
            if quantities == 'all':
                quantities = available_quantities
            else:
                quantities = [quantities]
        # Make list of wanted quantities, and their indices
        quantity_indices = {}
        for quantity in quantities:
            quantity_indices[quantity] = available_quantities.index(quantity)
        # Read in number of dimensions
        ndim = int(f.readline().strip())
        if ndim != 3:
            raise Exception("Number of dimensions is not 3")
        # Read in time
        creation_time = float(f.readline().strip())
        # Read in maximum level of refinement
        n_levels = int(f.readline().strip()) + 1
        # Create list of levels
        self.levels = [OrionLevel() for i in range(n_levels)]
        if max_level is None:
            max_level = n_levels
        # Read in position of box corners
        self.xmin, self.ymin, self.zmin = [float(x) for x in f.readline().strip().split()]
        self.xmax, self.ymax, self.zmax = [float(x) for x in f.readline().strip().split()]
        # Read in refinement ratios
        refinement_ratios = [int(x) for x in f.readline().strip().split()]
        # Read in next line
        line = f.readline().strip()
        # Split into groups of ndim values
        elements = line.replace(' ', '').replace('((', '(').replace('))', ')')[1:-1].split(')(')
        # Bug fix: the loop previously indexed with a stale 'i' leaked from
        # an earlier (Python 2) list comprehension, so every level got the
        # same triplets.  Enumerate so each level reads its own group.
        for i, level in enumerate(self.levels):
            level.idxlo = [int(x) for x in elements[3 * i].split(',')]
            level.idxhi = [int(x) for x in elements[3 * i + 1].split(',')]
            level.periodicity = [int(x) for x in elements[3 * i + 2].split(',')]
        # Read in number of steps on each level
        levelsteps = [int(x) for x in f.readline().strip().split()]
        # Read in grid spacing on each level
        # gridspacing = np.zeros((self.ndim, self.maxlevel+1))
        # for level in self.levels:
        #     level.gridspacing = [float(x) for x in f.readline().strip().split()]
        for level in self.levels:
            f.readline()
        # Read in coordinate type
        coordtype = int(f.readline().strip())
        if coordtype != 0:
            raise Exception("coordtype should be zero")
        # Skip dummy line
        f.readline()
        # First part done. Now need to read in individual levels and grids
        # Loop through levels
        for level in self.levels[:max_level]:
            level_num, ngrids, creation_time = f.readline().strip().split()
            level.number = int(level_num)
            ngrids = int(ngrids)
            # Initialize grids
            level.grids = [OrionGrid() for igrid in range(ngrids)]
            levelsteps = int(f.readline().strip())
            # Physical bounds of each grid on this level.
            for grid in level.grids:
                grid.xmin, grid.xmax = [float(x) for x in f.readline().split()]
                grid.ymin, grid.ymax = [float(y) for y in f.readline().split()]
                grid.zmin, grid.zmax = [float(z) for z in f.readline().split()]
            n_quantities_check = 0
            nfiles = 0
            nfilecomp = []
            # Read filename header file
            fname = f.readline().strip()
            fh = open("%s/%s_H" % (dirname, fname))
            fh.readline()
            fh.readline()
            # Read the number of components in multigrid files
            ngridcomp = int(fh.readline())
            if ngridcomp != n_quantities:
                raise Exception("Only some of the components included in multigrid file")
            fh.readline()
            # Read the number of boxes
            ngrids_check = int(fh.readline().strip()[1:].split()[0])
            if ngrids_check != ngrids:
                raise Exception("Number of grids in multigrid file does not match known number")
            # Loop through the grids
            for grid in level.grids:
                values = parse_multi_tuple(fh.readline())
                grid.imin, grid.jmin, grid.kmin = values[0]
                grid.imax, grid.jmax, grid.kmax = values[1]
                grid.itype, grid.jtype, grid.ktype = values[2]
                grid.nx = grid.imax - grid.imin + 1
                grid.ny = grid.jmax - grid.jmin + 1
                grid.nz = grid.kmax - grid.kmin + 1
            fh.readline()
            fh.readline()
            # Each grid records which data file holds it and at what offset.
            for grid in level.grids:
                string = fh.readline().split(':')[1]
                filename = "%s/Level_%i/%s" % (dirname, level.number, string.split()[0].strip())
                offset = int(string.split()[1])
                grid.read_data(filename, offset, quantity_indices, verbose=verbose)
        # Throw away levels that aren't needed
        self.levels = self.levels[:max_level]
def parse_orion(dirname, quantities='density', verbose=False, max_level=None):
    """Read an Orion plotfile directory.

    Returns a tuple ``(amr_grid, stars)`` with the AMR hierarchy and the
    star particles listed in ``<dirname>/StarParticles``.
    """
    # Read in grid
    amr_grid = OrionAMRGrid(dirname, quantities=quantities,
                            verbose=verbose, max_level=max_level)
    # Read in star particles
    fs = open('%s/StarParticles' % dirname, 'rb')
    fs.readline()  # skip the header line
    stars = [Star(line) for line in fs.readlines()]
    return amr_grid, stars
| |
# -*- coding: utf-8 -*-
from cms.api import add_plugin
from cms.models import Page, Placeholder, UserSettings
from cms.operations import (
ADD_PLUGIN,
ADD_PLUGINS_FROM_PLACEHOLDER,
CLEAR_PLACEHOLDER,
CHANGE_PLUGIN,
DELETE_PLUGIN,
CUT_PLUGIN,
MOVE_PLUGIN,
PASTE_PLUGIN,
PASTE_PLACEHOLDER,
)
from cms.signals import pre_placeholder_operation, post_placeholder_operation
from cms.test_utils.testcases import CMSTestCase
from cms.utils.compat.tests import UnittestCompatMixin
from cms.test_utils.util.context_managers import signal_tester
# TODO: Do the same checks for app (non-page) placeholders.
class OperationSignalsTestCase(CMSTestCase, UnittestCompatMixin):
def _add_plugin(self, placeholder=None, plugin_type='LinkPlugin', language='en'):
placeholder = placeholder or self._cms_placeholder
plugin_data = {
'LinkPlugin': {'name': 'A Link', 'external_link': 'https://www.django-cms.org'},
'PlaceholderPlugin': {},
}
plugin = add_plugin(
placeholder,
plugin_type,
language,
**plugin_data[plugin_type]
)
return plugin
def _get_add_plugin_uri(self, language='en'):
uri = self.get_add_plugin_uri(
placeholder=self._cms_placeholder,
plugin_type='LinkPlugin',
language=language,
)
return uri
    def setUp(self):
        """Create an admin user and a published homepage with a 'body' slot."""
        self._admin_user = self.get_superuser()
        self._cms_page = self.create_homepage(
            "home",
            "nav_playground.html",
            "en",
            created_by=self._admin_user,
            published=True,
        )
        self._cms_placeholder = self._cms_page.placeholders.get(slot='body')
    def test_pre_add_plugin(self):
        """pre_placeholder_operation fires once with ADD_PLUGIN details."""
        with signal_tester(pre_placeholder_operation) as env:
            endpoint = self._get_add_plugin_uri()
            data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
            with self.login_user_context(self._admin_user):
                response = self.client.post(endpoint, data)
                self.assertEqual(response.status_code, 200)
            self.assertEqual(env.call_count, 1)
            # Keyword arguments of the single recorded signal call.
            call_kwargs = env.calls[0][1]
            self.assertEqual(call_kwargs['operation'], ADD_PLUGIN)
            self.assertEqual(call_kwargs['language'], 'en')
            self.assertTrue('token' in call_kwargs)
            self.assertEqual(call_kwargs['origin'], '/en/')
            self.assertEqual(call_kwargs['placeholder'], self._cms_placeholder)
            self.assertEqual(call_kwargs['plugin'].name, data['name'])
            self.assertEqual(call_kwargs['plugin'].external_link, data['external_link'])
    def test_post_add_plugin(self):
        """post_placeholder_operation fires after pre, with a matching token."""
        with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
            endpoint = self._get_add_plugin_uri()
            data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
            with self.login_user_context(self._admin_user):
                response = self.client.post(endpoint, data)
                self.assertEqual(response.status_code, 200)
            self.assertEqual(env.call_count, 2)
            pre_call_kwargs = env.calls[0][1]
            post_call_kwargs = env.calls[1][1]
            self.assertTrue('token' in post_call_kwargs)
            self.assertEqual(post_call_kwargs['operation'], ADD_PLUGIN)
            self.assertEqual(post_call_kwargs['language'], 'en')
            # Pre and post phases of one operation share the same token.
            self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
            self.assertEqual(post_call_kwargs['origin'], '/en/')
            self.assertEqual(post_call_kwargs['placeholder'], self._cms_placeholder)
            self.assertTrue(post_call_kwargs['plugin'].pk)
            self.assertEqual(post_call_kwargs['plugin'].name, data['name'])
            self.assertEqual(post_call_kwargs['plugin'].external_link, data['external_link'])
    def test_pre_edit_plugin(self):
        """pre signal for CHANGE_PLUGIN carries both old and new plugin state."""
        plugin = self._add_plugin()
        endpoint = self.get_admin_url(Page, 'edit_plugin', plugin.pk)
        endpoint += '?cms_path=/en/'
        with signal_tester(pre_placeholder_operation) as env:
            data = {'name': 'A Link 2', 'external_link': 'https://www.django-cms.org'}
            with self.login_user_context(self._admin_user):
                response = self.client.post(endpoint, data)
                self.assertEqual(response.status_code, 200)
            self.assertEqual(env.call_count, 1)
            call_kwargs = env.calls[0][1]
            self.assertEqual(call_kwargs['operation'], CHANGE_PLUGIN)
            self.assertEqual(call_kwargs['language'], 'en')
            self.assertTrue('token' in call_kwargs)
            self.assertEqual(call_kwargs['origin'], '/en/')
            self.assertEqual(call_kwargs['placeholder'], self._cms_placeholder)
            # Only the name changed; the external link is identical.
            self.assertEqual(call_kwargs['old_plugin'].name, 'A Link')
            self.assertEqual(call_kwargs['old_plugin'].external_link, data['external_link'])
            self.assertEqual(call_kwargs['new_plugin'].name, data['name'])
            self.assertEqual(call_kwargs['new_plugin'].external_link, data['external_link'])
    def test_post_edit_plugin(self):
        """post signal for CHANGE_PLUGIN mirrors the pre signal's details."""
        plugin = self._add_plugin()
        endpoint = self.get_admin_url(Page, 'edit_plugin', plugin.pk)
        endpoint += '?cms_path=/en/'
        with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
            data = {'name': 'A Link 2', 'external_link': 'https://www.django-cms.org'}
            with self.login_user_context(self._admin_user):
                response = self.client.post(endpoint, data)
                self.assertEqual(response.status_code, 200)
            self.assertEqual(env.call_count, 2)
            pre_call_kwargs = env.calls[0][1]
            post_call_kwargs = env.calls[1][1]
            self.assertTrue('token' in post_call_kwargs)
            self.assertEqual(post_call_kwargs['operation'], CHANGE_PLUGIN)
            self.assertEqual(post_call_kwargs['language'], 'en')
            # Pre and post phases of one operation share the same token.
            self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
            self.assertEqual(post_call_kwargs['origin'], '/en/')
            self.assertEqual(post_call_kwargs['placeholder'], self._cms_placeholder)
            self.assertEqual(post_call_kwargs['old_plugin'].name, 'A Link')
            self.assertEqual(post_call_kwargs['old_plugin'].external_link, data['external_link'])
            self.assertEqual(post_call_kwargs['new_plugin'].name, data['name'])
            self.assertEqual(post_call_kwargs['new_plugin'].external_link, data['external_link'])
def test_pre_delete_plugin(self):
    """Deleting a plugin fires pre_placeholder_operation with DELETE_PLUGIN."""
    plugin = self._add_plugin()
    endpoint = self.get_admin_url(Page, 'delete_plugin', plugin.pk) + '?cms_path=/en/'

    with signal_tester(pre_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            payload = {'post': True}
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 302)

        self.assertEqual(env.call_count, 1)

        kwargs = env.calls[0][1]
        self.assertEqual(kwargs['operation'], DELETE_PLUGIN)
        self.assertEqual(kwargs['language'], 'en')
        self.assertTrue('token' in kwargs)
        self.assertEqual(kwargs['origin'], '/en/')
        self.assertEqual(kwargs['placeholder'], self._cms_placeholder)
        self.assertEqual(kwargs['plugin'].name, 'A Link')
        self.assertEqual(kwargs['plugin'].external_link, 'https://www.django-cms.org')
def test_post_delete_plugin(self):
    """Deleting a plugin fires post_placeholder_operation with DELETE_PLUGIN."""
    plugin = self._add_plugin()
    endpoint = self.get_admin_url(Page, 'delete_plugin', plugin.pk) + '?cms_path=/en/'

    with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            payload = {'post': True}
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 302)

        # Both the pre and the post signal must have fired.
        self.assertEqual(env.call_count, 2)

        pre_kwargs = env.calls[0][1]
        post_kwargs = env.calls[1][1]
        self.assertTrue('token' in post_kwargs)
        self.assertEqual(post_kwargs['operation'], DELETE_PLUGIN)
        self.assertEqual(post_kwargs['language'], 'en')
        # The same token ties the pre/post pair together.
        self.assertTrue(pre_kwargs['token'] == post_kwargs['token'])
        self.assertEqual(post_kwargs['origin'], '/en/')
        self.assertEqual(post_kwargs['placeholder'], self._cms_placeholder)
        self.assertEqual(post_kwargs['plugin'].name, 'A Link')
        self.assertEqual(post_kwargs['plugin'].external_link, 'https://www.django-cms.org')
def test_pre_move_plugin(self):
    """Moving a plugin fires pre_placeholder_operation with MOVE_PLUGIN."""
    plugin = self._add_plugin()
    endpoint = self.get_move_plugin_uri(plugin)
    source_placeholder = plugin.placeholder
    target_placeholder = self._cms_page.placeholders.get(slot='right-column')
    payload = {
        'plugin_id': plugin.pk,
        'placeholder_id': target_placeholder.pk,
    }

    with signal_tester(pre_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        self.assertEqual(env.call_count, 1)

        kwargs = env.calls[0][1]
        self.assertEqual(kwargs['operation'], MOVE_PLUGIN)
        self.assertEqual(kwargs['language'], 'en')
        self.assertTrue('token' in kwargs)
        self.assertEqual(kwargs['origin'], '/en/')
        # The pre-signal still sees the plugin in its source placeholder.
        self.assertEqual(kwargs['plugin'].name, 'A Link')
        self.assertEqual(kwargs['plugin'].placeholder, source_placeholder)
        self.assertEqual(kwargs['plugin'].external_link, 'https://www.django-cms.org')
        self.assertEqual(kwargs['source_language'], 'en')
        self.assertEqual(kwargs['source_placeholder'], source_placeholder)
        self.assertEqual(kwargs['source_parent_id'], plugin.parent_id)
        self.assertEqual(kwargs['target_language'], 'en')
        self.assertEqual(kwargs['target_placeholder'], target_placeholder)
        self.assertEqual(kwargs['target_parent_id'], None)
def test_post_move_plugin(self):
    """Moving a plugin fires post_placeholder_operation with MOVE_PLUGIN."""
    plugin = self._add_plugin()
    endpoint = self.get_move_plugin_uri(plugin)
    source_placeholder = plugin.placeholder
    target_placeholder = self._cms_page.placeholders.get(slot='right-column')
    payload = {
        'plugin_id': plugin.pk,
        'placeholder_id': target_placeholder.pk,
    }

    with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        # Both the pre and the post signal must have fired.
        self.assertEqual(env.call_count, 2)

        pre_kwargs = env.calls[0][1]
        post_kwargs = env.calls[1][1]
        self.assertTrue('token' in post_kwargs)
        self.assertEqual(post_kwargs['operation'], MOVE_PLUGIN)
        self.assertEqual(post_kwargs['language'], 'en')
        # The same token ties the pre/post pair together.
        self.assertTrue(pre_kwargs['token'] == post_kwargs['token'])
        self.assertEqual(post_kwargs['origin'], '/en/')
        # The post-signal sees the plugin already in the target placeholder.
        self.assertEqual(post_kwargs['plugin'].name, 'A Link')
        self.assertEqual(post_kwargs['plugin'].placeholder, target_placeholder)
        self.assertEqual(post_kwargs['plugin'].external_link, 'https://www.django-cms.org')
        self.assertEqual(post_kwargs['source_language'], 'en')
        self.assertEqual(post_kwargs['source_placeholder'], source_placeholder)
        self.assertEqual(post_kwargs['source_parent_id'], plugin.parent_id)
        self.assertEqual(post_kwargs['target_language'], 'en')
        self.assertEqual(post_kwargs['target_placeholder'], target_placeholder)
        self.assertEqual(post_kwargs['target_parent_id'], None)
def test_pre_cut_plugin(self):
    """Cutting a plugin to the clipboard fires pre_placeholder_operation with CUT_PLUGIN."""
    user_settings = UserSettings.objects.create(
        language="en",
        user=self._admin_user,
        clipboard=Placeholder.objects.create(slot='clipboard'),
    )
    plugin = self._add_plugin()
    endpoint = self.get_move_plugin_uri(plugin)
    payload = {
        'plugin_id': plugin.pk,
        'placeholder_id': user_settings.clipboard_id,
    }

    with signal_tester(pre_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        self.assertEqual(env.call_count, 1)

        kwargs = env.calls[0][1]
        self.assertEqual(kwargs['operation'], CUT_PLUGIN)
        self.assertEqual(kwargs['language'], 'en')
        self.assertTrue('token' in kwargs)
        self.assertEqual(kwargs['origin'], '/en/')
        # The pre-signal still sees the plugin in its original placeholder.
        self.assertEqual(kwargs['plugin'].name, 'A Link')
        self.assertEqual(kwargs['plugin'].placeholder, self._cms_placeholder)
        self.assertEqual(kwargs['plugin'].external_link, 'https://www.django-cms.org')
        self.assertEqual(kwargs['clipboard'], user_settings.clipboard)
        self.assertEqual(kwargs['clipboard_language'], 'en')
        self.assertEqual(kwargs['source_language'], 'en')
        self.assertEqual(kwargs['source_placeholder'], self._cms_placeholder)
        self.assertEqual(kwargs['source_parent_id'], plugin.parent_id)
def test_post_cut_plugin(self):
    """Cutting a plugin to the clipboard fires post_placeholder_operation with CUT_PLUGIN."""
    user_settings = UserSettings.objects.create(
        language="en",
        user=self._admin_user,
        clipboard=Placeholder.objects.create(slot='clipboard'),
    )
    plugin = self._add_plugin()
    endpoint = self.get_move_plugin_uri(plugin)
    payload = {
        'plugin_id': plugin.pk,
        'placeholder_id': user_settings.clipboard_id,
    }

    with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        # Both the pre and the post signal must have fired.
        self.assertEqual(env.call_count, 2)

        pre_kwargs = env.calls[0][1]
        post_kwargs = env.calls[1][1]
        self.assertTrue('token' in post_kwargs)
        self.assertEqual(post_kwargs['operation'], CUT_PLUGIN)
        self.assertEqual(post_kwargs['language'], 'en')
        # The same token ties the pre/post pair together.
        self.assertTrue(pre_kwargs['token'] == post_kwargs['token'])
        self.assertEqual(post_kwargs['origin'], '/en/')
        # The post-signal sees the plugin already moved onto the clipboard.
        self.assertEqual(post_kwargs['plugin'].name, 'A Link')
        self.assertEqual(post_kwargs['plugin'].placeholder, user_settings.clipboard)
        self.assertEqual(post_kwargs['plugin'].external_link, 'https://www.django-cms.org')
        self.assertEqual(post_kwargs['clipboard'], user_settings.clipboard)
        self.assertEqual(post_kwargs['clipboard_language'], 'en')
        self.assertEqual(post_kwargs['source_language'], 'en')
        self.assertEqual(post_kwargs['source_placeholder'], self._cms_placeholder)
        self.assertEqual(post_kwargs['source_parent_id'], plugin.parent_id)
def test_pre_paste_plugin(self):
    """Pasting from the clipboard fires pre_placeholder_operation with PASTE_PLUGIN."""
    user_settings = UserSettings.objects.create(
        language="en",
        user=self._admin_user,
        clipboard=Placeholder.objects.create(slot='clipboard'),
    )
    plugin = self._add_plugin(placeholder=user_settings.clipboard)
    endpoint = self.get_move_plugin_uri(plugin)
    payload = {
        'plugin_id': plugin.pk,
        'placeholder_id': self._cms_placeholder.pk,
        'move_a_copy': 'true',
        'plugin_order[]': ['__COPY__'],
    }

    with signal_tester(pre_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        self.assertEqual(env.call_count, 1)

        kwargs = env.calls[0][1]
        self.assertEqual(kwargs['operation'], PASTE_PLUGIN)
        self.assertEqual(kwargs['language'], 'en')
        self.assertTrue('token' in kwargs)
        self.assertEqual(kwargs['origin'], '/en/')
        # The pre-signal still sees the plugin on the clipboard.
        self.assertEqual(kwargs['plugin'].name, 'A Link')
        self.assertEqual(kwargs['plugin'].placeholder, user_settings.clipboard)
        self.assertEqual(kwargs['plugin'].external_link, 'https://www.django-cms.org')
        self.assertEqual(kwargs['target_language'], 'en')
        self.assertEqual(kwargs['target_placeholder'], self._cms_placeholder)
        self.assertEqual(kwargs['target_parent_id'], None)
def test_post_paste_plugin(self):
    """Pasting from the clipboard fires post_placeholder_operation with PASTE_PLUGIN."""
    user_settings = UserSettings.objects.create(
        language="en",
        user=self._admin_user,
        clipboard=Placeholder.objects.create(slot='clipboard'),
    )
    plugin = self._add_plugin(placeholder=user_settings.clipboard)
    endpoint = self.get_move_plugin_uri(plugin)
    payload = {
        'plugin_id': plugin.pk,
        'placeholder_id': self._cms_placeholder.pk,
        'move_a_copy': 'true',
        'plugin_order[]': ['__COPY__'],
    }

    with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        # Both the pre and the post signal must have fired.
        self.assertEqual(env.call_count, 2)

        pre_kwargs = env.calls[0][1]
        post_kwargs = env.calls[1][1]
        self.assertTrue('token' in post_kwargs)
        self.assertEqual(post_kwargs['operation'], PASTE_PLUGIN)
        self.assertEqual(post_kwargs['language'], 'en')
        # The same token ties the pre/post pair together.
        self.assertTrue(pre_kwargs['token'] == post_kwargs['token'])
        self.assertEqual(post_kwargs['origin'], '/en/')
        # The post-signal sees the pasted copy in the page placeholder.
        self.assertEqual(post_kwargs['plugin'].name, 'A Link')
        self.assertEqual(post_kwargs['plugin'].placeholder, self._cms_placeholder)
        self.assertEqual(post_kwargs['plugin'].external_link, 'https://www.django-cms.org')
        self.assertEqual(post_kwargs['target_language'], 'en')
        self.assertEqual(post_kwargs['target_placeholder'], self._cms_placeholder)
        self.assertEqual(post_kwargs['target_parent_id'], None)
def test_pre_paste_placeholder(self):
    """Pasting a whole placeholder fires pre_placeholder_operation with PASTE_PLACEHOLDER."""
    user_settings = UserSettings.objects.create(
        language="en",
        user=self._admin_user,
        clipboard=Placeholder.objects.create(slot='clipboard'),
    )
    placeholder_plugin = self._add_plugin(
        user_settings.clipboard,
        'PlaceholderPlugin',
    )
    ref_placeholder = placeholder_plugin.placeholder_ref
    # Put a real plugin inside the referenced placeholder being pasted.
    self._add_plugin(ref_placeholder)
    endpoint = self.get_move_plugin_uri(placeholder_plugin)
    payload = {
        'plugin_id': placeholder_plugin.pk,
        'placeholder_id': self._cms_placeholder.pk,
        'move_a_copy': 'true',
        'plugin_order[]': ['__COPY__'],
    }

    with signal_tester(pre_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        self.assertEqual(env.call_count, 1)

        kwargs = env.calls[0][1]
        inner_plugin = kwargs['plugins'][0].get_bound_plugin()
        self.assertEqual(kwargs['operation'], PASTE_PLACEHOLDER)
        self.assertEqual(kwargs['language'], 'en')
        self.assertTrue('token' in kwargs)
        self.assertEqual(kwargs['origin'], '/en/')
        # The pre-signal still sees the plugins in the referenced placeholder.
        self.assertEqual(inner_plugin.name, 'A Link')
        self.assertEqual(inner_plugin.placeholder, ref_placeholder)
        self.assertEqual(inner_plugin.external_link, 'https://www.django-cms.org')
        self.assertEqual(kwargs['target_language'], 'en')
        self.assertEqual(kwargs['target_placeholder'], self._cms_placeholder)
def test_post_paste_placeholder(self):
    """Pasting a whole placeholder fires post_placeholder_operation with PASTE_PLACEHOLDER."""
    user_settings = UserSettings.objects.create(
        language="en",
        user=self._admin_user,
        clipboard=Placeholder.objects.create(slot='clipboard'),
    )
    placeholder_plugin = self._add_plugin(
        user_settings.clipboard,
        'PlaceholderPlugin',
    )
    ref_placeholder = placeholder_plugin.placeholder_ref
    # Put a real plugin inside the referenced placeholder being pasted.
    self._add_plugin(ref_placeholder)
    endpoint = self.get_move_plugin_uri(placeholder_plugin)
    payload = {
        'plugin_id': placeholder_plugin.pk,
        'placeholder_id': self._cms_placeholder.pk,
        'move_a_copy': 'true',
        'plugin_order[]': ['__COPY__'],
    }

    with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        # Both the pre and the post signal must have fired.
        self.assertEqual(env.call_count, 2)

        pre_kwargs = env.calls[0][1]
        post_kwargs = env.calls[1][1]
        inner_plugin = post_kwargs['plugins'][0].get_bound_plugin()
        self.assertTrue('token' in post_kwargs)
        self.assertEqual(post_kwargs['operation'], PASTE_PLACEHOLDER)
        self.assertEqual(post_kwargs['language'], 'en')
        # The same token ties the pre/post pair together.
        self.assertTrue(pre_kwargs['token'] == post_kwargs['token'])
        self.assertEqual(post_kwargs['origin'], '/en/')
        # The post-signal sees the pasted copies in the page placeholder.
        self.assertEqual(inner_plugin.name, 'A Link')
        self.assertEqual(inner_plugin.placeholder, self._cms_placeholder)
        self.assertEqual(inner_plugin.external_link, 'https://www.django-cms.org')
        self.assertEqual(post_kwargs['target_language'], 'en')
        self.assertEqual(post_kwargs['target_placeholder'], self._cms_placeholder)
def test_pre_add_plugins_from_placeholder(self):
    """Copying plugins between placeholders fires pre_placeholder_operation."""
    plugin = self._add_plugin()
    endpoint = self.get_admin_url(Page, 'copy_plugins') + '?cms_path=/en/'
    source_placeholder = plugin.placeholder
    target_placeholder = self._cms_page.placeholders.get(slot='right-column')
    payload = {
        'source_language': 'en',
        'source_placeholder_id': self._cms_placeholder.pk,
        'target_language': 'de',
        'target_placeholder_id': target_placeholder.pk,
    }

    with signal_tester(pre_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        self.assertEqual(env.call_count, 1)

        kwargs = env.calls[0][1]
        en_plugin = kwargs['plugins'][0].get_bound_plugin()
        self.assertEqual(kwargs['operation'], ADD_PLUGINS_FROM_PLACEHOLDER)
        self.assertEqual(kwargs['language'], 'en')
        self.assertTrue('token' in kwargs)
        self.assertEqual(kwargs['origin'], '/en/')
        # The pre-signal carries the original (source) plugin, not a copy.
        self.assertEqual(plugin, en_plugin)
        self.assertEqual(kwargs['source_language'], 'en')
        self.assertEqual(kwargs['source_placeholder'], source_placeholder)
        self.assertEqual(kwargs['target_language'], 'de')
        self.assertEqual(kwargs['target_placeholder'], target_placeholder)
def test_post_add_plugins_from_placeholder(self):
    """Copying plugins between placeholders fires post_placeholder_operation."""
    plugin = self._add_plugin()
    endpoint = self.get_admin_url(Page, 'copy_plugins') + '?cms_path=/en/'
    source_placeholder = plugin.placeholder
    target_placeholder = self._cms_page.placeholders.get(slot='right-column')
    payload = {
        'source_language': 'en',
        'source_placeholder_id': self._cms_placeholder.pk,
        'target_language': 'de',
        'target_placeholder_id': target_placeholder.pk,
    }

    with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, payload)
            self.assertEqual(response.status_code, 200)

        # Both the pre and the post signal must have fired.
        self.assertEqual(env.call_count, 2)

        pre_kwargs = env.calls[0][1]
        post_kwargs = env.calls[1][1]
        copied_plugin = post_kwargs['plugins'][0].get_bound_plugin()
        self.assertTrue('token' in post_kwargs)
        self.assertEqual(post_kwargs['operation'], ADD_PLUGINS_FROM_PLACEHOLDER)
        self.assertEqual(post_kwargs['language'], 'en')
        # The same token ties the pre/post pair together.
        self.assertTrue(pre_kwargs['token'] == post_kwargs['token'])
        self.assertEqual(post_kwargs['origin'], '/en/')
        # The post-signal carries a fresh copy living in the target placeholder.
        self.assertNotEqual(plugin, copied_plugin)
        self.assertEqual(copied_plugin.name, 'A Link')
        self.assertEqual(copied_plugin.placeholder, target_placeholder)
        self.assertEqual(copied_plugin.external_link, 'https://www.django-cms.org')
        self.assertEqual(post_kwargs['source_language'], 'en')
        self.assertEqual(post_kwargs['source_placeholder'], source_placeholder)
        self.assertEqual(post_kwargs['target_language'], 'de')
        self.assertEqual(post_kwargs['target_placeholder'], target_placeholder)
def test_pre_clear_placeholder(self):
    """Clearing a placeholder fires pre_placeholder_operation with CLEAR_PLACEHOLDER."""
    plugin = self._add_plugin()
    endpoint = self.get_clear_placeholder_url(self._cms_placeholder)

    with signal_tester(pre_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, {'test': 0})
            self.assertEqual(response.status_code, 302)

        self.assertEqual(env.call_count, 1)

        kwargs = env.calls[0][1]
        doomed_plugin = kwargs['plugins'][0]
        self.assertEqual(kwargs['operation'], CLEAR_PLACEHOLDER)
        self.assertEqual(kwargs['language'], 'en')
        self.assertTrue('token' in kwargs)
        self.assertEqual(kwargs['origin'], '/en/')
        # The signal lists the plugins about to be removed.
        self.assertEqual(doomed_plugin.pk, plugin.pk)
        self.assertEqual(kwargs['placeholder'], self._cms_placeholder)
def test_post_clear_placeholder(self):
    """Clearing a placeholder fires post_placeholder_operation with CLEAR_PLACEHOLDER."""
    plugin = self._add_plugin()
    endpoint = self.get_clear_placeholder_url(self._cms_placeholder)

    with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
        with self.login_user_context(self._admin_user):
            response = self.client.post(endpoint, {'test': 0})
            self.assertEqual(response.status_code, 302)

        # Both the pre and the post signal must have fired.
        self.assertEqual(env.call_count, 2)

        pre_kwargs = env.calls[0][1]
        post_kwargs = env.calls[1][1]
        removed_plugin = post_kwargs['plugins'][0]
        self.assertTrue('token' in post_kwargs)
        self.assertEqual(post_kwargs['operation'], CLEAR_PLACEHOLDER)
        self.assertEqual(post_kwargs['language'], 'en')
        # The same token ties the pre/post pair together.
        self.assertTrue(pre_kwargs['token'] == post_kwargs['token'])
        self.assertEqual(post_kwargs['origin'], '/en/')
        self.assertEqual(removed_plugin.pk, plugin.pk)
        self.assertEqual(post_kwargs['placeholder'], self._cms_placeholder)
| |
#!/usr/bin/env python
from argparse import ArgumentParser
from Queue import Empty
from multiprocessing import Process, Queue, Value, cpu_count
import requests
import shutil
import urllib
import sys
import signal
import random
import os
import logging
DEF_API_BASE = 'http://index.commoncrawl.org/'
def get_num_pages(api_url, url, page_size=None):
    """Ask the CDX server (via the showNumPages query) how many result
    pages exist for *url*, optionally for a non-default *page_size*.
    """
    params = {'url': url,
              'showNumPages': True}
    if page_size:
        params['pageSize'] = page_size
    query = urllib.urlencode(params)

    # Get the result
    session = requests.Session()
    response = session.get(api_url + '?' + query)
    pages_info = response.json()

    # Newer servers answer {'pages': N}; older ones return a bare int.
    if isinstance(pages_info, dict):
        return pages_info['pages']
    if isinstance(pages_info, int):
        return pages_info

    raise Exception('Num pages query returned invalid data: ' + response.text)
def fetch_result_page(job_params):
    """Query the CDX API for one result page and write it to the page's
    output file.

    ``job_params`` is the job dict built by ``main``'s ``get_page_job``;
    it carries the query parameters, output naming info, gzip preference,
    custom headers and target directory.
    """
    api_url = job_params['api_url']
    url = job_params['url']
    page = job_params['page']
    num_pages = job_params['num_pages']
    output_prefix = job_params['output_prefix']
    timeout = job_params['timeout']
    gzipped = job_params['gzipped']
    headers = job_params['headers']
    dir_ = job_params['dir']

    query = {'url': url,
             'page': page}

    if job_params.get('json'):
        query['output'] = 'json'

    if job_params.get('fl'):
        query['fl'] = job_params['fl']

    if job_params.get('page_size'):
        query['pageSize'] = job_params['page_size']

    query = urllib.urlencode(query)

    # Zero-pad the page number to the width of the largest page number so
    # output files sort lexicographically.
    nd = len(str(num_pages))
    format_ = '%0' + str(nd) + 'd'
    page_str = format_ % page
    filename = output_prefix + page_str

    logging.debug('Fetching page {0} ({2} of {1})'.format(page_str, num_pages, page + 1))

    # Add any custom headers that may have been specified ("Name: value").
    req_headers = {}
    if headers:
        for h in headers:
            n, v = h.split(':', 1)
            n = n.strip()
            v = v.strip()
            req_headers[n] = v

    # Get the result; stream=True so large pages are written chunk-by-chunk.
    session = requests.Session()
    r = session.get(api_url + '?' + query, headers=req_headers, stream=True, timeout=timeout)

    if r.status_code == 404:
        # Fixed typo in log message ("for for" -> "for").
        logging.error('No Results for this query')
        r.close()
        return

    if r.status_code != 200:
        r.raise_for_status()
        r.close()
        return

    # use dir, if provided (created on demand)
    if dir_:
        if not os.path.isdir(dir_):
            os.makedirs(dir_)
        filename = os.path.join(dir_, filename)

    if not gzipped:
        # Decoded content, written in 1 KB chunks.
        with open(filename, 'w+b') as fh:
            for chunk in r.iter_content(1024):
                fh.write(chunk)
    else:
        # Copy the raw (still gzip-compressed) stream straight to disk.
        if r.headers.get('content-encoding') == 'gzip':
            filename += '.gz'
        with open(filename, 'w+b') as fh:
            shutil.copyfileobj(r.raw, fh)

    logging.debug('Done with "{0}"'.format(filename))
def do_work(job_queue, counter=None):
    """Worker-process loop: pull fetch-page jobs off *job_queue* until it
    is empty, retrying failed jobs up to ``job['max_retries']`` times.

    ``counter`` is a shared multiprocessing Value used for progress output.
    """
    # Let the parent process handle Ctrl-C; workers ignore SIGINT.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        # Bugfix: reset per iteration so the generic except-clause below can
        # tell whether a job was actually fetched. Previously `job` could be
        # unbound (NameError) if get_nowait() itself raised, or stale from a
        # prior iteration, causing the wrong job to be retried.
        job = None
        try:
            job = job_queue.get_nowait()
            fetch_result_page(job)

            num_done = 0
            with counter.get_lock():
                counter.value += 1
                num_done = counter.value

            logging.info('{0} page(s) of {1} finished'.format(num_done,
                                                              job['num_pages']))
        except Empty:
            pass

        except KeyboardInterrupt:
            break

        except Exception:
            if job is None:
                # The failure happened before a job was fetched: re-raise.
                raise
            retries = job.get('retries', 0)
            if retries < job['max_retries']:
                logging.error('Retrying Page {0}'.format(job['page']))
                job['retries'] = retries + 1
                job_queue.put_nowait(job)
            else:
                logging.error('Max retries exceeded for page {0}'.
                              format(job['page']))
def run_workers(num_workers, jobs, shuffle):
    """Queue up all jobs and run *num_workers* worker processes over them,
    terminating all workers cleanly on KeyboardInterrupt.

    Not using Pool to better handle KeyboardInterrupt gracefully.
    Adapted from example at:
    http://bryceboe.com/2012/02/14/python-multiprocessing-pool-and-keyboardinterrupt-revisited/
    """
    job_queue = Queue()
    counter = Value('i', 0)

    # Optionally shuffle the job order before queueing.
    if shuffle:
        jobs = list(jobs)
        random.shuffle(jobs)

    for job in jobs:
        job_queue.put(job)

    workers = []
    for _ in xrange(0, num_workers):
        proc = Process(target=do_work,
                       args=(job_queue, counter))
        proc.start()
        workers.append(proc)

    try:
        for proc in workers:
            proc.join()
    except KeyboardInterrupt:
        logging.info('Received Ctrl-C, interrupting all workers')
        for proc in workers:
            proc.terminate()
            proc.join()
def main():
    """Parse the command line, query the CDX index for the page count,
    then fetch every result page (optionally in parallel worker processes).
    """
    url_help = """
url to query in the index:
For prefix, use:
http://example.com/*
For domain query, use:
*.example.com
"""

    field_list_help = """
select fields to include: eg, --fl url,timestamp
"""

    parser = ArgumentParser('CDX Index API Client')

    parser.add_argument('url',
                        help=url_help)

    parser.add_argument('-n', '--show-num-pages', action='store_true',
                        help='Show Number of Pages only and exit')

    parser.add_argument('-p', '--processes', type=int,
                        help='Number of worker processes to use')

    parser.add_argument('--fl',
                        help=field_list_help)

    parser.add_argument('-j', '--json', action='store_true',
                        help='Use json output instead of cdx(j)')

    # Fixed help-string typo: "Storge" -> "Store".
    parser.add_argument('-z', '--gzipped', action='store_true',
                        help='Store gzipped results, with .gz extensions')

    parser.add_argument('-o', '--output-prefix',
                        help='Custom output prefix, append with -NN for each page')

    parser.add_argument('-d', '--directory',
                        help='Specify custom output directory')

    parser.add_argument('--page-size', type=int,
                        help='size of each page in blocks, >=1')

    group = parser.add_mutually_exclusive_group()
    group.add_argument('-c', '--coll', default='CC-MAIN-2015-06',
                       help='The index collection to use')
    group.add_argument('--cdx-server-url',
                       help='Set endpoint for CDX Server API')

    parser.add_argument('--timeout', default=30, type=int,
                        help='HTTP read timeout before retry')

    parser.add_argument('--max-retries', default=5, type=int,
                        help='Number of retry attempts')

    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose logging of debug msgs')

    parser.add_argument('--pages', type=int, nargs='*',
                        help=('Get only the specified result page(s) instead ' +
                              'of all results'))

    parser.add_argument('--header', nargs='*',
                        help='Add custom header to request')

    parser.add_argument('--in-order', action='store_true',
                        help='Fetch pages in order (default is to shuffle page list)')

    r = parser.parse_args()

    # Logging: debug level only with --verbose; silence requests' own logger.
    if r.verbose:
        level = logging.DEBUG
    else:
        level = logging.INFO

    logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s',
                        level=level)

    logging.getLogger("requests").setLevel(logging.WARNING)

    if r.cdx_server_url:
        api_url = r.cdx_server_url
    else:
        api_url = DEF_API_BASE + r.coll + '-index'

    logging.debug('Getting Num Pages...')
    num_pages = get_num_pages(api_url, r.url, r.page_size)

    # Num Pages Only Query
    if r.show_num_pages:
        print(num_pages)
        return

    if num_pages == 0:
        print('No results found for: ' + r.url)
        # Bugfix: previously execution fell through and scheduled
        # zero pages of work; nothing useful can happen past this point.
        return

    # Derive an output prefix from the query url unless one was given.
    if not r.output_prefix:
        if r.url.startswith('*'):
            output_prefix = 'domain-' + r.url.strip('*.')
        elif r.url.endswith('*'):
            output_prefix = 'prefix-' + r.url.strip('*')
        elif r.url.startswith(('http://', 'https://', '//')):
            output_prefix = r.url.split('//', 1)[-1]
        else:
            output_prefix = r.url

        output_prefix = output_prefix.strip('/')
        output_prefix = output_prefix.replace('/', '-')
        output_prefix = urllib.quote(output_prefix) + '-'
    else:
        output_prefix = r.output_prefix

    def get_page_job(page):
        # One job dict per page, carrying everything fetch_result_page needs.
        job = {}
        job['api_url'] = api_url
        job['url'] = r.url
        job['page'] = page
        job['num_pages'] = num_pages
        job['output_prefix'] = output_prefix
        job['fl'] = r.fl
        job['json'] = r.json
        job['page_size'] = r.page_size
        job['timeout'] = r.timeout
        job['max_retries'] = r.max_retries
        job['gzipped'] = r.gzipped
        job['headers'] = r.header
        job['dir'] = r.directory
        return job

    if r.pages:
        page_list = r.pages
        logging.info('Fetching pages {0} of {1}'.format(r.pages, r.url))
        num_pages = len(page_list)
    else:
        page_list = range(0, num_pages)
        logging.info('Fetching {0} pages of {1}'.format(num_pages, r.url))

    # Single page: fetch inline, no worker processes needed.
    if num_pages == 1:
        fetch_result_page(get_page_job(page_list[0]))
        return

    # set num workers based on processes
    if not r.processes:
        try:
            num_workers = cpu_count() * 2
        except NotImplementedError:
            num_workers = 4
    else:
        num_workers = r.processes

    num_workers = min(num_workers, num_pages)

    # generate page jobs
    job_list = map(get_page_job, page_list)

    run_workers(num_workers, job_list, not r.in_order)
| |
#!/usr/bin/env python
from __future__ import print_function, absolute_import, division
import argparse
import sys
import os
import re
from jinja2 import Template
import punch
from punch import config as cfr
from punch import file_updater as fu
from punch import replacer as rep
from punch import version as ver
from punch import action_register as ar
from punch import helpers as hlp
from punch.vcs_configuration import VCSConfiguration
from punch.vcs_repositories.exceptions import RepositorySystemError
from punch.vcs_repositories.novcs_repo import NoVCSRepo
from punch.vcs_repositories.git_flow_repo import GitFlowRepo
from punch.vcs_repositories.git_repo import GitRepo
from punch.vcs_repositories.hg_repo import HgRepo
from punch.vcs_use_cases.vcs_start_release import VCSStartReleaseUseCase
from punch.vcs_use_cases.vcs_finish_release import VCSFinishReleaseUseCase
def fatal_error(message, exception=None):
    """Report an unrecoverable error and terminate with exit status 1.

    When *exception* is given, its class name and message are echoed
    after *message* to help diagnose the failure.
    """
    print(message)
    if exception is not None:
        detail = "Exception {}: {}".format(
            exception.__class__.__name__, str(exception))
        print(detail)
    sys.exit(1)
def select_vcs_repo_class(vcs_configuration):
    """Map the configured VCS name onto its repository wrapper class.

    No VCS configuration at all means "no VCS"; an unsupported name
    is a fatal error.
    """
    if vcs_configuration is None:
        return NoVCSRepo

    supported = {
        'git': GitRepo,
        'git-flow': GitFlowRepo,
        'hg': HgRepo,
    }
    try:
        return supported[vcs_configuration.name]
    except KeyError:
        fatal_error(
            "The requested version control" +
            " system {} is not supported.".format(
                vcs_configuration.name
            )
        )
# File names that "punch --init" creates (and main() reads by default).
default_config_file_name = "punch_config.py"

# Default punch_config.py content: semver-style serializer, no managed
# files, git VCS with a templated commit message.
default_config_file_content = """__config_version__ = 1
GLOBALS = {
'serializer': '{{major}}.{{minor}}.{{patch}}',
}
FILES = []
VERSION = ['major', 'minor', 'patch']
VCS = {
'name': 'git',
'commit_message': (
"Version updated from {{ current_version }}"
" to {{ new_version }}")
}
"""

default_version_file_name = "punch_version.py"

# Default punch_version.py content: start a project at 0.1.0.
default_version_file_content = """major = 0
minor = 1
patch = 0
"""

# Commit message used when the VCS configuration does not provide one.
default_commit_message = \
    "Version update {{ current_version }} -> {{ new_version }}"
def show_version_parts(values):
    """Print every version part as a 'name=value' line."""
    lines = ["{}={}".format(part.name, part.value) for part in values]
    for line in lines:
        print(line)
def show_version_updates(version_changes):
    """Print an ' - old -> new' line for every pending version change."""
    for old_value, new_value in version_changes.values():
        print(" - {} -> {}".format(old_value, new_value))
def init_config_files():
    """Write the default config and version files, never overwriting
    files that already exist.
    """
    defaults = [
        (default_config_file_name, default_config_file_content),
        (default_version_file_name, default_version_file_content),
    ]
    for path, content in defaults:
        if not os.path.exists(path):
            with open(path, 'w') as stream:
                stream.write(content)
def args_initialize(args):
    """Handle the early-exit flags (--version, --init) and option implications."""
    if args.version is True:
        # --version prints project information and stops right away.
        info = [
            "Punch version {}".format(punch.__version__),
            "Copyright (C) 2016 Leonardo Giordani",
            "This is free software, see the LICENSE file.",
            "Source: https://github.com/lgiordani/punch",
            "Documentation: http://punch.readthedocs.io/en/latest/",
        ]
        print("\n".join(info))
        sys.exit(0)

    if args.init is True:
        # --init writes the default config files and stops.
        init_config_files()
        sys.exit(0)

    # --simulate implies --verbose so the simulated changes get printed.
    if args.simulate:
        args.verbose = True
def args_check_options(args):
    """Validate the mutually-exclusive --part/--set-part/--action options,
    load the configuration file, and normalise --part/--set-part into the
    equivalent --action invocation (mutating *args* in place).

    Exits via fatal_error() on any violation; returns the loaded config.
    """
    # Exactly one of the three "what to do" options must be present.
    if not any([args.part, args.set_part, args.action]):
        fatal_error("You must specify one of --part, --set-part, or --action")

    set_options = [
        i is not None for i in [args.part, args.set_part, args.action]
    ]

    if sum(set_options) > 1:
        fatal_error(
            "You can only specify one of --part, --set-part, or --action")

    # --reset-on-set only makes sense when a single part is being set.
    if args.set_part and args.reset_on_set:
        set_parts = args.set_part.split(',')

        if len(set_parts) > 1:
            fatal_error(
                "If you specify --reset-on-set you may set only one value"
            )

    try:
        config = cfr.PunchConfig(args.config_file)
    except (cfr.ConfigurationVersionError, ValueError) as exc:
        fatal_error(
            "An error occurred while reading the configuration file.",
            exc
        )

    if len(config.files) == 0:
        fatal_error("You didn't configure any file")

    # Translate the shorthand options into the generic action mechanism.
    if args.part:
        args.action = "punch:increase"
        args.action_options = "part={}".format(args.part)
    elif args.set_part:
        args.action = "punch:set"
        args.action_options = args.set_part

    if args.action and args.action not in config.actions:
        fatal_error(
            "The requested action {} is not defined.".format(args.action)
        )

    return config
def create_action(args, config):
    """Instantiate the action object named by ``args.action``.

    Merges any --action-options into the action's configuration and
    exits via fatal_error() if the configuration lacks a 'type' field.
    """
    # Bugfix: copy before popping. The original popped 'type' from the
    # dict stored in config.actions, mutating the shared configuration so
    # a later lookup of the same action would be missing its 'type'.
    action_dict = dict(config.actions[args.action])

    try:
        action_name = action_dict.pop('type')
    except KeyError:
        fatal_error("The action configuration is missing the 'type' field.")

    if args.action_options:
        action_dict.update(hlp.optstr2dict(args.action_options))

    action_class = ar.ActionRegister.get(action_name)
    action = action_class(action_dict)

    return action
def check_release_notes(config, changes):
    """Verify every configured release-notes file contains an entry
    matching the new version; exits via fatal_error() listing the
    offending files otherwise.
    """
    # Map each version-part name to its new value (changes holds (old, new)).
    new_versions = {name: pair[1] for name, pair in changes.items()}

    wrong_release_notes = []
    for file_name, regex_template in config.release_notes:
        # The regex itself is a Jinja template rendered with the new values.
        rendered = Template(regex_template).render(**new_versions)

        with open(file_name, 'r') as stream:
            content = stream.read()

        if not re.search(rendered, content, re.MULTILINE):
            wrong_release_notes.append((file_name, regex_template, rendered))

    if len(wrong_release_notes):
        print("The following files have been configured to contain "
              "release notes, but they don't have an entry that matches "
              "the new version that Punch is about to create.")
        for file_name, regex_template, rendered in wrong_release_notes:
            print(" *", file_name)
            print(" - Template:", regex_template)
            print(" - Rendered:", rendered)
        fatal_error(
            "Please update the files and commit them if you use a VCS")
def main(original_args=None):
    """Punch command-line entry point.

    Parses the command line, loads the configuration, computes the new
    version via the requested action, optionally prints a summary
    (``--verbose`` / ``--simulate``), checks the release notes, and finally
    updates the configured files, the version file, and the VCS repository.

    :param original_args: optional list of arguments used instead of the
        real command line (useful for testing); ``None`` means use
        ``sys.argv``.
    """
    parser = argparse.ArgumentParser(
        description="Manages file content with versions."
    )
    parser.add_argument('-c', '--config-file', action='store',
                        help="Config file", default=default_config_file_name)
    parser.add_argument('-v', '--version-file', action='store',
                        help="Version file", default=default_version_file_name)
    parser.add_argument('-p', '--part', action='store')
    parser.add_argument('--set-part', action='store')
    parser.add_argument('-a', '--action', action='store')
    parser.add_argument('--action-options', action='store')
    parser.add_argument('--action-flags', action='store')
    parser.add_argument('--reset-on-set', action='store_true')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Ignore warnings')
    parser.add_argument('--verbose', action='store_true',
                        help="Be verbose")
    parser.add_argument('--version', action='store_true',
                        help="Print the Punch version and project information")
    parser.add_argument(
        '--init',
        action='store_true',
        help="Writes default initialization files" +
             " (does not overwrite existing ones)"
    )
    parser.add_argument(
        '-s',
        '--simulate',
        action='store_true',
        help="Simulates the version increment and" +
             " prints a summary of the relevant data (implies --verbose)"
    )

    # Honour an explicitly provided argument list; argparse falls back to
    # sys.argv[1:] when original_args is None. (Previously the parameter
    # was silently ignored.)
    args = parser.parse_args(original_args)

    # This is here just to avoid "can be not defined" messages by linters
    repo = None

    args_initialize(args)
    config = args_check_options(args)

    if args.verbose:
        print("## Punch version {}".format(punch.__version__))

    action = create_action(args, config)

    current_version = ver.Version.from_file(args.version_file, config.version)
    new_version = action.process_version(current_version)

    global_replacer = rep.Replacer(config.globals['serializer'])

    # One updater per configured file; file-level serializers override the
    # global ones.
    file_updaters = []
    for file_configuration in config.files:
        file_replacer = rep.Replacer(config.globals['serializer'])
        file_replacer.update(file_configuration.config['serializer'])
        file_updaters.append(fu.FileUpdater(file_configuration, file_replacer))

    if config.vcs is not None:
        try:
            current_version_string, new_version_string = \
                global_replacer.run_serializer(
                    config.vcs_serializer,
                    current_version.as_dict(),
                    new_version.as_dict()
                )
        except rep.MissingSerializer:
            fatal_error(
                "The requested serializer {} has not been declared".format(
                    config.vcs_serializer
                )
            )

        vcs_configuration = VCSConfiguration.from_dict(
            config.vcs,
            config.globals,
            {
                'current_version': current_version_string,
                'new_version': new_version_string
            }
        )
    else:
        vcs_configuration = None

    # Prepare the VCS repository
    repo_class = select_vcs_repo_class(vcs_configuration)

    # Prepare the files that have been changed by Punch
    # Including the version file of Punch itself
    files_to_commit = [f.path for f in config.files]
    files_to_commit.append(args.version_file)

    # Initialise the VCS repository class
    try:
        repo = repo_class(os.getcwd(), vcs_configuration, files_to_commit)
    except RepositorySystemError as exc:
        fatal_error(
            ("An error occurred while initialising "
             "the version control repository"),
            exc
        )

    changes = global_replacer.run_all_serializers(
        current_version.as_dict(),
        new_version.as_dict()
    )

    if args.verbose:
        print("\n# Current version")
        show_version_parts(current_version.values)

        print("\n# New version")
        show_version_parts(new_version.values)

        print("\n# Global version updates")
        show_version_updates(changes)

        print("\n# Configured files")
        # Iterate files and updaters in lockstep (file_updaters was built
        # from config.files in order). The original code reused the stale
        # `file_configuration` loop variable here, printing the last file's
        # path for every updater, and also rebound `changes`, clobbering
        # the global summary used by check_release_notes() below.
        for file_configuration, file_updater in zip(config.files,
                                                    file_updaters):
            print("+ {}:".format(file_configuration.path))
            file_changes = file_updater.get_summary(
                current_version.as_dict(),
                new_version.as_dict()
            )
            show_version_updates(file_changes)

        vcs_info = repo.get_info()
        if len(vcs_info) != 0:
            print("\n# VCS")
            for key, value in vcs_info:
                print('+ {}: {}'.format(key, value))

    if args.simulate:
        sys.exit(0)

    check_release_notes(config, changes)

    VCSStartReleaseUseCase(repo).execute()

    try:
        for file_updater in file_updaters:
            file_updater.update(
                current_version.as_dict(), new_version.as_dict()
            )
    except ValueError as e:
        # A failed file update is reported as a warning rather than a hard
        # failure (silenced by --quiet).
        if not args.quiet:
            print("Warning:", e)

    # Write the updated version info to the version file.
    new_version.to_file(args.version_file)

    VCSFinishReleaseUseCase(repo).execute()
| |
#!/usr/bin/env python
from __future__ import absolute_import, print_function, division
import datetime
import os
import subprocess
import sys
import time
from six.moves import xrange
import six.moves.cPickle as pickle
import theano
from theano.misc.windows import output_subprocess_Popen
__authors__ = "Olivier Delalleau, Eric Larsen"
__contact__ = "delallea@iro"
"""
Run this script to run tests in small batches rather than all at the same time
or to conduct time-profiling.
If no argument is provided, then the whole Theano test-suite is run.
Otherwise, only tests found in the directory given as argument are run.
If 'time_profile=False', this script performs three tasks:
1. Run `nosetests --collect-only --with-id` to collect test IDs
2. Run `nosetests --with-id i1 ... iN` with batches of 'batch_size'
indices, until all tests have been run (currently batch_size=100 by
default).
3. Run `nosetests --failed` to re-run only tests that failed
=> The output of this 3rd step is the one you should care about
If 'time_profile=True', this script conducts time-profiling of the tests:
1. Run `nosetests --collect-only --with-id` to collect test IDs
2. Run `nosetests --with-id i`, one test with ID 'i' at a time, collecting
timing information and displaying progresses on standard output after
every group of 'batch_size' (100 by default), until all tests have
been run.
The results are deposited in the files 'timeprof_sort' and
'timeprof_nosort' in the current directory. Both contain one record for
each test and comprise the following fields:
- test running-time
- nosetests sequential test number
- test name
- name of class to which test belongs (if any), otherwise full
information is contained in test name
- test outcome ('OK', 'SKIPPED TEST', 'FAILED TEST' or 'FAILED PARSING')
In 'timeprof_sort', test records are sorted according to run-time
whereas in 'timeprof_nosort' records are reported according to
sequential number. The former classification is the main information
source for time-profiling. Since tests belonging to same or close
classes and files have close sequential numbers, the latter may be used
to identify duration patterns among the tests. A full log is also saved
as 'timeprof_rawlog'.
One reason to use this script is if you are a Windows user, and see errors like
"Not enough storage is available to process this command" when trying to simply
run `nosetests` in your Theano installation directory. This error is apparently
caused by memory fragmentation: at some point Windows runs out of contiguous
memory to load the C modules compiled by Theano in the test-suite.
By using this script, nosetests is run on a small subset (batch) of tests until
all tests are run. Note that this is slower, in particular because of the
initial cost of importing theano and loading the C module cache on each call of
nosetests.
"""
def main(stdout=None, stderr=None, argv=None, theano_nose=None,
         batch_size=None, time_profile=False, display_batch_output=False):
    """
    Run the Theano test suite in batches, with optional output redirection.

    stdout/stderr may be file-like objects used in place of sys.stdout and
    sys.stderr; None keeps the defaults. argv defaults to sys.argv when
    None. theano_nose, when None, is located by walking upward from the
    installed theano package toward a sibling 'bin/theano-nose' script.
    batch_size defaults to 100. When display_batch_output is False, the
    per-batch nosetests output is suppressed.
    """
    stdout = sys.stdout if stdout is None else stdout
    stderr = sys.stderr if stderr is None else stderr
    argv = sys.argv if argv is None else argv

    if theano_nose is None:
        # With a pip/easy_install layout, theano lives under
        # */lib/python2.7/site-packages/theano while theano-nose sits in
        # */bin, so probe up to four parent levels.
        base = theano.__path__[0]
        for depth in range(1, 5):
            candidate = os.path.join(
                base, *(['..'] * depth + ['bin', 'theano-nose']))
            if os.path.exists(candidate):
                theano_nose = candidate
                break
        if theano_nose is None:
            raise Exception("Unable to find theano-nose")

    if batch_size is None:
        batch_size = 100

    # Swap in the requested streams for the duration of the run, restoring
    # the originals even on failure.
    saved_stdout, saved_stderr = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = stdout, stderr
        run(stdout, stderr, argv, theano_nose, batch_size, time_profile,
            display_batch_output)
    finally:
        sys.stdout, sys.stderr = saved_stdout, saved_stderr
def run(stdout, stderr, argv, theano_nose, batch_size, time_profile,
        display_batch_output):
    """
    Drive the batched (or time-profiled) nosetests run.

    First collects all test IDs via `nosetests --collect-only --with-id`,
    then either:

    * batch mode (time_profile=False): runs tests in groups of
      `batch_size`, accumulates failed IDs from the '.noseids' file after
      each batch, and finally re-runs only the failed tests verbosely; or
    * profiling mode (time_profile=True): runs tests one at a time,
      parsing the nosetests output to record per-test durations into
      'timeprof_rawlog', 'timeprof_nosort' and 'timeprof_sort' in the
      current directory.

    Returns 0 after re-running failed tests; otherwise returns None.
    """
    # Setting aside current working directory for later saving
    sav_dir = os.getcwd()
    # The first argument is the called script.
    argv = argv[1:]

    # It seems safer to fully regenerate the list of tests on each call.
    if os.path.isfile('.noseids'):
        os.remove('.noseids')

    # Collect test IDs.
    print("""\
####################
# COLLECTING TESTS #
####################""")
    stdout.flush()
    stderr.flush()
    dummy_in = open(os.devnull)
    # We need to call 'python' on Windows, because theano-nose is not a
    # native Windows app; and it does not hurt to call it on Unix.
    # Using sys.executable, so that the same Python version is used.
    python = sys.executable
    rval = subprocess.call(
        ([python, theano_nose, '--collect-only', '--with-id'] + argv),
        stdin=dummy_in.fileno(),
        stdout=stdout.fileno(),
        stderr=stderr.fileno())
    stdout.flush()
    stderr.flush()
    assert rval == 0
    noseids_file = '.noseids'

    # nosetests' --with-id plugin pickles the collected IDs into '.noseids'.
    with open(noseids_file, 'rb') as f:
        data = pickle.load(f)

    ids = data['ids']
    n_tests = len(ids)
    if n_tests == 0:
        raise Exception("0 test selected")
    # IDs are expected to be the contiguous range 1..n_tests.
    assert n_tests == max(ids)

    # Standard batch testing is called for
    if not time_profile:
        failed = set()
        print("""\
###################################
# RUNNING TESTS IN BATCHES OF %s #
###################################""" % batch_size)
        # When `display_batch_output` is False, we suppress all output because
        # we want the user to focus only on the failed tests, which are re-run
        # (with output) below.
        dummy_out = open(os.devnull, 'w')
        for test_id in xrange(1, n_tests + 1, batch_size):
            stdout.flush()
            stderr.flush()
            test_range = list(range(test_id,
                                    min(test_id + batch_size, n_tests + 1)))
            cmd = ([python, theano_nose, '--with-id'] +
                   list(map(str, test_range)) +
                   argv)
            subprocess_extra_args = dict(stdin=dummy_in.fileno())
            if not display_batch_output:
                # Use quiet mode in nosetests.
                cmd.append('-q')
                # Suppress all output.
                subprocess_extra_args.update(dict(
                    stdout=dummy_out.fileno(),
                    stderr=dummy_out.fileno()))
            t0 = time.time()
            subprocess.call(cmd, **subprocess_extra_args)
            t1 = time.time()
            # Recover failed test indices from the 'failed' field of the
            # '.noseids' file. We need to do it after each batch because
            # otherwise this field may get erased. We use a set because it
            # seems like it is not systematically erased though, and we want
            # to avoid duplicates.
            with open(noseids_file, 'rb') as f:
                failed = failed.union(pickle.load(f)['failed'])

            print('%s%% done in %.3fs (failed: %s)' % (
                (test_range[-1] * 100) // n_tests, t1 - t0, len(failed)))
        # Sort for cosmetic purpose only.
        failed = sorted(failed)
        if failed:
            # Re-run only failed tests
            print("""\
################################
# RE-RUNNING FAILED TESTS ONLY #
################################""")
            stdout.flush()
            stderr.flush()
            subprocess.call(
                ([python, theano_nose, '-v', '--with-id'] + failed + argv),
                stdin=dummy_in.fileno(),
                stdout=stdout.fileno(),
                stderr=stderr.fileno())
            stdout.flush()
            stderr.flush()
            return 0
        else:
            print("""\
####################
# ALL TESTS PASSED #
####################""")

    # Time-profiling is called for
    else:
        print("""\
########################################
# RUNNING TESTS IN TIME-PROFILING MODE #
########################################""")

        # finds first word of list l containing string s
        def getIndexOfFirst(l, s):
            for pos, word in enumerate(l):
                if s in word:
                    return pos

        # finds last word of list l containing string s
        def getIndexOfLast(l, s):
            for pos, word in enumerate(reversed(l)):
                if s in word:
                    return (len(l) - pos - 1)

        # iterating through tests
        # initializing master profiling list and raw log
        prof_master_nosort = []
        dummy_out = open(os.devnull, 'w')
        path_rawlog = os.path.join(sav_dir, 'timeprof_rawlog')
        stamp = str(datetime.datetime.now()) + '\n\n'
        f_rawlog = open(path_rawlog, 'w')
        f_rawlog.write('TIME-PROFILING OF THEANO\'S NOSETESTS'
                       ' (raw log)\n\n' + stamp)
        f_rawlog.flush()

        stamp = str(datetime.datetime.now()) + '\n\n'
        fields = ('Fields: computation time; nosetests sequential id;'
                  ' test name; parent class (if any); outcome\n\n')
        path_nosort = os.path.join(sav_dir, 'timeprof_nosort')
        # probably this part can be extracted for function with many args
        with open(path_nosort, 'w') as f_nosort:
            # begin of saving nosort
            f_nosort.write('TIME-PROFILING OF THEANO\'S NOSETESTS'
                           ' (by sequential id)\n\n' + stamp + fields)
            f_nosort.flush()
            for test_floor in xrange(1, n_tests + 1, batch_size):
                for test_id in xrange(test_floor, min(test_floor + batch_size,
                                                      n_tests + 1)):
                    # Print the test we will start in the raw log to help
                    # debug tests that are too long.
                    f_rawlog.write("\n%s Will run test #%d %s\n" % (
                        time.ctime(), test_id, data["ids"][test_id]))
                    f_rawlog.flush()

                    p_out = output_subprocess_Popen(([python, theano_nose, '-v', '--with-id'] +
                                                     [str(test_id)] +
                                                     argv +
                                                     ['--disabdocstring']))
                    # the previous option calls a custom Nosetests plugin
                    # precluding automatic substitution of doc. string for
                    # test name in display
                    # (see class 'DisabDocString' in file theano-nose)

                    # recovering and processing data from pipe
                    err = p_out[1]
                    # print the raw log
                    # NOTE(review): err is assumed to be text here; under
                    # Python 3 a subprocess pipe may yield bytes -- confirm
                    # output_subprocess_Popen decodes before this write.
                    f_rawlog.write(err)
                    f_rawlog.flush()

                    # parsing the output: locate the test id token ('#...')
                    # and the '...' separator, then classify the outcome.
                    l_err = err.split()
                    try:
                        pos_id = getIndexOfFirst(l_err, '#')
                        prof_id = l_err[pos_id]
                        pos_dot = getIndexOfFirst(l_err, '...')
                        prof_test = ''
                        for s in l_err[pos_id + 1: pos_dot]:
                            prof_test += s + ' '
                        if 'OK' in err:
                            pos_ok = getIndexOfLast(l_err, 'OK')
                            if len(l_err) == pos_ok + 1:
                                # Plain 'OK' at end: the preceding token is
                                # the elapsed time, e.g. '1.234s'.
                                prof_time = float(l_err[pos_ok - 1][0:-1])
                                prof_pass = 'OK'
                            elif 'SKIP' in l_err[pos_ok + 1]:
                                prof_time = 0.
                                prof_pass = 'SKIPPED TEST'
                            elif 'KNOWNFAIL' in l_err[pos_ok + 1]:
                                prof_time = float(l_err[pos_ok - 1][0:-1])
                                prof_pass = 'OK'
                            else:
                                prof_time = 0.
                                prof_pass = 'FAILED TEST'
                        else:
                            prof_time = 0.
                            prof_pass = 'FAILED TEST'
                    except Exception:
                        # Any parsing hiccup is recorded, not fatal; the raw
                        # log keeps the full output for inspection.
                        prof_time = 0
                        prof_id = '#' + str(test_id)
                        prof_test = ('FAILED PARSING, see raw log for details'
                                     ' on test')
                        prof_pass = ''
                    prof_tuple = (prof_time, prof_id, prof_test, prof_pass)

                    # appending tuple to master list
                    prof_master_nosort.append(prof_tuple)

                    # write the no sort file
                    s_nosort = ((str(prof_tuple[0]) + 's').ljust(10) +
                                " " + prof_tuple[1].ljust(7) + " " +
                                prof_tuple[2] + prof_tuple[3] +
                                "\n")
                    f_nosort.write(s_nosort)
                    f_nosort.flush()

                # Progress line once per batch of batch_size tests.
                print('%s%% time-profiled' % ((test_id * 100) // n_tests))
        f_rawlog.close()

        # sorting tests according to running-time
        prof_master_sort = sorted(prof_master_nosort,
                                  key=lambda test: test[0], reverse=True)

        # saving results to readable files
        path_sort = os.path.join(sav_dir, 'timeprof_sort')
        with open(path_sort, 'w') as f_sort:
            f_sort.write('TIME-PROFILING OF THEANO\'S NOSETESTS'
                         ' (sorted by computation time)\n\n' + stamp + fields)
            for i in xrange(len(prof_master_nosort)):
                s_sort = ((str(prof_master_sort[i][0]) + 's').ljust(10) +
                          " " + prof_master_sort[i][1].ljust(7) + " " +
                          prof_master_sort[i][2] + prof_master_sort[i][3] +
                          "\n")
                f_sort.write(s_sort)
        # end of saving nosort
# Script entry point: propagate main()'s return value as the process
# exit code.
if __name__ == '__main__':
    sys.exit(main())
| |
from app.data_model.answer_store import AnswerStore
from app.questionnaire.location import Location
from app.questionnaire.questionnaire_schema import QuestionnaireSchema
from app.submitter.convert_payload_0_0_1 import convert_answers_to_payload_0_0_1
from app.submitter.converter import convert_answers
from tests.app.submitter.schema import make_schema
from tests.app.submitter.test_converter import TestConverter, create_answer
class TestConvertPayload001(TestConverter): # pylint: disable=too-many-public-methods
def test_convert_answers_to_payload_0_0_1_with_key_error(self):
with self._app.test_request_context():
user_answer = [create_answer('ABC', '2016-01-01', group_id='group-1', block_id='block-1'),
create_answer('DEF', '2016-03-30', group_id='group-1', block_id='block-1'),
create_answer('GHI', '2016-05-30', group_id='group-1', block_id='block-1')]
questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
{
'id': 'question-1',
'answers': [
{
'id': 'LMN',
'type': 'TextField',
'q_code': '001'
},
{
'id': 'DEF',
'type': 'TextField',
'q_code': '002'
},
{
'id': 'JKL',
'type': 'TextField',
'q_code': '003'
}
]
}
])
routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
answer_object = (convert_answers_to_payload_0_0_1(AnswerStore(user_answer), QuestionnaireSchema(questionnaire), routing_path))
self.assertEqual(answer_object['002'], '2016-03-30')
self.assertEqual(len(answer_object), 1)
def test_answer_with_zero(self):
with self._app.test_request_context():
user_answer = [create_answer('GHI', 0, group_id='group-1', block_id='block-1')]
questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
{
'id': 'question-2',
'answers': [
{
'id': 'GHI',
'type': 'TextField',
'q_code': '003'
}
]
}
])
routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Check the converter correctly
self.assertEqual('0', answer_object['data']['003'])
def test_answer_with_float(self):
with self._app.test_request_context():
user_answer = [create_answer('GHI', 10.02, group_id='group-1', block_id='block-1')]
questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
{
'id': 'question-2',
'answers': [
{
'id': 'GHI',
'type': 'TextField',
'q_code': '003'
}
]
}
])
routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Check the converter correctly
self.assertEqual('10.02', answer_object['data']['003'])
def test_answer_with_string(self):
with self._app.test_request_context():
user_answer = [create_answer('GHI', 'String test + !', group_id='group-1', block_id='block-1')]
questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
{
'id': 'question-2',
'answers': [
{
'id': 'GHI',
'type': 'TextField',
'q_code': '003'
}
]
}
])
routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Check the converter correctly
self.assertEqual('String test + !', answer_object['data']['003'])
def test_answer_with_multiple_instances(self):
with self._app.test_request_context():
user_answer = [create_answer('GHI', 0, group_id='group-1', block_id='block-1'),
create_answer('GHI', value=1, answer_instance=1, group_id='group-1', block_id='block-1'),
create_answer('GHI', value=2, answer_instance=2, group_id='group-1', block_id='block-1')]
questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
{
'id': 'question-2',
'answers': [
{
'id': 'GHI',
'type': 'TextField',
'q_code': '003'
}
]
}
])
routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Check the converter correctly
self.assertEqual(answer_object['data']['003'], ['0', '1', '2'])
def test_answer_without_qcode(self):
with self._app.test_request_context():
user_answer = [create_answer('GHI', 'String test + !', group_id='group-1', block_id='block-1')]
questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
{
'id': 'question-2',
'answers': [
{
'id': 'GHI',
'type': 'TextField'
}
]
}
])
routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
self.assertEqual(len(answer_object['data']), 0)
def test_get_checkbox_answer_with_duplicate_detail_answer_ids(self):
with self._app.test_request_context():
routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
answers = [create_answer('crisps-answer', [
'Ready salted',
'Other'
], group_id='favourite-food', block_id='crisps')]
answers += [create_answer('other-answer-mandatory', 'Other', group_id='favourite-food', block_id='crisps',
group_instance=1)]
answers += [create_answer('other-answer-mandatory', 'Other', group_id='favourite-food', block_id='crisps',
group_instance=1)]
questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
{
'id': 'crisps-question',
'answers': [
{
'id': 'crisps-answer',
'type': 'Checkbox',
'options': [
{
'label': 'Other',
'q_code': '4',
'description': 'Choose any other flavour',
'value': 'Other',
'detail_answer': {'id': 'other-answer-mandatory'}
}
]
}
]
}
])
with self.assertRaises(Exception) as err:
convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
self.assertEqual('Multiple answers found for {}'.format('other-answer-mandatory'), str(err.exception))
def test_converter_checkboxes_with_q_codes(self):
with self._app.test_request_context():
routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
answers = [create_answer('crisps-answer', [
'Ready salted',
'Sweet chilli'
], group_id='favourite-food', block_id='crisps')]
questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
{
'id': 'crisps-question',
'answers': [
{
'id': 'crisps-answer',
'type': 'Checkbox',
'options': [
{
'label': 'Ready salted',
'value': 'Ready salted',
'q_code': '1'
},
{
'label': 'Sweet chilli',
'value': 'Sweet chilli',
'q_code': '2'
},
{
'label': 'Cheese and onion',
'value': 'Cheese and onion',
'q_code': '3'
},
{
'label': 'Other',
'q_code': '4',
'description': 'Choose any other flavour',
'value': 'Other',
'detail_answer': {
'mandatory': True,
'id': 'other-answer-mandatory',
'label': 'Please specify other',
'type': 'TextField'
}
}
]
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 2)
self.assertEqual(answer_object['data']['1'], 'Ready salted')
self.assertEqual(answer_object['data']['2'], 'Sweet chilli')
def test_converter_checkboxes_with_q_codes_and_other_value(self):
with self._app.test_request_context():
routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
answers = [create_answer('crisps-answer', [
'Ready salted',
'Other'
], group_id='favourite-food', block_id='crisps')]
answers += [create_answer('other-answer-mandatory', 'Bacon', group_id='favourite-food', block_id='crisps')]
questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
{
'id': 'crisps-question',
'answers': [
{
'id': 'crisps-answer',
'type': 'Checkbox',
'options': [
{
'label': 'Ready salted',
'value': 'Ready salted',
'q_code': '1'
},
{
'label': 'Sweet chilli',
'value': 'Sweet chilli',
'q_code': '2'
},
{
'label': 'Cheese and onion',
'value': 'Cheese and onion',
'q_code': '3'
},
{
'label': 'Other',
'q_code': '4',
'description': 'Choose any other flavour',
'value': 'Other',
'detail_answer': {
'mandatory': True,
'id': 'other-answer-mandatory',
'label': 'Please specify other',
'type': 'TextField'
}
}
]
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 2)
self.assertEqual(answer_object['data']['1'], 'Ready salted')
self.assertEqual(answer_object['data']['4'], 'Bacon')
def test_converter_checkboxes_with_q_codes_and_empty_other_value(self):
with self._app.test_request_context():
routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
answers = [create_answer('crisps-answer', [
'Ready salted',
'Other'
], group_id='favourite-food', block_id='crisps')]
answers += [create_answer('other-answer-mandatory', '', group_id='favourite-food', block_id='crisps')]
questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
{
'id': 'crisps-question',
'answers': [
{
'id': 'crisps-answer',
'type': 'Checkbox',
'options': [
{
'label': 'Ready salted',
'value': 'Ready salted',
'q_code': '1'
},
{
'label': 'Sweet chilli',
'value': 'Sweet chilli',
'q_code': '2'
},
{
'label': 'Cheese and onion',
'value': 'Cheese and onion',
'q_code': '3'
},
{
'label': 'Other',
'q_code': '4',
'description': 'Choose any other flavour',
'value': 'Other',
'detail_answer': {
'mandatory': True,
'id': 'other-answer-mandatory',
'label': 'Please specify other',
'type': 'TextField'
}
}
]
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 2)
self.assertEqual(answer_object['data']['1'], 'Ready salted')
self.assertEqual(answer_object['data']['4'], 'Other')
def test_converter_q_codes_for_empty_strings(self):
with self._app.test_request_context():
routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
answers = [create_answer('crisps-answer', '', group_id='favourite-food', block_id='crisps')]
answers += [
create_answer('other-crisps-answer', 'Ready salted', group_id='favourite-food', block_id='crisps')]
questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
{
'id': 'crisps-question',
'answers': [
{
'id': 'crisps-answer',
'type': 'TextArea',
'options': [],
'q_code': '1'
},
{
'id': 'other-crisps-answer',
'type': 'TextArea',
'options': [],
'q_code': '2'
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 1)
self.assertEqual(answer_object['data']['2'], 'Ready salted')
def test_radio_answer(self):
with self._app.test_request_context():
routing_path = [Location(group_id='radio-group', group_instance=0, block_id='radio-block')]
user_answer = [create_answer('radio-answer', 'Coffee', group_id='radio-group', block_id='radio-block')]
questionnaire = make_schema('0.0.1', 'section-1', 'radio-block', 'radio-block', [
{
'id': 'radio-question',
'answers': [
{
'type': 'Radio',
'id': 'radio-answer',
'q_code': '1',
'options': [
{
'label': 'Coffee',
'value': 'Coffee'
},
{
'label': 'Tea',
'value': 'Tea'
}
]
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 1)
self.assertEqual(answer_object['data']['1'], 'Coffee')
def test_number_answer(self):
with self._app.test_request_context():
routing_path = [Location(group_id='number-group', group_instance=0, block_id='number-block')]
user_answer = [create_answer('number-answer', 0.9999, group_id='number-block', block_id='number-block')]
questionnaire = make_schema('0.0.1', 'section-1', 'number-block', 'number-block', [
{
'id': 'number-question',
'answers': [
{
'id': 'number-answer',
'type': 'Number',
'q_code': '1'
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 1)
self.assertEqual(answer_object['data']['1'], '0.9999')
def test_percentage_answer(self):
with self._app.test_request_context():
routing_path = [Location(group_id='percentage-group', group_instance=0, block_id='percentage-block')]
user_answer = [create_answer('percentage-answer', 100, group_id='percentage-group', block_id='percentage-block')]
questionnaire = make_schema('0.0.1', 'section-1', 'percentage-block', 'percentage-block', [
{
'id': 'percentage-question',
'answers': [
{
'id': 'percentage-answer',
'type': 'Percentage',
'q_code': '1'
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 1)
self.assertEqual(answer_object['data']['1'], '100')
def test_textarea_answer(self):
with self._app.test_request_context():
routing_path = [Location(group_id='textarea-group', group_instance=0, block_id='textarea-block')]
user_answer = [create_answer('textarea-answer', 'example text.', group_id='textarea-group', block_id='textarea-block')]
questionnaire = make_schema('0.0.1', 'section-1', 'textarea-block', 'textarea-block', [
{
'id': 'textarea-question',
'answers': [
{
'id': 'textarea-answer',
'q_code': '1',
'type': 'TextArea'
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 1)
self.assertEqual(answer_object['data']['1'], 'example text.')
def test_currency_answer(self):
with self._app.test_request_context():
routing_path = [Location(group_id='currency-group', group_instance=0, block_id='currency-block')]
user_answer = [create_answer('currency-answer', 99.99, group_id='currency-group', block_id='currency-block')]
questionnaire = make_schema('0.0.1', 'section-1', 'currency-block', 'currency-block', [
{
'id': 'currency-question',
'answers': [
{
'id': 'currency-answer',
'type': 'Currency',
'q_code': '1'
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 1)
self.assertEqual(answer_object['data']['1'], '99.99')
def test_dropdown_answer(self):
with self._app.test_request_context():
routing_path = [Location(group_id='dropdown-group', group_instance=0, block_id='dropdown-block')]
user_answer = [create_answer('dropdown-answer', 'Liverpool', group_id='dropdown-group', block_id='dropdown-block')]
questionnaire = make_schema('0.0.1', 'section-1', 'dropdown-block', 'dropdown-block', [
{
'id': 'dropdown-question',
'answers': [
{
'id': 'dropdown-answer',
'type': 'Dropdown',
'q_code': '1',
'options': [
{
'label': 'Liverpool',
'value': 'Liverpool'
},
{
'label': 'Chelsea',
'value': 'Chelsea'
},
{
'label': 'Rugby is better!',
'value': 'Rugby is better!'
}
]
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 1)
self.assertEqual(answer_object['data']['1'], 'Liverpool')
def test_date_answer(self):
with self._app.test_request_context():
routing_path = [Location(group_id='date-group', group_instance=0, block_id='date-block')]
user_answer = [create_answer('single-date-answer', '1990-02-01', group_id='date-group', block_id='date-block'),
create_answer('month-year-answer', '1990-01', group_id='date-group', block_id='date-block')]
questionnaire = make_schema('0.0.1', 'section-1', 'date-block', 'date-block', [
{
'id': 'single-date-question',
'answers': [
{
'id': 'single-date-answer',
'type': 'Date',
'q_code': '1'
}
]
},
{
'id': 'month-year-question',
'answers': [
{
'id': 'month-year-answer',
'type': 'MonthYearDate',
'q_code': '2'
}
]
}
])
# When
answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
# Then
self.assertEqual(len(answer_object['data']), 2)
self.assertEqual(answer_object['data']['1'], '01/02/1990')
self.assertEqual(answer_object['data']['2'], '01/1990')
def test_unit_answer(self):
    """A Unit answer's numeric value is downstreamed as a string."""
    with self._app.test_request_context():
        path = [Location(group_id='unit-group', group_instance=0, block_id='unit-block')]
        store = AnswerStore([create_answer('unit-answer', 10,
                                           group_id='unit-group', block_id='unit-block')])
        unit_answer_schema = {
            'id': 'unit-answer',
            'type': 'Unit',
            'q_code': '1'
        }
        questionnaire = make_schema('0.0.1', 'section-1', 'unit-block', 'unit-block',
                                    [{'id': 'unit-question', 'answers': [unit_answer_schema]}])

        # When
        answer_object = convert_answers(self.metadata, self.collection_metadata,
                                        QuestionnaireSchema(questionnaire), store, path)

        # Then
        self.assertEqual(len(answer_object['data']), 1)
        self.assertEqual(answer_object['data']['1'], '10')
def test_relationship_answer(self):
    """Multiple instances of a Relationship answer collapse to one list under the q_code."""
    with self._app.test_request_context():
        routing_path = [Location(group_id='relationship-group', group_instance=0, block_id='relationship-block')]
        # Three answer instances (answer_instance 0, 1, 2) for the same answer id.
        user_answer = [create_answer('relationship-answer', 'Unrelated', group_id='relationship-group', block_id='relationship-block'),
                       create_answer('relationship-answer', 'Partner', group_id='relationship-group', block_id='relationship-block',
                                     answer_instance=1),
                       create_answer('relationship-answer', 'Husband or wife', group_id='relationship-group', block_id='relationship-block',
                                     answer_instance=2)]
        questionnaire = make_schema('0.0.1', 'section-1', 'relationship-block', 'relationship-block', [
            {
                'id': 'relationship-question',
                'type': 'Relationship',
                'answers': [
                    {
                        'id': 'relationship-answer',
                        'q_code': '1',
                        'type': 'Relationship',
                        'options': [
                            {
                                'label': 'Husband or wife',
                                'value': 'Husband or wife'
                            },
                            {
                                'label': 'Partner',
                                'value': 'Partner'
                            },
                            {
                                'label': 'Unrelated',
                                'value': 'Unrelated'
                            }
                        ]
                    }
                ]
            }
        ])

        # When
        answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)

        # Then: values are returned in answer_instance order.
        self.assertEqual(len(answer_object['data']), 1)
        self.assertEqual(answer_object['data']['1'], ['Unrelated', 'Partner', 'Husband or wife'])
| |
"""
${NAME}
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import sys
import collections
import itertools
from OpenGL import GL
import numpy
from mcedit2.rendering.layers import Layer
from mcedit2.rendering import chunkupdate
from mcedit2.rendering.scenegraph import scenenode
from mcedit2.rendering import renderstates
from mcedit2.rendering.chunknode import ChunkNode, ChunkGroupNode
from mcedit2.rendering.chunkupdate import ChunkRenderInfo
from mcedit2.rendering.depths import DepthOffset
from mcedit2.rendering.geometrycache import GeometryCache
from mcedit2.rendering.scenegraph.depth_test import DepthOffsetNode
from mcedit2.rendering.scenegraph.texture_atlas import TextureAtlasNode
from mcedit2.util.glutils import Texture
from mcedit2.util.load_png import loadPNGData
from mceditlib.anvil.biome_types import BiomeTypes
log = logging.getLogger(__name__)
def layerProperty(layer, default=True):
    """Build a property that tracks whether the named render layer is drawn.

    Reads/writes a per-instance "_draw<layer>" attribute (falling back to
    *default*) and calls self.toggleLayer() whenever the value changes.
    """
    # Python 2 intern(): layer-flag attribute names are shared strings.
    attr = intern(str("_draw" + layer))

    def _get(self):
        return getattr(self, attr, default)

    def _set(self, value):
        # Only toggle when the flag actually changes.
        if value == _get(self):
            return
        setattr(self, attr, value)
        self.toggleLayer(value, layer)

    return property(_get, _set)
# True when the app was launched with the "-debuglists" flag; enables
# world-mesh display-list debugging elsewhere in the renderer.
DEBUG_WORLDMESH_LISTS = "-debuglists" in sys.argv
class SceneUpdateTask(object):
    """Incrementally rebuilds scene-graph nodes for a WorldScene's chunks.

    workOnChunk() is a generator so the caller can interleave chunk mesh
    building with rendering, yielding every `workFactor` units of work.

    NOTE: Python 2 only (tuple-unpacking parameter in chunkNotPresent,
    dict.itervalues in __init__).
    """
    showRedraw = True
    showHiddenOres = False
    showChunkRedraw = True
    spaceHeight = 64
    targetFPS = 30

    def __init__(self, worldScene, textureAtlas, bounds=None):
        """
        :type worldScene: WorldScene
        :type bounds: BoundingBox
        :type textureAtlas: TextureAtlas
        """
        self.worldScene = worldScene
        self.render = True
        self.rotation = 0
        self.alpha = 255
        self.textureAtlas = textureAtlas
        self.mapTextures = {}    # mapID -> Texture
        self.modelTextures = {}  # texturePath -> Texture

        # Per-block-ID render-type lookup table; unknown IDs default to 3.
        # NOTE(review): table is sized 256*256 while indexed by block.ID —
        # presumably IDs can exceed 255; confirm against blocktypes.
        self.renderType = numpy.zeros((256*256,), 'uint8')
        self.renderType[:] = 3
        for block in self.worldScene.dimension.blocktypes:
            self.renderType[block.ID] = block.renderType

        # Per-biome-ID climate lookup tables (rainfall/temperature).
        biomeTypes = BiomeTypes()
        self.biomeRain = numpy.zeros((256*256,), numpy.float32)
        self.biomeTemp = numpy.zeros((256*256,), numpy.float32)
        for biome in biomeTypes.types.itervalues():
            self.biomeRain[biome.ID] = biome.rainfall
            self.biomeTemp[biome.ID] = biome.temperature

    overheadMode = False

    # Tunables for how much mesh-building work to do between yields.
    maxWorkFactor = 64
    minWorkFactor = 1
    workFactor = 2

    def wantsChunk(self, cPos):
        """Return truthy if the chunk at cPos still has layers to render."""
        chunkInfo = self.worldScene.chunkRenderInfo.get(cPos)
        if chunkInfo is None:
            return True

        return chunkInfo.layersToRender

    def workOnChunk(self, chunk, visibleSections=None):
        """Generator: rebuild the chunk's meshes and splice the resulting
        scene nodes into the per-renderstate chunk group nodes.

        Yields periodically (every workFactor units) so the caller can
        spread the work across frames. Exceptions are logged, not raised.
        """
        work = 0
        cPos = chunk.chunkPosition
        log.debug("Working on chunk %s sections %s", cPos, visibleSections)

        chunkInfo = self.worldScene.getChunkRenderInfo(cPos)
        chunkInfo.visibleSections = visibleSections  # currently unused
        try:
            chunkUpdate = chunkupdate.ChunkUpdate(self, chunkInfo, chunk)
            for _ in chunkUpdate:
                work += 1
                if (work % SceneUpdateTask.workFactor) == 0:
                    yield

            # Group the finished meshes by their renderstate.
            meshesByRS = collections.defaultdict(list)
            for mesh in chunkUpdate.blockMeshes:
                meshesByRS[mesh.renderstate].append(mesh)

            # Create one ChunkNode for each renderstate group, if needed
            for renderstate in renderstates.allRenderstates:
                groupNode = self.worldScene.getRenderstateGroup(renderstate)
                if groupNode.containsChunkNode(cPos):
                    chunkNode = groupNode.getChunkNode(cPos)
                else:
                    chunkNode = ChunkNode(cPos)
                    groupNode.addChunkNode(chunkNode)

                meshes = meshesByRS[renderstate]
                if len(meshes):
                    # Sort so itertools.groupby below sees each layer contiguously.
                    meshes = sorted(meshes, key=lambda m: m.layer)

                    log.debug("Updating chunk node for renderstate %s, mesh count %d", renderstate, len(meshes))
                    for layer, layerMeshes in itertools.groupby(meshes, lambda m: m.layer):
                        if layer not in self.worldScene.visibleLayers:
                            continue
                        layerMeshes = list(layerMeshes)

                        # Check if the mesh was re-rendered and remove the old mesh
                        meshTypes = set(type(m) for m in layerMeshes)
                        for arrayNode in list(chunkNode.children):
                            if arrayNode.meshType in meshTypes:
                                chunkNode.removeChild(arrayNode)

                        # Add the scene nodes created by each mesh builder
                        for mesh in layerMeshes:
                            if mesh.sceneNode:
                                mesh.sceneNode.layerName = layer
                                mesh.sceneNode.meshType = type(mesh)
                                chunkNode.addChild(mesh.sceneNode)

                        chunkInfo.renderedLayers.add(layer)

                # Drop empty chunk nodes so the group doesn't accumulate husks.
                if chunkNode.childCount() == 0:
                    groupNode.discardChunkNode(*cPos)

        except Exception as e:
            log.exception(u"Rendering chunk %s failed: %r", cPos, e)

    def chunkNotPresent(self, (cx, cz)):
        # Assume chunk was deleted by the user: remove its node from every
        # renderstate group.
        for renderstate in renderstates.allRenderstates:
            groupNode = self.worldScene.getRenderstateGroup(renderstate)
            groupNode.discardChunkNode(cx, cz)

    def getMapTexture(self, mapID):
        """Return (and cache) a GL Texture for the given in-game map.

        Returns None implicitly if the map data cannot be loaded.
        """
        def _loadFunc(colors):
            def _load():
                w = colors.shape[1]
                h = colors.shape[0]
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, w, h, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, colors.ravel())
            return _load

        if mapID in self.mapTextures:
            return self.mapTextures[mapID]

        try:
            mapData = self.worldScene.dimension.worldEditor.getMap(mapID)
        except Exception as e:
            log.exception("Map %s could not be loaded (while loading GL texture)", mapID)
        else:
            colors = mapData.getColorsAsRGBA()
            mapTex = Texture(_loadFunc(colors))
            self.mapTextures[mapID] = mapTex
            return mapTex

    def getModelTexture(self, texturePath):
        """Return (and cache) a GL Texture for an entity/model texture path.

        Returns None implicitly if the PNG cannot be read from the resource
        loader.
        """
        if texturePath in self.modelTextures:
            return self.modelTextures[texturePath]

        try:
            w, h, rgba = loadPNGData(self.textureAtlas.resourceLoader.openStream(texturePath).read())
        except Exception as e:
            log.exception("Model texture %s could not be loaded", texturePath)
        else:
            def _load():
                # rgba[::-1] flips rows to match GL's bottom-up texture origin.
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, w, h, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, rgba[::-1])

            modelTex = Texture(_load)
            self.modelTextures[texturePath] = modelTex
            return modelTex
class WorldScene(scenenode.Node):
    """Root scene node for one world dimension.

    Node hierarchy built here:
        self -> DepthOffsetNode -> TextureAtlasNode -> RenderstateNode(per
        renderstate) -> ChunkGroupNode -> ChunkNode (per chunk).

    Chunk mesh building is delegated to a SceneUpdateTask.
    NOTE: Python 2 only (dict.iterkeys/itervalues).
    """
    def __init__(self, dimension, textureAtlas=None, geometryCache=None, bounds=None):
        super(WorldScene, self).__init__()
        self.dimension = dimension
        self.textureAtlas = textureAtlas

        self.depthOffsetNode = DepthOffsetNode(DepthOffset.Renderer)
        self.addChild(self.depthOffsetNode)

        self.textureAtlasNode = TextureAtlasNode(textureAtlas)
        self.depthOffsetNode.addChild(self.textureAtlasNode)

        # One RenderstateNode per renderstate class; chunk group nodes are
        # attached beneath these lazily by getRenderstateGroup().
        self.renderstateNodes = {}
        for rsClass in renderstates.allRenderstates:
            rsNode = scenenode.RenderstateNode(rsClass)
            self.textureAtlasNode.addChild(rsNode)
            self.renderstateNodes[rsClass] = rsNode

        self.groupNodes = {}  # by renderstate
        self.chunkRenderInfo = {}  # (cx, cz) -> ChunkRenderInfo
        self.visibleLayers = set(Layer.DefaultVisibleLayers)

        self.updateTask = SceneUpdateTask(self, textureAtlas, bounds)

        if geometryCache is None:
            geometryCache = GeometryCache()
        self.geometryCache = geometryCache

        self.showRedraw = False

        self.minlod = 0
        self.bounds = bounds

    def setTextureAtlas(self, textureAtlas):
        """Swap in a new texture atlas; all chunks must be rebuilt to use it."""
        if textureAtlas is not self.textureAtlas:
            self.textureAtlas = textureAtlas
            self.textureAtlasNode.textureAtlas = textureAtlas
            self.updateTask.textureAtlas = textureAtlas
            self.discardAllChunks()

    def chunkPositions(self):
        """Iterate the (cx, cz) positions that currently have render info."""
        return self.chunkRenderInfo.iterkeys()

    def getRenderstateGroup(self, rsClass):
        """Return the ChunkGroupNode for rsClass, creating it on first use."""
        groupNode = self.groupNodes.get(rsClass)
        if groupNode is None:
            groupNode = ChunkGroupNode()
            self.groupNodes[rsClass] = groupNode
            self.renderstateNodes[rsClass].addChild(groupNode)

        return groupNode

    def discardChunk(self, cx, cz):
        """
        Discard the chunk at the given position from the scene
        """
        for groupNode in self.groupNodes.itervalues():
            groupNode.discardChunkNode(cx, cz)
        self.chunkRenderInfo.pop((cx, cz), None)

    def discardChunks(self, chunks):
        """Discard every (cx, cz) position in the iterable `chunks`."""
        for cx, cz in chunks:
            self.discardChunk(cx, cz)

    def discardAllChunks(self):
        """Drop every chunk node and all render info; forces a full rebuild."""
        for groupNode in self.groupNodes.itervalues():
            groupNode.clear()
        self.chunkRenderInfo.clear()

    def invalidateChunk(self, cx, cz, invalidLayers=None):
        """
        Mark the chunk for regenerating vertex data
        """
        if invalidLayers is None:
            invalidLayers = Layer.AllLayers

        node = self.chunkRenderInfo.get((cx, cz))
        if node:
            node.invalidLayers.update(invalidLayers)

    # Toggling any of these three options invalidates all built geometry,
    # so the setters discard every chunk when the value actually changes.
    _fastLeaves = False

    @property
    def fastLeaves(self):
        return self._fastLeaves

    @fastLeaves.setter
    def fastLeaves(self, val):
        if self._fastLeaves != bool(val):
            self.discardAllChunks()
        self._fastLeaves = bool(val)

    _roughGraphics = False

    @property
    def roughGraphics(self):
        return self._roughGraphics

    @roughGraphics.setter
    def roughGraphics(self, val):
        if self._roughGraphics != bool(val):
            self.discardAllChunks()
        self._roughGraphics = bool(val)

    _showHiddenOres = False

    @property
    def showHiddenOres(self):
        return self._showHiddenOres

    @showHiddenOres.setter
    def showHiddenOres(self, val):
        if self._showHiddenOres != bool(val):
            self.discardAllChunks()
        self._showHiddenOres = bool(val)

    # The three methods below delegate the chunk-loader protocol to the
    # update task.
    def wantsChunk(self, cPos):
        return self.updateTask.wantsChunk(cPos)

    def workOnChunk(self, chunk, visibleSections=None):
        return self.updateTask.workOnChunk(chunk, visibleSections)

    def chunkNotPresent(self, cPos):
        self.updateTask.chunkNotPresent(cPos)

    def getChunkRenderInfo(self, cPos):
        """Return the ChunkRenderInfo for cPos, creating it on first use."""
        chunkInfo = self.chunkRenderInfo.get(cPos)
        if chunkInfo is None:
            #log.info("Creating ChunkRenderInfo %s in %s", cPos, self.worldScene)
            chunkInfo = ChunkRenderInfo(self, cPos)
            self.chunkRenderInfo[cPos] = chunkInfo

        return chunkInfo

    def setLayerVisible(self, layerName, visible):
        """Show or hide one render layer across all chunk group nodes."""
        if visible:
            self.visibleLayers.add(layerName)
        else:
            self.visibleLayers.discard(layerName)
        for groupNode in self.groupNodes.itervalues():
            groupNode.setLayerVisible(layerName, visible)

    def setVisibleLayers(self, layerNames):
        # NOTE(review): unlike setLayerVisible, this does not push the change
        # into the group nodes — confirm callers follow up with a redraw.
        self.visibleLayers = set(layerNames)
| |
# -*- coding: utf-8 -*-
import errno
import hashlib
import logging
import os
import traceback
from datetime import datetime
from PIL import Image
from urllib.parse import unquote
from urllib.request import url2pathname
from django import template
from django.utils.encoding import smart_str
from django.utils.timezone import get_default_timezone, is_aware, make_aware
from watermarker import utils
from watermarker.conf import settings
from watermarker.models import Watermark
QUALITY = settings.WATERMARK_QUALITY
OBSCURE_ORIGINAL = settings.WATERMARK_OBSCURE_ORIGINAL
RANDOM_POSITION_ONCE = settings.WATERMARK_RANDOM_POSITION_ONCE
register = template.Library()
logger = logging.getLogger("watermarker")
class Watermarker(object):
    """Callable that produces (and caches on disk) a watermarked copy of an
    image, returning the URL of the watermarked file."""

    def __call__(
        self,
        url,
        name,
        position=None,
        opacity=0.5,
        tile=False,
        scale=1.0,
        greyscale=False,
        rotation=0,
        noalpha=True,
        quality=QUALITY,
        obscure=OBSCURE_ORIGINAL,
        random_position_once=RANDOM_POSITION_ONCE,
    ):
        """
        Creates a watermarked copy of an image.

        Returns the URL path of the watermarked image, or the original
        `url` unchanged when the named Watermark does not exist.
        """
        # look for the specified watermark by name. If it's not there, go no
        # further
        try:
            watermark = Watermark.objects.get(name__exact=name, is_active=True)
        except Watermark.DoesNotExist:
            logger.error('Watermark "%s" does not exist... Bailing out.' % name)
            return url

        # make sure URL is a string
        url = smart_str(url)

        basedir = "%s/watermarked/" % os.path.dirname(url)
        original_basename, ext = os.path.splitext(os.path.basename(url))

        # open the target image file along with the watermark image
        target = Image.open(self._get_filesystem_path(url))
        mark = Image.open(watermark.image.path)

        # determine the actual value that the parameters provided will render
        # position None or "r"/"R" means "random placement".
        random_position = bool(position is None or str(position).lower() == "r")
        scale = utils.determine_scale(scale, target, mark)
        # NOTE(review): Image.ANTIALIAS is deprecated (removed in Pillow 10);
        # newer Pillow expects Image.Resampling.LANCZOS — confirm Pillow pin.
        mark = mark.resize(scale, resample=Image.ANTIALIAS)
        rotation = utils.determine_rotation(rotation, mark)
        pos = utils.determine_position(position, target, mark)

        # see if we need to create only one randomly positioned watermarked
        # image
        # NOTE(review): this branch looks inverted — when random_position is
        # False the log claims a random position is generated each time, and
        # the "once" case leaves `position` untouched. Verify against the
        # upstream django-watermark logic before changing.
        if not random_position or (not random_position_once and random_position):
            logger.debug("Generating random position for watermark each time")
            position = pos
        else:
            logger.debug("Random positioning watermark once")

        # Everything that influences the output file's content goes into the
        # cache-key parameters (including source-file mtime/size via fstat).
        params = {
            "position": position,
            "opacity": opacity,
            "scale": scale,
            "tile": tile,
            "greyscale": greyscale,
            "rotation": rotation,
            "original_basename": original_basename,
            "ext": ext,
            "noalpha": noalpha,
            "quality": quality,
            "watermark": watermark.id,
            "left": pos[0],
            "top": pos[1],
            "fstat": os.stat(self._get_filesystem_path(url)),
        }
        logger.debug("Params: %s" % params)

        fname = self.generate_filename(mark, **params)
        url_path = self.get_url_path(basedir, original_basename, ext, fname, obscure)
        fpath = self._get_filesystem_path(url_path)

        logger.debug(
            "Watermark name: %s; URL: %s; Path: %s"
            % (
                fname,
                url_path,
                fpath,
            )
        )

        # see if the image already exists on the filesystem. If it does, use it.
        if os.access(fpath, os.R_OK):
            # see if the ``Watermark`` object was modified since the
            # file was created
            modified = make_aware(datetime.fromtimestamp(os.path.getmtime(fpath)), get_default_timezone())
            date_updated = watermark.date_updated
            if not is_aware(date_updated):
                date_updated = make_aware(date_updated, get_default_timezone())

            # only return the old file if things appear to be the same
            if modified >= date_updated:
                logger.info("Watermark exists and has not changed. Bailing out.")
                return url_path

        # make sure the position is in our params for the watermark
        params["position"] = pos

        self.create_watermark(target, mark, fpath, **params)

        # send back the URL to the new, watermarked image
        return url_path

    def _get_filesystem_path(self, url_path, basedir=settings.MEDIA_ROOT):
        """Makes a filesystem path from the specified URL path"""
        if url_path.startswith(settings.MEDIA_URL):
            url_path = url_path[len(settings.MEDIA_URL):]  # strip media root url

        return os.path.normpath(os.path.join(basedir, url2pathname(url_path)))

    def generate_filename(self, mark, **kwargs):
        """Comes up with a good filename for the watermarked image

        The name encodes every rendering parameter plus the source file's
        mtime and size, so any change produces a new cache file.
        """
        kwargs = kwargs.copy()

        kwargs["opacity"] = int(kwargs["opacity"] * 100)
        kwargs["st_mtime"] = kwargs["fstat"].st_mtime
        kwargs["st_size"] = kwargs["fstat"].st_size

        params = [
            "%(original_basename)s",
            "wm",
            "w%(watermark)i",
            "o%(opacity)i",
            "gs%(greyscale)i",
            "r%(rotation)i",
            "fm%(st_mtime)i",
            "fz%(st_size)i",
            "p%(position)s",
        ]

        scale = kwargs.get("scale", None)
        if scale and scale != mark.size:
            # Encoded as percent of the watermark's natural width.
            params.append("_s%i" % (float(kwargs["scale"][0]) / mark.size[0] * 100))

        if kwargs.get("tile", None):
            params.append("_tiled")

        # make thumbnail filename
        filename = "%s%s" % ("_".join(params), kwargs["ext"])

        return filename % kwargs

    def get_url_path(self, basedir, original_basename, ext, name, obscure=True):
        """Determines an appropriate watermark path"""
        # sha1 needs bytes: the except branch covers Python 3 str input.
        try:
            hash = hashlib.sha1(smart_str(name)).hexdigest()
        except TypeError:
            hash = hashlib.sha1(smart_str(name).encode("utf-8")).hexdigest()

        # figure out where the watermark would be saved on the filesystem
        if obscure is True:
            logger.debug("Obscuring original image name: %s => %s" % (name, hash))
            url_path = os.path.join(basedir, hash + ext)
        else:
            logger.debug("Not obscuring original image name.")
            url_path = os.path.join(basedir, hash, original_basename + ext)

        # make sure the destination directory exists
        try:
            fpath = self._get_filesystem_path(url_path)
            os.makedirs(os.path.dirname(fpath))
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass  # not to worry, directory exists
            else:
                logger.error("Error creating path: %s" % traceback.format_exc())
                raise
        else:
            logger.debug("Created directory: %s" % os.path.dirname(fpath))

        return url_path

    def create_watermark(self, target, mark, fpath, quality=QUALITY, **kwargs):
        """Create the watermarked image on the filesystem"""
        im = utils.watermark(target, mark, **kwargs)
        # Equivalent to `noalpha is not False`: flatten to RGB unless the
        # caller explicitly passed noalpha=False to keep the alpha channel.
        if not kwargs.get("noalpha", True) is False:
            im = im.convert("RGB")
        im.save(fpath, quality=quality)
        return im
@register.filter
def watermark(url, args=""):
    """
    Returns the URL to a watermarked copy of the image specified.

    `args` is a comma-separated list: the first item is the watermark name,
    the rest are `key=value` options. Unrecognised keys are ignored.
    """
    pieces = args.split(",")
    params = dict(
        name=pieces.pop(0),
        opacity=0.5,
        tile=False,
        scale=1.0,
        greyscale=False,
        rotation=0,
        position=None,
        noalpha=True,
        quality=QUALITY,
        obscure=OBSCURE_ORIGINAL,
        random_position_once=RANDOM_POSITION_ONCE,
    )
    params["url"] = unquote(url)

    # How each recognised option's raw string is converted before use.
    _as_flag = lambda v: bool(int(v))
    converters = {
        "position": lambda v: v,
        "opacity": utils._percent,
        "tile": _as_flag,
        "scale": lambda v: v,
        "greyscale": _as_flag,
        "rotation": lambda v: v,
        "noalpha": _as_flag,
        "quality": int,
        "obscure": _as_flag,
        "random_position_once": _as_flag,
    }

    for piece in pieces:
        key, value = piece.split("=")
        key, value = key.strip(), value.strip()
        convert = converters.get(key)
        if convert is not None:
            params[key] = convert(value)

    return Watermarker()(**params)
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
from six.moves.urllib import parse as urlparse
from webtest.app import AppError
from wsme import types as wtypes
from magnum.api import attr_validator
from magnum.api.controllers.v1 import baymodel as api_baymodel
from magnum.common import exception
from magnum.common import policy as magnum_policy
import magnum.conf
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils
CONF = magnum.conf.CONF
class TestBayModelObject(base.TestCase):

    def test_baymodel_init(self):
        """Fields missing from the input dict should come back as wtypes.Unset."""
        post_data = apiutils.baymodel_post_data()
        post_data.pop('image_id')
        model = api_baymodel.BayModel(**post_data)
        self.assertEqual(wtypes.Unset, model.image_id)
class TestListBayModel(api_base.FunctionalTest):
    """Functional tests for GET /baymodels list, detail and link behavior."""

    # Attributes every serialized baymodel representation must expose.
    _baymodel_attrs = ('name', 'apiserver_port', 'network_driver',
                       'coe', 'flavor_id', 'fixed_network',
                       'dns_nameserver', 'http_proxy',
                       'docker_volume_size', 'server_type',
                       'cluster_distro', 'external_network_id',
                       'image_id', 'registry_enabled', 'no_proxy',
                       'keypair_id', 'https_proxy', 'tls_disabled',
                       'public', 'labels', 'master_flavor_id',
                       'volume_driver', 'insecure_registry')

    def test_empty(self):
        """Listing with no baymodels returns an empty collection."""
        response = self.get_json('/baymodels')
        self.assertEqual([], response['baymodels'])

    def test_one(self):
        """A single baymodel is listed with all expected attributes."""
        baymodel = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/baymodels')
        self.assertEqual(baymodel.uuid, response['baymodels'][0]["uuid"])
        self._verify_attrs(self._baymodel_attrs,
                           response['baymodels'][0])

    def test_get_one(self):
        """GET by UUID returns the full resource."""
        baymodel = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/baymodels/%s' % baymodel['uuid'])
        self.assertEqual(baymodel.uuid, response['uuid'])
        self._verify_attrs(self._baymodel_attrs, response)

    def test_get_one_by_name(self):
        """GET by name resolves to the resource's UUID."""
        baymodel = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/baymodels/%s' % baymodel['name'])
        self.assertEqual(baymodel.uuid, response['uuid'])
        self._verify_attrs(self._baymodel_attrs, response)

    def test_get_one_by_name_not_found(self):
        """GET by an unknown name returns 404 with an errors payload."""
        response = self.get_json(
            '/baymodels/not_found',
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_one_by_name_multiple_baymodel(self):
        """GET by a name shared by two baymodels returns 409 Conflict."""
        obj_utils.create_test_cluster_template(
            self.context, name='test_baymodel',
            uuid=uuidutils.generate_uuid())
        obj_utils.create_test_cluster_template(
            self.context, name='test_baymodel',
            uuid=uuidutils.generate_uuid())
        response = self.get_json(
            '/baymodels/test_baymodel',
            expect_errors=True)
        self.assertEqual(409, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_all_with_pagination_marker(self):
        """Listing past a marker returns only entries after that marker."""
        bm_list = []
        for id_ in range(4):
            baymodel = obj_utils.create_test_cluster_template(
                self.context, id=id_,
                uuid=uuidutils.generate_uuid())
            bm_list.append(baymodel)

        # Marker is the third of four; only the fourth should come back.
        response = self.get_json('/baymodels?limit=3&marker=%s'
                                 % bm_list[2].uuid)
        self.assertEqual(1, len(response['baymodels']))
        self.assertEqual(bm_list[-1].uuid, response['baymodels'][0]['uuid'])

    def test_detail(self):
        """The /detail view exposes the same attribute set."""
        baymodel = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/baymodels/detail')
        self.assertEqual(baymodel.uuid, response['baymodels'][0]["uuid"])
        self._verify_attrs(self._baymodel_attrs,
                           response['baymodels'][0])

    def test_detail_with_pagination_marker(self):
        """Pagination markers work on the /detail view too."""
        bm_list = []
        for id_ in range(4):
            baymodel = obj_utils.create_test_cluster_template(
                self.context, id=id_,
                uuid=uuidutils.generate_uuid())
            bm_list.append(baymodel)

        response = self.get_json('/baymodels/detail?limit=3&marker=%s'
                                 % bm_list[2].uuid)
        self.assertEqual(1, len(response['baymodels']))
        self.assertEqual(bm_list[-1].uuid, response['baymodels'][0]['uuid'])
        self._verify_attrs(self._baymodel_attrs,
                           response['baymodels'][0])

    def test_detail_against_single(self):
        """/detail is a collection view; applying it to one resource is 404."""
        baymodel = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/baymodels/%s/detail' % baymodel['uuid'],
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)

    def test_many(self):
        """All created baymodels are listed, regardless of order."""
        bm_list = []
        for id_ in range(5):
            baymodel = obj_utils.create_test_cluster_template(
                self.context, id=id_,
                uuid=uuidutils.generate_uuid())
            bm_list.append(baymodel.uuid)
        response = self.get_json('/baymodels')
        self.assertEqual(len(bm_list), len(response['baymodels']))
        uuids = [bm['uuid'] for bm in response['baymodels']]
        self.assertEqual(sorted(bm_list), sorted(uuids))

    def test_links(self):
        """Each resource carries self + bookmark links that validate."""
        uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context, id=1, uuid=uuid)
        response = self.get_json('/baymodels/%s' % uuid)
        self.assertIn('links', response.keys())
        self.assertEqual(2, len(response['links']))
        self.assertIn(uuid, response['links'][0]['href'])
        for link in response['links']:
            bookmark = link['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(link['href'],
                                               bookmark=bookmark))

    def test_collection_links(self):
        """An explicit limit yields a 'next' link pointing past the last item."""
        for id_ in range(5):
            obj_utils.create_test_cluster_template(
                self.context, id=id_, uuid=uuidutils.generate_uuid())
        response = self.get_json('/baymodels/?limit=3')
        self.assertEqual(3, len(response['baymodels']))

        next_marker = response['baymodels'][-1]['uuid']
        self.assertIn(next_marker, response['next'])

    def test_collection_links_default_limit(self):
        """The configured api.max_limit caps unpaginated listings."""
        CONF.set_override('max_limit', 3, 'api')
        for id_ in range(5):
            obj_utils.create_test_cluster_template(
                self.context, id=id_, uuid=uuidutils.generate_uuid())
        response = self.get_json('/baymodels')
        self.assertEqual(3, len(response['baymodels']))

        next_marker = response['baymodels'][-1]['uuid']
        self.assertIn(next_marker, response['next'])
class TestPatch(api_base.FunctionalTest):
def setUp(self):
    """Create a fully-populated baymodel fixture and stub OS-resource validation.

    Tests drive validation outcomes through self.mock_valid_os_res.side_effect.
    """
    super(TestPatch, self).setUp()
    p = mock.patch.object(attr_validator, 'validate_os_resources')
    self.mock_valid_os_res = p.start()
    self.addCleanup(p.stop)
    self.baymodel = obj_utils.create_test_cluster_template(
        self.context,
        name='bay_model_example_A',
        image_id='nerdherd',
        apiserver_port=8080,
        fixed_network='private',
        flavor_id='m1.magnum',
        master_flavor_id='m1.magnum',
        external_network_id='public',
        keypair_id='test',
        volume_driver='rexray',
        public=False,
        docker_volume_size=20,
        coe='swarm',
        labels={'key1': 'val1', 'key2': 'val2'}
    )
def test_update_not_found(self):
    """PATCHing a nonexistent baymodel UUID returns 404."""
    uuid = uuidutils.generate_uuid()
    response = self.patch_json('/baymodels/%s' % uuid,
                               [{'path': '/name',
                                 'value': 'bay_model_example_B',
                                 'op': 'add'}],
                               expect_errors=True)
    self.assertEqual(404, response.status_int)
    self.assertEqual('application/json', response.content_type)
    self.assertTrue(response.json['errors'])
def test_update_baymodel_with_bay(self):
    """A baymodel referenced by a cluster rejects network_driver changes (400)."""
    baymodel = obj_utils.create_test_cluster_template(self.context)
    obj_utils.create_test_cluster(self.context,
                                  cluster_template_id=baymodel.uuid)

    response = self.patch_json('/baymodels/%s' % baymodel.uuid,
                               [{'path': '/network_driver',
                                 'value': 'flannel',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual(400, response.status_int)
    self.assertEqual('application/json', response.content_type)
    self.assertTrue(response.json['errors'])
    # The error message should identify the in-use baymodel.
    self.assertIn(baymodel.uuid, response.json['errors'][0]['detail'])
def test_update_baymodel_name_with_bay(self):
    """Renaming is allowed even when a cluster uses the baymodel."""
    baymodel = obj_utils.create_test_cluster_template(self.context)
    obj_utils.create_test_cluster(self.context,
                                  cluster_template_id=baymodel.uuid)
    response = self.patch_json('/baymodels/%s' % baymodel.uuid,
                               [{'path': '/name',
                                 'value': 'bay_model_example_B',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual(200, response.status_int)
@mock.patch.object(magnum_policy, 'enforce')
def test_update_public_baymodel_success(self, mock_policy):
    """With policy permitting it, /public can be flipped to True."""
    mock_policy.return_value = True
    response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
                               [{'path': '/public', 'value': True,
                                 'op': 'replace'}])
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(200, response.status_code)

    response = self.get_json('/baymodels/%s' % self.baymodel.uuid)
    self.assertTrue(response['public'])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_public_baymodel_fail(self, mock_policy):
    """With policy denying it, setting /public raises through the test app."""
    mock_policy.return_value = False
    self.assertRaises(AppError, self.patch_json,
                      '/baymodels/%s' % self.baymodel.uuid,
                      [{'path': '/public', 'value': True,
                        'op': 'replace'}])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_baymodel_with_bay_allow_update(self, mock_policy):
    """With policy permitting it, /public is updatable even while a cluster
    uses the baymodel, and the change is visible on a subsequent GET."""
    mock_policy.return_value = True
    baymodel = obj_utils.create_test_cluster_template(self.context)
    obj_utils.create_test_cluster(self.context,
                                  cluster_template_id=baymodel.uuid)
    response = self.patch_json('/baymodels/%s' % baymodel.uuid,
                               [{'path': '/public',
                                 'value': True,
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual(200, response.status_int)
    # Bug fix: verify the baymodel that was actually patched, not the
    # unrelated self.baymodel fixture (created with public=False in setUp).
    response = self.get_json('/baymodels/%s' % baymodel.uuid)
    self.assertEqual(response['public'], True)
def test_update_baymodel_with_bay_not_allow_update(self):
    """Without a policy override, in-use baymodels reject driver changes (400)."""
    baymodel = obj_utils.create_test_cluster_template(self.context)
    obj_utils.create_test_cluster(self.context,
                                  cluster_template_id=baymodel.uuid)
    response = self.patch_json('/baymodels/%s' % baymodel.uuid,
                               [{'path': '/network_driver',
                                 'value': 'calico',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual(400, response.status_code)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_singular(self, mock_utcnow):
    """Replacing /name updates the name and updated_at, and nothing else."""
    name = 'bay_model_example_B'
    test_time = datetime.datetime(2000, 1, 1, 0, 0)

    # Pin utcnow so updated_at is deterministic.
    mock_utcnow.return_value = test_time
    response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
                               [{'path': '/name', 'value': name,
                                 'op': 'replace'}])
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(200, response.status_code)

    response = self.get_json('/baymodels/%s' % self.baymodel.uuid)
    self.assertEqual(name, response['name'])
    return_updated_at = timeutils.parse_isotime(
        response['updated_at']).replace(tzinfo=None)
    self.assertEqual(test_time, return_updated_at)
    # Assert nothing else was changed
    self.assertEqual(self.baymodel.uuid, response['uuid'])
    self.assertEqual(self.baymodel.image_id, response['image_id'])
    self.assertEqual(self.baymodel.apiserver_port,
                     response['apiserver_port'])
    self.assertEqual(self.baymodel.fixed_network,
                     response['fixed_network'])
    self.assertEqual(self.baymodel.network_driver,
                     response['network_driver'])
    self.assertEqual(self.baymodel.volume_driver,
                     response['volume_driver'])
    self.assertEqual(self.baymodel.docker_volume_size,
                     response['docker_volume_size'])
    self.assertEqual(self.baymodel.coe,
                     response['coe'])
    self.assertEqual(self.baymodel.http_proxy,
                     response['http_proxy'])
    self.assertEqual(self.baymodel.https_proxy,
                     response['https_proxy'])
    self.assertEqual(self.baymodel.no_proxy,
                     response['no_proxy'])
    self.assertEqual(self.baymodel.labels,
                     response['labels'])
def test_replace_baymodel_with_no_exist_flavor_id(self):
    """A FlavorNotFound from OS-resource validation maps to 400."""
    self.mock_valid_os_res.side_effect = exception.FlavorNotFound("aaa")
    response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
                               [{'path': '/flavor_id', 'value': 'aaa',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_code)
    self.assertTrue(response.json['errors'])
def test_replace_baymodel_with_no_exist_keypair_id(self):
    """A KeyPairNotFound from OS-resource validation maps to 404.

    NOTE(review): keypair uses 404 where the other missing-resource cases
    use 400 — presumably deliberate in the API; confirm before unifying.
    """
    self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("aaa")
    response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
                               [{'path': '/keypair_id', 'value': 'aaa',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(404, response.status_code)
    self.assertTrue(response.json['errors'])
def test_replace_baymodel_with_no_exist_external_network_id(self):
    """An ExternalNetworkNotFound from validation maps to 400."""
    self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound(
        "aaa")
    response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
                               [{'path': '/external_network_id',
                                 'value': 'aaa',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_code)
    self.assertTrue(response.json['errors'])
def test_replace_baymodel_with_no_exist_image_id(self):
    """An ImageNotFound from OS-resource validation maps to 400."""
    self.mock_valid_os_res.side_effect = exception.ImageNotFound("aaa")
    response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
                               [{'path': '/image_id', 'value': 'aaa',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_code)
    self.assertTrue(response.json['errors'])
def test_create_baymodel_with_no_os_distro_image(self):
    """An image missing its os_distro property maps to 400."""
    image_exce = exception.OSDistroFieldNotFound('img')
    self.mock_valid_os_res.side_effect = image_exce
    response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
                               [{'path': '/image_id', 'value': 'img',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_code)
    self.assertTrue(response.json['errors'])
def test_remove_singular(self):
response = self.get_json('/baymodels/%s' % self.baymodel.uuid)
self.assertIsNotNone(response['dns_nameserver'])
response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
[{'path': '/dns_nameserver',
'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/baymodels/%s' % self.baymodel.uuid)
self.assertIsNone(response['dns_nameserver'])
# Assert nothing else was changed
self.assertEqual(self.baymodel.uuid, response['uuid'])
self.assertEqual(self.baymodel.name, response['name'])
self.assertEqual(self.baymodel.apiserver_port,
response['apiserver_port'])
self.assertEqual(self.baymodel.image_id,
response['image_id'])
self.assertEqual(self.baymodel.fixed_network,
response['fixed_network'])
self.assertEqual(self.baymodel.network_driver,
response['network_driver'])
self.assertEqual(self.baymodel.volume_driver,
response['volume_driver'])
self.assertEqual(self.baymodel.docker_volume_size,
response['docker_volume_size'])
self.assertEqual(self.baymodel.coe, response['coe'])
self.assertEqual(self.baymodel.http_proxy, response['http_proxy'])
self.assertEqual(self.baymodel.https_proxy, response['https_proxy'])
self.assertEqual(self.baymodel.no_proxy, response['no_proxy'])
self.assertEqual(self.baymodel.labels, response['labels'])
def test_remove_non_existent_property_fail(self):
response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
[{'path': '/non-existent', 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_remove_mandatory_property_fail(self):
mandatory_properties = ('/image_id', '/keypair_id', '/coe',
'/external_network_id', '/server_type',
'/tls_disabled', '/public',
'/registry_enabled',
'/cluster_distro', '/network_driver')
for p in mandatory_properties:
response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
[{'path': p, 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_add_root_non_existent(self):
response = self.patch_json(
'/baymodels/%s' % self.baymodel.uuid,
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_remove_uuid(self):
response = self.patch_json('/baymodels/%s' % self.baymodel.uuid,
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
class TestPost(api_base.FunctionalTest):
    """Tests for POST /baymodels (baymodel creation)."""

    def setUp(self):
        super(TestPost, self).setUp()
        # Stub out OS resource validation; individual tests drive its
        # success/failure through self.mock_valid_os_res.side_effect.
        p = mock.patch.object(attr_validator, 'validate_os_resources')
        self.mock_valid_os_res = p.start()
        self.addCleanup(p.stop)

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_baymodel(self, mock_utcnow,
                             mock_image_data):
        bdict = apiutils.baymodel_post_data()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        response = self.post_json('/baymodels', bdict)
        self.assertEqual(201, response.status_int)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/baymodels/%s' % bdict['uuid']
        self.assertEqual(expected_location,
                         urlparse.urlparse(response.location).path)
        self.assertEqual(bdict['uuid'], response.json['uuid'])
        # NOTE: membership is tested against the response dict itself;
        # the previous 'response.json.keys' (a bound method, not called)
        # would raise TypeError instead of checking the keys.
        self.assertNotIn('updated_at', response.json)
        return_created_at = timeutils.parse_isotime(
            response.json['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_set_project_id_and_user_id(self,
                                                        mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data()
            self.post_json('/baymodels', bdict)
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(self.context.project_id,
                             cc_mock.call_args[0][0]['project_id'])
            self.assertEqual(self.context.user_id,
                             cc_mock.call_args[0][0]['user_id'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_doesnt_contain_id(self,
                                               mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data(image_id='my-image')
            response = self.post_json('/baymodels', bdict)
            self.assertEqual(bdict['image_id'], response.json['image_id'])
            cc_mock.assert_called_once_with(mock.ANY)
            # Check that 'id' is not in first arg of positional args
            self.assertNotIn('id', cc_mock.call_args[0][0])

    def _create_baymodel_raises_app_error(self, **kwargs):
        """Assert that posting baymodel data built from **kwargs fails
        with an AppError and never reaches the database layer."""
        # Create mock for db and image data
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock,\
            mock.patch('magnum.api.attr_validator.validate_image')\
                as mock_image_data:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data(**kwargs)
            self.assertRaises(AppError, self.post_json, '/baymodels', bdict)
            self.assertFalse(cc_mock.called)

    def test_create_baymodel_with_invalid_long_string(self):
        fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id",
                  "dns_nameserver", "keypair_id", "external_network_id",
                  "cluster_distro", "fixed_network", "apiserver_port",
                  "docker_volume_size", "http_proxy", "https_proxy",
                  "no_proxy", "network_driver", "labels", "volume_driver"]
        for field in fields:
            self._create_baymodel_raises_app_error(**{field: 'i' * 256})

    def test_create_baymodel_with_invalid_empty_string(self):
        fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id",
                  "dns_nameserver", "keypair_id", "external_network_id",
                  "cluster_distro", "fixed_network", "apiserver_port",
                  "docker_volume_size", "labels", "http_proxy", "https_proxy",
                  "no_proxy", "network_driver", "volume_driver", "coe"]
        for field in fields:
            self._create_baymodel_raises_app_error(**{field: ''})

    def test_create_baymodel_with_invalid_coe(self):
        self._create_baymodel_raises_app_error(coe='k8s')
        self._create_baymodel_raises_app_error(coe='storm')
        self._create_baymodel_raises_app_error(coe='meson')
        self._create_baymodel_raises_app_error(coe='osomatsu')

    def test_create_baymodel_with_invalid_docker_volume_size(self):
        self._create_baymodel_raises_app_error(docker_volume_size=-1)
        # devicemapper requires a volume size of at least 3GB
        self._create_baymodel_raises_app_error(
            docker_volume_size=1,
            docker_storage_driver="devicemapper")
        self._create_baymodel_raises_app_error(
            docker_volume_size=2,
            docker_storage_driver="devicemapper")
        self._create_baymodel_raises_app_error(docker_volume_size='notanint')

    def test_create_baymodel_with_invalid_dns_nameserver(self):
        self._create_baymodel_raises_app_error(dns_nameserver='1.1.2')
        self._create_baymodel_raises_app_error(dns_nameserver='1.1..1')
        self._create_baymodel_raises_app_error(dns_nameserver='openstack.org')

    def test_create_baymodel_with_invalid_apiserver_port(self):
        self._create_baymodel_raises_app_error(apiserver_port=-12)
        self._create_baymodel_raises_app_error(apiserver_port=65536)
        self._create_baymodel_raises_app_error(apiserver_port=0)
        self._create_baymodel_raises_app_error(apiserver_port=1023)
        self._create_baymodel_raises_app_error(apiserver_port='not an int')

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_labels(self, mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data(labels={'key1': 'val1',
                                                        'key2': 'val2'})
            response = self.post_json('/baymodels', bdict)
            self.assertEqual(bdict['labels'],
                             response.json['labels'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_docker_volume_size(self,
                                                     mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data(docker_volume_size=99)
            response = self.post_json('/baymodels', bdict)
            self.assertEqual(bdict['docker_volume_size'],
                             response.json['docker_volume_size'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_overlay(self, mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data(
                docker_volume_size=1, docker_storage_driver="overlay")
            response = self.post_json('/baymodels', bdict)
            self.assertEqual(bdict['docker_volume_size'],
                             response.json['docker_volume_size'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_generate_uuid(self,
                                           mock_image_data):
        # TODO(hongbin): Is this test correct?
        pass

    @mock.patch('magnum.api.attr_validator.validate_image')
    def _test_create_baymodel_network_driver_attr(self,
                                                  baymodel_dict,
                                                  baymodel_config_dict,
                                                  expect_errors,
                                                  mock_image_data):
        """Create a baymodel under the given cluster_template config and
        check either the 400 error or the resulting network_driver value."""
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        for k, v in baymodel_config_dict.items():
            CONF.set_override(k, v, 'cluster_template')
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            bdict = apiutils.baymodel_post_data(**baymodel_dict)
            response = self.post_json('/baymodels', bdict,
                                      expect_errors=expect_errors)
            if expect_errors:
                self.assertEqual(400, response.status_int)
            else:
                # When no driver was supplied the API fills in the
                # configured default.
                expected_driver = bdict.get('network_driver')
                if not expected_driver:
                    expected_driver = (
                        CONF.cluster_template.swarm_default_network_driver)
                self.assertEqual(expected_driver,
                                 response.json['network_driver'])
                self.assertEqual(bdict['image_id'],
                                 response.json['image_id'])
                cc_mock.assert_called_once_with(mock.ANY)
                self.assertNotIn('id', cc_mock.call_args[0][0])
                self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))

    def test_create_baymodel_with_network_driver(self):
        baymodel_dict = {'coe': 'kubernetes', 'network_driver': 'flannel'}
        config_dict = {}    # Default config
        expect_errors_flag = False
        self._test_create_baymodel_network_driver_attr(baymodel_dict,
                                                       config_dict,
                                                       expect_errors_flag)

    def test_create_baymodel_with_no_network_driver(self):
        baymodel_dict = {}
        config_dict = {}
        expect_errors_flag = False
        self._test_create_baymodel_network_driver_attr(baymodel_dict,
                                                       config_dict,
                                                       expect_errors_flag)

    def test_create_baymodel_with_network_driver_non_def_config(self):
        baymodel_dict = {'coe': 'kubernetes', 'network_driver': 'flannel'}
        config_dict = {
            'kubernetes_allowed_network_drivers': ['flannel', 'foo']}
        expect_errors_flag = False
        self._test_create_baymodel_network_driver_attr(baymodel_dict,
                                                       config_dict,
                                                       expect_errors_flag)

    def test_create_baymodel_with_invalid_network_driver(self):
        baymodel_dict = {'coe': 'kubernetes', 'network_driver': 'bad_driver'}
        config_dict = {
            'kubernetes_allowed_network_drivers': ['flannel', 'good_driver']}
        expect_errors_flag = True
        self._test_create_baymodel_network_driver_attr(baymodel_dict,
                                                       config_dict,
                                                       expect_errors_flag)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_volume_driver(self,
                                                mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data(volume_driver='rexray')
            response = self.post_json('/baymodels', bdict)
            self.assertEqual(bdict['volume_driver'],
                             response.json['volume_driver'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_no_volume_driver(self,
                                                   mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data()
            response = self.post_json('/baymodels', bdict)
            self.assertEqual(bdict['volume_driver'],
                             response.json['volume_driver'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch.object(magnum_policy, 'enforce')
    def test_create_baymodel_public_success(self, mock_policy,
                                            mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_policy.return_value = True
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data(public=True)
            response = self.post_json('/baymodels', bdict)
            self.assertTrue(response.json['public'])
            mock_policy.assert_called_with(mock.ANY, "baymodel:publish",
                                           None, do_raise=False)
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])
            self.assertTrue(cc_mock.call_args[0][0]['public'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch.object(magnum_policy, 'enforce')
    def test_create_baymodel_public_fail(self, mock_policy,
                                         mock_image_data):
        with mock.patch.object(self.dbapi, 'create_cluster_template',
                               wraps=self.dbapi.create_cluster_template):
            # make policy enforcement fail
            mock_policy.return_value = False
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data(public=True)
            self.assertRaises(AppError, self.post_json, '/baymodels', bdict)

    @mock.patch('magnum.api.attr_validator.validate_image')
    @mock.patch.object(magnum_policy, 'enforce')
    def test_create_baymodel_public_not_set(self, mock_policy,
                                            mock_image_data):
        with mock.patch.object(
                self.dbapi, 'create_cluster_template',
                wraps=self.dbapi.create_cluster_template) as cc_mock:
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data(public=False)
            response = self.post_json('/baymodels', bdict)
            self.assertFalse(response.json['public'])
            # policy enforcement is called only once for enforce_wsgi
            self.assertEqual(1, mock_policy.call_count)
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])
            self.assertFalse(cc_mock.call_args[0][0]['public'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_no_os_distro_image(self,
                                                     mock_image_data):
        mock_image_data.side_effect = exception.OSDistroFieldNotFound('img')
        bdict = apiutils.baymodel_post_data()
        del bdict['uuid']
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(400, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_os_distro_image(self,
                                                  mock_image_data):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.baymodel_post_data()
        del bdict['uuid']
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(201, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_image_name(self,
                                             mock_image_data):
        mock_image = {'name': 'mock_name',
                      'os_distro': 'fedora-atomic'}
        mock_image_data.return_value = mock_image
        bdict = apiutils.baymodel_post_data()
        del bdict['uuid']
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(201, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_no_exist_image_name(self,
                                                      mock_image_data):
        mock_image_data.side_effect = exception.ResourceNotFound('test-img')
        bdict = apiutils.baymodel_post_data()
        del bdict['uuid']
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(404, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_multi_image_name(self,
                                                   mock_image_data):
        mock_image_data.side_effect = exception.Conflict('Multiple images')
        bdict = apiutils.baymodel_post_data()
        del bdict['uuid']
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(409, response.status_int)

    def test_create_baymodel_without_image_id(self):
        bdict = apiutils.baymodel_post_data()
        del bdict['image_id']
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_create_baymodel_without_keypair_id(self):
        bdict = apiutils.baymodel_post_data()
        del bdict['keypair_id']
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(400, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_dns(self,
                                      mock_image_data):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.baymodel_post_data()
        response = self.post_json('/baymodels', bdict)
        self.assertEqual(201, response.status_int)
        self.assertEqual(bdict['dns_nameserver'],
                         response.json['dns_nameserver'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_no_exist_keypair(self,
                                                   mock_image_data):
        self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("Test")
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.baymodel_post_data()
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(404, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_flavor(self,
                                         mock_image_data):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.baymodel_post_data()
        response = self.post_json('/baymodels', bdict)
        self.assertEqual(201, response.status_int)
        self.assertEqual(bdict['flavor_id'],
                         response.json['flavor_id'])
        self.assertEqual(bdict['master_flavor_id'],
                         response.json['master_flavor_id'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_no_exist_flavor(self,
                                                  mock_image_data):
        self.mock_valid_os_res.side_effect = exception.FlavorNotFound("flavor")
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.baymodel_post_data()
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(400, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_external_network(self,
                                                   mock_image_data):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.baymodel_post_data()
        response = self.post_json('/baymodels', bdict)
        self.assertEqual(201, response.status_int)
        self.assertEqual(bdict['external_network_id'],
                         response.json['external_network_id'])

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_with_no_exist_external_network(self,
                                                            mock_image_data):
        self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound(
            "test")
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.baymodel_post_data()
        response = self.post_json('/baymodels', bdict, expect_errors=True)
        self.assertEqual(400, response.status_int)

    @mock.patch('magnum.api.attr_validator.validate_image')
    def test_create_baymodel_without_name(self, mock_image_data):
        with mock.patch.object(self.dbapi, 'create_cluster_template',
                               wraps=self.dbapi.create_cluster_template):
            mock_image_data.return_value = {'name': 'mock_name',
                                            'os_distro': 'fedora-atomic'}
            bdict = apiutils.baymodel_post_data()
            bdict.pop('name')
            resp = self.post_json('/baymodels', bdict)
            self.assertEqual(201, resp.status_int)
            # A name is generated server-side when none is supplied.
            self.assertIsNotNone(resp.json['name'])
class TestDelete(api_base.FunctionalTest):
    """Tests for DELETE /baymodels."""

    def test_delete_baymodel(self):
        """A deleted baymodel is no longer retrievable."""
        baymodel = obj_utils.create_test_cluster_template(self.context)
        url = '/baymodels/%s' % baymodel.uuid
        self.delete(url)
        resp = self.get_json(url, expect_errors=True)
        self.assertEqual(404, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])

    def test_delete_baymodel_with_bay(self):
        """Deletion is refused while a cluster still references the model."""
        baymodel = obj_utils.create_test_cluster_template(self.context)
        obj_utils.create_test_cluster(self.context,
                                      cluster_template_id=baymodel.uuid)
        resp = self.delete('/baymodels/%s' % baymodel.uuid,
                           expect_errors=True)
        self.assertEqual(400, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])
        self.assertIn(baymodel.uuid, resp.json['errors'][0]['detail'])

    def test_delete_baymodel_not_found(self):
        """Deleting an unknown uuid yields 404."""
        uuid = uuidutils.generate_uuid()
        resp = self.delete('/baymodels/%s' % uuid, expect_errors=True)
        self.assertEqual(404, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])

    def test_delete_baymodel_with_name(self):
        """Baymodels may be deleted by name as well as by uuid."""
        baymodel = obj_utils.create_test_cluster_template(self.context)
        resp = self.delete('/baymodels/%s' % baymodel['name'],
                           expect_errors=True)
        self.assertEqual(204, resp.status_int)

    def test_delete_baymodel_with_name_not_found(self):
        """Deleting an unknown name yields 404."""
        resp = self.delete('/baymodels/not_found', expect_errors=True)
        self.assertEqual(404, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])

    def test_delete_multiple_baymodel_by_name(self):
        """Deleting by an ambiguous (duplicated) name yields 409."""
        for _ in range(2):
            obj_utils.create_test_cluster_template(
                self.context, name='test_baymodel',
                uuid=uuidutils.generate_uuid())
        resp = self.delete('/baymodels/test_baymodel', expect_errors=True)
        self.assertEqual(409, resp.status_int)
        self.assertEqual('application/json', resp.content_type)
        self.assertTrue(resp.json['errors'])
class TestBayModelPolicyEnforcement(api_base.FunctionalTest):
    """Verify that policy rules are enforced on baymodel endpoints."""

    def _common_policy_check(self, rule, func, *arg, **kwarg):
        """Deny *rule* via policy and assert *func* is rejected with 403."""
        self.policy.set_rules({rule: "project:non_fake"})
        response = func(*arg, **kwarg)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        # NOTE: this was previously a two-argument assertTrue(), which
        # always passes (the second argument is only the failure message);
        # assertIn actually verifies the policy error detail.
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])

    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            "baymodel:get_all", self.get_json, '/baymodels',
            expect_errors=True)

    def test_policy_disallow_get_one(self):
        baymodel = obj_utils.create_test_cluster_template(self.context)
        self._common_policy_check(
            "baymodel:get", self.get_json,
            '/baymodels/%s' % baymodel.uuid,
            expect_errors=True)

    def test_policy_disallow_detail(self):
        self._common_policy_check(
            "baymodel:detail", self.get_json,
            '/baymodels/%s/detail' % uuidutils.generate_uuid(),
            expect_errors=True)

    def test_policy_disallow_update(self):
        baymodel = obj_utils.create_test_cluster_template(
            self.context,
            name='example_A',
            uuid=uuidutils.generate_uuid())
        self._common_policy_check(
            "baymodel:update", self.patch_json,
            '/baymodels/%s' % baymodel.name,
            [{'path': '/name', 'value': "new_name", 'op': 'replace'}],
            expect_errors=True)

    def test_policy_disallow_create(self):
        bdict = apiutils.baymodel_post_data(name='bay_model_example_A')
        self._common_policy_check(
            "baymodel:create", self.post_json, '/baymodels', bdict,
            expect_errors=True)

    def test_policy_disallow_delete(self):
        baymodel = obj_utils.create_test_cluster_template(self.context)
        self._common_policy_check(
            "baymodel:delete", self.delete,
            '/baymodels/%s' % baymodel.uuid, expect_errors=True)

    def _owner_check(self, rule, func, *args, **kwargs):
        """Restrict *rule* to the owning user and assert *func* gets 403."""
        self.policy.set_rules({rule: "user_id:%(user_id)s"})
        response = func(*args, **kwargs)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        # NOTE: assertIn instead of the always-passing two-arg assertTrue.
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            response.json['errors'][0]['detail'])

    def test_policy_only_owner_get_one(self):
        baymodel = obj_utils.create_test_cluster_template(self.context,
                                                          user_id='another')
        self._owner_check("baymodel:get", self.get_json,
                          '/baymodels/%s' % baymodel.uuid, expect_errors=True)

    def test_policy_only_owner_update(self):
        baymodel = obj_utils.create_test_cluster_template(self.context,
                                                          user_id='another')
        self._owner_check(
            "baymodel:update", self.patch_json,
            '/baymodels/%s' % baymodel.uuid,
            [{'path': '/name', 'value': "new_name", 'op': 'replace'}],
            expect_errors=True)

    def test_policy_only_owner_delete(self):
        baymodel = obj_utils.create_test_cluster_template(self.context,
                                                          user_id='another')
        self._owner_check(
            "baymodel:delete", self.delete, '/baymodels/%s' % baymodel.uuid,
            expect_errors=True)
| |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Blocking and non-blocking HTTP client implementations using pycurl."""
import calendar
import collections
import cStringIO
import email.utils
import errno
import escape
import httplib
import httputil
import ioloop
import logging
import pycurl
import sys
import time
import weakref
class HTTPClient(object):
"""A blocking HTTP client backed with pycurl.
Typical usage looks like this:
http_client = httpclient.HTTPClient()
try:
response = http_client.fetch("http://www.google.com/")
print response.body
except httpclient.HTTPError, e:
print "Error:", e
fetch() can take a string URL or an HTTPRequest instance, which offers
more options, like executing POST/PUT/DELETE requests.
"""
def __init__(self, max_simultaneous_connections=None):
self._curl = _curl_create(max_simultaneous_connections)
def __del__(self):
self._curl.close()
def fetch(self, request, **kwargs):
"""Executes an HTTPRequest, returning an HTTPResponse.
If an error occurs during the fetch, we raise an HTTPError.
"""
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
buffer = cStringIO.StringIO()
headers = httputil.HTTPHeaders()
try:
_curl_setup_request(self._curl, request, buffer, headers)
self._curl.perform()
code = self._curl.getinfo(pycurl.HTTP_CODE)
effective_url = self._curl.getinfo(pycurl.EFFECTIVE_URL)
buffer.seek(0)
response = HTTPResponse(
request=request, code=code, headers=headers,
buffer=buffer, effective_url=effective_url)
if code < 200 or code >= 300:
raise HTTPError(code, response=response)
return response
except pycurl.error, e:
buffer.close()
raise CurlError(*e)
class AsyncHTTPClient(object):
"""An non-blocking HTTP client backed with pycurl.
Example usage:
import ioloop
def handle_request(response):
if response.error:
print "Error:", response.error
else:
print response.body
ioloop.IOLoop.instance().stop()
http_client = httpclient.AsyncHTTPClient()
http_client.fetch("http://www.google.com/", handle_request)
ioloop.IOLoop.instance().start()
fetch() can take a string URL or an HTTPRequest instance, which offers
more options, like executing POST/PUT/DELETE requests.
The keyword argument max_clients to the AsyncHTTPClient constructor
determines the maximum number of simultaneous fetch() operations that
can execute in parallel on each IOLoop.
"""
_ASYNC_CLIENTS = weakref.WeakKeyDictionary()
    def __new__(cls, io_loop=None, max_clients=10,
                max_simultaneous_connections=None):
        """Return the AsyncHTTPClient singleton for the given IOLoop.

        A new instance is built only the first time a given IOLoop is
        seen; later constructions with the same loop return the cached
        client (so max_clients/max_simultaneous_connections are only
        honored on first construction for that loop).
        """
        # There is one client per IOLoop since they share curl instances
        io_loop = io_loop or ioloop.IOLoop.instance()
        if io_loop in cls._ASYNC_CLIENTS:
            return cls._ASYNC_CLIENTS[io_loop]
        else:
            instance = super(AsyncHTTPClient, cls).__new__(cls)
            instance.io_loop = io_loop
            instance._multi = pycurl.CurlMulti()
            # Pool of curl easy handles shared across all fetches.
            instance._curls = [_curl_create(max_simultaneous_connections)
                               for i in xrange(max_clients)]
            # Handles not currently assigned to an in-flight request.
            instance._free_list = instance._curls[:]
            # Queued (request, callback) pairs awaiting a free handle.
            instance._requests = collections.deque()
            # fd -> event mask maps used to sync curl's fdset with IOLoop.
            instance._fds = {}
            instance._events = {}
            instance._added_perform_callback = False
            instance._timeout = None
            instance._closed = False
            # _ASYNC_CLIENTS is a WeakKeyDictionary, so the cache entry
            # disappears when the IOLoop itself is garbage collected.
            cls._ASYNC_CLIENTS[io_loop] = instance
            return instance
def close(self):
"""Destroys this http client, freeing any file descriptors used.
Not needed in normal use, but may be helpful in unittests that
create and destroy http clients. No other methods may be called
on the AsyncHTTPClient after close().
"""
del AsyncHTTPClient._ASYNC_CLIENTS[self.io_loop]
for curl in self._curls:
curl.close()
self._multi.close()
self._closed = True
def fetch(self, request, callback, **kwargs):
"""Executes an HTTPRequest, calling callback with an HTTPResponse.
If an error occurs during the fetch, the HTTPResponse given to the
callback has a non-None error attribute that contains the exception
encountered during the request. You can call response.reraise() to
throw the exception (if any) in the callback.
"""
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
self._requests.append((request, callback))
self._add_perform_callback()
def _add_perform_callback(self):
if not self._added_perform_callback:
self.io_loop.add_callback(self._perform)
self._added_perform_callback = True
    def _handle_events(self, fd, events):
        # IOLoop handler: record the latest event mask for this fd and let
        # a scheduled _perform drive the curl multi handle.
        self._events[fd] = events
        self._add_perform_callback()
    def _handle_timeout(self):
        # IOLoop timeout callback: clear the pending timeout handle and run
        # _perform directly (we are already inside the IOLoop).
        self._timeout = None
        self._perform()
    def _perform(self):
        """Drive libcurl: run perform(), sync the fd set with the IOLoop,
        finish completed transfers, and start queued requests.

        Loops until one full pass neither starts nor completes a transfer,
        then (re)arms a 0.2s polling timeout while transfers remain.
        """
        self._added_perform_callback = False
        if self._closed:
            return
        while True:
            # Let libcurl make progress on all transfers until it no longer
            # asks to be called again immediately.
            while True:
                ret, num_handles = self._multi.perform()
                if ret != pycurl.E_CALL_MULTI_PERFORM:
                    break
            # Update the set of active file descriptors.  It is important
            # that this happen immediately after perform() because
            # fds that have been removed from fdset are free to be reused
            # in user callbacks.
            fds = {}
            (readable, writable, exceptable) = self._multi.fdset()
            # NOTE(review): the bit values below appear to mirror tornado's
            # IOLoop READ/WRITE/ERROR event masks -- confirm against the
            # ioloop constants before changing them.
            for fd in readable:
                fds[fd] = fds.get(fd, 0) | 0x1 | 0x2
            for fd in writable:
                fds[fd] = fds.get(fd, 0) | 0x4
            for fd in exceptable:
                fds[fd] = fds.get(fd, 0) | 0x8 | 0x10
            if fds and max(fds.iterkeys()) > 900:
                # Libcurl has a bug in which it behaves unpredictably with
                # file descriptors greater than 1024.  (This is because
                # even though it uses poll() instead of select(), it still
                # uses FD_SET internally) Since curl opens its own file
                # descriptors we can't catch this problem when it happens,
                # and the best we can do is detect that it's about to
                # happen.  Exiting is a lousy way to handle this error,
                # but there's not much we can do at this point.  Exiting
                # (and getting restarted by whatever monitoring process
                # is handling crashed tornado processes) will at least
                # get things working again and hopefully bring the issue
                # to someone's attention.
                # If you run into this issue, you either have a file descriptor
                # leak or need to run more tornado processes (so that none
                # of them are handling more than 1000 simultaneous connections)
                print >> sys.stderr, "ERROR: File descriptor too high for libcurl. Exiting."
                logging.error("File descriptor too high for libcurl. Exiting.")
                sys.exit(1)
            # Unregister fds libcurl no longer cares about; ENOENT just
            # means the IOLoop already forgot about the fd.
            for fd in self._fds:
                if fd not in fds:
                    try:
                        self.io_loop.remove_handler(fd)
                    except (OSError, IOError), e:
                        if e[0] != errno.ENOENT:
                            raise
            # Register new fds and refresh the event mask on changed ones.
            for fd, events in fds.iteritems():
                old_events = self._fds.get(fd, None)
                if old_events is None:
                    self.io_loop.add_handler(fd, self._handle_events, events)
                elif old_events != events:
                    try:
                        self.io_loop.update_handler(fd, events)
                    except (OSError, IOError), e:
                        if e[0] == errno.ENOENT:
                            self.io_loop.add_handler(fd, self._handle_events,
                                                     events)
                        else:
                            raise
            self._fds = fds
            # Handle completed fetches
            completed = 0
            while True:
                num_q, ok_list, err_list = self._multi.info_read()
                for curl in ok_list:
                    self._finish(curl)
                    completed += 1
                for curl, errnum, errmsg in err_list:
                    self._finish(curl, errnum, errmsg)
                    completed += 1
                if num_q == 0:
                    break
            # Start fetching new URLs
            started = 0
            while self._free_list and self._requests:
                started += 1
                curl = self._free_list.pop()
                (request, callback) = self._requests.popleft()
                curl.info = {
                    "headers": httputil.HTTPHeaders(),
                    "buffer": cStringIO.StringIO(),
                    "request": request,
                    "callback": callback,
                    "start_time": time.time(),
                }
                _curl_setup_request(curl, request, curl.info["buffer"],
                                    curl.info["headers"])
                self._multi.add_handle(curl)
            # A quiescent pass: nothing started, nothing finished.
            if not started and not completed:
                break
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None
        # While transfers are in flight, poll again in 200ms even if no fd
        # activity wakes us up.
        if num_handles:
            self._timeout = self.io_loop.add_timeout(
                time.time() + 0.2, self._handle_timeout)
def _finish(self, curl, curl_error=None, curl_message=None):
info = curl.info
curl.info = None
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info["buffer"]
if curl_error:
error = CurlError(curl_error, curl_message)
code = error.code
body = None
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(pycurl.HTTP_CODE)
effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
buffer.seek(0)
try:
info["callback"](HTTPResponse(
request=info["request"], code=code, headers=info["headers"],
buffer=buffer, effective_url=effective_url, error=error,
request_time=time.time() - info["start_time"]))
except (KeyboardInterrupt, SystemExit):
raise
except:
logging.error("Exception in callback %r", info["callback"],
exc_info=True)
class AsyncHTTPClient2(object):
    """Alternate implementation of AsyncHTTPClient.
    This class has the same interface as AsyncHTTPClient (so see that class
    for usage documentation) but is implemented with a different set of
    libcurl APIs (curl_multi_socket_action instead of fdset/perform).
    This implementation will likely become the default in the future, but
    for now should be considered somewhat experimental.
    The main advantage of this class over the original implementation is
    that it is immune to the fd > 1024 bug, so applications with a large
    number of simultaneous requests (e.g. long-polling) may prefer this
    version.
    Known bugs:
    * Timeouts connecting to localhost
      In some situations, this implementation will return a connection
      timeout when the old implementation would be able to connect.  This
      has only been observed when connecting to localhost when using
      the kqueue-based IOLoop (mac/bsd), but it may also occur on epoll (linux)
      and, in principle, for non-localhost sites.
      While the bug is unrelated to IPv6, disabling IPv6 will avoid the
      most common manifestations of the bug, so this class disables IPv6 when
      it detects an affected version of libcurl.
      The underlying cause is a libcurl bug in versions up to and including
      7.21.0 (it will be fixed in the not-yet-released 7.21.1)
      http://sourceforge.net/tracker/?func=detail&aid=3017819&group_id=976&atid=100976
    """
    _ASYNC_CLIENTS = weakref.WeakKeyDictionary()
    def __new__(cls, io_loop=None, max_clients=10,
                max_simultaneous_connections=None):
        # There is one client per IOLoop since they share curl instances
        io_loop = io_loop or ioloop.IOLoop.instance()
        if io_loop in cls._ASYNC_CLIENTS:
            return cls._ASYNC_CLIENTS[io_loop]
        else:
            instance = super(AsyncHTTPClient2, cls).__new__(cls)
            instance.io_loop = io_loop
            instance._multi = pycurl.CurlMulti()
            # Let libcurl drive the event loop integration: it tells us
            # which sockets to watch and when to wake up via these two
            # callbacks.
            instance._multi.setopt(pycurl.M_TIMERFUNCTION,
                                   instance._set_timeout)
            instance._multi.setopt(pycurl.M_SOCKETFUNCTION,
                                   instance._handle_socket)
            instance._curls = [_curl_create(max_simultaneous_connections)
                               for i in xrange(max_clients)]
            instance._free_list = instance._curls[:]
            instance._requests = collections.deque()
            instance._fds = {}
            instance._timeout = None
            cls._ASYNC_CLIENTS[io_loop] = instance
            return instance
    def close(self):
        """Destroys this http client, freeing any file descriptors used.
        Not needed in normal use, but may be helpful in unittests that
        create and destroy http clients.  No other methods may be called
        on the AsyncHTTPClient after close().
        """
        del AsyncHTTPClient2._ASYNC_CLIENTS[self.io_loop]
        for curl in self._curls:
            curl.close()
        self._multi.close()
        # NOTE(review): _closed is never initialized in __new__ nor checked
        # by any other method of this class -- presumably carried over from
        # AsyncHTTPClient; confirm whether it is still needed.
        self._closed = True
    def fetch(self, request, callback, **kwargs):
        """Executes an HTTPRequest, calling callback with an HTTPResponse.
        If an error occurs during the fetch, the HTTPResponse given to the
        callback has a non-None error attribute that contains the exception
        encountered during the request. You can call response.rethrow() to
        throw the exception (if any) in the callback.
        """
        if not isinstance(request, HTTPRequest):
            request = HTTPRequest(url=request, **kwargs)
        self._requests.append((request, callback))
        self._process_queue()
        # Kick libcurl immediately so the new transfer gets started.
        self._set_timeout(0)
    def _handle_socket(self, event, fd, multi, data):
        """Called by libcurl when it wants to change the file descriptors
        it cares about.

        Translates libcurl POLL_* events into IOLoop event masks and keeps
        the IOLoop handler registrations in sync with self._fds.
        """
        event_map = {
            pycurl.POLL_NONE: ioloop.IOLoop.NONE,
            pycurl.POLL_IN: ioloop.IOLoop.READ,
            pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
            pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
        }
        if event == pycurl.POLL_REMOVE:
            # libcurl is done with this socket; stop watching it.
            self.io_loop.remove_handler(fd)
            del self._fds[fd]
        else:
            ioloop_event = event_map[event]
            if fd not in self._fds:
                self._fds[fd] = ioloop_event
                self.io_loop.add_handler(fd, self._handle_events,
                                         ioloop_event)
            else:
                self._fds[fd] = ioloop_event
                self.io_loop.update_handler(fd, ioloop_event)
    def _set_timeout(self, msecs):
        """Called by libcurl to schedule a timeout."""
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
        self._timeout = self.io_loop.add_timeout(
            time.time() + msecs/1000.0, self._handle_timeout)
    def _handle_events(self, fd, events):
        """Called by IOLoop when there is activity on one of our
        file descriptors.
        """
        action = 0
        if events & ioloop.IOLoop.READ: action |= pycurl.CSELECT_IN
        if events & ioloop.IOLoop.WRITE: action |= pycurl.CSELECT_OUT
        while True:
            try:
                ret, num_handles = self._multi.socket_action(fd, action)
            except Exception, e:
                # NOTE(review): assumed to be pycurl.error, whose first
                # element is the multi error code -- verify.
                ret = e[0]
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        self._finish_pending_requests()
    def _handle_timeout(self):
        """Called by IOLoop when the requested timeout has passed."""
        self._timeout = None
        while True:
            try:
                ret, num_handles = self._multi.socket_action(
                    pycurl.SOCKET_TIMEOUT, 0)
            except Exception, e:
                # NOTE(review): assumed to be pycurl.error, whose first
                # element is the multi error code -- verify.
                ret = e[0]
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        self._finish_pending_requests()
        # In theory, we shouldn't have to do this because curl will
        # call _set_timeout whenever the timeout changes.  However,
        # sometimes after _handle_timeout we will need to reschedule
        # immediately even though nothing has changed from curl's
        # perspective.  This is because when socket_action is
        # called with SOCKET_TIMEOUT, libcurl decides internally which
        # timeouts need to be processed by using a monotonic clock
        # (where available) while tornado uses python's time.time()
        # to decide when timeouts have occurred.  When those clocks
        # disagree on elapsed time (as they will whenever there is an
        # NTP adjustment), tornado might call _handle_timeout before
        # libcurl is ready.  After each timeout, resync the scheduled
        # timeout with libcurl's current state.
        new_timeout = self._multi.timeout()
        if new_timeout != -1:
            self._set_timeout(new_timeout)
    def _finish_pending_requests(self):
        """Process any requests that were completed by the last
        call to multi.socket_action.
        """
        while True:
            num_q, ok_list, err_list = self._multi.info_read()
            for curl in ok_list:
                self._finish(curl)
            for curl, errnum, errmsg in err_list:
                self._finish(curl, errnum, errmsg)
            if num_q == 0:
                break
        self._process_queue()
    def _process_queue(self):
        """Hand queued requests to free curl handles until either runs dry."""
        while True:
            started = 0
            while self._free_list and self._requests:
                started += 1
                curl = self._free_list.pop()
                (request, callback) = self._requests.popleft()
                curl.info = {
                    "headers": httputil.HTTPHeaders(),
                    "buffer": cStringIO.StringIO(),
                    "request": request,
                    "callback": callback,
                    "start_time": time.time(),
                }
                # Disable IPv6 to mitigate the effects of this bug
                # on curl versions <= 7.21.0
                # http://sourceforge.net/tracker/?func=detail&aid=3017819&group_id=976&atid=100976
                if pycurl.version_info()[2] <= 0x71500: # 7.21.0
                    curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
                _curl_setup_request(curl, request, curl.info["buffer"],
                                    curl.info["headers"])
                self._multi.add_handle(curl)
            if not started:
                break
    def _finish(self, curl, curl_error=None, curl_message=None):
        """Recycle a finished curl handle and invoke the request's callback.

        `curl_error`/`curl_message` come from CurlMulti's error list; when
        present the response carries a CurlError and no body buffer.
        """
        info = curl.info
        curl.info = None
        self._multi.remove_handle(curl)
        self._free_list.append(curl)
        buffer = info["buffer"]
        if curl_error:
            error = CurlError(curl_error, curl_message)
            code = error.code
            effective_url = None
            buffer.close()
            buffer = None
        else:
            error = None
            code = curl.getinfo(pycurl.HTTP_CODE)
            effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
            buffer.seek(0)
        try:
            info["callback"](HTTPResponse(
                request=info["request"], code=code, headers=info["headers"],
                buffer=buffer, effective_url=effective_url, error=error,
                request_time=time.time() - info["start_time"]))
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Log and swallow callback exceptions so one bad callback
            # cannot wedge the whole client.
            logging.error("Exception in callback %r", info["callback"],
                          exc_info=True)
class HTTPRequest(object):
    """Container for all parameters of a single HTTP request.

    Only `url` is required.  Timeouts are in seconds and are truncated to
    whole seconds when handed to libcurl.  `if_modified_since` is rendered
    into an If-Modified-Since header; `streaming_callback`,
    `header_callback` and `prepare_curl_callback` allow low-level
    customization of how the fetch is performed.
    """
    def __init__(self, url, method="GET", headers=None, body=None,
                 auth_username=None, auth_password=None,
                 connect_timeout=20.0, request_timeout=20.0,
                 if_modified_since=None, follow_redirects=True,
                 max_redirects=5, user_agent=None, use_gzip=True,
                 network_interface=None, streaming_callback=None,
                 header_callback=None, prepare_curl_callback=None,
                 allow_nonstandard_methods=False):
        if headers is None:
            headers = httputil.HTTPHeaders()
        if if_modified_since:
            # if_modified_since is a datetime; render it as an RFC 1123
            # HTTP date in GMT.
            timestamp = calendar.timegm(if_modified_since.utctimetuple())
            headers["If-Modified-Since"] = email.utils.formatdate(
                timestamp, localtime=False, usegmt=True)
        # NOTE(review): Pragma is blanked unless explicitly set --
        # presumably to suppress a default added by an intermediary or by
        # libcurl; confirm the rationale.
        if "Pragma" not in headers:
            headers["Pragma"] = ""
        self.url = _utf8(url)
        self.method = method
        self.headers = headers
        self.body = body
        self.auth_username = _utf8(auth_username)
        self.auth_password = _utf8(auth_password)
        self.connect_timeout = connect_timeout
        self.request_timeout = request_timeout
        self.follow_redirects = follow_redirects
        self.max_redirects = max_redirects
        self.user_agent = user_agent
        self.use_gzip = use_gzip
        self.network_interface = network_interface
        self.streaming_callback = streaming_callback
        self.header_callback = header_callback
        self.prepare_curl_callback = prepare_curl_callback
        self.allow_nonstandard_methods = allow_nonstandard_methods
class HTTPResponse(object):
    """The result of an HTTPRequest.

    `body` is derived lazily from `buffer` (and cached).  `error` is either
    the exception supplied by the fetch machinery, an HTTPError for a
    non-2xx status code, or None on success.  `request_time` is the fetch
    duration in seconds.
    """
    def __init__(self, request, code, headers=None, buffer=None,
                 effective_url=None, error=None, request_time=None):
        self.request = request
        self.code = code
        # Fixed mutable-default-argument bug: the old `headers={}` default
        # was one shared dict mutated by every response created without
        # explicit headers.  Each response now gets its own dict.
        self.headers = {} if headers is None else headers
        self.buffer = buffer
        self._body = None
        if effective_url is None:
            self.effective_url = request.url
        else:
            self.effective_url = effective_url
        if error is None:
            # Synthesize an HTTPError for non-2xx responses so callers can
            # uniformly check/raise response.error.
            if self.code < 200 or self.code >= 300:
                self.error = HTTPError(self.code, response=self)
            else:
                self.error = None
        else:
            self.error = error
        self.request_time = request_time
    def _get_body(self):
        # Lazily read (and cache) the whole buffer.
        if self.buffer is None:
            return None
        elif self._body is None:
            self._body = self.buffer.getvalue()
        return self._body
    body = property(_get_body)
    def rethrow(self):
        """Re-raise the error from this response, if there was one."""
        if self.error:
            raise self.error
    def __repr__(self):
        args = ",".join("%s=%r" % i for i in self.__dict__.iteritems())
        return "%s(%s)" % (self.__class__.__name__, args)
    def __del__(self):
        if self.buffer is not None:
            self.buffer.close()
class HTTPError(Exception):
    """Exception thrown for an unsuccessful HTTP request.
    Attributes:
    code - HTTP error integer error code, e.g. 404.  Error code 599 is
           used when no HTTP response was received, e.g. for a timeout.
    response - HTTPResponse object, if any.
    Note that if follow_redirects is False, redirects become HTTPErrors,
    and you can look at error.response.headers['Location'] to see the
    destination of the redirect.
    """
    def __init__(self, code, message=None, response=None):
        self.code = code
        self.response = response
        # Fall back to the standard reason phrase when no message (or an
        # empty one) was supplied.
        if not message:
            message = httplib.responses.get(code, "Unknown")
        Exception.__init__(self, "HTTP %d: %s" % (self.code, message))
class CurlError(HTTPError):
    """An HTTPError representing a libcurl-level failure.

    All curl failures are mapped onto the catch-all HTTP code 599; the
    original curl error number is preserved in `errno`.
    """
    def __init__(self, errno, message):
        HTTPError.__init__(self, 599, message)
        self.errno = errno
def _curl_create(max_simultaneous_connections=None):
    """Build a pycurl.Curl handle, enabling verbose curl debug logging
    when the root logger is at DEBUG level."""
    handle = pycurl.Curl()
    if logging.getLogger().isEnabledFor(logging.DEBUG):
        handle.setopt(pycurl.VERBOSE, 1)
        handle.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
    handle.setopt(pycurl.MAXCONNECTS, max_simultaneous_connections or 5)
    return handle
def _curl_setup_request(curl, request, buffer, headers):
    """Configure a pycurl handle to execute the given HTTPRequest.

    Response body bytes are written into `buffer` (unless the request has
    a streaming_callback) and response header lines are parsed into
    `headers` (unless the request has a header_callback).
    """
    curl.setopt(pycurl.URL, request.url)
    # Request headers may be either a regular dict or HTTPHeaders object
    if isinstance(request.headers, httputil.HTTPHeaders):
        curl.setopt(pycurl.HTTPHEADER,
                    [_utf8("%s: %s" % i) for i in request.headers.get_all()])
    else:
        curl.setopt(pycurl.HTTPHEADER,
                    [_utf8("%s: %s" % i) for i in request.headers.iteritems()])
    if request.header_callback:
        curl.setopt(pycurl.HEADERFUNCTION, request.header_callback)
    else:
        curl.setopt(pycurl.HEADERFUNCTION,
                    lambda line: _curl_header_callback(headers, line))
    if request.streaming_callback:
        curl.setopt(pycurl.WRITEFUNCTION, request.streaming_callback)
    else:
        curl.setopt(pycurl.WRITEFUNCTION, buffer.write)
    curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
    curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
    # libcurl takes whole seconds; fractional timeouts are truncated.
    curl.setopt(pycurl.CONNECTTIMEOUT, int(request.connect_timeout))
    curl.setopt(pycurl.TIMEOUT, int(request.request_timeout))
    if request.user_agent:
        curl.setopt(pycurl.USERAGENT, _utf8(request.user_agent))
    else:
        curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
    if request.network_interface:
        curl.setopt(pycurl.INTERFACE, request.network_interface)
    if request.use_gzip:
        curl.setopt(pycurl.ENCODING, "gzip,deflate")
    else:
        curl.setopt(pycurl.ENCODING, "none")
    # Set the request method through curl's idiosyncratic interface, which
    # has a dedicated option for almost every individual method.
    curl_options = {
        "GET": pycurl.HTTPGET,
        "POST": pycurl.POST,
        "PUT": pycurl.UPLOAD,
        "HEAD": pycurl.NOBODY,
    }
    custom_methods = set(["DELETE"])
    # Clear every method option first so a recycled handle does not keep
    # the method of its previous request.
    for o in curl_options.values():
        curl.setopt(o, False)
    if request.method in curl_options:
        curl.unsetopt(pycurl.CUSTOMREQUEST)
        curl.setopt(curl_options[request.method], True)
    elif request.allow_nonstandard_methods or request.method in custom_methods:
        curl.setopt(pycurl.CUSTOMREQUEST, request.method)
    else:
        raise KeyError('unknown method ' + request.method)
    # Handle curl's cryptic options for every individual HTTP method
    if request.method in ("POST", "PUT"):
        request_buffer = cStringIO.StringIO(escape.utf8(request.body))
        curl.setopt(pycurl.READFUNCTION, request_buffer.read)
        if request.method == "POST":
            def ioctl(cmd):
                # Rewind the body buffer when libcurl restarts the upload
                # (e.g. after a redirect).
                # NOTE(review): IOCMD_RESTARTREAD is read off the Curl
                # handle rather than the pycurl module -- confirm the
                # attribute exists on handle objects.
                if cmd == curl.IOCMD_RESTARTREAD:
                    request_buffer.seek(0)
            curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
            curl.setopt(pycurl.POSTFIELDSIZE, len(request.body))
        else:
            curl.setopt(pycurl.INFILESIZE, len(request.body))
    if request.auth_username and request.auth_password:
        userpwd = "%s:%s" % (request.auth_username, request.auth_password)
        curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
        curl.setopt(pycurl.USERPWD, userpwd)
        logging.info("%s %s (username: %r)", request.method, request.url,
                     request.auth_username)
    else:
        curl.unsetopt(pycurl.USERPWD)
        logging.info("%s %s", request.method, request.url)
    if request.prepare_curl_callback is not None:
        request.prepare_curl_callback(curl)
def _curl_header_callback(headers, header_line):
if header_line.startswith("HTTP/"):
headers.clear()
return
if header_line == "\r\n":
return
headers.parse_line(header_line)
def _curl_debug(debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
logging.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
for line in debug_msg.splitlines():
logging.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
logging.debug('%s %r', debug_types[debug_type], debug_msg)
def _utf8(value):
if value is None:
return value
if isinstance(value, unicode):
return value.encode("utf-8")
assert isinstance(value, str)
return value
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.lib.base import (Account,
Router,
Network,
VirtualMachine,
ServiceOffering,
NetworkOffering)
from marvin.lib.utils import cleanup_resources
from marvin.lib.common import (get_domain,
get_zone,
get_template)
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
class Services:
    """Test Services for customer defects

    Bundles every configuration dictionary (accounts, offerings, rules)
    that the redundant-virtual-router tests below consume via
    ``Services().services``.
    """
    def __init__(self):
        self.services = {
            # Test account created (and destroyed) per test run.
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                # Random characters are appended for unique
                # username
                "password": "password",
            },
            # Minimal compute offering for the test VMs.
            "service_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
            },
            "disk_offering": {
                "displaytext": "Small",
                "name": "Small",
                "disksize": 1
            },
            "virtual_machine": {
                "displayname": "Test VM",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "static_nat": {
                "startport": 22,
                "endport": 22,
                "protocol": "TCP"
            },
            # Isolated-network offering with RedundantRouter enabled;
            # networks are upgraded to / downgraded from this offering.
            "network_offering": {
                "name": 'Network offering-RVR services',
                "displaytext": 'Network off-RVR services',
                "guestiptype": 'Isolated',
                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Firewall,Lb,UserData,StaticNat',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "serviceProviderList": {
                    "Vpn": 'VirtualRouter',
                    "Dhcp": 'VirtualRouter',
                    "Dns": 'VirtualRouter',
                    "SourceNat": 'VirtualRouter',
                    "PortForwarding": 'VirtualRouter',
                    "Firewall": 'VirtualRouter',
                    "Lb": 'VirtualRouter',
                    "UserData": 'VirtualRouter',
                    "StaticNat": 'VirtualRouter',
                },
                "serviceCapabilityList": {
                    "SourceNat": {
                        "SupportedSourceNatTypes": "peraccount",
                        "RedundantRouter": "true",
                    },
                    "lb": {
                        "SupportedLbIsolation": "dedicated"
                    },
                },
            },
            "host": {
                "username": "root",
                "password": "password",
                "publicport": 22,
            },
            "network": {
                "name": "Test Network",
                "displaytext": "Test Network",
            },
            "lbrule": {
                "name": "SSH",
                "alg": "roundrobin",
                # Algorithm used for load balancing
                "privateport": 22,
                "publicport": 22,
                "openfirewall": True,
            },
            "natrule": {
                "privateport": 22,
                "publicport": 22,
                "protocol": "TCP"
            },
            "natrule_221": {
                "privateport": 22,
                "publicport": 221,
                "protocol": "TCP"
            },
            "fw_rule": {
                "startport": 1,
                "endport": 6000,
                "cidr": '55.55.0.0/11',
                # Any network (For creating FW rule)
                "protocol": 'TCP',
            },
            "ostype": 'CentOS 5.3 (64-bit)',
            # Generic wait interval (seconds) between retries.
            "sleep": 60,
        }
class TestRvRUpgradeDowngrade(cloudstackTestCase):
    """Tests upgrading an isolated network from a single virtual router
    (VR) offering to a redundant virtual router (RVR) offering, and
    downgrading it back again.
    """
    @classmethod
    def setUpClass(cls):
        cls.testClient = super(TestRvRUpgradeDowngrade, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostype"]
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.services["network_offering"],
            conservemode=True
        )
        # Enable Network offering
        cls.network_offering.update(cls.api_client, state='Enabled')
        cls._cleanup = [
            cls.service_offering,
            cls.network_offering,
        ]
        return
    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        self.cleanup.insert(0, self.account)
        return
    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    @attr(tags=["advanced", "advancedns", "ssh"], required_hardware="false")
    def test_upgradeVR_to_redundantVR(self):
        """Test upgrade virtual router to redundant virtual router
        """
        # Steps to validate
        # 1. create a network with DefaultNetworkOfferingWithSourceNATservice
        #    (all VR based services)
        # 2. deploy a VM in the above network and listRouters
        # 3. create a network Offering that has redundant router enabled and
        #    all VR based services
        # 4. updateNetwork created above to the offering in 3.
        # 5. listRouters in the network
        # 6. delete account in which resources are created
        # Validate the following
        # 1. listNetworks should show the created network in allocated state
        # 2. VM should be deployed and in Running state and there should be
        #    one Router running for this network
        # 3. listNetworkOfferings should show created offering for RvR
        # 4. listNetworks shows the network still successfully implemented
        # 5. listRouters shows two routers Up and Running (MASTER and BACKUP)
        network_offerings = NetworkOffering.list(
            self.apiclient,
            name='DefaultIsolatedNetworkOfferingWithSourceNatService',
            listall=True
        )
        self.assertEqual(
            isinstance(network_offerings, list),
            True,
            "List network offering should not return empty response"
        )
        network_off_vr = network_offerings[0]
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   network_off_vr.id)
        network = Network.create(
            self.apiclient,
            self.services["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=network_off_vr.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % network.id)
        networks = Network.list(
            self.apiclient,
            id=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
        )
        nw_response = networks[0]
        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
            nw_response.state,
            "Allocated",
            "The network should be in allocated state after creation"
        )
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(network.id)]
        )
        self.debug("Deployed VM in the account: %s" %
                   self.account.name)
        vms = VirtualMachine.list(
            self.apiclient,
            id=virtual_machine.id,
            listall=True
        )
        self.assertEqual(
            isinstance(vms, list),
            True,
            "List Vms should return a valid list"
        )
        vm = vms[0]
        self.assertEqual(
            vm.state,
            "Running",
            "Vm should be in running state after deployment"
        )
        self.debug("Listing routers for account: %s" %
                   self.account.name)
        routers = Router.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return only one router"
        )
        self.assertEqual(
            len(routers),
            1,
            "Length of the list router should be 1"
        )
        self.debug("Upgrading the network to RVR network offering..")
        try:
            network.update(
                self.apiclient,
                networkofferingid=self.network_offering.id
            )
        except Exception as e:
            self.fail("Failed to upgrade the network from VR to RVR: %s" % e)
        self.debug("Listing routers for account: %s" %
                   self.account.name)
        routers = Router.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return two routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (MASTER & BACKUP)"
        )
        return
    @attr(tags=["advanced", "advancedns", "ssh"], required_hardware="false")
    def test_downgradeRvR_to_VR(self):
        """Test downgrade redundant virtual router to virtual router
        """
        # Steps to validate
        # 1. create a network Offering that has redundant router enabled and
        #    all VR based services
        # 2. create a network with above offering
        # 3. deploy a VM in the above network and listRouters
        # 4. create a network Offering that has redundant router disabled and
        #    all VR based services
        # 5. updateNetwork - downgrade - created above to the offering in 4.
        # 6. listRouters in the network
        # 7. delete account in which resources are created
        # Validate the following
        # 1. listNetworkOfferings should show created offering for RvR
        # 2. listNetworks should show the created network in allocated state
        # 3. VM should be deployed and in Running state and there should be
        #    two routers (MASTER and BACKUP) for this network
        # 4. listNetworkOfferings should show created offering for VR
        # 5. listNetworks shows the network still successfully implemented
        # 6. listRouters shows only one router for this network in Running
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        network = Network.create(
            self.apiclient,
            self.services["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % network.id)
        networks = Network.list(
            self.apiclient,
            id=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
        )
        nw_response = networks[0]
        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
            nw_response.state,
            "Allocated",
            "The network should be in allocated state after creation"
        )
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(network.id)]
        )
        self.debug("Deployed VM in the account: %s" %
                   self.account.name)
        vms = VirtualMachine.list(
            self.apiclient,
            id=virtual_machine.id,
            listall=True
        )
        self.assertEqual(
            isinstance(vms, list),
            True,
            "List Vms should return a valid list"
        )
        vm = vms[0]
        self.assertEqual(
            vm.state,
            "Running",
            "Vm should be in running state after deployment"
        )
        self.debug("Listing routers for account: %s" %
                   self.account.name)
        routers = Router.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return two routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (MASTER & BACKUP)"
        )
        network_offerings = NetworkOffering.list(
            self.apiclient,
            name='DefaultIsolatedNetworkOfferingWithSourceNatService',
            listall=True
        )
        self.assertEqual(
            isinstance(network_offerings, list),
            True,
            "List network offering should not return empty response"
        )
        network_off_vr = network_offerings[0]
        # Fixed misleading messages: this test performs a downgrade
        # (RVR -> VR), but the original debug/fail strings said "upgrade".
        self.debug("Downgrading the network to VR network offering..")
        try:
            network.update(
                self.apiclient,
                networkofferingid=network_off_vr.id
            )
        except Exception as e:
            self.fail("Failed to downgrade the network from RVR to VR: %s" % e)
        self.debug("Listing routers for account: %s" %
                   self.account.name)
        routers = Router.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return only one router"
        )
        self.assertEqual(
            len(routers),
            1,
            "Length of the list router should be 1"
        )
        return
| |
"""Translation helper functions."""
import functools
import gettext as gettext_module
import os
import re
import sys
import warnings
from asgiref.local import Local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import SafeData, mark_safe
from . import to_language, to_locale
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = Local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = _lazy_re_compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_re = _lazy_re_compile(
r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$',
re.IGNORECASE
)
language_code_prefix_re = _lazy_re_compile(r'^/(\w+([@-]\w+)?)(/|$)')
@receiver(setting_changed)
def reset_cache(**kwargs):
    """
    Clear cached language data when a translation-related setting changes,
    as previously accepted languages may no longer be valid.
    """
    if kwargs['setting'] not in ('LANGUAGES', 'LANGUAGE_CODE'):
        return
    check_for_language.cache_clear()
    get_languages.cache_clear()
    get_supported_language_variant.cache_clear()
class TranslationCatalog:
    """
    Dict-like container that keeps several message catalogs — each with its
    own plural equation — separate while presenting a single mapping API.
    """
    def __init__(self, trans=None):
        if trans is None:
            self._catalogs = [{}]
            self._plurals = [lambda n: int(n != 1)]
        else:
            self._catalogs = [trans._catalog.copy()]
            self._plurals = [trans.plural]

    def __getitem__(self, key):
        # Front-to-back search; the most recently prepended catalog wins.
        for catalog in self._catalogs:
            if key in catalog:
                return catalog[key]
        raise KeyError(key)

    def __setitem__(self, key, value):
        # Writes always land in the frontmost catalog.
        self._catalogs[0][key] = value

    def __contains__(self, key):
        return any(key in catalog for catalog in self._catalogs)

    def items(self):
        for catalog in self._catalogs:
            yield from catalog.items()

    def keys(self):
        for catalog in self._catalogs:
            yield from catalog.keys()

    def update(self, trans):
        """Merge into the catalog sharing trans's plural function, else prepend."""
        for catalog, plural in zip(self._catalogs, self._plurals):
            if trans.plural.__code__ == plural.__code__:
                catalog.update(trans._catalog)
                return
        self._catalogs.insert(0, trans._catalog.copy())
        self._plurals.insert(0, trans.plural)

    def get(self, key, default=None):
        sentinel = object()
        for catalog in self._catalogs:
            value = catalog.get(key, sentinel)
            if value is not sentinel:
                return value
        return default

    def plural(self, msgid, num):
        for catalog, plural in zip(self._catalogs, self._plurals):
            candidate = catalog.get((msgid, plural(num)))
            if candidate is not None:
                return candidate
        raise KeyError
class DjangoTranslation(gettext_module.GNUTranslations):
    """
    Set up the GNUTranslations context with regard to output charset.
    This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
    requested language and add a fallback to the default language, if it's
    different from the requested language.
    """
    domain = 'django'
    def __init__(self, language, domain=None, localedirs=None):
        """Create a GNUTranslations() using many locale directories"""
        gettext_module.GNUTranslations.__init__(self)
        if domain is not None:
            self.domain = domain
        self.__language = language
        self.__to_language = to_language(language)
        self.__locale = to_locale(language)
        self._catalog = None
        # If a language doesn't have a catalog, use the Germanic default for
        # pluralization: anything except one is pluralized.
        self.plural = lambda n: int(n != 1)
        if self.domain == 'django':
            if localedirs is not None:
                # A module-level cache is used for caching 'django' translations
                warnings.warn("localedirs is ignored when domain is 'django'.", RuntimeWarning)
                localedirs = None
            self._init_translation_catalog()
        if localedirs:
            # Custom domain with explicit locale dirs: merge only those.
            for localedir in localedirs:
                translation = self._new_gnu_trans(localedir)
                self.merge(translation)
        else:
            # App translations are merged first, then LOCALE_PATHS on top.
            self._add_installed_apps_translations()
            self._add_local_translations()
        if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and self._catalog is None:
            # default lang should have at least one translation file available.
            raise OSError('No translation files found for default language %s.' % settings.LANGUAGE_CODE)
        self._add_fallback(localedirs)
        if self._catalog is None:
            # No catalogs found for this language, set an empty catalog.
            self._catalog = TranslationCatalog()
    def __repr__(self):
        return "<DjangoTranslation lang:%s>" % self.__language
    def _new_gnu_trans(self, localedir, use_null_fallback=True):
        """
        Return a mergeable gettext.GNUTranslations instance.
        A convenience wrapper. By default gettext uses 'fallback=False'.
        Using param `use_null_fallback` to avoid confusion with any other
        references to 'fallback'.
        """
        return gettext_module.translation(
            domain=self.domain,
            localedir=localedir,
            languages=[self.__locale],
            fallback=use_null_fallback,
        )
    def _init_translation_catalog(self):
        """Create a base catalog using global django translations."""
        settingsfile = sys.modules[settings.__module__].__file__
        localedir = os.path.join(os.path.dirname(settingsfile), 'locale')
        translation = self._new_gnu_trans(localedir)
        self.merge(translation)
    def _add_installed_apps_translations(self):
        """Merge translations from each installed app."""
        try:
            # reversed(): apps listed first are merged last, so (given that
            # TranslationCatalog.update() overwrites existing keys) their
            # translations take precedence.
            app_configs = reversed(list(apps.get_app_configs()))
        except AppRegistryNotReady:
            raise AppRegistryNotReady(
                "The translation infrastructure cannot be initialized before the "
                "apps registry is ready. Check that you don't make non-lazy "
                "gettext calls at import time.")
        for app_config in app_configs:
            localedir = os.path.join(app_config.path, 'locale')
            if os.path.exists(localedir):
                translation = self._new_gnu_trans(localedir)
                self.merge(translation)
    def _add_local_translations(self):
        """Merge translations defined in LOCALE_PATHS."""
        for localedir in reversed(settings.LOCALE_PATHS):
            translation = self._new_gnu_trans(localedir)
            self.merge(translation)
    def _add_fallback(self, localedirs=None):
        """Set the GNUTranslations() fallback with the default language."""
        # Don't set a fallback for the default language or any English variant
        # (as it's empty, so it'll ALWAYS fall back to the default language)
        if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):
            return
        if self.domain == 'django':
            # Get from cache
            default_translation = translation(settings.LANGUAGE_CODE)
        else:
            default_translation = DjangoTranslation(
                settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs
            )
        self.add_fallback(default_translation)
    def merge(self, other):
        """Merge another translation into this catalog."""
        if not getattr(other, '_catalog', None):
            return # NullTranslations() has no _catalog
        if self._catalog is None:
            # Take plural and _info from first catalog found (generally Django's).
            self.plural = other.plural
            self._info = other._info.copy()
            self._catalog = TranslationCatalog(other)
        else:
            self._catalog.update(other)
        if other._fallback:
            self.add_fallback(other._fallback)
    def language(self):
        """Return the translation language."""
        return self.__language
    def to_language(self):
        """Return the translation language name."""
        return self.__to_language
    def ngettext(self, msgid1, msgid2, n):
        # Delegate plural selection to TranslationCatalog.plural(); on a miss,
        # try the fallback translation, else return the msgid untranslated.
        try:
            tmsg = self._catalog.plural(msgid1, n)
        except KeyError:
            if self._fallback:
                return self._fallback.ngettext(msgid1, msgid2, n)
            if n == 1:
                tmsg = msgid1
            else:
                tmsg = msgid2
        return tmsg
def translation(language):
    """
    Return (and cache module-wide) a translation object in the default
    'django' domain.
    """
    global _translations
    try:
        return _translations[language]
    except KeyError:
        _translations[language] = DjangoTranslation(language)
        return _translations[language]
def activate(language):
    """
    Install the translation object for `language` as the active translation
    for the current thread. A falsy language is a no-op.
    """
    if language:
        _active.value = translation(language)
def deactivate():
    """
    Remove the active translation so that further _() calls resolve to the
    default translation object.
    """
    try:
        del _active.value
    except AttributeError:
        pass
def deactivate_all():
    """
    Install a NullTranslations() instance as the active translation so that
    delayed translations appear as their original strings.
    """
    null_translation = gettext_module.NullTranslations()
    null_translation.to_language = lambda *args: None
    _active.value = null_translation
def get_language():
    """Return the currently selected language code."""
    current = getattr(_active, "value", None)
    if current is None:
        # No translation installed: report the configured default.
        return settings.LANGUAGE_CODE
    try:
        return current.to_language()
    except AttributeError:
        # Not a real translation object; assume the default language.
        return settings.LANGUAGE_CODE
def get_language_bidi():
    """
    Return the selected language's BiDi layout.
    * False = left-to-right layout
    * True = right-to-left layout
    """
    lang = get_language()
    if lang is None:
        return False
    # Reuse the already-fetched language instead of calling get_language()
    # a second time as the previous code did.
    base_lang = lang.split('-')[0]
    return base_lang in settings.LANGUAGES_BIDI
def catalog():
    """
    Return the currently active catalog (creating the default one lazily if
    nothing is active) so callers can inspect or modify whole message
    catalogs instead of translating one string at a time.
    """
    global _default
    active = getattr(_active, "value", None)
    if active is not None:
        return active
    if _default is None:
        _default = translation(settings.LANGUAGE_CODE)
    return _default
def gettext(message):
    """
    Translate `message` via the active (or default) translation object.
    Empty messages yield an empty value of the same type instead of the
    catalog metadata gettext would otherwise return.
    """
    global _default
    eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
    if not eol_message:
        # Empty input: sidestep gettext's metadata-returning behavior.
        result = type(message)('')
    else:
        _default = _default or translation(settings.LANGUAGE_CODE)
        translation_object = getattr(_active, "value", _default)
        result = translation_object.gettext(eol_message)
    return mark_safe(result) if isinstance(message, SafeData) else result
def pgettext(context, message):
    """Translate `message` within `context` (msgctxt-style lookup)."""
    contextual_msg = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    result = gettext(contextual_msg)
    if CONTEXT_SEPARATOR in result:
        # No translation found; fall back to the bare message.
        return message
    if isinstance(message, SafeData):
        return mark_safe(result)
    return result
def gettext_noop(message):
    """
    Mark `message` for translation without translating it now. Useful for
    strings stored in globals that must stay in the base language (e.g.
    because they're consumed externally) until translated later.
    """
    return message
def do_ntranslate(singular, plural, number, translation_function):
    """Dispatch an ngettext-style call to the active or default translation."""
    global _default
    target = getattr(_active, "value", None)
    if target is None:
        if _default is None:
            _default = translation(settings.LANGUAGE_CODE)
        target = _default
    return getattr(target, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
    """Return the translation of `singular` or `plural` based on `number`."""
    return do_ntranslate(singular, plural, number, 'ngettext')
def npgettext(context, singular, plural, number):
    """Contextual (msgctxt-style) variant of ngettext()."""
    result = ngettext(
        "%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
        "%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
        number,
    )
    if CONTEXT_SEPARATOR in result:
        # No contextual translation found; retry without the context marker.
        result = ngettext(singular, plural, number)
    return result
def all_locale_paths():
    """
    Return a list of paths to user-provided language files.
    """
    settings_dir = os.path.dirname(sys.modules[settings.__module__].__file__)
    globalpath = os.path.join(settings_dir, 'locale')
    app_paths = []
    for app_config in apps.get_app_configs():
        candidate = os.path.join(app_config.path, 'locale')
        if os.path.exists(candidate):
            app_paths.append(candidate)
    # Global path first, then LOCALE_PATHS, then per-app 'locale' dirs.
    return [globalpath, *settings.LOCALE_PATHS, *app_paths]
@functools.lru_cache(maxsize=1000)
def check_for_language(lang_code):
    """
    Check whether a global language file exists for `lang_code`; used to
    decide whether a user-provided language is available.
    The lru_cache is bounded because language codes come straight from the
    HTTP request (memory-exhaustion hardening). See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
    """
    # Reject malformed codes up front (#21458).
    if lang_code is None or not language_code_re.search(lang_code):
        return False
    locale = to_locale(lang_code)
    for path in all_locale_paths():
        if gettext_module.find('django', path, [locale]) is not None:
            return True
    return False
@functools.lru_cache()
def get_languages():
    """
    Cache of settings.LANGUAGES in a dictionary for easy lookups by key.
    """
    # Cache is cleared by reset_cache() when LANGUAGES/LANGUAGE_CODE change.
    return dict(settings.LANGUAGES)
@functools.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
    """
    Return the language code that's listed in supported languages, possibly
    selecting a more generic variant. Raise LookupError if nothing is found.
    If `strict` is False (the default), look for a country-specific variant
    when neither the language code nor its generic variant is found.
    lru_cache should have a maxsize to prevent from memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
    """
    if lang_code:
        # If 'zh-hant-tw' is not supported, try special fallback or subsequent
        # language codes i.e. 'zh-hant' and 'zh'.
        possible_lang_codes = [lang_code]
        try:
            # LANG_INFO may declare explicit fallbacks for this exact code.
            possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
        except KeyError:
            pass
        # Walk the '-' separators right-to-left, appending each truncated
        # prefix ('zh-hant-tw' -> 'zh-hant' -> 'zh'); rfind() returns -1
        # once no separator remains, ending the loop.
        i = None
        while (i := lang_code.rfind('-', 0, i)) > -1:
            possible_lang_codes.append(lang_code[:i])
        # The last (shortest) candidate is the bare/generic language code.
        generic_lang_code = possible_lang_codes[-1]
        supported_lang_codes = get_languages()
        for code in possible_lang_codes:
            if code in supported_lang_codes and check_for_language(code):
                return code
        if not strict:
            # if fr-fr is not supported, try fr-ca.
            for supported_code in supported_lang_codes:
                if supported_code.startswith(generic_lang_code + '-'):
                    return supported_code
    raise LookupError(lang_code)
def get_language_from_path(path, strict=False):
    """
    Return the language code if a valid one is found in `path`, else None.
    With `strict=False` (the default), a country-specific variant may be
    used when neither the code nor its generic variant is supported.
    """
    match = language_code_prefix_re.match(path)
    if match:
        try:
            return get_supported_language_variant(match[1], strict=strict)
        except LookupError:
            return None
    return None
def get_language_from_request(request, check_path=False):
    """
    Analyze the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.
    If check_path is True, the URL path prefix will be checked for a language
    code, otherwise this is skipped for backwards compatibility.
    """
    # Precedence: URL path prefix (optional) -> language cookie ->
    # Accept-Language header -> settings.LANGUAGE_CODE.
    if check_path:
        lang_code = get_language_from_path(request.path_info)
        if lang_code is not None:
            return lang_code
    lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    if lang_code is not None and lang_code in get_languages() and check_for_language(lang_code):
        return lang_code
    try:
        # The cookie value may still resolve to a supported variant even if
        # it isn't listed verbatim in LANGUAGES.
        return get_supported_language_variant(lang_code)
    except LookupError:
        pass
    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    # Entries arrive sorted by descending q-value.
    for accept_lang, unused in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break
        if not language_code_re.search(accept_lang):
            # Skip malformed codes rather than failing the whole request.
            continue
        try:
            return get_supported_language_variant(accept_lang)
        except LookupError:
            continue
    try:
        return get_supported_language_variant(settings.LANGUAGE_CODE)
    except LookupError:
        # Even the configured default isn't available; return it anyway.
        return settings.LANGUAGE_CODE
@functools.lru_cache(maxsize=1000)
def parse_accept_lang_header(lang_string):
    """
    Parse an HTTP Accept-Language header body into a tuple of
    (lang, q-value) pairs sorted by descending q-value. Return an empty
    tuple on any format error.
    """
    pieces = accept_language_re.split(lang_string.lower())
    if pieces[-1]:
        # Trailing text the regex could not consume: malformed header.
        return ()
    parsed = []
    for index in range(0, len(pieces) - 1, 3):
        garbage, lang, priority = pieces[index:index + 3]
        if garbage:
            # Unmatched text between entries: malformed header.
            return ()
        parsed.append((lang, float(priority) if priority else 1.0))
    parsed.sort(key=lambda pair: pair[1], reverse=True)
    return tuple(parsed)
| |
from anynet import http, tls
import pkg_resources
import datetime
import hashlib
import struct
import base64
import logging
logger = logging.getLogger(__name__)
CA = pkg_resources.resource_filename("nintendo", "files/cert/CACERT_NINTENDO_CA_G3.der")
CERT = pkg_resources.resource_filename("nintendo", "files/cert/WIIU_COMMON_1_CERT.der")
KEY = pkg_resources.resource_filename("nintendo", "files/cert/WIIU_COMMON_1_RSA_KEY.der")
def calc_password_hash(pid, password):
	"""Return the hex SHA-256 of the pid (little-endian), a fixed salt, and the password."""
	salted = struct.pack("<I", pid) + b"\x02\x65\x43\x46" + password.encode("ascii")
	return hashlib.sha256(salted).hexdigest()
class NNASError(Exception):
	"""Raised when an account server request fails."""
	def __init__(self, status_code, errors):
		self.status_code = status_code
		self.errors = errors
	
	def __str__(self):
		if not self.errors:
			return "Account request failed with status %i" % self.status_code
		return "Account request failed: %s" % self.errors
class OAuth20:
	"""An OAuth 2.0 access token response."""
	def __init__(self):
		# Filled in by parse()
		self.token = None
		self.refresh_token = None
		self.expires_in = None
	
	@classmethod
	def parse(cls, tree):
		"""Build an OAuth20 instance from a parsed XML response tree."""
		inst = cls()
		token_node = tree["access_token"]
		inst.token = token_node["token"].text
		inst.refresh_token = token_node["refresh_token"].text
		inst.expires_in = int(token_node["expires_in"].text)
		return inst
class NexToken:
	"""Credentials for connecting to a NEX game server."""
	def __init__(self):
		self.host = None
		self.port = None
		self.pid = None
		self.password = None
		self.token = None
	
	@classmethod
	def parse(cls, tree):
		"""Build a NexToken from a parsed XML response tree."""
		inst = cls()
		text = lambda key: tree[key].text
		inst.host = text("host")
		inst.port = int(text("port"))
		inst.pid = int(text("pid"))
		inst.password = text("nex_password")
		inst.token = text("token")
		return inst
class MiiImage:
	"""A single rendered Mii image entry."""
	def __init__(self):
		self.id = None
		self.type = None
		self.url = None
		self.cached_url = None
	
	@classmethod
	def parse(cls, image):
		"""Build a MiiImage from a parsed XML element."""
		inst = cls()
		inst.id = int(image["id"].text)
		inst.type = image["type"].text
		inst.url = image["url"].text
		inst.cached_url = image["cached_url"].text
		return inst
class Mii:
	"""A Mii attached to an account, including its binary data and images."""
	def __init__(self):
		self.data = None
		self.id = None
		self.name = None
		self.images = None
		self.primary = None
		self.pid = None
		self.nnid = None
	
	@classmethod
	def parse(cls, mii):
		"""Build a Mii from a parsed XML element."""
		inst = cls()
		inst.data = base64.b64decode(mii["data"].text)
		inst.id = int(mii["id"].text)
		inst.name = mii["name"].text
		inst.images = [MiiImage.parse(node) for node in mii["images"]]
		inst.primary = (mii["primary"].text == "Y")
		inst.pid = int(mii["pid"].text)
		inst.nnid = mii["user_id"].text
		return inst
class Account:
	"""A linked account entry (domain, type, username)."""
	def __init__(self):
		self.domain = None
		self.type = None
		self.username = None
	
	@classmethod
	def parse(cls, account):
		"""Build an Account from a parsed XML element."""
		inst = cls()
		for field in ("domain", "type", "username"):
			setattr(inst, field, account[field].text)
		return inst
class DeviceAttribute:
	"""A timestamped key/value attribute stored on a device."""
	def __init__(self):
		self.created_date = None
		self.name = None
		self.value = None
	
	@classmethod
	def parse(cls, attribute):
		"""Build a DeviceAttribute from a parsed XML element."""
		inst = cls()
		inst.name = attribute["name"].text
		inst.value = attribute["value"].text
		inst.created_date = datetime.datetime.fromisoformat(attribute["created_date"].text)
		return inst
class Email:
	"""An email address registered on an account."""
	def __init__(self):
		self.address = None
		self.id = None
		self.parent = None
		self.primary = None
		self.reachable = None
		self.type = None
		self.validated = None
		self.validated_date = None
	
	@classmethod
	def parse(cls, email):
		"""Build an Email from a parsed XML element."""
		inst = cls()
		text = lambda key: email[key].text
		flag = lambda key: email[key].text == "Y"
		inst.address = text("address")
		inst.id = int(text("id"))
		inst.parent = flag("parent")
		inst.primary = flag("primary")
		inst.reachable = flag("reachable")
		inst.type = text("type")
		inst.validated = flag("validated")
		inst.validated_date = datetime.datetime.fromisoformat(text("validated_date"))
		return inst
class ProfileMii:
	"""The Mii block embedded in a profile response."""
	def __init__(self):
		self.id = None
		self.data = None
		self.status = None
		self.hash = None
		self.images = None
		self.name = None
		self.primary = None
	
	@classmethod
	def parse(cls, mii):
		"""Build a ProfileMii from a parsed XML element."""
		inst = cls()
		inst.status = mii["status"].text
		inst.data = base64.b64decode(mii["data"].text)
		inst.id = int(mii["id"].text)
		inst.hash = mii["mii_hash"].text
		inst.images = [MiiImage.parse(node) for node in mii["mii_images"]]
		inst.name = mii["name"].text
		inst.primary = (mii["primary"].text == "Y")
		return inst
class Profile:
	"""Full account profile as returned by /v1/api/people/@me/profile."""
	def __init__(self):
		self.accounts = None
		self.active_flag = None
		self.birth_date = None
		self.country = None
		self.create_date = None
		self.device_attributes = None
		self.forgot_pw_email_sent = None
		self.gender = None
		self.language = None
		self.updated = None
		self.marketing_flag = None
		self.off_device_flag = None
		self.pid = None
		self.email = None
		self.mii = None
		self.region = None
		self.temporary_password_expiration = None
		self.tz_name = None
		self.nnid = None
		self.utc_offset = None
	@classmethod
	def parse(cls, profile):
		"""Build a Profile from a parsed XML response tree."""
		inst = cls()
		inst.accounts = [Account.parse(account) for account in profile["accounts"]]
		# Bug fix: read the element's .text before comparing to "Y". The old
		# code compared the XML node object itself to "Y" (always False);
		# every other Y/N flag in this module uses .text == "Y".
		inst.active_flag = profile["active_flag"].text == "Y"
		inst.birth_date = datetime.date.fromisoformat(profile["birth_date"].text)
		inst.country = profile["country"].text
		inst.create_date = datetime.datetime.fromisoformat(profile["create_date"].text)
		inst.device_attributes = [DeviceAttribute.parse(attrib) for attrib in profile["device_attributes"]]
		# Optional field: only present on some accounts.
		if "forgot_pw_email_sent" in profile:
			inst.forgot_pw_email_sent = datetime.datetime.fromisoformat(profile["forgot_pw_email_sent"].text)
		inst.gender = profile["gender"].text
		inst.language = profile["language"].text
		inst.updated = datetime.datetime.fromisoformat(profile["updated"].text)
		inst.marketing_flag = profile["marketing_flag"].text == "Y"
		inst.off_device_flag = profile["off_device_flag"].text == "Y"
		inst.pid = int(profile["pid"].text)
		inst.email = Email.parse(profile["email"])
		inst.mii = ProfileMii.parse(profile["mii"])
		inst.region = int(profile["region"].text)
		# Optional field: only present while a temporary password is active.
		if "temporary_password_expiration" in profile:
			inst.temporary_password_expiration = datetime.datetime.fromisoformat(profile["temporary_password_expiration"].text)
		inst.tz_name = profile["tz_name"].text
		inst.nnid = profile["user_id"].text
		inst.utc_offset = int(profile["utc_offset"].text)
		return inst
class NNASClient:
	"""HTTPS/XML client for the Nintendo Network account server (NNAS)."""
	def __init__(self):
		self.url = "account.nintendo.net"
		# Client certificate material shipped with the package (Wii U common cert).
		ca = tls.TLSCertificate.load(CA, tls.TYPE_DER)
		cert = tls.TLSCertificate.load(CERT, tls.TYPE_DER)
		key = tls.TLSPrivateKey.load(KEY, tls.TYPE_DER)
		self.context = tls.TLSContext()
		self.context.set_authority(ca)
		self.context.set_certificate(cert, key)
		# Default identity/locale values; all overridable via the setters below.
		self.client_id = "a2efa818a34fa16b8afbc8a74eba3eda"
		self.client_secret = "c91cdb5658bd4954ade78533a339cf9a"
		self.platform_id = 1
		self.device_type = 2
		self.device_id = None
		self.serial_number = None
		self.system_version = 0x260
		self.device_cert = None
		self.region = 4
		self.country = "NL"
		self.language = "en"
		self.fpd_version = 0
		self.environment = "L1"
		self.title_id = None
		self.title_version = None
	def set_context(self, context):
		self.context = context
	# Simple configuration setters.
	def set_url(self, url): self.url = url
	def set_client_id(self, client_id): self.client_id = client_id
	def set_client_secret(self, client_secret): self.client_secret = client_secret
	def set_platform_id(self, platform_id): self.platform_id = platform_id
	def set_device_type(self, device_type): self.device_type = device_type
	def set_device(self, device_id, serial_number, system_version, cert=None):
		self.device_id = device_id
		self.serial_number = serial_number
		self.system_version = system_version
		self.device_cert = cert
	def set_locale(self, region, country, language):
		self.region = region
		self.country = country
		self.language = language
	def set_fpd_version(self, version): self.fpd_version = version
	def set_environment(self, environment): self.environment = environment
	def set_title(self, title_id, title_version):
		self.title_id = title_id
		self.title_version = title_version
	def prepare(self, req, auth=None, cert=None):
		"""Attach the standard X-Nintendo-* headers (plus optional bearer auth and device cert) to req."""
		req.headers["Host"] = self.url
		req.headers["X-Nintendo-Platform-ID"] = self.platform_id
		req.headers["X-Nintendo-Device-Type"] = self.device_type
		# Device-specific headers are optional until set_device() is called.
		if self.device_id is not None:
			req.headers["X-Nintendo-Device-ID"] = self.device_id
		if self.serial_number is not None:
			req.headers["X-Nintendo-Serial-Number"] = self.serial_number
		req.headers["X-Nintendo-System-Version"] = "%04X" %self.system_version
		req.headers["X-Nintendo-Region"] = self.region
		req.headers["X-Nintendo-Country"] = self.country
		req.headers["Accept-Language"] = self.language
		req.headers["X-Nintendo-Client-ID"] = self.client_id
		req.headers["X-Nintendo-Client-Secret"] = self.client_secret
		req.headers["Accept"] = "*/*"
		req.headers["X-Nintendo-FPD-Version"] = "%04X" %self.fpd_version
		req.headers["X-Nintendo-Environment"] = self.environment
		if self.title_id is not None:
			req.headers["X-Nintendo-Title-ID"] = "%016X" %self.title_id
			# Unique id = bits 8..27 of the title id, zero-padded hex.
			req.headers["X-Nintendo-Unique-ID"] = "%05X" %((self.title_id >> 8) & 0xFFFFF)
		if self.title_version is not None:
			req.headers["X-Nintendo-Application-Version"] = "%04X" %self.title_version
		if cert is not None:
			req.headers["X-Nintendo-Device-Cert"] = cert
		if auth is not None:
			req.headers["Authorization"] = "Bearer " + auth
	async def request(self, req):
		"""Send req over TLS; return the parsed XML body or raise NNASError on an HTTP error."""
		response = await http.request(self.url, req, self.context)
		if response.error():
			logger.error("Account request returned status code %i\n%s", response.status_code, response.text)
			raise NNASError(response.status_code, response.xml)
		return response.xml
	async def login(self, username, password, password_type=None):
		"""Obtain an OAuth 2.0 token with username/password credentials."""
		req = http.HTTPRequest.post("/v1/api/oauth20/access_token/generate")
		self.prepare(req, cert=self.device_cert)
		req.form["grant_type"] = "password"
		req.form["user_id"] = username
		req.form["password"] = password
		if password_type is not None:
			req.form["password_type"] = password_type
		response = await self.request(req)
		return OAuth20.parse(response)
	async def get_nex_token(self, access_token, game_server_id):
		"""Exchange an access token for NEX (game server) credentials."""
		req = http.HTTPRequest.get("/v1/api/provider/nex_token/@me")
		req.params["game_server_id"] = "%08X" %game_server_id
		self.prepare(req, access_token)
		response = await self.request(req)
		return NexToken.parse(response)
	async def get_service_token(self, access_token, client_id):
		"""Request an independent service token for the given client id."""
		req = http.HTTPRequest.get("/v1/api/provider/service_token/@me")
		req.params["client_id"] = client_id
		self.prepare(req, access_token)
		response = await self.request(req)
		return response["token"].text
	async def get_profile(self, access_token):
		"""Fetch the logged-in user's full profile."""
		req = http.HTTPRequest.get("/v1/api/people/@me/profile")
		self.prepare(req, access_token)
		response = await self.request(req)
		return Profile.parse(response)
	#The following functions can be used without logging in
	async def get_miis(self, pids):
		"""Fetch the Miis for the given pids."""
		req = http.HTTPRequest.get("/v1/api/miis")
		req.params["pids"] = ",".join([str(pid) for pid in pids])
		self.prepare(req)
		response = await self.request(req)
		return [Mii.parse(mii) for mii in response]
	async def get_pids(self, nnids):
		"""Map NNIDs to pids; entries with no mapping are omitted from the result."""
		req = http.HTTPRequest.get("/v1/api/admin/mapped_ids")
		req.params["input_type"] = "user_id"
		req.params["output_type"] = "pid"
		req.params["input"] = ",".join(nnids)
		self.prepare(req)
		response = await self.request(req)
		return {id["in_id"].text: int(id["out_id"].text) for id in response if id["out_id"].text}
	async def get_nnids(self, pids):
		"""Map pids to NNIDs; entries with no mapping are omitted from the result."""
		req = http.HTTPRequest.get("/v1/api/admin/mapped_ids")
		req.params["input_type"] = "pid"
		req.params["output_type"] = "user_id"
		req.params["input"] = ",".join([str(pid) for pid in pids])
		self.prepare(req)
		response = await self.request(req)
		return {int(id["in_id"].text): id["out_id"].text for id in response if id["out_id"].text}
	# Single-item convenience wrappers around the bulk calls above.
	async def get_mii(self, pid): return (await self.get_miis([pid]))[0]
	async def get_pid(self, nnid): return (await self.get_pids([nnid]))[nnid]
	async def get_nnid(self, pid): return (await self.get_nnids([pid]))[pid]
| |
###ExonArray
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import time
import export
def filepath(filename):
    """Resolve filename to a full path via the unique module."""
    return unique.filepath(filename)
def read_directory(sub_dir):
    """Return only the .txt entries from sub_dir (folders and other files excluded)."""
    entries = unique.read_directory(sub_dir)
    return [entry for entry in entries if '.txt' in entry]
################# Begin Analysis
def cleanUpLine(line):
    """Strip newline, carriage-return, literal backslash-c, and double-quote characters."""
    for junk in ('\n','\c','\r','"'):
        line = string.replace(line,junk,'')
    return line
def importAnnotations(filename):
    """Print the first rows of a tab-delimited annotation file for inspection.

    NOTE(review): debugging utility — sys.exit() fires after 10 rows, so it
    terminates the whole process rather than returning.
    """
    firstLine = True
    fn = filepath(filename)
    rows = 0
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line);
        tab_delimited_data = string.split(data,'\t')
        if rows > 10: sys.exit()  # stop after inspecting the first rows
        print tab_delimited_data#;sys.exit()
        rows+=1
def correlateMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1):
    ### Takes a filtered pre-processed beta-value file as input
    # NOTE(review): this function references export_object/annot_export_object,
    # which are never defined in this scope (they exist in
    # importMethylationData), so calling it raises NameError on the first
    # line of the input. The high/low counts are computed but never used —
    # this looks like an unfinished copy of importMethylationData; confirm
    # before use.
    firstLine = True
    rows=0; filtered=0
    for line in open(filename,'rU').xreadlines():
        data = cleanUpLine(line);
        t = string.split(data,'\t')
        if firstLine:
            header = t
            if len(t)>5 and 'Illumina_name' in header:
                # Annotation columns occupy the last 50 fields when present.
                delimiter = -50
                annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
            else:
                delimiter = len(header)
            headers = t[1:delimiter]
            firstLine = False
            export_object.write(string.join([t[0]]+headers,'\t')+'\n')
        else:
            probeID = t[0]
            #try: beta_values = map(float,t[1:50])
            # Missing/zero entries are replaced by conFloat's fallback value.
            beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter])
            if '' in beta_values:
                print beta_values;sys.exit()
            high = sum(betaHighCount(x,betaHigh) for x in beta_values)
            low = sum(betaLowCount(x,betaLow) for x in beta_values)
def importMethylationData(filename,betaLow=0.4,betaHigh=0.6,counts=-1, filter=None):
annot_file = filepath('AltDatabase/ucsc/Hs/Illumina_methylation_genes.txt')
export_object = open(filename[:-4]+'-filtered.txt','w')
print filename[:-4]+'-filtered.txt', counts
firstLine = True
rows=0; filtered=0
for line in open(filename,'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data,'\t')
#export_object.write(string.join(t,'\t')+'\n')
#"""
if firstLine:
header = t
if len(t)>5 and 'Illumina_name' in header:
delimiter = -50
annot_export_object = open(annot_file,'w')
annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
else:
delimiter = len(header)
headers = t[1:delimiter]
firstLine = False
export_object.write(string.join([t[0]]+headers,'\t')+'\n')
else:
probeID = t[0]
#try: beta_values = map(float,t[1:50])
beta_values = map(lambda x: conFloat(x,t[1:delimiter]),t[1:delimiter])
if '' in beta_values:
print beta_values;sys.exit()
high = sum(betaHighCount(x,betaHigh) for x in beta_values)
low = sum(betaLowCount(x,betaLow) for x in beta_values)
#if rows<50: print high, low, max(beta_values), min(beta_values)
#else:sys.exit()
#export_object.write(string.join(t[:delimiter])+'\n')
if high>=counts and low>=counts:
#if (high-low) > 0.2:
#if rows<50: print 1
if filter!=None:
if probeID in filter: proceed=True; probeID = str(filter[probeID])+':'+probeID
else: proceed = False
else: proceed = True
if proceed:
filtered+=1
export_object.write(string.join([probeID]+map(str,beta_values),'\t')+'\n')
if 'Illumina_name' in header:
annot_export_object.write(string.join([t[0]]+t[delimiter:],'\t')+'\n')
rows+=1
#"""
export_object.close()
if delimiter == '-50':
annot_export_object.close()
print filtered, rows
def conFloat(x,betaValues):
try: x = float(x)
except Exception: x=None
if x== None or x == 0:
floats=[]
for i in betaValues:
if i=='': pass
elif float(i)==0: pass
else: floats.append(float(i))
try: return min(floats)
except Exception: print betaValues;sys.exit()
else:
return x
def betaHighCount(x,betaHigh):
    """Return 1 if x exceeds the high-beta threshold, else 0."""
    return 1 if x > betaHigh else 0
def betaLowCount(x,betaLow):
    """Return 1 if x is below the low-beta threshold, else 0."""
    return 1 if x < betaLow else 0
def getIDsFromFile(filename):
    """Read a tab-delimited file; return {lowercased first column: []}."""
    filterIDs = {}
    fn = filepath(filename)
    for line in open(fn,'rU').xreadlines():
        cleaned = cleanUpLine(line)
        columns = string.split(cleaned,'\t')
        filterIDs[string.lower(columns[0])] = []
    return filterIDs
def getRegionType(filename,featureType=None,chromosome=None,filterIDs=None):
    """Select probe IDs from an Illumina annotation CSV by chromosome,
    feature type (promoter/miRNA), and/or a gene-symbol filter file.

    Returns {probeID: coordinate} for the probes that match.
    NOTE(review): with the default featureType=None, string.lower(featureType)
    raises TypeError, which the broad except below silently swallows — so the
    later checks (including the unconditional filterIDs block) never run for
    that row. Confirm callers always pass a featureType string.
    """
    if filterIDs !=None:
        # Translate the filter file into {lowercased gene symbol: []}.
        filterIDs = getIDsFromFile(filterIDs)
    firstLine = True
    fn = filepath(filename)
    count=0; filter_db={}
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line);
        t = string.split(data,',')
        if firstLine:
            # Header rows with an empty third column are skipped until the
            # real column header line is found.
            if len(t[2]) >0:
                header = t
                firstLine=False
                chr_ind = header.index('CHR')
                pos_ind = header.index('Coordinate_36')
                tss_ind = header.index('UCSC_RefGene_Group')
                gene_name = header.index('UCSC_RefGene_Name')
        else:
            probeID = t[0]
            count+=1
            try: gene_names = string.split(t[gene_name],';')
            except Exception: gene_names = []
            try:
                if chromosome != None:
                    if t[chr_ind] == chromosome:
                        if filterIDs !=None:
                            for gene in gene_names:
                                if string.lower(gene) in filterIDs:
                                    filter_db[probeID]=t[pos_ind]
                        else:
                            filter_db[probeID]=t[pos_ind]
                if 'promoter' in string.lower(featureType):
                    # TSS-associated probes count as promoter probes.
                    if 'TSS' in t[tss_ind]:
                        if filterIDs !=None:
                            for gene in gene_names:
                                if string.lower(gene) in filterIDs:
                                    filter_db[probeID]=t[pos_ind]
                        else:
                            filter_db[probeID]=t[pos_ind]
                if 'mir' in string.lower(featureType) or 'micro' in string.lower(featureType):
                    # microRNA genes are recognized by 'mir'/'let' in the name.
                    if 'mir' in string.lower(t[gene_name]) or 'let' in string.lower(t[gene_name]):
                        if filterIDs !=None:
                            for gene in gene_names:
                                if string.lower(gene) in filterIDs:
                                    filter_db[probeID]=t[pos_ind]
                        else:
                            filter_db[probeID]=t[pos_ind]
                # NOTE(review): this block is unconditional — any probe whose
                # gene matches filterIDs is added regardless of featureType or
                # chromosome; verify this is intended.
                if filterIDs !=None:
                    for gene in gene_names:
                        if string.lower(gene) in filterIDs:
                            filter_db[probeID]=t[pos_ind]
            except Exception:
                pass
    print len(filter_db), 'probes remaining'
    return filter_db
if __name__ == '__main__':
    import getopt
    # Defaults; note the first featureType assignment is immediately
    # overwritten ('promoter' is dead code kept from the original).
    featureType = 'promoter'
    featureType = 'all'
    Species = 'Hs'
    filter_db=None
    chromosome=None
    numRegulated = -1          # minimum high AND low sample counts (--r)
    analysis = 'filter'
    filterIDs = None
    ################ Command-line arguments ################
    if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
        print "Warning! Please designate a methylation beta-value file as input in the command-line"
        print "Example: python methylation.py --i /Users/me/sample1.txt --g /Users/me/human.gtf"
        sys.exit()
    else:
        analysisType = []
        useMultiProcessing=False
        options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','a=','t=','r=','c=','f='])
        for opt, arg in options:
            if opt == '--i': input_file=arg          # beta-value matrix
            elif opt == '--a': analysis=arg          # 'filter' or 'correlate'
            elif opt == '--t': featureType=arg       # 'all', 'promoter', 'mir'...
            elif opt == '--r': numRegulated=int(arg)
            elif opt == '--c': chromosome=arg
            elif opt == '--f': filterIDs=arg         # gene-symbol file path
            else:
                print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
    if analysis == 'filter':
        filename = 'AltDatabase/ucsc/Hs/wgEncodeHaibMethyl450CpgIslandDetails.txt'
        #input_file = '/Volumes/SEQ-DATA/PCBC/Methylation/Methylome70allBValues_aronowAnnotations.txt'
        # Only build a probe filter when some restriction was requested.
        if featureType!= 'all' or chromosome != None or filterIDs!=None:
            filter_db = getRegionType(filename,featureType=featureType,chromosome=chromosome,filterIDs=filterIDs)
        importMethylationData(input_file,filter = filter_db,counts=numRegulated); sys.exit()
        #importAnnotations(methylation_file);sys.exit()
    if analysis == 'correlate':
        ### Performs all pairwise correlations between probes corresponding to a gene
        correlateMethylationData(input_file)
| |
from django.utils.translation import ugettext_lazy as _lazy
# Maximum length for values in the url field of feedback Responses. We don't
# want to use this to validate urls, but we do want to use this to truncate.
URL_LENGTH = 1000
# Maximum length for values in the user-agent field of feedback Responses. We
# don't want to use this to validate user agents, but we do want to use this to
# truncate. With user agents, we risk losing important information at the end
# of the value and also messing up the syntax, but I claim it won't have any
# impact on how we use the data.
USER_AGENT_LENGTH = 1000
# List of (value, name, _lazy(name)) tuples for countries Firefox OS has
# been released in.
# Values are ISO 3166 country codes.
# Names are the country names.
# _lazy(name) causes extract to pick up the name so it can be localized.
FIREFOX_OS_COUNTRIES = [
    (u'CO', 'Colombia', _lazy('Colombia')),
    (u'VE', 'Venezuela', _lazy('Venezuela')),
    (u'PL', 'Poland', _lazy('Poland')),
    (u'ES', 'Spain', _lazy('Spain')),
    (u'BR', 'Brazil', _lazy('Brazil')),
    (u'DE', 'Germany', _lazy('Germany')),
    (u'GR', 'Greece', _lazy('Greece')),
    (u'HU', 'Hungary', _lazy('Hungary')),
    # BUG FIX: Serbia and Montenegro previously both used the deprecated
    # shared code u'CS', so the CODE_TO_COUNTRY dict below silently dropped
    # Serbia (duplicate key clobbered by Montenegro). Use the current
    # ISO 3166-1 alpha-2 codes instead.
    (u'RS', 'Serbia', _lazy('Serbia')),
    (u'ME', 'Montenegro', _lazy('Montenegro')),
    (u'MX', 'Mexico', _lazy('Mexico')),
    (u'PE', 'Peru', _lazy('Peru')),
    (u'UY', 'Uruguay', _lazy('Uruguay')),
]
# Map: ISO code -> {'English': English name, 'native': localizable name}.
CODE_TO_COUNTRY = dict([(mem[0], {'English': mem[1], 'native': mem[2]})
                        for mem in FIREFOX_OS_COUNTRIES])
# List of Firefox OS devices that have been released.
# Plain device-name strings; used for labeling/filtering feedback responses.
FIREFOX_OS_DEVICES = [
    u'ZTE Open',
    u'Alcatel OneTouch Fire',
    u'Geeksphone',
    u'LG Fireweb',
]
# English stopwords (plus product terms like "firefox") ignored during text
# analysis. Stored as a set for O(1) membership tests. The whitespace-split
# literal below yields exactly the same members as the original explicit
# set of string literals.
ANALYSIS_STOPWORDS = set("""
a about above across after afterwards again against all almost alone
along already also although always am among amongst amoungst amount an
and another any anyhow anyone anything anyway anywhere are around as at
b back be became because become becomes becoming been before beforehand
behind being below beside besides between beyond bill both bottom but by
c call can cannot cant co computer con could couldnt cry
d de describe detail do does don't done dont down due during
e each eg eight either eleven else elsewhere empty enough etc even ever
every everyone everything everywhere except
f few fifteen fify fill find fire firefox first five for former formerly
forty found four from front full further
g get give go
h had has hasnt have he hence her here hereafter hereby herein hereupon
hers herse him himse his how however hundred
i ie if in inc indeed interest into is it it's its itse
j k keep
l last latter latterly least less ltd
m made many may me meanwhile might mill mine more moreover most mostly
move much must my myse
n name namely neither never nevertheless next nine no nobody none noone
nor not nothing now nowhere
o of off often on once one only onto or other others otherwise our ours
ourselves out over own
p part per perhaps please put
q
r rather re
s same see seem seemed seeming seems serious several she should show
side since sincere six sixty so some somehow someone something sometime
sometimes somewhere still such system
t take ten than that the their them themselves then thence there
thereafter thereby therefore therein thereupon these they thick thin
third this those though three through throughout thru thus to together
too top toward towards twelve twenty two
u un under until up upon us
v very via
w was we well were what whatever when whence whenever where whereafter
whereas whereby wherein whereupon wherever whether which while whither
who whoever whole whom whose why will with within without would
x y yet you you're your yours yourself yourselves z
""".split())
# This defines the number of characters the description can have. We
# do this in code rather than in the db since it makes it easier to
# tweak the value.
# Descriptions longer than this are truncated, not rejected.
TRUNCATE_LENGTH = 10000
| |
import json
import os
import shutil
import tempfile
import time
import zipfile
from datetime import timedelta
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage
import flufl.lock
import lxml
import mock
import pytest
from defusedxml.common import EntitiesForbidden, NotSupportedError
from olympia import amo
from olympia.amo.tests import TestCase, create_switch
from olympia.applications.models import AppVersion
from olympia.files import utils
from olympia.files.tests.test_helpers import get_file
pytestmark = pytest.mark.django_db
def _touch(fname):
open(fname, 'a').close()
os.utime(fname, None)
class TestExtractor(TestCase):
    """Which manifest wins when utils.Extractor.parse inspects an XPI."""
    def test_no_manifest(self):
        # An XPI with neither install.rdf nor manifest.json is rejected.
        fake_zip = utils.make_xpi({'dummy': 'dummy'})
        with self.assertRaises(forms.ValidationError) as exc:
            utils.Extractor.parse(fake_zip)
        assert exc.exception.message == (
            'No install.rdf or manifest.json found')
    # NOTE: stacked @mock.patch decorators inject mocks bottom-up, so the
    # RDFExtractor mock is always the first extra argument.
    @mock.patch('olympia.files.utils.ManifestJSONExtractor')
    @mock.patch('olympia.files.utils.RDFExtractor')
    def test_parse_install_rdf(self, rdf_extractor, manifest_json_extractor):
        fake_zip = utils.make_xpi({'install.rdf': ''})
        utils.Extractor.parse(fake_zip)
        assert rdf_extractor.called
        assert not manifest_json_extractor.called
    @mock.patch('olympia.files.utils.ManifestJSONExtractor')
    @mock.patch('olympia.files.utils.RDFExtractor')
    def test_ignore_package_json(self, rdf_extractor, manifest_json_extractor):
        # Previously we preferred `package.json` to `install.rdf` which
        # we don't anymore since
        # https://github.com/mozilla/addons-server/issues/2460
        fake_zip = utils.make_xpi({'install.rdf': '', 'package.json': ''})
        utils.Extractor.parse(fake_zip)
        assert rdf_extractor.called
        assert not manifest_json_extractor.called
    @mock.patch('olympia.files.utils.ManifestJSONExtractor')
    @mock.patch('olympia.files.utils.RDFExtractor')
    def test_parse_manifest_json(self, rdf_extractor, manifest_json_extractor):
        fake_zip = utils.make_xpi({'manifest.json': ''})
        utils.Extractor.parse(fake_zip)
        assert not rdf_extractor.called
        assert manifest_json_extractor.called
    @mock.patch('olympia.files.utils.ManifestJSONExtractor')
    @mock.patch('olympia.files.utils.RDFExtractor')
    def test_prefers_manifest_to_install_rdf(self, rdf_extractor,
                                             manifest_json_extractor):
        # manifest.json outranks install.rdf when both are present.
        fake_zip = utils.make_xpi({'install.rdf': '', 'manifest.json': ''})
        utils.Extractor.parse(fake_zip)
        assert not rdf_extractor.called
        assert manifest_json_extractor.called
class TestManifestJSONExtractor(TestCase):
    """Parsing of WebExtension manifest.json files into add-on metadata."""
    def parse(self, base_data):
        # Helper: run the extractor over a dict serialized as manifest.json.
        return utils.ManifestJSONExtractor(
            '/fake_path', json.dumps(base_data)).parse()
    def create_appversion(self, name, version):
        # Helper: register an AppVersion row so min/max lookups succeed.
        return AppVersion.objects.create(application=amo.APPS[name].id,
                                         version=version)
    def create_webext_default_versions(self):
        self.create_appversion('firefox', '36.0')  # Incompatible with webexts.
        self.create_appversion('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION)
        self.create_appversion('firefox', amo.DEFAULT_WEBEXT_MAX_VERSION)
        self.create_appversion('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID)
    def test_instanciate_without_data(self):
        """Without data, we load the data from the file path."""
        data = {'id': 'some-id'}
        fake_zip = utils.make_xpi({'manifest.json': json.dumps(data)})
        extractor = utils.ManifestJSONExtractor(zipfile.ZipFile(fake_zip))
        assert extractor.data == data
    def test_guid(self):
        """Use applications>gecko>id for the guid."""
        assert self.parse(
            {'applications': {
                'gecko': {
                    'id': 'some-id'}}})['guid'] == 'some-id'
    def test_name_for_guid_if_no_id(self):
        """Don't use the name for the guid if there is no id."""
        assert self.parse({'name': 'addon-name'})['guid'] is None
    def test_type(self):
        """manifest.json addons are always ADDON_EXTENSION."""
        assert self.parse({})['type'] == amo.ADDON_EXTENSION
    def test_is_restart_required(self):
        """manifest.json addons never requires restart."""
        assert self.parse({})['is_restart_required'] is False
    def test_name(self):
        """Use name for the name."""
        assert self.parse({'name': 'addon-name'})['name'] == 'addon-name'
    def test_version(self):
        """Use version for the version."""
        assert self.parse({'version': '23.0.1'})['version'] == '23.0.1'
    def test_homepage(self):
        """Use homepage_url for the homepage."""
        assert (
            self.parse({'homepage_url': 'http://my-addon.org'})['homepage'] ==
            'http://my-addon.org')
    def test_summary(self):
        """Use description for the summary."""
        assert (
            self.parse({'description': 'An addon.'})['summary'] == 'An addon.')
    def test_invalid_strict_min_version(self):
        # A non-version string in strict_min_version is rejected.
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': 'A',
                    'id': '@invalid_strict_min_version'
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)
        assert (
            exc.value.message ==
            'Lowest supported "strict_min_version" is 42.0.')
    def test_strict_min_version_needs_to_be_higher_then_42_if_specified(self):
        """strict_min_version needs to be higher than 42.0 if specified."""
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '36.0',
                    'id': '@too_old_strict_min_version'
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)
        assert (
            exc.value.message ==
            'Lowest supported "strict_min_version" is 42.0.')
    def test_apps_use_provided_versions(self):
        """Use the min and max versions if provided."""
        firefox_min_version = self.create_appversion('firefox', '47.0')
        firefox_max_version = self.create_appversion('firefox', '47.*')
        self.create_webext_default_versions()
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=47.0',
                    'strict_max_version': '=47.*',
                    'id': '@random'
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 1
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min == firefox_min_version
        assert app.max == firefox_max_version
    def test_apps_use_default_versions_if_none_provided(self):
        """Use the default min and max versions if none provided."""
        self.create_webext_default_versions()
        data = {'applications': {'gecko': {'id': 'some-id'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 1  # Only Firefox for now.
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
    def test_is_webextension(self):
        assert self.parse({})['is_webextension']
    def test_disallow_static_theme(self):
        # Without the waffle switch, theme uploads are blocked.
        manifest = utils.ManifestJSONExtractor(
            '/fake_path', '{"theme": {}}').parse()
        with pytest.raises(forms.ValidationError) as exc:
            utils.check_xpi_info(manifest)
        assert (
            exc.value.message ==
            'WebExtension theme uploads are currently not supported.')
    def test_allow_static_theme_waffle(self):
        # With the waffle switch on, a theme manifest parses as STATICTHEME.
        create_switch('allow-static-theme-uploads')
        manifest = utils.ManifestJSONExtractor(
            '/fake_path', '{"theme": {}}').parse()
        utils.check_xpi_info(manifest)
        assert self.parse({'theme': {}})['type'] == amo.ADDON_STATICTHEME
    def test_is_e10s_compatible(self):
        data = self.parse({})
        assert data['e10s_compatibility'] == amo.E10S_COMPATIBLE_WEBEXTENSION
    def test_langpack(self):
        data = self.parse({'langpack_id': 'foo'})
        assert data['type'] == amo.ADDON_LPAPP
        assert data['strict_compatibility'] is True
    def test_extensions_dont_have_strict_compatibility(self):
        assert self.parse({})['strict_compatibility'] is False
    def test_moz_signed_extension_no_strict_compat(self):
        addon = amo.tests.addon_factory()
        user = amo.tests.user_factory(email='foo@mozilla.com')
        file_obj = addon.current_version.all_files[0]
        file_obj.update(is_mozilla_signed_extension=True)
        fixture = (
            'src/olympia/files/fixtures/files/'
            'legacy-addon-already-signed-0.1.0.xpi')
        with amo.tests.copy_file(fixture, file_obj.file_path):
            parsed = utils.parse_xpi(file_obj.file_path, user=user)
            assert parsed['is_mozilla_signed_extension']
            assert not parsed['strict_compatibility']
    def test_moz_signed_extension_reuse_strict_compat(self):
        addon = amo.tests.addon_factory()
        user = amo.tests.user_factory(email='foo@mozilla.com')
        file_obj = addon.current_version.all_files[0]
        file_obj.update(is_mozilla_signed_extension=True)
        fixture = (
            'src/olympia/files/fixtures/files/'
            'legacy-addon-already-signed-strict-compat-0.1.0.xpi')
        with amo.tests.copy_file(fixture, file_obj.file_path):
            parsed = utils.parse_xpi(file_obj.file_path, user=user)
            assert parsed['is_mozilla_signed_extension']
            # We set `strictCompatibility` in install.rdf
            assert parsed['strict_compatibility']
    @mock.patch('olympia.addons.models.resolve_i18n_message')
    def test_mozilla_trademark_disallowed(self, resolve_message):
        resolve_message.return_value = 'Notify Mozilla'
        addon = amo.tests.addon_factory()
        file_obj = addon.current_version.all_files[0]
        fixture = (
            'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi')
        with amo.tests.copy_file(fixture, file_obj.file_path):
            with pytest.raises(forms.ValidationError) as exc:
                utils.parse_xpi(file_obj.file_path)
            assert dict(exc.value.messages)['en-us'].startswith(
                u'Add-on names cannot contain the Mozilla or'
            )
    @mock.patch('olympia.addons.models.resolve_i18n_message')
    def test_mozilla_trademark_for_prefix_allowed(self, resolve_message):
        # "for Mozilla" (as opposed to bare "Mozilla") is permitted.
        resolve_message.return_value = 'Notify for Mozilla'
        addon = amo.tests.addon_factory()
        file_obj = addon.current_version.all_files[0]
        fixture = (
            'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi')
        with amo.tests.copy_file(fixture, file_obj.file_path):
            utils.parse_xpi(file_obj.file_path)
    def test_apps_use_default_versions_if_applications_is_omitted(self):
        """
        WebExtensions are allowed to omit `applications[/gecko]` and we
        previously skipped defaulting to any `AppVersion` once this is not
        defined. That resulted in none of our platforms being selectable.
        See https://github.com/mozilla/addons-server/issues/2586 and
        probably many others.
        """
        self.create_webext_default_versions()
        data = {}
        apps = self.parse(data)['apps']
        assert len(apps) == 1
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        # We support Android by default too
        self.create_appversion(
            'android', amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID)
        self.create_appversion('android', amo.DEFAULT_WEBEXT_MAX_VERSION)
        apps = self.parse(data)['apps']
        assert apps[0].appdata == amo.FIREFOX
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
        assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
    def test_handle_utf_bom(self):
        # Manifest prefixed with a UTF-8 BOM must still parse.
        manifest = '\xef\xbb\xbf{"manifest_version": 2, "name": "..."}'
        parsed = utils.ManifestJSONExtractor(None, manifest).parse()
        assert parsed['name'] == '...'
    def test_raise_error_if_no_optional_id_support(self):
        """
        We only support optional ids in Firefox 48+ and will throw an error
        otherwise.
        """
        self.create_webext_default_versions()
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '42.0',
                    'strict_max_version': '49.0',
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)['apps']
        assert (
            exc.value.message ==
            'GUID is required for Firefox 47 and below.')
    def test_comments_are_allowed(self):
        # JSON comments (non-standard) are tolerated in manifest.json.
        json_string = """
        {
            // Required
            "manifest_version": 2,
            "name": "My Extension",
            "version": "versionString",
            // Recommended
            "default_locale": "en",
            "description": "A plain text description"
        }
        """
        manifest = utils.ManifestJSONExtractor(
            '/fake_path', json_string).parse()
        assert manifest['is_webextension'] is True
        assert manifest.get('name') == 'My Extension'
    def test_apps_contains_wrong_versions(self):
        """Use the min and max versions if provided."""
        self.create_webext_default_versions()
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '47.0.0',
                    'id': '@random'
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)['apps']
        assert exc.value.message.startswith('Cannot find min/max version.')
class TestManifestJSONExtractorStaticTheme(TestManifestJSONExtractor):
    """Re-run the manifest.json extractor tests with a theme manifest.

    parse() injects a 'theme' key, so every inherited test exercises the
    static-theme code path; version-related tests are overridden because
    themes have a higher minimum Firefox version.
    """
    def parse(self, base_data):
        if 'theme' not in base_data.keys():
            base_data.update(theme={})
        return super(
            TestManifestJSONExtractorStaticTheme, self).parse(base_data)
    def test_type(self):
        assert self.parse({})['type'] == amo.ADDON_STATICTHEME
    def create_webext_default_versions(self):
        # Themes additionally need their own (higher) minimum version row.
        self.create_appversion('firefox',
                               amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        return (super(TestManifestJSONExtractorStaticTheme, self)
                .create_webext_default_versions())
    def test_apps_use_default_versions_if_applications_is_omitted(self):
        """
        Override this because static themes have a higher default version.
        """
        self.create_webext_default_versions()
        data = {}
        apps = self.parse(data)['apps']
        assert len(apps) == 1
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == (
            amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        # Static themes don't support Android yet. So check they aren't there.
        self.create_appversion(
            'android', amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID)
        self.create_appversion(
            'android', amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        self.create_appversion('android', amo.DEFAULT_WEBEXT_MAX_VERSION)
        assert apps == self.parse(data)['apps']  # Same as before.
    def test_apps_use_default_versions_if_none_provided(self):
        """Use the default min and max versions if none provided."""
        self.create_webext_default_versions()
        data = {'applications': {'gecko': {'id': 'some-id'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 1  # Only Firefox for now.
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min.version == amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
    def test_apps_use_provided_versions(self):
        """Use the min and max versions if provided."""
        firefox_min_version = self.create_appversion('firefox', '54.0')
        firefox_max_version = self.create_appversion('firefox', '54.*')
        self.create_webext_default_versions()
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=54.0',
                    'strict_max_version': '=54.*',
                    'id': '@random'
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 1
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min == firefox_min_version
        assert app.max == firefox_max_version
    def test_apps_contains_wrong_versions(self):
        """Use the min and max versions if provided."""
        self.create_webext_default_versions()
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '54.0.0',
                    'id': '@random'
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)['apps']
        assert exc.value.message.startswith('Cannot find min/max version.')
    def test_theme_json_extracted(self):
        # Check theme data is extracted from the manifest and returned.
        data = {'theme': {'colors': {'textcolor': "#3deb60"}}}
        assert self.parse(data)['theme'] == data['theme']
def test_zip_folder_content():
    """Round-trip an XPI through extract_zip/zip_folder_content and check
    the archive entries survive unchanged."""
    source_xpi = 'src/olympia/files/fixtures/files/extension.xpi'
    zipped_path, unpacked_dir = None, None
    try:
        unpacked_dir = utils.extract_zip(source_xpi)
        assert sorted(os.listdir(unpacked_dir)) == [
            'chrome', 'chrome.manifest', 'install.rdf']
        zipped_path = amo.tests.get_temp_filename()
        utils.zip_folder_content(unpacked_dir, zipped_path)
        # The re-zipped archive must list exactly the original entries.
        with zipfile.ZipFile(zipped_path, mode='r') as new:
            with zipfile.ZipFile(source_xpi, mode='r') as orig:
                assert sorted(new.namelist()) == sorted(orig.namelist())
    finally:
        # Clean up whatever got created, even on failure.
        if unpacked_dir is not None and os.path.exists(unpacked_dir):
            amo.utils.rm_local_tmp_dir(unpacked_dir)
        if zipped_path is not None and os.path.exists(zipped_path):
            os.unlink(zipped_path)
def test_repack():
    """repack() exposes the unzipped XPI and re-zips it (with any edits)
    back over the original file when the context exits."""
    fixture = 'src/olympia/files/fixtures/files/extension.xpi'
    # Work on a throwaway copy so the fixture itself is never modified.
    with amo.tests.copy_file_to_temp(fixture) as working_copy:
        with utils.repack(working_copy) as unpacked:
            # The context exposes the extracted XPI contents.
            assert sorted(os.listdir(unpacked)) == [
                'chrome', 'chrome.manifest', 'install.rdf']
            # Drop in an extra file; it should survive the repack.
            with open(os.path.join(unpacked, 'foo.bar'), 'w') as handle:
                handle.write('foobar')
        # The scratch folder is cleaned up once repacking finishes...
        assert not os.path.exists(unpacked)
        # ...and the new file made it into the rebuilt archive.
        assert os.path.exists(working_copy)
        with zipfile.ZipFile(working_copy, mode='r') as zf:
            assert 'foo.bar' in zf.namelist()
            assert zf.read('foo.bar') == 'foobar'
@pytest.fixture
def file_obj():
    """Provide the first file of a freshly created add-on version."""
    addon = amo.tests.addon_factory()
    addon.update(guid='xxxxx')
    return addon.current_version.all_files[0]
def test_bump_version_in_install_rdf(file_obj):
    """update_version_number() rewrites the version inside install.rdf."""
    fixture = 'src/olympia/files/fixtures/files/jetpack.xpi'
    with amo.tests.copy_file(fixture, file_obj.file_path):
        utils.update_version_number(file_obj, '1.3.1-signed')
        parsed = utils.parse_xpi(file_obj.file_path)
        assert parsed['version'] == '1.3.1-signed'
def test_bump_version_in_alt_install_rdf(file_obj):
    """Version bumping also works for the alternate install.rdf layout."""
    fixture = 'src/olympia/files/fixtures/files/alt-rdf.xpi'
    with amo.tests.copy_file(fixture, file_obj.file_path):
        utils.update_version_number(file_obj, '2.1.106.1-signed')
        parsed = utils.parse_xpi(file_obj.file_path)
        assert parsed['version'] == '2.1.106.1-signed'
def test_bump_version_in_package_json(file_obj):
    """update_version_number() rewrites the version inside package.json."""
    fixture = 'src/olympia/files/fixtures/files/new-format-0.0.1.xpi'
    with amo.tests.copy_file(fixture, file_obj.file_path):
        utils.update_version_number(file_obj, '0.0.1.1-signed')
        with zipfile.ZipFile(file_obj.file_path, 'r') as source:
            manifest = json.loads(source.read('package.json'))
        assert manifest['version'] == '0.0.1.1-signed'
def test_bump_version_in_manifest_json(file_obj):
    """update_version_number() rewrites the version inside manifest.json."""
    fixture = 'src/olympia/files/fixtures/files/webextension.xpi'
    with amo.tests.copy_file(fixture, file_obj.file_path):
        utils.update_version_number(file_obj, '0.0.1.1-signed')
        parsed = utils.parse_xpi(file_obj.file_path)
        assert parsed['version'] == '0.0.1.1-signed'
def test_extract_translations_simple(file_obj):
    """All locales bundled inside the XPI are discovered."""
    fixture = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
    with amo.tests.copy_file(fixture, file_obj.file_path):
        found = utils.extract_translations(file_obj)
        assert sorted(found.keys()) == [
            'de', 'en-US', 'ja', 'nb-NO', 'nl', 'ru', 'sv-SE']
@mock.patch('olympia.files.utils.zipfile.ZipFile.read')
def test_extract_translations_fail_silent_invalid_file(read_mock, file_obj):
    """Known read failures are swallowed; anything unexpected propagates."""
    fixture = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
    with amo.tests.copy_file(fixture, file_obj.file_path):
        # Missing archive entries, I/O problems and invalid JSON (which is
        # addons-linter's domain) are all silently ignored.
        for benign in (KeyError, IOError, ValueError):
            read_mock.side_effect = benign
            utils.extract_translations(file_obj)
        # Anything else still bubbles up.
        read_mock.side_effect = TypeError
        with pytest.raises(TypeError):
            utils.extract_translations(file_obj)
def test_get_all_files():
    """get_all_files() walks the tree and returns sorted absolute paths."""
    base = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(base, 'dir1'))
    _touch(os.path.join(base, 'foo1'))
    _touch(os.path.join(base, 'dir1', 'foo2'))
    expected = [os.path.join(base, tail) for tail in
                ('dir1', os.path.join('dir1', 'foo2'), 'foo1')]
    assert utils.get_all_files(base) == expected
    shutil.rmtree(base)
    assert not os.path.exists(base)
def test_get_all_files_strip_prefix_no_prefix_silent():
    """strip_prefix without a prefix leaves the returned paths untouched."""
    base = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(base, 'dir1'))
    _touch(os.path.join(base, 'foo1'))
    _touch(os.path.join(base, 'dir1', 'foo2'))
    expected = [os.path.join(base, tail) for tail in
                ('dir1', os.path.join('dir1', 'foo2'), 'foo1')]
    assert utils.get_all_files(base, strip_prefix=base) == expected
def test_get_all_files_prefix():
    """A prefix is prepended verbatim to every returned path."""
    base = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(base, 'dir1'))
    _touch(os.path.join(base, 'foo1'))
    _touch(os.path.join(base, 'dir1', 'foo2'))
    expected = ['/foo/bar' + os.path.join(base, tail) for tail in
                ('dir1', os.path.join('dir1', 'foo2'), 'foo1')]
    assert utils.get_all_files(base, prefix='/foo/bar') == expected
def test_get_all_files_prefix_with_strip_prefix():
    """strip_prefix + prefix together rebase paths onto the new prefix."""
    base = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(base, 'dir1'))
    _touch(os.path.join(base, 'foo1'))
    _touch(os.path.join(base, 'dir1', 'foo2'))
    rebased = utils.get_all_files(base, strip_prefix=base, prefix='/foo/bar')
    assert rebased == [
        os.path.join('/foo', 'bar', 'dir1'),
        os.path.join('/foo', 'bar', 'dir1', 'foo2'),
        os.path.join('/foo', 'bar', 'foo1'),
    ]
def test_atomic_lock_with():
    """atomic_lock() yields False while another claim holds the lock and
    True once it has been released."""
    other = flufl.lock.Lock('/tmp/test-atomic-lock1.lock')
    assert not other.is_locked
    other.lock()
    assert other.is_locked
    with utils.atomic_lock('/tmp/', 'test-atomic-lock1') as acquired:
        assert not acquired
    other.unlock()
    with utils.atomic_lock('/tmp/', 'test-atomic-lock1') as acquired:
        assert acquired
def test_atomic_lock_with_lock_attained():
    """An uncontested lock is acquired immediately."""
    with utils.atomic_lock('/tmp/', 'test-atomic-lock2') as acquired:
        assert acquired
# CLOCK_SLOP is zeroed so the 1-second lock lifetime below expires exactly
# on schedule instead of being padded by flufl.lock's clock-skew allowance.
@mock.patch.object(flufl.lock._lockfile, 'CLOCK_SLOP', timedelta(seconds=0))
def test_atomic_lock_lifetime():
    """A held atomic_lock blocks other claims until its lifetime expires."""
    def _get_lock():
        # Helper: a 1-second-lifetime lock on the same name.
        return utils.atomic_lock('/tmp/', 'test-atomic-lock3', lifetime=1)
    with _get_lock() as lock_attained:
        assert lock_attained
        lock2 = flufl.lock.Lock('/tmp/test-atomic-lock3.lock')
        with pytest.raises(flufl.lock.TimeOutError):
            # We have to apply `timedelta` to actually raise an exception,
            # otherwise `.lock()` will wait for 2 seconds and get the lock
            # for us. We get a `TimeOutError` because we were locking
            # with a different claim file
            lock2.lock(timeout=timedelta(seconds=0))
        with _get_lock() as lock_attained2:
            assert not lock_attained2
        # After the 1-second lifetime has elapsed the lock is claimable.
        time.sleep(2)
        with _get_lock() as lock_attained2:
            assert lock_attained2
def test_parse_search_empty_shortname():
    """parse_search rejects provider files lacking a <ShortName> element."""
    provider = get_file('search_empty_shortname.xml')
    with pytest.raises(forms.ValidationError) as excinfo:
        utils.parse_search(provider)
    assert (
        excinfo.value[0] ==
        'Could not parse uploaded file, missing or empty <ShortName> element')
class TestResolvei18nMessage(object):
    """Behavior of utils.resolve_i18n_message for webextension locales."""
    def test_no_match(self):
        # Plain strings (no __MSG_ wrapper) pass through untouched.
        assert utils.resolve_i18n_message('foo', {}, '') == 'foo'
    def test_locale_found(self):
        catalog = {'de': {'foo': {'message': 'bar'}}}
        assert utils.resolve_i18n_message(
            '__MSG_foo__', catalog, 'de') == 'bar'
    def test_uses_default_locale(self):
        catalog = {'en-US': {'foo': {'message': 'bar'}}}
        assert utils.resolve_i18n_message(
            '__MSG_foo__', catalog, 'de', 'en') == 'bar'
    def test_no_locale_match(self):
        # Neither the requested nor the default locale exists, so the
        # placeholder is returned unchanged.
        catalog = {'fr': {'foo': {'message': 'bar'}}}
        assert utils.resolve_i18n_message(
            '__MSG_foo__', catalog, 'de', 'en') == '__MSG_foo__'
    def test_field_not_set(self):
        """Make sure we don't fail on messages that are `None`
        Fixes https://github.com/mozilla/addons-server/issues/3067
        """
        assert utils.resolve_i18n_message(None, {}, 'de', 'en') is None
    def test_field_no_string(self):
        """Make sure we don't fail on messages that are no strings"""
        assert utils.resolve_i18n_message([], {}, 'de', 'en') == []
    def test_corrects_locales(self):
        # 'en' is normalized to the catalog's 'en-US' entry.
        catalog = {'en-US': {'foo': {'message': 'bar'}}}
        assert utils.resolve_i18n_message(
            '__MSG_foo__', catalog, 'en') == 'bar'
    def test_ignore_wrong_format(self):
        # Entries must be dicts carrying a 'message' key; plain strings
        # leave the placeholder unresolved.
        catalog = {'en-US': {'foo': 'bar'}}
        assert utils.resolve_i18n_message(
            '__MSG_foo__', catalog, 'en') == '__MSG_foo__'
class TestXMLVulnerabilities(TestCase):
    """Test a few known vulnerabilities to make sure
    our defusedxml patching is applied automatically.
    This doesn't replicate all defusedxml tests.
    """
    def test_quadratic_xml(self):
        # Quadratic blowup (many references to one large entity) must be
        # rejected before expansion.
        quadratic_xml = os.path.join(
            os.path.dirname(__file__), '..', 'fixtures', 'files',
            'quadratic.xml')
        with pytest.raises(EntitiesForbidden):
            utils.extract_search(quadratic_xml)
    def test_general_entity_expansion_is_disabled(self):
        zip_file = utils.SafeZip(os.path.join(
            os.path.dirname(__file__), '..', 'fixtures', 'files',
            'xxe-example-install.zip'))
        zip_file.is_valid()
        # This asserts that the malicious install.rdf blows up with
        # a parse error. If it gets as far as this specific parse error
        # it means that the external entity was not processed.
        #
        # Before the patch in files/utils.py, this would raise an IOError
        # from the test suite refusing to make an external HTTP request to
        # the entity ref.
        with pytest.raises(EntitiesForbidden):
            utils.RDFExtractor(zip_file)
    def test_lxml_XMLParser_no_resolve_entities(self):
        # Asking lxml to resolve entities must be refused outright.
        with pytest.raises(NotSupportedError):
            lxml.etree.XMLParser(resolve_entities=True)
        # not setting it works
        lxml.etree.XMLParser()
        # Setting it explicitly to `False` is fine too.
        lxml.etree.XMLParser(resolve_entities=False)
def test_extract_header_img():
    """extract_header_img copies the declared headerURL image to dest_path."""
    source = os.path.join(
        settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
    data = {'images': {'headerURL': 'weta.png'}}
    destination = tempfile.mkdtemp()
    expected_file = destination + '/weta.png'
    assert not default_storage.exists(expected_file)

    utils.extract_header_img(source, data, destination)

    assert default_storage.exists(expected_file)
    assert default_storage.size(expected_file) == 126447
def test_extract_header_img_missing():
    """A headerURL that names a file absent from the xpi writes nothing."""
    source = os.path.join(
        settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
    data = {'images': {'headerURL': 'missing_file.png'}}
    destination = tempfile.mkdtemp()
    expected_file = destination + '/missing_file.png'
    assert not default_storage.exists(expected_file)

    utils.extract_header_img(source, data, destination)

    assert not default_storage.exists(expected_file)
def test_extract_header_with_additional_imgs():
    """Header plus each additional background present in the xpi gets
    extracted; an entry missing from the archive does not break the rest."""
    source = os.path.join(
        settings.ROOT,
        'src/olympia/devhub/tests/addons/static_theme_tiled.zip')
    data = {'images': {
        'headerURL': 'empty.png',
        'additional_backgrounds': [
            'transparent.gif', 'missing_&_ignored.png', 'weta_for_tiling.png']
    }}
    destination = tempfile.mkdtemp()
    expected = [
        (destination + '/empty.png', 332),
        (destination + '/transparent.gif', 42),
        (destination + '/weta_for_tiling.png', 93371),
    ]
    for path, _ in expected:
        assert not default_storage.exists(path)

    utils.extract_header_img(source, data, destination)

    for path, size in expected:
        assert default_storage.exists(path)
        assert default_storage.size(path) == size
| |
# Copyright 2017 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from flask import Flask
from fake_ubersmith.api.adapters.data_store import DataStore
from fake_ubersmith.api.methods.client import Client
from fake_ubersmith.api.ubersmith import FakeUbersmithError, UbersmithBase
class TestClientModule(unittest.TestCase):
    """Tests for the fake Ubersmith `client.*` API methods, exercised
    through a real Flask test client posting to the api/2.0/ endpoint."""

    def setUp(self):
        self.data_store = DataStore()
        self.client = Client(self.data_store)
        self.app = Flask(__name__)
        self.base_uber_api = UbersmithBase(self.data_store)
        self.client.hook_to(self.base_uber_api)
        self.base_uber_api.hook_to(self.app)

    def test_client_add_creates_a_client(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.add",
                    "first": "John",
                    "last": "Smith",
                    "email": "john.smith@invalid.com",
                    "uber_login": "john",
                    "uber_pass": "smith"
                }
            )

        body = json.loads(resp.data.decode('utf-8'))

        # NOTE: the status assertion was previously duplicated before and
        # after decoding the body; asserting once is enough.
        self.assertEqual(resp.status_code, 200)
        self.assertIsNone(body.get("error_code"))
        self.assertTrue(body.get("status"))
        self.assertEqual(body.get("error_message"), "")
        self.assertIsInstance(body.get("data"), str)
        self.assertEqual(self.data_store.clients[0]["login"], "john")
        self.assertIsInstance(self.data_store.contacts[0]["contact_id"], str)
        self.assertEqual(self.data_store.contacts[0]["client_id"], body.get("data"))
        self.assertEqual(self.data_store.contacts[0]["description"], "Primary Contact")

    def test_client_get_returns_successfully(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.add",
                    "first": "John",
                }
            )
        client_id = json.loads(resp.data.decode('utf-8')).get("data")

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.get", "client_id": client_id}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": {"clientid": client_id, "first": "John"},
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_get_with_user_login_returns_successfully(self):
        self.data_store.clients = [{"clientid": "1", "contact_id": '0'}]

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.get", "user_login": "1"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": {"clientid": "1"},
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_get_errs_when_no_match(self):
        self.data_store.clients = [{"clientid": "100"}]

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.get", "client_id": "1"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": "",
                "error_code": 1,
                "error_message": "Client ID '1' not found.",
                "status": False
            }
        )

    def test_client_contact_add_creates_a_contact(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.contact_add",
                    "first": "John",
                    "last": "Smith",
                    "email": "john.smith@invalid.com",
                    "uber_login": "john",
                    "uber_pass": "smith"
                }
            )

        body = json.loads(resp.data.decode('utf-8'))

        self.assertEqual(resp.status_code, 200)
        self.assertIsNone(body.get("error_code"))
        self.assertTrue(body.get("status"))
        self.assertEqual(body.get("error_message"), "")
        self.assertIsInstance(body.get("data"), str)

    def test_client_contact_get_returns_error_when_empty_payload_provided(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.contact_get",
                }
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": "",
                "error_code": 1,
                "error_message": "No contact ID specified",
                "status": False
            }
        )

    def test_client_contact_get_with_bad_contact_id_returns_error(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.contact_get",
                    "contact_id": "bad"
                }
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": "",
                "error_code": 1,
                "error_message": "Invalid contact_id specified.",
                "status": False
            }
        )

    def test_client_contact_get_with_bad_user_login_returns_error(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.contact_get",
                    "user_login": "bad"
                }
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": "",
                "error_code": 1,
                "error_message": "Invalid user_login specified.",
                "status": False
            }
        )

    def test_client_contact_get_with_contact_id_returns_a_contact(self):
        a_contact = {"contact_id": "100"}
        self.data_store.contacts.append(a_contact)

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.contact_get",
                    "contact_id": "100",
                }
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": a_contact,
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_contact_list_returns_contacts_for_given_client_id(self):
        # Two contacts share client_id 100; contact_2 belongs to another
        # client and must not appear in the listing.
        contact_1 = {
            "contact_id": '1',
            "client_id": '100',
            "real_name": "John Patate",
            "email": "john.patate@fake.invalid"
        }
        contact_2 = {
            "contact_id": '1',
            "client_id": '101',
            "real_name": "The Dude",
            "email": "the.dude@fake.invalid"
        }
        contact_3 = {
            "contact_id": '2',
            "client_id": '100',
            "real_name": "Joe Poutine",
            "email": "joe.poutine@fake.invalid"
        }
        self.data_store.contacts.extend([contact_1, contact_2, contact_3])

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.contact_list",
                    "client_id": "100",
                }
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": {'1': contact_1, '2': contact_3},
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_contact_list_with_bad_client_id_returns_error(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.contact_list",
                    "client_id": "does_not_exist"
                }
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": "",
                "error_code": 1,
                "error_message": "Invalid client_id specified.",
                "status": False
            }
        )

    def test_client_contact_get_with_user_login_returns_a_contact(self):
        a_contact = {
            "contact_id": "100",
            "first": "John",
            "last": "Smith",
            "email": "john.smith@invalid.com",
            "uber_login": "john",
        }
        self.data_store.contacts.append(a_contact)

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={
                    "method": "client.contact_get",
                    "user_login": "john",
                }
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": a_contact,
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_cc_add_is_successful(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.cc_add", "client_id": "1"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": 1,
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_cc_add_fails_returns_error(self):
        self.client.credit_card_response = FakeUbersmithError(999, 'oh fail')

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.cc_add", "client_id": "1"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": "",
                "error_code": 999,
                "error_message": "oh fail",
                "status": False
            }
        )

    def test_client_cc_update_is_successful(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.cc_update", "client_id": "1"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": True,
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_cc_update_fails_returns_error(self):
        self.client.credit_card_response = FakeUbersmithError(999, 'oh fail')

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.cc_update", "client_id": "1"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": "",
                "error_code": 999,
                "error_message": "oh fail",
                "status": False
            }
        )

    def test_client_cc_info_with_billing_info_id(self):
        self.data_store.credit_cards = [{"billing_info_id": "123"}]

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.cc_info", "billing_info_id": "123"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": {"123": {"billing_info_id": "123"}},
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_cc_info_with_client_id(self):
        self.data_store.credit_cards = [
            {
                "clientid": "1",
                "billing_info_id": "123"
            }
        ]

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.cc_info", "client_id": "1"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": {"123": {"billing_info_id": "123", "clientid": "1"}},
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_cc_info_fails(self):
        self.data_store.credit_cards = [
            {
                "clientid": "1",
                "billing_info_id": "123"
            }
        ]

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.cc_info", "bogus": "thing"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": "",
                "error_code": 1,
                "error_message":
                    "request failed: client_id parameter not supplied",
                "status": False
            }
        )

    def test_client_cc_delete_is_successful(self):
        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.cc_delete", "client_id": "1"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": True,
                "error_code": None,
                "error_message": "",
                "status": True
            }
        )

    def test_client_cc_delete_fails(self):
        self.client.credit_card_delete_response = FakeUbersmithError(
            999, 'oh fail'
        )

        with self.app.test_client() as c:
            resp = c.post(
                'api/2.0/',
                data={"method": "client.cc_delete", "client_id": "1"}
            )

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            json.loads(resp.data.decode('utf-8')),
            {
                "data": "",
                "error_code": 999,
                "error_message": "oh fail",
                "status": False
            }
        )
| |
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder common internal object model"""
import collections
import contextlib
import copy
import datetime
import functools
import traceback
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE
from cinder import objects
from cinder.objects import fields
from cinder.openstack.common import versionutils
from cinder import utils
LOG = logging.getLogger('object')
class NotSpecifiedSentinel(object):
    """Sentinel used to distinguish "argument not supplied" from None
    (see CinderObjectDictCompat.get)."""
    pass
def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    return '_%s' % name
def make_class_properties(cls):
    """Attach coercing property descriptors for every field on `cls`.

    First merges `fields` dicts inherited from all super classes into the
    class's own copy, then replaces each field name with a property whose
    getter lazily loads unset attributes via obj_load_attr() and whose
    setter coerces the value through the field and records the name in
    `_changed_fields`.

    :raises: exception.ObjectFieldInvalid if a declared field is not a
             fields.Field instance.
    """
    # NOTE(danms/comstud): Inherit fields from super classes.
    # mro() returns the current class first and returns 'object' last, so
    # those can be skipped. Also be careful to not overwrite any fields
    # that already exist. And make sure each cls has its own copy of
    # fields and that it is not sharing the dict with a super class.
    cls.fields = dict(cls.fields)
    for supercls in cls.mro()[1:-1]:
        if not hasattr(supercls, 'fields'):
            continue
        for name, field in supercls.fields.items():
            if name not in cls.fields:
                cls.fields[name] = field
    # Use items() here for consistency with the inheritance loop above
    # (and Python 3 compatibility); iteritems() was Python 2 only.
    for name, field in cls.fields.items():
        if not isinstance(field, fields.Field):
            raise exception.ObjectFieldInvalid(
                field=name, objname=cls.obj_name())

        def getter(self, name=name):
            attrname = get_attrname(name)
            # Lazy-load: let the object fetch the attribute on first access.
            if not hasattr(self, attrname):
                self.obj_load_attr(name)
            return getattr(self, attrname)

        def setter(self, value, name=name, field=field):
            attrname = get_attrname(name)
            field_value = field.coerce(self, name, value)
            if field.read_only and hasattr(self, attrname):
                # Note(yjiang5): _from_db_object() may iterate
                # every field and write, no exception in such situation.
                if getattr(self, attrname) != field_value:
                    raise exception.ReadOnlyFieldError(field=name)
                else:
                    return

            self._changed_fields.add(name)
            try:
                return setattr(self, attrname, field_value)
            except Exception:
                attr = "%s.%s" % (self.obj_name(), name)
                LOG.exception(_LE('Error setting %(attr)s'), {'attr': attr})
                raise

        setattr(cls, name, property(getter, setter))
class CinderObjectMetaclass(type):
    """Metaclass that allows tracking of object classes."""

    # NOTE(danms): This is what controls whether object operations are
    # remoted. If this is not None, use it to remote things over RPC.
    indirection_api = None

    def __init__(cls, names, bases, dict_):
        # Register every subclass in the shared _obj_classes registry,
        # keyed by obj_name() and ordered newest-version-first.
        if not hasattr(cls, '_obj_classes'):
            # This means this is a base class using the metaclass. I.e.,
            # the 'CinderObject' class.
            cls._obj_classes = collections.defaultdict(list)
            return

        def _vers_tuple(obj):
            # '1.2' -> (1, 2) so versions compare numerically, not lexically.
            return tuple([int(x) for x in obj.VERSION.split(".")])

        # Add the subclass to CinderObject._obj_classes. If the
        # same version already exists, replace it. Otherwise,
        # keep the list with newest version first.
        make_class_properties(cls)
        obj_name = cls.obj_name()
        for i, obj in enumerate(cls._obj_classes[obj_name]):
            if cls.VERSION == obj.VERSION:
                cls._obj_classes[obj_name][i] = cls
                # Update cinder.objects with this newer class.
                setattr(objects, obj_name, cls)
                break
            if _vers_tuple(cls) > _vers_tuple(obj):
                # Insert before.
                cls._obj_classes[obj_name].insert(i, cls)
                if i == 0:
                    # Later version than we've seen before. Update
                    # cinder.objects.
                    setattr(objects, obj_name, cls)
                break
        else:
            cls._obj_classes[obj_name].append(cls)
            # Either this is the first time we've seen the object or it's
            # an older version than anything we've seen. Update cinder.objects
            # only if it's the first time we've seen this object name.
            if not hasattr(objects, obj_name):
                setattr(objects, obj_name, cls)
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object. Instead,
# the object implementation on the remote end will perform the
# requested action and the result will be returned here.
def remotable_classmethod(fn):
    """Decorator for remotable classmethods."""
    @functools.wraps(fn)
    def _dispatch(cls, context, *args, **kwargs):
        indirection = CinderObject.indirection_api
        if indirection:
            # Forward the call over RPC instead of running it locally.
            result = indirection.object_class_action(
                context, cls.obj_name(), fn.__name__, cls.VERSION,
                args, kwargs)
        else:
            result = fn(cls, context, *args, **kwargs)
        # Stash the caller's context on any object we hand back.
        if isinstance(result, CinderObject):
            result._context = context
        return result

    # NOTE(danms): Make this discoverable
    _dispatch.remotable = True
    _dispatch.original_fn = fn
    return classmethod(_dispatch)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
    """Decorator for remotable object methods."""
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        ctxt = self._context
        try:
            # An explicit context passed as the first positional argument
            # overrides the one stashed on the object.
            if isinstance(args[0], (context.RequestContext)):
                ctxt = args[0]
                args = args[1:]
        except IndexError:
            pass
        if ctxt is None:
            raise exception.OrphanedObjectError(method=fn.__name__,
                                                objtype=self.obj_name())
        # Force this to be set if it wasn't before.
        self._context = ctxt
        if CinderObject.indirection_api:
            updates, result = CinderObject.indirection_api.object_action(
                ctxt, self, fn.__name__, args, kwargs)
            # Apply any field changes the remote side made to the object.
            for key, value in updates.iteritems():
                if key in self.fields:
                    field = self.fields[key]
                    # NOTE(ndipanov): Since CinderObjectSerializer will have
                    # deserialized any object fields into objects already,
                    # we do not try to deserialize them again here.
                    if isinstance(value, CinderObject):
                        self[key] = value
                    else:
                        self[key] = field.from_primitive(self, key, value)
            # The remote side's notion of what changed replaces ours.
            self.obj_reset_changes()
            self._changed_fields = set(updates.get('obj_what_changed', []))
            return result
        else:
            return fn(self, ctxt, *args, **kwargs)
    wrapper.remotable = True
    wrapper.original_fn = fn
    return wrapper
@six.add_metaclass(CinderObjectMetaclass)
class CinderObject(object):
    """Base class and object factory.

    This forms the base of all objects that can be remoted or instantiated
    via RPC. Simply defining a class that inherits from this base class
    will make it remotely instantiatable. Objects should implement the
    necessary "get" classmethod routines as well as "save" object methods
    as appropriate.
    """
    # Object versioning rules
    #
    # Each service has its set of objects, each with a version attached. When
    # a client attempts to call an object method, the server checks to see if
    # the version of that object matches (in a compatible way) its object
    # implementation. If so, cool, and if not, fail.
    #
    # This version is allowed to have three parts, X.Y.Z, where the .Z element
    # is reserved for stable branch backports. The .Z is ignored for the
    # purposes of triggering a backport, which means anything changed under
    # a .Z must be additive and non-destructive such that a node that knows
    # about X.Y can consider X.Y.Z equivalent.
    VERSION = '1.0'

    # The fields present in this object as key:field pairs. For example:
    #
    # fields = { 'foo': fields.IntegerField(),
    #            'bar': fields.StringField(),
    #          }
    fields = {}
    obj_extra_fields = []

    # Table of sub-object versioning information
    #
    # This contains a list of version mappings, by the field name of
    # the subobject. The mappings must be in order of oldest to
    # newest, and are tuples of (my_version, subobject_version). A
    # request to backport this object to $my_version will cause the
    # subobject to be backported to $subobject_version.
    #
    # obj_relationships = {
    #     'subobject1': [('1.2', '1.1'), ('1.4', '1.2')],
    #     'subobject2': [('1.2', '1.0')],
    # }
    #
    # In the above example:
    #
    # - If we are asked to backport our object to version 1.3,
    #   subobject1 will be backported to version 1.1, since it was
    #   bumped to version 1.2 when our version was 1.4.
    # - If we are asked to backport our object to version 1.5,
    #   no changes will be made to subobject1 or subobject2, since
    #   they have not changed since version 1.4.
    # - If we are asked to backlevel our object to version 1.1, we
    #   will remove both subobject1 and subobject2 from the primitive,
    #   since they were not added until version 1.2.
    obj_relationships = {}

    def __init__(self, context=None, **kwargs):
        """Initialize with an optional request context; any kwargs are set
        as fields (going through the coercing property setters)."""
        self._changed_fields = set()
        self._context = context
        for key in kwargs.keys():
            setattr(self, key, kwargs[key])

    def __repr__(self):
        # Fields that are not set render as '<?>' rather than triggering
        # obj_load_attr.
        return '%s(%s)' % (
            self.obj_name(),
            ','.join(['%s=%s' % (name,
                                 (self.obj_attr_is_set(name) and
                                  field.stringify(getattr(self, name)) or
                                  '<?>'))
                      for name, field in sorted(self.fields.items())]))

    @classmethod
    def obj_name(cls):
        """Return a canonical name for this object.

        The canonical name will be used over the wire for remote hydration.
        """
        return cls.__name__

    @classmethod
    def obj_class_from_name(cls, objname, objver):
        """Returns a class from the registry based on a name and version."""
        if objname not in cls._obj_classes:
            LOG.error(_LE('Unable to instantiate unregistered object type '
                          '%(objtype)s'), dict(objtype=objname))
            raise exception.UnsupportedObjectError(objtype=objname)

        # NOTE(comstud): If there's not an exact match, return the highest
        # compatible version. The objects stored in the class are sorted
        # such that highest version is first, so only set compatible_match
        # once below.
        compatible_match = None
        for objclass in cls._obj_classes[objname]:
            if objclass.VERSION == objver:
                return objclass
            if (not compatible_match and
                    versionutils.is_compatible(objver, objclass.VERSION)):
                compatible_match = objclass

        if compatible_match:
            return compatible_match

        # As mentioned above, latest version is always first in the list.
        latest_ver = cls._obj_classes[objname][0].VERSION
        raise exception.IncompatibleObjectVersion(objname=objname,
                                                  objver=objver,
                                                  supported=latest_ver)

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        """Hydrate a fresh instance field-by-field from the primitive dict
        produced by obj_to_primitive()."""
        self = cls()
        self._context = context
        self.VERSION = objver
        objdata = primitive['cinder_object.data']
        changes = primitive.get('cinder_object.changes', [])
        for name, field in self.fields.items():
            if name in objdata:
                setattr(self, name, field.from_primitive(self, name,
                                                         objdata[name]))
        # Only remember changes for fields this version actually declares.
        self._changed_fields = set([x for x in changes if x in self.fields])
        return self

    @classmethod
    def obj_from_primitive(cls, primitive, context=None):
        """Object field-by-field hydration."""
        if primitive['cinder_object.namespace'] != 'cinder':
            # NOTE(danms): We don't do anything with this now, but it's
            # there for "the future"
            raise exception.UnsupportedObjectError(
                objtype='%s.%s' % (primitive['cinder_object.namespace'],
                                   primitive['cinder_object.name']))
        objname = primitive['cinder_object.name']
        objver = primitive['cinder_object.version']
        objclass = cls.obj_class_from_name(objname, objver)
        return objclass._obj_from_primitive(context, objver, primitive)

    def __deepcopy__(self, memo):
        """Efficiently make a deep copy of this object."""
        # NOTE(danms): A naive deepcopy would copy more than we need,
        # and since we have knowledge of the volatile bits of the
        # object, we can be smarter here. Also, nested entities within
        # some objects may be uncopyable, so we can avoid those sorts
        # of issues by copying only our field data.
        nobj = self.__class__()
        nobj._context = self._context
        for name in self.fields:
            if self.obj_attr_is_set(name):
                nval = copy.deepcopy(getattr(self, name), memo)
                setattr(nobj, name, nval)
        # Preserve the change set rather than the one the copy accumulated.
        nobj._changed_fields = set(self._changed_fields)
        return nobj

    def obj_clone(self):
        """Create a copy."""
        return copy.deepcopy(self)

    def _obj_make_obj_compatible(self, primitive, target_version, field):
        """Backlevel a sub-object based on our versioning rules.

        This is responsible for backporting objects contained within
        this object's primitive according to a set of rules we
        maintain about version dependencies between objects. This
        requires that the obj_relationships table in this object is
        correct and up-to-date.

        :param:primitive: The primitive version of this object
        :param:target_version: The version string requested for this object
        :param:field: The name of the field in this object containing the
                      sub-object to be backported
        """
        def _do_backport(to_version):
            obj = getattr(self, field)
            if not obj:
                return
            if isinstance(obj, CinderObject):
                obj.obj_make_compatible(
                    primitive[field]['cinder_object.data'],
                    to_version)
                primitive[field]['cinder_object.version'] = to_version
            elif isinstance(obj, list):
                # ListOfObjectsField: backport each element in place.
                for i, element in enumerate(obj):
                    element.obj_make_compatible(
                        primitive[field][i]['cinder_object.data'],
                        to_version)
                    primitive[field][i]['cinder_object.version'] = to_version

        target_version = utils.convert_version_to_tuple(target_version)
        for index, versions in enumerate(self.obj_relationships[field]):
            my_version, child_version = versions
            my_version = utils.convert_version_to_tuple(my_version)
            if target_version < my_version:
                if index == 0:
                    # We're backporting to a version from before this
                    # subobject was added: delete it from the primitive.
                    del primitive[field]
                else:
                    # We're in the gap between index-1 and index, so
                    # backport to the older version
                    last_child_version = \
                        self.obj_relationships[field][index - 1][1]
                    _do_backport(last_child_version)
                return
            elif target_version == my_version:
                # This is the first mapping that satisfies the
                # target_version request: backport the object.
                _do_backport(child_version)
                return

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version.

        This is responsible for taking the primitive representation of
        an object and making it suitable for the given target_version.
        This may mean converting the format of object attributes, removing
        attributes that have been added since the target version, etc. In
        general:

        - If a new version of an object adds a field, this routine
          should remove it for older versions.
        - If a new version changed or restricted the format of a field, this
          should convert it back to something a client knowing only of the
          older version will tolerate.
        - If an object that this object depends on is bumped, then this
          object should also take a version bump. Then, this routine should
          backlevel the dependent object (by calling its obj_make_compatible())
          if the requested version of this object is older than the version
          where the new dependent object was added.

        :param:primitive: The result of self.obj_to_primitive()
        :param:target_version: The version string requested by the recipient
                               of the object
        :raises: cinder.exception.UnsupportedObjectError if conversion
                 is not possible for some reason
        """
        for key, field in self.fields.items():
            if not isinstance(field, (fields.ObjectField,
                                      fields.ListOfObjectsField)):
                continue
            if not self.obj_attr_is_set(key):
                continue
            if key not in self.obj_relationships:
                # NOTE(danms): This is really a coding error and shouldn't
                # happen unless we miss something
                raise exception.ObjectActionError(
                    action='obj_make_compatible',
                    reason='No rule for %s' % key)
            self._obj_make_obj_compatible(primitive, target_version, key)

    def obj_to_primitive(self, target_version=None):
        """Simple base-case dehydration.

        This calls to_primitive() for each item in fields.
        """
        primitive = dict()
        for name, field in self.fields.items():
            if self.obj_attr_is_set(name):
                primitive[name] = field.to_primitive(self, name,
                                                     getattr(self, name))
        if target_version:
            self.obj_make_compatible(primitive, target_version)
        obj = {'cinder_object.name': self.obj_name(),
               'cinder_object.namespace': 'cinder',
               'cinder_object.version': target_version or self.VERSION,
               'cinder_object.data': primitive}
        if self.obj_what_changed():
            obj['cinder_object.changes'] = sorted(self.obj_what_changed())
        return obj

    def obj_set_defaults(self, *attrs):
        """Set fields to their declared defaults.

        With no arguments, sets every field that declares a default;
        otherwise sets only the named attrs, raising ObjectActionError for
        any named field without a default.
        """
        if not attrs:
            attrs = [name for name, field in self.fields.items()
                     if field.default != fields.UnspecifiedDefault]

        for attr in attrs:
            default = self.fields[attr].default
            if default is fields.UnspecifiedDefault:
                raise exception.ObjectActionError(
                    action='set_defaults',
                    reason='No default set for field %s' % attr)
            setattr(self, attr, default)

    def obj_load_attr(self, attrname):
        """Load an additional attribute from the real object."""
        raise NotImplementedError(
            _("Cannot load '%s' in the base class") % attrname)

    def save(self, context):
        """Save the changed fields back to the store.

        This is optional for subclasses, but is presented here in the base
        class for consistency among those that do.
        """
        raise NotImplementedError('Cannot save anything in the base class')

    def obj_what_changed(self):
        """Returns a set of fields that have been modified."""
        changes = set(self._changed_fields)
        # A set field whose nested CinderObject has its own pending changes
        # also counts as changed.
        for field in self.fields:
            if (self.obj_attr_is_set(field) and
                    isinstance(getattr(self, field), CinderObject) and
                    getattr(self, field).obj_what_changed()):
                changes.add(field)
        return changes

    def obj_get_changes(self):
        """Returns a dict of changed fields and their new values."""
        changes = {}
        for key in self.obj_what_changed():
            changes[key] = getattr(self, key)
        return changes

    def obj_reset_changes(self, fields=None):
        """Reset the list of fields that have been changed.

        Note that this is NOT "revert to previous values"
        """
        if fields:
            self._changed_fields -= set(fields)
        else:
            self._changed_fields.clear()

    def obj_attr_is_set(self, attrname):
        """Test object to see if attrname is present.

        Returns True if the named attribute has a value set, or
        False if not. Raises AttributeError if attrname is not
        a valid attribute for this object.
        """
        if attrname not in self.obj_fields:
            raise AttributeError(
                _("%(objname)s object has no attribute '%(attrname)s'") %
                {'objname': self.obj_name(), 'attrname': attrname})
        return hasattr(self, get_attrname(attrname))

    @property
    def obj_fields(self):
        # NOTE(review): relies on Python 2 dict.keys() returning a list;
        # under Python 3 this concatenation would raise TypeError.
        return self.fields.keys() + self.obj_extra_fields
class CinderObjectDictCompat(object):
    """Mix-in to provide dictionary key access compat

    If an object needs to support attribute access using
    dictionary items instead of object attributes, inherit
    from this class. This should only be used as a temporary
    measure until all callers are converted to use modern
    attribute access.

    NOTE(berrange) This class will eventually be deleted.
    """

    # dictish syntactic sugar
    def iteritems(self):
        """For backwards-compatibility with dict-based objects.

        Yields (name, value) pairs for every set field (extra fields are
        always included).

        NOTE(danms): May be removed in the future.
        """
        for name in self.obj_fields:
            if (self.obj_attr_is_set(name) or
                    name in self.obj_extra_fields):
                yield name, getattr(self, name)

    def items(self):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        # A plain method instead of the previous lambda assignment
        # (PEP 8 E731); behavior is identical.
        return list(self.iteritems())

    def __getitem__(self, name):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        return getattr(self, name)

    def __setitem__(self, name, value):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        setattr(self, name, value)

    def __contains__(self, name):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        try:
            return self.obj_attr_is_set(name)
        except AttributeError:
            return False

    def get(self, key, value=NotSpecifiedSentinel):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        if key not in self.obj_fields:
            # NOTE(jdg): There are a number of places where we rely on the
            # old dictionary version and do a get(xxx, None).
            # The following preserves that compatability but in
            # the future we'll remove this shim altogether so don't
            # rely on it.
            LOG.debug('Cinder object %(object_name)s has no '
                      'attribute named: %(attribute_name)s',
                      {'object_name': self.__class__.__name__,
                       'attribute_name': key})
            return None
        # Identity check (`is not`) rather than `!=`: the sentinel marks
        # "no default supplied" and must never match a real value.
        if value is not NotSpecifiedSentinel and not self.obj_attr_is_set(key):
            return value
        else:
            return getattr(self, key)

    def update(self, updates):
        """For backwards-compatibility with dict-base objects.

        NOTE(danms): May be removed in the future.
        """
        for key, value in updates.items():
            setattr(self, key, value)
class CinderPersistentObject(object):
    """Mixin class for Persistent objects.

    Adds the timestamp and soft-delete fields shared by every
    persistent Cinder object.
    """
    fields = {
        'created_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'deleted': fields.BooleanField(default=False),
    }

    @contextlib.contextmanager
    def obj_as_admin(self):
        """Temporarily elevate the object's embedded context to admin.

        The original context is restored when the block exits. Example::

            with obj.obj_as_admin():
                obj.save()

        :raises exception.OrphanedObjectError: if the object carries no
            context to elevate.
        """
        if self._context is None:
            raise exception.OrphanedObjectError(method='obj_as_admin',
                                                objtype=self.obj_name())
        saved_context = self._context
        self._context = saved_context.elevated()
        try:
            yield
        finally:
            self._context = saved_context
class ObjectListBase(object):
    """Mixin class for lists of objects.

    Adds a single 'objects' field holding the backing list and makes the
    object behave like a list (iteration, indexing, slicing, membership),
    with automatic serialization support for the contained objects.
    """
    fields = {
        'objects': fields.ListOfObjectsField('CinderObject'),
    }

    # This is a dictionary of my_version:child_version mappings so that
    # we can support backleveling our contents based on the version
    # requested of the list object.
    child_versions = {}

    def __init__(self, *args, **kwargs):
        super(ObjectListBase, self).__init__(*args, **kwargs)
        if 'objects' not in kwargs:
            self.objects = []
            # A freshly-created empty list must not count as a change.
            self._changed_fields.discard('objects')

    def __iter__(self):
        """Iterate over the contained objects."""
        return iter(self.objects)

    def __len__(self):
        """Number of contained objects."""
        return len(self.objects)

    def __getitem__(self, index):
        """Index or slice into the contained objects.

        Slicing returns a new list object of the same class that shares
        this one's context.
        """
        if not isinstance(index, slice):
            return self.objects[index]
        sliced = self.__class__()
        sliced.objects = self.objects[index]
        # NOTE(danms): We must be mixed in with a CinderObject!
        sliced.obj_reset_changes()
        sliced._context = self._context
        return sliced

    def __contains__(self, value):
        """Membership test against the contained objects."""
        return value in self.objects

    def count(self, value):
        """Count occurrences of *value* among the contained objects."""
        return self.objects.count(value)

    def index(self, value):
        """Index of the first occurrence of *value*."""
        return self.objects.index(value)

    def sort(self, cmp=None, key=None, reverse=False):
        """Sort the backing list in place (Python 2 style signature)."""
        self.objects.sort(cmp=cmp, key=key, reverse=reverse)

    def obj_make_compatible(self, primitive, target_version):
        """Backlevel every child primitive to the child version mapped
        from *target_version* (default '1.0')."""
        child_primitives = primitive['objects']
        child_version = self.child_versions.get(target_version, '1.0')
        for position, child in enumerate(self.objects):
            child.obj_make_compatible(
                child_primitives[position]['cinder_object.data'],
                child_version)
            child_primitives[position]['cinder_object.version'] = child_version

    def obj_what_changed(self):
        """Changed fields; 'objects' is reported changed when any child
        object reports changes of its own."""
        changes = set(self._changed_fields)
        for child in self.objects:
            if child.obj_what_changed():
                changes.add('objects')
        return changes
class CinderObjectSerializer(messaging.NoOpSerializer):
    """A CinderObject-aware Serializer.

    Implements the Oslo messaging Serializer interface so that
    CinderObject entities can be passed as RPC arguments and results.
    Hand an instance of this to RPCClient and RPCServer objects.
    """

    def _process_object(self, context, objprim):
        """Rehydrate one object primitive, tolerating a newer .z version."""
        try:
            return CinderObject.obj_from_primitive(objprim, context=context)
        except exception.IncompatibleObjectVersion:
            objver = objprim['cinder_object.version']
            if objver.count('.') != 2:
                raise
            # NOTE(danms): For our purposes, the .z part of the version
            # should be safe to accept without requiring a backport
            objprim['cinder_object.version'] = '.'.join(objver.split('.')[:2])
            return self._process_object(context, objprim)

    def _process_iterable(self, context, action_fn, values):
        """Apply *action_fn* to every element of *values*.

        :param:context: Request context
        :param:action_fn: Action to take on each item in values
        :param:values: Iterable container of things to take action on
        :returns: A new container of the same type (except set, which
                  becomes a tuple) holding the processed items.
        """
        container_cls = values.__class__
        if issubclass(container_cls, dict):
            return container_cls(**{k: action_fn(context, v)
                                    for k, v in six.iteritems(values)})
        # NOTE(danms): A set can't have an unhashable value inside, such as
        # a dict. Convert sets to tuples, which is fine, since we can't
        # send them over RPC anyway.
        if container_cls == set:
            container_cls = tuple
        return container_cls([action_fn(context, item) for item in values])

    def serialize_entity(self, context, entity):
        """Turn objects (and containers of objects) into primitives."""
        if isinstance(entity, (tuple, list, set, dict)):
            return self._process_iterable(context, self.serialize_entity,
                                          entity)
        if (hasattr(entity, 'obj_to_primitive') and
                callable(entity.obj_to_primitive)):
            return entity.obj_to_primitive()
        return entity

    def deserialize_entity(self, context, entity):
        """Turn primitives back into objects where they encode one."""
        if isinstance(entity, dict) and 'cinder_object.name' in entity:
            return self._process_object(context, entity)
        if isinstance(entity, (tuple, list, set, dict)):
            return self._process_iterable(context, self.deserialize_entity,
                                          entity)
        return entity
def obj_to_primitive(obj):
    """Recursively convert *obj* into plain python primitives.

    ObjectListBase instances become lists, CinderObjects become dicts of
    their set fields, netaddr addresses/networks become strings, and
    anything else is returned untouched.
    """
    if isinstance(obj, ObjectListBase):
        return [obj_to_primitive(item) for item in obj]
    if isinstance(obj, CinderObject):
        return {name: obj_to_primitive(getattr(obj, name))
                for name in obj.obj_fields
                if obj.obj_attr_is_set(name) or name in obj.obj_extra_fields}
    if isinstance(obj, (netaddr.IPAddress, netaddr.IPNetwork)):
        return str(obj)
    return obj
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
    """Populate *list_obj* with objects hydrated from DB primitives.

    Each item of *db_list* is run through ``item_cls._from_db_object()``
    and stored in ``list_obj.objects``.

    :param:context: Request context
    :param:list_obj: An ObjectListBase object
    :param:item_cls: The CinderObject class of the objects within the list
    :param:db_list: The list of primitives to convert to objects
    :param:extra_args: Extra arguments to pass to _from_db_object()
    :returns: list_obj, populated and with its change tracking reset
    """
    list_obj.objects = [
        item_cls._from_db_object(context, item_cls(), db_item, **extra_args)
        for db_item in db_list]
    list_obj._context = context
    list_obj.obj_reset_changes()
    return list_obj
def serialize_args(fn):
    """Decorator that will do the arguments serialization before remoting."""
    def wrapper(obj, *args, **kwargs):
        # Normalize kwargs that cannot cross an RPC boundary as-is:
        # exception values, traceback objects and datetimes become strings.
        for kw in kwargs:
            value_arg = kwargs.get(kw)
            if kw == 'exc_val' and value_arg:
                # Exception value: keep only its string form.
                kwargs[kw] = str(value_arg)
            elif kw == 'exc_tb' and (
                    not isinstance(value_arg, six.string_types) and value_arg):
                # Traceback object: render it, unless it is already a string.
                kwargs[kw] = ''.join(traceback.format_tb(value_arg))
            elif isinstance(value_arg, datetime.datetime):
                # Datetimes travel in ISO 8601 form.
                kwargs[kw] = timeutils.isotime(value_arg)
        if hasattr(fn, '__call__'):
            return fn(obj, *args, **kwargs)
        # NOTE(danms): We wrap a descriptor, so use that protocol
        return fn.__get__(None, obj)(*args, **kwargs)
    # NOTE(danms): Make this discoverable
    wrapper.remotable = getattr(fn, 'remotable', False)
    wrapper.original_fn = fn
    # Plain callables keep their metadata via functools.wraps; if `fn` is a
    # descriptor (e.g. an existing classmethod), re-wrap as classmethod.
    return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__')
        else classmethod(wrapper))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import sys
from contextlib import ExitStack
from datetime import datetime, timedelta
import freezegun
import pytest
# We should set these before loading _any_ of the rest of airflow so that the
# unit test mode config is set as early as possible.
# Point Airflow at the test DAGs and force unit-test mode before any
# airflow import happens (see the comment above).
tests_directory = os.path.dirname(os.path.realpath(__file__))
os.environ["AIRFLOW__CORE__DAGS_FOLDER"] = os.path.join(tests_directory, "dags")
os.environ["AIRFLOW__CORE__UNIT_TEST_MODE"] = "True"
# Provide defaults for variables the tests read, without clobbering values
# already present in the environment.
os.environ["AWS_DEFAULT_REGION"] = (os.environ.get("AWS_DEFAULT_REGION") or "us-east-1")
os.environ["CREDENTIALS_DIR"] = (os.environ.get('CREDENTIALS_DIR') or "/files/airflow-breeze-config/keys")
# Make scripts/perf importable so perf_kit can be loaded below.
perf_directory = os.path.abspath(os.path.join(tests_directory, os.pardir, 'scripts', 'perf'))
if perf_directory not in sys.path:
    sys.path.append(perf_directory)
from perf_kit.sqlalchemy import ( # noqa: E402 isort:skip # pylint: disable=wrong-import-position
count_queries, trace_queries
)
@pytest.fixture()
def reset_environment():
    """
    Resets env variables.

    Snapshots os.environ before the test and restores it afterwards:
    variables the test added are removed, modified ones are reset.
    """
    init_env = os.environ.copy()
    yield
    # Iterate over a snapshot of the keys: deleting from os.environ while
    # iterating it directly is unsafe (mutation during iteration).
    for key in list(os.environ):
        if key not in init_env:
            del os.environ[key]
        else:
            os.environ[key] = init_env[key]
@pytest.fixture()
def reset_db():
    """Reset the Airflow metadata database before the test runs."""
    from airflow.utils import db as airflow_db

    airflow_db.resetdb()
    yield
# Column names a user may request via the --trace-sql option (see
# pytest_addoption and the trace_sql fixture).
ALLOWED_TRACE_SQL_COLUMNS = ['num', 'time', 'trace', 'sql', 'parameters', 'count']
@pytest.fixture(autouse=True)
def trace_sql(request):
    """
    Displays queries from the tests to console.
    """
    trace_sql_option = request.config.getoption("trace_sql")
    if not trace_sql_option:
        yield
        return

    terminal_reporter = request.config.pluginmanager.getplugin("terminalreporter")
    # if no terminal reporter plugin is present, nothing we can do here;
    # this can happen when this function executes in a slave node
    # when using pytest-xdist, for example
    if terminal_reporter is None:
        yield
        return

    columns = [col.strip() for col in trace_sql_option.split(",")]

    def pytest_print(text):
        return terminal_reporter.write_line(text)

    with ExitStack() as exit_stack:
        if columns == ['num']:
            # It is very unlikely that the user wants to display only numbers, but probably
            # the user just wants to count the queries.
            exit_stack.enter_context(  # pylint: disable=no-member
                count_queries(
                    print_fn=pytest_print
                )
            )
        elif any(c in columns for c in ['time', 'trace', 'sql', 'parameters']):
            # BUG FIX: the previous condition iterated the literal list and was
            # therefore always true; it must test membership in ``columns``.
            exit_stack.enter_context(  # pylint: disable=no-member
                trace_queries(
                    display_num='num' in columns,
                    display_time='time' in columns,
                    display_trace='trace' in columns,
                    display_sql='sql' in columns,
                    display_parameters='parameters' in columns,
                    print_fn=pytest_print
                )
            )

        yield
def pytest_addoption(parser):
    """
    Add options parser for custom plugins
    """
    group = parser.getgroup("airflow")
    group.addoption(
        "--with-db-init",
        action="store_true",
        dest="db_init",
        help="Forces database initialization before tests",
    )
    group.addoption(
        "--integration",
        action="append",
        metavar="INTEGRATIONS",
        help="only run tests matching integration specified: "
             "[cassandra,kerberos,mongo,openldap,presto,rabbitmq,redis]. ",
    )
    group.addoption(
        "--backend",
        action="store",
        metavar="BACKEND",
        help="only run tests matching the backend: [sqlite,postgres,mysql].",
    )
    group.addoption(
        "--system",
        action="append",
        metavar="SYSTEMS",
        help="only run tests matching the system specified [google.cloud, google.marketing_platform]",
    )
    group.addoption(
        "--include-long-running",
        action="store_true",
        help="Includes long running tests (marked with long_running marker). They are skipped by default.",
    )
    group.addoption(
        "--include-quarantined",
        action="store_true",
        help="Includes quarantined tests (marked with quarantined marker). They are skipped by default.",
    )
    allowed_trace_sql_columns_list = ",".join(ALLOWED_TRACE_SQL_COLUMNS)
    group.addoption(
        "--trace-sql",
        action="store",
        help=(
            "Trace SQL statements. As an argument, you must specify the columns to be "
            # BUG FIX: the f-string previously contained a stray literal 'f'
            # ("[f{...}]"), which rendered in the help text as "[fnum,time,...]".
            f"displayed as a comma-separated list. Supported values: [{allowed_trace_sql_columns_list}]"
        ),
        metavar="COLUMNS",
    )
def initial_db_init():
    """Reset the Airflow metadata DB, dispatching on the Airflow major version."""
    if os.environ.get("RUN_AIRFLOW_1_10") != "true":
        from airflow.utils import db
        db.resetdb()
        return
    # Airflow 1.10 has no db.resetdb() compatible entry point here; shell out.
    print("Attempting to reset the db using airflow command")
    os.system("airflow resetdb -y")
@pytest.fixture(autouse=True, scope="session")
def breeze_test_helper(request):
    """
    Helper that setups Airflow testing environment. It does the same thing
    as the old 'run-tests' script.
    """
    # fixme: this should use some other env variable ex. RUNNING_ON_K8S
    if os.environ.get("SKIP_INIT_DB"):
        print("Skipping db initialization. Tests do not require database")
        return
    from airflow import __version__
    # Record the Airflow major version so the skip_* helpers and
    # initial_db_init() can branch on 1.10 vs 2.0 behaviour.
    if __version__.startswith("1.10"):
        os.environ['RUN_AIRFLOW_1_10'] = "true"
    print(" AIRFLOW ".center(60, "="))
    # Setup test environment for breeze
    home = os.path.expanduser("~")
    airflow_home = os.environ.get("AIRFLOW_HOME") or os.path.join(home, "airflow")
    print(f"Home of the user: {home}\nAirflow home {airflow_home}")
    # Initialize Airflow db if required.  The lock file marks that the DB was
    # already initialized in this container, so repeated runs skip the reset.
    lock_file = os.path.join(airflow_home, ".airflow_db_initialised")
    if request.config.option.db_init:
        print("Initializing the DB - forced with --with-db-init switch.")
        initial_db_init()
    elif not os.path.exists(lock_file):
        print(
            "Initializing the DB - first time after entering the container.\n"
            "You can force re-initialization the database by adding --with-db-init switch to run-tests."
        )
        initial_db_init()
        # Create pid file
        with open(lock_file, "w+"):
            pass
    else:
        print(
            "Skipping initializing of the DB as it was initialized already.\n"
            "You can re-initialize the database by adding --with-db-init flag when running tests."
        )
    integration_kerberos = os.environ.get("INTEGRATION_KERBEROS")
    if integration_kerberos == "true":
        # Initialize kerberos: obtain a ticket from the keytab, or abort the
        # whole session if the keytab location was not provided.
        kerberos = os.environ.get("KRB5_KTNAME")
        if kerberos:
            subprocess.check_call(["kinit", "-kt", kerberos, "airflow"])
        else:
            print("Kerberos enabled! Please setup KRB5_KTNAME environment variable")
            sys.exit(1)
def pytest_configure(config):
    """Register the custom markers used across the Airflow test suite."""
    marker_descriptions = (
        "integration(name): mark test to run with named integration",
        "backend(name): mark test to run with named backend",
        "system(name): mark test to run with named system",
        "long_running: mark test that run for a long time (many minutes)",
        "quarantined: mark test that are in quarantine (i.e. flaky, need to be isolated and fixed)",
        "credential_file(name): mark tests that require credential file in CREDENTIALS_DIR",
        "airflow_2: mark tests that works only on Airflow 2.0 / master",
    )
    for description in marker_descriptions:
        config.addinivalue_line("markers", description)
def skip_if_not_marked_with_integration(selected_integrations, item):
    """Skip *item* unless it has an ``integration`` marker matching one of
    *selected_integrations* (or "all" was selected)."""
    for marker in item.iter_markers(name="integration"):
        name = marker.args[0]
        if name in selected_integrations or "all" in selected_integrations:
            return
    pytest.skip("The test is skipped because it does not have the right integration marker. "
                "Only tests marked with pytest.mark.integration(INTEGRATION) are run with INTEGRATION"
                " being one of {integration}. {item}".
                format(integration=selected_integrations, item=item))
def skip_if_not_marked_with_backend(selected_backend, item):
    """Skip *item* unless one of its ``backend`` markers lists *selected_backend*."""
    for marker in item.iter_markers(name="backend"):
        if selected_backend in marker.args:
            return
    pytest.skip("The test is skipped because it does not have the right backend marker "
                "Only tests marked with pytest.mark.backend('{backend}') are run"
                ": {item}".
                format(backend=selected_backend, item=item))
def skip_if_not_marked_with_system(selected_systems, item):
    """Skip *item* unless it has a ``system`` marker matching one of
    *selected_systems* (or "all" was selected)."""
    for marker in item.iter_markers(name="system"):
        name = marker.args[0]
        if name in selected_systems or "all" in selected_systems:
            return
    pytest.skip("The test is skipped because it does not have the right system marker. "
                "Only tests marked with pytest.mark.system(SYSTEM) are run with SYSTEM"
                " being one of {systems}. {item}".
                format(systems=selected_systems, item=item))
def skip_system_test(item):
    """Skip *item* if it carries any ``system`` marker (used when no
    --system option was passed)."""
    marker = next(iter(item.iter_markers(name="system")), None)
    if marker is None:
        return
    pytest.skip("The test is skipped because it has system marker. "
                "System tests are only run when --system flag "
                "with the right system ({system}) is passed to pytest. {item}".
                format(system=marker.args[0], item=item))
def skip_long_running_test(item):
    """Skip *item* when marked long_running and --include-long-running
    was not passed."""
    if any(True for _ in item.iter_markers(name="long_running")):
        pytest.skip("The test is skipped because it has long_running marker. "
                    "And --include-long-running flag is not passed to pytest. {item}".
                    format(item=item))
def skip_quarantined_test(item):
    """Skip *item* when marked quarantined and --include-quarantined was
    not passed.

    BUG FIX: the skip message previously said the flag "is passed", but
    this function only runs when the flag is NOT passed (see
    pytest_runtest_setup); wording now matches skip_long_running_test.
    """
    for _ in item.iter_markers(name="quarantined"):
        pytest.skip("The test is skipped because it has quarantined marker. "
                    "And --include-quarantined flag is not passed to pytest. {item}".
                    format(item=item))
def skip_if_integration_disabled(marker, item):
    """Skip *item* unless the INTEGRATION_<NAME> env variable is 'true'."""
    integration_name = marker.args[0]
    environment_variable_name = "INTEGRATION_" + integration_name.upper()
    environment_variable_value = os.environ.get(environment_variable_name)
    if environment_variable_value == "true":
        return
    pytest.skip("The test requires {integration_name} integration started and "
                "{name} environment variable to be set to true (it is '{value}')."
                " It can be set by specifying '--integration {integration_name}' at breeze startup"
                ": {item}".
                format(name=environment_variable_name, value=environment_variable_value,
                       integration_name=integration_name, item=item))
def skip_if_wrong_backend(marker, item):
    """Skip *item* unless the BACKEND env variable names one of the
    backends listed in its marker."""
    valid_backend_names = marker.args
    environment_variable_name = "BACKEND"
    environment_variable_value = os.environ.get(environment_variable_name)
    if environment_variable_value and environment_variable_value in valid_backend_names:
        return
    pytest.skip("The test requires one of {valid_backend_names} backend started and "
                "{name} environment variable to be set to 'true' (it is '{value}')."
                " It can be set by specifying backend at breeze startup"
                ": {item}".
                format(name=environment_variable_name, value=environment_variable_value,
                       valid_backend_names=valid_backend_names, item=item))
def skip_if_credential_file_missing(item):
    """Skip *item* when a credential file required by its marker is not
    present in CREDENTIALS_DIR."""
    for marker in item.iter_markers(name="credential_file"):
        credential_path = os.path.join(os.environ.get('CREDENTIALS_DIR'),
                                       marker.args[0])
        if os.path.exists(credential_path):
            continue
        pytest.skip("The test requires credential file {path}: {item}".
                    format(path=credential_path, item=item))
def skip_if_airflow_2_test(item):
    """Skip *item* (marked airflow_2) when running against Airflow 1.10."""
    if os.environ.get("RUN_AIRFLOW_1_10") != "true":
        return
    for _ in item.iter_markers(name="airflow_2"):
        pytest.skip("The test works only with Airflow 2.0 / master branch")
def pytest_runtest_setup(item):
    """Apply the suite's skipping rules to each collected test item.

    Order matters: integration markers are checked first, then system,
    backend, long-running, quarantined, credential-file and
    Airflow-2-only restrictions. Each helper calls pytest.skip() itself.
    """
    selected_integrations_list = item.config.getoption("--integration")
    selected_systems_list = item.config.getoption("--system")
    include_long_running = item.config.getoption("--include-long-running")
    include_quarantined = item.config.getoption("--include-quarantined")
    for marker in item.iter_markers(name="integration"):
        skip_if_integration_disabled(marker, item)
    if selected_integrations_list:
        # --integration given: run ONLY matching integration tests.
        skip_if_not_marked_with_integration(selected_integrations_list, item)
    if selected_systems_list:
        # --system given: run ONLY matching system tests.
        skip_if_not_marked_with_system(selected_systems_list, item)
    else:
        # No --system: all system tests are skipped.
        skip_system_test(item)
    for marker in item.iter_markers(name="backend"):
        skip_if_wrong_backend(marker, item)
    selected_backend = item.config.getoption("--backend")
    if selected_backend:
        skip_if_not_marked_with_backend(selected_backend, item)
    if not include_long_running:
        skip_long_running_test(item)
    if not include_quarantined:
        skip_quarantined_test(item)
    skip_if_credential_file_missing(item)
    skip_if_airflow_2_test(item)
@pytest.fixture
def frozen_sleep(monkeypatch):
    """
    Use freezegun to "stub" sleep, so that it takes no time, but that
    ``datetime.now()`` appears to move forwards

    If your module under test does ``import time`` and then ``time.sleep``::

        def test_something(frozen_sleep):
            my_mod.fn_under_test()

    If your module under test does ``from time import sleep`` then you will
    have to mock that sleep function directly::

        def test_something(frozen_sleep, monkeypatch):
            monkeypatch.setattr('my_mod.sleep', frozen_sleep)
            my_mod.fn_under_test()
    """
    freezegun_control = None
    def fake_sleep(seconds):
        # Each "sleep" replaces the previous freeze point with one
        # `seconds` further in the future, so wall-clock time appears to
        # advance instantly without the test actually waiting.
        nonlocal freezegun_control
        utcnow = datetime.utcnow()
        if freezegun_control is not None:
            freezegun_control.stop()
        freezegun_control = freezegun.freeze_time(utcnow + timedelta(seconds=seconds))
        freezegun_control.start()
    monkeypatch.setattr("time.sleep", fake_sleep)
    yield fake_sleep
    # Undo the last active freeze (if any) once the test is done.
    if freezegun_control is not None:
        freezegun_control.stop()
| |
"""
Dungeon Beasts
"""
from __future__ import print_function
import random
import json
import urllib
import boto3
from boto3.dynamodb.conditions import Key, Attr
import pdb
VERSION = 1.0
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the Alexa speechlet payload: speech, card, and reprompt."""
    speech = {'type': 'PlainText', 'text': output}
    reprompt_speech = {'type': 'PlainText', 'text': reprompt_text}
    card = {'type': 'Simple', 'title': title, 'content': output}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': {'outputSpeech': reprompt_speech},
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet response in the top-level Alexa envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """Build the launch/help response that greets the user.

    Session attributes could be initialized here if the skill needed any.
    """
    card_title = "Welcome"
    speech_output = "Welcome to Dungeon Beasts. " \
        "You can ask me to look up a beast from the D&D 5th edition Monster Manual by name. " \
        "For example, you can say tell me about Skeletons and I'll tell you what I know about them. " \
        "What would you like me to find for you?"
    # Re-prompted if the user stays silent or says something unrecognized.
    reprompt_text = "For instructions on what you can say, please say help me."
    speechlet = build_speechlet_response(
        card_title, speech_output, reprompt_text, False)
    return build_response({}, speechlet)
def handle_session_end_request():
    """Say goodbye and close the session (Cancel/Stop intents)."""
    speechlet = build_speechlet_response(
        "Session Ended",
        "Thank you for trying the Dungeon Beasts. "
        "Have a nice day! ",
        None,
        True)  # True ends the session and exits the skill
    return build_response({}, speechlet)
def handle_session_stop_request():
    """Handle a Stop request: say "Ok." and end the session.

    BUG FIX: the original assigned ``true`` (an undefined name, raising
    NameError at runtime); Python's boolean literal is ``True``.
    """
    card_title = "Stop"
    speech_output = "Ok."
    should_end_session = True
    return build_response({}, build_speechlet_response(
        card_title, speech_output, None, should_end_session))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
    """Called when the session starts; currently only logs that fact."""
    print("on_session_started")
def on_launch(launch_request, session):
    """Handle a bare skill launch (no intent) with the welcome response."""
    return get_welcome_response()
def on_intent(intent_request, session):
    """Dispatch an IntentRequest to the handler for its intent name.

    :raises ValueError: for an unrecognized intent name.
    """
    print("on_intent requestId=" + intent_request['requestId'] +
          ", sessionId=" + session['sessionId'])

    intent = intent_request['intent']
    intent_name = intent['name']
    print("Version: " + str(VERSION))
    print("intent_name:" + intent_name)

    if intent_name == "ListSourcesIntent":
        return list_sources(intent, session)
    if intent_name == "BeastNameIntent":
        return beast_by_name(intent, session)
    if intent_name == "MoreInfoIntent":
        return more_beast_info(intent, session)
    if intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    if intent_name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handle_session_end_request()
    raise ValueError("Invalid intent " + intent_name)
def on_session_ended(session_ended_request, session):
    """Called when the user ends the session.

    Not called when the skill itself returns should_end_session=true.
    Currently a no-op; cleanup logic would go here.
    """
    return None
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """Entry point: route the incoming Alexa request based on its type
    (LaunchRequest, IntentRequest, SessionEndedRequest).

    The JSON body of the request is provided in the *event* parameter.
    Unknown request types return None.
    """
    # To lock this skill to a single Alexa application, validate
    # event['session']['application']['applicationId'] here and raise
    # ValueError("Invalid Application ID") on mismatch.
    if event['session']['new']:
        on_session_started({'requestId': event['request']['requestId']},
                           event['session'])

    request = event['request']
    dispatch = {
        "LaunchRequest": on_launch,
        "IntentRequest": on_intent,
        "SessionEndedRequest": on_session_ended,
    }
    handler = dispatch.get(request['type'])
    if handler is not None:
        return handler(request, event['session'])
# --------------- Functions for the core of Dungeon Beasts -----------------
def get_dynamodb_conn():
    """Return a DynamoDB resource handle pointing at the local dev endpoint."""
    return boto3.resource('dynamodb', endpoint_url='http://localhost:8000')
def list_sources(intent, session):
    """Read all bestiary source names from DynamoDB and speak them sorted.

    On any DB failure the session is ended with whatever names (possibly
    none) were collected before the error.
    """
    card_title = "D&D Bestiary Sources"
    session_attributes = {}
    should_end_session = False
    source_names = []
    reprompt_text = ""
    try:
        dynamodb = get_dynamodb_conn()
        scan_result = dynamodb.Table('Bestiary_sources').scan()
        for source in scan_result['Items']:
            source_names.append(str(source['name']))
    except Exception as e:
        print("error list_sources: ", e)
        should_end_session = True
        reprompt_text = ""
    speech_output = ", ".join(sorted(source_names))
    return build_response(session_attributes, build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session))
def beast_by_name(intent, session):
    """Handle BeastNameIntent: look up a beast (or beast group) by name.

    If the same beast name is already stored in the session attributes,
    the repeat request is treated as a direct single-beast lookup. Any
    failure falls back to a "couldn't find" message and ends the session.
    """
    card_title = "D&D Beast By Name"
    should_end_session = False
    beast_name = ""
    print("beast_by_name: ", intent['slots'])
    try:
        beast_info = ""
        session_attributes = ""
        single_beast = False
        # NOTE(review): singularize() is not defined in this chunk of the
        # file -- presumably it normalizes a plural slot value
        # ("Skeletons" -> "Skeleton"); confirm it exists elsewhere in the module.
        beast_name = singularize((intent['slots']['beastName']['value']).title())
        print('Beast name: ', beast_name)
        if "beast_name" in session.get('attributes', {}):
            print('Session beast name: ', session['attributes']['beast_name'])
            if session['attributes']['beast_name'] == beast_name:
                # Same beast asked for twice in a row: skip the group scan.
                single_beast = True
            speech_output, reprompt_text, session_attributes = find_beast_info(beast_name, single_beast)
        else:
            speech_output, reprompt_text, session_attributes = find_beast_info(beast_name)
    except Exception as e:
        # Missing slot, unknown beast or DB error: apologize and end.
        print("error beast_by_name: ", e, "beast_name: " + beast_name)
        beast_info = "I couldn't find that beast."
        reprompt_text = ""
        speech_output = beast_name + ". " + beast_info
        should_end_session = True
    return build_response(session_attributes, build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session))
def create_beast_name_attributes(beast_name):
    """Build the session-attributes dict remembering the current beast."""
    return dict(beast_name=beast_name)
def find_beast_info(beast_name, single_beast=False):
    """Look up the speech output for *beast_name*.

    Unless *single_beast* is set, first check whether the name matches a
    beast group; if no group matches, fall back to a single-beast lookup.

    :returns: (speech_output, reprompt_text, session_attributes)
    """
    dynamodb = get_dynamodb_conn()
    bestiary = dynamodb.Table('Bestiary')
    bestiary_names = dynamodb.Table('Bestiary_names')
    session_attributes = ""
    if single_beast:
        speech_output, reprompt_text = find_single_beast(bestiary, beast_name)
    else:
        groups = find_groups(bestiary_names, beast_name)
        if groups:
            speech_output, reprompt_text = group_beasts_response(groups)
        else:
            speech_output, reprompt_text = find_single_beast(bestiary, beast_name)
        # Remember the looked-up name so a repeat request or "more" works.
        session_attributes = create_beast_name_attributes(beast_name)
    return speech_output, reprompt_text, session_attributes
def find_groups(bestiary_names, beast_name):
    """Scan the names table for groups whose name starts with *beast_name*.

    :returns: the (possibly empty) list of matching group items.
    """
    scan_result = bestiary_names.scan(
        FilterExpression=Attr('group_name').begins_with(beast_name))
    return scan_result['Items']
def find_single_beast(bestiary, beast_name):
    """Fetch one beast's core stat block and phrase it for speech.

    :returns: (speech_output, reprompt_text); the reprompt is already
        appended to speech_output.
    """
    beast_data = bestiary.get_item(Key={'name': beast_name})['Item']
    stats = [
        "AC: " + str(beast_data['ac']),
        "HP: " + beast_data['hp'],
        "Challenge Rating: " + beast_data['cr'],
        "Alignment: " + beast_data['alignment'],
    ]
    beast_info = ". ".join(stats)
    print(beast_info)
    reprompt_text = "Say 'more' if you want more information about " + beast_name
    speech_output = beast_name + ". " + beast_info + ". " + reprompt_text
    return speech_output, reprompt_text
def group_beasts_response(beast_group_items):
    """Phrase a "multiple beasts found" prompt from the first group row."""
    beasts = beast_group_items[0]["beasts"]
    reprompt_text = "Tell me which one you want by saying its name."
    speech_output = "I found the following beasts: " + beasts + ". " + reprompt_text
    return speech_output, reprompt_text
def more_beast_info(intent, session):
    """Handle the 'more' intent: speak the full stat block for the beast
    remembered in the session attributes.

    Reads 'beast_name' from session['attributes'] (set by the initial lookup);
    if absent, responds that there is nothing to look up.  Always ends the
    session.  Returns an Alexa response built by build_response /
    build_speechlet_response (defined elsewhere in this file).
    """
    card_title = "D&D Beast More Info"
    should_end_session = True
    reprompt_text = ""
    session_attributes = {}
    if "beast_name" in session.get('attributes', {}):
        beast_name = session['attributes']['beast_name']
        #print('==============', beast_name)
        card_title = card_title + " For " + beast_name
        try:
            dynamodb = get_dynamodb_conn()
            bestiary = dynamodb.Table('Bestiary')
            beast_info = ""
            response = bestiary.get_item(
                Key={
                    'name': beast_name
                }
            )
            # Raises KeyError (caught below) when the beast is not found.
            beast_data = response['Item']
            # Numeric ability scores go through str(); optional attributes go
            # through safe_str() so a missing/None value reads as 'None'.
            size = "Size: " + beast_data['size'] + ". "
            beast_type = "Type: " + beast_data['type'] + ". "
            speed = "Speed: " + beast_data['speed'] + ". "
            beast_str = "Strength: " + str(beast_data['str']) + ". "
            beast_dex = "Dexterity: " + str(beast_data['dex']) + ". "
            beast_con = "Constitution: " + str(beast_data['con']) + ". "
            beast_int = "Intelligence: " + str(beast_data['int']) + ". "
            beast_wis = "Wisdom: " + str(beast_data['wis']) + ". "
            beast_cha = "Charisma: " + str(beast_data['cha']) + ". "
            skill = "Skill: " + safe_str(beast_data['skill']) + ". "
            passive = "Passive: " + safe_str(beast_data['passive']) + ". "
            resist = "Resist: " + safe_str(beast_data['resist']) + ". "
            vulnerable = "Vulnerable: " + safe_str(beast_data['vulnerable']) + ". "
            immune = "Immune: " + safe_str(beast_data['immune']) + ". "
            senses = "Senses: " + safe_str(beast_data['senses']) + ". "
            languages = "Languages: " + safe_str(beast_data['languages']) + ". "
            traits = "Traits: " + ", ".join(beast_data['traits']) + ". "
            actions = "Actions: " + ", ".join(beast_data['actions']) + ". "
            legendaries = "Legendaries: " + get_legendaries(beast_data['legendaries']) + ". "
            source = "Source: " + beast_data['source'] + ". "
            beast_info = size + beast_type + speed + beast_str + beast_dex + \
                         beast_con + beast_int + beast_wis + beast_cha + \
                         skill + passive + resist + vulnerable + immune + \
                         senses + languages + traits + actions + legendaries + \
                         source
            #print(response)
            print(beast_info)
        except Exception as e:
            # Best-effort: any failure (missing item, connection error, bad
            # field) degrades to a "couldn't find it" answer instead of crashing.
            print("error beast_by_name: ", e, "beast_name: " + beast_name)
            beast_info = " I couldn't find that beast."
            reprompt_text = ""
            should_end_session = True
        speech_output = beast_name + ". " + beast_info
    else:
        speech_output = "There was no beast to look up."
    return build_response(session_attributes, build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session))
def safe_str(s):
    """Stringify s, mapping None to the literal string 'None'."""
    if s is None:
        return 'None'
    return str(s)
def get_legendaries(legendaries):
    """Join legendary action names with commas; an empty list reads as 'None'."""
    if legendaries:
        return ", ".join(legendaries)
    return "None"
def singularize(name):
    """Map a plural beast name to its singular bestiary key.

    Brute-force lookup table; names with no entry pass through unchanged.
    Both 'Succubus' and 'Incubus' resolve to the shared bestiary entry.
    """
    singular_forms = {
        "Apes": "Ape",
        "Dragons": "Dragon",
        "Orcs": "Orc",
        "Skeletons": "Skeleton",
        "Wolves": "Wolf",
        "Vampires": "Vampire",
        "Trolls": "Troll",
        "Be Holders": "Beholder",
        "Goblins": "Goblin",
        "Succubus": "Succubus/incubus",
        "Incubus": "Succubus/incubus",
    }
    return singular_forms.get(name, name)
# --- file boundary: the following section is a separate module (f2py2e cfuncs) ---
#!/usr/bin/env python
"""
C declarations, CPP macros, and C functions for f2py2e.
Only required declarations/macros/functions will be used.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 11:42:34 $
Pearu Peterson
"""
# Version string extracted from the CVS Revision keyword.  NOTE: the name is
# immediately shadowed by the import of the project-local __version__ module
# on the next line -- only __version__.version is used afterwards.
__version__ = "$Revision: 1.75 $"[10:-1]
import __version__
f2py_version = __version__.version
import types
import sys
import copy
# Shorthand used throughout f2py for writing diagnostics to stderr.
errmess=sys.stderr.write
##################### Definitions ##################
# outneeds: per-section ordered lists of snippet keys that must be emitted
# into the generated C module.
outneeds={'includes0':[],'includes':[],'typedefs':[],'typedefs_generated':[],
'userincludes':[],
'cppmacros':[],'cfuncs':[],'callbacks':[],'f90modhooks':[],
'commonhooks':[]}
# needs: snippet key -> list of other snippet keys it depends on.
needs={}
# The remaining dicts map a snippet name to the literal C text to emit; the
# self-referential first entry of each is the placeholder marker that gets
# substituted in the generated-module template.
includes0={'includes0':'/*need_includes0*/'}
includes={'includes':'/*need_includes*/'}
userincludes={'userincludes':'/*need_userincludes*/'}
typedefs={'typedefs':'/*need_typedefs*/'}
typedefs_generated={'typedefs_generated':'/*need_typedefs_generated*/'}
cppmacros={'cppmacros':'/*need_cppmacros*/'}
cfuncs={'cfuncs':'/*need_cfuncs*/'}
callbacks={'callbacks':'/*need_callbacks*/'}
f90modhooks={'f90modhooks':'/*need_f90modhooks*/',
'initf90modhooksstatic':'/*initf90modhooksstatic*/',
'initf90modhooksdynamic':'/*initf90modhooksdynamic*/',
}
commonhooks={'commonhooks':'/*need_commonhooks*/',
'initcommonhooks':'/*need_initcommonhooks*/',
}
############ Includes ###################
includes0['math.h']='#include <math.h>'
includes0['string.h']='#include <string.h>'
includes0['setjmp.h']='#include <setjmp.h>'
includes['Python.h']='#include "Python.h"'
needs['arrayobject.h']=['Python.h']
# NOTE(review): the next assignment is immediately overwritten below -- the
# PY_ARRAY_UNIQUE_SYMBOL variant is dead; fortranobject.h is what actually
# gets emitted.  Presumably fortranobject.h pulls in arrayobject.h itself --
# confirm before removing the dead entry.
includes['arrayobject.h']='''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
#include "arrayobject.h"'''
includes['arrayobject.h']='#include "fortranobject.h"'
############# Type definitions ###############
# C typedef snippets emitted into the generated wrapper when the matching
# type name appears in a routine's 'needs' chain.
typedefs['unsigned_char']='typedef unsigned char unsigned_char;'
typedefs['unsigned_short']='typedef unsigned short unsigned_short;'
typedefs['unsigned_long']='typedef unsigned long unsigned_long;'
typedefs['signed_char']='typedef signed char signed_char;'
typedefs['long_long']="""\
#ifdef _WIN32
typedef __int64 long_long;
#else
typedef long long long_long;
typedef unsigned long long unsigned_long_long;
#endif
"""
# BUG FIX: the _WIN32 branch of this snippet used to read
#     typedef __uint64 long_long;
# which names a non-existent MSVC type (the correct spelling is
# 'unsigned __int64') and aliases it to the wrong identifier.  The
# historical, misspelled key 'insinged_long_long' is kept so that any
# existing 'needs' references still resolve; the correctly spelled key is
# registered alongside it.
_unsigned_long_long_typedef = """\
#ifdef _WIN32
typedef unsigned __int64 unsigned_long_long;
#else
typedef unsigned long long unsigned_long_long;
#endif
"""
typedefs['insinged_long_long']=_unsigned_long_long_typedef
typedefs['unsigned_long_long']=_unsigned_long_long_typedef
# long_double is assumed to be provided by the platform unless the build
# defines _LONG_DOUBLE itself.
typedefs['long_double']="""\
#ifndef _LONG_DOUBLE
typedef long double long_double;
#endif
"""
# Complex types are plain {re, im} structs matching Fortran's memory layout.
typedefs['complex_long_double']='typedef struct {long double r,i;} complex_long_double;'
typedefs['complex_float']='typedef struct {float r,i;} complex_float;'
typedefs['complex_double']='typedef struct {double r,i;} complex_double;'
typedefs['string']="""typedef char * string;"""
############### CPP macros ####################
cppmacros['CFUNCSMESS']="""\
#ifdef DEBUGCFUNCS
#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
\tfprintf(stderr,\"\\n\");
#else
#define CFUNCSMESS(mess)
#define CFUNCSMESSPY(mess,obj)
#endif
"""
cppmacros['F_FUNC']="""\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F
#else
#define F_FUNC(f,F) _##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F##_
#else
#define F_FUNC(f,F) _##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F
#else
#define F_FUNC(f,F) f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F##_
#else
#define F_FUNC(f,F) f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_FUNC_US(f,F) F_FUNC(f##_,F##_)
#else
#define F_FUNC_US(f,F) F_FUNC(f,F)
#endif
"""
cppmacros['F_WRAPPEDFUNC']="""\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_)
#else
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F)
#endif
"""
cppmacros['F_MODFUNC']="""\
#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f
#else
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f
#else
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) f ## .in. ## m
#else
#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _
#endif
#endif
/*
#if defined(UPPERCASE_FORTRAN)
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F)
#else
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f)
#endif
*/
#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f))
"""
cppmacros['SWAPUNSAFE']="""\
#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(a) = ((size_t)(a) ^ (size_t)(b))
"""
cppmacros['SWAP']="""\
#define SWAP(a,b,t) {\\
\tt *c;\\
\tc = a;\\
\ta = b;\\
\tb = c;}
"""
#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS)'
cppmacros['PRINTPYOBJERR']="""\
#define PRINTPYOBJERR(obj)\\
\tfprintf(stderr,\"#modulename#.error is related to \");\\
\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
\tfprintf(stderr,\"\\n\");
"""
cppmacros['MINMAX']="""\
#ifndef max
#define max(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef min
#define min(a,b) ((a < b) ? (a) : (b))
#endif
#ifndef MAX
#define MAX(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef MIN
#define MIN(a,b) ((a < b) ? (a) : (b))
#endif
"""
cppmacros['len..']="""\
#define rank(var) var ## _Rank
#define shape(var,dim) var ## _Dims[dim]
#define old_rank(var) (((PyArrayObject *)(capi_ ## var ## _tmp))->nd)
#define old_shape(var,dim) (((PyArrayObject *)(capi_ ## var ## _tmp))->dimensions[dim])
#define fshape(var,dim) shape(var,rank(var)-dim-1)
#define len(var) shape(var,0)
#define flen(var) fshape(var,0)
#define size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp))
/* #define index(i) capi_i ## i */
#define slen(var) capi_ ## var ## _len
"""
cppmacros['pyobj_from_char1']='#define pyobj_from_char1(v) (PyInt_FromLong(v))'
cppmacros['pyobj_from_short1']='#define pyobj_from_short1(v) (PyInt_FromLong(v))'
needs['pyobj_from_int1']=['signed_char']
cppmacros['pyobj_from_int1']='#define pyobj_from_int1(v) (PyInt_FromLong(v))'
cppmacros['pyobj_from_long1']='#define pyobj_from_long1(v) (PyLong_FromLong(v))'
needs['pyobj_from_long_long1']=['long_long']
cppmacros['pyobj_from_long_long1']="""\
#ifdef HAVE_LONG_LONG
#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v))
#else
#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long.
#define pyobj_from_long_long1(v) (PyLong_FromLong(v))
#endif
"""
needs['pyobj_from_long_double1']=['long_double']
cppmacros['pyobj_from_long_double1']='#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))'
cppmacros['pyobj_from_double1']='#define pyobj_from_double1(v) (PyFloat_FromDouble(v))'
cppmacros['pyobj_from_float1']='#define pyobj_from_float1(v) (PyFloat_FromDouble(v))'
needs['pyobj_from_complex_long_double1']=['complex_long_double']
cppmacros['pyobj_from_complex_long_double1']='#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_double1']=['complex_double']
cppmacros['pyobj_from_complex_double1']='#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_float1']=['complex_float']
cppmacros['pyobj_from_complex_float1']='#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_string1']=['string']
cppmacros['pyobj_from_string1']='#define pyobj_from_string1(v) (PyString_FromString((char *)v))'
needs['pyobj_from_string1size']=['string']
cppmacros['pyobj_from_string1size']='#define pyobj_from_string1size(v,len) (PyString_FromStringAndSize((char *)v, len))'
needs['TRYPYARRAYTEMPLATE']=['PRINTPYOBJERR']
# Macro body shared by all try_pyarr_from_<type> helpers: store the scalar *v
# into element 0 of array 'obj', converting through the array's dtype switch.
# Return codes: 1 = stored, 0 = not a usable array, -1 = not an array,
# -2 = NULL obj or unsupported dtype.
# NOTE(review): for CDOUBLE/CFLOAT the scalar is written only into the first
# (real) slot of the complex element; the imaginary part is left untouched.
# PyArray_LONG appears both via the fast typecode check and in the switch --
# presumably historical duplication; confirm before cleaning up.
cppmacros['TRYPYARRAYTEMPLATE']="""\
/* New SciPy */
#define TRYPYARRAYTEMPLATECHAR case PyArray_STRING: *(char *)(arr->data)=*v; break;
#define TRYPYARRAYTEMPLATELONG case PyArray_LONG: *(long *)(arr->data)=*v; break;
#define TRYPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data); break;
#define TRYPYARRAYTEMPLATE(ctype,typecode) \\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
if (arr->descr->type==typecode) {*(ctype *)(arr->data)=*v; return 1;}\\
switch (arr->descr->type_num) {\\
case PyArray_DOUBLE: *(double *)(arr->data)=*v; break;\\
case PyArray_INT: *(int *)(arr->data)=*v; break;\\
case PyArray_LONG: *(long *)(arr->data)=*v; break;\\
case PyArray_FLOAT: *(float *)(arr->data)=*v; break;\\
case PyArray_CDOUBLE: *(double *)(arr->data)=*v; break;\\
case PyArray_CFLOAT: *(float *)(arr->data)=*v; break;\\
case PyArray_BOOL: *(npy_bool *)(arr->data)=(*v!=0); break;\\
case PyArray_UBYTE: *(unsigned char *)(arr->data)=*v; break;\\
case PyArray_BYTE: *(signed char *)(arr->data)=*v; break;\\
case PyArray_SHORT: *(short *)(arr->data)=*v; break;\\
case PyArray_USHORT: *(npy_ushort *)(arr->data)=*v; break;\\
case PyArray_UINT: *(npy_uint *)(arr->data)=*v; break;\\
case PyArray_ULONG: *(npy_ulong *)(arr->data)=*v; break;\\
case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=*v; break;\\
case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=*v; break;\\
case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\
case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\
case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data, arr); break;\\
default: return -2;\\
};\\
return 1
"""
needs['TRYCOMPLEXPYARRAYTEMPLATE']=['PRINTPYOBJERR']
cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\
#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;
#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
if (arr->descr->type==typecode) {\\
*(ctype *)(arr->data)=(*v).r;\\
*(ctype *)(arr->data+sizeof(ctype))=(*v).i;\\
return 1;\\
}\\
switch (arr->descr->type_num) {\\
case PyArray_CDOUBLE: *(double *)(arr->data)=(*v).r;*(double *)(arr->data+sizeof(double))=(*v).i;break;\\
case PyArray_CFLOAT: *(float *)(arr->data)=(*v).r;*(float *)(arr->data+sizeof(float))=(*v).i;break;\\
case PyArray_DOUBLE: *(double *)(arr->data)=(*v).r; break;\\
case PyArray_LONG: *(long *)(arr->data)=(*v).r; break;\\
case PyArray_FLOAT: *(float *)(arr->data)=(*v).r; break;\\
case PyArray_INT: *(int *)(arr->data)=(*v).r; break;\\
case PyArray_SHORT: *(short *)(arr->data)=(*v).r; break;\\
case PyArray_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\
case PyArray_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\
case PyArray_BOOL: *(npy_bool *)(arr->data)=((*v).r!=0 && (*v).i!=0); break;\\
case PyArray_USHORT: *(npy_ushort *)(arr->data)=(*v).r; break;\\
case PyArray_UINT: *(npy_uint *)(arr->data)=(*v).r; break;\\
case PyArray_ULONG: *(npy_ulong *)(arr->data)=(*v).r; break;\\
case PyArray_LONGLONG: *(npy_longlong *)(arr->data)=(*v).r; break;\\
case PyArray_ULONGLONG: *(npy_ulonglong *)(arr->data)=(*v).r; break;\\
case PyArray_LONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r; break;\\
case PyArray_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r;*(npy_longdouble *)(arr->data+sizeof(npy_longdouble))=(*v).i;break;\\
case PyArray_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;\\
default: return -2;\\
};\\
return -1;
"""
## cppmacros['NUMFROMARROBJ']="""\
## #define NUMFROMARROBJ(typenum,ctype) \\
## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
## \tif (arr) {\\
## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\
## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
## \t\t\tgoto capi_fail;\\
## \t\t} else {\\
## \t\t\t(arr->descr->cast[typenum])(arr->data,1,(char*)v,1,1);\\
## \t\t}\\
## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
## \t\treturn 1;\\
## \t}
## """
## #XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ
## cppmacros['CNUMFROMARROBJ']="""\
## #define CNUMFROMARROBJ(typenum,ctype) \\
## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
## \tif (arr) {\\
## \t\tif (arr->descr->type_num==PyArray_OBJECT) {\\
## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
## \t\t\tgoto capi_fail;\\
## \t\t} else {\\
## \t\t\t(arr->descr->cast[typenum])((void *)(arr->data),1,(void *)(v),1,1);\\
## \t\t}\\
## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
## \t\treturn 1;\\
## \t}
## """
needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN','PRINTPYOBJERR']
cppmacros['GETSTRFROMPYTUPLE']="""\
#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
\t\tif (rv_cb_str == NULL)\\
\t\t\tgoto capi_fail;\\
\t\tif (PyString_Check(rv_cb_str)) {\\
\t\t\tstr[len-1]='\\0';\\
\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\
\t\t} else {\\
\t\t\tPRINTPYOBJERR(rv_cb_str);\\
\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\
\t\t\tgoto capi_fail;\\
\t\t}\\
\t}
"""
cppmacros['GETSCALARFROMPYTUPLE']="""\
#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
\t\t\tgoto capi_fail;\\
\t}
"""
cppmacros['FAILNULL']="""\\
#define FAILNULL(p) do { \\
if ((p) == NULL) { \\
PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\
goto capi_fail; \\
} \\
} while (0)
"""
needs['MEMCOPY']=['string.h', 'FAILNULL']
cppmacros['MEMCOPY']="""\
#define MEMCOPY(to,from,n)\\
do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0)
"""
cppmacros['STRINGMALLOC']="""\
#define STRINGMALLOC(str,len)\\
\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
\t\tgoto capi_fail;\\
\t} else {\\
\t\t(str)[len] = '\\0';\\
\t}
"""
cppmacros['STRINGFREE']="""\
#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
"""
needs['STRINGCOPYN']=['string.h', 'FAILNULL']
cppmacros['STRINGCOPYN']="""\
#define STRINGCOPYN(to,from,buf_size) \\
do { \\
int _m = (buf_size); \\
char *_to = (to); \\
char *_from = (from); \\
FAILNULL(_to); FAILNULL(_from); \\
(void)strncpy(_to, _from, sizeof(char)*_m); \\
_to[_m-1] = '\\0'; \\
/* Padding with spaces instead of nulls */ \\
for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\
_to[_m] = ' '; \\
} \\
} while (0)
"""
needs['STRINGCOPY']=['string.h', 'FAILNULL']
cppmacros['STRINGCOPY']="""\
#define STRINGCOPY(to,from)\\
do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0)
"""
cppmacros['CHECKGENERIC']="""\
#define CHECKGENERIC(check,tcheck,name) \\
\tif (!(check)) {\\
\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
\t\t/*goto capi_fail;*/\\
\t} else """
cppmacros['CHECKARRAY']="""\
#define CHECKARRAY(check,tcheck,name) \\
\tif (!(check)) {\\
\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
\t\t/*goto capi_fail;*/\\
\t} else """
cppmacros['CHECKSTRING']="""\
#define CHECKSTRING(check,tcheck,name,show,var)\\
\tif (!(check)) {\\
\t\tchar errstring[256];\\
\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
\t\tPyErr_SetString(#modulename#_error, errstring);\\
\t\t/*goto capi_fail;*/\\
\t} else """
cppmacros['CHECKSCALAR']="""\
#define CHECKSCALAR(check,tcheck,name,show,var)\\
\tif (!(check)) {\\
\t\tchar errstring[256];\\
\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
\t\tPyErr_SetString(#modulename#_error,errstring);\\
\t\t/*goto capi_fail;*/\\
\t} else """
## cppmacros['CHECKDIMS']="""\
## #define CHECKDIMS(dims,rank) \\
## \tfor (int i=0;i<(rank);i++)\\
## \t\tif (dims[i]<0) {\\
## \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
## \t\t\tgoto capi_fail;\\
## \t\t}
## """
cppmacros['ARRSIZE']='#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
# Guard macro: abort compilation when building against a too-old Numeric.
# BUG FIX: the #error message previously read "intall" and used a malformed
# URL "http:/sourceforge.net" (single slash).
cppmacros['OLDPYNUM']="""\
#ifdef OLDPYNUM
#error You need to install Numeric Python version 13 or higher. Get it from http://sourceforge.net/project/?group_id=1369
#endif
"""
################# C functions ###############
# Flatten a multi-index i[0..nd-1] into a linear offset for a C-ordered
# (row-major) contiguous array: Horner's scheme ii = ii*dim + i[k].
cfuncs['calcarrindex']="""\
static int calcarrindex(int *i,PyArrayObject *arr) {
\tint k,ii = i[0];
\tfor (k=1; k < arr->nd; k++)
\t\tii += (ii*(arr->dimensions[k] - 1)+i[k]); /* assuming contiguous arr */
\treturn ii;
}"""
# Same flattening but with the axes traversed in reverse (transposed /
# Fortran-order index interpretation).
cfuncs['calcarrindextr']="""\
static int calcarrindextr(int *i,PyArrayObject *arr) {
\tint k,ii = i[arr->nd-1];
\tfor (k=1; k < arr->nd; k++)
\t\tii += (ii*(arr->dimensions[arr->nd-k-1] - 1)+i[arr->nd-k-1]); /* assuming contiguous arr */
\treturn ii;
}"""
# Multi-index iterator over an nd-dimensional shape: initforcomb() seeds a
# file-static cache, then each nextforcomb() call yields the next index tuple
# (normal or transposed order per 'tr'), returning NULL when exhausted.
# NOTE(review): state lives in a single static struct, so this is not
# reentrant -- only one iteration may be in flight at a time; calling
# initforcomb again before exhaustion leaks the previous i/i_tr buffers
# (they are only freed when nextforcomb runs off the end).
cfuncs['forcomb']="""\
static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
static int initforcomb(npy_intp *dims,int nd,int tr) {
int k;
if (dims==NULL) return 0;
if (nd<0) return 0;
forcombcache.nd = nd;
forcombcache.d = dims;
forcombcache.tr = tr;
if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
for (k=1;k<nd;k++) {
forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
}
forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
return 1;
}
static int *nextforcomb(void) {
int j,*i,*i_tr,k;
int nd=forcombcache.nd;
if ((i=forcombcache.i) == NULL) return NULL;
if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
if (forcombcache.d == NULL) return NULL;
i[0]++;
if (i[0]==forcombcache.d[0]) {
j=1;
while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
if (j==nd) {
free(i);
free(i_tr);
return NULL;
}
for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0;
i[j]++;
i_tr[nd-j-1]++;
} else
i_tr[nd-1]++;
if (forcombcache.tr) return i_tr;
return i;
}"""
needs['try_pyarr_from_string']=['STRINGCOPYN','PRINTPYOBJERR','string']
cfuncs['try_pyarr_from_string']="""\
static int try_pyarr_from_string(PyObject *obj,const string str) {
\tPyArrayObject *arr = NULL;
\tif (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
\t\t{ STRINGCOPYN(arr->data,str,PyArray_NBYTES(arr)); }
\treturn 1;
capi_fail:
\tPRINTPYOBJERR(obj);
\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
\treturn 0;
}
"""
needs['string_from_pyobj']=['string','STRINGMALLOC','STRINGCOPYN']
cfuncs['string_from_pyobj']="""\
static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) {
\tPyArrayObject *arr = NULL;
\tPyObject *tmp = NULL;
#ifdef DEBUGCFUNCS
fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj);
#endif
\tif (obj == Py_None) {
\t\tif (*len == -1)
\t\t\t*len = strlen(inistr); /* Will this cause problems? */
\t\tSTRINGMALLOC(*str,*len);
\t\tSTRINGCOPYN(*str,inistr,*len+1);
\t\treturn 1;
\t}
\tif (PyArray_Check(obj)) {
\t\tif ((arr = (PyArrayObject *)obj) == NULL)
\t\t\tgoto capi_fail;
\t\tif (!ISCONTIGUOUS(arr)) {
\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
\t\t\tgoto capi_fail;
\t\t}
\t\tif (*len == -1)
\t\t\t*len = (arr->descr->elsize)*PyArray_SIZE(arr);
\t\tSTRINGMALLOC(*str,*len);
\t\tSTRINGCOPYN(*str,arr->data,*len+1);
\t\treturn 1;
\t}
\tif (PyString_Check(obj)) {
\t\ttmp = obj;
\t\tPy_INCREF(tmp);
\t}
#if PY_VERSION_HEX >= 0x03000000
\telse if (PyUnicode_Check(obj)) {
\t\ttmp = PyUnicode_AsASCIIString(obj);
\t}
\telse {
\t\tPyObject *tmp2;
\t\ttmp2 = PyObject_Str(obj);
\t\tif (tmp2) {
\t\t\ttmp = PyUnicode_AsASCIIString(tmp2);
\t\t\tPy_DECREF(tmp2);
\t\t}
\t\telse {
\t\t\ttmp = NULL;
\t\t}
\t}
#else
\telse {
\t\ttmp = PyObject_Str(obj);
\t}
#endif
\tif (tmp == NULL) goto capi_fail;
\tif (*len == -1)
\t\t*len = PyString_GET_SIZE(tmp);
\tSTRINGMALLOC(*str,*len);
\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1);
\tPy_DECREF(tmp);
\treturn 1;
capi_fail:
\tPy_XDECREF(tmp);
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['char_from_pyobj']=['int_from_pyobj']
cfuncs['char_from_pyobj']="""\
static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (char)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['signed_char_from_pyobj']=['int_from_pyobj','signed_char']
cfuncs['signed_char_from_pyobj']="""\
static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (signed_char)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['short_from_pyobj']=['int_from_pyobj']
cfuncs['short_from_pyobj']="""\
static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) {
\tint i=0;
\tif (int_from_pyobj(&i,obj,errmess)) {
\t\t*v = (short)i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# Convert an arbitrary Python object to a C int (Python-2 C API: PyInt_*,
# PyNumber_Int).  Fallback chain: exact int -> PyNumber_Int coercion ->
# real part of a complex -> first element of a sequence (recursing), with
# strings explicitly excluded from the sequence path.  Returns 1 on success,
# 0 on failure with the given error message set on #modulename#_error.
cfuncs['int_from_pyobj']="""\
static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyInt_Check(obj)) {
\t\t*v = (int)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Int(obj);
\tif (tmp) {
\t\t*v = PyInt_AS_LONG(tmp);
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
cfuncs['long_from_pyobj']="""\
static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyInt_Check(obj)) {
\t\t*v = PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Int(obj);
\tif (tmp) {
\t\t*v = PyInt_AS_LONG(tmp);
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['long_long_from_pyobj']=['long_long']
cfuncs['long_long_from_pyobj']="""\
static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyLong_Check(obj)) {
\t\t*v = PyLong_AsLongLong(obj);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PyInt_Check(obj)) {
\t\t*v = (long_long)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\ttmp = PyNumber_Long(obj);
\tif (tmp) {
\t\t*v = PyLong_AsLongLong(tmp);
\t\tPy_DECREF(tmp);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['long_double_from_pyobj']=['double_from_pyobj','long_double']
cfuncs['long_double_from_pyobj']="""\
static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) {
\tdouble d=0;
\tif (PyArray_CheckScalar(obj)){
\t\tif PyArray_IsScalar(obj, LongDouble) {
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t\treturn 1;
\t\t}
\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_LONGDOUBLE) {
\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj));
\t\t\treturn 1;
\t\t}
\t}
\tif (double_from_pyobj(&d,obj,errmess)) {
\t\t*v = (long_double)d;
\t\treturn 1;
\t}
\treturn 0;
}
"""
cfuncs['double_from_pyobj']="""\
static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) {
\tPyObject* tmp = NULL;
\tif (PyFloat_Check(obj)) {
#ifdef __sgi
\t\t*v = PyFloat_AsDouble(obj);
#else
\t\t*v = PyFloat_AS_DOUBLE(obj);
#endif
\t\treturn 1;
\t}
\ttmp = PyNumber_Float(obj);
\tif (tmp) {
#ifdef __sgi
\t\t*v = PyFloat_AsDouble(tmp);
#else
\t\t*v = PyFloat_AS_DOUBLE(tmp);
#endif
\t\tPy_DECREF(tmp);
\t\treturn 1;
\t}
\tif (PyComplex_Check(obj))
\t\ttmp = PyObject_GetAttrString(obj,\"real\");
\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
\t\t/*pass*/;
\telse if (PySequence_Check(obj))
\t\ttmp = PySequence_GetItem(obj,0);
\tif (tmp) {
\t\tPyErr_Clear();
\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
\t\tPy_DECREF(tmp);
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL) err = #modulename#_error;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
needs['float_from_pyobj']=['double_from_pyobj']
cfuncs['float_from_pyobj']="""\
static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) {
\tdouble d=0.0;
\tif (double_from_pyobj(&d,obj,errmess)) {
\t\t*v = (float)d;
\t\treturn 1;
\t}
\treturn 0;
}
"""
needs['complex_long_double_from_pyobj']=['complex_long_double','long_double',
'complex_double_from_pyobj']
cfuncs['complex_long_double_from_pyobj']="""\
static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) {
\tcomplex_double cd={0.0,0.0};
\tif (PyArray_CheckScalar(obj)){
\t\tif PyArray_IsScalar(obj, CLongDouble) {
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t\treturn 1;
\t\t}
\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==PyArray_CLONGDOUBLE) {
\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
\t\t\treturn 1;
\t\t}
\t}
\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
\t\t(*v).r = (long_double)cd.r;
\t\t(*v).i = (long_double)cd.i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# C helper: convert an arbitrary Python object to a complex_double.
# Accepts Python complex/float/int/long, numpy complex scalars, 0-dim
# arrays (cast to CDOUBLE), and the first element of a non-string sequence.
# Returns 1 on success; returns 0 with a Python exception set on failure.
needs['complex_double_from_pyobj']=['complex_double']
cfuncs['complex_double_from_pyobj']="""\
static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) {
\tPy_complex c;
\tif (PyComplex_Check(obj)) {
\t\tc=PyComplex_AsCComplex(obj);
\t\t(*v).r=c.real, (*v).i=c.imag;
\t\treturn 1;
\t}
\tif (PyArray_IsScalar(obj, ComplexFloating)) {
\t\tif (PyArray_IsScalar(obj, CFloat)) {
\t\t\tnpy_cfloat new;
\t\t\tPyArray_ScalarAsCtype(obj, &new);
\t\t\t(*v).r = (double)new.real;
\t\t\t(*v).i = (double)new.imag;
\t\t}
\t\telse if (PyArray_IsScalar(obj, CLongDouble)) {
\t\t\tnpy_clongdouble new;
\t\t\tPyArray_ScalarAsCtype(obj, &new);
\t\t\t(*v).r = (double)new.real;
\t\t\t(*v).i = (double)new.imag;
\t\t}
\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */
\t\t\tPyArray_ScalarAsCtype(obj, v);
\t\t}
\t\treturn 1;
\t}
\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
\t\tPyObject *arr;
\t\tif (PyArray_Check(obj)) {
\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, PyArray_CDOUBLE);
\t\t}
\t\telse {
\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(PyArray_CDOUBLE));
\t\t}
\t\tif (arr==NULL) return 0;
\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
\t\treturn 1;
\t}
\t/* Python does not provide PyNumber_Complex function :-( */
\t(*v).i=0.0;
\tif (PyFloat_Check(obj)) {
#ifdef __sgi
\t\t(*v).r = PyFloat_AsDouble(obj);
#else
\t\t(*v).r = PyFloat_AS_DOUBLE(obj);
#endif
\t\treturn 1;
\t}
\tif (PyInt_Check(obj)) {
\t\t(*v).r = (double)PyInt_AS_LONG(obj);
\t\treturn 1;
\t}
\tif (PyLong_Check(obj)) {
\t\t(*v).r = PyLong_AsDouble(obj);
\t\treturn (!PyErr_Occurred());
\t}
\tif (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) {
\t\tPyObject *tmp = PySequence_GetItem(obj,0);
\t\tif (tmp) {
\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) {
\t\t\t\tPy_DECREF(tmp);
\t\t\t\treturn 1;
\t\t\t}
\t\t\tPy_DECREF(tmp);
\t\t}
\t}
\t{
\t\tPyObject* err = PyErr_Occurred();
\t\tif (err==NULL)
\t\t\terr = PyExc_TypeError;
\t\tPyErr_SetString(err,errmess);
\t}
\treturn 0;
}
"""
# C helper: complex_float conversion, implemented by converting through
# complex_double and narrowing each component to float.
needs['complex_float_from_pyobj']=['complex_float','complex_double_from_pyobj']
cfuncs['complex_float_from_pyobj']="""\
static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) {
\tcomplex_double cd={0.0,0.0};
\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
\t\t(*v).r = (float)cd.r;
\t\t(*v).i = (float)cd.i;
\t\treturn 1;
\t}
\treturn 0;
}
"""
# try_pyarr_from_<ctype> helpers: write a C scalar back into an existing
# numpy array object via the TRYPYARRAYTEMPLATE / TRYCOMPLEXPYARRAYTEMPLATE
# macros (used for intent(inout)-style scalar outputs).
needs['try_pyarr_from_char']=['pyobj_from_char1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_char']='static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
# BUG FIX: this needs-entry previously used the key 'try_pyarr_from_signed_char',
# which was overwritten two lines below and left 'try_pyarr_from_unsigned_char'
# without its dependency list.
needs['try_pyarr_from_unsigned_char']=['TRYPYARRAYTEMPLATE','unsigned_char']
cfuncs['try_pyarr_from_unsigned_char']='static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','signed_char']
cfuncs['try_pyarr_from_signed_char']='static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
needs['try_pyarr_from_short']=['pyobj_from_short1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_short']='static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
needs['try_pyarr_from_int']=['pyobj_from_int1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_int']='static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
needs['try_pyarr_from_long']=['pyobj_from_long1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_long']='static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
needs['try_pyarr_from_long_long']=['pyobj_from_long_long1','TRYPYARRAYTEMPLATE','long_long']
cfuncs['try_pyarr_from_long_long']='static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
needs['try_pyarr_from_float']=['pyobj_from_float1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_float']='static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
needs['try_pyarr_from_double']=['pyobj_from_double1','TRYPYARRAYTEMPLATE']
cfuncs['try_pyarr_from_double']='static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
# Complex variants use the dedicated complex template macro.
needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1','TRYCOMPLEXPYARRAYTEMPLATE','complex_float']
cfuncs['try_pyarr_from_complex_float']='static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1','TRYCOMPLEXPYARRAYTEMPLATE','complex_double']
cfuncs['try_pyarr_from_complex_double']='static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
# C helper: build the argument tuple for a Python call-back supplied to a
# Fortran routine.  Inspects the callable (function / bound method /
# __call__ object / Fortran object / capsule) to work out its arity,
# then pre-fills the tuple with None plus any user-supplied extra args.
needs['create_cb_arglist']=['CFUNCSMESS','PRINTPYOBJERR','MINMAX']
cfuncs['create_cb_arglist']="""\
static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) {
\tPyObject *tmp = NULL;
\tPyObject *tmp_fun = NULL;
\tint tot,opt,ext,siz,i,di=0;
\tCFUNCSMESS(\"create_cb_arglist\\n\");
\ttot=opt=ext=siz=0;
\t/* Get the total number of arguments */
\tif (PyFunction_Check(fun))
\t\ttmp_fun = fun;
\telse {
\t\tdi = 1;
\t\tif (PyObject_HasAttrString(fun,\"im_func\")) {
\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\");
\t\t}
\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) {
\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\");
\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\"))
\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
\t\t\telse {
\t\t\t\ttmp_fun = fun; /* built-in function */
\t\t\t\ttot = maxnofargs;
\t\t\t\tif (xa != NULL)
\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
\t\t\t}
\t\t\tPy_XDECREF(tmp);
\t\t}
\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
\t\t\ttot = maxnofargs;
\t\t\tif (xa != NULL)
\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
\t\t\ttmp_fun = fun;
\t\t}
\t\telse if (F2PyCapsule_Check(fun)) {
\t\t\ttot = maxnofargs;
\t\t\tif (xa != NULL)
\t\t\t\text = PyTuple_Size((PyObject *)xa);
\t\t\tif(ext>0) {
\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\");
\t\t\t\tgoto capi_fail;
\t\t\t}
\t\t\ttmp_fun = fun;
\t\t}
\t}
if (tmp_fun==NULL) {
fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name));
goto capi_fail;
}
#if PY_VERSION_HEX >= 0x03000000
\tif (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\"))
#else
\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
#endif
\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
\t\tPy_XDECREF(tmp);
\t}
\t/* Get the number of optional arguments */
#if PY_VERSION_HEX >= 0x03000000
\tif (PyObject_HasAttrString(tmp_fun,\"__defaults__\"))
\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
#else
\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\"))
\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\")))
#endif
\t\t\topt = PyTuple_Size(tmp);
\t\tPy_XDECREF(tmp);
\t/* Get the number of extra arguments */
\tif (xa != NULL)
\t\text = PyTuple_Size((PyObject *)xa);
\t/* Calculate the size of call-backs argument list */
\tsiz = MIN(maxnofargs+ext,tot);
\t*nofargs = MAX(0,siz-ext);
#ifdef DEBUGCFUNCS
\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs);
#endif
\tif (siz<tot-opt) {
\t\tfprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt);
\t\tgoto capi_fail;
\t}
\t/* Initialize argument list */
\t*args = (PyTupleObject *)PyTuple_New(siz);
\tfor (i=0;i<*nofargs;i++) {
\t\tPy_INCREF(Py_None);
\t\tPyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
\t}
\tif (xa != NULL)
\t\tfor (i=(*nofargs);i<siz;i++) {
\t\t\ttmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
\t\t\tPy_INCREF(tmp);
\t\t\tPyTuple_SET_ITEM(*args,i,tmp);
\t\t}
\tCFUNCSMESS(\"create_cb_arglist-end\\n\");
\treturn 1;
capi_fail:
\tif ((PyErr_Occurred())==NULL)
\t\tPyErr_SetString(#modulename#_error,errmess);
\treturn 0;
}
"""
def buildcfuncs():
    """Generate the pyarr_from_p_<ctype>1 C macros for every C type f2py maps."""
    from capi_maps import c2capi_map
    for ctype in c2capi_map.keys():
        macro = 'pyarr_from_p_%s1' % ctype
        cppmacros[macro] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (macro, c2capi_map[ctype])
    # Strings need an explicit dims argument carrying the length.
    ctype = 'string'
    macro = 'pyarr_from_p_%s1' % ctype
    cppmacros[macro] = '#define %s(v,dims) (PyArray_SimpleNewFromData(1,dims,PyArray_CHAR,(char *)v))' % (macro)
############ Auxiliary functions for sorting needs ###################
def append_needs(need,flag=1):
    """Register `need` (a section-entry name, or a list of them) in the global
    ``outneeds`` registry, recursively pulling in its transitive dependencies
    from ``needs``.

    With flag=1 (default) dependencies are prepended ahead of `need` in the
    appropriate ``outneeds`` section; with flag=0 nothing is appended and a
    dict mapping section name -> list of needed names is returned instead.
    """
    global outneeds,needs
    if type(need)==types.ListType:
        # A list of needs: register each one individually.
        for n in need:
            append_needs(n,flag)
    elif type(need)==str:
        if not need: return
        # Work out which section this name belongs to.
        if need in includes0:
            n = 'includes0'
        elif need in includes:
            n = 'includes'
        elif need in typedefs:
            n = 'typedefs'
        elif need in typedefs_generated:
            n = 'typedefs_generated'
        elif need in cppmacros:
            n = 'cppmacros'
        elif need in cfuncs:
            n = 'cfuncs'
        elif need in callbacks:
            n = 'callbacks'
        elif need in f90modhooks:
            n = 'f90modhooks'
        elif need in commonhooks:
            n = 'commonhooks'
        else:
            errmess('append_needs: unknown need %s\n'%(`need`))
            return
        if need in outneeds[n]: return  # already registered
        if flag:
            # Collect the transitive dependencies (flag=0 recursion returns
            # them as a dict) and splice them in ahead of `need`.
            tmp={}
            if need in needs:
                for nn in needs[need]:
                    t=append_needs(nn,0)
                    if type(t)==types.DictType:
                        for nnn in t.keys():
                            if nnn in tmp:
                                tmp[nnn]=tmp[nnn]+t[nnn]
                            else:
                                tmp[nnn]=t[nnn]
            for nn in tmp.keys():
                for nnn in tmp[nn]:
                    if nnn not in outneeds[nn]:
                        # Prepend so dependencies precede their dependents.
                        outneeds[nn]=[nnn]+outneeds[nn]
            outneeds[n].append(need)
        else:
            # Dry-run mode: accumulate what would be needed and return it.
            tmp={}
            if need in needs:
                for nn in needs[need]:
                    t=append_needs(nn,flag)
                    if type(t)==types.DictType:
                        for nnn in t.keys():
                            if nnn in tmp:
                                tmp[nnn]=t[nnn]+tmp[nnn]
                            else:
                                tmp[nnn]=t[nnn]
            if n not in tmp:
                tmp[n]=[]
            tmp[n].append(need)
            return tmp
    else:
        errmess('append_needs: expected list or string but got :%s\n'%(`need`))
def get_needs():
    """Return ``outneeds`` with each section sorted so that every entry's
    dependencies (per the global ``needs`` map) come before the entry itself.

    Sorting is done by repeatedly rotating an entry to the end of the queue
    while any of its dependencies are still pending; if a full sweep makes no
    progress a circular dependency is assumed and the remainder is emitted
    unsorted with a warning.
    """
    global outneeds,needs
    res={}
    for n in outneeds.keys():
        out=[]
        saveout=copy.copy(outneeds[n])
        while len(outneeds[n])>0:
            if outneeds[n][0] not in needs:
                # No dependencies at all: emit immediately.
                out.append(outneeds[n][0])
                del outneeds[n][0]
            else:
                # Does any still-pending entry feed the head entry?
                flag=0
                for k in outneeds[n][1:]:
                    if k in needs[outneeds[n][0]]:
                        flag=1
                        break
                if flag:
                    # Rotate the head to the back and try again later.
                    outneeds[n]=outneeds[n][1:]+[outneeds[n][0]]
                else:
                    out.append(outneeds[n][0])
                    del outneeds[n][0]
            # No element changed position since the last snapshot -> stuck.
            if saveout and (0 not in map(lambda x,y:x==y,saveout,outneeds[n])) \
               and outneeds[n] != []:
                print n,saveout
                errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
                out=out+saveout
                break
            saveout=copy.copy(outneeds[n])
        if out==[]:
            out=[n]
        res[n]=out
    return res
| |
import argparse
import re
from exceptions import *
from utils import SymbolTable, write_inst_hex
import utils
# 2**17 bytes: the reach of a 16-bit, word-aligned PC-relative branch offset.
TWO_POW_SEVENTEEN = 131072
# Immediate-field bounds used when range-checking operands.
UINT16_MAX = 2**16 - 1
INT16_MAX = 2**15 - 1
INT16_MIN = -(2**15)
LONG_MAX = 2**63 - 1
LONG_MIN = -(2**63)
# Bit flags naming the operand fields an instruction format expects
# (combined into the `params` lists of the jtype/itype/rtype tables).
RS = 0x0001
RT = 0x0002
RD = 0x0004
SHAMT = 0x0008
IMM = 0x0010
BRANCH_LABEL = 0x0020
JUMP_LABEL = 0x0040
def strip_comments(line):
    """Drop everything from the first '#' onward in *line*.

    >>> strip_comments("Test string")
    'Test string'
    >>> strip_comments("Test #comment")
    'Test '
    >>> strip_comments("#hashtag")
    ''
    >>> strip_comments("Test#comment")
    'Test'
    """
    # str.partition returns the whole string as `head` when '#' is absent.
    head, _, _ = line.partition("#")
    return head
def tokenize(line):
    """Split *line* on whitespace, commas and parentheses.

    Returns (first_token, remaining_tokens).

    >>> tokenize("This,Test")
    ('This', ['Test'])
    >>> tokenize("word1 word2 word3")
    ('word1', ['word2', 'word3'])
    >>> tokenize("word1, word2, word3")
    ('word1', ['word2', 'word3'])
    """
    pieces = [piece for piece in re.split("[ \f\n\r\t\v,()]+", line) if piece]
    head, tail = pieces[0], pieces[1:]
    return head, tail
def is_label(token):
    """True when *token*'s final character is the ':' marking a label.

    >>> is_label("label:")
    True
    >>> is_label("label")
    False
    >>> is_label(":::::")
    True
    """
    # Index (rather than endswith) so an empty token still raises, as before.
    final_char = token[-1]
    return final_char == ":"
def raise_inst_error(line_num, name, args):
    """Print a diagnostic for a bad instruction on the given source line."""
    joined = " ".join(args)
    print("Error on line {0}: {1} {2}".format(line_num, name, joined))
# Canonical MIPS register mnemonics mapped to their register numbers (0-31).
register_table = {
    "$zero": 0,
    "$0": 0,
    "$at": 1,
    "$v0": 2,
    "$v1": 3,
    "$a0": 4,
    "$a1": 5,
    "$a2": 6,
    "$a3": 7,
    "$t0": 8,
    "$t1": 9,
    "$t2": 10,
    "$t3": 11,
    "$t4": 12,
    "$t5": 13,
    "$t6": 14,
    "$t7": 15,
    "$s0": 16,
    "$s1": 17,
    "$s2": 18,
    "$s3": 19,
    "$s4": 20,
    "$s5": 21,
    "$s6": 22,
    "$s7": 23,
    "$t8": 24,
    "$t9": 25,
    "$k0": 26,
    "$k1": 27,
    "$gp": 28,
    "$sp": 29,
    "$fp": 30,
    "$ra": 31,
}
def translate_reg(register):
    """Translate a register mnemonic (e.g. "$s0") to its register number.

    Raises invalid_register_name for unknown mnemonics.

    >>> translate_reg("$zero")
    0
    >>> translate_reg("$s0")
    16
    """
    try:
        return register_table[register]
    except KeyError:
        raise invalid_register_name(register)
def translate_num(number, lower_bound, upper_bound):
    """Parse a decimal or hex string into an int and range-check it.

    Raises translate_num_error when *number* is not parseable and
    translate_num_out_of_range when the value falls outside the bounds.

    >>> translate_num("1", 0, 10)
    1
    >>> translate_num("0x1", 0, 10)
    1
    >>> translate_num("0xABCD", LONG_MIN, LONG_MAX)
    43981
    """
    # int(x, 0) auto-detects the base from the prefix (0x / 0o / 0b / decimal).
    try:
        value = int(number, 0)
    except (ValueError, TypeError):
        raise translate_num_error(number)
    # Range-check OUTSIDE the try block: the old bare `except:` swallowed the
    # translate_num_out_of_range raised inside it and re-raised the misleading
    # generic translate_num_error instead.
    if value < lower_bound or value > upper_bound:
        raise translate_num_out_of_range(value, lower_bound, upper_bound)
    return value
def write_pass_one(name, args):
    """Expand one (possibly pseudo-) instruction for pass two.

    Returns a list of real-instruction strings.  Pseudo-instructions
    (li, move, rem, bge, bnez, blt, bgt, ble) expand to one or more real
    instructions; any other mnemonic passes through unchanged.
    Raises incorrect_number_of_parameters on an arity mismatch.
    """
    if name == "li":
        if len(args) != 2:
            raise incorrect_number_of_parameters(name, len(args), 2)
        imm = translate_num(args[1], LONG_MIN, LONG_MAX)
        # Small immediates fit a single addiu; larger need a lui/ori pair.
        if imm <= 0xffff and imm >= -0xffff:
            return ["addiu {0} $0 {1}".format(args[0], imm)]
        else:
            return ["lui $at {0}".format((imm >> 16) & 0xffff),
                    "ori {0} $at {1}".format(args[0], imm & 0xffff)]
    elif name == "move":
        if len(args) != 2:
            raise incorrect_number_of_parameters(name, len(args), 2)
        return ["addu {0} {1} $0".format(args[0], args[1])]
    elif name == "rem":
        if len(args) != 3:
            raise incorrect_number_of_parameters(name, len(args), 3)
        # div leaves the remainder in HI.
        return ["div {0} {1}".format(args[1], args[2]),
                "mfhi {0}".format(args[0])]
    elif name == "bge":
        if len(args) != 3:
            raise incorrect_number_of_parameters(name, len(args), 3)
        # a >= b  <=>  !(a < b): branch when the slt result is zero.
        return ["slt $at {0} {1}".format(args[0], args[1]),
                "beq $at $0 {0}".format(args[2])]
    elif name == "bnez":
        if len(args) != 2:
            raise incorrect_number_of_parameters(name, len(args), 2)
        return ["bne {0} $0 {1}".format(args[0], args[1])]
    elif name == "blt":
        # BUG FIX: blt/bgt/ble previously fell through (`pass`) and returned
        # None, crashing pass_one.  blt a,b,L => $at = (a < b); branch if set.
        if len(args) != 3:
            raise incorrect_number_of_parameters(name, len(args), 3)
        return ["slt $at {0} {1}".format(args[0], args[1]),
                "bne $at $0 {0}".format(args[2])]
    elif name == "bgt":
        # bgt a,b,L => $at = (b < a); branch if set.
        if len(args) != 3:
            raise incorrect_number_of_parameters(name, len(args), 3)
        return ["slt $at {0} {1}".format(args[1], args[0]),
                "bne $at $0 {0}".format(args[2])]
    elif name == "ble":
        # ble a,b,L => $at = (b < a); branch if clear (a <= b).
        if len(args) != 3:
            raise incorrect_number_of_parameters(name, len(args), 3)
        return ["slt $at {0} {1}".format(args[1], args[0]),
                "beq $at $0 {0}".format(args[2])]
    else:
        return ["{0} {1}".format(name, " ".join(args))]
def expected_args(code):
    """Count the set bits in the operand-field bitmask *code*.

    Non-positive codes yield 0, matching the original loop behaviour.
    """
    return bin(code).count("1") if code > 0 else 0
def write_inst(output, opcode, args, addr, symtbl, reltbl, params, is_funct, imm_min, imm_max):
    """Encode one instruction into a 32-bit word and append its hex form to output.

    params is the ordered operand-field list (RS/RT/RD/SHAMT/IMM/
    BRANCH_LABEL/JUMP_LABEL); is_funct selects R-type encoding (opcode goes
    in the low funct bits) versus opcode << 26.  Raises project exceptions
    for arity mismatches, bad registers/immediates, or unreachable branches.
    """
    if len(args) != len(params):
        raise incorrect_number_of_parameters(name_from_opcode(opcode), len(args), len(params))
    inst = 0x00000000
    if is_funct:
        # R-type: the table value is the funct field.
        inst = inst | opcode
    else:
        inst = inst | (opcode << 26)
    # Consume params/args in lockstep, OR-ing each field into place.
    while len(params) > 0:
        param, params = params[0], params[1:]
        arg, args = args[0], args[1:]
        if param == RS:
            inst = inst | (translate_reg(arg) << 21)
        elif param == RT:
            inst = inst | (translate_reg(arg) << 16)
        elif param == RD:
            inst = inst | (translate_reg(arg) << 11)
        elif param == SHAMT:
            inst = inst | (translate_num(arg, 0, 31) << 6)
        elif param == IMM:
            inst = inst | (translate_num(arg, imm_min, imm_max) & 0xFFFF)
        elif param == BRANCH_LABEL:
            label_addr = symtbl.get_addr(arg)
            if not can_branch_to(addr, label_addr):
                raise branch_out_of_range()
            # PC-relative word offset, measured from the delay slot (addr+4).
            offset = (label_addr - addr - 4) >> 2
            inst = inst | (offset & 0xFFFF)
        elif param == JUMP_LABEL:
            # Jump targets are resolved at link time: record a relocation.
            reltbl.add(arg, addr)
    write_inst_hex(output, inst)
def can_branch_to(src_addr, dest_addr):
    """Return True when dest_addr is reachable from src_addr with a 16-bit
    word-aligned branch offset."""
    delta = dest_addr - src_addr
    if delta >= 0:
        return delta <= TWO_POW_SEVENTEEN
    return delta >= -(TWO_POW_SEVENTEEN - 4)
# J-type: mnemonic -> (opcode, operand fields).
jtype = {
    "j": (0x02, [JUMP_LABEL]),
    "jal": (0x03, [JUMP_LABEL]),
}
# I-type: mnemonic -> (opcode, operand fields, immediate min, immediate max).
itype = {
    "beq": (0x04, [RS, RT, BRANCH_LABEL], 0, 0),
    "bne": (0x05, [RS, RT, BRANCH_LABEL], 0, 0),
    "blez": (0x06, [RS, BRANCH_LABEL], 0, 0),
    "bgtz": (0x07, [RS, BRANCH_LABEL], 0, 0),
    "addi": (0x08, [RT, RS, IMM], INT16_MIN, INT16_MAX),
    "addiu": (0x09, [RT, RS, IMM], INT16_MIN, INT16_MAX),
    "slti": (0x0a, [RT, RS, IMM], INT16_MIN, INT16_MAX),
    "sltiu": (0x0b, [RT, RS, IMM], 0, UINT16_MAX),
    "andi": (0x0c, [RT, RS, IMM], 0, UINT16_MAX),
    "ori": (0x0d, [RT, RS, IMM], 0, UINT16_MAX),
    "xori": (0x0e, [RT, RS, IMM], 0, UINT16_MAX),
    "lui": (0x0f, [RT, IMM], 0, UINT16_MAX),
    "lb": (0x20, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "lh": (0x21, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "lwl": (0x22, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "lw": (0x23, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "lbu": (0x24, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "lhu": (0x25, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "lwr": (0x26, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "sb": (0x28, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "sh": (0x29, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "swl": (0x2a, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "sw": (0x2b, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "swr": (0x2e, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "cache": (0x2f, [], 0, 0),
    "ll": (0x30, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "lwc1": (0x31, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "lwc2": (0x32, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "pref": (0x33, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "ldc1": (0x35, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "ldc2": (0x36, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "sc": (0x38, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "swc1": (0x39, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "swc2": (0x3a, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "sdc1": (0x3d, [RT, IMM, RS], INT16_MIN, INT16_MAX),
    "sdc2": (0x3e, [RT, IMM, RS], INT16_MIN, INT16_MAX),
}
# R-type: mnemonic -> (funct code, operand fields); the opcode field is 0.
rtype = {
    "sll": (0x00, [RD, RT, SHAMT]),
    "srl": (0x02, [RD, RT, SHAMT]),
    "sra": (0x03, [RD, RT, SHAMT]),
    "sllv": (0x04, [RD, RT, RS]),
    "srlv": (0x06, [RD, RT, RS]),
    "srav": (0x07, [RD, RT, RS]),
    "jr": (0x08, [RS]),
    "jalr": (0x09, [RS]),
    "movz": (0x0a, [RD, RS, RT]),
    "movn": (0x0b, [RD, RS, RT]),
    "syscall": (0x0c, []),
    "break": (0x0d, []),
    "sync": (0x0f, []),
    "mfhi": (0x10, [RD]),
    "mthi": (0x11, [RS]),
    "mflo": (0x12, [RD]),
    "mtlo": (0x13, [RS]),
    "mult": (0x18, [RS, RT]),
    "multu": (0x19, [RS, RT]),
    "div": (0x1a, [RS, RT]),
    "divu": (0x1b, [RS, RT]),
    "add": (0x20, [RD, RS, RT]),
    "addu": (0x21, [RD, RS, RT]),
    "sub": (0x22, [RD, RS, RT]),
    "subu": (0x23, [RD, RS, RT]),
    "and": (0x24, [RD, RS, RT]),
    "or": (0x25, [RD, RS, RT]),
    "xor": (0x26, [RD, RS, RT]),
    "nor": (0x27, [RD, RS, RT]),
    "slt": (0x2a, [RD, RS, RT]),
    "sltu": (0x2b, [RD, RS, RT]),
    "tge": (0x30, [RS, RT]),
    "tgeu": (0x31, [RS, RT]),
    "tlt": (0x32, [RS, RT]),
    "tltu": (0x33, [RS, RT]),
    "teq": (0x34, [RS, RT]),
    "tne": (0x36, [RS, RT]),
}
# Single lookup table covering every supported mnemonic (rtype, then itype,
# then jtype -- later tables win on key collisions, as before).
translate_table = {**rtype, **itype, **jtype}
def name_from_opcode(opcode):
    """Best-effort reverse lookup of a mnemonic from its opcode/funct value.

    Opcode values are not unique across formats, so the first match in table
    order is returned; '' when nothing matches.  Used for error messages only.
    """
    return next(
        (mnemonic for mnemonic, entry in translate_table.items() if entry[0] == opcode),
        "",
    )
def translate_inst(output, name, args, addr, symtbl, reltbl):
    """Translate one real instruction to machine code and append it to output.

    Raises translate_inst_error for unknown mnemonics; other errors come
    from write_inst.
    """
    if name not in translate_table:
        raise translate_inst_error(name, args)
    entry = translate_table[name]
    opcode = entry[0]
    params = entry[1]
    # Immediate bounds only exist for I-type formats that actually take an
    # immediate operand; everything else gets a (0, 0) placeholder.
    if name in itype and IMM in params:
        _, _, imm_min, imm_max = itype[name]
    else:
        imm_min = imm_max = 0
    # BUG FIX: write_inst was previously nested under `if name in itype`,
    # so R-type and J-type instructions were silently never emitted.
    write_inst(output, opcode, args, addr, symtbl, reltbl, params,
               name in rtype, imm_min, imm_max)
def pass_one(lines, symtbl):
    """First assembler pass: record labels and expand pseudo-instructions.

    Returns (intermediate, errors) where intermediate is the expanded
    instruction listing and errors is a list of (line_number, exception).
    (Also removes the dead `ret_code` local and the unused `err` binding.)
    """
    errors = []
    byte_off = 0
    intermediate = []
    # Enumerate from 1 so reported line numbers are human-friendly.
    for line_num, line in enumerate(lines, 1):
        try:
            name, args = tokenize(line)
            if name == "":
                continue
            if is_label(name):
                # A label resolves to the current offset in the text segment.
                symtbl.add(name[:-1], byte_off)
                if len(args) == 0:
                    continue
                # An instruction may follow the label on the same line.
                name = args[0]
                args = args[1:]
            instructions = write_pass_one(name, args)
            intermediate += instructions
            byte_off += len(instructions) * 4  # 4 bytes per MIPS instruction
        except AssemblerException as e:
            errors += [(line_num, e)]
    return intermediate, errors
def pass_two(lines, symtbl, reltbl):
    """Second assembler pass: translate expanded instructions to machine code.

    Returns (output, errors); output holds the '.text' hex listing followed
    by dumps of the symbol and relocation tables.
    """
    output = [".text"]
    errors = []
    line_num = 0
    byte_off = 0
    for line in lines:
        try:
            line_num += 1
            name, args = tokenize(line)
            translate_inst(output, name, args, byte_off, symtbl, reltbl)
            byte_off += 4  # every real instruction occupies 4 bytes
        except AssemblerException as e:
            errors += [(line_num, e)]
    # Append the table sections after the machine code.
    output += ["", ".symbol"] + symtbl.to_string()
    output += ["", ".relocation"] + reltbl.to_string()
    return output, errors
def assemble(input_file):
    """Run the full two-pass assembly of *input_file*.

    Returns (intermediate, output): the pseudo-expanded listing and the final
    object-file lines.  Errors from either pass are reported to stdout.
    """
    # Strip comments/whitespace, then drop blank lines before processing.
    cleaned = [strip_comments(line).strip() for line in utils.read_file_to_list(input_file)]
    asm = [line for line in cleaned if line != ""]
    symtbl = SymbolTable(False)
    reltbl = SymbolTable(True)
    # Pass One
    intermediate, errors_one = pass_one(asm, symtbl)
    # Pass Two
    output, errors_two = pass_two(intermediate, symtbl, reltbl)
    if len(errors_one) > 0:
        print("Errors during pass one:")
        for line_num, e in errors_one:
            print("Error: line {0}: {1}".format(line_num, e))
    if len(errors_two) > 0:
        print("Errors during pass two:")
        for line_num, e in errors_two:
            print("Error: line {0}: {1}".format(line_num, e))
    if len(errors_one) > 0 or len(errors_two) > 0:
        print("One or more errors encountered during assembly operation")
    return intermediate, output
def main():
    """CLI entry point: assemble each input file to `<name>.o` (and
    optionally `<name>.int` for the intermediate listing)."""
    parser = argparse.ArgumentParser(prog="mipsa", description='Assemble a MIPS assembly program. Outputs an object file for every input file.')
    parser.add_argument("files", action="store", nargs="+", type=str, help="list of assembly files to process")
    parser.add_argument("--int", action="store_true", default=False, help="output intermediate files")
    args = parser.parse_args()
    for input_file in args.files:
        ints, objs = assemble(input_file)
        file_name = utils.get_file_name(input_file)
        if args.int:
            # Optionally dump the pseudo-expanded intermediate listing.
            int_file = file_name + ".int"
            utils.write_file_from_list(int_file, ints)
        obj_file = file_name + ".o"
        utils.write_file_from_list(obj_file, objs)
if __name__ == "__main__": main()
| |
import datetime
import re
from string import capwords
from typing import List, Optional, Tuple, Union
from loguru import logger
from flexget.utils.qualities import Quality
# Module-scoped logger for the parser utilities.
logger = logger.bind(name='parser')
# Identifier schemes a series parser can produce.
SERIES_ID_TYPES = ['ep', 'date', 'sequence', 'id']
def clean_value(name: str) -> str:
    """Normalize a release title: map bracket/separator punctuation to spaces
    (dashes too, when the name contains no spaces) and collapse whitespace."""
    table = str.maketrans({char: ' ' for char in '[]()_,.'})
    name = name.translate(table)
    # if there are no spaces
    if ' ' not in name:
        name = name.replace('-', ' ')
    # MovieParser.strip_spaces
    return ' '.join(name.split())
def old_assume_quality(guessed_quality: Quality, assumed_quality: Quality) -> Quality:
    """Overlay truthy components of *assumed_quality* onto *guessed_quality*.

    Returns the assumed quality outright when nothing was guessed, and the
    (possibly updated) guessed quality otherwise.
    """
    if not assumed_quality:
        return guessed_quality
    if not guessed_quality:
        return assumed_quality
    for component in ('resolution', 'source', 'codec', 'audio'):
        override = getattr(assumed_quality, component)
        if override:
            setattr(guessed_quality, component, override)
    return guessed_quality
def remove_dirt(name: str) -> str:
    """Collapse separator punctuation to single spaces and lowercase *name*.

    Falsy input (None / empty string) is returned unchanged.
    """
    if not name:
        return name
    return re.sub(r'[_.,\[\]\(\): ]+', ' ', name).strip().lower()
def normalize_name(name: str) -> str:
    """Capitalize each whitespace-separated word (string.capwords)."""
    return capwords(name)
class MovieParseResult:
    """Result object produced when a movie release title is parsed.

    Holds the extracted name/year/quality plus proper-count and release-group
    details, and derives a lower-cased identifier from name and year.
    """

    def __init__(
        self,
        data: str = None,
        name: str = None,
        year: Optional[int] = None,
        quality: Quality = None,
        proper_count: int = 0,
        release_group: Optional[str] = None,
        valid: bool = True,
    ) -> None:
        self.name: str = name
        self.data: str = data
        self.year: Optional[int] = year
        # Fall back to an empty Quality so callers can always inspect it.
        self.quality: Quality = Quality() if quality is None else quality
        self.proper_count: int = proper_count
        self.release_group: Optional[str] = release_group
        self.valid: bool = valid

    @property
    def identifier(self) -> str:
        """Lower-cased 'name year' (or just name); None when no name parsed."""
        if self.name and self.year:
            return f'{self.name} {self.year}'.strip().lower()
        if self.name:
            return self.name.lower()

    @property
    def proper(self) -> bool:
        """True when at least one proper/repack tag was counted."""
        return self.proper_count > 0

    @property
    def fields(self) -> dict:
        """Return a dict of all parser fields."""
        return {
            'id': self.identifier,
            'movie_parser': self,
            'movie_name': self.name,
            'movie_year': self.year,
            'proper': self.proper,
            'proper_count': self.proper_count,
            'release_group': self.release_group,
        }

    def __str__(self) -> str:
        status = 'OK' if self.valid else 'INVALID'
        return (
            f'<MovieParseResult(data={self.data},name={self.name},year={self.year},'
            f'id={self.identifier},quality={self.quality},proper={self.proper_count},'
            f'release_group={self.release_group},status={status})>'
        )
class SeriesParseResult:
    """Result object produced when a series/episode release title is parsed.

    The meaning of `id` depends on `id_type`: an (season, episode) tuple for
    'ep', a date for 'date', an int for 'sequence', or a raw string for 'id'.
    """
    def __init__(
        self,
        data: str = None,
        name: str = None,
        identified_by: str = None,
        id_type: str = None,
        id: Union[Tuple[int, int], str, int, datetime.date] = None,
        episodes: int = 1,
        season_pack: bool = False,
        strict_name: bool = False,
        quality: Quality = None,
        proper_count: int = 0,
        special: bool = False,
        group: Optional[str] = None,
        valid: bool = True,
    ) -> None:
        self.name: str = name
        self.data: str = data
        self.episodes: int = episodes
        self.season_pack: bool = season_pack
        self.identified_by: str = identified_by
        self.id: Union[Tuple[int, int], str, int, datetime.date] = id
        self.id_type: str = id_type
        # Fall back to an empty Quality so callers can always inspect it.
        self.quality: Quality = quality if quality is not None else Quality()
        self.proper_count: int = proper_count
        self.special: bool = special
        self.group: Optional[str] = group
        self.valid: bool = valid
        self.strict_name: bool = strict_name
    @property
    def proper(self) -> bool:
        # True when at least one proper/repack tag was counted.
        return self.proper_count > 0
    @property
    def season(self) -> Optional[int]:
        """Season number derived from the id ('ep' tuple, date year, or 0)."""
        if self.id_type == 'ep':
            return self.id[0]
        if self.id_type == 'date':
            return self.id.year
        if self.id_type == 'sequence':
            return 0
        return None
    @property
    def episode(self) -> Optional[int]:
        """Episode number derived from the id; None for date/id types."""
        if self.id_type == 'ep':
            return self.id[1]
        if self.id_type == 'sequence':
            return self.id
        return None
    @property
    def identifiers(self) -> List[str]:
        """Return all identifiers this parser represents. (for packs)"""
        # Currently 'ep' is the only id type that supports packs
        if not self.valid:
            raise Exception('Series flagged invalid')
        if self.id_type == 'ep':
            if self.season_pack:
                return ['S%02d' % self.season]
            return ['S%02dE%02d' % (self.season, self.episode + x) for x in range(self.episodes)]
        elif self.id_type == 'date':
            return [self.id.strftime('%Y-%m-%d')]
        # NOTE(review): this None-check runs only after the 'ep'/'date'
        # branches, so a missing id in those modes surfaces as a TypeError
        # above rather than this explicit error — confirm if intended.
        if self.id is None:
            raise Exception('Series is missing identifier')
        else:
            return [self.id]
    @property
    def identifier(self) -> str:
        """Return String identifier for parsed episode, eg. S01E02
        (will be the first identifier if this is a pack)
        """
        return self.identifiers[0]
    @property
    def pack_identifier(self) -> str:
        """Return a combined identifier for the whole pack if this has more than one episode."""
        # Currently only supports ep mode
        if self.id_type == 'ep':
            if self.episodes > 1:
                # e.g. S01E02-E04 for a three-episode span.
                return 'S%02dE%02d-E%02d' % (
                    self.season,
                    self.episode,
                    self.episode + self.episodes - 1,
                )
            else:
                return self.identifier
        else:
            return self.identifier
    def __str__(self) -> str:
        valid = 'OK' if self.valid else 'INVALID'
        return '<SeriesParseResult(data=%s,name=%s,id=%s,season=%s,season_pack=%s,episode=%s,quality=%s,proper=%s,' 'special=%s,status=%s)>' % (
            self.data,
            self.name,
            str(self.id),
            self.season,
            self.season_pack,
            self.episode,
            self.quality,
            self.proper_count,
            self.special,
            valid,
        )
| |
# encoding: utf-8
from __future__ import unicode_literals
from datetime import datetime
from mongoengine import Document, ReferenceField, IntField, EmbeddedDocumentField, BooleanField, DynamicField, DateTimeField
from .compat import py2, py3, unicode
from .queryset import CappedQuerySet
from .structure import Owner
class Message(Document):
    """The basic properties of a MongoDB message queue."""
    # Stored in a capped 'TaskQueue' collection so consumers can tail it.
    meta = dict(
        collection = 'TaskQueue',
        max_documents = 65535,
        max_size = 100 * 1024 * 1024,
        queryset_class = CappedQuerySet,
        allow_inheritance = True,
        # abstract = True # While technically abstract, we'd still like to query the base class.
    )
    # Identity (host/pid/ppid) of the process that created the message.
    sender = EmbeddedDocumentField(Owner, db_field='s', default=Owner.identity)
    created = DateTimeField(db_field='c', default=datetime.utcnow)
    processed = BooleanField(db_field='p', default=False)
    def __repr__(self, inner=None):
        # Subclasses pass extra detail via `inner` to extend the repr.
        if inner:
            return '{0.__class__.__name__}({0.id}, host={1.host}, pid={1.pid}, ppid={1.ppid}, {2})'.format(self, self.sender, inner)
        return '{0.__class__.__name__}({0.id}, host={1.host}, pid={1.pid}, ppid={1.ppid})'.format(self, self.sender)
    def __str__(self):
        return "{0.__class__.__name__}".format(self)
    def __bytes__(self):
        return unicode(self).encode('unicode_escape')
    # Python 2 swaps the text/bytes protocol method names.
    if py2: # pragma: no cover
        __unicode__ = __str__
        __str__ = __bytes__
class Keepalive(Message):
    """An empty message used as a keepalive.
    Due to a quirk in MongoDB, a capped collection must have at least one record before you can 'tail' it.
    """
    # No fields beyond the Message base.
    pass
class TaskMessage(Message):
    """Messages which relate to queued jobs.
    For an easy way to monitor all messages relating to a Task, use the Task's `messages` attribute.
    """
    # The Task this message is about.
    task = ReferenceField('Task', required=True, db_field='t')
    def __repr__(self, inner=None):
        # Prefix every subclass repr with the related task id.
        if inner:
            return super(TaskMessage, self).__repr__('task={0.task.id}, {1}'.format(self, inner))
        return super(TaskMessage, self).__repr__('task={0.task.id}'.format(self))
    def __unicode__(self):
        return "{0.__class__.__name__}".format(self)
    if py3: # pragma: no cover
        __str__ = __unicode__
class TaskAdded(TaskMessage):
    """A new task has been added to the queue."""
    pass # No additional data is required.
class TaskAddedRescheduled(TaskAdded):
    """Task has been added to the queue in result of prior reschedule."""
    # Marker subclass only; carries no extra fields.
    pass
class TaskScheduled(TaskAdded):
    """A scheduled task has been added to the queue."""
    # When the task is due to run.
    when = DateTimeField(db_field='w')
    def __repr__(self, inner=None):
        return super(TaskScheduled, self).__repr__('when={0}'.format(self.when))
class TaskProgress(TaskMessage):
    """A record broadcast back out to indicate progress on a task.
    While the latest `current` and `total` values are mirrored on the Task record, all messages are recorded there.
    """
    # Status codes for a progress message.
    NORMAL, FINISHED, FAILED = range(3)
    current = IntField(db_field='a')
    total = IntField(db_field='b')
    result = DynamicField(db_field='r')
    status = IntField(db_field='st', default=NORMAL, choices=[
        (NORMAL, 'normal'),
        (FINISHED, 'finished'),
        (FAILED, 'failed'),
    ])
    @property
    def percentage(self):
        # NOTE(review): divides unconditionally, so total of 0/None raises;
        # the callers below guard with `if self.total` before using it.
        return self.current * 1.0 / self.total
    def __repr__(self, inner=None):
        pct = "{0:.0%}".format(self.percentage) if self.total else "N/A"
        msg = unicode(self.result)
        return super(TaskProgress, self).__repr__('{0.current}/{0.total}, {1}, result={2}, status={3}'.format(self, pct, msg, self.get_status_display()))
    def __unicode__(self):
        # Prefer the result payload, then a percentage, then a generic note.
        if self.result:
            return unicode(self.result)
        if self.total:
            return "{0:.0%}".format(self.percentage)
        return "Task indicates progress."
    if py3: # pragma: no cover
        __str__ = __unicode__
class TaskAcquired(TaskMessage):
    """Indicate that a task has been acquired by a worker."""
    # Identity of the worker that locked the task.
    owner = EmbeddedDocumentField(Owner, db_field='o')
    def __unicode__(self):
        return "Task {0.task.id} locked by PID {0.sender.pid} on host: {0.sender.host}".format(self)
    if py3: # pragma: no cover
        __str__ = __unicode__
class ReschedulePeriodic(TaskMessage):
    """Indicates that next periodic task's iteration will be requested at specified time."""
    # When the next iteration should be requested.
    when = DateTimeField(db_field='w')
    def __repr__(self, inner=None):
        return super(ReschedulePeriodic, self).__repr__('when={0}'.format(self.when))
class TaskRetry(TaskMessage):
    """Indicates the given task has been rescheduled."""
    def __unicode__(self):
        return "Task {0.task.id} scheduled for retry by PID {0.sender.pid} on host: {0.sender.host}".format(self)
    if py3: # pragma: no cover
        __str__ = __unicode__
class TaskFinished(TaskMessage):
    """Common parent class for cancellation or completion."""
    # Marker base class; carries no extra fields.
    pass
class TaskCancelled(TaskFinished):
    """Indicate that a task has been cancelled."""
    def __unicode__(self):
        return "Task {0.task.id} cancelled by PID {0.sender.pid} on host: {0.sender.host}".format(self)
    if py3: # pragma: no cover
        __str__ = __unicode__
class TaskCompletedPeriodic(TaskFinished):
    """Indicate completion of last iteration of periodic task."""
    # Marker subclass only; carries no extra fields.
    pass
class TaskComplete(TaskFinished):
    """Indicate completion of a task.
    You can monitor for completion without caring about the actual result.
    """
    # Whether the task finished successfully, plus its (dynamic) result value.
    success = BooleanField(db_field='su', default=True)
    result = DynamicField(db_field='r')
    def __repr__(self, inner=None):
        return super(TaskComplete, self).__repr__('success={0!r}'.format(self.success))
    def __unicode__(self):
        if self.success:
            return "Task {0.task.id} completed successfully.".format(self)
        return "Task {0.task.id} failed to complete successfully.".format(self)
    if py3: # pragma: no cover
        __str__ = __unicode__
class IterationRequest(TaskMessage):
    """Indicate that client need next iteration of generator task."""
    # Marker subclass only; carries no extra fields.
    pass
| |
from __future__ import print_function
import functools
import os
import platform
import sys
import threading
from pprint import pprint
import pytest
import hunter
from hunter import And
from hunter import Backlog
from hunter import CallPrinter
from hunter import CodePrinter
from hunter import Debugger
from hunter import ErrorSnooper
from hunter import From
from hunter import Q
from hunter import VarsPrinter
from hunter import VarsSnooper
from hunter import When
from hunter.actions import StackPrinter
from utils import DebugCallPrinter
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
# The pure-Python tracer gets a lightweight fake frame/tracer pair defined
# here; the compiled build ships its own EvilTracer in `eviltracer`.
if hunter.Tracer.__module__ == 'hunter.tracer':
    class EvilFrame(object):
        # Minimal stand-in for a frame object; class attributes are defaults
        # and are overridden per-event via constructor kwargs.
        f_back = None
        f_globals = {}
        f_locals = {}
        f_lineno = 0
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)
    class EvilTracer(object):
        # Records every traced event, then replays them through the
        # predicate when the context manager exits.
        is_pure = True
        def __init__(self, *args, **kwargs):
            self._calls = []
            threading_support = kwargs.pop('threading_support', False)
            clear_env_var = kwargs.pop('clear_env_var', False)
            self.handler = hunter._prepare_predicate(*args, **kwargs)
            self._tracer = hunter.trace(self._append, threading_support=threading_support, clear_env_var=clear_env_var)
        def _append(self, event):
            # Identity detach keeps objects alive, but the frame is swapped
            # for an EvilFrame snapshot of the interesting attributes.
            detached_event = event.detach(lambda obj: obj)
            detached_event.detached = False
            detached_event.frame = EvilFrame(
                f_globals=event.frame.f_globals,
                f_locals=event.frame.f_locals,
                f_back=event.frame.f_back,
                f_lasti=event.frame.f_lasti,
                f_code=event.code
            )
            self._calls.append(detached_event)
        def __enter__(self):
            return self
        def __exit__(self, exc_type, exc_val, exc_tb):
            # Stop tracing first, then evaluate the predicate over the log.
            self._tracer.stop()
            predicate = self.handler
            for call in self._calls:
                predicate(call)
else:
    from eviltracer import EvilTracer
trace = EvilTracer
pytest_plugins = 'pytester',
def test_mix_predicates_with_callables():
    """Predicates must compose with plain callables via & and |, with either operand first."""
    query = Q(module=1)
    plain = lambda: 2
    # Exercise both a Q-wrapped callable and a bare callable on each side.
    for other in (Q(plain), plain):
        hunter._prepare_predicate(query | other)
        hunter._prepare_predicate(other | query)
        hunter._prepare_predicate(query & other)
        hunter._prepare_predicate(other & query)
def test_predicate_reverse_and_or():
    """str() of predicates composed with a foreign callable must place the callable on the correct side."""
    # A callable with a stable repr so the composed predicate's str() is predictable.
    class Foobar(object):
        def __str__(self):
            return 'Foobar'
        __repr__ = __str__
        def __call__(self, *args, **kwargs):
            pass
    foobar = Foobar()
    # Foreign object on the left: Python falls back to the predicate's __rand__/__ror__.
    assert str(foobar & Q(module=1)) == 'And(Foobar, Query(module=1))'
    assert str(foobar | Q(module=1)) == 'Or(Foobar, Query(module=1))'
    assert str(foobar & (Q(module=1) | Q(module=2))) == 'And(Foobar, Or(Query(module=1), Query(module=2)))'
    assert str(foobar | (Q(module=1) | Q(module=2))) == 'Or(Foobar, Query(module=1), Query(module=2))'
    assert str(foobar & (Q(module=1) & Q(module=2))) == 'And(Foobar, Query(module=1), Query(module=2))'
    assert str(foobar | (Q(module=1) & Q(module=2))) == 'Or(Foobar, And(Query(module=1), Query(module=2)))'
    assert str(foobar & ~Q(module=1)) == 'And(Foobar, Not(Query(module=1)))'
    assert str(foobar | ~Q(module=1)) == 'Or(Foobar, Not(Query(module=1)))'
    assert str(foobar & Q(module=1, action=foobar)) == 'And(Foobar, When(Query(module=1), Foobar))'
    assert str(foobar | Q(module=1, action=foobar)) == 'Or(Foobar, When(Query(module=1), Foobar))'
    assert str(foobar & ~Q(module=1, action=foobar)) == 'And(Foobar, Not(When(Query(module=1), Foobar)))'
    assert str(foobar | ~Q(module=1, action=foobar)) == 'Or(Foobar, Not(When(Query(module=1), Foobar)))'
    assert str(foobar & From(module=1, depth=2)) == 'And(Foobar, From(Query(module=1), Query(depth=2), watermark=0))'
    assert str(foobar | From(module=1, depth=2)) == 'Or(Foobar, From(Query(module=1), Query(depth=2), watermark=0))'
    assert str(foobar & ~From(module=1, depth=2)) == 'And(Foobar, Not(From(Query(module=1), Query(depth=2), watermark=0)))'
    assert str(foobar | ~From(module=1, depth=2)) == 'Or(Foobar, Not(From(Query(module=1), Query(depth=2), watermark=0)))'
    assert str(From(module=1, depth=2) & foobar) == 'And(From(Query(module=1), Query(depth=2), watermark=0), Foobar)'
    assert str(From(module=1, depth=2) | foobar) == 'Or(From(Query(module=1), Query(depth=2), watermark=0), Foobar)'
    # Backlog's repr has a variable tail, so only the prefix is checked.
    assert str(foobar & Backlog(module=1)).startswith('And(Foobar, Backlog(Query(module=1), ')
    assert str(foobar | Backlog(module=1)).startswith('Or(Foobar, Backlog(Query(module=1), ')
    assert str(foobar & ~Backlog(module=1)).startswith('And(Foobar, Backlog(Not(Query(module=1)), ')
    assert str(foobar | ~Backlog(module=1)).startswith('Or(Foobar, Backlog(Not(Query(module=1)), ')
    assert str(Backlog(module=1) & foobar).startswith('And(Backlog(Query(module=1), ')
    assert str(Backlog(module=1) | foobar).startswith('Or(Backlog(Query(module=1), ')
    # Foreign object on the right: the predicate's own __and__/__or__ run.
    assert str(Q(module=1) & foobar) == 'And(Query(module=1), Foobar)'
    assert str(Q(module=1) | foobar) == 'Or(Query(module=1), Foobar)'
    assert str(~Q(module=1) & foobar) == 'And(Not(Query(module=1)), Foobar)'
    assert str(~Q(module=1) | foobar) == 'Or(Not(Query(module=1)), Foobar)'
    assert str(Q(module=1, action=foobar) & foobar) == 'And(When(Query(module=1), Foobar), Foobar)'
    assert str(Q(module=1, action=foobar) | foobar) == 'Or(When(Query(module=1), Foobar), Foobar)'
    assert str(~Q(module=1, action=foobar) & foobar) == 'And(Not(When(Query(module=1), Foobar)), Foobar)'
    assert str(~Q(module=1, action=foobar) | foobar) == 'Or(Not(When(Query(module=1), Foobar)), Foobar)'
def test_threading_support(LineMatcher):
    """Events from both the main thread and a spawned thread are traced, with thread-name prefixes in output."""
    lines = StringIO()
    idents = set()
    names = set()
    started = threading.Event()
    def record(event):
        idents.add(event.threadid)
        names.add(event.threadname)
        return True
    with hunter.trace(record,
                      actions=[CodePrinter(stream=lines), VarsPrinter('a', stream=lines), CallPrinter(stream=lines)],
                      threading_support=True):
        def foo(a=1):
            started.set()
            print(a)
        def main():
            foo()
        t = threading.Thread(target=foo)
        t.start()
        started.wait(10)
        main()
    lm = LineMatcher(lines.getvalue().splitlines())
    # Main-thread events report threadid None; the worker reports its real ident.
    assert idents - {t.ident} == {None}
    assert 'MainThread' in names
    assert any(name.startswith('Thread-') for name in names)
    lm.fnmatch_lines_random([
        'Thread-* *test_tracer.py:* call              def foo(a=1):',
        'Thread-* *test_tracer.py:* call      [[]a => 1[]]',
        'Thread-* *test_tracer.py:* call      => foo(a=1)',
        'Thread-* *test_tracer.py:* call      [[]a => 1[]]',
        'MainThread *test_tracer.py:* call              def foo(a=1):',
        'MainThread *test_tracer.py:* call      [[]a => 1[]]',
        'MainThread *test_tracer.py:* call      => foo(a=1)',
        'MainThread *test_tracer.py:* call      [[]a => 1[]]',
    ])
@pytest.mark.parametrize('query', [{'threadid': None}, {'threadname': 'MainThread'}])
def test_thread_filtering(LineMatcher, query):
    """~Q on threadid/threadname must exclude all main-thread events while keeping worker-thread ones."""
    lines = StringIO()
    idents = set()
    names = set()
    started = threading.Event()
    def record(event):
        idents.add(event.threadid)
        names.add(event.threadname)
        return True
    with hunter.trace(~Q(**query), record,
                      actions=[CodePrinter(stream=lines), VarsPrinter('a', stream=lines), CallPrinter(stream=lines)],
                      threading_support=True):
        def foo(a=1):
            started.set()
            print(a)
        def main():
            foo()
        t = threading.Thread(target=foo)
        t.start()
        started.wait(10)
        main()
    lm = LineMatcher(lines.getvalue().splitlines())
    print(lines.getvalue())
    # The negated query must have filtered out every main-thread event.
    assert None not in idents
    assert 'MainThread' not in names
    pprint(lm.lines)
    lm.fnmatch_lines_random([
        'Thread-* *test_tracer.py:* call              def foo(a=1):',
        'Thread-* *test_tracer.py:* call      [[]a => 1[]]',
        'Thread-* *test_tracer.py:* call      => foo(a=1)',
        'Thread-* *test_tracer.py:* call      [[]a => 1[]]',
    ])
def test_tracing_printing_failures(LineMatcher):
    """Actions must degrade gracefully (emit FAILED REPR markers) when repr() of a traced value raises."""
    lines = StringIO()
    with trace(actions=[CodePrinter(stream=lines, repr_func=repr), VarsPrinter('x', stream=lines, repr_func=repr)]):
        class Bad(object):
            __slots__ = []
            def __repr__(self):
                raise RuntimeError("I'm a bad class!")
        def a():
            x = Bad()
            return x
        def b():
            x = Bad()
            raise Exception(x)
        a()
        try:
            b()
        except Exception as exc:
            # Deliberately swallowed: the raise itself is what gets traced.
            pass
    lm = LineMatcher(lines.getvalue().splitlines())
    print(lines.getvalue())
    lm.fnmatch_lines([
        """*tests*test_*.py:* call              class Bad(object):""",
        """*tests*test_*.py:* line              class Bad(object):""",
        """*tests*test_*.py:* line                  def __repr__(self):""",
        """*tests*test_*.py:* return                def __repr__(self):""",
        """* ...       return value: *""",
        """*tests*test_*.py:* call              def a():""",
        """*tests*test_*.py:* line                  x = Bad()""",
        """*tests*test_*.py:* line                  return x""",
        """*tests*test_*.py:* line      [[]x => !!! FAILED REPR: RuntimeError("I'm a bad class!"*)[]]""",
        """*tests*test_*.py:* return                return x""",
        """* ...       return value: !!! FAILED REPR: RuntimeError("I'm a bad class!"*)""",
        """*tests*test_*.py:* call              def b():""",
        """*tests*test_*.py:* line                  x = Bad()""",
        """*tests*test_*.py:* line                  raise Exception(x)""",
        """*tests*test_*.py:* line      [[]x => !!! FAILED REPR: RuntimeError("I'm a bad class!"*)[]]""",
        """*tests*test_*.py:* exception             raise Exception(x)""",
        """* ...       exception value: !!! FAILED REPR: RuntimeError("I'm a bad class!"*)""",
        """*tests*test_*.py:* return                raise Exception(x)""",
        """* ...       return value: None""",
    ])
def test_tracing_vars(LineMatcher):
    """VarsPrinter reports the watched name after every assignment, including on the return event."""
    lines = StringIO()
    with hunter.trace(actions=[VarsPrinter('b', stream=lines), CodePrinter(stream=lines)]):
        def a():
            b = 1
            b = 2
            return 1
        b = a()
        b = 2
        try:
            raise Exception('BOOM!')
        except Exception:
            pass
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines([
        "*test_tracer.py* call              def a():",
        "*test_tracer.py* line                  b = 1",
        "*test_tracer.py* line      [[]b => 1[]]",
        "*test_tracer.py* line                  b = 2",
        "*test_tracer.py* line      [[]b => 2[]]",
        "*test_tracer.py* line                  return 1",
        "*test_tracer.py* return    [[]b => 2[]]",
        "*test_tracer.py* return                return 1",
        "* ...       return value: 1",
    ])
def test_tracing_vars_expressions(LineMatcher):
    """VarsPrinter accepts arbitrary expressions (attribute access, calls, subscripts), not just bare names."""
    lines = StringIO()
    with hunter.trace(actions=[VarsPrinter('Foo.bar', 'vars(Foo)', 'len(range(2))', 'Foo.__dict__["bar"]', stream=lines)]):
        def main():
            class Foo(object):
                bar = 1
        main()
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines_random([
        '* [[]Foo.bar => 1[]]',
        '* [[]vars(Foo) => *[]]',
        '* [[]len(range(2)) => 2[]]',
        '* [[]Foo.__dict__[[]"bar"[]] => 1[]]',
    ])
def test_trace_merge():
    """Each nested trace installs its own handler; stopping it restores the enclosing trace's handler."""
    def installed_handler():
        # The active tracer is whatever sys.gettrace() currently reports.
        return sys.gettrace().handler
    with hunter.trace(function='a'):
        with hunter.trace(function='b'):
            with hunter.trace(function='c'):
                assert installed_handler() == When(Q(function='c'), CallPrinter)
            assert installed_handler() == When(Q(function='b'), CallPrinter)
        assert installed_handler() == When(Q(function='a'), CallPrinter)
def test_trace_api_expansion():
    """Keyword arguments to trace() must expand into the expected When/And predicate trees."""
    # simple use
    with trace(function='foobar') as t:
        assert t.handler == When(Q(function='foobar'), CallPrinter)
    # 'or' by expression
    with trace(module='foo', function='foobar') as t:
        assert t.handler == When(Q(module='foo', function='foobar'), CallPrinter)
    # pdb.set_trace
    with trace(function='foobar', action=Debugger) as t:
        assert str(t.handler) == str(When(Q(function='foobar'), Debugger))
    # pdb.set_trace on any hits
    with trace(module='foo', function='foobar', action=Debugger) as t:
        assert str(t.handler) == str(When(Q(module='foo', function='foobar'), Debugger))
    # pdb.set_trace when function is foobar, otherwise just print when module is foo
    with trace(Q(function='foobar', action=Debugger), module='foo') as t:
        assert str(t.handler) == str(When(And(
            When(Q(function='foobar'), Debugger),
            Q(module='foo')
        ), CallPrinter))
    # dumping variables from stack
    with trace(Q(function='foobar', action=VarsPrinter('foobar')), module='foo') as t:
        assert str(t.handler) == str(When(And(
            When(Q(function='foobar'), VarsPrinter('foobar')),
            Q(module='foo'),
        ), CallPrinter))
    with trace(Q(function='foobar', action=VarsPrinter('foobar', 'mumbojumbo')), module='foo') as t:
        assert str(t.handler) == str(When(And(
            When(Q(function='foobar'), VarsPrinter('foobar', 'mumbojumbo')),
            Q(module='foo'),
        ), CallPrinter))
    # multiple actions
    with trace(Q(function='foobar', actions=[VarsPrinter('foobar'), Debugger]), module='foo') as t:
        assert str(t.handler) == str(When(And(
            When(Q(function='foobar'), VarsPrinter('foobar'), Debugger),
            Q(module='foo'),
        ), CallPrinter))
def test_locals():
    """A callable predicate can filter on event.locals; output stops once the condition no longer holds."""
    out = StringIO()
    with hunter.trace(
            lambda event: event.locals.get('node') == 'Foobar',
            module=__name__,
            function='foo',
            action=CodePrinter(stream=out)
    ):
        def foo():
            a = 1
            node = 'Foobar'
            node += 'x'
            a += 2
            return a
        foo()
    # Only the line where node still equals 'Foobar' is the last one printed.
    assert out.getvalue().endswith("node += 'x'\n")
def test_fullsource_decorator_issue(LineMatcher):
    """Call events on decorated functions must report the decorator lines as part of the source."""
    out = StringIO()
    with trace(kind='call', action=CodePrinter(stream=out)):
        foo = bar = lambda x: x
        @foo
        @bar
        def foo():
            return 1
        foo()
    lm = LineMatcher(out.getvalue().splitlines())
    lm.fnmatch_lines([
        '* call              @foo',
        '* |                 @bar',
        '* *                 def foo():',
    ])
def test_callprinter(LineMatcher):
    """CallPrinter renders call arguments and return values for lambdas and decorated functions."""
    out = StringIO()
    with trace(action=CallPrinter(stream=out)):
        foo = bar = lambda x: x
        @foo
        @bar
        def foo():
            return 1
        foo()
    lm = LineMatcher(out.getvalue().splitlines())
    lm.fnmatch_lines([
        '* call      => <lambda>(x=<function *foo at *>)',
        '* line         foo = bar = lambda x: x',
        '* return    <= <lambda>: <function *foo at *>',
        '* call      => <lambda>(x=<function *foo at *>)',
        '* line         foo = bar = lambda x: x',
        '* return    <= <lambda>: <function *foo at *>',
        '* call      => foo()',
        '* line         return 1',
        '* return    <= foo: 1',
    ])
def test_callprinter_indent(LineMatcher):
    """CallPrinter keeps call/return pairing and indentation correct across nested and raising calls (sample6)."""
    from sample6 import bar
    out = StringIO()
    with trace(action=CallPrinter(stream=out)):
        bar()
    lm = LineMatcher(out.getvalue().splitlines())
    lm.fnmatch_lines([
        "*sample6.py:1     call      => bar()",
        "*sample6.py:2     line         foo()",
        "*sample6.py:5     call         => foo()",
        "*sample6.py:6     line            try:",
        "*sample6.py:7     line            asdf()",
        "*sample6.py:16    call            => asdf()",
        "*sample6.py:17    line               raise Exception()",
        "*sample6.py:17    exception        ! asdf: (<*Exception'>, Exception(), <traceback object at *>)",
        "*sample6.py:17    return          <= asdf: None",
        "*sample6.py:7     exception     ! foo: (<*Exception'>, Exception(), <traceback object at *>)",
        "*sample6.py:8     line            except:",
        "*sample6.py:9     line            pass",
        "*sample6.py:10    line            try:",
        "*sample6.py:11    line            asdf()",
        "*sample6.py:16    call            => asdf()",
        "*sample6.py:17    line               raise Exception()",
        "*sample6.py:17    exception        ! asdf: (<*Exception'>, Exception(), <traceback object at *>)",
        "*sample6.py:17    return          <= asdf: None",
        "*sample6.py:11    exception     ! foo: (<*Exception'>, Exception(), <traceback object at *>)",
        "*sample6.py:12    line            except:",
        "*sample6.py:13    line            pass",
        "*sample6.py:13    return       <= foo: None",
        "*sample6.py:2     return    <= bar: None",
    ])
def test_source(LineMatcher):
    """event.source yields the single source line belonging to each event."""
    calls = []
    with trace(action=lambda event: calls.append(event.source)):
        foo = bar = lambda x: x
        @foo
        @bar
        def foo():
            return 1
        foo()
    lm = LineMatcher(calls)
    lm.fnmatch_lines([
        '        foo = bar = lambda x: x\n',
        '        @foo\n',
        '            return 1\n',
    ])
def test_wraps(LineMatcher):
    """hunter.wrap traces only the wrapped function, reporting call counts and depth, without leaking tracer internals."""
    calls = []
    @hunter.wrap(action=lambda event: calls.append('%6r calls=%r depth=%r %s' % (event.kind, event.calls, event.depth, event.fullsource)))
    def foo():
        return 1
    foo()
    lm = LineMatcher(calls)
    for line in calls:
        print(repr(line))
    lm.fnmatch_lines([
        "'call' calls=0 depth=0 @hunter.wrap*",
        "'line' calls=1 depth=1     return 1\n",
        "'return' calls=1 depth=0     return 1\n",
    ])
    # The wrapper's own cleanup must never show up in traced source.
    for call in calls:
        assert 'tracer.stop()' not in call
def test_wraps_local(LineMatcher):
    """hunter.wrap(local=True) must skip events from functions called by the wrapped one."""
    calls = []
    def bar():
        for i in range(2):
            return 'A'
    @hunter.wrap(local=True, action=lambda event: calls.append(
        '%06s calls=%s depth=%s %s' % (event.kind, event.calls, event.depth, event.fullsource)))
    def foo():
        bar()
        return 1
    foo()
    lm = LineMatcher(calls)
    for line in calls:
        print(repr(line))
    lm.fnmatch_lines([
        '  call calls=0 depth=0 @hunter.wrap*',
        '  line calls=? depth=1     return 1\n',
        'return calls=? depth=0     return 1\n',
    ])
    # Neither bar()'s body nor the wrapper's cleanup may be traced.
    for call in calls:
        assert 'for i in range(2)' not in call
        assert 'tracer.stop()' not in call
@pytest.mark.skipif('os.environ.get("SETUPPY_CFLAGS") == "-DCYTHON_TRACE=1"')
def test_depth():
    """event.depth increases on call and decreases on return, across generators and comprehensions."""
    calls = []
    tracer = hunter.trace(action=lambda event: calls.append((event.kind, event.module, event.function, event.depth)))
    try:
        def bar():
            for i in range(2):
                yield i
        def foo():
            gen = bar()
            next(gen)
            while True:
                try:
                    gen.send('foo')
                except StopIteration:
                    break
            list(i for i in range(2))
            x = [i for i in range(2)]
        foo()
    finally:
        tracer.stop()
    pprint(calls)
    assert ('call', __name__, 'bar', 1) in calls
    assert ('return', __name__, 'foo', 0) in calls
def test_source_cython(LineMatcher):
    """event.source is resolved from the .pyx file for a Cython-compiled sample module."""
    pytest.importorskip('sample5')
    calls = []
    from sample5 import foo
    with trace(action=lambda event: calls.append(event.source)):
        foo()
    lm = LineMatcher(calls)
    lm.fnmatch_lines([
        'def foo():\n',
        '    return 1\n',
    ])
def test_fullsource(LineMatcher):
    """event.fullsource yields the complete multi-line statement, decorators included."""
    calls = []
    with trace(action=lambda event: calls.append(event.fullsource)):
        foo = bar = lambda x: x
        @foo
        @bar
        def foo():
            return 1
        foo()
    lm = LineMatcher(calls)
    lm.fnmatch_lines([
        '        foo = bar = lambda x: x\n',
        '        @foo\n        @bar\n        def foo():\n',
        '            return 1\n',
    ])
def test_fullsource_cython(LineMatcher):
    """event.fullsource is resolved from the .pyx file for a Cython-compiled sample module."""
    pytest.importorskip('sample5')
    calls = []
    from sample5 import foo
    with trace(action=lambda event: calls.append(event.fullsource)):
        foo()
    lm = LineMatcher(calls)
    lm.fnmatch_lines([
        'def foo():\n',
        '    return 1\n',
    ])
def test_custom_action():
    """A plain lambda works as an action and fires only for the filtered event kind."""
    seen = []
    with trace(action=lambda event: seen.append(event.function), kind='return'):
        def foo():
            return 1
        foo()
    assert 'foo' in seen
def test_trace_with_class_actions():
    """trace() must accept an action class (not just an instance) without blowing up."""
    with trace(CodePrinter):
        def noop():
            pass
        noop()
def test_proper_backend():
    """The active Tracer implementation must match what the environment selects."""
    pure_requested = os.environ.get('PUREPYTHONHUNTER') or platform.python_implementation() == 'PyPy'
    expected_module = 'hunter.tracer' if pure_requested else 'hunter._tracer'
    assert '%s.Tracer' % expected_module in repr(hunter.Tracer)
    assert hunter.Tracer.__module__ == expected_module
@pytest.fixture(params=['pure', 'cython'])
def tracer_impl(request):
    """Provide the requested Tracer implementation, skipping when it is unavailable or not the active one."""
    module_name = {'pure': 'hunter.tracer', 'cython': 'hunter._tracer'}[request.param]
    Tracer = pytest.importorskip(module_name).Tracer
    if Tracer is not hunter.Tracer:
        pytest.skip("%s is not %s in this environment" % (Tracer, hunter.Tracer))
    return Tracer
def _bulky_func_that_use_stdlib():
import difflib
a = list(map(str, range(500)))
b = list(map(str, range(0, 500, 2)))
list(difflib.unified_diff(a, b, 'a', 'b'))
def test_perf_filter(tracer_impl, benchmark):
    """Benchmark: a never-matching filter must never invoke the attached action."""
    impl = tracer_impl()
    class Counter(object):
        calls = 0
        def inc(_):
            Counter.calls += 1
    # Both queries are impossible to match, so `inc` must never fire.
    handler = Q(
        Q(module='does-not-exist') | Q(module='does not exist'.split()),
        action=inc
    )
    @benchmark
    def run():
        with impl.trace(handler):
            _bulky_func_that_use_stdlib()
    assert Counter.calls == 0
def test_perf_stdlib(tracer_impl, benchmark):
    """Benchmark: stdlib=False filtering must produce no output for stdlib-heavy work."""
    t = tracer_impl()
    @benchmark
    def run():
        output = StringIO()
        with t.trace(Q(
                ~Q(module_contains='pytest'),
                ~Q(module_in=(__name__, 'hunter.tracer', 'hunter._tracer')),
                ~Q(filename='<string>'),
                ~Q(filename=''),
                stdlib=False,
                action=CodePrinter(stream=output)
        )):
            _bulky_func_that_use_stdlib()
        return output
    # benchmark returns the last result of `run`; nothing should have printed.
    assert run.getvalue() == ''
def test_perf_actions(tracer_impl, benchmark):
    """Benchmark tracing overhead with real printing actions attached."""
    t = tracer_impl()
    @benchmark
    def run():
        output = StringIO()
        with t.trace(Q(
                ~Q(module_in=['re', 'sre', 'sre_parse']) & ~Q(module_startswith='namedtuple') & Q(kind='call'),
                actions=[
                    CodePrinter(
                        stream=output
                    ),
                    VarsPrinter(
                        'line',
                        stream=output
                    )
                ]
        )):
            _bulky_func_that_use_stdlib()
def test_clear_env_var(monkeypatch):
    """clear_env_var=True scrubs PYTHONHUNTER for the duration of the trace and leaves it unset."""
    monkeypatch.setitem(os.environ, 'PYTHONHUNTER', '123')
    assert os.environ.get('PYTHONHUNTER') == '123'
    sink = StringIO()
    with trace(action=CallPrinter(stream=sink), clear_env_var=True):
        # The variable must be gone while tracing is active...
        assert 'PYTHONHUNTER' not in os.environ
    # ...and it is not restored afterwards.
    assert os.environ.get('PYTHONHUNTER') is None
def test_from_predicate(LineMatcher):
    """From(function=...) starts tracing at the matching call and stops when it returns."""
    buff = StringIO()
    from sample7 import one
    with trace(From(Q(function='five'), CallPrinter(stream=buff))):
        one()
    output = buff.getvalue()
    lm = LineMatcher(output.splitlines())
    lm.fnmatch_lines([
        "* call      => five()",
        "* line         for i in range(1):  # five",
        "* line         return i",
        "* return    <= five: 0",
    ])
    # Nothing above `five` in the call chain may appear.
    assert '<= four' not in output
    assert 'three' not in output
    assert 'two' not in output
    assert 'one' not in output
def test_from_predicate_with_subpredicate(LineMatcher):
    """From with a depth sub-predicate limits tracing to events close to the origin point."""
    buff = StringIO()
    from sample7 import one
    with trace(From(Q(source_has='# two'), Q(depth_lt=1)), action=CallPrinter(stream=buff)):
        one()
    output = buff.getvalue()
    lm = LineMatcher(output.splitlines())
    lm.fnmatch_lines([
        '* line      for i in range(1):  # two',
        '* line      three()',
        '* call      => three()',
        '* return    <= three: None',
        '* line      for i in range(1):  # two',
    ])
    assert 'five' not in output
    assert 'four' not in output
    assert 'one()' not in output
    assert '# one' not in output
    # Line count depends on whether this `with` statement itself got traced.
    if 'with trace' in output:
        assert len(lm.lines) == 6, lm.lines
    else:
        assert len(lm.lines) == 5, lm.lines
def test_from_predicate_line(LineMatcher):
    """From can trigger on a line event; only events from that point onward are reported."""
    buff = StringIO()
    from sample7 import one
    with trace(From(Q(fullsource_has='in_five'), CallPrinter(stream=buff))):
        one()
    output = buff.getvalue()
    lm = LineMatcher(output.splitlines())
    lm.fnmatch_lines([
        "* line *    for i in range(1):  # five",
        "* line *    return i",
    ])
    assert 'four' not in output
    assert 'three' not in output
    assert 'two' not in output
    assert 'one' not in output
def test_from_predicate_no_predicate(LineMatcher):
    """From without a sub-predicate traces everything below the origin call, using trace()'s action."""
    buff = StringIO()
    from sample7 import one
    with trace(From(Q(function='five')), action=CallPrinter(stream=buff)):
        one()
    output = buff.getvalue()
    lm = LineMatcher(output.splitlines())
    lm.fnmatch_lines([
        "* call      => five()",
        "* line         for i in range(1):  # five",
        "* line         return i",
        "* return    <= five: 0",
    ])
    assert '<= four' not in output
    assert 'three' not in output
    assert 'two' not in output
    assert 'one' not in output
def test_from_predicate_line_no_predicate(LineMatcher):
    """From triggered by a line event, without a sub-predicate, reports only events from that point on."""
    buff = StringIO()
    from sample7 import one
    with trace(From(Q(fullsource_has='in_five')), action=CallPrinter(stream=buff)):
        one()
    output = buff.getvalue()
    lm = LineMatcher(output.splitlines())
    lm.fnmatch_lines([
        "* line *    for i in range(1):  # five",
        "* line *    return i",
    ])
    assert 'four' not in output
    assert 'three' not in output
    assert 'two' not in output
    assert 'one' not in output
# def test_backlog_size_call(LineMatcher):
# backlog_buff = StringIO()
# tracer_buff = StringIO()
# from sample7 import one
# with trace(
# Backlog(function='five', size=5, action=CallPrinter(stream=backlog_buff)),
# action=CallPrinter(stream=tracer_buff)
# ):
# one()
# backlog_output = backlog_buff.getvalue()
# lm = LineMatcher(backlog_output.splitlines())
# lm.fnmatch_lines([
# "* line for i in range(1): # three",
# "* line four()",
# "* call => four()",
# "* line for i in range(1): # four",
# "* line five()",
# ])
# assert '=> three' not in backlog_output
# assert '=> five()' not in backlog_output
# assert 'two' not in backlog_output
# assert 'one' not in backlog_output
#
# tracer_output = tracer_buff.getvalue()
# assert len(tracer_output.splitlines()) == 1
# assert " call => five()" in tracer_output
#
#
# def test_backlog_size_line(LineMatcher):
# buff = StringIO()
# tracer_buff = StringIO()
# from sample7 import one
# with trace(
# Backlog(fullsource_has='return i', size=5, action=CallPrinter(stream=buff)),
# action=CallPrinter(stream=tracer_buff)
# ):
# one()
# output = buff.getvalue()
# lm = LineMatcher(output.splitlines())
# lm.fnmatch_lines([
# "* line for i in range(1): # four",
# "* line five()",
# "* call => five()",
# "* line in_five = 1",
# "* line for i in range(1): # five",
# ])
#
# assert 'four' not in output
#
# tracer_output = tracer_buff.getvalue()
# assert len(tracer_output.splitlines()) == 1
# assert " line return i" in tracer_output
#
#
# def test_backlog_size_call_filter(LineMatcher):
# buff = StringIO()
# tracer_buff = StringIO()
# from sample7 import one
# with trace(
# Backlog(function='five', size=5, action=CallPrinter(stream=buff)).filter(~Q(fullsource_has='four')),
# action=CallPrinter(stream=tracer_buff)
# ):
# one()
# output = buff.getvalue()
# lm = LineMatcher(output.splitlines())
# lm.fnmatch_lines([
# "* line for i in range(1): # two",
# "* line three()",
# "* call => three()",
# "* line for i in range(1): # three",
# "* line five()",
# ])
# assert "four" not in output
#
# tracer_output = tracer_buff.getvalue()
# assert len(tracer_output.splitlines()) == 1
# assert " call => five()" in tracer_buff.getvalue()
#
#
# def test_backlog_size_predicate_line_filter(LineMatcher):
# buff = StringIO()
# tracer_buff = StringIO()
# from sample7 import one
# with trace(
# Backlog(fullsource_has='return i', size=5, action=CallPrinter(stream=buff)).filter(~Q(fullsource_has="five")),
# action=CallPrinter(stream=tracer_buff)
# ):
# one()
# output = buff.getvalue()
# lm = LineMatcher(output.splitlines())
# lm.fnmatch_lines([
# "* call => three()",
# "* line for i in range(1): # three",
# "* line four()",
# "* call => four()",
# "* line for i in range(1): # four",
# ])
#
# assert "five" not in output
#
# tracer_output = tracer_buff.getvalue()
# assert len(tracer_output.splitlines()) == 1
# assert " line return i" in tracer_output
#
#
# def test_backlog_size_first_line_match(LineMatcher):
# buff = StringIO()
# tracer_buff = StringIO()
# from sample7 import one
# with trace(Backlog(fullsource_has='one', module_rx='sample7', size=100, action=CallPrinter(stream=buff)).filter(fullsource_has='one'), action=CallPrinter(stream=tracer_buff)):
# one()
# output = buff.getvalue()
# assert not output
def decorator(func):
    """Trivial pass-through decorator used to check function_object detection through wrappers."""
    @functools.wraps(func)
    def inner(*args, **kwargs):
        return func(*args, **kwargs)
    return inner
def gf(_):
    """Module-level plain function; the single argument is ignored."""
@decorator
def dgf(_):
    """Module-level decorated function; the single argument is ignored."""
class Old:
    """Old-style (py2) class exposing every flavour of no-op method for function_object tests."""
    @staticmethod
    def old_sm(_):
        """Static method; ignores its argument."""
    @classmethod
    def old_cm(cls, _):
        """Class method; ignores its argument."""
    def old_m(self, _):
        """Instance method; ignores its argument."""
class Desc(object):
    """Minimal non-data descriptor: always hands back the wrapped function, unbound."""
    def __init__(self, func):
        # The callable to return from attribute access on class or instance.
        self.func = func
    def __get__(self, instance, owner):
        return self.func
class New(object):
    """New-style class exposing every flavour of no-op method plus descriptor-backed attributes."""
    @staticmethod
    def new_sm(_):
        """Static method; ignores its argument."""
    @classmethod
    def new_cm(cls, _):
        """Class method; ignores its argument."""
    def new_m(self, _):
        """Instance method; ignores its argument."""
    # Descriptor-backed attributes resolving to the module-level functions.
    new_dm = Desc(gf)
    new_ddm = Desc(dgf)
def test_function_object(LineMatcher):
    """event.function_object resolves the underlying function through decorators, methods and
    descriptors where possible; locals/closures that cannot be resolved report 'missing'."""
    def lf(_):
        pass
    @decorator
    def dlf(_):
        pass
    class Local(object):
        @staticmethod
        def local_sm(_):
            pass
        @classmethod
        def local_cm(cls, _):
            pass
        def local_m(self, _):
            pass
        local_dm = Desc(lf)
        local_ddm = Desc(dlf)
        global_dm = Desc(gf)
        global_ddm = Desc(dgf)
    buff = StringIO()
    # Second action records "function(arg)|resolved-name|kind" for every event.
    with trace(actions=[
        hunter.CallPrinter(stream=buff),
        lambda event: buff.write(
            "{0.function}({1})|{2}|{0.kind}\n".format(
                event,
                event.locals.get('_'),
                getattr(event.function_object, '__name__', 'missing')))
    ]):
        gf(1)
        dgf(2)
        lf(3)
        dlf(4)
        Old.old_sm(5)
        Old.old_cm(6)
        Old().old_sm(7)
        Old().old_cm(8)
        Old().old_m(9)
        New.new_sm(10)
        New.new_cm(11)
        New().new_sm(12)
        New().new_cm(13)
        New().new_m(14)
        New().new_dm(15)
        New().new_ddm(16)
        Local.local_sm(17)
        Local.local_cm(18)
        Local().local_sm(19)
        Local().local_cm(20)
        Local().local_m(21)
        Local().local_dm(22)
        Local().local_ddm(23)
        Local().global_dm(24)
        Local().global_ddm(25)
    output = buff.getvalue()
    print(output)
    lm = LineMatcher(output.splitlines())
    lm.fnmatch_lines([
        "gf(1)|gf|call",
        "dgf(2)|dgf|call",
        "lf(3)|missing|call",
        "dlf(4)|missing|call",
        "old_sm(5)|old_sm|call",
        "old_cm(6)|old_cm|call",
        "old_sm(7)|old_sm|call",
        "old_cm(8)|old_cm|call",
        "old_m(9)|old_m|call",
        "new_sm(10)|new_sm|call",
        "new_cm(11)|new_cm|call",
        "new_sm(12)|new_sm|call",
        "new_cm(13)|new_cm|call",
        "new_m(14)|new_m|call",
        "gf(15)|gf|call",
        "dgf(16)|dgf|call",
        "local_sm(17)|missing|call",
        "local_cm(18)|local_cm|call",
        "local_sm(19)|missing|call",
        "local_cm(20)|local_cm|call",
        "local_m(21)|local_m|call",
        "lf(22)|missing|call",
        "dlf(23)|missing|call",
        "gf(24)|gf|call",
        "dgf(25)|{}|call".format('dgf'),
    ])
def test_tracing_bare(LineMatcher):
    """trace() with only an action (no filters) records this module's events."""
    lines = StringIO()
    with trace(CodePrinter(stream=lines)):
        def a():
            return 1
        b = a()
        b = 2
        try:
            raise Exception('BOOM!')
        except Exception:
            pass
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines([
        "*test_*.py* call              def a():",
        "*test_*.py* line                  return 1",
        "*test_*.py* return                return 1",
        "* ...       return value: 1",
    ])
def test_debugger(LineMatcher):
    """Debugger action instantiates the configured klass with its kwargs and calls set_trace on the matching frame."""
    out = StringIO()
    calls = []
    class FakePDB:
        def __init__(self, foobar=1):
            calls.append(foobar)
        def set_trace(self, frame):
            calls.append(frame.f_code.co_name)
    with trace(
            lambda event: event.locals.get('node') == 'Foobar',
            module=__name__,
            function='foo',
            actions=[CodePrinter,
                     VarsPrinter('a', 'node', 'foo', 'test_debugger', stream=out),
                     Debugger(klass=FakePDB, foobar=2)]
    ):
        def foo():
            a = 1
            node = 'Foobar'
            node += 'x'
            a += 2
            return a
        foo()
    print(out.getvalue())
    # FakePDB got foobar=2 at construction and was triggered inside foo.
    assert calls == [2, 'foo']
    lm = LineMatcher(out.getvalue().splitlines())
    pprint(lm.lines)
    lm.fnmatch_lines_random([
        "* [[]test_debugger => <function test_debugger at *[]]",
        "* [[]node => *Foobar*[]]",
        "* [[]a => 1[]]",
    ])
def test_varssnooper(LineMatcher):
    """VarsSnooper reports variable births (:=) and changes (: old => new), and empties its storage afterwards."""
    lines = StringIO()
    snooper = VarsSnooper(stream=lines)
    def a():
        foo = bar = b = 1
        b = 2
        foo = 3
        foo = bar = 4
        return b
    with trace(function='a', actions=[snooper, CodePrinter(stream=lines)]):
        a()
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines([
        "*test_*.py*  line              foo = bar = b = 1",
        "*test_*.py*  line      [[]b := 1[]]",
        "*         *  ...       [[]bar := 1[]]",
        "*         *  ...       [[]foo := 1[]]",
        "*test_*.py*  line              b = 2",
        "*test_*.py*  line      [[]b : 1 => 2[]]",
        "*test_*.py*  line              foo = 3",
        "*test_*.py*  line      [[]foo : 1 => 3[]]",
        "*test_*.py*  line              foo = bar = 4",
        "*test_*.py*  line      [[]bar : 1 => 4[]]",
        "*         *  ...       [[]foo : 3 => 4[]]",
        "*test_*.py*  line              return b",
        "*test_*.py*  return            return b",
        "*         *  ...       return value: 2",
    ])
    # The snooper must not retain per-frame state once tracing finished.
    assert snooper.stored_reprs == {}
def test_errorsnooper(LineMatcher):
    """ErrorSnooper dumps a pre-exception backlog plus subsequent events for each silenced exception,
    cutting off at max_events ('too many lines')."""
    lines = StringIO()
    snooper = ErrorSnooper(stream=lines, max_backlog=50, max_events=100)
    def a():
        from sample8errors import notsilenced
        from sample8errors import silenced1
        from sample8errors import silenced2
        from sample8errors import silenced3
        from sample8errors import silenced4
        silenced1()
        print("Done silenced1")
        silenced2()
        print("Done silenced2")
        silenced3()
        print("Done silenced3")
        silenced4()
        print("Done silenced4")
        # A properly re-raised exception must NOT trigger a dump.
        try:
            notsilenced()
        except ValueError:
            print("Done not silenced")
    with trace(actions=[snooper]):
        a()
    lm = LineMatcher(lines.getvalue().splitlines())
    lm.fnmatch_lines([
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced1 on (*RuntimeError*)",
        "*test_*.py:*   line              silenced1()",
        "*sample8errors.py:14   call         def silenced1():",
        "*sample8errors.py:15   line             try:",
        "*sample8errors.py:16   line             error()",
        "*sample8errors.py:6    call             def error():",
        "*sample8errors.py:7    line                 raise RuntimeError()",
        "*sample8errors.py:7    exception            raise RuntimeError()",
        "*                      ...       exception value: (*RuntimeError*)",
        "*sample8errors.py:7    return               raise RuntimeError()",
        "*                      ...       return value: None",
        "*sample8errors.py:16   exception        error()",
        "*                      ...       exception value: (*RuntimeError*)",
        "*sample8errors.py:17   line             except Exception:",
        "*sample8errors.py:18   line             pass",
        "*sample8errors.py:18   return           pass",
        "*                      ...       return value: None",
        "*---------------------- function exit",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced2 on (*RuntimeError*)",
        '*test_*.py:*   line              print("Done silenced1")',
        "*test_*.py:*   line              silenced2()",
        "*sample8errors.py:21   call         def silenced2():",
        "*sample8errors.py:22   line             try:",
        "*sample8errors.py:23   line             error()",
        "*sample8errors.py:6    call             def error():",
        "*sample8errors.py:7    line                 raise RuntimeError()",
        "*sample8errors.py:7    exception            raise RuntimeError()",
        "*                      ...       exception value: (*RuntimeError*)",
        "*sample8errors.py:7    return               raise RuntimeError()",
        "*                      ...       return value: None",
        "*sample8errors.py:23   exception        error()",
        "*                      ...       exception value: (*RuntimeError*)",
        "*sample8errors.py:24   line             except Exception as exc:",
        "*sample8errors.py:25   line             log(exc)",
        "*sample8errors.py:10   call             def log(msg):",
        "*sample8errors.py:11   return               print(msg)",
        "*                      ...       return value: None",
        "*sample8errors.py:26   line             for i in range(*):",
        "*sample8errors.py:27   line             log(i)",
        "*---------------------- too many lines",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced3 on (*RuntimeError*)",
        '*test_*.py:*   line              print("Done silenced2")',
        "*test_*.py:*   line              silenced3()",
        "*sample8errors.py:31   call         def silenced3():",
        "*sample8errors.py:32   line             try:",
        "*sample8errors.py:33   line             error()",
        "*sample8errors.py:6    call             def error():",
        "*sample8errors.py:7    line                 raise RuntimeError()",
        "*sample8errors.py:7    exception            raise RuntimeError()",
        "*                      ...       exception value: (*RuntimeError*)",
        "*sample8errors.py:7    return               raise RuntimeError()",
        "*                      ...       return value: None",
        "*sample8errors.py:33   exception        error()",
        "*                      ...       exception value: (*RuntimeError*)",
        '*sample8errors.py:35   line             return "mwhahaha"',
        '*sample8errors.py:35   return           return "mwhahaha"',
        "*                      ...       return value: 'mwhahaha'",
        "*---------------------- function exit",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced4 on (*RuntimeError*)",
        '*test_*.py:*   line              print("Done silenced3")',
        "*test_*.py:*   line              silenced4()",
        "*sample8errors.py:38   call         def silenced4():",
        "*sample8errors.py:39   line             try:",
        "*sample8errors.py:40   line             error()",
        "*sample8errors.py:6    call             def error():",
        "*sample8errors.py:7    line                 raise RuntimeError()",
        "*sample8errors.py:7    exception            raise RuntimeError()",
        "*                      ...       exception value: (*RuntimeError*)",
        "*sample8errors.py:7    return               raise RuntimeError()",
        "*                      ...       return value: None",
        "*sample8errors.py:40   exception        error()",
        "*                      ...       exception value: (*RuntimeError*)",
        "*sample8errors.py:41   line             except Exception as exc:",
        "*sample8errors.py:42   line             logger.info(repr(exc))",
        "*__init__.py:*  call             def info(self, msg, *args, **kwargs):",
        "*sample8errors.py:42   return           logger.info(repr(exc))",
        "*                      ...       return value: None",
        "*---------------------- function exit",
    ])
def test_errorsnooper_fastmode(LineMatcher):
    """With max_backlog=0 the ErrorSnooper emits no pre-error backlog: each
    "tracing silenced*" banner is followed only by events traced *after*
    the error was detected.
    """
    lines = StringIO()
    # max_backlog=0 disables the backlog entirely; max_events caps how much
    # post-error output is collected per detected error.
    snooper = ErrorSnooper(stream=lines, max_backlog=0, max_events=100)
    def a():
        from sample8errors import notsilenced
        from sample8errors import silenced1
        from sample8errors import silenced2
        from sample8errors import silenced3
        from sample8errors import silenced4
        silenced1()
        print("Done silenced1")
        silenced2()
        print("Done silenced2")
        silenced3()
        print("Done silenced3")
        silenced4()
        print("Done silenced4")
        try:
            notsilenced()
        except ValueError:
            print("Done not silenced")
    with trace(actions=[snooper]):
        a()
    print(lines.getvalue())
    lm = LineMatcher(lines.getvalue().splitlines())
    # Expected output: one banner per silenced scenario, each followed only
    # by the lines executed after the RuntimeError was raised and swallowed.
    lm.fnmatch_lines([
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced1 on (*RuntimeError*)",
        "*sample8errors.py:17 line except Exception:",
        "*sample8errors.py:18 line pass",
        "*sample8errors.py:18 return pass",
        "* ... return value: None",
        "*---------------------- function exit",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced2 on (*RuntimeError*)",
        "*sample8errors.py:24 line except Exception as exc:",
        "*sample8errors.py:25 line log(exc)",
        "*sample8errors.py:10 call def log(msg):",
        "*sample8errors.py:11 return print(msg)",
        "* ... return value: None",
        "*sample8errors.py:26 line for i in range(*):",
        "*sample8errors.py:27 line log(i)",
        "*---------------------- too many lines",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced3 on (*RuntimeError*)",
        '*sample8errors.py:35 line return "mwhahaha"',
        '*sample8errors.py:35 return return "mwhahaha"',
        "* ... return value: 'mwhahaha'",
        "*---------------------- function exit",
        "*>>>>>>>>>>>>>>>>>>>>>> tracing silenced4 on (*RuntimeError*)",
        "*sample8errors.py:41 line except Exception as exc:",
        "*sample8errors.py:42 line logger.info(repr(exc))",
        "*__init__.py:* call def info(self, msg, *args, **kwargs):",
        "*sample8errors.py:42 return logger.info(repr(exc))",
        "* ... return value: None",
        "*---------------------- function exit",
    ])
def test_stack_printer_1(LineMatcher):
    """StackPrinter(limit=1): the stack line renders frames with bare
    file names (no directory component kept)."""
    stream = StringIO()
    printer = StackPrinter(limit=1, stream=stream)
    with trace(Q(function="five", action=printer)):
        from sample7 import one
        one()
    matcher = LineMatcher(stream.getvalue().splitlines())
    matcher.fnmatch_lines([
        "*sample7.py:??:five <= sample7.py:??:four <= sample7.py:??:three <= sample7.py:??:two <= sample7.py:?:one <= test_tracer.py:????:test_stack_printer*"
    ])
def test_stack_printer_2(LineMatcher):
    """StackPrinter(limit=2): parent frames keep one directory of path
    context (``tests/``) in the rendered stack line."""
    stream = StringIO()
    printer = StackPrinter(limit=2, stream=stream)
    with trace(Q(function="five", action=printer)):
        from sample7 import one
        one()
    matcher = LineMatcher(stream.getvalue().splitlines())
    matcher.fnmatch_lines([
        "*sample7.py:??:five <= tests/sample7.py:??:four <= tests/sample7.py:??:three <= tests/sample7.py:??:two <= tests/sample7.py:?:one <= tests/test_tracer.py:????:test_stack_printer*"
    ])
@pytest.mark.parametrize('stack', [5, 6, 7], ids="stack={}".format)
@pytest.mark.parametrize('size', [6, 8, 10, 12, 14, 16] + list(range(17, 35)), ids="size={}".format)
@pytest.mark.parametrize('vars', [True, False], ids="vars={}".format)
@pytest.mark.parametrize('filter', [None, ~Q(function='six')], ids="filter={}".format)
@pytest.mark.parametrize('condition', [{'fullsource_has': 'return i'}, {'function': 'five'}], ids=urlencode)
def test_backlog_specific(LineMatcher, size, stack, vars, condition, filter):
    """Backlog output stays identical across a sweep of size/stack/vars/filter
    combinations for two different trigger conditions; also verifies the
    action is reusable by calling one() twice and expecting the full
    sequence twice.
    """
    buff = StringIO()
    from sample7args import one
    with trace(
        Backlog(size=size, stack=stack, vars=vars, action=DebugCallPrinter(' [' 'backlog' ']', stream=buff), filter=filter, **condition),
        action=DebugCallPrinter(stream=buff)
    ):
        one()
        one() # make sure Backlog is reusable (doesn't have storage side-effects)
    output = buff.getvalue()
    # print(re.sub(r'([\[\]])', r'[\1]', output))
    lm = LineMatcher(output.splitlines())
    # '[[]' / '[]]' are fnmatch escapes for literal '[' / ']'.
    lm.fnmatch_lines([
        "depth=0 calls=*sample7args.py:* call => one(a=*, b=*, c=*) [[]backlog[]]",
        "depth=1 calls=*sample7args.py:* call => two(a=*, b=*, c=*) [[]backlog[]]",
        "depth=2 calls=*sample7args.py:* call => three(a=*, b=*, c=*) [[]backlog[]]",
        "depth=3 calls=*sample7args.py:* call => four(a=*, b=*, c=*) [[]backlog[]]",
        "depth=4 calls=*sample7args.py:* call => five(a=*, b=*, c=*)*",
        "depth=5 calls=*sample7args.py:* line six()*",
        "depth=5 calls=*sample7args.py:* line a = b = c[[]'side'[]] = in_five = 'effect'*",
        "depth=5 calls=*sample7args.py:* line for i in range(1): # five*",
        "depth=5 calls=*sample7args.py:* line return i # five",
        "depth=4 calls=*sample7args.py:* return <= five: 0",
        "depth=0 calls=*sample7args.py:* call => one(a=*, b=*, c=*) [[]backlog[]]",
        "depth=1 calls=*sample7args.py:* call => two(a=*, b=*, c=*) [[]backlog[]]",
        "depth=2 calls=*sample7args.py:* call => three(a=*, b=*, c=*) [[]backlog[]]",
        "depth=3 calls=*sample7args.py:* call => four(a=*, b=*, c=*) [[]backlog[]]",
        "depth=4 calls=*sample7args.py:* call => five(a=*, b=*, c=*)*",
        "depth=5 calls=*sample7args.py:* line six()*",
        "depth=5 calls=*sample7args.py:* line a = b = c[[]'side'[]] = in_five = 'effect'*",
        "depth=5 calls=*sample7args.py:* line for i in range(1): # five*",
        "depth=5 calls=*sample7args.py:* line return i # five",
        "depth=4 calls=*sample7args.py:* return <= five: 0",
    ])
| |
import json
import math
import re
import struct
import sys
from peewee import *
from peewee import ColumnBase
from peewee import EnclosedNodeList
from peewee import Entity
from peewee import Expression
from peewee import Node
from peewee import NodeList
from peewee import OP
from peewee import VirtualField
from peewee import merge_dict
from peewee import sqlite3
# Optional C speedups: use the Cython implementations from the compiled
# playhouse._sqlite_ext shared library when available.
try:
    from playhouse._sqlite_ext import (
        backup,
        backup_to_file,
        Blob,
        ConnectionHelper,
        register_bloomfilter,
        register_hash_functions,
        register_rank_functions,
        sqlite_get_db_status,
        sqlite_get_status,
        TableFunction,
        ZeroBlob,
    )
    CYTHON_SQLITE_EXTENSIONS = True
except ImportError:
    CYTHON_SQLITE_EXTENSIONS = False
# Python 2/3 compatibility shim for the isinstance() checks below.
if sys.version_info[0] == 3:
    basestring = str
# matchinfo() format strings consumed by the FTS3/FTS4 ranking functions.
FTS3_MATCHINFO = 'pcx'
FTS4_MATCHINFO = 'pcnalx'
# FTS4 requires SQLite >= 3.7.4; otherwise fall back to FTS3.
if sqlite3 is not None:
    FTS_VERSION = 4 if sqlite3.sqlite_version_info[:3] >= (3, 7, 4) else 3
else:
    FTS_VERSION = 3
FTS5_MIN_SQLITE_VERSION = (3, 9, 0)
class RowIDField(AutoField):
    """Field exposing SQLite's implicit ``rowid`` column.

    The model attribute must be named exactly 'rowid'; binding under any
    other name raises ValueError.
    """
    auto_increment = True
    column_name = name = required_name = 'rowid'
    def bind(self, model, name, *args):
        # Only the mandated attribute name is accepted.
        if name == self.required_name:
            super(RowIDField, self).bind(model, name, *args)
        else:
            raise ValueError('%s must be named "%s".' %
                             (type(self), self.required_name))
class DocIDField(RowIDField):
    """FTS3/FTS4 variant of RowIDField; the attribute must be named 'docid'."""
    column_name = name = required_name = 'docid'
class AutoIncrementField(AutoField):
    """AutoField whose column DDL carries the AUTOINCREMENT keyword."""
    def ddl(self, ctx):
        base_ddl = super(AutoIncrementField, self).ddl(ctx)
        return NodeList((base_ddl, SQL('AUTOINCREMENT')))
class JSONPath(ColumnBase):
    """Accessor for a location inside a JSONField value.

    Instances are immutable: indexing with ``[]`` returns a *new* JSONPath
    extended by one component ('[n]' for integer indexes, '.name' for keys).
    """
    def __init__(self, field, path=None):
        super(JSONPath, self).__init__()
        self._field = field
        self._path = path or ()
    @property
    def path(self):
        # JSON-path strings are rooted at the document ('$').
        return Value('$%s' % ''.join(self._path))
    def __getitem__(self, idx):
        component = ('[%s]' % idx) if isinstance(idx, int) else ('.%s' % idx)
        return JSONPath(self._field, self._path + (component,))
    def set(self, value, as_json=None):
        # Containers (and anything explicitly flagged) are serialized first.
        needs_json = as_json or isinstance(value, (list, dict))
        if needs_json:
            value = fn.json(self._field._json_dumps(value))
        return fn.json_set(self._field, self.path, value)
    def update(self, value):
        # json_patch() merges the supplied data into the value at this path.
        patched = fn.json_patch(self, self._field._json_dumps(value))
        return self.set(patched)
    def remove(self):
        return fn.json_remove(self._field, self.path)
    def json_type(self):
        return fn.json_type(self._field, self.path)
    def length(self):
        return fn.json_array_length(self._field, self.path)
    def children(self):
        return fn.json_each(self._field, self.path)
    def tree(self):
        return fn.json_tree(self._field, self.path)
    def __sql__(self, ctx):
        # An empty path refers to the field itself (no json_extract needed).
        if self._path:
            return ctx.sql(fn.json_extract(self._field, self.path))
        return ctx.sql(self._field)
class JSONField(TextField):
    """TextField that serializes values to JSON on write and parses on read."""
    field_type = 'JSON'
    def __init__(self, json_dumps=None, json_loads=None, **kwargs):
        # Custom (de)serializers may be supplied; default to stdlib json.
        self._json_dumps = json_dumps or json.dumps
        self._json_loads = json_loads or json.loads
        super(JSONField, self).__init__(**kwargs)
    def python_value(self, value):
        # Implicitly returns None for NULL column values.
        if value is not None:
            try:
                return self._json_loads(value)
            except (TypeError, ValueError):
                # Not valid JSON -- hand back the raw database value.
                return value
    def db_value(self, value):
        if value is not None:
            if not isinstance(value, Node):
                value = fn.json(self._json_dumps(value))
            return value
    def _e(op):
        # Executed at class-definition time: builds comparison operators
        # that JSON-serialize list/dict operands before comparing.
        def inner(self, rhs):
            if isinstance(rhs, (list, dict)):
                rhs = Value(rhs, converter=self.db_value, unpack=False)
            return Expression(self, op, rhs)
        return inner
    __eq__ = _e(OP.EQ)
    __ne__ = _e(OP.NE)
    __gt__ = _e(OP.GT)
    __ge__ = _e(OP.GTE)
    __lt__ = _e(OP.LT)
    __le__ = _e(OP.LTE)
    __hash__ = Field.__hash__
    def __getitem__(self, item):
        # field['key'] or field[0] -> JSONPath accessor into the document.
        return JSONPath(self)[item]
    def set(self, value, as_json=None):
        return JSONPath(self).set(value, as_json)
    def update(self, data):
        return JSONPath(self).update(data)
    def remove(self):
        return JSONPath(self).remove()
    def json_type(self):
        return fn.json_type(self)
    def length(self):
        return fn.json_array_length(self)
    def children(self):
        """
        Schema of `json_each` and `json_tree`:
        key,
        value,
        type TEXT (object, array, string, etc),
        atom (value for primitive/scalar types, NULL for array and object)
        id INTEGER (unique identifier for element)
        parent INTEGER (unique identifier of parent element or NULL)
        fullkey TEXT (full path describing element)
        path TEXT (path to the container of the current element)
        json JSON hidden (1st input parameter to function)
        root TEXT hidden (2nd input parameter, path at which to start)
        """
        return fn.json_each(self)
    def tree(self):
        return fn.json_tree(self)
class SearchField(Field):
    """Column type for full-text virtual tables; always nullable, and only
    the ``unindexed`` / ``column_name`` options are configurable."""
    def __init__(self, unindexed=False, column_name=None, **kwargs):
        # Any other Field keyword is an error for virtual-table columns.
        if kwargs:
            raise ValueError('SearchField does not accept these keyword '
                             'arguments: %s.' % sorted(kwargs))
        super(SearchField, self).__init__(unindexed=unindexed,
                                          column_name=column_name, null=True)
    def match(self, term):
        """Generate a MATCH expression against this column."""
        return match(self, term)
class VirtualTableSchemaManager(SchemaManager):
    """SchemaManager that emits CREATE VIRTUAL TABLE DDL for VirtualModels."""
    def _create_virtual_table(self, safe=True, **options):
        options = self.model.clean_options(
            merge_dict(self.model._meta.options, options))
        # Structure:
        # CREATE VIRTUAL TABLE <model>
        # USING <extension_module>
        # ([prefix_arguments, ...] fields, ... [arguments, ...], [options...])
        ctx = self._create_context()
        ctx.literal('CREATE VIRTUAL TABLE ')
        if safe:
            ctx.literal('IF NOT EXISTS ')
        (ctx
         .sql(self.model)
         .literal(' USING '))
        ext_module = self.model._meta.extension_module
        if isinstance(ext_module, Node):
            # Module given as a SQL expression (e.g. fn.fts5vocab(...)) --
            # the expression forms the entire remainder of the statement.
            return ctx.sql(ext_module)
        ctx.sql(SQL(ext_module)).literal(' ')
        arguments = []
        meta = self.model._meta
        if meta.prefix_arguments:
            arguments.extend([SQL(a) for a in meta.prefix_arguments])
        # Constraints, data-types, foreign and primary keys are all omitted.
        for field in meta.sorted_fields:
            if isinstance(field, (RowIDField)) or field._hidden:
                continue
            field_def = [Entity(field.column_name)]
            if field.unindexed:
                field_def.append(SQL('UNINDEXED'))
            arguments.append(NodeList(field_def))
        if meta.arguments:
            arguments.extend([SQL(a) for a in meta.arguments])
        if options:
            arguments.extend(self._create_table_option_sql(options))
        return ctx.sql(EnclosedNodeList(arguments))
    def _create_table(self, safe=True, **options):
        # Virtual models get virtual-table DDL; everything else falls back
        # to the standard CREATE TABLE path.
        if issubclass(self.model, VirtualModel):
            return self._create_virtual_table(safe, **options)
        return super(VirtualTableSchemaManager, self)._create_table(
            safe, **options)
class VirtualModel(Model):
    """Base model class for SQLite virtual tables (FTS, closure, lsm1, ...)."""
    class Meta:
        # Extra args/options consumed by VirtualTableSchemaManager.
        arguments = None
        extension_module = None
        prefix_arguments = None
        primary_key = False
        schema_manager_class = VirtualTableSchemaManager
    @classmethod
    def clean_options(cls, options):
        # Hook for subclasses to normalize CREATE VIRTUAL TABLE options.
        return options
class BaseFTSModel(VirtualModel):
    """Shared option normalization for FTS3/FTS4/FTS5 virtual-table models."""
    @classmethod
    def clean_options(cls, options):
        content = options.get('content')
        prefix = options.get('prefix')
        tokenize = options.get('tokenize')
        if isinstance(content, basestring) and content == '':
            # Special-case content-less full-text search tables.
            options['content'] = "''"
        elif isinstance(content, Field):
            # Special-case to ensure fields are fully-qualified.
            options['content'] = Entity(content.model._meta.table_name,
                                        content.column_name)
        if prefix:
            # Accept a list/tuple of prefix lengths or a pre-formatted string.
            if isinstance(prefix, (list, tuple)):
                prefix = ','.join([str(i) for i in prefix])
            options['prefix'] = "'%s'" % prefix.strip("' ")
        if tokenize and cls._meta.extension_module.lower() == 'fts5':
            # Tokenizers need to be in quoted string for FTS5, but not for FTS3
            # or FTS4.
            options['tokenize'] = '"%s"' % tokenize
        return options
class FTSModel(BaseFTSModel):
    """
    VirtualModel class for creating tables that use either the FTS3 or FTS4
    search extensions. Peewee automatically determines which version of the
    FTS extension is supported and will use FTS4 if possible.
    """
    # FTS3/4 uses "docid" in the same way a normal table uses "rowid".
    docid = DocIDField()
    class Meta:
        extension_module = 'FTS%s' % FTS_VERSION
    @classmethod
    def _fts_cmd(cls, cmd):
        # FTS "commands" are issued by inserting the command into the
        # table's own hidden column, e.g.
        #   INSERT INTO tbl(tbl) VALUES ('optimize');
        tbl = cls._meta.table_name
        res = cls._meta.database.execute_sql(
            "INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
        return res.fetchone()
    @classmethod
    def optimize(cls):
        return cls._fts_cmd('optimize')
    @classmethod
    def rebuild(cls):
        return cls._fts_cmd('rebuild')
    @classmethod
    def integrity_check(cls):
        return cls._fts_cmd('integrity-check')
    @classmethod
    def merge(cls, blocks=200, segments=8):
        return cls._fts_cmd('merge=%s,%s' % (blocks, segments))
    @classmethod
    def automerge(cls, state=True):
        return cls._fts_cmd('automerge=%s' % (state and '1' or '0'))
    @classmethod
    def match(cls, term):
        """
        Generate a `MATCH` expression appropriate for searching this table.
        """
        return match(cls._meta.entity, term)
    @classmethod
    def rank(cls, *weights):
        matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
        return fn.fts_rank(matchinfo, *weights)
    @classmethod
    def bm25(cls, *weights):
        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
        return fn.fts_bm25(match_info, *weights)
    @classmethod
    def bm25f(cls, *weights):
        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
        return fn.fts_bm25f(match_info, *weights)
    @classmethod
    def lucene(cls, *weights):
        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
        return fn.fts_lucene(match_info, *weights)
    @classmethod
    def _search(cls, term, weights, with_score, score_alias, score_fn,
                explicit_ordering):
        # Shared implementation behind search()/search_bm25()/etc.
        # weights may be absent, a dict keyed by field or name, or a sequence.
        if not weights:
            rank = score_fn()
        elif isinstance(weights, dict):
            weight_args = []
            for field in cls._meta.sorted_fields:
                # Attempt to get the specified weight of the field by looking
                # it up using it's field instance followed by name.
                field_weight = weights.get(field, weights.get(field.name, 1.0))
                weight_args.append(field_weight)
            rank = score_fn(*weight_args)
        else:
            rank = score_fn(*weights)
        selection = ()
        order_by = rank
        if with_score:
            selection = (cls, rank.alias(score_alias))
        if with_score and not explicit_ordering:
            # Order by the aliased score to avoid computing rank twice.
            order_by = SQL(score_alias)
        return (cls
                .select(*selection)
                .where(cls.match(term))
                .order_by(order_by))
    @classmethod
    def search(cls, term, weights=None, with_score=False, score_alias='score',
               explicit_ordering=False):
        """Full-text search using selected `term`."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.rank,
            explicit_ordering)
    @classmethod
    def search_bm25(cls, term, weights=None, with_score=False,
                    score_alias='score', explicit_ordering=False):
        """Full-text search for selected `term` using BM25 algorithm."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.bm25,
            explicit_ordering)
    @classmethod
    def search_bm25f(cls, term, weights=None, with_score=False,
                     score_alias='score', explicit_ordering=False):
        """Full-text search for selected `term` using BM25f algorithm."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.bm25f,
            explicit_ordering)
    @classmethod
    def search_lucene(cls, term, weights=None, with_score=False,
                      score_alias='score', explicit_ordering=False):
        """Full-text search for selected `term` using Lucene-style ranking."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.lucene,
            explicit_ordering)
_alphabet = 'abcdefghijklmnopqrstuvwxyz'
_alphanum = (set('\t ,"(){}*:_+0123456789') |
set(_alphabet) |
set(_alphabet.upper()) |
set((chr(26),)))
_invalid_ascii = set(chr(p) for p in range(128) if chr(p) not in _alphanum)
_quote_re = re.compile('(?:[^\s"]|"(?:\\.|[^"])*")+')
class FTS5Model(BaseFTSModel):
    """
    Requires SQLite >= 3.9.0.
    Table options:
    content: table name of external content, or empty string for "contentless"
    content_rowid: column name of external content primary key
    prefix: integer(s). Ex: '2' or '2 3 4'
    tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
    The unicode tokenizer supports the following parameters:
    * remove_diacritics (1 or 0, default is 1)
    * tokenchars (string of characters, e.g. '-_'
    * separators (string of characters)
    Parameters are passed as alternating parameter name and value, so:
    {'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
    Content-less tables:
    If you don't need the full-text content in it's original form, you can
    specify a content-less table. Searches and auxiliary functions will work
    as usual, but the only values returned when SELECT-ing can be rowid. Also
    content-less tables do not support UPDATE or DELETE.
    External content tables:
    You can set up triggers to sync these, e.g.
    -- Create a table. And an external content fts5 table to index it.
    CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
    CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
    -- Triggers to keep the FTS index up to date.
    CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
    INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
    END;
    CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
    INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
    END;
    CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
    INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
    INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
    END;
    Built-in auxiliary functions:
    * bm25(tbl[, weight_0, ... weight_n])
    * highlight(tbl, col_idx, prefix, suffix)
    * snippet(tbl, col_idx, prefix, suffix, ?, max_tokens)
    """
    # FTS5 does not support declared primary keys, but we can use the
    # implicit rowid.
    rowid = RowIDField()
    class Meta:
        extension_module = 'fts5'
    _error_messages = {
        'field_type': ('Besides the implicit `rowid` column, all columns must '
                       'be instances of SearchField'),
        'index': 'Secondary indexes are not supported for FTS5 models',
        'pk': 'FTS5 models must use the default `rowid` primary key',
    }
    @classmethod
    def validate_model(cls):
        # Perform FTS5-specific validation and options post-processing.
        if cls._meta.primary_key.name != 'rowid':
            raise ImproperlyConfigured(cls._error_messages['pk'])
        for field in cls._meta.fields.values():
            if not isinstance(field, (SearchField, RowIDField)):
                raise ImproperlyConfigured(cls._error_messages['field_type'])
        if cls._meta.indexes:
            raise ImproperlyConfigured(cls._error_messages['index'])
    @classmethod
    def fts5_installed(cls):
        """Return True if FTS5 is available (built in or loadable)."""
        if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
            return False
        # Test in-memory DB to determine if the FTS5 extension is installed.
        tmp_db = sqlite3.connect(':memory:')
        try:
            tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
        except:
            # Not built in -- attempt to load FTS5 as a runtime extension.
            # (Bare except: any failure falls through to the load attempt.)
            try:
                tmp_db.enable_load_extension(True)
                tmp_db.load_extension('fts5')
            except:
                return False
            else:
                # Loadable: also load it on the model's own database.
                cls._meta.database.load_extension('fts5')
        finally:
            tmp_db.close()
        return True
    @staticmethod
    def validate_query(query):
        """
        Simple helper function to indicate whether a search query is a
        valid FTS5 query. Note: this simply looks at the characters being
        used, and is not guaranteed to catch all problematic queries.
        """
        tokens = _quote_re.findall(query)
        for token in tokens:
            # Double-quoted phrases are accepted verbatim.
            if token.startswith('"') and token.endswith('"'):
                continue
            if set(token) & _invalid_ascii:
                return False
        return True
    @staticmethod
    def clean_query(query, replace=chr(26)):
        """
        Clean a query of invalid tokens.
        """
        accum = []
        any_invalid = False
        tokens = _quote_re.findall(query)
        for token in tokens:
            # Double-quoted phrases are passed through untouched.
            if token.startswith('"') and token.endswith('"'):
                accum.append(token)
                continue
            token_set = set(token)
            invalid_for_token = token_set & _invalid_ascii
            if invalid_for_token:
                any_invalid = True
                for c in invalid_for_token:
                    token = token.replace(c, replace)
            accum.append(token)
        if any_invalid:
            return ' '.join(accum)
        # Nothing replaced: return the original (preserves whitespace).
        return query
    @classmethod
    def match(cls, term):
        """
        Generate a `MATCH` expression appropriate for searching this table.
        """
        return match(cls._meta.entity, term)
    @classmethod
    def rank(cls, *args):
        # FTS5 has a built-in "rank" column; bm25() is used when weights
        # are supplied.
        return cls.bm25(*args) if args else SQL('rank')
    @classmethod
    def bm25(cls, *weights):
        return fn.bm25(cls._meta.entity, *weights)
    @classmethod
    def search(cls, term, weights=None, with_score=False, score_alias='score',
               explicit_ordering=False):
        """Full-text search using selected `term`."""
        return cls.search_bm25(
            FTS5Model.clean_query(term),
            weights,
            with_score,
            score_alias,
            explicit_ordering)
    @classmethod
    def search_bm25(cls, term, weights=None, with_score=False,
                    score_alias='score', explicit_ordering=False):
        """Full-text search using selected `term`."""
        if not weights:
            rank = SQL('rank')
        elif isinstance(weights, dict):
            # Look up each indexed field's weight by instance, then by name.
            weight_args = []
            for field in cls._meta.sorted_fields:
                if isinstance(field, SearchField) and not field.unindexed:
                    weight_args.append(
                        weights.get(field, weights.get(field.name, 1.0)))
            rank = fn.bm25(cls._meta.entity, *weight_args)
        else:
            rank = fn.bm25(cls._meta.entity, *weights)
        selection = ()
        order_by = rank
        if with_score:
            selection = (cls, rank.alias(score_alias))
        if with_score and not explicit_ordering:
            # Order by the aliased score to avoid computing rank twice.
            order_by = SQL(score_alias)
        return (cls
                .select(*selection)
                .where(cls.match(FTS5Model.clean_query(term)))
                .order_by(order_by))
    @classmethod
    def _fts_cmd_sql(cls, cmd, **extra_params):
        # FTS5 commands are INSERTs into the table's own column, optionally
        # with additional named parameters (e.g. rank=...).
        tbl = cls._meta.entity
        columns = [tbl]
        values = [cmd]
        for key, value in extra_params.items():
            columns.append(Entity(key))
            values.append(value)
        return NodeList((
            SQL('INSERT INTO'),
            cls._meta.entity,
            EnclosedNodeList(columns),
            SQL('VALUES'),
            EnclosedNodeList(values)))
    @classmethod
    def _fts_cmd(cls, cmd, **extra_params):
        query = cls._fts_cmd_sql(cmd, **extra_params)
        return cls._meta.database.execute(query)
    @classmethod
    def automerge(cls, level):
        if not (0 <= level <= 16):
            raise ValueError('level must be between 0 and 16')
        return cls._fts_cmd('automerge', rank=level)
    @classmethod
    def merge(cls, npages):
        return cls._fts_cmd('merge', rank=npages)
    @classmethod
    def set_pgsz(cls, pgsz):
        return cls._fts_cmd('pgsz', rank=pgsz)
    @classmethod
    def set_rank(cls, rank_expression):
        return cls._fts_cmd('rank', rank=rank_expression)
    @classmethod
    def delete_all(cls):
        return cls._fts_cmd('delete-all')
    @classmethod
    def VocabModel(cls, table_type='row', table=None):
        """Create (and cache on the class) a model for the fts5vocab table."""
        if table_type not in ('row', 'col', 'instance'):
            raise ValueError('table_type must be either "row", "col" or '
                             '"instance".')
        attr = '_vocab_model_%s' % table_type
        if not hasattr(cls, attr):
            class Meta:
                database = cls._meta.database
                table_name = table or cls._meta.table_name + '_v'
                extension_module = fn.fts5vocab(
                    cls._meta.entity,
                    SQL(table_type))
            attrs = {
                'term': VirtualField(TextField),
                'doc': IntegerField(),
                'cnt': IntegerField(),
                'rowid': RowIDField(),
                'Meta': Meta,
            }
            if table_type == 'col':
                attrs['col'] = VirtualField(TextField)
            elif table_type == 'instance':
                attrs['offset'] = VirtualField(IntegerField)
            class_name = '%sVocab' % cls.__name__
            setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
        return getattr(cls, attr)
def ClosureTable(model_class, foreign_key=None, referencing_class=None,
                 referencing_key=None):
    """Model factory for the transitive closure extension."""
    if referencing_class is None:
        referencing_class = model_class
    if foreign_key is None:
        # Default: locate the model's self-referential foreign key.
        for field_obj in model_class._meta.refs:
            if field_obj.rel_model is model_class:
                foreign_key = field_obj
                break
        else:
            raise ValueError('Unable to find self-referential foreign key.')
    source_key = model_class._meta.primary_key
    if referencing_key is None:
        referencing_key = source_key
    class BaseClosureTable(VirtualModel):
        depth = VirtualField(IntegerField)
        id = VirtualField(IntegerField)
        idcolumn = VirtualField(TextField)
        parentcolumn = VirtualField(TextField)
        root = VirtualField(IntegerField)
        tablename = VirtualField(TextField)
        class Meta:
            extension_module = 'transitive_closure'
        @classmethod
        def descendants(cls, node, depth=None, include_node=False):
            query = (model_class
                     .select(model_class, cls.depth.alias('depth'))
                     .join(cls, on=(source_key == cls.id))
                     .where(cls.root == node)
                     .objects())
            if depth is not None:
                query = query.where(cls.depth == depth)
            elif not include_node:
                query = query.where(cls.depth > 0)
            return query
        @classmethod
        def ancestors(cls, node, depth=None, include_node=False):
            query = (model_class
                     .select(model_class, cls.depth.alias('depth'))
                     .join(cls, on=(source_key == cls.root))
                     .where(cls.id == node)
                     .objects())
            # NOTE(review): uses truthiness (`if depth:`) while descendants()
            # uses `depth is not None`, so ancestors(depth=0) is treated as
            # "no depth filter" -- confirm this asymmetry is intended.
            if depth:
                query = query.where(cls.depth == depth)
            elif not include_node:
                query = query.where(cls.depth > 0)
            return query
        @classmethod
        def siblings(cls, node, include_node=False):
            if referencing_class is model_class:
                # self-join
                fk_value = node.__data__.get(foreign_key.name)
                query = model_class.select().where(foreign_key == fk_value)
            else:
                # siblings as given in reference_class
                siblings = (referencing_class
                            .select(referencing_key)
                            .join(cls, on=(foreign_key == cls.root))
                            .where((cls.id == node) & (cls.depth == 1)))
                # the according models
                query = (model_class
                         .select()
                         .where(source_key << siblings)
                         .objects())
            if not include_node:
                query = query.where(source_key != node)
            return query
    class Meta:
        database = referencing_class._meta.database
        options = {
            'tablename': referencing_class._meta.table_name,
            'idcolumn': referencing_key.column_name,
            'parentcolumn': foreign_key.column_name}
        primary_key = False
    name = '%sClosure' % model_class.__name__
    return type(name, (BaseClosureTable,), {'Meta': Meta})
class LSMTable(VirtualModel):
    """VirtualModel for the lsm1 key/value extension; supports dict-style
    access by primary key (scalar, slice, or expression lookups)."""
    class Meta:
        extension_module = 'lsm1'
        # Path to the LSM database file; must be set by the subclass.
        filename = None
    @classmethod
    def clean_options(cls, options):
        filename = cls._meta.filename
        if not filename:
            raise ValueError('LSM1 extension requires that you specify a '
                             'filename for the LSM database.')
        else:
            if len(filename) >= 2 and filename[0] != '"':
                filename = '"%s"' % filename
        if not cls._meta.primary_key:
            raise ValueError('LSM1 models must specify a primary-key field.')
        key = cls._meta.primary_key
        if isinstance(key, AutoField):
            raise ValueError('LSM1 models must explicitly declare a primary '
                             'key field.')
        if not isinstance(key, (TextField, BlobField, IntegerField)):
            raise ValueError('LSM1 key must be a TextField, BlobField, or '
                             'IntegerField.')
        key._hidden = True
        # Map the key field's Python type onto the lsm1 storage type.
        if isinstance(key, IntegerField):
            data_type = 'UINT'
        elif isinstance(key, BlobField):
            data_type = 'BLOB'
        else:
            data_type = 'TEXT'
        cls._meta.prefix_arguments = [filename, '"%s"' % key.name, data_type]
        # Does the key map to a scalar value, or a tuple of values?
        if len(cls._meta.sorted_fields) == 2:
            cls._meta._value_field = cls._meta.sorted_fields[1]
        else:
            cls._meta._value_field = None
        return options
    @classmethod
    def load_extension(cls, path='lsm.so'):
        cls._meta.database.load_extension(path)
    @staticmethod
    def slice_to_expr(key, idx):
        # Returns None (implicitly) for a fully-open slice [:].
        if idx.start is not None and idx.stop is not None:
            return key.between(idx.start, idx.stop)
        elif idx.start is not None:
            return key >= idx.start
        elif idx.stop is not None:
            return key <= idx.stop
    @staticmethod
    def _apply_lookup_to_query(query, key, lookup):
        # Returns (query, is_single): is_single is True only for an exact
        # key match, in which case callers may .get() a single row.
        if isinstance(lookup, slice):
            expr = LSMTable.slice_to_expr(key, lookup)
            if expr is not None:
                query = query.where(expr)
            return query, False
        elif isinstance(lookup, Expression):
            return query.where(lookup), False
        else:
            return query.where(key == lookup), True
    @classmethod
    def get_by_id(cls, pk):
        query, is_single = cls._apply_lookup_to_query(
            cls.select().namedtuples(),
            cls._meta.primary_key,
            pk)
        if is_single:
            try:
                row = query.get()
            except cls.DoesNotExist:
                # Dict-style semantics: missing key -> KeyError.
                raise KeyError(pk)
            return row[1] if cls._meta._value_field is not None else row
        else:
            return query
    @classmethod
    def set_by_id(cls, key, value):
        if cls._meta._value_field is not None:
            data = {cls._meta._value_field: value}
        elif isinstance(value, tuple):
            data = {}
            for field, fval in zip(cls._meta.sorted_fields[1:], value):
                data[field] = fval
        elif isinstance(value, dict):
            data = value
        elif isinstance(value, cls):
            data = value.__dict__
        # NOTE(review): any other value type leaves `data` unbound and the
        # next line raises NameError -- confirm whether that failure mode
        # is intended or should raise ValueError.
        data[cls._meta.primary_key] = key
        cls.replace(data).execute()
    @classmethod
    def delete_by_id(cls, pk):
        # is_single is unused: scalar, slice and expression lookups all
        # execute the same bulk DELETE.
        query, is_single = cls._apply_lookup_to_query(
            cls.delete(),
            cls._meta.primary_key,
            pk)
        return query.execute()
# Register the MATCH operator used by full-text search expressions.
OP.MATCH = 'MATCH'
def _sqlite_regexp(regex, value):
return re.search(regex, value) is not None
class SqliteExtDatabase(SqliteDatabase):
    """SqliteDatabase that can register optional user-defined functions
    (ranking, hashing, regexp, bloomfilter, json_contains), preferring the
    C implementations when the compiled extension is available."""
    def __init__(self, database, c_extensions=None, rank_functions=True,
                 hash_functions=False, regexp_function=False,
                 bloomfilter=False, json_contains=False, *args, **kwargs):
        super(SqliteExtDatabase, self).__init__(database, *args, **kwargs)
        self._row_factory = None
        # c_extensions=True is a hard requirement; None means "if available".
        if c_extensions and not CYTHON_SQLITE_EXTENSIONS:
            raise ImproperlyConfigured('SqliteExtDatabase initialized with '
                                       'C extensions, but shared library was '
                                       'not found!')
        prefer_c = CYTHON_SQLITE_EXTENSIONS and (c_extensions is not False)
        if rank_functions:
            if prefer_c:
                register_rank_functions(self)
            else:
                self.register_function(bm25, 'fts_bm25')
                self.register_function(rank, 'fts_rank')
                self.register_function(bm25, 'fts_bm25f') # Fall back to bm25.
                self.register_function(bm25, 'fts_lucene')
        if hash_functions:
            if not prefer_c:
                raise ValueError('C extension required to register hash '
                                 'functions.')
            register_hash_functions(self)
        if regexp_function:
            # Enables the REGEXP operator (2-argument user function).
            self.register_function(_sqlite_regexp, 'regexp', 2)
        if bloomfilter:
            if not prefer_c:
                raise ValueError('C extension required to use bloomfilter.')
            register_bloomfilter(self)
        if json_contains:
            self.register_function(_json_contains, 'json_contains')
        self._c_extensions = prefer_c
    def _add_conn_hooks(self, conn):
        super(SqliteExtDatabase, self)._add_conn_hooks(conn)
        # Apply any registered row factory to each new connection.
        if self._row_factory:
            conn.row_factory = self._row_factory
    def row_factory(self, fn):
        # Decorator-style registration of a sqlite3 row factory.
        self._row_factory = fn
if CYTHON_SQLITE_EXTENSIONS:
    # Flag values for sqlite3_status() (process-wide counters).
    SQLITE_STATUS_MEMORY_USED = 0
    SQLITE_STATUS_PAGECACHE_USED = 1
    SQLITE_STATUS_PAGECACHE_OVERFLOW = 2
    SQLITE_STATUS_SCRATCH_USED = 3
    SQLITE_STATUS_SCRATCH_OVERFLOW = 4
    SQLITE_STATUS_MALLOC_SIZE = 5
    SQLITE_STATUS_PARSER_STACK = 6
    SQLITE_STATUS_PAGECACHE_SIZE = 7
    SQLITE_STATUS_SCRATCH_SIZE = 8
    SQLITE_STATUS_MALLOC_COUNT = 9
    # Flag values for sqlite3_db_status() (per-connection counters).
    SQLITE_DBSTATUS_LOOKASIDE_USED = 0
    SQLITE_DBSTATUS_CACHE_USED = 1
    SQLITE_DBSTATUS_SCHEMA_USED = 2
    SQLITE_DBSTATUS_STMT_USED = 3
    SQLITE_DBSTATUS_LOOKASIDE_HIT = 4
    SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5
    SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6
    SQLITE_DBSTATUS_CACHE_HIT = 7
    SQLITE_DBSTATUS_CACHE_MISS = 8
    SQLITE_DBSTATUS_CACHE_WRITE = 9
    SQLITE_DBSTATUS_DEFERRED_FKS = 10
    #SQLITE_DBSTATUS_CACHE_USED_SHARED = 11
    def __status__(flag, return_highwater=False):
        """
        Expose a sqlite3_status() call for a particular flag as a property of
        the Database object.

        :param flag: one of the SQLITE_STATUS_* constants above.
        :param return_highwater: when True the property yields only the
            high-water mark, otherwise the full result from
            sqlite_get_status().
        """
        def getter(self):
            result = sqlite_get_status(flag)
            return result[1] if return_highwater else result
        return property(getter)
    def __dbstatus__(flag, return_highwater=False, return_current=False):
        """
        Expose a sqlite3_dbstatus() call for a particular flag as a property of
        the Database instance. Unlike sqlite3_status(), the dbstatus properties
        pertain to the current connection.

        :param flag: one of the SQLITE_DBSTATUS_* constants above.
        :param return_highwater: yield only the high-water mark.
        :param return_current: yield only the current value (checked first,
            so it takes precedence over return_highwater).
        :raises ImproperlyConfigured: if no connection is open.
        """
        def getter(self):
            if self._state.conn is None:
                raise ImproperlyConfigured('database connection not opened.')
            result = sqlite_get_db_status(self._state.conn, flag)
            if return_current:
                return result[0]
            return result[1] if return_highwater else result
        return property(getter)
class CSqliteExtDatabase(SqliteExtDatabase):
    """SqliteExtDatabase backed by the C-extension ConnectionHelper.

    Adds commit/rollback/update hook registration (usable as decorators),
    an optional busy-handler replacement, online backup helpers, blob I/O,
    and sqlite status/dbstatus counters exposed as properties.
    """
    def __init__(self, *args, **kwargs):
        # Hook callables are remembered here so they can be re-registered
        # each time a new connection is opened (see _add_conn_hooks).
        self._conn_helper = None
        self._commit_hook = self._rollback_hook = self._update_hook = None
        self._replace_busy_handler = False
        super(CSqliteExtDatabase, self).__init__(*args, **kwargs)
    def init(self, database, replace_busy_handler=False, **kwargs):
        super(CSqliteExtDatabase, self).init(database, **kwargs)
        self._replace_busy_handler = replace_busy_handler
    def _close(self, conn):
        # Detach any registered hooks before closing the connection.
        if self._commit_hook:
            self._conn_helper.set_commit_hook(None)
        if self._rollback_hook:
            self._conn_helper.set_rollback_hook(None)
        if self._update_hook:
            self._conn_helper.set_update_hook(None)
        return super(CSqliteExtDatabase, self)._close(conn)
    def _add_conn_hooks(self, conn):
        # Re-attach the remembered hooks to the freshly-opened connection.
        super(CSqliteExtDatabase, self)._add_conn_hooks(conn)
        self._conn_helper = ConnectionHelper(conn)
        if self._commit_hook is not None:
            self._conn_helper.set_commit_hook(self._commit_hook)
        if self._rollback_hook is not None:
            self._conn_helper.set_rollback_hook(self._rollback_hook)
        if self._update_hook is not None:
            self._conn_helper.set_update_hook(self._update_hook)
        if self._replace_busy_handler:
            # _timeout is in seconds; the helper apparently expects
            # milliseconds (hence * 1000). Default to 5 seconds.
            timeout = self._timeout or 5
            self._conn_helper.set_busy_handler(timeout * 1000)
    def on_commit(self, fn):
        # Register a commit hook; usable as a decorator (returns fn).
        self._commit_hook = fn
        if not self.is_closed():
            self._conn_helper.set_commit_hook(fn)
        return fn
    def on_rollback(self, fn):
        # Register a rollback hook; usable as a decorator (returns fn).
        self._rollback_hook = fn
        if not self.is_closed():
            self._conn_helper.set_rollback_hook(fn)
        return fn
    def on_update(self, fn):
        # Register an update hook; usable as a decorator (returns fn).
        self._update_hook = fn
        if not self.is_closed():
            self._conn_helper.set_update_hook(fn)
        return fn
    def changes(self):
        # Rows changed by the most recent statement on this connection.
        return self._conn_helper.changes()
    @property
    def last_insert_rowid(self):
        return self._conn_helper.last_insert_rowid()
    @property
    def autocommit(self):
        return self._conn_helper.autocommit()
    def backup(self, destination, pages=None, name=None, progress=None):
        """Copy this database into *destination* (another database)."""
        return backup(self.connection(), destination.connection(),
                      pages=pages, name=name, progress=progress)
    def backup_to_file(self, filename, pages=None, name=None,
                       progress=None):
        """Copy this database into the file at *filename*."""
        return backup_to_file(self.connection(), filename, pages=pages,
                              name=name, progress=progress)
    def blob_open(self, table, column, rowid, read_only=False):
        """Open incremental blob I/O on the given table/column/rowid."""
        return Blob(self, table, column, rowid, read_only)
    # Status properties.
    memory_used = __status__(SQLITE_STATUS_MEMORY_USED)
    malloc_size = __status__(SQLITE_STATUS_MALLOC_SIZE, True)
    malloc_count = __status__(SQLITE_STATUS_MALLOC_COUNT)
    pagecache_used = __status__(SQLITE_STATUS_PAGECACHE_USED)
    pagecache_overflow = __status__(SQLITE_STATUS_PAGECACHE_OVERFLOW)
    pagecache_size = __status__(SQLITE_STATUS_PAGECACHE_SIZE, True)
    scratch_used = __status__(SQLITE_STATUS_SCRATCH_USED)
    scratch_overflow = __status__(SQLITE_STATUS_SCRATCH_OVERFLOW)
    scratch_size = __status__(SQLITE_STATUS_SCRATCH_SIZE, True)
    # Connection status properties.
    lookaside_used = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_USED)
    lookaside_hit = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_HIT, True)
    lookaside_miss = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
                                  True)
    lookaside_miss_full = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
                                       True)
    cache_used = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED, False, True)
    #cache_used_shared = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED_SHARED,
    #                                 False, True)
    schema_used = __dbstatus__(SQLITE_DBSTATUS_SCHEMA_USED, False, True)
    statement_used = __dbstatus__(SQLITE_DBSTATUS_STMT_USED, False, True)
    cache_hit = __dbstatus__(SQLITE_DBSTATUS_CACHE_HIT, False, True)
    cache_miss = __dbstatus__(SQLITE_DBSTATUS_CACHE_MISS, False, True)
    cache_write = __dbstatus__(SQLITE_DBSTATUS_CACHE_WRITE, False, True)
def match(lhs, rhs):
    """Return an ``lhs MATCH rhs`` expression (SQLite full-text search)."""
    expression = Expression(lhs, OP.MATCH, rhs)
    return expression
def _parse_match_info(buf):
    """Unpack the blob returned by matchinfo() into a list of ints.

    The buffer is a sequence of native-endian 32-bit unsigned integers;
    see http://sqlite.org/fts3.html#matchinfo for the layout.
    """
    values = []
    for offset in range(0, len(buf), 4):
        (value,) = struct.unpack('@I', buf[offset:offset + 4])
        values.append(value)
    return values
def get_weights(ncol, raw_weights):
    """Return a per-column weight list of length *ncol*.

    With no raw_weights every column weighs 1. Otherwise the supplied
    weights are copied positionally and any remaining columns weigh 0.
    """
    if raw_weights:
        weights = [0] * ncol
        for position, value in enumerate(raw_weights):
            weights[position] = value
        return weights
    return [1] * ncol
# Ranking implementation, which parse matchinfo.
def rank(raw_match_info, *raw_weights):
    """Simple FTS relevance ranking over matchinfo's default 'pcx' blob.

    Ported from the example rank() function in the SQLite docs
    (http://sqlite.org/fts3.html#appendix_a). Returns a negated score so
    that an ascending ORDER BY sorts best matches first.
    """
    info = _parse_match_info(raw_match_info)
    nphrase, ncol = info[:2]
    weights = get_weights(ncol, raw_weights)
    total = 0.0
    # The 'x' section holds, for each (phrase, column) pair, a triple of
    # counters; only the first two are used here:
    #   [0] hits in this row's column, [1] hits in that column over all rows.
    # Layout for 2 phrases x 3 columns:
    #   p0 : c0=[0, 1, 2], c1=[3, 4, 5], c2=[6, 7, 8]
    #   p1 : c0=[9, 10, 11], c1=[12, 13, 14], c2=[15, 16, 17]
    for phrase in range(nphrase):
        base = 2 + (phrase * ncol * 3)
        for col in range(ncol):
            weight = weights[col]
            if not weight:
                continue
            idx = base + (col * 3)
            # Score by how often the phrase appears in this row's column
            # relative to its frequency across all rows -- a rough
            # "high-value term" heuristic.
            hits_this_row = info[idx]
            hits_all_rows = info[idx + 1]
            if hits_this_row > 0:
                total += weight * (float(hits_this_row) / hits_all_rows)
    return -total
# Okapi BM25 ranking implementation (FTS4 only).
def bm25(raw_match_info, *args):
    """Okapi BM25 ranking implementation (FTS4 only).

    Usage:

        # Format string *must* be pcnalx
        # Second parameter to bm25 specifies the index of the column, on
        # the table being queried.
        bm25(matchinfo(document_tbl, 'pcnalx'), 1) AS rank

    Returns a negated score so ascending sort puts best matches first.
    """
    info = _parse_match_info(raw_match_info)
    K = 1.2
    B = 0.75
    total = 0.0
    # Offsets of the p, c, n and a sections in the matchinfo buffer.
    p_off, c_off, n_off, a_off = 0, 1, 2, 3
    nphrase = info[p_off]              # number of phrases in the query
    ncol = info[c_off]                 # number of columns in the table
    ndoc = info[n_off]                 # total number of documents (N)
    l_off = a_off + ncol               # per-column lengths of this doc
    x_off = l_off + ncol               # per-(phrase, column) hit triples
    weights = get_weights(ncol, args)
    for phrase in range(nphrase):
        for col in range(ncol):
            weight = weights[col]
            if weight == 0:
                continue
            x = x_off + (3 * (col + phrase * ncol))
            tf = float(info[x])        # f(qi, D): term frequency in doc
            df = float(info[x + 2])    # n(qi): docs containing the term
            # idf = log((N - n(qi) + 0.5) / (n(qi) + 0.5)), clamped to a
            # small positive value when the term is very common.
            idf = math.log(
                (ndoc - df + 0.5) /
                (df + 0.5))
            if idf <= 0.0:
                idf = 1e-6
            doc_len = float(info[l_off + col])       # |D|
            avg_len = float(info[a_off + col]) or 1.  # avgdl
            ratio = doc_len / avg_len
            numerator = tf * (K + 1)
            denominator = tf + (K * (1 - B + (B * ratio)))
            total += (idf * (numerator / denominator)) * weight
    return -total
def _json_contains(src_json, obj_json):
    """Return True if JSON document *src_json* structurally contains *obj_json*.

    Containment rules (applied iteratively via a work stack, no recursion):
    - dict contains dict when every key of obj exists in src and the paired
      values also match;
    - dict contains list when every list item is a key of src;
    - dict contains scalar when the scalar is a key of src;
    - list contains list when obj matches src positionally (obj may be
      shorter than src but not longer);
    - list contains scalar when the scalar is a member of src;
    - otherwise the two values must compare equal.

    Invalid JSON in either argument yields False.
    """
    stack = []
    try:
        stack.append((json.loads(obj_json), json.loads(src_json)))
    except (TypeError, ValueError):
        # Invalid JSON! json.loads raises ValueError (JSONDecodeError) for
        # malformed text and TypeError for non-decodable inputs. A bare
        # except here would also swallow KeyboardInterrupt/SystemExit.
        return False
    while stack:
        obj, src = stack.pop()
        if isinstance(src, dict):
            if isinstance(obj, dict):
                for key in obj:
                    if key not in src:
                        return False
                    stack.append((obj[key], src[key]))
            elif isinstance(obj, list):
                for item in obj:
                    if item not in src:
                        return False
            elif obj not in src:
                return False
        elif isinstance(src, list):
            if isinstance(obj, dict):
                return False
            elif isinstance(obj, list):
                try:
                    for i in range(len(obj)):
                        stack.append((obj[i], src[i]))
                except IndexError:
                    # obj is longer than src -- cannot be contained.
                    return False
            elif obj not in src:
                return False
        elif obj != src:
            return False
    return True
| |
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 xarvh
# Copyright (c) 2013 roger
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 dequis
# Copyright (c) 2015 Dario Giovannetti
# Copyright (c) 2015 Alexander Lozovskoy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import contextlib
import xcffib
import xcffib.xproto
from . import command
from . import hook
from . import window
from . import utils
from .log_utils import logger
class _Group(command.CommandObject):
    """A container for a bunch of windows
    Analogous to workspaces in other window managers. Each client window
    managed by the window manager belongs to exactly one group.
    A group is identified by its name but displayed in GroupBox widget by its label.
    """
    def __init__(self, name, layout=None, label=None):
        self.name = name
        self.label = name if label is None else label
        self.custom_layout = layout # will be set on _configure
        self.windows = set()
        self.qtile = None
        self.layouts = []
        self.floating_layout = None
        # self.focus_history lists the group's windows in the order they
        # received focus, from the oldest (first item) to the currently
        # focused window (last item); NB the list does *not* contain any
        # windows that never received focus; refer to self.windows for the
        # complete set
        self.focus_history = []
        self.screen = None
        self.current_layout = None
    def _configure(self, layouts, floating_layout, qtile):
        # Deferred initialisation, called once the configured layouts are
        # known; resets all per-group runtime state and clones the layouts.
        self.screen = None
        self.current_layout = 0
        self.focus_history = []
        self.windows = set()
        self.qtile = qtile
        self.layouts = [i.clone(self) for i in layouts]
        self.floating_layout = floating_layout
        if self.custom_layout is not None:
            self.layout = self.custom_layout
            self.custom_layout = None
    @property
    def current_window(self):
        """The most recently focused window in this group, or None."""
        try:
            return self.focus_history[-1]
        except IndexError:
            # no window has focus
            return None
    @current_window.setter
    def current_window(self, win):
        # Move (or append) win to the end of the focus history, making it
        # the current window.
        try:
            self.focus_history.remove(win)
        except ValueError:
            # win has never received focus before
            pass
        self.focus_history.append(win)
    def _remove_from_focus_history(self, win):
        """Drop *win* from the focus history; return True if it was the
        current window (i.e. the last history entry)."""
        try:
            index = self.focus_history.index(win)
        except ValueError:
            # win has never received focus
            return False
        else:
            del self.focus_history[index]
            # return True if win was the last item (i.e. it was current_window)
            return index == len(self.focus_history)
    @property
    def layout(self):
        """The currently active layout object."""
        return self.layouts[self.current_layout]
    @layout.setter
    def layout(self, layout):
        """
        Parameters
        ==========
        layout :
            a string matching the name of a Layout object.

        Raises ValueError if no layout with that name exists.
        """
        for index, obj in enumerate(self.layouts):
            if obj.name == layout:
                self.current_layout = index
                hook.fire(
                    "layout_change",
                    self.layouts[self.current_layout],
                    self
                )
                self.layout_all()
                return
        raise ValueError("No such layout: %s" % layout)
    def use_layout(self, index):
        """Switch to the layout at *index*, firing the layout_change hook."""
        assert 0 <= index < len(self.layouts), "layout index out of bounds"
        self.layout.hide()
        self.current_layout = index
        hook.fire("layout_change", self.layouts[self.current_layout], self)
        self.layout_all()
        screen = self.screen.get_rect()
        self.layout.show(screen)
    def use_next_layout(self):
        # Wraps around to the first layout.
        self.use_layout((self.current_layout + 1) % (len(self.layouts)))
    def use_previous_layout(self):
        # Wraps around to the last layout.
        self.use_layout((self.current_layout - 1) % (len(self.layouts)))
    def layout_all(self, warp=False):
        """Layout the floating layer, then the current layout.
        If we have a current_window give it focus, optionally moving warp
        to it.
        """
        if self.screen and len(self.windows):
            # Suppress EnterWindow events while windows are being moved so
            # that re-layout does not trigger spurious focus changes.
            with self.disable_mask(xcffib.xproto.EventMask.EnterWindow):
                normal = [x for x in self.windows if not x.floating]
                floating = [
                    x for x in self.windows
                    if x.floating and not x.minimized
                ]
                screen = self.screen.get_rect()
                if normal:
                    try:
                        self.layout.layout(normal, screen)
                    except: # noqa: E722
                        logger.exception("Exception in layout %s",
                                         self.layout.name)
                if floating:
                    self.floating_layout.layout(floating, screen)
                if self.current_window and \
                        self.screen == self.qtile.current_screen:
                    self.current_window.focus(warp)
    def _set_screen(self, screen):
        """Set this group's screen to *screen* (None hides the group)."""
        if screen == self.screen:
            return
        self.screen = screen
        if self.screen:
            # move all floating guys offset to new screen
            self.floating_layout.to_screen(self, self.screen)
            self.layout_all(warp=self.qtile.config.cursor_warp)
            rect = self.screen.get_rect()
            self.floating_layout.show(rect)
            self.layout.show(rect)
        else:
            self.hide()
    def hide(self):
        """Hide every window in the group and detach it from its screen."""
        self.screen = None
        # Suppress enter/leave/focus events while hiding so the hidden
        # windows do not steal or report focus.
        with self.disable_mask(xcffib.xproto.EventMask.EnterWindow |
                               xcffib.xproto.EventMask.FocusChange |
                               xcffib.xproto.EventMask.LeaveWindow):
            for i in self.windows:
                i.hide()
            self.layout.hide()
    @contextlib.contextmanager
    def disable_mask(self, mask):
        """Context manager: temporarily disable the event *mask* on all of
        the group's windows, restoring the masks on exit."""
        for i in self.windows:
            i._disable_mask(mask)
        yield
        for i in self.windows:
            i._reset_mask()
    def focus(self, win, warp=True, force=False):
        """Focus the given window
        If win is in the group, blur any windows and call ``focus`` on the
        layout (in case it wants to track anything), fire focus_change hook and
        invoke layout_all.
        Parameters
        ==========
        win :
            Window to focus
        warp :
            Warp pointer to win. This should basically always be True, unless
            the focus event is coming from something like EnterNotify, where
            the user is actively using the mouse, or on full screen layouts
            where only one window is "maximized" at a time, and it doesn't make
            sense for the mouse to automatically move.
        force :
            Change focus even while a window drag is in progress.
        """
        if self.qtile._drag and not force:
            # don't change focus while dragging windows (unless forced)
            return
        if win:
            if win not in self.windows:
                return
            self.current_window = win
            if win.floating:
                for l in self.layouts:
                    l.blur()
                self.floating_layout.focus(win)
            else:
                self.floating_layout.blur()
                for l in self.layouts:
                    l.focus(win)
        hook.fire("focus_change")
        self.layout_all(warp)
    def info(self):
        """Return a dict describing the group's current state."""
        return dict(
            name=self.name,
            label=self.label,
            focus=self.current_window.name if self.current_window else None,
            windows=[i.name for i in self.windows],
            focus_history=[i.name for i in self.focus_history],
            layout=self.layout.name,
            layouts=[l.name for l in self.layouts],
            floating_info=self.floating_layout.info(),
            screen=self.screen.index if self.screen else None
        )
    def add(self, win, focus=True, force=False):
        """Add *win* to the group, marking it floating/fullscreen when the
        window state or floating rules say so, and optionally focus it."""
        hook.fire("group_window_add")
        self.windows.add(win)
        win.group = self
        try:
            if 'fullscreen' in win.window.get_net_wm_state() and \
                    self.qtile.config.auto_fullscreen:
                win._float_state = window.FULLSCREEN
            elif self.floating_layout.match(win):
                # !!! tell it to float, can't set floating
                # because it's too early
                # so just set the flag underneath
                win._float_state = window.FLOATING
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            pass # doesn't matter
        if win.floating:
            self.floating_layout.add(win)
        else:
            for i in self.layouts:
                i.add(win)
        if focus:
            self.focus(win, warp=True, force=force)
    def remove(self, win, force=False):
        """Remove *win* from the group and focus whichever window the
        layouts suggest next (if win had focus)."""
        self.windows.remove(win)
        hadfocus = self._remove_from_focus_history(win)
        win.group = None
        if win.floating:
            nextfocus = self.floating_layout.remove(win)
            nextfocus = nextfocus or \
                self.current_window or \
                self.layout.focus_first() or \
                self.floating_layout.focus_first(group=self)
        else:
            for i in self.layouts:
                if i is self.layout:
                    nextfocus = i.remove(win)
                else:
                    i.remove(win)
            nextfocus = nextfocus or \
                self.floating_layout.focus_first(group=self) or \
                self.current_window or \
                self.layout.focus_first()
        # a notification may not have focus
        if hadfocus:
            self.focus(nextfocus, warp=True, force=force)
            # no next focus window means focus changed to nothing
            if not nextfocus:
                hook.fire("focus_change")
        elif self.screen:
            self.layout_all()
    def mark_floating(self, win, floating):
        """Move *win* between the tiled layouts and the floating layer."""
        if floating:
            if win in self.floating_layout.find_clients(self):
                # already floating
                pass
            else:
                for i in self.layouts:
                    i.remove(win)
                    if win is self.current_window:
                        i.blur()
                self.floating_layout.add(win)
                if win is self.current_window:
                    self.floating_layout.focus(win)
        else:
            self.floating_layout.remove(win)
            self.floating_layout.blur()
            for i in self.layouts:
                i.add(win)
                if win is self.current_window:
                    i.focus(win)
        self.layout_all()
    def _items(self, name):
        # Command-graph navigation: enumerate selectable child objects.
        if name == "layout":
            return (True, list(range(len(self.layouts))))
        elif name == "window":
            return (True, [i.window.wid for i in self.windows])
        elif name == "screen":
            return (True, None)
    def _select(self, name, sel):
        # Command-graph navigation: resolve a selector to a child object.
        if name == "layout":
            if sel is None:
                return self.layout
            else:
                return utils.lget(self.layouts, sel)
        elif name == "window":
            if sel is None:
                return self.current_window
            else:
                for i in self.windows:
                    if i.window.wid == sel:
                        return i
        elif name == "screen":
            return self.screen
    def cmd_setlayout(self, layout):
        """Switch to the layout with the given name."""
        self.layout = layout
    def cmd_info(self):
        """Returns a dictionary of info for this group"""
        return self.info()
    def cmd_toscreen(self, screen=None):
        """Pull a group to a specified screen.
        Parameters
        ==========
        screen :
            Screen offset. If not specified, we assume the current screen.
        Examples
        ========
        Pull group to the current screen::
            toscreen()
        Pull group to screen 0::
            toscreen(0)
        """
        if screen is None:
            screen = self.qtile.current_screen
        else:
            screen = self.qtile.screens[screen]
        screen.set_group(self)
    def _get_group(self, direction, skip_empty=False, skip_managed=False):
        """Find a group walking the groups list in the specified direction
        Parameters
        ==========
        direction :
            +1 walks forwards, -1 backwards (wraps around).
        skip_empty :
            skips the empty groups
        skip_managed :
            skips the groups that have a screen
        """
        def match(group):
            if group is self:
                return True
            if skip_empty and not group.windows:
                return False
            if skip_managed and group.screen:
                return False
            return True
        groups = [group for group in self.qtile.groups if match(group)]
        index = (groups.index(self) + direction) % len(groups)
        return groups[index]
    def get_previous_group(self, skip_empty=False, skip_managed=False):
        return self._get_group(-1, skip_empty, skip_managed)
    def get_next_group(self, skip_empty=False, skip_managed=False):
        return self._get_group(1, skip_empty, skip_managed)
    def cmd_unminimize_all(self):
        """Unminimise all windows in this group"""
        for w in self.windows:
            w.minimized = False
        self.layout_all()
    def cmd_next_window(self):
        """
        Focus the next window in group.
        Method cycles _all_ windows in group regardless if tiled in current
        layout or floating. Cycling of tiled and floating windows is not mixed.
        The cycling order depends on the current Layout.
        """
        if not self.windows:
            return
        if self.current_window.floating:
            nxt = self.floating_layout.focus_next(self.current_window) or \
                self.layout.focus_first() or \
                self.floating_layout.focus_first(group=self)
        else:
            nxt = self.layout.focus_next(self.current_window) or \
                self.floating_layout.focus_first(group=self) or \
                self.layout.focus_first()
        self.focus(nxt, True)
    def cmd_prev_window(self):
        """
        Focus the previous window in group.
        Method cycles _all_ windows in group regardless if tiled in current
        layout or floating. Cycling of tiled and floating windows is not mixed.
        The cycling order depends on the current Layout.
        """
        if not self.windows:
            return
        if self.current_window.floating:
            nxt = self.floating_layout.focus_previous(self.current_window) or \
                self.layout.focus_last() or \
                self.floating_layout.focus_last(group=self)
        else:
            nxt = self.layout.focus_previous(self.current_window) or \
                self.floating_layout.focus_last(group=self) or \
                self.layout.focus_last()
        self.focus(nxt, True)
    def cmd_focus_back(self):
        """
        Focus the window that had focus before the current one got it.
        Repeated calls to this function would basically continuously switch
        between the last two focused windows. Do nothing if less than 2
        windows ever received focus.
        """
        try:
            win = self.focus_history[-2]
        except IndexError:
            pass
        else:
            self.focus(win)
    def cmd_focus_by_name(self, name):
        """
        Focus the first window with the given name. Do nothing if the name is
        not found.
        """
        for win in self.windows:
            if win.name == name:
                self.focus(win)
                break
    def cmd_info_by_name(self, name):
        """
        Get the info for the first window with the given name without giving it
        focus. Do nothing if the name is not found.
        """
        for win in self.windows:
            if win.name == name:
                return win.info()
    def cmd_switch_groups(self, name):
        """Switch position of current group with name"""
        self.qtile.cmd_switch_groups(self.name, name)
    def cmd_set_label(self, label):
        """
        Set the display name of current group to be used in GroupBox widget.
        If label is None, the name of the group is used as display name.
        If label is the empty string, the group is invisible in GroupBox.
        """
        self.label = label if label is not None else self.name
        hook.fire("changegroup")
    def __repr__(self):
        return "<group.Group (%r)>" % self.name
| |
# Copyright (c) 2013 Qubell Inc., http://qubell.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from qubell.api.private.instance import InstanceList, Instance
from qubell.api.private.revision import RevisionList
from qubell.api.tools import lazyproperty, retry
__author__ = "Vasyl Khomenko"
__copyright__ = "Copyright 2013, Qubell.com"
__license__ = "Apache"
__email__ = "vkhomenko@qubell.com"
import logging as log
import simplejson as json
from qubell.api.private import exceptions
from qubell.api.private.common import QubellEntityList, Entity
from qubell.api.provider.router import InstanceRouter
class Application(Entity, InstanceRouter):
    """
    Base class for applications. It should create application and services+environment requested
    """
    # noinspection PyShadowingBuiltins
    def __init__(self, organization, id):
        self.organization = organization
        self.organizationId = self.organization.organizationId
        self.applicationId = self.id = id
        # launch/create_instance both delegate to the organization with this
        # application pre-bound as a keyword argument.
        self.launch = self.create_instance = functools.partial(organization.create_instance, application=self)
    @lazyproperty
    def instances(self):
        # Lazily-built list of this application's (non-destroyed) instances.
        return InstanceList(list_json_method=self.list_instances_json,
                            organization=self.organization).init_router(self._router)
    @lazyproperty
    def destroyed_instances(self):
        # Lazily-built list of instances that were already destroyed.
        return InstanceList(list_json_method=self.list_destroyed_instances_json,
                            organization=self.organization).init_router(self._router)
    @lazyproperty
    def revisions(self):
        # Lazily-built list of this application's revisions.
        return RevisionList(list_json_method=self.list_revisions_json, application=self).init_router(self._router)
    @property
    def defaultEnvironment(self):
        return self.organization.get_default_environment()
    @property
    def name(self):
        # Not cached: every access re-fetches the application json.
        return self.json()['name']
    @staticmethod
    def new(organization, name, manifest, router):
        """Create a new application on the server from *manifest* and
        return the corresponding Application object."""
        log.info("Creating application: %s" % name)
        resp = router.post_organization_application(org_id=organization.organizationId,
                                                    files={'path': manifest.content},
                                                    data={'manifestSource': 'upload', 'name': name})
        app = Application(organization, resp.json()['id']).init_router(router)
        app.manifest = manifest
        log.info("Application %s created (%s)" % (name, app.applicationId))
        return app
    def delete(self, clean=False):
        """Delete the application; with clean=True first destroy all of
        its instances and revisions."""
        log.info("Removing application: id=%s" % self.applicationId)
        if clean:
            self.clean()
        self._router.delete_application(org_id=self.organizationId, app_id=self.applicationId)
        return True
    def update(self, **kwargs):
        """Update application attributes. A 'manifest' kwarg is uploaded
        separately; the remaining kwargs are PUT as json."""
        if kwargs.get('manifest'):
            self.upload(kwargs.pop('manifest'))
        log.info("Updating application: id=%s" % self.applicationId)
        data = json.dumps(kwargs)
        resp = self._router.put_application(org_id=self.organizationId, app_id=self.applicationId, data=data)
        return resp.json()
    def clean(self, timeout=None):
        """Destroy all instances, wait (with retries controlled by the
        *timeout* triple) until they report Destroyed, then delete all
        revisions."""
        if not timeout:
            timeout = [7, 1, 1.5]
        instances = self.instances
        log.info("Cleaning application: id=%s" % self.applicationId)
        for ins in instances:
            if ins.status not in ['Destroyed', 'Destroying']:
                ins.destroy()
        # Retried until all instances report Destroyed or retries run out.
        @retry(*timeout, retry_exception=AssertionError)
        def eventually_clean():
            # noinspection PyShadowingNames
            for ins in instances:
                assert ins.status == 'Destroyed'
        eventually_clean()
        for rev in self.revisions:
            rev.delete()
        return True
    def remove_destroyed_instances(self):
        """Purge destroyed instances from the server-side listing."""
        return self._router.delete_destroyed_instances(org_id=self.organizationId, app_id=self.applicationId).json()
    def json(self):
        """Fetch the raw application json from the server."""
        return self._router.get_application(org_id=self.organizationId, app_id=self.applicationId).json()
    def list_instances_json(self):
        return self.organization.list_instances_json(application=self)
    def list_destroyed_instances_json(self):
        return self.organization.list_instances_json(application=self, show_only_destroyed=True)
    def __getattr__(self, key):
        # Unknown attributes fall back to the server-side json; note a
        # falsy json value is coerced to False.
        resp = self.json()
        if key not in resp:
            raise exceptions.NotFoundError('Cannot get property %s' % key)
        return resp[key] or False
    # REVISION
    # noinspection PyShadowingBuiltins
    def get_revision(self, id):
        # Imported locally to avoid a circular import with revision.py.
        from qubell.api.private.revision import Revision
        rev = Revision(application=self, id=id).init_router(self._router)
        return rev
    def list_revisions_json(self):
        # return self.json()['revisions']
        return self._router.get_revisions(org_id=self.organizationId, app_id=self.applicationId).json()
    def create_revision(self, name, instance=None, parameters=None, version=None, submodules=None):
        """Create a revision, either from a running *instance* or from the
        manifest (filesystem route) when no instance is given."""
        if not parameters:
            parameters = {}
        if not version:
            version = self.get_manifest()['manifestVersion']
        if not submodules:
            submodules = {}
        if instance:
            payload = json.dumps({'name': name,
                                  'parameters': parameters,
                                  'submoduleRevisions': {},
                                  'returnValues': [],
                                  'applicationId': self.applicationId,
                                  'applicationName': self.name,
                                  'manifestVersion': version,
                                  'instanceId': instance.instanceId})
            resp = self._router.post_revision(org_id=self.organizationId, app_id=self.applicationId, data=payload)
            return self.get_revision(id=resp.json()['id'])
        else:
            payload = json.dumps({'name': name,
                                  'parameters': parameters,
                                  'submodules': submodules,
                                  'applicationId': self.applicationId,
                                  'manifestVersion': version, })
            resp = self._router.post_revision_fs(org_id=self.organizationId, app_id=self.applicationId, data=payload)
            return self.get_revision(id=resp.json()['id'])
    # noinspection PyShadowingBuiltins
    def delete_revision(self, id):
        self.get_revision(id).delete()
    # MANIFEST
    def get_manifest(self, version=None):
        """Fetch the manifest; without *version* this refreshes and returns
        the current manifest."""
        if not version:
            return self._router.post_application_refresh(org_id=self.organizationId, app_id=self.applicationId).json()
        else:
            return self._router.get_application_manifest_version(org_id=self.organizationId, app_id=self.applicationId, version=version).json()
    def get_manifest_latest(self):
        return self._router.get_application_manifests_latest(org_id=self.organizationId, app_id=self.applicationId).json()
    def upload(self, manifest):
        """Upload *manifest* as the application's new manifest. The public
        and private APIs use different request shapes."""
        log.info("Uploading manifest: %s to application: id=%s" % (manifest.source, self.applicationId))
        # noinspection PyAttributeOutsideInit
        self.manifest = manifest
        if self._router.public_api_in_use:
            return self._router.post_application_manifest(org_id=self.organizationId, app_id=self.applicationId,
                                                          data=manifest.content)
        return self._router.post_application_manifest(org_id=self.organizationId, app_id=self.applicationId,
                                                      files={'path': manifest.content},
                                                      data={'manifestSource': 'upload', 'name': self.name}).json()
    # noinspection PyShadowingBuiltins
    def get_instance(self, id=None, name=None):
        """Get an instance by id (works even for submodule instances that
        do not appear in listings) or, failing that, by name."""
        if id: # submodules instances are invisible for lists
            return Instance(id=id, organization=self.organization).init_router(self._router)
        return Instance.get(self._router, self.organization, name, application=self)
class ApplicationList(QubellEntityList):
    """Typed collection of applications; QubellEntityList constructs each
    element using base_clz."""
    base_clz = Application
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designateclient import exceptions as designate_exception
from designateclient.v1 import records
import mock
from heat.engine.resources.openstack.designate import record
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
# Minimal Heat template defining a single OS::Designate::Record resource;
# used as the fixture stack for the test cases below.
sample_template = {
    'heat_template_version': '2015-04-30',
    'resources': {
        'test_resource': {
            'type': 'OS::Designate::Record',
            'properties': {
                'name': 'test-record.com',
                'description': 'Test record',
                'ttl': 3600,
                'type': 'MX',
                'priority': 1,
                'data': '1.1.1.1',
                'domain': '1234567'
            }
        }
    }
}
# Resource type under test.
RESOURCE_TYPE = 'OS::Designate::Record'
class DesignateRecordTest(common.HeatTestCase):
def setUp(self):
super(DesignateRecordTest, self).setUp()
self.ctx = utils.dummy_context()
self.stack = stack.Stack(
self.ctx, 'test_stack',
template.Template(sample_template)
)
self.test_resource = self.stack['test_resource']
# Mock client plugin
self.test_client_plugin = mock.MagicMock()
self.test_resource.client_plugin = mock.MagicMock(
return_value=self.test_client_plugin)
# Mock client
self.test_client = mock.MagicMock()
self.test_resource.client = mock.MagicMock(
return_value=self.test_client)
def _get_mock_resource(self):
value = mock.MagicMock()
value.id = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
return value
def test_resource_validate_properties(self):
mock_record_create = self.test_client_plugin.record_create
mock_resource = self._get_mock_resource()
mock_record_create.return_value = mock_resource
# validate the properties
self.assertEqual(
'test-record.com',
self.test_resource.properties.get(record.DesignateRecord.NAME))
self.assertEqual(
'Test record',
self.test_resource.properties.get(
record.DesignateRecord.DESCRIPTION))
self.assertEqual(
3600,
self.test_resource.properties.get(record.DesignateRecord.TTL))
self.assertEqual(
'MX',
self.test_resource.properties.get(record.DesignateRecord.TYPE))
self.assertEqual(
1,
self.test_resource.properties.get(record.DesignateRecord.PRIORITY))
self.assertEqual(
'1.1.1.1',
self.test_resource.properties.get(record.DesignateRecord.DATA))
self.assertEqual(
'1234567',
self.test_resource.properties.get(
record.DesignateRecord.DOMAIN))
def test_resource_handle_create_non_mx_or_srv(self):
mock_record_create = self.test_client_plugin.record_create
mock_resource = self._get_mock_resource()
mock_record_create.return_value = mock_resource
for type in (set(self.test_resource._ALLOWED_TYPES) -
set([self.test_resource.MX,
self.test_resource.SRV])):
self.test_resource.properties = args = dict(
name='test-record.com',
description='Test record',
ttl=3600,
type=type,
priority=1,
data='1.1.1.1',
domain='1234567'
)
self.test_resource.handle_create()
# Make sure priority is set to None for non mx or srv records
args['priority'] = None
mock_record_create.assert_called_with(
**args
)
# validate physical resource id
self.assertEqual(mock_resource.id, self.test_resource.resource_id)
def test_resource_handle_create_mx_or_srv(self):
mock_record_create = self.test_client_plugin.record_create
mock_resource = self._get_mock_resource()
mock_record_create.return_value = mock_resource
for type in [self.test_resource.MX, self.test_resource.SRV]:
self.test_resource.properties = args = dict(
name='test-record.com',
description='Test record',
ttl=3600,
type=type,
priority=1,
data='1.1.1.1',
domain='1234567'
)
self.test_resource.handle_create()
mock_record_create.assert_called_with(
**args
)
# validate physical resource id
self.assertEqual(mock_resource.id, self.test_resource.resource_id)
def test_resource_handle_update_non_mx_or_srv(self):
mock_record_update = self.test_client_plugin.record_update
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
for type in (set(self.test_resource._ALLOWED_TYPES) -
set([self.test_resource.MX,
self.test_resource.SRV])):
prop_diff = args = {
record.DesignateRecord.DESCRIPTION: 'updated description',
record.DesignateRecord.TTL: 4200,
record.DesignateRecord.TYPE: type,
record.DesignateRecord.DATA: '2.2.2.2',
record.DesignateRecord.PRIORITY: 1}
self.test_resource.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
# priority is not considered for records other than mx or srv
args.update(dict(
id=self.test_resource.resource_id,
priority=None,
domain='1234567',
))
mock_record_update.assert_called_with(**args)
def test_resource_handle_update_mx_or_srv(self):
mock_record_update = self.test_client_plugin.record_update
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
for type in [self.test_resource.MX, self.test_resource.SRV]:
prop_diff = args = {
record.DesignateRecord.DESCRIPTION: 'updated description',
record.DesignateRecord.TTL: 4200,
record.DesignateRecord.TYPE: type,
record.DesignateRecord.DATA: '2.2.2.2',
record.DesignateRecord.PRIORITY: 1}
self.test_resource.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
args.update(dict(
id=self.test_resource.resource_id,
domain='1234567',
))
mock_record_update.assert_called_with(**args)
def test_resource_handle_delete(self):
mock_record_delete = self.test_client_plugin.record_delete
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
mock_record_delete.return_value = None
self.assertIsNone(self.test_resource.handle_delete())
mock_record_delete.assert_called_once_with(
domain='1234567',
id=self.test_resource.resource_id
)
    def test_resource_handle_delete_resource_id_is_none(self):
        """handle_delete is a no-op when the resource was never created."""
        self.test_resource.resource_id = None
        self.assertIsNone(self.test_resource.handle_delete())
    def test_resource_handle_delete_not_found(self):
        """A designate NotFound during delete is swallowed (still None)."""
        mock_record_delete = self.test_client_plugin.record_delete
        mock_record_delete.side_effect = designate_exception.NotFound
        self.assertIsNone(self.test_resource.handle_delete())
def test_resource_show_resource(self):
args = dict(
name='test-record.com',
description='Test record',
ttl=3600,
type='A',
priority=1,
data='1.1.1.1'
)
rsc = records.Record(args)
mock_notification_get = self.test_client_plugin.record_show
mock_notification_get.return_value = rsc
self.assertEqual(args,
self.test_resource._show_resource(),
'Failed to show resource')
    def test_resource_get_live_state(self):
        """get_live_state should omit keys that are not update-able.

        Builds a real stack from an inline template, stubs the designate
        client, and checks the live record data contains everything from
        the stubbed response except 'domain_id' and 'name'.
        """
        tmpl = {
            'heat_template_version': '2015-04-30',
            'resources': {
                'test_resource': {
                    'type': 'OS::Designate::Record',
                    'properties': {
                        'name': 'test-record.com',
                        'description': 'Test record',
                        'ttl': 3600,
                        'type': 'MX',
                        'priority': 1,
                        'data': '1.1.1.1',
                        'domain': 'example.com.'
                    }
                }
            }
        }
        s = stack.Stack(
            self.ctx, 'test_stack',
            template.Template(tmpl)
        )
        test_resource = s['test_resource']
        test_resource.resource_id = '1234'
        # Stub domain lookup and the records API so no client is contacted.
        test_resource.client_plugin().get_domain_id = mock.MagicMock()
        test_resource.client_plugin().get_domain_id.return_value = '1234567'
        test_resource.client().records = mock.MagicMock()
        test_resource.client().records.get.return_value = {
            'type': 'MX',
            'data': '1.1.1.1',
            'ttl': 3600,
            'description': 'test',
            'domain_id': '1234567',
            'name': 'www.example.com.',
            'priority': 0
        }
        reality = test_resource.get_live_state(test_resource.properties)
        # 'domain_id' and 'name' are expected to be dropped from the result.
        expected = {
            'type': 'MX',
            'data': '1.1.1.1',
            'ttl': 3600,
            'description': 'test',
            'priority': 0
        }
        self.assertEqual(expected, reality)
| |
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
import sys
import pytest
from units.compat import mock
from units.compat import unittest
from units.compat.mock import MagicMock
from units.compat.mock import patch
from ansible.errors import AnsibleError
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ConnectionBase
# from ansible.plugins.connection.accelerate import Connection as AccelerateConnection
# from ansible.plugins.connection.chroot import Connection as ChrootConnection
# from ansible.plugins.connection.funcd import Connection as FuncdConnection
# from ansible.plugins.connection.jail import Connection as JailConnection
# from ansible.plugins.connection.libvirt_lxc import Connection as LibvirtLXCConnection
from ansible.plugins.connection.lxc import Connection as LxcConnection
from ansible.plugins.connection.local import Connection as LocalConnection
from ansible.plugins.connection.paramiko_ssh import Connection as ParamikoConnection
from ansible.plugins.connection.ssh import Connection as SSHConnection
from ansible.plugins.connection.docker import Connection as DockerConnection
# from ansible.plugins.connection.winrm import Connection as WinRmConnection
from ansible.plugins.connection.network_cli import Connection as NetworkCliConnection
from ansible.plugins.connection.httpapi import Connection as HttpapiConnection
pytest.importorskip("ncclient")
# True when running under Python 3; selects the right builtins module below.
PY3 = sys.version_info[0] == 3
# Keep a reference to the real __import__ so non-ncclient imports still work.
builtin_import = __import__
mock_ncclient = MagicMock(name='ncclient')
def import_mock(name, *args):
    # Substitute a MagicMock for any 'ncclient*' import so the netconf
    # connection plugin can be imported without the real dependency.
    if name.startswith('ncclient'):
        return mock_ncclient
    return builtin_import(name, *args)
if PY3:
    with patch('builtins.__import__', side_effect=import_mock):
        from ansible.plugins.connection.netconf import Connection as NetconfConnection
else:
    with patch('__builtin__.__import__', side_effect=import_mock):
        from ansible.plugins.connection.netconf import Connection as NetconfConnection
class TestConnectionBaseClass(unittest.TestCase):
    """Tests for ConnectionBase's abstract contract and the concrete
    connection plugins (lxc, local, paramiko, ssh, docker, network_cli,
    netconf, httpapi)."""
    def setUp(self):
        # Minimal play context with a sudo prompt; check_password_prompt
        # matches captured output against play_context.prompt.
        self.play_context = PlayContext()
        self.play_context.prompt = (
            '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
        )
        self.in_stream = StringIO()
    def tearDown(self):
        pass
    def test_subclass_error(self):
        # Subclasses that do not implement the full abstract interface must
        # not be instantiable.
        class ConnectionModule1(ConnectionBase):
            pass
        with self.assertRaises(TypeError):
            ConnectionModule1()  # pylint: disable=abstract-class-instantiated
        class ConnectionModule2(ConnectionBase):
            def get(self, key):
                super(ConnectionModule2, self).get(key)
        with self.assertRaises(TypeError):
            ConnectionModule2()  # pylint: disable=abstract-class-instantiated
    def test_subclass_success(self):
        # Implementing every abstract method/property allows instantiation.
        class ConnectionModule3(ConnectionBase):
            @property
            def transport(self):
                pass
            def _connect(self):
                pass
            def exec_command(self):
                pass
            def put_file(self):
                pass
            def fetch_file(self):
                pass
            def close(self):
                pass
        self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3)
    # def test_accelerate_connection_module(self):
    #     self.assertIsInstance(AccelerateConnection(), AccelerateConnection)
    #
    # def test_chroot_connection_module(self):
    #     self.assertIsInstance(ChrootConnection(), ChrootConnection)
    #
    # def test_funcd_connection_module(self):
    #     self.assertIsInstance(FuncdConnection(), FuncdConnection)
    #
    # def test_jail_connection_module(self):
    #     self.assertIsInstance(JailConnection(), JailConnection)
    #
    # def test_libvirt_lxc_connection_module(self):
    #     self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection)
    def test_lxc_connection_module(self):
        self.assertIsInstance(LxcConnection(self.play_context, self.in_stream), LxcConnection)
    def test_local_connection_module(self):
        self.assertIsInstance(LocalConnection(self.play_context, self.in_stream), LocalConnection)
    def test_paramiko_connection_module(self):
        self.assertIsInstance(ParamikoConnection(self.play_context, self.in_stream), ParamikoConnection)
    def test_ssh_connection_module(self):
        self.assertIsInstance(SSHConnection(self.play_context, self.in_stream), SSHConnection)
    # New-style version probe reports 1.2.3: below the 1.3 minimum.
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.2.3', '', 0))
    def test_docker_connection_module_too_old(self, mock_new_docker_verison, mock_old_docker_version):
        self.assertRaisesRegexp(AnsibleError, '^docker connection type requires docker 1.3 or higher$',
                                DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
    # New-style version probe reports 1.3.4: accepted.
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.3.4', '', 0))
    def test_docker_connection_module(self, mock_new_docker_verison, mock_old_docker_version):
        self.assertIsInstance(DockerConnection(self.play_context, self.in_stream, docker_command='/fake/docker'),
                              DockerConnection)
    # old version and new version fail
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('false', 'garbage', '', 1))
    def test_docker_connection_module_wrong_cmd(self, mock_new_docker_version, mock_old_docker_version):
        self.assertRaisesRegexp(AnsibleError, '^Docker version check (.*?) failed: ',
                                DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
    # def test_winrm_connection_module(self):
    #     self.assertIsInstance(WinRmConnection(), WinRmConnection)
    def test_network_cli_connection_module(self):
        self.play_context.network_os = 'eos'
        self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), NetworkCliConnection)
    def test_netconf_connection_module(self):
        self.assertIsInstance(NetconfConnection(self.play_context, self.in_stream), NetconfConnection)
    def test_httpapi_connection_module(self):
        self.play_context.network_os = 'eos'
        self.assertIsInstance(HttpapiConnection(self.play_context, self.in_stream), HttpapiConnection)
    def test_check_password_prompt(self):
        """check_password_prompt must find the sudo prompt in output from
        several transports and not false-positive when the prompt only
        appears inside a logged command line."""
        local = (
            b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n'
            b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n'
        )
        ssh_pipelining_vvvv = b'''
debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 251
debug2: process_mux_new_session: channel 1: request tty 0, X 1, agent 1, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'sudo -H -S  -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'", env 0
debug3: process_mux_new_session: got fds stdin 9, stdout 10, stderr 11
debug2: client_session2_setup: id 2
debug1: Sending command: /bin/sh -c 'sudo -H -S  -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug2: channel 2: rcvd ext data 67
[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: debug2: channel 2: written 67 to efd 11
BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq
debug3: receive packet: type 98
'''  # noqa
        ssh_nopipelining_vvvv = b'''
debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 251
debug2: process_mux_new_session: channel 1: request tty 1, X 1, agent 1, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'sudo -H -S  -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'", env 0
debug3: mux_client_request_session: session request sent
debug3: send packet: type 98
debug1: Sending command: /bin/sh -c 'sudo -H -S  -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug2: exec request accepted on channel 2
[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: debug3: receive packet: type 2
debug3: Received SSH2_MSG_IGNORE
debug3: Received SSH2_MSG_IGNORE
BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq
debug3: receive packet: type 98
'''  # noqa
        ssh_novvvv = (
            b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n'
            b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n'
        )
        dns_issue = (
            b'timeout waiting for privilege escalation password prompt:\n'
            b'sudo: sudo: unable to resolve host tcloud014\n'
            b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n'
            b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n'
        )
        nothing = b''
        # Prompt only appears inside the logged command, not as a prompt.
        in_front = b'''
debug1: Sending command: /bin/sh -c 'sudo -H -S  -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo
'''
        class ConnectionFoo(ConnectionBase):
            @property
            def transport(self):
                pass
            def _connect(self):
                pass
            def exec_command(self):
                pass
            def put_file(self):
                pass
            def fetch_file(self):
                pass
            def close(self):
                pass
        c = ConnectionFoo(self.play_context, self.in_stream)
        self.assertTrue(c.check_password_prompt(local))
        self.assertTrue(c.check_password_prompt(ssh_pipelining_vvvv))
        self.assertTrue(c.check_password_prompt(ssh_nopipelining_vvvv))
        self.assertTrue(c.check_password_prompt(ssh_novvvv))
        self.assertTrue(c.check_password_prompt(dns_issue))
        self.assertFalse(c.check_password_prompt(nothing))
        self.assertFalse(c.check_password_prompt(in_front))
| |
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Series, read_csv, factorize, date_range
from pandas.core.algorithms import take_1d
try:
from pandas import (
rolling_median,
rolling_mean,
rolling_min,
rolling_max,
rolling_var,
rolling_skew,
rolling_kurt,
rolling_std,
)
have_rolling_methods = True
except ImportError:
have_rolling_methods = False
try:
from pandas._libs import algos
except ImportError:
from pandas import algos
try:
from pandas.util.testing import test_parallel
have_real_test_parallel = True
except ImportError:
have_real_test_parallel = False
def test_parallel(num_threads=1):
    """No-op stand-in used when pandas does not ship test_parallel.

    Returns a decorator that hands the wrapped function back unchanged.
    """
    def decorate(func):
        return func
    return decorate
from .pandas_vb_common import BaseIO
class ParallelGroupbyMethods:
    """Benchmark a groupby aggregation run by several threads at once,
    plus the same work performed serially for comparison."""

    params = ([2, 4, 8], ["count", "last", "max", "mean", "min", "prod", "sum", "var"])
    param_names = ["threads", "method"]

    def setup(self, threads, method):
        if not have_real_test_parallel:
            raise NotImplementedError
        nrows = 10 ** 6
        ngroups = 10 ** 3
        frame = DataFrame(
            {"key": np.random.randint(0, ngroups, size=nrows), "data": np.random.randn(nrows)}
        )

        @test_parallel(num_threads=threads)
        def run_parallel():
            getattr(frame.groupby("key")["data"], method)()

        self.parallel = run_parallel

        def run_serial():
            getattr(frame.groupby("key")["data"], method)()

        self.loop = run_serial

    def time_parallel(self, threads, method):
        self.parallel()

    def time_loop(self, threads, method):
        for i in range(threads):
            self.loop()
class ParallelGroups:
    """Benchmark computing groupby .groups from multiple threads."""

    params = [2, 4, 8]
    param_names = ["threads"]

    def setup(self, threads):
        if not have_real_test_parallel:
            raise NotImplementedError
        n = 2 ** 22
        ngroups = 10 ** 3
        values = Series(np.random.randint(0, ngroups, size=n))

        @test_parallel(num_threads=threads)
        def compute_groups():
            values.groupby(values).groups

        self.get_groups = compute_groups

    def time_get_groups(self, threads):
        self.get_groups()
class ParallelTake1D:
    """Benchmark take_1d on a large column from two threads."""

    params = ["int64", "float64"]
    param_names = ["dtype"]

    def setup(self, dtype):
        if not have_real_test_parallel:
            raise NotImplementedError
        n = 10 ** 6
        frame = DataFrame({"col": np.arange(n, dtype=dtype)})
        idx = np.arange(100, len(frame) - 100)

        @test_parallel(num_threads=2)
        def run_take():
            take_1d(frame["col"].values, idx)

        self.parallel_take1d = run_take

    def time_take1d(self, dtype):
        self.parallel_take1d()
class ParallelKth:
    """Benchmark kth_smallest on two random arrays from two threads."""

    number = 1
    repeat = 5

    def setup(self):
        if not have_real_test_parallel:
            raise NotImplementedError
        n = 10 ** 7
        k = 5 * 10 ** 5
        kwargs_list = [{"arr": np.random.randn(n)}, {"arr": np.random.randn(n)}]

        @test_parallel(num_threads=2, kwargs_list=kwargs_list)
        def run_kth(arr):
            algos.kth_smallest(arr, k)

        self.parallel_kth_smallest = run_kth

    def time_kth_smallest(self):
        self.parallel_kth_smallest()
class ParallelDatetimeFields:
    """Benchmark datetime/period field access from two threads."""
    def setup(self):
        if not have_real_test_parallel:
            raise NotImplementedError
        N = 10 ** 6
        # Minute-frequency DatetimeIndex and its daily PeriodIndex view.
        self.dti = date_range("1900-01-01", periods=N, freq="T")
        self.period = self.dti.to_period("D")
    def time_datetime_field_year(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.year
        run(self.dti)
    def time_datetime_field_day(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.day
        run(self.dti)
    def time_datetime_field_daysinmonth(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.days_in_month
        run(self.dti)
    def time_datetime_field_normalize(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.normalize()
        run(self.dti)
    def time_datetime_to_period(self):
        @test_parallel(num_threads=2)
        def run(dti):
            dti.to_period("S")
        run(self.dti)
    def time_period_to_datetime(self):
        @test_parallel(num_threads=2)
        def run(period):
            period.to_timestamp()
        run(self.period)
class ParallelRolling:
    """Benchmark rolling-window aggregations from two threads.

    Supports both the modern DataFrame.rolling API and the legacy
    top-level rolling_* functions, depending on the installed pandas.
    """
    params = ["median", "mean", "min", "max", "var", "skew", "kurt", "std"]
    param_names = ["method"]
    def setup(self, method):
        if not have_real_test_parallel:
            raise NotImplementedError
        win = 100
        arr = np.random.rand(100000)
        if hasattr(DataFrame, "rolling"):
            # Modern API: build the Rolling object once, call the method.
            df = DataFrame(arr).rolling(win)
            @test_parallel(num_threads=2)
            def parallel_rolling():
                getattr(df, method)()
            self.parallel_rolling = parallel_rolling
        elif have_rolling_methods:
            # Legacy API: dispatch to the matching rolling_* function.
            rolling = {
                "median": rolling_median,
                "mean": rolling_mean,
                "min": rolling_min,
                "max": rolling_max,
                "var": rolling_var,
                "skew": rolling_skew,
                "kurt": rolling_kurt,
                "std": rolling_std,
            }
            @test_parallel(num_threads=2)
            def parallel_rolling():
                rolling[method](arr, win)
            self.parallel_rolling = parallel_rolling
        else:
            raise NotImplementedError
    def time_rolling(self, method):
        self.parallel_rolling()
class ParallelReadCSV(BaseIO):
    """Benchmark read_csv of one fixture file from two threads.

    setup writes a CSV of the requested dtype; time_read_csv reads it
    back in two threads via the test_parallel decorator.
    """

    number = 1
    repeat = 5
    params = ["float", "object", "datetime"]
    param_names = ["dtype"]

    def setup(self, dtype):
        if not have_real_test_parallel:
            raise NotImplementedError
        rows = 10000
        cols = 50
        data = {
            "float": DataFrame(np.random.randn(rows, cols)),
            "datetime": DataFrame(
                np.random.randn(rows, cols), index=date_range("1/1/2000", periods=rows)
            ),
            "object": DataFrame(
                "foo",
                index=range(rows),
                # BUG FIX: '"object%03d".format(i)' never substituted i (a
                # printf-style template passed to str.format), yielding five
                # identical "object%03d" column names; format the index in.
                columns=["object{:03d}".format(i) for i in range(5)],
            ),
        }
        self.fname = "__test_{}__.csv".format(dtype)
        df = data[dtype]
        df.to_csv(self.fname)

        @test_parallel(num_threads=2)
        def parallel_read_csv():
            read_csv(self.fname)

        self.parallel_read_csv = parallel_read_csv

    def time_read_csv(self, dtype):
        self.parallel_read_csv()
class ParallelFactorize:
    """Benchmark factorize on a string index, threaded vs serial."""

    number = 1
    repeat = 5
    params = [2, 4, 8]
    param_names = ["threads"]

    def setup(self, threads):
        if not have_real_test_parallel:
            raise NotImplementedError
        strings = tm.makeStringIndex(100000)

        @test_parallel(num_threads=threads)
        def run_parallel():
            factorize(strings)

        self.parallel = run_parallel

        def run_serial():
            factorize(strings)

        self.loop = run_serial

    def time_parallel(self, threads):
        self.parallel()

    def time_loop(self, threads):
        for i in range(threads):
            self.loop()
from .pandas_vb_common import setup # noqa: F401
| |
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from furl import furl
from collections import OrderedDict
from .utils import camelize
import warnings
import base64
import zeep
import zeep.transports
from lxml import etree
from requests import Request
from rakuten_ws.utils import xml2dict, dict2xml, unflatten_dict, sorted_dict, flatten_dict
from .utils import camelize_dict, PrettyStringRepr, load_file
from .compat import to_unicode
class RmsServiceClient(object):
    """Descriptor base class for RMS API clients.

    Accessed through a service *instance*, the client binds itself to
    that service and returns itself; accessed through the class, it
    returns its own class instead.
    """

    def __get__(self, service, cls):
        if service is None:
            # Class-level access: expose the client class itself.
            return self.__class__
        # Instance-level access: remember the owning service.
        self.service = service
        return self
class RMSInvalidResponse(Exception):
    """Raised when an RMS response lacks the expected <status> element."""
    pass
class ZeepClient(RmsServiceClient):
    """SOAP client built on zeep.

    Subclasses set ``wsdl``; the zeep client and the WSDL's XSD types
    are created lazily on first access and cached on the instance.
    Any unknown attribute access becomes a SOAP call of that name.
    """
    wsdl = None
    _zeep_client = None
    _xsd_types = None
    @property
    def zeep_client(self):
        # Lazily create and cache the underlying zeep.Client.
        _zeep_client = getattr(self, '_zeep_client', None)
        if _zeep_client is None:
            self._zeep_client = _zeep_client = zeep.Client(wsdl=self.wsdl, transport=zeep.transports.Transport())
        return _zeep_client
    @property
    def xsd_types(self):
        # Mapping of XSD type name -> type object, built once from the WSDL.
        _xsd_types = getattr(self, '_xsd_types', None)
        if _xsd_types is None:
            self._xsd_types = _xsd_types = dict(((t.name, t) for t in self.zeep_client.wsdl.types.types))
        return _xsd_types
    def _send_request(self, name, **proxy_kwargs):
        """Invoke SOAP operation *name*, injecting the user-auth model.

        The inventory endpoint takes a named auth parameter; all other
        endpoints use positional-style 'arg0'/'arg1' parameters.
        """
        address = self.zeep_client.service._binding_options['address']
        arg0 = self.service.soap_user_auth_model
        method = getattr(self.zeep_client.service, name)
        if address.endswith('inventory/ws'):
            kwargs = {'externalUserAuthModel': arg0}
            kwargs.update(proxy_kwargs)
        else:
            kwargs = {'arg0': arg0}
            if proxy_kwargs:
                kwargs['arg1'] = proxy_kwargs
        response = method(**kwargs)
        return response
    def __getattr__(self, name):
        # Turn any undefined attribute into a SOAP call of the same name.
        return lambda **proxy_kwargs: self._send_request(name, **proxy_kwargs)
class RestMethodResult(OrderedDict):
    """Result of a REST call; behaves as the result-payload dict and keeps
    the status block, the raw response, and the originating method."""
    def __init__(self, method, response):
        self.method = method
        self.response = response
        self.request = response.request
        # Split the XML body into the <status> block and the result payload.
        self.status, result_data = self.parse_result(response)
        super(RestMethodResult, self).__init__(result_data)
    def parse_result(self, response):
        """Return (status dict, result dict) parsed from *response*.

        Raises RMSInvalidResponse when no <status> element is present.
        The result dict is empty when the response has no result element.
        """
        xml = etree.fromstring(response.content)
        _status = xml.xpath('//status')
        _result = xml.xpath('//%s' % self.method.result_xml_key)
        result_data = {}
        if _status:
            status = xml2dict(etree.tostring(_status[0]))
        else:
            raise RMSInvalidResponse(response.text)
        if _result:
            result_data = xml2dict(etree.tostring(_result[0]))
        return status, result_data
    @property
    def xml(self):
        # Raw response body, wrapped for readable interactive display.
        return PrettyStringRepr(self.response.text)
    @property
    def json(self):
        # JSON rendering of both the status block and the result payload.
        data = {'status': self.status, 'result': self}
        return PrettyStringRepr(json.dumps(data, ensure_ascii=False, sort_keys=True,
                                           indent=4, separators=(',', ': ')))
    def __repr__(self):
        return "<RestMethodResult [%s]>" % self.status.get('systemStatus', 'Error')
class RestMethod(object):
    """Descriptor describing a single RMS REST endpoint.

    Instances are declared as class attributes on a ``RestClient``
    subclass; ``RestClient.__new__`` fills in ``name`` from the
    attribute name when it is not given explicitly.
    """

    def __init__(self, name=None, http_method="GET", params=None, custom_headers=None, form_data=None, root_xml_key=None):
        self.name = name
        self.http_method = http_method
        # BUG FIX: the defaults were the mutable literals [] and {}, shared
        # by every RestMethod created without explicit values; use None
        # sentinels so each instance owns its containers.
        self.custom_headers = custom_headers if custom_headers is not None else {}
        self.params = params if params is not None else []
        self.client = None
        self.form_data = form_data
        self._root_xml_key = root_xml_key

    @property
    def root_xml_key(self):
        # Explicit key wins; otherwise derive it from the client name plus
        # the method path, e.g. client 'order' + name 'get/x' -> camelized.
        if self._root_xml_key:
            return camelize(self._root_xml_key, False)
        else:
            return camelize("%s_%s" % (self.client.name, '_'.join(self.name.split('/'))), False)

    @property
    def result_xml_key(self):
        return "%sResult" % self.root_xml_key

    @property
    def request_xml_key(self):
        return camelize("%sRequest" % self.root_xml_key, False)

    def prepare_xml_post(self, params):
        """Serialize *params* into the XML payload for a POST endpoint.

        When the endpoint declares an explicit parameter order
        (``self.params``), flattened keys are sorted into that order;
        unknown keys sort last and trigger a SyntaxWarning.
        """
        camelcase_params = camelize_dict(params)
        if self.params:
            def key(x):
                # Strip list-index markers ('.@0.') so keys can be matched
                # against the declared parameter names (raw string avoids
                # the invalid '\d' escape warning).
                k = re.sub(r'.@\d+.', '.', x[0])
                try:
                    return self.params.index(k)
                except ValueError:
                    # BUG FIX: was a bare 'except:' — only a failed
                    # list.index lookup (ValueError) is expected here.
                    warnings.warn(
                        "Given invalid parameter '%s'." % k,
                        SyntaxWarning
                    )
                    return len(self.params) + 1
            sorted_params = unflatten_dict(sorted_dict(flatten_dict(camelcase_params), key=key))
        else:
            sorted_params = camelcase_params
        return dict2xml({self.request_xml_key: sorted_params}, root="request") + "\n"

    def prepare_request(self, params=None):
        """Build a prepared ``requests`` request for this endpoint."""
        # None sentinel instead of a shared mutable {} default (the body
        # pops 'filename' from it below).
        params = {} if params is None else params
        api_request = furl(self.client.api_url)
        api_request.path.segments.append(self.client.api_version)
        api_request.path.segments.append(self.client.api_endpoint or self.client.name)
        api_request.path.segments.extend(self.name.split('/'))
        api_request.path.normalize()
        headers = self.client.service.webservice.session.headers.copy()
        headers['Authorization'] = self.client.service.esa_key
        if self.custom_headers:
            headers.update(self.custom_headers)
        # 'filename' is a transport option, not an API parameter.
        filename = params.pop('filename', None)
        if self.http_method == "POST":
            data = self.prepare_xml_post(params)
            if filename:
                fileobj, mimetype = load_file(filename)
                files = {'xml': (None, data), 'file': ('filename', fileobj, mimetype)}
                req = Request(self.http_method, api_request.url, files=files, headers=headers)
            else:
                req = Request(self.http_method, api_request.url, data=data, headers=headers)
        else:
            req = Request(self.http_method, api_request.url, headers=headers, params=camelize_dict(params))
        prepped_request = req.prepare()
        return prepped_request

    def __call__(self, *args, **kwargs):
        """Send the request and wrap the response in a RestMethodResult."""
        raise_for_status = kwargs.pop('raise_for_status', not self.client.service.webservice.debug)
        prepped_request = self.prepare_request(kwargs)
        response = self.client.service.webservice.session.send(prepped_request)
        if raise_for_status:
            response.raise_for_status()
        return RestMethodResult(self, response)

    def __get__(self, client, cls):
        # Descriptor protocol: bind to the owning client on instance access.
        if client is not None:
            self.client = client
            return self
        return self.__class__
class RestClient(RmsServiceClient):
    """Groups RestMethod descriptors under a common endpoint name."""

    api_url = "https://api.rms.rakuten.co.jp/es"
    api_endpoint = None
    api_version = '1.0'

    def __new__(cls, *args, **kwargs):
        instance = super(RestClient, cls).__new__(cls)
        # Backfill each unnamed RestMethod with its attribute name.
        for attr_name, attr_value in sorted(list(cls.__dict__.items())):
            if isinstance(attr_value, RestMethod) and getattr(attr_value, 'name', None) is None:
                setattr(attr_value, 'name', attr_name)
        return instance

    def __init__(self, name=None):
        self.name = name
        self.service = None
class BaseRmsService(object):
    """Base for RMS services: names child clients and builds auth data."""
    def __new__(cls, *args, **kwargs):
        instance = super(BaseRmsService, cls).__new__(cls)
        # Give every unnamed client descriptor its attribute name.
        for name, attr in sorted(list(cls.__dict__.items())):
            if isinstance(attr, RmsServiceClient):
                if getattr(attr, 'name', None) is None:
                    setattr(attr, 'name', name)
        return instance
    @property
    def esa_key(self):
        """Return the 'ESA <base64(secret:license)>' Authorization value."""
        license_key = self.webservice.license_key
        secret_service = self.webservice.secret_service
        if license_key is None or secret_service is None:
            raise Exception("A 'license_key' and 'secret_service' must be provided")
        key = b"ESA " + base64.b64encode((secret_service + ":" + license_key).encode('utf-8'))
        return to_unicode(key)
    @property
    def shop_url(self):
        # Empty string when the webservice has no shop_url configured.
        return self.webservice.shop_url or ""
    @property
    def soap_user_auth_model(self):
        # Credential structure passed as the auth argument to SOAP calls.
        return {
            "authKey": self.esa_key,
            "shopUrl": self.shop_url,
            "userName": ""
        }
    def __get__(self, webservice, cls):
        # Descriptor protocol: bind this service to the owning webservice.
        if webservice is not None:
            self.webservice = webservice
            return self
        return self.__class__
| |
from django.http import HttpResponse, Http404, HttpResponseBadRequest
from django.db.models import Q
from django.shortcuts import render
from articles.models import Author, AuthorSerializer, Article, ArticleSerializer, Outlet, OutletSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.shortcuts import render
def home(request):
    """Render the site landing page."""
    return render(request, "home.html")
class OutletList(APIView):
    """
    Handles REST API requests to the outlet endpoint's root folder.
    Class-based view following the django rest framework documentation:
    GET lists all outlets, POST creates one.
    Example: GET http://hostname/rest/api/v1/article
    see http://www.django-rest-framework.org/tutorial/3-class-based-views/
    """
    def get(self, request, format=None):
        queryset = Outlet.objects.all().order_by('-id')
        return Response(OutletSerializer(queryset, many=True).data)
    def post(self, request, format=None):
        serializer = OutletSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
def articles(request):
    """Render the article listing page, newest first."""
    context = {
        'articles': Article.objects.all().order_by('-id'),
        'active_menu': 'articles',
    }
    return render(request, 'articles.html', context)
class ArticleList(APIView):
    """
    Class-based view following the django rest framework documentation.
    GET lists all articles (optionally filtered by ?q=), POST creates one.
    Example: GET http://hostname/rest/api/v1/article
    see http://www.django-rest-framework.org/tutorial/3-class-based-views/
    """
    def get(self, request, format=None):
        queryset = Article.objects.all().order_by('-id')
        # Optional free-text search over title and content.
        q = request.GET.get('q')
        if q:
            queryset = queryset.filter(
                Q(title__icontains=q) | Q(content__icontains=q))
        return Response(ArticleSerializer(queryset, many=True).data)
    def post(self, request, format=None):
        serializer = ArticleSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class OutletDetail(APIView):
    """
    Handles REST API request to Outlet's endpoint, more specifically ones related to a single instance manipulation.
    Example: GET http://hostname/api/rest/v1/outlet/1
    Class-based Views based on django rest framework documentation.
    Supports GET, PUT and DELETE for a single outlet.
    see http://www.django-rest-framework.org/tutorial/3-class-based-views/
    """
    def get_object(self, id):
        """Return the Outlet with primary key ``id`` or raise Http404."""
        try:
            id = int(id)
        except ValueError:
            # BUG FIX: 'raise HttpResponseBadRequest()' crashed with
            # TypeError (it is a response object, not an exception);
            # treat a malformed id as not-found instead.
            raise Http404
        try:
            outlet = Outlet.objects.get(pk = id)
        except Outlet.DoesNotExist:
            # BUG FIX: previously caught Article.DoesNotExist, which never
            # matches here, so a missing outlet produced a 500.
            raise Http404
        return outlet
    def get(self, request, id, format=None):
        """Serialize and return a single outlet."""
        outlet = self.get_object(id)
        serializer = OutletSerializer(outlet)
        return Response(serializer.data)
    def put(self, request, id, format=None):
        """Replace an outlet with the validated request payload."""
        outlet = self.get_object(id)
        serializer = OutletSerializer(outlet, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, id, format=None):
        """Delete an outlet and return 204 No Content."""
        outlet = self.get_object(id)
        outlet.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ArticleDetail(APIView):
    """
    Handles REST API request to Article's endpoint.
    Class-based Views based on django rest framework documentation.
    Supports GET, PUT and DELETE for a single article.
    Example: GET http://hostname/rest/api/v1/article/1
    see http://www.django-rest-framework.org/tutorial/3-class-based-views/
    """
    def get_object(self, id):
        """Return the Article with primary key ``id`` or raise Http404."""
        try:
            id = int(id)
        except ValueError:
            # BUG FIX: 'raise HttpResponseBadRequest()' crashed with
            # TypeError (it is a response object, not an exception);
            # treat a malformed id as not-found instead.
            raise Http404
        try:
            article = Article.objects.get(pk = id)
        except Article.DoesNotExist:
            raise Http404
        return article
    def get(self, request, id, format=None):
        """Serialize and return a single article."""
        article = self.get_object(id)
        serializer = ArticleSerializer(article)
        return Response(serializer.data)
    def put(self, request, id, format=None):
        """Replace an article with the validated request payload."""
        article = self.get_object(id)
        serializer = ArticleSerializer(article, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, id, format=None):
        """Delete an article and return 204 No Content."""
        article = self.get_object(id)
        article.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class AuthorList(APIView):
    """
    Class-based view following the django rest framework documentation.
    GET lists all authors, POST creates one.
    Example: GET http://hostname/rest/api/v1/authors
    see http://www.django-rest-framework.org/tutorial/3-class-based-views/
    """
    def get(self, request, format=None):
        queryset = Author.objects.all().order_by('-id')
        return Response(AuthorSerializer(queryset, many=True).data)
    def post(self, request, format=None):
        serializer = AuthorSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class AuthorDetail(APIView):
    """
    Handles REST API requests for a single Author instance.
    Supports GET, PUT and DELETE.
    Example: GET http://hostname/rest/api/v1/authors/1
    """
    def get_object(self, id):
        """Return the Author with primary key ``id`` or raise Http404."""
        try:
            id = int(id)
        except ValueError:
            # BUG FIX: 'raise HttpResponseBadRequest()' crashed with
            # TypeError (it is a response object, not an exception);
            # treat a malformed id as not-found instead.
            raise Http404
        try:
            author = Author.objects.get(pk = id)
        except Author.DoesNotExist:
            raise Http404
        return author
    def get(self, request, id, format=None):
        """Serialize and return a single author."""
        author = self.get_object(id)
        serializer = AuthorSerializer(author)
        return Response(serializer.data)
    def put(self, request, id, format=None):
        """Replace an author with the validated request payload."""
        author = self.get_object(id)
        serializer = AuthorSerializer(author, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, id, format=None):
        """Delete an author and return 204 No Content."""
        author = self.get_object(id)
        author.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
| |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import threading
import types
from datetime import datetime
from dateutil import parser
from dateutil.tz import tzutc
from time import sleep
from azure import (WindowsAzureData,
WindowsAzureError,
METADATA_NS,
url_quote,
xml_escape,
_create_entry,
_decode_base64_to_text,
_decode_base64_to_bytes,
_encode_base64,
_general_error_handler,
_list_of,
_parse_response_for_dict,
_sign_string,
_unicode_type,
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,
_etree_entity_feed_namespaces,
_make_etree_ns_attr_name,
_get_etree_tag_name_without_ns,
_get_etree_text,
ETree,
_ETreeXmlToObject,
)
# x-ms-version header value sent with every storage service request
# (see _update_storage_header below).
X_MS_VERSION = '2014-02-14'
class EnumResultsBase(object):
    """Paging fields shared by all ``*EnumResults`` list containers."""

    def __init__(self):
        # Continuation markers and page limits from the list operation.
        self.marker = u''
        self.next_marker = u''
        self.prefix = u''
        self.max_results = 0
class ContainerEnumResults(EnumResultsBase):
    """Paged listing of blob containers; iterates over ``containers``."""

    def __init__(self):
        EnumResultsBase.__init__(self)
        # Typed list so the XML deserializer knows the element class.
        self.containers = _list_of(Container)

    def __getitem__(self, index):
        return self.containers[index]

    def __len__(self):
        return len(self.containers)

    def __iter__(self):
        return iter(self.containers)
class Container(WindowsAzureData):
    """A single blob container entry returned by a list operation."""

    def __init__(self):
        self.metadata = {}
        self.properties = Properties()
        self.url = u''
        self.name = u''
class Properties(WindowsAzureData):
    """Last-modified / ETag pair attached to a container."""

    def __init__(self):
        self.etag = u''
        self.last_modified = u''
class RetentionPolicy(WindowsAzureData):
    ''' RetentionPolicy in service properties. '''

    def __init__(self):
        # Whether a retention policy is enabled for the service.
        self.enabled = False
        # Stored via __dict__ to bypass the `days` property setter below.
        self.__dict__['days'] = None

    def get_days(self):
        # convert days to int value
        # NOTE(review): raises TypeError if days is still None; callers
        # appear to read this only after a value is set — verify.
        return int(self.__dict__['days'])

    def set_days(self, value):
        ''' set default days if days is set to empty. '''
        # NOTE(review): despite the docstring, no default is applied;
        # the raw value is stored as-is.
        self.__dict__['days'] = value

    days = property(fget=get_days, fset=set_days)
class Logging(WindowsAzureData):
    """Storage-analytics logging settings in service properties."""

    def __init__(self):
        self.retention_policy = RetentionPolicy()
        # Which operation categories get logged.
        self.write = False
        self.read = False
        self.delete = False
        self.version = u'1.0'
class HourMetrics(WindowsAzureData):
    """Hourly metrics settings in service properties."""

    def __init__(self):
        self.retention_policy = RetentionPolicy()
        self.include_apis = None
        self.enabled = False
        self.version = u'1.0'
class MinuteMetrics(WindowsAzureData):
    """Per-minute metrics settings in service properties."""

    def __init__(self):
        self.retention_policy = RetentionPolicy()
        self.include_apis = None
        self.enabled = False
        self.version = u'1.0'
class StorageServiceProperties(WindowsAzureData):
    """Storage Service Properties: logging plus hourly/minute metrics."""

    def __init__(self):
        self.minute_metrics = MinuteMetrics()
        self.hour_metrics = HourMetrics()
        self.logging = Logging()

    @property
    def metrics(self):
        """Deprecated alias for ``hour_metrics``; emits a warning."""
        import warnings
        warnings.warn(
            'The metrics attribute has been deprecated. Use hour_metrics and minute_metrics instead.')
        return self.hour_metrics
class AccessPolicy(WindowsAzureData):
    ''' Access Policy class in service properties. '''

    def __init__(self, start=u'', expiry=u'', permission=u''):
        """Shared-access policy window.

        start/expiry: ISO-8601 time strings bounding policy validity.
        permission:   permission string (e.g. 'r', 'w', 'rwd').

        BUG FIX: the permission default was the literal string ``'u'``,
        an apparent transposition of the unicode empty string ``u''``
        used by the other two defaults.
        """
        self.start = start
        self.expiry = expiry
        self.permission = permission
class SignedIdentifier(WindowsAzureData):
    """Stored access policy: an id paired with an AccessPolicy."""

    def __init__(self):
        self.access_policy = AccessPolicy()
        self.id = u''
class SignedIdentifiers(WindowsAzureData):
    """List wrapper over SignedIdentifier entries."""

    def __init__(self):
        self.signed_identifiers = _list_of(SignedIdentifier)

    def __getitem__(self, index):
        return self.signed_identifiers[index]

    def __len__(self):
        return len(self.signed_identifiers)

    def __iter__(self):
        return iter(self.signed_identifiers)
class BlobEnumResults(EnumResultsBase):
    """Paged blob listing: blobs plus common-prefix entries.

    Iteration and indexing cover only ``blobs``, not ``prefixes``.
    """

    def __init__(self):
        EnumResultsBase.__init__(self)
        self.delimiter = ''
        self.prefixes = _list_of(BlobPrefix)
        self.blobs = _list_of(Blob)

    def __getitem__(self, index):
        return self.blobs[index]

    def __len__(self):
        return len(self.blobs)

    def __iter__(self):
        return iter(self.blobs)
class BlobResult(bytes):
    """Blob content as bytes with a ``properties`` dict attached."""

    def __new__(cls, blob, properties):
        # bytes is immutable, so content is fixed in __new__;
        # a falsy blob (None or empty) becomes b''.
        return bytes.__new__(cls, blob or b'')

    def __init__(self, blob, properties):
        self.properties = properties
class Blob(WindowsAzureData):
    """One blob entry in a listing: name, url, snapshot, props, metadata."""

    def __init__(self):
        self.metadata = {}
        self.properties = BlobProperties()
        self.url = u''
        self.snapshot = u''
        self.name = u''
class BlobProperties(WindowsAzureData):
    """System properties the service reports for a blob."""

    def __init__(self):
        # Copy-operation status fields.
        self.copy_status_description = u''
        self.copy_completion_time = u''
        self.copy_progress = u''
        self.copy_status = u''
        self.copy_source = u''
        self.copy_id = u''
        # Lease state.
        self.lease_duration = u''
        self.lease_state = u''
        self.lease_status = u''
        # Blob kind and page-blob sequence number.
        self.blob_type = u''
        self.xms_blob_sequence_number = 0
        # Content headers.
        self.content_md5 = u''
        self.content_language = u''
        self.content_encoding = u''
        self.content_type = u''
        self.content_length = 0
        # Versioning.
        self.etag = u''
        self.last_modified = u''
class BlobPrefix(WindowsAzureData):
    """Common-prefix ('virtual directory') entry in a delimited blob listing."""

    def __init__(self):
        # Note: plain '' here, unlike most models which default to u''.
        self.name = ''
class BlobBlock(WindowsAzureData):
    """One block (id + size) of a block blob."""

    def __init__(self, id=None, size=None):
        # `id` intentionally mirrors the wire field name (shadows builtin).
        self.size = size
        self.id = id
class BlobBlockList(WindowsAzureData):
    """Committed and uncommitted block lists of a block blob."""

    def __init__(self):
        self.uncommitted_blocks = []
        self.committed_blocks = []
class PageRange(WindowsAzureData):
    """Byte range [start, end] of a page blob."""

    def __init__(self):
        self.end = 0
        self.start = 0
class PageList(object):
    """List wrapper over PageRange entries."""

    def __init__(self):
        self.page_ranges = _list_of(PageRange)

    def __getitem__(self, index):
        return self.page_ranges[index]

    def __len__(self):
        return len(self.page_ranges)

    def __iter__(self):
        return iter(self.page_ranges)
class QueueEnumResults(EnumResultsBase):
    """Paged queue listing."""

    def __init__(self):
        EnumResultsBase.__init__(self)
        self.queues = _list_of(Queue)

    def __getitem__(self, index):
        return self.queues[index]

    def __len__(self):
        return len(self.queues)

    def __iter__(self):
        return iter(self.queues)
class Queue(WindowsAzureData):
    """One queue: name, url and metadata dict."""

    def __init__(self):
        self.metadata = {}
        self.url = u''
        self.name = u''
class QueueMessagesList(WindowsAzureData):
    """List wrapper over QueueMessage entries."""

    def __init__(self):
        self.queue_messages = _list_of(QueueMessage)

    def __getitem__(self, index):
        return self.queue_messages[index]

    def __len__(self):
        return len(self.queue_messages)

    def __iter__(self):
        return iter(self.queue_messages)
class QueueMessage(WindowsAzureData):
    """A dequeued message together with the service bookkeeping fields."""

    def __init__(self):
        self.message_text = u''
        self.dequeue_count = u''
        self.time_next_visible = u''
        self.pop_receipt = u''
        self.expiration_time = u''
        self.insertion_time = u''
        self.message_id = u''
class Entity(WindowsAzureData):
    ''' Entity class. The attributes of entity will be created dynamically. '''
    # Table entities have no fixed schema; _convert_etree_element_to_entity
    # attaches one attribute per <d:...> property element.
    pass
class EntityProperty(WindowsAzureData):
    """Explicitly typed table-entity value (e.g. type 'Edm.Binary')."""

    def __init__(self, type=None, value=None):
        # `type` intentionally shadows the builtin to match the wire format.
        self.value = value
        self.type = type
class Table(WindowsAzureData):
    ''' Only for intellisense and telling user the return type. '''
    pass
def _parse_blob_enum_results_list(response):
    """Parse a List Blobs XML response body into a BlobEnumResults.

    Fills ``blobs`` and ``prefixes`` from the repeated child elements,
    then maps the remaining top-level fields (prefix, marker, ...) by
    attribute name.
    """
    respbody = response.body
    return_obj = BlobEnumResults()
    enum_results = ETree.fromstring(respbody)

    for child in enum_results.findall('./Blobs/Blob'):
        return_obj.blobs.append(_ETreeXmlToObject.fill_instance_element(child, Blob))

    for child in enum_results.findall('./Blobs/BlobPrefix'):
        return_obj.prefixes.append(
            _ETreeXmlToObject.fill_instance_element(child, BlobPrefix))

    # The two list attributes were handled above; the rest are filled
    # generically. setattr only rebinds existing names, so iterating
    # vars() here does not change the dict size.
    for name, value in vars(return_obj).items():
        if name == 'blobs' or name == 'prefixes':
            continue
        value = _ETreeXmlToObject.fill_data_member(enum_results, name, value)
        if value is not None:
            setattr(return_obj, name, value)

    return return_obj
def _update_storage_header(request):
    ''' add additional headers for storage request. '''
    # Body must already be encoded to bytes by the caller.
    if request.body:
        assert isinstance(request.body, bytes)

    # if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
        request.headers.append(('Content-Length', str(len(request.body))))

    # append addtional headers base on the service
    request.headers.append(('x-ms-version', X_MS_VERSION))

    # append x-ms-meta name, values to header
    # Expand the single aggregated metadata header into one x-ms-meta-*
    # header per entry; the loop breaks immediately after mutating the
    # list, so modifying request.headers during iteration is safe here.
    for name, value in request.headers:
        if 'x-ms-meta-name-values' in name and value:
            for meta_name, meta_value in value.items():
                request.headers.append(('x-ms-meta-' + meta_name, meta_value))
            request.headers.remove((name, value))
            break
    return request
def _update_storage_blob_header(request, account_name, account_key):
    ''' add additional headers for storage blob request. '''
    request = _update_storage_header(request)
    # RFC 1123 date format used by the SharedKey signing scheme.
    current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
    request.headers.append(('x-ms-date', current_time))
    request.headers.append(
        ('Content-Type', 'application/octet-stream Charset=UTF-8'))
    # Authorization must be appended last: the signature covers the
    # headers added above.
    request.headers.append(('Authorization',
                            _sign_storage_blob_request(request,
                                                       account_name,
                                                       account_key)))
    # NOTE(review): returns the (in-place mutated) headers list, not the
    # request object.
    return request.headers
def _update_storage_queue_header(request, account_name, account_key):
    ''' add additional headers for storage queue request. '''
    # Queue requests are signed exactly like blob requests.
    return _update_storage_blob_header(request, account_name, account_key)
def _update_storage_table_header(request):
    ''' add additional headers for storage table request. '''
    request = _update_storage_header(request)
    # for/else: the default Content-Type is only appended when no
    # content-type header is already present.
    for name, _ in request.headers:
        if name.lower() == 'content-type':
            break
    else:
        request.headers.append(('Content-Type', 'application/atom+xml'))
    request.headers.append(('DataServiceVersion', '2.0;NetFx'))
    request.headers.append(('MaxDataServiceVersion', '2.0;NetFx'))
    current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
    request.headers.append(('x-ms-date', current_time))
    request.headers.append(('Date', current_time))
    # NOTE(review): returns the headers list, not the request object.
    return request.headers
def _sign_storage_blob_request(request, account_name, account_key):
    '''
    Returns the signed string for blob request which is used to set
    Authorization header. This is also used to sign queue request.
    '''

    uri_path = request.path.split('?')[0]

    # method to sign
    string_to_sign = request.method + '\n'

    # get headers to sign
    # Standard headers covered by the signature, in this fixed order;
    # a missing header contributes an empty line.
    headers_to_sign = [
        'content-encoding', 'content-language', 'content-length',
        'content-md5', 'content-type', 'date', 'if-modified-since',
        'if-match', 'if-none-match', 'if-unmodified-since', 'range']
    request_header_dict = dict((name.lower(), value)
                               for name, value in request.headers if value)
    string_to_sign += '\n'.join(request_header_dict.get(x, '')
                                for x in headers_to_sign) + '\n'

    # get x-ms header to sign
    # Canonicalized x-ms-* headers: lower-cased, lexicographically sorted.
    x_ms_headers = []
    for name, value in request.headers:
        if 'x-ms' in name:
            x_ms_headers.append((name.lower(), value))
    x_ms_headers.sort()
    for name, value in x_ms_headers:
        if value:
            string_to_sign += ''.join([name, ':', value, '\n'])

    # get account_name and uri path to sign
    string_to_sign += '/' + account_name + uri_path

    # get query string to sign if it is not table service
    # Canonicalized resource: sorted query parameters; a repeated name
    # contributes additional ',value' entries.
    query_to_sign = request.query
    query_to_sign.sort()

    current_name = ''
    for name, value in query_to_sign:
        if value:
            if current_name != name:
                string_to_sign += '\n' + name + ':' + value
                current_name = name
            else:
                # NOTE(review): repeated names emit '\n' + ',' + value,
                # i.e. the comma starts a new line — verify against the
                # service's canonicalization rules.
                string_to_sign += '\n' + ',' + value

    # sign the request
    auth_string = 'SharedKey ' + account_name + ':' + \
        _sign_string(account_key, string_to_sign)
    return auth_string
def _sign_storage_table_request(request, account_name, account_key):
    """Return the SharedKey Authorization value for a table request.

    Table signing covers only content-md5, content-type and date plus
    the canonicalized resource; of the query string, only a 'comp'
    parameter on the service root ('/') is included.
    """
    uri_path = request.path.split('?')[0]

    string_to_sign = request.method + '\n'

    headers_to_sign = ['content-md5', 'content-type', 'date']
    request_header_dict = dict((name.lower(), value)
                               for name, value in request.headers if value)
    string_to_sign += '\n'.join(request_header_dict.get(x, '')
                                for x in headers_to_sign) + '\n'

    # get account_name and uri path to sign
    string_to_sign += ''.join(['/', account_name, uri_path])

    for name, value in request.query:
        if name == 'comp' and uri_path == '/':
            string_to_sign += '?comp=' + value
            break

    # sign the request
    auth_string = 'SharedKey ' + account_name + ':' + \
        _sign_string(account_key, string_to_sign)
    return auth_string
def _to_python_bool(value):
if value.lower() == 'true':
return True
return False
def _to_entity_int(data):
int_max = (2 << 30) - 1
if data > (int_max) or data < (int_max + 1) * (-1):
return 'Edm.Int64', str(data)
else:
return 'Edm.Int32', str(data)
def _to_entity_bool(value):
if value:
return 'Edm.Boolean', 'true'
return 'Edm.Boolean', 'false'
def _to_entity_datetime(value):
# Azure expects the date value passed in to be UTC.
# Azure will always return values as UTC.
# If a date is passed in without timezone info, it is assumed to be UTC.
if value.tzinfo:
value = value.astimezone(tzutc())
return 'Edm.DateTime', value.strftime('%Y-%m-%dT%H:%M:%SZ')
def _to_entity_float(value):
return 'Edm.Double', str(value)
def _to_entity_property(value):
if value.type == 'Edm.Binary':
return value.type, _encode_base64(value.value)
return value.type, str(value.value)
def _to_entity_none(value):
return None, None
def _to_entity_str(value):
return 'Edm.String', value
# Tables of conversions to and from entity types. We support specific
# datatypes, and beyond that the user can use an EntityProperty to get
# custom data type support.

def _from_entity_binary(value):
    # Edm.Binary content arrives base64-encoded; wrap it in an
    # EntityProperty so the type tag survives a round trip.
    return EntityProperty('Edm.Binary', _decode_base64_to_bytes(value))
def _from_entity_int(value):
return int(value)
def _from_entity_datetime(value):
    """Parse an Edm.DateTime string into a (timezone-aware) datetime."""
    # Note that Azure always returns UTC datetime, and dateutil parser
    # will set the tzinfo on the date it returns
    return parser.parse(value)
# Maps Edm type tags to deserializers for entity property values.
_ENTITY_TO_PYTHON_CONVERSIONS = {
    'Edm.Binary': _from_entity_binary,
    'Edm.Int32': _from_entity_int,
    'Edm.Int64': _from_entity_int,
    'Edm.Double': float,
    'Edm.Boolean': _to_python_bool,
    'Edm.DateTime': _from_entity_datetime,
}
# Conversion from Python type to a function which returns a tuple of the
# type string and content string.
_PYTHON_TO_ENTITY_CONVERSIONS = {
    int: _to_entity_int,
    bool: _to_entity_bool,
    datetime: _to_entity_datetime,
    float: _to_entity_float,
    EntityProperty: _to_entity_property,
    str: _to_entity_str,
}

# Python 2 extras: long integers, explicit NoneType, unicode strings.
# (On Python 3, None is special-cased in _convert_entity_to_xml instead.)
if sys.version_info < (3,):
    _PYTHON_TO_ENTITY_CONVERSIONS.update({
        long: _to_entity_int,
        types.NoneType: _to_entity_none,
        unicode: _to_entity_str,
    })
def _convert_entity_to_xml(source):
    ''' Converts an entity object to xml to send.
    The entity format is:
    <entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom">
      <title />
      <updated>2008-09-18T23:46:19.3857256Z</updated>
      <author>
        <name />
      </author>
      <id />
      <content type="application/xml">
        <m:properties>
          <d:Address>Mountain View</d:Address>
          <d:Age m:type="Edm.Int32">23</d:Age>
          <d:AmountDue m:type="Edm.Double">200.23</d:AmountDue>
          <d:BinaryData m:type="Edm.Binary" m:null="true" />
          <d:CustomerCode m:type="Edm.Guid">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>
          <d:CustomerSince m:type="Edm.DateTime">2008-07-10T00:00:00</d:CustomerSince>
          <d:IsActive m:type="Edm.Boolean">true</d:IsActive>
          <d:NumOfOrders m:type="Edm.Int64">255</d:NumOfOrders>
          <d:PartitionKey>mypartitionkey</d:PartitionKey>
          <d:RowKey>myrowkey1</d:RowKey>
          <d:Timestamp m:type="Edm.DateTime">0001-01-01T00:00:00</d:Timestamp>
        </m:properties>
      </content>
    </entry>
    '''

    # construct the entity body included in <m:properties> and </m:properties>
    entity_body = '<m:properties xml:space="preserve">{properties}</m:properties>'

    # Accept either a model object or a plain dict of properties.
    if isinstance(source, WindowsAzureData):
        source = vars(source)

    properties_str = ''

    # set properties type for types we know if value has no type info.
    # if value has type info, then set the type to value.type
    for name, value in source.items():
        mtype = ''
        conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))
        # On Python 3 NoneType is not registered in the table, so None
        # is handled explicitly here.
        if conv is None and sys.version_info >= (3,) and value is None:
            conv = _to_entity_none
        if conv is None:
            raise WindowsAzureError(
                _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format(
                    type(value).__name__))

        mtype, value = conv(value)

        # form the property node
        properties_str += ''.join(['<d:', name])
        if value is None:
            # Typed null: self-closing element with m:null.
            properties_str += ' m:null="true" />'
        else:
            if mtype:
                properties_str += ''.join([' m:type="', mtype, '"'])
            properties_str += ''.join(['>',
                                       xml_escape(value), '</d:', name, '>'])

    # Python 2: the ATOM envelope is built from byte strings.
    if sys.version_info < (3,):
        if isinstance(properties_str, unicode):
            properties_str = properties_str.encode('utf-8')

    # generate the entity_body
    entity_body = entity_body.format(properties=properties_str)
    xmlstr = _create_entry(entity_body)

    return xmlstr
def _convert_table_to_xml(table_name):
    """Build the create-table request body for *table_name*.

    A table is serialized exactly like an entity whose only property is
    'TableName', so this simply delegates to _convert_entity_to_xml.
    """
    return _convert_entity_to_xml({'TableName': table_name})
def _convert_block_list_to_xml(block_id_list):
    """Build the Put Block List request body from *block_id_list*.

    Each block id is base64-encoded and emitted as a <Latest> element,
    i.e. the commit uses only the most recently uploaded version of
    each block. Returns '' when the list is None.
    """
    if block_id_list is None:
        return ''
    latest = ''.join('<Latest>{0}</Latest>'.format(_encode_base64(block_id))
                     for block_id in block_id_list)
    return ('<?xml version="1.0" encoding="utf-8"?><BlockList>'
            + latest + '</BlockList>')
def _create_blob_result(response):
    """Wrap a get_blob HTTP response as a BlobResult (bytes + header dict)."""
    blob_properties = _parse_response_for_dict(response)
    return BlobResult(response.body, blob_properties)
def _convert_block_etree_element_to_blob_block(block_element):
    """Build a BlobBlock from a <Block> element (Name is base64-encoded)."""
    block_id = _decode_base64_to_text(block_element.findtext('./Name', ''))
    # NOTE(review): no default for Size — int(None) would raise if the
    # element were absent; presumably the service always includes it.
    block_size = int(block_element.findtext('./Size'))
    return BlobBlock(block_id, block_size)
def _convert_response_to_block_list(response):
    """Parse a Get Block List XML response into a BlobBlockList."""
    block_list = BlobBlockList()
    root = ETree.fromstring(response.body)
    sections = (('./CommittedBlocks/Block', block_list.committed_blocks),
                ('./UncommittedBlocks/Block', block_list.uncommitted_blocks))
    for xpath, target in sections:
        for element in root.findall(xpath):
            target.append(_convert_block_etree_element_to_blob_block(element))
    return block_list
def _remove_prefix(name):
colon = name.find(':')
if colon != -1:
return name[colon + 1:]
return name
def _convert_response_to_entity(response):
    """Parse a single-entity HTTP response; a None response passes through."""
    if response is None:
        return response
    root = ETree.fromstring(response.body)
    return _convert_etree_element_to_entity(root)
def _convert_etree_element_to_entity(entry_element):
    ''' Convert xml response to entity.
    The format of entity:
    <entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom">
      <title />
      <updated>2008-09-18T23:46:19.3857256Z</updated>
      <author>
        <name />
      </author>
      <id />
      <content type="application/xml">
        <m:properties>
          <d:Address>Mountain View</d:Address>
          <d:Age m:type="Edm.Int32">23</d:Age>
          <d:AmountDue m:type="Edm.Double">200.23</d:AmountDue>
          <d:BinaryData m:type="Edm.Binary" m:null="true" />
          <d:CustomerCode m:type="Edm.Guid">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>
          <d:CustomerSince m:type="Edm.DateTime">2008-07-10T00:00:00</d:CustomerSince>
          <d:IsActive m:type="Edm.Boolean">true</d:IsActive>
          <d:NumOfOrders m:type="Edm.Int64">255</d:NumOfOrders>
          <d:PartitionKey>mypartitionkey</d:PartitionKey>
          <d:RowKey>myrowkey1</d:RowKey>
          <d:Timestamp m:type="Edm.DateTime">0001-01-01T00:00:00</d:Timestamp>
        </m:properties>
      </content>
    </entry>
    '''
    entity = Entity()

    properties = entry_element.findall('./atom:content/m:properties', _etree_entity_feed_namespaces)
    for prop in properties:
        for p in prop:
            # One attribute per <d:...> property element.
            name = _get_etree_tag_name_without_ns(p.tag)
            value = p.text or ''
            mtype = p.attrib.get(_make_etree_ns_attr_name(_etree_entity_feed_namespaces['m'], 'type'), None)
            isnull = p.attrib.get(_make_etree_ns_attr_name(_etree_entity_feed_namespaces['m'], 'null'), None)

            # if not isnull and no type info, then it is a string and we just
            # need the str type to hold the property.
            if not isnull and not mtype:
                _set_entity_attr(entity, name, value)
            elif isnull == 'true':
                # `property` intentionally shadows the builtin here.
                if mtype:
                    property = EntityProperty(mtype, None)
                else:
                    property = EntityProperty('Edm.String', None)
                # NOTE(review): the EntityProperty built in this branch is
                # never attached to the entity — the _set_entity_attr call
                # below is inside the else branch only. Verify whether
                # null properties are meant to be dropped.
            else:  # need an object to hold the property
                conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
                if conv is not None:
                    property = conv(value)
                else:
                    property = EntityProperty(mtype, value)
                _set_entity_attr(entity, name, property)

    # extract id, updated and name value from feed entry and set them of
    # rule.
    for name, value in _ETreeXmlToObject.get_entry_properties_from_element(
            entry_element, True).items():
        if name in ['etag']:
            _set_entity_attr(entity, name, value)

    return entity
def _set_entity_attr(entity, name, value):
try:
setattr(entity, name, value)
except UnicodeEncodeError:
# Python 2 doesn't support unicode attribute names, so we'll
# add them and access them directly through the dictionary
entity.__dict__[name] = value
def _convert_etree_element_to_table(entry_element):
    ''' Converts the xml element to table class.
    '''
    table = Table()

    # A table entity carries a single property: TableName.
    name_element = entry_element.find('./atom:content/m:properties/d:TableName', _etree_entity_feed_namespaces)
    if name_element is not None:
        table.name = name_element.text

    # Copy the feed-entry properties (id, updated, ...) onto the table.
    for name_element, value in _ETreeXmlToObject.get_entry_properties_from_element(
            entry_element, False).items():
        setattr(table, name_element, value)

    return table
class _BlobChunkDownloader(object):
    """Splits a blob download into fixed-size ranged GETs.

    When constructed with parallel=True one instance is shared by the
    threads of a pool, so stream position and progress counters are
    guarded by locks; otherwise the locks are None and access is direct.
    """

    def __init__(self, blob_service, container_name, blob_name, blob_size,
                 chunk_size, stream, parallel, max_retries, retry_wait,
                 progress_callback):
        self.blob_service = blob_service
        self.container_name = container_name
        self.blob_name = blob_name
        self.blob_size = blob_size
        self.chunk_size = chunk_size
        self.stream = stream
        # Remember where the caller's stream started so chunk offsets
        # can be translated to absolute stream positions.
        self.stream_start = stream.tell()
        self.stream_lock = threading.Lock() if parallel else None
        self.progress_callback = progress_callback
        self.progress_total = 0
        self.progress_lock = threading.Lock() if parallel else None
        self.max_retries = max_retries
        self.retry_wait = retry_wait

    def get_chunk_offsets(self):
        # Yield the starting byte offset of every chunk.
        index = 0
        while index < self.blob_size:
            yield index
            index += self.chunk_size

    def process_chunk(self, chunk_offset):
        # Download one chunk, write it at its offset, report progress.
        chunk_data = self._download_chunk_with_retries(chunk_offset)
        length = len(chunk_data)
        if length > 0:
            self._write_to_stream(chunk_data, chunk_offset)
            self._update_progress(length)

    def _update_progress(self, length):
        if self.progress_callback is not None:
            if self.progress_lock is not None:
                with self.progress_lock:
                    self.progress_total += length
                    total = self.progress_total
            else:
                self.progress_total += length
                total = self.progress_total
            # The callback is invoked outside the lock.
            self.progress_callback(total, self.blob_size)

    def _write_to_stream(self, chunk_data, chunk_offset):
        # seek+write must be atomic when threads share the stream.
        if self.stream_lock is not None:
            with self.stream_lock:
                self.stream.seek(self.stream_start + chunk_offset)
                self.stream.write(chunk_data)
        else:
            self.stream.seek(self.stream_start + chunk_offset)
            self.stream.write(chunk_data)

    def _download_chunk_with_retries(self, chunk_offset):
        # Inclusive byte range for this chunk.
        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_offset + self.chunk_size - 1)
        retries = self.max_retries
        while True:
            try:
                return self.blob_service.get_blob(
                    self.container_name,
                    self.blob_name,
                    x_ms_range=range_id
                )
            except Exception:
                # Retry any failure up to max_retries times, then re-raise.
                if retries > 0:
                    retries -= 1
                    sleep(self.retry_wait)
                else:
                    raise
class _BlobChunkUploader(object):
    """Base class splitting a blob upload into chunk-sized operations.

    Subclasses provide _upload_chunk with block- or page-blob semantics.
    Locks are created only when parallel=True, mirroring
    _BlobChunkDownloader.
    """

    def __init__(self, blob_service, container_name, blob_name, blob_size,
                 chunk_size, stream, parallel, max_retries, retry_wait,
                 progress_callback, x_ms_lease_id):
        self.blob_service = blob_service
        self.container_name = container_name
        self.blob_name = blob_name
        self.blob_size = blob_size
        self.chunk_size = chunk_size
        self.stream = stream
        # Chunk offsets are relative to the stream's current position.
        self.stream_start = stream.tell()
        self.stream_lock = threading.Lock() if parallel else None
        self.progress_callback = progress_callback
        self.progress_total = 0
        self.progress_lock = threading.Lock() if parallel else None
        self.max_retries = max_retries
        self.retry_wait = retry_wait
        self.x_ms_lease_id = x_ms_lease_id

    def get_chunk_offsets(self):
        index = 0
        if self.blob_size is None:
            # we don't know the size of the stream, so we have no
            # choice but to seek
            while True:
                # Probe one byte at each offset to detect end of stream.
                data = self._read_from_stream(index, 1)
                if not data:
                    break
                yield index
                index += self.chunk_size
        else:
            while index < self.blob_size:
                yield index
                index += self.chunk_size

    def process_chunk(self, chunk_offset):
        # Upload one chunk; returns the subclass's range/block id.
        size = self.chunk_size
        if self.blob_size is not None:
            # The final chunk may be shorter than chunk_size.
            size = min(size, self.blob_size - chunk_offset)
        chunk_data = self._read_from_stream(chunk_offset, size)
        return self._upload_chunk_with_retries(chunk_offset, chunk_data)

    def _read_from_stream(self, offset, count):
        # seek+read must be atomic when threads share the stream.
        if self.stream_lock is not None:
            with self.stream_lock:
                self.stream.seek(self.stream_start + offset)
                data = self.stream.read(count)
        else:
            self.stream.seek(self.stream_start + offset)
            data = self.stream.read(count)
        return data

    def _update_progress(self, length):
        if self.progress_callback is not None:
            if self.progress_lock is not None:
                with self.progress_lock:
                    self.progress_total += length
                    total = self.progress_total
            else:
                self.progress_total += length
                total = self.progress_total
            self.progress_callback(total, self.blob_size)

    def _upload_chunk_with_retries(self, chunk_offset, chunk_data):
        retries = self.max_retries
        while True:
            try:
                range_id = self._upload_chunk(chunk_offset, chunk_data)
                # Progress counts only successful uploads.
                self._update_progress(len(chunk_data))
                return range_id
            except Exception:
                if retries > 0:
                    retries -= 1
                    sleep(self.retry_wait)
                else:
                    raise
class _BlockBlobChunkUploader(_BlobChunkUploader):
    """Uploads chunks as blocks via Put Block."""

    def _upload_chunk(self, chunk_offset, chunk_data):
        # Block id: zero-padded offset, base64-encoded then URL-quoted.
        range_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
        self.blob_service.put_block(
            self.container_name,
            self.blob_name,
            chunk_data,
            range_id,
            x_ms_lease_id=self.x_ms_lease_id
        )
        return range_id
class _PageBlobChunkUploader(_BlobChunkUploader):
    """Uploads chunks as pages via Put Page ('update')."""

    def _upload_chunk(self, chunk_offset, chunk_data):
        # Inclusive byte range covered by this page write.
        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_offset + len(chunk_data) - 1)
        self.blob_service.put_page(
            self.container_name,
            self.blob_name,
            chunk_data,
            range_id,
            'update',
            x_ms_lease_id=self.x_ms_lease_id
        )
        return range_id
def _download_blob_chunks(blob_service, container_name, blob_name,
                          blob_size, block_size, stream, max_connections,
                          max_retries, retry_wait, progress_callback):
    """Download a blob into *stream* in block_size chunks.

    With max_connections > 1 the chunks are fetched in parallel on a
    thread pool; otherwise sequentially. progress_callback (if given)
    is invoked as progress_callback(bytes_done, blob_size).
    """
    downloader = _BlobChunkDownloader(
        blob_service,
        container_name,
        blob_name,
        blob_size,
        block_size,
        stream,
        max_connections > 1,
        max_retries,
        retry_wait,
        progress_callback,
    )

    if progress_callback is not None:
        progress_callback(0, blob_size)

    if max_connections > 1:
        import concurrent.futures
        # BUG FIX: use the executor as a context manager so its worker
        # threads are joined even when a chunk download raises; the old
        # code never shut the pool down. list() still drains the map and
        # re-raises the first chunk exception; the unused `result` local
        # was dropped.
        with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
            list(executor.map(downloader.process_chunk,
                              downloader.get_chunk_offsets()))
    else:
        for range_start in downloader.get_chunk_offsets():
            downloader.process_chunk(range_start)
def _upload_blob_chunks(blob_service, container_name, blob_name,
                        blob_size, block_size, stream, max_connections,
                        max_retries, retry_wait, progress_callback,
                        x_ms_lease_id, uploader_class):
    """Upload *stream* in block_size chunks using *uploader_class*.

    Returns the list of range/block ids produced by the chunk uploads,
    in chunk-offset order. With max_connections > 1 the chunks are
    uploaded in parallel on a thread pool.
    """
    uploader = uploader_class(
        blob_service,
        container_name,
        blob_name,
        blob_size,
        block_size,
        stream,
        max_connections > 1,
        max_retries,
        retry_wait,
        progress_callback,
        x_ms_lease_id,
    )

    if progress_callback is not None:
        progress_callback(0, blob_size)

    if max_connections > 1:
        import concurrent.futures
        # BUG FIX: shut the pool down deterministically (including on
        # error) by using the executor as a context manager; previously
        # the executor was never shut down.
        with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
            range_ids = list(executor.map(uploader.process_chunk,
                                          uploader.get_chunk_offsets()))
    else:
        range_ids = [uploader.process_chunk(start)
                     for start in uploader.get_chunk_offsets()]

    return range_ids
def _storage_error_handler(http_error):
    ''' Simple error handler for storage service. '''
    # Delegates to the shared azure error handler.
    return _general_error_handler(http_error)
# make these available just from storage.
from azure.storage.blobservice import BlobService
from azure.storage.queueservice import QueueService
from azure.storage.tableservice import TableService
from azure.storage.cloudstorageaccount import CloudStorageAccount
from azure.storage.sharedaccesssignature import (
SharedAccessSignature,
SharedAccessPolicy,
Permission,
WebResource,
)
| |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import tinctest
from rpm_util import RPMUtil
from tinctest.lib import local_path, run_shell_command
from hadoop_util import HadoopUtil
class PHDRpmUtil(HadoopUtil):
"""Utility for installing PHD Single node clusters using RPMs"""
    def __init__(
        self, hadoop_artifact_url, hadoop_install_dir, hadoop_data_dir, template_conf_dir, hostname = 'localhost',
        secure_hadoop = False
        ):
        """Set up paths, package lists and env defaults for a PHD install.

        hadoop_artifact_url: where the PHD RPM artifacts come from.
        hadoop_install_dir / hadoop_data_dir: install and data locations.
        template_conf_dir: holds the conf.secure / conf.pseudo templates.
        hostname: single-node cluster host (default 'localhost').
        secure_hadoop: True to install the secure configuration.
        """
        HadoopUtil.__init__(self, hadoop_artifact_url, hadoop_install_dir, hadoop_data_dir, hostname)
        self.rpmutil = RPMUtil()
        self.hostname = hostname
        self.hadoop_artifact_url = hadoop_artifact_url
        self.hadoop_install_dir = hadoop_install_dir
        # Filled in once the artifact location is known.
        self.hadoop_binary_loc = ''
        self.hadoop_data_dir = hadoop_data_dir
        self.template_conf_dir = template_conf_dir
        self.secure_hadoop = secure_hadoop
        # Constants
        # under the hadoop template configuration directory
        # both the below directories should be present
        self.SECURE_DIR_NAME = "conf.secure" # secure configuration files location
        self.NON_SECURE_DIR_NAME = "conf.pseudo" # non-secure configuration files location
        self.DEPENDENCY_PKGS = [
            "fuse-", # eg. fuse-2.8.3-4.el6.x86_64
            "fuse-libs", # eg. fuse-libs-2.8.3-4.el6.x86_6
            "nc-" # eg. 1.84-22.el6.x86_64"
            ]
        # Relative locations of the RPM groups inside the artifact.
        self.HADOOP_UTILITY_RPMS = "utility/rpm/"
        self.ZOOKEEPER_RPMS = "zookeeper/rpm/"
        self.HADOOP_RPMS = "hadoop/rpm/"
        # Environment expected by the installed cluster; tmp/log dirs
        # are redirected under hadoop_data_dir.
        self.HADOOP_ENVS = {
            "HADOOP_HOME" : "/usr/lib/gphd/hadoop/",
            "HADOOP_COMMON_HOME" : "/usr/lib/gphd/hadoop/",
            "HADOOP_HDFS_HOME" : "/usr/lib/gphd/hadoop-hdfs/",
            "HADOOP_MAPRED_HOME" : "/usr/lib/gphd/hadoop-mapreduce/",
            "YARN_HOME" : "/usr/lib/gphd/hadoop-yarn/",
            "HADOOP_TMP_DIR" : "%s/hadoop-hdfs/cache/" %self.hadoop_data_dir,
            "MAPRED_TMP_DIR" : "%s/hadoop-mapreduce/cache/" %self.hadoop_data_dir,
            "YARN_TMP_DIR" : "%s/hadoop-yarn/cache/" %self.hadoop_data_dir,
            "HADOOP_CONF_DIR" : "/etc/hadoop/conf",
            "HADOOP_LOG_DIR" : "%s/hadoop-logs/hadoop-hdfs" %self.hadoop_data_dir,
            "MAPRED_LOG_DIR" : "%s/hadoop-logs/hadoop-mapreduce" %self.hadoop_data_dir,
            "YARN_LOG_DIR" : "%s/hadoop-logs/hadoop-yarn" %self.hadoop_data_dir
            }
        # Regex of packages wiped during cleanup().
        self.PKGS_TO_REMOVE = "^hadoop-*|^bigtop-*|^zookeeper-*|^parquet-*"
    def _get_hadoop_conf_dir(self):
        """
        Gets the hadoop configuration directory location
        """
        # Locate the active conf dir under /etc/gphd, excluding the
        # zookeeper and httpfs conf dirs; take the first match.
        cmd_str = "find /etc/gphd/ -name conf | egrep -v \"zookeeper|httpfs\""
        res = {}
        if run_shell_command(cmd_str, "Find HADOOP_CONF_DIR", res):
            return res['stdout'].split('\n')[0]
        # NOTE(review): implicitly returns None when the command fails;
        # confirm that callers tolerate a None conf dir.
    def _remove_installed_pkgs(self):
        """Erase every hadoop/bigtop/zookeeper/parquet RPM (PKGS_TO_REMOVE)."""
        self.rpmutil.erase_all_packages(self.PKGS_TO_REMOVE)
def _install_dependency_pkgs(self):
for pkg in self.DEPENDENCY_PKGS:
if not self.rpmutil.is_pkg_installed("^" + pkg):
self.rpmutil.install_package_using_yum(pkg, is_regex_pkg_name = True)
    def cleanup(self):
        """
        Clean-up process to:
        1. kill all the hadoop daemon process from previous runs if any
        2. Remove the contents from the hadoop installation & configuration locations
        """
        self.stop_hadoop()
        # Kill any daemon that survived stop_hadoop (matched on -Dhadoop).
        cmd_str = "ps aux | awk '/\-Dhadoop/{print $2}' | xargs sudo kill -9"
        run_shell_command(cmd_str, "Kill zombie hadoop daemons")
        # Build one rm -rf covering every HADOOP_ENVS path plus /etc/gphd.
        # (iteritems is Python 2 only.)
        cmd_str = "sudo rm -rf "
        for key,value in self.HADOOP_ENVS.iteritems():
            cmd_str = cmd_str + value +"* "
        cmd_str = cmd_str + "/etc/gphd"
        run_shell_command(cmd_str,"Clean up HDFS files")
        self._remove_installed_pkgs()
    def _create_symlinks(self, lib_dir, symlink):
        """Create <lib_dir>/<symlink> pointing at the single versioned match.

        Only links when exactly one existing entry matches '<symlink>*';
        with zero or multiple matches nothing is created.
        """
        res = {}
        cmd_str = "sudo find %s -name \"%s*\"" % (lib_dir,symlink)
        run_shell_command(cmd_str, "Check for %s symlink" %symlink, res)
        result = res['stdout']
        if result:
            result = result.splitlines()
            if len(result) == 1:
                cmd_str = "cd %s; sudo ln -s %s %s" %(lib_dir, result[0], symlink)
                run_shell_command(cmd_str, "Create %s symlink" %symlink)
def install_binary(self):
"""
Installs RPM binaries of:
1. utility eg. bigtop utils
2. zookeeper
3. hadoop
"""
self._install_dependency_pkgs()
# install utility rpms
hadoop_utility_rpms_loc = os.path.join(self.hadoop_binary_loc, self.HADOOP_UTILITY_RPMS)
self.rpmutil.install_rpms_from(hadoop_utility_rpms_loc)
# install zookeeper rpms
zookeeper_rpms_loc = os.path.join(self.hadoop_binary_loc, self.ZOOKEEPER_RPMS)
self.rpmutil.install_rpms_from(zookeeper_rpms_loc)
# install hadoop rpms
hadoop_rpms_loc = os.path.join(self.hadoop_binary_loc, self.HADOOP_RPMS)
self.rpmutil.install_rpms_from(hadoop_rpms_loc)
# create hadoop sym links inside /var/lib/gphd
lib_dir = "/var/lib/gphd"
self._create_symlinks(lib_dir, "hadoop-hdfs")
self._create_symlinks(lib_dir, "hadoop-yarn")
self._create_symlinks(lib_dir, "hadoop-mapreduce")
self._create_symlinks(lib_dir, "zookeeper")
    def install_hadoop_configurations(self):
        """
        Based on type of installation secure or non-secure,
        installs the updated template configuration files
        and makes required changes to the env files.
        """
        ##TODO: Create separate directories for secure & non-secure
        ## in the hadoop conf dir and copy the update configs in respective directories
        # Resolve the conf dir actually installed by the RPMs (under /etc/gphd)
        # and remember it for the daemon commands run later.
        self.HADOOP_ENVS['HADOOP_CONF_DIR'] = self._get_hadoop_conf_dir()
        # check the type of hadoop installation - secure or non secure
        if self.secure_hadoop:
            # SECURE_DIR_NAME is expected to be present under template configuration directory
            secure_conf = os.path.join(self.template_conf_dir, self.SECURE_DIR_NAME)
            super(PHDRpmUtil,self).install_hadoop_configurations(secure_conf, self.HADOOP_ENVS['HADOOP_CONF_DIR'])
            # update env files in /etc/default/hadoop*
            # strip a trailing slash so the sed replacement below does not
            # produce a double slash in the rewritten log path
            if self.hadoop_data_dir.endswith('/'):
                self.hadoop_data_dir = self.hadoop_data_dir[:-1]
            # rewrite /var/log (optionally /var/log/gphd) to
            # <data_dir>/hadoop-logs in every /etc/default/hadoop* env file
            cmd_str = "for env_file in `ls /etc/default/hadoop*`;" \
                            "do " \
                            "sudo sed -r -i 's:\/var\/log(\/gphd)?:\%s\/hadoop-logs:g' ${env_file};" \
                            "done" %self.hadoop_data_dir
            run_shell_command(cmd_str, "Update env files in /etc/default/hadoop*")
            # update hadoop-env.sh file
            hadoop_env_file = os.path.join( self.HADOOP_ENVS['HADOOP_CONF_DIR'], "hadoop-env.sh" )
            if not os.path.exists(hadoop_env_file):
                tinctest.logger.info("hadoop-env.sh not found..creating a new one!")
                run_shell_command("sudo touch %s" %hadoop_env_file, "Create hadoop-env.sh file")
            # give write permissions on the file
            self.give_others_write_perm(hadoop_env_file)
            # append JAVA_HOME and IPv4/native-library JVM options
            text = "\n### Added env variables\n" \
                    "export JAVA_HOME=%s\n" \
                    "export HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true " \
                    "-Djava.library.path=$HADOOP_HOME/lib/native/\"\n" %self.get_java_home()
            self.append_text_to_file(hadoop_env_file,text)
            # revert back to old permissions
            self.remove_others_write_perm(hadoop_env_file)
            # update env files hadoop-hdfs-datanode & hadoop
            hdfs_datanode_env = "/etc/default/hadoop-hdfs-datanode"
            hdfs_hadoop_env = "/etc/default/hadoop"
            self.give_others_write_perm(hdfs_datanode_env)
            # secure-datanode settings: run the DN as the hdfs user with
            # dedicated secure log/pid locations
            text = "\n### Secure env variables\n" \
                   "export HADOOP_SECURE_DN_USER=hdfs\n" \
                   "export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/hdfs\n" \
                   "export HADOOP_PID_DIR=/var/run/gphd/hadoop-hdfs/\n" \
                   "export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}\n"
            self.append_text_to_file(hdfs_datanode_env, text)
            self.remove_others_write_perm(hdfs_datanode_env)
            self.give_others_write_perm(hdfs_hadoop_env)
            self.append_text_to_file(hdfs_hadoop_env, "export JSVC_HOME=/usr/libexec/bigtop-utils\n")
            self.remove_others_write_perm(hdfs_hadoop_env)
            # change the permissions of container-executor
            # (root-owned, setuid/setgid, group-executable only — presumably
            # required for secure YARN container launch; verify against docs)
            container_bin_path = os.path.join(self.HADOOP_ENVS['YARN_HOME'],'bin/container-executor')
            cmd_str = "sudo chown root:yarn %s" %container_bin_path
            run_shell_command(cmd_str)
            cmd_str = "sudo chmod 050 %s" %container_bin_path
            run_shell_command(cmd_str)
            cmd_str = "sudo chmod u+s %s" %container_bin_path
            run_shell_command(cmd_str)
            cmd_str = "sudo chmod g+s %s" %container_bin_path
            run_shell_command(cmd_str)
        else:
            # NON_SECURE_DIR_NAME is expected to be present under template configuration directory
            non_secure_conf = os.path.join(self.template_conf_dir, self.NON_SECURE_DIR_NAME)
            super(PHDRpmUtil, self).install_hadoop_configurations(non_secure_conf, self.HADOOP_ENVS['HADOOP_CONF_DIR'])
def start_hdfs(self):
# format namenode
cmd_str = "sudo -u hdfs hdfs --config %s namenode -format" %self.HADOOP_ENVS['HADOOP_CONF_DIR']
namenode_formatted = run_shell_command(cmd_str)
if not namenode_formatted:
raise Exception("Exception in namnode formatting")
# start namenode
cmd_str = "sudo /etc/init.d/hadoop-hdfs-namenode start"
namenode_started = run_shell_command(cmd_str)
if not namenode_started:
raise Exception("Namenode not started")
cmd_str = "sudo /etc/init.d/hadoop-hdfs-datanode start"
namenode_started = run_shell_command(cmd_str)
if not namenode_started:
raise Exception("Namenode not started")
cmd_str = "sudo /etc/init.d/hadoop-hdfs-secondarynamenode start"
namenode_started = run_shell_command(cmd_str)
if not namenode_started:
raise Exception("Secondary namenode not started")
def set_hdfs_permissions(self):
if self.secure_hadoop:
hdfs_cmd = "sudo hdfs dfs"
else:
hdfs_cmd = "sudo -u hdfs hdfs dfs"
# set hdfs permissions
cmd_str = "%s -chmod -R 777 /" %hdfs_cmd
run_shell_command(cmd_str)
cmd_str = "%s -mkdir /tmp" %hdfs_cmd
run_shell_command(cmd_str)
cmd_str = "%s -chmod 1777 /tmp" %hdfs_cmd
run_shell_command(cmd_str)
# cmd_str = "%s -mkdir -p /var/log/gphd/hadoop-yarn" %hdfs_cmd
# run_shell_command(cmd_str)
cmd_str = "%s -mkdir /user" %hdfs_cmd
run_shell_command(cmd_str)
cmd_str = "%s -chmod 777 /user" %hdfs_cmd
run_shell_command(cmd_str)
cmd_str = "%s -mkdir /user/history" %hdfs_cmd
run_shell_command(cmd_str)
cmd_str = "%s -chown mapred:hadoop /user/history" %hdfs_cmd
run_shell_command(cmd_str)
cmd_str = "%s -chmod 1777 -R /user/history" %hdfs_cmd
run_shell_command(cmd_str)
cmd_str = "%s -ls -R /" %hdfs_cmd
run_shell_command(cmd_str)
def put_file_in_hdfs(self, input_path, hdfs_path):
if hdfs_path.rfind('/') > 0:
hdfs_dir = hdfs_path[:hdfs_path.rfind('/')]
cmd_str = "hdfs dfs -mkdir -p %s" %hdfs_dir
run_shell_command(cmd_str, "Creating parent HDFS dir for path %s" %input_path)
cmd_str = "hdfs dfs -put %s %s" %(input_path, hdfs_path)
run_shell_command(cmd_str, "Copy to HDFS : file %s" %input_path)
def remove_file_from_hdfs(self, hdfs_path):
cmd_str = "hdfs dfs -rm -r %s" %hdfs_path
run_shell_command(cmd_str, "Remove %s from HDFS" %hdfs_path)
def start_yarn(self):
# start yarn daemons
# start resource manager
self.set_hdfs_permissions()
cmd_str = "sudo /etc/init.d/hadoop-yarn-resourcemanager start"
namenode_started = run_shell_command(cmd_str)
if not namenode_started:
raise Exception("Resource manager not started")
# start node manager
cmd_str = "sudo /etc/init.d/hadoop-yarn-nodemanager start"
namenode_started = run_shell_command(cmd_str)
if not namenode_started:
raise Exception("Node manager not started")
# start history server
cmd_str = "sudo /etc/init.d/hadoop-mapreduce-historyserver start"
namenode_started = run_shell_command(cmd_str)
if not namenode_started:
raise Exception("History server not started")
def start_hadoop(self):
"""
Starts the PHD cluster and checks the JPS status
"""
self.start_hdfs()
self.start_yarn()
res = {}
# run jps command & check for hadoop daemons
cmd_str = "sudo jps"
run_shell_command(cmd_str, "Check Hadoop Daemons", res)
result = res['stdout']
tinctest.logger.info("\n**** Following Hadoop Daemons started **** \n%s" %result)
tinctest.logger.info("*** Hadoop Started Successfully!!")
def stop_hadoop(self):
"""
Stops the PHD cluster
"""
run_shell_command("sudo /etc/init.d/hadoop-mapreduce-historyserver stop", "Stop history-server")
run_shell_command("sudo /etc/init.d/hadoop-yarn-nodemanager stop", "Stop Node manager")
run_shell_command("sudo /etc/init.d/hadoop-yarn-resourcemanager stop", "Stop resourcemanager")
run_shell_command("sudo /etc/init.d/hadoop-hdfs-secondarynamenode stop", "Stop secondarynamenode")
run_shell_command("sudo /etc/init.d/hadoop-hdfs-datanode stop", "Stop datanode")
run_shell_command("sudo /etc/init.d/hadoop-hdfs-namenode stop", "Stop namenode")
def get_hadoop_env(self):
"""
Returns a dictionary of hadoop environment variables like:
1. HADOOP_HOME
2. HADOOP_CONF_DIR
3. HADOOP_COMMON_HOME
4. HADOOP_HDFS_HOME
5. YARN_HOME
6. HADOOP_MAPRED_HOME
"""
return self.HADOOP_ENVS
def init_cluster(self):
"""
Init point for starting up the PHD cluster
"""
self.download_binary_and_untar()
self.cleanup()
self.install_binary()
self.install_hadoop_configurations()
self.start_hadoop()
if __name__ == '__main__':
    # Ad-hoc entry point: bring up a single-node PHD 1.1.1 cluster on
    # localhost using a hard-coded internal build URL, a local data dir and
    # the rpm config templates shipped with this repo.
    hdfs_util = PHDRpmUtil("http://build-prod.sanmateo.greenplum.com/releases/pivotal-hd/1.1.1/PHD-1.1.1.0-82.tar.gz","/data/gpadmin/hadoop_test","../configs/rpm/","localhost")
    hdfs_util.init_cluster()
| |
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
# Copyright 2014-2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import encodeutils
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
# Point the nova libvirt wrappers at the in-tree fake libvirt implementation
# so these unit tests never touch a real hypervisor.
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt
CONF = cfg.CONF
class GuestTestCase(test.NoDBTestCase):
    """Unit tests for nova.virt.libvirt.guest.Guest.

    All libvirt interaction is mocked: the FakeLibvirtFixture replaces the
    libvirt module and self.domain is a Mock specced on virDomain, so each
    test only checks that Guest translates its API into the expected
    virDomain calls (names, arguments and flags).
    """
    def setUp(self):
        super(GuestTestCase, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.host = host.Host("qemu:///system")
        self.context = context.get_admin_context()
        # spec= makes unexpected attribute access fail fast
        self.domain = mock.Mock(spec=fakelibvirt.virDomain)
        self.guest = libvirt_guest.Guest(self.domain)
    def test_repr(self):
        self.domain.ID.return_value = 99
        self.domain.UUIDString.return_value = "UUID"
        self.domain.name.return_value = "foo"
        self.assertEqual("<Guest 99 foo UUID>", repr(self.guest))
    @mock.patch.object(fakelibvirt.Connection, 'defineXML')
    def test_create(self, mock_define):
        libvirt_guest.Guest.create("xml", self.host)
        mock_define.assert_called_once_with("xml")
    @mock.patch.object(fakelibvirt.Connection, 'defineXML')
    def test_create_exception(self, mock_define):
        mock_define.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          libvirt_guest.Guest.create,
                          "foo", self.host)
    def test_launch(self):
        self.guest.launch()
        self.domain.createWithFlags.assert_called_once_with(0)
    def test_launch_and_pause(self):
        self.guest.launch(pause=True)
        self.domain.createWithFlags.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_START_PAUSED)
    @mock.patch.object(encodeutils, 'safe_decode')
    def test_launch_exception(self, mock_safe_decode):
        self.domain.createWithFlags.side_effect = test.TestingException
        mock_safe_decode.return_value = "</xml>"
        self.assertRaises(test.TestingException, self.guest.launch)
        # launch() decodes the domain XML for the error log on failure
        self.assertEqual(1, mock_safe_decode.called)
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(libvirt_guest.Guest, 'get_interfaces')
    def test_enable_hairpin(self, mock_get_interfaces, mock_execute):
        mock_get_interfaces.return_value = ["vnet0", "vnet1"]
        self.guest.enable_hairpin()
        # one sysfs write per interface, tolerating exit code 1
        mock_execute.assert_has_calls([
            mock.call(
                'tee', '/sys/class/net/vnet0/brport/hairpin_mode',
                run_as_root=True, process_input='1', check_exit_code=[0, 1]),
            mock.call(
                'tee', '/sys/class/net/vnet1/brport/hairpin_mode',
                run_as_root=True, process_input='1', check_exit_code=[0, 1])])
    @mock.patch.object(encodeutils, 'safe_decode')
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(libvirt_guest.Guest, 'get_interfaces')
    def test_enable_hairpin_exception(self, mock_get_interfaces,
                                      mock_execute, mock_safe_decode):
        mock_get_interfaces.return_value = ["foo"]
        mock_execute.side_effect = test.TestingException('oops')
        self.assertRaises(test.TestingException, self.guest.enable_hairpin)
        self.assertEqual(1, mock_safe_decode.called)
    def test_get_interfaces(self):
        self.domain.XMLDesc.return_value = """<domain>
  <devices>
    <interface type="network">
      <target dev="vnet0"/>
    </interface>
    <interface type="network">
      <target dev="vnet1"/>
    </interface>
  </devices>
</domain>"""
        self.assertEqual(["vnet0", "vnet1"], self.guest.get_interfaces())
    def test_get_interfaces_exception(self):
        # unparsable domain XML degrades to an empty interface list
        self.domain.XMLDesc.return_value = "<bad xml>"
        self.assertEqual([], self.guest.get_interfaces())
    def test_poweroff(self):
        self.guest.poweroff()
        self.domain.destroy.assert_called_once_with()
    def test_resume(self):
        self.guest.resume()
        self.domain.resume.assert_called_once_with()
    # NOTE(review): the 10290000000L literals use the Python-2-only "L"
    # suffix; this module cannot run unchanged on Python 3.
    def test_get_vcpus_info(self):
        self.domain.vcpus.return_value = ([(0, 1, 10290000000L, 2)],
                                          [(True, True)])
        vcpus = list(self.guest.get_vcpus_info())
        self.assertEqual(0, vcpus[0].id)
        self.assertEqual(2, vcpus[0].cpu)
        self.assertEqual(1, vcpus[0].state)
        self.assertEqual(10290000000L, vcpus[0].time)
    def test_delete_configuration(self):
        self.guest.delete_configuration()
        self.domain.undefineFlags.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
    def test_delete_configuration_exception(self):
        # when undefineFlags raises, Guest falls back to plain undefine()
        self.domain.undefineFlags.side_effect = fakelibvirt.libvirtError(
            'oops')
        self.domain.ID.return_value = 1
        self.guest.delete_configuration()
        self.domain.undefine.assert_called_once_with()
    # attach/detach tests: persistent maps to AFFECT_CONFIG, live to
    # AFFECT_LIVE, both combine with bitwise OR, neither means flags=0.
    def test_attach_device(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.attach_device(conf)
        self.domain.attachDeviceFlags.assert_called_once_with(
            "</xml>", flags=0)
    def test_attach_device_persistent(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.attach_device(conf, persistent=True)
        self.domain.attachDeviceFlags.assert_called_once_with(
            "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)
    def test_attach_device_live(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.attach_device(conf, live=True)
        self.domain.attachDeviceFlags.assert_called_once_with(
            "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
    def test_attach_device_persistent_live(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.attach_device(conf, persistent=True, live=True)
        self.domain.attachDeviceFlags.assert_called_once_with(
            "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
    def test_detach_device(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.detach_device(conf)
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=0)
    def test_detach_device_persistent(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.detach_device(conf, persistent=True)
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)
    def test_detach_device_live(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.detach_device(conf, live=True)
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
    def test_detach_device_persistent_live(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.detach_device(conf, persistent=True, live=True)
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
    # get_xml_desc tests: inactive maps to XML_INACTIVE, sensitive to
    # XML_SECURE, migratable to XML_MIGRATABLE; combinations OR together.
    def test_get_xml_desc(self):
        self.guest.get_xml_desc()
        self.domain.XMLDesc.assert_called_once_with(flags=0)
    def test_get_xml_desc_dump_inactive(self):
        self.guest.get_xml_desc(dump_inactive=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_INACTIVE)
    def test_get_xml_desc_dump_sensitive(self):
        self.guest.get_xml_desc(dump_sensitive=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_SECURE)
    def test_get_xml_desc_dump_inactive_dump_sensitive(self):
        self.guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                   fakelibvirt.VIR_DOMAIN_XML_SECURE))
    def test_get_xml_desc_dump_migratable(self):
        self.guest.get_xml_desc(dump_migratable=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
    def test_has_persistent_configuration(self):
        self.assertTrue(
            self.guest.has_persistent_configuration())
        self.domain.isPersistent.assert_called_once_with()
    def test_save_memory_state(self):
        self.guest.save_memory_state()
        self.domain.managedSave.assert_called_once_with(0)
    def test_get_block_device(self):
        disk = 'vda'
        gblock = self.guest.get_block_device(disk)
        self.assertEqual(disk, gblock._disk)
        self.assertEqual(self.guest, gblock._guest)
class GuestBlockTestCase(test.NoDBTestCase):
    """Unit tests for the block-device wrapper returned by
    Guest.get_block_device().

    Each test checks that the wrapper translates its API into the expected
    virDomain block* call (name, arguments, flags) on a mocked domain.
    """
    def setUp(self):
        super(GuestBlockTestCase, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.host = host.Host("qemu:///system")
        self.context = context.get_admin_context()
        self.domain = mock.Mock(spec=fakelibvirt.virDomain)
        self.guest = libvirt_guest.Guest(self.domain)
        self.gblock = self.guest.get_block_device('vda')
    def test_abort_job(self):
        self.gblock.abort_job()
        self.domain.blockJobAbort.assert_called_once_with('vda', flags=0)
    # NOTE(review): "async" as a keyword argument is Python-2-only; it is a
    # reserved word from Python 3.7 on, so this test cannot run on py3.
    def test_abort_job_async(self):
        self.gblock.abort_job(async=True)
        self.domain.blockJobAbort.assert_called_once_with(
            'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)
    def test_abort_job_pivot(self):
        self.gblock.abort_job(pivot=True)
        self.domain.blockJobAbort.assert_called_once_with(
            'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
    def test_get_job_info(self):
        self.domain.blockJobInfo.return_value = {
            "type": 1,
            "bandwidth": 18,
            "cur": 66,
            "end": 100}
        info = self.gblock.get_job_info()
        self.assertEqual(1, info.job)
        self.assertEqual(18, info.bandwidth)
        self.assertEqual(66, info.cur)
        self.assertEqual(100, info.end)
        self.domain.blockJobInfo.assert_called_once_with('vda', flags=0)
    def test_resize(self):
        self.gblock.resize(10)
        self.domain.blockResize.assert_called_once_with('vda', 10)
    # rebase tests: each keyword maps to one VIR_DOMAIN_BLOCK_REBASE_* flag
    def test_rebase(self):
        self.gblock.rebase("foo")
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0, flags=0)
    def test_rebase_shallow(self):
        self.gblock.rebase("foo", shallow=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
    def test_rebase_reuse_ext(self):
        self.gblock.rebase("foo", reuse_ext=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
    def test_rebase_copy(self):
        self.gblock.rebase("foo", copy=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY)
    def test_rebase_relative(self):
        self.gblock.rebase("foo", relative=True)
        self.domain.blockRebase.assert_called_once_with(
            'vda', "foo", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
    def test_commit(self):
        self.gblock.commit("foo", "top")
        self.domain.blockCommit.assert_called_once_with(
            'vda', "foo", "top", 0, flags=0)
    def test_commit_relative(self):
        self.gblock.commit("foo", "top", relative=True)
        self.domain.blockCommit.assert_called_once_with(
            'vda', "foo", "top", 0,
            flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
    def test_wait_for_job(self):
        # cur < end: job reported as still in progress
        self.domain.blockJobInfo.return_value = {
            "type": 4,
            "bandwidth": 18,
            "cur": 95,
            "end": 100}
        in_progress = self.gblock.wait_for_job()
        self.assertTrue(in_progress)
        # cur == end: job reported as finished
        self.domain.blockJobInfo.return_value = {
            "type": 4,
            "bandwidth": 18,
            "cur": 100,
            "end": 100}
        in_progress = self.gblock.wait_for_job()
        self.assertFalse(in_progress)
        # job gone (type 0) with wait_for_job_clean: also finished
        self.domain.blockJobInfo.return_value = {"type": 0}
        in_progress = self.gblock.wait_for_job(wait_for_job_clean=True)
        self.assertFalse(in_progress)
    # NOTE(review): "arbort" is a typo for "abort"; renaming would change
    # the test id, so it is only flagged here.
    def test_wait_for_job_arbort_on_error(self):
        self.domain.blockJobInfo.return_value = -1
        self.assertRaises(
            exception.NovaException,
            self.gblock.wait_for_job, abort_on_error=True)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BandwidthSchedulesOperations(object):
"""BandwidthSchedulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2020_09_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list_by_data_box_edge_device(
        self,
        device_name,  # type: str
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.BandwidthSchedulesList"]
        """Gets all the bandwidth schedules for a Data Box Edge/Data Box Gateway device.

        :param device_name: The device name.
        :type device_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BandwidthSchedulesList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_09_01_preview.models.BandwidthSchedulesList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BandwidthSchedulesList"]
        # map auth/404/409 to specific azure-core exceptions; callers may
        # extend or override the mapping via an 'error_map' kwarg
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01-preview"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the templated first-page URL when
            # next_link is None, otherwise the service-provided link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_data_box_edge_device.metadata['url']  # type: ignore
                path_format_arguments = {
                    'deviceName': self._serialize.url("device_name", device_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, iterator of items).
            deserialized = self._deserialize('BandwidthSchedulesList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; anything other than 200 raises.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/bandwidthSchedules'}  # type: ignore
    def get(
        self,
        device_name,  # type: str
        name,  # type: str
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BandwidthSchedule"
        """Gets the properties of the specified bandwidth schedule.

        :param device_name: The device name.
        :type device_name: str
        :param name: The bandwidth schedule name.
        :type name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BandwidthSchedule, or the result of cls(response)
        :rtype: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.BandwidthSchedule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BandwidthSchedule"]
        # map auth/404/409 to specific azure-core exceptions; callers may
        # extend or override the mapping via an 'error_map' kwarg
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # only 200 is a success for this operation
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BandwidthSchedule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/bandwidthSchedules/{name}'}  # type: ignore
    def _create_or_update_initial(
        self,
        device_name,  # type: str
        name,  # type: str
        resource_group_name,  # type: str
        parameters,  # type: "_models.BandwidthSchedule"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.BandwidthSchedule"]
        # Initial PUT of the long-running create/update operation; used by
        # begin_create_or_update, which polls until the LRO completes.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.BandwidthSchedule"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'BandwidthSchedule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted (poll for result)
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # a 202 carries no body, so deserialized stays None
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('BandwidthSchedule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/bandwidthSchedules/{name}'}  # type: ignore
    def begin_create_or_update(
        self,
        device_name,  # type: str
        name,  # type: str
        resource_group_name,  # type: str
        parameters,  # type: "_models.BandwidthSchedule"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.BandwidthSchedule"]
        """Creates or updates a bandwidth schedule.

        :param device_name: The device name.
        :type device_name: str
        :param name: The bandwidth schedule name which needs to be added/updated.
        :type name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param parameters: The bandwidth schedule to be added or updated.
        :type parameters: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.BandwidthSchedule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either BandwidthSchedule or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2020_09_01_preview.models.BandwidthSchedule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BandwidthSchedule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # only issue the initial PUT when not resuming from a saved token
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                device_name=device_name,
                name=name,
                resource_group_name=resource_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # these kwargs were consumed by the initial call and must not be
        # forwarded to the polling method
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # deserialize the final LRO response into the model (or cls())
            deserialized = self._deserialize('BandwidthSchedule', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/bandwidthSchedules/{name}'}  # type: ignore
def _delete_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/bandwidthSchedules/{name}'} # type: ignore
def begin_delete(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified bandwidth schedule.
:param device_name: The device name.
:type device_name: str
:param name: The bandwidth schedule name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/bandwidthSchedules/{name}'} # type: ignore
| |
# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad
# Complutense de Madrid (dsa-research.org)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenNebula.org test suite.
"""
__docformat__ = 'epytext'
import unittest
import sys
from libcloud.utils.py3 import httplib
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeState
from libcloud.compute.drivers.opennebula import OpenNebulaNodeDriver
from libcloud.compute.drivers.opennebula import OpenNebulaNetwork
from libcloud.compute.drivers.opennebula import OpenNebulaResponse
from libcloud.compute.drivers.opennebula import OpenNebulaNodeSize
from libcloud.compute.drivers.opennebula import ACTION
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.common.types import InvalidCredsError
from libcloud.test import MockResponse, MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.secrets import OPENNEBULA_PARAMS
class OpenNebulaCaseMixin(TestCaseMixin):
    # Shared mixin for all OpenNebula version test suites below.
    def test_reboot_node_response(self):
        # Intentionally a no-op: overrides the inherited TestCaseMixin test,
        # which does not apply to these mock-driven OpenNebula scenarios.
        pass
class OpenNebula_ResponseTests(unittest.TestCase):
    """
    Tests for OpenNebulaResponse error handling.
    """

    XML = """<?xml version="1.0" encoding="UTF-8"?><root/>"""

    def test_unauthorized_response(self):
        """
        A 401 response must raise InvalidCredsError from parse_body().

        The previous implementation only compared exception types inside an
        ``except`` clause, so if no exception was raised at all the test
        silently passed; ``assertRaises`` closes that gap.
        """
        http_response = MockResponse(httplib.UNAUTHORIZED,
                                     OpenNebula_ResponseTests.XML,
                                     headers={'content-type':
                                              'application/xml'})
        self.assertRaises(
            InvalidCredsError,
            lambda: OpenNebulaResponse(http_response, None).parse_body())
class OpenNebula_1_4_Tests(unittest.TestCase, OpenNebulaCaseMixin):
    """
    OpenNebula.org test suite for OpenNebula v1.4.
    """

    def setUp(self):
        """
        Wire the driver to the v1.4 mock HTTP transport.
        """
        OpenNebulaNodeDriver.connectionCls.conn_classes = (
            OpenNebula_1_4_MockHttp, OpenNebula_1_4_MockHttp)
        self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('1.4',))

    def _verify_public_ip(self, ip, net_id, address):
        # Shared checks for a v1.4 public IP lease: this API version never
        # populates the name and every lease has size 1.
        self.assertEqual(ip.id, net_id)
        self.assertEqual(ip.name, None)
        self.assertEqual(ip.address, address)
        self.assertEqual(ip.size, 1)

    def test_create_node(self):
        """
        Test create_node functionality.
        """
        image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
        size = NodeSize(id=1, name='small', ram=None, disk=None,
                        bandwidth=None, price=None, driver=self.driver)
        networks = [
            OpenNebulaNetwork(id=5, name='Network 5', address='192.168.0.0',
                              size=256, driver=self.driver),
            OpenNebulaNetwork(id=15, name='Network 15', address='192.168.1.0',
                              size=256, driver=self.driver),
        ]

        node = self.driver.create_node(name='Compute 5', image=image,
                                       size=size, networks=networks)

        self.assertEqual(node.id, '5')
        self.assertEqual(node.name, 'Compute 5')
        self.assertEqual(node.state,
                         OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
        self._verify_public_ip(node.public_ips[0], '5', '192.168.0.1')
        self._verify_public_ip(node.public_ips[1], '15', '192.168.1.1')
        self.assertEqual(node.private_ips, [])
        self.assertEqual(node.image.id, '5')
        self.assertEqual(node.image.extra['dev'], 'sda1')

    def test_destroy_node(self):
        """
        Test destroy_node functionality.
        """
        victim = Node(5, None, None, None, None, self.driver)
        self.assertTrue(self.driver.destroy_node(victim))

    def test_list_nodes(self):
        """
        Test list_nodes functionality.
        """
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 3)

        active = OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']
        expected = [
            ('5', 'Compute 5', active, '192.168.0.1', '192.168.1.1', '5'),
            ('15', 'Compute 15', active, '192.168.0.2', '192.168.1.2', '15'),
            ('25', 'Compute 25', NodeState.UNKNOWN,
             '192.168.0.3', '192.168.1.3', None),
        ]
        for node, (node_id, node_name, state,
                   addr0, addr1, image_id) in zip(nodes, expected):
            self.assertEqual(node.id, node_id)
            self.assertEqual(node.name, node_name)
            self.assertEqual(node.state, state)
            self._verify_public_ip(node.public_ips[0], '5', addr0)
            self._verify_public_ip(node.public_ips[1], '15', addr1)
            self.assertEqual(node.private_ips, [])
            if image_id is None:
                # The third compute carries no disk in the fixture.
                self.assertEqual(node.image, None)
            else:
                self.assertEqual(node.image.id, image_id)
                self.assertEqual(node.image.extra['dev'], 'sda1')

    def test_list_images(self):
        """
        Test list_images functionality.
        """
        images = self.driver.list_images()
        self.assertEqual(len(images), 2)
        for image, image_id in zip(images, ('5', '15')):
            self.assertEqual(image.id, image_id)
            self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
            self.assertEqual(image.extra['size'], '2048')
            self.assertEqual(image.extra['url'],
                             'file:///images/ubuntu/jaunty.img')

    def test_list_sizes(self):
        """
        Test list_sizes functionality.
        """
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 3)
        for size, (size_id, size_name) in zip(sizes, [('1', 'small'),
                                                      ('2', 'medium'),
                                                      ('3', 'large')]):
            self.assertEqual(size.id, size_id)
            self.assertEqual(size.name, size_name)
            # v1.4 exposes no capacity details on its instance types.
            self.assertEqual(size.ram, None)
            self.assertEqual(size.disk, None)
            self.assertEqual(size.bandwidth, None)
            self.assertEqual(size.price, None)

    def test_list_locations(self):
        """
        Test list_locations functionality.
        """
        locations = self.driver.list_locations()
        self.assertEqual(len(locations), 1)
        only = locations[0]
        self.assertEqual(only.id, '0')
        self.assertEqual(only.name, '')
        self.assertEqual(only.country, '')

    def test_ex_list_networks(self):
        """
        Test ex_list_networks functionality.
        """
        networks = self.driver.ex_list_networks()
        self.assertEqual(len(networks), 2)
        for network, (net_id, net_name, net_addr) in zip(
                networks, [('5', 'Network 5', '192.168.0.0'),
                           ('15', 'Network 15', '192.168.1.0')]):
            self.assertEqual(network.id, net_id)
            self.assertEqual(network.name, net_name)
            self.assertEqual(network.address, net_addr)
            self.assertEqual(network.size, '256')

    def test_ex_node_action(self):
        """
        Test ex_node_action functionality.
        """
        target = Node(5, None, None, None, None, self.driver)
        self.assertTrue(self.driver.ex_node_action(target, ACTION.STOP))
class OpenNebula_2_0_Tests(unittest.TestCase, OpenNebulaCaseMixin):
    """
    OpenNebula.org test suite for OpenNebula v2.0 through v2.2.
    """

    def setUp(self):
        """
        Wire the driver to the v2.0 mock HTTP transport.
        """
        OpenNebulaNodeDriver.connectionCls.conn_classes = (
            OpenNebula_2_0_MockHttp, OpenNebula_2_0_MockHttp)
        self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('2.0',))

    def _verify_public_ip(self, ip, net_id, net_name, address, mac):
        # Shared checks for a v2.0 public IP lease (size is always 1).
        self.assertEqual(ip.id, net_id)
        self.assertEqual(ip.name, net_name)
        self.assertEqual(ip.address, address)
        self.assertEqual(ip.size, 1)
        self.assertEqual(ip.extra['mac'], mac)

    def _verify_small_size(self, size):
        # Shared checks for the 'small' instance type attached to nodes.
        self.assertEqual(size.id, '1')
        self.assertEqual(size.name, 'small')
        self.assertEqual(size.ram, 1024)
        self.assertTrue(size.cpu is None or isinstance(size.cpu, int))
        self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
        self.assertEqual(size.cpu, 1)
        self.assertEqual(size.vcpu, None)
        self.assertEqual(size.disk, None)
        self.assertEqual(size.bandwidth, None)
        self.assertEqual(size.price, None)

    def test_create_node(self):
        """
        Test create_node functionality.
        """
        image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
        size = OpenNebulaNodeSize(id=1, name='small', ram=1024, cpu=1,
                                  disk=None, bandwidth=None, price=None,
                                  driver=self.driver)
        networks = [
            OpenNebulaNetwork(id=5, name='Network 5', address='192.168.0.0',
                              size=256, driver=self.driver),
            OpenNebulaNetwork(id=15, name='Network 15', address='192.168.1.0',
                              size=256, driver=self.driver),
        ]

        node = self.driver.create_node(name='Compute 5', image=image,
                                       size=size, networks=networks,
                                       context={'hostname': 'compute-5'})

        self.assertEqual(node.id, '5')
        self.assertEqual(node.name, 'Compute 5')
        self.assertEqual(node.state,
                         OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
        self._verify_public_ip(node.public_ips[0], '5', 'Network 5',
                               '192.168.0.1', '02:00:c0:a8:00:01')
        self._verify_public_ip(node.public_ips[1], '15', 'Network 15',
                               '192.168.1.1', '02:00:c0:a8:01:01')
        self.assertEqual(node.private_ips, [])
        # The node's size must correspond to exactly one listed size.
        self.assertTrue(len([s for s in self.driver.list_sizes()
                             if s.id == node.size.id]) == 1)
        self.assertEqual(node.image.id, '5')
        self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP')
        self.assertEqual(node.image.extra['type'], 'DISK')
        self.assertEqual(node.image.extra['target'], 'hda')
        self.assertEqual(node.extra['context']['hostname'], 'compute-5')

    def test_destroy_node(self):
        """
        Test destroy_node functionality.
        """
        victim = Node(5, None, None, None, None, self.driver)
        self.assertTrue(self.driver.destroy_node(victim))

    def test_list_nodes(self):
        """
        Test list_nodes functionality.
        """
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 3)

        active = OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']
        expected = [
            ('5', 'Compute 5', active,
             '192.168.0.1', '02:00:c0:a8:00:01',
             '192.168.1.1', '02:00:c0:a8:01:01', '5', 'compute-5'),
            ('15', 'Compute 15', active,
             '192.168.0.2', '02:00:c0:a8:00:02',
             '192.168.1.2', '02:00:c0:a8:01:02', '15', 'compute-15'),
        ]
        for node, (node_id, node_name, state, addr0, mac0, addr1, mac1,
                   image_id, hostname) in zip(nodes, expected):
            self.assertEqual(node.id, node_id)
            self.assertEqual(node.name, node_name)
            self.assertEqual(node.state, state)
            self._verify_public_ip(node.public_ips[0], '5', 'Network 5',
                                   addr0, mac0)
            self._verify_public_ip(node.public_ips[1], '15', 'Network 15',
                                   addr1, mac1)
            self.assertEqual(node.private_ips, [])
            # Size and image references must resolve to exactly one entry.
            self.assertTrue(len([size for size in self.driver.list_sizes()
                                 if size.id == node.size.id]) == 1)
            self._verify_small_size(node.size)
            self.assertTrue(len([image for image in self.driver.list_images()
                                 if image.id == node.image.id]) == 1)
            self.assertEqual(node.image.id, image_id)
            self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP')
            self.assertEqual(node.image.extra['type'], 'DISK')
            self.assertEqual(node.image.extra['target'], 'hda')
            self.assertEqual(node.extra['context']['hostname'], hostname)

        # The third compute has no size, image or context in the fixture.
        stray = nodes[2]
        self.assertEqual(stray.id, '25')
        self.assertEqual(stray.name, 'Compute 25')
        self.assertEqual(stray.state, NodeState.UNKNOWN)
        self._verify_public_ip(stray.public_ips[0], '5', 'Network 5',
                               '192.168.0.3', '02:00:c0:a8:00:03')
        self._verify_public_ip(stray.public_ips[1], '15', 'Network 15',
                               '192.168.1.3', '02:00:c0:a8:01:03')
        self.assertEqual(stray.private_ips, [])
        self.assertEqual(stray.size, None)
        self.assertEqual(stray.image, None)
        self.assertEqual(stray.extra['context'], {})

    def test_list_images(self):
        """
        Test list_images functionality.
        """
        images = self.driver.list_images()
        self.assertEqual(len(images), 2)
        for image, image_id in zip(images, ('5', '15')):
            self.assertEqual(image.id, image_id)
            self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
            self.assertEqual(image.extra['description'],
                             'Ubuntu 9.04 LAMP Description')
            self.assertEqual(image.extra['type'], 'OS')
            self.assertEqual(image.extra['size'], '2048')

    def test_list_sizes(self):
        """
        Test list_sizes functionality.
        """
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 4)

        for size, (size_id, size_name, ram, cpu) in zip(
                sizes, [('1', 'small', 1024, 1),
                        ('2', 'medium', 4096, 4),
                        ('3', 'large', 8192, 8)]):
            self.assertEqual(size.id, size_id)
            self.assertEqual(size.name, size_name)
            self.assertEqual(size.ram, ram)
            self.assertTrue(size.cpu is None or isinstance(size.cpu, int))
            self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
            self.assertEqual(size.cpu, cpu)
            self.assertEqual(size.vcpu, None)
            self.assertEqual(size.disk, None)
            self.assertEqual(size.bandwidth, None)
            self.assertEqual(size.price, None)

        # The fourth, user-definable size reports zeroed capacity.
        custom = sizes[3]
        self.assertEqual(custom.id, '4')
        self.assertEqual(custom.name, 'custom')
        self.assertEqual(custom.ram, 0)
        self.assertEqual(custom.cpu, 0)
        self.assertEqual(custom.vcpu, None)
        self.assertEqual(custom.disk, None)
        self.assertEqual(custom.bandwidth, None)
        self.assertEqual(custom.price, None)

    def test_list_locations(self):
        """
        Test list_locations functionality.
        """
        locations = self.driver.list_locations()
        self.assertEqual(len(locations), 1)
        only = locations[0]
        self.assertEqual(only.id, '0')
        self.assertEqual(only.name, '')
        self.assertEqual(only.country, '')

    def test_ex_list_networks(self):
        """
        Test ex_list_networks functionality.
        """
        networks = self.driver.ex_list_networks()
        self.assertEqual(len(networks), 2)
        for network, (net_id, net_name, net_addr) in zip(
                networks, [('5', 'Network 5', '192.168.0.0'),
                           ('15', 'Network 15', '192.168.1.0')]):
            self.assertEqual(network.id, net_id)
            self.assertEqual(network.name, net_name)
            self.assertEqual(network.address, net_addr)
            self.assertEqual(network.size, '256')
class OpenNebula_3_0_Tests(unittest.TestCase, OpenNebulaCaseMixin):
    """
    OpenNebula.org test suite for OpenNebula v3.0.
    """

    def setUp(self):
        """
        Wire the driver to the v3.0 mock HTTP transport.
        """
        OpenNebulaNodeDriver.connectionCls.conn_classes = (
            OpenNebula_3_0_MockHttp, OpenNebula_3_0_MockHttp)
        self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.0',))

    def test_ex_list_networks(self):
        """
        Test ex_list_networks functionality.
        """
        networks = self.driver.ex_list_networks()
        self.assertEqual(len(networks), 2)
        # v3.0 additionally exposes the PUBLIC flag on networks.
        for network, (net_id, net_name, net_addr, public) in zip(
                networks, [('5', 'Network 5', '192.168.0.0', 'YES'),
                           ('15', 'Network 15', '192.168.1.0', 'NO')]):
            self.assertEqual(network.id, net_id)
            self.assertEqual(network.name, net_name)
            self.assertEqual(network.address, net_addr)
            self.assertEqual(network.size, '256')
            self.assertEqual(network.extra['public'], public)

    def test_ex_node_set_save_name(self):
        """
        Test ex_node_set_save_name functionality.
        """
        image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
        node = Node(5, None, None, None, None, self.driver, image=image)
        self.assertTrue(self.driver.ex_node_set_save_name(node, 'test'))
class OpenNebula_3_2_Tests(unittest.TestCase, OpenNebulaCaseMixin):
    """
    OpenNebula.org test suite for OpenNebula v3.2.
    """

    def setUp(self):
        """
        Wire the driver to the v3.2 mock HTTP transport.
        """
        OpenNebulaNodeDriver.connectionCls.conn_classes = (
            OpenNebula_3_2_MockHttp, OpenNebula_3_2_MockHttp)
        self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.2',))

    def test_reboot_node(self):
        """
        Test reboot_node functionality.
        """
        image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
        node = Node(5, None, None, None, None, self.driver, image=image)
        self.assertTrue(self.driver.reboot_node(node))

    def test_list_sizes(self):
        """
        Test list_sizes functionality.
        """
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 3)
        # v3.2 reports CPU as a float rather than an int.
        for size, (size_id, size_name, ram, cpu) in zip(
                sizes, [('1', 'small', 1024, 1),
                        ('2', 'medium', 4096, 4),
                        ('3', 'large', 8192, 8)]):
            self.assertEqual(size.id, size_id)
            self.assertEqual(size.name, size_name)
            self.assertEqual(size.ram, ram)
            self.assertTrue(size.cpu is None or isinstance(size.cpu, float))
            self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
            self.assertEqual(size.cpu, cpu)
            self.assertEqual(size.vcpu, None)
            self.assertEqual(size.disk, None)
            self.assertEqual(size.bandwidth, None)
            self.assertEqual(size.price, None)
class OpenNebula_3_6_Tests(unittest.TestCase, OpenNebulaCaseMixin):
    """
    OpenNebula.org test suite for OpenNebula v3.6.
    """

    def setUp(self):
        """
        Wire the driver to the v3.6 mock HTTP transport.
        """
        OpenNebulaNodeDriver.connectionCls.conn_classes = (
            OpenNebula_3_6_MockHttp, OpenNebula_3_6_MockHttp)
        self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.6',))

    def test_create_volume(self):
        """
        Test create_volume functionality.
        """
        volume = self.driver.create_volume(1000, 'test-volume')
        self.assertEqual(volume.id, '5')
        self.assertEqual(volume.size, 1000)
        self.assertEqual(volume.name, 'test-volume')

    def test_destroy_volume(self):
        """
        Test destroy_volume functionality.
        """
        images = self.driver.list_images()
        self.assertEqual(len(images), 2)
        self.assertTrue(self.driver.destroy_volume(images[0]))

    def test_attach_volume(self):
        """
        Test attach_volume functionality.
        """
        node = self.driver.list_nodes()[0]
        image = self.driver.list_images()[0]
        self.assertTrue(self.driver.attach_volume(node, image, 'sda'))

    def test_detach_volume(self):
        """
        Test detach_volume functionality.
        """
        images = self.driver.list_images()
        self.assertTrue(self.driver.detach_volume(images[1]))
        # node with only a single associated image
        single_image_node = self.driver.list_nodes()[1]
        self.assertFalse(self.driver.detach_volume(single_image_node.image))

    def test_list_volumes(self):
        """
        Test list_volumes functionality.
        """
        volumes = self.driver.list_volumes()
        self.assertEqual(len(volumes), 2)
        for volume, (vol_id, vol_size, vol_name) in zip(
                volumes, [('5', 2048, 'Ubuntu 9.04 LAMP'),
                          ('15', 1024, 'Debian Sid')]):
            self.assertEqual(volume.id, vol_id)
            self.assertEqual(volume.size, vol_size)
            self.assertEqual(volume.name, vol_name)
class OpenNebula_3_8_Tests(unittest.TestCase, OpenNebulaCaseMixin):
    """
    OpenNebula.org test suite for OpenNebula v3.8.
    """

    def setUp(self):
        """
        Wire the driver to the v3.8 mock HTTP transport.
        """
        OpenNebulaNodeDriver.connectionCls.conn_classes = (
            OpenNebula_3_8_MockHttp, OpenNebula_3_8_MockHttp)
        self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.8',))

    def test_list_sizes(self):
        """
        Test list_sizes functionality.
        """
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 3)
        for size, (size_id, size_name, ram, cpu) in zip(
                sizes, [('1', 'small', 1024, 1),
                        ('2', 'medium', 4096, 4),
                        ('3', 'large', 8192, 8)]):
            self.assertEqual(size.id, size_id)
            self.assertEqual(size.name, size_name)
            self.assertEqual(size.ram, ram)
            self.assertEqual(size.cpu, cpu)
            self.assertEqual(size.vcpu, None)
            self.assertEqual(size.disk, None)
            self.assertEqual(size.bandwidth, None)
            self.assertEqual(size.price, None)
class OpenNebula_1_4_MockHttp(MockHttp):
    """
    Mock HTTP server for testing v1.4 of the OpenNebula.org compute driver.

    Pool resources answer GET (list) and POST (create); entry resources
    answer GET, PUT and DELETE. Unhandled methods fall through to None.
    """

    fixtures = ComputeFileFixtures('opennebula_1_4')

    def _compute(self, method, url, body, headers):
        """
        Compute pool resources.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('computes.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'POST':
            return (httplib.CREATED, self.fixtures.load('compute_5.xml'),
                    {}, httplib.responses[httplib.CREATED])

    def _storage(self, method, url, body, headers):
        """
        Storage pool resources.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('storage.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'POST':
            return (httplib.CREATED, self.fixtures.load('disk_5.xml'),
                    {}, httplib.responses[httplib.CREATED])

    def _network(self, method, url, body, headers):
        """
        Network pool resources.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('networks.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'POST':
            return (httplib.CREATED, self.fixtures.load('network_5.xml'),
                    {}, httplib.responses[httplib.CREATED])

    def _compute_5(self, method, url, body, headers):
        """
        Compute entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('compute_5.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'PUT':
            return (httplib.ACCEPTED, "", {},
                    httplib.responses[httplib.ACCEPTED])
        if method == 'DELETE':
            return (httplib.OK, "", {},
                    httplib.responses[httplib.OK])

    def _compute_15(self, method, url, body, headers):
        """
        Compute entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('compute_15.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'PUT':
            return (httplib.ACCEPTED, "", {},
                    httplib.responses[httplib.ACCEPTED])
        if method == 'DELETE':
            return (httplib.OK, "", {},
                    httplib.responses[httplib.OK])

    def _compute_25(self, method, url, body, headers):
        """
        Compute entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('compute_25.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'PUT':
            return (httplib.ACCEPTED, "", {},
                    httplib.responses[httplib.ACCEPTED])
        if method == 'DELETE':
            return (httplib.OK, "", {},
                    httplib.responses[httplib.OK])

    def _storage_5(self, method, url, body, headers):
        """
        Storage entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('disk_5.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'DELETE':
            return (httplib.OK, "", {},
                    httplib.responses[httplib.OK])

    def _storage_15(self, method, url, body, headers):
        """
        Storage entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('disk_15.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'DELETE':
            return (httplib.OK, "", {},
                    httplib.responses[httplib.OK])

    def _network_5(self, method, url, body, headers):
        """
        Network entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('network_5.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'DELETE':
            return (httplib.OK, "", {},
                    httplib.responses[httplib.OK])

    def _network_15(self, method, url, body, headers):
        """
        Network entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('network_15.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'DELETE':
            return (httplib.OK, "", {},
                    httplib.responses[httplib.OK])
class OpenNebula_2_0_MockHttp(MockHttp):
    """
    Mock HTTP server for testing v2.0 through v3.2 of the OpenNebula.org
    compute driver.

    Each handler resolves the HTTP method to a status/body pair and builds
    the response tuple once at the end; unhandled methods return None.
    """

    fixtures = ComputeFileFixtures('opennebula_2_0')

    def _compute(self, method, url, body, headers):
        """
        Compute pool resources.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('compute_collection.xml')
        elif method == 'POST':
            status = httplib.CREATED
            body = self.fixtures.load('compute_5.xml')
        else:
            return None
        return (status, body, {}, httplib.responses[status])

    def _storage(self, method, url, body, headers):
        """
        Storage pool resources.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('storage_collection.xml')
        elif method == 'POST':
            status = httplib.CREATED
            body = self.fixtures.load('storage_5.xml')
        else:
            return None
        return (status, body, {}, httplib.responses[status])

    def _network(self, method, url, body, headers):
        """
        Network pool resources.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('network_collection.xml')
        elif method == 'POST':
            status = httplib.CREATED
            body = self.fixtures.load('network_5.xml')
        else:
            return None
        return (status, body, {}, httplib.responses[status])

    def _compute_5(self, method, url, body, headers):
        """
        Compute entry resource.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('compute_5.xml')
        elif method == 'PUT':
            status = httplib.ACCEPTED
            body = ""
        elif method == 'DELETE':
            status = httplib.NO_CONTENT
            body = ""
        else:
            return None
        return (status, body, {}, httplib.responses[status])

    def _compute_15(self, method, url, body, headers):
        """
        Compute entry resource.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('compute_15.xml')
        elif method == 'PUT':
            status = httplib.ACCEPTED
            body = ""
        elif method == 'DELETE':
            status = httplib.NO_CONTENT
            body = ""
        else:
            return None
        return (status, body, {}, httplib.responses[status])

    def _compute_25(self, method, url, body, headers):
        """
        Compute entry resource.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('compute_25.xml')
        elif method == 'PUT':
            status = httplib.ACCEPTED
            body = ""
        elif method == 'DELETE':
            status = httplib.NO_CONTENT
            body = ""
        else:
            return None
        return (status, body, {}, httplib.responses[status])

    def _storage_5(self, method, url, body, headers):
        """
        Storage entry resource.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('storage_5.xml')
        elif method == 'DELETE':
            status = httplib.NO_CONTENT
            body = ""
        else:
            return None
        return (status, body, {}, httplib.responses[status])

    def _storage_15(self, method, url, body, headers):
        """
        Storage entry resource.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('storage_15.xml')
        elif method == 'DELETE':
            status = httplib.NO_CONTENT
            body = ""
        else:
            return None
        return (status, body, {}, httplib.responses[status])

    def _network_5(self, method, url, body, headers):
        """
        Network entry resource.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('network_5.xml')
        elif method == 'DELETE':
            status = httplib.NO_CONTENT
            body = ""
        else:
            return None
        return (status, body, {}, httplib.responses[status])

    def _network_15(self, method, url, body, headers):
        """
        Network entry resource.
        """
        if method == 'GET':
            status = httplib.OK
            body = self.fixtures.load('network_15.xml')
        elif method == 'DELETE':
            status = httplib.NO_CONTENT
            body = ""
        else:
            return None
        return (status, body, {}, httplib.responses[status])
class OpenNebula_3_0_MockHttp(OpenNebula_2_0_MockHttp):
    """
    Mock HTTP server for testing v3.0 of the OpenNebula.org compute driver.

    Overrides only the network handlers; everything else is inherited from
    the v2.0 mock.
    """

    fixtures_3_0 = ComputeFileFixtures('opennebula_3_0')

    def _network(self, method, url, body, headers):
        """
        Network pool resources.
        """
        if method == 'GET':
            return (httplib.OK,
                    self.fixtures_3_0.load('network_collection.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'POST':
            # NOTE(review): POST answers from the inherited v2.0 fixture set
            # (self.fixtures), not fixtures_3_0 — assumed intentional sharing.
            return (httplib.CREATED, self.fixtures.load('network_5.xml'),
                    {}, httplib.responses[httplib.CREATED])

    def _network_5(self, method, url, body, headers):
        """
        Network entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures_3_0.load('network_5.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'DELETE':
            return (httplib.NO_CONTENT, "", {},
                    httplib.responses[httplib.NO_CONTENT])

    def _network_15(self, method, url, body, headers):
        """
        Network entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures_3_0.load('network_15.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'DELETE':
            return (httplib.NO_CONTENT, "", {},
                    httplib.responses[httplib.NO_CONTENT])
class OpenNebula_3_2_MockHttp(OpenNebula_3_0_MockHttp):
    """
    Mock HTTP server for testing v3.2 of the OpenNebula.org compute driver.

    Adds the instance-type pool endpoint on top of the v3.0 mock.
    """

    fixtures_3_2 = ComputeFileFixtures('opennebula_3_2')

    def _compute_5(self, method, url, body, headers):
        """
        Compute entry resource.
        """
        if method == 'GET':
            # compute fixtures come from the inherited v2.0 fixture set
            return (httplib.OK, self.fixtures.load('compute_5.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'PUT':
            return (httplib.ACCEPTED, "", {},
                    httplib.responses[httplib.ACCEPTED])
        if method == 'DELETE':
            return (httplib.NO_CONTENT, "", {},
                    httplib.responses[httplib.NO_CONTENT])

    def _instance_type(self, method, url, body, headers):
        """
        Instance type pool.
        """
        if method == 'GET':
            return (httplib.OK,
                    self.fixtures_3_2.load('instance_type_collection.xml'),
                    {}, httplib.responses[httplib.OK])
class OpenNebula_3_6_MockHttp(OpenNebula_3_2_MockHttp):
    """
    Mock HTTP server for testing v3.6 of the OpenNebula.org compute driver.
    """

    fixtures_3_6 = ComputeFileFixtures('opennebula_3_6')

    def _storage(self, method, url, body, headers):
        # Storage pool: GET lists the inherited v2.0 collection fixture,
        # POST answers with the v3.6 entry fixture.
        if method == 'GET':
            return (httplib.OK, self.fixtures.load('storage_collection.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'POST':
            return (httplib.CREATED, self.fixtures_3_6.load('storage_5.xml'),
                    {}, httplib.responses[httplib.CREATED])

    def _compute_5(self, method, url, body, headers):
        # Compute entry resource for id 5.
        if method == 'GET':
            return (httplib.OK, self.fixtures_3_6.load('compute_5.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'PUT':
            return (httplib.ACCEPTED, "", {},
                    httplib.responses[httplib.ACCEPTED])
        if method == 'DELETE':
            return (httplib.NO_CONTENT, "", {},
                    httplib.responses[httplib.NO_CONTENT])

    def _compute_5_action(self, method, url, body, headers):
        # Compute action endpoint; every answer carries the compute 5 fixture.
        response_body = self.fixtures_3_6.load('compute_5.xml')
        if method == 'POST':
            return (httplib.ACCEPTED, response_body, {},
                    httplib.responses[httplib.ACCEPTED])
        if method == 'GET':
            return (httplib.OK, response_body, {},
                    httplib.responses[httplib.OK])

    def _compute_15(self, method, url, body, headers):
        # Compute entry resource for id 15.
        if method == 'GET':
            return (httplib.OK, self.fixtures_3_6.load('compute_15.xml'),
                    {}, httplib.responses[httplib.OK])
        if method == 'PUT':
            return (httplib.ACCEPTED, "", {},
                    httplib.responses[httplib.ACCEPTED])
        if method == 'DELETE':
            return (httplib.NO_CONTENT, "", {},
                    httplib.responses[httplib.NO_CONTENT])

    def _storage_10(self, method, url, body, headers):
        """
        Storage entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures_3_6.load('disk_10.xml'),
                    {}, httplib.responses[httplib.OK])

    def _storage_15(self, method, url, body, headers):
        """
        Storage entry resource.
        """
        if method == 'GET':
            return (httplib.OK, self.fixtures_3_6.load('disk_15.xml'),
                    {}, httplib.responses[httplib.OK])
class OpenNebula_3_8_MockHttp(OpenNebula_3_2_MockHttp):
    """
    Mock HTTP server for testing v3.8 of the OpenNebula.org compute driver.

    Serves the instance-type pool plus the three named instance types, each
    of which only answers GET.
    """

    fixtures_3_8 = ComputeFileFixtures('opennebula_3_8')

    def _instance_type(self, method, url, body, headers):
        """
        Instance type pool.
        """
        if method == 'GET':
            return (httplib.OK,
                    self.fixtures_3_8.load('instance_type_collection.xml'),
                    {}, httplib.responses[httplib.OK])

    def _instance_type_small(self, method, url, body, headers):
        """
        Small instance type.
        """
        if method == 'GET':
            return (httplib.OK,
                    self.fixtures_3_8.load('instance_type_small.xml'),
                    {}, httplib.responses[httplib.OK])

    def _instance_type_medium(self, method, url, body, headers):
        """
        Medium instance type pool.
        """
        if method == 'GET':
            return (httplib.OK,
                    self.fixtures_3_8.load('instance_type_medium.xml'),
                    {}, httplib.responses[httplib.OK])

    def _instance_type_large(self, method, url, body, headers):
        """
        Large instance type pool.
        """
        if method == 'GET':
            return (httplib.OK,
                    self.fixtures_3_8.load('instance_type_large.xml'),
                    {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
    # run the test suite directly and propagate unittest's exit status
    sys.exit(unittest.main())
| |
from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
import os
import sys
from abc import ABCMeta, abstractmethod
from argparse import ArgumentTypeError
from ast import literal_eval
from collections import OrderedDict
from textwrap import dedent
from six import add_metaclass
from virtualenv.discovery.cached_py_info import LogCmd
from virtualenv.info import WIN_CPYTHON_2
from virtualenv.util.path import Path, safe_delete
from virtualenv.util.six import ensure_str, ensure_text
from virtualenv.util.subprocess import run_cmd
from virtualenv.version import __version__
from .pyenv_cfg import PyEnvCfg
# directory that contains this module; used to locate the bundled debug script
HERE = Path(os.path.abspath(__file__)).parent
# script run inside the created environment to collect debug information
DEBUG_SCRIPT = HERE / "debug.py"
class CreatorMeta(object):
    """Container for creator metadata; ``error`` carries a failure message if set."""
    def __init__(self):
        # no error recorded by default; presumably set when creation is not
        # possible — confirm against callers of this class
        self.error = None
@add_metaclass(ABCMeta)
class Creator(object):
    """A class that given a python Interpreter creates a virtual environment"""
    def __init__(self, options, interpreter):
        """Construct a new virtual environment creator.
        :param options: the CLI option as parsed from :meth:`add_parser_arguments`
        :param interpreter: the interpreter to create virtual environment from
        """
        self.interpreter = interpreter
        # cached result of the ``debug`` property; populated lazily
        self._debug = None
        self.dest = Path(options.dest)
        self.clear = options.clear
        self.no_vcs_ignore = options.no_vcs_ignore
        self.pyenv_cfg = PyEnvCfg.from_folder(self.dest)
        self.app_data = options.app_data
        self.env = options.env
    def __repr__(self):
        # route through __unicode__ so Python 2 and Python 3 render the same text
        return ensure_str(self.__unicode__())
    def __unicode__(self):
        return "{}({})".format(self.__class__.__name__, ", ".join("{}={}".format(k, v) for k, v in self._args()))
    def _args(self):
        # (name, value) pairs included in the repr of this creator
        return [
            ("dest", ensure_text(str(self.dest))),
            ("clear", self.clear),
            ("no_vcs_ignore", self.no_vcs_ignore),
        ]
    @classmethod
    def can_create(cls, interpreter):
        """Determine if we can create a virtual environment.
        :param interpreter: the interpreter in question
        :return: ``None`` if we can't create, any other object otherwise that will be forwarded to \
        :meth:`add_parser_arguments`
        """
        return True
    @classmethod
    def add_parser_arguments(cls, parser, interpreter, meta, app_data):
        """Add CLI arguments for the creator.
        :param parser: the CLI parser
        :param app_data: the application data folder
        :param interpreter: the interpreter we're asked to create virtual environment for
        :param meta: value as returned by :meth:`can_create`
        """
        # positional destination is validated eagerly by validate_dest
        parser.add_argument(
            "dest",
            help="directory to create virtualenv at",
            type=cls.validate_dest,
        )
        parser.add_argument(
            "--clear",
            dest="clear",
            action="store_true",
            help="remove the destination directory if exist before starting (will overwrite files otherwise)",
            default=False,
        )
        parser.add_argument(
            "--no-vcs-ignore",
            dest="no_vcs_ignore",
            action="store_true",
            help="don't create VCS ignore directive in the destination directory",
            default=False,
        )
    @abstractmethod
    def create(self):
        """Perform the virtual environment creation."""
        raise NotImplementedError
    @classmethod
    def validate_dest(cls, raw_value):
        """No path separator in the path, valid chars and must be write-able"""
        def non_write_able(dest, value):
            # report the deepest shared ancestor that refuses write access
            common = Path(*os.path.commonprefix([value.parts, dest.parts]))
            raise ArgumentTypeError(
                "the destination {} is not write-able at {}".format(dest.relative_to(common), common),
            )
        # the file system must be able to encode
        # note in newer CPython this is always utf-8 https://www.python.org/dev/peps/pep-0529/
        encoding = sys.getfilesystemencoding()
        refused = OrderedDict()
        kwargs = {"errors": "ignore"} if encoding != "mbcs" else {}
        for char in ensure_text(raw_value):
            try:
                # round-trip each character; a character that does not survive
                # encode/decode cannot be represented by the filesystem codec
                trip = char.encode(encoding, **kwargs).decode(encoding)
                if trip == char:
                    continue
                raise ValueError(trip)
            except ValueError:
                refused[char] = None
        if refused:
            raise ArgumentTypeError(
                "the file system codec ({}) cannot handle characters {!r} within {!r}".format(
                    encoding,
                    "".join(refused.keys()),
                    raw_value,
                ),
            )
        # os.pathsep would split the value inside generated PATH entries
        if os.pathsep in raw_value:
            raise ArgumentTypeError(
                "destination {!r} must not contain the path separator ({}) as this would break "
                "the activation scripts".format(raw_value, os.pathsep),
            )
        value = Path(raw_value)
        if value.exists() and value.is_file():
            raise ArgumentTypeError("the destination {} already exists and is a file".format(value))
        if (3, 3) <= sys.version_info <= (3, 6):
            # pre 3.6 resolve is always strict, aka must exists, sidestep by using os.path operation
            dest = Path(os.path.realpath(raw_value))
        else:
            dest = Path(os.path.abspath(str(value))).resolve() # on Windows absolute does not imply resolve so use both
        value = dest
        # walk up towards the filesystem root until an existing ancestor is
        # found; it must be writable or the destination is rejected
        while dest:
            if dest.exists():
                if os.access(ensure_text(str(dest)), os.W_OK):
                    break
                else:
                    non_write_able(dest, value)
            base, _ = dest.parent, dest.name
            if base == dest:
                non_write_able(dest, value) # pragma: no cover
            dest = base
        return str(value)
    def run(self):
        """Create the environment: optionally clear, create, then configure."""
        if self.dest.exists() and self.clear:
            logging.debug("delete %s", self.dest)
            safe_delete(self.dest)
        self.create()
        self.set_pyenv_cfg()
        if not self.no_vcs_ignore:
            self.setup_ignore_vcs()
    def set_pyenv_cfg(self):
        """Populate the pyvenv.cfg metadata describing this environment."""
        self.pyenv_cfg.content = OrderedDict()
        self.pyenv_cfg["home"] = self.interpreter.system_exec_prefix
        self.pyenv_cfg["implementation"] = self.interpreter.implementation
        self.pyenv_cfg["version_info"] = ".".join(str(i) for i in self.interpreter.version_info)
        self.pyenv_cfg["virtualenv"] = __version__
    def setup_ignore_vcs(self):
        """Generate ignore instructions for version control systems."""
        # mark this folder to be ignored by VCS, handle https://www.python.org/dev/peps/pep-0610/#registered-vcs
        git_ignore = self.dest / ".gitignore"
        if not git_ignore.exists():
            # never overwrite a user-provided .gitignore
            git_ignore.write_text(
                dedent(
                    """
                    # created by virtualenv automatically
                    *
                    """,
                ).lstrip(),
            )
        # Mercurial - does not support the .hgignore file inside a subdirectory directly, but only if included via the
        # subinclude directive from root, at which point on might as well ignore the directory itself, see
        # https://www.selenic.com/mercurial/hgignore.5.html for more details
        # Bazaar - does not support ignore files in sub-directories, only at root level via .bzrignore
        # Subversion - does not support ignore files, requires direct manipulation with the svn tool
    @property
    def debug(self):
        """
        :return: debug information about the virtual environment (only valid after :meth:`create` has run)
        """
        # computed once and cached; requires self.exe, which subclasses provide
        if self._debug is None and self.exe is not None:
            self._debug = get_env_debug_info(self.exe, self.debug_script(), self.app_data, self.env)
        return self._debug
    # noinspection PyMethodMayBeStatic
    def debug_script(self):
        """Path of the script executed to gather debug information."""
        return DEBUG_SCRIPT
def get_env_debug_info(env_exe, debug_script, app_data, env):
    """Run *debug_script* with *env_exe* and return its parsed report.

    Any failure to parse the output yields a diagnostic dict with the raw
    stdout/stderr and return code instead of raising.
    """
    env = env.copy()
    # NOTE(review): this sanitized env copy is not passed to run_cmd below —
    # confirm whether run_cmd picks it up implicitly
    env.pop(str("PYTHONPATH"), None)
    with app_data.ensure_extracted(debug_script) as extracted:
        cmd = [str(env_exe), str(extracted)]
        if WIN_CPYTHON_2:
            cmd = [ensure_text(part) for part in cmd]
        logging.debug(str("debug via %r"), LogCmd(cmd))
        code, out, err = run_cmd(cmd)
    # noinspection PyBroadException
    try:
        # a failing run prints a Python literal, a successful run prints JSON
        result = literal_eval(out) if code != 0 else json.loads(out)
        if err:
            result["err"] = err
    except Exception as exception:
        return {"out": out, "err": err, "returncode": code, "exception": repr(exception)}
    if "sys" in result and "path" in result["sys"]:
        # drop the first sys.path entry reported by the debug script
        del result["sys"]["path"][0]
    return result
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the nullable ``other_type`` column."""
        # Adding field 'Prestataire.other_type'
        db.add_column(u'famille_prestataire', 'other_type',
                      self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'Prestataire.other_type'
db.delete_column(u'famille_prestataire', 'other_type')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'famille.downloadablefile': {
'Meta': {'object_name': 'DownloadableFile'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'file_content': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100'}),
'file_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'famille.enfant': {
'Meta': {'object_name': 'Enfant'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'e_birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'birthday'", 'blank': 'True'}),
'e_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_column': "'name'"}),
'e_school': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_column': "'school'", 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enfants'", 'to': "orm['famille.Famille']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.famille': {
'Meta': {'object_name': 'Famille'},
'animaux': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'France'", 'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cuisine': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'devoirs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diploma': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '100'}),
'enfant_malade': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'experience_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'experience_year': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'geolocation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['famille.Geolocation']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipn': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['ipn.PayPalIPN']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'langue': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'menage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'non_fumeur': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'permis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan': ('django.db.models.fields.CharField', [], {'default': "'basic'", 'max_length': '20', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'profile_pic': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'psc1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pseudo': ('django.db.models.fields.CharField', [], {'max_length': '60', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'repassage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'studies': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'tarif': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tel': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'tel_visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'type_attente_famille': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'type_garde': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'type_presta': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'visibility_family': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visibility_global': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visibility_prestataire': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'famille.famillefavorite': {
'Meta': {'object_name': 'FamilleFavorite'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favorites'", 'to': "orm['famille.Famille']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.familleplanning': {
'Meta': {'object_name': 'FamillePlanning'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'planning'", 'to': "orm['famille.Famille']"}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schedule': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Schedule']", 'symmetrical': 'False'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'weekday': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Weekday']", 'symmetrical': 'False'})
},
'famille.familleratings': {
'Meta': {'object_name': 'FamilleRatings'},
'amability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'by': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ponctuality': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'reliability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'serious': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['famille.Famille']"})
},
'famille.geolocation': {
'Meta': {'object_name': 'Geolocation'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'has_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.prestataire': {
'Meta': {'object_name': 'Prestataire'},
'animaux': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'France'", 'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cuisine': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'devoirs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diploma': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '100'}),
'enfant_malade': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'experience_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'experience_year': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'geolocation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['famille.Geolocation']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipn': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['ipn.PayPalIPN']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'level_de': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'level_en': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'level_es': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'level_it': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'menage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'nationality': ('django.db.models.fields.CharField', [], {'max_length': '70', 'null': 'True', 'blank': 'True'}),
'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'non_fumeur': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'other_language': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'other_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'permis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan': ('django.db.models.fields.CharField', [], {'default': "'basic'", 'max_length': '20', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'profile_pic': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'psc1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pseudo': ('django.db.models.fields.CharField', [], {'max_length': '60', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'repassage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'restrictions': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'resume': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'studies': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'tarif': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tel': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'tel_visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'type_garde': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'visibility_family': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visibility_global': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visibility_prestataire': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'famille.prestatairefavorite': {
'Meta': {'object_name': 'PrestataireFavorite'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favorites'", 'to': "orm['famille.Prestataire']"}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.prestataireplanning': {
'Meta': {'object_name': 'PrestatairePlanning'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'planning'", 'to': "orm['famille.Prestataire']"}),
'schedule': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Schedule']", 'symmetrical': 'False'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'weekday': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Weekday']", 'symmetrical': 'False'})
},
'famille.prestataireratings': {
'Meta': {'object_name': 'PrestataireRatings'},
'amability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'by': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ponctuality': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'reliability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'serious': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['famille.Prestataire']"})
},
'famille.reference': {
'Meta': {'object_name': 'Reference'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_from': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_to': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'garde': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'references'", 'to': "orm['famille.Prestataire']"}),
'referenced_user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'reference_of'", 'unique': 'True', 'null': 'True', 'to': "orm['famille.Famille']"}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.schedule': {
'Meta': {'object_name': 'Schedule'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'famille.weekday': {
'Meta': {'object_name': 'Weekday'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'ipn.paypalipn': {
'Meta': {'object_name': 'PayPalIPN', 'db_table': "u'paypal_ipn'"},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_country_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_status': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount_per_cycle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'auction_buyer_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'auction_closing_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'auction_multi_item': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'auth_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'auth_exp': ('django.db.models.fields.CharField', [], {'max_length': '28', 'blank': 'True'}),
'auth_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'auth_status': ('django.db.models.fields.CharField', [], {'max_length': '9', 'blank': 'True'}),
'business': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'case_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'case_id': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
'case_type': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'charset': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'custom': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exchange_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flag_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'flag_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'for_auction': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'from_view': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'handling_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_payment_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'invoice': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'ipaddress': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'blank': 'True'}),
'item_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'item_number': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'mc_amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_currency': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'mc_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_handling': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'memo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'next_payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notify_version': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'num_cart_items': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'option_name1': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'option_name2': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'outstanding_balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'parent_txn_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'payer_business_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_email': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_id': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
'payer_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'payment_cycle': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'payment_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'payment_status': ('django.db.models.fields.CharField', [], {'max_length': '17', 'blank': 'True'}),
'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'pending_reason': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
'period1': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period3': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'product_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'profile_status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'protection_eligibility': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'query': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reason_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'reattempt': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '127', 'blank': 'True'}),
'receiver_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'recur_times': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'recurring': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'recurring_payment_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'remaining_settle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'residence_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'retry_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rp_invoice_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'settle_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'settle_currency': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'subscr_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_effective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'test_ipn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'transaction_entity': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'transaction_subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'txn_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '19', 'blank': 'True'}),
'txn_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'verify_sign': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['famille']
| |
from .Child import Child
from .Node import Node # noqa: I201
EXPR_NODES = [
# An inout expression.
# &x
Node('InOutExpr', kind='Expr',
children=[
Child('Ampersand', kind='PrefixAmpersandToken'),
Child('Expression', kind='Expr'),
]),
# A #column expression.
Node('PoundColumnExpr', kind='Expr',
children=[
Child('PoundColumn', kind='PoundColumnToken'),
]),
Node('TupleExprElementList', kind='SyntaxCollection',
element='TupleExprElement'),
Node('ArrayElementList', kind='SyntaxCollection',
element='ArrayElement'),
Node('DictionaryElementList', kind='SyntaxCollection',
element='DictionaryElement'),
Node('StringLiteralSegments', kind='SyntaxCollection',
element='Syntax', element_name='Segment',
element_choices=['StringSegment', 'ExpressionSegment']),
# The try operator.
# try foo()
# try? foo()
# try! foo()
Node('TryExpr', kind='Expr',
children=[
Child('TryKeyword', kind='TryToken'),
Child('QuestionOrExclamationMark', kind='Token',
is_optional=True,
token_choices=[
'PostfixQuestionMarkToken',
'ExclamationMarkToken',
]),
Child('Expression', kind='Expr'),
]),
# The await operator.
# await foo()
Node('AwaitExpr', kind='Expr',
children=[
Child('AwaitKeyword', kind='IdentifierToken',
classification='Keyword',
text_choices=['await']),
Child('Expression', kind='Expr'),
]),
# declname-arguments -> '(' declname-argument-list ')'
# declname-argument-list -> declname-argument*
# declname-argument -> identifier ':'
Node('DeclNameArgument', kind='Syntax',
children=[
Child('Name', kind='Token'),
Child('Colon', kind='ColonToken'),
]),
Node('DeclNameArgumentList', kind='SyntaxCollection',
element='DeclNameArgument'),
Node('DeclNameArguments', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Arguments', kind='DeclNameArgumentList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken'),
]),
# An identifier expression.
Node('IdentifierExpr', kind='Expr',
children=[
Child('Identifier', kind='Token',
token_choices=[
'IdentifierToken',
'SelfToken',
'CapitalSelfToken',
'DollarIdentifierToken',
'SpacedBinaryOperatorToken',
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True),
]),
# An 'super' expression.
Node('SuperRefExpr', kind='Expr',
children=[
Child('SuperKeyword', kind='SuperToken'),
]),
# A nil expression.
Node('NilLiteralExpr', kind='Expr',
children=[
Child('NilKeyword', kind='NilToken'),
]),
# A _ expression.
Node('DiscardAssignmentExpr', kind='Expr',
children=[
Child('Wildcard', kind='WildcardToken'),
]),
# An = expression.
Node('AssignmentExpr', kind='Expr',
children=[
Child('AssignToken', kind='EqualToken'),
]),
# A flat list of expressions before sequence folding, e.g. 1 + 2 + 3.
Node('SequenceExpr', kind='Expr',
children=[
Child('Elements', kind='ExprList',
collection_element_name='Element'),
]),
Node('ExprList', kind='SyntaxCollection',
element='Expr',
element_name='Expression',
description='''
A list of expressions connected by operators. This list is contained
by a `SequenceExprSyntax`.
'''),
# A #line expression.
Node('PoundLineExpr', kind='Expr',
children=[
Child('PoundLine', kind='PoundLineToken'),
]),
# A #file expression.
Node('PoundFileExpr', kind='Expr',
children=[
Child('PoundFile', kind='PoundFileToken'),
]),
# A #fileID expression.
Node('PoundFileIDExpr', kind='Expr',
children=[
Child('PoundFileID', kind='PoundFileIDToken'),
]),
# A #filePath expression.
Node('PoundFilePathExpr', kind='Expr',
children=[
Child('PoundFilePath', kind='PoundFilePathToken'),
]),
# A #function expression.
Node('PoundFunctionExpr', kind='Expr',
children=[
Child('PoundFunction', kind='PoundFunctionToken'),
]),
# A #dsohandle expression.
Node('PoundDsohandleExpr', kind='Expr',
children=[
Child('PoundDsohandle', kind='PoundDsohandleToken'),
]),
# symbolic-reference-expression -> identifier generic-argument-clause?
Node('SymbolicReferenceExpr', kind='Expr',
children=[
Child('Identifier', kind='IdentifierToken'),
Child('GenericArgumentClause', kind='GenericArgumentClause',
is_optional=True),
]),
# A prefix operator expression.
# -x
# !true
Node('PrefixOperatorExpr', kind='Expr',
children=[
Child('OperatorToken', kind='PrefixOperatorToken',
is_optional=True),
Child('PostfixExpression', kind='Expr'),
]),
# An operator like + or -.
# NOTE: This appears only in SequenceExpr.
Node('BinaryOperatorExpr', kind='Expr',
children=[
Child('OperatorToken', kind='BinaryOperatorToken'),
]),
# arrow-expr -> 'async'? 'throws'? '->'
# NOTE: This appears only in SequenceExpr.
Node('ArrowExpr', kind='Expr',
children=[
Child('AsyncKeyword', kind='IdentifierToken',
classification='Keyword',
text_choices=['async'], is_optional=True),
Child('ThrowsToken', kind='ThrowsToken',
is_optional=True),
Child('ArrowToken', kind='ArrowToken'),
]),
# A floating-point literal
# 4.0
# -3.9
# +4e20
Node('FloatLiteralExpr', kind='Expr',
children=[
Child('FloatingDigits', kind='FloatingLiteralToken'),
]),
Node('TupleExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('ElementList', kind='TupleExprElementList',
collection_element_name='Element'),
Child('RightParen', kind='RightParenToken'),
]),
# Array literal, e.g. [1, 2, 3]
Node('ArrayExpr', kind='Expr',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Elements', kind='ArrayElementList',
collection_element_name='Element'),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
# Dictionary literal, e.g. [1:1, 2:2, 3:3]
Node('DictionaryExpr', kind='Expr',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Content', kind='Syntax',
node_choices=[
Child('Colon', kind='ColonToken'),
Child('Elements', kind='DictionaryElementList'),
]),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
# An element inside a tuple element list
Node('TupleExprElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='Token',
is_optional=True,
token_choices=[
'IdentifierToken',
'WildcardToken'
]),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# element inside an array expression: expression ','?
Node('ArrayElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# element inside an array expression: expression ','?
Node('DictionaryElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('KeyExpression', kind='Expr'),
Child('Colon', kind='ColonToken'),
Child('ValueExpression', kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# An integer literal.
# 3
# +3_400
# +0x4f
Node('IntegerLiteralExpr', kind='Expr',
children=[
Child('Digits', kind='IntegerLiteralToken'),
]),
# true or false
Node('BooleanLiteralExpr', kind='Expr',
children=[
Child("BooleanLiteral", kind='Token',
token_choices=[
'TrueToken',
'FalseToken',
])
]),
# a ? 1 : 0
Node('TernaryExpr', kind='Expr',
children=[
Child("ConditionExpression", kind='Expr'),
Child("QuestionMark", kind='InfixQuestionMarkToken'),
Child("FirstChoice", kind='Expr'),
Child("ColonMark", kind='ColonToken'),
Child("SecondChoice", kind='Expr')
]),
# expr?.name
Node('MemberAccessExpr', kind='Expr',
children=[
# The base needs to be optional to parse expressions in key paths
# like \.a
Child("Base", kind='Expr', is_optional=True),
Child("Dot", kind='Token',
token_choices=[
'PeriodToken', 'PrefixPeriodToken'
]),
# Name could be 'self'
Child("Name", kind='Token'),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True),
]),
# is TypeName
Node('IsExpr', kind='Expr',
children=[
Child("IsTok", kind='IsToken'),
Child("TypeName", kind='Type')
]),
# as TypeName
Node('AsExpr', kind='Expr',
children=[
Child("AsTok", kind='AsToken'),
Child("QuestionOrExclamationMark", kind='Token',
is_optional=True,
token_choices=[
'PostfixQuestionMarkToken',
'ExclamationMarkToken',
]),
Child("TypeName", kind='Type')
]),
# Type
Node('TypeExpr', kind='Expr',
children=[
Child('Type', kind='Type'),
]),
Node('ClosureCaptureItem', kind='Syntax',
traits=['WithTrailingComma'],
children=[
# FIXME: Add a 'CaptureSpecifier' node kind for `Specifier`.
Child("Specifier", kind='TokenList',
collection_element_name='SpecifierToken', is_optional=True),
Child("Name", kind='IdentifierToken', is_optional=True),
Child('AssignToken', kind='EqualToken', is_optional=True),
Child("Expression", kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('ClosureCaptureItemList', kind='SyntaxCollection',
element='ClosureCaptureItem'),
Node('ClosureCaptureSignature', kind='Syntax',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Items', kind='ClosureCaptureItemList',
collection_element_name='Item', is_optional=True),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
Node('ClosureParam', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Name', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
]),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# a, b, c
Node('ClosureParamList', kind='SyntaxCollection', element='ClosureParam'),
Node('ClosureSignature', kind='Syntax',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Capture', kind='ClosureCaptureSignature',
is_optional=True),
Child('Input', kind='Syntax', is_optional=True,
node_choices=[
Child('SimpleInput', kind='ClosureParamList'),
Child('Input', kind='ParameterClause'),
]),
Child('AsyncKeyword', kind='IdentifierToken',
classification='Keyword',
text_choices=['async'], is_optional=True),
Child('ThrowsTok', kind='ThrowsToken', is_optional=True),
Child('Output', kind='ReturnClause', is_optional=True),
Child('InTok', kind='InToken'),
]),
Node('ClosureExpr', kind='Expr',
traits=['Braced', 'WithStatements'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Signature', kind='ClosureSignature', is_optional=True),
Child('Statements', kind='CodeBlockItemList',
collection_element_name='Statement'),
Child('RightBrace', kind='RightBraceToken'),
]),
# unresolved-pattern-expr -> pattern
Node('UnresolvedPatternExpr', kind='Expr',
children=[
Child('Pattern', kind='Pattern'),
]),
# trailing-closure-element -> identifier ':' closure-expression
Node('MultipleTrailingClosureElement', kind='Syntax',
children=[
Child('Label', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken'
]),
Child('Colon', kind='ColonToken'),
Child('Closure', kind='ClosureExpr'),
]),
Node('MultipleTrailingClosureElementList', kind='SyntaxCollection',
element='MultipleTrailingClosureElement'),
# call-expr -> expr '(' call-argument-list ')' closure-expr?
# | expr closure-expr
Node('FunctionCallExpr', kind='Expr',
children=[
Child('CalledExpression', kind='Expr'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('ArgumentList', kind='TupleExprElementList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken',
is_optional=True),
Child('TrailingClosure', kind='ClosureExpr',
is_optional=True),
Child('AdditionalTrailingClosures',
kind='MultipleTrailingClosureElementList',
collection_element_name='AdditionalTrailingClosure',
is_optional=True),
]),
# subscript-expr -> expr '[' call-argument-list ']' closure-expr?
Node('SubscriptExpr', kind='Expr',
children=[
Child('CalledExpression', kind='Expr'),
Child('LeftBracket', kind='LeftSquareBracketToken'),
Child('ArgumentList', kind='TupleExprElementList',
collection_element_name='Argument'),
Child('RightBracket', kind='RightSquareBracketToken'),
Child('TrailingClosure', kind='ClosureExpr',
is_optional=True),
Child('AdditionalTrailingClosures',
kind='MultipleTrailingClosureElementList',
collection_element_name='AdditionalTrailingClosure',
is_optional=True),
]),
# optional-chaining-expr -> expr '?'
Node('OptionalChainingExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('QuestionMark', kind='PostfixQuestionMarkToken'),
]),
# forced-value-expr -> expr '!'
Node('ForcedValueExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('ExclamationMark', kind='ExclamationMarkToken'),
]),
# postfix-unary-expr -> expr postfix-operator
Node('PostfixUnaryExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('OperatorToken', kind='PostfixOperatorToken'),
]),
# specialize-expr -> expr generic-argument-clause?
Node('SpecializeExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('GenericArgumentClause', kind='GenericArgumentClause'),
]),
# string literal segment in a string interpolation expression.
Node('StringSegment', kind='Syntax',
children=[
Child('Content', kind='StringSegmentToken'),
]),
# expression segment in a string interpolation expression.
Node('ExpressionSegment', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('Backslash', kind='BackslashToken'),
Child('Delimiter', kind='RawStringDelimiterToken',
is_optional=True),
Child('LeftParen', kind='LeftParenToken',
classification='StringInterpolationAnchor',
force_classification=True),
Child('Expressions', kind='TupleExprElementList',
collection_element_name='Expression'),
Child('RightParen', kind='StringInterpolationAnchorToken'),
]),
# e.g. "abc \(foo()) def"
Node('StringLiteralExpr', kind='Expr',
children=[
Child('OpenDelimiter', kind='RawStringDelimiterToken',
is_optional=True),
Child('OpenQuote', kind='Token',
token_choices=[
'StringQuoteToken',
'MultilineStringQuoteToken',
]),
Child('Segments', kind='StringLiteralSegments',
collection_element_name='Segment'),
Child('CloseQuote', kind='Token',
token_choices=[
'StringQuoteToken',
'MultilineStringQuoteToken',
]),
Child('CloseDelimiter', kind='RawStringDelimiterToken',
is_optional=True),
]),
# e.g. "\a.b[2].a"
Node('KeyPathExpr', kind='Expr',
children=[
Child('Backslash', kind='BackslashToken'),
Child('RootExpr', kind='Expr', is_optional=True,
node_choices=[
Child('IdentifierExpr', kind='IdentifierExpr'),
Child('SpecializeExpr', kind='SpecializeExpr'),
Child('OptionalChainingExpr', kind='OptionalChainingExpr'),
]),
Child('Expression', kind='Expr'),
]),
# The period in the key path serves as the base on which the
# right-hand-side of the key path is evaluated
Node('KeyPathBaseExpr', kind='Expr',
children=[
Child('Period', kind='PeriodToken'),
]),
# e.g. "a." or "a"
Node('ObjcNamePiece', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('Dot', kind='PeriodToken', is_optional=True),
]),
# e.g. "a.b.c"
Node('ObjcName', kind='SyntaxCollection', element='ObjcNamePiece'),
# e.g. "#keyPath(a.b.c)"
Node('ObjcKeyPathExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('KeyPath', kind='PoundKeyPathToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Name', kind='ObjcName',
collection_element_name='NamePiece'),
Child('RightParen', kind='RightParenToken'),
]),
# e.g. "#selector(getter:Foo.bar)"
Node('ObjcSelectorExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('PoundSelector', kind='PoundSelectorToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Kind', kind='ContextualKeywordToken',
text_choices=['getter', 'setter'],
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Name', kind='Expr'),
Child('RightParen', kind='RightParenToken'),
]),
# postfix '#if' expession
Node('PostfixIfConfigExpr', kind='Expr',
children=[
Child('Base', kind='Expr', is_optional=True),
Child('Config', kind='IfConfigDecl'),
]),
# <#content#>
Node('EditorPlaceholderExpr', kind='Expr',
children=[
Child('Identifier', kind='IdentifierToken'),
]),
# #fileLiteral(a, b, c)
Node('ObjectLiteralExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('Identifier', kind='Token',
token_choices=[
'PoundColorLiteralToken',
'PoundFileLiteralToken',
'PoundImageLiteralToken',
]),
Child('LeftParen', kind='LeftParenToken'),
Child('Arguments', kind='TupleExprElementList',
collection_element_name='Argument'),
Child('RightParen', kind='RightParenToken'),
]),
]
| |
from __future__ import unicode_literals
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import logging
from oauthlib.common import add_params_to_uri
from oauthlib.common import urldecode as _urldecode
from oauthlib.oauth1 import SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER
import requests
from . import OAuth1
log = logging.getLogger(__name__)
def urldecode(body):
    """Parse *body* as form-urlencoded data, falling back to JSON."""
    try:
        decoded = _urldecode(body)
    except Exception:
        # Not decodable as x-www-form-urlencoded; try JSON instead.
        import json
        return json.loads(body)
    return decoded
class TokenRequestDenied(ValueError):
    """Raised when the provider rejects a token request outright."""

    def __init__(self, message, response):
        super(TokenRequestDenied, self).__init__(message)
        # Keep the full response object so callers can inspect the failure.
        self.response = response

    @property
    def status_code(self):
        """For backwards-compatibility purposes"""
        response = self.response
        return response.status_code
class TokenMissing(ValueError):
    """Raised when a response does not contain the expected token."""

    def __init__(self, message, response):
        # Preserve the offending response for callers that want details.
        self.response = response
        super(TokenMissing, self).__init__(message)
class VerifierMissing(ValueError):
    """Raised when fetch_access_token is called without a verifier set."""
    pass
class OAuth1Session(requests.Session):
    """Request signing and convenience methods for the oauth dance.

    What is the difference between OAuth1Session and OAuth1?

    OAuth1Session actually uses OAuth1 internally and its purpose is to assist
    in the OAuth workflow through convenience methods to prepare authorization
    URLs and parse the various token and redirection responses. It also
    provides rudimentary validation of responses.

    An example of the OAuth workflow using a basic CLI app and Twitter.

    >>> # Credentials obtained during the registration.
    >>> client_key = 'client key'
    >>> client_secret = 'secret'
    >>> callback_uri = 'https://127.0.0.1/callback'
    >>>
    >>> # Endpoints found in the OAuth provider API documentation
    >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
    >>> authorization_url = 'https://api.twitter.com/oauth/authorize'
    >>> access_token_url = 'https://api.twitter.com/oauth/access_token'
    >>>
    >>> oauth_session = OAuth1Session(client_key,client_secret=client_secret, callback_uri=callback_uri)
    >>>
    >>> # First step, fetch the request token.
    >>> oauth_session.fetch_request_token(request_token_url)
    {
        'oauth_token': 'kjerht2309u',
        'oauth_token_secret': 'lsdajfh923874',
    }
    >>>
    >>> # Second step. Follow this link and authorize
    >>> oauth_session.authorization_url(authorization_url)
    'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
    >>>
    >>> # Third step. Fetch the access token
    >>> redirect_response = raw_input('Paste the full redirect URL here.')
    >>> oauth_session.parse_authorization_response(redirect_response)
    {
        'oauth_token': 'kjerht2309u',
        'oauth_token_secret': 'lsdajfh923874',
        'oauth_verifier': 'w34o8967345',
    }
    >>> oauth_session.fetch_access_token(access_token_url)
    {
        'oauth_token': 'sdf0o9823sjdfsdf',
        'oauth_token_secret': '2kjshdfp92i34asdasd',
    }
    >>> # Done. You can now make OAuth requests.
    >>> status_url = 'http://api.twitter.com/1/statuses/update.json'
    >>> new_status = {'status': 'hello world!'}
    >>> oauth_session.post(status_url, data=new_status)
    <Response [200]>
    """

    def __init__(
        self,
        client_key,
        client_secret=None,
        resource_owner_key=None,
        resource_owner_secret=None,
        callback_uri=None,
        signature_method=SIGNATURE_HMAC,
        signature_type=SIGNATURE_TYPE_AUTH_HEADER,
        rsa_key=None,
        verifier=None,
        client_class=None,
        force_include_body=False,
        **kwargs
    ):
        """Construct the OAuth 1 session.

        :param client_key: A client specific identifier.
        :param client_secret: A client specific secret used to create HMAC and
                              plaintext signatures.
        :param resource_owner_key: A resource owner key, also referred to as
                                   request token or access token depending on
                                   when in the workflow it is used.
        :param resource_owner_secret: A resource owner secret obtained with
                                      either a request or access token. Often
                                      referred to as token secret.
        :param callback_uri: The URL the user is redirect back to after
                             authorization.
        :param signature_method: Signature methods determine how the OAuth
                                 signature is created. The three options are
                                 oauthlib.oauth1.SIGNATURE_HMAC (default),
                                 oauthlib.oauth1.SIGNATURE_RSA and
                                 oauthlib.oauth1.SIGNATURE_PLAIN.
        :param signature_type: Signature type decides where the OAuth
                               parameters are added. Either in the
                               Authorization header (default) or to the URL
                               query parameters or the request body. Defined as
                               oauthlib.oauth1.SIGNATURE_TYPE_AUTH_HEADER,
                               oauthlib.oauth1.SIGNATURE_TYPE_QUERY and
                               oauthlib.oauth1.SIGNATURE_TYPE_BODY
                               respectively.
        :param rsa_key: The private RSA key as a string. Can only be used with
                        signature_method=oauthlib.oauth1.SIGNATURE_RSA.
        :param verifier: A verifier string to prove authorization was granted.
        :param client_class: A subclass of `oauthlib.oauth1.Client` to use with
                             `requests_oauthlib.OAuth1` instead of the default
        :param force_include_body: Always include the request body in the
                                   signature creation.
        :param **kwargs: Additional keyword arguments passed to `OAuth1`
        """
        super(OAuth1Session, self).__init__()
        self._client = OAuth1(
            client_key,
            client_secret=client_secret,
            resource_owner_key=resource_owner_key,
            resource_owner_secret=resource_owner_secret,
            callback_uri=callback_uri,
            signature_method=signature_method,
            signature_type=signature_type,
            rsa_key=rsa_key,
            verifier=verifier,
            client_class=client_class,
            force_include_body=force_include_body,
            **kwargs
        )
        # Sign every request made through this session.
        self.auth = self._client

    @property
    def token(self):
        """The current token as a dict of the non-empty credential parts."""
        oauth_token = self._client.client.resource_owner_key
        oauth_token_secret = self._client.client.resource_owner_secret
        oauth_verifier = self._client.client.verifier

        token_dict = {}
        if oauth_token:
            token_dict["oauth_token"] = oauth_token
        if oauth_token_secret:
            token_dict["oauth_token_secret"] = oauth_token_secret
        if oauth_verifier:
            token_dict["oauth_verifier"] = oauth_verifier

        return token_dict

    @token.setter
    def token(self, value):
        # Assigning a token populates the underlying client credentials.
        self._populate_attributes(value)

    @property
    def authorized(self):
        """Boolean that indicates whether this session has an OAuth token
        or not. If `self.authorized` is True, you can reasonably expect
        OAuth-protected requests to the resource to succeed. If
        `self.authorized` is False, you need the user to go through the OAuth
        authentication dance before OAuth-protected requests to the resource
        will succeed.
        """
        if self._client.client.signature_method == SIGNATURE_RSA:
            # RSA only uses resource_owner_key
            return bool(self._client.client.resource_owner_key)
        else:
            # other methods of authentication use all three pieces
            return (
                bool(self._client.client.client_secret)
                and bool(self._client.client.resource_owner_key)
                and bool(self._client.client.resource_owner_secret)
            )

    def authorization_url(self, url, request_token=None, **kwargs):
        """Create an authorization URL by appending request_token and optional
        kwargs to url.

        This is the second step in the OAuth 1 workflow. The user should be
        redirected to this authorization URL, grant access to you, and then
        be redirected back to you. The redirection back can either be specified
        during client registration or by supplying a callback URI per request.

        :param url: The authorization endpoint URL.
        :param request_token: The previously obtained request token.
        :param kwargs: Optional parameters to append to the URL.
        :returns: The authorization URL with new parameters embedded.

        An example using a registered default callback URI.

        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
        >>> authorization_url = 'https://api.twitter.com/oauth/authorize'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.fetch_request_token(request_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        >>> oauth_session.authorization_url(authorization_url)
        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf'
        >>> oauth_session.authorization_url(authorization_url, foo='bar')
        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar'

        An example using an explicit callback URI.

        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
        >>> authorization_url = 'https://api.twitter.com/oauth/authorize'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback')
        >>> oauth_session.fetch_request_token(request_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        >>> oauth_session.authorization_url(authorization_url)
        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'
        """
        kwargs["oauth_token"] = request_token or self._client.client.resource_owner_key
        log.debug("Adding parameters %s to url %s", kwargs, url)
        return add_params_to_uri(url, kwargs.items())

    def fetch_request_token(self, url, realm=None, **request_kwargs):
        r"""Fetch a request token.

        This is the first step in the OAuth 1 workflow. A request token is
        obtained by making a signed post request to url. The token is then
        parsed from the application/x-www-form-urlencoded response and ready
        to be used to construct an authorization url.

        :param url: The request token endpoint URL.
        :param realm: A list of realms to request access to.
        :param \*\*request_kwargs: Optional arguments passed to ''post''
            function in ''requests.Session''
        :returns: The response in dict format.

        Note that a previously set callback_uri will be reset for your
        convenience, or else signature creation will be incorrect on
        consecutive requests.

        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.fetch_request_token(request_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        """
        self._client.client.realm = " ".join(realm) if realm else None
        token = self._fetch_token(url, **request_kwargs)
        log.debug("Resetting callback_uri and realm (not needed in next phase).")
        self._client.client.callback_uri = None
        self._client.client.realm = None
        return token

    def fetch_access_token(self, url, verifier=None, **request_kwargs):
        """Fetch an access token.

        This is the final step in the OAuth 1 workflow. An access token is
        obtained using all previously obtained credentials, including the
        verifier from the authorization step.

        Note that a previously set verifier will be reset for your
        convenience, or else signature creation will be incorrect on
        consecutive requests.

        :param url: The access token endpoint URL.
        :param verifier: A verifier string to prove authorization was granted.
        :param request_kwargs: Optional arguments passed to ''post''
            function in ''requests.Session''
        :returns: The response in dict format.

        >>> access_token_url = 'https://api.twitter.com/oauth/access_token'
        >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.parse_authorization_response(redirect_response)
        {
            'oauth_token': 'kjerht2309u',
            'oauth_token_secret': 'lsdajfh923874',
            'oauth_verifier': 'w34o8967345',
        }
        >>> oauth_session.fetch_access_token(access_token_url)
        {
            'oauth_token': 'sdf0o9823sjdfsdf',
            'oauth_token_secret': '2kjshdfp92i34asdasd',
        }
        """
        if verifier:
            self._client.client.verifier = verifier
        if not getattr(self._client.client, "verifier", None):
            raise VerifierMissing("No client verifier has been set.")
        token = self._fetch_token(url, **request_kwargs)
        log.debug("Resetting verifier attribute, should not be used anymore.")
        self._client.client.verifier = None
        return token

    def parse_authorization_response(self, url):
        """Extract parameters from the post authorization redirect response URL.

        :param url: The full URL that resulted from the user being redirected
                    back from the OAuth provider to you, the client.
        :returns: A dict of parameters extracted from the URL.

        >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
        >>> oauth_session.parse_authorization_response(redirect_response)
        {
            'oauth_token': 'kjerht2309u',
            'oauth_token_secret': 'lsdajfh923874',
            'oauth_verifier': 'w34o8967345',
        }
        """
        log.debug("Parsing token from query part of url %s", url)
        token = dict(urldecode(urlparse(url).query))
        log.debug("Updating internal client token attribute.")
        # Assigning through the ``token`` setter populates the client
        # credentials; calling _populate_attributes() directly as well would
        # populate them twice.
        self.token = token
        return token

    def _populate_attributes(self, token):
        """Copy token credentials onto the underlying oauthlib client.

        :raises TokenMissing: if ``token`` has no ``oauth_token`` entry.
        """
        if "oauth_token" in token:
            self._client.client.resource_owner_key = token["oauth_token"]
        else:
            raise TokenMissing(
                "Response does not contain a token: {resp}".format(resp=token), token
            )
        if "oauth_token_secret" in token:
            self._client.client.resource_owner_secret = token["oauth_token_secret"]
        if "oauth_verifier" in token:
            self._client.client.verifier = token["oauth_verifier"]

    def _fetch_token(self, url, **request_kwargs):
        """POST to ``url``, decode the token response and store it.

        :raises TokenRequestDenied: on a 4xx/5xx response.
        :raises ValueError: if the response body cannot be decoded.
        """
        log.debug("Fetching token from %s using client %s", url, self._client.client)
        r = self.post(url, **request_kwargs)

        if r.status_code >= 400:
            error = "Token request failed with code %s, response was '%s'."
            raise TokenRequestDenied(error % (r.status_code, r.text), r)

        log.debug('Decoding token from response "%s"', r.text)
        try:
            token = dict(urldecode(r.text.strip()))
        except ValueError as e:
            error = (
                "Unable to decode token from token response. "
                "This is commonly caused by an unsuccessful request where"
                " a non urlencoded error message is returned. "
                "The decoding error was %s"
                "" % e
            )
            raise ValueError(error)

        log.debug("Obtained token %s", token)
        log.debug("Updating internal client attributes from token data.")
        # The ``token`` setter already calls _populate_attributes(); a direct
        # call here would populate the client credentials twice.
        self.token = token
        return token

    def rebuild_auth(self, prepared_request, response):
        """
        When being redirected we should always strip Authorization
        header, since nonce may not be reused as per OAuth spec.
        """
        if "Authorization" in prepared_request.headers:
            # If we get redirected to a new host, we should strip out
            # any authentication headers.
            prepared_request.headers.pop("Authorization", True)
            prepared_request.prepare_auth(self.auth)
| |
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, F, Func,
IntegerField, Q, Sum, Value,
)
from django.db.models.functions import Lower
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
    """
    Work around a Django/cx_Oracle string-handling bug under Python 3
    (Python 3 strings are treated as Python 2 strings rather than unicode).
    Affected tests are marked as expected failures on Oracle under Python 3
    until the underlying issue is fixed in #23843.
    """
    from unittest import expectedFailure
    from django.db import connection

    if connection.vendor == 'oracle' and six.PY3:
        return expectedFailure(func)
    return func
class NonAggregateAnnotationTestCase(TestCase):
    """Tests for QuerySet.annotate() with non-aggregate expressions (Value, F,
    Func, ExpressionWrapper) and their interaction with filter(), order_by(),
    values(), defer(), updates, aggregation and multi-table inheritance."""

    @classmethod
    def setUpTestData(cls):
        """Build the shared fixture used by all tests in this class:
        authors (with a friends m2m graph), publishers, books and stores."""
        # Authors and their friendship graph.
        cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
        cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
        cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
        cls.a4 = Author.objects.create(name='James Bennett', age=29)
        cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
        cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
        cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
        cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
        cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
        cls.a1.friends.add(cls.a2, cls.a4)
        cls.a2.friends.add(cls.a1, cls.a7)
        cls.a4.friends.add(cls.a1)
        cls.a5.friends.add(cls.a6, cls.a7)
        cls.a6.friends.add(cls.a5, cls.a7)
        cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
        cls.a8.friends.add(cls.a9)
        cls.a9.friends.add(cls.a8)
        # Publishers.
        cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
        cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
        cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
        cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
        cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
        # Books, each tied to a contact author and a publisher.
        cls.b1 = Book.objects.create(
            isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
            pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
            pubdate=datetime.date(2007, 12, 6)
        )
        cls.b2 = Book.objects.create(
            isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
            pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
            pubdate=datetime.date(2008, 3, 3)
        )
        cls.b3 = Book.objects.create(
            isbn='159059996', name='Practical Django Projects',
            pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
            pubdate=datetime.date(2008, 6, 23)
        )
        cls.b4 = Book.objects.create(
            isbn='013235613', name='Python Web Development with Django',
            pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
            pubdate=datetime.date(2008, 11, 3)
        )
        cls.b5 = Book.objects.create(
            isbn='013790395', name='Artificial Intelligence: A Modern Approach',
            pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
            pubdate=datetime.date(1995, 1, 15)
        )
        cls.b6 = Book.objects.create(
            isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
            pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
            pubdate=datetime.date(1991, 10, 15)
        )
        cls.b1.authors.add(cls.a1, cls.a2)
        cls.b2.authors.add(cls.a3)
        cls.b3.authors.add(cls.a4)
        cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
        cls.b5.authors.add(cls.a8, cls.a9)
        cls.b6.authors.add(cls.a8)
        # Stores carrying subsets of the books.
        s1 = Store.objects.create(
            name='Amazon.com',
            original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
            friday_night_closing=datetime.time(23, 59, 59)
        )
        s2 = Store.objects.create(
            name='Books.com',
            original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
            friday_night_closing=datetime.time(23, 59, 59)
        )
        s3 = Store.objects.create(
            name="Mamma and Pappa's Books",
            original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
            friday_night_closing=datetime.time(21, 30)
        )
        s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
        s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
        s3.books.add(cls.b3, cls.b4, cls.b6)

    def test_basic_annotation(self):
        # A constant Value annotation is attached to every returned instance.
        books = Book.objects.annotate(
            is_book=Value(1, output_field=IntegerField()))
        for book in books:
            self.assertEqual(book.is_book, 1)

    def test_basic_f_annotation(self):
        # An F() annotation mirrors an existing field on the same model.
        books = Book.objects.annotate(another_rating=F('rating'))
        for book in books:
            self.assertEqual(book.another_rating, book.rating)

    def test_joined_annotation(self):
        # F() annotations can traverse a foreign key join.
        books = Book.objects.select_related('publisher').annotate(
            num_awards=F('publisher__num_awards'))
        for book in books:
            self.assertEqual(book.num_awards, book.publisher.num_awards)

    def test_mixed_type_annotation_date_interval(self):
        # datetime + timedelta arithmetic via ExpressionWrapper with an
        # explicit DateTimeField output type.
        active = datetime.datetime(2015, 3, 20, 14, 0, 0)
        duration = datetime.timedelta(hours=1)
        expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
        Ticket.objects.create(active_at=active, duration=duration)
        t = Ticket.objects.annotate(
            expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
        ).first()
        self.assertEqual(t.expires, expires)

    def test_mixed_type_annotation_numbers(self):
        # int + float coerced to the declared IntegerField output type.
        test = self.b1
        b = Book.objects.annotate(
            combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
        ).get(isbn=test.isbn)
        combined = int(test.pages + test.rating)
        self.assertEqual(b.combined, combined)

    def test_empty_expression_annotation(self):
        # Q(pk__in=<empty>) annotates False for every row, both for a plain
        # empty list and for an empty queryset.
        books = Book.objects.annotate(
            selected=ExpressionWrapper(Q(pk__in=[]), output_field=BooleanField())
        )
        self.assertEqual(len(books), Book.objects.count())
        self.assertTrue(all(not book.selected for book in books))
        books = Book.objects.annotate(
            selected=ExpressionWrapper(Q(pk__in=Book.objects.none()), output_field=BooleanField())
        )
        self.assertEqual(len(books), Book.objects.count())
        self.assertTrue(all(not book.selected for book in books))

    def test_annotate_with_aggregation(self):
        # Non-aggregate and aggregate annotations can be combined in one call.
        books = Book.objects.annotate(
            is_book=Value(1, output_field=IntegerField()),
            rating_count=Count('rating'))
        for book in books:
            self.assertEqual(book.is_book, 1)
            self.assertEqual(book.rating_count, 1)

    def test_aggregate_over_annotation(self):
        # Aggregating over an F() annotation equals aggregating the source field.
        agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
        other_agg = Author.objects.aggregate(age_sum=Sum('age'))
        self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])

    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_distinct_on_with_annotation(self):
        # DISTINCT ON may reference an annotated alias (PostgreSQL-only feature).
        store = Store.objects.create(
            name='test store',
            original_opening=datetime.datetime.now(),
            friday_night_closing=datetime.time(21, 00, 00),
        )
        names = [
            'Theodore Roosevelt',
            'Eleanor Roosevelt',
            'Franklin Roosevelt',
            'Ned Stark',
            'Catelyn Stark',
        ]
        for name in names:
            Employee.objects.create(
                store=store,
                first_name=name.split()[0],
                last_name=name.split()[1],
                age=30, salary=2000,
            )
        people = Employee.objects.annotate(
            name_lower=Lower('last_name'),
        ).distinct('name_lower')
        self.assertEqual(set(p.last_name for p in people), {'Stark', 'Roosevelt'})
        self.assertEqual(len(people), 2)
        people2 = Employee.objects.annotate(
            test_alias=F('store__name'),
        ).distinct('test_alias')
        self.assertEqual(len(people2), 1)

    def test_filter_annotation(self):
        books = Book.objects.annotate(
            is_book=Value(1, output_field=IntegerField())
        ).filter(is_book=1)
        for book in books:
            self.assertEqual(book.is_book, 1)

    def test_filter_annotation_with_f(self):
        books = Book.objects.annotate(
            other_rating=F('rating')
        ).filter(other_rating=3.5)
        for book in books:
            self.assertEqual(book.other_rating, 3.5)

    def test_filter_annotation_with_double_f(self):
        # Filtering an annotation against the field it aliases.
        books = Book.objects.annotate(
            other_rating=F('rating')
        ).filter(other_rating=F('rating'))
        for book in books:
            self.assertEqual(book.other_rating, book.rating)

    def test_filter_agg_with_double_f(self):
        books = Book.objects.annotate(
            sum_rating=Sum('rating')
        ).filter(sum_rating=F('sum_rating'))
        for book in books:
            self.assertEqual(book.sum_rating, book.rating)

    def test_filter_wrong_annotation(self):
        # Referencing a nonexistent annotation in F() must raise FieldError.
        with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
            list(Book.objects.annotate(
                sum_rating=Sum('rating')
            ).filter(sum_rating=F('nope')))

    def test_combined_annotation_commutative(self):
        # F() arithmetic commutes, including when one operand is None.
        book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
        book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
        self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
        book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
        book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
        self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)

    def test_update_with_annotation(self):
        # update() may reference an annotation computed on the same queryset.
        book_preupdate = Book.objects.get(pk=self.b2.pk)
        Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
        book_postupdate = Book.objects.get(pk=self.b2.pk)
        self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)

    def test_annotation_with_m2m(self):
        # Annotating across an m2m multiplies rows (one per related author).
        books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
        self.assertEqual(books[0].author_age, 34)
        self.assertEqual(books[1].author_age, 35)

    def test_annotation_reverse_m2m(self):
        books = Book.objects.annotate(
            store_name=F('store__name')).filter(
            name='Practical Django Projects').order_by(
            'store_name')
        self.assertQuerysetEqual(
            books, [
                'Amazon.com',
                'Books.com',
                'Mamma and Pappa\'s Books'
            ],
            lambda b: b.store_name
        )

    def test_values_annotation(self):
        """
        Annotations can reference fields in a values clause,
        and contribute to an existing values clause.
        """
        # annotate references a field in values()
        qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
        book = qs.get(pk=self.b1.pk)
        self.assertEqual(book['rating'] - 1, book['other_rating'])
        # filter refs the annotated value
        book = qs.get(other_rating=4)
        self.assertEqual(book['other_rating'], 4)
        # can annotate an existing values with a new field
        book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
        self.assertEqual(book['other_rating'], 4)
        self.assertEqual(book['other_isbn'], '155860191')

    def test_defer_annotation(self):
        """
        Deferred attributes can be referenced by an annotation,
        but they are not themselves deferred, and cannot be deferred.
        """
        qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
        # Two queries: one for the row, one for the deferred 'rating' access.
        with self.assertNumQueries(2):
            book = qs.get(other_rating=4)
            self.assertEqual(book.rating, 5)
            self.assertEqual(book.other_rating, 4)
        with self.assertRaisesMessage(FieldDoesNotExist, "Book has no field named 'other_rating'"):
            book = qs.defer('other_rating').get(other_rating=4)

    def test_mti_annotations(self):
        """
        Fields on an inherited model can be referenced by an
        annotated field.
        """
        d = DepartmentStore.objects.create(
            name='Angus & Robinson',
            original_opening=datetime.date(2014, 3, 8),
            friday_night_closing=datetime.time(21, 00, 00),
            chain='Westfield'
        )
        books = Book.objects.filter(rating__gt=4)
        for b in books:
            d.books.add(b)
        qs = DepartmentStore.objects.annotate(
            other_name=F('name'),
            other_chain=F('chain'),
            is_open=Value(True, BooleanField()),
            book_isbn=F('books__isbn')
        ).order_by('book_isbn').filter(chain='Westfield')
        self.assertQuerysetEqual(
            qs, [
                ('Angus & Robinson', 'Westfield', True, '155860191'),
                ('Angus & Robinson', 'Westfield', True, '159059725')
            ],
            lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
        )

    def test_null_annotation(self):
        """
        Test that annotating None onto a model round-trips
        """
        book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
        self.assertIsNone(book.no_value)

    def test_order_by_annotation(self):
        authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
        self.assertQuerysetEqual(
            authors, [
                25, 29, 29, 34, 35, 37, 45, 46, 57,
            ],
            lambda a: a.other_age
        )

    def test_order_by_aggregate(self):
        authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
        self.assertQuerysetEqual(
            authors, [
                (25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
            ],
            lambda a: (a['age'], a['age_count'])
        )

    def test_annotate_exists(self):
        # No author id occurs more than once, so the filtered set is empty.
        authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
        self.assertFalse(authors.exists())

    def test_column_field_ordering(self):
        """
        Test that columns are aligned in the correct order for
        resolve_columns. This test will fail on mysql if column
        ordering is out. Column fields should be aligned as:
        1. extra_select
        2. model_fields
        3. annotation_fields
        4. model_related_fields
        """
        store = Store.objects.first()
        Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
                                store=store, age=23, salary=Decimal(50000.00))
        Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
                                store=store, age=18, salary=Decimal(40000.00))
        qs = Employee.objects.extra(
            select={'random_value': '42'}
        ).select_related('store').annotate(
            annotated_value=Value(17, output_field=IntegerField())
        )
        rows = [
            (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
            (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
        ]
        self.assertQuerysetEqual(
            qs.order_by('id'), rows,
            lambda e: (
                e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
                e.salary, e.store.name, e.annotated_value))

    def test_column_field_ordering_with_deferred(self):
        # Same column-alignment check as above, but with a deferred field.
        store = Store.objects.first()
        Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
                                store=store, age=23, salary=Decimal(50000.00))
        Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
                                store=store, age=18, salary=Decimal(40000.00))
        qs = Employee.objects.extra(
            select={'random_value': '42'}
        ).select_related('store').annotate(
            annotated_value=Value(17, output_field=IntegerField())
        )
        rows = [
            (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
            (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
        ]
        # and we respect deferred columns!
        self.assertQuerysetEqual(
            qs.defer('age').order_by('id'), rows,
            lambda e: (
                e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
                e.salary, e.store.name, e.annotated_value))

    @cxOracle_py3_bug
    def test_custom_functions(self):
        # A raw Func annotation using SQL COALESCE across several columns.
        Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
        Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
        Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
        Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
        qs = Company.objects.annotate(
            tagline=Func(
                F('motto'),
                F('ticker_name'),
                F('description'),
                Value('No Tag'),
                function='COALESCE'
            )
        ).order_by('name')
        self.assertQuerysetEqual(
            qs, [
                ('Apple', 'APPL'),
                ('Django Software Foundation', 'No Tag'),
                ('Google', 'Do No Evil'),
                ('Yahoo', 'Internet Company')
            ],
            lambda c: (c.name, c.tagline)
        )

    @cxOracle_py3_bug
    def test_custom_functions_can_ref_other_functions(self):
        # One Func annotation may consume the output of a previous one.
        Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
        Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
        Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
        Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()

        class Lower(Func):
            function = 'LOWER'
        qs = Company.objects.annotate(
            tagline=Func(
                F('motto'),
                F('ticker_name'),
                F('description'),
                Value('No Tag'),
                function='COALESCE')
        ).annotate(
            tagline_lower=Lower(F('tagline'), output_field=CharField())
        ).order_by('name')
        # LOWER function supported by:
        # oracle, postgres, mysql, sqlite, sqlserver
        self.assertQuerysetEqual(
            qs, [
                ('Apple', 'APPL'.lower()),
                ('Django Software Foundation', 'No Tag'.lower()),
                ('Google', 'Do No Evil'.lower()),
                ('Yahoo', 'Internet Company'.lower())
            ],
            lambda c: (c.name, c.tagline_lower)
        )
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import logging
from collections import defaultdict
from django.core.cache import cache
from libsentry.api import get_api as get_api_v1
from libsentry.api2 import get_api as get_api_v2
from libsentry.conf import PRIVILEGE_CHECKER_CACHING
from libsentry.sentry_site import get_hive_sentry_provider
LOG = logging.getLogger(__name__)

# Maps a privilege/action name to a comparable strength level so a granted
# action can be checked against a requested action with a simple ``>=``.
PRIVILEGE_HIERARCHY = {
  'SELECT': 0,
  'QUERY': 0,
  'READ': 0,  # Not a Sentry privilege, but enables v1 and v2 cross-compatible action type
  'INSERT': 1,
  'UPDATE': 1,
  'WRITE': 1,  # Not a Sentry privilege, but enables v1 and v2 cross-compatible action type
  'ALL': 2
}
# TODO
# "Create", "Drop", "Alter" and "Refresh"

# Object levels at which a Sentry privilege can be granted.
SENTRY_OBJECTS = (
  'SERVER',
  'DB',
  'TABLE',
  'COLUMN',
  'COLLECTION',
  'CONFIG',
  'URI'
)

# Reserved key under which a privilege dict is stored inside the hierarchy
# trees built by PrivilegeChecker; no authorizable object may use this name.
SENTRY_PRIVILEGE_KEY = 'SENTRY_PRIVILEGE'
# Per-user cache key used to memoize PrivilegeChecker instances.
SENTRY_PRIVILEGE_CACHE_KEY = 'checker-%(username)s'
class MissingSentryPrivilegeException(Exception):
  """Raised when required Sentry privileges are missing for some objects.

  The offending objects are kept on the exception for error reporting.
  """

  def __init__(self, objects=None):
    self.objects = objects

  def __str__(self):
    return '%s' % (self.objects,)
def get_checker(user, checker=None):
  """Return a PrivilegeChecker for ``user``, memoized in the Django cache.

  An explicitly supplied ``checker`` wins; otherwise the per-user cache is
  consulted, and on a miss a fresh checker is built and cached for
  PRIVILEGE_CHECKER_CACHING seconds.
  """
  cache_key = SENTRY_PRIVILEGE_CACHE_KEY % {'username': user.username}
  checker = checker or cache.get(cache_key)
  if checker:
    return checker
  fresh_checker = PrivilegeChecker(user=user)
  cache.set(cache_key, fresh_checker, PRIVILEGE_CHECKER_CACHING.get())
  return fresh_checker
class PrivilegeChecker(object):
  """
  Given a user, checks and applies Sentry privilege and authorization rules against Sentry objects
  """

  def __init__(self, user, api_v1=None, api_v2=None):
    # Build both privilege trees up front: v1 covers Hive-style objects,
    # v2 covers component-based objects (the v2 API is fetched for 'solr').
    api_v1 = api_v1 if api_v1 else get_api_v1(user)
    api_v2 = api_v2 if api_v2 else get_api_v2(user, component='solr')
    privileges_v1 = self._get_privileges_for_user(api_v1)
    self.privilege_hierarchy_v1 = self._to_privilege_hierarchy_v1(privileges_v1)
    privileges_v2 = self._get_privileges_for_user(api_v2, serviceName=get_hive_sentry_provider())
    self.privilege_hierarchy_v2 = self._to_privilege_hierarchy_v2(privileges_v2)

  def filter_objects(self, objects, action='READ', key=lambda x: x.copy()):
    """
    Given a set of authorizable Sentry objects and a requested action, return a filtered set of objects that the user
    has privileges to perform the given action upon.
    :param objects: a list of objects that can be converted to Sentry authorizables using the key function;
      objects should be converted to either V1 or V2 authorizables that utilize the following format:
      V1 - {'column': 'total_emp', 'table': 'sample_08', 'db': 'default', 'server': 'server1', 'URI': None}
      V2 - {'component': 'solr', 'serviceName': 'server1', 'type': 'COLLECTION', 'name': 'twitter_demo', 'URI': None}
    :param action: requested action-level that we should check privileges against (default: READ)
    :param key: a function that will be applied to each object in the objects iterable to convert it to a Sentry format

    NOTE: this is a generator; objects are yielded lazily as they pass the
    privilege check, and objects whose hierarchy lookup raises KeyError are
    skipped with a warning rather than failing the whole filter.
    """
    action = action.upper()

    # Apply Sentry formatting key function
    object_authorizables = self._to_sentry_authorizables(objects=objects, key=key)

    # Separate V1 (Hive) and V2 (Solr) authorizable objects by their shape.
    v1_authorizables = [(obj, auth) for (obj, auth) in object_authorizables if 'db' in auth]
    v2_authorizables = [(obj, auth) for (obj, auth) in object_authorizables if 'component' in auth]

    if v1_authorizables:
      for (object, authorizable) in v1_authorizables:
        try:
          if self._is_object_action_authorized_v1(hierarchy=self.privilege_hierarchy_v1, object=authorizable, action=action):
            yield object
        except KeyError as e:
          LOG.warning('Skipping %s: %s' % (authorizable, e))

    if v2_authorizables:
      for (object, authorizable) in v2_authorizables:
        try:
          if self._is_object_action_authorized_v2(hierarchy=self.privilege_hierarchy_v2, object=authorizable, action=action):
            yield object
        except KeyError as e:
          LOG.warning('Skipping %s: %s' % (authorizable, e))

  def _to_sentry_authorizables(self, objects, key):
    """
    Given a list of objects, return a list of tuples where the first item is the original object and second item is the
    converted Sentry authorizable. Any non-Sentry objects are filtered out of the returned dictionary.
    :param objects: original list of objects to convert to Sentry authorizables using they key function
    :param key: a function that will be applied to each object to convert to a Sentry format
    :return: a list of tuples (object, authorizable)
    """
    def add_default_server(object):
      # Fill in a default server/service name when the key function did not
      # provide one ('server1' is the conventional default provider name).
      if 'db' in object and not object.get('server'):  # V1
        object.update({'server': 'server1'})
      elif 'component' in object and not object.get('serviceName'):  # V2
        object.update({'serviceName': 'server1'})
      return object

    # NOTE: key() is evaluated twice per object here (once to filter, once to
    # keep the result); key should therefore be cheap and side-effect free.
    object_authorizables = [(obj, key(obj)) for obj in objects if key(obj)]
    object_authorizables = [(obj, add_default_server(auth)) for (obj, auth) in object_authorizables]
    return object_authorizables

  def _get_privileges_for_user(self, api, serviceName=None):
    """Collect the privileges of every role granted to any of the user's groups.

    :param api: a v1 or v2 Sentry API client.
    :param serviceName: when given, passed through to the v2-style privilege
      listing call; otherwise the v1-style call is used.
    """
    privileges = []
    user_roles = api.list_sentry_roles_by_group('*')  # Get all roles for user

    for role in user_roles:
      if serviceName is not None:
        role_privileges = api.list_sentry_privileges_by_role(serviceName=serviceName, roleName=role['name'])
      else:
        role_privileges = api.list_sentry_privileges_by_role(role['name'])
      privileges.extend(role_privileges)  # This may result in duplicates but will get reduced in hierarchy tree

    return privileges

  def _to_privilege_hierarchy_v1(self, privileges):
    """
    Converts a list of privileges to a hierarchical tree of privileges by object, where the privilege is stored into a
    key named SENTRY_PRIVILEGE_KEY.
    NOTE: This assumes no objects share the same name as SENTRY_PRIVILEGE_KEY
    """
    hierarchy = tree()

    for privilege in privileges:
      column, table, database, server, uri = \
        privilege.get('column'), privilege.get('table'), privilege.get('database'), privilege.get('server'), privilege.get('URI')
      # Store the privilege at the most specific level it names; later
      # duplicates for the same object overwrite earlier ones.
      if column:
        hierarchy[server][database][table][column][SENTRY_PRIVILEGE_KEY] = privilege
      elif table:
        hierarchy[server][database][table][SENTRY_PRIVILEGE_KEY] = privilege
      elif database:
        hierarchy[server][database][SENTRY_PRIVILEGE_KEY] = privilege
      elif uri:
        hierarchy[server][uri][SENTRY_PRIVILEGE_KEY] = privilege
      else:
        hierarchy[server][SENTRY_PRIVILEGE_KEY] = privilege

    return hierarchy

  def _to_privilege_hierarchy_v2(self, privileges):
    """
    Converts a list of privileges to a hierarchical tree of privileges by object, where the privilege is stored into a
    key named SENTRY_PRIVILEGE_KEY.
    NOTE: This assumes no objects share the same name as SENTRY_PRIVILEGE_KEY
    """
    hierarchy = tree()

    for privilege in privileges:
      component, service, authorizables = privilege['component'], privilege['serviceName'], privilege['authorizables']
      for obj in authorizables:
        object_type, object_name = obj.get('type'), obj.get('name')
        hierarchy[component][service][object_type][object_name][SENTRY_PRIVILEGE_KEY] = privilege

    return hierarchy

  def _is_object_action_authorized_v1(self, hierarchy, object, action='READ'):
    """Walk the v1 hierarchy from server down to column, recording the action
    level granted at each object level, then authorize if any level grants at
    least the requested action."""
    requested_action_level = PRIVILEGE_HIERARCHY[action]

    # Initialize all privileges for all object levels to non-authorized by default
    privileges_applied = dict((obj, -1) for obj in SENTRY_OBJECTS)

    server, db, table, column, uri = object.get('server'), object.get('db'), object.get('table'), object.get('column'), object.get('URI')

    # NOTE: accessing hierarchy[...] on a defaultdict-based tree creates empty
    # nodes as a side effect; membership is therefore tested with `in` first.
    if server:  # Get server-level privilege
      if server in hierarchy:
        if SENTRY_PRIVILEGE_KEY in hierarchy[server]:
          privileges_applied['SERVER'] = PRIVILEGE_HIERARCHY[hierarchy[server][SENTRY_PRIVILEGE_KEY]['action']]

        if uri and uri in hierarchy[server]:  # Get URI-level privilege
          if SENTRY_PRIVILEGE_KEY in hierarchy[server][uri]:
            privileges_applied['URI'] = PRIVILEGE_HIERARCHY[hierarchy[server][uri][SENTRY_PRIVILEGE_KEY]['action']]

        if db and db in hierarchy[server]:  # Get db-level privilege
          if SENTRY_PRIVILEGE_KEY in hierarchy[server][db]:
            privileges_applied['DB'] = PRIVILEGE_HIERARCHY[hierarchy[server][db][SENTRY_PRIVILEGE_KEY]['action']]

          if table and table in hierarchy[server][db]:  # Get table-level privilege
            if SENTRY_PRIVILEGE_KEY in hierarchy[server][db][table]:
              privileges_applied['TABLE'] = PRIVILEGE_HIERARCHY[hierarchy[server][db][table][SENTRY_PRIVILEGE_KEY]['action']]

            if column and column in hierarchy[server][db][table]:  # Get column-level privilege
              # Since column is the lowest level, it must have a SENTRY_PRIVILEGE set
              privileges_applied['COLUMN'] = PRIVILEGE_HIERARCHY[hierarchy[server][db][table][column][SENTRY_PRIVILEGE_KEY]['action']]

    # A privilege hierarchy exists and at least one of the granted privileges is greater than or equal to requested action
    is_authorized = privileges_applied and max(privileges_applied.values()) >= requested_action_level

    return is_authorized

  def _is_object_action_authorized_v2(self, hierarchy, object, action='READ'):
    """Look up the single (component, service, type, name) node in the v2
    hierarchy and authorize if its granted action is at least the requested
    action."""
    requested_action_level = PRIVILEGE_HIERARCHY[action]

    # Initialize all privileges for all object levels to non-authorized by default
    privileges_applied = dict((obj, -1) for obj in SENTRY_OBJECTS)

    component, service, obj_type, obj_name = object.get('component'), object.get('serviceName'), object.get('type'), object.get('name')

    if component and component in hierarchy:
      if service and service in hierarchy[component]:
        if obj_type and obj_type in hierarchy[component][service]:
          if obj_name and obj_name in hierarchy[component][service][obj_type]:
            if SENTRY_PRIVILEGE_KEY in hierarchy[component][service][obj_type][obj_name]:
              privileges_applied[obj_type] = PRIVILEGE_HIERARCHY[hierarchy[component][service][obj_type][obj_name][SENTRY_PRIVILEGE_KEY]['action']]

    # A privilege hierarchy exists and at least one of the granted privileges is greater than or equal to requested action
    is_authorized = privileges_applied and max(privileges_applied.values()) >= requested_action_level

    return is_authorized
def tree():
  """Autovivifying nested dict: accessing a missing key creates a new subtree."""
  return defaultdict(tree)
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import signal
import time
import traceback
from contextlib import contextmanager
import psutil
from pants.base.build_environment import get_buildroot
from pants.init.subprocess import Subprocess
from pants.process.lock import OwnerPrintingInterProcessFileLock
from pants.util.dirutil import read_file, rm_rf, safe_file_dump, safe_mkdir
from pants.util.memo import memoized_property
from pants.util.process_handler import subprocess
logger = logging.getLogger(__name__)
@contextmanager
def swallow_psutil_exceptions():
  """A contextmanager that swallows standard psutil access exceptions."""
  # Accessing attributes/methods on psutil.Process objects commonly raises
  # these for processes we don't own or that have already exited; they are
  # usually benign, so mask them and carry on.
  try:
    yield
  except psutil.NoSuchProcess:
    pass
  except psutil.AccessDenied:
    pass
class ProcessGroup(object):
  """Wraps a logical group of processes and provides convenient access to ProcessManager objects."""

  def __init__(self, name, metadata_base_dir=None):
    self._name = name
    self._metadata_base_dir = metadata_base_dir

  def _instance_from_process(self, process):
    """Default converter from psutil.Process to process instance classes for subclassing."""
    return ProcessManager(name=process.name(),
                          pid=process.pid,
                          process_name=process.name(),
                          metadata_base_dir=self._metadata_base_dir)

  def iter_processes(self, proc_filter=None):
    """Yields processes from psutil.process_iter with an optional filter and swallows psutil errors.

    If a psutil exception is raised during execution of the filter, that process will not be
    yielded but subsequent processes will. On the other hand, if psutil.process_iter raises
    an exception, no more processes will be yielded.
    """
    accept = (lambda proc: True) if proc_filter is None else proc_filter
    with swallow_psutil_exceptions():  # process_iter may raise
      for candidate in psutil.process_iter():
        with swallow_psutil_exceptions():  # the filter may raise
          if accept(candidate):
            yield candidate

  def iter_instances(self, *args, **kwargs):
    """Yields a process instance (via _instance_from_process) for each matching process."""
    for proc in self.iter_processes(*args, **kwargs):
      yield self._instance_from_process(proc)
class ProcessMetadataManager(object):
  """Manages contextual, on-disk process metadata."""

  class MetadataError(Exception): pass
  class Timeout(Exception): pass

  FAIL_WAIT_SEC = 10      # Default timeout for _deadline_until/_wait_for_file.
  INFO_INTERVAL_SEC = 5   # How often to emit "still waiting" info logs.
  WAIT_INTERVAL_SEC = .1  # Sleep between polls of the waited-on closure.

  def __init__(self, metadata_base_dir=None):
    """
    :param str metadata_base_dir: The base directory for process metadata.
    """
    super(ProcessMetadataManager, self).__init__()

    self._metadata_base_dir = (
      metadata_base_dir or
      Subprocess.Factory.global_instance().create().get_subprocess_dir()
    )

  @staticmethod
  def _maybe_cast(item, caster):
    """Given a casting function, attempt to cast to that type while masking common cast exceptions.

    N.B. This is mostly suitable for casting string types to numeric types - e.g. a port number
    read from disk into an int.

    :param func caster: A casting callable (e.g. `int`).
    :returns: The result of caster(item) or item if TypeError or ValueError are raised during cast.
    """
    try:
      return caster(item)
    except (TypeError, ValueError):
      # N.B. the TypeError catch here (already) protects against the case that caster is None.
      return item

  @classmethod
  def _deadline_until(cls, closure, action_msg, timeout=FAIL_WAIT_SEC,
                      wait_interval=WAIT_INTERVAL_SEC, info_interval=INFO_INTERVAL_SEC):
    """Execute a function/closure repeatedly until a True condition or timeout is met.

    :param func closure: the function/closure to execute (should not block for long periods of time
                         and must return True on success).
    :param str action_msg: a description of the action that is being executed, to be rendered as
                           info while we wait, and as part of any rendered exception.
    :param float timeout: the maximum amount of time to wait for a true result from the closure in
                          seconds. N.B. this is timing based, so won't be exact if the runtime of
                          the closure exceeds the timeout.
    :param float wait_interval: the amount of time to sleep between closure invocations.
    :param float info_interval: the amount of time to wait before and between reports via info
                                logging that we're still waiting for the closure to succeed.
    :raises: :class:`ProcessManager.Timeout` on execution timeout.
    """
    now = time.time()
    deadline = now + timeout
    info_deadline = now + info_interval
    while 1:
      if closure():
        return True

      now = time.time()
      if now > deadline:
        raise cls.Timeout('exceeded timeout of {} seconds while waiting for {}'.format(timeout, action_msg))

      if now > info_deadline:
        logger.info('waiting for {}...'.format(action_msg))
        info_deadline = info_deadline + info_interval
      elif wait_interval:
        # Only sleep on iterations where we didn't just log, so the info
        # message is emitted as close to its deadline as possible.
        time.sleep(wait_interval)

  @classmethod
  def _wait_for_file(cls, filename, timeout=FAIL_WAIT_SEC, want_content=True):
    """Wait up to timeout seconds for filename to appear with a non-zero size or raise Timeout()."""
    def file_waiter():
      return os.path.exists(filename) and (not want_content or os.path.getsize(filename))

    action_msg = 'file {} to appear'.format(filename)
    return cls._deadline_until(file_waiter, action_msg, timeout=timeout)

  @staticmethod
  def _get_metadata_dir_by_name(name, metadata_base_dir):
    """Retrieve the metadata dir by name.

    This should always live outside of the workdir to survive a clean-all.
    """
    return os.path.join(metadata_base_dir, name)

  def _maybe_init_metadata_dir_by_name(self, name):
    """Initialize the metadata directory for a named identity if it doesn't exist."""
    safe_mkdir(self.__class__._get_metadata_dir_by_name(name, self._metadata_base_dir))

  def _metadata_file_path(self, name, metadata_key):
    # Instance-level convenience wrapper around the classmethod below.
    return self.metadata_file_path(name, metadata_key, self._metadata_base_dir)

  @classmethod
  def metadata_file_path(cls, name, metadata_key, metadata_base_dir):
    """Return the path of the file holding ``metadata_key`` for identity ``name``."""
    return os.path.join(cls._get_metadata_dir_by_name(name, metadata_base_dir), metadata_key)

  def read_metadata_by_name(self, name, metadata_key, caster=None):
    """Read process metadata using a named identity.

    :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
    :param string metadata_key: The metadata key (e.g. 'pid').
    :param func caster: A casting callable to apply to the read value (e.g. `int`).
    """
    file_path = self._metadata_file_path(name, metadata_key)
    try:
      return self._maybe_cast(read_file(file_path).strip(), caster)
    except (IOError, OSError):
      # Missing/unreadable metadata is reported as None rather than raising.
      return None

  def write_metadata_by_name(self, name, metadata_key, metadata_value):
    """Write process metadata using a named identity.

    :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
    :param string metadata_key: The metadata key (e.g. 'pid').
    :param string metadata_value: The metadata value (e.g. '1729').
    """
    self._maybe_init_metadata_dir_by_name(name)
    file_path = self._metadata_file_path(name, metadata_key)
    safe_file_dump(file_path, metadata_value)

  def await_metadata_by_name(self, name, metadata_key, timeout, caster=None):
    """Block up to a timeout for process metadata to arrive on disk.

    :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
    :param string metadata_key: The metadata key (e.g. 'pid').
    :param int timeout: The deadline to write metadata.
    :param type caster: A type-casting callable to apply to the read value (e.g. int, str).
    :returns: The value of the metadata key (read from disk post-write).
    :raises: :class:`ProcessMetadataManager.Timeout` on timeout.
    """
    file_path = self._metadata_file_path(name, metadata_key)
    self._wait_for_file(file_path, timeout=timeout)
    return self.read_metadata_by_name(name, metadata_key, caster)

  def purge_metadata_by_name(self, name):
    """Purge a processes metadata directory.

    :raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
    """
    meta_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)
    logger.debug('purging metadata directory: {}'.format(meta_dir))
    try:
      rm_rf(meta_dir)
    except OSError as e:
      raise self.MetadataError('failed to purge metadata directory {}: {!r}'.format(meta_dir, e))
class ProcessManager(ProcessMetadataManager):
  """Subprocess/daemon management mixin/superclass. Not intended to be thread-safe."""

  # Raised when a subprocess produces output we can't interpret.
  class InvalidCommandOutput(Exception): pass

  # Raised by terminate() when a process survives the whole kill signal chain.
  class NonResponsiveProcess(Exception): pass

  class ExecutionError(Exception):
    # Wraps OSError/CalledProcessError from get_subprocess_output(), preserving any output.
    def __init__(self, message, output=None):
      super(ProcessManager.ExecutionError, self).__init__(message)
      self.message = message
      self.output = output

    def __repr__(self):
      return '{}(message={!r}, output={!r})'.format(type(self).__name__, self.message, self.output)

  # Seconds to wait after each kill signal before escalating to the next one.
  KILL_WAIT_SEC = 5
  # Signal escalation order used by terminate(): polite SIGTERM first, then SIGKILL.
  KILL_CHAIN = (signal.SIGTERM, signal.SIGKILL)

  def __init__(self, name, pid=None, socket=None, process_name=None, socket_type=int,
               metadata_base_dir=None):
    """
    :param string name: The process identity/name (e.g. 'pantsd' or 'ng_Zinc').
    :param int pid: The process pid. Overrides fetching of the self.pid @property.
    :param string socket: The socket metadata. Overrides fetching of the self.socket @property.
    :param string process_name: The process name for cmdline executable name matching.
    :param type socket_type: The type to be used for socket type casting (e.g. int).
    :param str metadata_base_dir: The overridden base directory for process metadata.
    """
    super(ProcessManager, self).__init__(metadata_base_dir)
    self._name = name.lower().strip()
    self._pid = pid
    self._socket = socket
    self._socket_type = socket_type
    self._process_name = process_name
    self._buildroot = get_buildroot()
    # Cached psutil.Process handle, created lazily by _as_process().
    self._process = None

  @property
  def name(self):
    """The logical name/label of the process."""
    return self._name

  @property
  def process_name(self):
    """The logical process name. If defined, this is compared to exe_name for stale pid checking."""
    return self._process_name

  @memoized_property
  def lifecycle_lock(self):
    """An identity-keyed inter-process lock for safeguarding lifecycle and other operations."""
    safe_mkdir(self._metadata_base_dir)
    return OwnerPrintingInterProcessFileLock(
      # N.B. This lock can't key into the actual named metadata dir (e.g. `.pids/pantsd/lock`
      # via `ProcessMetadataManager._get_metadata_dir_by_name()`) because of a need to purge
      # the named metadata dir on startup to avoid stale metadata reads.
      os.path.join(self._metadata_base_dir, '.lock.{}'.format(self._name))
    )

  @property
  def cmdline(self):
    """The process commandline. e.g. ['/usr/bin/python2.7', 'pants.pex'].

    :returns: The command line or else `None` if the underlying process has died.
    """
    with swallow_psutil_exceptions():
      process = self._as_process()
      if process:
        return process.cmdline()
    return None

  @property
  def cmd(self):
    """The first element of the process commandline e.g. '/usr/bin/python2.7'.

    :returns: The first element of the process command line or else `None` if the underlying
              process has died.
    """
    return (self.cmdline or [None])[0]

  @property
  def pid(self):
    """The running processes pid (or None)."""
    # An explicit constructor-provided pid wins over the on-disk metadata.
    return self._pid or self.read_metadata_by_name(self._name, 'pid', int)

  @property
  def socket(self):
    """The running processes socket/port information (or None)."""
    return self._socket or self.read_metadata_by_name(self._name, 'socket', self._socket_type)

  @classmethod
  def get_subprocess_output(cls, command, ignore_stderr=True, **kwargs):
    """Get the output of an executed command.

    :param command: An iterable representing the command to execute (e.g. ['ls', '-al']).
    :param ignore_stderr: Whether or not to ignore stderr output vs interleave it with stdout.
    :raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`.
    :returns: The output of the command.
    """
    if ignore_stderr is False:
      kwargs.setdefault('stderr', subprocess.STDOUT)
    try:
      return subprocess.check_output(command, **kwargs)
    except (OSError, subprocess.CalledProcessError) as e:
      # CalledProcessError carries the captured output; OSError does not, hence the default.
      subprocess_output = getattr(e, 'output', '').strip()
      raise cls.ExecutionError(str(e), subprocess_output)

  def await_pid(self, timeout):
    """Wait up to a given timeout for a process to write pid metadata."""
    return self.await_metadata_by_name(self._name, 'pid', timeout, int)

  def await_socket(self, timeout):
    """Wait up to a given timeout for a process to write socket info."""
    return self.await_metadata_by_name(self._name, 'socket', timeout, self._socket_type)

  def write_pid(self, pid=None):
    """Write the current processes PID to the pidfile location"""
    pid = pid or os.getpid()
    self.write_metadata_by_name(self._name, 'pid', str(pid))

  def write_socket(self, socket_info):
    """Write the local processes socket information (TCP port or UNIX socket)."""
    self.write_metadata_by_name(self._name, 'socket', str(socket_info))

  def write_named_socket(self, socket_name, socket_info):
    """A multi-tenant, named alternative to ProcessManager.write_socket()."""
    self.write_metadata_by_name(self._name, 'socket_{}'.format(socket_name), str(socket_info))

  def read_named_socket(self, socket_name, socket_type):
    """A multi-tenant, named alternative to ProcessManager.socket."""
    return self.read_metadata_by_name(self._name, 'socket_{}'.format(socket_name), socket_type)

  def _as_process(self):
    """Returns a psutil `Process` object wrapping our pid.

    NB: Even with a process object in hand, subsequent method calls against it can always raise
    `NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and
    do something sensible for the API.

    :returns: a psutil Process object or else None if we have no pid.
    :rtype: :class:`psutil.Process`
    :raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died.
    """
    # Lazily construct and cache the handle; stays None while we have no pid.
    if self._process is None and self.pid:
      self._process = psutil.Process(self.pid)
    return self._process

  def is_dead(self):
    """Return a boolean indicating whether the process is dead or not."""
    return not self.is_alive()

  def is_alive(self, extended_check=None):
    """Return a boolean indicating whether the process is running or not.

    :param func extended_check: An additional callable that will be invoked to perform an extended
                                liveness check. This callable should take a single argument of a
                                `psutil.Process` instance representing the context-local process
                                and return a boolean True/False to indicate alive vs not alive.
    """
    try:
      process = self._as_process()
      return not (
        # Can happen if we don't find our pid.
        (not process) or
        # Check for walkers.
        (process.status() == psutil.STATUS_ZOMBIE) or
        # Check for stale pids.
        (self.process_name and self.process_name != process.name()) or
        # Extended checking.
        (extended_check and not extended_check(process))
      )
    except (psutil.NoSuchProcess, psutil.AccessDenied):
      # On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
      return False

  def purge_metadata(self, force=False):
    """Instance-based version of ProcessMetadataManager.purge_metadata_by_name() that checks
    for process liveness before purging metadata.

    :param bool force: If True, skip process liveness check before purging metadata.
    :raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
    """
    if not force and self.is_alive():
      raise self.MetadataError('cannot purge metadata for a running process!')
    super(ProcessManager, self).purge_metadata_by_name(self._name)

  def _kill(self, kill_sig):
    """Send a signal to the current process."""
    # No-op when we have no pid (process never started or metadata already purged).
    if self.pid:
      os.kill(self.pid, kill_sig)

  def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
    """Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
    alive = self.is_alive()
    if alive:
      logger.debug('terminating {}'.format(self._name))
      for signal_type in signal_chain:
        pid = self.pid
        try:
          logger.debug('sending signal {} to pid {}'.format(signal_type, pid))
          self._kill(signal_type)
        except OSError as e:
          # e.g. the process exited between the liveness check and the kill.
          logger.warning('caught OSError({e!s}) during attempt to kill -{signal} {pid}!'
                         .format(e=e, signal=signal_type, pid=pid))

        # Wait up to kill_wait seconds to terminate or move onto the next signal.
        try:
          if self._deadline_until(self.is_dead, 'daemon to exit', timeout=kill_wait):
            alive = False
            logger.debug('successfully terminated pid {}'.format(pid))
            break
        except self.Timeout:
          # Loop to the next kill signal on timeout.
          pass

    if alive:
      raise self.NonResponsiveProcess('failed to kill pid {pid} with signals {chain}'
                                      .format(pid=self.pid, chain=signal_chain))

    if purge:
      self.purge_metadata(force=True)

  def daemonize(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None,
                write_pid=True):
    """Perform a double-fork, execute callbacks and write the child pid file.

    The double-fork here is necessary to truly daemonize the subprocess such that it can never
    take control of a tty. The initial fork and setsid() creates a new, isolated process group
    and also makes the first child a session leader (which can still acquire a tty). By forking a
    second time, we ensure that the second child can never acquire a controlling terminal because
    it's no longer a session leader - but it now has its own separate process group.

    Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset
    the processes file mode creation mask post-fork. We do not do this here (and in daemon_spawn
    below) due to the fact that the daemons that pants would run are typically personal user
    daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase to
    differ in their permissions without good reason - in this case, we want to inherit the umask.
    """
    self.purge_metadata()
    self.pre_fork(**pre_fork_opts or {})
    logger.debug('forking %s', self)
    pid = os.fork()
    if pid == 0:
      # First child: become a session leader, then fork again.
      os.setsid()
      second_pid = os.fork()
      if second_pid == 0:
        # Grandchild: the actual daemon. Always _exit() so cleanup handlers don't run twice.
        try:
          os.chdir(self._buildroot)
          self.post_fork_child(**post_fork_child_opts or {})
        except Exception:
          logger.critical(traceback.format_exc())
        finally:
          os._exit(0)
      else:
        # First child: record the daemon's pid for the outside world, then exit.
        try:
          if write_pid: self.write_pid(second_pid)
          self.post_fork_parent(**post_fork_parent_opts or {})
        except Exception:
          logger.critical(traceback.format_exc())
        finally:
          os._exit(0)
    else:
      # This prevents un-reaped, throw-away parent processes from lingering in the process table.
      os.waitpid(pid, 0)

  def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None):
    """Perform a single-fork to run a subprocess and write the child pid file.

    Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
    case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.
    Using this daemonization method vs daemonize() leaves the responsibility of writing the pid
    to the caller to allow for library-agnostic flexibility in subprocess execution.
    """
    self.purge_metadata()
    self.pre_fork(**pre_fork_opts or {})
    pid = os.fork()
    if pid == 0:
      # Child: isolate from the controlling terminal and run the child callback.
      try:
        os.setsid()
        os.chdir(self._buildroot)
        self.post_fork_child(**post_fork_child_opts or {})
      except Exception:
        logger.critical(traceback.format_exc())
      finally:
        os._exit(0)
    else:
      try:
        self.post_fork_parent(**post_fork_parent_opts or {})
      except Exception:
        logger.critical(traceback.format_exc())

  def pre_fork(self):
    """Pre-fork callback for subclasses."""

  def post_fork_child(self):
    """Post-fork child callback for subclasses."""

  def post_fork_parent(self):
    """Post-fork parent callback for subclasses."""
class FingerprintedProcessManager(ProcessManager):
  """A `ProcessManager` subclass that provides a general strategy for process fingerprinting."""

  FINGERPRINT_KEY = 'fingerprint'
  FINGERPRINT_CMD_KEY = None
  FINGERPRINT_CMD_SEP = '='

  @property
  def fingerprint(self):
    """The fingerprint of the current process.

    This can either read the current fingerprint from the running process's psutil.Process.cmdline
    (if the managed process supports that) or from the `ProcessManager` metadata.

    :returns: The fingerprint of the running process as read from the process table, ProcessManager
              metadata or `None`.
    :rtype: string
    """
    cmdline_fingerprint = self.parse_fingerprint(self.cmdline)
    if cmdline_fingerprint:
      return cmdline_fingerprint
    return self.read_metadata_by_name(self.name, self.FINGERPRINT_KEY)

  def parse_fingerprint(self, cmdline, key=None, sep=None):
    """Given a psutil.Process.cmdline, parse and return a fingerprint.

    :param list cmdline: The psutil.Process.cmdline of the current process.
    :param string key: The key for fingerprint discovery.
    :param string sep: The key/value separator for fingerprint discovery.
    :returns: The parsed fingerprint or `None`.
    :rtype: string or `None`
    """
    key = key or self.FINGERPRINT_CMD_KEY
    if not key:
      return None
    sep = sep or self.FINGERPRINT_CMD_SEP
    prefix = '{}{}'.format(key, sep)
    for arg in (cmdline or []):
      if arg.startswith(prefix):
        return arg.split(sep)[1]
    return None

  def has_current_fingerprint(self, fingerprint):
    """Determines if a new fingerprint is the current fingerprint of the running process.

    :param string fingerprint: The new fingerprint to compare to.
    :rtype: bool
    """
    return self.fingerprint == fingerprint

  def needs_restart(self, fingerprint):
    """Determines if the current ProcessManager needs to be started or restarted.

    :param string fingerprint: The new fingerprint to compare to.
    :rtype: bool
    """
    if self.is_dead():
      return True
    return not self.has_current_fingerprint(fingerprint)
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import logging
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from enum import Enum
from librdbms.server import dbms as librdbms_dbms
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document
from TCLIService.ttypes import TSessionHandle, THandleIdentifier,\
TOperationState, TOperationHandle, TOperationType
from beeswax.design import HQLdesign
LOG = logging.getLogger(__name__)

QUERY_SUBMISSION_TIMEOUT = datetime.timedelta(0, 60 * 60)  # 1 hour

# Constants for DB fields, hue ini
BEESWAX = 'beeswax'
HIVE_SERVER2 = 'hiveserver2'

# Chained assignment: QUERY_TYPES is the tuple (0, 1, 2, 3) and HQL, IMPALA, RDBMS
# and SPARK are simultaneously bound to its elements, in order.
QUERY_TYPES = (HQL, IMPALA, RDBMS, SPARK) = range(4)
class QueryHistory(models.Model):
  """
  Holds metadata about all queries that have been executed.
  """
  # NOTE: this is the legacy PyPI `enum` package API (values expose `.index`),
  # not the Python 3 stdlib `enum` module.
  STATE = Enum('submitted', 'running', 'available', 'failed', 'expired')
  SERVER_TYPE = ((BEESWAX, 'Beeswax'), (HIVE_SERVER2, 'Hive Server 2'),
                 (librdbms_dbms.MYSQL, 'MySQL'), (librdbms_dbms.POSTGRESQL, 'PostgreSQL'),
                 (librdbms_dbms.SQLITE, 'sqlite'), (librdbms_dbms.ORACLE, 'oracle'))

  owner = models.ForeignKey(User, db_index=True)
  query = models.TextField()
  # Stores a STATE value's .index (int); see the is_*/set_to_* helpers below.
  last_state = models.IntegerField(db_index=True)
  has_results = models.BooleanField(default=False)  # If true, this query will eventually return tabular results.
  submission_date = models.DateTimeField(auto_now_add=True)
  # In case of multi statements in a query, these are the id of the currently running statement
  server_id = models.CharField(max_length=1024, null=True)  # Aka secret, only query in the "submitted" state is allowed to have no server_id
  server_guid = models.CharField(max_length=1024, null=True, default=None)
  statement_number = models.SmallIntegerField(default=0)  # The index of the currently running statement
  operation_type = models.SmallIntegerField(null=True)
  modified_row_count = models.FloatField(null=True)
  log_context = models.CharField(max_length=1024, null=True)

  # Connection details of the server that executed the query.
  server_host = models.CharField(max_length=128, help_text=_('Host of the query server.'), default='')
  server_port = models.SmallIntegerField(help_text=_('Port of the query server.'), default=0)
  server_name = models.CharField(max_length=128, help_text=_('Name of the query server.'), default='')
  server_type = models.CharField(max_length=128, help_text=_('Type of the query server.'), default=BEESWAX, choices=SERVER_TYPE)
  query_type = models.SmallIntegerField(help_text=_('Type of the query.'), default=HQL, choices=((HQL, 'HQL'), (IMPALA, 'IMPALA')))

  design = models.ForeignKey('SavedQuery', to_field='id', null=True)  # Some queries (like read/create table) don't have a design
  notify = models.BooleanField(default=False)  # Notify on completion

  class Meta:
    ordering = ['-submission_date']

  @staticmethod
  def build(*args, **kwargs):
    """Factory: all new history rows are HiveServerQueryHistory proxies."""
    return HiveServerQueryHistory(*args, **kwargs)

  def get_full_object(self):
    """Re-fetch this row as the HiveServerQueryHistory proxy model."""
    return HiveServerQueryHistory.objects.get(id=self.id)

  @staticmethod
  def get(id):
    """Fetch a history row by id, as the HiveServerQueryHistory proxy model."""
    return HiveServerQueryHistory.objects.get(id=id)

  @staticmethod
  def get_type_name(query_type):
    """Map a QUERY_TYPES int to its application name string."""
    if query_type == IMPALA:
      return 'impala'
    elif query_type == RDBMS:
      return 'rdbms'
    elif query_type == SPARK:
      return 'spark'
    else:
      return 'beeswax'

  def get_query_server_config(self):
    """Return the query-server config dict, overlaid with this row's recorded server details."""
    # Local import avoids a circular dependency with beeswax.server.dbms.
    from beeswax.server.dbms import get_query_server_config

    query_server = get_query_server_config(QueryHistory.get_type_name(self.query_type))
    query_server.update({
        'server_name': self.server_name,
        'server_host': self.server_host,
        'server_port': self.server_port,
        'server_type': self.server_type,
    })

    return query_server

  def get_current_statement(self):
    """Return the statement currently executing (from the design), or the raw query text."""
    if self.design is not None:
      design = self.design.get_design()
      return design.get_query_statement(self.statement_number)
    else:
      return self.query

  def refresh_design(self, hql_query):
    # Refresh only HQL query part
    query = self.design.get_design()
    query.hql_query = hql_query
    self.design.data = query.dumps()
    self.query = hql_query

  def is_finished(self):
    """True when the current statement is done and it was the design's last statement."""
    is_statement_finished = not self.is_running()

    if self.design is not None:
      design = self.design.get_design()
      return is_statement_finished and self.statement_number + 1 == design.statement_count  # Last statement
    else:
      return is_statement_finished

  def is_running(self):
    return self.last_state in (QueryHistory.STATE.running.index, QueryHistory.STATE.submitted.index)

  def is_success(self):
    return self.last_state in (QueryHistory.STATE.available.index,)

  def is_failure(self):
    # Expired queries are counted as failures too.
    return self.last_state in (QueryHistory.STATE.expired.index, QueryHistory.STATE.failed.index)

  def is_expired(self):
    return self.last_state in (QueryHistory.STATE.expired.index,)

  # The set_to_* helpers update last_state in memory only; callers must save().
  def set_to_running(self):
    self.last_state = QueryHistory.STATE.running.index

  def set_to_failed(self):
    self.last_state = QueryHistory.STATE.failed.index

  def set_to_available(self):
    self.last_state = QueryHistory.STATE.available.index

  def set_to_expired(self):
    self.last_state = QueryHistory.STATE.expired.index
def make_query_context(type, info):
  """Build a "<type>:<info>" token suitable for use as a GET parameter.

  ``type`` is one of "table" and "design", and ``info`` is the table name or design id.
  Unknown types are logged and mapped to the empty string.
  """
  if type == 'design':
    # Use int() to validate that info is a number.
    return "%s:%s" % (type, int(info))
  if type == 'table':
    return "%s:%s" % (type, info)
  LOG.error("Invalid query context type: %s" % (type,))
  # Empty string is safer than None for URL construction.
  return ''
class HiveServerQueryHistory(QueryHistory):
  """QueryHistory specialization for queries executed against Hive Server 2."""

  # Map from (thrift) server operation state to our QueryHistory.STATE values.
  STATE_MAP = {
    TOperationState.INITIALIZED_STATE : QueryHistory.STATE.submitted,
    TOperationState.RUNNING_STATE : QueryHistory.STATE.running,
    TOperationState.FINISHED_STATE : QueryHistory.STATE.available,
    TOperationState.CANCELED_STATE : QueryHistory.STATE.failed,
    TOperationState.CLOSED_STATE : QueryHistory.STATE.expired,
    TOperationState.ERROR_STATE : QueryHistory.STATE.failed,
    # 'UKNOWN_STATE' (sic) is the actual misspelled constant name in the thrift IDL.
    TOperationState.UKNOWN_STATE : QueryHistory.STATE.failed,
    TOperationState.PENDING_STATE : QueryHistory.STATE.submitted,
  }

  node_type = HIVE_SERVER2

  class Meta:
    # Proxy model: same DB table as QueryHistory, different Python behavior.
    proxy = True

  def get_handle(self):
    """Reconstruct a HiveServerQueryHandle from the persisted (base64-encoded) server id/guid."""
    secret, guid = HiveServerQueryHandle.get_decoded(self.server_id, self.server_guid)

    return HiveServerQueryHandle(secret=secret,
                                 guid=guid,
                                 has_result_set=self.has_results,
                                 operation_type=self.operation_type,
                                 modified_row_count=self.modified_row_count)

  def save_state(self, new_state):
    """Persist a new query state (a QueryHistory.STATE enum value)."""
    self.last_state = new_state.index
    self.save()

  @classmethod
  def is_canceled(cls, res):
    """True when the thrift operation state says the query was canceled or closed.

    Fixed: first parameter of this @classmethod was misleadingly named `self`.
    """
    return res.operationState in (TOperationState.CANCELED_STATE, TOperationState.CLOSED_STATE)
class SavedQuery(models.Model):
"""
Stores the query that people have save or submitted.
Note that this used to be called QueryDesign. Any references to 'design'
probably mean a SavedQuery.
"""
DEFAULT_NEW_DESIGN_NAME = _('My saved query')
AUTO_DESIGN_SUFFIX = _(' (new)')
TYPES = QUERY_TYPES
TYPES_MAPPING = {'beeswax': HQL, 'hql': HQL, 'impala': IMPALA, 'rdbms': RDBMS, 'spark': SPARK}
type = models.IntegerField(null=False)
owner = models.ForeignKey(User, db_index=True)
# Data is a json of dictionary. See the beeswax.design module.
data = models.TextField(max_length=65536)
name = models.CharField(max_length=64)
desc = models.TextField(max_length=1024)
mtime = models.DateTimeField(auto_now=True)
# An auto design is a place-holder for things users submit but not saved.
# We still want to store it as a design to allow users to save them later.
is_auto = models.BooleanField(default=False, db_index=True)
is_trashed = models.BooleanField(default=False, db_index=True, verbose_name=_t('Is trashed'),
help_text=_t('If this query is trashed.'))
doc = generic.GenericRelation(Document, related_name='hql_doc')
class Meta:
ordering = ['-mtime']
def get_design(self):
try:
return HQLdesign.loads(self.data)
except ValueError:
# data is empty
pass
def clone(self, new_owner=None):
if new_owner is None:
new_owner = self.owner
design = SavedQuery(type=self.type, owner=new_owner)
design.data = self.data
design.name = self.name
design.desc = self.desc
design.is_auto = self.is_auto
return design
@classmethod
def create_empty(cls, app_name, owner, data):
query_type = SavedQuery.TYPES_MAPPING[app_name]
design = SavedQuery(owner=owner, type=query_type)
design.name = SavedQuery.DEFAULT_NEW_DESIGN_NAME
design.desc = ''
design.data = data
design.is_auto = True
design.save()
Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
design.doc.get().add_to_history()
return design
@staticmethod
def get(id, owner=None, type=None):
"""
get(id, owner=None, type=None) -> SavedQuery object
Checks that the owner and type match (when given).
May raise PopupException (type/owner mismatch).
May raise SavedQuery.DoesNotExist.
"""
try:
design = SavedQuery.objects.get(id=id)
except SavedQuery.DoesNotExist, err:
msg = _('Cannot retrieve query id %(id)s.') % {'id': id}
raise err
if owner is not None and design.owner != owner:
msg = _('Query id %(id)s does not belong to user %(user)s.') % {'id': id, 'user': owner}
LOG.error(msg)
raise PopupException(msg)
if type is not None and design.type != type:
msg = _('Type mismatch for design id %(id)s (owner %(owner)s) - Expected %(expected_type)s, got %(real_type)s.') % \
{'id': id, 'owner': owner, 'expected_type': design.type, 'real_type': type}
LOG.error(msg)
raise PopupException(msg)
return design
def __str__(self):
return '%s %s' % (self.name, self.owner)
def get_query_context(self):
try:
return make_query_context('design', self.id)
except:
return ""
def get_absolute_url(self):
return reverse(QueryHistory.get_type_name(self.type) + ':execute_design', kwargs={'design_id': self.id})
class SessionManager(models.Manager):
  """Manager adding session-lookup helpers to the Session model."""

  def get_session(self, user, application='beeswax'):
    """Return the most recently used Session of `user` for `application`, or None."""
    candidates = self.filter(owner=user, application=application)
    try:
      return candidates.latest("last_used")
    except Session.DoesNotExist:
      return None
class Session(models.Model):
  """
  A session is bound to a user and an application (e.g. Bob with the Impala application).
  """
  owner = models.ForeignKey(User, db_index=True)
  # Presumably the thrift status code returned when the session was opened -- confirm.
  status_code = models.PositiveSmallIntegerField()
  # Base64-encoded session handle parts (see get_handle / HiveServerQueryHandle.get_decoded).
  # NOTE(review): max_length is passed as the string '100' rather than an int; Django does not
  # enforce max_length on TextField at the DB level, but this looks unintentional -- confirm.
  secret = models.TextField(max_length='100')
  guid = models.TextField(max_length='100')
  server_protocol_version = models.SmallIntegerField(default=0)
  last_used = models.DateTimeField(auto_now=True, db_index=True, verbose_name=_t('Last used'))
  application = models.CharField(max_length=128, help_text=_t('Application we communicate with.'), default='beeswax')

  objects = SessionManager()

  def get_handle(self):
    """Build the thrift TSessionHandle from the stored (base64) secret/guid."""
    secret, guid = HiveServerQueryHandle.get_decoded(secret=self.secret, guid=self.guid)

    handle_id = THandleIdentifier(secret=secret, guid=guid)
    return TSessionHandle(sessionId=handle_id)

  def __str__(self):
    return '%s %s' % (self.owner, self.last_used)
class QueryHandle(object):
  """Base container for the server-side identifiers of a submitted query.

  Subclasses provide get() returning the identifying tuple for their backend.
  """

  def __init__(self, secret, guid=None, operation_type=None, has_result_set=None, modified_row_count=None, log_context=None):
    self.secret = secret
    self.guid = guid
    self.log_context = log_context
    self.operation_type = operation_type
    self.has_result_set = has_result_set
    self.modified_row_count = modified_row_count

  def is_valid(self):
    # Truthy when the subclass-provided get() yields any usable identifier tuple.
    return bool(self.get())

  def __str__(self):
    return '%s %s' % (self.secret, self.guid)
class HiveServerQueryHandle(QueryHandle):
  """
  QueryHandle for Hive Server 2.

  Store THandleIdentifier base64 encoded in order to be unicode compatible with Django.
  """
  def __init__(self, **kwargs):
    super(HiveServerQueryHandle, self).__init__(**kwargs)
    # Immediately re-encode so the stored secret/guid are always the base64 form.
    self.secret, self.guid = self.get_encoded()

  def get(self):
    # The base64-encoded (secret, guid) pair.
    return self.secret, self.guid

  def get_rpc_handle(self):
    """Build the thrift TOperationHandle for RPC calls, decoding the stored identifiers."""
    secret, guid = self.get_decoded(self.secret, self.guid)

    # NOTE(review): looking up `operation_type` in _NAMES_TO_VALUES (name -> value) and then
    # passing the result to getattr looks inverted (_VALUES_TO_NAMES seems intended); in
    # practice a miss falls back to 'EXECUTE_STATEMENT' -- confirm against callers.
    operation = getattr(TOperationType, TOperationType._NAMES_TO_VALUES.get(self.operation_type, 'EXECUTE_STATEMENT'))
    return TOperationHandle(operationId=THandleIdentifier(guid=guid, secret=secret),
                            operationType=operation,
                            hasResultSet=self.has_result_set,
                            modifiedRowCount=self.modified_row_count)

  @classmethod
  def get_decoded(cls, secret, guid):
    # NOTE: base64.decodestring is a deprecated alias (removed in Python 3.9).
    return base64.decodestring(secret), base64.decodestring(guid)

  def get_encoded(self):
    # NOTE: base64.encodestring is a deprecated alias (removed in Python 3.9).
    return base64.encodestring(self.secret), base64.encodestring(self.guid)
# Deprecated. Could be removed.
class BeeswaxQueryHandle(QueryHandle):
  """
  QueryHandle for Beeswax.
  """
  def __init__(self, secret, has_result_set, log_context):
    super(BeeswaxQueryHandle, self).__init__(secret=secret,
                                             has_result_set=has_result_set,
                                             log_context=log_context)

  def get(self):
    # Beeswax identifies a query by secret only; there is no guid.
    return self.secret, None

  def get_rpc_handle(self):
    # NOTE(review): BeeswaxdQueryHandle is not imported in this module -- confirm where it
    # comes from before relying on this deprecated code path.
    return BeeswaxdQueryHandle(id=self.secret, log_context=self.log_context)

  # TODO remove
  def get_encoded(self):
    # NOTE(review): returns ((secret, None), None), unlike HiveServerQueryHandle.get_encoded
    # which returns (secret, guid) -- looks unintentional, but the method is slated for removal.
    return self.get(), None
class MetaInstall(models.Model):
  """
  Metadata about the installation. Should have at most one row.
  """
  installed_example = models.BooleanField()

  @staticmethod
  def get():
    """
    MetaInstall.get() -> MetaInstall object

    Returns the singleton row, or a fresh unsaved placeholder when the table is empty.
    It helps dealing with that this table has just one row.
    """
    try:
      return MetaInstall.objects.get(id=1)
    except MetaInstall.DoesNotExist:
      return MetaInstall(id=1)
| |
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponseNotAllowed, HttpResponseForbidden, \
HttpResponseBadRequest
from django.test import Client, TestCase
from reddit.models import Comment, Submission, Vote
from django.contrib.auth.models import User
from users.models import RedditUser
class TestVotingOnItems(TestCase):
def setUp(self):
self.c = Client()
self.credentials = {
'username': 'voteusername',
'password': 'password'
}
author = RedditUser.objects.create(
user=User.objects.create_user(
**self.credentials
)
)
submission = Submission.objects.create(
author=author,
author_name=author.user.username,
title="vote testing"
)
Comment.create(author=author,
raw_comment="root comment",
parent=submission).save()
def test_post_only(self):
r = self.c.get(reverse('vote'))
self.assertIsInstance(r, HttpResponseNotAllowed)
def test_logged_out(self):
test_data = {
'what': 'submission',
'what_id': 1,
'vote_value': 1
}
r = self.c.post(reverse('vote'), data=test_data)
self.assertIsInstance(r, HttpResponseForbidden)
def test_invalid_vote_value(self):
self.c.login(**self.credentials)
test_data = {
'what': 'submission',
'what_id': 1,
'vote_value': '2'
}
r = self.c.post(reverse('vote'), data=test_data)
self.assertIsInstance(r, HttpResponseBadRequest)
def test_missing_arugmnets(self):
self.c.login(**self.credentials)
r = self.c.post(reverse('vote'),
data={
'what': 'submission',
'what_id': 1
})
self.assertIsInstance(r, HttpResponseBadRequest)
r = self.c.post(reverse('vote'),
data={
'what': 'submission',
'vote_value': '1'
})
self.assertIsInstance(r, HttpResponseBadRequest)
r = self.c.post(reverse('vote'),
data={
'what_id': '1',
'vote_value': '1'
})
self.assertIsInstance(r, HttpResponseBadRequest)
r = self.c.post(reverse('vote'), data={})
self.assertIsInstance(r, HttpResponseBadRequest)
def test_invalid_vote_object_id(self):
self.c.login(**self.credentials)
for what_type in ['comment', 'submission']:
test_data = {
'what': what_type,
'what_id': 9999,
'what_value': '1'
}
r = self.c.post(reverse('vote'), data=test_data)
self.assertIsInstance(r, HttpResponseBadRequest)
def test_submission_first_vote(self):
submission = Submission.objects.filter(title="vote testing").first()
self.assertIsNotNone(submission)
self.c.login(**self.credentials)
r = self.c.post(reverse('vote'),
data={
'what': 'submission',
'what_id': submission.id,
'vote_value': '1'
})
self.assertEqual(r.status_code, 200)
json_r = json.loads(r.content.decode("utf-8"))
self.assertIsNone(json_r['error'])
self.assertEqual(json_r['voteDiff'], 1)
submission = Submission.objects.filter(title="vote testing").first()
self.assertEqual(submission.score, 1)
def test_submission_vote_cancel_or_reverse(self):
submission = Submission.objects.filter(title="vote testing").first()
user = RedditUser.objects.get(
user=User.objects.get(username=self.credentials['username']))
self.assertIsNotNone(submission)
self.assertIsNotNone(user)
Vote.create(user=user, vote_object=submission, vote_value=1).save()
self.c.login(**self.credentials)
r = self.c.post(reverse('vote'),
data={
'what': 'submission',
'what_id': submission.id,
'vote_value': '1'
})
self.assertEqual(r.status_code, 200)
json_r = json.loads(r.content.decode("utf-8"))
self.assertIsNone(json_r['error'])
self.assertEqual(json_r['voteDiff'], -1)
vote = Vote.objects.get(vote_object_type=submission.get_content_type(),
vote_object_id=submission.id,
user=user)
vote.value = 1
vote.save()
self.c.login(**self.credentials)
r = self.c.post(reverse('vote'),
data={
'what': 'submission',
'what_id': submission.id,
'vote_value': '-1'
})
self.assertEqual(r.status_code, 200)
json_r = json.loads(r.content.decode("utf-8"))
self.assertIsNone(json_r['error'])
self.assertEqual(json_r['voteDiff'], -2)
    def test_comment_first_vote(self):
        """Voting '1' on a comment with no prior vote raises its score by 1."""
        submission = Submission.objects.filter(title="vote testing").first()
        self.assertIsNotNone(submission)
        comment = Comment.objects.filter(submission=submission).first()
        self.assertIsNotNone(comment)
        self.c.login(**self.credentials)
        r = self.c.post(reverse('vote'),
                        data={
                            'what': 'comment',
                            'what_id': comment.id,
                            'vote_value': '1'
                        })
        self.assertEqual(r.status_code, 200)
        json_r = json.loads(r.content.decode("utf-8"))
        self.assertIsNone(json_r['error'])
        self.assertEqual(json_r['voteDiff'], 1)
        # Re-fetch the comment: the stored score must reflect the vote.
        scomment = Comment.objects.filter(submission=submission).first()
        self.assertEqual(scomment.score, 1)
    def test_comment_vote_cancel_or_reverse(self):
        """Comment analogue of the submission cancel/reverse test.

        Same-value vote cancels (diff -1); after resetting the stored
        vote to +1, an opposite vote reverses it (diff -2).
        """
        submission = Submission.objects.filter(title="vote testing").first()
        user = RedditUser.objects.get(
            user=User.objects.get(username=self.credentials['username']))
        self.assertIsNotNone(submission)
        self.assertIsNotNone(user)
        comment = Comment.objects.filter(submission=submission).first()
        self.assertIsNotNone(comment)
        # Seed an existing +1 vote by this user.
        Vote.create(user=user, vote_object=comment, vote_value=1).save()
        self.c.login(**self.credentials)
        r = self.c.post(reverse('vote'),
                        data={
                            'what': 'comment',
                            'what_id': comment.id,
                            'vote_value': '1'
                        })
        self.assertEqual(r.status_code, 200)
        json_r = json.loads(r.content.decode("utf-8"))
        self.assertIsNone(json_r['error'])
        # Same-value vote cancels the existing vote.
        self.assertEqual(json_r['voteDiff'], -1)
        # Restore the stored vote to +1 so the next request is a reversal.
        vote = Vote.objects.get(vote_object_type=comment.get_content_type(),
                                vote_object_id=comment.id,
                                user=user)
        vote.value = 1
        vote.save()
        self.c.login(**self.credentials)
        r = self.c.post(reverse('vote'),
                        data={
                            'what': 'comment',
                            'what_id': comment.id,
                            'vote_value': '-1'
                        })
        self.assertEqual(r.status_code, 200)
        json_r = json.loads(r.content.decode("utf-8"))
        self.assertIsNone(json_r['error'])
        # Reversal swings the score by two (remove +1, apply -1).
        self.assertEqual(json_r['voteDiff'], -2)
| |
import collections
from django import http
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from django.db import transaction
from django.views.decorators.cache import cache_page
from django.db.models import Q
from funfactory.urlresolvers import reverse
from jsonview.decorators import json_view
from airmozilla.manage.utils import filename_to_notes
from airmozilla.base.utils import dot_dict
from airmozilla.main.helpers import thumbnail
from airmozilla.main.models import Event, Picture
from airmozilla.manage import forms
from .decorators import staff_required, permission_required
from .utils import can_edit_event
@staff_required
def picturegallery(request):
    """Render the picture gallery page, optionally scoped to one event."""
    context = {}
    event_id = request.GET.get('event')
    if event_id:
        event = get_object_or_404(Event, id=event_id)
        # Non-editors are bounced back to the gallery overview.
        outcome = can_edit_event(
            event,
            request.user,
            default='manage:picturegallery'
        )
        if isinstance(outcome, http.HttpResponse):
            return outcome
        context['event'] = event
    return render(request, 'manage/picturegallery.html', context)
@staff_required
@json_view
def picturegallery_data(request):
    """JSON payload for the gallery: pictures, URL templates and stats.

    If ``?event=<id>`` is given, the picture list is scoped to that event
    (see ``_get_all_pictures``).
    """
    context = {}
    if request.GET.get('event'):
        event = get_object_or_404(Event, id=request.GET['event'])
    else:
        event = None
    items = _get_all_pictures(event=event)
    context['pictures'] = items
    # URLs are reversed with a placeholder id of '0'; the client-side code
    # substitutes the real picture/event id.
    context['urls'] = {
        'manage:picture_edit': reverse('manage:picture_edit', args=('0',)),
        'manage:picture_delete': reverse('manage:picture_delete', args=('0',)),
        'manage:picture_delete_all': reverse(
            'manage:picture_delete_all', args=('0',)
        ),
        'manage:redirect_picture_thumbnail': reverse(
            'manage:redirect_picture_thumbnail', args=('0',)
        ),
        'manage:picture_event_associate': reverse(
            'manage:picture_event_associate', args=('0',)
        ),
        'manage:event_edit': reverse('manage:event_edit', args=('0',)),
    }
    context['stats'] = {
        'total_pictures': Picture.objects.all().count(),
        'event_pictures': Picture.objects.filter(event__isnull=False).count(),
    }
    return context
def _get_all_pictures(event=None):
    """Return a list of serializable picture dicts for the gallery.

    With an ``event``, includes that event's pictures plus unattached
    ones; without, only unattached pictures.  Each dict also carries the
    events using the picture and a ``cant_delete`` flag when an event
    depends on it.
    """
    # First pass: which events use which picture, and which pictures are
    # the only image an event has (those must not be deleted).
    values = (
        'id',
        'title',
        'placeholder_img',
        'picture_id',
        # 'default_placeholder',
    )
    event_map = collections.defaultdict(list)
    cant_delete = collections.defaultdict(bool)
    for each in Event.objects.filter(picture__isnull=False).values(*values):
        event_map[each['picture_id']].append({
            'id': each['id'],
            'title': each['title']
        })
        if not each['placeholder_img']:
            # then you can definitely not delete this picture
            cant_delete[each['picture_id']] = True
    pictures = []
    # NOTE(review): 'modified' and 'modified_user' are fetched below but
    # never used in the serialized output — confirm before trimming.
    values = (
        'id',
        'size',
        'width',
        'height',
        'notes',
        'created',
        'modified',
        'modified_user',
        'event_id',
        'default_placeholder',
    )
    qs = Picture.objects.all()
    if event:
        qs = qs.filter(
            Q(event__isnull=True) |
            Q(event=event)
        )
    else:
        qs = qs.filter(event__isnull=True)
    for picture_dict in qs.order_by('event', '-created').values(*values):
        picture = dot_dict(picture_dict)
        item = {
            'id': picture.id,
            'width': picture.width,
            'height': picture.height,
            'size': picture.size,
            'created': picture.created.isoformat(),
            'events': event_map[picture.id],
            'event': picture.event_id,
            'default_placeholder': picture.default_placeholder,
        }
        if cant_delete.get(picture.id):
            item['cant_delete'] = True
        if picture.notes:
            item['notes'] = picture.notes
        # if picture.id in event_map:
        #     item['events'] = event_map[picture.id]
        pictures.append(item)
    return pictures
@staff_required
@permission_required('main.change_picture')
@transaction.commit_on_success
def picture_edit(request, id):
    """Edit a picture; enforces at most one default placeholder.

    On a valid POST, saves the form and — if this picture became the
    default placeholder — clears the flag on every other picture.
    """
    picture = get_object_or_404(Picture, id=id)
    context = {'picture': picture}
    if request.method == 'POST':
        form = forms.PictureForm(request.POST, request.FILES, instance=picture)
        if form.is_valid():
            picture = form.save()
            if picture.default_placeholder:
                # make all others NOT-default
                qs = (
                    Picture.objects
                    .exclude(id=picture.id)
                    .filter(default_placeholder=True)
                )
                # Saved one-by-one (not .update()) so model save logic runs.
                for other in qs:
                    other.default_placeholder = False
                    other.save()
            return redirect('manage:picturegallery')
    else:
        form = forms.PictureForm(instance=picture)
    context['form'] = form
    return render(request, 'manage/picture_edit.html', context)
@staff_required
@permission_required('main.delete_picture')
@transaction.commit_on_success
@json_view
def picture_delete(request, id):
    """Delete a picture unless some event would be left without an image."""
    picture = get_object_or_404(Picture, id=id)
    dependent_events = Event.objects.filter(picture=picture)
    for event in dependent_events:
        # An event with no placeholder image relies on this picture.
        if not event.placeholder_img:
            return http.HttpResponseBadRequest("Can't delete this")
    picture.delete()
    return True
@require_POST
@staff_required
@permission_required('main.delete_picture')
@transaction.commit_on_success
@json_view
def picture_delete_all(request, id):
    """Delete every picture attached to event ``id``.

    If the event currently uses one of those pictures, it is detached
    first; the assert guards that the event still has a placeholder
    image to fall back on.
    """
    event = get_object_or_404(Event, id=id)
    pictures = Picture.objects.filter(event=event)
    if event.picture and event.picture in pictures:
        # Never leave the event with no image at all.
        assert event.placeholder_img
        event.picture = None
        event.save()
    pictures.delete()
    return True
@staff_required
@permission_required('main.add_picture')
@transaction.commit_on_success
@json_view
def picture_add(request):
    """Upload a new picture, optionally in the context of one event.

    A POST with ``remove`` undoes the most recent upload matching the
    given name/size by this user (used when the uploader changes their
    mind client-side).
    """
    context = {}
    if request.GET.get('event'):
        event = get_object_or_404(Event, id=request.GET.get('event'))
        # Non-editors are bounced back to the gallery overview.
        result = can_edit_event(
            event,
            request.user,
            default='manage:picturegallery'
        )
        if isinstance(result, http.HttpResponse):
            return result
        context['event'] = event
    if request.method == 'POST':
        if request.POST.get('remove'):
            # this is for when you change your mind
            size = request.POST['size']
            filename = request.POST['name']
            notes = filename_to_notes(filename)
            # Match on the derived notes + size + uploader; delete only
            # the most recent match.
            matches = Picture.objects.filter(
                notes=notes,
                size=int(size),
                modified_user=request.user
            )
            for picture in matches.order_by('-created')[:1]:
                picture.delete()
                return True
            return False
        form = forms.PictureForm(request.POST, request.FILES)
        if form.is_valid():
            picture = form.save(commit=False)
            picture.modified_user = request.user
            picture.save()
            return redirect('manage:picturegallery')
    else:
        form = forms.PictureForm()
    context['form'] = form
    return render(request, 'manage/picture_add.html', context)
@cache_page(60)
def redirect_picture_thumbnail(request, id):
    """Redirect to a generated thumbnail of the picture (response cached 60s)."""
    picture = get_object_or_404(Picture, id=id)
    thumb = thumbnail(
        picture.file,
        request.GET.get('geometry', '100x100'),
        crop=request.GET.get('crop', 'center'),
    )
    return redirect(thumb.url)
@staff_required
@require_POST
@transaction.commit_on_success
@permission_required('main.change_event')
@json_view
def picture_event_associate(request, id):
    """Attach picture ``id`` to the event given in ``POST['event']``."""
    picture = get_object_or_404(Picture, id=id)
    event_id = request.POST.get('event')
    if not event_id:
        return http.HttpResponseBadRequest("Missing 'event'")
    event = get_object_or_404(Event, id=event_id)
    event.picture = picture
    event.save()
    return True
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cspolicy_cspolicylabel_binding(base_resource) :
    """ Binding class showing the cspolicylabel that can be bound to cspolicy.

    NOTE: auto-generated NITRO API binding code; the read-only properties
    mirror fields returned by the NetScaler appliance.
    """
    def __init__(self) :
        # Attribute backing fields populated by the payload formatter.
        self._domain = ""
        self._url = ""
        self._priority = 0
        self._hits = 0
        self._labeltype = ""
        self._labelname = ""
        self._policyname = ""
        # Result count returned by count/count_filtered queries.
        self.___count = 0

    @property
    def policyname(self) :
        """Name of the content switching policy to display. If this parameter is omitted, details of all the policies are displayed.<br/>Minimum length = 1.
        """
        try :
            return self._policyname
        except Exception as e:
            raise e

    @policyname.setter
    def policyname(self, policyname) :
        """Name of the content switching policy to display. If this parameter is omitted, details of all the policies are displayed.<br/>Minimum length = 1
        """
        try :
            self._policyname = policyname
        except Exception as e:
            raise e

    @property
    def domain(self) :
        """The domain name. The string value can range to 63 characters.<br/>Minimum length = 1.
        """
        try :
            return self._domain
        except Exception as e:
            raise e

    @domain.setter
    def domain(self, domain) :
        """The domain name. The string value can range to 63 characters.<br/>Minimum length = 1
        """
        try :
            self._domain = domain
        except Exception as e:
            raise e

    @property
    def priority(self) :
        """priority of bound policy.
        """
        try :
            return self._priority
        except Exception as e:
            raise e

    @property
    def labelname(self) :
        """Name of the label invoked.
        """
        try :
            return self._labelname
        except Exception as e:
            raise e

    @property
    def hits(self) :
        """Total number of hits.
        """
        try :
            return self._hits
        except Exception as e:
            raise e

    @property
    def url(self) :
        """URL string that is matched with the URL of a request. Can contain a wildcard character. Specify the string value in the following format: [[prefix] [*]] [.suffix].<br/>Minimum length = 1<br/>Maximum length = 208.
        """
        try :
            return self._url
        except Exception as e:
            raise e

    @property
    def labeltype(self) :
        """The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
        """
        try :
            return self._labeltype
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(cspolicy_cspolicylabel_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 means the session expired on the appliance.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.cspolicy_cspolicylabel_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            if (self.policyname) :
                return str(self.policyname)
            return None
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, policyname) :
        """ Use this API to fetch cspolicy_cspolicylabel_binding resources.
        """
        try :
            obj = cspolicy_cspolicylabel_binding()
            obj.policyname = policyname
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, policyname, filter_) :
        """ Use this API to fetch filtered set of cspolicy_cspolicylabel_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = cspolicy_cspolicylabel_binding()
            obj.policyname = policyname
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, policyname) :
        """ Use this API to count cspolicy_cspolicylabel_binding resources configued on NetScaler.
        """
        try :
            obj = cspolicy_cspolicylabel_binding()
            obj.policyname = policyname
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            if response :
                # The count is carried in the first element's ___count field.
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, policyname, filter_) :
        """ Use this API to count the filtered set of cspolicy_cspolicylabel_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = cspolicy_cspolicylabel_binding()
            obj.policyname = policyname
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    class Labeltype:
        # Allowed values for the labeltype attribute.
        reqvserver = "reqvserver"
        resvserver = "resvserver"
        policylabel = "policylabel"
class cspolicy_cspolicylabel_binding_response(base_response) :
    """Response wrapper holding a list of binding resources plus status."""
    def __init__(self, length=1) :
        # Pre-allocate one binding object per expected result row; the
        # payload formatter fills these in.
        self.cspolicy_cspolicylabel_binding = [
            cspolicy_cspolicylabel_binding() for _ in range(length)
        ]
        # Scalar response metadata.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
| |
# Monitors the catdoor using MQTT topics
# and sends notification using pushbullet
from time import localtime, strftime, sleep
from datetime import datetime
from array import array
import threading
import netrc
import paho.mqtt.client as mqtt
from pushbullet import Pushbullet
def time2str(t):
    """Format a time.struct_time as 'YYYY-MM-DD_HH:MM:SS'."""
    fmt = "%Y-%m-%d_%H:%M:%S"
    return strftime(fmt, t)
# Read the pushbullet API key from the 'catdoor' machine entry in ~/.netrc
# (the key is stored in the password field).  The script is useless
# without it, so exit immediately on any failure.
try:
    netrckeys = netrc.netrc()
    API_KEY = netrckeys.authenticators('catdoor')[2]
except Exception:
    print("Failed to get the catdoor key from the .netrc file!")
    quit()
def pb_publish(title, msg):
    """Push a pushbullet note, retrying every 2 minutes until it succeeds.

    Runs on a worker thread (see publish()); retries forever so transient
    network outages do not lose notifications.
    NOTE: Python 2 syntax (print statements, `except Exception, ex`).
    """
    global API_KEY
    published = False
    while not published:
        try:
            print "pushing notification:"
            print "   title = " + title
            print "   message = " + msg
            pb = Pushbullet(API_KEY)
            push = pb.push_note(title, msg)
            published = True
        except Exception, ex:
            print "Unable to send notification: %s" % ex
            print "Will try again in 2 minutes"
            sleep(120)
def publish(title, msg):
    """Send a pushbullet notification without blocking the caller."""
    worker = threading.Thread(target=pb_publish, args=[title, msg])
    worker.start()
class Heartbeat(object):
    """Watches /catdoor/heartbeat messages and detects the device dying.

    A one-shot watchdog timer is re-armed on every heartbeat; if it fires,
    no heartbeat arrived for `max_silence_sec` seconds and the catdoor is
    declared dead.  Each heartbeat also checks the device RTC against
    local time.
    """
    def __init__(self, max_silence_sec=60):
        self.alive = False        # True once a heartbeat has been seen
        self.last_beat = None     # localtime() of the latest heartbeat
        self.timer = None         # pending threading.Timer watchdog
        self.out_of_sync = None   # tri-state: None = sync state unknown yet
        self.duration = max_silence_sec

    def fresh_heartbeat(self):
        """First heartbeat after silence: announce the catdoor is alive."""
        self.alive = True
        title = "Catdoor Notification"
        msg = "Catdoor is alive (first heartbeat @ " + time2str(self.last_beat) + ")"
        print(time2str(localtime()) + " | " + msg)
        publish(title, msg)

    def missing_heartbeat(self):
        """Watchdog fired: no heartbeat for `duration` seconds."""
        self.alive = False
        self.out_of_sync = None
        title = "Catdoor Error"
        msg = "Catdoor considered dead (last published heartbeat @ "+time2str(self.last_beat)+")"
        print(time2str(localtime())+" | " +msg)
        publish(title, msg)

    def heartbeat(self, msg):
        """MQTT handler: record the beat, re-arm the watchdog, check clock."""
        self.last_beat = localtime()
        print(time2str(self.last_beat)+" | got heartbeat msg : "+msg.payload)
        if not self.alive:
            self.fresh_heartbeat()
        # Re-arm the one-shot watchdog timer.
        if self.timer != None:
            self.timer.cancel()
        self.timer = threading.Timer(self.duration, self.missing_heartbeat)
        self.timer.start()
        self.check_sync(msg)

    def check_sync(self, msg):
        """Compare the RTC timestamp in the payload against local time.

        The payload starts with 'YYYY-MM-DD HH:MM:SS'.  An hour is added
        when DST is active — presumably because the device RTC does not
        track DST (TODO confirm).  Publishes a notification on each
        transition across the 20-second tolerance.
        """
        args = msg.payload.split()
        strt = args[0]+" "+args[1]
        rtc = datetime.strptime(strt, '%Y-%m-%d %H:%M:%S')
        now = datetime.now()
        dst = localtime().tm_isdst
        diff = (rtc-now).total_seconds()+dst*3600
        if abs(diff) > 20:
            if self.out_of_sync != True:
                title = "Catdoor Notification"
                offset = "Catdoor clock is not in sync! (offset="+\
                    str(int(diff))+"s)"
                publish(title, offset)
                self.out_of_sync = True
        else:
            if self.out_of_sync != False:
                title = "Catdoor Notification"
                offset = "Catdoor clock is in sync. (offset="+\
                    str(int(diff))+"s)"
                publish(title, offset)
                self.out_of_sync = False
class DoorState(object):
    """Tracks the catdoor position from /catdoor/doorstate MQTT messages.

    Publishes a notification when the door closes after an IN or OUT
    swing, and a jam warning when the door stays open longer than
    `max_opened_state_sec` seconds.
    """
    def __init__(self, max_opened_state_sec):
        self.state = None      # last raw state token (e.g. "CLOSED")
        self.open_out = None   # True while an outward swing is in progress
        self.open_in = None    # True while an inward swing is in progress
        self.jammed = None     # True while the door is presumed stuck
        self.timer = None      # pending jam-detection threading.Timer
        self.duration = max_opened_state_sec

    def door_jammed(self):
        """Timer callback: the door stayed open too long."""
        self.jammed = True
        title = "Catdoor Warning"
        msg = "Door did not close correctly! @ "+time2str(localtime())
        print(time2str(localtime())+" | " + msg)
        publish(title, msg)

    def door_unstuck(self):
        """The door finally closed after having been flagged as jammed."""
        self.jammed = False
        title = "Catdoor Notification"
        msg = "Door closed normally after being stuck @ "+time2str(localtime())
        print(time2str(localtime())+" | " + msg)
        publish(title, msg)

    def door_cycle(self):
        """Notify that a complete IN or OUT passage has finished."""
        direction = None
        msg = None
        if self.open_out:
            self.open_out = False
            direction = "OUT"
            msg = "Luna went out for a walk @ "+time2str(localtime())
        if self.open_in:
            self.open_in = False
            direction = "IN"
            msg = "Luna came in for confort @ "+time2str(localtime())
        if direction is None:
            # Defensive: callers only invoke this after an open was seen,
            # so this should not happen; avoid an UnboundLocalError if it does.
            return
        print(time2str(localtime())+" | door cycle " + direction + " complete.")
        title = "Catdoor Cycled " + direction
        publish(title, msg)

    def new_state(self, msg):
        """Handle a doorstate message (payload ends with the state token)."""
        print (time2str(localtime())+" | got new doorstate : "+msg.payload)
        self.state = msg.payload.split()[3]
        if self.state == "OPEN_OUT":
            self.open_out = True
        if self.state == "OPEN_IN":
            self.open_in = True
        if self.state == "OPEN_OUT" or self.state == "AJAR_OUT":
            # BUGFIX: the original guarded this with `if self.timer != None`,
            # but self.timer is only ever assigned inside that guard (and
            # reset to None on CLOSE), so the jam timer was never started
            # and door_jammed() was dead code.  Cancel any pending timer,
            # then (re)arm jam detection.
            if self.timer is not None:
                self.timer.cancel()
            self.timer = threading.Timer(self.duration, self.door_jammed)
            self.timer.start()
        if self.state == "CLOSED":
            print(time2str(localtime())+" | door back to closed position.")
            if self.timer != None:
                self.timer.cancel()
                self.timer = None
            if self.jammed == True:
                self.door_unstuck()
            if self.open_in == True or self.open_out == True:
                self.door_cycle()
class BatteryMonitor(object):
    """Classifies power state from a moving average of battery voltage.

    Voltage readings arrive via /catdoor/battery_v; a circular buffer of
    `averaging_window_size` samples smooths them before threshold
    comparison.  Modes: NO_BATTERY / LOW_BATTERY / BATTERY / CHARGING /
    FULL / UNKNOWN.
    """
    def __init__(self, averaging_window_size):
        # Threshold volts — presumably tuned for a single LiPo cell;
        # TODO confirm against the hardware.
        self.low_battery_threshold = 3.65
        self.use_battery_threshold = 4.15
        self.battery_full_threshold = 4.20
        self.charging_threshold = 3.75
        self.no_battery_threshold = 4.35
        self.significant_change = 0.04
        self.values = array('f')   # circular buffer of recent samples
        self.window = averaging_window_size
        self.index = 0             # next write position in the buffer
        self.mode = 'UNKNOWN'
        self.prev_volts = self.use_battery_threshold

    def __add(self, v):
        """Append v, overwriting the oldest sample once the buffer is full."""
        if len(self.values) < self.window:
            self.values.append(v)
        else :
            self.values[self.index] = v
        self.index = self.index+1
        if self.index == self.window:
            self.index = 0

    def __avg(self):
        # NOTE: local `sum` shadows the builtin of the same name.
        sum = 0.0
        # inefficient method to avoid juggling with indexes
        for v in self.values:
            sum = sum + v
        return sum / len(self.values)

    def new_voltage(self, msg):
        """MQTT handler: fold in a reading and re-classify the power mode."""
        print (time2str(localtime())+" | got new battery_v : "+msg.payload)
        self.__add(float(msg.payload.split()[3]))
        volts = self.__avg()
        # NOTE: Python 2 print statement; under Python 3 this line would
        # apply % to print()'s None return value.
        print ("Average Voltage = %.3f V / current mode = %s") % (volts, self.mode)
        if volts > self.no_battery_threshold:
            if self.mode != 'NO_BATTERY':
                self.mode = 'NO_BATTERY'
                # publish("Catdoor Warning", "No battery present!")
        elif volts < self.low_battery_threshold:
            if self.mode != 'LOW_BATTERY':
                self.mode = 'LOW_BATTERY'
                # publish("Catdoor Error", "LOW battery!")
        elif self.mode == 'LOW_BATTERY':
            # Recovering from LOW: decide between charging and discharging.
            if volts > self.charging_threshold:
                self.mode = 'CHARGING'
            elif volts > self.low_battery_threshold:
                self.mode = 'BATTERY'
        elif self.battery_full_threshold < volts and volts < self.no_battery_threshold:
            if self.mode != 'FULL':
                self.mode = 'FULL'
                # publish("Catdoor Notification", "Battery is fully charged!")
        elif volts < self.use_battery_threshold and \
             volts < self.prev_volts:
            # Falling voltage below the "use battery" level: discharging.
            if self.mode != 'BATTERY':
                self.mode = 'BATTERY'
                # publish("Catdoor Warning", "Running on Battery Power")
        elif volts > self.charging_threshold and \
             volts > self.prev_volts:
            # Rising voltage above the charge level: on external power.
            if self.mode != 'CHARGING':
                self.mode = 'CHARGING'
                # publish("Catdoor Notification", "Running on External Power")
        # Only track trend changes larger than the noise floor.
        if abs(self.prev_volts-volts) > self.significant_change:
            self.prev_volts = volts
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: log the result code, subscribe to all catdoor topics."""
    status = "Connected with result code " + str(rc)
    print(status)
    client.subscribe("/catdoor/#")
def on_message(client, userdata, msg):
    """paho-mqtt message callback: dispatch to the handler registered for the topic.

    `userdata` is the topic -> handler dict passed to the Client constructor.
    """
    handler = userdata[msg.topic]
    handler(msg)
def catdoor_message(msg):
    """Turn a /catdoor/message payload into a pushbullet notification.

    The fourth payload token selects the notification text; LOCKED and
    UNLOCKED messages include the scheduled time from the fifth token.
    """
    print (time2str(localtime())+" | got new message : "+msg.payload)
    title = "Catdoor Notification"
    args = msg.payload.split()
    key = args[3]
    if key == "LOCKED":
        text = "LOCKED (will re-open tomorrow morning around "+args[4]+")"
    elif key == "UNLOCKED":
        text = "UNLOCKED (will close this afternoon at "+args[4]+")"
    else:
        text = msg.payload
    publish(title, text)
def catdoor_solenoids(msg):
    """Log solenoid state updates; no notification is sent for these."""
    line = time2str(localtime())+" | got new solenoids : "+msg.payload
    print (line)
def catdoor_proximity(msg):
    """Log proximity sensor updates; no notification is sent for these."""
    line = time2str(localtime())+" | got new proximity : "+msg.payload
    print (line)
# --- Module-level wiring: build the monitors, map topics to handlers and
# --- run the MQTT loop forever.
beatcheck = Heartbeat(90)             # dead if silent for 90 s
doorstate = DoorState(60)             # jammed if open for 60 s
battery_monitor = BatteryMonitor(9)   # average over 9 voltage samples
print("catdoor_pusbullet starting...")
# Topic -> handler table; passed to the client as `userdata` so that
# on_message() can dispatch on msg.topic.
topiclist = {
    "/catdoor/message" :   catdoor_message,
    "/catdoor/heartbeat" : beatcheck.heartbeat,
    "/catdoor/solenoids" : catdoor_solenoids,
    "/catdoor/doorstate" : doorstate.new_state,
    "/catdoor/proximity" : catdoor_proximity,
    "/catdoor/battery_v" : battery_monitor.new_voltage
}
# Client(client_id, clean_session, userdata)
mqttClient = mqtt.Client("CatdoorSubscriber", True, topiclist)
mqttClient.on_connect = on_connect
mqttClient.on_message = on_message
mqttClient.connect("172.16.0.11")  # local broker address
mqttClient.loop_forever()          # blocks; handlers run from here
| |
"""
A collection of higher-level Twisted Web resources, suitable for use with any
existing ``IResource`` implementations.
`SpinneretResource` adapts an `ISpinneretResource` to `IResource`.
`ContentTypeNegotiator` will negotiate a resource based on the ``Accept``
header.
"""
from twisted.internet.defer import Deferred, maybeDeferred, succeed
from twisted.python.compat import nativeString
from twisted.python.urlpath import URLPath
from twisted.web import http
from twisted.web.error import UnsupportedMethod
from twisted.web.iweb import IRenderable
from twisted.web.resource import (
IResource, NoResource, Resource, _computeAllowedMethods)
from twisted.web.server import NOT_DONE_YET
from twisted.web.template import renderElement
from twisted.web.util import DeferredResource, Redirect
from txspinneret.interfaces import ISpinneretResource
from txspinneret.util import _parseAccept
def _renderResource(resource, request):
    """
    Render a given resource.

    See `IResource.render <twisted:twisted.web.resource.IResource.render>`.
    """
    methodName = 'render_' + nativeString(request.method)
    meth = getattr(resource, methodName, None)
    if meth is not None:
        return meth(request)
    # No renderer for this HTTP method: report which methods are allowed.
    try:
        allowedMethods = resource.allowedMethods
    except AttributeError:
        allowedMethods = _computeAllowedMethods(resource)
    raise UnsupportedMethod(allowedMethods)
class NotAcceptable(Resource):
    """
    Leaf resource that renders an empty body for ``406 Not Acceptable``.
    """
    isLeaf = True

    def render(self, request):
        # Only the status code matters; the body is intentionally empty.
        request.setResponseCode(http.NOT_ACCEPTABLE)
        return b''
class NotFound(NoResource):
    """
    Leaf resource that renders a page for ``404 Not Found``.
    """
    def __init__(self):
        # Delegate to NoResource with a fixed message body.
        NoResource.__init__(self, b'Resource not found')
class _RenderableResource(Resource):
    """
    Adapter from `IRenderable` to `IResource`.

    Renders the wrapped renderable with the given doctype prepended.
    """
    isLeaf = True

    def __init__(self, renderable, doctype=b'<!DOCTYPE html>'):
        Resource.__init__(self)
        self._renderable = renderable
        self._doctype = doctype

    def render_GET(self, request):
        request.setResponseCode(http.OK)
        return renderElement(request, self._renderable, self._doctype)
class SpinneretResource(Resource):
    """
    Adapter from `ISpinneretResource` to `IResource`.
    """
    def __init__(self, wrappedResource):
        """
        :type wrappedResource: `ISpinneretResource`
        :param wrappedResource: Spinneret resource to wrap in an `IResource`.
        """
        self._wrappedResource = wrappedResource
        Resource.__init__(self)

    def _adaptToResource(self, result):
        """
        Adapt a result to `IResource`.

        Several adaptions are tried they are, in order: ``None``,
        `IRenderable <twisted:twisted.web.iweb.IRenderable>`, `IResource
        <twisted:twisted.web.resource.IResource>`, and `URLPath
        <twisted:twisted.python.urlpath.URLPath>`. Anything else is returned as
        is.

        A `URLPath <twisted:twisted.python.urlpath.URLPath>` is treated as
        a redirect.
        """
        if result is None:
            return NotFound()
        spinneretResource = ISpinneretResource(result, None)
        if spinneretResource is not None:
            return SpinneretResource(spinneretResource)
        renderable = IRenderable(result, None)
        if renderable is not None:
            return _RenderableResource(renderable)
        resource = IResource(result, None)
        if resource is not None:
            return resource
        if isinstance(result, URLPath):
            return Redirect(str(result))
        return result

    def getChildWithDefault(self, path, request):
        # `locateChild` returns (result, remaining_segments); the
        # remaining segments are written back into request.postpath
        # before the result is adapted to a resource.
        def _setSegments(result):
            result, segments = result
            request.postpath[:] = segments
            return result

        def _locateChild(request, segments):
            # Fall back to 404 when the wrapped resource provides no
            # `locateChild`.
            def _defaultLocateChild(request, segments):
                return NotFound(), []
            locateChild = getattr(
                self._wrappedResource, 'locateChild', _defaultLocateChild)
            return locateChild(request, segments)

        d = maybeDeferred(
            _locateChild, request, request.prepath[-1:] + request.postpath)
        d.addCallback(_setSegments)
        d.addCallback(self._adaptToResource)
        return DeferredResource(d)

    def _handleRenderResult(self, request, result):
        """
        Handle the result from `IResource.render`.

        If the result is a `Deferred` then return `NOT_DONE_YET` and add
        a callback to write the result to the request when it arrives.
        """
        def _requestFinished(result, cancel):
            # Cancel the pending Deferred when the request finishes
            # (e.g. the client disconnected).
            cancel()
            return result

        if not isinstance(result, Deferred):
            result = succeed(result)

        def _whenDone(result):
            # Adapted results may themselves be resources; plain values
            # are written through unchanged.
            render = getattr(result, 'render', lambda request: result)
            renderResult = render(request)
            if renderResult != NOT_DONE_YET:
                request.write(renderResult)
                request.finish()
            return result

        request.notifyFinish().addBoth(_requestFinished, result.cancel)
        result.addCallback(self._adaptToResource)
        result.addCallback(_whenDone)
        result.addErrback(request.processingFailed)
        return NOT_DONE_YET

    def render(self, request):
        # This is kind of terrible but we need `_RouterResource.render` to be
        # called to handle the null route. Finding a better way to achieve this
        # would be great.
        if hasattr(self._wrappedResource, 'render'):
            result = self._wrappedResource.render(request)
        else:
            result = _renderResource(self._wrappedResource, request)
        return self._handleRenderResult(request, result)
class ContentTypeNegotiator(Resource):
    """
    Negotiate an appropriate representation based on the ``Accept`` header.

    Rendering this resource will negotiate a representation and render the
    matching handler.
    """
    def __init__(self, handlers, fallback=False):
        """
        :type handlers: ``iterable`` of `INegotiableResource` and either
            `IResource` or `ISpinneretResource`.
        :param handlers: Iterable of negotiable resources, either
            `ISpinneretResource` or `IResource`, to use as handlers for
            negotiation.

        :type fallback: `bool`
        :param fallback: Fall back to the first handler in the case where
            negotiation fails?
        """
        Resource.__init__(self)
        self._handlers = list(handlers)
        self._fallback = fallback
        # Map each accepted content type (lowercased key) to its handler;
        # duplicates are a configuration error.
        self._acceptHandlers = {}
        for handler in self._handlers:
            for acceptType in handler.acceptTypes:
                if acceptType in self._acceptHandlers:
                    raise ValueError(
                        'Duplicate handler for %r' % (acceptType,))
                self._acceptHandlers[acceptType] = handler

    def _negotiateHandler(self, request):
        """
        Negotiate a handler based on the content types acceptable to the
        client.

        :rtype: 2-`tuple` of `twisted.web.iweb.IResource` and `bytes`
        :return: Pair of a resource and the content type.
        """
        accept = _parseAccept(request.requestHeaders.getRawHeaders('Accept'))
        for contentType in accept.keys():
            handler = self._acceptHandlers.get(contentType.lower())
            if handler is not None:
                return handler, handler.contentType
        if self._fallback:
            # Negotiation failed but a fallback was requested.
            handler = self._handlers[0]
            return handler, handler.contentType
        return NotAcceptable(), None

    def render(self, request):
        resource, contentType = self._negotiateHandler(request)
        if contentType is not None:
            request.setHeader(b'Content-Type', contentType)
        # Spinneret resources must be wrapped before Twisted renders them.
        spinneretResource = ISpinneretResource(resource, None)
        if spinneretResource is not None:
            resource = SpinneretResource(spinneretResource)
        return resource.render(request)
# Public API of this module.
__all__ = [
    'SpinneretResource', 'ContentTypeNegotiator', 'NotAcceptable', 'NotFound']
| |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import number_types as N
from .number_types import (UOffsetTFlags, SOffsetTFlags, VOffsetTFlags)
from . import encode
from . import packer
from . import compat
from .compat import range_func
from .compat import memoryview_type
from .compat import import_numpy, NumpyRequiredForThisFeature
# Optional numpy handle; None when numpy is not installed (import_numpy
# degrades gracefully and numpy-dependent features raise
# NumpyRequiredForThisFeature).
np = import_numpy()
## @file
## @addtogroup flatbuffers_python_api
## @{
## @cond FLATBUFFERS_INTERNAL
class OffsetArithmeticError(RuntimeError):
    """
    Raised when offset arithmetic produces an inconsistent result,
    probably caused by bad writing of fields.  This is considered an
    unreachable situation in normal circumstances.
    """
class IsNotNestedError(RuntimeError):
    """
    Raised when a Builder is asked to write Object data while no Object
    is being built.
    """
class IsNestedError(RuntimeError):
    """
    Raised when a Builder is asked to begin a new Object while another
    Object is already being built.
    """
class StructIsNotInlineError(RuntimeError):
    """
    Raised when a Builder is asked to write a Struct at a location that
    is not the current Offset.
    """
class BuilderSizeError(RuntimeError):
    """
    Raised when a Builder would exceed the hardcoded 2 gigabyte buffer
    limit.
    """
class BuilderNotFinishedError(RuntimeError):
    """
    Raised when `Output` is called before `Finish`.
    """
# VtableMetadataFields is the count of metadata fields in each vtable:
# the vtable byte size and the object byte size (both VOffsetT), which
# precede the per-field offsets.
VtableMetadataFields = 2
## @endcond
class Builder(object):
    """ A Builder is used to construct one or more FlatBuffers.

    Typically, Builder objects will be used from code generated by the
    `flatc` compiler.

    A Builder constructs byte buffers in a last-first manner for
    simplicity and performance during reading.

    Internally, a Builder is a state machine for creating FlatBuffer
    objects.

    It holds the following internal state:
        - Bytes: an array of bytes.
        - current_vtable: a list of integers.
        - vtables: a hash of vtable entries.

    Attributes:
        Bytes: The internal `bytearray` for the Builder.
        finished: A boolean determining if the Builder has been finalized.
    """

    ## @cond FLATBUFFERS_INTERNAL
    __slots__ = ("Bytes", "current_vtable", "head", "minalign", "objectEnd",
                 "vtables", "nested", "forceDefaults", "finished")

    """Maximum buffer size constant, in bytes.
    Builder will never allow it's buffer grow over this size.
    Currently equals 2Gb.
    """
    MAX_BUFFER_SIZE = 2**31
    ## @endcond
    def __init__(self, initialSize):
        """Initializes a Builder of size `initial_size`.

        The internal buffer is grown as needed.

        Raises:
            BuilderSizeError: if `initialSize` is negative or exceeds
                MAX_BUFFER_SIZE.
        """
        if not (0 <= initialSize <= Builder.MAX_BUFFER_SIZE):
            msg = "flatbuffers: Cannot create Builder larger than 2 gigabytes."
            raise BuilderSizeError(msg)

        self.Bytes = bytearray(initialSize)
        ## @cond FLATBUFFERS_INTERNAL
        self.current_vtable = None
        # Data is written back-to-front, so the head starts at the end of
        # the buffer and moves toward 0 as bytes are written.
        self.head = UOffsetTFlags.py_type(initialSize)
        self.minalign = 1
        self.objectEnd = None
        self.vtables = {}
        self.nested = False
        self.forceDefaults = False
        ## @endcond
        self.finished = False
def Output(self):
"""Return the portion of the buffer that has been used for writing data.
This is the typical way to access the FlatBuffer data inside the
builder. If you try to access `Builder.Bytes` directly, you would need
to manually index it with `Head()`, since the buffer is constructed
backwards.
It raises BuilderNotFinishedError if the buffer has not been finished
with `Finish`.
"""
if not self.finished:
raise BuilderNotFinishedError()
return self.Bytes[self.Head():]
## @cond FLATBUFFERS_INTERNAL
def StartObject(self, numfields):
"""StartObject initializes bookkeeping for writing a new object."""
self.assertNotNested()
# use 32-bit offsets so that arithmetic doesn't overflow.
self.current_vtable = [0 for _ in range_func(numfields)]
self.objectEnd = self.Offset()
self.nested = True
def WriteVtable(self):
"""
WriteVtable serializes the vtable for the current object, if needed.
Before writing out the vtable, this checks pre-existing vtables for
equality to this one. If an equal vtable is found, point the object to
the existing vtable and return.
Because vtable values are sensitive to alignment of object data, not
all logically-equal vtables will be deduplicated.
A vtable has the following format:
<VOffsetT: size of the vtable in bytes, including this value>
<VOffsetT: size of the object in bytes, including the vtable offset>
<VOffsetT: offset for a field> * N, where N is the number of fields
in the schema for this type. Includes deprecated fields.
Thus, a vtable is made of 2 + N elements, each VOffsetT bytes wide.
An object has the following format:
<SOffsetT: offset to this object's vtable (may be negative)>
<byte: data>+
"""
# Prepend a zero scalar to the object. Later in this function we'll
# write an offset here that points to the object's vtable:
self.PrependSOffsetTRelative(0)
objectOffset = self.Offset()
vtKey = []
trim = True
for elem in reversed(self.current_vtable):
if elem == 0:
if trim:
continue
else:
elem = objectOffset - elem
trim = False
vtKey.append(elem)
vtKey = tuple(vtKey)
vt2Offset = self.vtables.get(vtKey)
if vt2Offset is None:
# Did not find a vtable, so write this one to the buffer.
# Write out the current vtable in reverse , because
# serialization occurs in last-first order:
i = len(self.current_vtable) - 1
trailing = 0
trim = True
while i >= 0:
off = 0
elem = self.current_vtable[i]
i -= 1
if elem == 0:
if trim:
trailing += 1
continue
else:
# Forward reference to field;
# use 32bit number to ensure no overflow:
off = objectOffset - elem
trim = False
self.PrependVOffsetT(off)
# The two metadata fields are written last.
# First, store the object bytesize:
objectSize = UOffsetTFlags.py_type(objectOffset - self.objectEnd)
self.PrependVOffsetT(VOffsetTFlags.py_type(objectSize))
# Second, store the vtable bytesize:
vBytes = len(self.current_vtable) - trailing + VtableMetadataFields
vBytes *= N.VOffsetTFlags.bytewidth
self.PrependVOffsetT(VOffsetTFlags.py_type(vBytes))
# Next, write the offset to the new vtable in the
# already-allocated SOffsetT at the beginning of this object:
objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
encode.Write(packer.soffset, self.Bytes, objectStart,
SOffsetTFlags.py_type(self.Offset() - objectOffset))
# Finally, store this vtable in memory for future
# deduplication:
self.vtables[vtKey] = self.Offset()
else:
# Found a duplicate vtable.
objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
self.head = UOffsetTFlags.py_type(objectStart)
# Write the offset to the found vtable in the
# already-allocated SOffsetT at the beginning of this object:
encode.Write(packer.soffset, self.Bytes, self.Head(),
SOffsetTFlags.py_type(vt2Offset - objectOffset))
self.current_vtable = None
return objectOffset
def EndObject(self):
"""EndObject writes data necessary to finish object construction."""
self.assertNested()
self.nested = False
return self.WriteVtable()
def growByteBuffer(self):
"""Doubles the size of the byteslice, and copies the old data towards
the end of the new buffer (since we build the buffer backwards)."""
if len(self.Bytes) == Builder.MAX_BUFFER_SIZE:
msg = "flatbuffers: cannot grow buffer beyond 2 gigabytes"
raise BuilderSizeError(msg)
newSize = min(len(self.Bytes) * 2, Builder.MAX_BUFFER_SIZE)
if newSize == 0:
newSize = 1
bytes2 = bytearray(newSize)
bytes2[newSize-len(self.Bytes):] = self.Bytes
self.Bytes = bytes2
## @endcond
def Head(self):
"""Get the start of useful data in the underlying byte buffer.
Note: unlike other functions, this value is interpreted as from the
left.
"""
## @cond FLATBUFFERS_INTERNAL
return self.head
## @endcond
## @cond FLATBUFFERS_INTERNAL
def Offset(self):
"""Offset relative to the end of the buffer."""
return UOffsetTFlags.py_type(len(self.Bytes) - self.Head())
def Pad(self, n):
"""Pad places zeros at the current offset."""
for i in range_func(n):
self.Place(0, N.Uint8Flags)
def Prep(self, size, additionalBytes):
"""
Prep prepares to write an element of `size` after `additional_bytes`
have been written, e.g. if you write a string, you need to align
such the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
"""
# Track the biggest thing we've ever aligned to.
if size > self.minalign:
self.minalign = size
# Find the amount of alignment needed such that `size` is properly
# aligned after `additionalBytes`:
alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
alignSize &= (size - 1)
# Reallocate the buffer if needed:
while self.Head() < alignSize+size+additionalBytes:
oldBufSize = len(self.Bytes)
self.growByteBuffer()
updated_head = self.head + len(self.Bytes) - oldBufSize
self.head = UOffsetTFlags.py_type(updated_head)
self.Pad(alignSize)
def PrependSOffsetTRelative(self, off):
"""
PrependSOffsetTRelative prepends an SOffsetT, relative to where it
will be written.
"""
# Ensure alignment is already done:
self.Prep(N.SOffsetTFlags.bytewidth, 0)
if not (off <= self.Offset()):
msg = "flatbuffers: Offset arithmetic error."
raise OffsetArithmeticError(msg)
off2 = self.Offset() - off + N.SOffsetTFlags.bytewidth
self.PlaceSOffsetT(off2)
## @endcond
def PrependUOffsetTRelative(self, off):
"""Prepends an unsigned offset into vector data, relative to where it
will be written.
"""
# Ensure alignment is already done:
self.Prep(N.UOffsetTFlags.bytewidth, 0)
if not (off <= self.Offset()):
msg = "flatbuffers: Offset arithmetic error."
raise OffsetArithmeticError(msg)
off2 = self.Offset() - off + N.UOffsetTFlags.bytewidth
self.PlaceUOffsetT(off2)
## @cond FLATBUFFERS_INTERNAL
def StartVector(self, elemSize, numElems, alignment):
"""
StartVector initializes bookkeeping for writing a new vector.
A vector has the following format:
- <UOffsetT: number of elements in this vector>
- <T: data>+, where T is the type of elements of this vector.
"""
self.assertNotNested()
self.nested = True
self.Prep(N.Uint32Flags.bytewidth, elemSize*numElems)
self.Prep(alignment, elemSize*numElems) # In case alignment > int.
return self.Offset()
## @endcond
def EndVector(self, vectorNumElems):
"""EndVector writes data necessary to finish vector construction."""
self.assertNested()
## @cond FLATBUFFERS_INTERNAL
self.nested = False
## @endcond
# we already made space for this, so write without PrependUint32
self.PlaceUOffsetT(vectorNumElems)
return self.Offset()
def CreateString(self, s, encoding='utf-8', errors='strict'):
"""CreateString writes a null-terminated byte string as a vector."""
self.assertNotNested()
## @cond FLATBUFFERS_INTERNAL
self.nested = True
## @endcond
if isinstance(s, compat.string_types):
x = s.encode(encoding, errors)
elif isinstance(s, compat.binary_types):
x = s
else:
raise TypeError("non-string passed to CreateString")
self.Prep(N.UOffsetTFlags.bytewidth, (len(x)+1)*N.Uint8Flags.bytewidth)
self.Place(0, N.Uint8Flags)
l = UOffsetTFlags.py_type(len(s))
## @cond FLATBUFFERS_INTERNAL
self.head = UOffsetTFlags.py_type(self.Head() - l)
## @endcond
self.Bytes[self.Head():self.Head()+l] = x
return self.EndVector(len(x))
def CreateByteVector(self, x):
"""CreateString writes a byte vector."""
self.assertNotNested()
## @cond FLATBUFFERS_INTERNAL
self.nested = True
## @endcond
if not isinstance(x, compat.binary_types):
raise TypeError("non-byte vector passed to CreateByteVector")
self.Prep(N.UOffsetTFlags.bytewidth, len(x)*N.Uint8Flags.bytewidth)
l = UOffsetTFlags.py_type(len(x))
## @cond FLATBUFFERS_INTERNAL
self.head = UOffsetTFlags.py_type(self.Head() - l)
## @endcond
self.Bytes[self.Head():self.Head()+l] = x
return self.EndVector(len(x))
def CreateNumpyVector(self, x):
"""CreateNumpyVector writes a numpy array into the buffer."""
if np is None:
# Numpy is required for this feature
raise NumpyRequiredForThisFeature("Numpy was not found.")
if not isinstance(x, np.ndarray):
raise TypeError("non-numpy-ndarray passed to CreateNumpyVector")
if x.dtype.kind not in ['b', 'i', 'u', 'f']:
raise TypeError("numpy-ndarray holds elements of unsupported datatype")
if x.ndim > 1:
raise TypeError("multidimensional-ndarray passed to CreateNumpyVector")
self.StartVector(x.itemsize, x.size, x.dtype.alignment)
# Ensure little endian byte ordering
if x.dtype.str[0] == "<":
x_lend = x
else:
x_lend = x.byteswap(inplace=False)
# Calculate total length
l = UOffsetTFlags.py_type(x_lend.itemsize * x_lend.size)
## @cond FLATBUFFERS_INTERNAL
self.head = UOffsetTFlags.py_type(self.Head() - l)
## @endcond
# tobytes ensures c_contiguous ordering
self.Bytes[self.Head():self.Head()+l] = x_lend.tobytes(order='C')
return self.EndVector(x.size)
## @cond FLATBUFFERS_INTERNAL
def assertNested(self):
"""
Check that we are in the process of building an object.
"""
if not self.nested:
raise IsNotNestedError()
def assertNotNested(self):
"""
Check that no other objects are being built while making this
object. If not, raise an exception.
"""
if self.nested:
raise IsNestedError()
def assertStructIsInline(self, obj):
"""
Structs are always stored inline, so need to be created right
where they are used. You'll get this error if you created it
elsewhere.
"""
N.enforce_number(obj, N.UOffsetTFlags)
if obj != self.Offset():
msg = ("flatbuffers: Tried to write a Struct at an Offset that "
"is different from the current Offset of the Builder.")
raise StructIsNotInlineError(msg)
def Slot(self, slotnum):
"""
Slot sets the vtable key `voffset` to the current location in the
buffer.
"""
self.assertNested()
self.current_vtable[slotnum] = self.Offset()
## @endcond
def __Finish(self, rootTable, sizePrefix, file_identifier=None):
"""Finish finalizes a buffer, pointing to the given `rootTable`."""
N.enforce_number(rootTable, N.UOffsetTFlags)
prepSize = N.UOffsetTFlags.bytewidth
if file_identifier is not None:
prepSize += N.Int32Flags.bytewidth
if sizePrefix:
prepSize += N.Int32Flags.bytewidth
self.Prep(self.minalign, prepSize)
if file_identifier is not None:
self.Prep(N.UOffsetTFlags.bytewidth, encode.FILE_IDENTIFIER_LENGTH)
# Convert bytes object file_identifier to an array of 4 8-bit integers,
# and use big-endian to enforce size compliance.
# https://docs.python.org/2/library/struct.html#format-characters
file_identifier = N.struct.unpack(">BBBB", file_identifier)
for i in range(encode.FILE_IDENTIFIER_LENGTH-1, -1, -1):
# Place the bytes of the file_identifer in reverse order:
self.Place(file_identifier[i], N.Uint8Flags)
self.PrependUOffsetTRelative(rootTable)
if sizePrefix:
size = len(self.Bytes) - self.Head()
N.enforce_number(size, N.Int32Flags)
self.PrependInt32(size)
self.finished = True
return self.Head()
def Finish(self, rootTable, file_identifier=None):
"""Finish finalizes a buffer, pointing to the given `rootTable`."""
return self.__Finish(rootTable, False, file_identifier=file_identifier)
def FinishSizePrefixed(self, rootTable, file_identifier=None):
"""
Finish finalizes a buffer, pointing to the given `rootTable`,
with the size prefixed.
"""
return self.__Finish(rootTable, True, file_identifier=file_identifier)
## @cond FLATBUFFERS_INTERNAL
def Prepend(self, flags, off):
self.Prep(flags.bytewidth, 0)
self.Place(off, flags)
def PrependSlot(self, flags, o, x, d):
N.enforce_number(x, flags)
N.enforce_number(d, flags)
if x != d or self.forceDefaults:
self.Prepend(flags, x)
self.Slot(o)
def PrependBoolSlot(self, *args): self.PrependSlot(N.BoolFlags, *args)
def PrependByteSlot(self, *args): self.PrependSlot(N.Uint8Flags, *args)
def PrependUint8Slot(self, *args): self.PrependSlot(N.Uint8Flags, *args)
def PrependUint16Slot(self, *args): self.PrependSlot(N.Uint16Flags, *args)
def PrependUint32Slot(self, *args): self.PrependSlot(N.Uint32Flags, *args)
def PrependUint64Slot(self, *args): self.PrependSlot(N.Uint64Flags, *args)
def PrependInt8Slot(self, *args): self.PrependSlot(N.Int8Flags, *args)
def PrependInt16Slot(self, *args): self.PrependSlot(N.Int16Flags, *args)
def PrependInt32Slot(self, *args): self.PrependSlot(N.Int32Flags, *args)
def PrependInt64Slot(self, *args): self.PrependSlot(N.Int64Flags, *args)
def PrependFloat32Slot(self, *args): self.PrependSlot(N.Float32Flags,
*args)
def PrependFloat64Slot(self, *args): self.PrependSlot(N.Float64Flags,
*args)
def PrependUOffsetTRelativeSlot(self, o, x, d):
"""
PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at
vtable slot `o`. If value `x` equals default `d`, then the slot will
be set to zero and no other data will be written.
"""
if x != d or self.forceDefaults:
self.PrependUOffsetTRelative(x)
self.Slot(o)
def PrependStructSlot(self, v, x, d):
"""
PrependStructSlot prepends a struct onto the object at vtable slot `o`.
Structs are stored inline, so nothing additional is being added.
In generated code, `d` is always 0.
"""
N.enforce_number(d, N.UOffsetTFlags)
if x != d:
self.assertStructIsInline(x)
self.Slot(v)
## @endcond
def PrependBool(self, x):
"""Prepend a `bool` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.BoolFlags, x)
def PrependByte(self, x):
"""Prepend a `byte` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint8Flags, x)
def PrependUint8(self, x):
"""Prepend an `uint8` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint8Flags, x)
def PrependUint16(self, x):
"""Prepend an `uint16` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint16Flags, x)
def PrependUint32(self, x):
"""Prepend an `uint32` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint32Flags, x)
def PrependUint64(self, x):
"""Prepend an `uint64` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint64Flags, x)
def PrependInt8(self, x):
"""Prepend an `int8` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Int8Flags, x)
def PrependInt16(self, x):
"""Prepend an `int16` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Int16Flags, x)
def PrependInt32(self, x):
"""Prepend an `int32` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Int32Flags, x)
def PrependInt64(self, x):
"""Prepend an `int64` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Int64Flags, x)
def PrependFloat32(self, x):
"""Prepend a `float32` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Float32Flags, x)
def PrependFloat64(self, x):
"""Prepend a `float64` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Float64Flags, x)
def ForceDefaults(self, forceDefaults):
"""
In order to save space, fields that are set to their default value
don't get serialized into the buffer. Forcing defaults provides a
way to manually disable this optimization. When set to `True`, will
always serialize default values.
"""
self.forceDefaults = forceDefaults
##############################################################
## @cond FLATBUFFERS_INTERNAL
def PrependVOffsetT(self, x): self.Prepend(N.VOffsetTFlags, x)
def Place(self, x, flags):
"""
Place prepends a value specified by `flags` to the Builder,
without checking for available space.
"""
N.enforce_number(x, flags)
self.head = self.head - flags.bytewidth
encode.Write(flags.packer_type, self.Bytes, self.Head(), x)
def PlaceVOffsetT(self, x):
"""PlaceVOffsetT prepends a VOffsetT to the Builder, without checking
for space.
"""
N.enforce_number(x, N.VOffsetTFlags)
self.head = self.head - N.VOffsetTFlags.bytewidth
encode.Write(packer.voffset, self.Bytes, self.Head(), x)
def PlaceSOffsetT(self, x):
"""PlaceSOffsetT prepends a SOffsetT to the Builder, without checking
for space.
"""
N.enforce_number(x, N.SOffsetTFlags)
self.head = self.head - N.SOffsetTFlags.bytewidth
encode.Write(packer.soffset, self.Bytes, self.Head(), x)
def PlaceUOffsetT(self, x):
"""PlaceUOffsetT prepends a UOffsetT to the Builder, without checking
for space.
"""
N.enforce_number(x, N.UOffsetTFlags)
self.head = self.head - N.UOffsetTFlags.bytewidth
encode.Write(packer.uoffset, self.Bytes, self.Head(), x)
## @endcond
## @cond FLATBUFFERS_INTERNAL
def vtableEqual(a, objectStart, b):
    """Compare an unwritten vtable `a` against a serialized vtable `b`.

    Returns True when every slot of `a`, once made relative to
    `objectStart`, matches the corresponding stored VOffsetT in `b`.
    """
    N.enforce_number(objectStart, N.UOffsetTFlags)

    if len(b) != len(a) * N.VOffsetTFlags.bytewidth:
        return False

    for slot, elem in enumerate(a):
        stored = encode.Get(packer.voffset, b, slot * N.VOffsetTFlags.bytewidth)
        # Zero in both places means "default value" -- always a match.
        if stored == 0 and elem == 0:
            continue
        if stored != objectStart - elem:
            return False
    return True
## @endcond
## @}
| |
#!/usr/bin/env python2.5
'''
Pure Python reader for GeoIP Country Edition databases.
'''
__author__ = 'David Wilson <dw@botanicus.net>'
import os
import struct
from cStringIO import StringIO
try:
os.SEEK_SET
except AttributeError:
os.SEEK_SET, os.SEEK_CUR, os.SEEK_END = range(3)
#
# Constants.
#
# From GeoIP.h.
SEGMENT_RECORD_LENGTH = 3
STANDARD_RECORD_LENGTH = 3
ORG_RECORD_LENGTH = 4
MAX_RECORD_LENGTH = 4
FULL_RECORD_LENGTH = 50
NUM_DB_TYPES = 20
GEOIP_COUNTRY_EDITION = 1
GEOIP_REGION_EDITION_REV0 = 7
GEOIP_CITY_EDITION_REV0 = 6
GEOIP_ORG_EDITION = 5
GEOIP_ISP_EDITION = 4
GEOIP_CITY_EDITION_REV1 = 2
GEOIP_REGION_EDITION_REV1 = 3
GEOIP_PROXY_EDITION = 8
GEOIP_ASNUM_EDITION = 9
GEOIP_NETSPEED_EDITION = 10
GEOIP_DOMAIN_EDITION = 11
GEOIP_COUNTRY_EDITION_V6 = 12
COUNTRY_BEGIN = 16776960
STATE_BEGIN_REV0 = 16700000
STATE_BEGIN_REV1 = 16000000
STRUCTURE_INFO_MAX_SIZE = 20
DATABASE_INFO_MAX_SIZE = 100
GeoIP_country_code = '''
AP EU AD AE AF AG AI AL AM AN AO AQ AR AS AT AU AW AZ BA BB BD BE BF BG BH
BI BJ BM BN BO BR BS BT BV BW BY BZ CA CC CD CF CG CH CI CK CL CM CN CO CR
CU CV CX CY CZ DE DJ DK DM DO DZ EC EE EG EH ER ES ET FI FJ FK FM FO FR FX
GA GB GD GE GF GH GI GL GM GN GP GQ GR GS GT GU GW GY HK HM HN HR HT HU ID
IE IL IN IO IQ IR IS IT JM JO JP KE KG KH KI KM KN KP KR KW KY KZ LA LB LC
LI LK LR LS LT LU LV LY MA MC MD MG MH MK ML MM MN MO MP MQ MR MS MT MU MV
MW MX MY MZ NA NC NE NF NG NI NL NO NP NR NU NZ OM PA PE PF PG PH PK PL PM
PN PR PS PT PW PY QA RE RO RU RW SA SB SC SD SE SG SH SI SJ SK SL SM SN SO
SR ST SV SY SZ TC TD TF TG TH TJ TK TM TN TO TL TR TT TV TW TZ UA UG UM US
UY UZ VA VC VE VG VI VN VU WF WS YE YT RS ZA ZM ME ZW A1 A2 O1 AX GG IM JE
BL MF
'''.split()
GeoIP_country_continent = '''
AS EU EU AS AS SA SA EU AS SA AF AN SA OC EU OC SA AS EU SA AS EU AF EU AS
AF AF SA AS SA SA SA AS AF AF EU SA NA AS AF AF AF EU AF OC SA AF AS SA SA
SA AF AS AS EU EU AF EU SA SA AF SA EU AF AF AF EU AF EU OC SA OC EU EU EU
AF EU SA AS SA AF EU SA AF AF SA AF EU SA SA OC AF SA AS AF SA EU SA EU AS
EU AS AS AS AS AS EU EU SA AS AS AF AS AS OC AF SA AS AS AS SA AS AS AS SA
EU AS AF AF EU EU EU AF AF EU EU AF OC EU AF AS AS AS OC SA AF SA EU AF AS
AF NA AS AF AF OC AF OC AF SA EU EU AS OC OC OC AS SA SA OC OC AS AS EU SA
OC SA AS EU OC SA AS AF EU AS AF AS OC AF AF EU AS AF EU EU EU AF EU AF AF
SA AF SA AS AF SA AF AF AF AS AS OC AS AF OC AS AS SA OC AS AF EU AF OC NA
SA AS EU SA SA SA SA AS OC OC OC AS AF EU AF AF EU AF -- -- -- EU EU EU EU
SA SA
'''.split()
#
# Helper functions.
#
def addr_to_num(ip):
    '''
    Convert an IPv4 address from a string to its integer representation.

    @param[in] ip IPv4 address as a string.
    @returns Address as an integer.
    @raises ValueError if `ip` is not a valid dotted-quad IPv4 address.
    '''

    try:
        w, x, y, z = map(int, ip.split('.'))
        # Reject out-of-range octets, including negatives such as '-1'
        # which the original `>255`-only checks let through.
        if not (0 <= w <= 255 and 0 <= x <= 255 and
                0 <= y <= 255 and 0 <= z <= 255):
            raise ValueError()
    # Bug fix: `except ValueError, TypeError:` (Python 2 comma form) caught
    # only ValueError and *bound it to the name* TypeError; a tuple is
    # required to catch both exception types.
    except (ValueError, TypeError):
        raise ValueError('%r is not an IPv4 address.' % (ip,))

    return (w << 24) | (x << 16) | (y << 8) | z
def num_to_addr(num):
    '''
    Convert an IPv4 address from its integer representation to a string.

    @param[in] num Address as an integer.
    @returns IPv4 address as a string.
    '''

    octets = tuple((num >> shift) & 0xff for shift in (24, 16, 8, 0))
    return '%d.%d.%d.%d' % octets
def latin1_to_utf8(string):
    '''Re-encode a Latin-1 byte string as UTF-8.'''
    decoded = string.decode('latin-1')
    return decoded.encode('utf-8')
def safe_lookup(lst, idx):
    '''Index `lst` by `idx`, propagating a None index as a None result.'''
    return None if idx is None else lst[idx]
#
# Classes.
#
class ReadBuffer(object):
    '''
    Helper that captures a fixed-size window of a byte string and consumes
    it piecewise.
    '''

    buffer = None

    def __init__(self, source, size, seek_offset=None, seek_whence=os.SEEK_SET):
        stream = StringIO(source)
        if seek_offset is not None:
            stream.seek(seek_offset, seek_whence)
        self.buffer = stream.read(size)

    def read_string(self):
        '''
        Consume and return the bytes up to (not including) the next NUL.

        @returns Result as a string.
        '''
        head, rest = self.buffer.split('\0', 1)
        self.buffer = rest
        return head

    def read_int(self, size):
        '''
        Consume `size` bytes and return them as a little-endian integer.

        @param[in] size Number of bytes to read as an integer.
        @returns Result as an integer.
        '''
        chunk, self.buffer = self.buffer[:size], self.buffer[size:]
        value = 0
        for pos in range(size - 1, -1, -1):
            value = (value << 8) | ord(chunk[pos])
        return value
class AddressInfo(object):
    '''
    Representation of a database lookup result.
    '''

    __slots__ = [ 'ip', 'ipnum', 'prefix', 'country', 'continent' ]

    def __init__(self, ip=None, ipnum=None, prefix=None, country_id=None):
        self.ip = ip
        self.ipnum = ipnum
        self.prefix = prefix
        # country_id of None (no match) maps to None for both fields.
        self.country = safe_lookup(GeoIP_country_code, country_id)
        self.continent = safe_lookup(GeoIP_country_continent, country_id)

    # Network base address: clear the (32 - prefix) host bits of ipnum.
    # Bug fix: the mask was computed as (32-prefix)**2 - 1 (the *square* of
    # the host-bit count) instead of 2**(32-prefix) - 1; e.g. for a /24 it
    # masked only 6 low bits instead of 8. Build it with a shift instead.
    network = property(lambda self:
        num_to_addr(self.ipnum & ~((1 << (32 - self.prefix)) - 1)))

    def __str__(self):
        return '[%s of network %s/%d in country %s]' %\
            (self.ip, self.network, self.prefix, self.country)
class BigAddressInfo(AddressInfo):
    '''
    Database lookup result carrying the extra fields of a City edition
    record.
    '''

    # __slots__ is inherited and appended to.
    __slots__ = [ 'city', 'region', 'postal_code', 'metro_code', 'area_code', 'longitude', 'latitude' ]

    def __init__(self, ip=None, ipnum=None, prefix=None, country_id=None,
            city=None, region=None, postal_code=None, metro_code=None, area_code=None,
            longitude=None, latitude=None):
        AddressInfo.__init__(self, ip, ipnum, prefix, country_id)
        # Falsy values (empty strings) from the database become None.
        self.city = city if city else None
        self.region = region if region else None
        self.postal_code = postal_code if postal_code else None
        self.latitude = latitude
        self.longitude = longitude
        self.area_code = area_code
        self.metro_code = metro_code

    def __str__(self):
        return '[%s of network %s/%d in city %s, %s]' %\
            (self.ip, self.network, self.prefix, self.city, self.country)
class Database(object):
    '''
    GeoIP database reader implementation. Currently only supports country
    edition.
    '''

    def __init__(self, filename):
        '''
        Initialize a new GeoIP reader instance.

        @param[in] filename Path to GeoIP.dat as a string.
        '''
        self.filename = filename
        # Slurp the whole database into memory; every later read operates
        # on StringIO views over this cached string.
        self.cache = file(filename).read()
        self._setup_segments()

        if self.db_type not in (GEOIP_COUNTRY_EDITION,
                                GEOIP_CITY_EDITION_REV0,
                                GEOIP_CITY_EDITION_REV1):
            raise NotImplementedError('Database edition is not supported yet; '
                                      'Please use a Country or City database.')

    def _setup_segments(self):
        # Determine the database edition and segment layout by scanning
        # backwards from the end of the file for the structure-info
        # delimiter (three 0xFF bytes).
        self.segments = None

        # default to GeoIP Country Edition
        self.db_type = GEOIP_COUNTRY_EDITION
        self.record_length = STANDARD_RECORD_LENGTH

        fp = StringIO(self.cache)
        fp.seek(-3, os.SEEK_END)

        for i in range(STRUCTURE_INFO_MAX_SIZE):
            delim = fp.read(3)
            if delim != '\xFF\xFF\xFF':
                # Not the delimiter; step back one byte and try again.
                fp.seek(-4, os.SEEK_CUR)
                continue

            # The byte directly after the delimiter encodes the edition.
            self.db_type = ord(fp.read(1))

            # Region Edition, pre June 2003.
            if self.db_type == GEOIP_REGION_EDITION_REV0:
                self.segments = [STATE_BEGIN_REV0]
            # Region Edition, post June 2003.
            elif self.db_type == GEOIP_REGION_EDITION_REV1:
                self.segments = [STATE_BEGIN_REV1]
            # City/Org Editions have two segments, read offset of second segment
            elif self.db_type in (GEOIP_CITY_EDITION_REV0,
                                  GEOIP_CITY_EDITION_REV1,
                                  GEOIP_ORG_EDITION, GEOIP_ISP_EDITION,
                                  GEOIP_ASNUM_EDITION):
                self.segments = [0]
                # Second-segment offset is stored little-endian.
                for idx, ch in enumerate(fp.read(SEGMENT_RECORD_LENGTH)):
                    self.segments[0] += ord(ch) << (idx * 8)

                if self.db_type in (GEOIP_ORG_EDITION, GEOIP_ISP_EDITION):
                    self.record_length = ORG_RECORD_LENGTH
            break

        if self.db_type in (GEOIP_COUNTRY_EDITION, GEOIP_PROXY_EDITION,
                            GEOIP_NETSPEED_EDITION, GEOIP_COUNTRY_EDITION_V6):
            self.segments = [COUNTRY_BEGIN]

    def info(self):
        '''
        Return a string describing the loaded database version.

        @returns English text string, or None if database is ancient.
        '''
        fp = StringIO(self.cache)
        fp.seek(-3, os.SEEK_END)
        hasStructureInfo = False

        # first get past the database structure information
        for i in range(STRUCTURE_INFO_MAX_SIZE):
            if fp.read(3) == '\xFF\xFF\xFF':
                hasStructureInfo = True
                break
            fp.seek(-4, os.SEEK_CUR)

        if hasStructureInfo:
            fp.seek(-6, os.SEEK_CUR)
        else:
            # no structure info, must be pre Sep 2002 database, go back to end.
            fp.seek(-3, os.SEEK_END)

        # The info text sits just before a run of three NUL bytes; widen
        # the scan one byte at a time until that terminator is found.
        for i in range(DATABASE_INFO_MAX_SIZE):
            if fp.read(3) == '\0\0\0':
                return fp.read(i)
            fp.seek(-4, os.SEEK_CUR)

    def _decode(self, buf, branch):
        '''
        Decode the child pointer for one branch of a trie record.

        @param[in] buf Record buffer as a list of byte values.
        @param[in] branch 0 for left, 1 for right (see _seek_record).
        @returns The child pointer as an integer.
        '''
        offset = 3 * branch
        if self.record_length == 3:
            # Common case: 3-byte little-endian pointer.
            return buf[offset] | (buf[offset+1] << 8) | (buf[offset+2] << 16)

        # General case.
        # NOTE(review): this reads buf[end], buf[end-1], ... which assembles
        # bytes big-endian and, for branch 0, indexes buf[-j] (wrapping to
        # the record's tail). Confirm against the reference C implementation
        # before relying on 4-byte (Org/ISP) records.
        end = branch * self.record_length
        x = 0
        for j in range(self.record_length):
            x = (x << 8) | buf[end - j]
        return x

    def _seek_record(self, ipnum):
        # Walk the binary trie from the root; at each depth the matching
        # bit of ipnum (MSB first) selects the left/right branch.
        fp = StringIO(self.cache)
        offset = 0

        for depth in range(31, -1, -1):
            fp.seek(self.record_length * 2 * offset)
            buf = map(ord, fp.read(self.record_length * 2))

            x = self._decode(buf, int(bool(ipnum & (1 << depth))))
            if x >= self.segments[0]:
                # Pointer reached the data segment: this is a leaf.
                # Return the matched prefix length along with it.
                return 32 - depth, x
            offset = x

        assert False, \
            "Error Traversing Database for ipnum = %lu: "\
            "Perhaps database is corrupt?" % ipnum

    def _lookup_country(self, ip):
        "Lookup a country db entry."
        ipnum = addr_to_num(ip)
        prefix, num = self._seek_record(ipnum)
        # Leaf pointers are biased by COUNTRY_BEGIN; zero means "no match".
        num -= COUNTRY_BEGIN
        if num:
            country_id = num - 1
        else:
            country_id = None
        return AddressInfo(country_id=country_id, ip=ip, ipnum=ipnum, prefix=prefix)

    def _lookup_city(self, ip):
        "Look up a city db entry."
        ipnum = addr_to_num(ip)
        prefix, num = self._seek_record(ipnum)
        record, next_record_ptr = self._extract_record(num, None)
        return BigAddressInfo(ip=ip, ipnum=ipnum, prefix=prefix, **record)

    def _extract_record(self, seek_record, next_record_ptr):
        # A pointer equal to the segment start means "no record here".
        if seek_record == self.segments[0]:
            return {'country_id': None}, next_record_ptr

        # Translate the trie pointer into a byte offset of the packed
        # city record within the cached file data.
        seek_offset = seek_record + (2 * self.record_length - 1) * self.segments[0]
        record_buf = ReadBuffer(self.cache, FULL_RECORD_LENGTH, seek_offset)
        record = {}

        # get country
        record['country_id'] = record_buf.read_int(1) - 1
        # get region
        record['region'] = record_buf.read_string()
        # get city
        record['city'] = latin1_to_utf8(record_buf.read_string())
        # get postal code
        record['postal_code'] = record_buf.read_string()
        # get latitude
        record['latitude'] = record_buf.read_int(3) / 10000.0 - 180
        # get longitude
        record['longitude'] = record_buf.read_int(3) / 10000.0 - 180

        # get area code and metro code for post April 2002 databases and for US locations
        if (self.db_type == GEOIP_CITY_EDITION_REV1) and (GeoIP_country_code[record['country_id']] == 'US'):
            metro_area_combo = record_buf.read_int(3)
            record['metro_code'] = metro_area_combo / 1000
            record['area_code'] = metro_area_combo % 1000

        # Used for GeoIP_next_record (which this code doesn't have.)
        # NOTE(review): ReadBuffer defines no __len__, so this branch would
        # raise TypeError if a non-None pointer were ever passed; callers
        # in this file always pass None, leaving the branch effectively dead.
        if next_record_ptr is not None:
            next_record_ptr = seek_record - len(record_buf)

        return record, next_record_ptr

    def lookup(self, ip):
        '''
        Lookup an IP address returning an AddressInfo (or BigAddressInfo)
        instance describing its location.

        @param[in] ip IPv4 address as a string.
        @returns AddressInfo (or BigAddressInfo) instance.
        '''
        if self.db_type in (GEOIP_COUNTRY_EDITION, GEOIP_PROXY_EDITION, GEOIP_NETSPEED_EDITION):
            return self._lookup_country(ip)
        elif self.db_type in (GEOIP_CITY_EDITION_REV0, GEOIP_CITY_EDITION_REV1):
            return self._lookup_city(ip)
if __name__ == '__main__':
    # Ad-hoc smoke test / micro-benchmark: open a database (path from
    # argv[1], default GeoIP.dat), print its version string, look up a
    # handful of addresses, and report how long each phase took.
    import time, sys

    dbfile = 'GeoIP.dat'
    if len(sys.argv) > 1:
        dbfile = sys.argv[1]

    t1 = time.time()
    db = Database(dbfile)
    t2 = time.time()
    print db.info()
    t3 = time.time()

    tests = '''
127.0.0.1
83.198.135.28
83.126.35.59
192.168.1.1
194.168.1.255
196.25.210.14
64.22.109.113
'''.split()

    for test in tests:
        addr_info = db.lookup(test)
        print addr_info
        if isinstance(addr_info, BigAddressInfo):
            # Dump every public attribute of the richer City record.
            print "  ", dict((key, getattr(addr_info, key)) for key in dir(addr_info) if not key.startswith('_'))
    t4 = time.time()

    print "Open: %dms" % ((t2-t1) * 1000,)
    print "Info: %dms" % ((t3-t2) * 1000,)
    print "Lookup: %dms" % ((t4-t3) * 1000,)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the compute RPC API.
"""
from oslo.config import cfg
from nova import exception
from nova.objects import base as objects_base
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova import rpcclient
# Configuration options for addressing compute services over RPC.
rpcapi_opts = [
    cfg.StrOpt('compute_topic',
               default='compute',
               help='the topic compute nodes listen on'),
]

CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)

# Optional cap (registered under the [upgrade_levels] group) limiting the
# message version sent to compute services during rolling upgrades.
rpcapi_cap_opt = cfg.StrOpt('compute',
        help='Set a version cap for messages sent to compute services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
def _compute_host(host, instance):
'''Get the destination host for a message.
:param host: explicit host to send the message to.
:param instance: If an explicit host was not specified, use
instance['host']
:returns: A host
'''
if host:
return host
if not instance:
raise exception.NovaException(_('No compute host specified'))
if not instance['host']:
raise exception.NovaException(_('Unable to find host for '
'Instance %s') % instance['uuid'])
return instance['host']
class ComputeAPI(rpcclient.RpcProxy):
    '''Client side of the compute rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Adds get_host_uptime()
        1.2 - Adds check_can_live_migrate_[destination|source]
        1.3 - Adds change_instance_metadata()
        1.4 - Remove instance_uuid, add instance argument to reboot_instance()
        1.5 - Remove instance_uuid, add instance argument to pause_instance(),
              unpause_instance()
        1.6 - Remove instance_uuid, add instance argument to suspend_instance()
        1.7 - Remove instance_uuid, add instance argument to
              get_console_output()
        1.8 - Remove instance_uuid, add instance argument to
              add_fixed_ip_to_instance()
        1.9 - Remove instance_uuid, add instance argument to attach_volume()
        1.10 - Remove instance_id, add instance argument to
               check_can_live_migrate_destination()
        1.11 - Remove instance_id, add instance argument to
               check_can_live_migrate_source()
        1.12 - Remove instance_uuid, add instance argument to confirm_resize()
        1.13 - Remove instance_uuid, add instance argument to detach_volume()
        1.14 - Remove instance_uuid, add instance argument to finish_resize()
        1.15 - Remove instance_uuid, add instance argument to
               finish_revert_resize()
        1.16 - Remove instance_uuid, add instance argument to get_diagnostics()
        1.17 - Remove instance_uuid, add instance argument to get_vnc_console()
        1.18 - Remove instance_uuid, add instance argument to inject_file()
        1.19 - Remove instance_uuid, add instance argument to
               inject_network_info()
        1.20 - Remove instance_id, add instance argument to
               post_live_migration_at_destination()
        1.21 - Remove instance_uuid, add instance argument to
               power_off_instance() and stop_instance()
        1.22 - Remove instance_uuid, add instance argument to
               power_on_instance() and start_instance()
        1.23 - Remove instance_id, add instance argument to
               pre_live_migration()
        1.24 - Remove instance_uuid, add instance argument to
               rebuild_instance()
        1.25 - Remove instance_uuid, add instance argument to
               remove_fixed_ip_from_instance()
        1.26 - Remove instance_id, add instance argument to
               remove_volume_connection()
        1.27 - Remove instance_uuid, add instance argument to
               rescue_instance()
        1.28 - Remove instance_uuid, add instance argument to reset_network()
        1.29 - Remove instance_uuid, add instance argument to resize_instance()
        1.30 - Remove instance_uuid, add instance argument to resume_instance()
        1.31 - Remove instance_uuid, add instance argument to revert_resize()
        1.32 - Remove instance_id, add instance argument to
               rollback_live_migration_at_destination()
        1.33 - Remove instance_uuid, add instance argument to
               set_admin_password()
        1.34 - Remove instance_uuid, add instance argument to
               snapshot_instance()
        1.35 - Remove instance_uuid, add instance argument to
               unrescue_instance()
        1.36 - Remove instance_uuid, add instance argument to
               change_instance_metadata()
        1.37 - Remove instance_uuid, add instance argument to
               terminate_instance()
        1.38 - Changes to prep_resize():
            - remove instance_uuid, add instance
            - remove instance_type_id, add instance_type
            - remove topic, it was unused
        1.39 - Remove instance_uuid, add instance argument to run_instance()
        1.40 - Remove instance_id, add instance argument to live_migration()
        1.41 - Adds refresh_instance_security_rules()
        1.42 - Add reservations arg to prep_resize(), resize_instance(),
               finish_resize(), confirm_resize(), revert_resize() and
               finish_revert_resize()
        1.43 - Add migrate_data to live_migration()
        1.44 - Adds reserve_block_device_name()

        2.0 - Remove 1.x backwards compat
        2.1 - Adds orig_sys_metadata to rebuild_instance()
        2.2 - Adds slave_info parameter to add_aggregate_host() and
              remove_aggregate_host()
        2.3 - Adds volume_id to reserve_block_device_name()
        2.4 - Add bdms to terminate_instance
        2.5 - Add block device and network info to reboot_instance
        2.6 - Remove migration_id, add migration to resize_instance
        2.7 - Remove migration_id, add migration to confirm_resize
        2.8 - Remove migration_id, add migration to finish_resize
        2.9 - Add publish_service_capabilities()
        2.10 - Adds filter_properties and request_spec to prep_resize()
        2.11 - Adds soft_delete_instance() and restore_instance()
        2.12 - Remove migration_id, add migration to revert_resize
        2.13 - Remove migration_id, add migration to finish_revert_resize
        2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
        2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
        2.16 - Add instance_type to resize_instance
        2.17 - Add get_backdoor_port()
        2.18 - Add bdms to rebuild_instance
        2.19 - Add node to run_instance
        2.20 - Add node to prep_resize
        2.21 - Add migrate_data dict param to pre_live_migration()
        2.22 - Add recreate, on_shared_storage and host arguments to
               rebuild_instance()
        2.23 - Remove network_info from reboot_instance
        2.24 - Added get_spice_console method
        2.25 - Add attach_interface() and detach_interface()
        2.26 - Add validate_console_port to ensure the service connects to
               vnc on the correct port
        2.27 - Adds 'reservations' to terminate_instance() and
               soft_delete_instance()

        ... Grizzly supports message version 2.27. So, any changes to existing
        methods in 2.x after that point should be done such that they can
        handle the version_cap being set to 2.27.

        2.28 - Adds check_instance_shared_storage()
        2.29 - Made start_instance() and stop_instance() take new-world
               instance objects
        2.30 - Adds live_snapshot_instance()
        2.31 - Adds shelve_instance(), shelve_offload_instance, and
               unshelve_instance()
        2.32 - Make reboot_instance take a new world instance object
        2.33 - Made suspend_instance() and resume_instance() take new-world
               instance objects
        2.34 - Added swap_volume()
        2.35 - Made terminate_instance() and soft_delete_instance() take
               new-world instance objects
        2.36 - Made pause_instance() and unpause_instance() take new-world
               instance objects
        2.37 - Added the legacy_bdm_in_spec parameter to run_instance
        2.38 - Made check_can_live_migrate_[destination|source] take
               new-world instance objects
        2.39 - Made revert_resize() and confirm_resize() take new-world
               instance objects
        2.40 - Made reset_network() take new-world instance object
        2.41 - Make inject_network_info take new-world instance object
        2.42 - Splits snapshot_instance() into snapshot_instance() and
               backup_instance() and makes them take new-world instance
               objects.
        2.43 - Made prep_resize() take new-world instance object
        2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
        2.45 - Made resize_instance() take new-world objects
        2.46 - Made finish_resize() take new-world objects
        2.47 - Made finish_revert_resize() take new-world objects

        ... Havana supports message version 2.47. So, any changes to existing
        methods in 2.x after that point should be done such that they can
        handle the version_cap being set to 2.47.

        2.48 - Make add_aggregate_host() and remove_aggregate_host() take
               new-world objects
    '''

    #
    # NOTE(russellb): This is the default minimum version that the server
    # (manager) side must implement unless otherwise specified using a version
    # argument to self.call()/cast()/etc. here. It should be left as X.0 where
    # X is the current major API version (1.0, 2.0, ...). For more information
    # about rpc API versioning, see the docs in
    # openstack/common/rpc/dispatcher.py.
    #
    BASE_RPC_API_VERSION = '2.0'

    # Maps release code names (as accepted by [upgrade_levels] compute) to
    # the newest message version that release supports.
    VERSION_ALIASES = {
        'grizzly': '2.27',
        'havana': '2.47',
    }

    # NOTE(review): most methods below share one pattern -- pick the newest
    # message version allowed under the negotiated cap via
    # self.client.can_send_version(); when an older server must be targeted,
    # down-convert new-world objects with obj_to_primitive()/to_primitive()
    # so they serialize on the wire; then cast() (fire-and-forget) or call()
    # (wait for a result) to the host chosen by _compute_host().

    def __init__(self):
        # If the configured cap is a known release alias, translate it to a
        # concrete version; otherwise pass the raw value (or None) through.
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.compute,
                                               CONF.upgrade_levels.compute)
        super(ComputeAPI, self).__init__(
                topic=CONF.compute_topic,
                default_version=self.BASE_RPC_API_VERSION,
                serializer=objects_base.NovaObjectSerializer(),
                version_cap=version_cap)
        self.client = self.get_client()

    def add_aggregate_host(self, ctxt, aggregate, host_param, host,
                           slave_info=None):
        '''Add aggregate host.

        :param ctxt: request context
        :param aggregate: aggregate to add the host to (serialized to a
                          primitive for pre-2.48 servers)
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param host: This is the host to send the message to.
        '''
        if self.client.can_send_version('2.48'):
            version = '2.48'
        else:
            version = '2.14'
            aggregate = jsonutils.to_primitive(aggregate)
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'add_aggregate_host',
                   aggregate=aggregate, host=host_param,
                   slave_info=slave_info)

    def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
        # Async: allocate an additional fixed IP on the given network.
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'add_fixed_ip_to_instance',
                   instance=instance_p, network_id=network_id)

    def attach_interface(self, ctxt, instance, network_id, port_id,
                         requested_ip):
        # Sync call: the caller needs the resulting interface info.
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.25')
        return cctxt.call(ctxt, 'attach_interface',
                          instance=instance_p, network_id=network_id,
                          port_id=port_id, requested_ip=requested_ip)

    def attach_volume(self, ctxt, instance, volume_id, mountpoint):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'attach_volume',
                   instance=instance_p, volume_id=volume_id,
                   mountpoint=mountpoint)

    def change_instance_metadata(self, ctxt, instance, diff):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'change_instance_metadata',
                   instance=instance_p, diff=diff)

    def check_can_live_migrate_destination(self, ctxt, instance, destination,
                                           block_migration, disk_over_commit):
        # Routed to the *destination* host, not the instance's current host.
        if self.client.can_send_version('2.38'):
            version = '2.38'
        else:
            version = '2.0'
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
        cctxt = self.client.prepare(server=destination, version=version)
        return cctxt.call(ctxt, 'check_can_live_migrate_destination',
                          instance=instance,
                          block_migration=block_migration,
                          disk_over_commit=disk_over_commit)

    def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
        if self.client.can_send_version('2.38'):
            version = '2.38'
        else:
            version = '2.0'
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        return cctxt.call(ctxt, 'check_can_live_migrate_source',
                          instance=instance,
                          dest_check_data=dest_check_data)

    def check_instance_shared_storage(self, ctxt, instance, data):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.28')
        return cctxt.call(ctxt, 'check_instance_shared_storage',
                          instance=instance_p,
                          data=data)

    def confirm_resize(self, ctxt, instance, migration, host,
                       reservations=None, cast=True):
        # ``cast`` lets callers choose fire-and-forget vs waiting for the
        # confirmation to complete.
        if self.client.can_send_version('2.39'):
            version = '2.39'
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            migration = jsonutils.to_primitive(
                objects_base.obj_to_primitive(migration))
            version = '2.7'
        cctxt = self.client.prepare(server=_compute_host(host, instance),
                                    version=version)
        rpc_method = cctxt.cast if cast else cctxt.call
        return rpc_method(ctxt, 'confirm_resize',
                          instance=instance, migration=migration,
                          reservations=reservations)

    def detach_interface(self, ctxt, instance, port_id):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.25')
        cctxt.cast(ctxt, 'detach_interface',
                   instance=instance_p, port_id=port_id)

    def detach_volume(self, ctxt, instance, volume_id):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'detach_volume',
                   instance=instance_p, volume_id=volume_id)

    def finish_resize(self, ctxt, instance, migration, image, disk_info,
                      host, reservations=None):
        # Sent to the destination host, which finishes setting up the
        # resized instance.
        if self.client.can_send_version('2.46'):
            version = '2.46'
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            migration = jsonutils.to_primitive(
                objects_base.obj_to_primitive(migration))
            version = '2.8'
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'finish_resize',
                   instance=instance, migration=migration,
                   image=image, disk_info=disk_info, reservations=reservations)

    def finish_revert_resize(self, ctxt, instance, migration, host,
                             reservations=None):
        if self.client.can_send_version('2.47'):
            version = '2.47'
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            migration = jsonutils.to_primitive(
                objects_base.obj_to_primitive(migration))
            version = '2.13'
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'finish_revert_resize',
                   instance=instance, migration=migration,
                   reservations=reservations)

    def get_console_output(self, ctxt, instance, tail_length):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        return cctxt.call(ctxt, 'get_console_output',
                          instance=instance_p, tail_length=tail_length)

    def get_console_pool_info(self, ctxt, console_type, host):
        cctxt = self.client.prepare(server=host)
        return cctxt.call(ctxt, 'get_console_pool_info',
                          console_type=console_type)

    def get_console_topic(self, ctxt, host):
        cctxt = self.client.prepare(server=host)
        return cctxt.call(ctxt, 'get_console_topic')

    def get_diagnostics(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        return cctxt.call(ctxt, 'get_diagnostics',
                          instance=instance_p)

    def get_vnc_console(self, ctxt, instance, console_type):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        return cctxt.call(ctxt, 'get_vnc_console',
                          instance=instance_p, console_type=console_type)

    def get_spice_console(self, ctxt, instance, console_type):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.24')
        return cctxt.call(ctxt, 'get_spice_console',
                          instance=instance_p, console_type=console_type)

    def validate_console_port(self, ctxt, instance, port, console_type):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.26')
        return cctxt.call(ctxt, 'validate_console_port',
                          instance=instance_p, port=port,
                          console_type=console_type)

    def host_maintenance_mode(self, ctxt, host_param, mode, host):
        '''Set host maintenance mode

        :param ctxt: request context
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param mode:
        :param host: This is the host to send the message to.
        '''
        cctxt = self.client.prepare(server=host)
        return cctxt.call(ctxt, 'host_maintenance_mode',
                          host=host_param, mode=mode)

    def host_power_action(self, ctxt, action, host):
        cctxt = self.client.prepare(server=host)
        return cctxt.call(ctxt, 'host_power_action', action=action)

    def inject_file(self, ctxt, instance, path, file_contents):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'inject_file',
                   instance=instance_p, path=path,
                   file_contents=file_contents)

    def inject_network_info(self, ctxt, instance):
        if self.client.can_send_version('2.41'):
            version = '2.41'
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            version = '2.0'
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'inject_network_info', instance=instance)

    def live_migration(self, ctxt, instance, dest, block_migration, host,
                       migrate_data=None):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host)
        cctxt.cast(ctxt, 'live_migration', instance=instance_p,
                   dest=dest, block_migration=block_migration,
                   migrate_data=migrate_data)

    def pause_instance(self, ctxt, instance):
        if self.client.can_send_version('2.36'):
            version = '2.36'
        else:
            version = '2.0'
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'pause_instance', instance=instance)

    def post_live_migration_at_destination(self, ctxt, instance,
                                           block_migration, host):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host)
        return cctxt.call(ctxt,
                          'post_live_migration_at_destination',
                          instance=instance_p, block_migration=block_migration)

    def power_off_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'power_off_instance', instance=instance_p)

    def power_on_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'power_on_instance', instance=instance_p)

    def pre_live_migration(self, ctxt, instance, block_migration, disk,
                           host, migrate_data=None):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host, version='2.21')
        return cctxt.call(ctxt, 'pre_live_migration',
                          instance=instance_p,
                          block_migration=block_migration,
                          disk=disk, migrate_data=migrate_data)

    def prep_resize(self, ctxt, image, instance, instance_type, host,
                    reservations=None, request_spec=None,
                    filter_properties=None, node=None):
        if self.client.can_send_version('2.43'):
            version = '2.43'
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            version = '2.20'
        # instance_type and image are always sent as primitives.
        instance_type_p = jsonutils.to_primitive(instance_type)
        image_p = jsonutils.to_primitive(image)
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'prep_resize',
                   instance=instance,
                   instance_type=instance_type_p,
                   image=image_p, reservations=reservations,
                   request_spec=request_spec,
                   filter_properties=filter_properties,
                   node=node)

    def reboot_instance(self, ctxt, instance, block_device_info,
                        reboot_type):
        if not self.client.can_send_version('2.32'):
            version = '2.23'
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
        else:
            version = '2.32'
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'reboot_instance',
                   instance=instance,
                   block_device_info=block_device_info,
                   reboot_type=reboot_type)

    def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
                         image_ref, orig_image_ref, orig_sys_metadata, bdms,
                         recreate=False, on_shared_storage=False, host=None):
        # ``host`` overrides instance['host'] for evacuate-style rebuilds.
        instance_p = jsonutils.to_primitive(instance)
        bdms_p = jsonutils.to_primitive(bdms)
        cctxt = self.client.prepare(server=_compute_host(host, instance),
                                    version='2.22')
        cctxt.cast(ctxt, 'rebuild_instance',
                   instance=instance_p, new_pass=new_pass,
                   injected_files=injected_files, image_ref=image_ref,
                   orig_image_ref=orig_image_ref,
                   orig_sys_metadata=orig_sys_metadata, bdms=bdms_p,
                   recreate=recreate, on_shared_storage=on_shared_storage)

    def refresh_provider_fw_rules(self, ctxt, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(ctxt, 'refresh_provider_fw_rules')

    def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
                              slave_info=None):
        '''Remove aggregate host.

        :param ctxt: request context
        :param aggregate: aggregate to remove the host from (serialized to
                          a primitive for pre-2.48 servers)
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param host: This is the host to send the message to.
        '''
        if self.client.can_send_version('2.48'):
            version = '2.48'
        else:
            version = '2.15'
            aggregate = jsonutils.to_primitive(aggregate)
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'remove_aggregate_host',
                   aggregate=aggregate, host=host_param,
                   slave_info=slave_info)

    def remove_fixed_ip_from_instance(self, ctxt, instance, address):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
                   instance=instance_p, address=address)

    def remove_volume_connection(self, ctxt, instance, volume_id, host):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host)
        return cctxt.call(ctxt, 'remove_volume_connection',
                          instance=instance_p, volume_id=volume_id)

    def rescue_instance(self, ctxt, instance, rescue_password):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'rescue_instance',
                   instance=instance_p,
                   rescue_password=rescue_password)

    def reset_network(self, ctxt, instance):
        if self.client.can_send_version('2.40'):
            version = '2.40'
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            version = '2.0'
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'reset_network', instance=instance)

    def resize_instance(self, ctxt, instance, migration, image, instance_type,
                        reservations=None):
        if self.client.can_send_version('2.45'):
            version = '2.45'
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            migration = jsonutils.to_primitive(
                objects_base.obj_to_primitive(migration))
            version = '2.16'
        instance_type_p = jsonutils.to_primitive(instance_type)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'resize_instance',
                   instance=instance, migration=migration,
                   image=image, reservations=reservations,
                   instance_type=instance_type_p)

    def resume_instance(self, ctxt, instance):
        if self.client.can_send_version('2.33'):
            version = '2.33'
        else:
            version = '2.0'
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'resume_instance', instance=instance)

    def revert_resize(self, ctxt, instance, migration, host,
                      reservations=None):
        if self.client.can_send_version('2.39'):
            version = '2.39'
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            migration = jsonutils.to_primitive(
                objects_base.obj_to_primitive(migration))
            version = '2.12'
        cctxt = self.client.prepare(server=_compute_host(host, instance),
                                    version=version)
        cctxt.cast(ctxt, 'revert_resize',
                   instance=instance, migration=migration,
                   reservations=reservations)

    def rollback_live_migration_at_destination(self, ctxt, instance, host):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host)
        cctxt.cast(ctxt, 'rollback_live_migration_at_destination',
                   instance=instance_p)

    def run_instance(self, ctxt, instance, host, request_spec,
                     filter_properties, requested_networks,
                     injected_files, admin_password,
                     is_first_time, node=None, legacy_bdm_in_spec=True):
        instance_p = jsonutils.to_primitive(instance)
        msg_kwargs = {'instance': instance_p, 'request_spec': request_spec,
                      'filter_properties': filter_properties,
                      'requested_networks': requested_networks,
                      'injected_files': injected_files,
                      'admin_password': admin_password,
                      'is_first_time': is_first_time, 'node': node}
        # legacy_bdm_in_spec only exists from 2.37 on; omit it for older
        # servers rather than sending an unknown kwarg.
        if self.client.can_send_version('2.37'):
            version = '2.37'
            msg_kwargs['legacy_bdm_in_spec'] = legacy_bdm_in_spec
        else:
            version = '2.19'
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'run_instance', **msg_kwargs)

    def set_admin_password(self, ctxt, instance, new_pass):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        return cctxt.call(ctxt, 'set_admin_password',
                          instance=instance_p, new_pass=new_pass)

    def set_host_enabled(self, ctxt, enabled, host):
        cctxt = self.client.prepare(server=host)
        return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)

    def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.34')
        cctxt.cast(ctxt, 'swap_volume',
                   instance=instance, old_volume_id=old_volume_id,
                   new_volume_id=new_volume_id)

    def get_host_uptime(self, ctxt, host):
        cctxt = self.client.prepare(server=host)
        return cctxt.call(ctxt, 'get_host_uptime')

    def reserve_block_device_name(self, ctxt, instance, device, volume_id):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.3')
        return cctxt.call(ctxt, 'reserve_block_device_name',
                          instance=instance_p, device=device,
                          volume_id=volume_id)

    def live_snapshot_instance(self, ctxt, instance, image_id):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.30')
        cctxt.cast(ctxt, 'live_snapshot_instance',
                   instance=instance_p, image_id=image_id)

    def backup_instance(self, ctxt, instance, image_id, backup_type,
                        rotation):
        # Pre-2.42 servers have no backup_instance(); emulate it by calling
        # snapshot_instance with image_type='backup'.
        if self.client.can_send_version('2.42'):
            version = '2.42'
            method = 'backup_instance'
            extra_kwargs = dict()
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            method = 'snapshot_instance'
            extra_kwargs = dict(image_type='backup')
            version = '2.0'
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, method,
                   instance=instance,
                   image_id=image_id,
                   backup_type=backup_type,
                   rotation=rotation,
                   **extra_kwargs)

    def snapshot_instance(self, ctxt, instance, image_id):
        # Pre-2.42 snapshot_instance() also expected the backup-related
        # kwargs; send them explicitly as None.
        if self.client.can_send_version('2.42'):
            version = '2.42'
            extra_kwargs = dict()
        else:
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            extra_kwargs = dict(image_type='snapshot',
                                backup_type=None,
                                rotation=None)
            version = '2.0'
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'snapshot_instance',
                   instance=instance,
                   image_id=image_id,
                   **extra_kwargs)

    def start_instance(self, ctxt, instance):
        if self.client.can_send_version('2.29'):
            version = '2.29'
        else:
            version = '2.0'
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'start_instance', instance=instance)

    def stop_instance(self, ctxt, instance, do_cast=True):
        if self.client.can_send_version('2.29'):
            version = '2.29'
        else:
            version = '2.0'
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        rpc_method = cctxt.cast if do_cast else cctxt.call
        return rpc_method(ctxt, 'stop_instance', instance=instance)

    def suspend_instance(self, ctxt, instance):
        if self.client.can_send_version('2.33'):
            version = '2.33'
        else:
            version = '2.0'
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'suspend_instance', instance=instance)

    def terminate_instance(self, ctxt, instance, bdms, reservations=None):
        if self.client.can_send_version('2.35'):
            version = '2.35'
        else:
            version = '2.27'
            instance = jsonutils.to_primitive(instance)
        bdms_p = jsonutils.to_primitive(bdms)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'terminate_instance',
                   instance=instance, bdms=bdms_p,
                   reservations=reservations)

    def unpause_instance(self, ctxt, instance):
        if self.client.can_send_version('2.36'):
            version = '2.36'
        else:
            version = '2.0'
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'unpause_instance', instance=instance)

    def unrescue_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'unrescue_instance', instance=instance_p)

    def soft_delete_instance(self, ctxt, instance, reservations=None):
        if self.client.can_send_version('2.35'):
            version = '2.35'
        else:
            version = '2.27'
            instance = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version=version)
        cctxt.cast(ctxt, 'soft_delete_instance',
                   instance=instance, reservations=reservations)

    def restore_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance))
        cctxt.cast(ctxt, 'restore_instance', instance=instance_p)

    def shelve_instance(self, ctxt, instance, image_id=None):
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.31')
        cctxt.cast(ctxt, 'shelve_instance',
                   instance=instance, image_id=image_id)

    def shelve_offload_instance(self, ctxt, instance):
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.31')
        cctxt.cast(ctxt, 'shelve_offload_instance', instance=instance)

    def unshelve_instance(self, ctxt, instance, host, image=None):
        cctxt = self.client.prepare(server=host, version='2.31')
        cctxt.cast(ctxt, 'unshelve_instance',
                   instance=instance, image=image)

    def volume_snapshot_create(self, ctxt, instance, volume_id,
                               create_info):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.44')
        cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance_p,
                   volume_id=volume_id, create_info=create_info)

    def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
                               delete_info):
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                                    version='2.44')
        cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance_p,
                   volume_id=volume_id, snapshot_id=snapshot_id,
                   delete_info=delete_info)
class SecurityGroupAPI(rpcclient.RpcProxy):
    '''Client side of the security group rpc API.

    API version history:

        1.0 - Initial version.
        1.41 - Adds refresh_instance_security_rules()
        2.0 - Remove 1.x backwards compat
    '''

    # Default minimum rpc API version the server (manager) side must
    # implement unless a specific version is requested per message.  Keep
    # this at X.0 for the current major API version.
    BASE_RPC_API_VERSION = '2.0'

    def __init__(self):
        super(SecurityGroupAPI, self).__init__(
            topic=CONF.compute_topic,
            default_version=self.BASE_RPC_API_VERSION)
        self.client = self.get_client()

    def refresh_security_group_rules(self, ctxt, security_group_id, host):
        # Fire-and-forget refresh on an explicitly named host.
        self.client.prepare(server=host).cast(
            ctxt, 'refresh_security_group_rules',
            security_group_id=security_group_id)

    def refresh_security_group_members(self, ctxt, security_group_id,
                                       host):
        self.client.prepare(server=host).cast(
            ctxt, 'refresh_security_group_members',
            security_group_id=security_group_id)

    def refresh_instance_security_rules(self, ctxt, host, instance):
        # NOTE(review): the message is routed via the instance's own host;
        # the ``host`` argument is accepted but not used here.
        primitive = jsonutils.to_primitive(instance)
        self.client.prepare(server=_compute_host(None, instance)).cast(
            ctxt, 'refresh_instance_security_rules',
            instance=primitive)
| |
# -*- coding: utf-8 -*-
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Python modules manipulation utility functions.
:type PY_SOURCE_EXTS: tuple(str)
:var PY_SOURCE_EXTS: list of possible python source file extension
:type STD_LIB_DIR: str
:var STD_LIB_DIR: directory where standard modules are located
:type BUILTIN_MODULES: dict
:var BUILTIN_MODULES: dictionary with builtin module names has key
"""
__docformat__ = "restructuredtext en"
import sys
import os
from os.path import walk, splitext, join, abspath, isdir, dirname, exists
from imp import find_module, load_module, C_BUILTIN, PY_COMPILED, PKG_DIRECTORY
try:
import zipimport
except ImportError:
zipimport = None
ZIPFILE = object()
from logilab.common import STD_BLACKLIST
if sys.platform.startswith('win'):
PY_SOURCE_EXTS = ('py', 'pyw')
PY_COMPILED_EXTS = ('dll', 'pyd')
STD_LIB_DIR = join(sys.prefix, 'lib')
else:
PY_SOURCE_EXTS = ('py',)
PY_COMPILED_EXTS = ('so',)
STD_LIB_DIR = join(sys.prefix, 'lib', 'python%s' % sys.version[:3])
BUILTIN_MODULES = dict(zip(sys.builtin_module_names,
[1]*len(sys.builtin_module_names)))
class NoSourceFile(Exception):
    """Raised when no Python source file can be located for a
    precompiled (byte-compiled or extension) module file.
    """
class LazyObject(object):
    """Proxy that defers importing `module` and fetching attribute `obj`
    from it until the first attribute access or call.
    """
    def __init__(self, module, obj):
        self.module = module   # dotted name of the module to import lazily
        self.obj = obj         # name of the attribute to fetch from it
        self._imported = None  # cache for the resolved real object
    def __getobj(self):
        # Import and resolve the real object on first use, then cache it.
        if self._imported is None:
            self._imported = getattr(load_module_from_name(self.module),
                                     self.obj)
        return self._imported
    def __getattribute__(self, attr):
        try:
            # own attributes (module, obj, _imported, methods) win
            return super(LazyObject, self).__getattribute__(attr)
        except AttributeError, ex:
            # anything else is delegated to the lazily imported object
            return getattr(self.__getobj(), attr)
    def __call__(self, *args, **kwargs):
        # calling the proxy calls the real (lazily imported) object
        return self.__getobj()(*args, **kwargs)
def load_module_from_name(dotted_name, path=None, use_sys=1):
    """Load and return the Python module named `dotted_name`.
    :type dotted_name: str
    :param dotted_name: python name of a module or package
    :type path: list or None
    :param path:
      optional list of paths where the module or package should be
      searched (sys.path is used when nothing or None is given)
    :type use_sys: bool
    :param use_sys:
      whether the sys.modules dictionary may be consulted and updated
    :raise ImportError: if the module or package is not found
    :rtype: module
    :return: the loaded module
    """
    parts = dotted_name.split('.')
    return load_module_from_modpath(parts, path, use_sys)
def load_module_from_modpath(parts, path=None, use_sys=1):
    """Load a python module from it's splitted name.
    :type parts: list(str) or tuple(str)
    :param parts:
      python name of a module or package splitted on '.'
    :type path: list or None
    :param path:
      optional list of path where the module or package should be
      searched (use sys.path if nothing or None is given)
    :type use_sys: bool
    :param use_sys:
      boolean indicating whether the sys.modules dictionary should be used or not
    :raise ImportError: if the module or package is not found
    :rtype: module
    :return: the loaded module
    """
    if use_sys:
        # fast path: the full dotted module may already be imported
        try:
            return sys.modules['.'.join(parts)]
        except KeyError:
            pass
    modpath = []
    prevmodule = None
    # walk the dotted path, importing one package level at a time so that
    # each parent package gets the child bound as an attribute
    for part in parts:
        modpath.append(part)
        curname = ".".join(modpath)
        module = None
        if len(modpath) != len(parts):
            # even with use_sys=False, should try to get outer packages from sys.modules
            module = sys.modules.get(curname)
        if module is None:
            mp_file, mp_filename, mp_desc = find_module(part, path)
            module = load_module(curname, mp_file, mp_filename, mp_desc)
        if prevmodule:
            setattr(prevmodule, part, module)
        _file = getattr(module, "__file__", "")
        # a package without __file__ cannot contain submodules we can find
        if not _file and len(modpath) != len(parts):
            raise ImportError("no module in %s" % ".".join(parts[len(modpath):]) )
        # restrict the next find_module() to the package's own directory
        path = [dirname( _file )]
        prevmodule = module
    return module
def load_module_from_file(filepath, path=None, use_sys=1):
    """Load and return the Python module located at `filepath`.
    :type filepath: str
    :param filepath: path to the python module or package
    :type path: list or None
    :param path:
      optional list of paths where the module or package should be
      searched (sys.path is used when nothing or None is given)
    :type use_sys: bool
    :param use_sys:
      whether the sys.modules dictionary may be consulted and updated
    :raise ImportError: if the module or package is not found
    :rtype: module
    :return: the loaded module
    """
    modpath = modpath_from_file(filepath)
    return load_module_from_modpath(modpath, path, use_sys)
def _check_init(path, mod_path):
    """Return True when every directory along `mod_path` (relative to
    `path`) contains an __init__ file, i.e. is a real python package.
    """
    current = path
    for part in mod_path:
        current = join(current, part)
        if not _has_init(current):
            return False
    return True
def modpath_from_file(filename, extrapath=None):
    """given a file path return the corresponding splitted module's name
    (i.e name of a module or package splitted on '.')
    :type filename: str
    :param filename: file's path for which we want the module's name
    :type extrapath: dict
    :param extrapath:
      optional extra search path, with path as key and package name for the path
      as value. This is usually useful to handle package splitted in multiple
      directories using __path__ trick.
    :raise ImportError:
      if the corresponding module's name has not been found
    :rtype: list(str)
    :return: the corresponding splitted module's name
    """
    base = splitext(abspath(filename))[0]
    # first honour explicitly registered extra paths
    if extrapath is not None:
        for path_ in extrapath:
            path = abspath(path_)
            if path and base[:len(path)] == path:
                submodpath = [pkg for pkg in base[len(path):].split(os.sep)
                              if pkg]
                # all intermediate directories must be packages
                if _check_init(path, submodpath[:-1]):
                    return extrapath[path_].split('.') + submodpath
    # then fall back to sys.path prefixes
    for path in sys.path:
        path = abspath(path)
        if path and base[:len(path)] == path:
            # a file under site-packages must be resolved against a
            # site-packages entry, not a shorter non-site-packages prefix
            if filename.find('site-packages') != -1 and \
                   path.find('site-packages') == -1:
                continue
            modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg]
            if _check_init(path, modpath[:-1]):
                return modpath
    raise ImportError('Unable to find module for %s in %s' % (
        filename, ', \n'.join(sys.path)))
def file_from_modpath(modpath, path=None, context_file=None):
    """given a mod path (i.e. splitted module / package name), return the
    corresponding file, giving priority to source file over precompiled
    file if it exists
    :type modpath: list or tuple
    :param modpath:
      splitted module's name (i.e name of a module or package splitted
      on '.')
      (this means explicit relative imports that start with dots have
      empty strings in this list!)
    :type path: list or None
    :param path:
      optional list of path where the module or package should be
      searched (use sys.path if nothing or None is given)
    :type context_file: str or None
    :param context_file:
      context file to consider, necessary if the identifier has been
      introduced using a relative import unresolvable in the actual
      context (i.e. modutils)
    :raise ImportError: if there is no such module in the directory
    :rtype: str or None
    :return:
      the path to the module's file or None if it's an integrated
      builtin module such as 'sys'
    """
    # relative imports are resolved from the directory of the context file
    if context_file is not None:
        context = dirname(context_file)
    else:
        context = context_file
    if modpath[0] == 'xml':
        # handle _xmlplus
        try:
            return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
        except ImportError:
            return _file_from_modpath(modpath, path, context)
    elif modpath == ['os', 'path']:
        # FIXME: currently ignoring search_path...
        return os.path.__file__
    return _file_from_modpath(modpath, path, context)
def get_module_part(dotted_name, context_file=None):
    """given a dotted name return the module part of the name :
    >>> get_module_part('logilab.common.modutils.get_module_part')
    'logilab.common.modutils'
    :type dotted_name: str
    :param dotted_name: full name of the identifier we are interested in
    :type context_file: str or None
    :param context_file:
      context file to consider, necessary if the identifier has been
      introduced using a relative import unresolvable in the actual
      context (i.e. modutils)
    :raise ImportError: if there is no such module in the directory
    :rtype: str or None
    :return:
      the module part of the name or None if we have not been able at
      all to import the given name
    XXX: deprecated, since it doesn't handle package precedence over module
    (see #10066)
    """
    # os.path trick
    if dotted_name.startswith('os.path'):
        return 'os.path'
    parts = dotted_name.split('.')
    if context_file is not None:
        # first check for builtin module which won't be considered latter
        # in that case (path != None)
        if parts[0] in BUILTIN_MODULES:
            if len(parts) > 2:
                raise ImportError(dotted_name)
            return parts[0]
    # don't use += or insert, we want a new list to be created !
    path = None
    starti = 0
    # leading empty strings come from explicit relative imports ('.x' etc.)
    if parts[0] == '':
        assert context_file is not None, \
               'explicit relative import, but no context_file?'
        path = [] # prevent resolving the import non-relatively
        starti = 1
    while parts[starti] == '': # for all further dots: change context
        starti += 1
        context_file = dirname(context_file)
    # grow the candidate prefix one segment at a time; the longest prefix
    # that still resolves to a file is the module part
    for i in range(starti, len(parts)):
        try:
            file_from_modpath(parts[starti:i+1],
                              path=path, context_file=context_file)
        except ImportError:
            # the last two segments may legitimately be attributes, not modules
            if not i >= max(1, len(parts) - 2):
                raise
            return '.'.join(parts[:i])
    return dotted_name
def get_modules(package, src_directory, blacklist=STD_BLACKLIST):
    """given a package directory return a list of all available python
    modules in the package and its subpackages
    :type package: str
    :param package: the python name for the package
    :type src_directory: str
    :param src_directory:
      path of the directory corresponding to the package
    :type blacklist: list or tuple
    :param blacklist:
      optional list of files or directory to ignore, default to
      the value of `logilab.common.STD_BLACKLIST`
    :rtype: list
    :return:
      the list of all available python modules in the package and its
      subpackages
    """
    def func(modules, directory, fnames):
        """walk handler"""
        # remove files/directories in the black list
        for norecurs in blacklist:
            try:
                fnames.remove(norecurs)
            except ValueError:
                continue
        # check for __init__.py
        if not '__init__.py' in fnames:
            # not a package: emptying fnames stops os.path.walk's recursion
            while fnames:
                fnames.pop()
        elif directory != src_directory:
            #src = join(directory, file)
            dir_package = directory[len(src_directory):].replace(os.sep, '.')
            modules.append(package + dir_package)
        for filename in fnames:
            src = join(directory, filename)
            if isdir(src):
                continue
            if _is_python_file(filename) and filename != '__init__.py':
                # NOTE(review): the [:-3] slice assumes a 3-char extension
                # ('.py'/'.so'); 4-char ones ('.pyw'/'.pyd') would keep a
                # stray character — confirm whether that matters here.
                module = package + src[len(src_directory):-3]
                modules.append(module.replace(os.sep, '.'))
    modules = []
    walk(src_directory, func, modules)
    return modules
def get_module_files(src_directory, blacklist=STD_BLACKLIST):
    """given a package directory return a list of all available python
    module's files in the package and its subpackages
    :type src_directory: str
    :param src_directory:
      path of the directory corresponding to the package
    :type blacklist: list or tuple
    :param blacklist:
      optional list of files or directory to ignore, default to the value of
      `logilab.common.STD_BLACKLIST`
    :rtype: list
    :return:
      the list of all available python module's files in the package and
      its subpackages
    """
    def func(files, directory, fnames):
        """walk handler"""
        # remove files/directories in the black list
        for norecurs in blacklist:
            try:
                fnames.remove(norecurs)
            except ValueError:
                continue
        # check for __init__.py
        if not '__init__.py' in fnames:
            # not a package: emptying fnames stops os.path.walk's recursion
            while fnames:
                fnames.pop()
        for filename in fnames:
            src = join(directory, filename)
            if isdir(src):
                continue
            if _is_python_file(filename):
                files.append(src)
    files = []
    walk(src_directory, func, files)
    return files
def get_source_file(filename, include_no_ext=False):
    """Return the absolute path of the source file matching a python
    module's file name (the input is returned as-is when it already names
    an existing source file).
    :type filename: str
    :param filename: python module's file name
    :raise NoSourceFile: if no source file exists on the file system
    :rtype: str
    :return: the absolute path of the source file if it exists
    """
    base, orig_ext = splitext(abspath(filename))
    for ext in PY_SOURCE_EXTS:
        candidate = '%s.%s' % (base, ext)
        if exists(candidate):
            return candidate
    # an extension-less existing file may count as source on request
    if include_no_ext and not orig_ext and exists(base):
        return base
    raise NoSourceFile(filename)
def cleanup_sys_modules(directories):
    """remove submodules of `directories` from `sys.modules`
    :type directories: list(str) or tuple(str)
    :param directories: directory path prefixes to purge from the module cache
    """
    # Snapshot the items before mutating: deleting from sys.modules while
    # iterating a live view raises RuntimeError on Python 3; on Python 2
    # items() already returned a list, so this stays backward-compatible.
    for modname, module in list(sys.modules.items()):
        modfile = getattr(module, '__file__', None)
        if modfile:
            for directory in directories:
                if modfile.startswith(directory):
                    del sys.modules[modname]
                    break
def is_python_source(filename):
    """Return True when `filename` carries a python source extension.
    :rtype: bool
    """
    ext = splitext(filename)[1]
    return ext[1:] in PY_SOURCE_EXTS
def is_standard_module(modname, std_path=(STD_LIB_DIR,)):
    """try to guess if a module is a standard python module (by default,
    see `std_path` parameter's description)
    :type modname: str
    :param modname: name of the module we are interested in
    :type std_path: list(str) or tuple(str)
    :param std_path: list of path considered has standard
    :rtype: bool
    :return:
      true if the module:
      - is located on the path listed in one of the directory in `std_path`
      - is a built-in module
    NOTE(review): the implementation returns a mix of 0/1/False; all are
    valid truth values but callers should not compare with `is`.
    """
    # only the top-level package decides whether the module is standard
    modname = modname.split('.')[0]
    try:
        filename = file_from_modpath([modname])
    except ImportError, ex:
        # import failed, i'm probably not so wrong by supposing it's
        # not standard...
        return 0
    # modules which are not living in a file are considered standard
    # (sys and __builtin__ for instance)
    if filename is None:
        return 1
    filename = abspath(filename)
    for path in std_path:
        path = abspath(path)
        if filename.startswith(path):
            pfx_len = len(path)
            # a module under <std>/site-packages is third-party, not standard
            if filename[pfx_len+1:pfx_len+14] != 'site-packages':
                return 1
            return 0
    return False
def is_relative(modname, from_file):
    """Return true if `modname` can be imported relatively to `from_file`.
    :type modname: str
    :param modname: name of the module we are interested in
    :type from_file: str
    :param from_file:
      path of the module from which modname has been imported
    :rtype: bool
    :return:
      true if the module has been imported relatively to `from_file`
    """
    directory = from_file if isdir(from_file) else dirname(from_file)
    # a directory already on sys.path yields absolute, not relative, imports
    if directory in sys.path:
        return False
    try:
        find_module(modname.split('.')[0], [directory])
        return True
    except ImportError:
        return False
# internal only functions #####################################################
def _file_from_modpath(modpath, path=None, context=None):
    """given a mod path (i.e. splitted module / package name), return the
    corresponding file
    this function is used internally, see `file_from_modpath`'s
    documentation for more information
    """
    assert len(modpath) > 0
    # prefer resolving relative to the calling context, falling back to
    # the regular search path
    if context is not None:
        try:
            mtype, mp_filename = _module_file(modpath, [context])
        except ImportError:
            mtype, mp_filename = _module_file(modpath, path)
    else:
        mtype, mp_filename = _module_file(modpath, path)
    if mtype == PY_COMPILED:
        # compiled module: try to map it back to its source file
        try:
            return get_source_file(mp_filename)
        except NoSourceFile:
            return mp_filename
    elif mtype == C_BUILTIN:
        # integrated builtin module
        return None
    elif mtype == PKG_DIRECTORY:
        # package: answer with its __init__ file (or None if missing)
        mp_filename = _has_init(mp_filename)
    return mp_filename
def _search_zip(modpath, pic):
for filepath, importer in pic.items():
if importer is not None:
if importer.find_module(modpath[0]):
if not importer.find_module('/'.join(modpath)):
raise ImportError('No module named %s in %s/%s' % (
'.'.join(modpath[1:]), file, modpath))
return ZIPFILE, abspath(filepath) + '/' + '/'.join(modpath), filepath
raise ImportError('No module named %s' % '.'.join(modpath))
def _module_file(modpath, path=None):
    """get a module type / file path
    :type modpath: list or tuple
    :param modpath:
      splitted module's name (i.e name of a module or package splitted
      on '.'), with leading empty strings for explicit relative import
    :type path: list or None
    :param path:
      optional list of path where the module or package should be
      searched (use sys.path if nothing or None is given)
    :rtype: tuple(int, str)
    :return: the module type flag and the file path for a module
    """
    # egg support compat
    try:
        # prime sys.path_importer_cache with a zipimporter (or None) for
        # every search path entry so eggs/zips can be consulted below
        pic = sys.path_importer_cache
        _path = (path is None and sys.path or path)
        for __path in _path:
            if not __path in pic:
                try:
                    pic[__path] = zipimport.zipimporter(__path)
                except zipimport.ZipImportError:
                    pic[__path] = None
        checkeggs = True
    except AttributeError:
        checkeggs = False
    imported = []
    # resolve the dotted path one segment at a time, descending into
    # package directories
    while modpath:
        try:
            _, mp_filename, mp_desc = find_module(modpath[0], path)
        except ImportError:
            if checkeggs:
                # not on the filesystem: fall back to zip/egg archives
                return _search_zip(modpath, pic)[:2]
            raise
        else:
            if checkeggs:
                fullabspath = [abspath(x) for x in _path]
                try:
                    pathindex = fullabspath.index(dirname(abspath(mp_filename)))
                    emtype, emp_filename, zippath = _search_zip(modpath, pic)
                    if pathindex > _path.index(zippath):
                        # an egg takes priority
                        return emtype, emp_filename
                except ValueError:
                    # XXX not in _path
                    pass
                except ImportError:
                    pass
                # only the first (top-level) segment is checked against eggs
                checkeggs = False
        imported.append(modpath.pop(0))
        mtype = mp_desc[2]
        if modpath:
            if mtype != PKG_DIRECTORY:
                raise ImportError('No module %s in %s' % ('.'.join(modpath),
                                                          '.'.join(imported)))
            # continue the search inside the package directory
            path = [mp_filename]
    return mtype, mp_filename
def _is_python_file(filename):
"""return true if the given filename should be considered as a python file
.pyc and .pyo are ignored
"""
for ext in ('.py', '.so', '.pyd', '.pyw'):
if filename.endswith(ext):
return True
return False
def _has_init(directory):
    """if the given directory has a valid __init__ file, return its path,
    else return None
    """
    candidate = join(directory, '__init__')
    # byte-compiled __init__ files also mark a valid package
    for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'):
        init_path = '%s.%s' % (candidate, ext)
        if exists(init_path):
            return init_path
    return None
| |
# Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize
from ..base import BaseEstimator, RegressorMixin
from ._base import LinearModel
from ..utils import axis0_safe_slice
from ..utils.validation import _check_sample_weight
from ..utils.extmath import safe_sparse_dot
from ..utils.optimize import _check_optimize_result
def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
    """Returns the Huber loss and the gradient.
    Parameters
    ----------
    w : ndarray, shape (n_features + 1,) or (n_features + 2,)
        Feature vector.
        w[:n_features] gives the coefficients
        w[-1] gives the scale factor and if the intercept is fit w[-2]
        gives the intercept factor.
    X : ndarray of shape (n_samples, n_features)
        Input data.
    y : ndarray of shape (n_samples,)
        Target vector.
    epsilon : float
        Robustness of the Huber estimator.
    alpha : float
        Regularization parameter.
    sample_weight : ndarray of shape (n_samples,), default=None
        Weight assigned to each sample.
    Returns
    -------
    loss : float
        Huber loss.
    gradient : ndarray, shape (len(w))
        Returns the derivative of the Huber loss with respect to each
        coefficient, intercept and the scale as a vector.
    """
    _, n_features = X.shape
    # The intercept is being fit iff w carries one extra entry beyond
    # the coefficients and the scale.
    fit_intercept = n_features + 2 == w.shape[0]
    if fit_intercept:
        intercept = w[-2]
    sigma = w[-1]
    w = w[:n_features]
    n_samples = np.sum(sample_weight)
    # Calculate the values where |y - X'w -c / sigma| > epsilon
    # The values above this threshold are outliers.
    linear_loss = y - safe_sparse_dot(X, w)
    if fit_intercept:
        linear_loss -= intercept
    abs_linear_loss = np.abs(linear_loss)
    outliers_mask = abs_linear_loss > epsilon * sigma
    # Calculate the linear loss due to the outliers.
    # This is equal to (2 * M * |y - X'w -c / sigma| - M**2) * sigma
    outliers = abs_linear_loss[outliers_mask]
    num_outliers = np.count_nonzero(outliers_mask)
    n_non_outliers = X.shape[0] - num_outliers
    # n_sq_outliers includes the weight give to the outliers while
    # num_outliers is just the number of outliers.
    outliers_sw = sample_weight[outliers_mask]
    n_sw_outliers = np.sum(outliers_sw)
    outlier_loss = (
        2.0 * epsilon * np.sum(outliers_sw * outliers)
        - sigma * n_sw_outliers * epsilon ** 2
    )
    # Calculate the quadratic loss due to the non-outliers.-
    # This is equal to |(y - X'w - c)**2 / sigma**2| * sigma
    non_outliers = linear_loss[~outliers_mask]
    weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers
    weighted_loss = np.dot(weighted_non_outliers.T, non_outliers)
    squared_loss = weighted_loss / sigma
    # grad holds d(loss)/d(coef), then the intercept slot (if fit) and
    # the scale slot last, matching the layout of w.
    if fit_intercept:
        grad = np.zeros(n_features + 2)
    else:
        grad = np.zeros(n_features + 1)
    # Gradient due to the squared loss.
    X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers)
    grad[:n_features] = (
        2.0 / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers)
    )
    # Gradient due to the linear loss.
    signed_outliers = np.ones_like(outliers)
    signed_outliers_mask = linear_loss[outliers_mask] < 0
    signed_outliers[signed_outliers_mask] = -1.0
    X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers)
    sw_outliers = sample_weight[outliers_mask] * signed_outliers
    grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(sw_outliers, X_outliers))
    # Gradient due to the penalty.
    grad[:n_features] += alpha * 2.0 * w
    # Gradient due to sigma.
    grad[-1] = n_samples
    grad[-1] -= n_sw_outliers * epsilon ** 2
    grad[-1] -= squared_loss / sigma
    # Gradient due to the intercept.
    if fit_intercept:
        grad[-2] = -2.0 * np.sum(weighted_non_outliers) / sigma
        grad[-2] -= 2.0 * epsilon * np.sum(sw_outliers)
    loss = n_samples * sigma + squared_loss + outlier_loss
    loss += alpha * np.dot(w, w)
    return loss, grad
class HuberRegressor(LinearModel, RegressorMixin, BaseEstimator):
    """Linear regression model that is robust to outliers.
    The Huber Regressor optimizes the squared loss for the samples where
    ``|(y - X'w) / sigma| < epsilon`` and the absolute loss for the samples
    where ``|(y - X'w) / sigma| > epsilon``, where w and sigma are parameters
    to be optimized. The parameter sigma makes sure that if y is scaled up
    or down by a certain factor, one does not need to rescale epsilon to
    achieve the same robustness. Note that this does not take into account
    the fact that the different features of X may be of different scales.
    This makes sure that the loss function is not heavily influenced by the
    outliers while not completely ignoring their effect.
    Read more in the :ref:`User Guide <huber_regression>`
    .. versionadded:: 0.18
    Parameters
    ----------
    epsilon : float, greater than 1.0, default=1.35
        The parameter epsilon controls the number of samples that should be
        classified as outliers. The smaller the epsilon, the more robust it is
        to outliers.
    max_iter : int, default=100
        Maximum number of iterations that
        ``scipy.optimize.minimize(method="L-BFGS-B")`` should run for.
    alpha : float, default=0.0001
        Regularization parameter.
    warm_start : bool, default=False
        This is useful if the stored attributes of a previously used model
        has to be reused. If set to False, then the coefficients will
        be rewritten for every call to fit.
        See :term:`the Glossary <warm_start>`.
    fit_intercept : bool, default=True
        Whether or not to fit the intercept. This can be set to False
        if the data is already centered around the origin.
    tol : float, default=1e-05
        The iteration will stop when
        ``max{|proj g_i | i = 1, ..., n}`` <= ``tol``
        where pg_i is the i-th component of the projected gradient.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Features got by optimizing the Huber loss.
    intercept_ : float
        Bias.
    scale_ : float
        The value by which ``|y - X'w - c|`` is scaled down.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_iter_ : int
        Number of iterations that
        ``scipy.optimize.minimize(method="L-BFGS-B")`` has run for.
        .. versionchanged:: 0.20
            In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
            ``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
    outliers_ : array, shape (n_samples,)
        A boolean mask which is set to True where the samples are identified
        as outliers.
    See Also
    --------
    RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
    TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
    SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
    References
    ----------
    .. [1] Peter J. Huber, Elvezio M. Ronchetti, Robust Statistics
           Concomitant scale estimates, pg 172
    .. [2] Art B. Owen (2006), A robust hybrid of lasso and ridge regression.
           https://statweb.stanford.edu/~owen/reports/hhu.pdf
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import HuberRegressor, LinearRegression
    >>> from sklearn.datasets import make_regression
    >>> rng = np.random.RandomState(0)
    >>> X, y, coef = make_regression(
    ...     n_samples=200, n_features=2, noise=4.0, coef=True, random_state=0)
    >>> X[:4] = rng.uniform(10, 20, (4, 2))
    >>> y[:4] = rng.uniform(10, 20, 4)
    >>> huber = HuberRegressor().fit(X, y)
    >>> huber.score(X, y)
    -7.284...
    >>> huber.predict(X[:1,])
    array([806.7200...])
    >>> linear = LinearRegression().fit(X, y)
    >>> print("True coefficients:", coef)
    True coefficients: [20.4923... 34.1698...]
    >>> print("Huber coefficients:", huber.coef_)
    Huber coefficients: [17.7906... 31.0106...]
    >>> print("Linear Regression coefficients:", linear.coef_)
    Linear Regression coefficients: [-1.9221... 7.0226...]
    """
    def __init__(
        self,
        *,
        epsilon=1.35,
        max_iter=100,
        alpha=0.0001,
        warm_start=False,
        fit_intercept=True,
        tol=1e-05,
    ):
        # sklearn convention: __init__ only stores the hyper-parameters,
        # all validation happens in fit()
        self.epsilon = epsilon
        self.max_iter = max_iter
        self.alpha = alpha
        self.warm_start = warm_start
        self.fit_intercept = fit_intercept
        self.tol = tol
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        sample_weight : array-like, shape (n_samples,)
            Weight given to each sample.
        Returns
        -------
        self : object
            Fitted `HuberRegressor` estimator.
        """
        X, y = self._validate_data(
            X,
            y,
            copy=False,
            accept_sparse=["csr"],
            y_numeric=True,
            dtype=[np.float64, np.float32],
        )
        sample_weight = _check_sample_weight(sample_weight, X)
        if self.epsilon < 1.0:
            raise ValueError(
                "epsilon should be greater than or equal to 1.0, got %f" % self.epsilon
            )
        # parameter vector layout: [coef..., (intercept), scale]
        if self.warm_start and hasattr(self, "coef_"):
            parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_]))
        else:
            if self.fit_intercept:
                parameters = np.zeros(X.shape[1] + 2)
            else:
                parameters = np.zeros(X.shape[1] + 1)
            # Make sure to initialize the scale parameter to a strictly
            # positive value:
            parameters[-1] = 1
        # Sigma or the scale factor should be non-negative.
        # Setting it to be zero might cause undefined bounds hence we set it
        # to a value close to zero.
        bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1))
        bounds[-1][0] = np.finfo(np.float64).eps * 10
        # jac=True: _huber_loss_and_gradient returns (loss, gradient), so
        # L-BFGS-B uses the analytic gradient instead of finite differences
        opt_res = optimize.minimize(
            _huber_loss_and_gradient,
            parameters,
            method="L-BFGS-B",
            jac=True,
            args=(X, y, self.epsilon, self.alpha, sample_weight),
            options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1},
            bounds=bounds,
        )
        parameters = opt_res.x
        # status == 2 signals the solver stopped abnormally
        if opt_res.status == 2:
            raise ValueError(
                "HuberRegressor convergence failed: l-BFGS-b solver terminated with %s"
                % opt_res.message
            )
        self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
        # unpack the solution back into the fitted attributes
        self.scale_ = parameters[-1]
        if self.fit_intercept:
            self.intercept_ = parameters[-2]
        else:
            self.intercept_ = 0.0
        self.coef_ = parameters[: X.shape[1]]
        # flag samples whose absolute residual exceeds epsilon * scale
        residual = np.abs(y - safe_sparse_dot(X, self.coef_) - self.intercept_)
        self.outliers_ = residual > self.scale_ * self.epsilon
        return self
| |
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.time as s_time
import synapse.tests.utils as s_t_utils
from synapse.tests.utils import alist
class FileTest(s_t_utils.SynTest):
    async def test_model_filebytes(self):
        """Exercise norming, lifting and property rules of file:bytes."""
        async with self.getTestCore() as core:
            valu = 'sha256:' + ('a' * 64)
            fbyts = core.model.type('file:bytes')
            norm, info = fbyts.norm(valu)
            self.eq(info['subs']['sha256'], 'a' * 64)
            # a bare 64-char hex string norms with a sha256 sub as well
            norm, info = fbyts.norm('b' * 64)
            self.eq(info['subs']['sha256'], 'b' * 64)
            # Allow an arbitrary struct to be ground into a file:bytes guid.
            norm, info = fbyts.norm(('md5', 'b' * 32))
            self.eq(norm, 'guid:d32efb12cb5a0f83ffd12788572e1c88')
            self.eq(info, {})
            # malformed values must raise BadTypeValu
            self.raises(s_exc.BadTypeValu, fbyts.norm, s_common.guid())
            self.raises(s_exc.BadTypeValu, fbyts.norm, 'guid:0101')
            self.raises(s_exc.BadTypeValu, fbyts.norm, 'helo:moto')
            self.raises(s_exc.BadTypeValu, fbyts.norm, f'sha256:{s_common.guid()}')
            nodes = await core.nodes('[ file:bytes=$byts ]', opts={'vars': {'byts': b'visi'}})
            pref = nodes[0].props.get('sha256')[:4]
            # prefix (^=) lift and filter on :sha256 must agree on the node
            self.len(1, await core.nodes('file:bytes:sha256^=$pref +file:bytes:sha256^=$pref', opts={'vars': {'pref': pref}}))
            # setting :sha256 on the existing node raises BadTypeValu
            with self.raises(s_exc.BadTypeValu):
                opts = {'vars': {'a': 'a' * 64}}
                await core.nodes('file:bytes [:sha256=$a]', opts=opts)
    async def test_model_filebytes_pe(self):
        # test to make sure pe metadata is well formed
        async with self.getTestCore() as core:
            async with await core.snap() as snap:
                exp_time = '201801010233'
                exp_time_parse = s_time.parse(exp_time)
                props = {
                    'mime:pe:imphash': 'e' * 32,
                    'mime:pe:pdbpath': r'c:\this\is\my\pdbstring',
                    'mime:pe:exports:time': exp_time,
                    'mime:pe:exports:libname': 'ohgood',
                    'mime:pe:richhdr': 'f' * 64,
                }
                fnode = await snap.addNode('file:bytes', 'a' * 64, props=props)
                # pe props
                self.eq(fnode.get('mime:pe:imphash'), 'e' * 32)
                # backslashes in the pdb path get normalized to forward slashes
                self.eq(fnode.get('mime:pe:pdbpath'), r'c:/this/is/my/pdbstring')
                self.eq(fnode.get('mime:pe:exports:time'), exp_time_parse)
                self.eq(fnode.get('mime:pe:exports:libname'), 'ohgood')
                self.eq(fnode.get('mime:pe:richhdr'), 'f' * 64)
                # pe resource
                rbnode = await snap.addNode('file:bytes', 'd' * 64)
                rnode = await snap.addNode('file:mime:pe:resource', (fnode.ndef[1], 2, 0x409, rbnode.ndef[1]))
                self.eq(rnode.get('langid'), 0x409)
                self.eq(rnode.get('type'), 2)
                # reprs translate the raw langid / resource type codes
                self.eq(rnode.repr('langid'), 'en-US')
                self.eq(rnode.repr('type'), 'RT_BITMAP')
                # pe section
                s1node = await snap.addNode('file:mime:pe:section', (fnode.ndef[1], 'foo', 'b' * 64))
                self.eq(s1node.get('name'), 'foo')
                self.eq(s1node.get('sha256'), 'b' * 64)
                # pe export
                enode = await snap.addNode('file:mime:pe:export', (fnode.ndef[1], 'myexport'))
                self.eq(enode.get('file'), fnode.ndef[1])
                self.eq(enode.get('name'), 'myexport')
                # vsversion
                vskvnode = await snap.addNode('file:mime:pe:vsvers:keyval', ('foo', 'bar'))
                self.eq(vskvnode.get('name'), 'foo')
                self.eq(vskvnode.get('value'), 'bar')
                vsnode = await snap.addNode('file:mime:pe:vsvers:info', (fnode.ndef[1], vskvnode.ndef[1]))
                self.eq(vsnode.get('file'), fnode.ndef[1])
                self.eq(vsnode.get('keyval'), vskvnode.ndef[1])
    async def test_model_filebytes_macho(self):
        """Exercise the file:mime:macho:* model forms."""
        async with self.getTestCore() as core:
            async with await core.snap() as snap:
                file = 'a' * 64
                fnode = await snap.addNode('file:bytes', file)
                # loadcmds
                opts = {'vars': {'file': fnode.get('sha256')}}
                gencmd = await core.nodes('''[
                    file:mime:macho:loadcmd=*
                    :file=$file
                    :type=27
                    :size=123456
                ]''', opts=opts)
                self.len(1, gencmd)
                gencmd = gencmd[0]
                self.eq(27, gencmd.get('type'))
                self.eq(123456, gencmd.get('size'))
                self.eq('sha256:' + file, gencmd.get('file'))
                # uuid
                opts = {'vars': {'file': fnode.get('sha256')}}
                uuid = await core.nodes(f'''[
                    file:mime:macho:uuid=*
                    :file=$file
                    :type=27
                    :size=32
                    :uuid=BCAA4A0BBF703A5DBCF972F39780EB67
                ]''', opts=opts)
                self.len(1, uuid)
                uuid = uuid[0]
                # the uppercase input uuid is normalized to lowercase
                self.eq('bcaa4a0bbf703a5dbcf972f39780eb67', uuid.get('uuid'))
                self.eq('sha256:' + file, uuid.get('file'))
                # version
                ver = await core.nodes(f'''[
                    file:mime:macho:version=*
                    :file=$file
                    :type=42
                    :size=32
                    :version="7605.1.33.1.4"
                ]''', opts=opts)
                self.len(1, ver)
                ver = ver[0]
                self.eq('7605.1.33.1.4', ver.get('version'))
                self.eq('sha256:' + file, ver.get('file'))
                self.eq(42, ver.get('type'))
                self.eq(32, ver.get('size'))
                self.eq('sha256:' + file, ver.get('file'))
                # segment
                seghash = 'e' * 64
                opts = {'vars': {'file': file, 'sha256': seghash}}
                seg = await core.nodes(f'''[
                    file:mime:macho:segment=*
                    :file=$file
                    :type=1
                    :size=48
                    :name="__TEXT"
                    :memsize=4092
                    :disksize=8192
                    :sha256=$sha256
                    :offset=1234
                ]''', opts=opts)
                self.len(1, seg)
                seg = seg[0]
                self.eq('sha256:' + file, seg.get('file'))
                self.eq(1, seg.get('type'))
                self.eq(48, seg.get('size'))
                self.eq('__TEXT', seg.get('name'))
                self.eq(4092, seg.get('memsize'))
                self.eq(8192, seg.get('disksize'))
                self.eq(seghash, seg.get('sha256'))
                self.eq(1234, seg.get('offset'))
                # section (linked back to the segment created above)
                opts = {'vars': {'seg': seg.ndef[1]}}
                sect = await core.nodes(f'''[
                    file:mime:macho:section=*
                    :segment=$seg
                    :name="__text"
                    :size=12
                    :type=0
                    :offset=5678
                ]''', opts=opts)
                self.len(1, sect)
                sect = sect[0]
                self.eq(seg.ndef[1], sect.get('segment'))
                self.eq("__text", sect.get('name'))
                self.eq(12, sect.get('size'))
                self.eq(0, sect.get('type'))
                self.eq(5678, sect.get('offset'))
    async def test_model_filebytes_string(self):
        """Exercise the file:string comp form (file:bytes, string)."""
        async with self.getTestCore() as core:
            async with await core.snap() as snap:
                fnode = await snap.addNode('file:bytes', 'a' * 64)
                fsnode = await snap.addNode('file:string', (fnode.ndef[1], 'foo'))
                # the comp fields are exposed as :file and :string props
                self.eq(fsnode.get('file'), fnode.ndef[1])
                self.eq(fsnode.get('string'), 'foo')
async def test_model_file_types(self):
    """Exercise file:base / file:path type norms and node creation side effects."""
    async with self.getTestCore() as core:
        base = core.model.type('file:base')
        path = core.model.type('file:path')
        # file:base lowercases the value and derives the :ext sub.
        norm, info = base.norm('FOO.EXE')
        subs = info.get('subs')
        self.eq('foo.exe', norm)
        self.eq('exe', subs.get('ext'))
        # Path separators are not allowed in a bare base name.
        self.raises(s_exc.BadTypeValu, base.norm, 'foo/bar.exe')
        self.raises(s_exc.BadTypeValu, base.norm, '/haha')
        # file:path lowercases, normalizes separators, and splits dir/base.
        norm, info = path.norm('c:\\Windows\\System32\\calc.exe')
        self.eq(norm, 'c:/windows/system32/calc.exe')
        self.eq(info['subs']['dir'], 'c:/windows/system32')
        self.eq(info['subs']['base'], 'calc.exe')
        # Redundant separators and dot segments are collapsed.
        norm, info = path.norm(r'/foo////bar/.././baz.json')
        self.eq(norm, '/foo/baz.json')
        norm, info = path.norm(r'./hehe/haha')
        self.eq(norm, 'hehe/haha')
        # '.' has no normable value.
        self.raises(s_exc.BadTypeValu, path.norm, '.')
        self.raises(s_exc.BadTypeValu, path.norm, '..')
        # A bare drive letter norms to itself with only a base sub.
        norm, info = path.norm('c:')
        self.eq(norm, 'c:')
        subs = info.get('subs')
        self.none(subs.get('ext'))
        self.none(subs.get('dir'))
        self.eq(subs.get('base'), 'c:')
        norm, info = path.norm('/foo')
        self.eq(norm, '/foo')
        subs = info.get('subs')
        self.none(subs.get('ext'))
        self.none(subs.get('dir'))
        self.eq(subs.get('base'), 'foo')
        async with await core.snap() as snap:
            node = await snap.addNode('file:path', '/foo/bar/baz.exe')
            self.eq(node.get('base'), 'baz.exe')
            self.eq(node.get('base:ext'), 'exe')
            self.eq(node.get('dir'), '/foo/bar')
            # The parent directory node is created as a side effect.
            self.nn(await snap.getNodeByNdef(('file:path', '/foo/bar')))
            # Prefix (^=) lifts work on file:path and file:base.
            nodes = await snap.nodes('file:path^="/foo/bar/b"')
            self.len(1, nodes)
            self.eq(node.ndef, nodes[0].ndef)
            nodes = await snap.nodes('file:base^=baz')
            self.len(1, nodes)
            self.eq(node.get('base'), nodes[0].ndef[1])
            # Root and empty paths norm to the empty string with no subs.
            node = await snap.addNode('file:path', '/')
            self.none(node.get('base'))
            self.none(node.get('base:ext'))
            self.none(node.get('dir'))
            self.eq(node.ndef[1], '')
            node = await snap.addNode('file:path', '')
            self.none(node.get('base'))
            self.none(node.get('base:ext'))
            self.none(node.get('dir'))
            self.eq(node.ndef[1], '')
            # hex:, base64:, and raw bytes inputs all norm to the same node,
            # and the hash secondary props get populated.
            node0 = await snap.addNode('file:bytes', 'hex:56565656')
            node1 = await snap.addNode('file:bytes', 'base64:VlZWVg==')
            node2 = await snap.addNode('file:bytes', b'VVVV')
            self.eq(node0.ndef, node1.ndef)
            self.eq(node1.ndef, node2.ndef)
            self.nn(node0.get('md5'))
            self.nn(node0.get('sha1'))
            self.nn(node0.get('sha256'))
            self.nn(node0.get('sha512'))
            # '*' creates a guid-valued placeholder file:bytes node.
            fake = await snap.addNode('file:bytes', '*')
            self.true(fake.ndef[1].startswith('guid:'))
            # file:subfile normalizes the child name to lower case.
            node = await snap.addNode('file:subfile', (node1.ndef[1], node2.ndef[1]), {'name': 'embed.BIN', 'path': 'foo/embed.bin'})
            self.eq(node.ndef[1], (node1.ndef[1], node2.ndef[1]))
            self.eq(node.get('parent'), node1.ndef[1])
            self.eq(node.get('child'), node2.ndef[1])
            self.eq(node.get('name'), 'embed.bin')
            self.eq(node.get('path'), 'foo/embed.bin')
            fp = 'C:\\www\\woah\\really\\sup.exe'
            node = await snap.addNode('file:filepath', (node0.ndef[1], fp))
            self.eq(node.get('file'), node0.ndef[1])
            self.eq(node.get('path'), 'c:/www/woah/really/sup.exe')
            self.eq(node.get('path:dir'), 'c:/www/woah/really')
            self.eq(node.get('path:base'), 'sup.exe')
            self.eq(node.get('path:base:ext'), 'exe')
            # Adding the file:filepath fans out ancestor dirs and the base.
            expected_nodes = (
                ('file:path', 'c:/www/woah/really'),
                ('file:path', 'c:/www/woah'),
                ('file:path', 'c:/www'),
                ('file:base', 'sup.exe'),
            )
            await self.checkNodes(core, expected_nodes)
async def test_model_file_ismime(self):
    """file:ismime links a file:bytes guid to its normalized mime string."""
    async with self.getTestCore() as core:
        # The mime value is lowercased on norm.
        nodes = await core.nodes('[ file:bytes="*" :mime=text/PLAIN ]')
        self.len(1, nodes)
        filenode = nodes[0]
        fileiden = filenode.ndef[1]
        self.eq('text/plain', filenode.get('mime'))
        # Setting :mime makes the node liftable by the secondary prop...
        self.len(1, await core.nodes('file:mime=text/plain'))
        # ...and also creates the file:ismime edge node.
        opts = {'vars': {'guid': fileiden}}
        nodes = await core.nodes('file:ismime:file=$guid', opts=opts)
        self.len(1, nodes)
        self.eq(nodes[0].ndef, ('file:ismime', (fileiden, 'text/plain')))
async def test_model_file_mime_msoffice(self):
    """The msdoc/msxls/msppt mime forms carry the shared msoffice props."""
    async with self.getTestCore() as core:
        fileguid = s_common.guid()
        opts = {'vars': {'fileguid': f'guid:{fileguid}'}}

        def testmsoffice(n):
            # Shared assertions for every msoffice-derived mime node.
            self.eq('lolz', n.get('title'))
            self.eq('deep_value', n.get('author'))
            self.eq('GME stonks', n.get('subject'))
            self.eq('stonktrader3000', n.get('application'))
            self.eq(1611100800000, n.get('created'))
            self.eq(1611187200000, n.get('lastsaved'))
            self.eq(f'guid:{fileguid}', n.get('file'))
            self.eq(0, n.get('file:offs'))
            self.eq(('foo', 'bar'), n.get('file:data'))

        # The three forms are property-identical, so run one storm edit
        # block per form instead of repeating it verbatim three times.
        for form in ('file:mime:msdoc', 'file:mime:msxls', 'file:mime:msppt'):
            nodes = await core.nodes(f'''[
                {form}=*
                :file=$fileguid
                :file:offs=0
                :file:data=(foo, bar)
                :title=lolz
                :author=deep_value
                :subject="GME stonks"
                :application=stonktrader3000
                :created=20210120
                :lastsaved=20210121
            ]''', opts=opts)
            self.len(1, nodes)
            testmsoffice(nodes[0])
async def test_model_file_mime_rtf(self):
    """Basic property coverage for the file:mime:rtf form."""
    async with self.getTestCore() as core:
        fileguid = s_common.guid()
        opts = {'vars': {'fileguid': f'guid:{fileguid}'}}
        nodes = await core.nodes('''[
            file:mime:rtf=*
            :file=$fileguid
            :file:offs=0
            :file:data=(foo, bar)
            :guid=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
        ]''', opts=opts)
        self.len(1, nodes)
        node = nodes[0]
        # The generic file:mime:meta props plus the rtf-specific :guid.
        self.eq(f'guid:{fileguid}', node.get('file'))
        self.eq(0, node.get('file:offs'))
        self.eq(('foo', 'bar'), node.get('file:data'))
        self.eq('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', node.get('guid'))
async def test_model_file_meta_exif(self):
    """The jpg/tif/gif/png mime forms carry the shared EXIF metadata props."""
    async with self.getTestCore() as core:
        fileguid = s_common.guid()
        conguid = s_common.guid()
        opts = {'vars': {
            'fileguid': f'guid:{fileguid}',
            'conguid': conguid,
        }}

        def testexif(n):
            # Shared assertions for every exif-capable image mime node.
            self.eq(f'guid:{fileguid}', n.get('file'))
            self.eq(0, n.get('file:offs'))
            self.eq(('foo', 'bar'), n.get('file:data'))
            self.eq('aaaa', n.get('desc'))
            self.eq('bbbb', n.get('comment'))
            self.eq(1578236238000, n.get('created'))
            self.eq('a6b4', n.get('imageid'))
            self.eq(conguid, n.get('author'))
            self.eq((38.9582839, -77.358946), n.get('latlong'))
            self.eq(6371137800, n.get('altitude'))

        # The contact node referenced by the :author prop below.
        nodes = await core.nodes(f'''[
            ps:contact=$conguid
            :name="Steve Rogers"
            :title="Captain"
            :orgname="U.S. Army"
            :address="569 Leaman Place, Brooklyn, NY, 11201, USA"
        ]''', opts=opts)

        props = '''
            :file=$fileguid
            :file:offs=0
            :file:data=(foo, bar)
            :desc=aaaa
            :comment=bbbb
            :created="2020-01-05 14:57:18"
            :imageid=a6b4
            :author=$conguid
            :latlong="38.9582839,-77.358946"
            :altitude="129 meters"'''

        # All four image forms take the identical property block, so loop
        # over the forms instead of repeating the storm text four times.
        for form in ('file:mime:jpg', 'file:mime:tif', 'file:mime:gif', 'file:mime:png'):
            nodes = await core.nodes(f'''[
                {form}=*
                {props}
            ]''', opts=opts)
            self.len(1, nodes)
            testexif(nodes[0])
| |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.constants import epsilon_0
from ...utils.code_utils import deprecate_class
from ...fields import TimeFields
from ...utils import mkvc, sdiag, Zero
from ..utils import omega
class FieldsTDEM(TimeFields):
    """
    Fancy Field Storage for a TDEM simulation. Only one field type is stored for
    each problem, the rest are computed. The fields object acts like an array
    and is indexed by

    .. code-block:: python

        f = problem.fields(m)
        e = f[source_list, 'e']
        b = f[source_list, 'b']

    If accessing all sources for a given field, use the :code:`:`

    .. code-block:: python

        f = problem.fields(m)
        e = f[:, 'e']
        b = f[:, 'b']

    The array returned will be size (nE or nF, nSrcs :math:`\\times`
    nFrequencies)
    """

    knownFields = {}
    dtype = float

    def _GLoc(self, fieldType):
        """Grid location of the fieldType"""
        return self.aliasFields[fieldType][1]

    def _eDeriv(self, tInd, src, dun_dm_v, v, adjoint=False):
        # Each _xDeriv dispatcher combines the solution (u) and model (m)
        # sensitivities implemented by subclasses (_xDeriv_u / _xDeriv_m).
        # Forward mode sums the two contributions; adjoint mode returns them
        # as a (u-part, m-part) tuple for the caller to accumulate.
        if adjoint is True:
            return (
                self._eDeriv_u(tInd, src, v, adjoint),
                self._eDeriv_m(tInd, src, v, adjoint),
            )
        return self._eDeriv_u(tInd, src, dun_dm_v) + self._eDeriv_m(tInd, src, v)

    def _bDeriv(self, tInd, src, dun_dm_v, v, adjoint=False):
        # Same dispatch pattern as _eDeriv, for b.
        if adjoint is True:
            return (
                self._bDeriv_u(tInd, src, v, adjoint),
                self._bDeriv_m(tInd, src, v, adjoint),
            )
        return self._bDeriv_u(tInd, src, dun_dm_v) + self._bDeriv_m(tInd, src, v)

    def _dbdtDeriv(self, tInd, src, dun_dm_v, v, adjoint=False):
        # Same dispatch pattern as _eDeriv, for db/dt.
        if adjoint is True:
            return (
                self._dbdtDeriv_u(tInd, src, v, adjoint),
                self._dbdtDeriv_m(tInd, src, v, adjoint),
            )
        return self._dbdtDeriv_u(tInd, src, dun_dm_v) + self._dbdtDeriv_m(tInd, src, v)

    def _hDeriv(self, tInd, src, dun_dm_v, v, adjoint=False):
        # Same dispatch pattern as _eDeriv, for h.
        if adjoint is True:
            return (
                self._hDeriv_u(tInd, src, v, adjoint),
                self._hDeriv_m(tInd, src, v, adjoint),
            )
        return self._hDeriv_u(tInd, src, dun_dm_v) + self._hDeriv_m(tInd, src, v)

    def _dhdtDeriv(self, tInd, src, dun_dm_v, v, adjoint=False):
        # Same dispatch pattern as _eDeriv, for dh/dt.
        if adjoint is True:
            return (
                self._dhdtDeriv_u(tInd, src, v, adjoint),
                self._dhdtDeriv_m(tInd, src, v, adjoint),
            )
        return self._dhdtDeriv_u(tInd, src, dun_dm_v) + self._dhdtDeriv_m(tInd, src, v)

    def _jDeriv(self, tInd, src, dun_dm_v, v, adjoint=False):
        # Same dispatch pattern as _eDeriv, for j.
        if adjoint is True:
            return (
                self._jDeriv_u(tInd, src, v, adjoint),
                self._jDeriv_m(tInd, src, v, adjoint),
            )
        return self._jDeriv_u(tInd, src, dun_dm_v) + self._jDeriv_m(tInd, src, v)
class FieldsDerivativesEB(FieldsTDEM):
    """
    A fields object for stashing derivs in the EB formulation
    """

    # Grid location of each stored derivative: "F" = faces, "E" = edges.
    knownFields = {
        "bDeriv": "F",
        "eDeriv": "E",
        "hDeriv": "F",
        "jDeriv": "E",
        "dbdtDeriv": "F",
        "dhdtDeriv": "F",
    }
class FieldsDerivativesHJ(FieldsTDEM):
    """
    A fields object for stashing derivs in the HJ formulation
    """

    # Grid location of each stored derivative: "E" = edges, "F" = faces.
    knownFields = {
        "bDeriv": "E",
        "eDeriv": "F",
        "hDeriv": "E",
        "jDeriv": "F",
        "dbdtDeriv": "E",
        "dhdtDeriv": "E",
    }
class Fields3DMagneticFluxDensity(FieldsTDEM):
    """Field Storage for a TDEM simulation solved for b.

    The magnetic flux density ``b`` (on faces) is the stored solution field;
    all other fields are computed from it on demand through ``aliasFields``.
    """

    knownFields = {"bSolution": "F"}
    aliasFields = {
        "b": ["bSolution", "F", "_b"],
        "h": ["bSolution", "F", "_h"],
        "e": ["bSolution", "E", "_e"],
        "j": ["bSolution", "E", "_j"],
        "dbdt": ["bSolution", "F", "_dbdt"],
        "dhdt": ["bSolution", "F", "_dhdt"],
    }

    def startup(self):
        # Cache operators off the simulation so the field evaluations below
        # avoid repeated attribute lookups.
        self._times = self.simulation.times
        self._MeSigma = self.simulation.MeSigma
        self._MeSigmaI = self.simulation.MeSigmaI
        self._MeSigmaDeriv = self.simulation.MeSigmaDeriv
        self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv
        self._edgeCurl = self.simulation.mesh.edgeCurl
        self._MfMui = self.simulation.MfMui
        self._timeMesh = self.simulation.time_mesh

    def _TLoc(self, fieldType):
        """All fields live on the nodes of the time mesh."""
        return "N"

    def _b(self, bSolution, source_list, tInd):
        # b is the solution field itself.
        return bSolution

    def _bDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        # d(b)/d(u) is the identity.
        return dun_dm_v

    def _bDeriv_m(self, tInd, src, v, adjoint=False):
        # b has no explicit model dependence.
        return Zero()

    def _dbdt(self, bSolution, source_list, tInd):
        # Faraday's law: db/dt = -curl(e) + s_m.
        dbdt = -self._edgeCurl * self._e(bSolution, source_list, tInd)
        for i, src in enumerate(source_list):
            s_m = src.s_m(self.simulation, self._times[tInd])
            dbdt[:, i] = dbdt[:, i] + s_m
        return dbdt

    def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint is True:
            return -self._eDeriv_u(tInd, src, self._edgeCurl.T * dun_dm_v, adjoint)
        return -(self._edgeCurl * self._eDeriv_u(tInd, src, dun_dm_v))

    def _dbdtDeriv_m(self, tInd, src, v, adjoint=False):
        if adjoint is True:
            return -(self._eDeriv_m(tInd, src, self._edgeCurl.T * v, adjoint))
        return -(
            self._edgeCurl * self._eDeriv_m(tInd, src, v)
        )  # + src.s_mDeriv() assuming src doesn't have deriv for now

    def _e(self, bSolution, source_list, tInd):
        # e = MeSigmaI * (C.T * MfMui * b - s_e)
        e = self._MeSigmaI * (self._edgeCurl.T * (self._MfMui * bSolution))
        for i, src in enumerate(source_list):
            s_e = src.s_e(self.simulation, self._times[tInd])
            e[:, i] = e[:, i] - self._MeSigmaI * s_e
        return e

    def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint is True:
            return self._MfMui.T * (self._edgeCurl * (self._MeSigmaI.T * dun_dm_v))
        return self._MeSigmaI * (self._edgeCurl.T * (self._MfMui * dun_dm_v))

    def _eDeriv_m(self, tInd, src, v, adjoint=False):
        _, s_e = src.eval(self.simulation, self._times[tInd])
        bSolution = self[[src], "bSolution", tInd].flatten()
        _, s_eDeriv = src.evalDeriv(self._times[tInd], self, adjoint=adjoint)
        if adjoint is True:
            return self._MeSigmaIDeriv(
                -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint
            ) - s_eDeriv(self._MeSigmaI.T * v)
        return self._MeSigmaIDeriv(
            -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint
        ) - self._MeSigmaI * s_eDeriv(v)

    def _j(self, bSolution, source_list, tInd):
        # Parameter renamed hSolution -> bSolution for consistency: the
        # solution field in this formulation is b (behavior unchanged).
        return self.simulation.MeI * (
            self._MeSigma * self._e(bSolution, source_list, tInd)
        )

    def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._eDeriv_u(
                tInd,
                src,
                self._MeSigma.T * (self.simulation.MeI.T * dun_dm_v),
                adjoint=True,
            )
        return self.simulation.MeI * (
            self._MeSigma * self._eDeriv_u(tInd, src, dun_dm_v)
        )

    def _jDeriv_m(self, tInd, src, v, adjoint=False):
        e = self[src, "e", tInd]
        if adjoint:
            w = self.simulation.MeI.T * v
            return self._MeSigmaDeriv(e).T * w + self._eDeriv_m(
                tInd, src, self._MeSigma.T * w, adjoint=True
            )
        return self.simulation.MeI * (
            self._MeSigmaDeriv(e) * v + self._MeSigma * self._eDeriv_m(tInd, src, v)
        )

    def _h(self, bSolution, source_list, tInd):
        # h = MfI * MfMui * b (parameter renamed hSolution -> bSolution).
        return self.simulation.MfI * (
            self._MfMui * self._b(bSolution, source_list, tInd)
        )

    def _hDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._bDeriv_u(
                tInd,
                src,
                self._MfMui.T * (self.simulation.MfI.T * dun_dm_v),
                adjoint=True,
            )
        return self.simulation.MfI * (self._MfMui * self._bDeriv_u(tInd, src, dun_dm_v))

    def _hDeriv_m(self, tInd, src, v, adjoint=False):
        if adjoint:
            return self._bDeriv_m(
                tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True
            )
        return self.simulation.MfI * (self._MfMui * self._bDeriv_m(tInd, src, v))

    def _dhdt(self, bSolution, source_list, tInd):
        # dh/dt = MfI * MfMui * db/dt (parameter renamed for consistency).
        return self.simulation.MfI * (
            self._MfMui * self._dbdt(bSolution, source_list, tInd)
        )

    def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._dbdtDeriv_u(
                tInd,
                src,
                self._MfMui.T * (self.simulation.MfI.T * dun_dm_v),
                adjoint=True,
            )
        return self.simulation.MfI * (
            self._MfMui * self._dbdtDeriv_u(tInd, src, dun_dm_v)
        )

    def _dhdtDeriv_m(self, tInd, src, v, adjoint=False):
        if adjoint:
            return self._dbdtDeriv_m(
                tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True
            )
        return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v))
class Fields3DElectricField(FieldsTDEM):
    """Fancy Field Storage for a TDEM simulation solved for e.

    The electric field ``e`` (on edges) is the stored solution; b itself is
    not recoverable here (only db/dt), so ``_b`` raises.
    """

    knownFields = {"eSolution": "E"}
    aliasFields = {
        "e": ["eSolution", "E", "_e"],
        "j": ["eSolution", "E", "_j"],
        "b": ["eSolution", "F", "_b"],
        # 'h': ['eSolution', 'F', '_h'],
        "dbdt": ["eSolution", "F", "_dbdt"],
        "dhdt": ["eSolution", "F", "_dhdt"],
    }

    def startup(self):
        # Cache operators off the simulation for the evaluations below.
        self._times = self.simulation.times
        self._MeSigma = self.simulation.MeSigma
        self._MeSigmaI = self.simulation.MeSigmaI
        self._MeSigmaDeriv = self.simulation.MeSigmaDeriv
        self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv
        self._edgeCurl = self.simulation.mesh.edgeCurl
        self._MfMui = self.simulation.MfMui

    def _TLoc(self, fieldType):
        """All fields live on the nodes of the time mesh."""
        return "N"

    def _e(self, eSolution, source_list, tInd):
        # e is the solution field itself.
        return eSolution

    def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        return dun_dm_v

    def _eDeriv_m(self, tInd, src, v, adjoint=False):
        return Zero()

    def _dbdt(self, eSolution, source_list, tInd):
        # Faraday's law: db/dt = s_m - curl(e).
        # NOTE(review): uses self.mesh here but self.simulation.mesh in
        # startup() -- confirm both resolve to the same mesh object.
        s_m = np.zeros((self.mesh.nF, len(source_list)))
        for i, src in enumerate(source_list):
            s_m_src = src.s_m(self.simulation, self._times[tInd])
            s_m[:, i] = s_m[:, i] + s_m_src
        return s_m - self._edgeCurl * eSolution

    def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return -self._edgeCurl.T * dun_dm_v
        return -self._edgeCurl * dun_dm_v

    def _dbdtDeriv_m(self, tInd, src, v, adjoint=False):
        # s_mDeriv = src.s_mDeriv(
        #     self._times[tInd], self, adjoint=adjoint
        # )
        return Zero()  # assumes source doesn't depend on model

    def _b(self, eSolution, source_list, tInd):
        """
        Integrate _db_dt using rectangles
        """
        raise NotImplementedError(
            "To obtain b-fields, please use Simulation3DMagneticFluxDensity"
        )
        # dbdt = self._dbdt(eSolution, source_list, tInd)
        # dt = self.simulation.time_mesh.hx
        # # assume widths of "ghost cells" same on either end
        # dtn = np.hstack([dt[0], 0.5*(dt[1:] + dt[:-1]), dt[-1]])
        # return dtn[tInd] * dbdt
        # # raise NotImplementedError

    def _j(self, eSolution, source_list, tInd):
        # Ohm's law: j = MeI * MeSigma * e.
        return self.simulation.MeI * (
            self._MeSigma * self._e(eSolution, source_list, tInd)
        )

    def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._eDeriv_u(
                tInd,
                src,
                self._MeSigma.T * (self.simulation.MeI.T * dun_dm_v),
                adjoint=True,
            )
        return self.simulation.MeI * (
            self._MeSigma * self._eDeriv_u(tInd, src, dun_dm_v)
        )

    def _jDeriv_m(self, tInd, src, v, adjoint=False):
        e = self[src, "e", tInd]
        if adjoint:
            w = self.simulation.MeI.T * v
            return self._MeSigmaDeriv(e).T * w + self._eDeriv_m(
                tInd, src, self._MeSigma.T * w, adjoint=True
            )
        return self.simulation.MeI * (
            self._MeSigmaDeriv(e) * v + self._MeSigma * self._eDeriv_m(tInd, src, v)
        )

    def _dhdt(self, eSolution, source_list, tInd):
        return self.simulation.MfI * (
            self._MfMui * self._dbdt(eSolution, source_list, tInd)
        )

    def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._dbdtDeriv_u(
                tInd,
                src,
                self._MfMui.T * (self.simulation.MfI.T * dun_dm_v),
                adjoint=True,
            )
        return self.simulation.MfI * (
            self._MfMui * self._dbdtDeriv_u(tInd, src, dun_dm_v)
        )

    def _dhdtDeriv_m(self, tInd, src, v, adjoint=False):
        if adjoint:
            # Fix: forward the adjoint flag (it was dropped here, unlike in
            # every sibling class, silently taking the forward-mode branch).
            return self._dbdtDeriv_m(
                tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True
            )
        return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v))
class Fields3DMagneticField(FieldsTDEM):
    """Fancy Field Storage for a TDEM simulation solved for h.

    The magnetic field ``h`` (on edges) is the stored solution; all other
    fields, including the static ``charge``, are derived from it.
    """

    knownFields = {"hSolution": "E"}
    aliasFields = {
        "h": ["hSolution", "E", "_h"],
        "b": ["hSolution", "E", "_b"],
        "dhdt": ["hSolution", "E", "_dhdt"],
        "dbdt": ["hSolution", "E", "_dbdt"],
        "j": ["hSolution", "F", "_j"],
        "e": ["hSolution", "F", "_e"],
        "charge": ["hSolution", "CC", "_charge"],
    }

    def startup(self):
        # Cache operators off the simulation for the evaluations below.
        self._times = self.simulation.times
        self._edgeCurl = self.simulation.mesh.edgeCurl
        self._MeMuI = self.simulation.MeMuI
        self._MeMu = self.simulation.MeMu
        self._MfRho = self.simulation.MfRho
        self._MfRhoDeriv = self.simulation.MfRhoDeriv

    def _TLoc(self, fieldType):
        # All fields live on the nodes of the time mesh.
        # if fieldType in ['h', 'j']:
        return "N"
        # else:
        #     raise NotImplementedError

    def _h(self, hSolution, source_list, tInd):
        # h is the solution field itself.
        return hSolution

    def _hDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        return dun_dm_v

    def _hDeriv_m(self, tInd, src, v, adjoint=False):
        return Zero()

    def _dhdt(self, hSolution, source_list, tInd):
        # Ampere + Ohm: dh/dt = MeMuI * (C.T * MfRho * (s_e - C*h) + s_m).
        C = self._edgeCurl
        MeMuI = self._MeMuI
        MfRho = self._MfRho
        dhdt = -MeMuI * (C.T * (MfRho * (C * hSolution)))
        for i, src in enumerate(source_list):
            s_m, s_e = src.eval(self.simulation, self._times[tInd])
            dhdt[:, i] = MeMuI * (C.T * MfRho * s_e + s_m) + dhdt[:, i]
        return dhdt

    def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        C = self._edgeCurl
        MeMuI = self._MeMuI
        MfRho = self._MfRho
        if adjoint:
            # Transpose of the forward operator chain, applied in reverse.
            return -C.T * (MfRho.T * (C * (MeMuI * dun_dm_v)))
        return -MeMuI * (C.T * (MfRho * (C * dun_dm_v)))

    def _dhdtDeriv_m(self, tInd, src, v, adjoint=False):
        C = self._edgeCurl
        MeMuI = self._MeMuI
        MfRho = self._MfRho
        MfRhoDeriv = self._MfRhoDeriv
        hSolution = self[[src], "hSolution", tInd].flatten()
        s_e = src.s_e(self.simulation, self._times[tInd])
        if adjoint:
            return -MfRhoDeriv(C * hSolution - s_e, (C * (MeMuI * v)), adjoint)
        return -MeMuI * (C.T * (MfRhoDeriv(C * hSolution - s_e, v, adjoint)))

    def _j(self, hSolution, source_list, tInd):
        # Ampere's law: j = C*h - s_e.
        s_e = np.zeros((self.mesh.nF, len(source_list)))
        for i, src in enumerate(source_list):
            s_e_src = src.s_e(self.simulation, self._times[tInd])
            s_e[:, i] = s_e[:, i] + s_e_src
        return self._edgeCurl * hSolution - s_e

    def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._edgeCurl.T * dun_dm_v
        return self._edgeCurl * dun_dm_v

    def _jDeriv_m(self, tInd, src, v, adjoint=False):
        return Zero()  # assumes the source doesn't depend on the model

    def _b(self, hSolution, source_list, tInd):
        # b = MeI * MeMu * h.
        h = self._h(hSolution, source_list, tInd)
        return self.simulation.MeI * (self._MeMu * h)

    def _bDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._hDeriv_u(
                tInd,
                src,
                self._MeMu.T * (self.simulation.MeI.T * dun_dm_v),
                adjoint=adjoint,
            )
        return self.simulation.MeI * (self._MeMu * self._hDeriv_u(tInd, src, dun_dm_v))

    def _bDeriv_m(self, tInd, src, v, adjoint=False):
        if adjoint:
            return self._hDeriv_m(
                tInd, src, self._MeMu.T * (self.simulation.MeI.T * v), adjoint=adjoint
            )
        return self.simulation.MeI * (self._MeMu * self._hDeriv_m(tInd, src, v))

    def _dbdt(self, hSolution, source_list, tInd):
        # db/dt = MeI * MeMu * dh/dt.
        dhdt = self._dhdt(hSolution, source_list, tInd)
        return self.simulation.MeI * (self._MeMu * dhdt)

    def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._dhdtDeriv_u(
                tInd,
                src,
                self._MeMu.T * (self.simulation.MeI.T * dun_dm_v),
                adjoint=adjoint,
            )
        return self.simulation.MeI * (
            self._MeMu * self._dhdtDeriv_u(tInd, src, dun_dm_v)
        )

    def _dbdtDeriv_m(self, tInd, src, v, adjoint=False):
        if adjoint:
            return self._dhdtDeriv_m(
                tInd, src, self._MeMu.T * (self.simulation.MeI.T * v), adjoint=adjoint
            )
        return self.simulation.MeI * (self._MeMu * self._dhdtDeriv_m(tInd, src, v))

    def _e(self, hSolution, source_list, tInd):
        # Ohm's law: e = MfI * MfRho * j.
        return self.simulation.MfI * (
            self._MfRho * self._j(hSolution, source_list, tInd)
        )

    def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._jDeriv_u(
                tInd,
                src,
                self._MfRho.T * (self.simulation.MfI.T * dun_dm_v),
                adjoint=True,
            )
        return self.simulation.MfI * (self._MfRho * self._jDeriv_u(tInd, src, dun_dm_v))

    def _eDeriv_m(self, tInd, src, v, adjoint=False):
        j = mkvc(self[src, "j", tInd])
        if adjoint is True:
            return self._MfRhoDeriv(
                j, self.simulation.MfI.T * v, adjoint
            ) + self._jDeriv_m(tInd, src, self._MfRho * v)
        return self.simulation.MfI * (
            self._MfRhoDeriv(j, v) + self._MfRho * self._jDeriv_m(tInd, src, v)
        )

    def _charge(self, hSolution, source_list, tInd):
        # Gauss's law: charge = eps0 * vol * div(e).
        vol = sdiag(self.simulation.mesh.vol)
        return (
            epsilon_0
            * vol
            * (self.simulation.mesh.faceDiv * self._e(hSolution, source_list, tInd))
        )
class Fields3DCurrentDensity(FieldsTDEM):
    """Fancy Field Storage for a TDEM simulation solved for j.

    The current density ``j`` (on faces) is the stored solution; h itself is
    not recoverable here (only dh/dt and db/dt), so ``_h`` raises.
    """

    knownFields = {"jSolution": "F"}
    aliasFields = {
        "dhdt": ["jSolution", "E", "_dhdt"],
        "dbdt": ["jSolution", "E", "_dbdt"],
        "j": ["jSolution", "F", "_j"],
        "e": ["jSolution", "F", "_e"],
        "charge": ["jSolution", "CC", "_charge"],
        "charge_density": ["jSolution", "CC", "_charge_density"],
    }

    def startup(self):
        # Cache operators off the simulation for the evaluations below.
        self._times = self.simulation.times
        self._edgeCurl = self.simulation.mesh.edgeCurl
        self._MeMuI = self.simulation.MeMuI
        self._MfRho = self.simulation.MfRho
        self._MfRhoDeriv = self.simulation.MfRhoDeriv

    def _TLoc(self, fieldType):
        """All fields live on the nodes of the time mesh."""
        # if fieldType in ['h', 'j']:
        return "N"

    def _j(self, jSolution, source_list, tInd):
        # j is the solution field itself.
        return jSolution

    def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        return dun_dm_v

    def _jDeriv_m(self, tInd, src, v, adjoint=False):
        return Zero()

    def _h(self, jSolution, source_list, tInd):
        raise NotImplementedError(
            "Please use Simulation3DMagneticField to get h-fields"
        )

    def _dhdt(self, jSolution, source_list, tInd):
        # dh/dt = MeMuI * (s_m - C.T * MfRho * j).
        C = self._edgeCurl
        MfRho = self._MfRho
        MeMuI = self._MeMuI
        dhdt = -MeMuI * (C.T * (MfRho * jSolution))
        for i, src in enumerate(source_list):
            # Use the cached self._times (set in startup from
            # simulation.times) for consistency with the sibling classes.
            s_m = src.s_m(self.simulation, self._times[tInd])
            dhdt[:, i] = MeMuI * s_m + dhdt[:, i]
        return dhdt

    def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        C = self._edgeCurl
        MfRho = self._MfRho
        MeMuI = self._MeMuI
        if adjoint is True:
            return -MfRho.T * (C * (MeMuI.T * dun_dm_v))
        return -MeMuI * (C.T * (MfRho * dun_dm_v))

    def _dhdtDeriv_m(self, tInd, src, v, adjoint=False):
        jSolution = self[[src], "jSolution", tInd].flatten()
        C = self._edgeCurl
        MeMuI = self._MeMuI
        if adjoint is True:
            return -self._MfRhoDeriv(jSolution, C * (MeMuI * v), adjoint)
        return -MeMuI * (C.T * (self._MfRhoDeriv(jSolution, v)))

    def _e(self, jSolution, source_list, tInd):
        # Ohm's law: e = MfI * MfRho * j.
        return self.simulation.MfI * (
            self._MfRho * self._j(jSolution, source_list, tInd)
        )

    def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint is True:
            return self._MfRho.T * (self.simulation.MfI.T * dun_dm_v)
        return self.simulation.MfI * (self._MfRho * dun_dm_v)

    def _eDeriv_m(self, tInd, src, v, adjoint=False):
        jSolution = mkvc(self[src, "jSolution", tInd])
        if adjoint:
            return self._MfRhoDeriv(jSolution, self.simulation.MfI.T * v, adjoint)
        return self.simulation.MfI * self._MfRhoDeriv(jSolution, v)

    def _charge(self, jSolution, source_list, tInd):
        # Integrated charge: vol * charge_density.
        vol = sdiag(self.simulation.mesh.vol)
        return vol * self._charge_density(jSolution, source_list, tInd)

    def _charge_density(self, jSolution, source_list, tInd):
        # Gauss's law: rho = eps0 * div(e).
        return epsilon_0 * (
            self.simulation.mesh.faceDiv * self._e(jSolution, source_list, tInd)
        )

    def _dbdt(self, jSolution, source_list, tInd):
        # db/dt = MeI * MeMu * dh/dt.
        dhdt = mkvc(self._dhdt(jSolution, source_list, tInd))
        return self.simulation.MeI * (self.simulation.MeMu * dhdt)

    def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        # dhdt = mkvc(self[src, 'dhdt', tInd])
        if adjoint:
            return self._dhdtDeriv_u(
                tInd,
                src,
                self.simulation.MeMu.T * (self.simulation.MeI.T * dun_dm_v),
                adjoint,
            )
        return self.simulation.MeI * (
            self.simulation.MeMu * self._dhdtDeriv_u(tInd, src, dun_dm_v)
        )

    def _dbdtDeriv_m(self, tInd, src, v, adjoint=False):
        if adjoint:
            return self._dhdtDeriv_m(
                tInd, src, self.simulation.MeMu.T * (self.simulation.MeI.T * v), adjoint
            )
        return self.simulation.MeI * (
            self.simulation.MeMu * self._dhdtDeriv_m(tInd, src, v)
        )
############
# Deprecated
############
@deprecate_class(removal_version="0.16.0", error=True)
class Fields_Derivs_eb(FieldsDerivativesEB):
    # Deprecated alias of FieldsDerivativesEB; error=True makes use raise
    # until removal in 0.16.0.
    pass
@deprecate_class(removal_version="0.16.0", error=True)
class Fields_Derivs_hj(FieldsDerivativesHJ):
    # Deprecated alias of FieldsDerivativesHJ; raises on use (removal 0.16.0).
    pass
@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_b(Fields3DMagneticFluxDensity):
    # Deprecated alias of Fields3DMagneticFluxDensity; raises on use.
    pass
@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_e(Fields3DElectricField):
    # Deprecated alias of Fields3DElectricField; raises on use.
    pass
@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_h(Fields3DMagneticField):
    # Deprecated alias of Fields3DMagneticField; raises on use.
    pass
@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_j(Fields3DCurrentDensity):
    # Deprecated alias of Fields3DCurrentDensity; raises on use.
    pass
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Tintri, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
import sys
import tintri_1_1 as tintri
from prettytable import PrettyTable
"""
This Python script prints server information.
Command usage: get_server_info <server_name> <userName> <password>
Replaces appliance_info.py
"""
# Set to True for verbose [DEBUG] output on the console; keep False otherwise.
debug_mode = False  # module-level switch read by print_debug()
def print_with_prefix(prefix, out):
    """Write *out* to stdout as a single line tagged with *prefix*."""
    message = prefix + out
    print(message)
def print_debug(out):
    """Emit *out* as a [DEBUG] line, only when module-level debug_mode is set."""
    if not debug_mode:
        return
    print_with_prefix("[DEBUG] : ", out)
def print_info(out):
    """Emit *out* as an [INFO] line."""
    print_with_prefix("[INFO] : ", out)
def print_error(out):
    """Emit *out* as an [ERROR] line."""
    print_with_prefix("[ERROR] : ", out)
# main
# CLI: get_server_info <server_name> <user_name> <password>
if len(sys.argv) < 4:
    print("\nPrints server information\n")
    print("Usage: " + sys.argv[0] + " server_name user_name password\n")
    sys.exit(-1)
server_name = sys.argv[1]
user_name = sys.argv[2]
password = sys.argv[3]
# Get the product name
try:
    r = tintri.api_version(server_name)
    json_info = r.json()
    product_name = json_info['productName']
    # This script only supports VMstore appliances.
    if json_info['productName'] != "Tintri VMstore":
        # NOTE(review): the handlers below catch TintriRequestsException
        # (with an 's'); confirm tintri_1_1 defines TintriRequestException,
        # otherwise this raise would itself fail with AttributeError.
        raise tintri.TintriRequestException("server needs to be a VMstore.")
    # Login to Tintri server
    session_id = tintri.api_login(server_name, user_name, password)
except tintri.TintriRequestsException as tre:
    print_error(tre.__str__())
    exit(-2)
except tintri.TintriApiException as tae:
    print_error(tae.__str__())
    exit(-3)
try:
    # NOTE(review): this message looks like leftover text unrelated to the
    # appliance queries below -- confirm it is intentional.
    print("\nLoading transporter buffers\n")
    # Get appliance info
    url = "/v310/appliance"
    r = tintri.api_get(server_name, url, session_id)
    print_debug("The JSON response of the get invoke to the server " +
                server_name + " is: " + r.text)
    appliance_resp = r.json()
    # The endpoint returns a list; this box is the first (only) entry.
    appliance = appliance_resp[0]
    # Get failed Components for the appliance
    url = "/v310/appliance/default/failedComponents"
    r = tintri.api_get(server_name, url, session_id)
    print_debug("The JSON response of the get invoke to the server " +
                server_name + " is: " + r.text)
    failed_components_resp = r.json()
    failed_components = failed_components_resp['failedComponents']
except tintri.TintriRequestsException as tre:
    # Always log out before exiting so the session is not leaked.
    print_error(tre.__str__())
    tintri.api_logout(server_name, session_id)
    exit(-10)
except tintri.TintriApiException as tae:
    print_error(tae.__str__())
    tintri.api_logout(server_name, session_id)
    exit(-11)
# Some basic info
all_flash = False
show_all_flash = False
appliance_info = appliance['info']
if 'isAllFlash' in appliance_info:
all_flash = appliance_info['isAllFlash']
show_all_flash = True
print("Appliance")
table_header = ('Info', 'Value')
table = PrettyTable(table_header)
table.align['Info'] = "l"
table.align['Value'] = "l"
row = ('Product', product_name)
table.add_row(row)
row = ('Model', appliance_info['modelName'])
table.add_row(row)
if show_all_flash:
row = ('All Flash', all_flash)
table.add_row(row)
long_os_version = appliance_info['osVersion']
dash_x = long_os_version.index("-")
os_version = long_os_version[0:dash_x]
row = ('OS version', os_version)
table.add_row(row)
row = ('API version', json_info['preferredVersion'])
table.add_row(row)
print(table)
print("")
# Appliance Component info
# (fixed user-facing typo: was "Appliance Componets")
print("Appliance Components")
table_header = ('Component', 'Status', 'Location')
table = PrettyTable(table_header)
table.align['Component'] = "l"
table.align['Status'] = "l"
table.align['Location'] = "l"
components = appliance['components']
for component in components:
    row = (component['type'], component['status'], component['locator'])
    table.add_row(row)
print(table)
print("")
# Show failed components, if any.
if not failed_components:
    print("No failed components")
else:
    print("Failed Components")
    table_header = ('Component', 'Serial #', 'Description')
    table = PrettyTable(table_header)
    table.align['Component'] = "l"
    table.align['Serial #'] = "l"
    table.align['Description'] = "l"
    for component in failed_components:
        row = (component['componentType'], component['serialNumber'], component['description'])
        table.add_row(row)
    print(table)
    print("")
# Show the configured IP address information
table_header = ('IP', 'Service Type', 'Network Bond', 'VLAN ID')
table = PrettyTable(table_header)
table.align['IP'] = 'l'
ip_configs = appliance['configIps']
for ip_config in ip_configs:
    row = (ip_config['ip'], ip_config['serviceType'], ip_config['networkBond'], ip_config['vlanId'])
    table.add_row(row)
print(table)
# Fix: was the Python 2 statement `print ""`, a syntax error under Python 3
# and inconsistent with every other print() call in this script.
print("")
# Now show each controller information
table_header = ('Component', 'Location', 'Status')
nb_table_hdr = ('Port', 'Port Status', 'Role', 'Speed')
# Pull the controller information
controllers = appliance['controllers']
for controller in controllers:
    # One section per controller: a state line, the component table, then
    # one table per network bond.
    print(controller['locator'] + ": " + controller['state'] + " - " + controller['role'])
    table = PrettyTable(table_header)
    table.align['Component'] = "l"
    table.align['Location'] = "l"
    table.align['Status'] = "l"
    components = controller['components']
    for component in components:
        row = (component['type'], component['locator'], component['status'])
        table.add_row(row)
    print(table)
    print("")
    # Add network information
    network_bonds = controller['networkBonds']
    for nb in network_bonds:
        print(controller['locator'] + ": " + nb['name'] + ": " + nb['type'] + ": " + nb['status'] + ": " + nb['macAddress'])
        table = PrettyTable(nb_table_hdr)
        for port in nb['ports']:
            # Combine numeric speed and its unit, e.g. "10Gb".
            port_speed = str(port['maxSpeed']) + port['maxUnit']
            nb_row = (port['locator'], port['status'], port['role'], port_speed)
            table.add_row(nb_row)
        print(table)
        print("")
# Disks (disk info may be absent from the payload; bail out cleanly if so).
if 'disks' not in appliance:  # idiom fix: was `if (not 'disks' in appliance):`
    print("No disk information present")
    sys.exit(0)
print("Disks")
table_header = ('Name', 'Status', 'Type')
table = PrettyTable(table_header)
table.align['Name'] = "l"
table.align['Status'] = "l"
table.align['Type'] = "l"
disks = appliance['disks']
for disk in disks:
    # Rebuilding disks show their progress next to the state.
    if disk['state'] == "DISK_STATE_REBUILD":
        disk_state = disk['state'] + " (" + str(disk['rebuildPercent']) + "%)"
    else:
        disk_state = disk['state']
    # Prefer 'diskType' when present; fall back to 'type'.
    if 'diskType' in disk:
        row = (disk['locator'], disk_state, disk['diskType'])
    else:
        row = (disk['locator'], disk_state, disk['type'])
    table.add_row(row)
print(table)
print("")
| |
import io
import itertools
import requests
import os
import sys
from copy import deepcopy
from collections import defaultdict
from lxml import etree
from cf_units import Unit
from netCDF4 import Dimension, Variable
from pkgutil import get_data
from pkg_resources import resource_filename
# copied from paegan
# paegan may depend on these later
_possiblet = {"time", "TIME", "Time",
"t", "T",
"ocean_time", "OCEAN_TIME",
"jd", "JD",
"dn", "DN",
"times", "TIMES", "Times",
"mt", "MT",
"dt", "DT",
}
_possiblez = {"depth", "DEPTH",
"depths", "DEPTHS",
"height", "HEIGHT",
"altitude", "ALTITUDE",
"alt", "ALT",
"Alt", "Altitude",
"h", "H",
"s_rho", "S_RHO",
"s_w", "S_W",
"z", "Z",
"siglay", "SIGLAY",
"siglev", "SIGLEV",
"sigma", "SIGMA",
"vertical", "VERTICAL", "lev", "LEV", "level", "LEVEL"
}
_possiblex = {"x", "X",
"lon", "LON",
"xlon", "XLON",
"lonx", "lonx",
"lon_u", "LON_U",
"lon_v", "LON_V",
"lonc", "LONC",
"Lon", "Longitude",
"longitude", "LONGITUDE",
"lon_rho", "LON_RHO",
"lon_psi", "LON_PSI",
}
_possibley = {"y", "Y",
"lat", "LAT",
"ylat", "YLAT",
"laty", "laty",
"lat_u", "LAT_U",
"lat_v", "LAT_V",
"latc", "LATC",
"Lat", "Latitude",
"latitude", "LATITUDE",
"lat_rho", "LAT_RHO",
"lat_psi", "LAT_PSI",
}
_possibleaxis = _possiblet | _possiblez | _possiblex | _possibley
_possiblexunits = {'degrees_east',
'degree_east',
'degrees_E',
'degree_E',
'degreesE',
'degreeE'
}
_possibleyunits = {'degrees_north',
'degree_north',
'degrees_N',
'degree_N',
'degreesN',
'degreeN'
}
_possibletunits = {'day',
'days',
'd',
'hour',
'hours',
'hr',
'hrs',
'h',
'year',
'years',
'minute',
'minutes',
'm',
'min',
'mins',
'second',
'seconds',
's',
'sec',
'secs'
}
_possibleaxisunits = _possiblexunits | _possibleyunits | _possibletunits
class DotDict(dict):
    """
    Subclass of dict that will recursively look up attributes with dot notation.
    This is primarily for working with JSON-style data in a cleaner way like javascript.
    Note that this will instantiate a number of child DotDicts when you first access attributes;
    do not use in performance-critical parts of your code.
    """
    def __dir__(self):
        # Expose dict keys alongside real attributes (helps tab completion).
        return list(self.__dict__.keys()) + list(self.keys())
    def __getattr__(self, key):
        """ Make attempts to lookup by nonexistent attributes also attempt key lookups. """
        if key in self:
            return self[key]
        import sys
        import dis
        frame = sys._getframe(1)
        # Bytecode sniffing: if the *caller's* code object contains a
        # STORE_ATTR opcode, assume an assignment like ``d.a.b = 1`` is in
        # progress and auto-vivify an empty child DotDict for the chain.
        # NOTE(review): on Python 3 ``co_code`` is bytes while the left-hand
        # operand is str, so this membership test raises TypeError — confirm
        # this branch is only reachable on Python 2.
        if '\x00%c' % dis.opmap['STORE_ATTR'] in frame.f_code.co_code:
            self[key] = DotDict()
            return self[key]
        raise AttributeError(key)
    def __setattr__(self, key, value):
        # Refuse to shadow dict's own API surface; wrap plain dicts so nested
        # attribute access keeps returning DotDicts.
        if key in dir(dict):
            raise AttributeError('%s conflicts with builtin.' % key)
        if isinstance(value, dict):
            self[key] = DotDict(value)
        else:
            self[key] = value
    def copy(self):
        # Deep copy so nested children are not shared with the original.
        return deepcopy(self)
    def get_safe(self, qual_key, default=None):
        """
        @brief Returns value of qualified key, such as "system.name" or None if not exists.
        If default is given, returns the default. No exception thrown.
        """
        # Delegates to the module-level get_safe() helper below.
        value = get_safe(self, qual_key)
        if value is None:
            value = default
        return value
    @classmethod
    def fromkeys(cls, seq, value=None):
        # Mirror dict.fromkeys() but produce a DotDict.
        return DotDict(dict.fromkeys(seq, value))
def get_safe(dict_instance, keypath, default=None):
    """
    Look up a value inside a nested dict structure.

    ``keypath`` may be a dot separated path expression such as
    "system.server.host" or an explicit list of key entries.
    @retval the resolved value, or ``default`` when any lookup step fails
    """
    try:
        node = dict_instance
        # A list is taken as an explicit key sequence; anything else is
        # treated as a dotted string path.
        parts = keypath if type(keypath) is list else keypath.split('.')
        for part in parts:
            node = node[part]
    except Exception:
        return default
    return node
class NCGraph(object):
    """
    Node in a reference graph over a netCDF dataset.

    Each node wraps a Dimension or Variable and links to the nodes for its
    dimensions, ``coordinates`` attribute entries, and ``grid_mapping``
    target.  ``reference_map`` caches nodes by name so shared references
    resolve to the same node; ``self_reference_variables`` collects names of
    variables that list themselves in their own ``coordinates``.
    """
    def __init__(self, ds, name, nc_object, self_reference_variables, reference_map=None):
        self.ds = ds
        self.name = name
        self.coords = DotDict()
        self.dims = DotDict()
        self.grid_mapping = DotDict()
        self.obj = nc_object
        self.reference_variables = self_reference_variables
        # Share one cache across the whole graph; register ourselves first so
        # cycles terminate.
        self.reference_map = reference_map or {}
        self.reference_map[name] = self
        if isinstance(nc_object, Dimension):
            self._type = 'dim'
        elif isinstance(nc_object, Variable):
            self._type = 'var'
            self.get_references()
        else:
            raise TypeError("unknown type %s" % repr(type(nc_object)))
    def get_references(self):
        """Populate dims, coords and grid_mapping from the wrapped variable."""
        for dim_name in self.obj.dimensions:
            self.dims[dim_name] = self.get_dimension(dim_name)
        if hasattr(self.obj, 'coordinates'):
            for coord_name in self.obj.coordinates.split(' '):
                self.coords[coord_name] = self.get_coordinate(coord_name)
        if hasattr(self.obj, 'grid_mapping'):
            mapping_name = self.obj.grid_mapping
            self.grid_mapping[mapping_name] = self.get_grid_mapping(mapping_name)
    def get_dimension(self, dim):
        """Return the (cached or newly built) node for dimension *dim*."""
        cached = self.reference_map.get(dim)
        if cached is not None:
            return cached
        return NCGraph(self.ds, dim, self.ds.dimensions[dim],
                       self.reference_variables, self.reference_map)
    def get_coordinate(self, coord):
        """Return the node for coordinate variable *coord*, or None if absent."""
        if coord not in self.ds.variables:
            return None
        cached = self.reference_map.get(coord)
        if cached is not None:
            # A variable listing itself as its own coordinate is recorded as
            # self-referential.
            if self.name == coord:
                self.reference_variables.add(self.name)
            return cached
        return NCGraph(self.ds, coord, self.ds.variables[coord],
                       self.reference_variables, self.reference_map)
    def get_grid_mapping(self, gm):
        """Return the node for grid-mapping variable *gm*, or None if absent."""
        if gm not in self.ds.variables:
            return None
        cached = self.reference_map.get(gm)
        if cached is not None:
            return cached
        return NCGraph(self.ds, gm, self.ds.variables[gm],
                       self.reference_variables, self.reference_map)
    def __getattr__(self, key):
        """Delegate unknown attribute lookups to the wrapped netCDF object."""
        try:
            return self.__dict__[key]
        except KeyError:
            return getattr(self.obj, key)
class StandardNameTable(object):
    """In-memory view of the CF standard name table XML.

    The table is loaded from, in priority order: an explicit
    ``cached_location`` file, the file named by $CF_STANDARD_NAME_TABLE,
    or the copy packaged inside ``compliance_checker``.  Lookup is
    dict-like and transparently resolves aliases to canonical entries.
    """
    class NameEntry(object):
        # Wraps a single <entry> element of the XML table.
        def __init__(self, entrynode):
            self.canonical_units = self._get(entrynode, 'canonical_units', True)
            self.grib = self._get(entrynode, 'grib')
            self.amip = self._get(entrynode, 'amip')
            self.description = self._get(entrynode, 'description')
        def _get(self, entrynode, attrname, required=False):
            # Return the text of the single child element named *attrname*.
            # NOTE(review): when the element is absent and not required, this
            # still does vals[0] and raises IndexError instead of returning
            # None — confirm the table always provides non-required elements.
            vals = entrynode.xpath(attrname)
            if len(vals) > 1:
                raise Exception("Multiple attrs (%s) found" % attrname)
            elif required and len(vals) == 0:
                raise Exception("Required attr (%s) not found" % attrname)
            return vals[0].text
    def __init__(self, cached_location=None):
        # Source resolution order: explicit cache file, environment override,
        # packaged resource.
        if cached_location:
            with io.open(cached_location, 'r', encoding='utf-8') as fp:
                resource_text = fp.read()
        elif os.environ.get('CF_STANDARD_NAME_TABLE') and os.path.exists(os.environ['CF_STANDARD_NAME_TABLE']):
            with io.open(os.environ['CF_STANDARD_NAME_TABLE'], 'r', encoding='utf-8') as fp:
                resource_text = fp.read()
        else:
            resource_text = get_data("compliance_checker", "data/cf-standard-name-table.xml")
        parser = etree.XMLParser(remove_blank_text=True)
        self._root = etree.fromstring(resource_text, parser)
        # generate and save a list of all standard names in file
        # (index positions in these lists must stay aligned with the
        # document order used by xpath in __getitem__)
        self._names = [node.get('id') for node in self._root.iter('entry')]
        self._aliases = [node.get('id') for node in self._root.iter('alias')]
        self._version = self._root.xpath('version_number')[0].text
    def __len__(self):
        # Total number of resolvable keys: canonical names plus aliases.
        return len(self._names) + len(self._aliases)
    def __getitem__(self, key):
        # Aliases are first resolved to their canonical entry_id.
        if not (key in self._names or key in self._aliases):
            raise KeyError("%s not found in standard name table" % key)
        if key in self._aliases:
            idx = self._aliases.index(key)
            entryids = self._root.xpath('alias')[idx].xpath('entry_id')
            if len(entryids) != 1:
                raise Exception("Inconsistency in standard name table, could not lookup alias for %s" % key)
            key = entryids[0].text
            if key not in self._names:
                raise KeyError("%s not found in standard name table" % key)
        idx = self._names.index(key)
        entry = self.NameEntry(self._root.xpath('entry')[idx])
        return entry
    def get(self, key, default=None):
        '''
        Returns the item for the key or returns the default if it does not exist
        '''
        try:
            return self[key]
        except KeyError:
            return default
    def __contains__(self, key):
        # Membership covers both canonical names and aliases.
        return key in self._names or key in self._aliases
    def __iter__(self):
        # Iterate canonical names first, then aliases.
        return iter(itertools.chain(self._names, self._aliases))
def download_cf_standard_name_table(version, location=None):
    '''
    Downloads the specified CF standard name table version and saves it to file

    :param str version: CF standard name table version number (i.e 34)
    :param str location: Path/filename to write downloaded xml file to
    :raises requests.exceptions.RequestException: on HTTP errors or timeout
    '''
    if location is None:  # This case occurs when updating the packaged version from command line
        location = resource_filename('compliance_checker', 'data/cf-standard-name-table.xml')
    url = "http://cfconventions.org/Data/cf-standard-names/{0}/src/cf-standard-name-table.xml".format(version)
    # BUG FIX: requests.get() with no timeout can block forever if the server
    # stalls; bound the wait so callers fail fast instead of hanging.
    r = requests.get(url, allow_redirects=True, timeout=60)
    if r.status_code == 200:
        print("Downloading cf-standard-names table version {0} from: {1}".format(version, url), file=sys.stderr)
        with open(location, 'wb') as f:
            f.write(r.content)
    else:
        # Surface non-200 responses as an HTTPError.
        r.raise_for_status()
    return
def create_cached_data_dir():
    '''
    Returns the path to the data directory to download CF standard names,
    creating the directory if it does not already exist.
    Use $XDG_DATA_HOME, falling back to ~/.local/share.
    '''
    writable_directory = os.path.join(os.path.expanduser('~'), '.local', 'share')
    data_directory = os.path.join(os.environ.get("XDG_DATA_HOME", writable_directory),
                                  'compliance-checker')
    # BUG FIX: the previous isdir()-then-makedirs() sequence was racy (TOCTOU)
    # under concurrent callers; exist_ok lets makedirs handle both cases
    # atomically.
    os.makedirs(data_directory, exist_ok=True)
    return data_directory
def units_known(units):
    """Return True when cf_units can parse *units*, False otherwise."""
    try:
        Unit(units)
        return True
    except ValueError:
        return False
def units_convertible(units1, units2, reftimeistime=True):
    """Return True if a Unit representing the string units1 can be converted
    to a Unit representing the string units2, else False.

    ``reftimeistime`` is accepted for API compatibility but is not used.
    """
    try:
        source = Unit(units1)
        target = Unit(units2)
    except ValueError:
        # Either unit string failed to parse.
        return False
    return source.is_convertible(target)
def units_temporal(units):
    """Return True when *units* parse as a time reference, else False."""
    try:
        parsed = Unit(units)
    except ValueError:
        return False
    return parsed.is_time_reference()
def map_axes(dim_vars, reverse_map=False):
    """
    Map axis letters to dimension-variable names (or the reverse).

    By default returns axis name -> [dimension names]; with
    ``reverse_map=True`` returns dimension name -> [axis name].
    Variables lacking an ``axis`` attribute (or carrying an unrecognized
    one) are skipped.
    """
    recognized = ('X', 'Y', 'Z', 'T')
    mapping = defaultdict(list)
    for name, var in dim_vars.items():
        axis = getattr(var, 'axis', '')
        if not axis:
            continue
        axis = axis.upper()
        if axis not in recognized:
            continue
        if reverse_map:
            mapping[name].append(axis)
        else:
            mapping[axis].append(name)
    return dict(mapping)
def find_coord_vars(ncds):
    """
    Return every coordinate variable in dataset *ncds*.

    A coordinate variable is one whose name matches a dimension and whose
    sole dimension is itself; results follow the dataset's dimension order.
    """
    return [ncds.variables[name]
            for name in ncds.dimensions
            if name in ncds.variables
            and ncds.variables[name].dimensions == (name,)]
def is_time_variable(varname, var):
    """
    Heuristically decide whether *var* (named *varname*) represents time:
    the name "time", a standard_name of "time", an axis of "T", or units
    convertible to "seconds since 1900-01-01".
    """
    if varname.lower() == 'time':
        return True
    if getattr(var, 'standard_name', '') == 'time':
        return True
    if getattr(var, 'axis', '') == 'T':
        return True
    return units_convertible('seconds since 1900-01-01',
                             getattr(var, 'units', ''))
def is_vertical_coordinate(var_name, var):
    """
    Determines if a variable is a vertical coordinate variable

    4.3
    A vertical coordinate will be identifiable by: units of pressure; or the presence of the positive attribute with a
    value of up or down (case insensitive). Optionally, the vertical type may be indicated additionally by providing
    the standard_name attribute with an appropriate value, and/or the axis attribute with the value Z.
    """
    # Known vertical-coordinate name?
    if var_name.lower() in _possiblez:
        return True
    if getattr(var, 'standard_name', '') in _possiblez:
        return True
    # Axis explicitly set to Z?
    if getattr(var, 'axis', '').lower() == 'z':
        return True
    # Pressure units identify a vertical coordinate on their own; only
    # non-pressure variables fall back to the positive up/down attribute.
    if units_convertible(getattr(var, 'units', '1'), 'dbar'):
        return True
    return getattr(var, 'positive', '').lower() in ('up', 'down')
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.framework.meta_graph.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os.path
import random
import shutil
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner_impl
# pylint: disable=invalid-name
def _TestDir(test_name):
  """Return a fresh per-test temp directory, wiping any previous contents."""
  path = os.path.join(test.get_temp_dir(), test_name)
  if os.path.exists(path):
    shutil.rmtree(path)
  gfile.MakeDirs(path)
  return path
# pylint: enable=invalid-name
class SimpleMetaGraphTest(test.TestCase):
  """Round-trip tests for export/import of simple (unscoped) meta graphs."""
  def testNoVariables(self):
    """Exports a variable-free graph and re-imports it into a clean graph."""
    test_dir = _TestDir("no_variables")
    filename = os.path.join(test_dir, "metafile")
    input_feed_value = -10  # Arbitrary input value for feed_dict.
    orig_graph = ops.Graph()
    with self.test_session(graph=orig_graph) as sess:
      # Create a minimal graph with zero variables.
      input_tensor = array_ops.placeholder(
          dtypes.float32, shape=[], name="input")
      offset = constant_op.constant(42, dtype=dtypes.float32, name="offset")
      output_tensor = math_ops.add(input_tensor, offset, name="add_offset")
      # Add input and output tensors to graph collections.
      ops.add_to_collection("input_tensor", input_tensor)
      ops.add_to_collection("output_tensor", output_tensor)
      # -10 + 42 == 32.
      output_value = sess.run(output_tensor, {input_tensor: input_feed_value})
      self.assertEqual(output_value, 32)
      # Generates MetaGraphDef.
      meta_graph_def, var_list = meta_graph.export_scoped_meta_graph(
          filename=filename,
          graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
          collection_list=["input_tensor", "output_tensor"],
          saver_def=None)
      self.assertTrue(meta_graph_def.HasField("meta_info_def"))
      self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
      self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
                          "")
      # No variables were created, so the exported var list is empty.
      self.assertEqual({}, var_list)
    # Create a clean graph and import the MetaGraphDef nodes.
    new_graph = ops.Graph()
    with self.test_session(graph=new_graph) as sess:
      # Import the previously export meta graph.
      meta_graph.import_scoped_meta_graph(filename)
      # Re-exports the current graph state for comparison to the original.
      new_meta_graph_def, _ = meta_graph.export_scoped_meta_graph(filename +
                                                                  "_new")
      self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
      # Ensures that we can still get a reference to our graph collections.
      new_input_tensor = ops.get_collection("input_tensor")[0]
      new_output_tensor = ops.get_collection("output_tensor")[0]
      # Verifies that the new graph computes the same result as the original.
      new_output_value = sess.run(new_output_tensor,
                                  {new_input_tensor: input_feed_value})
      self.assertEqual(new_output_value, output_value)
  def testStrippedOpListNestedFunctions(self):
    """stripped_op_list must see ops used transitively through functions."""
    with self.test_session():
      # Square two levels deep
      @function.Defun(dtypes.int32)
      def f0(x):
        return math_ops.square(x)
      @function.Defun(dtypes.int32)
      def f1(x):
        return f0(x)
      # At this point we've defined two functions but haven't called them, so
      # there should be no used ops.
      op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
                                                      .as_graph_def())
      self.assertEqual(len(op_list.op), 0)
      # If we call the function on a constant, there should be two ops
      _ = f1(constant_op.constant(7))
      op_list = meta_graph.stripped_op_list_for_graph(ops.get_default_graph()
                                                      .as_graph_def())
      self.assertEqual(["Const", "Square"], [op.name for op in op_list.op])
  def testStrippedOpListRecursiveFunctions(self):
    """stripped_op_list must terminate on mutually recursive functions."""
    # The function module doesn't support recursive functions, so we build a
    # recursive function situation by ourselves: A calls B calls A and Const.
    graph = graph_pb2.GraphDef()
    a = graph.library.function.add()
    b = graph.library.function.add()
    a.signature.name = "A"
    b.signature.name = "B"
    a.node_def.add().op = "B"
    b.node_def.add().op = "Const"
    b.node_def.add().op = "A"
    # Use A in the graph
    graph.node.add().op = "A"
    # The stripped op list should contain just Const.
    op_list = meta_graph.stripped_op_list_for_graph(graph)
    self.assertEqual(["Const"], [op.name for op in op_list.op])
class ScopedMetaGraphTest(test.TestCase):
  """Tests export/import of meta graphs restricted to a name scope."""
  def _testScopedExport(self, test_dir, exported_filenames):
    """Builds a three-layer inference graph and exports each layer's scope."""
    graph = ops.Graph()
    with graph.as_default():
      # Creates an inference graph.
      # Hidden 1
      colocate_constraint = constant_op.constant(1.2, name="constraint")
      images = constant_op.constant(
          1.2, dtypes.float32, shape=[100, 28], name="images")
      with ops.name_scope("hidden1"):
        with graph.colocate_with(colocate_constraint.op):
          weights1 = variables.Variable(
              random_ops.truncated_normal(
                  [28, 128], stddev=1.0 / math.sqrt(float(28))),
              name="weights")
        # The use of control_flow_ops.cond here is purely for adding test
        # coverage the save and restore of control flow context (which doesn't
        # make any sense here from a machine learning perspective). The typical
        # biases is a simple Variable without the conditions.
        biases1 = variables.Variable(
            control_flow_ops.cond(
                math_ops.less(random.random(), 0.5),
                lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
            name="biases")
        hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
      # Hidden 2
      with ops.name_scope("hidden2"):
        weights2 = variables.Variable(
            random_ops.truncated_normal(
                [128, 32], stddev=1.0 / math.sqrt(float(128))),
            name="weights")
        # The use of control_flow_ops.while_loop here is purely for adding test
        # coverage the save and restore of control flow context (which doesn't
        # make any sense here from a machine learning perspective). The typical
        # biases is a simple Variable without the conditions.
        def loop_cond(it, _):
          return it < 2
        def loop_body(it, biases2):
          biases2 += constant_op.constant(0.1, shape=[32])
          return it + 1, biases2
        _, biases2 = control_flow_ops.while_loop(
            loop_cond,
            loop_body, [
                constant_op.constant(0), variables.Variable(
                    array_ops.zeros([32]), name="biases")
            ])
        hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
      # Linear
      with ops.name_scope("softmax_linear"):
        weights3 = variables.Variable(
            random_ops.truncated_normal(
                [32, 10], stddev=1.0 / math.sqrt(float(32))),
            name="weights")
        biases3 = variables.Variable(array_ops.zeros([10]), name="biases")
        logits = math_ops.matmul(hidden2, weights3) + biases3
        ops.add_to_collection("logits", logits)
    # Exports each sub-graph.
    # Exports the first one with unbound_inputs_col_name set to default.
    orig_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
        filename=os.path.join(test_dir, exported_filenames[0]),
        graph=ops.get_default_graph(),
        export_scope="hidden1")
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    var_names = [v.name for _, v in var_list.items()]
    self.assertEqual(["hidden1/biases:0", "hidden1/weights:0"],
                     sorted(var_names))
    # Exports the rest with no unbound_inputs_col_name.
    orig_meta_graph2, _ = meta_graph.export_scoped_meta_graph(
        filename=os.path.join(test_dir, exported_filenames[1]),
        graph=ops.get_default_graph(),
        export_scope="hidden2",
        unbound_inputs_col_name=None)
    orig_meta_graph3, _ = meta_graph.export_scoped_meta_graph(
        filename=os.path.join(test_dir, exported_filenames[2]),
        graph=ops.get_default_graph(),
        export_scope="softmax_linear",
        unbound_inputs_col_name=None)
    return [orig_meta_graph1, orig_meta_graph2, orig_meta_graph3]
  def _testScopedImport(self, test_dir, exported_filenames):
    """Imports the exported layers under new scopes and re-exports them."""
    graph = ops.Graph()
    # Create all the missing inputs.
    with graph.as_default():
      new_image = constant_op.constant(
          1.2, dtypes.float32, shape=[100, 28], name="images")
    # Without an input_map the exported scope's external inputs are unbound.
    with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
      meta_graph.import_scoped_meta_graph(
          os.path.join(test_dir, exported_filenames[0]),
          graph=graph,
          import_scope="new_hidden1")
    # NOTE(review): "image:0" does not match the "images" constant created
    # above, so the unbound input stays unresolved — this failure looks
    # intentional for the test; confirm.
    with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
      meta_graph.import_scoped_meta_graph(
          os.path.join(test_dir, exported_filenames[0]),
          graph=graph,
          input_map={"image:0": new_image},
          import_scope="new_hidden1")
    # Verifies we can import the original "hidden1" into "new_hidden1".
    var_list = meta_graph.import_scoped_meta_graph(
        os.path.join(test_dir, exported_filenames[0]),
        graph=graph,
        input_map={"$unbound_inputs_images": new_image},
        import_scope="new_hidden1")
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_var_names = [v.name for _, v in var_list.items()]
    self.assertEqual(["new_hidden1/biases:0", "new_hidden1/weights:0"],
                     sorted(new_var_names))
    # Verifies we can import the original "hidden2" into "new_hidden2".
    hidden1 = array_ops.identity(
        graph.as_graph_element("new_hidden1/Relu:0"), name="hidden1/Relu")
    var_list = meta_graph.import_scoped_meta_graph(
        os.path.join(test_dir, exported_filenames[1]),
        graph=graph,
        input_map={"$unbound_inputs_hidden1/Relu": hidden1},
        import_scope="new_hidden2",
        unbound_inputs_col_name=None)
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_var_names = [v.name for _, v in var_list.items()]
    self.assertEqual(["new_hidden2/biases:0", "new_hidden2/weights:0"],
                     sorted(new_var_names))
    # Verifies we can import the original "softmax_linear" into
    # "new_softmax_linear".
    hidden2 = array_ops.identity(
        graph.as_graph_element("new_hidden2/Relu:0"), name="hidden2/Relu")
    var_list = meta_graph.import_scoped_meta_graph(
        os.path.join(test_dir, exported_filenames[2]),
        graph=graph,
        input_map={"$unbound_inputs_hidden2/Relu": hidden2},
        import_scope="new_softmax_linear",
        unbound_inputs_col_name=None)
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_var_names = [v.name for _, v in var_list.items()]
    self.assertEqual(
        ["new_softmax_linear/biases:0", "new_softmax_linear/weights:0"],
        sorted(new_var_names))
    # Exports the scoped meta graphs again.
    new_meta_graph1, var_list = meta_graph.export_scoped_meta_graph(
        graph=graph, export_scope="new_hidden1")
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_meta_graph2, var_list = meta_graph.export_scoped_meta_graph(
        graph=graph, export_scope="new_hidden2", unbound_inputs_col_name=None)
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    new_meta_graph3, var_list = meta_graph.export_scoped_meta_graph(
        graph=graph,
        export_scope="new_softmax_linear",
        unbound_inputs_col_name=None)
    self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
    return [new_meta_graph1, new_meta_graph2, new_meta_graph3]
  # Verifies that we can export the subgraph under each layer and import
  # them into new layers in a new graph.
  def testScopedExportAndImport(self):
    """Round-trips the three layers through scoped export and import."""
    test_dir = _TestDir("scoped_export_import")
    filenames = [
        "exported_hidden1.pbtxt", "exported_hidden2.pbtxt",
        "exported_softmax_linear.pbtxt"
    ]
    orig_meta_graphs = self._testScopedExport(test_dir, filenames)
    new_meta_graphs = self._testScopedImport(test_dir, filenames)
    # Delete the unbound_inputs to allow directly calling ProtoEqual.
    del orig_meta_graphs[0].collection_def["unbound_inputs"]
    del new_meta_graphs[0].collection_def["unbound_inputs"]
    for a, b in zip(orig_meta_graphs, new_meta_graphs):
      test_util.assert_meta_graph_protos_equal(self, a, b)
  def testScopedImportUnderNameScope(self):
    """Importing inside an active name scope prefixes imported names."""
    graph = ops.Graph()
    with graph.as_default():
      variables.Variable(initial_value=1.0, trainable=True, name="myvar")
    meta_graph_def, _ = meta_graph.export_scoped_meta_graph(graph=graph)
    graph = ops.Graph()
    with graph.as_default():
      with ops.name_scope("foo"):
        imported_variables = meta_graph.import_scoped_meta_graph(
            meta_graph_def, import_scope="bar")
        self.assertEqual(len(imported_variables), 1)
        # Both the surrounding name scope and the import scope are applied.
        self.assertEqual(list(imported_variables.values())[0].name,
                         "foo/bar/myvar:0")
  def testScopedImportWithSelectedCollections(self):
    """restore_collections_predicate filters which collections are restored."""
    meta_graph_filename = os.path.join(
        _TestDir("selected_collections_import"), "meta_graph.pb")
    graph = ops.Graph()
    # Add a variable to populate two collections. The functionality tested is
    # not specific to variables, but using variables in the test is convenient.
    with graph.as_default():
      variables.Variable(initial_value=1.0, trainable=True)
    self.assertTrue(
        all([
            graph.get_collection(key)
            for key in
            [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES]
        ]))
    meta_graph.export_scoped_meta_graph(
        filename=meta_graph_filename, graph=graph)
    def _test_import(include_collection_keys, omit_collection_keys):
      # Imports the saved meta graph restoring only the included collections
      # and asserts the omitted ones remain empty.
      assert set(include_collection_keys).isdisjoint(omit_collection_keys)
      newgraph = ops.Graph()
      import_scope = "some_scope_name"
      def _restore_collections_predicate(collection_key):
        return (collection_key in include_collection_keys and
                collection_key not in omit_collection_keys)
      meta_graph.import_scoped_meta_graph(
          meta_graph_filename,
          graph=newgraph,
          import_scope=import_scope,
          restore_collections_predicate=_restore_collections_predicate)
      collection_values = [
          newgraph.get_collection(name=key, scope=import_scope)
          for key in include_collection_keys
      ]
      self.assertTrue(all(collection_values))
      collection_values = [
          newgraph.get_collection(name=key, scope=import_scope)
          for key in omit_collection_keys
      ]
      self.assertFalse(any(collection_values))
    # Exercise all include/omit combinations of the two collections.
    _test_import(
        include_collection_keys=[
            ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
        ],
        omit_collection_keys=[])
    _test_import(
        include_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES],
        omit_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES])
    _test_import(
        include_collection_keys=[ops.GraphKeys.TRAINABLE_VARIABLES],
        omit_collection_keys=[ops.GraphKeys.GLOBAL_VARIABLES])
    _test_import(
        include_collection_keys=[],
        omit_collection_keys=[
            ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.TRAINABLE_VARIABLES
        ])
  def _testScopedExportWithQueue(self, test_dir, exported_filename):
    """Exports a scope containing a FIFOQueue and its queue runner."""
    graph = ops.Graph()
    with graph.as_default():
      with ops.name_scope("queue1"):
        input_queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
        enqueue = input_queue.enqueue((9876), name="enqueue")
        close = input_queue.close(name="close")
        qr = queue_runner_impl.QueueRunner(input_queue, [enqueue], close)
        queue_runner_impl.add_queue_runner(qr)
        input_queue.dequeue(name="dequeue")
      orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
          filename=os.path.join(test_dir, exported_filename),
          graph=ops.get_default_graph(),
          export_scope="queue1")
    return orig_meta_graph
  def _testScopedImportWithQueue(self, test_dir, exported_filename,
                                 new_exported_filename):
    """Imports the queue scope under a new name and re-exports it."""
    graph = ops.Graph()
    meta_graph.import_scoped_meta_graph(
        os.path.join(test_dir, exported_filename),
        graph=graph,
        import_scope="new_queue1")
    graph.as_graph_element("new_queue1/dequeue:0")
    graph.as_graph_element("new_queue1/close")
    with graph.as_default():
      new_meta_graph, _ = meta_graph.export_scoped_meta_graph(
          filename=os.path.join(test_dir, new_exported_filename),
          graph=graph,
          export_scope="new_queue1")
    return new_meta_graph
  # Verifies that we can export the subgraph containing a FIFOQueue under
  # "queue1" and import it into "new_queue1" in a new graph.
  def testScopedWithQueue(self):
    """Round-trips a queue-containing scope through export and import."""
    test_dir = _TestDir("scoped_with_queue")
    orig_meta_graph = self._testScopedExportWithQueue(test_dir,
                                                      "exported_queue1.pbtxt")
    new_meta_graph = self._testScopedImportWithQueue(
        test_dir, "exported_queue1.pbtxt", "exported_new_queue1.pbtxt")
    self.assertProtoEquals(orig_meta_graph, new_meta_graph)
  # Verifies that we can export a subgraph in a nested name scope containing a
  # "hidden1/hidden2" and import it into "new_hidden1/new_hidden2" in a new
  # graph.
  def doTestExportNestedNames(self, use_resource=False):
    """Exports scope "hidden1/hidden2" and imports it under a nested scope."""
    graph1 = ops.Graph()
    with graph1.as_default():
      with ops.name_scope("hidden1/hidden2/hidden3"):
        images = constant_op.constant(
            1.0, dtypes.float32, shape=[3, 2], name="images")
        if use_resource:
          # NOTE(review): in this branch only biases1 is a ResourceVariable;
          # weights1 remains a ref variable in both branches — confirm this
          # mixture is intended.
          weights1 = variables.Variable(
              [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
          biases1 = resource_variable_ops.ResourceVariable(
              [0.1] * 3, name="biases")
        else:
          biases1 = variables.Variable([0.1] * 3, name="biases")
          weights1 = variables.Variable(
              [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
        nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
    orig_meta_graph, var_list = meta_graph.export_scoped_meta_graph(
        export_scope="hidden1/hidden2", graph=graph1)
    var_names = [v.name for _, v in var_list.items()]
    self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
                     sorted(var_list.keys()))
    self.assertEqual([
        "hidden1/hidden2/hidden3/biases:0", "hidden1/hidden2/hidden3/weights:0"
    ], sorted(var_names))
    # Exported node names are relative to the export scope.
    for node in orig_meta_graph.graph_def.node:
      self.assertTrue(node.name.startswith("hidden3"))
    graph2 = ops.Graph()
    new_var_list = meta_graph.import_scoped_meta_graph(
        orig_meta_graph, import_scope="new_hidden1/new_hidden2", graph=graph2)
    self.assertEqual(["hidden3/biases:0", "hidden3/weights:0"],
                     sorted(new_var_list.keys()))
    new_var_names = [v.name for _, v in new_var_list.items()]
    self.assertEqual([
        "new_hidden1/new_hidden2/hidden3/biases:0",
        "new_hidden1/new_hidden2/hidden3/weights:0"
    ], sorted(new_var_names))
    # Colocation (_class) attributes must be remapped to the new scope too.
    nodes = [
        "new_hidden1/new_hidden2/hidden3/biases/Assign",
        "new_hidden1/new_hidden2/hidden3/weights/Assign"
    ]
    expected = [
        b"loc:@new_hidden1/new_hidden2/hidden3/biases",
        b"loc:@new_hidden1/new_hidden2/hidden3/weights"
    ]
    for n, e in zip(nodes, expected):
      self.assertEqual([e], graph2.get_operation_by_name(n).get_attr("_class"))
  def testExportNestedNames(self):
    self.doTestExportNestedNames(use_resource=False)
  def testExportNestedNamesResource(self):
    self.doTestExportNestedNames(use_resource=True)
  def testPotentialCycle(self):
    """Imports a scope whose ops reference tensors outside the scope."""
    graph1 = ops.Graph()
    with graph1.as_default():
      a = constant_op.constant(1.0, shape=[2, 2])
      b = constant_op.constant(2.0, shape=[2, 2])
      matmul = math_ops.matmul(a, b)
      with ops.name_scope("hidden1"):
        c = nn_ops.relu(matmul)
        d = constant_op.constant(3.0, shape=[2, 2])
        matmul = math_ops.matmul(c, d)
    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        export_scope="hidden1", graph=graph1)
    graph2 = ops.Graph()
    with graph2.as_default():
      # The outside-the-scope MatMul input must be supplied via input_map.
      with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
        meta_graph.import_scoped_meta_graph(
            orig_meta_graph, import_scope="new_hidden1")
      meta_graph.import_scoped_meta_graph(
          orig_meta_graph,
          import_scope="new_hidden1",
          input_map={
              "$unbound_inputs_MatMul": constant_op.constant(
                  4.0, shape=[2, 2])
          })
  def testClearDevices(self):
    """clear_devices strips device placements on export and on import."""
    graph1 = ops.Graph()
    with graph1.as_default():
      with ops.device("/device:CPU:0"):
        a = variables.Variable(
            constant_op.constant(
                1.0, shape=[2, 2]), name="a")
      with ops.device("/job:ps/replica:0/task:0/device:GPU:0"):
        b = variables.Variable(
            constant_op.constant(
                2.0, shape=[2, 2]), name="b")
      with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
        math_ops.matmul(a, b, name="matmul")
    self.assertEqual("/device:CPU:0", str(graph1.as_graph_element("a").device))
    self.assertEqual("/job:ps/replica:0/task:0/device:GPU:0",
                     str(graph1.as_graph_element("b").device))
    self.assertEqual("/job:localhost/replica:0/task:0/device:CPU:0",
                     str(graph1.as_graph_element("matmul").device))
    # Verifies that devices are cleared on export.
    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        graph=graph1, clear_devices=True)
    graph2 = ops.Graph()
    with graph2.as_default():
      meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
    self.assertEqual("", str(graph2.as_graph_element("a").device))
    self.assertEqual("", str(graph2.as_graph_element("b").device))
    self.assertEqual("", str(graph2.as_graph_element("matmul").device))
    # Verifies that devices are cleared on export when passing in graph_def.
    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        graph_def=graph1.as_graph_def(), clear_devices=True)
    graph2 = ops.Graph()
    with graph2.as_default():
      meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=False)
    self.assertEqual("", str(graph2.as_graph_element("a").device))
    self.assertEqual("", str(graph2.as_graph_element("b").device))
    self.assertEqual("", str(graph2.as_graph_element("matmul").device))
    # Verifies that devices are cleared on import.
    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        graph=graph1, clear_devices=False)
    graph2 = ops.Graph()
    with graph2.as_default():
      meta_graph.import_scoped_meta_graph(orig_meta_graph, clear_devices=True)
    self.assertEqual("", str(graph2.as_graph_element("a").device))
    self.assertEqual("", str(graph2.as_graph_element("b").device))
    self.assertEqual("", str(graph2.as_graph_element("matmul").device))
class MetaGraphWithVariableScopeTest(test.TestCase):
  """Tests meta-graph export/import of graphs holding local variables."""

  def testMetricsCollection(self):
    """Exports and re-imports a meta graph with metric LOCAL_VARIABLES."""

    def _enqueue_vector(sess, queue, values, shape=None):
      # Enqueue `values` as a single (1, len(values)) row vector unless an
      # explicit shape is supplied.
      if not shape:
        shape = (1, len(values))
      dtype = queue.dtypes[0]
      sess.run(
          queue.enqueue(constant_op.constant(
              values, dtype=dtype, shape=shape)))

    meta_graph_filename = os.path.join(
        _TestDir("metrics_export"), "meta_graph.pb")
    graph = ops.Graph()
    with self.test_session(graph=graph) as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()
      # metrics.mean creates its state as local (non-trainable) variables,
      # which end up in the LOCAL_VARIABLES collection.
      _, update_op = metrics.mean(values)

      initializer = variables.local_variables_initializer()
      sess.run(initializer)
      sess.run(update_op)

    meta_graph.export_scoped_meta_graph(
        filename=meta_graph_filename, graph=graph)

    # Verifies that importing a meta_graph with LOCAL_VARIABLES collection
    # works correctly.
    graph = ops.Graph()
    with self.test_session(graph=graph) as sess:
      meta_graph.import_scoped_meta_graph(meta_graph_filename)
      initializer = variables.local_variables_initializer()
      sess.run(initializer)

    # Verifies that importing an old meta_graph where "local_variables"
    # collection is of node_list type works, but cannot build initializer
    # with the collection.
    graph = ops.Graph()
    with self.test_session(graph=graph) as sess:
      meta_graph.import_scoped_meta_graph(
          test.test_src_dir_path(
              "python/framework/testdata/metrics_export_meta_graph.pb"))
      # The old-format collection yields plain Tensors, not Variables ...
      self.assertEqual(len(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)),
                       2)
      # ... so building an initializer from it must fail.
      with self.assertRaisesRegexp(
          AttributeError, "'Tensor' object has no attribute 'initializer'"):
        initializer = variables.local_variables_initializer()
class ExportImportAcrossScopesTest(test.TestCase):
  """Tests exporting from one name scope and importing into another."""

  def testPartionedVariables(self):
    # NOTE(review): method name has a typo ("Partioned"); kept as-is because
    # renaming would change the public test identifier.

    def make_graph_with_partitioned_variables():
      # A fixed-size partitioner splits "weights" into 3 shards along axis 0.
      variable_scope.get_variable(
          name="weights",
          partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0),
          initializer=random_ops.truncated_normal([100, 10]))

    self._testExportImportAcrossScopes(make_graph_with_partitioned_variables)

  def _testExportImportAcrossScopes(self, graph_fn):
    """Tests export and importing a graph across scopes.

    Args:
      graph_fn: A closure that creates a graph on the current scope.
    """
    with ops.Graph().as_default() as original_graph:
      with variable_scope.variable_scope("dropA/dropB/keepA"):
        graph_fn()
    # Export drops the "dropA/dropB" prefix ...
    exported_meta_graph_def = meta_graph.export_scoped_meta_graph(
        graph=original_graph,
        export_scope="dropA/dropB")[0]

    # ... and import prepends "importA" instead.
    with ops.Graph().as_default() as imported_graph:
      meta_graph.import_scoped_meta_graph(
          exported_meta_graph_def,
          import_scope="importA")

    # Build the graph the round-trip should be equivalent to.
    with ops.Graph().as_default() as expected_graph:
      with variable_scope.variable_scope("importA/keepA"):
        graph_fn()

    result = meta_graph.export_scoped_meta_graph(graph=imported_graph)[0]
    expected = meta_graph.export_scoped_meta_graph(graph=expected_graph)[0]
    self.assertProtoEquals(expected, result)
# Run all tests in this module when executed as a script.
if __name__ == "__main__":
  test.main()
| |
"""Functions to plot ICA specific data (besides topographies)."""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: Simplified BSD
from functools import partial
import numpy as np
from .utils import (tight_layout, _prepare_trellis, _select_bads,
_layout_figure, _plot_raw_onscroll, _mouse_click,
_helper_raw_resize, _plot_raw_onkey, plt_show)
from .topomap import (_prepare_topo_plot, plot_topomap, _hide_frame,
_plot_ica_topomap)
from .raw import _prepare_mne_browse_raw, _plot_raw_traces, _convert_psds
from .epochs import _prepare_mne_browse_epochs, plot_epochs_image
from .evoked import _butterfly_on_button_press, _butterfly_onpick
from ..utils import warn, _validate_type, fill_doc
from ..defaults import _handle_default
from ..io.meas_info import create_info
from ..io.pick import pick_types, _picks_to_idx, _DATA_CH_TYPES_ORDER_DEFAULT
from ..time_frequency.psd import psd_multitaper
from ..utils import _reject_data_segments
@fill_doc
def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
                     stop=None, title=None, show=True, block=False,
                     show_first_samp=False):
    """Plot estimated latent sources given the unmixing matrix.

    Typical usecases:

    1. plot evolution of latent sources over time based on (Raw input)
    2. plot latent source around event related time windows (Epochs input)
    3. plot time-locking in ICA space (Evoked input)

    Parameters
    ----------
    ica : instance of mne.preprocessing.ICA
        The ICA solution.
    inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
        The object to plot the sources from.
    %(picks_base)s all sources in the order as fitted.
    exclude : array-like of int
        The components marked for exclusion. If None (default), ICA.exclude
        will be used.
    start : int
        X-axis start index. If None, from the beginning.
    stop : int
        X-axis stop index. If None, next 20 are shown, in case of evoked to the
        end.
    title : str | None
        The figure title. If None a default is provided.
    show : bool
        Show figure if True.
    block : bool
        Whether to halt program execution until the figure is closed.
        Useful for interactive selection of components in raw and epoch
        plotter. For evoked, this parameter has no effect. Defaults to False.
    show_first_samp : bool
        If True, show time axis relative to the ``raw.first_samp``.

    Returns
    -------
    fig : instance of Figure
        The figure.

    Notes
    -----
    For raw and epoch instances, it is possible to select components for
    exclusion by clicking on the line. The selected components are added to
    ``ica.exclude`` on close.

    .. versionadded:: 0.10.0
    """
    # Imported here to avoid circular imports.
    from ..io.base import BaseRaw
    from ..evoked import Evoked
    from ..epochs import BaseEpochs

    if exclude is None:
        exclude = ica.exclude
    elif len(ica.exclude) > 0:
        # Merge user-supplied exclusions with those already stored on `ica`.
        exclude = np.union1d(ica.exclude, exclude)
    picks = _picks_to_idx(ica.n_components_, picks, 'all')
    if isinstance(inst, BaseRaw):
        fig = _plot_sources_raw(ica, inst, picks, exclude, start=start,
                                stop=stop, show=show, title=title,
                                block=block, show_first_samp=show_first_samp)
    elif isinstance(inst, BaseEpochs):
        fig = _plot_sources_epochs(ica, inst, picks, exclude, start=start,
                                   stop=stop, show=show, title=title,
                                   block=block)
    elif isinstance(inst, Evoked):
        if start is not None or stop is not None:
            inst = inst.copy().crop(start, stop)
        sources = ica.get_sources(inst)
        fig = _plot_ica_sources_evoked(
            evoked=sources, picks=picks, exclude=exclude, title=title,
            labels=getattr(ica, 'labels_', None), show=show, ica=ica)
    else:
        # Fix: the message previously omitted Evoked even though the branch
        # above explicitly supports it.
        raise ValueError('Data input must be of Raw, Epochs or Evoked type')
    return fig
def _create_properties_layout(figsize=None):
    """Create main figure and axes layout used by plot_ica_properties."""
    import matplotlib.pyplot as plt
    if figsize is None:
        figsize = [7., 6.]
    fig = plt.figure(figsize=figsize, facecolor=[0.95] * 3)
    # One labelled axes per property panel; each location is given as
    # [left, bottom, width, height] in figure-relative coordinates.
    panel_locations = (('topomap', [0.08, 0.5, 0.3, 0.45]),
                       ('image', [0.5, 0.6, 0.45, 0.35]),
                       ('erp', [0.5, 0.5, 0.45, 0.1]),
                       ('spectrum', [0.08, 0.1, 0.32, 0.3]),
                       ('variance', [0.5, 0.1, 0.45, 0.25]))
    axes = []
    for panel_name, location in panel_locations:
        axes.append(fig.add_axes(location, label=panel_name))
    return fig, axes
def _plot_ica_properties(pick, ica, inst, psds_mean, freqs, n_trials,
                         epoch_var, plot_lowpass_edge, epochs_src,
                         set_title_and_labels, plot_std, psd_ylabel,
                         spectrum_std, topomap_args, image_args, fig, axes,
                         kind, dropped_indices):
    """Plot ICA properties (helper).

    Draws one component's topomap, epochs image + ERP/ERF, power spectrum
    and per-epoch/segment variance onto the five pre-created ``axes``
    (topomap, image, erp, spectrum, variance) and returns ``fig``.
    All heavy computation is done by the caller; this only plots.
    """
    from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
    from scipy.stats import gaussian_kde
    topo_ax, image_ax, erp_ax, spec_ax, var_ax = axes
    # plotting
    # --------
    # component topomap
    _plot_ica_topomap(ica, pick, show=False, axes=topo_ax, **topomap_args)
    # image and erp
    # we create a new epoch with dropped rows
    # `dropped_indices - arange(...)` converts post-drop positions back to
    # insertion offsets for np.insert; dropped rows are filled with zeros.
    epoch_data = epochs_src.get_data()
    epoch_data = np.insert(arr=epoch_data,
                           obj=(dropped_indices -
                                np.arange(len(dropped_indices))).astype(int),
                           values=0.0,
                           axis=0)
    from ..epochs import EpochsArray
    epochs_src = EpochsArray(epoch_data, epochs_src.info, verbose=0)
    plot_epochs_image(epochs_src, picks=pick, axes=[image_ax, erp_ax],
                      combine=None, colorbar=False, show=False,
                      **image_args)
    # spectrum
    spec_ax.plot(freqs, psds_mean, color='k')
    if plot_std:
        # Shade asymmetric "std" band computed by _get_psd_label_and_std.
        spec_ax.fill_between(freqs, psds_mean - spectrum_std[0],
                             psds_mean + spectrum_std[1],
                             color='k', alpha=.2)
    if plot_lowpass_edge:
        # Dashed vertical marker at the data's low-pass cutoff.
        spec_ax.axvline(inst.info['lowpass'], lw=2, linestyle='--',
                        color='k', alpha=0.2)
    # epoch variance
    var_ax_divider = make_axes_locatable(var_ax)
    hist_ax = var_ax_divider.append_axes("right", size="33%", pad="2.5%")
    var_ax.scatter(range(len(epoch_var)), epoch_var, alpha=0.5,
                   facecolor=[0, 0, 0], lw=0)
    # rejected epochs in red
    var_ax.scatter(dropped_indices, epoch_var[dropped_indices],
                   alpha=1., facecolor=[1, 0, 0], lw=0)
    # compute percentage of dropped epochs
    var_percent = float(len(dropped_indices)) / float(len(epoch_var)) * 100.
    var_ax.set_yticks([])
    # histogram & histogram
    _, counts, _ = hist_ax.hist(epoch_var, orientation="horizontal",
                                color="k", alpha=.5)
    # kde
    # Kernel-density estimate of the variance distribution, scaled to fit
    # inside the histogram axes for overlay.
    kde = gaussian_kde(epoch_var)
    ymin, ymax = hist_ax.get_ylim()
    x = np.linspace(ymin, ymax, 50)
    kde_ = kde(x)
    kde_ /= kde_.max()
    kde_ *= hist_ax.get_xlim()[-1] * .9
    hist_ax.plot(kde_, x, color="k")
    hist_ax.set_ylim(ymin, ymax)
    # aesthetics
    # ----------
    topo_ax.set_title(ica._ica_names[pick])
    set_title_and_labels(image_ax, kind + ' image and ERP/ERF', [], kind)
    # erp
    set_title_and_labels(erp_ax, [], 'Time (s)', 'AU\n')
    erp_ax.spines["right"].set_color('k')
    erp_ax.set_xlim(epochs_src.times[[0, -1]])
    # remove half of yticks if more than 5
    yt = erp_ax.get_yticks()
    if len(yt) > 5:
        erp_ax.yaxis.set_ticks(yt[::2])
    # remove xticks - erp plot shows xticks for both image and erp plot
    image_ax.xaxis.set_ticks([])
    yt = image_ax.get_yticks()
    image_ax.yaxis.set_ticks(yt[1:])
    image_ax.set_ylim([-0.5, n_trials + 0.5])
    # spectrum
    set_title_and_labels(spec_ax, 'Spectrum', 'Frequency (Hz)', psd_ylabel)
    spec_ax.yaxis.labelpad = 0
    spec_ax.set_xlim(freqs[[0, -1]])
    ylim = spec_ax.get_ylim()
    air = np.diff(ylim)[0] * 0.1  # add 10% headroom above and below
    spec_ax.set_ylim(ylim[0] - air, ylim[1] + air)
    image_ax.axhline(0, color='k', linewidth=.5)
    # epoch variance
    var_ax_title = 'Dropped segments : %.2f %%' % var_percent
    set_title_and_labels(var_ax, var_ax_title,
                         kind + ' (index)',
                         'Variance (AU)')
    hist_ax.set_ylabel("")
    hist_ax.set_yticks([])
    set_title_and_labels(hist_ax, None, None, None)
    return fig
def _get_psd_label_and_std(this_psd, dB, ica, num_std):
    """Handle setting up PSD for one component, for plot_ica_properties."""
    psd_ylabel = _convert_psds(this_psd, dB, estimate='auto', scaling=1.,
                               unit='AU', ch_names=ica.ch_names)
    psds_mean = this_psd.mean(axis=0)
    diffs = this_psd - psds_mean
    # The power distribution per frequency bin is highly skewed, so the
    # spread is estimated separately from values below and above the mean;
    # the pair is later used for fill_between shading.
    below = [np.sqrt((col[col < 0] ** 2).mean(axis=0)) for col in diffs.T]
    above = [np.sqrt((col[col > 0] ** 2).mean(axis=0)) for col in diffs.T]
    spectrum_std = np.array([below, above]) * num_std
    return psd_ylabel, psds_mean, spectrum_std
@fill_doc
def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True,
                        plot_std=True, topomap_args=None, image_args=None,
                        psd_args=None, figsize=None, show=True, reject='auto'):
    """Display component properties.

    Properties include the topography, epochs image, ERP/ERF, power
    spectrum, and epoch variance.

    Parameters
    ----------
    ica : instance of mne.preprocessing.ICA
        The ICA solution.
    inst: instance of Epochs or Raw
        The data to use in plotting properties.
    %(picks_base)s the first five sources.
        If more than one components were chosen in the picks,
        each one will be plotted in a separate figure.
    axes: list of matplotlib axes | None
        List of five matplotlib axes to use in plotting: [topomap_axis,
        image_axis, erp_axis, spectrum_axis, variance_axis]. If None a new
        figure with relevant axes is created. Defaults to None.
    dB: bool
        Whether to plot spectrum in dB. Defaults to True.
    plot_std: bool | float
        Whether to plot standard deviation in ERP/ERF and spectrum plots.
        Defaults to True, which plots one standard deviation above/below.
        If set to float allows to control how many standard deviations are
        plotted. For example 2.5 will plot 2.5 standard deviation above/below.
    topomap_args : dict | None
        Dictionary of arguments to ``plot_topomap``. If None, doesn't pass any
        additional arguments. Defaults to None.
    image_args : dict | None
        Dictionary of arguments to ``plot_epochs_image``. If None, doesn't pass
        any additional arguments. Defaults to None.
    psd_args : dict | None
        Dictionary of arguments to ``psd_multitaper``. If None, doesn't pass
        any additional arguments. Defaults to None.
    figsize : array-like, shape (2,) | None
        Allows to control size of the figure. If None, the figure size
        defaults to [7., 6.].
    show : bool
        Show figure if True.
    reject : 'auto' | dict | None
        Allows to specify rejection parameters used to drop epochs
        (or segments if continuous signal is passed as inst).
        If None, no rejection is applied. The default is 'auto',
        which applies the rejection parameters used when fitting
        the ICA object.

    Returns
    -------
    fig : list
        List of matplotlib figures.

    Notes
    -----
    .. versionadded:: 0.13
    """
    from ..io.base import BaseRaw
    from ..epochs import BaseEpochs
    from ..preprocessing import ICA
    from ..io import RawArray
    # input checks and defaults
    # -------------------------
    _validate_type(inst, (BaseRaw, BaseEpochs), "inst", "Raw or Epochs")
    _validate_type(ica, ICA, "ica", "ICA")
    if isinstance(plot_std, bool):
        num_std = 1. if plot_std else 0.
    elif isinstance(plot_std, (float, int)):
        num_std = plot_std
        plot_std = True
    else:
        raise ValueError('plot_std has to be a bool, int or float, '
                         'got %s instead' % type(plot_std))
    # if no picks given - plot the first 5 components
    limit = min(5, ica.n_components_) if picks is None else len(ica.ch_names)
    picks = _picks_to_idx(ica.info, picks, 'all')[:limit]
    if axes is None:
        fig, axes = _create_properties_layout(figsize=figsize)
    else:
        # User-provided axes can only host a single component.
        if len(picks) > 1:
            raise ValueError('Only a single pick can be drawn '
                             'to a set of axes.')
        from .utils import _validate_if_list_of_axes
        _validate_if_list_of_axes(axes, obligatory_len=5)
        fig = axes[0].get_figure()
    psd_args = dict() if psd_args is None else psd_args
    topomap_args = dict() if topomap_args is None else topomap_args
    image_args = dict() if image_args is None else image_args
    image_args["ts_args"] = dict(truncate_xaxis=False, show_sensors=False)
    for item_name, item in (("psd_args", psd_args),
                            ("topomap_args", topomap_args),
                            ("image_args", image_args)):
        _validate_type(item, dict, item_name, "dictionary")
    if dB is not None:
        _validate_type(dB, bool, "dB", "bool")
    # calculations
    # ------------
    if isinstance(inst, BaseRaw):
        # when auto, delegate reject to the ica
        if reject == 'auto':
            reject = getattr(ica, 'reject_', None)
        if reject is None:
            inst_rejected = inst
            drop_inds = None
        else:
            data = inst.get_data()
            # Fix: use the resolved `reject` parameters here. Previously
            # `ica.reject_` was passed unconditionally, silently ignoring a
            # user-supplied reject dict (and raising AttributeError when the
            # ICA object had no `reject_`).
            data, drop_inds = _reject_data_segments(data, reject,
                                                    flat=None, decim=None,
                                                    info=inst.info,
                                                    tstep=2.0)
            inst_rejected = RawArray(data, inst.info)
        # break up continuous signal into segments
        from ..epochs import _segment_raw
        inst_rejected = _segment_raw(inst_rejected,
                                     segment_length=2.,
                                     verbose=False,
                                     preload=True)
        inst = _segment_raw(inst, segment_length=2., verbose=False,
                            preload=True)
        kind = "Segment"
    else:
        drop_inds = None
        inst_rejected = inst
        kind = "Epochs"
    epochs_src = ica.get_sources(inst_rejected)
    data = epochs_src.get_data()
    ica_data = np.swapaxes(data[:, picks, :], 0, 1)
    # getting dropped epochs indexes
    if drop_inds is not None:
        dropped_indices = [(d[0] // len(inst.times)) + 1
                           for d in drop_inds]
    else:
        dropped_indices = []
    # getting ica sources from inst
    dropped_src = ica.get_sources(inst).get_data()
    dropped_src = np.swapaxes(dropped_src[:, picks, :], 0, 1)
    # spectrum
    Nyquist = inst.info['sfreq'] / 2.
    lp = inst.info['lowpass']
    if 'fmax' not in psd_args:
        # Don't compute PSD far beyond the low-pass edge (nor past Nyquist).
        psd_args['fmax'] = min(lp * 1.25, Nyquist)
    plot_lowpass_edge = lp < Nyquist and (psd_args['fmax'] > lp)
    psds, freqs = psd_multitaper(epochs_src, picks=picks, **psd_args)

    def set_title_and_labels(ax, title, xlab, ylab):
        # Small helper shared by all panels; empty/None values are skipped.
        if title:
            ax.set_title(title)
        if xlab:
            ax.set_xlabel(xlab)
        if ylab:
            ax.set_ylabel(ylab)
        ax.axis('auto')
        ax.tick_params('both', labelsize=8)
        ax.axis('tight')

    # plot
    # ----
    all_fig = list()
    for idx, pick in enumerate(picks):
        # calculate component-specific spectrum stuff
        psd_ylabel, psds_mean, spectrum_std = _get_psd_label_and_std(
            psds[:, idx, :].copy(), dB, ica, num_std)
        # if more than one component, spawn additional figures and axes
        if idx > 0:
            fig, axes = _create_properties_layout(figsize=figsize)
        # we reconstruct an epoch_variance with 0 where indexes where dropped
        epoch_var = np.var(ica_data[idx], axis=1)
        drop_var = np.var(dropped_src[idx], axis=1)
        drop_indices_corrected = \
            (dropped_indices -
             np.arange(len(dropped_indices))).astype(int)
        epoch_var = np.insert(arr=epoch_var,
                              obj=drop_indices_corrected,
                              values=drop_var[dropped_indices],
                              axis=0)
        # the actual plot
        fig = _plot_ica_properties(
            pick, ica, inst, psds_mean, freqs, ica_data.shape[1],
            epoch_var, plot_lowpass_edge,
            epochs_src, set_title_and_labels, plot_std, psd_ylabel,
            spectrum_std, topomap_args, image_args, fig, axes, kind,
            dropped_indices)
        all_fig.append(fig)
    plt_show(show)
    return all_fig
def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, ica,
                             labels=None):
    """Plot average over epochs in ICA space.

    Parameters
    ----------
    evoked : instance of mne.Evoked
        The Evoked to be used.
    %(picks_base)s all sources in the order as fitted.
    exclude : array-like of int
        The components marked for exclusion. If None (default), ICA.exclude
        will be used.
    title : str
        The figure title.
    show : bool
        Show figure if True.
    ica : instance of mne.preprocessing.ICA
        The ICA solution (used for component names).
    labels : None | dict
        The ICA labels attribute.
    """
    import matplotlib.pyplot as plt
    from matplotlib import patheffects
    if title is None:
        title = 'Reconstructed latent sources, time-locked'
    fig, axes = plt.subplots(1)
    ax = axes
    axes = [axes]
    times = evoked.times * 1e3  # seconds -> milliseconds
    # plot unclassified sources and label excluded ones
    lines = list()
    texts = list()
    picks = np.sort(picks)
    idxs = [picks]
    if labels is not None:
        # Top-level label categories only; '/'-containing keys are sublabels.
        labels_used = [k for k in labels if '/' not in k]
    exclude_labels = list()
    # Build a per-pick legend label ("<name> - <label1>, <label2>") for
    # excluded components; None for kept ones.
    for ii in picks:
        if ii in exclude:
            line_label = ica._ica_names[ii]
            if labels is not None:
                annot = list()
                for this_label in labels_used:
                    indices = labels[this_label]
                    if ii in indices:
                        annot.append(this_label)
                line_label += (' - ' + ', '.join(annot))
            exclude_labels.append(line_label)
        else:
            exclude_labels.append(None)
    if labels is not None:
        # compute colors only based on label categories
        unique_labels = {k.split(' - ')[1] for k in exclude_labels if k}
        label_colors = plt.cm.rainbow(np.linspace(0, 1, len(unique_labels)))
        label_colors = dict(zip(unique_labels, label_colors))
    else:
        label_colors = {k: 'red' for k in exclude_labels}
    for exc_label, ii in zip(exclude_labels, picks):
        if exc_label is not None:
            # create look up for color ...
            if ' - ' in exc_label:
                key = exc_label.split(' - ')[1]
            else:
                key = exc_label
            color = label_colors[key]
            # ... but display component number too
            lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
                                 zorder=2, color=color, label=exc_label))
        else:
            # Non-excluded sources drawn in black behind the colored ones.
            lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
                                 color='k', zorder=1))
    ax.set(title=title, xlim=times[[0, -1]], xlabel='Time (ms)', ylabel='(NA)')
    if len(exclude) > 0:
        plt.legend(loc='best')
    tight_layout(fig=fig)
    # for old matplotlib, we actually need this to have a bounding
    # box (!), so we have to put some valid text here, change
    # alpha and path effects later
    texts.append(ax.text(0, 0, 'blank', zorder=3,
                         verticalalignment='baseline',
                         horizontalalignment='left',
                         fontweight='bold', alpha=0))
    # this is done to give the structure of a list of lists of a group of lines
    # in each subplot
    lines = [lines]
    ch_names = evoked.ch_names
    path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
                                           alpha=0.75)]
    params = dict(axes=axes, texts=texts, lines=lines, idxs=idxs,
                  ch_names=ch_names, need_draw=False,
                  path_effects=path_effects)
    # Interactive callbacks: clicking a trace highlights/labels it.
    fig.canvas.mpl_connect('pick_event',
                           partial(_butterfly_onpick, params=params))
    fig.canvas.mpl_connect('button_press_event',
                           partial(_butterfly_on_button_press,
                                   params=params))
    plt_show(show)
    return fig
def plot_ica_scores(ica, scores, exclude=None, labels=None, axhline=None,
                    title='ICA component scores', figsize=None, show=True):
    """Plot scores related to detected components.

    Use this function to asses how well your score describes outlier
    sources and how well you were detecting them.

    Parameters
    ----------
    ica : instance of mne.preprocessing.ICA
        The ICA object.
    scores : array-like of float, shape (n_ica_components,) | list of array
        Scores based on arbitrary metric to characterize ICA components.
    exclude : array-like of int
        The components marked for exclusion. If None (default), ICA.exclude
        will be used.
    labels : str | list | 'ecg' | 'eog' | None
        The labels to consider for the axes tests. Defaults to None.
        If list, should match the outer shape of `scores`.
        If 'ecg' or 'eog', the ``labels_`` attributes will be looked up.
        Note that '/' is used internally for sublabels specifying ECG and
        EOG channels.
    axhline : float
        Draw horizontal line to e.g. visualize rejection threshold.
    title : str
        The figure title.
    figsize : tuple of int | None
        The figure size. If None it gets set automatically.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of Figure
        The figure object
    """
    import matplotlib.pyplot as plt
    my_range = np.arange(ica.n_components_)
    if exclude is None:
        exclude = ica.exclude
    exclude = np.unique(exclude)
    # Normalize to a list of score arrays: one bar subplot per entry.
    if not isinstance(scores[0], (list, np.ndarray)):
        scores = [scores]
    n_rows = len(scores)
    if figsize is None:
        figsize = (6.4, 2.7 * n_rows)
    fig, axes = plt.subplots(n_rows, figsize=figsize, sharex=True, sharey=True)
    if isinstance(axes, np.ndarray):
        axes = axes.flatten()
    else:
        axes = [axes]
    axes[0].set_title(title)
    if labels == 'ecg':
        # NOTE(review): unlike the 'eog' branch, the ecg labels are not
        # sorted by channel index here — confirm whether that is intended.
        labels = [l for l in ica.labels_ if l.startswith('ecg/')]
    elif labels == 'eog':
        labels = [l for l in ica.labels_ if l.startswith('eog/')]
        labels.sort(key=lambda l: l.split('/')[1])  # sort by index
    elif isinstance(labels, str):
        if len(axes) > 1:
            raise ValueError('Need as many labels as axes (%i)' % len(axes))
        labels = [labels]
    elif isinstance(labels, (tuple, list)):
        if len(labels) != len(axes):
            raise ValueError('Need as many labels as axes (%i)' % len(axes))
    elif labels is None:
        labels = (None,) * n_rows
    for label, this_scores, ax in zip(labels, scores, axes):
        if len(my_range) != len(this_scores):
            raise ValueError('The length of `scores` must equal the '
                             'number of ICA components.')
        ax.bar(my_range, this_scores, color='gray', edgecolor='k')
        # Redraw excluded components' bars in red on top.
        for excl in exclude:
            ax.bar(my_range[excl], this_scores[excl], color='r', edgecolor='k')
        if axhline is not None:
            if np.isscalar(axhline):
                axhline = [axhline]
            for axl in axhline:
                ax.axhline(axl, color='r', linestyle='--')
        ax.set_ylabel('score')
        if label is not None:
            # Pretty-print internal 'kind/idx/ch_name' sublabels.
            if 'eog/' in label:
                split = label.split('/')
                label = ', '.join([split[0], split[2]])
            elif '/' in label:
                label = ', '.join(label.split('/'))
            ax.set_title('(%s)' % label)
        ax.set_xlabel('ICA components')
        ax.set_xlim(0, len(this_scores))
    tight_layout(fig=fig)
    plt_show(show)
    return fig
@fill_doc
def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
                     stop=None, title=None, show=True):
    """Overlay of raw and cleaned signals given the unmixing matrix.

    This method helps visualizing signal quality and artifact rejection.

    Parameters
    ----------
    ica : instance of mne.preprocessing.ICA
        The ICA object.
    inst : instance of mne.io.Raw or mne.Evoked
        The signals to be compared given the ICA solution. If Raw input,
        The raw data are displayed before and after cleaning. In a second
        panel the cross channel average will be displayed. Since dipolar
        sources will be canceled out this display is sensitive to
        artifacts. If evoked input, butterfly plots for clean and raw
        signals will be superimposed.
    exclude : array-like of int | None (default)
        The components marked for exclusion. If None (default), ICA.exclude
        will be used.
    %(picks_base)s all channels that were included during fitting.
    start : int
        X-axis start index. If None from the beginning.
    stop : int
        X-axis stop index. If None to the end.
    title : str
        The figure title.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of Figure
        The figure.
    """
    # avoid circular imports
    from ..io.base import BaseRaw
    from ..evoked import Evoked
    from ..preprocessing.ica import _check_start_stop
    _validate_type(inst, (BaseRaw, Evoked), "inst", "Raw or Evoked")
    if title is None:
        title = 'Signals before (red) and after (black) cleaning'
    picks = ica.ch_names if picks is None else picks
    picks = _picks_to_idx(inst.info, picks, exclude=())
    if exclude is None:
        exclude = ica.exclude
    if not isinstance(exclude, (np.ndarray, list)):
        raise TypeError('exclude must be of type list. Got %s'
                        % type(exclude))
    if isinstance(inst, BaseRaw):
        # Default to the first 3 seconds of data.
        if start is None:
            start = 0.0
        if stop is None:
            stop = 3.0
        ch_types_used = [k for k in ['mag', 'grad', 'eeg'] if k in ica]
        start_compare, stop_compare = _check_start_stop(inst, start, stop)
        data, times = inst[picks, start_compare:stop_compare]
        raw_cln = ica.apply(inst.copy(), exclude=exclude,
                            start=start, stop=stop)
        data_cln, _ = raw_cln[picks, start_compare:stop_compare]
        fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
                                    times=times, title=title,
                                    ch_types_used=ch_types_used, show=show)
    elif isinstance(inst, Evoked):
        inst = inst.copy().crop(start, stop)
        # NOTE(review): `picks` is assigned unconditionally above, so this
        # check is always true — confirm whether a None-check was intended.
        if picks is not None:
            inst.info['comps'] = []  # can be safely disabled
            inst.pick_channels([inst.ch_names[p] for p in picks])
        evoked_cln = ica.apply(inst.copy(), exclude=exclude)
        fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
                                       title=title, show=show)
    return fig
def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used, show):
    """Plot raw data before (red) and after (black) ICA cleaning.

    Parameters
    ----------
    data : 2D array
        The original data slice; rows are plotted as traces over ``times``.
    data_cln : 2D array
        The cleaned data slice; must have the same shape as ``data``.
    times : array
        Time points (seconds) for the x-axis.
    title : str
        The figure suptitle.
    ch_types_used : list of str
        Subset of ['mag', 'grad', 'eeg'] used in the second panel's title.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : instance of Figure
    """
    import matplotlib.pyplot as plt
    # Restore sensor space data and keep all PCA components
    # let's now compare the date before and after cleaning.
    # first the raw data
    assert data.shape == data_cln.shape
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    plt.suptitle(title)
    ax1.plot(times, data.T, color='r')
    ax1.plot(times, data_cln.T, color='k')
    ax1.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Raw data')
    _ch_types = {'mag': 'Magnetometers',
                 'grad': 'Gradiometers',
                 'eeg': 'EEG'}
    ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
    # Second panel: cross-channel average, sensitive to residual artifacts.
    ax2.set_title('Average across channels ({})'.format(ch_types))
    ax2.plot(times, data.mean(0), color='r')
    ax2.plot(times, data_cln.mean(0), color='k')
    ax2.set(xlabel='Time (s)', xlim=times[[0, -1]])
    tight_layout(fig=fig)
    fig.subplots_adjust(top=0.90)
    fig.canvas.draw()
    plt_show(show)
    return fig
def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show):
    """Plot evoked data before (red) and after (black) ICA cleaning.

    Parameters
    ----------
    evoked : instance of mne.Evoked
        The original evoked data.
    evoked_cln : instance of mne.Evoked
        The cleaned evoked data; must contain the same channel types.
    title : str
        NOTE(review): currently unused — a fixed suptitle is drawn instead.
        Confirm whether ``fig.suptitle(title)`` was intended.
    show : bool
        If True, all open plots will be shown.

    Returns
    -------
    fig : instance of Figure
    """
    import matplotlib.pyplot as plt
    ch_types_used = [c for c in ['mag', 'grad', 'eeg'] if c in evoked]
    n_rows = len(ch_types_used)
    ch_types_used_cln = [c for c in ['mag', 'grad', 'eeg'] if
                         c in evoked_cln]
    if len(ch_types_used) != len(ch_types_used_cln):
        raise ValueError('Raw and clean evokeds must match. '
                         'Found different channels.')
    fig, axes = plt.subplots(n_rows, 1)
    fig.suptitle('Average signal before (red) and after (black) ICA')
    axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
    # Draw the original evoked first, recolor its traces red, then overlay
    # the cleaned evoked in the default (black) colors.
    evoked.plot(axes=axes, show=show, time_unit='s')
    for ax in fig.axes:
        for l in ax.get_lines():
            l.set_color('r')
    fig.canvas.draw()
    evoked_cln.plot(axes=axes, show=show, time_unit='s')
    tight_layout(fig=fig)
    fig.subplots_adjust(top=0.90)
    fig.canvas.draw()
    plt_show(show)
    return fig
def _plot_sources_raw(ica, raw, picks, exclude, start, stop, show, title,
                      block, show_first_samp):
    """Plot the ICA components as raw array.

    Builds a raw-browser figure over the ICA source time courses, appending
    any EOG/ECG channels from ``raw`` (rescaled) for visual reference, and
    wires up the interactive callbacks. Components marked in ``exclude``
    are shown as "bads"; selections made in the browser are written back to
    ``ica.exclude`` on close (see ``_close_event``).
    """
    color = _handle_default('color', (0., 0., 0.))
    orig_data = ica._transform_raw(raw, 0, len(raw.times)) * 0.2
    types = ['misc' for _ in picks]
    eog_chs = pick_types(raw.info, meg=False, eog=True, ref_meg=False)
    ecg_chs = pick_types(raw.info, meg=False, ecg=True, ref_meg=False)
    data = [orig_data[pick] for pick in picks]
    c_names = list(ica._ica_names)  # new list
    for eog_idx in eog_chs:
        c_names.append(raw.ch_names[eog_idx])
        types.append('eog')
    for ecg_idx in ecg_chs:
        c_names.append(raw.ch_names[ecg_idx])
        types.append('ecg')
    extra_picks = np.append(eog_chs, ecg_chs).astype(int)
    if len(extra_picks) > 0:
        eog_ecg_data, _ = raw[extra_picks, :]
        for idx in range(len(eog_ecg_data)):
            if idx < len(eog_chs):
                eog_ecg_data[idx] /= 150e-6  # scaling for eog
            else:
                eog_ecg_data[idx] /= 5e-4  # scaling for ecg
        data = np.append(data, eog_ecg_data, axis=0)
    # Extend picks so the appended EOG/ECG rows are displayed too.
    for idx in range(len(extra_picks)):
        picks = np.append(picks, ica.n_components_ + idx)
    if title is None:
        title = 'ICA components'
    info = create_info([c_names[x] for x in picks], raw.info['sfreq'])
    # Excluded components are rendered as "bad" channels in the browser.
    info['bads'] = [c_names[x] for x in exclude]
    if start is None:
        start = 0
    if stop is None:
        stop = start + 20
        stop = min(stop, raw.times[-1])
    duration = stop - start
    if duration <= 0:
        raise RuntimeError('Stop must be larger than start.')
    t_end = int(duration * raw.info['sfreq'])
    times = raw.times[0:t_end]
    bad_color = (1., 0., 0.)
    inds = list(range(len(picks)))
    data = np.array(data)
    n_channels = min([20, len(picks)])
    first_time = raw._first_time if show_first_samp else 0
    start += first_time
    params = dict(raw=raw, orig_data=data, data=data[:, 0:t_end], inds=inds,
                  ch_start=0, t_start=start, info=info, duration=duration,
                  ica=ica, n_channels=n_channels, times=times, types=types,
                  n_times=raw.n_times, bad_color=bad_color, picks=picks,
                  first_time=first_time, data_picks=[], decim=1,
                  noise_cov=None, whitened_ch_names=())
    _prepare_mne_browse_raw(params, title, 'w', color, bad_color, inds,
                            n_channels)
    params['scale_factor'] = 1.0
    params['plot_fun'] = partial(_plot_raw_traces, params=params, color=color,
                                 bad_color=bad_color)
    params['update_fun'] = partial(_update_data, params)
    params['pick_bads_fun'] = partial(_pick_bads, params=params)
    params['label_click_fun'] = partial(_label_clicked, params=params)
    _layout_figure(params)
    # callbacks
    callback_key = partial(_plot_raw_onkey, params=params)
    params['fig'].canvas.mpl_connect('key_press_event', callback_key)
    callback_scroll = partial(_plot_raw_onscroll, params=params)
    params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
    callback_pick = partial(_mouse_click, params=params)
    params['fig'].canvas.mpl_connect('button_press_event', callback_pick)
    callback_resize = partial(_helper_raw_resize, params=params)
    params['fig'].canvas.mpl_connect('resize_event', callback_resize)
    callback_close = partial(_close_event, params=params)
    params['fig'].canvas.mpl_connect('close_event', callback_close)
    params['fig_proj'] = None
    params['event_times'] = None
    params['butterfly'] = False
    params['update_fun']()
    params['plot_fun']()
    try:
        plt_show(show, block=block)
    except TypeError:  # not all versions have this
        plt_show(show)
    return params['fig']
def _update_data(params):
"""Prepare the data on horizontal shift of the viewport."""
sfreq = params['info']['sfreq']
start = int((params['t_start'] - params['first_time']) * sfreq)
end = int((params['t_start'] + params['duration']) * sfreq)
params['data'] = params['orig_data'][:, start:end]
params['times'] = params['raw'].times[start:end]
def _pick_bads(event, params):
    """Select components on click."""
    current_bads = params['info']['bads']
    params['info']['bads'] = _select_bads(event, params, current_bads)
    # Refresh the visible data and redraw the traces.
    for refresh in (params['update_fun'], params['plot_fun']):
        refresh()
def _close_event(events, params):
"""Exclude the selected components on close."""
info = params['info']
exclude = [params['ica']._ica_names.index(x)
for x in info['bads'] if x.startswith('ICA')]
params['ica'].exclude = exclude
def _plot_sources_epochs(ica, epochs, picks, exclude, start, stop, show,
                         title, block):
    """Plot the components as epochs.

    Builds a synthetic measurement info with one 'misc' channel per ICA
    component (plus any EOG/ECG channels found in ``epochs``) and drives
    the standard epochs browser on the ICA source time courses.
    ``start``/``stop`` are epoch indices, ``exclude`` lists component
    indices marked bad, and ``block`` controls whether plt.show blocks.
    Returns the browser figure.
    """
    # Component activations concatenated over epochs:
    # shape (n_components, n_epochs * n_times).
    data = ica._transform_epochs(epochs, concatenate=True)
    eog_chs = pick_types(epochs.info, meg=False, eog=True, ref_meg=False)
    ecg_chs = pick_types(epochs.info, meg=False, ecg=True, ref_meg=False)
    c_names = list(ica._ica_names)
    ch_types = np.repeat('misc', ica.n_components_)
    # Append raw EOG/ECG traces so artifacts can be compared visually.
    for eog_idx in eog_chs:
        c_names.append(epochs.ch_names[eog_idx])
        ch_types = np.append(ch_types, 'eog')
    for ecg_idx in ecg_chs:
        c_names.append(epochs.ch_names[ecg_idx])
        ch_types = np.append(ch_types, 'ecg')
    extra_picks = np.append(eog_chs, ecg_chs).astype(int)
    if len(extra_picks) > 0:
        # Concatenate the extra channels over epochs to line up with ``data``.
        eog_ecg_data = np.concatenate(epochs.get_data()[:, extra_picks],
                                      axis=1)
        data = np.append(data, eog_ecg_data, axis=0)
    scalings = _handle_default('scalings_plot_raw')
    scalings['misc'] = 5.0
    info = create_info(ch_names=c_names, sfreq=epochs.info['sfreq'],
                       ch_types=ch_types)
    info['projs'] = list()
    info['bads'] = [c_names[x] for x in exclude]
    if title is None:
        title = 'ICA components'
    if start is None:
        start = 0
    if stop is None:
        # Default view: up to 20 epochs, clamped to what is available.
        stop = start + 20
        stop = min(stop, len(epochs.events))
    # Show the appended EOG/ECG channels after the component rows.
    for idx in range(len(extra_picks)):
        picks = np.append(picks, ica.n_components_ + idx)
    n_epochs = stop - start
    if n_epochs <= 0:
        raise RuntimeError('Stop must be larger than start.')
    # State dict shared by all browser callbacks; t_start is in samples.
    params = dict(ica=ica, epochs=epochs, info=info, orig_data=data,
                  bads=list(), bad_color=(1., 0., 0.),
                  t_start=start * len(epochs.times),
                  data_picks=list(), decim=1, whitened_ch_names=(),
                  noise_cov=None)
    params['label_click_fun'] = partial(_label_clicked, params=params)
    # changing the order to 'misc' before 'eog' and 'ecg'
    order = list(_DATA_CH_TYPES_ORDER_DEFAULT)
    order.pop(order.index('misc'))
    order.insert(order.index('eog'), 'misc')
    _prepare_mne_browse_epochs(params, projs=list(), n_channels=20,
                               n_epochs=n_epochs, scalings=scalings,
                               title=title, picks=picks,
                               order=order)
    params['plot_update_proj_callback'] = _update_epoch_data
    _update_epoch_data(params)
    params['hsel_patch'].set_x(params['t_start'])
    callback_close = partial(_close_epochs_event, params=params)
    params['fig'].canvas.mpl_connect('close_event', callback_close)
    try:
        plt_show(show, block=block)
    except TypeError:  # not all versions have this
        plt_show(show)
    return params['fig']
def _update_epoch_data(params):
"""Prepare the data on horizontal shift."""
start = params['t_start']
n_epochs = params['n_epochs']
end = start + n_epochs * len(params['epochs'].times)
data = params['orig_data'][:, start:end]
types = params['types']
for pick, ind in enumerate(params['inds']):
params['data'][pick] = data[ind] / params['scalings'][types[pick]]
params['plot_fun']()
def _close_epochs_event(events, params):
"""Exclude the selected components on close."""
info = params['info']
exclude = [info['ch_names'].index(x) for x in info['bads']
if x.startswith('IC')]
params['ica'].exclude = exclude
def _label_clicked(pos, params):
    """Plot independent components on click to label.

    ``pos`` is the (x, y) data position of the click; the y coordinate
    selects which displayed row was clicked. Opens a new figure with one
    topomap per available channel type for the clicked component.
    """
    import matplotlib.pyplot as plt
    # Map the click's y position onto a row index within the view.
    offsets = np.array(params['offsets']) + params['offsets'][0]
    line_idx = np.searchsorted(offsets, pos[1]) + params['ch_start']
    if line_idx >= len(params['picks']):
        return
    ic_idx = [params['picks'][line_idx]]
    if params['types'][line_idx] != 'misc':
        # Only component rows (type 'misc') have topographies; the
        # appended EOG/ECG rows do not.
        warn('Can only plot ICA components.')
        return
    # Channel types present in the fitted data determine which topomaps
    # can be drawn.
    types = list()
    info = params['ica'].info
    if len(pick_types(info, meg=False, eeg=True, ref_meg=False)) > 0:
        types.append('eeg')
    if len(pick_types(info, meg='mag', ref_meg=False)) > 0:
        types.append('mag')
    if len(pick_types(info, meg='grad', ref_meg=False)) > 0:
        types.append('grad')
    ica = params['ica']
    # Back-project the selected component into sensor space.
    data = np.dot(ica.mixing_matrix_[:, ic_idx].T,
                  ica.pca_components_[:ica.n_components_])
    data = np.atleast_2d(data)
    fig, axes = _prepare_trellis(len(types), max_col=3)
    for ch_idx, ch_type in enumerate(types):
        try:
            data_picks, pos, merge_grads, _, _ = _prepare_topo_plot(ica,
                                                                    ch_type,
                                                                    None)
        except Exception as exc:
            # e.g. no layout available for this channel type; close the
            # partially built figure and bail out cleanly.
            warn(exc)
            plt.close(fig)
            return
        this_data = data[:, data_picks]
        ax = axes[ch_idx]
        if merge_grads:
            from ..channels.layout import _merge_grad_data
        for ii, data_ in zip(ic_idx, this_data):
            ax.set_title('%s %s' % (ica._ica_names[ii], ch_type), fontsize=12)
            # Gradiometer pairs are merged into a single magnitude map.
            data_ = _merge_grad_data(data_) if merge_grads else data_
            plot_topomap(data_.flatten(), pos, axes=ax, show=False)
        _hide_frame(ax)
    tight_layout(fig=fig)
    fig.subplots_adjust(top=0.88, bottom=0.)
    fig.canvas.draw()
    plt_show(True)
| |
# -*- coding:utf-8 -*-
'''
Library for generating XML as a stream without first building a tree in memory.
Basic usage::
import elementflow
file = open('text.xml', 'w') # can be any object with .write() method
with elementflow.xml(file, u'root') as xml:
xml.element(u'item', attrs={u'key': u'value'}, text=u'text')
with xml.container(u'container', attrs={u'key': u'value'}):
xml.text(u'text')
xml.element(u'subelement', text=u'subelement text')
Usage with namespaces::
with elementflow.xml(file, 'root', namespaces={'': 'urn:n', 'n1': 'urn:n1'}) as xml:
xml.element('item')
with xml.container('container', namespaces={'n2': 'urn:n2'}):
xml.element('n1:subelement')
xml.element('n2:subelement')
Pretty-printing::
with elementflow.xml(file, 'root', indent=True):
# ...
'''
import textwrap
import codecs
def escape(value):
    """Escape XML special characters in text content.

    Replaces ``&`` and ``<`` with their character entities; other
    characters (including ``>``) are left as-is, which is sufficient
    for well-formed XML text nodes.
    """
    if '&' not in value and '<' not in value:
        return value
    # Bug fix: the replacements were no-ops ('&' -> '&'); the entity
    # references had been lost, so nothing was actually escaped.
    return value.replace('&', '&amp;').replace('<', '&lt;')
def quoteattr(value):
    """Return an attribute value quoted for XML output.

    Escapes ``&``, ``<`` and ``"`` and wraps the result in double
    quotes.
    """
    if '&' in value or '<' in value or '"' in value:
        # Bug fix: the replacements were no-ops; the character entity
        # references had been lost, so attribute values were unescaped.
        value = value.replace('&', '&amp;').replace('<', '&lt;').replace('"', '&quot;')
    return u'"%s"' % value
def attr_str(attrs):
    """Render an attribute dict as a ``key="value"`` string (with a
    leading space per attribute), or an empty string for no attributes.
    """
    if not attrs:
        return u''
    # .items() works on both Python 2 and 3; iteritems() is 2.x-only.
    return u''.join([u' %s=%s' % (k, quoteattr(v)) for k, v in attrs.items()])
class XMLGenerator(object):
    '''
    Basic generator without support for namespaces or pretty-printing.

    Constructor accepts:

    - file: an object receiving XML output, anything with .write()
    - root: name of the root element
    - attrs: attributes dict

    The root container element is opened implicitly by the constructor;
    there is no need to call .container() for it.
    '''
    def __init__(self, file, root, attrs={}, **kwargs):
        # All output is utf-8 encoded on the way to the target file.
        self.file = codecs.getwriter('utf-8')(file)
        self.file.write(u'<?xml version="1.0" encoding="utf-8"?>')
        self.stack = []
        self.container(root, attrs, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        if exc_type:
            # Leave the document unterminated when an exception escaped.
            return
        self.file.write(u'</%s>' % self.stack.pop())

    def container(self, name, attrs={}):
        '''
        Open a new element that will contain sub-elements and text
        nodes. Meant to be used under a ``with`` statement.
        '''
        self.file.write(u'<%s%s>' % (name, attr_str(attrs)))
        self.stack.append(name)
        return self

    def element(self, name, attrs={}, text=u''):
        '''
        Emit a single element, empty or with text contents.
        '''
        if not text:
            self.file.write(u'<%s%s/>' % (name, attr_str(attrs)))
        else:
            self.file.write(u'<%s%s>%s</%s>' % (name, attr_str(attrs), escape(text), name))

    def text(self, value):
        '''
        Emit escaped text inside the currently open container.
        '''
        self.file.write(escape(value))

    def map(self, func, sequence):
        '''
        Translate a sequence of objects into XML elements. ``func`` maps
        an object from the sequence to a tuple of arguments for the
        "element" method.
        '''
        for item in sequence:
            self.element(*func(item))
class NamespacedGenerator(XMLGenerator):
    '''
    XML generator with support for namespaces.

    Keeps a stack of declared namespace prefixes (one set per open
    container) and validates that every prefixed element/attribute name
    uses a declared prefix.
    '''
    def __init__(self, file, root, attrs={}, namespaces={}):
        # The 'xml' prefix is implicitly declared for every document.
        self.namespaces = [set(['xml'])]
        super(NamespacedGenerator, self).__init__(file, root, attrs=attrs,
                                                  namespaces=namespaces)

    def _process_namespaces(self, name, attrs, namespaces):
        # Bug fix: build a copy instead of mutating with |= — the
        # in-place union modified the set stored on the stack, leaking
        # prefixes declared on this element into the enclosing scope.
        prefixes = set(self.namespaces[-1])
        if namespaces:
            prefixes |= set(namespaces.keys())
        # list(attrs.keys()) keeps this working on Python 3, where
        # dict.keys() is a view that cannot be added to a list.
        names = [n for n in [name] + list(attrs.keys()) if ':' in n]
        for name in names:
            prefix = name.split(':')[0]
            if prefix not in prefixes:
                raise ValueError('Unknown namespace prefix: %s' % prefix)
        if namespaces:
            # .items() instead of the Python-2-only .iteritems().
            namespaces = dict([
                (u'xmlns:%s' % k if k else u'xmlns', v)
                for k, v in namespaces.items()
            ])
            attrs = dict(attrs, **namespaces)
        return attrs, prefixes

    def __exit__(self, exc_type, exc_value, exc_tb):
        super(NamespacedGenerator, self).__exit__(exc_type, exc_value, exc_tb)
        self.namespaces.pop()

    def container(self, name, attrs={}, namespaces={}):
        attrs, prefixes = self._process_namespaces(name, attrs, namespaces)
        self.namespaces.append(prefixes)
        return super(NamespacedGenerator, self).container(name, attrs)

    def element(self, name, attrs={}, namespaces={}, text=u''):
        attrs, prefixes = self._process_namespaces(name, attrs, namespaces)
        super(NamespacedGenerator, self).element(name, attrs, text)
class IndentingGenerator(NamespacedGenerator):
    '''
    XML generator with pretty-printing.
    '''
    def _fill(self, value, indent=None):
        # Wrap long text to roughly 70 columns, indented to the current
        # nesting depth unless an explicit indent is supplied.
        if indent is None:
            indent = u' ' * len(self.stack)
        width = max(20, 70 - len(indent))
        wrapper = textwrap.TextWrapper(width=width, initial_indent=indent,
                                       subsequent_indent=indent)
        return u'\n%s' % wrapper.fill(value)

    def __exit__(self, *args, **kwargs):
        self.file.write(u'\n%s' % (u' ' * (len(self.stack) - 1)))
        super(IndentingGenerator, self).__exit__(*args, **kwargs)
        if not self.stack:
            # Finish the document with a trailing newline.
            self.file.write(u'\n')

    def container(self, *args, **kwargs):
        self.file.write(u'\n%s' % (u' ' * len(self.stack)))
        return super(IndentingGenerator, self).container(*args, **kwargs)

    def element(self, name, attrs={}, namespaces={}, text=u''):
        indent = u' ' * len(self.stack)
        self.file.write(u'\n%s' % indent)
        if len(text) > 70:
            # Long text gets wrapped onto its own indented lines.
            text = u'%s\n%s' % (self._fill(text, indent + u' '), indent)
        return super(IndentingGenerator, self).element(name, attrs, namespaces, text)

    def text(self, value):
        super(IndentingGenerator, self).text(self._fill(value))
class Queue(object):
    '''
    In-memory queue used as a temporary output buffer by the xml
    generator.
    '''
    def __init__(self):
        self.data = bytearray()

    def __len__(self):
        return len(self.data)

    def write(self, value):
        self.data.extend(value)

    def pop(self):
        '''Return everything buffered so far and reset the buffer.'''
        # Bug fix: bytes() instead of str() — on Python 3, str() of a
        # bytearray returns its repr ("bytearray(b'...')") rather than
        # the contents; on Python 2, bytes is an alias of str, so the
        # behavior is unchanged.
        result = bytes(self.data)
        self.data = bytearray()
        return result
def xml(file, root, attrs={}, namespaces={}, indent=False):
    '''
    Creates a streaming XML generator.

    Parameters:

    - file: an object receiving XML output, anything with .write()
    - root: name of the root element
    - attrs: attributes dict
    - namespaces: namespaces dict {prefix: uri}, default namespace has prefix ''
    - indent: whether to pretty-print XML, True or False (default)
    '''
    # Pick the richest generator the caller's options require.
    if indent:
        return IndentingGenerator(file, root, attrs, namespaces)
    if namespaces:
        return NamespacedGenerator(file, root, attrs, namespaces)
    return XMLGenerator(file, root, attrs)
| |
import fnmatch
import functools
import re
import urllib.parse
from collections import defaultdict
from .errors import abort
# Registry of service handlers keyed by ICAP endpoint path (e.g.
# '/reqmod'); each value is a list of (criteria, handler, raw) tuples,
# populated by the ``handler`` decorator below and ordered by
# ``sort_handlers``.
_HANDLERS = defaultdict(list)
# Public API. The composite criteria (AnyOfCriteria, AllOfCriteria,
# AlwaysCriteria) are not exported here; they are reachable via the
# ``&``/``|`` operators and the bare ``handler`` decorator.
__all__ = [
    'BaseCriteria',
    'ContentTypeCriteria',
    'DomainCriteria',
    'HTTPRequestCriteria',
    'HTTPResponseCriteria',
    'HeaderCriteria',
    'MethodCriteria',
    'RegexCriteria',
    'StatusCodeCriteria',
    'handler',
]
@functools.total_ordering
class BaseCriteria(object):
    """Foundation for custom service-handler matching criteria.

    Subclasses implement ``__call__``; instances order by ``priority``
    (filled out by ``total_ordering``) and compose with ``&`` / ``|``.
    """
    # Sort weight used when ordering registered handlers.
    priority = 1

    def __lt__(self, other):
        return self.priority < other.priority

    def __call__(self, request):
        raise NotImplementedError()

    def __and__(self, other):
        return AllOfCriteria(self, other)

    def __or__(self, other):
        return AnyOfCriteria(self, other)
class AnyOfCriteria(BaseCriteria):
    """Criteria matching when at least one child criterion matches."""
    def __init__(self, *criteria):
        super().__init__()
        self.criteria = criteria

    def __call__(self, request):
        for criterion in self.criteria:
            if criterion(request):
                return True
        return False

    def __str__(self):
        inner = ', '.join(map(str, self.criteria))
        return '<%s (%s)>' % (self.__class__.__name__, inner)
class AllOfCriteria(BaseCriteria):
    """Criteria matching only when every child criterion matches."""
    def __init__(self, *criteria):
        super().__init__()
        self.criteria = criteria

    def __call__(self, request):
        for criterion in self.criteria:
            if not criterion(request):
                return False
        return True

    def __str__(self):
        inner = ', '.join(map(str, self.criteria))
        return '<%s (%s)>' % (self.__class__.__name__, inner)
class RegexCriteria(BaseCriteria):
    """Criteria that matches requests by applying a regex to the URL."""
    priority = 3

    def __init__(self, regex):
        super().__init__()
        # Compile once up front; matching happens per request.
        self.regex = re.compile(regex)

    def __call__(self, request):
        full_url = urllib.parse.urlunparse(request.session['url'])
        return self.regex.match(full_url) is not None

    def __str__(self):
        return '<%s (%r)>' % (self.__class__.__name__, self.regex.pattern)
class DomainCriteria(BaseCriteria):
    """Criteria that processes requests based on the domain.

    Supports globbing, e.g. "*google.com" matches "www.google.com", and
    "go?gle.com" matches "goggle.com" and "google.com".
    """
    priority = 2

    def __init__(self, *domains):
        super().__init__()
        self.domains = domains

    def __call__(self, request):
        # REQMOD carries the request headers directly; RESPMOD exposes
        # them as request_headers on the encapsulated message.
        if request.is_reqmod:
            headers = request.http.headers
        else:
            headers = request.http.request_headers
        host = headers.get('Host', '')
        return any(fnmatch.fnmatch(host, pattern)
                   for pattern in self.domains)

    def __str__(self):
        return '<%s (%r)>' % (self.__class__.__name__, ', '.join(self.domains))
class ContentTypeCriteria(BaseCriteria):
    """Criteria that matches responses based on the Content-Type header."""
    priority = 2

    def __init__(self, *content_types):
        super().__init__()
        self.content_types = content_types

    def __call__(self, request):
        # Strip any parameters (e.g. "; charset=utf-8") before comparing.
        raw = request.http.headers.get('content-type', '')
        media_type = raw.split(';')[0]
        return media_type in self.content_types

    def __str__(self):
        return '<%s (%r)>' % (self.__class__.__name__, ', '.join(self.content_types))
class MethodCriteria(BaseCriteria):
    """Criteria that matches on the method of the encapsulated HTTP request."""
    def __init__(self, *methods):
        # Methods compare case-insensitively; normalize once here.
        self.methods = set(m.upper() for m in methods)

    def __call__(self, request):
        return request.http.request_line.method in self.methods
class HTTPRequestCriteria(BaseCriteria):
    """Criteria that matches if the request is a REQMOD."""
    def __call__(self, request):
        # True when the encapsulated message is an HTTP request.
        return request.is_reqmod
class HTTPResponseCriteria(BaseCriteria):
    """Criteria that matches if the request is a RESPMOD."""
    def __call__(self, request):
        # True when the encapsulated message is an HTTP response.
        return request.is_respmod
class StatusCodeCriteria(HTTPResponseCriteria):
    """Criteria matching on the status code of the encapsulated HTTP
    response. Never matches on HTTP requests.
    """
    def __init__(self, *status_codes):
        self.status_codes = status_codes

    def __call__(self, request):
        # Parent check restricts matching to RESPMOD requests.
        if not super().__call__(request):
            return False
        return request.http.status_line.code in self.status_codes
class HeaderCriteria(BaseCriteria):
    """Criteria matching on the presence of a header, optionally also
    matching on the header's value.
    """
    def __init__(self, key, *values):
        # Header names are case-insensitive; normalize to lowercase.
        self.key = key.lower()
        self.values = set(values)
        # With no expected values, mere presence of the header matches.
        self.check_values = bool(values)

    def __call__(self, request):
        found = request.http.headers.getlist(self.key)
        if not found:
            return False
        if not self.check_values:
            return True
        return bool(self.values & set(found))
class AlwaysCriteria(BaseCriteria):
    """Catch-all criteria that matches every request.

    Used implicitly when :func:`handler` is applied without arguments;
    its high priority makes it sort after more specific criteria.
    """
    priority = 5

    def __call__(self, request):
        return True
def get_handler(request):
    """Return the handler for a given request, and whether it should be
    given the raw ICAP request.

    Will abort with the following codes in given conditions:

    404: no handlers at a given endpoint.
    204: there are handlers at a given endpoint, but none of them matched.
    """
    path = request.request_line.uri.path
    registered = _HANDLERS.get(path)
    if not registered:
        # RFC3507 mandates a 404 when no handler exists at a resource.
        # Squid relays these to the client as internal errors; that used
        # to be configurable here, but a missing endpoint really means a
        # configuration error on the admin's behalf, so the 404 response
        # is now mandatory.
        abort(404)
    if request.is_options:
        return None, True
    for criteria, func, raw in registered:
        if criteria(request):
            return func, raw
    abort(204)
def sort_handlers():
    """Sort _HANDLERS values by priority.

    You should not use this directly.
    """
    # Each entry is (criteria, handler, raw); criteria objects order by
    # their ``priority`` attribute.
    for endpoint in _HANDLERS:
        _HANDLERS[endpoint] = sorted(_HANDLERS[endpoint],
                                     key=lambda entry: entry[0],
                                     reverse=True)
def handler(criteria=None, name='', raw=False):
    """Decorator to be used on functions/methods/classes intended to be
    used for handling request or response modifications.

    Keyword arguments:

    ``criteria`` - the criteria used to decide whether the wrapped
                   callable should be used. If None, it will always be
                   used.
    ``name``     - subpath to use for matching, e.g. a name of 'foo'
                   translates to a uri of ``/foo/reqmod`` or
                   ``/foo/respmod``.
    ``raw``      - If True, the callable will receive an instance of
                   `~icap.models.ICAPRequest` instead of an instance of
                   `~icap.models.HTTPRequest` or
                   `~icap.models.HTTPResponse`.
    """
    criteria = criteria or AlwaysCriteria()

    def inner(wrapped):
        if isinstance(wrapped, type):
            # Classes are instantiated once; reqmod/respmod methods are
            # picked off the instance.
            instance = wrapped()
            reqmod = getattr(instance, 'reqmod', None)
            respmod = getattr(instance, 'respmod', None)
        else:
            # Plain callables register by their own name.
            reqmod = wrapped if wrapped.__name__ == 'reqmod' else None
            respmod = wrapped if wrapped.__name__ == 'respmod' else None
        for func, suffix in ((reqmod, 'reqmod'), (respmod, 'respmod')):
            if not func:
                continue
            key = '/'.join([name, suffix])
            if not key.startswith('/'):
                key = '/%s' % key
            _HANDLERS[key].append((criteria, func, raw))
        return wrapped
    return inner
| |
import json
import plistlib
import re
from collections import OrderedDict
import sublime
import sublime_plugin
from .colors import (
BASE_RED,
BASE_ORANGE,
BASE_YELLOW,
BASE_GREEN,
BASE_CYAN,
BASE_BLUE,
BASE_PURPLE,
BASE_PINK,
X11_COLORS,
color_diff,
HSLA,
)
class ColorCatalog():
    """
    Catalog of colors used in a color scheme

    Colors are bucketed by the nearest base color (red, orange, ...,
    black, white, grey) and assigned stable names such as "red",
    "red2", "red3" ordered by (diff-from-base, lightness key).
    """
    # Nested dict: {base name: {(diff, lightness key): HSLA (alpha=1)}}
    store = None
    # Lazily-built OrderedDict {generated name: HSLA}
    names = None
    # Lazily-built reverse map {HSLA: generated name}
    name_map = None
    def __init__(self):
        self.store = {}
    def items(self):
        """
        :return:
            A list of tuples containing (string name, HSLA color)
        """
        self._generate_maps()
        return self.names.items()
    def name(self, hsla):
        """
        :param hsla:
            An HSLA object
        :return:
            A string name for the color
        """
        self._generate_maps()
        return self.name_map[hsla]
    def _generate_maps(self):
        """
        Generates names for each color based on diff from base colors
        """
        # No-op if the maps are already built; lookup() resets them to
        # None whenever a new color is added.
        if self.names is None:
            self.names = OrderedDict()
            self.name_map = {}
            for base in sorted(self.store.keys()):
                for i, info in enumerate(sorted(self.store[base].keys())):
                    # The first color of a base keeps the bare name;
                    # subsequent ones get numeric suffixes starting at 2.
                    suffix = '' if i == 0 else str(i + 1)
                    hsla = self.store[base][info]
                    self.names[base + suffix] = hsla
                    self.name_map[hsla] = base + suffix
    def lookup(self, hsla):
        """
        :param hsla:
            An HSLA object
        :return:
            A CSSColor object for the HSLA object
        """
        base, diff = self.base_diff(hsla)
        if base not in self.store:
            self.store[base] = {}
        # NOTE(review): lightness sort key is negated for light colors,
        # apparently so colors nearer the extremes sort first — confirm.
        index = -1 * hsla.l if hsla.l > 0.5 else hsla.l
        if (diff, index) not in self.store[base]:
            # New color: invalidate the lazily-built name maps.
            self.names = None
            self.name_map = None
            self.store[base][(diff, index)] = hsla.full_alpha()
        return CSSColor(self, base, diff, hsla)
    @classmethod
    def base_diff(cls, hsla):
        """
        :param hsla:
            An HSLA object
        :return:
            A 2-element tuple of (string base color, float diff)
        """
        # Very dark, very light and desaturated colors map to the
        # achromatic bases before any hue comparison happens.
        if hsla.l < 0.15:
            return ('black', color_diff(hsla, HSLA(0.0, 0.0, 0.0, 1.0)))
        if hsla.l > 0.85:
            return ('white', color_diff(hsla, HSLA(1.0, 1.0, 1.0, 1.0)))
        if hsla.s < 0.1:
            return ('grey', color_diff(hsla, HSLA(0.5, 0.5, 0.5, 1.0)))
        comparisons = [
            ('red', BASE_RED),
            ('orange', BASE_ORANGE),
            ('yellow', BASE_YELLOW),
            ('green', BASE_GREEN),
            ('cyan', BASE_CYAN),
            ('blue', BASE_BLUE),
            ('purple', BASE_PURPLE),
            ('pink', BASE_PINK),
        ]
        # NOTE(review): if every color_diff() were >= 128.0, ``base``
        # would be unbound below — presumably color_diff stays under 128
        # for in-gamut colors; confirm against colors.color_diff.
        diff = 128.0
        for bname, bc in comparisons:
            bdiff = color_diff(bc, hsla)
            if bdiff < diff:
                diff = bdiff
                base = bname
        return (base, diff)
class CSSColor():
    """
    A representation of an HSLA color for use in a CSS document
    """
    catalog = None
    base = None
    diff = None
    color = None

    def __init__(self, catalog, base, diff, color):
        self.catalog = catalog
        self.base = base
        self.diff = diff
        self.color = color

    def dump(self):
        """
        :return:
            A string of the color for use in a CSS document
        """
        # The catalog maps the (full-alpha) color to its variable name.
        var_name = self.catalog.name(self.color.full_alpha())
        if self.color.a >= 1.0:
            return 'var(%s)' % var_name
        # Transparent colors re-apply their alpha via the color() mod.
        return 'color(var(%s) alpha(%.2g))' % (var_name, self.color.a)
class HexCSSColorEncoder(json.JSONEncoder):
    """JSON encoder serializing CSSColor as var()/color() expressions
    and HSLA as hex strings."""
    def default(self, obj):
        if isinstance(obj, CSSColor):
            return obj.dump()
        if isinstance(obj, HSLA):
            return obj.to_hex()
        # Anything else falls back to the stock encoder (raises TypeError).
        return super().default(obj)
class HSLCSSColorEncoder(json.JSONEncoder):
    """JSON encoder serializing CSSColor as var()/color() expressions
    and HSLA as hsl() strings."""
    def default(self, obj):
        if isinstance(obj, CSSColor):
            return obj.dump()
        if isinstance(obj, HSLA):
            # Round to the nearest whole degree / percent.
            h = int(obj.h * 360.0 + 0.5)
            s = int(obj.s * 100.0 + 0.5)
            l = int(obj.l * 100.0 + 0.5)
            return 'hsl(%d, %d%%, %d%%)' % (h, s, l)
        # Anything else falls back to the stock encoder (raises TypeError).
        return super().default(obj)
class UseVariablesInputHandler(sublime_plugin.ListInputHandler):
    # Maps an internal mode key to the human-readable entry shown in the
    # command palette; the display string (not the key) is what Sublime
    # passes back as the command's ``use_variables`` argument.
    options = {
        'hsl': 'HSL - use HSL variables for colors',
        'yes': 'Hex - use hex variables for colors',
        'no': 'None - hardcode all colors'
    }
    def placeholder(self):
        # Grayed-out hint text shown in the input field.
        return 'Use variables'
    def list_items(self):
        # Explicit ordering: HSL first, then hex, then no variables.
        return [self.options['hsl'], self.options['yes'], self.options['no']]
class ConvertColorSchemeCommand(sublime_plugin.WindowCommand):
    """
    Window command converting the active .tmTheme (XML plist) color
    scheme into a .sublime-color-scheme (JSON) document in a new view.
    """
    # Maps tmTheme global setting keys to .sublime-color-scheme keys,
    # in the order they should be emitted.
    global_settings = OrderedDict([
        ('foreground', 'foreground'),
        ('background', 'background'),
        ('accent', 'accent'),
        ('caret', 'caret'),
        ('invisibles', 'invisibles'),
        ('lineHighlight', 'line_highlight'),
        ('rulers', 'rulers'),
        ('selection', 'selection'),
        ('selectionForeground', 'selection_foreground'),
        ('selectionBorder', 'selection_border'),
        ('inactiveSelection', 'inactive_selection'),
        ('inactiveSelectionForeground', 'inactive_selection_foreground'),
        ('misspelling', 'misspelling'),
        ('minimapBorder', 'minimap_border'),
        ('gutter', 'gutter'),
        ('gutterForeground', 'gutter_foreground'),
        ('shadow', 'shadow'),
        ('shadowWidth', 'shadow_width'),
        ('guide', 'guide'),
        ('activeGuide', 'active_guide'),
        ('stackGuide', 'stack_guide'),
        ('highlight', 'highlight'),
        ('findHighlightForeground', 'find_highlight_foreground'),
        ('findHighlight', 'find_highlight'),
        ('bracketsOptions', 'brackets_options'),
        ('bracketsForeground', 'brackets_foreground'),
        ('bracketContentsOptions', 'bracket_contents_options'),
        ('bracketContentsForeground', 'bracket_contents_foreground'),
        ('tagsOptions', 'tags_options'),
        ('tagsForeground', 'tags_foreground'),
        ('popupCss', 'popup_css'),
        ('phantomCss', 'phantom_css'),
    ])
    # Destination keys whose values are not colors, and so must not be
    # passed through resolve().
    non_color_settings = [
        'shadow_width',
        'brackets_options',
        'bracket_contents_options',
        'tags_options',
        'popup_css',
        'phantom_css',
    ]
    def run(self, use_variables=None):
        """
        Convert the active .tmTheme view into a new unsaved
        .sublime-color-scheme view.

        :param use_variables:
            One of the UseVariablesInputHandler.options display strings,
            selecting HSL variables, hex variables, or no variables
        """
        use_vars = use_variables != UseVariablesInputHandler.options['no']
        hsl_vars = use_variables == UseVariablesInputHandler.options['hsl']
        view = self.window.active_view()
        if not view:
            return
        fname = view.file_name()
        if not fname or not fname.endswith('.tmTheme'):
            return
        tm_theme = view.substr(sublime.Region(0, view.size()))
        # NOTE(review): plistlib.readPlistFromBytes is deprecated and was
        # removed in Python 3.9 — presumably fine under Sublime's bundled
        # runtime; confirm before upgrading.
        plist = plistlib.readPlistFromBytes(tm_theme.encode("utf-8"))
        scheme = OrderedDict()
        scheme["name"] = plist.get("name", "Unnamed")
        scheme["author"] = plist.get("author", "Unknown")
        # NOTE: local name shadows the builtin globals(); kept as-is.
        globals = OrderedDict()
        rules = []
        colors = ColorCatalog()
        for setting in plist.get("settings", []):
            if "scope" in setting:
                # Scoped entry -> one entry in the "rules" array.
                rule = OrderedDict()
                if "name" in setting:
                    rule["name"] = setting["name"]
                if "scope" in setting:
                    rule["scope"] = setting["scope"]
                if "settings" in setting:
                    details = setting["settings"]
                    if "foreground" in details:
                        rule["foreground"] = self.resolve(use_vars, colors, details["foreground"])
                    if "selectionForeground" in details:
                        rule["selection_foreground"] = self.resolve(use_vars, colors, details["selectionForeground"])
                    if "background" in details:
                        rule["background"] = self.resolve(use_vars, colors, details["background"])
                    if "fontStyle" in details and details["fontStyle"].strip() != "":
                        rule["font_style"] = details["fontStyle"].strip()
                rules.append(rule)
            else:
                # Unscoped entry -> keys of the "globals" object.
                details = setting.get('settings', {})
                for tm_key in self.global_settings:
                    if tm_key not in details:
                        continue
                    value = details[tm_key]
                    subl_key = self.global_settings[tm_key]
                    if subl_key not in self.non_color_settings:
                        value = self.resolve(use_vars, colors, value)
                    if subl_key.endswith('_options'):
                        value = value.strip()
                    globals[subl_key] = value
        if use_vars:
            # Emit the variables the catalog collected during resolve().
            variables = OrderedDict()
            for name, color in colors.items():
                variables[name] = color
            if len(variables) > 0:
                scheme["variables"] = variables
        scheme["globals"] = globals
        scheme["rules"] = rules
        if hsl_vars:
            encoder_cls = HSLCSSColorEncoder
        else:
            encoder_cls = HexCSSColorEncoder
        sublime_color_scheme = json.dumps(scheme, indent=4, cls=encoder_cls)
        # Trim trailing whitespace
        sublime_color_scheme = re.sub(r'\s+$', '', sublime_color_scheme, 0, re.M)
        # Put [ and { on the next line
        sublime_color_scheme = re.sub(r'^(\s+)("\w+":) ([\[{])\n', '\\1\\2\n\\1\\3\n', sublime_color_scheme, 0, re.M)
        new_view = self.window.new_file()
        self.window.focus_view(new_view)
        new_view.settings().set('syntax', 'Packages/JavaScript/JSON.sublime-syntax')
        new_view.run_command('append', {'characters': sublime_color_scheme})
        new_view.set_viewport_position((0, 0))
        new_view.set_name(scheme['name'] + '.sublime-color-scheme')
    def resolve(self, use_vars, colors, value):
        """
        Returns a CSS value for the color specified

        :param use_vars:
            If the .sublime-color-scheme variables functionality should be used

        :param colors:
            A ColorCatalog collecting the variables seen so far

        :param value:
            A string CSS color

        :return:
            A string containing a CSS color, variable or function
        """
        if not use_vars:
            return value
        # Named (X11) colors are normalized to hex before cataloging.
        if value in X11_COLORS:
            value = X11_COLORS[value]
        return colors.lookup(HSLA.from_hex(value))
    def input(self, args):
        # Prompt for the variables mode when invoked without it.
        if 'use_variables' not in args:
            return UseVariablesInputHandler()
        return None
    def is_enabled(self):
        # Only enabled when the active view is a saved .tmTheme file.
        view = self.window.active_view()
        if not view:
            return False
        fname = view.file_name()
        return fname is not None and fname.endswith('.tmTheme')
| |
import sys
import time
sys.path.append("../config/common/tests")
from test_utils import *
import fixtures
import testtools
import test_common
import test_case
import discoveryclient.client as client
server_list = {}
def info_callback(info, client_id):
    """Subscribe callback: record the publisher-ids seen by a client."""
    global server_list
    publishers = [entry['@publisher-id'] for entry in info]
    server_list[client_id] = publishers
"""
Validate publisher in-use count is reasonable (typically
after load-balance event. Discovery server will try to keep
in-use count with 5% of expected average. To provide some
buffer around server calculations, we allow 20% deviation,
specially for small numbers we use in the test
"""
def validate_assignment_count(response, context, enforce=False):
    """Check that publisher in-use counts are reasonably balanced.

    Returns True when some active publisher exceeds 120% of the average
    in-use count (i.e. validation FAILED); always returns False when
    ``enforce`` is not set (output is still printed for debugging).
    """
    services = response['services']
    in_use_counts = {entry['service_id']: entry['in_use'] for entry in services}
    # print() with a single argument works on both Python 2 and 3; the
    # original bare print statement is a syntax error under Python 3.
    print('%s %s' % (context, in_use_counts))
    if not enforce:
        return False
    # only use active pubs
    pubs_active = [entry for entry in services if entry['status'] != 'down']
    # validate; // keeps the Python 2 integer floor-division semantics
    # when this runs on Python 3.
    avg = sum([entry['in_use'] for entry in pubs_active]) // len(pubs_active)
    # return failure status
    return True in [e['in_use'] > int(1.2 * avg) for e in pubs_active]
def validate_lb_position(context):
    """Print, for each subscription slot position, how many clients are
    assigned to each publisher (the load-balance position distribution).

    Reads the module-global ``server_list`` populated by info_callback.
    """
    # print() form works on both Python 2 and 3; the original bare
    # print statements are syntax errors under Python 3.
    print(context)
    servers = {}
    for client_id, slist in server_list.items():
        for position, sid in enumerate(slist):
            counters = servers.setdefault(position, {})
            counters[sid] = counters.get(sid, 0) + 1
    for pos, counters in servers.items():
        print("Position %d: %s" % (pos, counters))
class DiscoveryServerTestCase(test_case.DsTestCase):
def setUp(self):
extra_config_knobs = [
('SvcActiveLoadBalance', 'policy', 'dynamic-load-balance'),
('SvcLoadBalanceChash', 'policy', 'chash'),
]
super(DiscoveryServerTestCase, self).setUp(extra_disc_server_config_knobs=extra_config_knobs)
def tearDown(self):
# clear subscriber's server list for every test
global server_list
server_list = {}
super(DiscoveryServerTestCase, self).tearDown()
def test_load_balance_chash2(self):
service_type = 'SvcLoadBalanceChash'
client_type = 'test-discovery'
# multiple subscribers for 2 instances each
subcount = 40
service_count = 2
sub_tasks = []
for i in range(subcount):
client_id = "test-load-balance-%d" % i
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port, client_id)
obj = disc.subscribe(
service_type, service_count, info_callback, client_id)
sub_tasks.append(obj.task)
time.sleep(1)
# start publishers one by one
pub_tasks = []
max_pubs = 4
for i in range(max_pubs):
pub_id = 'test_discovery-%d' % i
pub_data = {service_type : 'data-%d' % i}
print 'Starting publisher %d' % i
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
pub_tasks.append(task)
print 'Waiting for all client TTL to expire (1 min)'
time.sleep(1*60)
time.sleep(1)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), i+1)
failure = validate_assignment_count(response, 'In-use count after publishing %d' % i, enforce=False)
self.assertEqual(failure, False)
validate_lb_position("Server Positions")
# set operational state down one by one
for i in range(max_pubs):
pub_id = 'test_discovery-%d' % i
pub_url = '/service/%s' % pub_id
payload = {
'service-type' : '%s' % service_type,
'admin-state' : 'down',
}
(code, msg) = self._http_put(pub_url, json.dumps(payload))
self.assertEqual(code, 200)
# wait for all TTL to expire before looking at publisher's counters
print 'Waiting for all client TTL to expire (1 min)'
time.sleep(1*60)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
failure = validate_assignment_count(response, 'In-use count just after publisher-%d down' % i, enforce=False)
self.assertEqual(failure, False)
validate_lb_position("Server Positions")
def test_load_balance_chash(self):
# publish 3 instances of service. Active LB must be enabled!
pub_tasks = []
service_type = 'SvcLoadBalanceChash'
num_pubs = 3
for i in range(num_pubs):
client_type = 'test-discovery'
pub_id = 'test_discovery-%d' % i
pub_data = {service_type : '%s-%d' % (service_type, i)}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
pub_tasks.append(task)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), num_pubs)
self.assertEqual(response['services'][0]['service_type'], service_type)
failure = validate_assignment_count(response, 'In-use count just after publishing', enforce=False)
self.assertEqual(failure, False)
# multiple subscribers for 2 instances each
subcount = 40
service_count = 2
sub_tasks = []
for i in range(subcount):
client_id = "test-load-balance-%d" % i
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port, client_id)
obj = disc.subscribe(
service_type, service_count, info_callback, client_id)
sub_tasks.append(obj.task)
time.sleep(1)
# validate all clients have subscribed
time.sleep(1)
(code, msg) = self._http_get('/clients.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), subcount*service_count)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
validate_lb_position("Server Positions after %d publishers" % num_pubs)
failure = validate_assignment_count(response, 'In-use count just after initial subscribe', enforce=False)
self.assertEqual(failure, False)
# start one more publisher
pub_id = 'test_discovery-%d' % num_pubs
pub_data = {service_type : '%s-%d' % (service_type, num_pubs)}
pub_url = '/service/%s' % pub_id
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
pub_tasks.append(task)
# ensure all are up
time.sleep(1)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), num_pubs+1)
# wait for all TTL to expire before looking at publisher's counters
print 'Waiting for all client TTL to expire (1 min)'
time.sleep(1*60)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
validate_lb_position("Server Positions: additional publisher up")
failure = validate_assignment_count(response, 'In-use count just after bringing up one more publisher', enforce=False)
self.assertEqual(failure, False)
# set operational state down - new service
payload = {
'service-type' : '%s' % service_type,
'admin-state' : 'down',
}
(code, msg) = self._http_put(pub_url, json.dumps(payload))
self.assertEqual(code, 200)
# wait for all TTL to expire before looking at publisher's counters
print 'Waiting for all client TTL to expire (1 min)'
time.sleep(1*60)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
validate_lb_position("Server Positions: additional publisher down")
failure = validate_assignment_count(response, 'In-use count just after publisher-%d down' % (num_pubs+1), enforce=False)
self.assertEqual(failure, False)
# set operational state up - again
payload = {
'service-type' : '%s' % service_type,
'admin-state' : 'up',
}
(code, msg) = self._http_put(pub_url, json.dumps(payload))
self.assertEqual(code, 200)
# wait for all TTL to expire before looking at publisher's counters
print 'Waiting for all client TTL to expire (1 min)'
time.sleep(1*60)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
validate_lb_position("Server Positions: additional publisher up again")
failure = validate_assignment_count(response, 'In-use count just after publisher-%d down' % (num_pubs+1), enforce=False)
self.assertEqual(failure, False)
def test_load_balance_partial(self):
global server_list
# publish 3 instances
tasks = []
service_type = 'SvcLoadBalance'
for i in range(3):
client_type = 'test-discovery'
pub_id = 'test_discovery-%d' % i
pub_data = {service_type : '%s-%d' % (service_type, i)}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
tasks.append(task)
time.sleep(1)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
self.assertEqual(response['services'][0]['service_type'], service_type)
# multiple subscribers for 2 instances each
subcount = 20
service_count = 2
tasks = []
for i in range(subcount):
client_id = "test-load-balance-%d" % i
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port, client_id)
obj = disc.subscribe(
service_type, service_count, info_callback, client_id)
tasks.append(obj.task)
time.sleep(1)
print 'Started %d tasks to subscribe service %s, count %d' \
% (subcount, service_type, service_count)
# validate all clients have subscribed
time.sleep(1)
(code, msg) = self._http_get('/clients.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), subcount*service_count)
# total subscriptions (must be subscount * service_count)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
subs = sum([item['in_use'] for item in response['services']])
self.assertEqual(subs, subcount*service_count)
validate_lb_position("Server Positions after 3 publishers and initial subscribe")
failure = validate_assignment_count(response, 'In-use count after initial subscribe')
self.assertEqual(failure, False)
# start one more publisher
pub_id = 'test_discovery-3'
pub_data = {service_type : '%s-3' % service_type}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
tasks.append(task)
# verify 4th publisher is up
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 4)
# wait for all TTL to expire before looking at publisher's counters
print 'Waiting for all client TTL to expire (1 min)'
time.sleep(1*60)
# total subscriptions (must be subscount * service_count)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
subs = sum([item['in_use'] for item in response['services']])
self.assertEqual(subs, subcount*service_count)
# verify newly added in-use count is 0
data = [item for item in response['services'] if item['service_id'] == 'test_discovery-3:%s' % service_type]
entry = data[0]
self.assertEqual(len(data), 1)
self.assertEqual(entry['in_use'], 0)
# Issue load-balance command
print 'Sending load-balance command (partial)'
(code, msg) = self._http_post('/load-balance/%s' % service_type, '')
self.assertEqual(code, 200)
# ensure partial LB was received by discovery server correctly
(code, msg) = self._http_get('/stats.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(response["lb_full"], 0)
self.assertEqual(response["lb_partial"], 1)
# wait for all TTL to expire before looking at publisher's counters
print 'Waiting for all client TTL to expire (1 min)'
time.sleep(1*60)
# total subscriptions (must still be subscount * service_count)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
subs = sum([item['in_use'] for item in response['services']])
self.assertEqual(subs, subcount*service_count)
# verify all servers are load balanced
validate_lb_position("Server Positions after LB command")
failure = validate_assignment_count(response, 'In-use count after initial subscribe')
self.assertEqual(failure, False)
def test_load_balance_full(self):
global server_list
# publish 3 instances
tasks = []
service_type = 'SvcLoadBalance'
for i in range(3):
client_type = 'test-discovery'
pub_id = 'test_discovery-%d' % i
pub_data = {service_type : '%s-%d' % (service_type, i)}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
tasks.append(task)
time.sleep(1)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
self.assertEqual(response['services'][0]['service_type'], service_type)
# multiple subscribers for 2 instances each
subcount = 20
service_count = 2
tasks = []
for i in range(subcount):
client_id = "test-load-balance-%d" % i
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port, client_id)
obj = disc.subscribe(
service_type, service_count, info_callback, client_id)
tasks.append(obj.task)
time.sleep(1)
print 'Started %d tasks to subscribe service %s, count %d' \
% (subcount, service_type, service_count)
# validate all clients have subscribed
time.sleep(1)
(code, msg) = self._http_get('/clients.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), subcount*service_count)
# total subscriptions (must be subscount * service_count)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
subs = sum([item['in_use'] for item in response['services']])
self.assertEqual(subs, subcount*service_count)
validate_lb_position("Server Positions after 3 publishers and initial subscribe")
failure = validate_assignment_count(response, 'In-use count after initial subscribe')
self.assertEqual(failure, False)
# start one more publisher
pub_id = 'test_discovery-3'
pub_data = {service_type : '%s-3' % service_type}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
tasks.append(task)
# verify 4th publisher is up
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 4)
# wait for all TTL to expire before looking at publisher's counters
print 'Waiting for all client TTL to expire (1 min)'
time.sleep(1*60)
# total subscriptions (must be subscount * service_count)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
subs = sum([item['in_use'] for item in response['services']])
self.assertEqual(subs, subcount*service_count)
# verify newly added in-use count is 0
data = [item for item in response['services'] if item['service_id'] == 'test_discovery-3:%s' % service_type]
entry = data[0]
self.assertEqual(len(data), 1)
self.assertEqual(entry['in_use'], 0)
# Issue load-balance command
print 'Sending load-balance command (full)'
payload = {"type": "full"}
(code, msg) = self._http_post('/load-balance/%s' % service_type, json.dumps(payload))
self.assertEqual(code, 200)
# ensure full LB was received by discovery server correctly
(code, msg) = self._http_get('/stats.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(response["lb_full"], 1)
self.assertEqual(response["lb_partial"], 0)
# wait for all TTL to expire before looking at publisher's counters
print 'Waiting for all client TTL to expire (1 min)'
time.sleep(1*60)
# total subscriptions (must still be subscount * service_count)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
subs = sum([item['in_use'] for item in response['services']])
self.assertEqual(subs, subcount*service_count)
# verify all servers are load balanced
validate_lb_position("Server Positions after LB command")
failure = validate_assignment_count(response, 'In-use count after initial subscribe')
self.assertEqual(failure, False)
    def test_active_load_balance(self):
        """Active (automatic) load-balancing of a service.

        Unlike the partial/full tests, no explicit '/load-balance' command
        is sent: with active LB enabled for the service, assignments are
        expected to rebalance on their own after each topology change
        (4th publisher added, admin-state down, admin-state up again) once
        the client TTLs have expired.
        """
        # publish 3 instances of service. Active LB must be enabled!
        tasks = []
        service_type = 'SvcActiveLoadBalance'
        for i in range(3):
            client_type = 'test-discovery'
            pub_id = 'test_discovery-%d' % i
            pub_data = {service_type : '%s-%d' % (service_type, i)}
            disc = client.DiscoveryClient(
                self._disc_server_ip, self._disc_server_port,
                client_type, pub_id)
            task = disc.publish(service_type, pub_data)
            tasks.append(task)
            time.sleep(1)
        (code, msg) = self._http_get('/services.json')
        self.assertEqual(code, 200)
        response = json.loads(msg)
        self.assertEqual(len(response['services']), 3)
        self.assertEqual(response['services'][0]['service_type'], service_type)
        failure = validate_assignment_count(response, 'In-use count just after publishing')
        self.assertEqual(failure, False)
        # multiple subscribers for 2 instances each
        subcount = 20
        service_count = 2
        tasks = []
        for i in range(subcount):
            client_id = "test-load-balance-%d" % i
            disc = client.DiscoveryClient(
                self._disc_server_ip, self._disc_server_port, client_id)
            obj = disc.subscribe(
                service_type, service_count, info_callback, client_id)
            tasks.append(obj.task)
            time.sleep(1)
        # validate all clients have subscribed
        time.sleep(1)
        (code, msg) = self._http_get('/clients.json')
        self.assertEqual(code, 200)
        response = json.loads(msg)
        self.assertEqual(len(response['services']), subcount*service_count)
        (code, msg) = self._http_get('/services.json')
        self.assertEqual(code, 200)
        response = json.loads(msg)
        failure = validate_assignment_count(response, 'In-use count just after initial subscribe')
        self.assertEqual(failure, False)
        validate_lb_position("Server Positions after 3 publishers")
        # start one more publisher
        pub_id = 'test_discovery-3'
        pub_data = {service_type : '%s-3' % service_type}
        # pub_url is used below to flip this publisher's admin state
        pub_url = '/service/%s' % pub_id
        disc = client.DiscoveryClient(
            self._disc_server_ip, self._disc_server_port,
            client_type, pub_id)
        task = disc.publish(service_type, pub_data)
        tasks.append(task)
        # ensure all are up
        time.sleep(1)
        (code, msg) = self._http_get('/services.json')
        self.assertEqual(code, 200)
        response = json.loads(msg)
        self.assertEqual(len(response['services']), 4)
        # wait for all TTL to expire before looking at publisher's counters
        print 'Waiting for all client TTL to expire (1 min)'
        time.sleep(1*60)
        # active LB should have pulled the new publisher into rotation
        (code, msg) = self._http_get('/services.json')
        self.assertEqual(code, 200)
        response = json.loads(msg)
        failure = validate_assignment_count(response, 'In-use count just after bringing up one more publisher')
        self.assertEqual(failure, False)
        validate_lb_position("Server Positions: additional publisher up")
        # set operational state down - new service
        payload = {
            'service-type' : '%s' % service_type,
            'admin-state' : 'down',
        }
        (code, msg) = self._http_put(pub_url, json.dumps(payload))
        self.assertEqual(code, 200)
        # wait for all TTL to expire before looking at publisher's counters
        print 'Waiting for all client TTL to expire (1 min)'
        time.sleep(1*60)
        # remaining publishers should rebalance without the downed one
        (code, msg) = self._http_get('/services.json')
        self.assertEqual(code, 200)
        response = json.loads(msg)
        failure = validate_assignment_count(response, 'In-use count just after publisher-3 down')
        self.assertEqual(failure, False)
        validate_lb_position("Server Positions: additional publisher down")
        # set operational state up - again
        payload = {
            'service-type' : '%s' % service_type,
            'admin-state' : 'up',
        }
        (code, msg) = self._http_put(pub_url, json.dumps(payload))
        self.assertEqual(code, 200)
        # wait for all TTL to expire before looking at publisher's counters
        print 'Waiting for all client TTL to expire (1 min)'
        time.sleep(1*60)
        validate_lb_position("Server Positions: additional publisher up again")
        # total subscriptions must be subscount * service_count
        (code, msg) = self._http_get('/services.json')
        self.assertEqual(code, 200)
        response = json.loads(msg)
        failure = validate_assignment_count(response, 'In-use count just after publisher-3 up again')
        self.assertEqual(failure, False)
def test_load_balance_min_instances(self):
tasks = []
service_type = 'Foobar'
pubcount = 5
for i in range(pubcount):
client_type = 'test-discovery'
pub_id = 'test_discovery-%d' % i
pub_data = {service_type : '%s-%d' % (service_type, i)}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
tasks.append(task)
time.sleep(1)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), pubcount)
self.assertEqual(response['services'][0]['service_type'], service_type)
# multiple subscribers for 2 instances each
subcount = 100
min_instances = 2
suburl = "/subscribe"
payload = {
'service' : '%s' % service_type,
'instances' : 0,
'min-instances': min_instances,
'client-type' : 'Vrouter-Agent',
'remote-addr' : '3.3.3.3',
'version' : '2.2',
}
for i in range(subcount):
payload['client'] = "test-load-balance-%d" % i
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), pubcount)
# validate all clients have subscribed
time.sleep(1)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
failure = validate_assignment_count(response, 'In-use count after clients with min_instances 2')
self.assertEqual(failure, False)
def test_load_balance_siul(self):
# publish 2 instances
tasks = []
service_type = 'SvcLoadBalance'
pubcount = 2
for i in range(pubcount):
client_type = 'test-discovery'
pub_id = 'test_discovery-%d' % i
pub_data = {service_type : '%s-%d' % (service_type, i)}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
tasks.append(task)
time.sleep(1)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), pubcount)
self.assertEqual(response['services'][0]['service_type'], service_type)
# multiple subscribers for 2 instances each
subcount = 20
service_count = 2
suburl = "/subscribe"
payload = {
'service' : '%s' % service_type,
'instances' : service_count,
'client-type' : 'Vrouter-Agent',
'service-in-use-list' : {'publisher-id': ["test_discovery-0", 'test_discovery-1'] }
}
for i in range(subcount):
payload['client'] = "ut-client-%d" % i
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response[service_type]), service_count)
# validate both publishers are assigned fairly
time.sleep(1)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
failure = validate_assignment_count(response, 'In-use count after clients with service-in-use-list')
self.assertEqual(failure, False)
# start one more publisher
pub_id = 'test_discovery-2'
pub_data = {service_type : '%s-2' % service_type}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
tasks.append(task)
pubcount += 1
# verify new publisher is up
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), pubcount)
subs = sum([item['in_use'] for item in response['services']])
self.assertEqual(subs, subcount*service_count)
# verify newly added in-use count is 0
data = [item for item in response['services'] if item['service_id'] == '%s:%s' % (pub_id, service_type)]
entry = data[0]
self.assertEqual(len(data), 1)
self.assertEqual(entry['in_use'], 0)
# Issue load-balance command
(code, msg) = self._http_post('/load-balance/%s' % service_type, '')
self.assertEqual(code, 200)
for i in range(subcount):
payload['client'] = "ut-client-%d" % i
(code, msg) = self._http_post(suburl, json.dumps(payload))
self.assertEqual(code, 200)
# verify newly added in-use count is 0
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
failure = validate_assignment_count(response, 'In-use count after LB command')
self.assertEqual(failure, False)
| |
from datetime import datetime
from flask import get_flashed_messages
from flask.ext.login import login_user, logout_user
from testsuite import DataBaseTestCase
from lpm import items, auth
from lpm.components import PartNumber
class ItemsTest(DataBaseTestCase):
def test_create_comment(self):
with self.app.test_request_context():
usr = auth.auth_user('viewer', '1234')
login_user(usr)
c = items.create_comment('comment')
self.assertEqual('viewer', c.get('user'))
self.assertEqual('comment', c.get('message'))
# don't test the date
date = datetime.now()
c = items.create_comment('comment', date)
self.assertEqual({'user': 'viewer', 'date': date, 'message': 'comment'}, c)
logout_user()
    def test_store_items(self):
        """_store_items inserts items, auto-comments them, and updates stock.

        Imports three items (two of part TE0002 from batch 'b1', one of
        TE0001) and verifies the stored fields, the '[Auto] created'
        comments, any user-supplied comment, and the stock / batch /
        history bookkeeping triggered by the insertion.
        """
        with self.app.test_request_context():
            usr = auth.auth_user('viewer', '1234')
            login_user(usr)
            importdata = [
                {
                    'serial': 'LPM0001',
                    'partno': 'TE0002a',
                    'project': 'some project',
                    'status': 'shipped',
                    'param1': 'some param',
                    'batch': 'b1'
                },
                {
                    'serial': 'LPM0002',
                    'partno': 'TE0001b',
                    'param2': 'testdata',
                    'comment': 'my comment'
                },
                {
                    'serial': 'LPM0003',
                    'partno': 'TE0002a',
                    'batch': 'b1'
                },
            ]
            items._store_items(importdata)
            # item 1: all supplied fields stored; status 'shipped' makes it
            # unavailable; exactly one auto-generated creation comment
            obj = self.app.mongo.db.items.find_one('LPM0001')
            self.assertIsNotNone(obj)
            self.assertEqual('TE0002a', obj.get('partno'))
            self.assertEqual('some project', obj.get('project'))
            self.assertEqual('shipped', obj.get('status'))
            self.assertFalse(obj.get('available'))
            self.assertEqual('b1', obj.get('batch'))
            self.assertEqual('some param', obj.get('param1'))
            comments = obj.get('comments', list())
            self.assertEqual(1, len(comments))
            comment = comments[0]
            self.assertEqual('viewer', comment.get('user'))
            self.assertEqual('[Auto] created', comment.get('message'))
            # item 2: project/status default to ''; import comment is stored
            # after the auto-generated creation comment
            obj = self.app.mongo.db.items.find_one('LPM0002')
            self.assertIsNotNone(obj)
            self.assertEqual('TE0001b', obj.get('partno'))
            self.assertEqual('', obj.get('project'))
            self.assertEqual('', obj.get('status'))
            self.assertTrue(obj.get('available'))
            self.assertEqual('testdata', obj.get('param2'))
            comments = obj.get('comments', list())
            self.assertEqual(2, len(comments))
            comment = comments[0]
            self.assertEqual('viewer', comment.get('user'))
            self.assertEqual('[Auto] created', comment.get('message'))
            comment = comments[1]
            self.assertEqual('viewer', comment.get('user'))
            self.assertEqual('my comment', comment.get('message'))
            # item 3: minimal fields, still gets the creation comment
            obj = self.app.mongo.db.items.find_one('LPM0003')
            self.assertIsNotNone(obj)
            self.assertEqual('TE0002a', obj.get('partno'))
            self.assertEqual('b1', obj.get('batch'))
            self.assertTrue(obj.get('available'))
            comments = obj.get('comments', list())
            self.assertEqual(1, len(comments))
            comment = comments[0]
            self.assertEqual('viewer', comment.get('user'))
            self.assertEqual('[Auto] created', comment.get('message'))
            # also test that the stock is correctly updated
            obj = self.app.mongo.db.stock.find_one('TE0001')
            self.assertIsNotNone(obj)
            self.assertEqual(97, obj.get('quantity')) # 100 - 4 + 1
            obj = self.app.mongo.db.stock.find_one('TE0002')
            self.assertIsNotNone(obj)
            self.assertEqual(37, obj.get('quantity'))
            obj = self.app.mongo.db.stock_batches.find_one({'partno': 'TE0002', 'name': 'b1'})
            self.assertIsNotNone(obj)
            self.assertEqual(2, obj.get('quantity'))
            entries = self.app.mongo.db.stock_history.find({'partno': 'TE0002'}).count()
            self.assertEqual(1, entries) # one entry for the insertion
            entries = self.app.mongo.db.stock_history.find({'partno': 'TE0001'}).count()
            self.assertEqual(2, entries) # one entry for the insertion, one for the stock removal
            logout_user()
    # NOTE(review): method name has a typo ('requiremets'); kept as-is so
    # anything selecting tests by name keeps working.
    def test_get_requiremets(self):
        """get_requirements returns the field rules for a part number.

        Revision 'a' of TE0001 has its own rule set, revision 'b' has no
        rules, and both revisions of TE0002 share the rules of the bare
        part number.
        """
        with self.app.app_context():
            refreqs = dict(
                required_fields=['param5', 'param6', 'param7', 'param8'],
                date_fields=['param5'],
                integer_fields=['param2', 'param6'],
                floating_point_fields=['param7'],
                boolean_fields=['param8']
            )
            reqs = items.get_requirements(PartNumber('TE0001a'))
            self.assertEqual(refreqs, reqs)
            # revision 'b' declares no requirements at all
            reqs = items.get_requirements(PartNumber('TE0001b'))
            self.assertEqual(dict(), reqs)
            refreqs = dict(
                required_fields=['param1', 'param2', 'param3', 'param4'],
                integer_fields=['param2']
            )
            # both revisions and the bare number share the same rules
            reqs = items.get_requirements(PartNumber('TE0002a'))
            self.assertEqual(refreqs, reqs)
            reqs = items.get_requirements(PartNumber('TE0002b'))
            self.assertEqual(refreqs, reqs)
            reqs = items.get_requirements(PartNumber('TE0002'))
            self.assertEqual(refreqs, reqs)
    def test_process_requirements(self):
        """process_requirements converts and validates fields in place.

        Typed fields (date/integer/float/boolean) are converted in the data
        dict; missing required fields, wrong types, and unconvertible
        values raise ValueError, as does an invalid initial status.
        """
        # no requirements: any data passes untouched
        items.process_requirements(dict(serial=5, b='c', d='e'), dict())
        date = datetime.now()
        data = dict(
            p1='a',
            p2=date,
            p3='4.6',
            p4='6',
            p5='NO'
        )
        reqs = dict(
            date_fields=['p2'],
            integer_fields=['p4'],
            floating_point_fields=['p3'],
            boolean_fields=['p5']
        )
        items.process_requirements(data, reqs)
        self.assertEqual('a', data.get('p1')) # unchanged
        self.assertEqual(date, data.get('p2')) # unchanged
        self.assertTrue(isinstance(data.get('p3'), float)) # to floating point
        self.assertEqual(4.6, data.get('p3'))
        self.assertTrue(isinstance(data.get('p4'), int)) # to integer
        self.assertEqual(6, data.get('p4'))
        self.assertTrue(isinstance(data.get('p5'), bool)) # to boolean
        self.assertFalse(data.get('p5'))
        # each violation below must raise ValueError
        with self.assertRaises(ValueError):
            items.process_requirements(
                dict(serial=5),
                dict(required_fields='param1')
            )
        with self.assertRaises(ValueError):
            items.process_requirements(
                dict(param1='1234'),
                dict(date_fields='param1')
            )
        with self.assertRaises(ValueError):
            items.process_requirements(
                dict(param1='6.1'),
                dict(integer_fields=['param1'])
            )
        with self.assertRaises(ValueError):
            items.process_requirements(
                dict(param1='6.1f'),
                dict(floating_point_fields=['param1'])
            )
        with self.assertRaises(ValueError):
            items.process_requirements(
                dict(param1='6.1f'),
                dict(boolean_fields=['param1'])
            )
        with self.app.app_context():
            with self.assertRaises(ValueError): # invalid initial status
                items.process_requirements(
                    dict(partno='TE0002a', status='somestatus'),
                    dict()
                )
    def test_do_import_file(self):
        """_import_file parses the spreadsheet and applies per-part rules.

        Rows whose part number declares typed fields come back converted
        (datetime/int/float/bool); rows of a part without rules keep the
        raw string values from the sheet.
        """
        with self.app.test_request_context():
            success, headers, data = items._import_file('testsuite/files/items_add.xlsx')
            self.assertTrue(success)
            refhdr = ['serial', 'partno', 'batch', 'param1', 'param2', 'param3', 'param5',
                      'param4', 'param6', 'param7', 'param8', 'comment']
            self.assertEqual(refhdr, headers)
            self.assertEqual(4, len(data))
            # row 1: TE0001a declares typed fields, so values are converted
            d = data[0]
            refd = dict(
                serial='LPM0001',
                partno='TE0001a',
                batch='b1',
                param5=datetime(2016, 1, 31, 15, 38, 4),
                param6=3,
                param7=4.6,
                param8=True,
            )
            self.assertEqual(refd, d)
            # row 2: TE0001b has no rules, everything stays a string
            d = data[1]
            refd = dict(
                serial='LPM0002',
                partno='TE0001b',
                batch='b1',
                param2='7',
                param5='2016-02-01 15:38:50', # not defined as a date field
                param6='4',
                param7='4.3',
                param8='False',
            )
            self.assertEqual(refd, d)
            # row 3: like row 1, plus a free-text comment column
            d = data[2]
            refd = dict(
                serial='LPM0003',
                partno='TE0001a',
                batch='b1',
                param5=datetime(2016, 2, 2, 15, 38, 9),
                param6=5,
                param7=2.0,
                param8=False,
                comment='some comment here',
            )
            self.assertEqual(refd, d)
            # row 4: TE0002a row keeps the sheet's own (mixed) types
            d = data[3]
            refd = dict(
                serial='LPM0004',
                partno='TE0002a',
                batch='b2',
                param1='a',
                param2=5,
                param3='7',
                param5='n',
                param4='d',
                param8='text',
            )
            self.assertEqual(refd, d)
def test_bad_import(self):
with self.app.test_request_context():
usr = auth.auth_user('viewer', '1234')
login_user(usr)
success, headers, data = items._import_file('testsuite/files/badimport.xlsx')
self.assertFalse(success)
msg = get_flashed_messages()
refmsg=[
"field 'param5' must be a datetime object (row 2)",
"serial number 'LP0001' exists already (row 3)",
"required field 'param5' is missing (row 4)",
'unknown part number TE0012 (row 5)',
'part number requires a revision (row 6)'
]
self.assertEqual(refmsg, msg)
    def test_import_file(self):
        """The /items/import view requires admin and runs a two-step flow.

        Step 1 uploads the spreadsheet and returns a page embedding a
        server-generated temporary file name; step 2 posts that name back
        to confirm and execute the import.
        """
        # a plain viewer is redirected to the login page
        self.login('viewer')
        rv = self.client.get('/items/import')
        self.assertEqual(302, rv.status_code)
        self.assertIn('/login', rv.location)
        self.logout()
        self.login('admin')
        rv = self.client.get('/items/import')
        self.assertEqual(200, rv.status_code)
        rv = self.client.post('/items/import', data=dict(
            file=open('testsuite/files/items_add.xlsx', 'rb')
        ))
        self.assertEqual(200, rv.status_code)
        # extract the temporary file name embedded in the reply page
        start = rv.data.find(b'lpm_tmp_')
        end = rv.data.find(b'"', start)
        filename = rv.data[start:end]
        rv = self.client.post('/items/import', data=dict(
            tmpname=filename.decode('utf-8')
        ))
        self.assertEqual(302, rv.status_code)
        with self.app.app_context():
            # Only test that the objects are created.
            # The actual import logic is tested in the other test functions
            obj = self.app.mongo.db.items.find_one('LPM0001')
            self.assertIsNotNone(obj)
            obj = self.app.mongo.db.items.find_one('LPM0002')
            self.assertIsNotNone(obj)
            obj = self.app.mongo.db.items.find_one('LPM0003')
            self.assertIsNotNone(obj)
            obj = self.app.mongo.db.items.find_one('LPM0004')
            self.assertIsNotNone(obj)
    def test_check_status(self):
        """_check_status enforces per-part transitions and user permissions.

        For TE0002a: '' -> tested/reserved is allowed, '' -> shipped is
        not, but reserved -> shipped is; unknown statuses raise. The same
        transition that succeeds for 'admin' raises for 'viewer', and
        TE0001a only accepts 'obsolete'.
        """
        with self.app.test_request_context():
            usr = auth.auth_user('admin', '1234')
            login_user(usr)
            items._check_status('TE0002a', '', 'tested')
            items._check_status('TE0002a', '', 'reserved')
            with self.assertRaises(ValueError):
                items._check_status('TE0002a', '', 'shipped') # invalid transition
            with self.assertRaises(ValueError):
                items._check_status('TE0002a', '', 'somestatus') # unknown status
            items._check_status('TE0002a', 'reserved', 'shipped') # now a valid transition
            logout_user()
            # same part, less privileged user: the transition is rejected
            usr = auth.auth_user('viewer', '1234')
            login_user(usr)
            with self.assertRaises(ValueError):
                items._check_status('TE0002a', '', 'tested') # requires item_admin
            logout_user()
            usr = auth.auth_user('admin', '1234')
            login_user(usr)
            with self.assertRaises(ValueError):
                items._check_status('TE0001a', '', 'tested') # unknown status for this part number
            items._check_status('TE0001a', '', 'obsolete')
            logout_user()
def test_is_unavailable(self):
with self.app.app_context():
self.assertFalse(items._is_unavailable('TE0002a', 'tested'))
self.assertFalse(items._is_unavailable('TE0002a', 'reserved'))
self.assertTrue(items._is_unavailable('TE0002a', 'shipped'))
self.assertTrue(items._is_unavailable('TE0002a', 'obsolete'))
self.assertTrue(items._is_unavailable('TE0001a', 'obsolete'))
def test_add_comment(self):
self.login('viewer')
rv = self.client.get('/items/LP0001/add-comment')
self.assertEqual(200, rv.status_code)
rv = self.client.post('/items/LP0001/add-comment', data=dict(
message='testcomment'
))
self.assertEqual(302, rv.status_code)
self.logout()
with self.app.app_context():
obj = self.app.mongo.db.items.find_one('LP0001')
comments = obj.get('comments')
self.assertEqual(1, len(comments))
comment = comments[0]
self.assertEqual('testcomment', comment.get('message'))
self.assertEqual('viewer', comment.get('user'))
self.assertTrue(obj.get('available'))
self.assertEqual('', obj.get('status'))
def test_change_status(self):
self.login('viewer')
rv = self.client.get('/items/LP0001/change-status/teststatus')
self.assertEqual(200, rv.status_code)
self.assertTrue(
b'<input class="form-control" id="status" name="status" '
b'placeholder="Required" type="text" value="teststatus">'
in rv.data
)
rv = self.client.post('/items/LP0001/change-status', data=dict(
status='teststatus',
project='someproject',
comment='should not work'
))
self.assertEqual(200, rv.status_code) # invalid state transition
self.assertTrue(b'unknown status: 'teststatus'' in rv.data)
rv = self.client.post('/items/LP0001/change-status', data=dict(
status='obsolete',
project='someproject',
comment='should not work'
))
self.assertEqual(200, rv.status_code) # insufficient permissions
self.assertTrue(
b"insufficient permissions to do the status transition from '' to 'obsolete'"
in rv.data
)
self.logout()
self.login('admin')
rv = self.client.post('/items/LP0001/change-status', data=dict(
status='obsolete',
project='someproject',
comment='should now work'
))
self.assertEqual(302, rv.status_code)
with self.app.app_context():
obj = self.app.mongo.db.items.find_one('LP0001')
self.assertEqual('obsolete', obj.get('status'))
self.assertFalse(obj.get('available'))
self.assertEqual('someproject', obj.get('project'))
comments = obj.get('comments')
self.assertEqual(2, len(comments))
comment = comments[0]
self.assertEqual("[Auto] changed status to 'obsolete'", comment.get('message'))
self.assertEqual('admin', comment.get('user'))
comment = comments[1]
self.assertEqual('should now work', comment.get('message'))
self.assertEqual('admin', comment.get('user'))
def test_set_project(self):
self.login('viewer')
rv = self.client.get('/items/LP0001/set-project')
self.assertEqual(200, rv.status_code)
rv = self.client.post('/items/LP0001/set-project', data=dict(
project='myproject'
))
self.assertEqual(302, rv.status_code)
self.logout()
with self.app.app_context():
obj = self.app.mongo.db.items.find_one('LP0001')
self.assertEqual('myproject', obj.get('project'))
comments = obj.get('comments')
self.assertEqual(1, len(comments))
comment = comments[0]
self.assertEqual("[Auto] changed project association to 'myproject'", comment.get('message'))
self.assertEqual('viewer', comment.get('user'))
self.assertTrue(obj.get('available'))
self.assertEqual('', obj.get('status'))
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.stack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedStackOpTest(ragged_test_util.RaggedTensorTestCase,
                        parameterized.TestCase):
  """Parameterized tests for `ragged_array_ops.stack`.

  Each parameter dict below describes one case: `rt_inputs` are nested Python
  lists to stack, `axis` is the stacking axis, and `expected` is the stacked
  result (string values become bytes once converted to tensors). Optional
  keys: `ragged_ranks` forces a ragged_rank per input (0 means a dense
  Tensor), and `expected_ragged_rank` / `expected_shape` add extra checks on
  the result's static metadata.
  """
  @parameterized.parameters(
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=0',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21']],        # shape=(3, None)
              [['b00'], ['b10']]),                          # shape=(2, None)
          axis=0,
          expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']], [[b'b00'],
                                                              [b'b10']]]),
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=1',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],  # shape=(3, None)
              [['b00'], ['b10', 'b11', 'b12'], ['b20']]),   # shape=(3, None)
          axis=1,
          expected=[
              [[b'a00', b'a01'], [b'b00']],
              [[], [b'b10', b'b11', b'b12']],
              [[b'a20', b'a21', b'a22'], [b'b20']]]),
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=2',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],  # shape=(3, None)
              [['b00', 'b01'], [], ['b20', 'b21', 'b22']]),  # shape=(3, None)
          axis=2,
          expected=[
              [[b'a00', b'b00'], [b'a01', b'b01']], [],
              [[b'a20', b'b20'], [b'a21', b'b21'], [b'a22', b'b22']]]),
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=-3',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21']],        # shape=(3, None)
              [['b00'], ['b10']]),                          # shape=(2, None)
          axis=-3,
          expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']], [[b'b00'],
                                                              [b'b10']]]),
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=-2',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],  # shape=(3, None)
              [['b00'], ['b10', 'b11', 'b12'], ['b20']]),   # shape=(3, None)
          axis=-2,
          expected=[
              [[b'a00', b'a01'], [b'b00']],
              [[], [b'b10', b'b11', b'b12']],
              [[b'a20', b'a21', b'a22'], [b'b20']]]),
      dict(
          descr='Two rank-2 inputs (ragged_rank=1), axis=-1',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],  # shape=(3, None)
              [['b00', 'b01'], [], ['b20', 'b21', 'b22']]),  # shape=(3, None)
          axis=-1,
          expected=[
              [[b'a00', b'b00'], [b'a01', b'b01']], [],
              [[b'a20', b'b20'], [b'a21', b'b21'], [b'a22', b'b22']]]),
      dict(
          descr='Three rank-2 inputs (ragged_rank=1), axis=0',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],  # shape=(3, None)
              [['b00'], ['b10']],                           # shape=(2, None)
              [['c00'], ['c10', 'c11'], ['c21']]),          # shape=(3, None)
          axis=0,
          expected=[[[b'a00', b'a01'], [], [b'a20', b'a21', b'a22']],
                    [[b'b00'], [b'b10']],
                    [[b'c00'], [b'c10', b'c11'], [b'c21']]]),
      dict(
          descr='Three rank-2 inputs (ragged_rank=1), axis=1',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],  # shape=(3, None)
              [['b00'], ['b10', 'b11', 'b12'], ['b20']],    # shape=(3, None)
              [[], ['c10', 'c11'], ['c20', 'c21']]),        # shape=(3, None)
          axis=1,
          expected=[
              [[b'a00', b'a01'], [b'b00'], []],
              [[], [b'b10', b'b11', b'b12'], [b'c10', b'c11']],
              [[b'a20', b'a21', b'a22'], [b'b20'], [b'c20', b'c21']]],
          expected_shape=[3, None, None]),
      dict(
          descr='Three rank-2 inputs (ragged_rank=1), axis=2',
          rt_inputs=(
              [['a00', 'a01'], [], ['a20', 'a21', 'a22']],  # shape=(3, None)
              [['b00', 'b01'], [], ['b20', 'b21', 'b22']],  # shape=(3, None)
              [['c00', 'c01'], [], ['c20', 'c21', 'c22']]),  # shape=(3, None)
          axis=2,
          expected=[
              [[b'a00', b'b00', b'c00'], [b'a01', b'b01', b'c01']], [],
              [[b'a20', b'b20', b'c20'], [b'a21', b'b21', b'c21'],
               [b'a22', b'b22', b'c22']]]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=0',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']],
               [['a100', 'a101', 'a102'], ['a110', 'a111']]],
              [[['b000']], [['b100', 'b101'], ['b110']]],
              [[], [['c100', 'c101', 'c102', 'c103']], [[], ['c210', 'c211']]]),
          axis=0,
          expected=[
              [[[b'a000', b'a001'], [b'a010']],
               [[b'a100', b'a101', b'a102'], [b'a110', b'a111']]],
              [[[b'b000']],
               [[b'b100', b'b101'], [b'b110']]],
              [[],
               [[b'c100', b'c101', b'c102', b'c103']],
               [[], [b'c210', b'c211']]]]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=1',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']],
               [['a100', 'a101', 'a102'], ['a110', 'a111']]],
              [[['b000']], [['b100', 'b101'], ['b110']]],
              [[], [[], ['c110', 'c111']]]),
          axis=1,
          expected=[
              [[[b'a000', b'a001'], [b'a010']], [[b'b000']], []],
              [[[b'a100', b'a101', b'a102'], [b'a110', b'a111']],
               [[b'b100', b'b101'], [b'b110']],
               [[], [b'c110', b'c111']]]]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=2',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']],
               [['a100', 'a101', 'a102'], ['a110', 'a111']]],
              [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],
              [[['c000'], ['c010']], [[], ['c110', 'c111']]]),
          axis=2,
          expected=[
              [[[b'a000', b'a001'], [], [b'c000']],
               [[b'a010'], [b'b010', b'b011'], [b'c010']]],
              [[[b'a100', b'a101', b'a102'], [b'b100', b'b101'], []],
               [[b'a110', b'a111'], [b'b110'], [b'c110', b'c111']]]]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=3',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']]],
              [[['b000', 'b001'], ['b010']]],
              [[['c000', 'c001'], ['c010']]]),
          axis=3,
          expected=[[
              [[b'a000', b'b000', b'c000'], [b'a001', b'b001', b'c001']],
              [[b'a010', b'b010', b'c010']]]]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=-2',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']],
               [['a100', 'a101', 'a102'], ['a110', 'a111']]],
              [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],
              [[['c000'], ['c010']], [[], ['c110', 'c111']]]),
          axis=-2,
          expected=[
              [[[b'a000', b'a001'], [], [b'c000']],
               [[b'a010'], [b'b010', b'b011'], [b'c010']]],
              [[[b'a100', b'a101', b'a102'], [b'b100', b'b101'], []],
               [[b'a110', b'a111'], [b'b110'], [b'c110', b'c111']]]]),
      dict(
          descr='Three rank-3 inputs (ragged_rank=2), axis=-1',
          rt_inputs=(
              [[['a000', 'a001'], ['a010']]],
              [[['b000', 'b001'], ['b010']]],
              [[['c000', 'c001'], ['c010']]]),
          axis=-1,
          expected=[[
              [[b'a000', b'b000', b'c000'], [b'a001', b'b001', b'c001']],
              [[b'a010', b'b010', b'c010']]]]),
      dict(
          descr='ragged_stack([uniform, ragged, uniform], axis=1)',
          ragged_ranks=[0, 1, 0],
          rt_inputs=(
              [['0('], ['1('], ['2(']],                     # shape=(3, 1)
              [['b00'], ['b10', 'b11', 'b12'], ['b20']],    # shape=(3, None)
              [[')0'], [')1'], [')2']]),                    # shape=(3, 1)
          axis=1,
          expected=[
              [[b'0('], [b'b00'], [b')0']],
              [[b'1('], [b'b10', b'b11', b'b12'], [b')1']],
              [[b'2('], [b'b20'], [b')2']]]),
      dict(
          descr='ragged_stack([uniform, uniform], axis=0)',
          ragged_ranks=[0, 0],
          rt_inputs=(
              [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']],  # shape=(3, 2)
              [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]),  # shape=(2, 3)
          axis=0,
          expected=[
              [[b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21']],
              [[b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]]),
      dict(
          descr='ragged_stack([uniform, ragged], axis=0)',
          ragged_ranks=[0, 1],
          rt_inputs=(
              [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']],  # shape=(3, 2)
              [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]),  # shape=(2, 3)
          axis=0,
          expected=[
              [[b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21']],
              [[b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]]),
      dict(
          descr='ragged_stack([uniform, ragged], axis=0) with rank-3 inputs',
          ragged_ranks=[0, 2],
          rt_inputs=(
              [[[0, 1], [2, 3]], [[4, 5], [6, 7]]],  # shape = (2, 2, 2)
              [[[8], [8, 8]]]),                      # shape = (2, None, None)
          axis=0,
          expected=[[[[0, 1], [2, 3]], [[4, 5], [6, 7]]], [[[8], [8, 8]]]]),
      dict(
          descr='Two rank-3 inputs with ragged_rank=1, axis=-1',
          ragged_ranks=[1, 1],
          rt_inputs=(
              [[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]],
              [[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]),
          axis=-1,
          expected=[
              [[[0, 9], [1, 8]], [[2, 7], [3, 6]], [[4, 5], [5, 4]]],
              [],
              [[[6, 3], [7, 2]], [[8, 1], [9, 0]]]],
          expected_shape=[3, None, 2, 2]),
      dict(
          descr='Two rank-3 inputs with ragged_rank=1, axis=-2',
          ragged_ranks=[1, 1],
          rt_inputs=(
              [[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]],
              [[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]),
          axis=-2,
          expected=[
              [[[0, 1], [9, 8]], [[2, 3], [7, 6]], [[4, 5], [5, 4]]], [],
              [[[6, 7], [3, 2]], [[8, 9], [1, 0]]]]),
      dict(
          descr='ragged_stack([vector, vector], axis=0)',
          ragged_ranks=[0, 0],
          rt_inputs=([1, 2, 3], [4, 5, 6]),
          axis=0,
          expected=[[1, 2, 3], [4, 5, 6]]),
      dict(
          descr='One input (so just adds an outer dimension)',
          rt_inputs=([['a00', 'a01'], [], ['a20', 'a21']],),
          axis=0,
          expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']]]),
  ) # pyformat: disable
  def testRaggedStack(self,
                      descr,
                      rt_inputs,
                      axis,
                      expected,
                      ragged_ranks=None,
                      expected_ragged_rank=None,
                      expected_shape=None):
    """Stacks `rt_inputs` along `axis` and compares against `expected`."""
    if ragged_ranks is None:
      ragged_ranks = [None] * len(rt_inputs)
    # Convert each nested list to a RaggedTensor, or to a dense Tensor when
    # its requested ragged_rank is 0.
    rt_inputs = [
        ragged_factory_ops.constant(rt_input, ragged_rank=rrank) # pylint: disable=g-long-ternary
        if rrank != 0 else constant_op.constant(rt_input)
        for (rt_input, rrank) in zip(rt_inputs, ragged_ranks)
    ]
    stacked = ragged_array_ops.stack(rt_inputs, axis)
    # Optional static-metadata checks in addition to the value check below.
    if expected_ragged_rank is not None:
      self.assertEqual(stacked.ragged_rank, expected_ragged_rank)
    if expected_shape is not None:
      self.assertEqual(stacked.shape.as_list(), expected_shape)
    self.assertRaggedEqual(stacked, expected)
  @parameterized.parameters(
      dict(
          rt_inputs=(),
          axis=0,
          error=ValueError,
          message=r'rt_inputs may not be empty\.'),
      dict(
          rt_inputs=([[1, 2]], [[3, 4]]),
          axis=r'foo',
          error=TypeError,
          message='axis must be an int'),
      dict(
          rt_inputs=([[1, 2]], [[3, 4]]),
          axis=-4,
          error=ValueError,
          message='axis=-4 out of bounds: expected -3<=axis<3'),
      dict(
          rt_inputs=([[1, 2]], [[3, 4]]),
          axis=3,
          error=ValueError,
          message='axis=3 out of bounds: expected -3<=axis<3'),
  )
  def testError(self, rt_inputs, axis, error, message):
    """Checks that invalid arguments raise the expected error and message."""
    self.assertRaisesRegexp(error, message, ragged_array_ops.stack, rt_inputs,
                            axis)
  def testSingleTensorInput(self):
    """Tests ragged_stack with a single tensor input.
    Usually, we pass a list of values in for rt_inputs. However, you can
    also pass in a single value (as with tf.stack), in which case it is
    equivalent to expand_dims(axis=0). This test exercises that path.
    """
    rt_inputs = ragged_factory_ops.constant([[1, 2], [3, 4]])
    stacked = ragged_array_ops.stack(rt_inputs, 0)
    self.assertRaggedEqual(stacked, [[[1, 2], [3, 4]]])
# Standard TensorFlow test entry point: run the tests above when this module
# is executed directly.
if __name__ == '__main__':
  googletest.main()
| |
from distutils import spawn
from isign_base_test import IsignBaseTest
import logging
from nose.plugins.skip import SkipTest
import os
from os.path import join
import platform
import re
import shutil
import subprocess
import tempfile
import zipfile
# Absolute path to Apple's `codesign` tool, or None when it isn't on PATH
# (tests below skip themselves when it is unavailable).
CODESIGN_BIN = spawn.find_executable('codesign')
# Module-level logger, named after this module.
log = logging.getLogger(__name__)
class TestVersusApple(IsignBaseTest):
    """Resign test apps, then use Apple's `codesign` tool to verify that the
    results look correctly signed.

    These tests only run on Darwin with `codesign` installed; otherwise they
    raise SkipTest. Inherited members used here (e.g. ERROR_KEY, OU, TEST_APP,
    resign) come from IsignBaseTest.
    """

    def codesign_display(self, path):
        """ inspect a path with codesign and return the parsed output dict """
        cmd = [CODESIGN_BIN, '-d', '-r-', '--verbose=20', path]
        # n.b. codesign may print things to STDERR, or STDOUT, depending
        # on exactly what you're extracting. I KNOW RIGHT? So we merge the
        # streams and parse the combined output.
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _ = proc.communicate()
        assert proc.returncode == 0, "Return code not 0"
        return self.codesign_display_parse(out)

    def codesign_display_parse(self, out):
        """
        Parse codesign output into a dict.
        The output format is XML-like, in that it's a tree of nodes of
        varying types (including key-val pairs). We are assuming that
        it never gets more than 1 level deep (so, "array line" is just
        a special case here).

        Returns a dict mapping each key to a *list* of values (codesign can
        repeat keys); unparseable lines are collected under self.ERROR_KEY.
        """
        # designated => identifier "com.lyft.ios.enterprise.dev" and anchor...
        text_line = re.compile(r'^(\w[\w\s.]+) => (.*)$')
        # CodeDirectory v=20200 size=79151 flags=0x0(none) hashes=3948+5 ...
        props_line = re.compile(r'^(\w[\w\s.]+)\s+((?:\w+=\S+\s*)+)$')
        # Signed Time=May 14, 2015, 7:12:25 PM
        # Info.plist=not bound
        single_prop_line = re.compile(r'(\w[\w\s.]+)=(.*)$')
        # this assumes we only have one level of sub-arrays
        # -3=969d263f74a5755cd3b4bede3f9e90c9fb0b7bca
        array_line = re.compile(r'\s+(-?\d+)=(.*)$')
        # last node assigned - used for appending sub-arrays, if encountered
        last = None
        ret = {}
        for line in out.splitlines():
            key = None
            val = None
            text_match = text_line.match(line)
            props_match = props_line.match(line)
            sp_match = single_prop_line.match(line)
            array_match = array_line.match(line)
            if text_match:
                key = text_match.group(1)
                val = text_match.group(2)
            elif props_match:
                # A line of space-separated k=v pairs becomes a nested dict.
                key = props_match.group(1)
                val = {}
                pairs = re.split(r'\s+', props_match.group(2))
                for pair in pairs:
                    if not pair:
                        # trailing whitespace in the props group yields an
                        # empty token; skip rather than crash on a
                        # non-matching re.match below
                        continue
                    pairmatch = re.match(r'(\w+)=(\S+)', pair)
                    pairkey = pairmatch.group(1)
                    pairval = pairmatch.group(2)
                    val[pairkey] = pairval
            elif sp_match:
                key = sp_match.group(1)
                val = sp_match.group(2)
            elif array_match:
                # Sub-array entries attach to the previously parsed node
                # under the '_' key (assumes `last` is a dict; codesign
                # emits the parent line first).
                if '_' not in last:
                    last['_'] = {}
                akey = array_match.group(1)
                aval = array_match.group(2)
                last['_'][akey] = aval
            else:
                # probably an error of some kind. These
                # get appended into the output too. :(
                if self.ERROR_KEY not in ret:
                    ret[self.ERROR_KEY] = []
                ret[self.ERROR_KEY].append(line)
            if key is not None:
                if key not in ret:
                    ret[key] = []
                ret[key].append(val)
                last = val
        return ret

    def get_dict_with_key(self, x, key):
        """ check a list for a dict that has a key; return its value or None """
        # e.g. if x = [ { 'a': 1 }, { 'b': 2 }]
        # and key = 'a'
        # return 1
        for item in x:
            if key in item:
                return item[key]
        return None

    def assert_common_signed_properties(self, info):
        """ assert properties that any correctly-signed object must have """
        # has an executable
        assert 'Executable' in info
        # has an identifier
        assert 'Identifier' in info
        # has a codedirectory, embedded
        assert 'CodeDirectory' in info
        codedirectory_info = info['CodeDirectory'][0]
        assert codedirectory_info['location'] == 'embedded'
        # has a set of hashes
        assert 'Hash' in info
        hashes = self.get_dict_with_key(info['Hash'], '_')
        assert hashes is not None
        # seal hash
        assert 'CDHash' in info
        # signed
        assert 'Signature' in info
        assert 'Authority' in info
        # The following only works with a cert signed by apple
        #
        # if isinstance(info['Authority'], list):
        #     authorities = info['Authority']
        # else:
        #     authorities = [info['Authority']]
        # assert 'Apple Root CA' in authorities
        assert 'Info.plist' in info
        assert self.get_dict_with_key(info['Info.plist'], 'entries') is not None
        assert 'TeamIdentifier' in info
        # TODO get this from an arg
        assert info['TeamIdentifier'][0] == self.OU
        assert 'designated' in info
        assert 'anchor apple generic' in info['designated'][0]
        # should have no errors
        assert self.ERROR_KEY not in info

    def assert_common_signed_hashes(self, info, start_index, end_index):
        """ assert hash slots start_index..end_index (inclusive) exist;
            return the hash dict """
        assert 'Hash' in info
        hashes = self.get_dict_with_key(info['Hash'], '_')
        assert hashes is not None
        for i in range(start_index, end_index + 1):
            assert str(i) in hashes
        return hashes

    def assert_hashes_for_signable(self, info, hashes_to_check):
        """ check that various hashes look right. """
        # Most of the hashes in the Hash section are hashes of blocks of the
        # object code in question. These all have positive subscripts.
        # But the "special" slots use negative numbers, and
        # are hashes of:
        #   -5 Embedded entitlement configuration slot
        #   -4 App-specific slot (in all the examples we know of, all zeroes)
        #   -3 Resource Directory slot
        #   -2 Requirements slot
        #   -1 Info.plist slot
        # For more info, see codedirectory.h in Apple open source, e.g.
        # http://opensource.apple.com/source/libsecurity_codesigning/
        #   libsecurity_codesigning-55032/lib/codedirectory.h
        assert 'Hash' in info
        hashes = self.get_dict_with_key(info['Hash'], '_')
        assert hashes is not None
        for i in hashes_to_check:
            key = str(i)
            assert key in hashes
            # a requested slot hash must be present and non-zero
            assert int(hashes[key], 16) != 0

    def assert_matching_identifier(self, app_path, expected):
        """ assert the signed app at app_path has the expected identifier """
        info = self.codesign_display(app_path)
        identifier = info['Identifier'][0]
        assert identifier == expected

    def check_bundle(self, path):
        """ look at info for bundles (apps and frameworks) """
        info = self.codesign_display(path)
        self.assert_common_signed_properties(info)
        # bundles carry a resource seal, unlike bare dylibs
        assert 'Sealed Resources' in info
        self.assert_hashes_for_signable(info, [-5, -3, -2, -1])
        # TODO subject.CN from cert?

    def check_dylib(self, path):
        """ look at info for a bare dynamic library """
        info = self.codesign_display(path)
        self.assert_common_signed_properties(info)
        # dylibs have no resource directory or entitlements slots
        self.assert_hashes_for_signable(info, [-2, -1])

    def test_override_identifier(self):
        """ Resign an app with identifiers of varying lengths, test that
            they were signed correctly with the new identifier """
        # skip if this isn't a Mac with codesign installed
        if platform.system() != 'Darwin' or CODESIGN_BIN is None:
            raise SkipTest
        old_cwd = os.getcwd()
        info = self.codesign_display(self.TEST_APP)
        original_id = info['Identifier'][0]
        # Make sure our original ID is long enough to test shorter bundle ids
        assert len(original_id) >= 6
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        while len(alphabet) <= len(original_id):
            alphabet += alphabet
        # Test with a shorter bundle ID.
        # NOTE: use floor division so the slice index stays an int on
        # Python 3 (plain `/` would produce a float and raise TypeError).
        short_id = alphabet[0:len(original_id) // 2 + 1]
        working_dir = tempfile.mkdtemp()
        os.chdir(working_dir)
        resigned_app_path = join(working_dir, 'Short.app')
        self.resign(self.TEST_APP,
                    output_path=resigned_app_path,
                    info_props={
                        'CFBundleIdentifier': short_id
                    })
        self.assert_matching_identifier(resigned_app_path, short_id)
        shutil.rmtree(working_dir)
        # Test with a longer bundle ID
        long_id = alphabet[0:len(original_id) + 1]
        working_dir = tempfile.mkdtemp()
        os.chdir(working_dir)
        resigned_app_path = join(working_dir, 'Long.app')
        self.resign(self.TEST_APP,
                    output_path=resigned_app_path,
                    info_props={
                        'CFBundleIdentifier': long_id
                    })
        self.assert_matching_identifier(resigned_app_path, long_id)
        shutil.rmtree(working_dir)
        os.chdir(old_cwd)

    def test_app(self):
        """ Extract a resigned app with frameworks, analyze if some expected
            things about them are true """
        # skip if this isn't a Mac with codesign installed
        if platform.system() != 'Darwin' or CODESIGN_BIN is None:
            raise SkipTest
        # resign the test app that has frameworks, extract it to a temp directory
        working_dir = tempfile.mkdtemp()
        resigned_ipa_path = join(working_dir, 'resigned.ipa')
        self.resign(self.TEST_WITH_FRAMEWORKS_IPA,
                    output_path=resigned_ipa_path)
        old_cwd = os.getcwd()
        os.chdir(working_dir)
        with zipfile.ZipFile(resigned_ipa_path) as zf:
            zf.extractall()
        # expected path to app
        # When we ask for codesign to analyze the app directory, it
        # will default to showing info for the main executable
        app_path = join(working_dir, 'Payload/isignTestApp.app')
        self.check_bundle(app_path)
        # Now we do similar tests for a dynamic library, linked to the
        # main executable.
        dylib_path = join(app_path, 'Frameworks', 'libswiftCore.dylib')
        self.check_dylib(dylib_path)
        # Now we do similar tests for a framework
        framework_path = join(app_path, 'Frameworks', 'FontAwesome_swift.framework')
        self.check_bundle(framework_path)
        shutil.rmtree(working_dir)
        os.chdir(old_cwd)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2010 matt
# Copyright (c) 2010 Dieter Plaetinck
# Copyright (c) 2010, 2012 roger
# Copyright (c) 2011-2012 Florian Mounier
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011 Timo Schmiade
# Copyright (c) 2012 Mikkel Oscar Lyderik
# Copyright (c) 2012, 2014 Tycho Andersen
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Tom Hunt
# Copyright (c) 2014 Justin Bronder
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# depends on python-mpd
# TODO: check if UI hangs in case of network issues and such
# TODO: some kind of templating to make shown info configurable
# TODO: best practice to handle failures? just write to stderr?
from __future__ import division
import re
import time
import mpd
from .. import utils, pangocffi
from . import base
from libqtile.log_utils import logger
class Mpd(base.ThreadPoolText):
    """A widget for the Music Player Daemon (MPD)

    Initialize the widget with the following parameters

    Parameters
    ==========
    host :
        host to connect to
    port :
        port to connect to
    password :
        password to use
    fmt_playing :
        format string to display when playing/paused
    fmt_stopped :
        format strings to display when stopped
    msg_nc :
        which message to show when we're not connected
    do_color_progress :
        whether to indicate progress in song by altering message color
    width :
        A fixed width, or bar.CALCULATED to calculate the width automatically
        (which is recommended).
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ("foreground_progress", "ffffff", "Foreground progress colour"),
        ('reconnect', False, 'attempt to reconnect if initial connection failed'),
        ('reconnect_interval', 1, 'Time to delay between connection attempts.'),
        ('update_interval', 0.5, 'Update Time in seconds.')
    ]

    # TODO: have this use our config framework
    def __init__(self, host='localhost', port=6600,
                 password=False, fmt_playing="%a - %t [%v%%]",
                 fmt_stopped="Stopped [%v%%]", msg_nc='Mpd off',
                 do_color_progress=True, **config):
        super(Mpd, self).__init__(msg_nc, **config)
        self.host = host
        self.port = port
        self.password = password
        self.fmt_playing, self.fmt_stopped = fmt_playing, fmt_stopped
        self.msg_nc = msg_nc
        self.do_color_progress = do_color_progress
        # volume step (in percent) used by scroll/extra mouse buttons
        self.inc = 2
        self.add_defaults(Mpd.defaults)
        self.client = mpd.MPDClient()
        self.connected = False
        # set to True by finalize() to make poll()'s reconnect loop exit
        self.stop = False

    def finalize(self):
        """Shut the widget down, waking up any blocked idle call first."""
        self.stop = True
        if self.connected:
            try:
                # The volume settings is kind of a dirty trick. There doesn't
                # seem to be a decent way to set a timeout for the idle
                # command. Therefore we need to trigger some events such that
                # if poll() is currently waiting on an idle event it will get
                # something so that it can exit. In practice, I can't tell the
                # difference in volume and hopefully no one else can either.
                self.client.volume(1)
                self.client.volume(-1)
                self.client.disconnect()
            except Exception:
                # Best-effort cleanup: the daemon may already be gone.
                # (Was a bare `except:`, which would also swallow
                # SystemExit/KeyboardInterrupt.)
                pass
        base._Widget.finalize(self)

    def connect(self, quiet=False):
        """Connect (and optionally authenticate) to MPD.

        Returns True on success, False otherwise. When `quiet` is set,
        connection failures are not logged (used by the reconnect loop).
        """
        if self.connected:
            return True
        try:
            self.client.connect(host=self.host, port=self.port)
        except Exception:
            if not quiet:
                logger.exception('Failed to connect to mpd')
            return False
        if self.password:
            try:
                self.client.password(self.password)
            except Exception:
                logger.warning('Authentication failed. Disconnecting')
                try:
                    self.client.disconnect()
                except Exception:
                    pass
                # BUG FIX: previously this fell through and reported success
                # even though we had just disconnected, leaving
                # self.connected True on a dead connection.
                return False
        self.connected = True
        return True

    def _configure(self, qtile, bar):
        super(Mpd, self)._configure(qtile, bar)
        # markup=True so the colored-progress <span> in _get_status renders
        self.layout = self.drawer.textlayout(
            self.text,
            self.foreground,
            self.font,
            self.fontsize,
            self.fontshadow,
            markup=True
        )

    def to_minutes_seconds(self, stime):
        """Takes an integer time in seconds, transforms it into
        (HH:)?MM:SS. HH portion is only visible if total time is greater
        than an hour.
        """
        if not isinstance(stime, int):
            # MPD reports times as strings; coerce once here
            stime = int(stime)
        mm = stime // 60
        ss = stime % 60
        if mm >= 60:
            hh = mm // 60
            mm = mm % 60
            rv = "{}:{:02}:{:02}".format(hh, mm, ss)
        else:
            rv = "{}:{:02}".format(mm, ss)
        return rv

    # --- accessors for the format-code table below; all read self.status /
    # --- self.song, which _get_status() refreshes before formatting

    def get_artist(self):
        return self.song['artist']

    def get_album(self):
        return self.song['album']

    def get_elapsed(self):
        # status['time'] is "elapsed:total"
        elapsed = self.status['time'].split(':')[0]
        return self.to_minutes_seconds(elapsed)

    def get_file(self):
        return self.song['file']

    def get_length(self):
        return self.to_minutes_seconds(self.song['time'])

    def get_number(self):
        # 1-based position of the current song in the playlist
        return str(int(self.status['song']) + 1)

    def get_playlistlength(self):
        return self.status['playlistlength']

    def get_status(self):
        """Short play-state symbol (returns None for unknown states)."""
        n = self.status['state']
        if n == "play":
            return "->"
        elif n == "pause":
            return "||"
        elif n == "stop":
            return "[]"

    def get_longstatus(self):
        """Long play-state word (returns None for unknown states)."""
        n = self.status['state']
        if n == "play":
            return "Playing"
        elif n == "pause":
            return "Paused"
        elif n == "stop":
            return "Stopped"

    def get_title(self):
        return self.song['title']

    def get_track(self):
        # This occasionally has leading zeros we don't want.
        return str(int(self.song['track'].split('/')[0]))

    def get_volume(self):
        return self.status['volume']

    def get_single(self):
        if self.status['single'] == '1':
            return '1'
        else:
            return '_'

    def get_repeat(self):
        if self.status['repeat'] == '1':
            return 'R'
        else:
            return '_'

    def get_shuffle(self):
        if self.status['random'] == '1':
            return 'S'
        else:
            return '_'

    # format-code -> accessor dispatch table used by do_format()
    formats = {
        'a': get_artist, 'A': get_album, 'e': get_elapsed,
        'f': get_file, 'l': get_length, 'n': get_number,
        'p': get_playlistlength, 's': get_status, 'S': get_longstatus,
        't': get_title, 'T': get_track, 'v': get_volume, '1': get_single,
        'r': get_repeat, 'h': get_shuffle, '%': lambda x: '%',
    }

    def match_check(self, m):
        """Resolve one %-code; unknown codes render as "(nil)"."""
        try:
            return self.formats[m.group(1)](self)
        except KeyError:
            return "(nil)"

    def do_format(self, string):
        return re.sub("%(.)", self.match_check, string)

    def _get_status(self):
        """Query MPD and render the widget text (with optional color
        progress markup); falls back to msg_nc on error."""
        playing = self.msg_nc
        try:
            self.status = self.client.status()
            self.song = self.client.currentsong()
            if self.status['state'] != 'stop':
                text = self.do_format(self.fmt_playing)
                if (self.do_color_progress and
                        self.status and
                        self.status.get('time', None)):
                    elapsed, total = self.status['time'].split(':')
                    percent = float(elapsed) / float(total)
                    # color the first `progress` characters to show position
                    progress = int(percent * len(text))
                    playing = '<span color="%s">%s</span>%s' % (
                        utils.hex(self.foreground_progress),
                        pangocffi.markup_escape_text(text[:progress]),
                        pangocffi.markup_escape_text(text[progress:])
                    )
                else:
                    playing = pangocffi.markup_escape_text(text)
            else:
                playing = self.do_format(self.fmt_stopped)
        except Exception:
            logger.exception('Mpd error on update')
        return playing

    def poll(self):
        """Blocking poll: (re)connect if needed, wait for an MPD idle
        event, then return the freshly formatted text."""
        was_connected = self.connected
        if not self.connected:
            if self.reconnect:
                while not self.stop and not self.connect(quiet=True):
                    time.sleep(self.reconnect_interval)
            else:
                return
        if self.stop:
            return
        if was_connected:
            try:
                # block until something changes server-side (finalize()
                # nudges the volume to break us out of this wait)
                self.client.send_idle()
                self.client.fetch_idle()
            except mpd.ConnectionError:
                self.client.disconnect()
                self.connected = False
                return self.msg_nc
            except Exception:
                logger.exception('Error communicating with mpd')
                self.client.disconnect()
                return
        return self._get_status()

    def button_press(self, x, y, button):
        """Mouse bindings: 3=play/pause, 4/5=prev/next, 8/9=volume -/+."""
        if not self.connect():
            return False
        try:
            status = self.client.status()
            if button == 3:
                if not status:
                    self.client.play()
                else:
                    self.client.pause()
            elif button == 4:
                self.client.previous()
            elif button == 5:
                self.client.next()
            elif button == 8:
                if status:
                    self.client.setvol(
                        max(int(status['volume']) - self.inc, 0)
                    )
            elif button == 9:
                if status:
                    self.client.setvol(
                        min(int(status['volume']) + self.inc, 100)
                    )
        except Exception:
            logger.exception('Mpd error on click')
| |
"""
Confirmation handling API that helps you get the whole confirm/pending/verify
process correct. It doesn't implement any handlers, but what it does do is
provide the logic for doing the following:
* Take an email, put it in a "pending" queue, and then send out a confirm
email with a strong random id.
* Store the pending message ID and the random secret someplace for later
verification.
* Verify an incoming email against the expected ID, and get back the
original.
You then just work this into your project's state flow, write your own
templates, and possibly write your own storage.
"""
import uuid
from lamson import queue, view
from email.utils import parseaddr
class ConfirmationStorage(object):
    """
    This is the basic confirmation storage. For simple testing purposes
    you can just use the default in-memory dict. If you do a deployment
    you can probably get away with a shelf hash instead.
    You can write your own version of this and use it. The confirmation engine
    only cares that it gets something that supports all of these methods.
    """
    def __init__(self, db=None):
        """
        Change the db parameter to a shelf to get persistent storage.

        BUG FIX: the original signature used a mutable default (``db={}``),
        so every default-constructed ConfirmationStorage silently shared one
        dict across instances. Each instance now gets its own dict; passing
        an explicit mapping (e.g. a shelf) behaves exactly as before.
        """
        self.confirmations = {} if db is None else db

    def clear(self):
        """
        Used primarily in testing, this clears out all pending confirmations.
        """
        self.confirmations.clear()

    def key(self, target, from_address):
        """
        Used internally to construct a string key, if you write
        your own you don't need this.
        NOTE: To support proper equality and shelve storage, this encodes the
        key into ASCII. Make a different subclass if you need unicode and your
        storage supports it.
        """
        key = target + ':' + from_address
        return key.encode('ascii')

    def get(self, target, from_address):
        """
        Given a target and a from address, this returns a tuple of
        (expected_secret, pending_message_id).
        If it doesn't find that target+from_address, then it should return a
        (None, None) tuple.
        """
        return self.confirmations.get(self.key(target, from_address), (None, None))

    def delete(self, target, from_address):
        """
        Removes a target+from_address from the storage. Deleting a missing
        entry is a no-op.
        """
        try:
            del self.confirmations[self.key(target, from_address)]
        except KeyError:
            pass

    def store(self, target, from_address, expected_secret, pending_message_id):
        """
        Given a target, from_address it will store the expected_secret and
        pending_message_id for later verification. The target should be a
        string indicating what is being confirmed. Like "subscribe", "post",
        etc.
        When implementing your own you should *never* allow more than one
        target+from_address combination.
        """
        self.confirmations[self.key(target, from_address)] = (expected_secret,
                                                              pending_message_id)
class ConfirmationEngine(object):
"""
The confirmation engine is what does the work of sending a confirmation,
and verifying that it was confirmed properly. In order to use it you
have to construct the ConfirmationEngine (usually in config/settings.py) and
you write your confirmation message templates for sending.
The primary methods you use are ConfirmationEngine.send and ConfirmationEngine.verify.
"""
def __init__(self, pending_queue, storage):
"""
The pending_queue should be a string with the path to the lamson.queue.Queue
that will store pending messages. These messages are the originals the user
sent when they tried to confirm.
Storage should be something that is like ConfirmationStorage so that this
can store things for later verification.
"""
self.pending = queue.Queue(pending_queue)
self.storage = storage
def get_pending(self, pending_id):
"""
Returns the pending message for the given ID.
"""
return self.pending.get(pending_id)
def push_pending(self, message):
"""
Puts a pending message into the pending queue.
"""
return self.pending.push(message)
def delete_pending(self, pending_id):
"""
Removes the pending message from the pending queue.
"""
self.pending.remove(pending_id)
def cancel(self, target, from_address, expect_secret):
"""
Used to cancel a pending confirmation.
"""
name, addr = parseaddr(from_address)
secret, pending_id = self.storage.get(target, addr)
if secret == expect_secret:
self.storage.delete(target, addr)
self.delete_pending(pending_id)
def make_random_secret(self):
"""
Generates a random uuid as the secret, in hex form.
"""
return uuid.uuid4().hex
def register(self, target, message):
"""
Don't call this directly unless you know what you are doing.
It does the job of registering the original message and the
expected confirmation into the storage.
"""
from_address = message.route_from
pending_id = self.push_pending(message)
secret = self.make_random_secret()
self.storage.store(target, from_address, secret, pending_id)
return "%s-confirm-%s" % (target, secret)
def verify(self, target, from_address, expect_secret):
"""
Given a target (i.e. "subscribe", "post", etc), a from_address
of someone trying to confirm, and the secret they should use, this
will try to verify their confirmation. If the verify works then
you'll get the original message back to do what you want with.
If the verification fails then you are given None.
The message is *not* deleted from the pending queue. You can do
that yourself with delete_pending.
"""
assert expect_secret, "Must give an expected ID number."
name, addr = parseaddr(from_address)
secret, pending_id = self.storage.get(target, addr)
if secret == expect_secret:
self.storage.delete(target, addr)
return self.get_pending(pending_id)
else:
return None
def send(self, relay, target, message, template, vars):
"""
This is the method you should use to send out confirmation messages.
You give it the relay, a target (i.e. "subscribe"), the message they
sent requesting the confirm, your confirmation template, and any
vars that template needs.
The result of calling this is that the template message gets sent through
the relay, the original message is stored in the pending queue, and
data is put into the storage for later calls to verify.
"""
confirm_address = self.register(target, message)
vars.update(locals())
msg = view.respond(vars, template, To=message['from'],
From="%(confirm_address)s@%(host)s",
Subject="Confirmation required")
msg['Reply-To'] = "%(confirm_address)s@%(host)s" % vars
relay.deliver(msg)
def clear(self):
    """
    Empty both the pending queue and the confirmation storage.  Used in
    testing to guarantee a clean slate.
    """
    for container in (self.pending, self.storage):
        container.clear()
| |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import IECore
import IECoreMaya
import maya.cmds
import unittest
class SceneShapeTest( IECoreMaya.TestCase ) :

	"""Tests for the ieSceneShape Maya node: plug computation, plug values
	(static and animated), attribute queries and tag queries, all driven by
	SceneCache (.scc) files written by the helpers below."""

	# Scratch cache files; written by individual tests, removed in tearDown().
	__testFile = "test/test.scc"
	__testPlugFile = "test/testPlug.scc"
	__testPlugAnimFile = "test/testPlugAnim.scc"
	__testPlugAttrFile = "test/testPlugAttr.scc"

	def setUp( self ) :

		# Start each test from a fresh, empty Maya scene.
		maya.cmds.file( new=True, f=True )

	def writeSCC( self, file, rotation=IECore.V3d( 0, 0, 0 ), time=0 ) :

		# Writes a three-deep hierarchy /1/2/3.  Each level holds a unit box
		# and a transform translated by (level, 0, 0), optionally rotated.
		scene = IECore.SceneCache( file, IECore.IndexedIO.OpenMode.Write )

		sc = scene.createChild( str( 1 ) )
		mesh = IECore.MeshPrimitive.createBox(IECore.Box3f(IECore.V3f(0),IECore.V3f(1)))
		sc.writeObject( mesh, time )
		matrix = IECore.M44d.createTranslated( IECore.V3d( 1, 0, 0 ) )
		matrix = matrix.rotate( rotation )
		sc.writeTransform( IECore.M44dData( matrix ), time )

		sc = sc.createChild( str( 2 ) )
		mesh = IECore.MeshPrimitive.createBox(IECore.Box3f(IECore.V3f(0),IECore.V3f(1)))
		sc.writeObject( mesh, time )
		matrix = IECore.M44d.createTranslated( IECore.V3d( 2, 0, 0 ) )
		matrix = matrix.rotate( rotation )
		sc.writeTransform( IECore.M44dData( matrix ), time )

		sc = sc.createChild( str( 3 ) )
		mesh = IECore.MeshPrimitive.createBox(IECore.Box3f(IECore.V3f(0),IECore.V3f(1)))
		sc.writeObject( mesh, time )
		matrix = IECore.M44d.createTranslated( IECore.V3d( 3, 0, 0 ) )
		matrix = matrix.rotate( rotation )
		sc.writeTransform( IECore.M44dData( matrix ), time )

		return scene

	def writeAnimSCC( self, file ) :

		# Extends writeSCC with animated transforms: each level's Y
		# translation follows the sample time.  sc2 also re-writes its
		# object at every sample.
		scene = self.writeSCC( file )
		sc1 = scene.child( str( 1 ) )
		sc2 = sc1.child( str( 2 ) )
		sc3 = sc2.child( str( 3 ) )
		mesh = IECore.MeshPrimitive.createBox(IECore.Box3f(IECore.V3f(0),IECore.V3f(1)))

		for time in [ 0.5, 1, 1.5, 2, 5, 10 ] :
			matrix = IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) )
			sc1.writeTransform( IECore.M44dData( matrix ), time )
			sc2.writeObject( mesh, time )
			matrix = IECore.M44d.createTranslated( IECore.V3d( 2, time, 0 ) )
			sc2.writeTransform( IECore.M44dData( matrix ), time )
			matrix = IECore.M44d.createTranslated( IECore.V3d( 3, time, 0 ) )
			sc3.writeTransform( IECore.M44dData( matrix ), time )

		return scene

	def writeAttributeSCC( self, file ) :

		# Extends writeSCC with a mix of attribute types at time 0, so the
		# attribute-query plugs can be exercised per location and per type.
		scene = self.writeSCC( file )
		sc1 = scene.child( str( 1 ) )
		sc2 = sc1.child( str( 2 ) )
		sc3 = sc2.child( str( 3 ) )

		sc1.writeAttribute( "boolAttr", IECore.BoolData( True ), 0.0 )
		sc1.writeAttribute( "floatAttr", IECore.FloatData( 5.20 ), 0.0 )
		sc2.writeAttribute( "boolAttr", IECore.BoolData( False ), 0.0 )
		sc2.writeAttribute( "floatAttr", IECore.FloatData( 2.0 ), 0.0 )
		sc3.writeAttribute( "intAttr", IECore.IntData( 12 ), 0.0 )
		sc3.writeAttribute( "strAttr", IECore.StringData( "blah" ), 0.0 )

		return scene

	def writeTagSCC( self, file ) :

		# Extends writeSCC with one distinct tag per hierarchy level.
		scene = self.writeSCC( file )
		sc1 = scene.child( str( 1 ) )
		sc2 = sc1.child( str( 2 ) )
		sc3 = sc2.child( str( 3 ) )

		sc1.writeTags( [ "a" ] )
		sc2.writeTags( [ "b" ] )
		sc3.writeTags( [ "c" ] )

		return scene

	def testComputePlugs( self ) :

		# Output plugs must be computed lazily: only the array elements we
		# actually pull (or connect) should appear in the multi-index lists.
		self.writeSCC( file = SceneShapeTest.__testFile )

		maya.cmds.file( new=True, f=True )
		node = maya.cmds.createNode( 'ieSceneShape' )
		maya.cmds.setAttr( node+'.file', SceneShapeTest.__testFile,type='string' )
		maya.cmds.setAttr( node+'.root',"/",type='string' )

		# Nothing queried yet: no output elements exist.
		self.assertEqual( maya.cmds.getAttr( node+".outTransform", mi=True ), None)
		self.assertEqual( maya.cmds.getAttr( node+".outBound", mi=True ), None)
		self.assertEqual( maya.cmds.getAttr( node+".outObjects", mi=True ), None)

		# Setting query paths alone must not trigger any computation either.
		maya.cmds.setAttr( node+".queryPaths[0]", "/1", type="string")
		maya.cmds.setAttr( node+".queryPaths[1]", "/1/2", type="string")
		maya.cmds.setAttr( node+".queryPaths[2]", "/1/2/3", type="string")

		self.assertEqual( maya.cmds.getAttr( node+".outTransform", mi=True ), None)
		self.assertEqual( maya.cmds.getAttr( node+".outBound", mi=True ), None)
		self.assertEqual( maya.cmds.getAttr( node+".outObjects", mi=True ), None)

		# Check only the plugs we trigger get computed
		maya.cmds.getAttr( node+".outTransform[0].outTranslate" )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform", mi=True ), [0])
		self.assertEqual( maya.cmds.getAttr( node+".outBound", mi=True ), None)
		self.assertEqual( maya.cmds.getAttr( node+".outObjects", mi=True ), None)

		maya.cmds.getAttr( node+".outTransform[2].outTranslate" )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform", mi=True ), [0, 2])
		self.assertEqual( maya.cmds.getAttr( node+".outBound", mi=True ), None)
		self.assertEqual( maya.cmds.getAttr( node+".outObjects", mi=True ), None)

		# Pulling a bound and connecting an object output creates just those
		# elements.
		maya.cmds.getAttr( node+".outTransform[1].outTranslate" )
		maya.cmds.getAttr( node+".outBound[1].outBoundCenter" )
		mesh = maya.cmds.createNode("mesh")
		maya.cmds.connectAttr( node+'.outObjects[2]', mesh+".inMesh" )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform", mi=True ), [0, 1, 2])
		self.assertEqual( maya.cmds.getAttr( node+".outBound", mi=True ), [1])
		self.assertEqual( maya.cmds.getAttr( node+".outObjects", mi=True ), [2])

		# Adding a further, unqueried path must leave the outputs untouched.
		maya.cmds.setAttr( node+".queryPaths[3]", "/", type="string");
		self.assertEqual( maya.cmds.getAttr( node+".outTransform", mi=True ), [0, 1, 2])
		self.assertEqual( maya.cmds.getAttr( node+".outBound", mi=True ), [1])
		self.assertEqual( maya.cmds.getAttr( node+".outObjects", mi=True ), [2])

	def testPlugValues( self ) :

		# Verifies transform output values in world and local space, with a
		# -30 degree Z rotation baked into every level of the cache, and then
		# again after re-rooting the node at "/1".
		self.writeSCC( file=SceneShapeTest.__testPlugFile, rotation = IECore.V3d( 0, 0, IECore.degreesToRadians( -30 ) ) )

		maya.cmds.file( new=True, f=True )
		node = maya.cmds.createNode( 'ieSceneShape' )
		maya.cmds.setAttr( node+'.file', SceneShapeTest.__testPlugFile,type='string' )
		maya.cmds.setAttr( node+'.root',"/",type='string' )

		maya.cmds.setAttr( node+".queryPaths[0]", "/1", type="string")
		maya.cmds.setAttr( node+".queryPaths[1]", "/1/2", type="string")
		maya.cmds.setAttr( node+".queryPaths[2]", "/1/2/3", type="string")

		# World space
		# Rotations accumulate down the hierarchy (-30, -60, -90) and the
		# translations are the concatenated world positions.
		maya.cmds.setAttr( node+".querySpace", 0)

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outTranslate"), [(1.0, 0.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateZ")), -30.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate.outTranslateX"), 2.732050895 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate.outTranslateY"), -1.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate.outTranslateZ"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateZ")), -60.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate.outTranslateX"), 4.232050895 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate.outTranslateY"), -3.598076105 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate.outTranslateZ"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateZ")), -90.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outScale"), [(1.0, 1.0, 1.0)] )

		# Local space
		# Every level reports exactly what was written into the cache.
		maya.cmds.setAttr( node+".querySpace", 1)

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outTranslate"), [(1.0, 0.0, 0.0)] )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateX"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateZ")), -30.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate"), [(2.0, 0.0, 0.0)] )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateX"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateZ")), -30.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate"), [(3.0, 0.0, 0.0)] )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateX"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateZ")), -30.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outScale"), [(1.0, 1.0, 1.0)] )

		# Change the root path
		# Re-rooting at /1 makes /1 the scene origin, so queries are now
		# relative to it.
		maya.cmds.setAttr( node+'.root', "/1",type='string' )

		maya.cmds.setAttr( node+".queryPaths[0]", "/", type="string")
		maya.cmds.setAttr( node+".queryPaths[1]", "/2", type="string")
		maya.cmds.setAttr( node+".queryPaths[2]", "/2/3", type="string")

		# World space
		maya.cmds.setAttr( node+".querySpace", 0)

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outTranslate"), [(0.0, 0.0, 0.0)] )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateX"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateZ")), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate.outTranslateX"), 2.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate.outTranslateY"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate.outTranslateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateX"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateZ")), -30.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate.outTranslateX"), 4.5980763 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate.outTranslateY"), -1.5 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate.outTranslateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateX"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateZ")), -60.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outScale"), [(1.0, 1.0, 1.0)] )

		# Local space
		maya.cmds.setAttr( node+".querySpace", 1)

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outTranslate"), [(1.0, 0.0, 0.0)] )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateX"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[0].outRotate.outRotateZ")), -30.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate"), [(2.0, 0.0, 0.0)] )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateX"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[1].outRotate.outRotateZ")), -30.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate"), [(3.0, 0.0, 0.0)] )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateX"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateY"), 0.0 )
		self.assertEqual( round(maya.cmds.getAttr( node+".outTransform[2].outRotate.outRotateZ")), -30.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outScale"), [(1.0, 1.0, 1.0)] )

		# outTime simply mirrors the time plug.
		maya.cmds.setAttr( node+'.time', 5 )
		self.assertEqual( maya.cmds.getAttr( node+".outTime" ), 5 )

	def testAnimPlugValues( self ) :

		# Drives the node's time plug from Maya's time1 node and checks that
		# animated transforms are sampled (and interpolated) correctly.
		# Frame numbers map to cache sample times via the scene frame rate
		# -- presumably 24fps, so frame 48 == 2s and frame 60 == 2.5s;
		# TODO confirm against the test harness' playback settings.
		self.writeAnimSCC( file=SceneShapeTest.__testPlugAnimFile )

		maya.cmds.file( new=True, f=True )
		node = maya.cmds.createNode( 'ieSceneShape' )
		maya.cmds.connectAttr( "time1.outTime", node+".time" )

		maya.cmds.setAttr( node+'.file', SceneShapeTest.__testPlugAnimFile,type='string' )
		maya.cmds.setAttr( node+'.root',"/",type='string' )

		maya.cmds.setAttr( node+".queryPaths[0]", "/1", type="string")
		maya.cmds.setAttr( node+".queryPaths[1]", "/1/2", type="string")
		maya.cmds.setAttr( node+".queryPaths[2]", "/1/2/3", type="string")

		# Default query space is world: translations concatenate down the
		# hierarchy.
		maya.cmds.currentTime( 0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTime" ), 0 )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outTranslate"), [(1.0, 0.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate"), [(3.0, 0.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate"), [(6.0, 0.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outScale"), [(1.0, 1.0, 1.0)] )

		maya.cmds.currentTime( 48 )
		self.assertEqual( maya.cmds.getAttr( node+".outTime" ), 48 )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outTranslate"), [(1.0, 2.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate"), [(3.0, 4.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate"), [(6.0, 6.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outScale"), [(1.0, 1.0, 1.0)] )

		# Frame 60 falls between cache samples (2s and 5s), so values are
		# interpolated.
		maya.cmds.currentTime( 60 )
		self.assertEqual( maya.cmds.getAttr( node+".outTime" ), 60 )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outTranslate"), [(1.0, 2.5, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate"), [(3.0, 5.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate"), [(6.0, 7.5, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outScale"), [(1.0, 1.0, 1.0)] )

		# Local space: each level reports only its own (animated) transform.
		maya.cmds.currentTime( 0 )
		maya.cmds.setAttr( node+".querySpace", 1)
		self.assertEqual( maya.cmds.getAttr( node+".outTime" ), 0 )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outTranslate"), [(1.0, 0.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate"), [(2.0, 0.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate"), [(3.0, 0.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outScale"), [(1.0, 1.0, 1.0)] )

		maya.cmds.currentTime( 48 )
		self.assertEqual( maya.cmds.getAttr( node+".outTime" ), 48 )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outTranslate"), [(1.0, 2.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[0].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[0].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outTranslate"), [(2.0, 2.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[1].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[1].outScale"), [(1.0, 1.0, 1.0)] )

		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outTranslate"), [(3.0, 2.0, 0.0)] )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateX"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateY"), 0.0 )
		self.assertAlmostEqual( maya.cmds.getAttr( node+".outTransform[2].outRotateZ"), 0.0 )
		self.assertEqual( maya.cmds.getAttr( node+".outTransform[2].outScale"), [(1.0, 1.0, 1.0)] )

	def testqueryAttributes( self ) :

		# attributes[i].attributeValues[j] pairs queryPaths[i] with
		# queryAttributes[j]; attributes missing at a location (including the
		# bogus "blablAttr") yield None.
		self.writeAttributeSCC( file=SceneShapeTest.__testPlugAttrFile )

		maya.cmds.file( new=True, f=True )
		node = maya.cmds.createNode( 'ieSceneShape' )
		maya.cmds.setAttr( node+'.file', SceneShapeTest.__testPlugAttrFile,type='string' )

		maya.cmds.setAttr( node+".queryPaths[0]", "/1", type="string")
		maya.cmds.setAttr( node+".queryPaths[1]", "/1/2", type="string")
		maya.cmds.setAttr( node+".queryPaths[2]", "/1/2/3", type="string")

		maya.cmds.setAttr( node+".queryAttributes[0]", "boolAttr", type="string")
		maya.cmds.setAttr( node+".queryAttributes[1]", "floatAttr", type="string")
		maya.cmds.setAttr( node+".queryAttributes[2]", "intAttr", type="string")
		maya.cmds.setAttr( node+".queryAttributes[3]", "strAttr", type="string")
		maya.cmds.setAttr( node+".queryAttributes[4]", "blablAttr", type="string")

		self.assertEqual( maya.cmds.getAttr( node+".attributes[0].attributeValues[0]" ), True )
		self.assertEqual( round(maya.cmds.getAttr( node+".attributes[0].attributeValues[1]"), 6 ), 5.2 )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[0].attributeValues[2]" ), None )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[0].attributeValues[3]" ), None )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[0].attributeValues[4]" ), None )

		self.assertEqual( maya.cmds.getAttr( node+".attributes[1].attributeValues[0]" ), False )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[1].attributeValues[1]" ), 2.0 )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[1].attributeValues[2]" ), None )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[1].attributeValues[3]" ), None )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[1].attributeValues[4]" ), None )

		self.assertEqual( maya.cmds.getAttr( node+".attributes[2].attributeValues[0]" ), None )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[2].attributeValues[1]" ), None )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[2].attributeValues[2]" ), 12 )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[2].attributeValues[3]" ), "blah" )
		self.assertEqual( maya.cmds.getAttr( node+".attributes[2].attributeValues[4]" ), None )

	def testTags( self ) :

		# Tags written per level must be visible through the MayaScene
		# interface with the expected filter semantics (local vs descendant).
		self.writeTagSCC( file=SceneShapeTest.__testFile )

		maya.cmds.file( new=True, f=True )
		node = maya.cmds.createNode( 'ieSceneShape' )
		fn = IECoreMaya.FnSceneShape( node )
		transform = str(maya.cmds.listRelatives( node, parent=True )[0])

		maya.cmds.setAttr( node+'.file', SceneShapeTest.__testFile, type='string' )

		scene = IECoreMaya.MayaScene().child( transform )

		self.assertEqual( sorted([ str(x) for x in scene.readTags( IECore.SceneInterface.TagFilter.EveryTag ) ]), [ "ObjectType:MeshPrimitive", "a", "b", "c" ] )
		self.assertEqual( sorted([ str(x) for x in scene.readTags() ]), [] )
		for tag in scene.readTags(IECore.SceneInterface.TagFilter.EveryTag) :
			self.assertTrue( scene.hasTag( tag, IECore.SceneInterface.TagFilter.EveryTag ) )
		self.assertFalse( scene.hasTag( "fakeTag", IECore.SceneInterface.TagFilter.EveryTag ) )

		# double expanding because the first level has all the same tags
		childFn = fn.expandOnce()[0].expandOnce()[0]

		scene = childFn.sceneInterface()
		self.assertEqual( set([ str(x) for x in scene.readTags( IECore.SceneInterface.TagFilter.DescendantTag|IECore.SceneInterface.TagFilter.LocalTag ) ]), set([ "ObjectType:MeshPrimitive", "b", "c" ]) )
		self.assertEqual( sorted([ str(x) for x in scene.readTags() ]), [ "ObjectType:MeshPrimitive", "b" ] )
		for tag in scene.readTags(IECore.SceneInterface.TagFilter.EveryTag) :
			self.assertTrue( scene.hasTag( tag, IECore.SceneInterface.TagFilter.EveryTag ) )
		self.assertFalse( scene.hasTag( "fakeTag", IECore.SceneInterface.TagFilter.EveryTag ) )

		childFn = childFn.expandOnce()[0]

		scene = childFn.sceneInterface()
		self.assertEqual( sorted([ str(x) for x in scene.readTags( IECore.SceneInterface.TagFilter.DescendantTag|IECore.SceneInterface.TagFilter.LocalTag ) ]), [ "ObjectType:MeshPrimitive", "c" ] )
		self.assertEqual( sorted([ str(x) for x in scene.readTags() ]), [ "ObjectType:MeshPrimitive", "c" ] )
		for tag in scene.readTags(IECore.SceneInterface.TagFilter.EveryTag) :
			self.assertTrue( scene.hasTag( tag, IECore.SceneInterface.TagFilter.EveryTag ) )
		self.assertFalse( scene.hasTag( "fakeTag", IECore.SceneInterface.TagFilter.EveryTag ) )

	def tearDown( self ) :

		# Remove any scratch cache files this test run created.
		for f in [ SceneShapeTest.__testFile, SceneShapeTest.__testPlugFile, SceneShapeTest.__testPlugAnimFile, SceneShapeTest.__testPlugAttrFile ] :
			if os.path.exists( f ) :
				os.remove( f )
if __name__ == "__main__":
	# Run the suite through IECoreMaya's test harness, which loads the
	# required Maya plugins before executing the tests.
	IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Local Shared Object implementation.
Local Shared Object (LSO), sometimes known as Adobe Flash cookies, is a
cookie-like data entity used by the Adobe Flash Player and Gnash. The players
allow web content to read and write LSO data to the computer's local drive on
a per-domain basis.
@see: U{Local Shared Object on WikiPedia (external)
<http://en.wikipedia.org/wiki/Local_Shared_Object>}
@see: U{Local Shared Object envelope (external)
<http://osflash.org/documentation/amf/envelopes/sharedobject>}
@since: 0.1.0
"""
import pyamf
from pyamf import util
#: Magic Number - 2 bytes
HEADER_VERSION = '\x00\xbf'
#: Marker - 10 bytes
HEADER_SIGNATURE = 'TCSO\x00\x04\x00\x00\x00\x00'
#: Padding - 4 bytes
PADDING_BYTE = '\x00'
def decode(stream, strict=True):
    """
    Decodes a SOL stream into its root name and contained values.

    @param strict: Ensure that the SOL stream is as spec compatible as possible.
    @type strict: C{bool}
    @return: A C{tuple} containing the C{root_name} and a C{dict} of name,
        value pairs.
    @rtype: C{tuple}
    @raise DecodeError: Unknown SOL version in header.
    @raise DecodeError: Inconsistent stream header length.
    @raise DecodeError: Invalid signature.
    @raise DecodeError: Invalid padding read.
    @raise DecodeError: Missing padding byte.
    """
    if not isinstance(stream, util.BufferedByteStream):
        stream = util.BufferedByteStream(stream)

    # header: 2-byte version magic
    if stream.read(2) != HEADER_VERSION:
        raise pyamf.DecodeError('Unknown SOL version in header')

    # header: declared body length, checked against the actual remainder
    length = stream.read_ulong()
    if strict and stream.remaining() != length:
        raise pyamf.DecodeError('Inconsistent stream header length')

    # header: 10-byte marker
    if stream.read(10) != HEADER_SIGNATURE:
        raise pyamf.DecodeError('Invalid signature')

    # root name: ushort length followed by a utf-8 string
    root_name = stream.read_utf8_string(stream.read_ushort())

    if stream.read(3) != PADDING_BYTE * 3:
        raise pyamf.DecodeError('Invalid padding read')

    # the next byte selects the AMF encoding used for the body
    decoder = pyamf.get_decoder(stream.read_uchar())
    decoder.stream = stream

    # body: (name, value) pairs, each terminated by one padding byte
    values = {}

    while not stream.at_eof():
        name = decoder.readString()
        value = decoder.readElement()

        if stream.read(1) != PADDING_BYTE:
            raise pyamf.DecodeError('Missing padding byte')

        values[name] = value

    return (root_name, values)
def encode(name, values, strict=True, encoding=pyamf.AMF0):
    """
    Produces a SharedObject encoded stream based on the name and values.

    @param name: The root name of the SharedObject.
    @type name: C{basestring}
    @param values: A C{dict} of name value pairs to be encoded in the stream.
    @type values: C{dict}
    @param strict: Ensure that the SOL stream is as spec compatible as possible.
    @type strict: C{bool}
    @return: A SharedObject encoded stream.
    @rtype: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
    """
    encoder = pyamf.get_encoder(encoding)
    stream = util.BufferedByteStream()
    encoder.stream = stream

    # header: version magic
    stream.write(HEADER_VERSION)

    if strict is True:
        # remember where the length field lives; it is back-patched below
        # once the total size is known
        length_pos = stream.tell()
        stream.write_ulong(0)

    # header: marker
    stream.write(HEADER_SIGNATURE)

    # root name as ushort-prefixed utf-8
    if not isinstance(name, unicode):
        name = unicode(name)

    stream.write_ushort(len(name))
    stream.write_utf8_string(name)

    stream.write(PADDING_BYTE * 3)
    stream.write_uchar(encoding)

    # body: each (name, value) pair is followed by one padding byte
    for key, value in values.iteritems():
        encoder.writeString(key, writeType=False)
        encoder.writeElement(value)
        stream.write(PADDING_BYTE)

    if strict:
        # back-patch the length field: everything after the 2-byte version
        # and the 4-byte length field itself
        stream.seek(length_pos)
        stream.write_ulong(stream.remaining() - 4)

    stream.seek(0)

    return stream
def load(name_or_file):
    """
    Loads a sol file and returns a L{SOL} object.

    @param name_or_file: Name of file, or file-object.
    @type name_or_file: C{str} or C{StringIO}
    @raise ValueError: Readable stream expected.
    """
    f = name_or_file
    opened = False

    if isinstance(name_or_file, basestring):
        f = open(name_or_file, 'rb')
        opened = True
    elif not hasattr(f, 'read'):
        raise ValueError('Readable stream expected')

    try:
        name, values = decode(f.read())
    finally:
        # Previously the handle leaked if decode() raised; only close files
        # we opened ourselves, caller-supplied streams stay open.
        if opened is True:
            f.close()

    s = SOL(name)

    for n, v in values.iteritems():
        s[n] = v

    return s
def save(sol, name_or_file, encoding=pyamf.AMF0):
    """
    Writes a L{SOL} object to C{name_or_file}.

    @param sol: The shared object to serialise.
    @type sol: L{SOL}
    @param name_or_file: Name of file, or file-object.
    @type name_or_file: C{str} or C{StringIO}
    @param encoding: AMF encoding type.
    @type encoding: C{int}
    @raise ValueError: Writable stream expected.
    """
    f = name_or_file
    opened = False

    if isinstance(name_or_file, basestring):
        f = open(name_or_file, 'wb+')
        opened = True
    elif not hasattr(f, 'write'):
        raise ValueError('Writable stream expected')

    try:
        f.write(encode(sol.name, sol, encoding=encoding).getvalue())
    finally:
        # Previously the handle leaked if encode()/write() raised; only close
        # files we opened ourselves, caller-supplied streams stay open.
        if opened:
            f.close()
class SOL(dict):
    """
    Local Shared Object class, allows easy manipulation of the internals of a
    C{sol} file.

    Behaves exactly like a C{dict} of name/value pairs; the extra C{name}
    attribute carries the root name of the shared object.
    """
    def __init__(self, name):
        # Root name of the shared object, used when saving.
        self.name = name

    def save(self, name_or_file, encoding=pyamf.AMF0):
        """Write this object out via the module-level L{save} function."""
        save(self, name_or_file, encoding)

    def __repr__(self):
        return '<%s %s %s at 0x%x>' % (
            self.__class__.__name__,
            self.name,
            dict.__repr__(self),
            id(self),
        )

#: Alias kept for backwards compatibility.
LSO = SOL
| |
'''
Train
Train your neural network
Author: Tawn Kramer
'''
from __future__ import print_function
import os
import sys
import glob
import time
import fnmatch
import argparse
import numpy as np
from PIL import Image
import keras
import conf
import random
import augment
conf.init()
'''
matplotlib can be a pain to setup. So handle the case where it is absent. When present,
use it to generate a plot of training results.
'''
try:
    import matplotlib
    # Force matplotlib to not use any Xwindows backend.
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    do_plot = True
except Exception:
    # Narrowed from a bare except: still tolerates ImportError and backend
    # setup failures, but no longer swallows SystemExit/KeyboardInterrupt.
    do_plot = False
import models
from load_data import *
def shuffle(samples):
    '''
    Return a new list with the elements of `samples` in random order.

    Replaces a hand-rolled O(n^2) pick-and-delete loop with
    random.sample, which produces a uniform permutation in O(n).
    Unlike the previous version, the input list is left intact
    (the old code emptied it as a side effect; the only caller
    rebinds the result, so nothing relied on that).
    '''
    return random.sample(samples, len(samples))
def parse_img_filepath(filepath):
    '''
    Extract steering/throttle values from fields 3 and 5 of the
    '_'-separated image file name.

    The neural network seems to train well on values that are not too large
    or small. We recorded the raw axis values, so we normalize them and then
    apply a STEERING_NN_SCALE that puts them roughly in units of degrees
    +- 30 or so.
    '''
    stem = filepath.split('/')[-1].split('.')[0]
    tokens = stem.split('_')

    steering = float(tokens[3]) / float(conf.js_axis_scale) * conf.STEERING_NN_SCALE
    throttle = float(tokens[5]) / float(conf.js_axis_scale) * conf.STEERING_NN_SCALE

    return {'steering': steering, 'throttle': throttle}
def generator(samples, batch_size=32, perc_to_augment=0.5, transposeImages=False):
    '''
    Rather than keep all data in memory, we will make a function that keeps
    its state and returns just the latest batch required via the yield command.

    As we load images, we can optionally augment them in some manner that doesn't
    change their underlying meaning or features. This is a combination of
    brightness, contrast, sharpness, and color PIL image filters applied with random
    settings. Optionally a shadow image may be overlayed with some random rotation and
    opacity.

    NOTE(review): an earlier comment claimed images are also flipped
    horizontally with negated steering; no flip is performed in this code.
    '''
    num_samples = len(samples)
    # do_augment is hard-coded on; shadows are loaded once, up front.
    do_augment = True
    if do_augment:
        shadows = augment.load_shadow_images('./shadows/*.png')
    while 1: # Loop forever so the generator never terminates
        samples = shuffle(samples)
        # divide batch_size samples into batches
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            print(".", end="")
            images = []
            controls = []
            for fullpath in batch_samples:
                try:
                    # steering/throttle are encoded in the file name
                    data = parse_img_filepath(fullpath)
                    steering = data["steering"]
                    throttle = data["throttle"]
                    try:
                        image = Image.open(fullpath)
                    except:
                        image = None
                    if image is None:
                        print('failed to open', fullpath)
                        continue
                    #PIL Image as a numpy array
                    image = np.array(image)
                    if do_augment and random.uniform(0.0, 1.0) < perc_to_augment:
                        image = augment.augment_image(image, shadows)
                    if transposeImages:
                        image = image.transpose()
                    center_angle = steering
                    images.append(image)
                    #controls.append([center_angle, throttle])
                    controls.append([center_angle])
                except:
                    # NOTE(review): yielding an empty batch mid-sample-loop on
                    # any error looks wrong (the partially built batch is not
                    # discarded and an empty batch reaches the trainer) --
                    # confirm intent before relying on this path.
                    yield [], []
            # final np array to submit to training
            X_train = np.array(images)
            y_train = np.array(controls)
            yield X_train, y_train
def get_files(filemask):
    '''
    Recursively collect every file under the directory part of `filemask`
    whose basename matches the mask part (fnmatch semantics).
    '''
    path, mask = os.path.split(filemask)
    return [os.path.join(root, fname)
            for root, _dirs, fnames in os.walk(path)
            for fname in fnmatch.filter(fnames, mask)]
def train_test_split(lines, test_perc):
    '''
    Randomly partition `lines` into (train, test). Each entry lands in the
    test list with probability `test_perc`, otherwise in the train list.
    '''
    train, test = [], []
    for line in lines:
        bucket = test if random.uniform(0.0, 1.0) < test_perc else train
        bucket.append(line)
    return train, test
def make_generators(inputs, limit=None, batch_size=32, aug_perc=0.0, transposeImages=False):
    '''
    load the job spec from the csv and create some generator for training

    Returns (train_generator, validation_generator, n_train, n_val) where the
    generators yield (X, y) batches and n_* are sample counts for the fit call.
    '''
    #get the image/steering pairs from the csv files
    lines = get_files(inputs)
    print("found %d files" % len(lines))
    # optional cap on the data set size, mostly for quick experiments
    if limit is not None:
        lines = lines[:limit]
        print("limiting to %d files" % len(lines))
    # hold out 20% of the files for validation
    train_samples, validation_samples = train_test_split(lines, test_perc=0.2)

    print("num train/val", len(train_samples), len(validation_samples))
    
    # compile and train the model using the generator function
    # validation data is never augmented (perc_to_augment=0.0)
    train_generator = generator(train_samples, batch_size=batch_size, perc_to_augment=aug_perc, transposeImages=transposeImages)
    validation_generator = generator(validation_samples, batch_size=batch_size, perc_to_augment=0.0, transposeImages=transposeImages)
    
    n_train = len(train_samples)
    n_val = len(validation_samples)
    
    return train_generator, validation_generator, n_train, n_val
def go(model_name, epochs=50, batch_size=128, inputs='./log/*.jpg', limit=None, aug_mult=1, aug_perc=0.0, resume=False):
    '''
    Build (or resume) the selected model and train it on images matching
    `inputs`, checkpointing the best weights to `model_name`.

    NOTE(review): `aug_mult` is accepted but never used in this body --
    confirm whether multiplicative augmentation was meant to be wired in.
    '''
    print('working on model', model_name)

    '''
    modify config.json to select the model to train.
    '''
    if conf.model_selection == "nvidia_transposed_inputs":
        model = models.get_nvidia_model()
    elif conf.model_selection == "nvidia_standard_inputs":
        model = models.get_nvidia_model2()
    elif conf.model_selection == "simple":
        model = models.get_simple_model()
    else:
        # fall back to the default nvidia model on unknown selections
        model = models.get_nvidia_model()

    if resume:
        print("resuming training of", model_name)
        model = keras.models.load_model(model_name)

    # NOTE(review): ch_order is unconditionally overwritten here, so the
    # comparison below always yields False and images are never transposed,
    # regardless of the model selected above -- confirm this is intentional.
    model.ch_order = 'channel_last'
    transposeImages = (model.ch_order == 'channel_first')

    callbacks = [
        #keras.callbacks.EarlyStopping(monitor='val_loss', patience=conf.training_patience, verbose=0),
        # keep only the best weights seen so far (lowest validation loss)
        keras.callbacks.ModelCheckpoint(model_name, monitor='val_loss', save_best_only=True, verbose=0),
    ]
    
    #Train on session images
    train_generator, validation_generator, n_train, n_val = make_generators(inputs, limit=limit, batch_size=batch_size, aug_perc=aug_perc, transposeImages=transposeImages)

    # Keras 1.x style fit_generator arguments (samples_per_epoch/nb_epoch)
    history = model.fit_generator(train_generator, 
        samples_per_epoch = n_train,
        validation_data = validation_generator,
        nb_val_samples = n_val,
        nb_epoch=epochs,
        verbose=1,
        callbacks=callbacks)
    
    try:
        if do_plot:
            # summarize history for loss
            plt.plot(history.history['loss'])
            plt.plot(history.history['val_loss'])
            plt.title('model loss')
            plt.ylabel('loss')
            plt.xlabel('epoch')
            plt.legend(['train', 'test'], loc='upper left')
            plt.savefig('loss.png')
    except:
        # plotting is best-effort; training results are already saved
        print("problems with loss graph")
if __name__ == "__main__":
    # Command-line entry point: parse training options and launch go().
    parser = argparse.ArgumentParser(description='train script')
    parser.add_argument('model', type=str, help='model name')
    parser.add_argument('--epochs', type=int, default=conf.training_default_epochs, help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=conf.training_batch_size, help='number samples per gradient update')
    parser.add_argument('--inputs', default='./log/*.jpg', help='input mask to gather images')
    parser.add_argument('--limit', type=int, default=None, help='max number of images to train with')
    parser.add_argument('--aug_mult', type=int, default=conf.training_default_aug_mult, help='how many more images to augment')
    parser.add_argument('--aug_perc', type=float, default=conf.training_default_aug_percent, help='what percentage of images to augment 0 - 1')
    parser.add_argument('--resume', action='store_true', help="load previous model and weights before resuming training")
    args = parser.parse_args()

    go(args.model, epochs=args.epochs, batch_size=args.batch_size, limit=args.limit, inputs=args.inputs, aug_mult=args.aug_mult, aug_perc=args.aug_perc, resume=args.resume)

# example invocation:
#python train.py mymodel_aug_90_x4_e200 --epochs=200 --aug_mult=4 --aug_perc=0.9
| |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
## Copyright (C) 2005 Guillaume Valadon <guedou@hongo.wide.ad.jp>
## Arnaud Ebalard <arnaud.ebalard@eads.net>
"""
Utility functions for IPv6.
"""
from config import conf
from data import *
from utils import *
def construct_source_candidate_set(addr, plen, laddr, loname):
    """
    Given all addresses assigned to a specific interface ('laddr' parameter),
    this function returns the "candidate set" associated with 'addr/plen'.

    Basically, the function filters all interface addresses to keep only those
    that have the same scope as provided prefix.

    This is on this list of addresses that the source selection mechanism
    will then be performed to select the best source address associated
    with some specific destination that uses this prefix.

    laddr entries are (address, scope, ...) tuples; the returned list
    contains plain address strings, global addresses first.
    """
    # cmp-style comparator: globals sort before non-globals; among two
    # globals, a native (non-6to4) address beats a 6to4 one.
    def cset_sort(x,y):
        x_global = 0
        if in6_isgladdr(x):
            x_global = 1
        y_global = 0
        if in6_isgladdr(y):
            y_global = 1
        res = y_global - x_global
        if res != 0 or y_global != 1:
            return res
        # two global addresses: if one is native, it wins.
        if not in6_isaddr6to4(x):
            return -1;
        return -res

    cset = []
    # keep only interface addresses whose scope matches that of addr
    if in6_isgladdr(addr) or in6_isuladdr(addr):
        cset = filter(lambda x: x[1] == IPV6_ADDR_GLOBAL, laddr)
    elif in6_islladdr(addr):
        cset = filter(lambda x: x[1] == IPV6_ADDR_LINKLOCAL, laddr)
    elif in6_issladdr(addr):
        cset = filter(lambda x: x[1] == IPV6_ADDR_SITELOCAL, laddr)
    elif in6_ismaddr(addr):
        # multicast: match on the scope encoded in the group address
        if in6_ismnladdr(addr):
            cset = [('::1', 16, loname)]
        elif in6_ismgladdr(addr):
            cset = filter(lambda x: x[1] == IPV6_ADDR_GLOBAL, laddr)
        elif in6_ismlladdr(addr):
            cset = filter(lambda x: x[1] == IPV6_ADDR_LINKLOCAL, laddr)
        elif in6_ismsladdr(addr):
            cset = filter(lambda x: x[1] == IPV6_ADDR_SITELOCAL, laddr)
    elif addr == '::' and plen == 0:
        # default route: any global address is a candidate
        cset = filter(lambda x: x[1] == IPV6_ADDR_GLOBAL, laddr)
    # NOTE: py2 semantics -- filter/map return lists and list.sort accepts cmp=
    cset = map(lambda x: x[0], cset)
    cset.sort(cmp=cset_sort) # Sort with global addresses first
    return cset
def get_source_addr_from_candidate_set(dst, candidate_set):
    """
    This function implement a limited version of source address selection
    algorithm defined in section 5 of RFC 3484. The format is very different
    from that described in the document because it operates on a set
    of candidate source address for some specific route.

    Returns the best source address for reaching 'dst', or None when the
    candidate set is empty.
    """

    def scope_cmp(a, b):
        """
        Given two addresses, returns -1, 0 or 1 based on comparison of
        their scope
        """
        scope_mapper = {IPV6_ADDR_GLOBAL: 4,
                        IPV6_ADDR_SITELOCAL: 3,
                        IPV6_ADDR_LINKLOCAL: 2,
                        IPV6_ADDR_LOOPBACK: 1}
        sa = in6_getscope(a)
        if sa == -1:
            # unknown scope: treat as loopback (lowest rank)
            sa = IPV6_ADDR_LOOPBACK
        sb = in6_getscope(b)
        if sb == -1:
            sb = IPV6_ADDR_LOOPBACK

        sa = scope_mapper[sa]
        sb = scope_mapper[sb]

        if sa == sb:
            return 0
        if sa > sb:
            return 1
        return -1

    def rfc3484_cmp(source_a, source_b):
        """
        The function implements a limited version of the rules from Source
        Address selection algorithm defined section of RFC 3484.
        """

        # Rule 1: Prefer same address
        if source_a == dst:
            return 1
        if source_b == dst:
            # Bug fix: this used to return 1 as well, preferring source_a
            # even when source_b was the exact match.
            return -1

        # Rule 2: Prefer appropriate scope
        tmp = scope_cmp(source_a, source_b)
        if tmp == -1:
            if scope_cmp(source_a, dst) == -1:
                return 1
            else:
                return -1
        elif tmp == 1:
            if scope_cmp(source_b, dst) == -1:
                return 1
            else:
                return -1

        # Rule 3: cannot be easily implemented
        # Rule 4: cannot be easily implemented
        # Rule 5: does not make sense here
        # Rule 6: cannot be implemented
        # Rule 7: cannot be implemented
        # Rule 8: Longest prefix match
        tmp1 = in6_get_common_plen(source_a, dst)
        tmp2 = in6_get_common_plen(source_b, dst)
        if tmp1 > tmp2:
            return 1
        elif tmp2 > tmp1:
            return -1
        return 0

    if not candidate_set:
        # Should not happen
        return None

    # py2: list.sort accepts a cmp= comparator; best candidate comes first
    candidate_set.sort(cmp=rfc3484_cmp, reverse=True)

    return candidate_set[0]
def find_ifaddr2(addr, plen, laddr):
    # Pick a source address from 'laddr' (list of (addr, plen, name) tuples)
    # whose scope matches destination 'addr'; prefers native over 6to4
    # addresses unless the destination itself is 6to4.
    dstAddrType = in6_getAddrType(addr)

    if dstAddrType == IPV6_ADDR_UNSPECIFIED: # Shouldn't happen as dst addr
        return None

    if dstAddrType == IPV6_ADDR_LOOPBACK:
        return None

    # NOTE: tuple-unpacking lambda below is Python 2 only syntax.
    # The leading [[]] seeds reduce() with an empty accumulator list.
    tmp = [[]] + map(lambda (x,y,z): (in6_getAddrType(x), x, y, z), laddr)
    def filterSameScope(l, t):
        if (t[0] & dstAddrType & IPV6_ADDR_SCOPE_MASK) == 0:
            l.append(t)
        return l
    sameScope = reduce(filterSameScope, tmp)
    
    l = len(sameScope) 
    if l == 1:  # Only one address for our scope
        return sameScope[0][1]
    elif l > 1: # Muliple addresses for our scope
        stfAddr = filter(lambda x: x[0] & IPV6_ADDR_6TO4, sameScope)
        nativeAddr = filter(lambda x: not (x[0] & IPV6_ADDR_6TO4), sameScope)

        if not (dstAddrType & IPV6_ADDR_6TO4): # destination is not 6to4
           if len(nativeAddr) != 0:
               return nativeAddr[0][1]
           return stfAddr[0][1]

        else:  # Destination is 6to4, try to use source 6to4 addr if any
            if len(stfAddr) != 0:
                return stfAddr[0][1]
            return nativeAddr[0][1]
    else:
        return None
# Think before modify it : for instance, FE::1 does exist and is unicast
# there are many others like that.
# TODO : integrate Unique Local Addresses
def in6_getAddrType(addr):
    # Classify a printable IPv6 address into a bitmask of IPV6_ADDR_* flags.
    # NOTE: relies on py2 semantics where inet_pton returns a byte *string*
    # (naddr[0] is a 1-char str, compared against string literals below).
    naddr = inet_pton(socket.AF_INET6, addr)
    paddr = inet_ntop(socket.AF_INET6, naddr) # normalize
    addrType = 0
    # _Assignable_ Global Unicast Address space
    # is defined in RFC 3513 as those in 2000::/3
    if ((struct.unpack("B", naddr[0])[0] & 0xE0) == 0x20):
        addrType = (IPV6_ADDR_UNICAST | IPV6_ADDR_GLOBAL)
        if naddr[:2] == ' \x02': # Mark 6to4 @  (' \x02' == '\x20\x02' == 2002::/16)
            addrType |= IPV6_ADDR_6TO4
    elif naddr[0] == '\xff': # multicast
        # scope nibble is the 4th hex digit of the normalized address
        addrScope = paddr[3]
        if addrScope == '2':
            addrType = (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_MULTICAST)
        elif addrScope == 'e':
            addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_MULTICAST)
        else:
            addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_MULTICAST)
    elif ((naddr[0] == '\xfe') and ((int(paddr[2], 16) & 0xC) == 0x8)):
        # fe80::/10 link-local unicast
        addrType = (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)
    elif paddr == "::1":
        addrType = IPV6_ADDR_LOOPBACK
    elif paddr == "::":
        addrType = IPV6_ADDR_UNSPECIFIED
    else:
        # Everything else is global unicast (RFC 3513)
        # Even old deprecated (RFC3879) Site-Local addresses
        addrType = (IPV6_ADDR_GLOBAL | IPV6_ADDR_UNICAST)

    return addrType
def in6_mactoifaceid(mac, ulbit=None):
    """
    Compute the interface ID in modified EUI-64 format associated
    to the Ethernet address provided as input.

    The U/L bit of the interface identifier is the inverse of the one in
    the given MAC address, unless forced through the optional 'ulbit'
    parameter (0 or 1). Returns None when 'mac' is not a 17-character
    colon-separated 48-bit address.
    """
    if len(mac) != 17:
        return None
    hexdigits = "".join(mac.split(':'))
    if len(hexdigits) != 12:
        return None
    first = int(hexdigits[0:2], 16)
    # Invert the U/L bit (0x02) unless a valid 0/1 override was supplied.
    if ulbit is None or not (ulbit == 0 or ulbit == 1):
        ulbit = 0 if (first & 0x02) else 1
    ulbit *= 2
    firstbyte = "%.02x" % ((first & 0xFD) | ulbit)
    # Insert the FFFE marker between the OUI and the device part (EUI-64).
    eui64 = (firstbyte + hexdigits[2:4] + ":" + hexdigits[4:6] +
             "FF:FE" + hexdigits[6:8] + ":" + hexdigits[8:12])
    return eui64.upper()
def in6_ifaceidtomac(ifaceid): # TODO: finish commenting function behavior
    """
    Extract the mac address from provided iface ID. Iface ID is provided
    in printable format ("XXXX:XXFF:FEXX:XXXX", eventually compressed). None
    is returned on error.
    """
    try:
        # parse the 64-bit iface ID by prepending '::' and taking the low 8 bytes
        ifaceid = inet_pton(socket.AF_INET6, "::"+ifaceid)[8:16]
    except:
        return None
    # modified EUI-64 IDs carry the fixed FF:FE marker in the middle
    # (py2: inet_pton returns a str, so comparison with '\xff\xfe' works)
    if ifaceid[3:5] != '\xff\xfe':
        return None
    first = struct.unpack("B", ifaceid[:1])[0]
    # invert the U/L bit back to its MAC-address value
    ulbit = 2*[1,'-',0][first & 0x02]
    first = struct.pack("B", ((first & 0xFD) | ulbit))
    oui = first + ifaceid[1:3]
    end = ifaceid[5:]
    l = map(lambda x: "%.02x" % struct.unpack("B", x)[0], list(oui+end))
    return ":".join(l)
def in6_addrtomac(addr):
    """
    Extract the mac address from provided address. None is returned
    on error.
    """
    # keep only the low 64 bits (the interface identifier) of the address
    mask = inet_pton(socket.AF_INET6, "::ffff:ffff:ffff:ffff")
    x = in6_and(mask, inet_pton(socket.AF_INET6, addr))
    # strip the leading '::' of the printable form to get the bare iface ID
    ifaceid = inet_ntop(socket.AF_INET6, x)[2:]
    return in6_ifaceidtomac(ifaceid)
def in6_addrtovendor(addr):
    """
    Extract the MAC address from a modified EUI-64 constructed IPv6
    address provided and use the IANA oui.txt file to get the vendor.
    The database used for the conversion is the one loaded by Scapy,
    based on Wireshark (/usr/share/wireshark/wireshark/manuf) None
    is returned on error, "UNKNOWN" if the vendor is unknown.
    """
    mac = in6_addrtomac(addr)
    if mac is None:
        return None

    res = conf.manufdb._get_manuf(mac)
    # _get_manuf returns the MAC itself when there is no match
    if len(res) == 17 and res.count(':') != 5: # Mac address, i.e. unknown
        res = "UNKNOWN"

    return res
def in6_getLinkScopedMcastAddr(addr, grpid=None, scope=2):
    """
    Generate a Link-Scoped Multicast Address as described in RFC 4489.
    Returned value is in printable notation.

    'addr' parameter specifies the link-local address to use for generating
    Link-scoped multicast address IID.

    By default, the function returns a ::/96 prefix (aka last 32 bits of
    returned address are null). If a group id is provided through 'grpid'
    parameter, last 32 bits of the address are set to that value (accepted
    formats : '\x12\x34\x56\x78' or '12345678' or 0x12345678 or 305419896).

    By default, generated address scope is Link-Local (2). That value can
    be modified by passing a specific 'scope' value as an argument of the
    function. RFC 4489 only authorizes scope values <= 2. Enforcement
    is performed by the function (None will be returned).

    If no link-local address can be used to generate the Link-Scoped IPv6
    Multicast address, or if another error occurs, None is returned.
    """
    # RFC 4489 restricts the scope to interface-, link- or (0) reserved
    if not scope in [0, 1, 2]:
        return None
    try:
        if not in6_islladdr(addr):
            return None
        addr = inet_pton(socket.AF_INET6, addr)
    except:
        warning("in6_getLinkScopedMcastPrefix(): Invalid address provided")
        return None

    # interface identifier: low 64 bits of the link-local address
    iid = addr[8:]

    if grpid is None:
        grpid = '\x00\x00\x00\x00'
    else:
        # normalize the accepted grpid formats down to an int first
        if type(grpid) is str:
            if len(grpid) == 8:
                # 8 hex characters, e.g. '12345678'
                try:
                    grpid = int(grpid, 16) & 0xffffffff
                except:
                    warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided")
                    return None
            elif len(grpid) == 4:
                # raw 4-byte string, e.g. '\x12\x34\x56\x78'
                try:
                    grpid = struct.unpack("!I", grpid)[0]
                except:
                    warning("in6_getLinkScopedMcastPrefix(): Invalid group id provided")
                    return None
        grpid = struct.pack("!I", grpid)

    # flags nibble 0x3 ("T" and "P" bits set) followed by the scope nibble
    flgscope = struct.pack("B", 0xff & ((0x3 << 4) | scope))
    plen = '\xff'
    res = '\x00'
    a = '\xff' + flgscope + res + plen + iid + grpid

    return inet_ntop(socket.AF_INET6, a)
def in6_get6to4Prefix(addr):
    """
    Returns the /48 6to4 prefix associated with provided IPv4 address
    On error, None is returned. No check is performed on public/private
    status of the address
    """
    try:
        addr = inet_pton(socket.AF_INET, addr)
        # 2002::/16 ('\x20\x02') + 32-bit IPv4 address + 10 zero bytes
        addr = inet_ntop(socket.AF_INET6, '\x20\x02'+addr+'\x00'*10)
    except:
        return None

    return addr
def in6_6to4ExtractAddr(addr):
    """
    Extract the IPv4 address embedded in a 6to4 address. Passed address
    must be a 6to4 address. None is returned on error.
    """
    try:
        addr = inet_pton(socket.AF_INET6, addr)
    except:
        return None
    # " \x02" is '\x20\x02', i.e. the 2002::/16 6to4 prefix
    if addr[:2] != " \x02":
        return None
    return inet_ntop(socket.AF_INET, addr[2:6])
def in6_getLocalUniquePrefix():
    """
    Returns a pseudo-randomly generated Local Unique prefix. Function
    follows recommandation of Section 3.2.2 of RFC 4193 for prefix
    generation.
    """
    # Extracted from RFC 1305 (NTP) :
    # NTP timestamps are represented as a 64-bit unsigned fixed-point number,
    # in seconds relative to 0h on 1 January 1900. The integer part is in the
    # first 32 bits and the fraction part in the last 32 bits.

    # epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
    # x = time.time()
    # from time import gmtime, strftime, gmtime, mktime
    # delta = mktime(gmtime(0)) - mktime(self.epoch)
    # x = x-delta

    tod = time.time() # time of day. Will bother with epoch later
    i = int(tod)
    j = int((tod - i)*(2**32))
    # 64-bit NTP-style timestamp: 32-bit seconds + 32-bit fraction
    tod = struct.pack("!II", i,j)
    # TODO: Add some check regarding system address gathering
    rawmac = get_if_raw_hwaddr(conf.iface6)[1]
    mac = ":".join(map(lambda x: "%.02x" % ord(x), list(rawmac)))
    # construct modified EUI-64 ID
    eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:]
    # NOTE: the 'sha' module is Python 2 only (removed in py3; use hashlib).
    import sha
    # RFC 4193: global ID = low 40 bits of SHA-1(timestamp || EUI-64);
    # resulting prefix is fd00::/8 + 40-bit global ID.
    globalid = sha.new(tod+eui64).digest()[:5]
    return inet_ntop(socket.AF_INET6, '\xfd' + globalid + '\x00'*10)
def in6_getRandomizedIfaceId(ifaceid, previous=None):
    """
    Implements the interface ID generation algorithm described in RFC 3041.
    The function takes the Modified EUI-64 interface identifier generated
    as described in RFC 4291 and an optional previous history value (the
    first element of the output of this function). If no previous interface
    identifier is provided, a random one is generated. The function returns
    a tuple containing the randomized interface identifier and the history
    value (for possible future use). Input and output values are provided in
    a "printable" format as depicted below.

    ex:

    >>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3')
    ('4c61:76ff:f46a:a5f3', 'd006:d540:db11:b092')

    >>> in6_getRandomizedIfaceId('20b:93ff:feeb:2d3',
                                 previous='d006:d540:db11:b092')
    ('fe97:46fe:9871:bd38', 'eeed:d79c:2e3f:62e')
    """

    s = ""
    if previous is None:
        # no history value: bootstrap with 8 random bytes
        d = "".join(map(chr, range(256)))
        for i in range(8):
            s += random.choice(d)
        previous = s
    # MD5 over (current iface ID || history), per RFC 3041
    s = inet_pton(socket.AF_INET6, "::"+ifaceid)[8:] + previous
    # NOTE: the 'md5' module is Python 2 only (hashlib.md5 in py3).
    import md5
    s = md5.new(s).digest()
    s1,s2 = s[:8],s[8:]
    # clear the universal/local bit of the generated ID... NOTE(review):
    # the code ORs 0x04 in; RFC 3041 specifies clearing bit 0x02 -- confirm.
    s1 = chr(ord(s1[0]) | 0x04) + s1[1:]
    # render each 8-byte half as the low half of a printable IPv6 address
    s1 = inet_ntop(socket.AF_INET6, "\xff"*8 + s1)[20:]
    s2 = inet_ntop(socket.AF_INET6, "\xff"*8 + s2)[20:]
    return (s1, s2)
# 85-character alphabet used by the RFC 1924 compact IPv6 representation
# (base-85 digits, in value order).
_rfc1924map = [ '0','1','2','3','4','5','6','7','8','9','A','B','C','D','E',
                'F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T',
                'U','V','W','X','Y','Z','a','b','c','d','e','f','g','h','i',
                'j','k','l','m','n','o','p','q','r','s','t','u','v','w','x',
                'y','z','!','#','$','%','&','(',')','*','+','-',';','<','=',
                '>','?','@','^','_','`','{','|','}','~' ]
def in6_ctop(addr):
    """
    Convert an IPv6 address in Compact Representation Notation
    (RFC 1924) to printable representation ;-)
    Returns None on error.
    """
    # Validate: exactly 20 characters, all taken from the RFC 1924 alphabet.
    if len(addr) != 20 or not reduce(lambda x,y: x and y,
                                     map(lambda x: x in _rfc1924map, addr)):
        return None
    # Decode the base-85 number into a single 128-bit integer.
    i = 0
    for c in addr:
        j = _rfc1924map.index(c)
        i = 85*i + j
    # Split into four network-order 32-bit words, most significant first.
    res = []
    for j in range(4):
        res.append(struct.pack("!I", i%2**32))
        # Floor division: '/' floors ints in py2 but yields a float in py3,
        # which would break struct.pack on the next iteration.
        i = i // (2**32)
    res.reverse()
    return inet_ntop(socket.AF_INET6, "".join(res))
def in6_ptoc(addr):
    """
    Converts an IPv6 address in printable representation to RFC
    1924 Compact Representation ;-)
    Returns None on error.
    """
    try:
        d=struct.unpack("!IIII", inet_pton(socket.AF_INET6, addr))
    except:
        return None
    # Recombine the four 32-bit words into one 128-bit integer.
    res = 0
    m = [2**96, 2**64, 2**32, 1]
    for i in range(4):
        res += d[i]*m[i]
    # Emit base-85 digits, least significant first, then reverse.
    rem = res
    res = []
    while rem:
        res.append(_rfc1924map[rem%85])
        # Floor division keeps 'rem' an int on both py2 and py3
        # (plain '/' would produce a float on py3).
        rem = rem // 85
    res.reverse()
    return "".join(res)
def in6_isaddr6to4(x):
    """
    Return True if provided address (in printable format) is a 6to4
    address (being in 2002::/16).
    """
    x = inet_pton(socket.AF_INET6, x)
    # ' \x02' is '\x20\x02', the first two bytes of 2002::/16
    return x[:2] == ' \x02'
# Teredo (RFC 4380) defaults, stored on conf so they can be overridden.
conf.teredoPrefix = "2001::" # old one was 3ffe:831f (it is a /32)
conf.teredoServerPort = 3544
def in6_isaddrTeredo(x):
    """
    Return True if provided address is a Teredo, meaning it is under
    the /32 conf.teredoPrefix prefix value (by default, 2001::).
    Otherwise, False is returned. Address must be passed in printable
    format.
    """
    # compare only the first 32 bits against the configured prefix
    our = inet_pton(socket.AF_INET6, x)[0:4]
    teredoPrefix = inet_pton(socket.AF_INET6, conf.teredoPrefix)[0:4]
    return teredoPrefix == our
def teredoAddrExtractInfo(x):
    """
    Extract information from a Teredo address. Return value is
    a 4-tuple made of IPv4 address of Teredo server, flag value (int),
    mapped address (non obfuscated) and mapped port (non obfuscated).
    No specific checks are performed on passed address.
    """
    addr = inet_pton(socket.AF_INET6, x)
    server = inet_ntop(socket.AF_INET, addr[4:8])
    flag = struct.unpack("!H",addr[8:10])[0]
    # RFC 4380: mapped port and address are stored XORed with all-ones
    mappedport = struct.unpack("!H",strxor(addr[10:12],'\xff'*2))[0] 
    mappedaddr = inet_ntop(socket.AF_INET, strxor(addr[12:16],'\xff'*4))
    return server, flag, mappedaddr, mappedport
def in6_iseui64(x):
    """
    Return True if provided address has an interface identifier part
    created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*).
    Otherwise, False is returned. Address must be passed in printable
    format.
    """
    # mask that keeps only the fixed ff:fe marker bytes of an EUI-64 IID
    eui64 = inet_pton(socket.AF_INET6, '::ff:fe00:0')
    x = in6_and(inet_pton(socket.AF_INET6, x), eui64)
    return x == eui64
def in6_isanycast(x): # RFC 2526
    # Return a true value when x (printable format) is a reserved
    # subnet-anycast address. Only the EUI-64 layout is implemented.
    if in6_iseui64(x):
        # EUI-64 subnets: anycast IDs live in <prefix>:fdff:ffff:ffff:ff80/121
        s = '::fdff:ffff:ffff:ff80'
        packed_x = inet_pton(socket.AF_INET6, x)
        packed_s = inet_pton(socket.AF_INET6, s)
        x_and_s = in6_and(packed_x, packed_s) 
        return x_and_s == packed_s
    else:
        # not EUI-64 
        #| n bits |   121-n bits   |   7 bits   |
        #+---------------------------------+------------------+------------+
        #|          subnet prefix  | 1111111...111111 | anycast ID |
        #+---------------------------------+------------------+------------+
        #                          |   interface identifier field  |
        warning('in6_isanycast(): TODO not EUI-64')
        return 0
def _in6_bitops(a1, a2, operator=0):
    # Apply a bitwise operation (0=OR, 1=AND, 2=XOR) to two 16-byte IPv6
    # addresses in network format, one 32-bit word at a time.
    # Native byte order ('I' without '!') is fine: the same order is used
    # for unpack and repack, and the operations are purely bitwise.
    a1 = struct.unpack('4I', a1)
    a2 = struct.unpack('4I', a2)
    fop = [ lambda x,y: x | y,
            lambda x,y: x & y,
            lambda x,y: x ^ y
          ]
    # py2: map() returns a list; ''.join over packed str chunks (py2 bytes
    # are str -- this line is not py3 compatible as written).
    ret = map(fop[operator%len(fop)], a1, a2)
    t = ''.join(map(lambda x: struct.pack('I', x), ret))
    return t
def in6_or(a1, a2):
    """
    Provides a bit to bit OR of provided addresses. They must be
    passed in network format. Return value is also an IPv6 address
    in network format.
    """
    # delegate to the shared per-32-bit-word bitwise helper (op 0 = OR)
    return _in6_bitops(a1, a2, 0)
def in6_and(a1, a2):
    """
    Provides a bit to bit AND of provided addresses. They must be
    passed in network format. Return value is also an IPv6 address
    in network format.
    """
    # delegate to the shared per-32-bit-word bitwise helper (op 1 = AND)
    return _in6_bitops(a1, a2, 1)
def in6_xor(a1, a2):
    """
    Provides a bit to bit XOR of provided addresses. They must be
    passed in network format. Return value is also an IPv6 address
    in network format.
    """
    # delegate to the shared per-32-bit-word bitwise helper (op 2 = XOR)
    return _in6_bitops(a1, a2, 2)
def in6_cidr2mask(m):
    """
    Return the mask (bitstring) associated with provided length
    value. For instance if function is called on 48, return value is
    '\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'.
    """
    if m > 128 or m < 0:
        raise Scapy_Exception("value provided to in6_cidr2mask outside [0, 128] domain (%d)" % m)

    t = []
    # build four 32-bit words; each consumes up to 32 bits of the prefix
    # length (m <= 0 yields 0, m >= 32 yields 0xffffffff)
    # NOTE: xrange is Python 2 only.
    for i in xrange(0, 4):
        t.append(max(0, 2**32  - 2**(32-min(32, m))))
        m -= 32

    return ''.join(map(lambda x: struct.pack('!I', x), t))
def in6_getnsma(a): 
    """
    Return link-local solicited-node multicast address for given
    address. Passed address must be provided in network format.
    Returned value is also in network format.
    """
    # RFC 4291: ff02::1:ffXX:XXXX, keeping the low 24 bits of the address
    r = in6_and(a, inet_pton(socket.AF_INET6, '::ff:ffff'))
    r = in6_or(inet_pton(socket.AF_INET6, 'ff02::1:ff00:0'), r) 
    return r
def in6_getnsmac(a): # return multicast Ethernet address associated with multicast v6 destination
    """
    Return the multicast mac address associated with provided
    IPv6 address. Passed address must be in network format.
    """
    # RFC 2464: the MAC is 33:33 followed by the low 32 bits of the address.
    low32 = struct.unpack('16B', a)[-4:]
    return '33:33:' + ':'.join('%.2x' % byte for byte in low32)
def in6_getha(prefix): 
    """
    Return the anycast address associated with all home agents on a given
    subnet.
    """
    # RFC 2526: /64 prefix + reserved IID fdff:ffff:ffff:fffe
    r = in6_and(inet_pton(socket.AF_INET6, prefix), in6_cidr2mask(64))
    r = in6_or(r, inet_pton(socket.AF_INET6, '::fdff:ffff:ffff:fffe')) 
    return inet_ntop(socket.AF_INET6, r)
def in6_ptop(str): 
    """
    Normalizes IPv6 addresses provided in printable format, returning the
    same address in printable format. (2001:0db8:0:0::1 -> 2001:db8::1)
    """
    # round-trip through binary form to let inet_ntop canonicalize
    return inet_ntop(socket.AF_INET6, inet_pton(socket.AF_INET6, str))
def in6_isincluded(addr, prefix, plen):
    """
    Returns True when 'addr' belongs to prefix/plen. False otherwise.
    """
    temp = inet_pton(socket.AF_INET6, addr)
    pref = in6_cidr2mask(plen)
    zero = inet_pton(socket.AF_INET6, prefix)
    # addr is inside prefix/plen iff masking addr with the prefix mask
    # yields the (assumed canonical) prefix itself
    return zero == in6_and(temp, pref)
def in6_isdocaddr(str):
    """
    Returns True if provided address in printable format belongs to
    2001:db8::/32 address space reserved for documentation (as defined 
    in RFC 3849).
    """
    return in6_isincluded(str, '2001:db8::', 32)
def in6_islladdr(str):
    """
    Returns True if provided address in printable format belongs to
    _allocated_ link-local unicast address space (fe80::/10).
    """
    return in6_isincluded(str, 'fe80::', 10)
def in6_issladdr(str):
    """
    Returns True if provided address in printable format belongs to
    _allocated_ site-local address space (fec0::/10). This prefix has 
    been deprecated, address being now reserved by IANA. Function 
    will remain for historic reasons.
    """
    return in6_isincluded(str, 'fec0::', 10)
def in6_isuladdr(str):
    """
    Returns True if provided address in printable format belongs to
    Unique local address space (fc00::/7).
    """
    return in6_isincluded(str, 'fc00::', 7)
# TODO : we should see the status of Unique Local addresses against
# global address space.
# Up-to-date information is available through RFC 3587.
# We should review function behavior based on its content.
def in6_isgladdr(str):
    """
    Returns True if provided address in printable format belongs to
    _allocated_ global address space (2000::/3). Please note that,
    Unique Local addresses (FC00::/7) are not part of global address
    space, and won't match.
    """
    return in6_isincluded(str, '2000::', 3)
def in6_ismaddr(str):
    """
    Returns True if provided address in printable format belongs to 
    allocated Multicast address space (ff00::/8).
    """
    return in6_isincluded(str, 'ff00::', 8)
def in6_ismnladdr(str):
    """
    Returns True if address belongs to node-local multicast address
    space (ff01::/16) as defined in RFC 4291.
    """
    return in6_isincluded(str, 'ff01::', 16)
def in6_ismgladdr(str):
    """
    Returns True if address belongs to global multicast address
    space (ff0e::/16).
    """
    return in6_isincluded(str, 'ff0e::', 16)
def in6_ismlladdr(str):
    """
    Returns True if address belongs to link-local multicast address
    space (ff02::/16).
    """
    return in6_isincluded(str, 'ff02::', 16)
def in6_ismsladdr(str):
    """
    Returns True if address belongs to site-local multicast address
    space (ff05::/16). Site local address space has been deprecated.
    Function remains for historic reasons.
    """
    return in6_isincluded(str, 'ff05::', 16)
def in6_isaddrllallnodes(str):
    """
    Returns True if address is the link-local all-nodes multicast 
    address (ff02::1). 
    """
    # binary comparison handles non-canonical spellings of ff02::1
    return (inet_pton(socket.AF_INET6, "ff02::1") ==
            inet_pton(socket.AF_INET6, str))
def in6_isaddrllallservers(str):
    """
    Returns True if address is the link-local all-servers multicast 
    address (ff02::2). 
    """
    # binary comparison handles non-canonical spellings of ff02::2
    return (inet_pton(socket.AF_INET6, "ff02::2") ==
            inet_pton(socket.AF_INET6, str))
def in6_getscope(addr):
    """
    Return the scope constant (IPV6_ADDR_*) of the given printable
    address, or -1 when it cannot be determined.
    """
    if in6_isgladdr(addr) or in6_isuladdr(addr):
        return IPV6_ADDR_GLOBAL
    if in6_islladdr(addr):
        return IPV6_ADDR_LINKLOCAL
    if in6_issladdr(addr):
        return IPV6_ADDR_SITELOCAL
    if in6_ismaddr(addr):
        # Multicast: the scope is encoded in the address itself.
        if in6_ismgladdr(addr):
            return IPV6_ADDR_GLOBAL
        if in6_ismlladdr(addr):
            return IPV6_ADDR_LINKLOCAL
        if in6_ismsladdr(addr):
            return IPV6_ADDR_SITELOCAL
        if in6_ismnladdr(addr):
            return IPV6_ADDR_LOOPBACK
        return -1
    if addr == '::1':
        return IPV6_ADDR_LOOPBACK
    return -1
def in6_get_common_plen(a, b):
    """
    Return the number of leading bits shared by IPv6 addresses a and b
    (both given in printable format), i.e. their common prefix length.
    """
    def _common_leading_bits(byte1, byte2):
        # Count identical leading bits (0..8) of two byte values.
        diff = byte1 ^ byte2
        for pos in range(8):
            if diff & (0x80 >> pos):
                return pos
        return 8

    rawa = inet_pton(socket.AF_INET6, a)
    rawb = inet_pton(socket.AF_INET6, b)
    for idx in range(16):
        common = _common_leading_bits(ord(rawa[idx]), ord(rawb[idx]))
        if common != 8:
            return 8 * idx + common
    return 128
| |
#!/usr/bin/env python
## /*=========================================================================
## Program: Visualization Toolkit
## Module: HeaderTesting.py
## Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
## All rights reserved.
## See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notice for more information.
## =========================================================================*/
## .NAME HeaderTesting - a VTK style and validity checking utility
## .SECTION Description
## HeaderTesting is a script which checks the list of header files for
## validity based on VTK coding standard. It checks for proper super
## classes, number and style of include files, type macro, private
## copy constructor and assignment operator, broken constructors, and
## exsistence of PrintSelf method. This script should be run as a part
## of the dashboard checking of the Visualization Toolkit and related
## projects.
## .SECTION See Also
## http://www.vtk.org http://public.kitware.com/Dart/HTML/Index.shtml
## http://www.vtk.org/contribute.php#coding-standards
import sys
import re
import os
import stat
import string
# Get the path to the directory containing this script.
if __name__ == '__main__':
    # Run as a script: sys.path[0] is the script's directory ('' -> cwd).
    selfpath = os.path.abspath(sys.path[0] or os.curdir)
else:
    selfpath = os.path.abspath(os.path.dirname(__file__))

# Load the list of names mangled by windows.h.
# NOTE: execfile() is Python 2 only.
execfile(os.path.join(selfpath, 'WindowsMangleList.py'))

## If tested from dart, make sure to fix all the output strings
# NOTE: dict.has_key() is Python 2 only.
test_from_dart = 0
if os.environ.has_key("DART_TEST_FROM_DART"):
    test_from_dart = 1
## For backward compatibility
def StringEndsWith(str1, str2):
    """Return a true value when str1 ends with the suffix str2."""
    # str.endswith already handles the case where str2 is longer than str1
    # (it simply returns False), so the manual length check is unnecessary.
    return str1.endswith(str2)
##
class TestVTKFiles:
def __init__(self):
self.FileName = ""
self.ErrorValue = 0;
self.Errors = {}
self.WarningValue = 0;
self.Warnings = {}
self.FileLines = []
self.Export = ""
self.UnnecessaryIncludes = [
"stdio.h",
"stdlib.h",
"string.h",
"iostream",
"iostream.h",
"strstream",
"strstream.h",
"fstream",
"fstream.h",
"windows.h"
]
pass
def SetExport(self, export):
self.Export = export
def Print(self, text=""):
rtext = text
if test_from_dart:
rtext = string.replace(rtext, "<", "<")
rtext = string.replace(rtext, ">", ">")
print rtext
def Error(self, error):
self.ErrorValue = 1
self.Errors[error] = 1
pass
def Warning(self, warning):
self.WarningValue = 1
self.Warnings[warning] = 1
pass
def PrintErrors(self):
if self.ErrorValue:
self.Print( )
self.Print( "There were errors:" )
for a in self.Errors.keys():
self.Print( "* %s" % a )
def PrintWarnings(self):
if self.WarningValue:
self.Print( )
self.Print( "There were warnings:" )
for a in self.Warnings.keys():
self.Print( "* %s" % a )
def TestFile(self, filename):
self.FileName = filename
self.FileLines = []
self.ClassName = ""
self.ParentName = ""
try:
file = open(filename)
self.FileLines = file.readlines()
file.close()
except:
self.Print( "Problem reading file: %s" % filename )
sys.exit(1)
return not self.CheckExclude()
def CheckExclude(self):
prefix = '// VTK-HeaderTest-Exclude:'
exclude = 0
for l in self.FileLines:
if l.startswith(prefix):
e = l[len(prefix):].strip()
if e == os.path.basename(self.FileName):
exclude += 1
else:
self.Error("Wrong exclusion: "+l.rstrip())
if exclude > 1:
self.Error("Multiple VTK-HeaderTest-Exclude lines")
return exclude > 0
def CheckIncludes(self):
count = 0
lines = []
nplines = []
unlines = []
includere = "^\s*#\s*include\s*[\"<]([^>\"]+)"
ignincludere = ".*\/\/.*"
regx = re.compile(includere)
regx1 = re.compile(ignincludere)
cc = 0
includeparent = 0
for a in self.FileLines:
line = string.strip(a)
rm = regx.match(line)
if rm and not regx1.match(line):
lines.append(" %4d: %s" % (cc, line))
file = rm.group(1)
if file == (self.ParentName + ".h"):
includeparent = 1
if not StringEndsWith(file, ".h"):
nplines.append(" %4d: %s" % (cc, line))
if file in self.UnnecessaryIncludes:
unlines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 1:
self.Print()
self.Print( "File: %s has %d includes: " %
( self.FileName, len(lines)) )
for a in lines:
self.Print( a )
self.Error("Multiple includes")
if len(nplines) > 0:
self.Print( )
self.Print( "File: %s has non-portable include(s): " % self.FileName )
for a in nplines:
self.Print( a )
self.Error("Non-portable includes")
if len(unlines) > 0:
self.Print( )
self.Print( "File: %s has unnecessary include(s): " % self.FileName )
for a in unlines:
self.Print( a )
self.Error("Unnecessary includes")
if not includeparent and self.ParentName:
self.Print()
self.Print( "File: %s does not include parent \"%s.h\"" %
( self.FileName, self.ParentName ) )
self.Error("Does not include parent")
pass
def CheckParent(self):
classre = "^class\s*(.*_EXPORT|\s*) (vtk[A-Z0-9_][^ :\n]*)\s*:\s*public\s*(vtk[^ \n\{]*)"
cname = ""
pname = ""
classlines = []
regx = re.compile(classre)
cc = 0
lastline = ""
for a in self.FileLines:
line = string.strip(a)
rm = regx.match(line)
if not rm and not cname:
rm = regx.match(lastline + line)
if rm:
export = rm.group(1)
export = string.strip(export)
cname = rm.group(2)
pname = rm.group(3)
classlines.append(" %4d: %s" % (cc, line))
if not export:
self.Print("File: %s defines 1 class with no export macro:" % self.FileName)
self.Print(" %4d: %s" % (cc, line))
self.Error("No export macro")
elif self.Export and self.Export != export:
self.Print("File: %s defines 1 class with wrong export macro:" % self.FileName)
self.Print(" %4d: %s" % (cc, line))
self.Print(" The export macro should be: %s" % (self.Export))
self.Error("Wrong export macro")
cc = cc + 1
lastline = a
if len(classlines) > 1:
self.Print()
self.Print( "File: %s defines %d classes: " %
(self.FileName, len(classlines)) )
for a in classlines:
self.Print( a )
self.Error("Multiple classes defined")
if len(classlines) < 1:
self.Print()
self.Print( "File: %s does not define any classes" % self.FileName )
self.Error("No class defined")
return
#self.Print( "Classname: %s ParentName: %s" % (cname, pname)
self.ClassName = cname
self.ParentName = pname
pass
def CheckTypeMacro(self):
count = 0
lines = []
oldlines = []
typere = "^\s*vtk(Abstract)?Type(Revision)*Macro\s*\(\s*(vtk[^ ,]+)\s*,\s*(vtk[^ \)]+)\s*\)\s*"
typesplitre = "^\s*vtk(Abstract)?Type(Revision)*Macro\s*\("
regx = re.compile(typere)
regxs = re.compile(typesplitre)
cc = 0
found = 0
for a in range(len(self.FileLines)):
line = string.strip(self.FileLines[a])
rm = regx.match(line)
if rm:
found = 1
if rm.group(2) == "Revision":
oldlines.append(" %4d: %s" % (cc, line))
cname = rm.group(3)
pname = rm.group(4)
if cname != self.ClassName or pname != self.ParentName:
lines.append(" %4d: %s" % (cc, line))
else:
# Maybe it is in two lines
rm = regxs.match(line)
if rm:
nline = line + " " + string.strip(self.FileLines[a+1])
line = string.strip(nline)
rm = regx.match(line)
if rm:
found = 1
if rm.group(2) == "Revision":
oldlines.append(" %4d: %s" % (cc, line))
cname = rm.group(3)
pname = rm.group(4)
if cname != self.ClassName or pname != self.ParentName:
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has broken type macro(s):" % self.FileName )
for a in lines:
self.Print( a )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName) )
self.Error("Broken type macro")
if len(oldlines) > 0:
self.Print( "File: %s has legacy type-revision macro(s):" % self.FileName )
for a in oldlines:
self.Print( a )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName))
self.Error("Legacy style type-revision macro")
if not found:
self.Print( "File: %s does not have type macro" % self.FileName )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName))
self.Error("No type macro")
pass
def CheckForCopyAndAssignment(self):
if not self.ClassName:
return
count = 0
lines = []
oldlines = []
copyoperator = "^\s*%s\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*[iI]mplemented(\.)*" % ( self.ClassName, self.ClassName)
asgnoperator = "^\s*void\s*operator\s*=\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*[iI]mplemented(\.)*" % self.ClassName
#self.Print( copyoperator
regx1 = re.compile(copyoperator)
regx2 = re.compile(asgnoperator)
foundcopy = 0
foundasgn = 0
for a in self.FileLines:
line = string.strip(a)
if regx1.match(line):
foundcopy = foundcopy + 1
if regx2.match(line):
foundasgn = foundasgn + 1
lastline = ""
if foundcopy < 1:
for a in self.FileLines:
line = string.strip(a)
if regx1.match(lastline + line):
foundcopy = foundcopy + 1
lastline = a
lastline = ""
if foundasgn < 1:
for a in self.FileLines:
line = string.strip(a)
if regx2.match(lastline + line):
foundasgn = foundasgn + 1
lastline = a
if foundcopy < 1:
self.Print( "File: %s does not define copy constructor" %
self.FileName )
self.Print( "Should be:\n%s(const %s&); // Not implemented" %
(self.ClassName, self.ClassName) )
self.Error("No private copy constructor")
if foundcopy > 1:
self.Print( "File: %s defines multiple copy constructors" %
self.FileName )
self.Error("Multiple copy constructor")
if foundasgn < 1:
self.Print( "File: %s does not define assignment operator" %
self.FileName )
self.Print( "Should be:\nvoid operator=(const %s&); // Not implemented"
% self.ClassName )
self.Error("No private assignment operator")
if foundcopy > 1:
self.Print( "File: %s defines multiple assignment operators" %
self.FileName )
self.Error("Multiple assignment operators")
pass
def CheckWeirdConstructors(self):
count = 0
lines = []
oldlines = []
constructor = "^\s*%s\s*\(([^ )]*)\)" % self.ClassName
copyoperator = "^\s*%s\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*implemented(\.)*" % ( self.ClassName, self.ClassName)
regx1 = re.compile(constructor)
regx2 = re.compile(copyoperator)
cc = 0
for a in self.FileLines:
line = string.strip(a)
rm = regx1.match(line)
if rm:
arg = string.strip(rm.group(1))
if arg and not regx2.match(line):
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has weird constructor(s):" % self.FileName )
for a in lines:
self.Print( a )
self.Print( "There should be only:\n %s();" % self.ClassName )
self.Error("Weird constructor")
pass
def CheckPrintSelf(self):
if not self.ClassName:
return
typere = "^\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
newtypere = "^\s*virtual\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
regx1 = re.compile(typere)
regx2 = re.compile(newtypere)
found = 0
oldstyle = 0
for a in self.FileLines:
line = string.strip(a)
rm1 = regx1.match(line)
rm2 = regx2.match(line)
if rm1 or rm2:
found = 1
if rm1:
oldstyle = 1
if not found:
self.Print( "File: %s does not define PrintSelf method:" %
self.FileName )
self.Warning("No PrintSelf method")
pass
def CheckWindowsMangling(self):
lines = []
regx1 = WindowsMangleRegEx
regx2 = re.compile("^.*VTK_LEGACY.*$")
# This version will leave out comment lines but we probably do
# not want to refer to mangled (hopefully deprecated) methods
# in comments.
# regx2 = re.compile("^(\s*//|\s*\*|.*VTK_LEGACY).*$")
cc = 1
for a in self.FileLines:
line = string.strip(a)
rm = regx1.match(line)
if rm:
arg = string.strip(rm.group(1))
if arg and not regx2.match(line):
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has windows.h mangling violations:" % self.FileName )
for a in lines:
self.Print(a)
self.Error("Windows Mangling Violation - choose another name that does not conflict.")
pass
##
# Driver: test every header in the given directory and exit non-zero on error.
test = TestVTKFiles()
## Check command line arguments
if len(sys.argv) < 2:
    print "Testing directory not specified..."
    print "Usage: %s <directory> [ exception(s) ]" % sys.argv[0]
    sys.exit(1)
dirname = sys.argv[1]
exceptions = sys.argv[2:]
# If the second argument looks like a VTK export macro (VTK...EXPORT), treat
# it as the expected macro and only the remaining arguments as exceptions.
if len(sys.argv) > 2:
    export = sys.argv[2]
    if export[:3] == "VTK" and export[len(export)-len("EXPORT"):] == "EXPORT":
        print "Use export macro: %s" % export
        exceptions = sys.argv[3:]
        test.SetExport(export)
## Traverse through the list of files
for a in os.listdir(dirname):
    ## Skip non-header files
    if not StringEndsWith(a, ".h"):
        continue
    ## Skip exceptions (matched by bare name or by full path)
    if a in exceptions:
        continue
    pathname = '%s/%s' % (dirname, a)
    if pathname in exceptions:
        continue
    mode = os.stat(pathname)[stat.ST_MODE]
    ## Skip directories
    if stat.S_ISDIR(mode):
        continue
    elif stat.S_ISREG(mode) and test.TestFile(pathname):
        ## Do all the tests.  CheckParent runs first because CheckIncludes
        ## and CheckTypeMacro depend on the class/parent names it records.
        test.CheckParent()
        test.CheckIncludes()
        test.CheckTypeMacro()
        test.CheckForCopyAndAssignment()
        test.CheckWeirdConstructors()
        test.CheckPrintSelf()
        test.CheckWindowsMangling()
## Summarize errors; the exit status reflects whether any file failed.
test.PrintWarnings()
test.PrintErrors()
sys.exit(test.ErrorValue)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import hashlib
import multiprocessing
from multiprocessing.pool import ThreadPool
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
try:
import queue # pylint:disable=g-import-not-at-top
except ImportError:
import Queue as queue # pylint:disable=g-import-not-at-top
if sys.version_info[0] == 2:

  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    Arguments:
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """

    def chunk_read(response, chunk_size=8192, reporthook=None):
      # Total size is only known when the server sends Content-Length.
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while 1:
        chunk = response.read(chunk_size)
        count += 1
        if not chunk:
          # Bug fix: the final "download complete" callback was invoked
          # unconditionally, crashing when no reporthook was supplied.
          if reporthook:
            reporthook(count, total_size, total_size)
          break
        if reporthook:
          reporthook(count, chunk_size, total_size)
        yield chunk

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  from six.moves.urllib.request import urlretrieve  # pylint: disable=g-import-not-at-top
def _extract_archive(file_path, path='.', archive_format='auto'):
  """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.

  Arguments:
      file_path: path to the archive file
      path: path to extract the archive file
      archive_format: Archive format to try for extracting the file.
          Options are 'auto', 'tar', 'zip', and None.
          'tar' includes tar, tar.gz, and tar.bz files.
          The default 'auto' is ['tar', 'zip'].
          None or an empty list will return no matches found.

  Returns:
      True if a match was found and an archive extraction was completed,
      False otherwise.
  """
  if archive_format is None:
    return False
  # Bug fix: string comparisons below used `is`, which depends on CPython
  # string interning and is not guaranteed; use equality instead.
  if archive_format == 'auto':
    archive_format = ['tar', 'zip']
  if isinstance(archive_format, six.string_types):
    archive_format = [archive_format]
  for archive_type in archive_format:
    if archive_type == 'tar':
      open_fn = tarfile.open
      is_match_fn = tarfile.is_tarfile
    if archive_type == 'zip':
      open_fn = zipfile.ZipFile
      is_match_fn = zipfile.is_zipfile
    if is_match_fn(file_path):
      with open_fn(file_path) as archive:
        try:
          archive.extractall(path)
        except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
          # Remove whatever partial extraction was written before re-raising.
          if os.path.exists(path):
            if os.path.isfile(path):
              os.remove(path)
            else:
              shutil.rmtree(path)
          raise
      return True
  return False
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it is not already in the cache.

  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.

  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.

  Arguments:
      fname: Name of the file. If an absolute path `/path/to/file.txt` is
          specified the file will be saved at that location.
      origin: Original URL of the file.
      untar: Deprecated in favor of 'extract'.
          boolean, whether the file should be decompressed
      md5_hash: Deprecated in favor of 'file_hash'.
          md5 hash of the file for verification
      file_hash: The expected hash string of the file after download.
          The sha256 and md5 hash algorithms are both supported.
      cache_subdir: Subdirectory under the Keras cache dir where the file is
          saved. If an absolute path `/path/to/folder` is
          specified the file will be saved at that location.
      hash_algorithm: Select the hash algorithm to verify the file.
          options are 'md5', 'sha256', and 'auto'.
          The default 'auto' detects the hash algorithm in use.
      extract: True tries extracting the file as an Archive, like tar or zip.
      archive_format: Archive format to try for extracting the file.
          Options are 'auto', 'tar', 'zip', and None.
          'tar' includes tar, tar.gz, and tar.bz files.
          The default 'auto' is ['tar', 'zip'].
          None or an empty list will return no matches found.
      cache_dir: Location to store cached files, when None it
          defaults to the [Keras
          Directory](/faq/#where-is-the-keras-configuration-file-stored).

  Returns:
      Path to the downloaded file
  """
  if cache_dir is None:
    cache_dir = os.path.expanduser(os.path.join('~', '.keras'))
  # Honor the deprecated md5_hash argument by mapping it onto file_hash.
  if md5_hash is not None and file_hash is None:
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  if not os.access(datadir_base, os.W_OK):
    # Fall back to a world-writable location when the cache dir is read-only.
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  if not os.path.exists(datadir):
    os.makedirs(datadir)
  if untar:
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)
  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        print('A local file was found, but it seems to be '
              'incomplete or outdated because the ' + hash_algorithm +
              ' file hash does not match the original value of ' + file_hash +
              ' so we will re-download the data.')
        download = True
  else:
    download = True
  if download:
    print('Downloading data from', origin)

    class ProgressTracker(object):
      # Maintain progbar for the lifetime of download.
      # This design was chosen for Python 2.7 compatibility.
      progbar = None

    def dl_progress(count, block_size, total_size):
      if ProgressTracker.progbar is None:
        # Bug fix: `total_size is -1` relied on CPython small-int caching;
        # use a proper equality comparison.
        if total_size == -1:
          total_size = None
        ProgressTracker.progbar = Progbar(total_size)
      else:
        ProgressTracker.progbar.update(count * block_size)

    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      except URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
      except HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
    except (Exception, KeyboardInterrupt):
      # Never leave a partial download in the cache.
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
    ProgressTracker.progbar = None
  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath
  if extract:
    _extract_archive(fpath, datadir, archive_format)
  return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Arguments:
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.

  Arguments:
      fpath: path to the file being validated
      file_hash: The expected hash string of the file.
          The sha256 and md5 hash algorithms are both supported.
      algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
          The default 'auto' detects the hash algorithm from the length of
          `file_hash` (64 hex digits means sha256).
      chunk_size: Bytes to read at a time, important for large files.

  Returns:
      Whether the file is valid
  """
  # Bug fix: the original used `is` for string/int comparisons, which relies
  # on CPython interning and small-int caching; use equality instead.
  if algorithm == 'sha256' or (algorithm == 'auto' and len(file_hash) == 64):
    hasher = 'sha256'
  else:
    hasher = 'md5'
  return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
class Sequence(object):
  """Base object for fitting to a sequence of data, such as a dataset.

  Subclasses must provide both `__getitem__` and `__len__`, e.g.:

  ```python
  from skimage.io import imread
  from skimage.transform import resize
  import numpy as np

  # `x_set` holds paths to images, `y_set` the associated classes.
  class CIFAR10Sequence(Sequence):

      def __init__(self, x_set, y_set, batch_size):
          self.X, self.y = x_set, y_set
          self.batch_size = batch_size

      def __len__(self):
          return len(self.X) // self.batch_size

      def __getitem__(self, idx):
          lo, hi = idx * self.batch_size, (idx + 1) * self.batch_size
          images = [resize(imread(name), (200, 200)) for name in self.X[lo:hi]]
          return np.array(images), np.array(self.y[lo:hi])
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Gets batch at position `index`.

    Arguments:
        index: position of the batch in the Sequence.

    Returns:
        A batch
    """
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Number of batches in the Sequence.

    Returns:
        The number of batches in the Sequence.
    """
    raise NotImplementedError
def get_index(ds, i):
  """Return `ds[i]`.

  Kept at module level as a quick fix for Python 2: bound methods cannot be
  pickled, so multiprocessing workers call this free function instead.

  Arguments:
      ds: a Holder or Sequence object.
      i: index

  Returns:
      The value at index `i`.
  """
  return ds[i]
class SequenceEnqueuer(object):
  """Base class to enqueue inputs.

  The task of an Enqueuer is to use parallelism to speed up preprocessing.
  This is done with processes or threads.

  Examples:

  ```python
  enqueuer = SequenceEnqueuer(...)
  enqueuer.start()
  datas = enqueuer.get()
  for data in datas:
      # Use the inputs; training, evaluating, predicting.
      # ... stop sometime.
  enqueuer.close()
  ```

  The `enqueuer.get()` should be an infinite stream of datas.
  """

  @abstractmethod
  def is_running(self):
    """Whether the workers are currently active."""
    raise NotImplementedError

  @abstractmethod
  def start(self, workers=1, max_queue_size=10):
    """Starts the handler's workers.

    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, threads could block on `put()`).
    """
    raise NotImplementedError

  @abstractmethod
  def stop(self, timeout=None):
    """Stop running threads and wait for them to exit, if necessary.

    Should be called by the same thread which called start().

    Arguments:
        timeout: maximum time to wait on thread.join()
    """
    raise NotImplementedError

  @abstractmethod
  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Returns:
        Generator yielding tuples `(inputs, targets)`
        or `(inputs, targets, sample_weights)`.
    """
    raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
  """Builds a Enqueuer from a Sequence.

  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

  Arguments:
      sequence: A `keras.utils.data_utils.Sequence` object.
      use_multiprocessing: use multiprocessing if True, otherwise threading
      scheduling: Sequential querying of datas if 'sequential', random
          otherwise.
  """

  def __init__(self,
               sequence,
               use_multiprocessing=False,
               scheduling='sequential'):
    self.sequence = sequence
    self.use_multiprocessing = use_multiprocessing
    self.scheduling = scheduling
    self.workers = 0
    self.executor = None  # Pool (process or thread) created by start().
    self.queue = None  # Holds AsyncResult futures in submission order.
    self.run_thread = None  # Daemon thread running _run().
    self.stop_signal = None  # threading.Event set by stop().

  def is_running(self):
    return self.stop_signal is not None and not self.stop_signal.is_set()

  def start(self, workers=1, max_queue_size=10):
    """Start the handler's workers.

    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, workers could block on `put()`)
    """
    if self.use_multiprocessing:
      self.executor = multiprocessing.Pool(workers)
    else:
      self.executor = ThreadPool(workers)
    self.queue = queue.Queue(max_queue_size)
    self.stop_signal = threading.Event()
    self.run_thread = threading.Thread(target=self._run)
    self.run_thread.daemon = True
    self.run_thread.start()

  def _run(self):
    """Submits requests to the executor and queues the `Future` objects."""
    sequence = list(range(len(self.sequence)))
    while True:
      # Bug fix: `is not 'sequential'` compared string identity, which is
      # implementation-defined; compare by value instead.
      if self.scheduling != 'sequential':
        random.shuffle(sequence)
      for i in sequence:
        if self.stop_signal.is_set():
          return
        # Blocks when the queue is full, providing backpressure.
        self.queue.put(
            self.executor.apply_async(get_index, (self.sequence, i)),
            block=True)

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
        Tuples (inputs, targets)
        or (inputs, targets, sample_weights)
    """
    try:
      while self.is_running():
        inputs = self.queue.get(block=True).get()
        if inputs is not None:
          yield inputs
    except Exception:
      self.stop()
      # Bug fix: previously wrapped the error in StopIteration, which a
      # generator silently swallows on Python < 3.7 and which PEP 479 turns
      # into an unrelated RuntimeError on 3.7+.  Re-raise the original.
      raise

  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.

    Should be called by the same thread which called `start()`.

    Arguments:
        timeout: maximum time to wait on `thread.join()`
    """
    self.stop_signal.set()
    # Drain the queue under its lock so _run() can unblock and observe the
    # stop signal.
    with self.queue.mutex:
      self.queue.queue.clear()
      self.queue.unfinished_tasks = 0
      self.queue.not_full.notify()
    self.executor.close()
    self.executor.join()
    self.run_thread.join(timeout)
class GeneratorEnqueuer(SequenceEnqueuer):
  """Builds a queue out of a data generator.

  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

  Arguments:
      generator: a generator function which endlessly yields data
      use_multiprocessing: use multiprocessing if True, otherwise threading
      wait_time: time to sleep in-between calls to `put()`
      random_seed: Initial seed for workers,
          will be incremented by one for each workers.
  """

  def __init__(self,
               generator,
               use_multiprocessing=False,
               wait_time=0.05,
               random_seed=None):
    self.wait_time = wait_time
    self._generator = generator
    self._use_multiprocessing = use_multiprocessing
    # Worker threads (or Process objects, despite the name) created by start().
    self._threads = []
    # threading.Event or multiprocessing.Event; None until start() is called.
    self._stop_event = None
    self.queue = None
    self.random_seed = random_seed

  def start(self, workers=1, max_queue_size=10):
    """Kicks off threads which add data from the generator into the queue.

    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, threads could block on `put()`)
    """

    def data_generator_task():
      # Each worker pulls from the shared generator until stop() is called
      # or the generator raises.
      while not self._stop_event.is_set():
        try:
          # NOTE(review): a multiprocessing.Queue has no reliable qsize()
          # bound here, so the size check is skipped in that mode and the
          # queue's own maxsize provides the backpressure.
          if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
            generator_output = next(self._generator)
            self.queue.put(generator_output)
          else:
            time.sleep(self.wait_time)
        except Exception:
          # Signal all workers to stop before propagating the error.
          self._stop_event.set()
          raise

    try:
      if self._use_multiprocessing:
        self.queue = multiprocessing.Queue(maxsize=max_queue_size)
        self._stop_event = multiprocessing.Event()
      else:
        self.queue = queue.Queue()
        self._stop_event = threading.Event()
      for _ in range(workers):
        if self._use_multiprocessing:
          # Reset random seed else all children processes
          # share the same seed
          np.random.seed(self.random_seed)
          thread = multiprocessing.Process(target=data_generator_task)
          thread.daemon = True
          if self.random_seed is not None:
            self.random_seed += 1
        else:
          thread = threading.Thread(target=data_generator_task)
        self._threads.append(thread)
        thread.start()
    except:
      # Roll back any workers already started before re-raising.
      self.stop()
      raise

  def is_running(self):
    return self._stop_event is not None and not self._stop_event.is_set()

  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.

    Should be called by the same thread which called `start()`.

    Arguments:
        timeout: maximum time to wait on `thread.join()`.
    """
    if self.is_running():
      self._stop_event.set()
    for thread in self._threads:
      if thread.is_alive():
        if self._use_multiprocessing:
          # Processes are terminated; threads can only be joined.
          thread.terminate()
        else:
          thread.join(timeout)
    if self._use_multiprocessing:
      if self.queue is not None:
        self.queue.close()
    # Reset state so the enqueuer can be started again.
    self._threads = []
    self._stop_event = None
    self.queue = None

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
        Data arrays.
    """
    while self.is_running():
      if not self.queue.empty():
        inputs = self.queue.get()
        if inputs is not None:
          yield inputs
      else:
        # Busy-wait politely while the workers fill the queue.
        time.sleep(self.wait_time)
| |
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from ..azure_common import BaseTest, arm_template, cassette_name
from jsonschema.exceptions import ValidationError
from mock import patch
from c7n_azure.resources.generic_arm_resource import GenericArmResource
class ArmResourceTest(BaseTest):
    def setUp(self):
        # Delegate to BaseTest.setUp for per-test fixture initialization.
        super(ArmResourceTest, self).setUp()
def test_arm_resource_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-armresource',
'resource': 'azure.armresource'
}, validate=True)
self.assertTrue(p)
@arm_template('vm.json')
@cassette_name('common')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-armresource',
'resource': 'azure.armresource',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('vm.json')
def test_metric_filter_find(self):
p = self.load_policy({
'name': 'test-azure-metric',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'metric',
'metric': 'Network In',
'aggregation': 'total',
'op': 'gt',
'threshold': 0}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('vm.json')
def test_metric_filter_find_average(self):
p = self.load_policy({
'name': 'test-azure-metric',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'metric',
'metric': 'Percentage CPU',
'aggregation': 'average',
'op': 'gt',
'threshold': 0}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('vm.json')
def test_metric_filter_not_find(self):
p = self.load_policy({
'name': 'test-azure-metric',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'metric',
'metric': 'Network In',
'aggregation': 'total',
'op': 'lt',
'threshold': 0}],
})
resources = p.run()
self.assertEqual(len(resources), 0)
@arm_template('vm.json')
def test_metric_filter_not_find_average(self):
p = self.load_policy({
'name': 'test-azure-metric',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'metric',
'metric': 'Percentage CPU',
'aggregation': 'average',
'op': 'lt',
'threshold': 0}],
})
resources = p.run()
self.assertEqual(len(resources), 0)
@arm_template('vm.json')
def test_metric_filter_invalid_metric(self):
p = self.load_policy({
'name': 'test-azure-metric',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'metric',
'metric': 'InvalidMetric',
'aggregation': 'average',
'op': 'gte',
'threshold': 0}],
})
resources = p.run()
self.assertEqual(0, len(resources))
def test_metric_filter_invalid_missing_metric(self):
policy = {
'name': 'test-azure-metric',
'resource': 'azure.vm',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestvm'},
{'type': 'metric',
'aggregation': 'total',
'op': 'lt',
'threshold': 0}],
}
self.assertRaises(ValidationError, self.load_policy, policy, validate=True)
def test_metric_filter_invalid_missing_op(self):
    """Schema validation must reject a metric filter lacking 'op'."""
    bad_policy = {
        'name': 'test-azure-metric',
        'resource': 'azure.vm',
        'filters': [
            {'type': 'value',
             'key': 'name',
             'op': 'eq',
             'value_type': 'normalize',
             'value': 'cctestvm'},
            {'type': 'metric',
             'metric': 'Network In',
             'aggregation': 'total',
             'threshold': 0}],
    }
    self.assertRaises(ValidationError, self.load_policy, bad_policy, validate=True)
def test_metric_filter_invalid_missing_threshold(self):
    """Schema validation must reject a metric filter lacking 'threshold'."""
    bad_policy = {
        'name': 'test-azure-metric',
        'resource': 'azure.vm',
        'filters': [
            {'type': 'value',
             'key': 'name',
             'op': 'eq',
             'value_type': 'normalize',
             'value': 'cctestvm'},
            {'type': 'metric',
             'metric': 'Network In',
             'aggregation': 'total',
             'op': 'lt'}],
    }
    self.assertRaises(ValidationError, self.load_policy, bad_policy, validate=True)
# Canned ARM resource payload returned by the patched query filters in the
# delete tests below, so they run without touching a live subscription.
fake_arm_resources = [
    {
        'id': '/subscriptions/fake-guid/resourceGroups/test-resource-group/providers/'
        'Microsoft.Network/networkSecurityGroups/test-nsg-delete',
        'name': 'test-nsg-delete'
    }
]
@patch('c7n_azure.resources.generic_arm_resource.GenericArmResourceQuery.filter',
       return_value=fake_arm_resources)
@patch('c7n_azure.actions.delete.DeleteAction.process',
       return_value='')
def test_delete_armresource(self, delete_action_mock, filter_mock):
    """The delete action on azure.armresource receives the matched resource."""
    policy = self.load_policy({
        'name': 'delete-armresource',
        'resource': 'azure.armresource',
        'filters': [
            {'type': 'value',
             'key': 'name',
             'op': 'eq',
             'value_type': 'normalize',
             'value': 'test-nsg-delete'}],
        'actions': [
            {'type': 'delete'}
        ]
    })
    policy.run()
    delete_action_mock.assert_called_with([self.fake_arm_resources[0]])
@patch('c7n_azure.query.ResourceQuery.filter',
       return_value=fake_arm_resources)
@patch('c7n_azure.actions.delete.DeleteAction.process',
       return_value='')
def test_delete_armresource_specific_name(self, delete_action_mock, filter_mock):
    """The delete action also works through a concrete resource type."""
    policy = self.load_policy({
        'name': 'delete-armresource',
        'resource': 'azure.networksecuritygroup',
        'filters': [
            {'type': 'value',
             'key': 'name',
             'op': 'eq',
             'value_type': 'normalize',
             'value': 'test-nsg-delete'}],
        'actions': [
            {'type': 'delete'}
        ]
    })
    policy.run()
    delete_action_mock.assert_called_with([self.fake_arm_resources[0]])
def test_arm_resource_resource_type_schema_validate(self):
    """The resource-type filter schema accepts a list of provider types."""
    with self.sign_out_patch():
        policy = self.load_policy({
            'name': 'test-azure-armresource-filter',
            'resource': 'azure.armresource',
            'filters': [
                {
                    'type': 'resource-type',
                    'values': ['Microsoft.Storage/storageAccounts', 'Microsoft.Web/serverFarms']
                }
            ]
        }, validate=True)
        self.assertTrue(policy)
@arm_template('vm.json')
@cassette_name('common')
def test_arm_resource_resource_type(self):
    """Filtering the test_vm resource group by type yields four resources."""
    policy = self.load_policy({
        'name': 'test-azure-armresource-filter',
        'resource': 'azure.armresource',
        'filters': [
            {
                'type': 'resource-type',
                'values': [
                    'Microsoft.Network/virtualNetworks',
                    'Microsoft.Storage/storageAccounts',
                    'Microsoft.Compute/virtualMachines',
                    'resourceGroups'
                ]
            },
            {
                'type': 'value',
                'key': 'resourceGroup',
                'value_type': 'normalize',
                'op': 'eq',
                'value': 'test_vm'
            }
        ]
    })
    matched = policy.run()
    self.assertEqual(4, len(matched))
@arm_template('vm.json')
def test_arm_resource_get_resources(self):
    """get_resources() resolves both a VM id and a resource-group id."""
    manager = GenericArmResource(self.test_context,
                                 {'policies': [
                                     {'name': 'test',
                                      'resource': 'azure.armresource'}]})
    rg_id = '/subscriptions/{0}/resourceGroups/test_vm'\
        .format(self.session.get_subscription_id())
    resource_ids = ['{0}/providers/Microsoft.Compute/virtualMachines/cctestvm'.format(rg_id),
                    rg_id]
    found = manager.get_resources(resource_ids)
    self.assertEqual(2, len(found))
    self.assertEqual({'resourceGroups', 'Microsoft.Compute/virtualMachines'},
                     {r['type'] for r in found})
    self.assertEqual(set(resource_ids),
                     {r['id'] for r in found})
    self.assertEqual({'test_vm'},
                     {r['resourceGroup'] for r in found})
| |
#!/usr/bin/env python
import argparse
import binascii
import copy
import datetime
import hashlib
import json
import logging
import os
import shutil
import struct
import subprocess
import tempfile
import xml.etree.ElementTree as ET
from collections import defaultdict
from Bio.Data import CodonTable
# Module-wide logger for the JBrowse tool wrapper.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('jbrowse')
# Used to substitute the '__pd__date__pd__' placeholder in track categories.
TODAY = datetime.datetime.now().strftime("%Y-%m-%d")
# Base URL of the Galaxy instance, used by metadata_from_node() to build
# links; presumably assigned by the (unseen) CLI entry point — TODO confirm.
GALAXY_INFRASTRUCTURE_URL = None
class ColorScaling(object):
    """Builds JBrowse per-track colour configuration.

    Produces JavaScript colour-callback snippets (opacity driven by feature
    scores) and cycles through a Brewer palette for tracks whose colour is
    left on '__auto__'.
    """

    # JS callback for BLAST tracks: fixed colour, opacity from the score.
    COLOR_FUNCTION_TEMPLATE = """
    function(feature, variableName, glyphObject, track) {{
        var score = {score};
        {opacity}
        return 'rgba({red}, {green}, {blue}, ' + opacity + ')';
    }}
    """

    # JS callback for qualitative (gene-call) tracks: colour may come from
    # the feature hierarchy (searched up and down), a user choice, or an
    # auto-generated colour; opacity comes from the score when present.
    COLOR_FUNCTION_TEMPLATE_QUAL = r"""
    function(feature, variableName, glyphObject, track) {{
        var search_up = function self(sf, attr){{
            if(sf.get(attr) !== undefined){{
                return sf.get(attr);
            }}
            if(sf.parent() === undefined) {{
                return;
            }}else{{
                return self(sf.parent(), attr);
            }}
        }};

        var search_down = function self(sf, attr){{
            if(sf.get(attr) !== undefined){{
                return sf.get(attr);
            }}
            if(sf.children() === undefined) {{
                return;
            }}else{{
                var kids = sf.children();
                for(var child_idx in kids){{
                    var x = self(kids[child_idx], attr);
                    if(x !== undefined){{
                        return x;
                    }}
                }}
                return;
            }}
        }};

        var color = ({user_spec_color} || search_up(feature, 'color') || search_down(feature, 'color') || {auto_gen_color});
        var score = (search_up(feature, 'score') || search_down(feature, 'score'));
        {opacity}
        if(score === undefined){{ opacity = 1; }}
        var result = /^#?([a-f\d]{{2}})([a-f\d]{{2}})([a-f\d]{{2}})$/i.exec(color);
        var red = parseInt(result[1], 16);
        var green = parseInt(result[2], 16);
        var blue = parseInt(result[3], 16);
        if(isNaN(opacity) || opacity < 0){{ opacity = 0; }}
        return 'rgba(' + red + ',' + green + ',' + blue + ',' + opacity + ')';
    }}
    """

    # JS fragments computing 'opacity' from 'score'; formatted with {min}/{max}.
    OPACITY_MATH = {
        'linear': """
            var opacity = (score - ({min})) / (({max}) - ({min}));
        """,
        'logarithmic': """
            var opacity = Math.log10(score - ({min})) / Math.log10(({max}) - ({min}));
        """,
        'blast': """
            var opacity = 0;
            if(score == 0.0) {{
                opacity = 1;
            }} else {{
                opacity = (20 - Math.log10(score)) / 180;
            }}
        """
    }

    BREWER_COLOUR_IDX = 0
    # Qualitative Brewer palette cycled through by _get_colours().
    BREWER_COLOUR_SCHEMES = [
        (166, 206, 227),
        (31, 120, 180),
        (178, 223, 138),
        (51, 160, 44),
        (251, 154, 153),
        (227, 26, 28),
        (253, 191, 111),
        (255, 127, 0),
        (202, 178, 214),
        (106, 61, 154),
        (255, 255, 153),
        (177, 89, 40),
        (228, 26, 28),
        (55, 126, 184),
        (77, 175, 74),
        (152, 78, 163),
        (255, 127, 0),
    ]

    # Diverging Brewer palettes as (low-end, high-end) hex colour pairs.
    BREWER_DIVERGING_PALLETES = {
        'BrBg': ("#543005", "#003c30"),
        'PiYg': ("#8e0152", "#276419"),
        'PRGn': ("#40004b", "#00441b"),
        'PuOr': ("#7f3b08", "#2d004b"),
        'RdBu': ("#67001f", "#053061"),
        'RdGy': ("#67001f", "#1a1a1a"),
        'RdYlBu': ("#a50026", "#313695"),
        'RdYlGn': ("#a50026", "#006837"),
        'Spectral': ("#9e0142", "#5e4fa2"),
    }

    def __init__(self):
        # Position in BREWER_COLOUR_SCHEMES of the next auto-assigned colour.
        self.brewer_colour_idx = 0

    def rgb_from_hex(self, hexstr):
        """'ff7f00' -> (255, 127, 0). Expects 6 hex digits, no leading '#'."""
        # http://stackoverflow.com/questions/4296249/how-do-i-convert-a-hex-triplet-to-an-rgb-tuple-and-back
        return struct.unpack('BBB', binascii.unhexlify(hexstr))

    def min_max_gff(self, gff_file):
        """Return (min, max) of the GFF score column (column 6) of *gff_file*.

        Lines without a parseable numeric score (comments, short lines) are
        skipped. Returns (None, None) when no line carries a score.
        """
        min_val = None
        max_val = None
        with open(gff_file, 'r') as handle:
            for line in handle:
                try:
                    value = float(line.split('\t')[5])
                except (IndexError, ValueError):
                    continue
                # Fix: the previous `min(value, min_val or value)` idiom
                # treated a legitimate score of 0.0 as "unset" and dropped
                # it from the range; compare against None explicitly.
                min_val = value if min_val is None else min(min_val, value)
                max_val = value if max_val is None else max(max_val, value)
        return min_val, max_val

    def hex_from_rgb(self, r, g, b):
        """(255, 127, 0) -> '#ff7f00'."""
        return '#%02x%02x%02x' % (r, g, b)

    def _get_colours(self):
        """Return the next (r, g, b) from the Brewer palette, cycling forever."""
        r, g, b = self.BREWER_COLOUR_SCHEMES[self.brewer_colour_idx % len(self.BREWER_COLOUR_SCHEMES)]
        self.brewer_colour_idx += 1
        return r, g, b

    def parse_menus(self, track):
        """Build the 'menuTemplate' track config from track['menus'].

        The four leading empty dicts preserve JBrowse's built-in menu slots;
        user-defined entries are appended after them.
        """
        trackConfig = {'menuTemplate': [{}, {}, {}, {}]}
        if 'menu' in track['menus']:
            # Normalise a single menu entry into a list of entries.
            menu_list = [track['menus']['menu']]
            if isinstance(track['menus']['menu'], list):
                menu_list = track['menus']['menu']
            for m in menu_list:
                tpl = {
                    'action': m['action'],
                    'label': m.get('label', '{name}'),
                    'iconClass': m.get('iconClass', 'dijitIconBookmark'),
                }
                if 'url' in m:
                    tpl['url'] = m['url']
                if 'content' in m:
                    tpl['content'] = m['content']
                if 'title' in m:
                    tpl['title'] = m['title']
                trackConfig['menuTemplate'].append(tpl)
        return trackConfig

    def parse_colours(self, track, trackFormat, gff3=None):
        """Build the 'style'/colour portion of a track config.

        Depending on *trackFormat* this yields bicolour wiggle settings, a
        plain colour, or a generated JS colour callback whose opacity is
        scaled by feature scores.
        """
        # Wiggle tracks have a bicolor pallete
        trackConfig = {'style': {}}
        if trackFormat == 'wiggle':
            trackConfig['style']['pos_color'] = track['wiggle']['color_pos']
            trackConfig['style']['neg_color'] = track['wiggle']['color_neg']

            if trackConfig['style']['pos_color'] == '__auto__':
                trackConfig['style']['neg_color'] = self.hex_from_rgb(*self._get_colours())
                trackConfig['style']['pos_color'] = self.hex_from_rgb(*self._get_colours())

            # Wiggle tracks can change colour at a specified place
            bc_pivot = track['wiggle']['bicolor_pivot']
            if bc_pivot not in ('mean', 'zero'):
                # The values are either one of those two strings
                # or a number
                bc_pivot = float(bc_pivot)
            trackConfig['bicolor_pivot'] = bc_pivot
        elif 'scaling' in track:
            if track['scaling']['method'] == 'ignore':
                if track['scaling']['scheme']['color'] != '__auto__':
                    trackConfig['style']['color'] = track['scaling']['scheme']['color']
                else:
                    trackConfig['style']['color'] = self.hex_from_rgb(*self._get_colours())
            else:
                # Scored method
                algo = track['scaling']['algo']
                # linear, logarithmic, blast
                scales = track['scaling']['scales']
                # type __auto__, manual (min, max)
                scheme = track['scaling']['scheme']
                # scheme -> (type (opacity), color)
                # ==================================
                # GENE CALLS OR BLAST
                # ==================================
                if trackFormat == 'blast':
                    red, green, blue = self._get_colours()
                    color_function = self.COLOR_FUNCTION_TEMPLATE.format(**{
                        'score': "feature._parent.get('score')",
                        'opacity': self.OPACITY_MATH['blast'],
                        'red': red,
                        'green': green,
                        'blue': blue,
                    })
                    trackConfig['style']['color'] = color_function.replace('\n', '')
                elif trackFormat == 'gene_calls':
                    # Default values, based on GFF3 spec
                    min_val = 0
                    max_val = 1000
                    # Get min/max and build a scoring function since JBrowse doesn't
                    if scales['type'] == 'automatic' or scales['type'] == '__auto__':
                        min_val, max_val = self.min_max_gff(gff3)
                    else:
                        min_val = scales.get('min', 0)
                        max_val = scales.get('max', 1000)

                    if scheme['color'] == '__auto__':
                        user_color = 'undefined'
                        auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())
                    elif scheme['color'].startswith('#'):
                        user_color = "'%s'" % self.hex_from_rgb(*self.rgb_from_hex(scheme['color'][1:]))
                        auto_color = 'undefined'
                    else:
                        user_color = 'undefined'
                        auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())

                    color_function = self.COLOR_FUNCTION_TEMPLATE_QUAL.format(**{
                        'opacity': self.OPACITY_MATH[algo].format(**{'max': max_val, 'min': min_val}),
                        'user_spec_color': user_color,
                        'auto_gen_color': auto_color,
                    })
                    trackConfig['style']['color'] = color_function.replace('\n', '')
        return trackConfig
def etree_to_dict(t):
    """Recursively convert an ElementTree node into a nested dict.

    Attributes appear as '@name' keys, text as '#text' (or as the value
    itself for leaf nodes), and repeated child tags collapse into lists.
    Returns {} for None.
    """
    if t is None:
        return {}

    node = {t.tag: {} if t.attrib else None}
    kids = list(t)
    if kids:
        # Group converted children by tag; single occurrences stay scalar.
        merged = defaultdict(list)
        for child_dict in map(etree_to_dict, kids):
            for key, val in child_dict.items():
                merged[key].append(val)
        node = {t.tag: {key: vals[0] if len(vals) == 1 else vals
                        for key, vals in merged.items()}}
    if t.attrib:
        node[t.tag].update(('@' + key, val) for key, val in t.attrib.items())
    if t.text:
        stripped = t.text.strip()
        if kids or t.attrib:
            if stripped:
                node[t.tag]['#text'] = stripped
        else:
            node[t.tag] = stripped
    return node
# score comes from feature._parent.get('score') or feature.get('score')
# Directory containing this script; companion converters such as
# blastxml_to_gapped_gff3.py and gff3_rebase.py are looked up here.
INSTALLED_TO = os.path.dirname(os.path.realpath(__file__))
def metadata_from_node(node):
    """Flatten a Galaxy track XML <metadata> node into a dict.

    Keys are prefixed with their section ('dataset_', 'history_',
    'metadata_', 'tool_'); a few entries are then rewritten as HTML links
    back to the Galaxy instance. Returns {} / partial dict when the node
    does not carry exactly one <dataset> child.
    """
    metadata = {}
    try:
        if len(node.findall('dataset')) != 1:
            # exit early
            return metadata
    except Exception:
        return {}

    # Same insertion order as listing each section by hand.
    for section in ('dataset', 'history', 'metadata', 'tool'):
        for (key, value) in node.findall(section)[0].attrib.items():
            metadata['%s_%s' % (section, key)] = value

    # Additional Mappings applied:
    metadata['dataset_edam_format'] = '<a target="_blank" href="http://edamontology.org/{0}">{1}</a>'.format(metadata['dataset_edam_format'], metadata['dataset_file_ext'])
    metadata['history_user_email'] = '<a href="mailto:{0}">{0}</a>'.format(metadata['history_user_email'])
    metadata['history_display_name'] = '<a target="_blank" href="{galaxy}/history/view/{encoded_hist_id}">{hist_name}</a>'.format(
        galaxy=GALAXY_INFRASTRUCTURE_URL,
        encoded_hist_id=metadata['history_id'],
        hist_name=metadata['history_display_name']
    )
    metadata['tool_tool'] = '<a target="_blank" href="{galaxy}/datasets/{encoded_id}/show_params">{tool_id}</a>'.format(
        galaxy=GALAXY_INFRASTRUCTURE_URL,
        encoded_id=metadata['dataset_id'],
        tool_id=metadata['tool_tool_id'],
        # tool_version=metadata['tool_tool_version'],
    )
    return metadata
class JbrowseConnector(object):
def __init__(self, jbrowse, outdir, genomes, standalone=None, gencode=1):
    """Prepare an output directory, install JBrowse (per *standalone* mode),
    load the reference genome(s) and apply the genetic-code table.

    standalone: "complete" clones all of JBrowse, "minimal" a trimmed copy,
    anything else only creates the data directories.
    """
    self.cs = ColorScaling()
    self.jbrowse = jbrowse
    self.outdir = outdir
    self.genome_paths = genomes
    self.standalone = standalone
    self.gencode = gencode
    self.tracksToIndex = []

    if standalone == "complete":
        self.clone_jbrowse(self.jbrowse, self.outdir)
    elif standalone == "minimal":
        self.clone_jbrowse(self.jbrowse, self.outdir, minimal=True)
    else:
        for subdir in (self.outdir,
                       os.path.join(self.outdir, 'data', 'raw')):
            try:
                os.makedirs(subdir)
            except OSError:
                # Ignore if the folder exists
                pass

    self.process_genomes()
    self.update_gencode()
def update_gencode(self):
    """Patch the reference-sequence track in trackList.json with the
    start/stop codons and codon table of self.gencode (NCBI table id)."""
    table = CodonTable.unambiguous_dna_by_id[int(self.gencode)]
    track_list_path = os.path.join(self.outdir, 'data', 'trackList.json')
    with open(track_list_path, 'r') as handle:
        track_list = json.load(handle)
    # Track 0 is the sequence track created by prepare-refseqs.pl.
    track_list['tracks'][0].update({
        'codonStarts': table.start_codons,
        'codonStops': table.stop_codons,
        'codonTable': table.forward_table,
    })
    with open(track_list_path, 'w') as handle:
        json.dump(track_list, handle, indent=2)
def subprocess_check_call(self, command, output=None):
    """Run *command* (argv list) from within self.outdir.

    When *output* (an open file object) is given, stdout is redirected
    into it. Raises CalledProcessError on non-zero exit.
    """
    if not output:
        log.debug('cd %s && %s', self.outdir, ' '.join(command))
        subprocess.check_call(command, cwd=self.outdir)
    else:
        log.debug('cd %s && %s > %s', self.outdir, ' '.join(command), output)
        subprocess.check_call(command, cwd=self.outdir, stdout=output)
def subprocess_popen(self, command):
    """Run a shell *command* string; raise RuntimeError on non-zero exit.

    NOTE(review): the debug line implies the command runs in self.outdir,
    but Popen is not given cwd= — confirm whether that is intentional.
    """
    log.debug('cd %s && %s', self.outdir, command)
    proc = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        log.error('cd %s && %s', self.outdir, command)
        log.error(stdout)
        log.error(stderr)
        raise RuntimeError("Command failed with exit code %s" % (proc.returncode))
def subprocess_check_output(self, command):
    """Run *command* (argv list) in self.outdir and return its stdout."""
    log.debug('cd %s && %s', self.outdir, ' '.join(command))
    return subprocess.check_output(command, cwd=self.outdir)
def _jbrowse_bin(self, command):
return os.path.realpath(os.path.join(self.jbrowse, 'bin', command))
def symlink_or_copy(self, src, dest):
if 'GALAXY_JBROWSE_SYMLINKS' in os.environ and bool(os.environ['GALAXY_JBROWSE_SYMLINKS']):
cmd = ['ln', '-s', src, dest]
else:
cmd = ['cp', src, dest]
return self.subprocess_check_call(cmd)
def process_genomes(self):
    """Index each input genome with samtools and load it into JBrowse.

    Each genome is copied to ./genome.fasta, faidx-indexed, handed to
    prepare-refseqs.pl (which copies it into JBrowse's data dir), and the
    working copies are removed afterwards.
    """
    for genome_node in self.genome_paths:
        # We only expect one input genome per run. This for loop is just
        # easier to write than the alternative / catches any possible
        # issues.

        # Copy the file in workdir, prepare-refseqs.pl will copy it to jbrowse's data dir
        local_genome = os.path.realpath('./genome.fasta')
        shutil.copy(genome_node['path'], local_genome)
        cmd = ['samtools', 'faidx', local_genome]
        self.subprocess_check_call(cmd)
        self.subprocess_check_call([
            'perl', self._jbrowse_bin('prepare-refseqs.pl'),
            '--trackConfig', json.dumps({'metadata': genome_node['meta']}),
            '--indexed_fasta', os.path.realpath(local_genome)])
        # prepare-refseqs.pl has taken its own copy; drop the scratch files.
        os.unlink(local_genome)
        os.unlink(local_genome + '.fai')
def generate_names(self):
# Generate names
args = [
'perl', self._jbrowse_bin('generate-names.pl'),
'--hashBits', '16'
]
tracks = ','.join(self.tracksToIndex)
if tracks:
args += ['--tracks', tracks]
else:
# No tracks to index, index only the refseq
args += ['--tracks', 'DNA']
self.subprocess_check_call(args)
def _add_json(self, json_data):
cmd = [
'perl', self._jbrowse_bin('add-json.pl'),
json.dumps(json_data),
os.path.join('data', 'trackList.json')
]
self.subprocess_check_call(cmd)
def _add_track_json(self, json_data):
if len(json_data) == 0:
return
tmp = tempfile.NamedTemporaryFile(delete=False)
json.dump(json_data, tmp)
tmp.close()
cmd = ['perl', self._jbrowse_bin('add-track-json.pl'), tmp.name,
os.path.join('data', 'trackList.json')]
self.subprocess_check_call(cmd)
os.unlink(tmp.name)
def _blastxml_to_gff3(self, xml, min_gap=10):
    """Convert a BlastXML file to gapped GFF3; return the temp output path.

    The caller owns (and must delete) the returned file.
    """
    gff3_out = tempfile.NamedTemporaryFile(delete=False)
    cmd = ['python', os.path.join(INSTALLED_TO, 'blastxml_to_gapped_gff3.py'),
           '--trim', '--trim_end', '--include_seq', '--min_gap', str(min_gap), xml]
    log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_out.name)
    subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_out)
    gff3_out.close()
    return gff3_out.name
def add_blastxml(self, data, trackData, blastOpts, **kwargs):
    """Convert BlastXML to GFF3 (optionally rebased onto a parent GFF) and
    register it as a BlastView CanvasFeatures track."""
    gff3 = self._blastxml_to_gff3(data, min_gap=blastOpts['min_gap'])

    if 'parent' in blastOpts and blastOpts['parent'] != 'None':
        # Rebase hit coordinates onto the parent annotation's coordinates.
        gff3_rebased = tempfile.NamedTemporaryFile(delete=False)
        cmd = ['python', os.path.join(INSTALLED_TO, 'gff3_rebase.py')]
        if blastOpts.get('protein', 'false') == 'true':
            cmd.append('--protein2dna')
        cmd.extend([os.path.realpath(blastOpts['parent']), gff3])
        log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_rebased.name)
        subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_rebased)
        gff3_rebased.close()

        # Replace original gff3 file
        shutil.copy(gff3_rebased.name, gff3)
        os.unlink(gff3_rebased.name)

    dest = os.path.join(self.outdir, 'data', 'raw', trackData['label'] + '.gff')

    self._sort_gff(gff3, dest)

    url = os.path.join('raw', trackData['label'] + '.gff.gz')
    trackData.update({
        "urlTemplate": url,
        "storeClass": "JBrowse/Store/SeqFeature/GFF3Tabix",
    })

    trackData['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'

    # Rendered by the BlastView plugin rather than stock CanvasFeatures.
    trackData['trackType'] = 'BlastView/View/Track/CanvasFeatures'
    trackData['type'] = 'BlastView/View/Track/CanvasFeatures'

    self._add_track_json(trackData)

    os.unlink(gff3)

    if blastOpts.get('index', 'false') == 'true':
        self.tracksToIndex.append("%s" % trackData['label'])
def add_bigwig(self, data, trackData, wiggleOpts, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.bw')
self.symlink_or_copy(os.path.realpath(data), dest)
url = os.path.join('raw', trackData['label'] + '.bw')
trackData.update({
"urlTemplate": url,
"storeClass": "JBrowse/Store/SeqFeature/BigWig",
"type": "JBrowse/View/Track/Wiggle/Density",
})
trackData['type'] = wiggleOpts['type']
trackData['variance_band'] = True if wiggleOpts['variance_band'] == 'true' else False
if 'min' in wiggleOpts and 'max' in wiggleOpts:
trackData['min_score'] = wiggleOpts['min']
trackData['max_score'] = wiggleOpts['max']
else:
trackData['autoscale'] = wiggleOpts.get('autoscale', 'local')
trackData['scale'] = wiggleOpts['scale']
self._add_track_json(trackData)
def add_bigwig_multiple(self, data, trackData, wiggleOpts, **kwargs):
    """Bundle several BigWig files into one MultiBigWig track.

    Each entry of *data* looks like a (name, path) pair — the path is
    copied into data/raw and the name labels the sub-track (TODO confirm
    against the caller).
    """
    urls = []
    for idx, bw in enumerate(data):
        dest = os.path.join('data', 'raw', trackData['label'] + '_' + str(idx) + '.bw')
        self.symlink_or_copy(bw[1], dest)
        urls.append({"url": os.path.join('raw', trackData['label'] + '_' + str(idx) + '.bw'), "name": str(idx + 1) + ' - ' + bw[0]})

    trackData.update({
        "urlTemplates": urls,
        "showTooltips": "true",
        "storeClass": "MultiBigWig/Store/SeqFeature/MultiBigWig",
        "type": "MultiBigWig/View/Track/MultiWiggle/MultiDensity",
    })
    # Switch to the XY-plot renderer when the user picked an XYPlot type.
    if 'XYPlot' in wiggleOpts['type']:
        trackData['type'] = "MultiBigWig/View/Track/MultiWiggle/MultiXYPlot"

    trackData['variance_band'] = True if wiggleOpts['variance_band'] == 'true' else False
    if 'min' in wiggleOpts and 'max' in wiggleOpts:
        trackData['min_score'] = wiggleOpts['min']
        trackData['max_score'] = wiggleOpts['max']
    else:
        trackData['autoscale'] = wiggleOpts.get('autoscale', 'local')
        trackData['scale'] = wiggleOpts['scale']

    self._add_track_json(trackData)
def add_maf(self, data, trackData, mafOpts, **kwargs):
script = os.path.realpath(os.path.join(self.jbrowse, 'plugins', 'MAFViewer', 'bin', 'maf2bed.pl'))
dest = os.path.join('data', 'raw', trackData['label'] + '.txt')
tmp1 = tempfile.NamedTemporaryFile(delete=False)
tmp1.close()
# Process MAF to bed-like
cmd = [script, data]
self.subprocess_check_call(cmd, output=tmp1.path)
# Sort / Index it
self._sort_bed(tmp1.path, dest)
# Cleanup
try:
os.remove(tmp1.path)
except OSError:
pass
# Construct samples list
# We could get this from galaxy metadata, not sure how easily.
ps = subprocess.Popen(['grep', '^s [^ ]*', '-o', data], stdout=subprocess.PIPE)
output = subprocess.check_output(('sort', '-u'), stdin=ps.stdout)
ps.wait()
samples = [x[2:] for x in output]
trackData.update({
"storeClass": "MAFViewer/Store/SeqFeature/MAFTabix",
"type": "MAFViewer/View/Track/MAF",
"urlTemplate": trackData['label'] + '.txt.gz',
"samples": samples,
})
self._add_track_json(trackData)
def add_bam(self, data, trackData, bamOpts, bam_index=None, **kwargs):
    """Install a BAM (plus a .bai index) as an Alignments2 track, optionally
    with an automatic SNP/coverage companion track."""
    dest = os.path.join('data', 'raw', trackData['label'] + '.bam')
    self.symlink_or_copy(os.path.realpath(data), dest)

    if bam_index is not None and os.path.exists(os.path.realpath(bam_index)):
        # bai most probably made by galaxy and stored in galaxy dirs, need to copy it to dest
        self.subprocess_check_call(['cp', os.path.realpath(bam_index), dest + '.bai'])
    else:
        # Can happen in exotic condition
        # e.g. if bam imported as symlink with datatype=unsorted.bam, then datatype changed to bam
        #      => no index generated by galaxy, but there might be one next to the symlink target
        #      this trick allows to skip the bam sorting made by galaxy if already done outside
        if os.path.exists(os.path.realpath(data) + '.bai'):
            self.symlink_or_copy(os.path.realpath(data) + '.bai', dest + '.bai')
        else:
            log.warn('Could not find a bam index (.bai file) for %s', data)

    url = os.path.join('raw', trackData['label'] + '.bam')
    trackData.update({
        "urlTemplate": url,
        "type": "JBrowse/View/Track/Alignments2",
        "storeClass": "JBrowse/Store/SeqFeature/BAM",
        "chunkSizeLimit": bamOpts.get('chunkSizeLimit', '5000000')
    })

    # Apollo will only switch to the (prettier) 'bam-read' className if it's not set explicitly in the track config
    # So remove the default 'feature' value for these bam tracks
    if 'className' in trackData['style'] and trackData['style']['className'] == 'feature':
        del trackData['style']['className']

    self._add_track_json(trackData)

    if bamOpts.get('auto_snp', 'false') == 'true':
        # Derived track: per-position SNP/coverage view over the same BAM.
        trackData2 = copy.copy(trackData)
        trackData2.update({
            "type": "JBrowse/View/Track/SNPCoverage",
            "key": trackData['key'] + " - SNPs/Coverage",
            "label": trackData['label'] + "_autosnp",
            "chunkSizeLimit": bamOpts.get('chunkSizeLimit', '5000000')
        })
        self._add_track_json(trackData2)
def add_vcf(self, data, trackData, vcfOpts={}, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.vcf')
# ln?
cmd = ['ln', '-s', data, dest]
self.subprocess_check_call(cmd)
cmd = ['bgzip', dest]
self.subprocess_check_call(cmd)
cmd = ['tabix', '-p', 'vcf', dest + '.gz']
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.vcf.gz')
trackData.update({
"urlTemplate": url,
"type": "JBrowse/View/Track/HTMLVariants",
"storeClass": "JBrowse/Store/SeqFeature/VCFTabix",
})
self._add_track_json(trackData)
def _sort_gff(self, data, dest):
# Only index if not already done
if not os.path.exists(dest):
cmd = "gff3sort.pl --precise '%s' | grep -v \"^$\" > '%s'" % (data, dest)
self.subprocess_popen(cmd)
self.subprocess_check_call(['bgzip', '-f', dest])
self.subprocess_check_call(['tabix', '-f', '-p', 'gff', dest + '.gz'])
def _sort_bed(self, data, dest):
# Only index if not already done
if not os.path.exists(dest):
cmd = ['sort', '-k1,1', '-k2,2n', data]
with open(dest, 'w') as handle:
self.subprocess_check_call(cmd, output=handle)
self.subprocess_check_call(['bgzip', '-f', dest])
self.subprocess_check_call(['tabix', '-f', '-p', 'bed', dest + '.gz'])
def add_gff(self, data, format, trackData, gffOpts, **kwargs):
    """Sort/index a GFF3 file and register it with the configured renderer
    (CanvasFeatures, HTMLFeatures or their 'Neat' plugin variants)."""
    dest = os.path.join(self.outdir, 'data', 'raw', trackData['label'] + '.gff')
    self._sort_gff(data, dest)

    url = os.path.join('raw', trackData['label'] + '.gff.gz')
    trackData.update({
        "urlTemplate": url,
        "storeClass": "JBrowse/Store/SeqFeature/GFF3Tabix",
    })

    if 'match' in gffOpts:
        trackData['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'

    trackType = 'JBrowse/View/Track/CanvasFeatures'
    if 'trackType' in gffOpts:
        trackType = gffOpts['trackType']
    trackData['type'] = trackType
    trackData['trackType'] = trackType  # Probably only used by old jbrowse versions

    # Renderer-specific options.
    if trackType in ['JBrowse/View/Track/CanvasFeatures', 'NeatCanvasFeatures/View/Track/NeatFeatures']:
        if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
            trackData['transcriptType'] = gffOpts['transcriptType']
        if 'subParts' in gffOpts and gffOpts['subParts']:
            trackData['subParts'] = gffOpts['subParts']
        if 'impliedUTRs' in gffOpts and gffOpts['impliedUTRs']:
            trackData['impliedUTRs'] = gffOpts['impliedUTRs']
    elif trackType in ['JBrowse/View/Track/HTMLFeatures', 'NeatHTMLFeatures/View/Track/NeatFeatures']:
        if 'topLevelFeatures' in gffOpts and gffOpts['topLevelFeatures']:
            trackData['topLevelFeatures'] = gffOpts['topLevelFeatures']

    self._add_track_json(trackData)

    if gffOpts.get('index', 'false') == 'true':
        self.tracksToIndex.append("%s" % trackData['label'])
def add_bed(self, data, format, trackData, gffOpts, **kwargs):
    """Sort/index a BED file and register it with the configured renderer
    (CanvasFeatures, HTMLFeatures or their 'Neat' plugin variants)."""
    dest = os.path.join(self.outdir, 'data', 'raw', trackData['label'] + '.bed')
    self._sort_bed(data, dest)

    url = os.path.join('raw', trackData['label'] + '.bed.gz')
    trackData.update({
        "urlTemplate": url,
        "storeClass": "JBrowse/Store/SeqFeature/BEDTabix",
    })

    if 'match' in gffOpts:
        trackData['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'

    trackType = gffOpts.get('trackType', 'JBrowse/View/Track/CanvasFeatures')
    trackData['type'] = trackType

    # Renderer-specific options.
    if trackType in ['JBrowse/View/Track/CanvasFeatures', 'NeatCanvasFeatures/View/Track/NeatFeatures']:
        if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
            trackData['transcriptType'] = gffOpts['transcriptType']
        if 'subParts' in gffOpts and gffOpts['subParts']:
            trackData['subParts'] = gffOpts['subParts']
        if 'impliedUTRs' in gffOpts and gffOpts['impliedUTRs']:
            trackData['impliedUTRs'] = gffOpts['impliedUTRs']
    elif trackType in ['JBrowse/View/Track/HTMLFeatures', 'NeatHTMLFeatures/View/Track/NeatFeatures']:
        if 'topLevelFeatures' in gffOpts and gffOpts['topLevelFeatures']:
            trackData['topLevelFeatures'] = gffOpts['topLevelFeatures']

    self._add_track_json(trackData)

    if gffOpts.get('index', 'false') == 'true':
        self.tracksToIndex.append("%s" % trackData['label'])
def add_genbank(self, data, format, trackData, gffOpts, **kwargs):
cmd = [
'perl', self._jbrowse_bin('flatfile-to-json.pl'),
'--genbank', data,
'--trackLabel', trackData['label'],
'--key', trackData['key']
]
# className in --clientConfig is ignored, it needs to be set with --className
if 'className' in trackData['style']:
cmd += ['--className', trackData['style']['className']]
config = copy.copy(trackData)
clientConfig = trackData['style']
del config['style']
if 'match' in gffOpts:
config['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'
if bool(gffOpts['match']):
# Can be empty for CanvasFeatures = will take all by default
cmd += ['--type', gffOpts['match']]
cmd += ['--clientConfig', json.dumps(clientConfig)]
trackType = 'JBrowse/View/Track/CanvasFeatures'
if 'trackType' in gffOpts:
trackType = gffOpts['trackType']
if trackType == 'JBrowse/View/Track/CanvasFeatures':
if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
config['transcriptType'] = gffOpts['transcriptType']
if 'subParts' in gffOpts and gffOpts['subParts']:
config['subParts'] = gffOpts['subParts']
if 'impliedUTRs' in gffOpts and gffOpts['impliedUTRs']:
config['impliedUTRs'] = gffOpts['impliedUTRs']
elif trackType == 'JBrowse/View/Track/HTMLFeatures':
if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
cmd += ['--type', gffOpts['transcriptType']]
cmd += [
'--trackType', gffOpts['trackType']
]
cmd.extend(['--config', json.dumps(config)])
self.subprocess_check_call(cmd)
if gffOpts.get('index', 'false') == 'true':
self.tracksToIndex.append("%s" % trackData['label'])
def add_rest(self, url, trackData):
data = {
"label": trackData['label'],
"key": trackData['key'],
"category": trackData['category'],
"type": "JBrowse/View/Track/HTMLFeatures",
"storeClass": "JBrowse/Store/SeqFeature/REST",
"baseUrl": url
}
self._add_track_json(data)
def add_sparql(self, url, query, trackData):
data = {
"label": trackData['label'],
"key": trackData['key'],
"category": trackData['category'],
"type": "JBrowse/View/Track/CanvasFeatures",
"storeClass": "JBrowse/Store/SeqFeature/SPARQL",
"urlTemplate": url,
"queryTemplate": query
}
self._add_track_json(data)
def traverse_to_option_parent(self, splitKey, outputTrackConfig):
    """Return the nested dict that should hold splitKey[-1].

    Walks outputTrackConfig along splitKey[:-1], creating empty dicts for
    missing (or None) levels, and asserts the final container is a dict.
    """
    node = outputTrackConfig
    for part in splitKey[:-1]:
        if node.get(part) is None:
            node[part] = dict()
        node = node[part]
    assert isinstance(node, dict), 'Config element {} is not a dict'.format(node)
    return node
def get_formatted_option(self, valType2ValDict, mapped_chars):
    """Convert a single {type: raw_value} pair into a typed Python value.

    'text' values are unsanitized via *mapped_chars*; 'integer'/'float'
    are parsed; anything else is treated as a 'true'/'false' boolean.
    """
    assert isinstance(valType2ValDict, dict) and len(valType2ValDict.items()) == 1
    valType, value = next(iter(valType2ValDict.items()))
    if valType == "text":
        # Undo Galaxy's character sanitization.
        for char, mapped_char in mapped_chars.items():
            value = value.replace(mapped_char, char)
    elif valType == "integer":
        value = int(value)
    elif valType == "float":
        value = float(value)
    else:  # boolean
        value = {'true': True, 'false': False}[value]
    return value
def set_custom_track_options(self, customTrackConfig, outputTrackConfig, mapped_chars):
for optKey, optType2ValDict in customTrackConfig.items():
splitKey = optKey.split('.')
trackConfigOptionParent = self.traverse_to_option_parent(splitKey, outputTrackConfig)
optVal = self.get_formatted_option(optType2ValDict, mapped_chars)
trackConfigOptionParent[splitKey[-1]] = optVal
    def process_annotations(self, track):
        """Generate JBrowse track configuration for one Galaxy track definition.

        Builds the per-track config (style, colours, menus, custom options)
        for every dataset in ``track['trackfiles']`` and dispatches to the
        format-specific ``add_*`` handler. Yields the machine label (an
        md5-derived id) of each dataset so the caller can wire visibility.
        """
        category = track['category'].replace('__pd__date__pd__', TODAY)
        outputTrackConfig = {
            'style': {
                'label': track['style'].get('label', 'description'),
                'className': track['style'].get('className', 'feature'),
                'description': track['style'].get('description', ''),
            },
            # These arrive from the XML as the strings 'True'/'False'.
            'overridePlugins': track['style'].get('overridePlugins', False) == 'True',
            'overrideDraggable': track['style'].get('overrideDraggable', False) == 'True',
            'maxHeight': track['style'].get('maxHeight', '600'),
            'category': category,
        }

        # Galaxy sanitises element identifiers; this maps each escape token
        # back to the character it stands for.
        mapped_chars = {
            '>': '__gt__',
            '<': '__lt__',
            "'": '__sq__',
            '"': '__dq__',
            '[': '__ob__',
            ']': '__cb__',
            '{': '__oc__',
            '}': '__cc__',
            '@': '__at__',
            '#': '__pd__',
            "": '__cn__'
        }

        for i, (dataset_path, dataset_ext, track_human_label, extra_metadata) in enumerate(track['trackfiles']):
            # Unsanitize labels (element_identifiers are always sanitized by Galaxy)
            for key, value in mapped_chars.items():
                track_human_label = track_human_label.replace(value, key)

            log.info('Processing %s / %s', category, track_human_label)
            outputTrackConfig['key'] = track_human_label
            # We add extra data to hash for the case of REST + SPARQL.
            if 'conf' in track and 'options' in track['conf'] and 'url' in track['conf']['options']:
                rest_url = track['conf']['options']['url']
            else:
                rest_url = ''

            # I chose to use track['category'] instead of 'category' here. This
            # is intentional. This way re-running the tool on a different date
            # will not generate different hashes and make comparison of outputs
            # much simpler.
            hashData = [str(dataset_path), track_human_label, track['category'], rest_url]
            hashData = '|'.join(hashData).encode('utf-8')
            outputTrackConfig['label'] = hashlib.md5(hashData).hexdigest() + '_%s' % i
            outputTrackConfig['metadata'] = extra_metadata

            # Colour parsing is complex due to different track types having
            # different colour options.
            colourOptions = self.cs.parse_colours(track['conf']['options'], track['format'], gff3=dataset_path)
            # This used to be done with a dict.update() call, however that wiped out any previous style settings...
            for key in colourOptions:
                if key == 'style':
                    for subkey in colourOptions['style']:
                        outputTrackConfig['style'][subkey] = colourOptions['style'][subkey]
                else:
                    outputTrackConfig[key] = colourOptions[key]

            if 'menus' in track['conf']['options']:
                menus = self.cs.parse_menus(track['conf']['options'])
                outputTrackConfig.update(menus)

            customTrackConfig = track['conf']['options'].get('custom_config', {})
            if customTrackConfig:
                self.set_custom_track_options(customTrackConfig, outputTrackConfig, mapped_chars)

            # import pprint; pprint.pprint(track)
            # import sys; sys.exit()
            # Dispatch on the dataset extension to the format-specific handler.
            if dataset_ext in ('gff', 'gff3'):
                self.add_gff(dataset_path, dataset_ext, outputTrackConfig,
                             track['conf']['options']['gff'])
            elif dataset_ext in ('bed', ):
                self.add_bed(dataset_path, dataset_ext, outputTrackConfig,
                             track['conf']['options']['gff'])
            elif dataset_ext in ('genbank', ):
                self.add_genbank(dataset_path, dataset_ext, outputTrackConfig,
                                 track['conf']['options']['gff'])
            elif dataset_ext == 'bigwig':
                self.add_bigwig(dataset_path, outputTrackConfig,
                                track['conf']['options']['wiggle'])
            elif dataset_ext == 'bigwig_multiple':
                self.add_bigwig_multiple(dataset_path, outputTrackConfig,
                                         track['conf']['options']['wiggle'])
            elif dataset_ext == 'maf':
                self.add_maf(dataset_path, outputTrackConfig,
                             track['conf']['options']['maf'])
            elif dataset_ext == 'bam':
                real_indexes = track['conf']['options']['pileup']['bam_indices']['bam_index']
                if not isinstance(real_indexes, list):
                    # <bam_indices>
                    #  <bam_index>/path/to/a.bam.bai</bam_index>
                    # </bam_indices>
                    #
                    # The above will result in the 'bam_index' key containing a
                    # string. If there are two or more indices, the container
                    # becomes a list. Fun!
                    real_indexes = [real_indexes]

                self.add_bam(dataset_path, outputTrackConfig,
                             track['conf']['options']['pileup'],
                             bam_index=real_indexes[i])
            elif dataset_ext == 'blastxml':
                self.add_blastxml(dataset_path, outputTrackConfig, track['conf']['options']['blast'])
            elif dataset_ext == 'vcf':
                self.add_vcf(dataset_path, outputTrackConfig)
            elif dataset_ext == 'rest':
                self.add_rest(track['conf']['options']['rest']['url'], outputTrackConfig)
            elif dataset_ext == 'sparql':
                sparql_query = track['conf']['options']['sparql']['query']
                # SPARQL queries also pass through Galaxy's sanitiser.
                for key, value in mapped_chars.items():
                    sparql_query = sparql_query.replace(value, key)
                self.add_sparql(track['conf']['options']['sparql']['url'], sparql_query, outputTrackConfig)
            else:
                log.warn('Do not know how to handle %s', dataset_ext)

            # Return non-human label for use in other fields
            yield outputTrackConfig['label']
    def add_final_data(self, data):
        """Write the top-level viewer configuration.

        Combines track-visibility lists, general browser options and the
        plugin list into one JSON blob, then appends plugin-specific tracks
        (GCContent) and the ComboTrackSelector configuration when enabled.
        """
        viz_data = {}
        if len(data['visibility']['default_on']) > 0:
            viz_data['defaultTracks'] = ','.join(data['visibility']['default_on'])

        if len(data['visibility']['always']) > 0:
            viz_data['alwaysOnTracks'] = ','.join(data['visibility']['always'])

        if len(data['visibility']['force']) > 0:
            viz_data['forceTracks'] = ','.join(data['visibility']['force'])

        generalData = {}
        if data['general']['aboutDescription'] is not None:
            generalData['aboutThisBrowser'] = {'description': data['general']['aboutDescription'].strip()}

        generalData['view'] = {
            'trackPadding': data['general']['trackPadding']
        }
        # All of these arrive from the XML as the strings 'true'/'false'.
        generalData['shareLink'] = (data['general']['shareLink'] == 'true')
        generalData['show_tracklist'] = (data['general']['show_tracklist'] == 'true')
        generalData['show_nav'] = (data['general']['show_nav'] == 'true')
        generalData['show_overview'] = (data['general']['show_overview'] == 'true')
        generalData['show_menu'] = (data['general']['show_menu'] == 'true')
        generalData['hideGenomeOptions'] = (data['general']['hideGenomeOptions'] == 'true')
        generalData['plugins'] = data['plugins']

        viz_data.update(generalData)
        self._add_json(viz_data)

        if 'GCContent' in data['plugins_python']:
            # Two synthetic tracks computed from the reference sequence:
            # GC percentage and GC skew.
            self._add_track_json({
                "storeClass": "JBrowse/Store/Sequence/IndexedFasta",
                "type": "GCContent/View/Track/GCContentXY",
                "label": "GC Content",
                "key": "GCContentXY",
                "urlTemplate": "seq/genome.fasta",
                "bicolor_pivot": 0.5,
                "category": "GC Content",
                "metadata": {
                    "tool_tool": '<a target="_blank" href="https://github.com/elsiklab/gccontent/commit/030180e75a19fad79478d43a67c566ec6">elsiklab/gccontent</a>',
                    "tool_tool_version": "5c8b0582ecebf9edf684c76af8075fb3d30ec3fa",
                    "dataset_edam_format": "",
                    "dataset_size": "",
                    "history_display_name": "",
                    "history_user_email": "",
                    "metadata_dbkey": "",
                }
                # TODO: Expose params for everyone.
            })
            self._add_track_json({
                "storeClass": "JBrowse/Store/Sequence/IndexedFasta",
                "type": "GCContent/View/Track/GCContentXY",
                "label": "GC skew",
                "key": "GCSkew",
                "urlTemplate": "seq/genome.fasta",
                "gcMode": "skew",
                "min_score": -1,
                "bicolor_pivot": 0,
                "category": "GC Content",
                "metadata": {
                    "tool_tool": '<a target="_blank" href="https://github.com/elsiklab/gccontent/commit/030180e75a19fad79478d43a67c566ec6">elsiklab/gccontent</a>',
                    "tool_tool_version": "5c8b0582ecebf9edf684c76af8075fb3d30ec3fa",
                    "dataset_edam_format": "",
                    "dataset_size": "",
                    "history_display_name": "",
                    "history_user_email": "",
                    "metadata_dbkey": "",
                }
                # TODO: Expose params for everyone.
            })

        if 'ComboTrackSelector' in data['plugins_python']:
            # Rewrite trackList.json with a faceted selector exposing the
            # Galaxy metadata columns, writing the result to trackList2.json.
            with open(os.path.join(self.outdir, 'data', 'trackList.json'), 'r') as handle:
                trackListJson = json.load(handle)
                trackListJson.update({
                    "trackSelector": {
                        "renameFacets": {
                            "tool_tool": "Tool ID",
                            "tool_tool_id": "Tool ID",
                            "tool_tool_version": "Tool Version",
                            "dataset_edam_format": "EDAM",
                            "dataset_size": "Size",
                            "history_display_name": "History Name",
                            "history_user_email": "Owner",
                            "metadata_dbkey": "Dbkey",
                        },
                        "displayColumns": [
                            "key",
                            "tool_tool",
                            "tool_tool_version",
                            "dataset_edam_format",
                            "dataset_size",
                            "history_display_name",
                            "history_user_email",
                            "metadata_dbkey",
                        ],
                        "type": "Faceted",
                        "title": ["Galaxy Metadata"],
                        "icon": "https://galaxyproject.org/images/logos/galaxy-icon-square.png",
                        "escapeHTMLInData": False
                    },
                    "trackMetadata": {
                        "indexFacets": [
                            "category",
                            "key",
                            "tool_tool_id",
                            "tool_tool_version",
                            "dataset_edam_format",
                            "history_user_email",
                            "history_display_name"
                        ]
                    }
                })
                with open(os.path.join(self.outdir, 'data', 'trackList2.json'), 'w') as handle:
                    json.dump(trackListJson, handle)
    def clone_jbrowse(self, jbrowse_dir, destination, minimal=False):
        """Clone a JBrowse directory into a destination directory.

        Args:
            jbrowse_dir: path of the unpacked JBrowse release to copy from.
            destination: directory the viewer is copied into.
            minimal: copy only the files JBrowse needs at runtime rather
                than the entire release tree.
        """
        if minimal:
            # Should be the absolute minimum required for JBrowse to function.
            interesting = [
                'dist', 'img', 'index.html', 'jbrowse.conf', 'jbrowse_conf.json', 'webpack.config.js'
            ]
            for i in interesting:
                cmd = ['cp', '-r', os.path.join(jbrowse_dir, i), destination]
                self.subprocess_check_call(cmd)
        else:
            # JBrowse seems to have included some bad symlinks, cp ignores bad symlinks
            # unlike copytree
            cmd = ['cp', '-r', os.path.join(jbrowse_dir, '.'), destination]
            self.subprocess_check_call(cmd)

        cmd = ['mkdir', '-p', os.path.join(destination, 'data', 'raw')]
        self.subprocess_check_call(cmd)

        # http://unix.stackexchange.com/a/38691/22785
        # JBrowse releases come with some broken symlinks
        cmd = ['find', destination, '-type', 'l', '-xtype', 'l']
        symlinks = self.subprocess_check_output(cmd)
        # NOTE(review): assumes subprocess_check_output returns an iterable of
        # paths (e.g. a list of lines). If it returns one raw string, this
        # loop iterates characters — verify against its implementation.
        for i in symlinks:
            try:
                os.unlink(i)
            except OSError:
                # Ignore links that vanished or cannot be removed.
                pass
if __name__ == '__main__':
    # Command-line entry point: read the Galaxy-generated XML track
    # configuration, build the JBrowse instance, then emit tracks and
    # top-level configuration.
    parser = argparse.ArgumentParser(description="", epilog="")
    parser.add_argument('xml', type=argparse.FileType('r'), help='Track Configuration')
    parser.add_argument('--jbrowse', help='Folder containing a jbrowse release')
    parser.add_argument('--outdir', help='Output directory', default='out')
    parser.add_argument('--standalone', choices=['complete', 'minimal', 'data'], help='Standalone mode includes a copy of JBrowse')
    parser.add_argument('--version', '-V', action='version', version="%(prog)s 0.8.0")
    args = parser.parse_args()

    tree = ET.parse(args.xml.name)
    root = tree.getroot()

    # This should be done ASAP
    GALAXY_INFRASTRUCTURE_URL = root.find('metadata/galaxyUrl').text
    # Sometimes this comes as `localhost` without a protocol
    if not GALAXY_INFRASTRUCTURE_URL.startswith('http'):
        # so we'll prepend `http://` and hope for the best. Requests *should*
        # be GET and not POST so it should redirect OK
        GALAXY_INFRASTRUCTURE_URL = 'http://' + GALAXY_INFRASTRUCTURE_URL

    jc = JbrowseConnector(
        jbrowse=args.jbrowse,
        outdir=args.outdir,
        genomes=[
            {
                'path': os.path.realpath(x.attrib['path']),
                'meta': metadata_from_node(x.find('metadata'))
            }
            for x in root.findall('metadata/genomes/genome')
        ],
        standalone=args.standalone,
        gencode=root.find('metadata/gencode').text
    )

    # Accumulates visibility lists, general options and plugin selections,
    # ultimately handed to jc.add_final_data().
    extra_data = {
        'visibility': {
            'default_on': [],
            'default_off': [],
            'force': [],
            'always': [],
        },
        'general': {
            'defaultLocation': root.find('metadata/general/defaultLocation').text,
            'trackPadding': int(root.find('metadata/general/trackPadding').text),
            'shareLink': root.find('metadata/general/shareLink').text,
            'aboutDescription': root.find('metadata/general/aboutDescription').text,
            'show_tracklist': root.find('metadata/general/show_tracklist').text,
            'show_nav': root.find('metadata/general/show_nav').text,
            'show_overview': root.find('metadata/general/show_overview').text,
            'show_menu': root.find('metadata/general/show_menu').text,
            'hideGenomeOptions': root.find('metadata/general/hideGenomeOptions').text,
        },
        'plugins': [],
        'plugins_python': [],
    }

    plugins = root.find('plugins').attrib
    if plugins['GCContent'] == 'True':
        extra_data['plugins_python'].append('GCContent')
        extra_data['plugins'].append({
            'location': 'https://cdn.jsdelivr.net/gh/elsiklab/gccontent@5c8b0582ecebf9edf684c76af8075fb3d30ec3fa/',
            'name': 'GCContent'
        })

    # Not needed in 1.16.1: it's built in the conda package now, and this plugin doesn't need to be enabled anywhere
    # if plugins['Bookmarks'] == 'True':
    #     extra_data['plugins'].append({
    #         'location': 'https://cdn.jsdelivr.net/gh/TAMU-CPT/bookmarks-jbrowse@5242694120274c86e1ccd5cb0e5e943e78f82393/',
    #         'name': 'Bookmarks'
    #     })

    # Not needed in 1.16.1: it's built in the conda package now, and this plugin doesn't need to be enabled anywhere
    if plugins['ComboTrackSelector'] == 'True':
        extra_data['plugins_python'].append('ComboTrackSelector')
        # Not needed in 1.16.1: it's built in the conda package now, and this plugin doesn't need to be enabled anywhere
        # extra_data['plugins'].append({
        #     'location': 'https://cdn.jsdelivr.net/gh/Arabidopsis-Information-Portal/ComboTrackSelector@52403928d5ccbe2e3a86b0fa5eb8e61c0f2e2f57/',
        #     'icon': 'https://galaxyproject.org/images/logos/galaxy-icon-square.png',
        #     'name': 'ComboTrackSelector'
        # })

    if plugins['theme'] == 'Minimalist':
        extra_data['plugins'].append({
            'location': 'https://cdn.jsdelivr.net/gh/erasche/jbrowse-minimalist-theme@d698718442da306cf87f033c72ddb745f3077775/',
            'name': 'MinimalistTheme'
        })
    elif plugins['theme'] == 'Dark':
        extra_data['plugins'].append({
            'location': 'https://cdn.jsdelivr.net/gh/erasche/jbrowse-dark-theme@689eceb7e33bbc1b9b15518d45a5a79b2e5d0a26/',
            'name': 'DarkTheme'
        })

    if plugins['BlastView'] == 'True':
        extra_data['plugins_python'].append('BlastView')
        extra_data['plugins'].append({
            'location': 'https://cdn.jsdelivr.net/gh/TAMU-CPT/blastview@97572a21b7f011c2b4d9a0b5af40e292d694cbef/',
            'name': 'BlastView'
        })

    for track in root.findall('tracks/track'):
        track_conf = {}
        track_conf['trackfiles'] = []

        is_multi_bigwig = False
        try:
            if track.find('options/wiggle/multibigwig') and (track.find('options/wiggle/multibigwig').text == 'True'):
                is_multi_bigwig = True
                multi_bigwig_paths = []
        except KeyError:
            pass

        trackfiles = track.findall('files/trackFile')
        if trackfiles:
            for x in track.findall('files/trackFile'):
                if is_multi_bigwig:
                    # All bigwigs are merged into one synthetic track below.
                    multi_bigwig_paths.append((x.attrib['label'], os.path.realpath(x.attrib['path'])))
                else:
                    # NOTE(review): this inner check is redundant — trackfiles
                    # is known non-empty inside this branch.
                    if trackfiles:
                        metadata = metadata_from_node(x.find('metadata'))

                        track_conf['trackfiles'].append((
                            os.path.realpath(x.attrib['path']),
                            x.attrib['ext'],
                            x.attrib['label'],
                            metadata
                        ))
        else:
            # For tracks without files (rest, sparql)
            track_conf['trackfiles'].append((
                '',  # N/A, no path for rest or sparql
                track.attrib['format'],
                track.find('options/label').text,
                {}
            ))

        if is_multi_bigwig:
            metadata = metadata_from_node(x.find('metadata'))

            track_conf['trackfiles'].append((
                multi_bigwig_paths,  # Passing an array of paths to represent as one track
                'bigwig_multiple',
                'MultiBigWig',  # Giving an hardcoded name for now
                {}  # No metadata for multiple bigwig
            ))

        track_conf['category'] = track.attrib['cat']
        track_conf['format'] = track.attrib['format']
        try:
            # Only pertains to gff3 + blastxml. TODO?
            track_conf['style'] = {t.tag: t.text for t in track.find('options/style')}
        except TypeError:
            # No <style> element present.
            track_conf['style'] = {}
            pass
        track_conf['conf'] = etree_to_dict(track.find('options'))
        keys = jc.process_annotations(track_conf)

        for key in keys:
            extra_data['visibility'][track.attrib.get('visibility', 'default_off')].append(key)

    jc.add_final_data(extra_data)
    jc.generate_names()
| |
"""Module to implement ORM to the ome database"""
import json
from os import system
from types import MethodType

import pymongo
from sqlalchemy import Table, MetaData, create_engine, Column, Integer, \
    String, Float, ForeignKey, and_, or_, not_, distinct, select
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, aliased
from sqlalchemy.orm.session import Session as _SA_Session
from sqlalchemy.schema import UniqueConstraint

from om import settings
# Module-level database handles: a PostgreSQL engine/metadata for the
# relational schema, plus a MongoDB connection for the omics document store.
engine = create_engine("postgresql://%s:%s@%s/%s" %
    (settings.postgres_user, settings.postgres_password, settings.postgres_host, settings.postgres_database))

Base = declarative_base(bind=engine)
metadata = MetaData(bind=engine)

# NOTE(review): pymongo.Connection is the legacy (pre-3.0) client API.
connection = pymongo.Connection()
omics_database = connection.omics_database
class Genome(Base):
    """A sequenced genome, unique by its GenBank accession."""
    __tablename__ = 'genome'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # External identifiers plus the source organism name.
    genbank_id = Column(String(200))
    ncbi_id = Column(String(100))
    organism = Column(String(200))

    __table_args__ = (UniqueConstraint('genbank_id'),{})

    def __repr__(self):
        return "Genome (#%d): %s, %s, %s" % \
            (self.id, self.organism, self.genbank_id, self.ncbi_id)

    def __init__(self, genbank_id, ncbi_id, organism):
        self.genbank_id = genbank_id
        self.ncbi_id = ncbi_id
        self.organism = organism
class GenomeRegion(Base):
    """A located region on a genome (coordinates plus strand).

    Root of a polymorphic hierarchy discriminated by the `type` column.
    """
    __tablename__ = 'genome_region'

    id = Column(Integer, primary_key=True, autoincrement=True)
    genome_id = Column(Integer, ForeignKey('genome.id'))
    name = Column(String(15))
    # Coordinates: left/right positions and a single-character strand flag.
    leftpos = Column(Integer, nullable=False)
    rightpos = Column(Integer, nullable=False)
    strand = Column(String(1), nullable=False)
    type = Column(String(20))

    __table_args__ = (UniqueConstraint('leftpos','rightpos','strand', 'genome_id'),{})

    __mapper_args__ = {'polymorphic_identity': 'genome_region',
                       'polymorphic_on': type
                       }

    def __repr__(self):
        return "GenomeRegion: %d-%d (%s)" % \
            (self.leftpos, self.rightpos, self.strand)

    def __repr__dict__(self):
        # Plain-dict form used when serialising regions.
        return {"name":self.name,"id":self.id,"leftpos":self.leftpos,"rightpos":self.rightpos,"strand":self.strand}

    def __init__(self, leftpos, rightpos, strand, genome_id, name=None):
        self.leftpos = leftpos
        self.rightpos = rightpos
        self.strand = strand
        self.genome_id = genome_id
        self.name = name
class Component(Base):
    """A named component, unique by name; polymorphic root (see `type`)."""
    __tablename__ = 'component'

    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(100))
    type = Column(String(20))

    __table_args__ = (UniqueConstraint('name'),{})

    __mapper_args__ = {'polymorphic_identity': 'component',
                       'polymorphic_on': type
                       }

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "Component (#%d): %s" % \
            (self.id, self.name)
class Reaction(Base):
    """A named reaction, unique by name; polymorphic root (see `type`)."""
    __tablename__ = 'reaction'

    id = Column(Integer, primary_key=True, autoincrement=True)
    abbreviation = Column(String(10))
    name = Column(String(100))
    long_name = Column(String(100))
    type = Column(String(20))

    __table_args__ = (UniqueConstraint('name'),{})

    __mapper_args__ = {'polymorphic_identity': 'reaction',
                       'polymorphic_on': type
                       }

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "Reaction (#%d): %s" % \
            (self.id, self.name)
class DataSource(Base):
    """Provenance for a dataset: the name, lab and institution it came from."""
    __tablename__ = 'data_source'

    id = Column(Integer, primary_key=True)
    name = Column(String(100))
    lab = Column(String(100))
    institution = Column(String(100))
    #data_sets = relationship("DataSet")

    __table_args__ = (UniqueConstraint('name'),{})

    def __repr__(self):
        return "Data Source %s (#%d)" % (self.name, self.id)

    def __repr__dict__(self):
        # Plain-dict form used when serialising data sources.
        return {"name":self.name,"wid":self.id,"values":{"lab":self.lab,"institution":self.institution}}

    def __repr__json__(self):
        # Bug fix: `json` is never imported at module level, so the previous
        # bare json.dumps raised NameError at call time. Import locally.
        import json
        return json.dumps(self.__repr__dict__())

    def __init__(self, name, lab=None, institution=None):
        self.name = name
        self.lab = lab
        self.institution = institution
class id2otherid(Base):
    """Cross-reference mapping an internal id to the identifier used by
    another data source."""
    __tablename__ = "id2otherid"

    id = Column(Integer, primary_key=True)
    other_id = Column(String(100), primary_key=True)
    id_data_source_id = Column(Integer, ForeignKey('data_source.id'))
    other_id_data_source_id = Column(Integer, ForeignKey('data_source.id'))
    # Two relationships to the same table require explicit join conditions.
    id_data_source = relationship("DataSource", primaryjoin = id_data_source_id == DataSource.id)
    other_id_data_source = relationship("DataSource", primaryjoin = other_id_data_source_id == DataSource.id)

    __table_args__ = (UniqueConstraint('id','other_id'),{})

    def __repr__(self):
        return "%s in (%s)" % (self.other_id, str(self.other_id_data_source))

    def __init__(self, id, other_id, id_data_source_id, other_id_data_source_id):
        self.id = id
        self.other_id = other_id
        self.id_data_source_id = id_data_source_id
        self.other_id_data_source_id = other_id_data_source_id
class GenomeRegionMap(Base):
    """Pairwise link between two genome regions with an integer distance."""
    __tablename__ = 'genome_region_map'

    genome_region_id_1 = Column(Integer, ForeignKey('genome_region.id'), primary_key=True)
    genome_region_id_2 = Column(Integer, ForeignKey('genome_region.id'), primary_key=True)
    distance = Column(Integer)

    __table_args__ = (UniqueConstraint('genome_region_id_1','genome_region_id_2'),{})

    def __repr__(self):
        return "GenomeRegionMap (%d <--> %d) distance:%d" % (self.genome_region_id_1, self.genome_region_id_2, self.distance)

    def __init__(self, genome_region_id_1, genome_region_id_2, distance):
        self.genome_region_id_1 = genome_region_id_1
        self.genome_region_id_2 = genome_region_id_2
        self.distance = distance
class _Session(_SA_Session):
    """an sqlalchemy session object to interact with the OME database

    This object can used to make queries against the ome database. For
    example, a query without using any ORM looks like this

    >>> session = Session()
    >>> session.execute("SELECT name from genes where bnum='b0001'").fetchone()
    (u'thrL',)

    Using the sqlalchemy ORM gives more descriptive objects. For example:

    >>> b0001 = session.query(Gene).filter(Gene.bnum=="b0001").first()
    >>> b0001.name
    u'thrL'

    Raw queries which return ORM objects are also possible:

    >>> sql_statement = "SELECT * from genes where bnum='b0001'"
    >>> b0001 = session.query(Gene).from_statement(sql_statement).first()
    >>> b0001.name
    u'thrL'

    The Session will automatically set the search_path to settings.schema
    """

    def __init__(self, *args, **kwargs):
        super(_Session, self).__init__(*args, **kwargs)
        #self.execute("set search_path to %s;" % (settings.schema))
        self.commit()
        # Bind the module-level helper as an instance method for convenience.
        self.get_or_create = MethodType(get_or_create, self)
        #self.search_by_synonym = MethodType(search_by_synonym, self)

    def __repr__(self):
        return "OME session %d" % (self.__hash__())
def get_or_create(session, class_type, **kwargs):
    """Fetch the row of `class_type` matching its unique-constraint columns
    in `kwargs`, creating (and committing) it first if it does not exist.

    Every class used with this helper must declare a UniqueConstraint in its
    __table_args__; the constraint's columns form the lookup filter.

    Raises:
        ValueError: if `class_type` declares no UniqueConstraint (previously
            this surfaced as an obscure NameError).
    """
    # Columns of the class's own unique constraint.
    unique_cols = None
    for constraint in list(class_type.__table_args__):
        if constraint.__class__.__name__ == 'UniqueConstraint':
            unique_cols = constraint.columns.keys()
    if unique_cols is None:
        raise ValueError("%s declares no UniqueConstraint in __table_args__" % class_type.__name__)

    # For mapped inheritance, also check the parent class's unique columns so
    # the child row is (re)created when the parent row is missing.
    inherited_result = True
    if '__mapper_args__' in class_type.__dict__ and 'inherits' in class_type.__mapper_args__:
        inherited_class_type = class_type.__mapper_args__['inherits']
        inherited_unique_cols = None
        for parent_constraint in list(inherited_class_type.__table_args__):
            if parent_constraint.__class__.__name__ == 'UniqueConstraint':
                inherited_unique_cols = parent_constraint.columns.keys()
        if inherited_unique_cols is not None:
            try:
                inherited_result = session.query(inherited_class_type).filter_by(**{k: kwargs[k] for k in inherited_unique_cols}).first()
            except Exception:
                # Best effort (was a silent bare `except: None`): treat the
                # parent row as present when the lookup cannot be performed.
                pass

    result = session.query(class_type).filter_by(**{k: kwargs[k] for k in unique_cols}).first()
    if not result or not inherited_result:
        result = class_type(**kwargs)
        session.add(result)
        session.commit()
    return result
def update(session, object, **kwargs):
    """Set each kwarg as an attribute on `object`, then add and commit it.

    Ideally this would only search on the primary key columns so
    that an update could be made in one call. However, its not currently
    clear how to do that so necessary to pass in the actual object and
    update following a call to get_or_create() There is probably some
    way to do this with class_mapper but its hard right now
    """
    #result = session.query(class_type).filter_by(**kwargs).first()
    #result = session.query(class_type).filter_by(name=kwargs['name']).first()
    #if result is None: return
    # NOTE: iteritems / the `object` parameter name are Python 2 era choices
    # kept for compatibility with existing callers.
    for key,value in kwargs.iteritems():
        setattr(object,key,value)
    session.add(object)
    session.commit()
    return object
# Session factory producing the customised _Session objects bound to engine.
Session = sessionmaker(bind=engine, class_=_Session)

if __name__ == "__main__":
    # Smoke test: opening a session exercises the DB connection.
    session = Session()
| |
from __future__ import absolute_import
import numpy as np
import re
from scipy import ndimage
from scipy import linalg
from os import listdir
from os.path import isfile, join
import random
import math
from six.moves import range
import threading
'''Fairly basic set of tools for realtime data augmentation on image data.
Can easily be extended to include new transformations, new preprocessing methods, etc...
'''
def random_rotation(x, rg, fill_mode="nearest", cval=0.):
    """Rotate a (channels, rows, cols) array by a random angle drawn
    uniformly from [-rg, rg] degrees; shape is preserved."""
    theta = random.uniform(-rg, rg)
    return ndimage.interpolation.rotate(
        x, theta, axes=(1, 2), reshape=False, mode=fill_mode, cval=cval)
def random_shift(x, wrg, hrg, fill_mode="nearest", cval=0.):
    """Translate a (channels, rows, cols) array by random fractions of its
    width (wrg) and height (hrg); nearest-neighbour resampling (order=0)."""
    dx = random.uniform(-wrg, wrg) * x.shape[2] if wrg else 0
    dy = random.uniform(-hrg, hrg) * x.shape[1] if hrg else 0
    return ndimage.interpolation.shift(
        x, (0, dy, dx), order=0, mode=fill_mode, cval=cval)
def horizontal_flip(x):
    """Mirror every channel of x left<->right, in place, and return x."""
    for channel in range(x.shape[0]):
        x[channel] = x[channel][:, ::-1]
    return x
def vertical_flip(x):
    """Mirror every channel of x top<->bottom, in place, and return x."""
    for channel in range(x.shape[0]):
        x[channel] = x[channel][::-1, :]
    return x
def random_barrel_transform(x, intensity):
    """Placeholder for a random barrel (lens) distortion; not implemented."""
    # TODO
    pass
def random_shear(x, intensity, fill_mode="nearest", cval=0.):
    """Shear a (channels, rows, cols) array by a random angle (radians)
    drawn uniformly from [-intensity, intensity]."""
    angle = random.uniform(-intensity, intensity)
    transform = np.array([[1.0, -math.sin(angle), 0.0],
                          [0.0, math.cos(angle), 0.0],
                          [0.0, 0.0, 1.0]])
    return ndimage.interpolation.affine_transform(
        x, transform, mode=fill_mode, order=3, cval=cval)
def random_channel_shift(x, rg):
    """Placeholder for a random per-channel intensity shift; not implemented."""
    # TODO
    pass
def random_zoom(x, rg, fill_mode="nearest", cval=0.):
    """Zoom each spatial axis by an independent random factor in [1-rg, 1].

    Note: the output shape differs from the input shape unless both factors
    happen to be 1.
    """
    factor_w = random.uniform(1. - rg, 1.)
    factor_h = random.uniform(1. - rg, 1.)
    return ndimage.interpolation.zoom(
        x, zoom=(1., factor_w, factor_h), mode=fill_mode, cval=cval)
def array_to_img(x, scale=True):
    """Convert a (channels, rows, cols) array to a PIL Image.

    When `scale` is True the values are shifted to be non-negative and
    rescaled so the maximum maps to 255.

    Bug fix: the previous version scaled in place through a transposed
    *view* of `x`, silently mutating the caller's array; we copy first.
    """
    from PIL import Image
    # Copy so the in-place scaling below cannot write through the view
    # into the caller's array.
    x = x.transpose(1, 2, 0).copy()
    if scale:
        x += max(-np.min(x), 0)
        # NOTE(review): an all-zero input still divides by zero here —
        # behaviour unchanged from the original.
        x /= np.max(x)
        x *= 255
    if x.shape[2] == 3:
        # RGB
        return Image.fromarray(x.astype("uint8"), "RGB")
    else:
        # grayscale
        return Image.fromarray(x[:, :, 0].astype("uint8"), "L")
def img_to_array(img):
    """Convert a PIL image (or array-like) into a float32 array laid out as
    (channels, rows, cols)."""
    arr = np.asarray(img, dtype='float32')
    if arr.ndim == 3:
        # RGB: height, width, channel -> channel, height, width
        return arr.transpose(2, 0, 1)
    # grayscale: height, width -> channel, height, width
    return arr.reshape((1,) + arr.shape)
def load_img(path, grayscale=False):
    """Open an image file with PIL in a predictable band layout:
    single-band 'L' when grayscale, three-band 'RGB' otherwise (even if the
    file on disk is grayscale)."""
    from PIL import Image
    img = Image.open(path)
    return img.convert('L' if grayscale else 'RGB')
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
    """Return paths of files in `directory` whose names start with
    <word>.<ext> for one of the given extensions."""
    pattern = re.compile(r'([\w]+\.(?:' + ext + r'))')
    matches = []
    for name in listdir(directory):
        path = join(directory, name)
        if isfile(path) and pattern.match(name):
            matches.append(path)
    return matches
class ImageDataGenerator(object):
    '''Generate minibatches with
    realtime data augmentation.

    Call fit() first when any featurewise_* or zca_whitening option is on,
    then iterate flow(X, y) to obtain (batch_x, batch_y) tuples.
    '''
    def __init__(self,
                 featurewise_center=True,  # set input mean to 0 over the dataset
                 samplewise_center=False,  # set each sample mean to 0
                 featurewise_std_normalization=True,  # divide inputs by std of the dataset
                 samplewise_std_normalization=False,  # divide each input by its std
                 zca_whitening=False,  # apply ZCA whitening
                 rotation_range=0.,  # degrees (0 to 180)
                 width_shift_range=0.,  # fraction of total width
                 height_shift_range=0.,  # fraction of total height
                 shear_range=0.,  # shear intensity (shear angle in radians)
                 horizontal_flip=False,
                 vertical_flip=False):
        # Store every constructor argument as a same-named attribute (this
        # also stores a harmless self-reference under 'self').
        self.__dict__.update(locals())
        # Dataset statistics; populated by fit() and required by the
        # featurewise_* and zca_whitening options in standardize().
        self.mean = None
        self.std = None
        self.principal_components = None
        # Guards the shared index generator when several threads pull
        # batches from the same flow.
        self.lock = threading.Lock()

    def _flow_index(self, N, batch_size=32, shuffle=False, seed=None):
        """Infinite generator yielding (index_slice, current_index, batch_size)."""
        b = 0
        total_b = 0
        while 1:
            if b == 0:
                # Start of an epoch: (re)seed and (re)shuffle the indices.
                if seed is not None:
                    np.random.seed(seed + total_b)

                if shuffle:
                    index_array = np.random.permutation(N)
                else:
                    index_array = np.arange(N)

            current_index = (b * batch_size) % N
            if N >= current_index + batch_size:
                current_batch_size = batch_size
            else:
                # Last (possibly smaller) batch of the epoch.
                current_batch_size = N - current_index

            if current_batch_size == batch_size:
                b += 1
            else:
                # Epoch complete; restart indexing next iteration.
                b = 0
            total_b += 1
            yield index_array[current_index: current_index + current_batch_size], current_index, current_batch_size

    def flow(self, X, y, batch_size=32, shuffle=False, seed=None,
             save_to_dir=None, save_prefix="", save_format="jpeg"):
        """Set up iteration over (X, y) in augmented minibatches; returns self.

        When save_to_dir is set, each generated batch image is also written
        to disk (for visual inspection of the augmentation).
        """
        assert len(X) == len(y)
        self.X = X
        self.y = y
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        self.flow_generator = self._flow_index(X.shape[0], batch_size, shuffle, seed)
        return self

    def __iter__(self):
        # needed if we want to do something like for x, y in data_gen.flow(...):
        return self

    def next(self):
        # for python 2.x
        # Keep under lock only the mechanism which advances the indexing of
        # each batch; see http://anandology.com/blog/using-iterators-and-generators/
        with self.lock:
            index_array, current_index, current_batch_size = next(self.flow_generator)
        # The transformation of images is not under thread lock so it can be done in parallel
        bX = np.zeros(tuple([current_batch_size] + list(self.X.shape)[1:]))
        for i, j in enumerate(index_array):
            x = self.X[j]
            x = self.random_transform(x.astype("float32"))
            x = self.standardize(x)
            bX[i] = x
        if self.save_to_dir:
            for i in range(current_batch_size):
                img = array_to_img(bX[i], scale=True)
                img.save(self.save_to_dir + "/" + self.save_prefix + "_" + str(current_index + i) + "." + self.save_format)
        bY = self.y[index_array]
        return bX, bY

    def __next__(self):
        # for python 3.x
        return self.next()

    def standardize(self, x):
        """Apply the configured normalisations to a single sample in place."""
        if self.featurewise_center:
            x -= self.mean
        if self.featurewise_std_normalization:
            x /= (self.std + 1e-7)

        if self.zca_whitening:
            flatx = np.reshape(x, (x.shape[0]*x.shape[1]*x.shape[2]))
            whitex = np.dot(flatx, self.principal_components)
            x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))

        if self.samplewise_center:
            x -= np.mean(x, axis=1, keepdims=True)
        if self.samplewise_std_normalization:
            x /= (np.std(x, axis=1, keepdims=True) + 1e-7)

        return x

    def random_transform(self, x):
        """Apply the configured random augmentations to a single sample."""
        if self.rotation_range:
            x = random_rotation(x, self.rotation_range)
        if self.width_shift_range or self.height_shift_range:
            x = random_shift(x, self.width_shift_range, self.height_shift_range)
        if self.horizontal_flip:
            if random.random() < 0.5:
                x = horizontal_flip(x)
        if self.vertical_flip:
            if random.random() < 0.5:
                x = vertical_flip(x)
        if self.shear_range:
            x = random_shear(x, self.shear_range)
        # TODO:
        # zoom
        # barrel/fisheye
        # channel shifting
        return x

    def fit(self, X,
            augment=False,  # fit on randomly augmented samples
            rounds=1,  # if augment, how many augmentation passes over the data do we use
            seed=None):
        '''Required for featurewise_center, featurewise_std_normalization and zca_whitening.

        Computes the dataset mean/std (and ZCA principal components when
        enabled), optionally over `rounds` augmented copies of X.
        '''
        X = np.copy(X)
        if augment:
            aX = np.zeros(tuple([rounds*X.shape[0]]+list(X.shape)[1:]))
            for r in range(rounds):
                for i in range(X.shape[0]):
                    img = array_to_img(X[i])
                    img = self.random_transform(img)
                    aX[i+r*X.shape[0]] = img_to_array(img)
            X = aX

        if self.featurewise_center:
            self.mean = np.mean(X, axis=0)
            X -= self.mean
        if self.featurewise_std_normalization:
            self.std = np.std(X, axis=0)
            X /= (self.std + 1e-7)

        if self.zca_whitening:
            flatX = np.reshape(X, (X.shape[0], X.shape[1]*X.shape[2]*X.shape[3]))
            sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
            U, S, V = linalg.svd(sigma)
            self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
class GraphImageDataGenerator(ImageDataGenerator):
    '''Example of how to build a generator for a Graph model

    Yields {'input': batch_x, 'output': batch_y} dicts instead of tuples,
    matching the named-endpoint API of keras Graph models.
    '''

    def next(self):
        # Reuse the parent batch machinery; only the packaging differs.
        bX, bY = super(GraphImageDataGenerator, self).next()
        return {'input': bX, 'output': bY}
| |
# Copyright 2010 http://www.collabq.com
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hmac
import logging
import math
import random
import re
import sys
import time
import urllib
from django import http
from django.conf import settings
from django.utils import safestring
from administration import models
from common import clean
from common import exception
from beautifulsoup.BeautifulSoup import BeautifulStoneSoup
# Optional-dependency shims: prefer the stdlib uuid/hashlib modules and fall
# back to weaker substitutes on interpreters that lack them.
try:
    import uuid
    _generate_uuid = lambda: uuid.uuid4().hex
except ImportError:
    logging.info("No uuid module, using fake")
    # NOTE(review): fallback "uuid" is a small random int — not unique.
    _generate_uuid = lambda: str(random.randint(10000000, 20000000))

try:
    import hashlib
    _hash = lambda k, m: hmac.new(k, m, hashlib.sha1).hexdigest()
    sha1 = lambda k: hashlib.sha1(k).hexdigest()
except ImportError:
    import sha
    logging.info("No hashlib module, using sha1")
    # Legacy sha module: approximate a keyed hash by concatenation (weaker
    # than HMAC).
    _hash = lambda k, m: sha.new(k + m).hexdigest()
    sha1 = lambda k: sha.new(k).hexdigest()

VALID_METHODS = ('GET', 'HEAD', 'POST', 'PUT', 'DELETE')
DEFAULT_AVATAR_PATH = 'avatar_default'
def add_caching_headers(response, headers):
    """Copy the given caching headers onto *response* and return it.

    The response is left untouched when it already carries a
    Cache-control header (an upstream caching decision) or when it is
    not a plain 200 (redirects/errors keep their own semantics).
    """
    # already has caching headers set
    if response.has_header('Cache-control'):
        return response
    # this is a redirect or an error
    if response.status_code != 200:
        return response
    # .items() instead of the Python-2-only .iteritems(): identical here
    # and keeps the helper portable.
    for name, value in headers.items():
        response[name] = value
    return response
# Headers that tell both HTTP/1.1 (Cache-control) and HTTP/1.0 (Pragma,
# Expires) caches never to store the response; the Expires date is
# deliberately far in the past.
CACHE_NEVER_HEADERS = {'Cache-control': 'no-cache, must-revalidate',
                       'Pragma': 'no-cache',
                       'Expires': 'Fri, 01 Jan 1990 00:00:00 GMT',
                       }
# HTTP dates must be expressed in GMT, so build the Expires stamp from
# UTC rather than local time (datetime.now() would label local time as
# GMT and be off by the local UTC offset).  364 days keeps us just under
# the customary one-year cap for Expires.
a_bit_less_than_one_year_from_when_this_file_was_loaded = (
    datetime.datetime.utcnow() + datetime.timedelta(days=364)
    ).strftime('%a, %d %b %Y %H:%M:%S GMT')
CACHE_FOREVER_HEADERS = {
    'Expires': a_bit_less_than_one_year_from_when_this_file_was_loaded,
    'Cache-control': 'public, max-age=%d' % (86400*364)
    }
def HttpRssResponse(content, request):
    """Wrap *content* in an HttpResponse tagged as UTF-8 RSS."""
    rss_response = http.HttpResponse(content)
    rss_response['Content-type'] = 'application/rss+xml; charset=utf-8'
    return rss_response
def HttpAtomResponse(content, request):
    """Wrap *content* in an HttpResponse tagged as UTF-8 Atom."""
    atom_response = http.HttpResponse(content)
    atom_response['Content-type'] = 'application/atom+xml; charset=utf-8'
    return atom_response
def HttpJsonResponse(content, request):
    """Wrap *content* in an HttpResponse tagged as UTF-8 JavaScript/JSON."""
    json_response = http.HttpResponse(content)
    json_response['Content-type'] = 'text/javascript; charset=utf-8'
    return json_response
def hash_password(nick, password):
    """Return the stored form of *password*.

    *nick* is accepted for signature compatibility but does not
    participate in the hash.
    """
    return sha1(password)
def hash_password_intermediate(nick, password):
    """HMAC *password* with a key derived from the site salt and *nick*."""
    salted_key = hash_salt() + nick
    return _hash(salted_key, password)
def domain(request):
    """Build scheme://host[:port] for *request*, omitting the default port 80."""
    meta = request.META
    base = "%s://%s" % (meta['wsgi.url_scheme'], meta['SERVER_NAME'])
    port = meta['SERVER_PORT']
    if port != '80':
        base = "%s:%s" % (base, port)
    return base
def here(request):
    """Absolute URL of the current request: domain plus request path."""
    return domain(request) + request.META['PATH_INFO']
def hash_salt():
    """Site-wide secret used as the salt for the hashing helpers."""
    return settings.SECRET_KEY
def hash_generic(value):
    """HMAC *value* (coerced to a byte string) with the site salt."""
    encoded = clean.encoding.smart_str(value)
    return _hash(hash_salt(), encoded)
def generate_uuid():
    """Return a fresh unique identifier string."""
    return _generate_uuid()
def generate_password():
    """Create a password for the user (to change)."""
    fresh_id = generate_uuid()
    return hash_generic(fresh_id)[:8]
def create_nonce(user, action, offset=0):
    """Derive a short, time-windowed nonce for *user* performing *action*.

    The nonce is stable within a 12-hour (43200 s) window; *offset*
    selects an adjacent window, e.g. to accept a just-expired nonce.
    """
    if not user:
        nick = ""
    else:
        try:
            nick = user.nick
        except AttributeError:
            if not settings.MANAGE_PY:
                raise
            # extra case to make testing easier: accept a bare nick string
            nick = clean.nick(user)
    window = math.ceil(time.time() / 43200) + offset
    digest = hash_generic(str(window) + action + nick)
    return digest[-12:-2]
def safe(f):
    """Decorator: mark the wrapped filter's return value as safe HTML."""
    def _wrapper(value, arg=None):
        return safestring.mark_safe(f(value, arg))
    _wrapper.__name__ = f.__name__
    return _wrapper
def get_boolcheckbox_value(value):
    """Map a checkbox's truthiness to the integers 1/0."""
    return 1 if value else 0
def get_redirect_to(request, default=None):
    """Pick the post-action redirect target.

    Precedence: explicit ?redirect_to= parameter, then *default*, then
    the current request path.
    """
    target = request.REQUEST.get('redirect_to', default)
    if target is not None:
        return target
    # TODO make this domain aware
    return request.META.get('PATH_INFO')
def RedirectFlash(url, message):
    """Redirect to *url* carrying a nonce-signed one-shot flash message."""
    flash_params = {'flash': message,
                    '_flash': create_nonce(None, message)}
    return http.HttpResponseRedirect(qsa(url, params=flash_params))
def RedirectError(message):
    """Redirect to the site error page carrying a nonce-signed message."""
    error_params = {'error': message,
                    '_error': create_nonce(None, message)}
    error_url = 'http://%s/error' % settings.DOMAIN
    return http.HttpResponseRedirect(qsa(error_url, params=error_params))
def query_dict_to_keywords(query_dict):
    """Convert a QueryDict into a **kwargs-safe dict (plain-str keys).

    Under DEBUG, profiler parameters (keys starting with '_prof') are
    logged and stripped so they never reach handler signatures.
    """
    if settings.DEBUG:
        # support for profiling, pretend profiling stuff doesn't exist.
        # (A plain loop replaces the original list comprehension that was
        # used purely for its logging side effects.)
        kept = {}
        for key, value in query_dict.items():
            if key.startswith('_prof'):
                continue
            logging.info("Key: %s -- Value: %s" % (key, value))
            kept[str(key)] = value
        return kept
    return dict([(str(k), v) for k, v in query_dict.items()])
def href_to_queryparam_dict(href):
    """Parse the query string of *href* into a {name: value} dict.

    Fixes over the original: only the first '=' in each pair is
    significant (so values may themselves contain '='), and pairs with
    no '=' at all are skipped instead of raising IndexError.
    """
    query_params = {}
    parts = href.split('?')
    if len(parts) > 1:
        for qparam in parts[1].split('&'):
            pieces = qparam.split('=', 1)
            if len(pieces) != 2:
                # Bare flag with no value; nothing sensible to store.
                continue
            key, value = [urllib.unquote(kv) for kv in pieces]
            query_params[key] = value
    return query_params
def email_domain(s):
    """Returns the domain part of an email address."""
    _, _, domain_part = s.rpartition('@')
    return domain_part if domain_part or '@' in s else s
def is_remote(s):
    """True when *s* (a user@domain style id) is not on the local domain."""
    # XXX termie: this should look up something in a list of local domains
    return s.split('@')[-1] != settings.NS_DOMAIN
def is_channel_nick(nick):
    """Channels are distinguished from users by a leading '#'."""
    return nick[:1] == "#"
def get_user_from_topic(s):
    """Extracts the username from a topic or Stream object.
    Topics look like: 'stream/bar@example.com/comments'
    Returns:
      A string, the username, or None if the topic name didn't appear to
      contain a valid userid.
    """
    # Check whether we got a topic name or a Stream instance.
    if not (isinstance(s, str) or isinstance(s, unicode)):
        s = s.key().name()
    # Renamed from `list`: don't shadow the builtin.
    parts = s.split('/')
    try:
        email = parts[1]
    except IndexError:  # No '/' in s.
        return None
    return email if '@' in email else None
def qsa(url, params):
    """Append *params* to *url* as query-string arguments."""
    # TODO termie make better
    separator = "&" if "?" in url else "?"
    return url + separator + urllib.urlencode(params)
def datetime_to_timestamp(dt):
    # NOTE(review): mktime() interprets its tuple as *local* time, so for
    # a UTC datetime this is off by the local UTC offset (calendar.timegm
    # would be the textbook choice).  However page_offset() decodes these
    # values with datetime.fromtimestamp(), which applies the inverse
    # local conversion, so the pair is self-consistent as written --
    # confirm both call sites before changing either side.
    return time.mktime(dt.utctimetuple())
def page_offset(request):
    """attempts to normalize timestamps into datetimes for offsets"""
    offset = request.GET.get('offset', None)
    if offset:
        try:
            offset = datetime.datetime.fromtimestamp(float(offset))
        except (TypeError, ValueError):
            # Unparseable offset: behave as if none was supplied.
            offset = None
    return offset, bool(offset)
def page_offset_nick(request):
    """Like page_offset(), but the offset is an opaque nick, not a timestamp."""
    offset = request.GET.get('offset', None)
    return offset, bool(offset)
def paging_get_page(request):
    """Current 1-based page number from ?page=, defaulting to 1 on junk."""
    try:
        page = int(request.GET.get('page', 1))
    except ValueError:
        return 1
    return page if page > 0 else 1
def paging_get_offset(page=1, limit=20):
    """Index of the first item on *page*, with *limit* items per page."""
    return (page - 1) * limit
def paging_filter(request):
    """Sanitize ?filter= down to the supported values 'all'/'member'."""
    requested = request.GET.get('filter', 'all')
    return requested if requested in ('all', 'member') else 'all'
def paging_get_results(results, page, limit):
    """Slice *results* down to the requested 1-based page."""
    start = (page - 1) * limit
    return results[start:start + limit]
def paging_url(filter, nick, owner_nick):
    """Build the filter/owner query fragment for paging links.

    Viewing your own member page keeps the bare member filter; viewing
    someone else's adds their nick as the owner parameter.
    """
    if filter == 'member':
        if nick == owner_nick:
            return 'filter=%s' % filter
        # Bug fix: the filter value was interpolated from `nick`, which
        # produced e.g. 'filter=<viewer>' instead of 'filter=member'.
        return 'filter=%s&owner=%s' % (filter, owner_nick)
    return 'filter=all'
def paging_type(request):
    """Raw ?type= parameter, or None when absent."""
    return request.GET.get('type', None)
def get_owner(request):
    """Nick of the page owner: ?owner= when given, else the viewer's nick."""
    requested = request.GET.get('owner', request.user.nick)
    return clean.nick(requested)
def paging(page, per_page, size):
    """Compute display bounds and navigation targets for page *page*.

    Returns (start, end, next, prev, first, last): start/end are 1-based
    item positions (end clamped to *size*), the rest are page numbers.
    """
    start = (page - 1) * per_page + 1
    end = min(page * per_page, size)
    last = int(size / per_page)
    if size % per_page != 0:
        last += 1
    return start, end, page + 1, page - 1, 1, last
def page_entries(request, entries, per_page):
    """Trim *entries* to one page, returning (page, next_offset).

    Queries fetch per_page + 1 rows; when more than a full page came
    back, drop the sentinel row and hand over the timestamp of the
    second-to-last entry as the offset for the next page.
    """
    if per_page > 0 and len(entries) > per_page:
        next_offset = datetime_to_timestamp(entries[-2].created_at)
        return entries[:-1], next_offset
    return entries, None
def page_actors(request, actors, per_page):
    """ attempts to break a result into pages
    if the number of actors is greater than per_page hand over the nick
    of the second-to-last actor to use as an offset.
    the length of actors should never be more than per_page + 1
    """
    if len(actors) <= per_page:
        return actors, None
    return actors[:-1], actors[-2].nick
def display_nick(nick):
    """Short (local) part of a nick, without the @domain suffix."""
    # TODO(teemu): combine nick functionality from models.py with this
    return nick.partition("@")[0]
def url_nick(nick):
    """Nick as used in URLs: domain stripped, leading channel '#' removed."""
    short = nick.split("@")[0]
    if short.startswith('#'):
        return short[1:]
    return short
def BREAKPOINT():
    """Drop into the pdb debugger wired to the process's real stdin/stdout."""
    import pdb
    # Use the original stream objects in case sys.stdout/stdin have been
    # replaced (e.g. by the serving environment).
    p = pdb.Pdb(None, sys.__stdin__, sys.__stdout__)
    p.set_trace()
def htmlentities_decode(str):
    """Decode HTML entities (e.g. '&amp;' -> '&') via BeautifulStoneSoup."""
    # NOTE(review): the parameter name shadows the builtin `str`; renaming
    # it could break keyword callers, so it is preserved here.
    return unicode(BeautifulStoneSoup(str,convertEntities=BeautifulStoneSoup.HTML_ENTITIES ))
def set_metadata(name, value, order=0, extra=None):
    """Create or update the Metadata row called *name*.

    Only *value* is refreshed on an existing row; *order* and *extra*
    apply at creation time only.
    """
    if extra is None:
        # Fresh dict per call: the original `extra={}` default was a
        # shared mutable that every call (and the model) could mutate.
        extra = {}
    metadata_ref = get_metadata_ref(name)
    if not metadata_ref:
        metadata_ref = models.Metadata(name=name, value=value, order=order,
                                       extra=extra)
    else:
        metadata_ref.value = value
    metadata_ref.put()
    return metadata_ref
def get_metadata_ref(name):
    """First Metadata entity named *name*, or None when there is none."""
    metadata_ref = models.Metadata.gql("WHERE name=:1", name).get()
    if not metadata_ref:
        return None
    return metadata_ref
def get_metadata(name, default=None):
    """Value of the Metadata row *name*, with layered fallbacks.

    Lookup order: datastore value, then the caller's *default*, then the
    identically named attribute in Django settings (a warning is logged
    and None returned when that attribute is missing too).
    """
    metadata_ref = get_metadata_ref(name)
    if metadata_ref:
        return metadata_ref.get_value()
    if default is not None:
        return default
    try:
        return getattr(settings, name)
    except AttributeError:
        logging.warning("AttributeError, %s is not in settings" % name)
        return None
def get_metadata_list(name):
    """Query for every Metadata row named *name*, sorted by 'order'."""
    return models.Metadata.all().filter("name", name).order('order')
| |
'''
Module Name   : Command-line Output Draw
Author        : Rungsimun Saenprasert
Date          : 06/07/2014 17:25
Special Thanks: Chotipat Pornavalai for creating the pre-programming program.
                Naoki Meida for teaching and assistance.
                Sittipong Suwantri for a new algorithm and new ideas.
                Sorrawut Kittikeereechaikun for the line-drawing algorithm.
'''
# 5x7 banner font used by Layer.text_ex(): each glyph is 7 rows of 5
# characters, '*' marking cells to draw.  The duplicate 'Z' entry present
# in the original table has been removed (a dict literal keeps only the
# last duplicate anyway, and both copies were identical).
CH = {
    ' ': ["     ", "     ", "     ", "     ", "     ", "     ", "     "],
    'A': ["  *  ", " * * ", "*   *", "*****", "*   *", "*   *", "*   *"],
    'B': ["**** ", "*   *", "*   *", "**** ", "*   *", "*   *", "**** "],
    'C': [" *** ", "*   *", "*    ", "*    ", "*    ", "*   *", " *** "],
    'D': ["**** ", "*   *", "*   *", "*   *", "*   *", "*   *", "**** "],
    'E': ["*****", "*    ", "*    ", "*****", "*    ", "*    ", "*****"],
    'F': ["*****", "*    ", "*    ", "*****", "*    ", "*    ", "*    "],
    'G': ["*****", "*    ", "*    ", "*  **", "*   *", "*   *", "*****"],
    'H': ["*   *", "*   *", "*   *", "*****", "*   *", "*   *", "*   *"],
    'I': ["*****", "  *  ", "  *  ", "  *  ", "  *  ", "  *  ", "*****"],
    'J': ["  ***", "   * ", "   * ", "   * ", "   * ", "*  * ", " *** "],
    'K': ["*   *", "*  * ", "* *  ", "**   ", "* *  ", "*  * ", "*   *"],
    'L': ["*    ", "*    ", "*    ", "*    ", "*    ", "*    ", "*****"],
    'M': ["** **", "* * *", "* * *", "* * *", "* * *", "* * *", "*   *"],
    'N': ["*   *", "**  *", "* * *", "* * *", "* * *", "*  **", "*   *"],
    'O': ["*****", "*   *", "*   *", "*   *", "*   *", "*   *", "*****"],
    'P': ["*****", "*   *", "*   *", "*****", "*    ", "*    ", "*    "],
    'Q': ["*****", "*   *", "*   *", "*****", "    *", "    *", "    *"],
    'R': ["**** ", "*   *", "*   *", "**** ", "* *  ", "*  * ", "*   *"],
    'S': ["*****", "*    ", "*    ", "*****", "    *", "    *", "*****"],
    'T': ["*****", "  *  ", "  *  ", "  *  ", "  *  ", "  *  ", "  *  "],
    'U': ["*   *", "*   *", "*   *", "*   *", "*   *", "*   *", "*****"],
    'V': ["*   *", "*   *", "*   *", "*   *", "*   *", " * * ", "  *  "],
    'W': ["*   *", "* * *", "* * *", "* * *", "* * *", "* * *", " * * "],
    'X': ["*   *", "*   *", " * * ", "  *  ", " * * ", "*   *", "*   *"],
    'Y': ["*   *", "*   *", "*   *", " * * ", "  *  ", "  *  ", "  *  "],
    'Z': ["*****", "    *", "   * ", "  *  ", " *   ", "*    ", "*****"],
    '0': [" *** ", "*   *", "**  *", "* * *", "*  **", "*   *", " *** "],
    '1': [" **  ", "  *  ", "  *  ", "  *  ", "  *  ", "  *  ", " *** "],
    '2': ["**** ", "    *", "    *", " *** ", "*    ", "*    ", " ****"],
    '3': ["**** ", "    *", "    *", "**** ", "    *", "    *", "**** "],
    '4': ["   * ", "  ** ", " * * ", "*  * ", "*****", "   * ", "   * "],
    '5': [" ****", "*    ", "*    ", " *** ", "    *", "    *", "**** "],
    '6': [" *** ", "*   *", "*    ", "**** ", "*   *", "*   *", " *** "],
    '7': ["*****", "    *", "   * ", "  *  ", "  *  ", "  *  ", "  *  "],
    '8': [" *** ", "*   *", "*   *", " *** ", "*   *", "*   *", " *** "],
    '9': [" *** ", "*   *", "*   *", " ****", "    *", "*   *", " *** "],
    ':': ["     ", "  *  ", "     ", "     ", "     ", "  *  ", "     "],
    '-': ["     ", "     ", "     ", " *** ", "     ", "     ", "     "],
    '!': [" *** ", " *** ", " *** ", " *** ", " *** ", "     ", " *** "],
    '=': ["     ", "     ", "*****", "*****", "*****", "     ", "     "],
    '>': ["*    ", "**   ", "***  ", "**** ", "***  ", "**   ", "*    "],
    '.': ["     ", "     ", "     ", "     ", "     ", " *** ", " *** "],
    '?': [" *** ", "*   *", "*   *", "   * ", "  *  ", "     ", "  *  "],
}
class Layer(object):
'''
This class use for Command line drawing.
Variable Type: Layer(int, int, str(Optional))
Argument Require: Layer(width, height, default_one_charecters(Optional)) default_one_charecters default is white space.
BUILT-IN VARIABLE:
width
height
screen [Type:list]
BUILT-IN FUNCTION:
point(x, y, char(Optional)) draw point into layer with 1 charecters. (char default is white space)
line(x1, y2, x2, y2, text) draw line into layer with string.
text(x, y, text, alpha(Optional)) draw text into layer with string. (alpha default is False)
rectangle(x1, y1, x2, y2, text, outline(Optional)) draw rectangle into layer with string. (outline default is False)
circle(x1, y1, x2, y2, text, outline(Optional)) draw circle into layer with string. (outline default is False)
clear() clear layer.
draw() draw layer.
FOR EXAMPLE:
layer1 = Layer(100, 45)
'''
def error(self, error_text):
'''
built-in print error message
'''
self.err = 1
print "ERROR in", self.current_func, ":", error_text
def __init__(self, width, height, char=" ", bg="FFFFFF", fg="000000"):
'''
init variable into Layer
Variable Type: Layer(int, int, str)
Argument Require: Layer(width, height, default_one_charecters)
'''
char = str(char)
self.current_func = "Layer("+str(width)+","+str(height)+',"'+char+'")'
if self.check_len(char, 1, 1):
self.width = width
self.height = height
self.screen = []
self.pixel_line = []
self.screen_color = []
self.pixel_color_line = []
self.layer_char = char
self.layer_bg = bg
self.layer_fg = fg
self.err = 0
i = 0
while(i <= self.width):
self.pixel_line.append(char)
self.pixel_color_line.append([bg, fg])
i += 1
i = 0
while(i <= self.height):
self.screen.append(list(self.pixel_line))
self.screen_color.append(list(self.pixel_color_line))
i += 1
def check_len(self, char, length_min, length_max):
'''
built-in check length of string from length_min and length_max
'''
char = len(str(char))
if (char >= length_min or length_min == -1) and (char <= length_max or length_max == -1):
return True
elif char < length_min:
self.error("characters must be greater than "+str(length_min-1)+".")
else:
self.error("characters must not exceed "+str(length_max)+".")
return False
def check_area(self, num_x1=0, num_y1=0):
'''
built-in
'''
if num_x1 >= 0 and num_x1 <= self.width:
if num_y1 >= 0 and num_y1 <= self.height:
return True
else:
self.error("y out of layer area. (layer height is "+str(self.height)+")")
else:
self.error("x out of layer area. (layer width is "+str(self.width)+")")
return False
def point(self, num_x1, num_y1, char="#", bg="FFFFFF", fg="000000"):
'''
point(int, int, str)
Add Point into Layer
Variable Type: text(int, int, str)
Argument Require: text(x, y, text)
'''
if self.err == 0:
char = str(char)
self.current_func = "point("+str(num_x1)+", "+str(num_y1)+', "'+str(char)+'")'
if self.check_area(num_x1, num_y1) and self.check_len(char, 1, 1):
self.screen[num_y1][num_x1] = char
self.screen_color[num_y1][num_x1] = [bg, fg]
def line(self, num_x1, num_y1, num_x2, num_y2, text="#", bg="FFFFFF", fg="000000"):
'''
Add Line into Layer
Variable Type: line(int, int, int, int, str)
Argument Require: line(int, int, int, int, str)
'''
from math import sqrt, floor
if num_x1 > num_x2:
num_x1,num_x2 = num_x2,num_x1
if num_y1 > num_y2:
num_y1,num_y2 = num_y2,num_y1
linear = sqrt(num_x2-num_x1)
#print linear
t = 0
t2 = len(text)
i = num_y1
while i <= num_y2:
j = num_x1
while j <= num_x2:
#No Algorithm T_T
if 1:
self.screen[i][j] = text[t]
self.screen_color[i][j] = [bg, fg]
t += 1
if t == t2:
t = 0
j += 1
i += 1
slope = ((num_x2-num_x1)*(num_y2-num_y1))**0.5
start_x = num_x1
start_y = num_y1
stop_x = num_x2
for i in xrange(start_x,start_y+1):
line = ''
print_x_at = int(i / slope)
next_x_at = int(i+1 / slope)
half_gap = abs(next_x_at - print_x_at) / 2
for j in xrange(start_x,stop_x+1):
if print_x_at == j:
line += '#'
else:
if j < print_x_at and j + half_gap >= print_x_at:
line +='#'
elif j > print_x_at and j < next_x_at - half_gap:
line +='#'
else:
print ' '
print line
'''
self.screen[i][num_x2+jx] = text[t]
t += 1
if t == t2:
t = 0
i += 1
'''
def circle(self, num_x1, num_y1, num_x2, num_y2, text="#"):
'''
'''
def text(self, num_x1, num_y1, text="#", alpha=0):
'''
Add Text into Layer
Variable Type: text(int, int, str)
Argument Require: text(x, y, text)
'''
if self.err == 0:
char = str(text)
text_len = len(text)
self.current_func = "text("+str(num_x1)+", "+str(num_y1)+', "'+str(text)+'")'
if self.check_area(num_x1, num_y1) and self.check_area(num_x1+text_len, num_y1):
i = 0
i_max = text_len
while i < i_max:
if alpha == 0 or (alpha == 1 and text[i] != " "):
self.screen[num_y1][num_x1+i] = text[i]
self.screen_color[num_y1][num_x1+i] = [bg, fg]
i += 1
def text_ex(self, x = 0, y = 0, text = "", char = "#", bg="FFFFFF", fg="000000"):
t = 0
t_max = len(text)
while t < t_max:
i = 0
while i < 7:
c = 0
while c < 5:
if CH[text[t]][i][c] == "*":
self.point(x+c, y+i, char, bg, fg)
c += 1
i += 1
t += 1
x += 6
def rectangle(self, num_x1, num_y1, num_x2, num_y2, text="#", bg="FFFFFF", fg="000000", outline = 0):
'''
Add Rectangle into Layer
Variable Type: rectangle(int, int, int, int, str)
Argument Require: rectangle(x1, y1, x2, y2, charecters)
'''
if self.err == 0:
text = str(text)
self.current_func = "rectangle("+str(num_x1)+", "+str(num_y1)+", "+str(num_x2)+", "+str(num_y2)+', "'+str(text)+'", '+str(outline)+")"
if self.check_area(num_x1, num_y1) and self.check_area(num_x2, num_y2) and self.check_len(text, 1, -1):
if num_x1 > num_x2:
num_x1,num_x2 = num_x2,num_x1
if num_y1 > num_y2:
num_y1,num_y2 = num_y2,num_y1
t = 0
t2 = len(text)
i = num_y1
while i <= num_y2:
j = num_x1
while j <= num_x2:
if outline == 0 or outline == 1 and (i == num_y1 or i == num_y2 or j == num_x1 or j == num_x2):
self.screen[i][j] = text[t]
self.screen_color[i][j] = [bg, fg]
t += 1
if t == t2:
t = 0
j += 1
i += 1
def add_part(self, layer, num_x, num_y):
i = 0
while i <= layer.height:
j = 0
while j <= layer.width:
if num_y+i >= 0 and num_y+i <= self.height and num_x+j >= 0 and num_x+j <= self.width:
self.screen[num_y+i][num_x+j] = layer.screen[i][j]
self.screen_color[num_y+i][num_x+j] = layer.screen_color[i][j]
j += 1
i += 1
def move(self, move_x, move_y):
'''
Move Layer
Variable Type: move(int, int)
Argument Require: move(x, y)
'''
from copy import deepcopy
screen_old = deepcopy(self.screen)
screen_color_old = deepcopy(self.screen_color)
self.clear()
move_x = -move_x
move_y = -move_y
i = 0
while i <= self.height:
j = 0
while j <= self.width:
if i+move_y >= 0 and i+move_y <= self.height and j+move_x >= 0 and j+move_x <= self.width:
self.screen[i][j] = screen_old[i+move_y][j+move_x]
self.screen_color[i][j] = screen_color_old[i+move_y][j+move_x]
j += 1
i += 1
def filp(self):
'''
Move Layer
Variable Type: move(int, int)
Argument Require: move(x, y)
'''
from copy import deepcopy
screen_old = deepcopy(self.screen)
screen_color_old = deepcopy(self.screen_color)
self.clear()
i = 0
while i <= self.height:
j = 0
while j <= self.width:
self.screen[i][j] = screen_old[self.height-i][self.width-j]
self.screen_color[i][j] = screen_color_old[self.height-i][self.width-j]
j += 1
i += 1
def clear(self):
'''
Clear Layer
Variable Type: clear()
Argument Require: clear()
'''
if self.err == 0:
i = 0
while i <= self.height:
j = 0
while j <= self.width:
self.screen[i][j] = self.layer_char
self.screen_color[i][j] = [self.layer_bg, self.layer_fg]
j += 1
i += 1
def draw(self):
'''
Draw Layer
Variable Type: draw()
Argument Require: draw()
'''
if self.err == 0:
i = 0
render = "";
while i <= self.height:
j = 0
if i != 0: render += "\n"
while j <= self.width:
render += self.screen[i][j]
j += 1
i += 1
print render
def getdraw(self):
'''
Draw Layer
Variable Type: draw()
Argument Require: draw()
'''
screenlist = list()
if self.err == 0:
i = 0
render = ""
bg = ""
fg = ""
while i <= self.height:
j = 0
if i != 0:
screenlist.append(render)
render = ""
bg = ""
fg = ""
while j <= self.width:
if self.screen_color[i][j][0] != bg:
bg = self.screen_color[i][j][0]
render += "{:%s}" % bg
if self.screen_color[i][j][1] != fg:
fg = self.screen_color[i][j][1]
render += "{%s}" % fg
render += self.screen[i][j]
j += 1
i += 1
return screenlist
'''
layer1 = Layer(100, 45)
layer1.rectangle(0, 0, 100, 45, "#", 1)
layer1.rectangle(2, 2, 33, 8, ".")
layer1.rectangle(40, 12, 20, 6, "Boss")
layer1.rectangle(4, 4, 35, 10, "#", 1)
layer1.text(50, 20, "Yaranaika ?")
layer1.point(0, 0, "+")
layer1.point(0, 45, "+")
layer1.point(100, 0, "+")
layer1.point(100, 45, "+")
layer1.line(20, 20, 30, 40, "#")
#layer1.line(0, 0, 5, 5, "#")
#layer1.line(0, 0, 0, 5, "#")
#layer1.move(-1,5)
layer1.move(-5,0)
layer1.draw()
'''
| |
# Initial belief: uniform over the five cells of the 1-D cyclic world.
p=[0.2, 0.2, 0.2, 0.2, 0.2]
# Ground-truth color of each cell.
world=['green', 'red', 'red', 'green', 'green']
# Sensor readings, and the motion taken after each reading.
measurements = ['red', 'green']
motions = [1,1]
# Sensor model: probability the reading matches / mismatches the cell.
pHit = 0.6
pMiss = 0.2
# Motion model: move exactly U, overshoot by one, or undershoot by one.
pExact = 0.8
pOvershoot = 0.1
pUndershoot = 0.1
def sense(p, Z):
    """Bayes measurement update: reweight each cell of belief *p* by the
    sensor model for reading *Z*, then renormalize to sum to 1."""
    weighted = [p[i] * (pHit if Z == world[i] else pMiss)
                for i in range(len(p))]
    total = sum(weighted)
    return [w / total for w in weighted]
def move(p, U):
    """Motion update: convolve belief *p* with the (exact/overshoot/
    undershoot) motion model for a cyclic shift of U cells."""
    n = len(p)
    shifted = []
    for i in range(n):
        mass = (pExact * p[(i - U) % n]
                + pOvershoot * p[(i - U - 1) % n]
                + pUndershoot * p[(i - U + 1) % n])
        shifted.append(mass)
    return shifted
# Alternate sense/move over the scripted run, then print the final belief.
for m in range(len(motions)):
    p = sense(p, measurements[m])
    p = move(p, motions[m])
print p
###### HOMEWORK 1 PROGRAMMING ###########
# 2-D world: a grid of cell colors.
colors = [['red', 'green', 'green', 'red' , 'red'],
          ['red', 'red', 'green', 'red', 'red'],
          ['red', 'red', 'green', 'green', 'red'],
          ['red', 'red', 'red', 'red', 'red']]
measurements = ['green', 'green', 'green' ,'green', 'green']
# Motions are [dy, dx] grid steps, taken before each measurement.
motions = [[0,0],[0,1],[1,0],[1,0],[0,1]]
# Probability the sensor is right / that a commanded move happens.
sensor_right = 0.7
p_move = 0.8
def show(p):
    # Print a 2-D belief grid, one row per line.
    for i in range(len(p)):
        print p[i]
# CODE GOES AFTER
# Derive the model parameters from the assignment's givens.
pHit = sensor_right
pMiss = 1 - sensor_right
pExact = p_move
pNoMove = 1 - p_move
# Initialize the probabilities: a uniform belief over every cell of the
# (possibly ragged) grid.
total_number_of_cells = 0
for row in colors:
    total_number_of_cells += len(row)
p = []
for row in colors:
    p.append([1. / total_number_of_cells] * len(row))
def sense(p, Z):
    """2-D Bayes measurement update for reading *Z*, renormalized so the
    whole grid sums to 1."""
    unnormalized = [[cell * (pHit if Z == colors[y][x] else pMiss)
                     for x, cell in enumerate(row)]
                    for y, row in enumerate(p)]
    total = sum(sum(row) for row in unnormalized)
    return [[value / total for value in row] for row in unnormalized]
def move(p, U):
    """2-D motion update: mass arrives from the cell U=[dy, dx] behind
    with probability pExact, or stays put with probability pNoMove
    (cyclic boundaries via Python's negative indexing / modulo)."""
    result = []
    for y in range(len(p)):
        new_row = []
        for x in range(len(p[y])):
            came_from = p[y - U[0]][(x - U[1]) % len(p[y])]
            new_row.append(pExact * came_from + pNoMove * p[y][x])
        result.append(new_row)
    return result
# Run the scripted motions/measurements, then print the final belief.
for m in range(len(motions)):
    p = move(p, motions[m])
    p = sense(p, measurements[m])
#Your probability array must be printed
#with the following code.
show(p)
# Worked examples from the lecture.  NOTE(review): as written, each
# `show(p)` reprints the belief from the run above -- the homework code
# must be re-executed ("Paste above code in here") after re-binding the
# inputs for an example's output to be meaningful.  The second "Example 4"
# label is in the original.
# Example 1:
colors = [['green', 'green', 'green'],
          ['green', 'red', 'green'],
          ['green', 'green', 'green']]
measurements = ['red']
motions=[[0,0]]
sensor_right = 1.0
p_move = 1.0
# Paste above code in here
print "p should equal:"
print "[0.0, 0.0, 0.0]"
print "[0.0, 1.0, 0.0]"
print "[0.0, 0.0, 0.0]"
print "p actually equals:"
show(p)
# Example 2:
colors = [['green', 'green', 'green'],
          ['green', 'red', 'red'],
          ['green', 'green', 'green']]
measurements = ['red']
motions=[[0,0]]
sensor_right = 1.0
p_move = 1.0
# Paste above code in here
print "p should equal:"
print "[0.0, 0.0, 0.0]"
print "[0.0, 0.5, 0.5]"
print "[0.0, 0.0, 0.0]"
print "p actually equals:"
show(p)
# Example 3:
colors = [['green', 'green', 'green'],
          ['green', 'red', 'red'],
          ['green', 'green', 'green']]
measurements = ['red']
motions=[[0,0]]
sensor_right = 0.8
p_move = 1.0
# Paste above code in here
print "p should equal:"
print "[0.0666, 0.0666, 0.0666]"
print "[0.0666, 0.2666, 0.2666]"
print "[0.0666, 0.0666, 0.0666]"
print "p actually equals:"
show(p)
# Example 4:
colors = [['green', 'green', 'green'],
          ['green', 'red', 'red'],
          ['green', 'green', 'green']]
measurements = ['red', 'red']
motions=[[0,0], [0,1]]
sensor_right = 0.8
p_move = 1.0
# Paste above code in here
print "p should equal:"
print "[0.0333, 0.0333, 0.0333]"
print "[0.1333, 0.1333, 0.5333]"
print "[0.0333, 0.0333, 0.0333]"
print "p actually equals:"
show(p)
# Example 4:
colors = [['green', 'green', 'green'],
          ['green', 'red', 'red'],
          ['green', 'green', 'green']]
measurements = ['red', 'red']
motions=[[0,0], [0,1]]
sensor_right = 1.0
p_move = 1.0
# Paste above code in here
print "p should equal:"
print "[0.0, 0.0, 0.0]"
print "[0.0, 0.0, 1.0]"
print "[0.0, 0.0, 0.0]"
print "p actually equals:"
show(p)
# Example 5:
colors = [['green', 'green', 'green'],
          ['green', 'red', 'red'],
          ['green', 'green', 'green']]
measurements = ['red', 'red']
motions=[[0,0], [0,1]]
sensor_right = 0.8
p_move = 0.5
# Paste above code in here
print "p should equal:"
print "[0.0289, 0.0289, 0.0289]"
print "[0.0724, 0.0289, 0.4637]"
print "[0.0289, 0.0289, 0.0289]"
print "p actually equals:"
show(p)
| |
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import cloudstorage as gcs
import view_build
import main_test
import gcs_async_test
import github.models
import testgrid_test
import view_pr
# Short module-level aliases for heavily used fixtures/helpers.
app = main_test.app
init_build = main_test.init_build
write = gcs_async_test.write
class ParseJunitTest(unittest.TestCase):
    """Tests for view_build.parse_junit, which extracts failures from JUnit
    XML as (name, duration_seconds, failure_text, filename) tuples."""
    @staticmethod
    def parse(xml):
        # Helper: every test tags results with the dummy filename "fp".
        return list(view_build.parse_junit(xml, "fp"))
    def test_normal(self):
        # A plain <testsuite> document yields its single failure.
        failures = self.parse(main_test.JUNIT_SUITE)
        stack = '/go/src/k8s.io/kubernetes/test.go:123\nError Goes Here'
        self.assertEqual(failures, [('Third', 96.49, stack, "fp")])
    def test_testsuites(self):
        # A nested <testsuites> wrapper prefixes names with the suite name.
        failures = self.parse('''
            <testsuites>
                <testsuite name="k8s.io/suite">
                    <properties>
                        <property name="go.version" value="go1.6"/>
                    </properties>
                    <testcase name="TestBad" time="0.1">
                        <failure>something bad</failure>
                    </testcase>
                </testsuite>
            </testsuites>''')
        self.assertEqual(failures,
                         [('k8s.io/suite TestBad', 0.1, 'something bad', "fp")])
    def test_bad_xml(self):
        # Well-formed XML that isn't JUnit produces no failures.
        self.assertEqual(self.parse('''<body />'''), [])
    def test_corrupt_xml(self):
        # Invalid bytes are tolerated; bad characters come back as '?'.
        self.assertEqual(self.parse('<a>\xff</a>'), [])
        failures = self.parse('''
            <testsuites>
                <testsuite name="a">
                    <testcase name="Corrupt" time="0">
                        <failure>something bad \xff</failure>
                    </testcase>
                </testsuite>
            </testsuites>''')
        self.assertEqual(failures, [('a Corrupt', 0.0, 'something bad ?', 'fp')])
    def test_not_xml(self):
        # Completely unparseable input yields one synthetic failure
        # carrying the XML parser's error message.
        failures = self.parse('\x01')
        self.assertEqual(failures,
                         [(failures[0][0], 0.0, 'not well-formed (invalid token): line 1, column 0', 'fp')])
class BuildTest(main_test.TestBase):
# pylint: disable=too-many-public-methods
BUILD_DIR = '/kubernetes-jenkins/logs/somejob/1234/'
def setUp(self):
self.init_stubs()
init_build(self.BUILD_DIR)
testgrid_test.write_config()
def get_build_page(self, trailing=''):
return app.get('/build' + self.BUILD_DIR + trailing)
def test_missing(self):
"""Test that a missing build gives a 404."""
response = app.get('/build' + self.BUILD_DIR.replace('1234', '1235'),
status=404)
self.assertIn('1235', response)
def test_missing_started(self):
"""Test that a missing started.json still renders a proper page."""
build_dir = '/kubernetes-jenkins/logs/job-with-no-started/1234/'
init_build(build_dir, started=False)
response = app.get('/build' + build_dir)
self.assertRegexpMatches(response.body, 'Result.*SUCCESS')
self.assertIn('job-with-no-started', response)
self.assertNotIn('Started', response) # no start timestamp
self.assertNotIn('github.com', response) # no version => no src links
def test_missing_finished(self):
"""Test that a missing finished.json still renders a proper page."""
build_dir = '/kubernetes-jenkins/logs/job-still-running/1234/'
init_build(build_dir, finished=False)
response = app.get('/build' + build_dir)
self.assertRegexpMatches(response.body, 'Result.*Not Finished')
self.assertIn('job-still-running', response)
self.assertIn('Started', response)
def test_build(self):
"""Test that the build page works in the happy case."""
response = self.get_build_page()
self.assertIn('2014-07-28', response) # started
self.assertIn('v1+56', response) # build version
self.assertIn('16m40s', response) # build duration
self.assertIn('Third', response) # test name
self.assertIn('1m36s', response) # test duration
self.assertRegexpMatches(response.body, 'Result.*SUCCESS')
self.assertIn('Error Goes Here', response)
self.assertIn('test.go#L123">', response) # stacktrace link works
def test_finished_has_version(self):
"""Test that metadata with version in finished works."""
init_build(self.BUILD_DIR, finished_has_version=True)
self.test_build()
def test_build_no_failures(self):
"""Test that builds with no Junit artifacts work."""
gcs.delete(self.BUILD_DIR + 'artifacts/junit_01.xml')
response = self.get_build_page()
self.assertIn('No Test Failures', response)
def test_show_metadata(self):
write(self.BUILD_DIR + 'started.json',
{
'version': 'v1+56',
'timestamp': 1406535800,
'jenkins-node': 'agent-light-7',
'pull': 'master:1234,35:abcd',
'metadata': {
'master-version': 'm12'
}
})
response = self.get_build_page()
self.assertIn('v1+56', response)
self.assertIn('agent-light-7', response)
self.assertIn('<td>master-version<td>m12', response)
self.assertIn('1234', response)
self.assertIn('abcd', response)
def test_build_show_log(self):
"""Test that builds that failed with no failures show the build log."""
gcs.delete(self.BUILD_DIR + 'artifacts/junit_01.xml')
write(self.BUILD_DIR + 'finished.json',
{'result': 'FAILURE', 'timestamp': 1406536800})
# Unable to fetch build-log.txt, still works.
response = self.get_build_page()
self.assertNotIn('Error lines', response)
self.testbed.init_memcache_stub() # clear cached result
write(self.BUILD_DIR + 'build-log.txt',
u'ERROR: test \u039A\n\n\n\n\n\n\n\n\nblah'.encode('utf8'))
response = self.get_build_page()
self.assertIn('Error lines', response)
self.assertIn('No Test Failures', response)
self.assertIn('ERROR</span>: test', response)
self.assertNotIn('blah', response)
def test_build_optional_log(self):
write(self.BUILD_DIR + 'build-log.txt', 'error or timeout or something')
response = self.get_build_page()
self.assertIn('<a href="?log#log">', response)
self.assertNotIn('timeout', response)
self.assertNotIn('build-log.txt', response)
response = self.get_build_page('?log')
self.assertIn('timeout', response)
self.assertIn('build-log.txt', response)
def test_build_testgrid_links(self):
response = self.get_build_page()
base = 'https://k8s-testgrid.appspot.com/k8s#ajob'
self.assertIn('a href="%s"' % base, response)
option = '&include-filter-by-regex=%5EOverall%24%7CThird'
self.assertIn('a href="%s%s"' % (base, option), response)
def test_build_failure_no_text(self):
# Some failures don't have any associated text.
write(self.BUILD_DIR + 'artifacts/junit_01.xml', '''
<testsuites>
<testsuite tests="1" failures="1" time="3.274" name="k8s.io/test/integration">
<testcase classname="integration" name="TestUnschedulableNodes" time="0.210">
<failure message="Failed" type=""/>
</testcase>
</testsuite>
</testsuites>''')
response = self.get_build_page()
self.assertIn('TestUnschedulableNodes', response)
self.assertIn('junit_01.xml', response)
def test_build_empty_junit(self):
# Sometimes junit files are actually empty (???)
write(self.BUILD_DIR + 'artifacts/junit_01.xml', '')
response = self.get_build_page()
print response
self.assertIn('No Test Failures', response)
def test_parse_pr_path(self):
for prefix, expected in [
('kubernetes-jenkins/logs/e2e', (None, None, None)),
('kubernetes-jenkins/pr-logs/pull/123', ('123', '', 'kubernetes/kubernetes')),
('kubernetes-jenkins/pr-logs/pull/charts/123', ('123', 'charts/', 'kubernetes/charts')),
]:
self.assertEqual(view_build.parse_pr_path(prefix), expected)
def test_build_pr_link(self):
''' The build page for a PR build links to the PR results.'''
build_dir = '/%s/123/e2e/567/' % view_pr.PR_PREFIX
init_build(build_dir)
response = app.get('/build' + build_dir)
self.assertIn('PR #123', response)
self.assertIn('href="/pr/123"', response)
def test_build_pr_link_other(self):
build_dir = '/%s/charts/123/e2e/567/' % view_pr.PR_PREFIX
init_build(build_dir)
response = app.get('/build' + build_dir)
self.assertIn('PR #123', response)
self.assertIn('href="/pr/charts/123"', response)
def test_build_xref(self):
'''Test that builds show issues that reference them.'''
github.models.GHIssueDigest.make(
'org/repo', 123, True, True, [],
{'xrefs': [self.BUILD_DIR[:-1]], 'title': 'an update on testing'}, None).put()
response = app.get('/build' + self.BUILD_DIR)
self.assertIn('PR #123', response)
self.assertIn('an update on testing', response)
self.assertIn('org/repo/issues/123', response)
def test_cache(self):
"""Test that caching works at some level."""
response = self.get_build_page()
gcs.delete(self.BUILD_DIR + 'started.json')
gcs.delete(self.BUILD_DIR + 'finished.json')
response2 = self.get_build_page()
self.assertEqual(str(response), str(response2))
def do_view_build_list_test(self, job_dir='/buck/some-job/', indirect=False):
sta_result = {'timestamp': 12345}
fin_result = {'result': 'SUCCESS'}
for n in xrange(120):
write('%s%d/started.json' % (job_dir, n), sta_result)
write('%s%d/finished.json' % (job_dir, n), fin_result)
if indirect:
for n in xrange(120):
write('%sdirectory/%d.txt' % (job_dir, n), 'gs:/%s%d' % (job_dir, n))
view_target = job_dir if not indirect else job_dir + 'directory/'
builds = view_build.build_list(view_target, None)
self.assertEqual(builds,
[(str(n), '%s%s' % (job_dir, n), sta_result, fin_result)
for n in range(119, 79, -1)])
# test that ?before works
builds = view_build.build_list(view_target, '80')
self.assertEqual(builds,
[(str(n), '%s%s' % (job_dir, n), sta_result, fin_result)
for n in range(79, 39, -1)])
def test_view_build_list_with_latest(self):
write('/buck/some-job/latest-build.txt', '119')
self.do_view_build_list_test()
def test_view_build_list_with_old_latest(self):
# latest-build.txt is a hint -- it will probe newer by looking for started.json
write('/buck/some-job/latest-build.txt', '110')
self.do_view_build_list_test()
def test_view_build_list_no_latest(self):
self.do_view_build_list_test()
def test_view_build_list_indirect_with_latest(self):
write('/buck/some-job/directory/latest-build.txt', '119')
self.do_view_build_list_test(indirect=True)
def test_view_build_list_indirect_no_latest(self):
self.do_view_build_list_test(indirect=True)
def test_build_list_handler(self):
"""Test that the job page shows a list of builds."""
response = app.get('/builds' + os.path.dirname(self.BUILD_DIR[:-1]))
self.assertIn('/1234/">1234', response)
self.assertIn('gcsweb', response)
def test_job_list(self):
"""Test that the job list shows our job."""
response = app.get('/jobs/kubernetes-jenkins/logs')
self.assertIn('somejob/">somejob</a>', response)
| |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from nova import context
from nova import objects
from nova import test
class ContextTestCase(test.NoDBTestCase):
    """Tests for nova.context.RequestContext construction, serialization,
    and the target_cell context manager."""

    def setUp(self):
        super(ContextTestCase, self).setUp()
        # Reset oslo.context's thread-local context cache between tests.
        self.useFixture(o_fixture.ClearRequestContext())

    def test_request_context_elevated(self):
        """elevated() returns an admin copy and leaves the original untouched."""
        user_ctxt = context.RequestContext('111',
                                           '222',
                                           admin=False)
        self.assertFalse(user_ctxt.is_admin)
        admin_ctxt = user_ctxt.elevated()
        self.assertTrue(admin_ctxt.is_admin)
        self.assertIn('admin', admin_ctxt.roles)
        # Elevation must not mutate the source context.
        self.assertFalse(user_ctxt.is_admin)
        self.assertNotIn('admin', user_ctxt.roles)

    def test_request_context_sets_is_admin(self):
        """The 'admin' role implies is_admin."""
        ctxt = context.RequestContext('111',
                                      '222',
                                      roles=['admin', 'weasel'])
        self.assertTrue(ctxt.is_admin)

    def test_request_context_sets_is_admin_by_role(self):
        """The 'administrator' role also implies is_admin."""
        ctxt = context.RequestContext('111',
                                      '222',
                                      roles=['administrator'])
        self.assertTrue(ctxt.is_admin)

    def test_request_context_sets_is_admin_upcase(self):
        """Admin-role matching is case-insensitive."""
        ctxt = context.RequestContext('111',
                                      '222',
                                      roles=['Admin', 'weasel'])
        self.assertTrue(ctxt.is_admin)

    def test_request_context_read_deleted(self):
        """read_deleted accepts valid string values and can be reassigned."""
        ctxt = context.RequestContext('111',
                                      '222',
                                      read_deleted='yes')
        self.assertEqual('yes', ctxt.read_deleted)
        ctxt.read_deleted = 'no'
        self.assertEqual('no', ctxt.read_deleted)

    def test_request_context_read_deleted_invalid(self):
        """Non-string read_deleted values are rejected at init and on set."""
        self.assertRaises(ValueError,
                          context.RequestContext,
                          '111',
                          '222',
                          read_deleted=True)
        ctxt = context.RequestContext('111', '222')
        self.assertRaises(ValueError,
                          setattr,
                          ctxt,
                          'read_deleted',
                          True)

    def test_extra_args_to_context_get_logged(self):
        """Unknown kwargs are accepted but logged through LOG.warning."""
        info = {}

        def fake_warn(log_msg):
            info['log_msg'] = log_msg

        self.stub_out('nova.context.LOG.warning', fake_warn)
        c = context.RequestContext('user', 'project',
                                   extra_arg1='meow', extra_arg2='wuff')
        self.assertTrue(c)
        self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
        self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])

    def test_service_catalog_default(self):
        """service_catalog defaults to [] whether omitted, [], or None."""
        ctxt = context.RequestContext('111', '222')
        self.assertEqual([], ctxt.service_catalog)
        ctxt = context.RequestContext('111', '222',
                                      service_catalog=[])
        self.assertEqual([], ctxt.service_catalog)
        ctxt = context.RequestContext('111', '222',
                                      service_catalog=None)
        self.assertEqual([], ctxt.service_catalog)

    def test_service_catalog_cinder_only(self):
        """Only exact 'volume'-type entries are kept in the service catalog."""
        service_catalog = [
            {u'type': u'compute', u'name': u'nova'},
            {u'type': u's3', u'name': u's3'},
            {u'type': u'image', u'name': u'glance'},
            {u'type': u'volume', u'name': u'cinder'},
            {u'type': u'ec2', u'name': u'ec2'},
            {u'type': u'object-store', u'name': u'swift'},
            {u'type': u'identity', u'name': u'keystone'},
            {u'type': None, u'name': u'S_withouttype'},
            {u'type': u'vo', u'name': u'S_partofvolume'}]
        volume_catalog = [{u'type': u'volume', u'name': u'cinder'}]
        ctxt = context.RequestContext('111', '222',
                                      service_catalog=service_catalog)
        self.assertEqual(volume_catalog, ctxt.service_catalog)

    def test_to_dict_from_dict_no_log(self):
        """Round-tripping to_dict()/from_dict() must not log warnings."""
        warns = []

        def stub_warn(msg, *a, **kw):
            # Mimic LOG.warn's lazy %-interpolation for a single dict arg.
            if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
                a = a[0]
            warns.append(str(msg) % a)

        # NOTE(review): stubs deprecated LOG.warn here while another test stubs
        # LOG.warning — presumably matches what nova.context calls; verify.
        self.stub_out('nova.context.LOG.warn', stub_warn)
        ctxt = context.RequestContext('111',
                                      '222',
                                      roles=['admin', 'weasel'])
        context.RequestContext.from_dict(ctxt.to_dict())
        self.assertEqual(0, len(warns), warns)

    def test_store_when_no_overwrite(self):
        # If no context exists we store one even if overwrite is false
        # (since we are not overwriting anything).
        ctx = context.RequestContext('111',
                                     '222',
                                     overwrite=False)
        self.assertIs(o_context.get_current(), ctx)

    def test_no_overwrite(self):
        # If there is already a context in the cache a new one will
        # not overwrite it if overwrite=False.
        ctx1 = context.RequestContext('111',
                                      '222',
                                      overwrite=True)
        context.RequestContext('333',
                               '444',
                               overwrite=False)
        self.assertIs(o_context.get_current(), ctx1)

    def test_admin_no_overwrite(self):
        # If there is already a context in the cache creating an admin
        # context will not overwrite it.
        ctx1 = context.RequestContext('111',
                                      '222',
                                      overwrite=True)
        context.get_admin_context()
        self.assertIs(o_context.get_current(), ctx1)

    def test_convert_from_rc_to_dict(self):
        """to_dict() emits the full expected serialization of a context."""
        ctx = context.RequestContext(
            111, 222, request_id='req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
            timestamp='2015-03-02T22:31:56.641629')
        values2 = ctx.to_dict()
        expected_values = {'auth_token': None,
                           'domain': None,
                           'instance_lock_checked': False,
                           'is_admin': False,
                           'project_id': 222,
                           'project_domain': None,
                           'project_name': None,
                           'quota_class': None,
                           'read_deleted': 'no',
                           'read_only': False,
                           'remote_address': None,
                           'request_id':
                               'req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
                           'resource_uuid': None,
                           'roles': [],
                           'service_catalog': [],
                           'show_deleted': False,
                           'tenant': 222,
                           'timestamp': '2015-03-02T22:31:56.641629',
                           'user': 111,
                           'user_domain': None,
                           'user_id': 111,
                           'user_identity': '111 222 - - -',
                           'user_name': None}
        self.assertEqual(expected_values, values2)

    def test_convert_from_dict_then_to_dict(self):
        """from_dict() restores a context that serializes back identically."""
        values = {'user': '111',
                  'user_id': '111',
                  'tenant': '222',
                  'project_id': '222',
                  'domain': None, 'project_domain': None,
                  'auth_token': None,
                  'resource_uuid': None, 'read_only': False,
                  'user_identity': '111 222 - - -',
                  'instance_lock_checked': False,
                  'user_name': None, 'project_name': None,
                  'timestamp': '2015-03-02T20:03:59.416299',
                  'remote_address': None, 'quota_class': None,
                  'is_admin': True,
                  'service_catalog': [],
                  'read_deleted': 'no', 'show_deleted': False,
                  'roles': [],
                  'request_id': 'req-956637ad-354a-4bc5-b969-66fd1cc00f50',
                  'user_domain': None}
        ctx = context.RequestContext.from_dict(values)
        self.assertEqual('111', ctx.user)
        self.assertEqual('222', ctx.tenant)
        self.assertEqual('111', ctx.user_id)
        self.assertEqual('222', ctx.project_id)
        values2 = ctx.to_dict()
        self.assertEqual(values, values2)

    @mock.patch('nova.db.create_context_manager')
    def test_target_cell(self, mock_create_ctxt_mgr):
        """target_cell() swaps in the cell DB connection and restores it."""
        mock_create_ctxt_mgr.return_value = mock.sentinel.cm
        ctxt = context.RequestContext('111',
                                      '222',
                                      roles=['admin', 'weasel'])
        # Verify the existing db_connection, if any, is restored
        ctxt.db_connection = mock.sentinel.db_conn
        mapping = objects.CellMapping(database_connection='fake://')
        with context.target_cell(ctxt, mapping):
            self.assertEqual(ctxt.db_connection, mock.sentinel.cm)
        self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Commonly used utils in pandas-on-Spark.
"""
import functools
from contextlib import contextmanager
import os
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
TYPE_CHECKING,
cast,
no_type_check,
overload,
)
import warnings
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame, SparkSession
from pyspark.sql.types import DoubleType
import pandas as pd
from pandas.api.types import is_list_like # type: ignore[attr-defined]
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas._typing import Axis, Label, Name, DataFrameOrSeries
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef.typehints import as_spark_type
if TYPE_CHECKING:
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.series import Series
# Message raised when an operation spans two frames that do not share an
# anchor and the 'compute.ops_on_diff_frames' option is disabled.
ERROR_MESSAGE_CANNOT_COMBINE = (
    "Cannot combine the series or dataframe because it comes from a different dataframe. "
    "In order to allow this operation, enable 'compute.ops_on_diff_frames' option."
)
# Spark SQL conf key that toggles Arrow-accelerated pandas conversion.
SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.pyspark.enabled"
class PandasAPIOnSparkAdviceWarning(Warning):
    """Warning category used for pandas-on-Spark advice messages."""
def same_anchor(
    this: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
    that: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
) -> bool:
    """
    Return True when both objects share the same anchor, i.e. are backed by
    the same Spark frame with identical index levels and index columns.
    """
    from pyspark.pandas.base import IndexOpsMixin
    from pyspark.pandas.frame import DataFrame
    from pyspark.pandas.internal import InternalFrame

    def as_internal(obj):
        # InternalFrame instances are used as-is; anything else must carry one.
        if isinstance(obj, InternalFrame):
            return obj
        assert isinstance(obj, (DataFrame, IndexOpsMixin)), type(obj)
        return obj._internal

    this_internal = as_internal(this)
    that_internal = as_internal(that)

    if this_internal.spark_frame is not that_internal.spark_frame:
        return False
    if this_internal.index_level != that_internal.index_level:
        return False
    return all(
        spark_column_equals(this_scol, that_scol)
        for this_scol, that_scol in zip(
            this_internal.index_spark_columns, that_internal.index_spark_columns
        )
    )
def combine_frames(
    this: "DataFrame",
    *args: DataFrameOrSeries,
    how: str = "full",
    preserve_order_column: bool = False,
) -> "DataFrame":
    """
    This method combines `this` DataFrame with a different `that` DataFrame or
    Series from a different DataFrame.

    It returns a DataFrame that has prefix `this_` and `that_` to distinct
    the columns names from both DataFrames

    It internally performs a join operation which can be expensive in general.
    So, if `compute.ops_on_diff_frames` option is False,
    this method throws an exception.

    :param this: the left-hand side DataFrame.
    :param args: either a single DataFrame, or one or more Series that all
        share a single anchor DataFrame different from `this`.
    :param how: the Spark join type used to combine the two frames.
    :param preserve_order_column: when True, carry `this`'s natural-order
        column through the join so the original row order can be recovered.
    :raises ValueError: when 'compute.ops_on_diff_frames' is disabled, or the
        index names on both sides do not match exactly.
    """
    from pyspark.pandas.config import get_option
    from pyspark.pandas.frame import DataFrame
    from pyspark.pandas.internal import (
        InternalField,
        InternalFrame,
        HIDDEN_COLUMNS,
        NATURAL_ORDER_COLUMN_NAME,
        SPARK_INDEX_NAME_FORMAT,
    )
    from pyspark.pandas.series import Series

    # Normalize `args` into a single `that` DataFrame.
    if all(isinstance(arg, Series) for arg in args):
        assert all(
            same_anchor(arg, args[0]) for arg in args
        ), "Currently only one different DataFrame (from given Series) is supported"
        assert not same_anchor(this, args[0]), "We don't need to combine. All series is in this."
        that = args[0]._psdf[list(args)]
    elif len(args) == 1 and isinstance(args[0], DataFrame):
        assert isinstance(args[0], DataFrame)
        assert not same_anchor(
            this, args[0]
        ), "We don't need to combine. `this` and `that` are same."
        that = args[0]
    else:
        raise AssertionError("args should be single DataFrame or " "single/multiple Series")

    if get_option("compute.ops_on_diff_frames"):

        def resolve(internal: InternalFrame, side: str) -> InternalFrame:
            """Prefix each visible column with `__<side>_` to avoid collisions."""
            rename = lambda col: "__{}_{}".format(side, col)
            internal = internal.resolved_copy
            sdf = internal.spark_frame
            sdf = internal.spark_frame.select(
                *[
                    scol_for(sdf, col).alias(rename(col))
                    for col in sdf.columns
                    if col not in HIDDEN_COLUMNS
                ],
                *HIDDEN_COLUMNS,
            )
            return internal.copy(
                spark_frame=sdf,
                index_spark_columns=[
                    scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
                ],
                index_fields=[
                    field.copy(name=rename(field.name)) for field in internal.index_fields
                ],
                data_spark_columns=[
                    scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
                ],
                data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
            )

        this_internal = resolve(this._internal, "this")
        that_internal = resolve(that._internal, "that")

        this_index_map = list(
            zip(
                this_internal.index_spark_column_names,
                this_internal.index_names,
                this_internal.index_fields,
            )
        )
        that_index_map = list(
            zip(
                that_internal.index_spark_column_names,
                that_internal.index_names,
                that_internal.index_fields,
            )
        )
        assert len(this_index_map) == len(that_index_map)

        join_scols = []
        merged_index_scols = []

        # Note that the order of each element in index_map is guaranteed according to the index
        # level.
        this_and_that_index_map = list(zip(this_index_map, that_index_map))

        this_sdf = this_internal.spark_frame.alias("this")
        that_sdf = that_internal.spark_frame.alias("that")

        # If the same named index is found, that's used.
        index_column_names = []
        index_use_extension_dtypes = []
        for (
            i,
            ((this_column, this_name, this_field), (that_column, that_name, that_field)),
        ) in enumerate(this_and_that_index_map):
            if this_name == that_name:
                # We should merge the Spark columns into one
                # to mimic pandas' behavior.
                this_scol = scol_for(this_sdf, this_column)
                that_scol = scol_for(that_sdf, that_column)
                join_scol = this_scol == that_scol
                join_scols.append(join_scol)

                column_name = SPARK_INDEX_NAME_FORMAT(i)
                index_column_names.append(column_name)
                index_use_extension_dtypes.append(
                    any(field.is_extension_dtype for field in [this_field, that_field])
                )
                # Coalesce: prefer `this`'s index value, fall back to `that`'s.
                merged_index_scols.append(
                    F.when(this_scol.isNotNull(), this_scol).otherwise(that_scol).alias(column_name)
                )
            else:
                raise ValueError("Index names must be exactly matched currently.")

        assert len(join_scols) > 0, "cannot join with no overlapping index names"

        # Equi-join on all merged index columns.
        joined_df = this_sdf.join(that_sdf, on=join_scols, how=how)

        if preserve_order_column:
            order_column = [scol_for(this_sdf, NATURAL_ORDER_COLUMN_NAME)]
        else:
            order_column = []

        joined_df = joined_df.select(
            *merged_index_scols,
            *(
                scol_for(this_sdf, this_internal.spark_column_name_for(label))
                for label in this_internal.column_labels
            ),
            *(
                scol_for(that_sdf, that_internal.spark_column_name_for(label))
                for label in that_internal.column_labels
            ),
            *order_column,
        )

        index_spark_columns = [scol_for(joined_df, col) for col in index_column_names]

        index_columns = set(index_column_names)
        new_data_columns = [
            col
            for col in joined_df.columns
            if col not in index_columns and col != NATURAL_ORDER_COLUMN_NAME
        ]

        # Recompute field metadata from the joined schema.
        schema = joined_df.select(*index_spark_columns, *new_data_columns).schema

        index_fields = [
            InternalField.from_struct_field(struct_field, use_extension_dtypes=use_extension_dtypes)
            for struct_field, use_extension_dtypes in zip(
                schema.fields[: len(index_spark_columns)], index_use_extension_dtypes
            )
        ]
        data_fields = [
            InternalField.from_struct_field(
                struct_field, use_extension_dtypes=field.is_extension_dtype
            )
            for struct_field, field in zip(
                schema.fields[len(index_spark_columns) :],
                this_internal.data_fields + that_internal.data_fields,
            )
        ]

        level = max(this_internal.column_labels_level, that_internal.column_labels_level)

        def fill_label(label: Optional[Label]) -> List:
            """Pad a label to `level` entries so both sides have equal depth."""
            if label is None:
                return ([""] * (level - 1)) + [None]
            else:
                return ([""] * (level - len(label))) + list(label)

        # Prepend a 'this'/'that' level to distinguish the two sides.
        column_labels = [
            tuple(["this"] + fill_label(label)) for label in this_internal.column_labels
        ] + [tuple(["that"] + fill_label(label)) for label in that_internal.column_labels]
        column_label_names = (
            cast(List[Optional[Label]], [None]) * (1 + level - this_internal.column_labels_level)
        ) + this_internal.column_label_names
        return DataFrame(
            InternalFrame(
                spark_frame=joined_df,
                index_spark_columns=index_spark_columns,
                index_names=this_internal.index_names,
                index_fields=index_fields,
                column_labels=column_labels,
                data_spark_columns=[scol_for(joined_df, col) for col in new_data_columns],
                data_fields=data_fields,
                column_label_names=column_label_names,
            )
        )
    else:
        raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
def align_diff_frames(
    resolve_func: Callable[
        ["DataFrame", List[Label], List[Label]], Iterator[Tuple["Series", Label]]
    ],
    this: "DataFrame",
    that: "DataFrame",
    fillna: bool = True,
    how: str = "full",
    preserve_order_column: bool = False,
) -> "DataFrame":
    """
    This method aligns two different DataFrames with a given `func`. Columns are resolved and
    handled within the given `func`.
    To use this, `compute.ops_on_diff_frames` should be True, for now.

    :param resolve_func: Takes aligned (joined) DataFrame, the column of the current DataFrame, and
        the column of another DataFrame. It returns an iterable that produces Series.

        >>> from pyspark.pandas.config import set_option, reset_option
        >>>
        >>> set_option("compute.ops_on_diff_frames", True)
        >>>
        >>> psdf1 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
        >>> psdf2 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
        >>>
        >>> def func(psdf, this_column_labels, that_column_labels):
        ...    psdf  # conceptually this is A + B.
        ...
        ...    # Within this function, Series from A or B can be performed against `psdf`.
        ...    this_label = this_column_labels[0]  # this is ('a',) from psdf1.
        ...    that_label = that_column_labels[0]  # this is ('a',) from psdf2.
        ...    new_series = (psdf[this_label] - psdf[that_label]).rename(str(this_label))
        ...
        ...    # This new series will be placed in new DataFrame.
        ...    yield (new_series, this_label)
        >>>
        >>>
        >>> align_diff_frames(func, psdf1, psdf2).sort_index()
           a
        0  0
        1  0
        2  0
        3  0
        4  0
        5  0
        6  0
        7  0
        8  0
        >>> reset_option("compute.ops_on_diff_frames")

    :param this: a DataFrame to align
    :param that: another DataFrame to align
    :param fillna: If True, it fills missing values in non-common columns in both `this` and `that`.
        Otherwise, it returns as are.
    :param how: join way. In addition, it affects how `resolve_func` resolves the column conflict.
        - full: `resolve_func` should resolve only common columns from 'this' and 'that' DataFrames.
            For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` and
            'that_columns' in this function are B, C and B, C.
        - left: `resolve_func` should resolve columns including that columns.
            For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` is
            B, C but `that_columns` are B, C, D.
        - inner: Same as 'full' mode; however, internally performs inner join instead.
    :return: Aligned DataFrame
    """
    from pyspark.pandas.frame import DataFrame

    assert how == "full" or how == "left" or how == "inner"

    this_column_labels = this._internal.column_labels
    that_column_labels = that._internal.column_labels
    common_column_labels = set(this_column_labels).intersection(that_column_labels)

    # 1. Perform the join given two dataframes.
    combined = combine_frames(this, that, how=how, preserve_order_column=preserve_order_column)

    # 2. Apply the given function to transform the columns in a batch and keep the new columns.
    combined_column_labels = combined._internal.column_labels

    that_columns_to_apply: List[Label] = []
    this_columns_to_apply: List[Label] = []
    additional_that_columns: List[Label] = []
    columns_to_keep: List[Union[Series, Column]] = []
    column_labels_to_keep: List[Label] = []

    # combine_frames prefixes every label with a 'this'/'that' level; pair up
    # the common labels from both sides and classify the rest.
    for combined_label in combined_column_labels:
        for common_label in common_column_labels:
            if combined_label == tuple(["this", *common_label]):
                this_columns_to_apply.append(combined_label)
                break
            elif combined_label == tuple(["that", *common_label]):
                that_columns_to_apply.append(combined_label)
                break
        else:
            if how == "left" and combined_label in [
                tuple(["that", *label]) for label in that_column_labels
            ]:
                # In this case, we will drop `that_columns` in `columns_to_keep` but passes
                # it later to `func`. `func` should resolve it.
                # Note that adding this into a separate list (`additional_that_columns`)
                # is intentional so that `this_columns` and `that_columns` can be paired.
                additional_that_columns.append(combined_label)
            elif fillna:
                columns_to_keep.append(SF.lit(None).cast(DoubleType()).alias(str(combined_label)))
                column_labels_to_keep.append(combined_label)
            else:
                columns_to_keep.append(combined._psser_for(combined_label))
                column_labels_to_keep.append(combined_label)

    that_columns_to_apply += additional_that_columns

    # Should extract columns to apply and do it in a batch in case
    # it adds new columns for example.
    columns_applied: List[Union[Series, Column]]
    column_labels_applied: List[Label]
    if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0:
        psser_set, column_labels_set = zip(
            *resolve_func(combined, this_columns_to_apply, that_columns_to_apply)
        )
        columns_applied = list(psser_set)
        column_labels_applied = list(column_labels_set)
    else:
        columns_applied = []
        column_labels_applied = []

    applied: DataFrame = DataFrame(
        combined._internal.with_new_columns(
            columns_applied + columns_to_keep,
            column_labels=column_labels_applied + column_labels_to_keep,
        )
    )

    # 3. Restore the names back and deduplicate columns.
    this_labels: Dict[Label, Label] = {}
    # Add columns in an order of its original frame.
    for this_label in this_column_labels:
        for new_label in applied._internal.column_labels:
            if new_label[1:] not in this_labels and this_label == new_label[1:]:
                this_labels[new_label[1:]] = new_label

    # After that, we will add the rest columns.
    other_labels: Dict[Label, Label] = {}
    for new_label in applied._internal.column_labels:
        if new_label[1:] not in this_labels:
            other_labels[new_label[1:]] = new_label

    # Drop the temporary 'this'/'that' level from the final column index.
    psdf = applied[list(this_labels.values()) + list(other_labels.values())]
    psdf.columns = psdf.columns.droplevel()
    return psdf
def is_testing() -> bool:
    """Return True when the SPARK_TESTING environment variable is set."""
    return os.environ.get("SPARK_TESTING") is not None
def default_session() -> SparkSession:
    """Return the active SparkSession, creating a default one if none exists."""
    active = SparkSession.getActiveSession()
    if active is not None:
        return active
    # No active session: build one with the standard pandas-on-Spark app name.
    return SparkSession.builder.appName("pandas-on-Spark").getOrCreate()
@contextmanager
def sql_conf(pairs: Dict[str, Any], *, spark: Optional[SparkSession] = None) -> Iterator[None]:
    """
    Context manager that applies the given Spark SQL configuration *pairs*
    and restores the previous values (unsetting keys that had none) on exit.
    """
    assert isinstance(pairs, dict), "pairs should be a dictionary."
    if spark is None:
        spark = default_session()

    # Snapshot the current values before mutating anything.
    previous = {key: spark.conf.get(key, None) for key in pairs}
    for key, value in pairs.items():
        spark.conf.set(key, value)
    try:
        yield
    finally:
        for key, old in previous.items():
            if old is None:
                spark.conf.unset(key)
            else:
                spark.conf.set(key, old)
def validate_arguments_and_invoke_function(
    pobj: Union[pd.DataFrame, pd.Series],
    pandas_on_spark_func: Callable,
    pandas_func: Callable,
    input_args: Dict,
) -> Any:
    """
    Invokes a pandas function.

    This is created because different versions of pandas support different parameters, and as a
    result when we code against the latest version, our users might get a confusing
    "got an unexpected keyword argument" error if they are using an older version of pandas.

    This function validates all the arguments, removes the ones that are not supported if they
    are simply the default value (i.e. most likely the user didn't explicitly specify it). It
    throws a TypeError if the user explicitly specify an argument that is not supported by the
    pandas version available.

    For example usage, look at DataFrame.to_html().

    :param pobj: the pandas DataFrame or Series to operate on
    :param pandas_on_spark_func: pandas-on-Spark function, used to get default parameter values
    :param pandas_func: pandas function, used to check whether pandas supports all the arguments
    :param input_args: arguments to pass to the pandas function, often created by using locals().
        Make sure locals() call is at the top of the function so it captures only
        input parameters, rather than local variables.
    :return: whatever pandas_func returns
    """
    import inspect

    # Work on a copy: the input is usually locals() and we must not mutate it.
    args = dict(input_args)
    del args["self"]

    if "kwargs" in args:
        # Flatten an exploded **kwargs dict into the argument mapping.
        extra = args.pop("kwargs")
        args.update(extra)

    spark_params = inspect.signature(pandas_on_spark_func).parameters
    supported = inspect.signature(pandas_func).parameters
    for name, param in spark_params.items():
        if name in supported:
            continue
        if args[name] == param.default:
            # Unsupported but left at its default: silently drop it.
            del args[name]
        else:
            raise TypeError(
                "The pandas version [%s] available does not support parameter "
                "'%s' for function '%s'."
                % (pd.__version__, name, pandas_func.__name__)
            )

    args["self"] = pobj
    return pandas_func(**args)
@no_type_check
def lazy_property(fn: Callable[[Any], Any]) -> property:
    """
    Decorator that makes a property lazy-evaluated: the wrapped function runs
    once per instance and its result is cached on the instance; deleting the
    property clears the cache.

    Copied from https://stevenloria.com/lazy-properties/
    """
    slot = "_lazy_" + fn.__name__

    @functools.wraps(fn)
    def getter(self):
        try:
            return getattr(self, slot)
        except AttributeError:
            value = fn(self)
            setattr(self, slot, value)
            return value

    def remover(self):
        if hasattr(self, slot):
            delattr(self, slot)

    return property(getter).deleter(remover)
def scol_for(sdf: SparkDataFrame, column_name: str) -> Column:
    """Return the Spark Column named *column_name*, backtick-escaped."""
    escaped = "`{}`".format(column_name)
    return sdf[escaped]
def column_labels_level(column_labels: List[Label]) -> int:
    """Return the single common level (tuple length) of the column labels."""
    if not column_labels:
        return 1
    # Every label must have the same depth; None counts as depth 1.
    levels = {1 if label is None else len(label) for label in column_labels}
    assert len(levels) == 1, levels
    return next(iter(levels))
def name_like_string(name: Optional[Name]) -> str:
    """
    Return the name-like strings from str or tuple of str

    Examples
    --------
    >>> name = 'abc'
    >>> name_like_string(name)
    'abc'

    >>> name = ('abc',)
    >>> name_like_string(name)
    'abc'

    >>> name = ('a', 'b', 'c')
    >>> name_like_string(name)
    '(a, b, c)'
    """
    if name is None:
        parts = ("__none__",)
    elif is_list_like(name):
        parts = tuple(str(n) for n in name)
    else:
        parts = (str(name),)
    if len(parts) > 1:
        return "(%s)" % ", ".join(parts)
    return parts[0]
def is_name_like_tuple(value: Any, allow_none: bool = True, check_type: bool = False) -> bool:
    """
    Check the given tuple is be able to be used as a name.

    Examples
    --------
    >>> is_name_like_tuple(('abc',))
    True
    >>> is_name_like_tuple((1,))
    True
    >>> is_name_like_tuple(('abc', 1, None))
    True
    >>> is_name_like_tuple(('abc', 1, None), check_type=True)
    True
    >>> is_name_like_tuple((1.0j,))
    True
    >>> is_name_like_tuple(tuple())
    False
    >>> is_name_like_tuple((list('abc'),))
    False
    >>> is_name_like_tuple(('abc', 1, None), allow_none=False)
    False
    >>> is_name_like_tuple((1.0j,), check_type=True)
    False
    """
    if value is None:
        return allow_none
    if not isinstance(value, tuple) or len(value) == 0:
        return False
    if not allow_none and any(v is None for v in value):
        return False
    # List-likes and slices cannot serve as label components.
    if any(is_list_like(v) or isinstance(v, slice) for v in value):
        return False
    if check_type:
        return all(
            v is None or as_spark_type(type(v), raise_error=False) is not None for v in value
        )
    return True
def is_name_like_value(
    value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
    """
    Check the given value is like a name.

    Examples
    --------
    >>> is_name_like_value('abc')
    True
    >>> is_name_like_value(1)
    True
    >>> is_name_like_value(None)
    True
    >>> is_name_like_value(('abc',))
    True
    >>> is_name_like_value(1.0j)
    True
    >>> is_name_like_value(list('abc'))
    False
    >>> is_name_like_value(None, allow_none=False)
    False
    >>> is_name_like_value(('abc',), allow_tuple=False)
    False
    >>> is_name_like_value(1.0j, check_type=True)
    False
    """
    if value is None:
        return allow_none
    if isinstance(value, tuple):
        # Tuples are multi-level labels; delegate their validation.
        if not allow_tuple:
            return False
        return is_name_like_tuple(value, allow_none=allow_none, check_type=check_type)
    if is_list_like(value) or isinstance(value, slice):
        return False
    if check_type:
        return as_spark_type(type(value), raise_error=False) is not None
    return True
def validate_axis(axis: Optional[Axis] = 0, none_axis: int = 0) -> int:
    """Normalize *axis* (None/'index'/'columns'/0/1) to a numeric axis."""
    # Translate symbolic forms; unrecognized values pass through unchanged so
    # the validation below can reject them.
    numeric = cast(Dict[Optional[Axis], int], {None: none_axis, "index": 0, "columns": 1}).get(
        axis, axis
    )
    if numeric not in (none_axis, 0, 1):
        raise ValueError("No axis named {0}".format(numeric))
    return cast(int, numeric)
def validate_bool_kwarg(value: Any, arg_name: str) -> Optional[bool]:
    """Ensure that the argument passed in *arg_name* is a bool (or None)."""
    if value is None or isinstance(value, bool):
        return value
    raise TypeError(
        'For argument "{}" expected type bool, received '
        "type {}.".format(arg_name, type(value).__name__)
    )
def validate_how(how: str) -> str:
    """Normalize and validate a join ``how`` value, mapping 'outer' to 'full'."""
    if how == "full":
        # Spark's native spelling; nudge users toward the pandas-compatible one.
        warnings.warn(
            "Warning: While pandas-on-Spark will accept 'full', you should use 'outer' "
            + "instead to be compatible with the pandas merge API",
            UserWarning,
        )
    if how == "outer":
        # 'outer' in pandas equals 'full' in Spark
        how = "full"
    if how in ("inner", "left", "right", "full"):
        return how
    raise ValueError(
        "The 'how' parameter has to be amongst the following values: ",
        "['inner', 'left', 'right', 'outer']",
    )
def validate_mode(mode: str) -> str:
    """Normalize and validate a write ``mode``, mapping pandas modes to Spark's."""
    if mode in ("w", "w+"):
        # 'w' in pandas equals 'overwrite' in Spark
        # '+' is meaningless for writing methods, but pandas just pass it as 'w'.
        mode = "overwrite"
    elif mode in ("a", "a+"):
        # 'a' in pandas equals 'append' in Spark
        # '+' is meaningless for writing methods, but pandas just pass it as 'a'.
        mode = "append"
    allowed = (
        "w",
        "a",
        "w+",
        "a+",
        "overwrite",
        "append",
        "ignore",
        "error",
        "errorifexists",
    )
    if mode not in allowed:
        raise ValueError(
            "The 'mode' parameter has to be amongst the following values: ",
            "['w', 'a', 'w+', 'a+', 'overwrite', 'append', 'ignore', 'error', 'errorifexists']",
        )
    return mode
# Overload: a plain Spark DataFrame takes and returns a plain column-name string.
@overload
def verify_temp_column_name(df: SparkDataFrame, column_name_or_label: str) -> str:
    ...
# Overload: a pandas-on-Spark DataFrame takes a name and returns a column label tuple.
@overload
def verify_temp_column_name(df: "DataFrame", column_name_or_label: Name) -> Label:
    ...
def verify_temp_column_name(
    df: Union["DataFrame", SparkDataFrame], column_name_or_label: Union[str, Name]
) -> Union[str, Label]:
    """
    Verify that the given column name does not exist in the given pandas-on-Spark or
    Spark DataFrame.
    The temporary column names should start and end with `__`. In addition, `column_name_or_label`
    expects a single string, or column labels when `df` is a pandas-on-Spark DataFrame.
    >>> psdf = ps.DataFrame({("x", "a"): ['a', 'b', 'c']})
    >>> psdf["__dummy__"] = 0
    >>> psdf[("", "__dummy__")] = 1
    >>> psdf # doctest: +NORMALIZE_WHITESPACE
    x __dummy__
    a __dummy__
    0 a 0 1
    1 b 0 1
    2 c 0 1
    >>> verify_temp_column_name(psdf, '__tmp__')
    ('__tmp__', '')
    >>> verify_temp_column_name(psdf, ('', '__tmp__'))
    ('', '__tmp__')
    >>> verify_temp_column_name(psdf, '__dummy__')
    Traceback (most recent call last):
    ...
    AssertionError: ... `(__dummy__, )` ...
    >>> verify_temp_column_name(psdf, ('', '__dummy__'))
    Traceback (most recent call last):
    ...
    AssertionError: ... `(, __dummy__)` ...
    >>> verify_temp_column_name(psdf, 'dummy')
    Traceback (most recent call last):
    ...
    AssertionError: ... should be empty or start and end with `__`: ('dummy', '')
    >>> verify_temp_column_name(psdf, ('', 'dummy'))
    Traceback (most recent call last):
    ...
    AssertionError: ... should be empty or start and end with `__`: ('', 'dummy')
    >>> internal = psdf._internal.resolved_copy
    >>> sdf = internal.spark_frame
    >>> sdf.select(internal.data_spark_columns).show() # doctest: +NORMALIZE_WHITESPACE
    +------+---------+-------------+
    |(x, a)|__dummy__|(, __dummy__)|
    +------+---------+-------------+
    | a| 0| 1|
    | b| 0| 1|
    | c| 0| 1|
    +------+---------+-------------+
    >>> verify_temp_column_name(sdf, '__tmp__')
    '__tmp__'
    >>> verify_temp_column_name(sdf, '__dummy__')
    Traceback (most recent call last):
    ...
    AssertionError: ... `__dummy__` ... '(x, a)', '__dummy__', '(, __dummy__)', ...
    >>> verify_temp_column_name(sdf, ('', '__dummy__'))
    Traceback (most recent call last):
    ...
    AssertionError: <class 'tuple'>
    >>> verify_temp_column_name(sdf, 'dummy')
    Traceback (most recent call last):
    ...
    AssertionError: ... should start and end with `__`: dummy
    """
    # Imported locally to avoid a circular import with pyspark.pandas.frame.
    from pyspark.pandas.frame import DataFrame
    if isinstance(df, DataFrame):
        if isinstance(column_name_or_label, str):
            column_name = column_name_or_label
            # Pad a bare string out to a full multi-level label so it can be
            # compared against the DataFrame's column labels below.
            level = df._internal.column_labels_level
            column_name_or_label = tuple([column_name_or_label] + ([""] * (level - 1)))
        else:
            column_name = name_like_string(column_name_or_label)
        # The label needs at least one non-empty level, and every non-empty
        # level must be wrapped in double underscores.
        assert any(len(label) > 0 for label in column_name_or_label) and all(
            label == "" or (label.startswith("__") and label.endswith("__"))
            for label in column_name_or_label
        ), "The temporary column name should be empty or start and end with `__`: {}".format(
            column_name_or_label
        )
        assert all(
            column_name_or_label != label for label in df._internal.column_labels
        ), "The given column name `{}` already exists in the pandas-on-Spark DataFrame: {}".format(
            name_like_string(column_name_or_label), df.columns
        )
        # Fall through to also check the resolved Spark frame's column names.
        df = df._internal.resolved_copy.spark_frame
    else:
        assert isinstance(column_name_or_label, str), type(column_name_or_label)
        assert column_name_or_label.startswith("__") and column_name_or_label.endswith(
            "__"
        ), "The temporary column name should start and end with `__`: {}".format(
            column_name_or_label
        )
        column_name = column_name_or_label
    assert isinstance(df, SparkDataFrame), type(df)
    assert (
        column_name not in df.columns
    ), "The given column name `{}` already exists in the Spark DataFrame: {}".format(
        column_name, df.columns
    )
    return column_name_or_label
def spark_column_equals(left: Column, right: Column) -> bool:
    """
    Check both `left` and `right` have the same expressions.
    >>> spark_column_equals(SF.lit(0), SF.lit(0))
    True
    >>> spark_column_equals(SF.lit(0) + 1, SF.lit(0) + 1)
    True
    >>> spark_column_equals(SF.lit(0) + 1, SF.lit(0) + 2)
    False
    >>> sdf1 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
    >>> spark_column_equals(sdf1["x"] + 1, sdf1["x"] + 1)
    True
    >>> sdf2 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
    >>> spark_column_equals(sdf1["x"] + 1, sdf2["x"] + 1)
    False
    """
    # Delegate to the underlying JVM Column's equals() via the py4j handle.
    return left._jc.equals(right._jc) # type: ignore[operator]
def compare_null_first(
    left: Column,
    right: Column,
    comp: Callable[[Column, Column], Column],
) -> Column:
    """Build a comparison column that treats nulls as ordered first.

    True when both sides are non-null and *comp* holds, or when only the
    left side is null.
    """
    both_valid = left.isNotNull() & right.isNotNull() & comp(left, right)
    null_before_value = left.isNull() & right.isNotNull()
    return both_valid | null_before_value
def compare_null_last(
    left: Column,
    right: Column,
    comp: Callable[[Column, Column], Column],
) -> Column:
    """Build a comparison column that treats nulls as ordered last.

    True when both sides are non-null and *comp* holds, or when only the
    right side is null.
    """
    both_valid = left.isNotNull() & right.isNotNull() & comp(left, right)
    value_before_null = left.isNotNull() & right.isNull()
    return both_valid | value_before_null
def compare_disallow_null(
    left: Column,
    right: Column,
    comp: Callable[[Column, Column], Column],
) -> Column:
    """Build a comparison column that is True only when both sides are
    non-null and *comp* holds."""
    both_valid = left.isNotNull() & right.isNotNull()
    return both_valid & comp(left, right)
def compare_allow_null(
    left: Column,
    right: Column,
    comp: Callable[[Column, Column], Column],
) -> Column:
    """Build a comparison column that is True whenever either side is null
    or *comp* holds."""
    either_null = left.isNull() | right.isNull()
    return either_null | comp(left, right)
def log_advice(message: str) -> None:
    """
    Display advisory logs for functions to be aware of when using pandas API on Spark
    for the existing pandas/PySpark users who may not be familiar with distributed environments
    or the behavior of pandas.
    """
    # Emitted as a warning rather than a log record so it is routed through
    # the standard warning filters.
    warnings.warn(message, PandasAPIOnSparkAdviceWarning)
def _test() -> None:
    """Run this module's doctests against a local Spark session."""
    import os
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.pandas.utils
    # The doctests assume the current working directory is the Spark home.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.pandas.utils.__dict__.copy()
    globs["ps"] = pyspark.pandas
    spark = (
        SparkSession.builder.master("local[4]").appName("pyspark.pandas.utils tests").getOrCreate()
    )
    (failure_count, test_count) = doctest.testmod(
        pyspark.pandas.utils,
        globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    spark.stop()
    # Propagate doctest failures as a non-zero exit code.
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    # Run the doctest suite when executed as a script.
    _test()
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from testtools import matchers
from neutron.common import exceptions as exc
import neutron.db.api as db
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_gre
from neutron.tests import base
# Endpoint IP addresses exercised by the tunnel-endpoint tests.
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
# Inclusive GRE tunnel-id range used to seed the driver's allocations.
TUN_MIN = 100
TUN_MAX = 109
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
# A shifted range used to exercise re-synchronization of the allocations.
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
class GreTypeTest(base.BaseTestCase):
    """Unit tests for the ML2 GRE tunnel type driver (single-range)."""
    def setUp(self):
        super(GreTypeTest, self).setUp()
        ml2_db.initialize()
        self.driver = type_gre.GreTypeDriver()
        self.driver.gre_id_ranges = TUNNEL_RANGES
        self.driver._sync_gre_allocations()
        self.session = db.get_session()
        self.addCleanup(db.clear_db)
    def test_validate_provider_segment(self):
        # A GRE segment may not carry a physical network, and must carry a
        # segmentation id; both malformed variants must be rejected.
        segment = {api.NETWORK_TYPE: 'gre',
                   api.PHYSICAL_NETWORK: 'phys_net',
                   api.SEGMENTATION_ID: None}
        with testtools.ExpectedException(exc.InvalidInput):
            self.driver.validate_provider_segment(segment)
        segment[api.PHYSICAL_NETWORK] = None
        with testtools.ExpectedException(exc.InvalidInput):
            self.driver.validate_provider_segment(segment)
    def test_sync_tunnel_allocations(self):
        # Ids inside the configured range exist and are unallocated; ids just
        # outside the range do not exist at all.
        self.assertIsNone(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MIN - 1))
        )
        self.assertFalse(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MIN)).allocated
        )
        self.assertFalse(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MIN + 1)).allocated
        )
        self.assertFalse(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MAX - 1)).allocated
        )
        self.assertFalse(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MAX)).allocated
        )
        self.assertIsNone(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MAX + 1))
        )
        # After shifting the configured range and re-syncing, the allocation
        # table must track the new boundaries.
        self.driver.gre_id_ranges = UPDATED_TUNNEL_RANGES
        self.driver._sync_gre_allocations()
        self.assertIsNone(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MIN + 5 - 1))
        )
        self.assertFalse(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MIN + 5)).allocated
        )
        self.assertFalse(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MIN + 5 + 1)).allocated
        )
        self.assertFalse(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MAX + 5 - 1)).allocated
        )
        self.assertFalse(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MAX + 5)).allocated
        )
        self.assertIsNone(
            self.driver.get_gre_allocation(self.session,
                                           (TUN_MAX + 5 + 1))
        )
    def test_reserve_provider_segment(self):
        # Reserving an in-range id marks it allocated; reserving it twice
        # raises TunnelIdInUse; releasing clears the flag.
        segment = {api.NETWORK_TYPE: 'gre',
                   api.PHYSICAL_NETWORK: 'None',
                   api.SEGMENTATION_ID: 101}
        self.driver.reserve_provider_segment(self.session, segment)
        alloc = self.driver.get_gre_allocation(self.session,
                                               segment[api.SEGMENTATION_ID])
        self.assertTrue(alloc.allocated)
        with testtools.ExpectedException(exc.TunnelIdInUse):
            self.driver.reserve_provider_segment(self.session, segment)
        self.driver.release_segment(self.session, segment)
        alloc = self.driver.get_gre_allocation(self.session,
                                               segment[api.SEGMENTATION_ID])
        self.assertFalse(alloc.allocated)
        # An out-of-range id is created on reserve and removed on release.
        segment[api.SEGMENTATION_ID] = 1000
        self.driver.reserve_provider_segment(self.session, segment)
        alloc = self.driver.get_gre_allocation(self.session,
                                               segment[api.SEGMENTATION_ID])
        self.assertTrue(alloc.allocated)
        self.driver.release_segment(self.session, segment)
        alloc = self.driver.get_gre_allocation(self.session,
                                               segment[api.SEGMENTATION_ID])
        self.assertIsNone(alloc)
    def test_allocate_tenant_segment(self):
        # Exhaust the pool, then verify allocation fails, then verify a
        # released id becomes allocatable again.
        # NOTE(review): xrange implies Python 2; switch to range if porting.
        tunnel_ids = set()
        for x in xrange(TUN_MIN, TUN_MAX + 1):
            segment = self.driver.allocate_tenant_segment(self.session)
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.GreaterThan(TUN_MIN - 1))
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.LessThan(TUN_MAX + 1))
            tunnel_ids.add(segment[api.SEGMENTATION_ID])
        segment = self.driver.allocate_tenant_segment(self.session)
        self.assertIsNone(segment)
        segment = {api.NETWORK_TYPE: 'gre',
                   api.PHYSICAL_NETWORK: 'None',
                   api.SEGMENTATION_ID: tunnel_ids.pop()}
        self.driver.release_segment(self.session, segment)
        segment = self.driver.allocate_tenant_segment(self.session)
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.GreaterThan(TUN_MIN - 1))
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.LessThan(TUN_MAX + 1))
        tunnel_ids.add(segment[api.SEGMENTATION_ID])
        for tunnel_id in tunnel_ids:
            segment[api.SEGMENTATION_ID] = tunnel_id
            self.driver.release_segment(self.session, segment)
    def test_gre_endpoints(self):
        tun_1 = self.driver.add_endpoint(TUNNEL_IP_ONE)
        tun_2 = self.driver.add_endpoint(TUNNEL_IP_TWO)
        self.assertEqual(TUNNEL_IP_ONE, tun_1.ip_address)
        self.assertEqual(TUNNEL_IP_TWO, tun_2.ip_address)
        # Get all the endpoints
        endpoints = self.driver.get_endpoints()
        for endpoint in endpoints:
            self.assertIn(endpoint['ip_address'],
                          [TUNNEL_IP_ONE, TUNNEL_IP_TWO])
class GreTypeMultiRangeTest(base.BaseTestCase):
    """Unit tests for the GRE type driver with multiple tunnel-id ranges."""
    # Two disjoint two-id ranges.
    TUN_MIN0 = 100
    TUN_MAX0 = 101
    TUN_MIN1 = 200
    TUN_MAX1 = 201
    TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]
    def setUp(self):
        super(GreTypeMultiRangeTest, self).setUp()
        ml2_db.initialize()
        self.driver = type_gre.GreTypeDriver()
        self.driver.gre_id_ranges = self.TUNNEL_MULTI_RANGES
        self.driver._sync_gre_allocations()
        self.session = db.get_session()
        self.addCleanup(db.clear_db)
    def test_release_segment(self):
        # Allocate all four ids across both ranges, release them, and check
        # every boundary id ends up unallocated.
        segments = [self.driver.allocate_tenant_segment(self.session)
                    for i in range(4)]
        # Release them in random order. No special meaning.
        for i in (0, 2, 1, 3):
            self.driver.release_segment(self.session, segments[i])
        for key in (self.TUN_MIN0, self.TUN_MAX0,
                    self.TUN_MIN1, self.TUN_MAX1):
            alloc = self.driver.get_gre_allocation(self.session, key)
            self.assertFalse(alloc.allocated)
| |
#Copyright 2009 Almero Gouws, <14366037@sun.ac.za>
"""
This module supplies functions used to implement graph theory.
"""
__docformat__ = 'restructuredtext'
import numpy as np
from dfs import dfs
from general import find, issubset
def parents(adj_mat, i):
    """
    Returns the indices of the parent nodes of the input node, i, in the
    given adjacency matrix.

    Parameters
    ----------
    adj_mat: Numpy ndarray
        Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
        edge from node i to node j.
    i: Int
        The index of the node whose parents are to be found.
    """
    # A sparse matrix needs an explicit densification of column i first.
    if type(adj_mat) != np.ndarray:
        col_mask = np.array((adj_mat[:, i].todense() == 1)).squeeze()
    else:
        col_mask = np.array((adj_mat[:, i] == 1))
    # Parents are exactly the positions of the ones in column i, in
    # ascending order.
    return np.array(np.flatnonzero(col_mask), dtype=np.int32)
def children(adj_mat, i):
    """
    Returns the indices of the children nodes of the input node, i, in the
    given adjacency matrix.

    Parameters
    ----------
    adj_mat: Numpy ndarray
        Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
        edge from node i to node j.
    i: Int
        The index of the node whose children are to be found.
    """
    # Sparse matrices are row-sliced efficiently in CSR form; convert back
    # to CSC afterwards so callers see the matrix in its usual layout.
    if type(adj_mat) != np.ndarray:
        adj_mat = adj_mat.tocsr()
        row_mask = np.array((adj_mat[i, :].todense() == 1)).squeeze()
        adj_mat = adj_mat.tocsc()
    else:
        row_mask = np.array((adj_mat[i, :] == 1))
    # Children are the positions of the ones in row i, ascending, as a list.
    return [child for child in np.flatnonzero(row_mask)]
def neighbours(adj_mat, i):
    """
    Returns the indices of the neighbour nodes of the input node, i, in the
    given adjacency matrix.

    Parameters
    ----------
    adj_mat: Numpy ndarray
        Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
        edge from node i to node j.
    i: Int
        The index of the node whose neighbours are to be found.
    """
    kid_arr = np.array(children(adj_mat, i))
    parent_arr = np.array(parents(adj_mat, i))
    # NOTE(review): the two branches return different types (ndarray vs
    # list); callers appear to cope — confirm before unifying.
    if issubset(kid_arr, parent_arr) and issubset(parent_arr, kid_arr):
        return kid_arr
    return np.hstack((kid_arr, parent_arr)).tolist()
def family(adj_mat, i):
    """
    Returns the indices of the family nodes of the input node, i, in the
    given adjacency matrix.

    Parameters
    ----------
    adj_mat: Numpy ndarray
        Adjacency matrix. If adj_mat[i, j] = 1, there exists a directed
        edge from node i to node j.
    i: Int
        The index of the node whose family is to be found.
    """
    # The family of a node is all of its parents plus the node itself.
    return np.hstack([parents(adj_mat, i), i])
def topological_sort(A):
    """
    Returns the indices of the nodes in the graph defined by the adjacency
    matrix A in topological order (Kahn's algorithm).

    Parameters
    ----------
    A: Scipy sparse csc_matrix
        Adjacency matrix. If A[i, j] = 1, there exists a directed edge from
        node i to node j.
    """
    n = A.shape[0]
    in_deg = []
    pending = []
    # Seed the queue with every node that has no parents.
    for node in range(0, n):
        in_deg.append(len(parents(A, node)))
        if in_deg[node] == 0:
            pending.append(node)
    # Popping from the end after the reverse yields the smallest index
    # first; freshly freed nodes are queued at the front.
    pending.reverse()
    order = []
    while len(pending) != 0:
        v = pending.pop()
        order.append(v)
        for child in children(A, v):
            in_deg[child] = in_deg[child] - 1
            if in_deg[child] == 0:
                pending.insert(0, child)
    return order
def moralize(G):
    """
    Converts a directed graph to an undirected graph, by connecting the
    parents of every node together.

    Parameters
    ----------
    G: Numpy ndarray
        Adjacency matrix. If G[i, j] = 1, there exists a directed edge from
        node i to node j.
    """
    M = G.copy()
    n = M.shape[0]
    # Turn every node's family (itself plus its parents) into a clique.
    for node in range(0, n):
        fam = family(G, node)
        for member in fam:
            M[member, fam] = 1
    # Make sure no node has an edge to itself.
    M = setdiag(M, 0)
    # Edges present in M but absent from G are the added "moral" edges.
    moral_edges = np.triu(M - G, 0)
    return [M, moral_edges]
def setdiag(G, val):
    """
    Sets the diagonal elements of a matrix to a specified value, in place.

    Parameters
    ----------
    G: A 2D matrix or array.
        The matrix to modify.
    val: Int or float
        The value to which the diagonal of 'G' will be set.
    """
    for idx in range(0, G.shape[0]):
        G[idx, idx] = val
    return G
def graph_to_jtree(model_graph, ns):
    """
    Triangulates a moral graph and obtains a junction tree from the cliques
    of the triangulated graph by computing their maximum spanning tree.

    Parameters
    ----------
    model_graph: Numpy ndarray
        MG[i,j] = 1 iff there is an edge between node i and node j.
    ns: List
        The node sizes, where ns[i] = the number of discrete values node i
        can take on [1 if observed].

    Output
    ------
    jtree: Numpy ndarray
        jtree[i,j] = 1 iff there is an edge between clique i and clique j.
    root: Int
        The index of the root clique.
    cliques: List
        cliques[i] = the indices of the nodes in clique i.
    B: Numpy ndarray
        B[i,j] = 1 iff node j occurs in clique i.
    w: List
        w[i] = weight of clique i.
    """
    # No self-loops are allowed in the moral graph.
    model_graph = setdiag(model_graph, 0)
    # Greedily pick an elimination order, triangulate along it, then build
    # the junction tree from the resulting cliques.
    elim_order = best_first_elim_order(model_graph.copy(), ns)
    [triangulated_graph, cliques] = triangulate(model_graph.copy(), elim_order)
    [jtree, root, B, w] = cliques_to_jtree(cliques, ns)
    return [jtree, root, cliques, B, w]
def best_first_elim_order(G, node_sizes, stage=[]):
    """
    This function greedily searches for an optimal elimination order.
    Find an order in which to eliminate nodes from the graph in such a way
    as to try and minimize the weight of the resulting triangulated graph.
    The weight of a graph is the sum of the weights of each of its cliques;
    the weight of a clique is the product of the weights of each of its
    members; the weight of a node is the number of values it can take on.
    Since this is an NP-hard problem, we use the following greedy heuristic:
    At each step, eliminate that node which will result in the addition of
    the least number of fill-in edges, breaking ties by choosing the node
    that induces the lighest clique.
    For details, see
    - Kjaerulff, "Triangulation of graphs -- algorithms giving small total
    state space", Univ. Aalborg tech report, 1990 (www.cs.auc.dk/~uk)
    - C. Huang and A. Darwiche, "Inference in Belief Networks: A procedural
    guide", Intl. J. Approx. Reasoning, 11, 1994
    Parameters
    ----------
    G: Numpy ndarray
        G[i,j] = 1 iff there is an edge between node i and node j.
    node_sizes: List
        The node sizes, where ns[i] = the number of discrete values
        node i can take on [1 if observed].
    stage: List
        stage[i] is a list of the nodes that must be eliminated at i'th
        stage.
        NOTE(review): mutable default argument; harmless here because it is
        only reassigned, never mutated — but confirm before reusing.
    """
    """Obtain the number of nodes in the graph"""
    n = G.shape[0]
    # Default: a single stage containing every node.
    if stage == []:
        stage = [range(0, n)]
    MG = G.copy()
    # Row vectors tracking which nodes remain and the chosen order.
    uneliminated = np.ones((1, n))
    order = np.zeros((1, n))
    t = 0
    """For each node in the graph"""
    for i in range(0, n):
        """Find the indices of the unelminated elements"""
        U = find(uneliminated == 1)
        """Find which nodes can be removed in this stage."""
        # NOTE(review): np.intersect1d_nu was removed from modern NumPy;
        # this module targets a legacy NumPy API.
        valid = np.intersect1d_nu(np.array(U), np.array([stage[t]]))
        """
        Determine which of the valid nodes will add the least number of fill in
        edges once eliminated. If 2 or more nodes add the same least number of
        fill in edges, then choose the one that results in the lightest clique.
        """
        min_fill = np.zeros((1, len(valid)))
        min_weight = np.zeros((1, len(valid)))
        """For each node that is valid for elimination"""
        for j in range(0, len(valid)):
            k = valid[j]
            """Obtain the uneliminated neighbours of the node to be eliminated"""
            nbrs = neighbours(G, k)
            nbrs = np.intersect1d_nu(np.array([nbrs]), np.array(U))
            l = len(nbrs)
            # M is the sub-adjacency matrix over the neighbours: existing
            # edges there do not need fill-in.
            M = np.zeros((l, l))
            count = 0
            for x in nbrs:
                for y in range(0, len(nbrs)):
                    M[count, y] = MG[x, nbrs[y]]
                count = count + 1
            """Save the number of fill-in edges required to eliminate node j"""
            min_fill[0, j] = l**2 - np.sum(M)
            nbrs = nbrs.tolist()
            nbrs.insert(0, k)
            """Save the clique weight obtained by eliminating node j"""
            min_weight[0, j] = np.prod(node_sizes[nbrs])
        """Determine which of the nodes create the lightest clique."""
        lightest_nbrs = find(min_weight == np.min(min_weight))
        """
        Determine which of nodes found in the step above, require the least
        number of fill-in edges to eliminate.
        """
        best_nbr_ndx = np.argmin(min_fill[0, lightest_nbrs.tolist()])
        j = lightest_nbrs[0, best_nbr_ndx]
        """
        Determine which of the nodes found in the step above are valid for
        elimination, these are the nodes to be eliminated.
        """
        k = valid[j]
        uneliminated[0, k] = 0
        """Add the nodes to be eliminated to the elimination order"""
        order[0, i] = k
        """Determine the nieghbours of the nodes to be eliminated"""
        ns = neighbours(G, k)
        ns = np.intersect1d_nu(np.array([ns]), np.array(U))
        """Eliminate the nodes"""
        # Eliminating a node connects all of its remaining neighbours.
        if len(ns) != 0:
            for x in ns:
                for y in ns:
                    G[x, y] = 1
            G = setdiag(G, 0)
        """
        If all the nodes valid for elimination in this stage have been
        eliminated, then advance to the next stage.
        """
        if np.sum(np.abs(uneliminated[0, stage[t]])) == 0:
            t = t + 1
    return order
def triangulate(G, order):
    """
    This function ensures that the input graph is triangulated (chordal),
    i.e., every cycle of length > 3 has a chord. To find the maximal
    cliques, we save each induced cluster (created by adding connecting
    neighbors) that is not a subset of any previously saved cluster. (A
    cluster is a complete, but not necessarily maximal, set of nodes.)

    Parameters
    ----------
    G: Numpy ndarray
        G[i,j] = 1 iff there is an edge between node i and node j.
    order: Numpy ndarray (1 x n)
        The order in which to eliminate the nodes.
    """
    # Obtain the number of nodes in the graph.
    n = G.shape[0]
    eliminated = np.zeros((1, n))
    cliques = []
    # Bug fix: the loops below previously reused `i` (and `j`) for both the
    # outer elimination step and the inner clique-connection loops, shadowing
    # the outer index; distinct names are used now.
    for step in range(0, n):
        # Obtain the index of the next node to be eliminated. The order
        # array holds floats, so cast it back to an integer index.
        u = int(order[0, step])
        U = find(eliminated == 0)
        nodes = np.intersect1d_nu(neighbours(G, u), U)
        nodes = np.union1d(nodes, np.array([u]))
        # Connect all uneliminated neighbours of the node to be eliminated
        # together, forming the induced cluster.
        for x in nodes:
            for y in nodes:
                G[x, y] = 1
        G = setdiag(G, 0)
        # Mark the node as 'eliminated'.
        eliminated[0, u] = 1
        # If the generated clique is a subset of an existing clique, then it
        # is not a maximal clique, so it is excluded from the list of cliques.
        exclude = False
        for c in range(0, len(cliques)):
            if issubset(nodes, np.array(cliques[c])):
                exclude = True
                break
        if not exclude:
            cliques.append(nodes)
    return [G, cliques]
def cliques_to_jtree(cliques, ns):
    """
    This function produces an optimal junction tree from a set of cliques.
    A junction tree is a tree that satisfies the jtree property, which says:
    for each pair of cliques U, V with intersection S, all cliques on the
    path between U and V contain S. (This ensures that local propagation
    leads to global consistency.)
    The best jtree is the maximal spanning tree which minimizes the sum of
    the costs on each edge. The cost on an edge connecting cliques i and j,
    is the weight of the seperator set between the two cliques, defined as
    the intersection between cliques i and j.
    Therefore, to determine the cost of an edge connecting 2 cliques:
    C[i] = clique i, and
    C[j] = clique j,
    S[i, j] = Intersection(C[i], C[j]), is the seperator set between i
    and j,
    w[S[i, j]]= weight of the seperator set, which is the product of the
    weights of each node in S, where the weight of a node is the number of
    values that node can take on. Therefore the cost of an edge connecting
    clique i and clique j is: cost[i, j] = W[S[i, j]].
    For details, see
    - Jensen and Jensen, "Optimal Junction Trees", UAI 94.
    Parameters
    ----------
    cliques: List
        cliques[i] contains the indices of the nodes in clique i.
    ns: Numpy ndarray
        The node sizes, ns[i] is the number of values node i can take on.
    Ouput
    -----
    jtree: Numpy ndarray
        A matrix reprsenting the edges in the junction tree. jtree(i,j)=1
        iff there is an edge between clique i and clique j.
    root: Int
        The index of the root clique.
    cliques: List
        A list of lists of the indices of the nodes in each clique. cliques[i] =
        the indices of the nodes in clique i.
    B: Numpy ndarray
        A map of which clique each node appears in, B[i,j] = 1 iff node j
        occurs in clique i.
    w: List
        The weights of the cliques, w[i] = weight of clique i.
    """
    num_cliques = len(cliques)
    w = np.zeros((num_cliques, 1))
    # B is the clique/node membership map; w holds each clique's weight
    # (product of its member node sizes).
    B = np.zeros((num_cliques, ns.size))
    for i in range(0, num_cliques):
        B[i, cliques[i].tolist()] = 1
        w[i] = np.prod(ns[cliques[i].tolist()])
    # C1[i,j] = size of the separator (shared-node count) of cliques i, j.
    C1 = np.mat(B) * np.mat(B).T
    C1 = setdiag(C1, 0)
    # C2[i,j] = combined clique weight, used as the tie-breaking cost.
    W = np.repeat(w, num_cliques, 1)
    C2 = W + np.mat(W).T
    C2 = setdiag(C2, 0)
    """Using -C1 gives us the maximum spanning tree"""
    jtree = minimum_spanning_tree(-1*C1, C2)
    # NOTE(review): 'root' is returned as num_cliques, but clique indices
    # are 0-based (0..num_cliques-1); this looks like it should be
    # num_cliques - 1 — confirm against callers before changing.
    return [jtree, num_cliques, B, w]
def minimum_spanning_tree(C1, C2):
    """
    This function finds the minimum spanning tree using Prim's algorithm.
    We assume that absent edges have 0 cost. To find the maximum spanning
    tree, use -1*C.
    We partition the nodes into those in U and those not in U.
    closest[i] is the vertex in U that is closest to i in V-U.
    lowcost[i] is the cost of the edge [i, closest[i]], or infinity if i has
    been used.
    For details see
    - Aho, Hopcroft & Ullman 1983, "Data structures and algorithms",
    p 237.
    Parameters
    ----------
    C1: Numpy matrix
        C1[i,j] is the primary cost of connecting i to j.
    C2: Numpy matrix
        C2[i,j] is the (optional) secondary cost of connecting i to j, used
        to break ties.
    """
    n = C1.shape[0]
    # A is the adjacency matrix of the resulting spanning tree.
    A = np.zeros((n,n))
    closest = np.zeros((1,n))
    used = np.zeros((1,n))
    # Start growing the tree from vertex 0.
    used[0,0] = 1
    # Replace absent (zero-cost) edges with +inf so they are never chosen.
    # NOTE(review): np.Inf was removed in NumPy 2.0; this targets legacy NumPy.
    C1 = C1 + np.nan_to_num((C1 == 0) * np.Inf )
    C2 = C2 + np.nan_to_num((C2 == 0) * np.Inf )
    lowcost1 = C1[0,:]
    lowcost2 = C2[0,:]
    for i in range(1,n):
        # Pick the cheapest frontier edge, breaking ties by secondary cost.
        ks = find(np.array(lowcost1) == np.min(lowcost1))
        k = ks[0, np.argmin(lowcost2[0, ks])]
        A[k, closest[0,k]] = 1
        A[closest[0,k], k] = 1
        # Mark k as used by making it infinitely expensive to reach again.
        lowcost1[0,k] = np.nan_to_num(np.Inf)
        lowcost2[0,k] = np.nan_to_num(np.Inf)
        used[0,k] = 1
        # Relax the costs of every still-unused vertex through k.
        NU = find(used == 0)
        for ji in range(0, NU.shape[1]):
            j = NU[0, ji]
            if C1[k, j] < lowcost1[0, j]:
                lowcost1[0, j] = float(C1[k, j])
                lowcost2[0, j] = float(C2[k, j])
                closest[0, j] = float(k)
    return A
def mk_rooted_tree(G, root):
    """
    This function reproduces G as a directed tree pointing away from the
    root.
    Parameters
    ----------
    G: Numpy ndarray
        G[i,j] = 1 iff there is an edge between node i and node j.
    root: Int
        The index of the root node.
    Output
    ------
    T: Numpy ndarray
        The rooted tree, T[i,j] = 1 iff there is an edge between node i and
        node j.
    pre: List
        The pre visting order.
    post: List
        The post visting order.
    cycle: Int
        Equals 1 if there is a cycle in the rooted tree.
    """
    n = G.shape[0]
    T = np.zeros((n, n))
    directed = 0
    """Preform depth first search"""
    searched = dfs(G, root, directed)
    # Each node's DFS predecessor becomes its parent in the rooted tree.
    # NOTE(review): assumes dfs() returns pred as a (1, n) array with -1
    # marking "no predecessor" — confirm against the dfs module.
    for i in range(0, searched.pred.shape[1]):
        if searched.pred[0, i]!=-1:
            T[searched.pred[0, i], i] = 1
    return [T, searched.pre, searched.post, searched.cycle]
| |
# -*- coding: utf-8 -*-
import json
import re
class InvalidExpressionError(Exception):
    """ Raised when a criteria statement cannot be parsed or fails validation. """
class DataObject(object):
    """ Base class for objects that render themselves via a JSON-able form.

    Subclasses must override :attr:`in_json`.
    """
    @property
    def in_json(self):
        # Subclasses are required to supply the JSON-able representation.
        raise RuntimeError('Unknown data object')
    def __str__(self):
        return str(self.in_json)
    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.__str__())
class ExpressionOperand(object):
    """ Operand tokens recognized in criteria statements. """
    OP_EQ = '='
    OP_NE = '!='
    OP_GE = '>='
    OP_GT = '>'
    OP_LE = '<='
    OP_LT = '<'
    OP_IN = 'in'
    OP_NOT_IN = 'not in'
    OP_SQL_LIKE = 'like'
    OP_REGEXP_LIKE = 'rlike'
    OP_INDEX_SEARCH = 'indexed with'
class ExpressionType(object):
    """ Classification of one side of an expression. """
    IS_PARAMETER = 'param'  # a ":name" placeholder
    IS_PROPERTY_PATH = 'path'  # an entity alias or dotted property path
    IS_DATA = 'data'  # an inline JSON literal
class Expression(DataObject):
    """ Query Expression

    :param passerine.db.expression.ExpressionPart left: the left part
    :param passerine.db.expression.ExpressionPart right: the right part
    :param str operand: the generic operand
    """
    def __init__(self, left, operand, right):
        self.left = left
        self.operand = operand
        self.right = right

    @property
    def in_json(self):
        return {'left': self.left, 'right': self.right, 'operand': self.operand}

    def __str__(self):
        return '{left} {operand} {right}'.format(
            left=self.left.original,
            operand=self.operand,
            right=self.right.original,
        )
class ExpressionPart(DataObject):
    """ One side of a query expression.

    :param str original: the original query
    :param str kind: the type of the part
    :param value: the parameter value only for a data part
    :param str alias: the entity alias for a property part or the name of the parameter of a parameter part
    """
    def __init__(self, original, kind, value, alias):
        self.original = original
        self.kind = kind
        self.value = value
        self.alias = alias

    @property
    def in_json(self):
        return {
            'original': self.original,
            'kind': self.kind,
            'value': self.value,
            'alias': self.alias,
        }
class ExpressionSet(DataObject):
    """ Representation of an analyzed collection of expressions. """
    def __init__(self, expressions):
        # Property-path map and parameter list are filled in by the analyzer.
        self.properties = {}
        self.parameters = []
        self.expressions = expressions

    @property
    def in_json(self):
        return {
            'properties': self.properties,
            'parameters': self.parameters,
            'expressions': self.expressions,
        }
class Criteria(object):
""" Expression Criteria
Support operands: =, <=, <, >, >=, in, like (SQL-like string pattern), rlike (Regular-expression pattern), indexed with (only for Riak)
"""
def __init__(self):
self._is_updated = False
self._sub_expressions = []
self._analyzed_map = None
self._re_parameter = re.compile('^:[a-zA-Z0-9_]+$')
self._re_root_path = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
self._re_property_path = re.compile('^[a-zA-Z][a-zA-Z0-9_]*(\.[a-zA-Z][a-zA-Z0-9_]*)+$')
self._re_statement = re.compile(
'^\s*(?P<left>.+)\s+(?P<operand>{eq}|{ne}|{ge}|{gt}|{le}|{lt}|{xin}|{xnin}|{like}|{rlike}|{indexed})\s+(?P<right>.+)\s*$'.format(
eq = ExpressionOperand.OP_EQ,
ne = ExpressionOperand.OP_NE,
ge = ExpressionOperand.OP_GE,
gt = ExpressionOperand.OP_GT,
le = ExpressionOperand.OP_LE,
lt = ExpressionOperand.OP_LT,
xin = ExpressionOperand.OP_IN,
xnin = ExpressionOperand.OP_NOT_IN,
like = ExpressionOperand.OP_SQL_LIKE,
rlike = ExpressionOperand.OP_REGEXP_LIKE,
indexed = ExpressionOperand.OP_INDEX_SEARCH
),
re.IGNORECASE
)
self._re_property_path_delimiter = re.compile('\.')
def get_analyzed_version(self):
if self._is_updated:
return self._analyzed_map
analyzed_expression = ExpressionSet(self._sub_expressions)
# Scan for all property paths and parameters.
for se in self._sub_expressions:
self._scan_for_property_paths_and_parameters(analyzed_expression, se.left)
self._scan_for_property_paths_and_parameters(analyzed_expression, se.right)
if not analyzed_expression.properties:
raise InvalidExpressionError('There must be at least one property path. It is prone to query injection.')
self._analyzed_map = analyzed_expression
return self._analyzed_map
def _scan_for_property_paths_and_parameters(self, analyzed_expression, sub_expression_part):
""" Search for all property paths and parameters.
"""
# Search for all referred property paths.
if sub_expression_part.kind == ExpressionType.IS_PROPERTY_PATH:
property_path = sub_expression_part.original
analyzed_expression.properties[property_path] = None
# Search for all referred property paths.
if sub_expression_part.kind == ExpressionType.IS_PARAMETER:
parameter_name = sub_expression_part.original[1:]
analyzed_expression.parameters.append(parameter_name)
def expect(self, statement):
self._is_updated = False
expr = self._compile(statement)
self._sub_expressions.append(expr)
@property
def _fixed_syntax_operands(self):
return ('in', 'like', 'rlike', 'indexed with')
def _compile(self, statement):
fixed_syntax_operands = self._fixed_syntax_operands
expr = self._parse(statement)
try:
expr.left = self._parse_side(expr.left)
except InvalidExpressionError as exception:
raise InvalidExpressionError('The left side of the expression cannot be parsed.')
try:
expr.right = self._parse_side(expr.right)
except InvalidExpressionError as exception:
raise InvalidExpressionError('The left side of the expression cannot be parsed.')
# Validate the syntax on the fixed syntaxes.
if expr.operand in fixed_syntax_operands:
if expr.left.kind != ExpressionType.IS_PROPERTY_PATH:
raise InvalidExpressionError('The property path must be on the left of the operand.')
if expr.right.kind == ExpressionType.IS_PROPERTY_PATH:
raise InvalidExpressionError('The property path cannot be on the right of the operand.')
# If the left side refers to the root path but not for the index search,
# the method will raise the invalid expression error.
if expr.left.kind == ExpressionType.IS_PROPERTY_PATH \
and '.' not in expr.left.original \
and expr.operand != ExpressionOperand.OP_INDEX_SEARCH:
raise InvalidExpressionError('The property path to the root entity can only be used by index search.')
# The property path must be in the expression.
if expr.left.kind != ExpressionType.IS_PROPERTY_PATH and expr.right.kind != ExpressionType.IS_PROPERTY_PATH:
raise InvalidExpressionError('The property path must be in the expression.')
return expr
def _parse_side(self, sub_statement):
kind = ExpressionType.IS_DATA
if self._re_parameter.match(sub_statement):
kind = ExpressionType.IS_PARAMETER
elif self._re_property_path.match(sub_statement) or self._re_root_path.match(sub_statement):
kind = ExpressionType.IS_PROPERTY_PATH
if kind != ExpressionType.IS_DATA:
alias = self._re_property_path_delimiter.split(sub_statement)[0]\
if self._re_property_path_delimiter.search(sub_statement) \
else sub_statement[1:]
return self._create_expression_part({
'original': sub_statement,
'kind': kind,
'value': None,
'alias': alias
})
decoded_data = None
try:
decoded_data = json.loads(sub_statement)
except ValueError as exception:
raise InvalidExpressionError('Unable to decode the data.')
return self._create_expression_part({
'original': sub_statement,
'kind': kind,
'value': decoded_data,
'alias': None
})
    def _create_expression_part(self, parameters):
        """Factory hook: build an ExpressionPart from a keyword-parameter dict."""
        return ExpressionPart(**parameters)
def _parse(self, statement):
matches = self._re_statement.match(statement)
if not matches:
raise InvalidExpressionError('Incomplete statement: {}'.format(statement))
raw_expr = matches.groupdict()
expression = Expression(**raw_expr)
return expression
| |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    """Styling of the hover labels for sunburst traces.

    Auto-generated plotly hierarchy node: each settable property is a
    Python property whose setter writes through to the validated
    underlying plotly structure (``self[...]``).
    """

    # class properties
    # --------------------
    _parent_path_str = "sunburst"
    _path_str = "sunburst.hoverlabel"
    # Property names this node accepts (used for validation).
    _valid_props = {
        "align",
        "alignsrc",
        "bgcolor",
        "bgcolorsrc",
        "bordercolor",
        "bordercolorsrc",
        "font",
        "namelength",
        "namelengthsrc",
    }

    # align
    # -----
    @property
    def align(self):
        """
        Sets the horizontal alignment of the text content within hover
        label box. Has an effect only if the hover label text spans
        more two or more lines

        The 'align' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'right', 'auto']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["align"]

    @align.setter
    def align(self, val):
        self["align"] = val

    # alignsrc
    # --------
    @property
    def alignsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `align`.

        The 'alignsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["alignsrc"]

    @alignsrc.setter
    def alignsrc(self, val):
        self["alignsrc"] = val

    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        Sets the background color of the hover labels for this trace

        The 'bgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bgcolor"]

    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val

    # bgcolorsrc
    # ----------
    @property
    def bgcolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `bgcolor`.

        The 'bgcolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bgcolorsrc"]

    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self["bgcolorsrc"] = val

    # bordercolor
    # -----------
    @property
    def bordercolor(self):
        """
        Sets the border color of the hover labels for this trace.

        The 'bordercolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bordercolor"]

    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val

    # bordercolorsrc
    # --------------
    @property
    def bordercolorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `bordercolor`.

        The 'bordercolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bordercolorsrc"]

    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        self["bordercolorsrc"] = val

    # font
    # ----
    @property
    def font(self):
        """
        Sets the font used in hover labels.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.sunburst.hoverlabel.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color

                colorsrc
                    Sets the source reference on Chart Studio Cloud
                    for `color`.
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans",, "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                familysrc
                    Sets the source reference on Chart Studio Cloud
                    for `family`.
                size

                sizesrc
                    Sets the source reference on Chart Studio Cloud
                    for `size`.

        Returns
        -------
        plotly.graph_objs.sunburst.hoverlabel.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # namelength
    # ----------
    @property
    def namelength(self):
        """
        Sets the default length (in number of characters) of the trace
        name in the hover labels for all traces. -1 shows the whole
        name regardless of length. 0-3 shows the first 0-3 characters,
        and an integer >3 will show the whole name if it is less than
        that many characters, but if it is longer, will truncate to
        `namelength - 3` characters and add an ellipsis.

        The 'namelength' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [-1, 9223372036854775807]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|numpy.ndarray
        """
        return self["namelength"]

    @namelength.setter
    def namelength(self, val):
        self["namelength"] = val

    # namelengthsrc
    # -------------
    @property
    def namelengthsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `namelength`.

        The 'namelengthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["namelengthsrc"]

    @namelengthsrc.setter
    def namelengthsrc(self, val):
        self["namelengthsrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.
        """

    def __init__(
        self,
        arg=None,
        align=None,
        alignsrc=None,
        bgcolor=None,
        bgcolorsrc=None,
        bordercolor=None,
        bordercolorsrc=None,
        font=None,
        namelength=None,
        namelengthsrc=None,
        **kwargs
    ):
        """
        Construct a new Hoverlabel object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.sunburst.Hoverlabel`
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.

        Returns
        -------
        Hoverlabel
        """
        super(Hoverlabel, self).__init__("hoverlabel")

        # Internal construction path: when a parent is supplied the object
        # is being attached to an existing hierarchy, so skip validation
        # and property population entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Copy so popping entries below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.sunburst.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.Hoverlabel`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("align", None)
        _v = align if align is not None else _v
        if _v is not None:
            self["align"] = _v
        _v = arg.pop("alignsrc", None)
        _v = alignsrc if alignsrc is not None else _v
        if _v is not None:
            self["alignsrc"] = _v
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("bgcolorsrc", None)
        _v = bgcolorsrc if bgcolorsrc is not None else _v
        if _v is not None:
            self["bgcolorsrc"] = _v
        _v = arg.pop("bordercolor", None)
        _v = bordercolor if bordercolor is not None else _v
        if _v is not None:
            self["bordercolor"] = _v
        _v = arg.pop("bordercolorsrc", None)
        _v = bordercolorsrc if bordercolorsrc is not None else _v
        if _v is not None:
            self["bordercolorsrc"] = _v
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("namelength", None)
        _v = namelength if namelength is not None else _v
        if _v is not None:
            self["namelength"] = _v
        _v = arg.pop("namelengthsrc", None)
        _v = namelengthsrc if namelengthsrc is not None else _v
        if _v is not None:
            self["namelengthsrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| |
import numpy as np
import pandas.util.testing as tm
from pandas import (Series, date_range, DatetimeIndex, Index, RangeIndex,
Float64Index, IntervalIndex)
class SetOperations:
    """Benchmark Index set operations across several index dtypes."""

    params = (['datetime', 'date_string', 'int', 'strings'],
              ['intersection', 'union', 'symmetric_difference'])
    param_names = ['dtype', 'method']

    def setup(self, dtype, method):
        size = 10**5
        dates = date_range('1/1/2000', periods=size, freq='T')
        # All candidate indexes are built regardless of dtype, matching the
        # original fixture; the benchmarked pair is then selected.
        sides = {
            'datetime': dates,
            'date_string': Index(dates.strftime('%Y-%m-%d %H:%M:%S')),
            'int': Index(np.arange(size)),
            'strings': tm.makeStringIndex(size),
        }
        full = sides[dtype]
        self.left = full
        self.right = full[:-1]

    def time_operation(self, dtype, method):
        getattr(self.left, method)(self.right)
class SetDisjoint:
    """Benchmark set difference between two disjoint DatetimeIndexes."""

    def setup(self):
        size = 10**5
        self.datetime_left = DatetimeIndex(range(size))
        self.datetime_right = DatetimeIndex(range(size, size + 20000))

    def time_datetime_difference_disjoint(self):
        self.datetime_left.difference(self.datetime_right)
class Datetime:
    """Benchmark the DatetimeIndex midnight-only fast-path check."""

    def setup(self):
        self.dr = date_range(start='20000101', freq='D', periods=10000)

    def time_is_dates_only(self):
        self.dr._is_dates_only
class Ops:
    """Benchmark elementwise arithmetic operators on numeric indexes."""

    params = ['float', 'int']
    param_names = ['dtype']

    def setup(self, dtype):
        factory_names = {'int': 'makeIntIndex', 'float': 'makeFloatIndex'}
        self.index = getattr(tm, factory_names[dtype])(10**6)

    def time_add(self, dtype):
        self.index + 2

    def time_subtract(self, dtype):
        self.index - 2

    def time_multiply(self, dtype):
        self.index * 2

    def time_divide(self, dtype):
        self.index / 2

    def time_modulo(self, dtype):
        self.index % 2
class Range:
    """Benchmark min/max/get_loc fast paths on RangeIndex."""

    def setup(self):
        self.idx_inc = RangeIndex(0, 10**7, 3)
        self.idx_dec = RangeIndex(10**7, -1, -3)

    def time_max(self):
        self.idx_inc.max()

    def time_max_trivial(self):
        self.idx_dec.max()

    def time_min(self):
        self.idx_dec.min()

    def time_min_trivial(self):
        self.idx_inc.min()

    def time_get_loc_inc(self):
        self.idx_inc.get_loc(900000)

    def time_get_loc_dec(self):
        self.idx_dec.get_loc(100000)
class IndexAppend:
    """Benchmark Index.append with a long list of small indexes."""

    def setup(self):
        n_chunks = 10000
        self.range_idx = RangeIndex(0, 100)
        self.int_idx = self.range_idx.astype(int)
        self.obj_idx = self.int_idx.astype(str)
        self.range_idxs = []
        self.int_idxs = []
        self.object_idxs = []
        for i in range(1, n_chunks):
            r_idx = RangeIndex(i * 100, (i + 1) * 100)
            i_idx = r_idx.astype(int)
            self.range_idxs.append(r_idx)
            self.int_idxs.append(i_idx)
            self.object_idxs.append(i_idx.astype(str))

    def time_append_range_list(self):
        self.range_idx.append(self.range_idxs)

    def time_append_int_list(self):
        self.int_idx.append(self.int_idxs)

    def time_append_obj_list(self):
        self.obj_idx.append(self.object_idxs)
class Indexing:
    """Benchmark common Index indexing and lookup operations."""

    params = ['String', 'Float', 'Int']
    param_names = ['dtype']

    def setup(self, dtype):
        size = 10**6
        self.idx = getattr(tm, 'make{}Index'.format(dtype))(size)
        self.array_mask = (np.arange(size) % 3) == 0
        self.series_mask = Series(self.array_mask)
        self.sorted = self.idx.sort_values()
        half = size // 2
        self.non_unique = self.idx[:half].append(self.idx[:half])
        self.non_unique_sorted = (
            self.sorted[:half].append(self.sorted[:half]).sort_values()
        )
        self.key = self.sorted[size // 4]

    def time_boolean_array(self, dtype):
        self.idx[self.array_mask]

    def time_boolean_series(self, dtype):
        self.idx[self.series_mask]

    def time_get(self, dtype):
        self.idx[1]

    def time_slice(self, dtype):
        self.idx[:-1]

    def time_slice_step(self, dtype):
        self.idx[::2]

    def time_get_loc(self, dtype):
        self.idx.get_loc(self.key)

    def time_get_loc_sorted(self, dtype):
        self.sorted.get_loc(self.key)

    def time_get_loc_non_unique(self, dtype):
        self.non_unique.get_loc(self.key)

    def time_get_loc_non_unique_sorted(self, dtype):
        self.non_unique_sorted.get_loc(self.key)
class Float64IndexMethod:
    """Benchmark get_loc on a Float64Index of tiny floats (GH 13166)."""

    def setup(self):
        count = 100000
        self.ind = Float64Index(np.arange(count) * 4.8000000418824129e-08)

    def time_get_loc(self):
        self.ind.get_loc(0)
class IntervalIndexMethod:
    """Benchmark IntervalIndex properties and set operations (GH 24813)."""

    params = [10**3, 10**5]

    def setup(self, N):
        lefts = np.append(np.arange(N), np.array(0))
        rights = np.append(np.arange(1, N + 1), np.array(1))
        self.intv = IntervalIndex.from_arrays(lefts, rights)
        self.intv._engine  # build the engine up-front so it is not timed
        self.intv2 = IntervalIndex.from_arrays(lefts + 1, rights + 1)
        self.intv2._engine
        self.left = IntervalIndex.from_breaks(np.arange(N))
        self.right = IntervalIndex.from_breaks(np.arange(N - 3, 2 * N - 3))

    def time_monotonic_inc(self, N):
        self.intv.is_monotonic_increasing

    def time_is_unique(self, N):
        self.intv.is_unique

    def time_intersection(self, N):
        self.left.intersection(self.right)

    def time_intersection_one_duplicate(self, N):
        self.intv.intersection(self.right)

    def time_intersection_both_duplicate(self, N):
        self.intv.intersection(self.intv2)
from .pandas_vb_common import setup # noqa: F401
| |
import json
from sfsimodels.models import soils, buildings, foundations, systems, abstract_models, loads, materials, sections, hazards
from collections import OrderedDict
from sfsimodels.functions import add_to_obj
from sfsimodels.exceptions import deprecation, ModelError
from sfsimodels.__about__ import __version__
import numpy as np
from inspect import signature
# Base model types with first-class support in ecp files (see get_std_obj_map).
standard_types = ["soil", "soil_profile", "foundation", "building", "section", "system", "custom_type"]
def _json_default(o):
"""Converts numpy types to json serialisable python types"""
if isinstance(o, np.int64):
return int(o)
raise TypeError
def load_json(ffp, custom=None, default_to_base=False, verbose=0):
    """
    Given a json file it creates a dictionary of sfsi objects

    :param ffp: str, Full file path to json file
    :param custom: dict, used to load custom objects, {model type: custom object}
    :param default_to_base: bool, if true then fall back to the base model when a type is unknown
    :param verbose: int, console output
    :return: dict
    """
    # Context manager closes the file handle (previously leaked via json.load(open(ffp)))
    with open(ffp) as ofile:
        data = json.load(ofile)
    return ecp_dict_to_objects(data, custom, default_to_base=default_to_base, verbose=verbose)
def load_json_and_meta(ffp, custom=None, verbose=0):
    """
    Load a json file and return both the sfsi objects and the file meta data.

    :param ffp: str, Full file path to json file
    :param custom: dict, used to load custom objects, {model type: custom object}
    :param verbose: int, console output
    :return: tuple, (dict of objects, dict of meta data)
    """
    # Context manager closes the file handle (previously leaked via json.load(open(ffp)))
    with open(ffp) as ofile:
        data = json.load(ofile)
    # Everything except the "models" entry is meta data
    md = {item: data[item] for item in data if item != "models"}
    return ecp_dict_to_objects(data, custom, verbose=verbose), md
def loads_json(p_str, custom=None, meta=False, verbose=0):
    """
    Given a json string it creates a dictionary of sfsi objects

    :param p_str: str, json string (previously mis-documented as a file path)
    :param custom: dict, used to load custom objects, {model type: custom object}
    :param meta: bool, if true then also return all ecp meta data in separate dict
    :param verbose: int, console output
    :return: dict, or (dict, dict) when meta is True
    """
    data = json.loads(p_str)
    if meta:
        # Everything except the "models" entry is meta data
        md = {item: data[item] for item in data if item != "models"}
        return ecp_dict_to_objects(data, custom, verbose=verbose), md
    return ecp_dict_to_objects(data, custom, verbose=verbose)
def get_matching_args_and_kwargs(in_dict, sm_obj, custom=None, overrides=None):
    """Match entries of *in_dict* against the call signature of *sm_obj*.

    :param in_dict: dict, source of parameter values
    :param sm_obj: callable whose signature is inspected
    :param custom: dict, maps signature parameter names to keys of in_dict
    :param overrides: dict, values that take precedence over in_dict
    :return: tuple, (positional args list, keyword-args OrderedDict,
        list of (name, position) pairs for required values not found)
    """
    custom = {} if custom is None else custom
    overrides = {} if overrides is None else overrides
    args = []
    kwargs = OrderedDict()
    missing = []
    for param in signature(sm_obj).parameters.values():
        # Allow the input dict to use a different key than the parameter name
        pname = custom.get(param.name, param.name)
        if pname == 'kwargs':
            continue
        if pname in overrides:
            val = overrides[pname]
        else:
            try:
                val = in_dict[pname]
            except KeyError:
                if param.default == param.empty:
                    # Required but absent: record the positional slot so the
                    # caller can fill it in later.
                    missing.append((pname, len(args)))
                    val = None  # placeholder, needs to be replaced
                else:
                    val = param.default
        if param.default == param.empty:
            args.append(val)
        elif val is not None:
            # Keyword args are keyed by the real parameter name, not the alias
            kwargs[param.name] = val
    return args, kwargs, missing
# Deprecated name
def dicts_to_objects(data, verbose=0):
    """Deprecated. Use ecp_dict_to_objects"""
    deprecation('Deprecated, dicts_to_objects should be switched to ecp_dict_to_objects')
    # BUG FIX: the result was previously discarded, so this wrapper always returned None
    return ecp_dict_to_objects(data, verbose=verbose)
# Maps old ecp model type names to their current equivalents so that
# old files can still be loaded (see ecp_dict_to_objects).
deprecated_types = OrderedDict([
    ("structure", "sdof"),
    ("frame_building", "building_frame"),
    ("frame_building_2D", "building_frame2D"),
    ("wall_building", "building_wall"),
    ("pad_foundation", "foundation_pad"),
    ("raft_foundation", "foundation_raft")
])
def get_std_obj_map():
    """Return the standard mapping of '<base_type>-<type>' keys to model classes.

    Several keys are aliases kept for backwards compatibility with old
    ecp files (marked 'deprecated' below).

    :return: dict, {'<base_type>-<type>': model class}
    """
    obj_map = {
        "soil-soil": soils.Soil,
        "soil-critical_soil": soils.CriticalSoil,
        "soil-soil_critical": soils.CriticalSoil,  # deprecated type
        "soil-stress_dependent_soil": soils.StressDependentSoil,
        "soil-soil_stress_dependent": soils.StressDependentSoil,
        "soil_profile-soil_profile": soils.SoilProfile,
        "beam_column_element-beam_column_element": buildings.BeamColumnElement,
        "beam_column_element-wall_element": buildings.WallElement,
        "section-section": sections.Section,
        "section-rc_beam_section": sections.RCBeamSection,
        "building-building": buildings.Building,
        "building-null_building": buildings.NullBuilding,
        "building-frame_building": buildings.FrameBuilding,
        "building-frame_building2D": buildings.FrameBuilding2D,
        "building-building_frame2D": buildings.FrameBuilding2D,  # deprecated type
        "building-wall_building": buildings.WallBuilding,
        "building-building_wall": buildings.WallBuilding,  # deprecated type
        "building-single_wall": buildings.SingleWall,
        "building-structure": buildings.SDOFBuilding,  # Deprecated type, remove in v1
        "building-sdof": buildings.SDOFBuilding,
        "foundation-foundation": foundations.Foundation,
        "foundation-foundation_raft": foundations.RaftFoundation,  # deprecated type
        "foundation-raft_foundation": foundations.RaftFoundation,
        "foundation-raft": foundations.RaftFoundation,  # Deprecated approach for type, remove in v1
        "foundation-pad_foundation": foundations.PadFoundation,
        "foundation-foundation_pad": foundations.PadFoundation,  # deprecated type
        "foundation-pad_footing": foundations.PadFooting,
        "foundation-strip_foundation": foundations.StripFoundation,
        "custom_object-custom_object": abstract_models.CustomObject,
        "system-system": systems.SoilStructureSystem,  # deprecated type
        "system-sfs": systems.SoilStructureSystem,
        "system-two_d_system": systems.TwoDSystem,
        "load-load": loads.Load,
        "load-load_at_coords": loads.LoadAtCoords,
        "material-rc_material": materials.ReinforcedConcreteMaterial,
        'hazard-seismic_hazard': hazards.SeismicHazard,
    }
    return obj_map
def ecp_dict_to_objects(ecp_dict, custom_map=None, default_to_base=False, verbose=0):
    """
    Given an ecp dictionary, build a dictionary of sfsi objects

    :param ecp_dict: dict, engineering consistency project dictionary
    :param custom_map: dict, used to load custom objects, {'<base_type>-<type>': custom class}
    :param default_to_base: bool, if true then fall back to the '<base_type>-<base_type>'
        class when the specific type is not in the map
    :param verbose: int, console output
    :return: dict, {base_type: {id: object}}
    """
    if custom_map is None:
        custom_map = {}
    obj_map = get_std_obj_map()
    # merge and overwrite the object map with custom maps
    # for item in custom_map:
    #     obj_map[item] = custom_map[item]
    obj_map = {**obj_map, **custom_map}
    data_models = ecp_dict["models"]

    exception_list = []
    objs = OrderedDict()
    collected = set([])  # base types already processed
    # Set base type properly
    mtypes = list(data_models)
    for mtype in mtypes:
        base_type = mtype
        if base_type[:-1] in standard_types:  # support the loading of old plural based ecp files
            base_type = base_type[:-1]
            data_models[base_type] = data_models[mtype]
            del data_models[mtype]
        for m_id in data_models[base_type]:
            data_models[base_type][m_id]["base_type"] = base_type

    # Objects whose prerequisites are not built yet are deferred to load_later
    load_later = {}
    for mtype in data_models:
        base_type = mtype
        if base_type in exception_list:
            continue
        collected.add(base_type)
        objs[base_type] = OrderedDict()
        for m_id in data_models[mtype]:
            obj = data_models[mtype][m_id]
            if "type" not in obj:
                obj["type"] = base_type
            try:
                obj_class = obj_map["%s-%s" % (base_type, obj["type"])]
            except KeyError:
                if default_to_base and f'{base_type}-{base_type}' in obj_map:
                    obj_class = obj_map[f'{base_type}-{base_type}']
                elif obj["type"] in deprecated_types:
                    # Retry the lookup under the current (non-deprecated) type name
                    try:
                        obj_class = obj_map["%s-%s" % (base_type, deprecated_types[obj["type"]])]
                    except KeyError:
                        raise KeyError("Map for Model: '%s' index: '%s' and type: '%s' not available, "
                                       "add '%s-%s' to custom dict" % (base_type, m_id, base_type, base_type, obj["type"]))
                else:
                    raise KeyError("Map for Model: '%s' index: '%s' and type: '%s' not available, "
                                   "add '%s-%s' to custom dict" % (base_type, m_id, base_type, base_type, obj["type"]))
            # try:
            args, kwargs, missing = get_matching_args_and_kwargs(data_models[mtype][m_id], obj_class)
            if len(missing):
                # Fill required constructor args that can be derived from other entries
                for m_item in missing:
                    name = m_item[0]
                    m_indy = m_item[1]
                    if name == 'n_storeys':
                        args[m_indy] = len(data_models[mtype][m_id]["storey_masses"])
                    elif name == 'n_bays':
                        args[m_indy] = len(data_models[mtype][m_id]["bay_lengths"])
            new_instance = obj_class(*args, **kwargs)
            # add_to_obj(new_instance, data_models[mtype][m_id], objs=objs, verbose=verbose)
            try:
                add_to_obj(new_instance, data_models[mtype][m_id], objs=objs, verbose=verbose)
            except KeyError as e:
                # A referenced object is not built yet - defer if prerequisites declared
                if hasattr(new_instance, 'loading_pre_reqs'):
                    if new_instance.base_type not in load_later:
                        load_later[new_instance.base_type] = []
                    load_later[new_instance.base_type].append([new_instance, data_models[mtype][m_id], verbose])
                    continue
                else:
                    raise KeyError(e)
            # print(mtype, m_id)
            objs[base_type][int(data_models[mtype][m_id]["id"])] = new_instance
    ll_types = list(load_later)
    now_loaded = []
    for ll_type in ll_types:
        if ll_type not in now_loaded:
            load_last_objects(objs, load_later, ll_type, now_loaded)
    # Deal with all the exceptions
    # for mtype in data_models:
    #     base_type = mtype
    #
    #     if base_type in collected:
    #         continue
    #     if base_type not in objs:
    #         objs[base_type] = OrderedDict()

    all_bts = list(objs)
    for base_type in all_bts:  # Support for old style ecp file
        if base_type in standard_types:
            objs[base_type + "s"] = objs[base_type]
    return objs
def load_last_objects(objs, load_later, ll_type, now_loaded):
    """Instantiate deferred objects of *ll_type*, loading prerequisites first.

    :param objs: dict, the object registry being built
    :param load_later: dict, {base_type: [[instance, source dict, verbose], ...]}
    :param ll_type: str, base type to load now
    :param now_loaded: list, base types already processed (mutated in place)
    """
    for pending, src_dict, verbose in load_later[ll_type]:
        for pre_req in pending.loading_pre_reqs:
            # NOTE: mutually dependent prerequisites would recurse forever
            if pre_req in load_later and pre_req not in now_loaded:
                load_last_objects(objs, load_later, pre_req, now_loaded)
        add_to_obj(pending, src_dict, objs=objs, verbose=verbose)
        objs[ll_type][int(src_dict["id"])] = pending
    now_loaded.append(ll_type)
class Output(object):
    """Builds a json-serialisable 'ecp' dictionary from sfsi model objects.

    Objects are stored keyed by their unique hash, then renumbered with
    sequential ids (or their own ids when ``reset_ids`` is False) at export.
    """
    name = ""
    units = None
    global_units = None
    doi = ""
    comments = ""
    compression = True  # passed through to each object's to_dict
    reset_ids = True  # if True, ids are renumbered sequentially on export

    def __init__(self):
        # {base_type: {unique_hash: serialised dict}}
        self.unordered_models = {}
        # {base_type: {id: unique_hash}}, built by build_id2hash_dict
        self.id2hash_dict = {}

    @property
    def sfsimodels_version(self):
        return __version__

    @sfsimodels_version.setter
    def sfsimodels_version(self, value):
        # Kept only for backwards compatibility; the value is ignored.
        deprecation('sfsimodels_version automatically set')

    def add_to_dict(self, an_object, export_none=False, extras=None):
        """
        Convert models to json serialisable output

        :param an_object: An instance of a model object
        :param export_none: bool, whether None-valued parameters are exported
        :param extras: dict, extra variables (NOTE: currently unused)
        :return:
        """
        if an_object.id is None and self.reset_ids is False:
            raise ModelError("id must be set on object before adding to output.")
        if hasattr(an_object, "base_type"):
            mtype = an_object.base_type
        elif hasattr(an_object, "type"):
            if an_object.type in standard_types:
                mtype = an_object.type
            else:
                mtype = "custom_type"
        else:
            raise ModelError("Object does not have attribute 'base_type' or 'type', cannot add to output.")
        if mtype not in self.unordered_models:  # Catch any custom objects
            self.unordered_models[mtype] = {}

        if hasattr(an_object, "add_to_dict"):
            # Object knows how to serialise itself (and possibly its children)
            an_object.add_to_dict(self.unordered_models, export_none=export_none)
        elif hasattr(an_object, "to_dict"):
            self.unordered_models[mtype][an_object.unique_hash] = an_object.to_dict(compression=self.compression,
                                                                                    export_none=export_none)
        else:
            raise ModelError("Object does not have method 'to_dict', cannot add to output.")

    def build_id2hash_dict(self):
        """Assign an export id to every stored object hash."""
        for mtype in self.unordered_models:
            if mtype not in self.id2hash_dict:  # Catch any custom objects
                self.id2hash_dict[mtype] = OrderedDict()
            for unique_hash in self.unordered_models[mtype]:
                if self.reset_ids is False:
                    # Keep the objects' own ids, but they must be unique
                    obj_id = self.unordered_models[mtype][unique_hash]['id']
                    if obj_id in self.id2hash_dict[mtype]:
                        raise ModelError('Duplicate id: {0} for model type: {1}'.format(obj_id, mtype))
                else:
                    # Sequential renumbering starting from 1
                    obj_id = len(self.id2hash_dict[mtype]) + 1
                self.id2hash_dict[mtype][obj_id] = unique_hash

    def get_id_from_hash(self, mtype, unique_hash):
        """Reverse lookup: find the export id assigned to *unique_hash*."""
        for m_id in self.id2hash_dict[mtype]:
            if self.id2hash_dict[mtype][m_id] == unique_hash:
                return m_id
        return None

    def _replace_single_id(self, value, item, pdict=None):  # returns value
        """
        A recursive method to cycle through output dictionary and replace ids with the correct id in the id2hash_dict

        :param value: current value being inspected (may be str, dict, list or array)
        :param item: str, the key under which value is stored
        :param pdict: dict, the parent dict (updated in place for hash links)
        :return:
        """
        if isinstance(value, str):
            pass
        elif hasattr(value, '__len__'):
            tolist = getattr(value, "tolist", None)
            if hasattr(value, 'keys'):
                # odict = OrderedDict()
                for i2 in value:
                    self._replace_single_id(value[i2], i2, value)
                return value
            elif callable(tolist):
                # numpy arrays are converted to plain lists for serialisation
                values = value.tolist()
            else:
                values = value
            for i, val2 in enumerate(values):
                values[i] = self._replace_single_id(val2, '')  # if it is a list then check if dict is deeper
            return values
        if '_unique_hash' in item:  # detect link to new object
            child_mtype = item.replace('_unique_hash', '')
            child_hash = value
            # Rewrite the '<mtype>_unique_hash' link as a '<mtype>_id' reference
            pdict['{0}_id'.format(child_mtype)] = self.get_id_from_hash(child_mtype, child_hash)
        return value

    def replace_conflicting_ids(self):
        """
        Goes through output dictionary and replaces all ids with the correct id from the id2hash_dict

        :return:
        """
        self.build_id2hash_dict()
        for mtype in self.unordered_models:
            for unique_hash in self.unordered_models[mtype]:
                umd = self.unordered_models[mtype][unique_hash]
                umd['id'] = self.get_id_from_hash(mtype, unique_hash)
                for item in umd:
                    val = umd[item]
                    umd[item] = self._replace_single_id(val, item, umd)

    def add_to_output(self, mtype, m_id, serialisable_dict):
        """
        Can add additional objects or dictionaries to output file that don't conform to standard objects.

        :param mtype:
        :param m_id:
        :param serialisable_dict:
        :return:
        """
        if mtype not in self.unordered_models:
            self.unordered_models[mtype] = OrderedDict()
        self.unordered_models[mtype][m_id] = serialisable_dict

    def get_models(self):
        """Unhashed models dict: standard types first, then everything else."""
        self.replace_conflicting_ids()
        models_dict = OrderedDict()
        collected = []
        for item in standard_types:
            if item in self.unordered_models:
                new_dict, replacement_dict = unhash_dict(self.unordered_models[item])
                models_dict[item] = new_dict
                collected.append(item)
        for item in self.unordered_models:
            # print("item: ", item)
            if item not in collected:
                new_dict, replacement_dict = unhash_dict(self.unordered_models[item])
                models_dict[item] = new_dict
        return models_dict

    @staticmethod
    def parameters():
        # Top-level keys of the exported ecp dictionary, in order
        return ["name", "units", "doi", "sfsimodels_version", "comments", "models"]

    def to_dict(self):
        """Return the full ecp dictionary (meta data plus models)."""
        outputs = OrderedDict()
        for item in self.parameters():
            if item == 'models':
                outputs[item] = self.get_models()
            else:
                outputs[item] = self.__getattribute__(item)
        return outputs

    def to_file(self, ffp, indent=4, name=None, units=None, comments=None):
        """Export to json file"""
        if name is not None:
            self.name = "%s" % name
        if units is not None:
            self.units = units
        if comments is not None:
            self.comments = comments
        json.dump(self.to_dict(), open(ffp, "w"), indent=indent, default=_json_default)

    def to_str(self, indent=4, name=None, units=None, comments=None):
        """Return as a json string"""
        if name is not None:
            self.name = "%s" % name
        if units is not None:
            self.units = units
        if comments is not None:
            self.comments = comments
        return json.dumps(self.to_dict(), indent=indent, default=_json_default)
def migrate_ecp(in_ffp, out_ffp):
    """
    Migrates an ECP file to the current version of sfsimodels.

    :param in_ffp: str, path to the existing ecp file
    :param out_ffp: str, path where the migrated file is written
    :return: None
    """
    objs, meta_data = load_json_and_meta(in_ffp)
    ecp_output = Output()
    for m_type in objs:
        for instance in objs[m_type]:
            ecp_output.add_to_dict(objs[m_type][instance])
    ecp_output.name = meta_data["name"]
    ecp_output.units = meta_data["units"]
    ecp_output.comments = meta_data["comments"]
    # BUG FIX: json's ``skipkeys`` parameter is a bool; the previous list
    # argument only behaved as intended because a non-empty list is truthy.
    p_str = json.dumps(ecp_output.to_dict(), skipkeys=True, indent=4)
    # context manager ensures the output file is closed even on write errors
    with open(out_ffp, "w") as output_file:
        output_file.write(p_str)
def unhash_dict(pdict):  # TODO: make method
    """
    Re-key a hash-keyed dict with sequential string ids starting at "1".

    :param pdict: dict keyed by hashes
    :return: (new dict keyed by str ids, mapping of old key -> new str id)
    """
    new_dict = OrderedDict()
    replacement_dict = OrderedDict()
    for index, old_key in enumerate(pdict, start=1):
        fresh_key = str(index)
        new_dict[fresh_key] = pdict[old_key]
        replacement_dict[old_key] = fresh_key
    return new_dict, replacement_dict
def _load_mod_dat():
    """Open the bundled ``models_data.dat`` file that ships next to this module."""
    import os
    module_dir = os.path.dirname(os.path.realpath(__file__))
    return open(os.path.join(module_dir, 'models_data.dat'))
| |
# Thin Python 2 command-line wrapper around the EPRCOIN (bitcoin-fork)
# JSON-RPC API: connects to the local daemon, reads one command name from
# argv, prompts for arguments and prints the server's reply.
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect anonymously when no password is configured, otherwise embed the
# credentials in the URL (http://user:pass@host:port).
if rpcpass == "":
	access = ServiceProxy("http://127.0.0.1:23064")
else:
	access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:23064")
cmd = sys.argv[1].lower()
# Each branch below follows the same pattern: prompt for any arguments,
# forward the call to the RPC proxy, and print the result.  The bare
# ``except`` clauses deliberately reduce every failure to a generic
# message (best-effort CLI behaviour).
if cmd == "backupwallet":
	try:
		path = raw_input("Enter destination path/filename: ")
		print access.backupwallet(path)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccount":
	try:
		addr = raw_input("Enter a EPRCOIN address: ")
		print access.getaccount(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaccountaddress(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaddressesbyaccount(acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getbalance":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getbalance(acct, mc)
		except:
			# retry without the optional arguments if the server rejects them
			print access.getbalance()
	except:
		print "\n---An error occurred---\n"
# Node/state queries: mostly zero-argument RPC calls.
elif cmd == "getblockbycount":
	try:
		height = raw_input("Height: ")
		print access.getblockbycount(height)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblockcount":
	try:
		print access.getblockcount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
	try:
		print access.getblocknumber()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
	try:
		print access.getconnectioncount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
	try:
		print access.getdifficulty()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getgenerate":
	try:
		print access.getgenerate()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
	try:
		print access.gethashespersec()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getinfo":
	try:
		print access.getinfo()
	except:
		print "\n---An error occurred---\n"
# Address/account queries with optional arguments: try the full-argument
# call first and fall back to the zero-argument form on failure.
elif cmd == "getnewaddress":
	try:
		acct = raw_input("Enter an account name: ")
		try:
			print access.getnewaddress(acct)
		except:
			print access.getnewaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaccount(acct, mc)
		except:
			print access.getreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
	try:
		addr = raw_input("Enter a EPRCOIN address (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaddress(addr, mc)
		except:
			print access.getreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "gettransaction":
	try:
		txid = raw_input("Enter a transaction ID: ")
		print access.gettransaction(txid)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Informational/list commands with optional filters.
elif cmd == "help":
	try:
		cmd = raw_input("Command (optional): ")
		try:
			print access.help(cmd)
		except:
			print access.help()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listaccounts":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.listaccounts(mc)
		except:
			print access.listaccounts()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaccount(mc, incemp)
		except:
			print access.listreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaddress(mc, incemp)
		except:
			print access.listreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"
elif cmd == "listtransactions":
	try:
		acct = raw_input("Account (optional): ")
		count = raw_input("Number of transactions (optional): ")
		frm = raw_input("Skip (optional):")
		try:
			print access.listtransactions(acct, count, frm)
		except:
			print access.listtransactions()
	except:
		print "\n---An error occurred---\n"
# Commands that move funds: prompt for the transfer details, then retry
# with the minimal argument set if the optional ones are rejected.
elif cmd == "move":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.move(frm, to, amt, mc, comment)
		except:
			print access.move(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendfrom":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendfrom(frm, to, amt, mc, comment, commentto)
		except:
			print access.sendfrom(frm, to, amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendmany":
	try:
		frm = raw_input("From: ")
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.sendmany(frm,to,mc,comment)
		except:
			print access.sendmany(frm,to)
	except:
		print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
	try:
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		amt = raw_input("Amount:")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendtoaddress(to,amt,comment,commentto)
		except:
			print access.sendtoaddress(to,amt)
	except:
		print "\n---An error occurred---\n"
# Wallet configuration and control commands.
elif cmd == "setaccount":
	try:
		addr = raw_input("Address: ")
		acct = raw_input("Account:")
		print access.setaccount(addr,acct)
	except:
		print "\n---An error occurred---\n"
elif cmd == "setgenerate":
	try:
		gen= raw_input("Generate? (true/false): ")
		cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
		try:
			print access.setgenerate(gen, cpus)
		except:
			print access.setgenerate(gen)
	except:
		print "\n---An error occurred---\n"
elif cmd == "settxfee":
	try:
		amt = raw_input("Amount:")
		print access.settxfee(amt)
	except:
		print "\n---An error occurred---\n"
elif cmd == "stop":
	try:
		print access.stop()
	except:
		print "\n---An error occurred---\n"
elif cmd == "validateaddress":
	try:
		addr = raw_input("Address: ")
		print access.validateaddress(addr)
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
	try:
		pwd = raw_input("Enter wallet passphrase: ")
		# unlock the wallet for a fixed 60 seconds
		access.walletpassphrase(pwd, 60)
		print "\n---Wallet unlocked---\n"
	except:
		print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
	try:
		pwd = raw_input("Enter old wallet passphrase: ")
		pwd2 = raw_input("Enter new wallet passphrase: ")
		access.walletpassphrasechange(pwd, pwd2)
		print
		print "\n---Passphrase changed---\n"
	except:
		print
		print "\n---An error occurred---\n"
	print
else:
	print "Command not found or not supported"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.