gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License
"""
This module implements reading and writing of ShengBTE CONTROL files.
"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from monty.dev import requires
from monty.json import MSONable
from pymatgen.core.structure import Structure
from pymatgen.io.vasp import Kpoints
try:
import f90nml
except ImportError:
f90nml = None
__author__ = "Rees Chang, Alex Ganose"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__email__ = "rc564@cornell.edu, aganose@lbl.gov"
__date__ = "June 27, 2019"
class Control(MSONable, dict):
    """
    Class for reading, updating, and writing ShengBTE CONTROL files.
    See https://bitbucket.org/sousaw/shengbte/src/master/ for more
    detailed description and default values of CONTROL arguments.
    """

    # Parameters ShengBTE itself requires in order to run; to_file() emits
    # a warning for any that are missing.
    required_params = [
        "nelements",
        "natoms",
        "ngrid",
        "lattvec",
        "types",
        "elements",
        "positions",
        "scell",
    ]

    # Keys belonging to each namelist section of the CONTROL file.
    allocations_keys = ["nelements", "natoms", "ngrid", "norientations"]
    crystal_keys = [
        "lfactor",
        "lattvec",
        "types",
        "elements",
        "positions",
        "masses",
        "gfactors",
        "epsilon",
        "born",
        "scell",
        "orientations",
    ]
    params_keys = [
        "t",
        "t_min",
        "t_max",
        "t_step",
        "omega_max",
        "scalebroad",
        "rmin",
        "rmax",
        "dr",
        "maxiter",
        "nticks",
        "eps",
    ]
    flags_keys = [
        "nonanalytic",
        "convergence",
        "isotopes",
        "autoisotopes",
        "nanowires",
        "onlyharmonic",
        "espresso",
    ]

    def __init__(self, ngrid: Optional[List[int]] = None, temperature: Union[float, Dict[str, float]] = 300, **kwargs):
        """
        Args:
            ngrid: Reciprocal space grid density as a list of 3 ints.
            temperature: The temperature to calculate the lattice thermal
                conductivity for. Can be given as a single float, or a dictionary
                with the keys "min", "max", "step".
            **kwargs: Other ShengBTE parameters. Several parameters are required
                for ShengBTE to run - we have listed these parameters below:

                - nelements (int): number of different elements in the compound
                - natoms (int): number of atoms in the unit cell
                - lattvec (size 3x3 array): real-space lattice vectors, in units
                  of lfactor
                - lfactor (float): unit of measurement for lattice vectors (nm).
                  I.e., set to 0.1 if lattvec given in Angstrom.
                - types (size natom list): a vector of natom integers, ranging
                  from 1 to nelements, assigning an element to each atom in the
                  system
                - elements (size natom list): a vector of element names
                - positions (size natomx3 array): atomic positions in lattice
                  coordinates
                - scell (size 3 list): supercell sizes along each crystal axis
                  used for the 2nd-order force constant calculation

        Raises:
            ValueError: If ``temperature`` is neither a number nor a dict.
        """
        super().__init__()

        if ngrid is None:
            ngrid = [25, 25, 25]
        self["ngrid"] = ngrid

        if isinstance(temperature, (int, float)):
            # Single temperature -> "t"; a range -> "t_min"/"t_max"/"t_step".
            self["t"] = temperature
        elif isinstance(temperature, dict):
            self["t_min"] = temperature["min"]
            self["t_max"] = temperature["max"]
            self["t_step"] = temperature["step"]
        else:
            raise ValueError("Unsupported temperature type, must be float or dict")

        self.update(kwargs)

    @classmethod
    @requires(
        f90nml,
        "ShengBTE Control object requires f90nml to be installed. Please get it at https://pypi.org/project/f90nml.",
    )
    def from_file(cls, filepath: str):
        """
        Read a CONTROL namelist file and output a 'Control' object

        Args:
            filepath: Path of the CONTROL file.

        Returns:
            'Control' object with parameters instantiated.
        """
        nml = f90nml.read(filepath)
        sdict = nml.todict()

        # Flatten the four namelist sections into a single parameter dict.
        all_dict: Dict[str, Any] = {}
        all_dict.update(sdict["allocations"])
        all_dict.update(sdict["crystal"])
        all_dict.update(sdict["parameters"])
        all_dict.update(sdict["flags"])

        # f90nml adds this bookkeeping key; remove it defensively with a
        # default so a KeyError is not raised if the installed f90nml
        # version does not emit it.
        all_dict.pop("_start_index", None)

        return cls.from_dict(all_dict)

    @classmethod
    def from_dict(cls, control_dict: Dict):
        """
        Write a CONTROL file from a Python dictionary. Description and default
        parameters can be found at
        https://bitbucket.org/sousaw/shengbte/src/master/.
        Note some parameters are mandatory. Optional parameters default here to
        None and will not be written to file.

        Args:
            control_dict: A Python dictionary of ShengBTE input parameters.
        """
        return cls(**control_dict)

    @requires(
        f90nml,
        "ShengBTE Control object requires f90nml to be installed. Please get it at https://pypi.org/project/f90nml.",
    )
    def to_file(self, filename: str = "CONTROL"):
        """
        Writes ShengBTE CONTROL file from 'Control' object

        Args:
            filename: A file name.
        """
        # Warn (rather than fail) on missing required parameters, so partial
        # files can still be written and inspected.
        for param in self.required_params:
            if param not in self:
                warnings.warn(f"Required parameter '{param}' not specified!")

        # Emit the four namelist sections in the order ShengBTE expects.
        control_str = ""
        for section, keys in (
            ("allocations", self.allocations_keys),
            ("crystal", self.crystal_keys),
            ("parameters", self.params_keys),
            ("flags", self.flags_keys),
        ):
            nml = f90nml.Namelist({section: _get_subdict(self, keys)})
            control_str += str(nml) + "\n"

        with open(filename, "w") as file:
            file.write(control_str)

    @classmethod
    def from_structure(cls, structure: Structure, reciprocal_density: Optional[int] = 50000, **kwargs):
        """
        Get a ShengBTE control object from a structure.

        Args:
            structure: A structure object.
            reciprocal_density: If not None, the q-point grid ("ngrid") will be
                set using this density.
            kwargs: Additional options to be passed to the Control constructor.
                See the docstring of the __init__ method for more details

        Returns:
            A ShengBTE control object.
        """
        elements = list(map(str, structure.composition.elements))

        # Map each atomic number (sorted ascending) to a 1-based type index.
        # NOTE(review): this assumes the ordering of ``elements`` (from
        # Composition.elements) matches ascending atomic number - verify for
        # multi-element systems, since "types" must index into "elements".
        unique_nums = np.unique(structure.atomic_numbers)
        types_dict = dict(zip(unique_nums, range(len(unique_nums))))
        types = [types_dict[i] + 1 for i in structure.atomic_numbers]

        control_dict = {
            "nelements": structure.ntypesp,
            "natoms": structure.num_sites,
            "norientations": 0,
            "lfactor": 0.1,  # lattvec is in Angstrom; ShengBTE wants nm
            "lattvec": structure.lattice.matrix.tolist(),
            "elements": elements,
            "types": types,
            "positions": structure.frac_coords.tolist(),
        }

        if reciprocal_density:
            kpoints = Kpoints.automatic_density(structure, reciprocal_density)
            control_dict["ngrid"] = kpoints.kpts[0]

        control_dict.update(**kwargs)

        return Control(**control_dict)

    def get_structure(self) -> Structure:
        """
        Get a pymatgen Structure from a ShengBTE control object.

        The control object must have the "lattvec", "types", "elements", and
        "positions" settings otherwise an error will be thrown.

        Returns:
            The structure.

        Raises:
            ValueError: If any of the required settings are missing.
        """
        required = ["lattvec", "types", "elements", "positions"]
        if not all(r in self for r in required):
            raise ValueError("All of ['lattvec', 'types', 'elements', 'positions'] must be in control object")

        unique_elements = self["elements"]
        n_unique_elements = len(unique_elements)
        element_map = dict(zip(range(1, n_unique_elements + 1), unique_elements))
        species = [element_map[i] for i in self["types"]]

        # Force a float array: an int-valued lattvec would otherwise make the
        # in-place multiply below fail (numpy refuses int *= float casting).
        cell = np.array(self["lattvec"], dtype=float)

        if "lfactor" in self:
            cell *= self["lfactor"] * 10  # to nm then to Angstrom

        return Structure(cell, species, self["positions"])

    def as_dict(self):
        """
        Returns: MSONAble dict
        """
        return dict(self)
def _get_subdict(master_dict, subkeys):
"""Helper method to get a set of keys from a larger dictionary"""
return {k: master_dict[k] for k in subkeys if k in master_dict and master_dict[k] is not None}
| |
# -*- coding: iso-8859-1 -*-
#
# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Allan Saddi <allan@saddi.com>
"""
fcgi - a FastCGI/WSGI gateway.
For more information about FastCGI, see <http://www.fastcgi.com/>.
For more information about the Web Server Gateway Interface, see
<http://www.python.org/peps/pep-0333.html>.
Example usage:
#!/usr/bin/env python
from myapplication import app # Assume app is your WSGI application object
from fcgi import WSGIServer
WSGIServer(app).run()
See the documentation for WSGIServer/Server for more information.
On most platforms, fcgi will fallback to regular CGI behavior if run in a
non-FastCGI context. If you want to force CGI behavior, set the environment
variable FCGI_FORCE_CGI to "Y" or "y".
"""
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision: 2025 $'
import sys
import os
import signal
import struct
import cStringIO as StringIO
import select
import socket
import errno
import traceback
try:
import thread
import threading
thread_available = True
except ImportError:
import dummy_thread as thread
import dummy_threading as threading
thread_available = False
# Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case.
if not hasattr(socket, 'SHUT_WR'):
    socket.SHUT_WR = 1

__all__ = ['WSGIServer']

# Constants from the spec.
FCGI_LISTENSOCK_FILENO = 0

# Record header size (bytes) and protocol version.
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1

# Record types.
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE

# Request ID 0 is reserved for management records.
FCGI_NULL_REQUEST_ID = 0

# Mask for the flags field of FCGI_BEGIN_REQUEST.
FCGI_KEEP_CONN = 1

# Roles.
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3

# protocolStatus values for FCGI_END_REQUEST.
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3

# Variable names for FCGI_GET_VALUES management records.
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'

# struct format strings for the fixed-size record bodies.
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'

FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
if __debug__:
import time
# Set non-zero to write debug output to a file.
DEBUG = 0
DEBUGLOG = '/tmp/fcgi.log'
def _debug(level, msg):
if DEBUG < level:
return
try:
f = open(DEBUGLOG, 'a')
f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
f.close()
except:
pass
class InputStream(object):
    """
    File-like object representing FastCGI input streams (FCGI_STDIN and
    FCGI_DATA). Supports the minimum methods required by WSGI spec.

    Data is fed in by the owning Connection via add_data(); an empty
    add_data() call marks end-of-stream. Reads block (via _waitForData)
    until enough data has arrived or EOF is signaled.
    """
    def __init__(self, conn):
        self._conn = conn

        # See Server. Read data beyond this many bytes is discarded.
        self._shrinkThreshold = conn.server.inputStreamShrinkThreshold

        self._buf = ''        # Consolidated buffer of received data.
        self._bufList = []    # Chunks not yet merged into _buf.
        self._pos = 0 # Current read position.
        self._avail = 0 # Number of bytes currently available.

        self._eof = False # True when server has sent EOF notification.

    def _shrinkBuffer(self):
        """Gets rid of already read data (since we can't rewind)."""
        if self._pos >= self._shrinkThreshold:
            self._buf = self._buf[self._pos:]
            self._avail -= self._pos
            self._pos = 0

            assert self._avail >= 0

    def _waitForData(self):
        """Waits for more data to become available."""
        # Single-threaded case: pump the Connection directly.
        # (MultiplexedInputStream overrides this to wait on a Condition.)
        self._conn.process_input()

    def read(self, n=-1):
        """Read up to n bytes (all remaining data if n < 0)."""
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            if n < 0 or (self._avail - self._pos) < n:
                # Not enough data available.
                if self._eof:
                    # And there's no more coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more data.
                    self._waitForData()
                    continue
            else:
                newPos = self._pos + n
                break
        # Merge buffer list, if necessary.
        if self._bufList:
            self._buf += ''.join(self._bufList)
            self._bufList = []
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r

    def readline(self, length=None):
        """Read one line (optionally capped at length bytes)."""
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            # Unfortunately, we need to merge the buffer list early.
            if self._bufList:
                self._buf += ''.join(self._bufList)
                self._bufList = []
            # Find newline.
            i = self._buf.find('\n', self._pos)
            if i < 0:
                # Not found?
                if self._eof:
                    # No more data coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more to come.
                    self._waitForData()
                    continue
            else:
                newPos = i + 1
                break
        if length is not None:
            if self._pos + length < newPos:
                newPos = self._pos + length
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r

    def readlines(self, sizehint=0):
        """Read all lines; stop early once sizehint bytes have been read."""
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: one line per iteration.
        r = self.readline()
        if not r:
            raise StopIteration
        return r

    def add_data(self, data):
        """Called by the Connection; empty data signals end-of-stream."""
        if not data:
            self._eof = True
        else:
            self._bufList.append(data)
            self._avail += len(data)
class MultiplexedInputStream(InputStream):
    """
    InputStream variant for use with MultiplexedConnections, where the
    producing Connection and the consuming Request run in different
    threads. All buffer access is serialized through a Condition, which
    add_data() also uses to wake a reader blocked in _waitForData().
    """
    def __init__(self, conn):
        super(MultiplexedInputStream, self).__init__(conn)

        # The Condition both arbitrates access to this InputStream (it is
        # used simultaneously by a Request and its owning Connection) and
        # notifies the Request thread when new data arrives.
        self._lock = threading.Condition(threading.RLock())

    def _waitForData(self):
        # Block until add_data() posts a notification.
        self._lock.wait()

    def read(self, n=-1):
        with self._lock:
            return super(MultiplexedInputStream, self).read(n)

    def readline(self, length=None):
        with self._lock:
            return super(MultiplexedInputStream, self).readline(length)

    def add_data(self, data):
        with self._lock:
            super(MultiplexedInputStream, self).add_data(data)
            self._lock.notify()
class OutputStream(object):
    """
    FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to
    write() or writelines() immediately result in Records being sent back
    to the server. Buffering should be done in a higher level!
    """
    def __init__(self, conn, req, type, buffered=False):
        self._conn = conn
        self._req = req
        self._type = type
        self._buffered = buffered
        self._bufList = []  # Accumulates chunks when buffered is True.

        self.dataWritten = False
        self.closed = False

    def _write(self, data):
        # Chop data into Records no larger than the server's maxwrite
        # (minus header) and send each one immediately.
        remaining = len(data)
        while remaining:
            chunk = min(remaining, self._req.server.maxwrite - FCGI_HEADER_LEN)

            rec = Record(self._type, self._req.requestId)
            rec.contentLength = chunk
            rec.contentData = data[:chunk]
            self._conn.writeRecord(rec)

            data = data[chunk:]
            remaining -= chunk

    def write(self, data):
        assert not self.closed

        if not data:
            return

        self.dataWritten = True

        if not self._buffered:
            self._write(data)
        else:
            self._bufList.append(data)

    def writelines(self, lines):
        assert not self.closed

        for chunk in lines:
            self.write(chunk)

    def flush(self):
        # Only need to flush if this OutputStream is actually buffered.
        if not self._buffered:
            return
        pending, self._bufList = ''.join(self._bufList), []
        self._write(pending)

    # Though available, the following should NOT be called by WSGI apps.
    def close(self):
        """Sends end-of-stream notification, if necessary."""
        if self.closed or not self.dataWritten:
            return
        self.flush()
        # An empty Record of this stream's type marks end-of-stream.
        self._conn.writeRecord(Record(self._type, self._req.requestId))
        self.closed = True
class TeeOutputStream(object):
    """
    Fan-out wrapper around two or more output file-like objects: every
    write, writelines, and flush is forwarded to all wrapped streams.
    """
    def __init__(self, streamList):
        self._streamList = streamList

    def write(self, data):
        # Copy the chunk to every underlying stream.
        for stream in self._streamList:
            stream.write(data)

    def writelines(self, lines):
        # Forward line by line so each stream sees identical write calls.
        for data in lines:
            self.write(data)

    def flush(self):
        for stream in self._streamList:
            stream.flush()
class StdoutWrapper(object):
    """
    Proxy around sys.stdout that records (in dataWritten) whether any
    non-empty data has actually been written, delegating everything
    else to the underlying file object.
    """
    def __init__(self, stdout):
        self._file = stdout
        self.dataWritten = False

    def write(self, data):
        # Latch the flag once any non-empty chunk goes through.
        self.dataWritten = self.dataWritten or bool(data)
        self._file.write(data)

    def writelines(self, lines):
        for data in lines:
            self.write(data)

    def __getattr__(self, name):
        # Fall through to the wrapped file for any other attribute.
        return getattr(self._file, name)
def decode_pair(s, pos=0):
    """
    Decodes a name/value pair.

    The number of bytes decoded as well as the name/value pair
    are returned. Per the FastCGI spec, each length is a single byte
    when < 128, otherwise four bytes with the high bit set.
    """
    lengths = []
    for _ in (0, 1):
        n = ord(s[pos])
        if n & 128:
            # Four-byte length: mask off the high marker bit.
            n = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
            pos += 4
        else:
            pos += 1
        lengths.append(n)
    nameLength, valueLength = lengths

    name = s[pos:pos+nameLength]
    pos += nameLength
    value = s[pos:pos+valueLength]
    pos += valueLength

    return (pos, (name, value))
def encode_pair(name, value):
    """
    Encodes a name/value pair.

    The encoded string is returned. Per the FastCGI spec, each length is
    a single byte when < 128, otherwise four bytes with the high bit set.

    Note: the original used the Python-2-only long literal 0x80000000L;
    the suffix is unnecessary (ints auto-promote to long in Python 2)
    and removing it keeps the value identical while making the function
    parseable by Python 3 tooling.
    """
    def _encode_length(n):
        # One-byte short form, or four-byte form with the marker bit set.
        if n < 128:
            return chr(n)
        return struct.pack('!L', n | 0x80000000)

    return _encode_length(len(name)) + _encode_length(len(value)) + name + value
class Record(object):
    """
    A FastCGI Record.

    Used for encoding/decoding records. A record is an 8-byte header
    (version, type, requestId, contentLength, paddingLength) followed by
    contentLength bytes of content and paddingLength bytes of padding.
    """
    def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
        self.version = FCGI_VERSION_1
        self.type = type
        self.requestId = requestId
        self.contentLength = 0
        self.paddingLength = 0
        self.contentData = ''

    @staticmethod
    def _recvall(sock, length):
        """
        Attempts to receive length bytes from a socket, blocking if necessary.
        (Socket may be blocking or non-blocking.)

        Returns (data, bytesReceived); bytesReceived < length indicates EOF.
        """
        dataList = []
        recvLen = 0
        while length:
            try:
                data = sock.recv(length)
            except socket.error, e:
                if e[0] == errno.EAGAIN:
                    # Non-blocking socket not ready; wait for readability.
                    select.select([sock], [], [])
                    continue
                else:
                    raise
            if not data: # EOF
                break
            dataList.append(data)
            dataLen = len(data)
            recvLen += dataLen
            length -= dataLen
        return ''.join(dataList), recvLen

    def read(self, sock):
        """Read and decode a Record from a socket.

        Raises EOFError on any short read or socket failure.
        """
        try:
            header, length = self._recvall(sock, FCGI_HEADER_LEN)
        except:
            raise EOFError

        if length < FCGI_HEADER_LEN:
            raise EOFError

        self.version, self.type, self.requestId, self.contentLength, \
                      self.paddingLength = struct.unpack(FCGI_Header, header)

        if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))

        if self.contentLength:
            try:
                self.contentData, length = self._recvall(sock,
                                                         self.contentLength)
            except:
                raise EOFError

            if length < self.contentLength:
                raise EOFError

        if self.paddingLength:
            # Padding is read and discarded.
            try:
                self._recvall(sock, self.paddingLength)
            except:
                raise EOFError

    @staticmethod
    def _sendall(sock, data):
        """
        Writes data to a socket and does not return until all the data is sent.
        """
        length = len(data)
        while length:
            try:
                sent = sock.send(data)
            except socket.error, e:
                if e[0] == errno.EAGAIN:
                    # Non-blocking socket not ready; wait for writability.
                    select.select([], [sock], [])
                    continue
                else:
                    raise
            data = data[sent:]
            length -= sent

    def write(self, sock):
        """Encode and write a Record to a socket."""
        # Pad the content length up to the next multiple of 8 bytes.
        self.paddingLength = -self.contentLength & 7

        if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))

        header = struct.pack(FCGI_Header, self.version, self.type,
                             self.requestId, self.contentLength,
                             self.paddingLength)
        self._sendall(sock, header)
        if self.contentLength:
            self._sendall(sock, self.contentData)
        if self.paddingLength:
            self._sendall(sock, '\x00'*self.paddingLength)
class Request(object):
    """
    Represents a single FastCGI request.

    These objects are passed to your handler and is the main interface
    between your handler and the fcgi module. The methods should not
    be called by your handler. However, server, params, stdin, stdout,
    stderr, and data are free for your handler's use.
    """
    def __init__(self, conn, inputStreamClass):
        self._conn = conn

        self.server = conn.server
        self.params = {}                   # CGI-style environment, filled by FCGI_PARAMS.
        self.stdin = inputStreamClass(conn)
        self.stdout = OutputStream(conn, self, FCGI_STDOUT)
        # stderr is buffered so tracebacks arrive as one chunk.
        self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True)
        self.data = inputStreamClass(conn)  # FCGI_DATA stream (FILTER role).

    def run(self):
        """Runs the handler, flushes the streams, and ends the request."""
        try:
            protocolStatus, appStatus = self.server.handler(self)
        except:
            # Handler crashed: dump the traceback to FCGI_STDERR and, if
            # nothing was sent yet, emit the server's error page.
            traceback.print_exc(file=self.stderr)
            self.stderr.flush()
            if not self.stdout.dataWritten:
                self.server.error(self)

            protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0

        if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' %
                             (protocolStatus, appStatus))

        try:
            self._flush()
            self._end(appStatus, protocolStatus)
        except socket.error, e:
            # The web server hung up; anything else is a real error.
            if e[0] != errno.EPIPE:
                raise

    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        # Delegate FCGI_END_REQUEST bookkeeping to the owning Connection.
        self._conn.end_request(self, appStatus, protocolStatus)

    def _flush(self):
        # Closing sends the end-of-stream Records for both output streams.
        self.stdout.close()
        self.stderr.close()
class CGIRequest(Request):
    """A normal CGI request disguised as a FastCGI request.

    Used when the process is launched in a plain CGI context: streams
    map straight onto the process's stdin/stdout/stderr and the params
    come from os.environ.
    """
    def __init__(self, server):
        # These are normally filled in by Connection.
        self.requestId = 1
        self.role = FCGI_RESPONDER
        self.flags = 0
        self.aborted = False

        self.server = server
        self.params = dict(os.environ)
        self.stdin = sys.stdin
        self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity!
        self.stderr = sys.stderr
        self.data = StringIO.StringIO()

    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        # In CGI mode, the application status becomes the exit code.
        sys.exit(appStatus)

    def _flush(self):
        # Not buffered, do nothing.
        pass
class Connection(object):
    """
    A Connection with the web server.

    Each Connection is associated with a single socket (which is
    connected to the web server) and is responsible for handling all
    the FastCGI message processing for that socket.
    """
    # Subclasses flip these to support concurrent requests per connection.
    _multiplexed = False
    _inputStreamClass = InputStream

    def __init__(self, sock, addr, server):
        self._sock = sock
        self._addr = addr
        self.server = server

        # Active Requests for this Connection, mapped by request ID.
        self._requests = {}

    def _cleanupSocket(self):
        """Close the Connection's socket."""
        try:
            self._sock.shutdown(socket.SHUT_WR)
        except:
            return
        try:
            # Drain any remaining inbound data before closing.
            while True:
                r, w, e = select.select([self._sock], [], [])
                if not r or not self._sock.recv(1024):
                    break
        except:
            pass
        self._sock.close()

    def run(self):
        """Begin processing data from the socket."""
        self._keepGoing = True
        while self._keepGoing:
            try:
                self.process_input()
            except EOFError:
                break
            except (select.error, socket.error), e:
                if e[0] == errno.EBADF: # Socket was closed by Request.
                    break
                raise

        self._cleanupSocket()

    def process_input(self):
        """Attempt to read a single Record from the socket and process it."""
        # Currently, any children Request threads notify this Connection
        # that it is no longer needed by closing the Connection's socket.
        # We need to put a timeout on select, otherwise we might get
        # stuck in it indefinitely... (I don't like this solution.)
        while self._keepGoing:
            try:
                r, w, e = select.select([self._sock], [], [], 1.0)
            except ValueError:
                # Sigh. ValueError gets thrown sometimes when passing select
                # a closed socket.
                raise EOFError
            if r: break
        if not self._keepGoing:
            return
        rec = Record()
        rec.read(self._sock)

        # Dispatch on the record type; stream records are routed to the
        # matching active Request by requestId.
        if rec.type == FCGI_GET_VALUES:
            self._do_get_values(rec)
        elif rec.type == FCGI_BEGIN_REQUEST:
            self._do_begin_request(rec)
        elif rec.type == FCGI_ABORT_REQUEST:
            self._do_abort_request(rec)
        elif rec.type == FCGI_PARAMS:
            self._do_params(rec)
        elif rec.type == FCGI_STDIN:
            self._do_stdin(rec)
        elif rec.type == FCGI_DATA:
            self._do_data(rec)
        elif rec.requestId == FCGI_NULL_REQUEST_ID:
            self._do_unknown_type(rec)
        else:
            # Need to complain about this.
            pass

    def writeRecord(self, rec):
        """
        Write a Record to the socket.
        """
        rec.write(self._sock)

    def end_request(self, req, appStatus=0L,
                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        """
        End a Request.

        Called by Request objects. An FCGI_END_REQUEST Record is
        sent to the web server. If the web server no longer requires
        the connection, the socket is closed, thereby ending this
        Connection (run() returns).
        """
        rec = Record(FCGI_END_REQUEST, req.requestId)
        rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
                                      protocolStatus)
        rec.contentLength = FCGI_EndRequestBody_LEN
        self.writeRecord(rec)

        if remove:
            del self._requests[req.requestId]

        if __debug__: _debug(2, 'end_request: flags = %d' % req.flags)

        # Without FCGI_KEEP_CONN and with no requests left, shut down.
        if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
            self._cleanupSocket()
            self._keepGoing = False

    def _do_get_values(self, inrec):
        """Handle an FCGI_GET_VALUES request from the web server."""
        outrec = Record(FCGI_GET_VALUES_RESULT)

        pos = 0
        while pos < inrec.contentLength:
            pos, (name, value) = decode_pair(inrec.contentData, pos)
            cap = self.server.capability.get(name)
            if cap is not None:
                # Only echo back capabilities this server knows about.
                outrec.contentData += encode_pair(name, str(cap))

        outrec.contentLength = len(outrec.contentData)
        self.writeRecord(outrec)

    def _do_begin_request(self, inrec):
        """Handle an FCGI_BEGIN_REQUEST from the web server."""
        role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)

        req = self.server.request_class(self, self._inputStreamClass)
        req.requestId, req.role, req.flags = inrec.requestId, role, flags
        req.aborted = False

        if not self._multiplexed and self._requests:
            # Can't multiplex requests.
            self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False)
        else:
            self._requests[inrec.requestId] = req

    def _do_abort_request(self, inrec):
        """
        Handle an FCGI_ABORT_REQUEST from the web server.

        We just mark a flag in the associated Request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.aborted = True

    def _start_request(self, req):
        """Run the request."""
        # Not multiplexed, so run it inline.
        req.run()

    def _do_params(self, inrec):
        """
        Handle an FCGI_PARAMS Record.

        If the last FCGI_PARAMS Record is received, start the request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            if inrec.contentLength:
                pos = 0
                while pos < inrec.contentLength:
                    pos, (name, value) = decode_pair(inrec.contentData, pos)
                    req.params[name] = value
            else:
                # An empty FCGI_PARAMS record terminates the stream.
                self._start_request(req)

    def _do_stdin(self, inrec):
        """Handle the FCGI_STDIN stream."""
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.stdin.add_data(inrec.contentData)

    def _do_data(self, inrec):
        """Handle the FCGI_DATA stream."""
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.data.add_data(inrec.contentData)

    def _do_unknown_type(self, inrec):
        """Handle an unknown request type. Respond accordingly."""
        outrec = Record(FCGI_UNKNOWN_TYPE)
        outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
        outrec.contentLength = FCGI_UnknownTypeBody_LEN
        self.writeRecord(outrec)
class MultiplexedConnection(Connection):
    """
    A version of Connection capable of handling multiple requests
    simultaneously.

    Each request runs in its own thread; a Condition serializes access
    to shared state (the request table and the socket).
    """
    _multiplexed = True
    _inputStreamClass = MultiplexedInputStream

    def __init__(self, sock, addr, server):
        super(MultiplexedConnection, self).__init__(sock, addr, server)

        # Used to arbitrate access to self._requests.
        lock = threading.RLock()

        # Notification is posted everytime a request completes, allowing us
        # to quit cleanly.
        self._lock = threading.Condition(lock)

    def _cleanupSocket(self):
        # Wait for any outstanding requests before closing the socket.
        self._lock.acquire()
        while self._requests:
            self._lock.wait()
        self._lock.release()

        super(MultiplexedConnection, self)._cleanupSocket()

    def writeRecord(self, rec):
        # Must use locking to prevent intermingling of Records from different
        # threads.
        self._lock.acquire()
        try:
            # Probably faster than calling super. ;)
            rec.write(self._sock)
        finally:
            self._lock.release()

    def end_request(self, req, appStatus=0L,
                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self).end_request(req, appStatus,
                                                           protocolStatus,
                                                           remove)
            # Wake _cleanupSocket(), which may be waiting for the request
            # table to drain.
            self._lock.notify()
        finally:
            self._lock.release()

    def _do_begin_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_begin_request(inrec)
        finally:
            self._lock.release()

    def _do_abort_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_abort_request(inrec)
        finally:
            self._lock.release()

    def _start_request(self, req):
        # Unlike the base class, run each request in its own thread.
        thread.start_new_thread(req.run, ())

    def _do_params(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_params(inrec)
        finally:
            self._lock.release()

    def _do_stdin(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_stdin(inrec)
        finally:
            self._lock.release()

    def _do_data(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_data(inrec)
        finally:
            self._lock.release()
class Server(object):
"""
The FastCGI server.
Waits for connections from the web server, processing each
request.
If run in a normal CGI context, it will instead instantiate a
CGIRequest and run the handler through there.
"""
request_class = Request
cgirequest_class = CGIRequest
# Limits the size of the InputStream's string buffer to this size + the
# server's maximum Record size. Since the InputStream is not seekable,
# we throw away already-read data once this certain amount has been read.
inputStreamShrinkThreshold = 102400 - 8192
def __init__(self, handler=None, maxwrite=8192, bindAddress=None,
umask=None, multiplexed=False):
"""
handler, if present, must reference a function or method that
takes one argument: a Request object. If handler is not
specified at creation time, Server *must* be subclassed.
(The handler method below is abstract.)
maxwrite is the maximum number of bytes (per Record) to write
to the server. I've noticed mod_fastcgi has a relatively small
receive buffer (8K or so).
bindAddress, if present, must either be a string or a 2-tuple. If
present, run() will open its own listening socket. You would use
this if you wanted to run your application as an 'external' FastCGI
app. (i.e. the webserver would no longer be responsible for starting
your app) If a string, it will be interpreted as a filename and a UNIX
socket will be opened. If a tuple, the first element, a string,
is the interface name/IP to bind to, and the second element (an int)
is the port number.
Set multiplexed to True if you want to handle multiple requests
per connection. Some FastCGI backends (namely mod_fastcgi) don't
multiplex requests at all, so by default this is off (which saves
on thread creation/locking overhead). If threads aren't available,
this keyword is ignored; it's not possible to multiplex requests
at all.
"""
if handler is not None:
self.handler = handler
self.maxwrite = maxwrite
if thread_available:
try:
import resource
# Attempt to glean the maximum number of connections
# from the OS.
maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
except (ImportError, AttributeError):
maxConns = 100 # Just some made up number.
maxReqs = maxConns
if multiplexed:
self._connectionClass = MultiplexedConnection
maxReqs *= 5 # Another made up number.
else:
self._connectionClass = Connection
self.capability = {
FCGI_MAX_CONNS: maxConns,
FCGI_MAX_REQS: maxReqs,
FCGI_MPXS_CONNS: multiplexed and 1 or 0
}
else:
self._connectionClass = Connection
self.capability = {
# If threads aren't available, these are pretty much correct.
FCGI_MAX_CONNS: 1,
FCGI_MAX_REQS: 1,
FCGI_MPXS_CONNS: 0
}
self._bindAddress = bindAddress
self._umask = umask
def _setupSocket(self):
if self._bindAddress is None: # Run as a normal FastCGI?
isFCGI = True
sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
socket.SOCK_STREAM)
try:
sock.getpeername()
except socket.error, e:
if e[0] == errno.ENOTSOCK:
# Not a socket, assume CGI context.
isFCGI = False
elif e[0] != errno.ENOTCONN:
raise
# FastCGI/CGI discrimination is broken on Mac OS X.
# Set the environment variable FCGI_FORCE_CGI to "Y" or "y"
# if you want to run your app as a simple CGI. (You can do
# this with Apache's mod_env [not loaded by default in OS X
# client, ha ha] and the SetEnv directive.)
if not isFCGI or \
os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'):
req = self.cgirequest_class(self)
req.run()
sys.exit(0)
else:
# Run as a server
oldUmask = None
if type(self._bindAddress) is str:
# Unix socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.unlink(self._bindAddress)
except OSError:
pass
if self._umask is not None:
oldUmask = os.umask(self._umask)
else:
# INET socket
assert type(self._bindAddress) is tuple
assert len(self._bindAddress) == 2
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(self._bindAddress)
sock.listen(socket.SOMAXCONN)
if oldUmask is not None:
os.umask(oldUmask)
return sock
def _cleanupSocket(self, sock):
"""Closes the main socket."""
sock.close()
    def _installSignalHandlers(self):
        """
        Install the server's signal handlers, remembering the previous
        dispositions so _restoreSignalHandlers can reinstate them.

        HUP triggers a graceful stop flagged as a reload request; INT and
        TERM trigger a plain shutdown of the main loop.
        """
        self._oldSIGs = [(x,signal.getsignal(x)) for x in
                         (signal.SIGHUP, signal.SIGINT, signal.SIGTERM)]
        signal.signal(signal.SIGHUP, self._hupHandler)
        signal.signal(signal.SIGINT, self._intHandler)
        signal.signal(signal.SIGTERM, self._intHandler)
def _restoreSignalHandlers(self):
for signum,handler in self._oldSIGs:
signal.signal(signum, handler)
def _hupHandler(self, signum, frame):
self._hupReceived = True
self._keepGoing = False
def _intHandler(self, signum, frame):
self._keepGoing = False
    def run(self, timeout=1.0):
        """
        The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if
        SIGHUP was received, False otherwise.

        timeout is the select() timeout in seconds: between accepted
        connections the loop wakes at least this often to run
        _mainloopPeriodic() and to notice that a signal handler cleared
        _keepGoing.
        """
        # Optional allow-list of web-server IPs (mod_fastcgi convention).
        web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS')
        if web_server_addrs is not None:
            web_server_addrs = map(lambda x: x.strip(),
                                   web_server_addrs.split(','))
        sock = self._setupSocket()
        self._keepGoing = True
        self._hupReceived = False
        # Install signal handlers.
        self._installSignalHandlers()
        while self._keepGoing:
            try:
                r, w, e = select.select([sock], [], [], timeout)
            except select.error, e:
                # A signal interrupted select(); just retry.
                if e[0] == errno.EINTR:
                    continue
                raise
            if r:
                try:
                    clientSock, addr = sock.accept()
                except socket.error, e:
                    # Transient accept failures: retry the loop.
                    if e[0] in (errno.EINTR, errno.EAGAIN):
                        continue
                    raise
                # Drop connections from peers not in the allow-list (or
                # with non-INET addresses, which have no comparable host).
                if web_server_addrs and \
                   (len(addr) != 2 or addr[0] not in web_server_addrs):
                    clientSock.close()
                    continue
                # Instantiate a new Connection and begin processing FastCGI
                # messages (either in a new thread or this thread).
                conn = self._connectionClass(clientSock, addr, self)
                thread.start_new_thread(conn.run, ())
            self._mainloopPeriodic()
        # Restore signal handlers.
        self._restoreSignalHandlers()
        self._cleanupSocket(sock)
        return self._hupReceived
def _mainloopPeriodic(self):
"""
Called with just about each iteration of the main loop. Meant to
be overridden.
"""
pass
def _exit(self, reload=False):
"""
Protected convenience method for subclasses to force an exit. Not
really thread-safe, which is why it isn't public.
"""
if self._keepGoing:
self._keepGoing = False
self._hupReceived = reload
def handler(self, req):
"""
Default handler, which just raises an exception. Unless a handler
is passed at initialization time, this must be implemented by
a subclass.
"""
raise NotImplementedError, self.__class__.__name__ + '.handler'
    def error(self, req):
        """
        Called by Request if an exception occurs within the handler. May and
        should be overridden.

        Default behavior: write a cgitb-formatted HTML traceback of the
        active exception to the client via req.stdout.
        """
        import cgitb
        req.stdout.write('Content-Type: text/html\r\n\r\n' +
                         cgitb.html(sys.exc_info()))
class WSGIServer(Server):
    """
    FastCGI server that supports the Web Server Gateway Interface. See
    <http://www.python.org/peps/pep-0333.html>.
    """
    def __init__(self, application, environ=None,
                 multithreaded=True, **kw):
        """
        environ, if present, must be a dictionary-like object. Its
        contents will be copied into application's environ. Useful
        for passing application-specific variables.

        Set multithreaded to False if your application is not MT-safe.
        """
        # A custom handler makes no sense here: handler() below *is* the
        # WSGI gateway.
        if 'handler' in kw:
            del kw['handler'] # Doesn't make sense to let this through
        super(WSGIServer, self).__init__(**kw)
        if environ is None:
            environ = {}
        self.application = application
        self.environ = environ
        self.multithreaded = multithreaded
        # Used to force single-threadedness
        self._app_lock = thread.allocate_lock()
    def handler(self, req):
        """Special handler for WSGI.

        Builds the WSGI environ from the FastCGI params, runs the
        application, and streams its output back through req.stdout.
        """
        if req.role != FCGI_RESPONDER:
            return FCGI_UNKNOWN_ROLE, 0
        # Mostly taken from example CGI gateway.
        environ = req.params
        environ.update(self.environ)
        environ['wsgi.version'] = (1,0)
        environ['wsgi.input'] = req.stdin
        if self._bindAddress is None:
            stderr = req.stderr
        else:
            # Standalone server: mirror application errors to our own
            # stderr as well as the web server's error stream.
            stderr = TeeOutputStream((sys.stderr, req.stderr))
        environ['wsgi.errors'] = stderr
        environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \
                                      thread_available and self.multithreaded
        # Rationale for the following: If started by the web server
        # (self._bindAddress is None) in either FastCGI or CGI mode, the
        # possibility of being spawned multiple times simultaneously is quite
        # real. And, if started as an external server, multiple copies may be
        # spawned for load-balancing/redundancy. (Though I don't think
        # mod_fastcgi supports this?)
        environ['wsgi.multiprocess'] = True
        environ['wsgi.run_once'] = isinstance(req, CGIRequest)
        if environ.get('HTTPS', 'off') in ('on', '1'):
            environ['wsgi.url_scheme'] = 'https'
        else:
            environ['wsgi.url_scheme'] = 'http'
        self._sanitizeEnv(environ)
        # headers_set/headers_sent are shared with the closures below, per
        # the PEP 333 example server.
        headers_set = []
        headers_sent = []
        result = None
        def write(data):
            # Lazily emit the status line and headers just before the first
            # body bytes.
            assert type(data) is str, 'write() argument must be string'
            assert headers_set, 'write() before start_response()'
            if not headers_sent:
                status, responseHeaders = headers_sent[:] = headers_set
                found = False
                for header,value in responseHeaders:
                    if header.lower() == 'content-length':
                        found = True
                        break
                if not found and result is not None:
                    try:
                        # If the app's iterable has exactly one item, this
                        # first write is the whole body, so its length is
                        # the Content-Length.
                        if len(result) == 1:
                            responseHeaders.append(('Content-Length',
                                                    str(len(data))))
                    except:
                        pass
                s = 'Status: %s\r\n' % status
                for header in responseHeaders:
                    s += '%s: %s\r\n' % header
                s += '\r\n'
                req.stdout.write(s)
            req.stdout.write(data)
            req.stdout.flush()
        def start_response(status, response_headers, exc_info=None):
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise if too late
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    exc_info = None # avoid dangling circular ref
            else:
                assert not headers_set, 'Headers already set!'
            assert type(status) is str, 'Status must be a string'
            assert len(status) >= 4, 'Status must be at least 4 characters'
            assert int(status[:3]), 'Status must begin with 3-digit code'
            assert status[3] == ' ', 'Status must have a space after code'
            assert type(response_headers) is list, 'Headers must be a list'
            if __debug__:
                for name,val in response_headers:
                    assert type(name) is str, 'Header names must be strings'
                    assert type(val) is str, 'Header values must be strings'
            headers_set[:] = [status, response_headers]
            return write
        # Serialize requests when the application is not MT-safe.
        if not self.multithreaded:
            self._app_lock.acquire()
        try:
            try:
                result = self.application(environ, start_response)
                try:
                    for data in result:
                        if data:
                            write(data)
                    if not headers_sent:
                        write('') # in case body was empty
                finally:
                    if hasattr(result, 'close'):
                        result.close()
            except socket.error, e:
                if e[0] != errno.EPIPE:
                    raise # Don't let EPIPE propagate beyond server
        finally:
            if not self.multithreaded:
                self._app_lock.release()
        return FCGI_REQUEST_COMPLETE, 0
    def _sanitizeEnv(self, environ):
        """Ensure certain values are present, if required by WSGI."""
        if 'SCRIPT_NAME' not in environ:
            environ['SCRIPT_NAME'] = ''
        if 'PATH_INFO' not in environ:
            environ['PATH_INFO'] = ''
        # If any of these are missing, it probably signifies a broken
        # server...
        for name,default in [('REQUEST_METHOD', 'GET'),
                             ('SERVER_NAME', 'localhost'),
                             ('SERVER_PORT', '80'),
                             ('SERVER_PROTOCOL', 'HTTP/1.0')]:
            if name not in environ:
                environ['wsgi.errors'].write('%s: missing FastCGI param %s '
                                             'required by WSGI!\n' %
                                             (self.__class__.__name__, name))
                environ[name] = default
if __name__ == '__main__':
    # Smoke test: serve a page that dumps the WSGI environ and any
    # submitted form data.
    def test_app(environ, start_response):
        """Probably not the most efficient example."""
        import cgi
        start_response('200 OK', [('Content-Type', 'text/html')])
        yield '<html><head><title>Hello World!</title></head>\n' \
              '<body>\n' \
              '<p>Hello World!</p>\n' \
              '<table border="1">'
        names = environ.keys()
        names.sort()
        for name in names:
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                name, cgi.escape(`environ[name]`))
        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
                                keep_blank_values=1)
        if form.list:
            yield '<tr><th colspan="2">Form data</th></tr>'
        for field in form.list:
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                field.name, field.value)
        yield '</table>\n' \
              '</body></html>\n'
    WSGIServer(test_app).run()
| |
"""Test spherical harmonic models and the tools associated with those models.
"""
import warnings
import numpy as np
import numpy.linalg as npl
from dipy.testing import assert_true
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_raises, run_module_suite)
from scipy.special import sph_harm as sph_harm_sp
from dipy.core.sphere import hemi_icosahedron
from dipy.core.gradients import gradient_table
from dipy.core.interpolation import NearestNeighborInterpolator
from dipy.sims.voxel import single_tensor
from dipy.direction.peaks import peak_directions
from dipy.reconst.shm import sf_to_sh, sh_to_sf
from dipy.sims.voxel import multi_tensor_odf
from dipy.data import mrtrix_spherical_functions
from dipy.reconst import odf
from dipy.reconst.shm import (real_sph_harm, real_sym_sh_basis,
real_sym_sh_mrtrix, sph_harm_ind_list,
order_from_ncoef,
OpdtModel, normalize_data, hat, lcr_matrix,
smooth_pinv, bootstrap_data_array,
bootstrap_data_voxel, ResidualBootstrapWrapper,
CsaOdfModel, QballModel, SphHarmFit,
spherical_harmonics, anisotropic_power,
calculate_max_order)
def test_order_from_ncoeff():
    """order_from_ncoef inverts the coefficient count of several SH orders."""
    for expected_order in (2, 4, 6, 8, 12, 24):
        m_values, _ = sph_harm_ind_list(expected_order)
        assert_equal(order_from_ncoef(m_values.shape[0]), expected_order)
def test_sph_harm_ind_list():
    """Order-8 symmetric basis: 45 terms, |m| <= n, n even, odd order rejected."""
    degrees, orders = sph_harm_ind_list(8)
    assert_equal(degrees.shape, orders.shape)
    assert_equal(degrees.shape, (45,))
    assert_true(np.all(np.abs(degrees) <= orders))
    assert_array_equal(orders % 2, 0)
    # Odd spherical-harmonic orders are invalid for the symmetric basis.
    assert_raises(ValueError, sph_harm_ind_list, 1)
def test_real_sph_harm():
    # Tests derived from tables in
    # http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
    # where real spherical harmonic $Y^m_n$ is defined to be:
    #    Real($Y^m_n$) * sqrt(2) if m > 0
    #    $Y^m_n$                 if m == 0
    #    Imag($Y^m_n$) * sqrt(2) if m < 0
    rsh = real_sph_harm
    pi = np.pi
    sqrt = np.sqrt
    sin = np.sin
    cos = np.cos
    # Spot-check selected (m, n) entries at fixed (theta, phi) angles
    # against the closed-form expressions from the table above.
    assert_array_almost_equal(rsh(0, 0, 0, 0),
                              0.5 / sqrt(pi))
    assert_array_almost_equal(rsh(-2, 2, pi / 5, pi / 3),
                              0.25 * sqrt(15. / (2. * pi)) *
                              (sin(pi / 5.)) ** 2. * cos(0 + 2. * pi / 3) *
                              sqrt(2))
    assert_array_almost_equal(rsh(2, 2, pi / 5, pi / 3),
                              -1 * 0.25 * sqrt(15. / (2. * pi)) *
                              (sin(pi / 5.)) ** 2. * sin(0 - 2. * pi / 3) *
                              sqrt(2))
    assert_array_almost_equal(rsh(-2, 2, pi / 2, pi),
                              0.25 * sqrt(15 / (2. * pi)) *
                              cos(2. * pi) * sin(pi / 2.) ** 2. * sqrt(2))
    assert_array_almost_equal(rsh(2, 4, pi / 3., pi / 4.),
                              -1 * (3. / 8.) * sqrt(5. / (2. * pi)) *
                              sin(0 - 2. * pi / 4.) *
                              sin(pi / 3.) ** 2. *
                              (7. * cos(pi / 3.) ** 2. - 1) * sqrt(2))
    assert_array_almost_equal(rsh(-4, 4, pi / 6., pi / 8.),
                              (3. / 16.) * sqrt(35. / (2. * pi)) *
                              cos(0 + 4. * pi / 8.) * sin(pi / 6.) ** 4. *
                              sqrt(2))
    assert_array_almost_equal(rsh(4, 4, pi / 6., pi / 8.),
                              -1 * (3. / 16.) * sqrt(35. / (2. * pi)) *
                              sin(0 - 4. * pi / 8.) * sin(pi / 6.) ** 4. *
                              sqrt(2))
    # Broadcasting: the four arguments broadcast against each other, so the
    # output shape is the broadcast of all input shapes.
    aa = np.ones((3, 1, 1, 1))
    bb = np.ones((1, 4, 1, 1))
    cc = np.ones((1, 1, 5, 1))
    dd = np.ones((1, 1, 1, 6))
    assert_equal(rsh(aa, bb, cc, dd).shape, (3, 4, 5, 6))
def test_real_sym_sh_mrtrix():
    """Evaluating the mrtrix SH basis reproduces stored reference values."""
    coef, expected, sphere = mrtrix_spherical_functions()
    basis, _, _ = real_sym_sh_mrtrix(8, sphere.theta, sphere.phi)
    reconstructed = np.dot(coef, basis.T)
    assert_array_almost_equal(reconstructed, expected, 4)
def test_real_sym_sh_basis():
    """descoteaux07 basis == reordered and rescaled tournier07/mrtrix basis."""
    # Mapping from the mrtrix coefficient ordering to descoteaux07's.
    reorder = [0, 5, 4, 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, 7, 6]
    sample_sphere = hemi_icosahedron.subdivide(2)
    mrtrix_basis, m, n = real_sym_sh_mrtrix(4, sample_sphere.theta,
                                            sample_sphere.phi)
    expected = mrtrix_basis[:, reorder]
    # m != 0 terms pick up a sqrt(2) factor in the descoteaux07 convention.
    expected *= np.where(m == 0, 1., np.sqrt(2))
    descoteaux07_basis, m, n = real_sym_sh_basis(4, sample_sphere.theta,
                                                 sample_sphere.phi)
    assert_array_almost_equal(descoteaux07_basis, expected)
def test_smooth_pinv():
    """smooth_pinv must equal the explicit regularized pseudo-inverse."""
    hemi = hemi_icosahedron.subdivide(2)
    m, n = sph_harm_ind_list(4)
    B = real_sph_harm(m, n, hemi.theta[:, None], hemi.phi[:, None])

    def expected_pinv(diag_weights):
        reg = np.diag(diag_weights)
        return np.dot(npl.inv(np.dot(B.T, B) + reg * reg), B.T)

    # No regularization: plain least-squares pseudo-inverse.
    L = np.zeros(len(m))
    assert_array_almost_equal(smooth_pinv(B, L),
                              np.dot(npl.inv(np.dot(B.T, B)), B.T))
    # Laplace-Beltrami weights and arbitrary per-coefficient weights.
    for weights in (n * (n + 1) * .05, np.arange(len(n)) * .05):
        assert_array_almost_equal(smooth_pinv(B, weights),
                                  expected_pinv(weights))
def test_normalize_data():
    # Signal counts down from 65 to 1; the leading measurement(s) play the
    # role of the b0 reference.
    sig = np.arange(1, 66)[::-1]
    where_b0 = np.zeros(65, 'bool')
    where_b0[0] = True
    # Normalizing in place (out is the input) must be rejected.
    assert_raises(ValueError, normalize_data, sig, where_b0, out=sig)
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 65.)
    # Values below min_signal are clipped up before dividing by b0.
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[-5:], 5 / 65.)
    # Two b0 volumes: normalize by their mean, (65 + 64) / 2 = 64.5.
    where_b0[[0, 1]] = [True, True]
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 64.5)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[-5:], 5 / 64.5)
    # Repeat the same checks on a multi-voxel (2, 3, 65) array.
    sig = sig * np.ones((2, 3, 1))
    where_b0[[0, 1]] = [True, False]
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 65.)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[..., -5:], 5 / 65.)
    where_b0[[0, 1]] = [True, True]
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 64.5)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[..., -5:], 5 / 64.5)
def make_fake_signal():
    """Simulate a two-tensor 90-degree crossing on a subdivided hemisphere.

    Returns (signal, gtab, directions) where directions stacks the two
    principal axes of the crossing tensors.
    """
    hemisphere = hemi_icosahedron.subdivide(2)
    gradients = np.concatenate(([[0, 0, 0]], hemisphere.vertices))
    b_values = np.zeros(len(gradients)) + 2000
    b_values[0] = 0  # the first entry is the b0 measurement
    gtab = gradient_table(b_values, gradients)
    # Two cigar-shaped tensors whose long axes are x and y respectively.
    eigenvalues = np.array([[2.1, .2, .2], [.2, 2.1, .2]]) * 10 ** -3
    frame = np.eye(3)
    signal = (single_tensor(gtab, .55, eigenvalues[0], frame) +
              single_tensor(gtab, .45, eigenvalues[1], frame))
    peak_axes = np.vstack([frame[0], frame[1]])
    return signal, gtab, peak_axes
class TestQballModel(object):
    """Shared checks for single-shell spherical-harmonic ODF models.

    Subclasses (TestOpdtModel, TestCsaOdfModel) override only ``model``
    and inherit the whole suite.
    """

    model = QballModel

    def test_single_voxel_fit(self):
        """A single crossing voxel yields the two expected peak directions."""
        signal, gtab, expected = make_fake_signal()
        sphere = hemi_icosahedron.subdivide(4)
        model = self.model(gtab, sh_order=4, min_signal=1e-5,
                           assume_normed=True)
        fit = model.fit(signal)
        odf = fit.odf(sphere)
        assert_equal(odf.shape, sphere.phi.shape)
        directions, _, _ = peak_directions(odf, sphere)
        # Check the same number of directions
        n = len(expected)
        assert_equal(len(directions), n)
        # Check directions are unit vectors
        cos_similarity = (directions * directions).sum(-1)
        assert_array_almost_equal(cos_similarity, np.ones(n))
        # Check the directions == expected or -expected
        cos_similarity = (directions * expected).sum(-1)
        assert_array_almost_equal(abs(cos_similarity), np.ones(n))
        # Test normalize data: scaling the raw signal must not change the
        # odf when the model normalizes internally.
        model = self.model(gtab, sh_order=4, min_signal=1e-5,
                           assume_normed=False)
        fit = model.fit(signal * 5)
        odf_with_norm = fit.odf(sphere)
        assert_array_almost_equal(odf, odf_with_norm)

    # NOTE(review): renamed from ``test_mulit_voxel_fit`` (typo); discovery
    # by the ``test_`` prefix is unaffected.
    def test_multi_voxel_fit(self):
        """Fitting a stacked (2, N) signal and honoring the fit mask."""
        signal, gtab, expected = make_fake_signal()
        sphere = hemi_icosahedron
        nd_signal = np.vstack([signal, signal])
        model = self.model(gtab, sh_order=4, min_signal=1e-5,
                           assume_normed=True)
        fit = model.fit(nd_signal)
        odf = fit.odf(sphere)
        assert_equal(odf.shape, (2,) + sphere.phi.shape)
        # Test fitting with mask, where mask is False odf should be 0
        fit = model.fit(nd_signal, mask=[False, True])
        odf = fit.odf(sphere)
        assert_array_equal(odf[0], 0.)

    def test_sh_order(self):
        """Basis size and maximum order track the requested sh_order."""
        signal, gtab, expected = make_fake_signal()
        model = self.model(gtab, sh_order=4, min_signal=1e-5)
        assert_equal(model.B.shape[1], 15)
        assert_equal(max(model.n), 4)
        model = self.model(gtab, sh_order=6, min_signal=1e-5)
        assert_equal(model.B.shape[1], 28)
        assert_equal(max(model.n), 6)

    def test_gfa(self):
        """fit.gfa matches gfa of the sampled ODF and is 0 where masked."""
        signal, gtab, expected = make_fake_signal()
        signal = np.ones((2, 3, 4, 1)) * signal
        sphere = hemi_icosahedron.subdivide(3)
        model = self.model(gtab, 6, min_signal=1e-5)
        fit = model.fit(signal)
        gfa_shm = fit.gfa
        gfa_odf = odf.gfa(fit.odf(sphere))
        assert_array_almost_equal(gfa_shm, gfa_odf, 3)
        # gfa should be 0 if all coefficients are 0 (masked areas)
        mask = np.zeros(signal.shape[:-1])
        fit = model.fit(signal, mask)
        assert_array_equal(fit.gfa, 0)

    def test_min_signal_default(self):
        """Omitting min_signal behaves the same as min_signal=1e-5."""
        signal, gtab, expected = make_fake_signal()
        model_default = self.model(gtab, 4)
        shm_default = model_default.fit(signal).shm_coeff
        model_correct = self.model(gtab, 4, min_signal=1e-5)
        shm_correct = model_correct.fit(signal).shm_coeff
        assert_equal(shm_default, shm_correct)
def test_SphHarmFit():
    """Indexing a SphHarmFit indexes the spatial axes only; the coefficient
    axis is never part of the reported shape."""
    coef = np.zeros((3, 4, 5, 45))
    mask = np.zeros((3, 4, 5), dtype=bool)
    fit = SphHarmFit(None, coef, mask)
    # Scalar index over all three spatial axes: zero-dimensional fit.
    item = fit[0, 0, 0]
    assert_equal(item.shape, ())
    # Partial indexing drops only the indexed axes.
    # (local renamed from ``slice``, which shadowed the builtin)
    subfit = fit[0]
    assert_equal(subfit.shape, (4, 5))
    subfit = fit[:, :, 0]
    assert_equal(subfit.shape, (3, 4))
class TestOpdtModel(TestQballModel):
    # Reuse the full QballModel suite against the OPDT model.
    model = OpdtModel
class TestCsaOdfModel(TestQballModel):
    # Reuse the full QballModel suite against the CSA ODF model.
    model = CsaOdfModel
def test_hat_and_lcr():
    """hat() projects the basis onto itself; lcr_matrix produces centered,
    leverage-corrected residuals as a single matrix product."""
    hemi = hemi_icosahedron.subdivide(3)
    m, n = sph_harm_ind_list(8)
    design = real_sph_harm(m, n, hemi.theta[:, None], hemi.phi[:, None])
    H = hat(design)
    # A hat (projection) matrix maps the design matrix onto itself.
    assert_array_almost_equal(design, np.dot(H, design))

    data = np.arange(len(hemi.theta))
    # Expected residuals: (raw residual) / sqrt(1 - leverage), then
    # mean-centered.
    expected = (data - np.dot(H, data)) / np.sqrt(1 - H.diagonal())
    expected = expected - expected.mean()
    R = lcr_matrix(H)
    assert_array_almost_equal(expected, np.dot(R, data))
    assert_array_almost_equal(expected, np.dot(data, R.T))
def test_bootstrap_array():
    """With an all-zero residual matrix the bootstrap returns its input."""
    design = np.array([[4, 5, 7, 4, 2.],
                       [4, 6, 2, 3, 6.]]).T
    H = hat(design)
    no_residuals = np.zeros((5, 5))
    fitted = np.dot(H, np.arange(1, 6))
    assert_array_almost_equal(bootstrap_data_voxel(fitted, H, no_residuals),
                              fitted)
    assert_array_almost_equal(bootstrap_data_array(fitted, H, no_residuals),
                              fitted)
def test_ResidualBootstrapWrapper():
    """The wrapper bootstraps along the dwi axis and clips to [min_signal, 1];
    non-dwi entries pass through untouched."""
    design = np.array([[4, 5, 7, 4, 2.],
                       [4, 6, 2, 3, 6.]]).T
    H = hat(design)
    data = np.arange(10) / 8.
    data.shape = (2, 5)
    fitted = np.dot(data, H)
    min_signal = .2

    where_dwi = np.ones(len(H), dtype=bool)
    boot = ResidualBootstrapWrapper(NearestNeighborInterpolator(fitted, (1,)),
                                    design, where_dwi, min_signal)
    assert_array_almost_equal(boot[0], fitted[0].clip(min_signal, 1))
    assert_array_almost_equal(boot[1], fitted[1].clip(min_signal, 1))

    # Prepend a non-dwi (b0-like) column; it must not be bootstrapped.
    fitted = np.column_stack([[.6, .7], fitted])
    where_dwi = np.concatenate([[False], where_dwi])
    boot = ResidualBootstrapWrapper(NearestNeighborInterpolator(fitted, (1,)),
                                    design, where_dwi, min_signal)
    assert_array_almost_equal(boot[0], fitted[0].clip(min_signal, 1))
    assert_array_almost_equal(boot[1], fitted[1].clip(min_signal, 1))
def test_sf_to_sh():
    """Round-trip sf -> sh -> sf in each basis, then on a 2D stack."""
    # Two subdivisions give 81 unique points, comfortably more than the
    # 45 coefficients of an order-8 symmetric basis.
    sphere = hemi_icosahedron.subdivide(2)
    mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003]))
    odf = multi_tensor_odf(sphere.vertices, mevals, [(0, 0), (90, 0)],
                           [50, 50])

    # 1D round trip with the default basis and both named bases.
    odf2 = None
    for basis in (None, "tournier07", "descoteaux07"):
        coeffs = sf_to_sh(odf, sphere, 8, basis)
        odf2 = sh_to_sf(coeffs, sphere, 8, basis)
        assert_array_almost_equal(odf, odf2, 2)

    # 2D case: stack the last reconstruction with the original odf.
    odf2d = np.vstack((odf2, odf))
    odf2d_sf = sh_to_sf(sf_to_sh(odf2d, sphere, 8), sphere, 8)
    assert_array_almost_equal(odf2d, odf2d_sf, 2)
def test_faster_sph_harm():
    """spherical_harmonics agrees with scipy's sph_harm on random angles,
    both with and without the scipy backend."""
    sh_order = 8
    m, n = sph_harm_ind_list(sh_order)
    # Fixed pseudo-random polar/azimuthal angles (64 samples).
    theta = np.array([1.61491146, 0.76661665, 0.11976141, 1.20198246,
                      1.74066314, 1.5925956, 2.13022055, 0.50332859,
                      1.19868988, 0.78440679, 0.50686938, 0.51739718,
                      1.80342999, 0.73778957, 2.28559395, 1.29569064,
                      1.86877091, 0.39239191, 0.54043037, 1.61263047,
                      0.72695314, 1.90527318, 1.58186125, 0.23130073,
                      2.51695237, 0.99835604, 1.2883426, 0.48114057,
                      1.50079318, 1.07978624, 1.9798903, 2.36616966,
                      2.49233299, 2.13116602, 1.36801518, 1.32932608,
                      0.95926683, 1.070349, 0.76355762, 2.07148422,
                      1.50113501, 1.49823314, 0.89248164, 0.22187079,
                      1.53805373, 1.9765295, 1.13361568, 1.04908355,
                      1.68737368, 1.91732452, 1.01937457, 1.45839,
                      0.49641525, 0.29087155, 0.52824641, 1.29875871,
                      1.81023541, 1.17030475, 2.24953206, 1.20280498,
                      0.76399964, 2.16109722, 0.79780421, 0.87154509])
    phi = np.array([-1.5889514, -3.11092733, -0.61328674, -2.4485381,
                    2.88058822, 2.02165946, -1.99783366, 2.71235211,
                    1.41577992, -2.29413676, -2.24565773, -1.55548635,
                    2.59318232, -1.84672472, -2.33710739, 2.12111948,
                    1.87523722, -1.05206575, -2.85381987,
                    -2.22808984, 2.3202034, -2.19004474, -1.90358372,
                    2.14818373, 3.1030696, -2.86620183, -2.19860123,
                    -0.45468447, -3.0034923, 1.73345011, -2.51716288,
                    2.49961525, -2.68782986, 2.69699056, 1.78566133,
                    -1.59119705, -2.53378963, -2.02476738, 1.36924987,
                    2.17600517, 2.38117241, 2.99021511, -1.4218007,
                    -2.44016802, -2.52868164, 3.01531658, 2.50093627,
                    -1.70745826, -2.7863931, -2.97359741, 2.17039906,
                    2.68424643, 1.77896086, 0.45476215, 0.99734418,
                    -2.73107896, 2.28815009, 2.86276506, 3.09450274,
                    -3.09857384, -1.06955885, -2.83826831, 1.81932195,
                    2.81296654])
    # Default path (scipy-backed) against scipy directly.
    sh = spherical_harmonics(m, n, theta[:, None], phi[:, None])
    sh2 = sph_harm_sp(m, n, theta[:, None], phi[:, None])
    assert_array_almost_equal(sh, sh2, 8)
    # The fast non-scipy implementation must match as well.
    sh = spherical_harmonics(m, n, theta[:, None], phi[:, None],
                             use_scipy=False)
    assert_array_almost_equal(sh, sh2, 8)
def test_anisotropic_power():
    """anisotropic_power matches its closed form for constant coefficients."""
    for n_coeffs in (6, 15, 28, 45, 66, 91):
        for norm_factor in (0.0005, 0.00001):
            # All-ones coefficient arrays make the expectation analytic.
            sh_coeffs = np.ones((3, n_coeffs))
            top_order = calculate_max_order(sh_coeffs.shape[-1])
            # With every coefficient equal to 1 the power is the log of the
            # number of even orders (2, 4, ..., top_order) minus the log of
            # the normalization factor.
            n_even_orders = len(range(2, top_order + 2, 2))
            expected = np.log(n_even_orders) - np.log(norm_factor)
            computed = anisotropic_power(sh_coeffs, norm_factor=norm_factor)
            assert_array_almost_equal(computed, [expected] * 3)
            # A single-voxel (1D) coefficient array works the same way.
            assert_array_almost_equal(
                anisotropic_power(sh_coeffs[1], norm_factor=norm_factor),
                expected)
    # An all-zero voxel must return 0 without a log-of-zero warning.
    with warnings.catch_warnings(record=True) as caught:
        assert_equal(anisotropic_power(np.zeros(6)), 0)
        assert len(caught) == 0
def test_calculate_max_order():
    """calculate_max_order inverts the coefficient counts from the table in:
    http://jdtournier.github.io/mrtrix-0.2/tractography/preprocess.html
    """
    for order, count in zip([2, 4, 6, 8, 10, 12], [6, 15, 28, 45, 66, 91]):
        assert_equal(calculate_max_order(count), order)
    # 29 is not a valid symmetric-basis coefficient count.
    assert_raises(ValueError, calculate_max_order, 29)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run_module_suite()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import NetworkManagementClientConfiguration
from .operations import ApplicationGatewaysOperations
from .operations import ApplicationSecurityGroupsOperations
from .operations import AvailableDelegationsOperations
from .operations import AvailableResourceGroupDelegationsOperations
from .operations import AvailableServiceAliasesOperations
from .operations import AzureFirewallsOperations
from .operations import AzureFirewallFqdnTagsOperations
from .operations import BastionHostsOperations
from .operations import NetworkManagementClientOperationsMixin
from .operations import DdosCustomPoliciesOperations
from .operations import DdosProtectionPlansOperations
from .operations import AvailableEndpointServicesOperations
from .operations import ExpressRouteCircuitAuthorizationsOperations
from .operations import ExpressRouteCircuitPeeringsOperations
from .operations import ExpressRouteCircuitConnectionsOperations
from .operations import PeerExpressRouteCircuitConnectionsOperations
from .operations import ExpressRouteCircuitsOperations
from .operations import ExpressRouteServiceProvidersOperations
from .operations import ExpressRouteCrossConnectionsOperations
from .operations import ExpressRouteCrossConnectionPeeringsOperations
from .operations import ExpressRouteGatewaysOperations
from .operations import ExpressRouteConnectionsOperations
from .operations import ExpressRoutePortsLocationsOperations
from .operations import ExpressRoutePortsOperations
from .operations import ExpressRouteLinksOperations
from .operations import FirewallPoliciesOperations
from .operations import FirewallPolicyRuleGroupsOperations
from .operations import IpGroupsOperations
from .operations import LoadBalancersOperations
from .operations import LoadBalancerBackendAddressPoolsOperations
from .operations import LoadBalancerFrontendIPConfigurationsOperations
from .operations import InboundNatRulesOperations
from .operations import LoadBalancerLoadBalancingRulesOperations
from .operations import LoadBalancerOutboundRulesOperations
from .operations import LoadBalancerNetworkInterfacesOperations
from .operations import LoadBalancerProbesOperations
from .operations import NatGatewaysOperations
from .operations import NetworkInterfacesOperations
from .operations import NetworkInterfaceIPConfigurationsOperations
from .operations import NetworkInterfaceLoadBalancersOperations
from .operations import NetworkInterfaceTapConfigurationsOperations
from .operations import NetworkProfilesOperations
from .operations import NetworkSecurityGroupsOperations
from .operations import SecurityRulesOperations
from .operations import DefaultSecurityRulesOperations
from .operations import NetworkVirtualAppliancesOperations
from .operations import NetworkWatchersOperations
from .operations import PacketCapturesOperations
from .operations import ConnectionMonitorsOperations
from .operations import FlowLogsOperations
from .operations import Operations
from .operations import PrivateEndpointsOperations
from .operations import AvailablePrivateEndpointTypesOperations
from .operations import PrivateLinkServicesOperations
from .operations import PublicIPAddressesOperations
from .operations import PublicIPPrefixesOperations
from .operations import RouteFiltersOperations
from .operations import RouteFilterRulesOperations
from .operations import RouteTablesOperations
from .operations import RoutesOperations
from .operations import BgpServiceCommunitiesOperations
from .operations import ServiceEndpointPoliciesOperations
from .operations import ServiceEndpointPolicyDefinitionsOperations
from .operations import ServiceTagsOperations
from .operations import UsagesOperations
from .operations import VirtualNetworksOperations
from .operations import SubnetsOperations
from .operations import ResourceNavigationLinksOperations
from .operations import ServiceAssociationLinksOperations
from .operations import VirtualNetworkPeeringsOperations
from .operations import VirtualNetworkGatewaysOperations
from .operations import VirtualNetworkGatewayConnectionsOperations
from .operations import LocalNetworkGatewaysOperations
from .operations import VirtualNetworkTapsOperations
from .operations import VirtualRoutersOperations
from .operations import VirtualRouterPeeringsOperations
from .operations import VirtualWansOperations
from .operations import VpnSitesOperations
from .operations import VpnSiteLinksOperations
from .operations import VpnSitesConfigurationOperations
from .operations import VpnServerConfigurationsOperations
from .operations import VirtualHubsOperations
from .operations import HubVirtualNetworkConnectionsOperations
from .operations import VpnGatewaysOperations
from .operations import VpnConnectionsOperations
from .operations import VpnSiteLinkConnectionsOperations
from .operations import VpnLinkConnectionsOperations
from .operations import P2SVpnGatewaysOperations
from .operations import VpnServerConfigurationsAssociatedWithVirtualWanOperations
from .operations import VirtualHubRouteTableV2SOperations
from .operations import WebApplicationFirewallPoliciesOperations
from . import models
class NetworkManagementClient(NetworkManagementClientOperationsMixin):
    """Network Client.

    :ivar application_gateways: ApplicationGatewaysOperations operations
    :vartype application_gateways: azure.mgmt.network.v2019_12_01.operations.ApplicationGatewaysOperations
    :ivar application_security_groups: ApplicationSecurityGroupsOperations operations
    :vartype application_security_groups: azure.mgmt.network.v2019_12_01.operations.ApplicationSecurityGroupsOperations
    :ivar available_delegations: AvailableDelegationsOperations operations
    :vartype available_delegations: azure.mgmt.network.v2019_12_01.operations.AvailableDelegationsOperations
    :ivar available_resource_group_delegations: AvailableResourceGroupDelegationsOperations operations
    :vartype available_resource_group_delegations: azure.mgmt.network.v2019_12_01.operations.AvailableResourceGroupDelegationsOperations
    :ivar available_service_aliases: AvailableServiceAliasesOperations operations
    :vartype available_service_aliases: azure.mgmt.network.v2019_12_01.operations.AvailableServiceAliasesOperations
    :ivar azure_firewalls: AzureFirewallsOperations operations
    :vartype azure_firewalls: azure.mgmt.network.v2019_12_01.operations.AzureFirewallsOperations
    :ivar azure_firewall_fqdn_tags: AzureFirewallFqdnTagsOperations operations
    :vartype azure_firewall_fqdn_tags: azure.mgmt.network.v2019_12_01.operations.AzureFirewallFqdnTagsOperations
    :ivar bastion_hosts: BastionHostsOperations operations
    :vartype bastion_hosts: azure.mgmt.network.v2019_12_01.operations.BastionHostsOperations
    :ivar ddos_custom_policies: DdosCustomPoliciesOperations operations
    :vartype ddos_custom_policies: azure.mgmt.network.v2019_12_01.operations.DdosCustomPoliciesOperations
    :ivar ddos_protection_plans: DdosProtectionPlansOperations operations
    :vartype ddos_protection_plans: azure.mgmt.network.v2019_12_01.operations.DdosProtectionPlansOperations
    :ivar available_endpoint_services: AvailableEndpointServicesOperations operations
    :vartype available_endpoint_services: azure.mgmt.network.v2019_12_01.operations.AvailableEndpointServicesOperations
    :ivar express_route_circuit_authorizations: ExpressRouteCircuitAuthorizationsOperations operations
    :vartype express_route_circuit_authorizations: azure.mgmt.network.v2019_12_01.operations.ExpressRouteCircuitAuthorizationsOperations
    :ivar express_route_circuit_peerings: ExpressRouteCircuitPeeringsOperations operations
    :vartype express_route_circuit_peerings: azure.mgmt.network.v2019_12_01.operations.ExpressRouteCircuitPeeringsOperations
    :ivar express_route_circuit_connections: ExpressRouteCircuitConnectionsOperations operations
    :vartype express_route_circuit_connections: azure.mgmt.network.v2019_12_01.operations.ExpressRouteCircuitConnectionsOperations
    :ivar peer_express_route_circuit_connections: PeerExpressRouteCircuitConnectionsOperations operations
    :vartype peer_express_route_circuit_connections: azure.mgmt.network.v2019_12_01.operations.PeerExpressRouteCircuitConnectionsOperations
    :ivar express_route_circuits: ExpressRouteCircuitsOperations operations
    :vartype express_route_circuits: azure.mgmt.network.v2019_12_01.operations.ExpressRouteCircuitsOperations
    :ivar express_route_service_providers: ExpressRouteServiceProvidersOperations operations
    :vartype express_route_service_providers: azure.mgmt.network.v2019_12_01.operations.ExpressRouteServiceProvidersOperations
    :ivar express_route_cross_connections: ExpressRouteCrossConnectionsOperations operations
    :vartype express_route_cross_connections: azure.mgmt.network.v2019_12_01.operations.ExpressRouteCrossConnectionsOperations
    :ivar express_route_cross_connection_peerings: ExpressRouteCrossConnectionPeeringsOperations operations
    :vartype express_route_cross_connection_peerings: azure.mgmt.network.v2019_12_01.operations.ExpressRouteCrossConnectionPeeringsOperations
    :ivar express_route_gateways: ExpressRouteGatewaysOperations operations
    :vartype express_route_gateways: azure.mgmt.network.v2019_12_01.operations.ExpressRouteGatewaysOperations
    :ivar express_route_connections: ExpressRouteConnectionsOperations operations
    :vartype express_route_connections: azure.mgmt.network.v2019_12_01.operations.ExpressRouteConnectionsOperations
    :ivar express_route_ports_locations: ExpressRoutePortsLocationsOperations operations
    :vartype express_route_ports_locations: azure.mgmt.network.v2019_12_01.operations.ExpressRoutePortsLocationsOperations
    :ivar express_route_ports: ExpressRoutePortsOperations operations
    :vartype express_route_ports: azure.mgmt.network.v2019_12_01.operations.ExpressRoutePortsOperations
    :ivar express_route_links: ExpressRouteLinksOperations operations
    :vartype express_route_links: azure.mgmt.network.v2019_12_01.operations.ExpressRouteLinksOperations
    :ivar firewall_policies: FirewallPoliciesOperations operations
    :vartype firewall_policies: azure.mgmt.network.v2019_12_01.operations.FirewallPoliciesOperations
    :ivar firewall_policy_rule_groups: FirewallPolicyRuleGroupsOperations operations
    :vartype firewall_policy_rule_groups: azure.mgmt.network.v2019_12_01.operations.FirewallPolicyRuleGroupsOperations
    :ivar ip_groups: IpGroupsOperations operations
    :vartype ip_groups: azure.mgmt.network.v2019_12_01.operations.IpGroupsOperations
    :ivar load_balancers: LoadBalancersOperations operations
    :vartype load_balancers: azure.mgmt.network.v2019_12_01.operations.LoadBalancersOperations
    :ivar load_balancer_backend_address_pools: LoadBalancerBackendAddressPoolsOperations operations
    :vartype load_balancer_backend_address_pools: azure.mgmt.network.v2019_12_01.operations.LoadBalancerBackendAddressPoolsOperations
    :ivar load_balancer_frontend_ip_configurations: LoadBalancerFrontendIPConfigurationsOperations operations
    :vartype load_balancer_frontend_ip_configurations: azure.mgmt.network.v2019_12_01.operations.LoadBalancerFrontendIPConfigurationsOperations
    :ivar inbound_nat_rules: InboundNatRulesOperations operations
    :vartype inbound_nat_rules: azure.mgmt.network.v2019_12_01.operations.InboundNatRulesOperations
    :ivar load_balancer_load_balancing_rules: LoadBalancerLoadBalancingRulesOperations operations
    :vartype load_balancer_load_balancing_rules: azure.mgmt.network.v2019_12_01.operations.LoadBalancerLoadBalancingRulesOperations
    :ivar load_balancer_outbound_rules: LoadBalancerOutboundRulesOperations operations
    :vartype load_balancer_outbound_rules: azure.mgmt.network.v2019_12_01.operations.LoadBalancerOutboundRulesOperations
    :ivar load_balancer_network_interfaces: LoadBalancerNetworkInterfacesOperations operations
    :vartype load_balancer_network_interfaces: azure.mgmt.network.v2019_12_01.operations.LoadBalancerNetworkInterfacesOperations
    :ivar load_balancer_probes: LoadBalancerProbesOperations operations
    :vartype load_balancer_probes: azure.mgmt.network.v2019_12_01.operations.LoadBalancerProbesOperations
    :ivar nat_gateways: NatGatewaysOperations operations
    :vartype nat_gateways: azure.mgmt.network.v2019_12_01.operations.NatGatewaysOperations
    :ivar network_interfaces: NetworkInterfacesOperations operations
    :vartype network_interfaces: azure.mgmt.network.v2019_12_01.operations.NetworkInterfacesOperations
    :ivar network_interface_ip_configurations: NetworkInterfaceIPConfigurationsOperations operations
    :vartype network_interface_ip_configurations: azure.mgmt.network.v2019_12_01.operations.NetworkInterfaceIPConfigurationsOperations
    :ivar network_interface_load_balancers: NetworkInterfaceLoadBalancersOperations operations
    :vartype network_interface_load_balancers: azure.mgmt.network.v2019_12_01.operations.NetworkInterfaceLoadBalancersOperations
    :ivar network_interface_tap_configurations: NetworkInterfaceTapConfigurationsOperations operations
    :vartype network_interface_tap_configurations: azure.mgmt.network.v2019_12_01.operations.NetworkInterfaceTapConfigurationsOperations
    :ivar network_profiles: NetworkProfilesOperations operations
    :vartype network_profiles: azure.mgmt.network.v2019_12_01.operations.NetworkProfilesOperations
    :ivar network_security_groups: NetworkSecurityGroupsOperations operations
    :vartype network_security_groups: azure.mgmt.network.v2019_12_01.operations.NetworkSecurityGroupsOperations
    :ivar security_rules: SecurityRulesOperations operations
    :vartype security_rules: azure.mgmt.network.v2019_12_01.operations.SecurityRulesOperations
    :ivar default_security_rules: DefaultSecurityRulesOperations operations
    :vartype default_security_rules: azure.mgmt.network.v2019_12_01.operations.DefaultSecurityRulesOperations
    :ivar network_virtual_appliances: NetworkVirtualAppliancesOperations operations
    :vartype network_virtual_appliances: azure.mgmt.network.v2019_12_01.operations.NetworkVirtualAppliancesOperations
    :ivar network_watchers: NetworkWatchersOperations operations
    :vartype network_watchers: azure.mgmt.network.v2019_12_01.operations.NetworkWatchersOperations
    :ivar packet_captures: PacketCapturesOperations operations
    :vartype packet_captures: azure.mgmt.network.v2019_12_01.operations.PacketCapturesOperations
    :ivar connection_monitors: ConnectionMonitorsOperations operations
    :vartype connection_monitors: azure.mgmt.network.v2019_12_01.operations.ConnectionMonitorsOperations
    :ivar flow_logs: FlowLogsOperations operations
    :vartype flow_logs: azure.mgmt.network.v2019_12_01.operations.FlowLogsOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.network.v2019_12_01.operations.Operations
    :ivar private_endpoints: PrivateEndpointsOperations operations
    :vartype private_endpoints: azure.mgmt.network.v2019_12_01.operations.PrivateEndpointsOperations
    :ivar available_private_endpoint_types: AvailablePrivateEndpointTypesOperations operations
    :vartype available_private_endpoint_types: azure.mgmt.network.v2019_12_01.operations.AvailablePrivateEndpointTypesOperations
    :ivar private_link_services: PrivateLinkServicesOperations operations
    :vartype private_link_services: azure.mgmt.network.v2019_12_01.operations.PrivateLinkServicesOperations
    :ivar public_ip_addresses: PublicIPAddressesOperations operations
    :vartype public_ip_addresses: azure.mgmt.network.v2019_12_01.operations.PublicIPAddressesOperations
    :ivar public_ip_prefixes: PublicIPPrefixesOperations operations
    :vartype public_ip_prefixes: azure.mgmt.network.v2019_12_01.operations.PublicIPPrefixesOperations
    :ivar route_filters: RouteFiltersOperations operations
    :vartype route_filters: azure.mgmt.network.v2019_12_01.operations.RouteFiltersOperations
    :ivar route_filter_rules: RouteFilterRulesOperations operations
    :vartype route_filter_rules: azure.mgmt.network.v2019_12_01.operations.RouteFilterRulesOperations
    :ivar route_tables: RouteTablesOperations operations
    :vartype route_tables: azure.mgmt.network.v2019_12_01.operations.RouteTablesOperations
    :ivar routes: RoutesOperations operations
    :vartype routes: azure.mgmt.network.v2019_12_01.operations.RoutesOperations
    :ivar bgp_service_communities: BgpServiceCommunitiesOperations operations
    :vartype bgp_service_communities: azure.mgmt.network.v2019_12_01.operations.BgpServiceCommunitiesOperations
    :ivar service_endpoint_policies: ServiceEndpointPoliciesOperations operations
    :vartype service_endpoint_policies: azure.mgmt.network.v2019_12_01.operations.ServiceEndpointPoliciesOperations
    :ivar service_endpoint_policy_definitions: ServiceEndpointPolicyDefinitionsOperations operations
    :vartype service_endpoint_policy_definitions: azure.mgmt.network.v2019_12_01.operations.ServiceEndpointPolicyDefinitionsOperations
    :ivar service_tags: ServiceTagsOperations operations
    :vartype service_tags: azure.mgmt.network.v2019_12_01.operations.ServiceTagsOperations
    :ivar usages: UsagesOperations operations
    :vartype usages: azure.mgmt.network.v2019_12_01.operations.UsagesOperations
    :ivar virtual_networks: VirtualNetworksOperations operations
    :vartype virtual_networks: azure.mgmt.network.v2019_12_01.operations.VirtualNetworksOperations
    :ivar subnets: SubnetsOperations operations
    :vartype subnets: azure.mgmt.network.v2019_12_01.operations.SubnetsOperations
    :ivar resource_navigation_links: ResourceNavigationLinksOperations operations
    :vartype resource_navigation_links: azure.mgmt.network.v2019_12_01.operations.ResourceNavigationLinksOperations
    :ivar service_association_links: ServiceAssociationLinksOperations operations
    :vartype service_association_links: azure.mgmt.network.v2019_12_01.operations.ServiceAssociationLinksOperations
    :ivar virtual_network_peerings: VirtualNetworkPeeringsOperations operations
    :vartype virtual_network_peerings: azure.mgmt.network.v2019_12_01.operations.VirtualNetworkPeeringsOperations
    :ivar virtual_network_gateways: VirtualNetworkGatewaysOperations operations
    :vartype virtual_network_gateways: azure.mgmt.network.v2019_12_01.operations.VirtualNetworkGatewaysOperations
    :ivar virtual_network_gateway_connections: VirtualNetworkGatewayConnectionsOperations operations
    :vartype virtual_network_gateway_connections: azure.mgmt.network.v2019_12_01.operations.VirtualNetworkGatewayConnectionsOperations
    :ivar local_network_gateways: LocalNetworkGatewaysOperations operations
    :vartype local_network_gateways: azure.mgmt.network.v2019_12_01.operations.LocalNetworkGatewaysOperations
    :ivar virtual_network_taps: VirtualNetworkTapsOperations operations
    :vartype virtual_network_taps: azure.mgmt.network.v2019_12_01.operations.VirtualNetworkTapsOperations
    :ivar virtual_routers: VirtualRoutersOperations operations
    :vartype virtual_routers: azure.mgmt.network.v2019_12_01.operations.VirtualRoutersOperations
    :ivar virtual_router_peerings: VirtualRouterPeeringsOperations operations
    :vartype virtual_router_peerings: azure.mgmt.network.v2019_12_01.operations.VirtualRouterPeeringsOperations
    :ivar virtual_wans: VirtualWansOperations operations
    :vartype virtual_wans: azure.mgmt.network.v2019_12_01.operations.VirtualWansOperations
    :ivar vpn_sites: VpnSitesOperations operations
    :vartype vpn_sites: azure.mgmt.network.v2019_12_01.operations.VpnSitesOperations
    :ivar vpn_site_links: VpnSiteLinksOperations operations
    :vartype vpn_site_links: azure.mgmt.network.v2019_12_01.operations.VpnSiteLinksOperations
    :ivar vpn_sites_configuration: VpnSitesConfigurationOperations operations
    :vartype vpn_sites_configuration: azure.mgmt.network.v2019_12_01.operations.VpnSitesConfigurationOperations
    :ivar vpn_server_configurations: VpnServerConfigurationsOperations operations
    :vartype vpn_server_configurations: azure.mgmt.network.v2019_12_01.operations.VpnServerConfigurationsOperations
    :ivar virtual_hubs: VirtualHubsOperations operations
    :vartype virtual_hubs: azure.mgmt.network.v2019_12_01.operations.VirtualHubsOperations
    :ivar hub_virtual_network_connections: HubVirtualNetworkConnectionsOperations operations
    :vartype hub_virtual_network_connections: azure.mgmt.network.v2019_12_01.operations.HubVirtualNetworkConnectionsOperations
    :ivar vpn_gateways: VpnGatewaysOperations operations
    :vartype vpn_gateways: azure.mgmt.network.v2019_12_01.operations.VpnGatewaysOperations
    :ivar vpn_connections: VpnConnectionsOperations operations
    :vartype vpn_connections: azure.mgmt.network.v2019_12_01.operations.VpnConnectionsOperations
    :ivar vpn_site_link_connections: VpnSiteLinkConnectionsOperations operations
    :vartype vpn_site_link_connections: azure.mgmt.network.v2019_12_01.operations.VpnSiteLinkConnectionsOperations
    :ivar vpn_link_connections: VpnLinkConnectionsOperations operations
    :vartype vpn_link_connections: azure.mgmt.network.v2019_12_01.operations.VpnLinkConnectionsOperations
    :ivar p2_svpn_gateways: P2SVpnGatewaysOperations operations
    :vartype p2_svpn_gateways: azure.mgmt.network.v2019_12_01.operations.P2SVpnGatewaysOperations
    :ivar vpn_server_configurations_associated_with_virtual_wan: VpnServerConfigurationsAssociatedWithVirtualWanOperations operations
    :vartype vpn_server_configurations_associated_with_virtual_wan: azure.mgmt.network.v2019_12_01.operations.VpnServerConfigurationsAssociatedWithVirtualWanOperations
    :ivar virtual_hub_route_table_v2_s: VirtualHubRouteTableV2SOperations operations
    :vartype virtual_hub_route_table_v2_s: azure.mgmt.network.v2019_12_01.operations.VirtualHubRouteTableV2SOperations
    :ivar web_application_firewall_policies: WebApplicationFirewallPoliciesOperations operations
    :vartype web_application_firewall_policies: azure.mgmt.network.v2019_12_01.operations.WebApplicationFirewallPoliciesOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    # NOTE: this class is auto-generated by the Azure SDK code generator;
    # do not hand-edit the operation-group wiring below.

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        base_url=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Default to the public Azure Resource Manager endpoint when no
        # explicit base URL is supplied (sovereign clouds pass their own).
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = NetworkManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Collect every model class exported by the generated ``models``
        # module so the (de)serializer can resolve type names at runtime.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # One operation-group client per REST resource area; all of them
        # share the same pipeline client, configuration and (de)serializers.
        self.application_gateways = ApplicationGatewaysOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.application_security_groups = ApplicationSecurityGroupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.available_delegations = AvailableDelegationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.available_resource_group_delegations = AvailableResourceGroupDelegationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.available_service_aliases = AvailableServiceAliasesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.azure_firewalls = AzureFirewallsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.azure_firewall_fqdn_tags = AzureFirewallFqdnTagsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.bastion_hosts = BastionHostsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.ddos_custom_policies = DdosCustomPoliciesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.ddos_protection_plans = DdosProtectionPlansOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.available_endpoint_services = AvailableEndpointServicesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_circuit_authorizations = ExpressRouteCircuitAuthorizationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_circuit_peerings = ExpressRouteCircuitPeeringsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_circuit_connections = ExpressRouteCircuitConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.peer_express_route_circuit_connections = PeerExpressRouteCircuitConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_circuits = ExpressRouteCircuitsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_service_providers = ExpressRouteServiceProvidersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_cross_connections = ExpressRouteCrossConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_cross_connection_peerings = ExpressRouteCrossConnectionPeeringsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_gateways = ExpressRouteGatewaysOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_connections = ExpressRouteConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_ports_locations = ExpressRoutePortsLocationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_ports = ExpressRoutePortsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.express_route_links = ExpressRouteLinksOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.firewall_policies = FirewallPoliciesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.firewall_policy_rule_groups = FirewallPolicyRuleGroupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.ip_groups = IpGroupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.load_balancers = LoadBalancersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.load_balancer_backend_address_pools = LoadBalancerBackendAddressPoolsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.load_balancer_frontend_ip_configurations = LoadBalancerFrontendIPConfigurationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.inbound_nat_rules = InboundNatRulesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.load_balancer_load_balancing_rules = LoadBalancerLoadBalancingRulesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.load_balancer_outbound_rules = LoadBalancerOutboundRulesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.load_balancer_network_interfaces = LoadBalancerNetworkInterfacesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.load_balancer_probes = LoadBalancerProbesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.nat_gateways = NatGatewaysOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.network_interfaces = NetworkInterfacesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.network_interface_ip_configurations = NetworkInterfaceIPConfigurationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.network_interface_load_balancers = NetworkInterfaceLoadBalancersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.network_interface_tap_configurations = NetworkInterfaceTapConfigurationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.network_profiles = NetworkProfilesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.network_security_groups = NetworkSecurityGroupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.security_rules = SecurityRulesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.default_security_rules = DefaultSecurityRulesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.network_virtual_appliances = NetworkVirtualAppliancesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.network_watchers = NetworkWatchersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.packet_captures = PacketCapturesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.connection_monitors = ConnectionMonitorsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.flow_logs = FlowLogsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.private_endpoints = PrivateEndpointsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.available_private_endpoint_types = AvailablePrivateEndpointTypesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.private_link_services = PrivateLinkServicesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.public_ip_addresses = PublicIPAddressesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.public_ip_prefixes = PublicIPPrefixesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.route_filters = RouteFiltersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.route_filter_rules = RouteFilterRulesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.route_tables = RouteTablesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.routes = RoutesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.bgp_service_communities = BgpServiceCommunitiesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.service_endpoint_policies = ServiceEndpointPoliciesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.service_endpoint_policy_definitions = ServiceEndpointPolicyDefinitionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.service_tags = ServiceTagsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.usages = UsagesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_networks = VirtualNetworksOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.subnets = SubnetsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.resource_navigation_links = ResourceNavigationLinksOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.service_association_links = ServiceAssociationLinksOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_network_peerings = VirtualNetworkPeeringsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_network_gateways = VirtualNetworkGatewaysOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_network_gateway_connections = VirtualNetworkGatewayConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.local_network_gateways = LocalNetworkGatewaysOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_network_taps = VirtualNetworkTapsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_routers = VirtualRoutersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_router_peerings = VirtualRouterPeeringsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_wans = VirtualWansOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vpn_sites = VpnSitesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vpn_site_links = VpnSiteLinksOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vpn_sites_configuration = VpnSitesConfigurationOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vpn_server_configurations = VpnServerConfigurationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_hubs = VirtualHubsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.hub_virtual_network_connections = HubVirtualNetworkConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vpn_gateways = VpnGatewaysOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vpn_connections = VpnConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vpn_site_link_connections = VpnSiteLinkConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vpn_link_connections = VpnLinkConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.p2_svpn_gateways = P2SVpnGatewaysOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.vpn_server_configurations_associated_with_virtual_wan = VpnServerConfigurationsAssociatedWithVirtualWanOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_hub_route_table_v2_s = VirtualHubRouteTableV2SOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.web_application_firewall_policies = WebApplicationFirewallPoliciesOperations(
            self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, http_request, **kwargs):
        # type: (HttpRequest, Any) -> HttpResponse
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.HttpResponse
        """
        # Substitute the subscription id into the request's URL template
        # before handing it to the pipeline.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    def close(self):
        # type: () -> None
        """Close the underlying pipeline client and release its resources."""
        self._client.close()

    def __enter__(self):
        # type: () -> NetworkManagementClient
        """Enter the pipeline client's context and return this client."""
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        """Exit the pipeline client's context, closing its transport."""
        self._client.__exit__(*exc_details)
| |
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define the Notifications table and provide for import from multiple bases.
This table consists of the following fields
NotificationID = Column(Integer, primary_key=True)
NotifyTime = Column(DateTime, nullable=False)
UserID = Column(Integer, ForeignKey("Users.UserID"))
TargetID = Column(Integer(11), ForeignKey("Targets.TargetID"))
Message = Column(String(100), nullable=False)
"""
from __future__ import print_function, absolute_import
import os
import csv
from mysql.connector import Error
from ._dbtablebase import DBTableBase
from ._mysqldbmixin import MySQLDBMixin
from ._common import compute_startend_dates
__all__ = ['NotificationsTable']
class NotificationsTable(DBTableBase):
    """
    Abstract base class for the Notifications table.

    Holds the field layout shared by the csv and mysql implementations.
    Use :meth:`factory` to obtain a concrete subclass instance.
    """
    # NOTE(review): 'NotifyID' is the key field here, but the csv subclass
    # keys rows on 'TargetID' — confirm which is the intended primary key.
    key_field = 'NotifyID'
    fields = [key_field, 'NotifyTime', 'UserID', 'TargetID', 'Message']
    table_name = 'Notifications'

    def __init__(self, db_dict, db_type, verbose):
        """Pass configuration straight through to the base table class.

        :param db_dict: dictionary of database configuration parameters
        :param db_type: database type string ('csv' or 'mysql')
        :param verbose: flag enabling diagnostic prints
        """
        super(NotificationsTable, self).__init__(db_dict, db_type, verbose)

    @classmethod
    def factory(cls, db_dict, db_type, verbose):
        """Factory method to select subclass based on database type.

        Currently the types sql and csv are supported.

        Returns instance object of the defined type.

        :raises ValueError: if ``db_type`` is not 'csv' or 'mysql'.
        """
        inst = None
        if verbose:
            print('notification factory datafile %s dbtype %s verbose %s'
                  % (db_dict, db_type, verbose))
        if db_type == 'csv':
            inst = CsvNotificationsTable(db_dict, db_type, verbose)
        elif db_type == 'mysql':
            inst = MySQLNotificationsTable(db_dict, db_type, verbose)
        else:
            # BUG FIX: the ValueError was previously constructed but never
            # raised, so an unknown db_type silently returned None.
            raise ValueError('Invalid notifications table factory db_type %s' %
                             db_type)
        if verbose:
            print('Notifications table factory inst %r' % inst)
        return inst
class CsvNotificationsTable(NotificationsTable):
    """
    Notifications Table functions for csv based table.

    The csv data file is located either by absolute path, in the
    current directory, or in the configuration directory given by
    db_dict['directory'].  The whole file is read at construction time
    into self.data_dict, keyed by integer TargetID.
    """
    def __init__(self, db_dict, dbtype, verbose):
        super(CsvNotificationsTable, self).__init__(db_dict, dbtype, verbose)
        fn = db_dict['lastscanfilename']
        self.filename = fn

        # If the filename is not a full directory, the data file must be
        # either in the local directory or the same directory as the
        # config file defined by the db_dict entry directory
        if os.path.isabs(fn):
            if not os.path.isfile(fn):
                # Bug fix: the ValueError was previously constructed but
                # never raised, so a missing file was silently ignored.
                raise ValueError('CSV file %s does not exist ' % fn)
            self.filename = fn
        else:
            if os.path.isfile(fn):
                self.filename = fn
            else:
                full_fn = os.path.join(db_dict['directory'], fn)
                if not os.path.isfile(full_fn):
                    # Bug fix: raise instead of discarding the exception.
                    raise ValueError('CSV file %s does not exist '
                                     'in local directory or config directory %s' %
                                     (fn, db_dict['directory']))
                self.filename = full_fn

        with open(self.filename) as input_file:
            reader = csv.DictReader(input_file)
            # create dictionary (id = key) with dictionary for
            # each set of entries
            result = {}
            for row in reader:
                key = int(row['TargetID'])
                if key in result:
                    # duplicate row handling
                    print('ERROR. Duplicate Id in table: %s\nrow=%s' %
                          (key, row))
                    raise ValueError('Input Error. duplicate Id')
                result[key] = row
            self.data_dict = result
class SQLNotificationsTable(NotificationsTable):
    """
    Table representing the Notifications database table.

    This table supports a single dictionary that contains the data
    when the table is initialized.
    """
    def __init__(self, db_dict, dbtype, verbose):
        """Pass through to SQL"""
        if verbose:
            print('SQL Database type %s verbose=%s' % (db_dict, verbose))
        super(SQLNotificationsTable, self).__init__(db_dict, dbtype, verbose)
        self.connection = None

    def db_info(self):
        """
        Display the db info and Return info on the database used as a
        dictionary.
        """
        try:
            print('Database characteristics')
            for key in self.db_dict:
                # Bug fix: the operands were previously split across two
                # print arguments ("'%s: %s' % key, value"), which raised
                # TypeError ("not enough arguments") for every entry.
                print('%s: %s' % (key, self.db_dict[key]))
        except ValueError as ve:
            print('Invalid database configuration exception %s' % ve)
        return self.db_dict
class MySQLNotificationsTable(NotificationsTable, MySQLDBMixin):
    """Class representing the connection with a mysql database."""
    def __init__(self, db_dict, dbtype, verbose):
        """Connect to the MySQL server and load the table into memory."""
        super(MySQLNotificationsTable, self).__init__(db_dict, dbtype, verbose)

        self.connectdb(db_dict, verbose)
        self._load_table()

    def select_by_daterange(self, start_date, end_date=None,
                            number_of_days=None, target_id=None):
        """
        Select records between two timestamps and return the set of
        records selected

        Parameters:
          start_date(:class:`py:datetime.datetime` or `None`):
            The starttime for the select statement. If `None` the oldest
            timestamp in the database is used.

          end_date(:class:`py:datetime.datetime` or `None`):
            The end datetime for the scan. If `None`, the current date time
            is used

          number_of_days(:term:`py:integer`):
            Number of days from start_date to gather. Invalid if end_date
            is also set.

          target_id(:term:`integer`):
            Optional integer defining a target id in the target table. The
            result is filtered by this target id against the TargetID field
            in the Notifications record if the value is not `None`.

        Returns:
            List of tuples representing rows in the Notifications table.

        Exceptions:
            ValueError if input parameters incorrect.
        """
        start_date, end_date = compute_startend_dates(
            start_date,
            end_date=end_date,
            number_of_days=number_of_days)

        cursor = self.connection.cursor()
        try:
            # Parameterized queries: values are bound by the driver, never
            # interpolated into the SQL text.
            if target_id is None:
                cursor.execute('SELECT * '
                               'FROM Notifications '
                               'WHERE NotifyTime BETWEEN %s AND %s',
                               (start_date, end_date))
            else:
                cursor.execute('SELECT * '
                               'FROM Notifications WHERE TargetID = %s AND '
                               'NotifyTime BETWEEN %s AND %s',
                               (target_id, start_date, end_date))
            rows = cursor.fetchall()
            return rows
        finally:
            cursor.close()

    def delete_by_daterange(self, start_date, end_date, target_id=None):
        """
        Deletes records from the database based on start_date, end_date and
        optional target_id. This requires start date and end date explicitly
        and does not allow a number-of-days parameter.

        Parameters:
          start_date(:class:`py:datetime.datetime` or `None`):
            The starttime for the delete statement. If `None` the oldest
            timestamp in the database is used.

          end_date(:class:`py:datetime.datetime` or `None`):
            The end datetime for the delete.

          target_id: Optional target id to filter the delete request.

        Exceptions:
            Database error if the execute failed (the transaction is
            rolled back before the error is re-raised).
        """
        cursor = self.connection.cursor()
        try:
            try:
                if target_id is None:
                    cursor.execute('DELETE '
                                   'FROM Notifications '
                                   'WHERE NotifyTime BETWEEN %s AND %s',
                                   (start_date, end_date))
                else:
                    cursor.execute('DELETE '
                                   'FROM Notifications WHERE TargetID = %s AND '
                                   'NotifyTime BETWEEN %s AND %s',
                                   (target_id, start_date, end_date))
            except Error as err:
                print(err)
                self.connection.rollback()
                raise
            self.connection.commit()
        finally:
            cursor.close()

    def record_count(self):
        """
        Get count of records in the Notifications table.
        """
        cursor = self.connection.cursor()
        try:
            cursor.execute("SELECT COUNT(*) from Notifications")
            res = cursor.fetchone()
            return res[0]
        finally:
            # Bug fix: the cursor was previously never closed.
            cursor.close()
| |
# -*- coding: utf-8 -*-
"""DPyGetOpt -- Demiurge Python GetOptions Module
$Id: DPyGetOpt.py 2872 2007-11-25 17:58:05Z fperez $
This module is modeled after perl's Getopt::Long module-- which
is, in turn, modeled after GNU's extended getopt() function.
Upon instantiation, the option specification should be a sequence
(list) of option definitions.
Options that take no arguments should simply contain the name of
the option. If a ! is post-pended, the option can be negated by
prepending 'no'; ie 'debug!' specifies that -debug and -nodebug
should be accepted.
Mandatory arguments to options are specified using a postpended
'=' + a type specifier. '=s' specifies a mandatory string
argument, '=i' specifies a mandatory integer argument, and '=f'
specifies a mandatory real number. In all cases, the '=' can be
substituted with ':' to specify that the argument is optional.
Dashes '-' in option names are allowed.
If an option has the character '@' postpended (after the
argumentation specification), it can appear multiple times within
each argument list that is processed. The results will be stored
in a list.
The option name can actually be a list of names separated by '|'
characters; ie-- 'foo|bar|baz=f@' specifies that all -foo, -bar,
and -baz options that appear on within the parsed argument list
must have a real number argument and that the accumulated list
of values will be available under the name 'foo'
$Id: DPyGetOpt.py 2872 2007-11-25 17:58:05Z fperez $"""
#*****************************************************************************
#
# Copyright (c) 2001 Bill Bumgarner <bbum@friday.com>
#
#
# Published under the terms of the MIT license, hereby reproduced:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
#*****************************************************************************
__author__ = 'Bill Bumgarner <bbum@friday.com>'
__license__ = 'MIT'
__version__ = '1.2'
# Modified to use re instead of regex and regsub modules.
# 2001/5/7, Jonathan Hogg <jonathan@onegoodidea.com>
import re
import string
import sys
import types
class Error(Exception):
    """Base class for exceptions in the DPyGetOpt module.

    Catching this type catches every error this module raises.
    """
class ArgumentError(Error):
    """Exception indicating an error in the arguments passed to
    DPyGetOpt.processArguments (unknown option, ambiguous abbreviation,
    or a missing/malformed option argument)."""
class SpecificationError(Error):
    """Exception indicating an error with an option specification
    (malformed spec string or a duplicate option/alias name)."""
class TerminationError(Error):
    """Exception indicating an error with an option processing terminator
    (e.g. an abbreviation matching more than one terminator)."""
# Regex that splits an argument specification ('=i@', ':s', ...) into its
# requirement character, type character and optional multiplicity flag.
specificationExpr = re.compile('(?P<required>.)(?P<type>.)(?P<multi>@?)')

ArgRequired = 'Requires an Argument'
ArgOptional = 'Argument Optional'

# The types modules is not used for these identifiers because there
# is no identifier for 'boolean' or 'generic'
StringArgType = 'String Argument Type'
IntegerArgType = 'Integer Argument Type'
RealArgType = 'Real Argument Type'
BooleanArgType = 'Boolean Argument Type'
GenericArgType = 'Generic Argument Type'

# dictionary of conversion functions-- boolean and generic options
# do not accept arguments and do not need conversion functions;
# the identity function is used purely for convenience.
ConversionFunctions = {
    StringArgType: lambda x: x,
    # Fix/modernization: string.atoi and string.atof are deprecated
    # aliases of int/float in Python 2 and were removed in Python 3;
    # use the builtins directly (identical conversion behavior).
    IntegerArgType: int,
    RealArgType: float,
    BooleanArgType: lambda x: x,
    GenericArgType: lambda x: x,
}
class DPyGetOpt:
    """
    Option processor modeled after perl's Getopt::Long.

    An instance is configured with a sequence of option specifications
    (see the module docstring for the specification syntax) and is then
    fed argument lists via processArguments().  Parsed results are
    exposed through optionValues / valueForOption(), freeValues,
    terminator and termValues.
    """

    def __init__(self, spec=None, terminators=None):
        """
        Declare and initialize instance variables.

        spec, if given, is a sequence of option specifications;
        terminators is a list of strings that stop option processing
        (defaults to ['--']).
        """
        # Fix: default the terminators via None to avoid the shared
        # mutable-default-argument pitfall of the original ['--'].
        if terminators is None:
            terminators = ['--']

        self.allowAbbreviations = 1     # boolean, 1 if abbreviations will be expanded
        self.freeValues = []            # list, contains free values
        self.ignoreCase = 0             # boolean, YES if ignoring case
        self.needsParse = 0             # boolean, YES if need to reparse parameter spec
        self.optionNames = {}           # dict, all option names-- value is index of tuple
        self.optionStartExpr = None     # regexp defining the start of an option (ie; '-', '--')
        self.optionTuples = []          # list o' tuples containing defn of options AND aliases
        self.optionValues = {}          # dict, option names (after alias expansion) -> option value(s)
        self.orderMixed = 0             # boolean, YES if options can be mixed with args
        self.posixCompliance = 0        # boolean, YES indicates posix like behaviour
        self.spec = []                  # list, raw specs (in case it must be reparsed)
        self.terminators = terminators  # list, strings that terminate argument processing
        self.termValues = []            # list, values after terminator
        self.terminator = None          # full name of terminator that ended
                                        # option processing

        # set up defaults
        self.setPosixCompliance()
        self.setIgnoreCase()
        self.setAllowAbbreviations()

        # parse spec-- if present
        if spec:
            self.parseConfiguration(spec)

    def setPosixCompliance(self, aFlag=0):
        """
        Enables and disables posix compliance.

        When posix compliance is enabled, '+' is not accepted as an
        option prefix and free values may not be mixed with options.
        """
        self.posixCompliance = aFlag
        self.needsParse = 1

        if self.posixCompliance:
            self.optionStartExpr = re.compile(
                r'(--|-)(?P<option>[A-Za-z0-9_-]+)(?P<arg>=.*)?')
            self.orderMixed = 0
        else:
            self.optionStartExpr = re.compile(
                r'(--|-|\+)(?P<option>[A-Za-z0-9_-]+)(?P<arg>=.*)?')
            self.orderMixed = 1

    def isPosixCompliant(self):
        """
        Returns the value of the posix compliance flag.
        """
        return self.posixCompliance

    def setIgnoreCase(self, aFlag=1):
        """
        Enables and disables ignoring case during option processing.
        """
        self.needsParse = 1
        self.ignoreCase = aFlag

    def ignoreCase(self):
        """
        Returns 1 if the option processor will ignore case when
        processing options.

        NOTE(review): this method is shadowed by the instance attribute
        of the same name assigned in setIgnoreCase(), so on instances
        attribute lookup finds the flag, not this method.
        """
        return self.ignoreCase

    def setAllowAbbreviations(self, aFlag=1):
        """
        Enables and disables the expansion of abbreviations during
        option processing.
        """
        self.allowAbbreviations = aFlag

    def willAllowAbbreviations(self):
        """
        Returns 1 if abbreviated options will be automatically
        expanded to the non-abbreviated form (instead of causing an
        unrecognized option error).
        """
        return self.allowAbbreviations

    def addTerminator(self, newTerm):
        """
        Adds newTerm as terminator of option processing.

        Whenever the option processor encounters one of the terminators
        during option processing, the processing of options terminates
        immediately, all remaining options are stored in the termValues
        instance variable and the full name of the terminator is stored
        in the terminator instance variable.
        """
        self.terminators = self.terminators + [newTerm]

    def _addOption(self, oTuple):
        """
        Adds the option described by oTuple (name, (type, mode,
        default, multi), alias) to optionTuples. Adds index keyed under
        name to optionNames. Raises SpecificationError if name already
        in optionNames.
        """
        (name, (type, mode, default, multi), realName) = oTuple

        # verify name and add to option names dictionary
        # (dict.has_key() is Python-2-only; 'in' works on both)
        if name in self.optionNames:
            if realName:
                raise SpecificationError('Alias \'' + name + '\' for \'' +
                                         realName +
                                         '\' already used for another option or alias.')
            else:
                # Bug fix: the message previously concatenated the undefined
                # name 'option', turning the intended error into a NameError.
                raise SpecificationError('Option named \'' + name +
                                         '\' specified more than once. Specification: '
                                         + name)

        # validated. add to optionNames
        self.optionNames[name] = self.tupleIndex
        self.tupleIndex = self.tupleIndex + 1

        # add to optionTuples
        self.optionTuples = self.optionTuples + [oTuple]

        # if type is boolean, add negation
        if type == BooleanArgType:
            alias = 'no' + name
            specTuple = (type, mode, 0, multi)
            oTuple = (alias, specTuple, name)

            # verify name and add to option names dictionary
            if alias in self.optionNames:
                if realName:
                    raise SpecificationError('Negated alias \'' + name +
                                             '\' for \'' + realName +
                                             '\' already used for another option or alias.')
                else:
                    # Bug fix: same undefined-'option' NameError as above.
                    raise SpecificationError('Negated option named \'' + name +
                                             '\' specified more than once. Specification: '
                                             + name)

            # validated. add to optionNames
            self.optionNames[alias] = self.tupleIndex
            self.tupleIndex = self.tupleIndex + 1

            # add to optionTuples
            self.optionTuples = self.optionTuples + [oTuple]

    def addOptionConfigurationTuple(self, oTuple):
        """Normalize case in oTuple (when configured) and register it."""
        (name, argSpec, realName) = oTuple
        if self.ignoreCase:
            name = name.lower()
            if realName:
                realName = realName.lower()
            else:
                realName = name

        oTuple = (name, argSpec, realName)

        # add option
        self._addOption(oTuple)

    def addOptionConfigurationTuples(self, oTuple):
        """Register one configuration tuple, or each tuple in a list."""
        # Bug fix: the original tested 'type(oTuple) is ListType' but never
        # bound ListType, raising NameError for list input.
        if isinstance(oTuple, list):
            for t in oTuple:
                self.addOptionConfigurationTuple(t)
        else:
            self.addOptionConfigurationTuple(oTuple)

    def parseConfiguration(self, spec):
        """Parse a sequence of option specifications into the lookup tables."""
        # destroy previous stored information + store raw spec
        self.spec = spec
        self.optionTuples = []
        self.optionNames = {}
        self.tupleIndex = 0

        # create some regex's for parsing each spec
        splitExpr = \
            re.compile(r'(?P<names>\w+[-A-Za-z0-9|]*)?(?P<spec>!|[=:][infs]@?)?')
        for option in spec:
            # push to lower case (does not negatively affect
            # specification)
            if self.ignoreCase:
                option = option.lower()

            # break into names, specification
            match = splitExpr.match(option)
            if match is None:
                raise SpecificationError('Invalid specification {' + option +
                                         '}')

            names = match.group('names')
            specification = match.group('spec')

            # break name into name, aliases
            nlist = names.split('|')

            # get name
            name = nlist[0]
            aliases = nlist[1:]

            if not specification:
                # spec tuple is ('type', 'arg mode', 'default value', 'multiple')
                argType = GenericArgType
                argMode = None
                argDefault = 1
                argMultiple = 0
            elif specification == '!':
                argType = BooleanArgType
                argMode = None
                argDefault = 1
                argMultiple = 0
            else:
                # parse
                match = specificationExpr.match(specification)
                if match is None:
                    # failed to parse, die
                    raise SpecificationError('Invalid configuration for option \''
                                             + option + '\'')

                # determine mode
                required = match.group('required')
                if required == '=':
                    argMode = ArgRequired
                elif required == ':':
                    argMode = ArgOptional
                else:
                    raise SpecificationError('Unknown requirement configuration \''
                                             + required + '\'')

                # determine type
                type = match.group('type')
                if type == 's':
                    argType = StringArgType
                    argDefault = ''
                elif type == 'i':
                    argType = IntegerArgType
                    argDefault = 1
                elif type == 'f' or type == 'n':
                    argType = RealArgType
                    argDefault = 1
                else:
                    raise SpecificationError('Unknown type specifier \'' +
                                             type + '\'')

                # determine quantity
                if match.group('multi') == '@':
                    argMultiple = 1
                else:
                    argMultiple = 0
            ## end else (of not specification)

            # construct specification tuple
            specTuple = (argType, argMode, argDefault, argMultiple)

            # add the option-- option tuple is (name, specTuple, real name)
            oTuple = (name, specTuple, name)
            self._addOption(oTuple)

            for alias in aliases:
                # drop to all lower (if configured to do so)
                if self.ignoreCase:
                    alias = alias.lower()
                # create configuration tuple
                oTuple = (alias, specTuple, name)
                # add
                self._addOption(oTuple)

        # successfully parsed....
        self.needsParse = 0

    def _getArgTuple(self, argName):
        """
        Returns a list containing all the specification tuples that
        match argName. If none match, None is returned. If one
        matches, a list with one tuple is returned. If more than one
        match, a list containing all the tuples that matched is
        returned.

        In other words, this function does not pass judgement upon the
        validity of multiple matches.
        """
        # is it in the optionNames dict?
        try:
            # yes, get index
            tupleIndex = self.optionNames[argName]
            # and return tuple as element of list
            return [self.optionTuples[tupleIndex]]
        except KeyError:
            # are abbreviations allowed?
            if not self.allowAbbreviations:
                # No! therefore, this cannot be a valid argument-- nothing found
                return None

            # argName might be an abbreviation (and, abbreviations must
            # be allowed... or this would not have been reached!)
            # re.escape guards against regex metacharacters in argName.
            argExpr = re.compile('^' + re.escape(argName))
            tuples = [optTuple for optTuple in self.optionTuples
                      if argExpr.search(optTuple[0]) is not None]
            if not len(tuples):
                return None
            else:
                return tuples

    def _isTerminator(self, optionName):
        """
        Returns the full name of the terminator if optionName is a valid
        terminator. If it is, sets self.terminator to the full name of
        the terminator.

        If more than one terminator matched, raises a TerminationError with a
        string describing the ambiguity.
        """
        if optionName in self.terminators:
            self.terminator = optionName
        elif not self.allowAbbreviations:
            return None

        # prefix-match against every terminator
        terms = [t for t in self.terminators if t.startswith(optionName)]
        if not len(terms):
            return None
        elif len(terms) > 1:
            raise TerminationError('Ambiguous terminator \'' + optionName +
                                   '\' matches ' + repr(terms))
        self.terminator = terms[0]
        return self.terminator

    def processArguments(self, args=None):
        """
        Processes args, a list of arguments (including options).

        If args is the same as sys.argv (or None), automatically trims
        the first argument (the executable name/path).

        If an exception is not raised, the argument list was parsed
        correctly.

        Upon successful completion, the freeValues instance variable
        will contain all the arguments that were not associated with an
        option in the order they were encountered. optionValues is a
        dictionary containing the value of each option-- the method
        valueForOption() can be used to query this dictionary.
        terminator will contain the argument encountered that terminated
        option processing (or None, if a terminator was never
        encountered) and termValues will contain all of the options that
        appeared after the Terminator (or an empty list).
        """
        # Robustness fix: the None default previously crashed in len();
        # fall back to sys.argv as that default clearly intended.
        if args is None:
            args = sys.argv[1:] if hasattr(sys, 'argv') else []
        elif hasattr(sys, 'argv') and args == sys.argv:
            args = sys.argv[1:]

        argCount = len(args)   # maximum index + 1 (renamed from 'max',
                               # which shadowed the builtin)
        self.freeValues = []   # array to hold return values
        self.optionValues = {}
        index = 0              # initial index
        self.terminator = None
        self.termValues = []

        while index < argCount:
            # obtain argument
            arg = args[index]
            # increment index -- REMEMBER; it is NOW incremented
            index = index + 1

            # terminate immediately if option terminator encountered
            if self._isTerminator(arg):
                self.freeValues = self.freeValues + args[index:]
                self.termValues = args[index:]
                return

            # is this possibly an option?
            match = self.optionStartExpr.match(arg)
            if match is None:
                # not an option-- add to freeValues
                self.freeValues = self.freeValues + [arg]
                if not self.orderMixed:
                    # mixing not allowed; add rest of args as freeValues
                    self.freeValues = self.freeValues + args[index:]
                    # return to caller
                    return
                else:
                    continue

            # grab name
            optName = match.group('option')

            # obtain next argument-- index has already been incremented
            nextArg = match.group('arg')
            if nextArg:
                nextArg = nextArg[1:]
                index = index - 1   # put it back
            else:
                try:
                    nextArg = args[index]
                except IndexError:  # narrowed from a bare except
                    nextArg = None

            # transpose to lower case, if necessary
            if self.ignoreCase:
                optName = optName.lower()

            # obtain defining tuple
            tuples = self._getArgTuple(optName)

            if tuples is None:
                raise ArgumentError('Illegal option \'' + arg + '\'')
            elif len(tuples) > 1:
                raise ArgumentError('Ambiguous option \'' + arg +
                                    '\'; matches ' +
                                    repr([t[0] for t in tuples]))
            else:
                config = tuples[0]

            # config is now set to the configuration tuple for the
            # argument
            (fullName, spec, realName) = config
            (optType, optMode, optDefault, optMultiple) = spec

            # if opt mode required, but nextArg is none, raise an error
            if (optMode == ArgRequired):
                if (not nextArg) or self._isTerminator(nextArg):
                    raise ArgumentError('Option \'' + arg +
                                        '\' requires an argument of type ' +
                                        optType)

            if (not optMode == None) and nextArg and (not self._isTerminator(nextArg)):
                # nextArg defined, option configured to possibly consume arg
                try:
                    # grab conversion function
                    func = ConversionFunctions[optType]
                except KeyError:
                    raise ArgumentError('(' + arg +
                                        ') Conversion function for \'' +
                                        optType + '\' not found.')
                try:
                    optionValue = func(nextArg)
                    index = index + 1
                except (ValueError, TypeError):
                    # only raise conversion error if REQUIRED to consume argument
                    if optMode == ArgRequired:
                        raise ArgumentError('Invalid argument to option \''
                                            + arg + '\'; should be \'' +
                                            optType + '\'')
                    else:
                        optionValue = optDefault
            else:
                optionValue = optDefault

            # add value to options dictionary
            if optMultiple:
                # can be multiple values; append to any existing list
                self.optionValues.setdefault(realName, []).append(optionValue)
            else:
                # only one value per option
                # Bug fix: isPosixCompliant is a method; the original
                # truth-tested the bound method itself, which is always
                # true, so duplicates were rejected in every mode.
                if self.isPosixCompliant() and realName in self.optionValues:
                    raise ArgumentError('Argument \'' + arg +
                                        '\' occurs multiple times.')
                self.optionValues[realName] = optionValue

    def valueForOption(self, optionName, defaultValue=None):
        """
        Return the value associated with optionName. If optionName was
        not encountered during parsing of the arguments, returns the
        defaultValue (which defaults to None).
        """
        return self.optionValues.get(optionName, defaultValue)
##
## test/example section
##
# Sentinel message for test failures; appears unused in this chunk -- TODO confirm.
test_error = 'Test Run Amok!'
def _test():
    """
    A relatively complete test suite.

    NOTE: this driver uses Python 2 'print' statements and the
    'except Error, exc' syntax, so it runs only under Python 2.
    It exercises duplicate-name, ambiguous-option and ambiguous-
    terminator errors, then a full mixed option/free-value parse.
    """
    # duplicate option name must raise SpecificationError
    try:
        DPyGetOpt(['foo', 'bar=s', 'foo'])
    except Error, exc:
        print 'EXCEPTION (should be \'foo\' already used..): %s' % exc

    # duplicate alias ('apple') across two specs must raise
    try:
        DPyGetOpt(['foo|bar|apple=s@', 'baz|apple!'])
    except Error, exc:
        print 'EXCEPTION (should be duplicate alias/name error): %s' % exc

    # '-app' abbreviates both 'apple' and 'application' -> ambiguous
    x = DPyGetOpt(['apple|atlas=i@', 'application|executable=f@'])
    try:
        x.processArguments(['-app', '29.3'])
    except Error, exc:
        print 'EXCEPTION (should be ambiguous argument): %s' % exc

    # 'anti' abbreviates both terminators -> ambiguous terminator
    x = DPyGetOpt(['foo'], ['antigravity', 'antithesis'])
    try:
        x.processArguments(['-foo', 'anti'])
    except Error, exc:
        print 'EXCEPTION (should be ambiguous terminator): %s' % exc

    profile = ['plain-option',
               'boolean-option!',
               'list-of-integers=i@',
               'list-real-option|list-real-alias|list-real-pseudonym=f@',
               'optional-string-option:s',
               'abbreviated-string-list=s@']

    terminators = ['terminator']

    args = ['-plain-option',
            '+noboolean-option',
            '--list-of-integers', '1',
            '+list-of-integers', '2',
            '-list-of-integers', '3',
            'freeargone',
            '-list-real-option', '1.1',
            '+list-real-alias', '1.2',
            '--list-real-pseudonym', '1.3',
            'freeargtwo',
            '-abbreviated-string-list', 'String1',
            '--abbreviated-s', 'String2',
            '-abbrev', 'String3',
            '-a', 'String4',
            '-optional-string-option',
            'term',
            'next option should look like an invalid arg',
            '-a']

    print 'Using profile: ' + repr(profile)
    print 'With terminator: ' + repr(terminators)
    print 'Processing arguments: ' + repr(args)

    go = DPyGetOpt(profile, terminators)
    go.processArguments(args)

    print 'Options (and values): ' + repr(go.optionValues)
    print 'free args: ' + repr(go.freeValues)
    print 'term args: ' + repr(go.termValues)
| |
# graphicsCrawlerDisplay.py
# -------------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# graphicsCrawlerDisplay.py
# -------------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and Pieter
# Abbeel in Spring 2013.
# For more info, see http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
import Tkinter
import qlearningAgents
import time
import threading
import sys
import crawler
#import pendulum
import math
from math import pi as PI
# Selects which robot/environment Application builds: 'crawler' or
# 'pendulum' (note the pendulum import above is commented out).
robotType = 'crawler'
class Application:
def sigmoid(self, x):
return 1.0 / (1.0 + 2.0 ** (-x))
def incrementSpeed(self, inc):
self.tickTime *= inc
# self.epsilon = min(1.0, self.epsilon)
# self.epsilon = max(0.0,self.epsilon)
# self.learner.setSpeed(self.epsilon)
self.speed_label['text'] = 'Step Delay: %.5f' % (self.tickTime)
def incrementEpsilon(self, inc):
self.ep += inc
self.epsilon = self.sigmoid(self.ep)
self.learner.setEpsilon(self.epsilon)
self.epsilon_label['text'] = 'Epsilon: %.3f' % (self.epsilon)
def incrementGamma(self, inc):
self.ga += inc
self.gamma = self.sigmoid(self.ga)
self.learner.setDiscount(self.gamma)
self.gamma_label['text'] = 'Discount: %.3f' % (self.gamma)
def incrementAlpha(self, inc):
self.al += inc
self.alpha = self.sigmoid(self.al)
self.learner.setLearningRate(self.alpha)
self.alpha_label['text'] = 'Learning Rate: %.3f' % (self.alpha)
    def __initGUI(self, win):
        """Lay out the parameter control buttons, labels, and the drawing canvas."""
        ## Window ##
        self.win = win

        ## Initialize Frame ##
        win.grid()
        self.dec = -.5       # shift applied by every '-' control button
        self.inc = .5        # shift applied by every '+' control button
        self.tickTime = 0.1  # delay (seconds) between simulation steps

        ## Epsilon Button + Label ##
        self.setupSpeedButtonAndLabel(win)
        self.setupEpsilonButtonAndLabel(win)

        ## Gamma Button + Label ##
        self.setUpGammaButtonAndLabel(win)

        ## Alpha Button + Label ##
        self.setupAlphaButtonAndLabel(win)

        ## Exit Button ##
        #self.exit_button = Tkinter.Button(win,text='Quit', command=self.exit)
        #self.exit_button.grid(row=0, column=9)

        ## Simulation Buttons ##
        # self.setupSimulationButtons(win)

        ## Canvas ##
        # NOTE(review): 'root' is not defined in this method or in the
        # visible module chunk -- presumably a module-level Tk root created
        # elsewhere; confirm, or this should likely be 'win'.
        self.canvas = Tkinter.Canvas(root, height=200, width=1000)
        self.canvas.grid(row=2,columnspan=10)
def setupAlphaButtonAndLabel(self, win):
self.alpha_minus = Tkinter.Button(win,
text="-",command=(lambda: self.incrementAlpha(self.dec)))
self.alpha_minus.grid(row=1, column=3, padx=10)
self.alpha = self.sigmoid(self.al)
self.alpha_label = Tkinter.Label(win, text='Learning Rate: %.3f' % (self.alpha))
self.alpha_label.grid(row=1, column=4)
self.alpha_plus = Tkinter.Button(win,
text="+",command=(lambda: self.incrementAlpha(self.inc)))
self.alpha_plus.grid(row=1, column=5, padx=10)
def setUpGammaButtonAndLabel(self, win):
self.gamma_minus = Tkinter.Button(win,
text="-",command=(lambda: self.incrementGamma(self.dec)))
self.gamma_minus.grid(row=1, column=0, padx=10)
self.gamma = self.sigmoid(self.ga)
self.gamma_label = Tkinter.Label(win, text='Discount: %.3f' % (self.gamma))
self.gamma_label.grid(row=1, column=1)
self.gamma_plus = Tkinter.Button(win,
text="+",command=(lambda: self.incrementGamma(self.inc)))
self.gamma_plus.grid(row=1, column=2, padx=10)
def setupEpsilonButtonAndLabel(self, win):
self.epsilon_minus = Tkinter.Button(win,
text="-",command=(lambda: self.incrementEpsilon(self.dec)))
self.epsilon_minus.grid(row=0, column=3)
self.epsilon = self.sigmoid(self.ep)
self.epsilon_label = Tkinter.Label(win, text='Epsilon: %.3f' % (self.epsilon))
self.epsilon_label.grid(row=0, column=4)
self.epsilon_plus = Tkinter.Button(win,
text="+",command=(lambda: self.incrementEpsilon(self.inc)))
self.epsilon_plus.grid(row=0, column=5)
def setupSpeedButtonAndLabel(self, win):
self.speed_minus = Tkinter.Button(win,
text="-",command=(lambda: self.incrementSpeed(.5)))
self.speed_minus.grid(row=0, column=0)
self.speed_label = Tkinter.Label(win, text='Step Delay: %.5f' % (self.tickTime))
self.speed_label.grid(row=0, column=1)
self.speed_plus = Tkinter.Button(win,
text="+",command=(lambda: self.incrementSpeed(2)))
self.speed_plus.grid(row=0, column=2)
    def skip5kSteps(self):
        """Request that the next 5000 simulation steps run without the per-step delay."""
        self.stepsToSkip = 5000
def __init__(self, win):
self.ep = 0
self.ga = 2
self.al = 2
self.stepCount = 0
## Init Gui
self.__initGUI(win)
# Init environment
if robotType == 'crawler':
self.robot = crawler.CrawlingRobot(self.canvas)
self.robotEnvironment = crawler.CrawlingRobotEnvironment(self.robot)
elif robotType == 'pendulum':
self.robot = pendulum.PendulumRobot(self.canvas)
self.robotEnvironment = \
pendulum.PendulumRobotEnvironment(self.robot)
else:
raise "Unknown RobotType"
# Init Agent
simulationFn = lambda agent: \
simulation.SimulationEnvironment(self.robotEnvironment,agent)
actionFn = lambda state: \
self.robotEnvironment.getPossibleActions(state)
self.learner = qlearningAgents.QLearningAgent(actionFn=actionFn)
self.learner.setEpsilon(self.epsilon)
self.learner.setLearningRate(self.alpha)
self.learner.setDiscount(self.gamma)
# Start GUI
self.running = True
self.stopped = False
self.stepsToSkip = 0
self.thread = threading.Thread(target=self.run)
self.thread.start()
    def exit(self):
        """Stop the background run thread, tear down the window, and exit the process."""
        self.running = False
        # Give the run() thread up to ~0.5s to notice self.running and stop.
        for i in range(5):
            if not self.stopped:
                time.sleep(0.1)
        try:
            self.win.destroy()
        except:
            # Best effort: the window may already be gone.
            pass
        sys.exit(0)
def step(self):
self.stepCount += 1
state = self.robotEnvironment.getCurrentState()
actions = self.robotEnvironment.getPossibleActions(state)
if len(actions) == 0.0:
self.robotEnvironment.reset()
state = self.robotEnvironment.getCurrentState()
actions = self.robotEnvironment.getPossibleActions(state)
print 'Reset!'
action = self.learner.getAction(state)
if action == None:
raise 'None action returned: Code Not Complete'
nextState, reward = self.robotEnvironment.doAction(action)
self.learner.observeTransition(state, action, nextState, reward)
def animatePolicy(self):
    """Paint the learned pendulum policy as a 100x100 colour grid.

    Each cell is an (angle, velocity) state; colour encodes the greedy
    action (blue=kickLeft, red=kickRight, white=doNothing).  Unseen
    states are left unpainted.  Only valid for the pendulum robot.
    """
    if robotType != 'pendulum':
        # Was a string `raise`, which is a TypeError in Python >= 2.6.
        raise Exception('Only pendulum can animatePolicy')
    totWidth = self.canvas.winfo_reqwidth()
    totHeight = self.canvas.winfo_reqheight()
    length = 0.48 * min(totWidth, totHeight)
    x, y = totWidth - length - 30, length + 10
    angleMin, angleMax = self.robot.getMinAndMaxAngle()
    velMin, velMax = self.robot.getMinAndMaxAngleVelocity()
    if 'animatePolicyBox' not in dir(self):
        # First call: draw the bounding box and the legend once.
        self.canvas.create_line(x, y, x + length, y)
        self.canvas.create_line(x + length, y, x + length, y - length)
        self.canvas.create_line(x + length, y - length, x, y - length)
        self.canvas.create_line(x, y - length, x, y)
        self.animatePolicyBox = 1
        self.canvas.create_text(x + length / 2, y + 10, text='angle')
        self.canvas.create_text(x - 30, y - length / 2, text='velocity')
        self.canvas.create_text(x - 60, y - length / 4, text='Blue = kickLeft')
        self.canvas.create_text(x - 60, y - length / 4 + 20, text='Red = kickRight')
        self.canvas.create_text(x - 60, y - length / 4 + 40, text='White = doNothing')
    angleDelta = (angleMax - angleMin) / 100
    velDelta = (velMax - velMin) / 100
    for i in range(100):
        angle = angleMin + i * angleDelta
        for j in range(100):
            vel = velMin + j * velDelta
            state = self.robotEnvironment.getState(angle, vel)
            # Renamed from `max`/`argMax`: `max` shadowed the builtin.
            bestQ, bestAction = None, None
            if not self.learner.seenState(state):
                bestAction = 'unseen'
            else:
                for action in ('kickLeft', 'kickRight', 'doNothing'):
                    qVal = self.learner.getQValue(state, action)
                    if bestQ is None or qVal > bestQ:
                        bestQ, bestAction = qVal, action
            if bestAction != 'unseen':
                if bestAction == 'kickLeft':
                    color = 'blue'
                elif bestAction == 'kickRight':
                    color = 'red'
                elif bestAction == 'doNothing':
                    color = 'white'
                dx = length / 100.0
                dy = length / 100.0
                x0, y0 = x + i * dx, y - j * dy
                self.canvas.create_rectangle(x0, y0, x0 + dx, y0 + dy, fill=color)
def run(self):
    # Background simulation loop; executed on self.thread until exit()
    # clears self.running.
    self.stepCount = 0
    self.learner.startEpisode()
    while True:
        minSleep = .01
        tm = max(minSleep, self.tickTime)
        time.sleep(tm)
        # When tickTime is faster than the minimum sleep, catch up by
        # running the missed steps in a burst.
        # NOTE(review): divides by self.tickTime -- assumes it is never 0;
        # confirm incrementSpeed() keeps it strictly positive.
        self.stepsToSkip = int(tm / self.tickTime) - 1
        if not self.running:
            self.stopped = True  # signals exit() that we have shut down
            return
        for i in range(self.stepsToSkip):
            self.step()
        self.stepsToSkip = 0
        self.step()
        # self.robot.draw()
    # Unreachable: the loop above only exits via the `return` branch.
    self.learner.stopEpisode()
def start(self):
    # Enter the Tk event loop; blocks until the window is closed.
    self.win.mainloop()
def run():
    # Module entry point: build the Tk root window, start the Application
    # (which spawns the simulation thread), install a periodic redraw
    # callback, and enter the Tk main loop.
    global root
    root = Tkinter.Tk()
    root.title( 'Crawler GUI' )
    root.resizable( 0, 0 )
    # root.mainloop()
    app = Application(root)
    def update_gui():
        # Redraw the robot every ~10ms on the Tk event loop.
        app.robot.draw(app.stepCount, app.tickTime)
        root.after(10, update_gui)
    update_gui()
    root.protocol( 'WM_DELETE_WINDOW', app.exit)
    try:
        app.start()
    except:
        # Deliberate catch-all (incl. KeyboardInterrupt): make sure the
        # simulation thread is stopped and the process exits cleanly.
        app.exit()
| |
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import imath
import PyOpenColorIO
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class LUTTest( GafferImageTest.ImageTestCase ) :
    """Tests for the GafferImage.LUT node (an OpenColorIO FileTransform)."""

    # Shared fixtures: a checker test image and a 1D slog10 LUT file.
    imageFile = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" )
    lut = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/openColorIO/luts/slog10.spi1d" )

    def test( self ) :
        # With no LUT file set, the node is a pass-through.
        n = GafferImage.ImageReader()
        n["fileName"].setValue( self.imageFile )
        o = GafferImage.LUT()
        o["in"].setInput( n["out"] )
        self.assertImagesEqual( n["out"], o["out"] )
        # Applying the LUT changes the image; forward and inverse
        # directions differ from the input and from each other.
        o["fileName"].setValue( self.lut )
        o["interpolation"].setValue( GafferImage.LUT.Interpolation.Linear )
        forward = GafferImage.ImageAlgo.image( o["out"] )
        self.assertNotEqual( GafferImage.ImageAlgo.image( n["out"] ), forward )
        o["direction"].setValue( GafferImage.LUT.Direction.Inverse )
        inverse = GafferImage.ImageAlgo.image( o["out"] )
        self.assertNotEqual( GafferImage.ImageAlgo.image( n["out"] ), inverse )
        self.assertNotEqual( forward, inverse )

    def testBadFileName( self ) :
        # A nonexistent LUT file raises when the image is computed.
        n = GafferImage.ImageReader()
        n["fileName"].setValue( self.imageFile )
        o = GafferImage.LUT()
        o["in"].setInput( n["out"] )
        o["fileName"].setValue( "/not/a/real.cube" )
        self.assertRaises( RuntimeError, GafferImage.ImageAlgo.image, o["out"] )

    def testBadInterpolation( self ) :
        # An interpolation unsupported by the file falls back to the
        # default result but emits exactly one OCIO warning.
        n = GafferImage.ImageReader()
        n["fileName"].setValue( self.imageFile )
        o = GafferImage.LUT()
        o["in"].setInput( n["out"] )
        o["fileName"].setValue( self.lut )
        image = GafferImage.ImageAlgo.image( o["out"] )
        log = []
        def loggingFunction( message ) :
            log.append( message )
        try :
            PyOpenColorIO.SetLoggingFunction( loggingFunction )
            o["interpolation"].setValue( GafferImage.LUT.Interpolation.Tetrahedral )
            # Bad interpolations fall back to the default interpolation, but
            # also emit a warning message.
            self.assertEqual( GafferImage.ImageAlgo.image( o["out"] ), image )
        finally :
            PyOpenColorIO.ResetToDefaultLoggingFunction()
        ## \todo Perhaps libGafferImage should permanently install a logging function that
        # forwards messages to `IECore::MessageHandler`?
        self.assertEqual( len( log ), 1 )
        self.assertIn(
            "Interpolation specified by FileTransform 'tetrahedral' is not allowed with the given file",
            log[0]
        )

    def testHashPassThrough( self ) :
        # Hashes (and images) pass through when the node is disabled or
        # has no LUT file set.
        n = GafferImage.ImageReader()
        n["fileName"].setValue( self.imageFile )
        o = GafferImage.LUT()
        o["in"].setInput( n["out"] )
        self.assertImageHashesEqual( n["out"], o["out"] )
        self.assertImagesEqual( n["out"], o["out"] )
        o["fileName"].setValue( self.lut )
        self.assertNotEqual( GafferImage.ImageAlgo.image( n["out"] ), GafferImage.ImageAlgo.image( o["out"] ) )
        o["enabled"].setValue( False )
        self.assertImageHashesEqual( n["out"], o["out"] )
        self.assertImagesEqual( n["out"], o["out"] )
        self.assertEqual( n["out"]['format'].hash(), o["out"]['format'].hash() )
        self.assertEqual( n["out"]['dataWindow'].hash(), o["out"]['dataWindow'].hash() )
        self.assertEqual( n["out"]["metadata"].getValue(), o["out"]["metadata"].getValue() )
        self.assertEqual( n["out"]['channelNames'].hash(), o["out"]['channelNames'].hash() )
        o["enabled"].setValue( True )
        o["fileName"].setValue( "" )
        self.assertImageHashesEqual( n["out"], o["out"] )
        self.assertImagesEqual( n["out"], o["out"] )
        self.assertEqual( n["out"]['format'].hash(), o["out"]['format'].hash() )
        self.assertEqual( n["out"]['dataWindow'].hash(), o["out"]['dataWindow'].hash() )
        self.assertEqual( n["out"]["metadata"].getValue(), o["out"]["metadata"].getValue() )
        self.assertEqual( n["out"]['channelNames'].hash(), o["out"]['channelNames'].hash() )

    def testImageHashPassThrough( self ) :
        # The whole-image hash only changes once a LUT file is applied.
        i = GafferImage.ImageReader()
        i["fileName"].setValue( self.imageFile )
        o = GafferImage.LUT()
        o["in"].setInput( i["out"] )
        self.assertEqual( GafferImage.ImageAlgo.imageHash( i["out"] ), GafferImage.ImageAlgo.imageHash( o["out"] ) )
        o["fileName"].setValue( self.lut )
        self.assertNotEqual( GafferImage.ImageAlgo.imageHash( i["out"] ), GafferImage.ImageAlgo.imageHash( o["out"] ) )

    def testChannelsAreSeparate( self ) :
        # Distinct channels must produce distinct hashes and data.
        i = GafferImage.ImageReader()
        i["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/circles.exr" ) )
        o = GafferImage.LUT()
        o["in"].setInput( i["out"] )
        o["fileName"].setValue( self.lut )
        self.assertNotEqual(
            o["out"].channelDataHash( "R", imath.V2i( 0 ) ),
            o["out"].channelDataHash( "G", imath.V2i( 0 ) )
        )
        self.assertNotEqual(
            o["out"].channelData( "R", imath.V2i( 0 ) ),
            o["out"].channelData( "G", imath.V2i( 0 ) )
        )

    def testPassThrough( self ) :
        # Non-colour plugs (format, dataWindow, metadata, channelNames)
        # pass straight through even with a LUT applied.
        i = GafferImage.ImageReader()
        i["fileName"].setValue( self.imageFile )
        o = GafferImage.LUT()
        o["in"].setInput( i["out"] )
        o["fileName"].setValue( self.lut )
        self.assertEqual( i["out"]["format"].hash(), o["out"]["format"].hash() )
        self.assertEqual( i["out"]["dataWindow"].hash(), o["out"]["dataWindow"].hash() )
        self.assertEqual( i["out"]["metadata"].getValue(), o["out"]["metadata"].getValue() )
        self.assertEqual( i["out"]["channelNames"].hash(), o["out"]["channelNames"].hash() )
        self.assertEqual( i["out"]["format"].getValue(), o["out"]["format"].getValue() )
        self.assertEqual( i["out"]["dataWindow"].getValue(), o["out"]["dataWindow"].getValue() )
        self.assertEqual( i["out"]["metadata"].getValue(), o["out"]["metadata"].getValue() )
        self.assertEqual( i["out"]["channelNames"].getValue(), o["out"]["channelNames"].getValue() )
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| |
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from wagtail.tests.testapp.models import SingleEventPage
from wagtail.tests.testapp.rich_text import CustomRichTextArea
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailadmin.rich_text import HalloRichTextArea, get_rich_text_editor_widget
from wagtail.wagtailcore.blocks import RichTextBlock
from wagtail.wagtailcore.models import Page, get_page_models
from wagtail.wagtailcore.rich_text import RichText
class BaseRichTextEditHandlerTestCase(TestCase):
    """Base class that isolates tests from Wagtail's edit-handler caches."""

    def _clear_edit_handler_cache(self):
        """
        These tests generate new EditHandlers with different settings. The
        cached edit handlers should be cleared before and after each test run
        to ensure that no changes leak through to other tests.
        """
        from wagtail.tests.testapp.models import DefaultRichBlockFieldPage
        block_page_edit_handler = DefaultRichBlockFieldPage.get_edit_handler()
        if block_page_edit_handler._form_class:
            # Drop the cached widget on the rich_text block of the cached
            # form class so it is rebuilt under the current settings.
            rich_text_block = block_page_edit_handler._form_class.base_fields['body'].block.child_blocks['rich_text']
            if hasattr(rich_text_block, 'field'):
                del rich_text_block.field
        # Every page model caches its edit handler (exposes cache_clear()).
        for page_class in get_page_models():
            page_class.get_edit_handler.cache_clear()

    def setUp(self):
        super(BaseRichTextEditHandlerTestCase, self).setUp()
        self._clear_edit_handler_cache()

    def tearDown(self):
        self._clear_edit_handler_cache()
        super(BaseRichTextEditHandlerTestCase, self).tearDown()
class TestGetRichTextEditorWidget(TestCase):
    """Resolution of get_rich_text_editor_widget() from the
    WAGTAILADMIN_RICH_TEXT_EDITORS setting."""

    @override_settings()
    def test_default(self):
        # Simulate the absence of a setting
        if hasattr(settings, 'WAGTAILADMIN_RICH_TEXT_EDITORS'):
            del settings.WAGTAILADMIN_RICH_TEXT_EDITORS
        self.assertIsInstance(get_rich_text_editor_widget(), HalloRichTextArea)

    @override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
        'default': {
            'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
        },
    })
    def test_overridden_default_editor(self):
        # The 'default' entry replaces the built-in Hallo editor.
        self.assertIsInstance(get_rich_text_editor_widget(), CustomRichTextArea)

    @override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
        'custom': {
            'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
        },
    })
    def test_custom_editor_without_default(self):
        # A named editor resolves even when no 'default' entry exists.
        self.assertIsInstance(get_rich_text_editor_widget('custom'), CustomRichTextArea)

    @override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
        'default': {
            'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea'
        },
        'custom': {
            'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
        },
    })
    def test_custom_editor_with_default(self):
        # Both the default and the named editor resolve independently.
        self.assertIsInstance(get_rich_text_editor_widget(), HalloRichTextArea)
        self.assertIsInstance(get_rich_text_editor_widget('custom'), CustomRichTextArea)
@override_settings()
class TestDefaultRichText(BaseRichTextEditHandlerTestCase, WagtailTestUtils):
    """With no editor setting at all, the Hallo editor is used in the
    page-edit admin views, for both rich text fields and blocks."""

    def setUp(self):
        super(TestDefaultRichText, self).setUp()
        # Find root page
        self.root_page = Page.objects.get(id=2)
        self.login()
        # Simulate the absence of a setting
        if hasattr(settings, 'WAGTAILADMIN_RICH_TEXT_EDITORS'):
            del settings.WAGTAILADMIN_RICH_TEXT_EDITORS

    def test_default_editor_in_rich_text_field(self):
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'defaultrichtextfieldpage', self.root_page.id)
        ))
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check that hallo (default editor by now)
        self.assertContains(response, 'makeHalloRichTextEditable("id_body");')
        # check that media for the default hallo features (but not others) is being imported
        self.assertContains(response, 'wagtaildocs/js/hallo-plugins/hallo-wagtaildoclink.js')
        self.assertNotContains(response, 'testapp/js/hallo-blockquote.js')

    def test_default_editor_in_rich_text_block(self):
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'defaultrichblockfieldpage', self.root_page.id)
        ))
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check that hallo (default editor by now)
        self.assertContains(response, 'makeHalloRichTextEditable("__PREFIX__-value");')
        # check that media for the default hallo features (but not others) is being imported
        self.assertContains(response, 'wagtaildocs/js/hallo-plugins/hallo-wagtaildoclink.js')
        self.assertNotContains(response, 'testapp/js/hallo-blockquote.js')
@override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
    'default': {
        'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
    },
})
class TestOverriddenDefaultRichText(BaseRichTextEditHandlerTestCase, WagtailTestUtils):
    """Overriding the 'default' editor swaps the admin widget for fields
    and blocks alike."""

    def setUp(self):
        super(TestOverriddenDefaultRichText, self).setUp()
        # Find root page
        self.root_page = Page.objects.get(id=2)
        self.login()

    def test_overridden_default_editor_in_rich_text_field(self):
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'defaultrichtextfieldpage', self.root_page.id)
        ))
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check that hallo (default editor by now) was replaced with fake editor
        self.assertNotContains(response, 'makeHalloRichTextEditable("id_body");')
        self.assertContains(response, 'customEditorInitScript("id_body");')

    def test_overridden_default_editor_in_rich_text_block(self):
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'defaultrichblockfieldpage', self.root_page.id)
        ))
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check that hallo (default editor by now) was replaced with fake editor
        self.assertNotContains(response, 'makeHalloRichTextEditable("__PREFIX__-value");')
        self.assertContains(response, 'customEditorInitScript("__PREFIX__-value");')
@override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
    'default': {
        'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea'
    },
    'custom': {
        'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
    },
})
class TestCustomDefaultRichText(BaseRichTextEditHandlerTestCase, WagtailTestUtils):
    """Pages that name a 'custom' editor get it, while 'default' stays Hallo.

    The custom* test page models are presumably configured with
    editor='custom' -- confirm against wagtail.tests.testapp.models."""

    def setUp(self):
        super(TestCustomDefaultRichText, self).setUp()
        # Find root page
        self.root_page = Page.objects.get(id=2)
        self.login()

    def test_custom_editor_in_rich_text_field(self):
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'customrichtextfieldpage', self.root_page.id)
        ))
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check that hallo (default editor by now) was replaced with fake editor
        self.assertNotContains(response, 'makeHalloRichTextEditable("id_body");')
        self.assertContains(response, 'customEditorInitScript("id_body");')

    def test_custom_editor_in_rich_text_block(self):
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'customrichblockfieldpage', self.root_page.id)
        ))
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check that hallo (default editor by now) was replaced with fake editor
        self.assertNotContains(response, 'makeHalloRichTextEditable("__PREFIX__-value");')
        self.assertContains(response, 'customEditorInitScript("__PREFIX__-value");')
class TestRichTextValue(TestCase):
    """Rendering of RichText values that contain internal page links."""

    def setUp(self):
        # A page linked to from the rich text, so the linktype="page"
        # reference can be resolved to a real URL.
        self.root_page = Page.objects.get(id=2)
        self.single_event_page = SingleEventPage(
            title="foo",
            location='the moon', audience='public',
            cost='free', date_from='2001-01-01',
        )
        self.root_page.add_child(instance=self.single_event_page)

    def test_render(self):
        # str() on a RichText expands the page link and wraps the whole
        # value in a rich-text div.
        source_html = '<p>To the <a linktype="page" id="{}">moon</a>!</p>'.format(
            self.single_event_page.id
        )
        rendered = str(RichText(source_html))
        expected_html = (
            '<div class="rich-text"><p>To the <a href="'
            '/foo/pointless-suffix/">moon</a>!</p></div>')
        self.assertEqual(rendered, expected_html)
@override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
    'default': {
        'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea'
    },
    'custom': {
        'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea',
        'OPTIONS': {
            'plugins': {
                'halloheadings': {'formatBlocks': ['p', 'h2']},
            }
        }
    },
})
class TestHalloJsWithCustomPluginOptions(BaseRichTextEditHandlerTestCase, WagtailTestUtils):
    """The OPTIONS['plugins'] dict is serialised into the hallo.js
    initialiser for both fields and blocks."""

    def setUp(self):
        super(TestHalloJsWithCustomPluginOptions, self).setUp()
        # Find root page
        self.root_page = Page.objects.get(id=2)
        self.login()

    def test_custom_editor_in_rich_text_field(self):
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'customrichtextfieldpage', self.root_page.id)
        ))
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check that the custom plugin options are being passed in the hallo initialiser
        self.assertContains(response, 'makeHalloRichTextEditable("id_body", {"halloheadings": {"formatBlocks": ["p", "h2"]}});')

    def test_custom_editor_in_rich_text_block(self):
        block = RichTextBlock(editor='custom')
        form_html = block.render_form(block.to_python("<p>hello</p>"), 'body')
        # Check that the custom plugin options are being passed in the hallo initialiser
        self.assertIn('makeHalloRichTextEditable("body", {"halloheadings": {"formatBlocks": ["p", "h2"]}});', form_html)
class TestHalloJsWithFeaturesKwarg(BaseRichTextEditHandlerTestCase, WagtailTestUtils):
    """A `features` list passed to RichTextField/RichTextBlock selects
    which hallo plugins (and their js/css media) are loaded; unknown
    feature names are silently ignored."""

    def setUp(self):
        super(TestHalloJsWithFeaturesKwarg, self).setUp()
        # Find root page
        self.root_page = Page.objects.get(id=2)
        self.login()

    def test_features_list_on_rich_text_field(self):
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'richtextfieldwithfeaturespage', self.root_page.id)
        ))
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check that the custom plugin options are being passed in the hallo initialiser
        self.assertContains(response, '"halloblockquote":')
        self.assertContains(response, '"hallowagtailembeds":')
        self.assertNotContains(response, '"hallolists":')
        self.assertNotContains(response, '"hallowagtailimage":')
        # check that media (js/css) from the features is being imported
        self.assertContains(response, 'testapp/js/hallo-blockquote.js')
        self.assertContains(response, 'testapp/css/hallo-blockquote.css')
        # check that we're NOT importing media for the default features we're not using
        self.assertNotContains(response, 'wagtaildocs/js/hallo-plugins/hallo-wagtaildoclink.js')

    def test_features_list_on_rich_text_block(self):
        block = RichTextBlock(features=['blockquote', 'embed', 'made-up-feature'])
        form_html = block.render_form(block.to_python("<p>hello</p>"), 'body')
        # Check that the custom plugin options are being passed in the hallo initialiser
        self.assertIn('"halloblockquote":', form_html)
        self.assertIn('"hallowagtailembeds":', form_html)
        self.assertNotIn('"hallolists":', form_html)
        self.assertNotIn('"hallowagtailimage":', form_html)
        # check that media (js/css) from the features is being imported
        media_html = str(block.media)
        self.assertIn('testapp/js/hallo-blockquote.js', media_html)
        self.assertIn('testapp/css/hallo-blockquote.css', media_html)
        # check that we're NOT importing media for the default features we're not using
        self.assertNotIn('wagtaildocs/js/hallo-plugins/hallo-wagtaildoclink.js', media_html)
@override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
    'default': {
        'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea',
        'OPTIONS': {
            'features': ['blockquote', 'image']
        }
    },
    'custom': {
        'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea',
        'OPTIONS': {
            'features': ['blockquote', 'image']
        }
    },
})
class TestHalloJsWithCustomFeatureOptions(BaseRichTextEditHandlerTestCase, WagtailTestUtils):
    """An OPTIONS['features'] list in the editor setting selects plugins,
    but an explicit `features` kwarg on the field/block overrides it."""

    def setUp(self):
        super(TestHalloJsWithCustomFeatureOptions, self).setUp()
        # Find root page
        self.root_page = Page.objects.get(id=2)
        self.login()

    def test_custom_features_option_on_rich_text_field(self):
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'customrichtextfieldpage', self.root_page.id)
        ))
        # Check status code
        self.assertEqual(response.status_code, 200)
        # Check that the custom plugin options are being passed in the hallo initialiser
        self.assertContains(response, '"halloblockquote":')
        self.assertContains(response, '"hallowagtailimage":')
        self.assertNotContains(response, '"hallolists":')
        self.assertNotContains(response, '"hallowagtailembeds":')
        # a 'features' list passed on the RichTextField (as we do in richtextfieldwithfeaturespage)
        # should override the list in OPTIONS
        response = self.client.get(reverse(
            'wagtailadmin_pages:add', args=('tests', 'richtextfieldwithfeaturespage', self.root_page.id)
        ))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '"halloblockquote":')
        self.assertContains(response, '"hallowagtailembeds":')
        self.assertNotContains(response, '"hallolists":')
        self.assertNotContains(response, '"hallowagtailimage":')
        # check that media (js/css) from the features is being imported
        self.assertContains(response, 'testapp/js/hallo-blockquote.js')
        self.assertContains(response, 'testapp/css/hallo-blockquote.css')
        # check that we're NOT importing media for the default features we're not using
        self.assertNotContains(response, 'wagtaildocs/js/hallo-plugins/hallo-wagtaildoclink.js')

    def test_custom_features_option_on_rich_text_block(self):
        block = RichTextBlock(editor='custom')
        form_html = block.render_form(block.to_python("<p>hello</p>"), 'body')
        # Check that the custom plugin options are being passed in the hallo initialiser
        self.assertIn('"halloblockquote":', form_html)
        self.assertIn('"hallowagtailimage":', form_html)
        self.assertNotIn('"hallowagtailembeds":', form_html)
        self.assertNotIn('"hallolists":', form_html)
        # a 'features' list passed on the RichTextBlock
        # should override the list in OPTIONS
        block = RichTextBlock(editor='custom', features=['blockquote', 'embed'])
        form_html = block.render_form(block.to_python("<p>hello</p>"), 'body')
        self.assertIn('"halloblockquote":', form_html)
        self.assertIn('"hallowagtailembeds":', form_html)
        self.assertNotIn('"hallowagtailimage":', form_html)
        self.assertNotIn('"hallolists":', form_html)
        # check that media (js/css) from the features is being imported
        media_html = str(block.media)
        self.assertIn('testapp/js/hallo-blockquote.js', media_html)
        self.assertIn('testapp/css/hallo-blockquote.css', media_html)
        # check that we're NOT importing media for the default features we're not using
        self.assertNotIn('wagtaildocs/js/hallo-plugins/hallo-wagtaildoclink.js', media_html)
| |
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
import ibis
import ibis.expr.types as ir
from ibis.compat import unittest
from ibis.util import pandas_to_ibis_schema
from ibis.common import IbisTypeError
from ibis.tests.util import ImpalaE2E
# In-memory fixture mirroring the Impala "functional_alltypes" test table,
# with NULLs injected into several columns for round-trip tests below.
# NOTE(review): None inside np.bool_([...]) is coerced to a bool rather
# than kept as a missing value -- confirm that is the intended fixture.
functional_alltypes_with_nulls = pd.DataFrame({
    'bigint_col': np.int64([0, 10, 20, 30, 40, 50, 60, 70, 80, 90]),
    'bool_col': np.bool_([True, False, True, False, True, None, True, False, True,
                          False]),
    'date_string_col': ['11/01/10', None, '11/01/10', '11/01/10',
                        '11/01/10', '11/01/10', '11/01/10', '11/01/10',
                        '11/01/10', '11/01/10'],
    'double_col': np.float64([0.0, 10.1, None, 30.299999999999997,
                              40.399999999999999, 50.5, 60.599999999999994,
                              70.700000000000003, 80.799999999999997, 90.899999999999991]),
    'float_col': np.float32([None, 1.1000000238418579, 2.2000000476837158,
                             3.2999999523162842, 4.4000000953674316, 5.5,
                             6.5999999046325684, 7.6999998092651367, 8.8000001907348633,
                             9.8999996185302734]),
    'int_col': np.int32([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
    'month': [11, 11, 11, 11, 2, 11, 11, 11, 11, 11],
    'smallint_col': np.int16([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
    'string_col': ['0', '1', None, '3', '4', '5', '6', '7', '8', '9'],
    'timestamp_col': [pd.Timestamp('2010-11-01 00:00:00'),
                      None,
                      pd.Timestamp('2010-11-01 00:02:00.100000'),
                      pd.Timestamp('2010-11-01 00:03:00.300000'),
                      pd.Timestamp('2010-11-01 00:04:00.600000'),
                      pd.Timestamp('2010-11-01 00:05:00.100000'),
                      pd.Timestamp('2010-11-01 00:06:00.150000'),
                      pd.Timestamp('2010-11-01 00:07:00.210000'),
                      pd.Timestamp('2010-11-01 00:08:00.280000'),
                      pd.Timestamp('2010-11-01 00:09:00.360000')],
    'tinyint_col': np.int8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
    'year': [2010, 2010, 2010, 2010, 2010, 2010, 2010, 2010, 2010, 2010]})
class TestPandasTypeInterop(unittest.TestCase):
    """Conversions between pandas objects and ibis value expressions."""

    def test_series_to_ibis_literal(self):
        # A pandas Series converts to the same expression as an explicit
        # ibis sequence built from its values.
        series = pd.Series([1, 2, 3, 4])
        as_expr = ir.as_value_expr(series)
        as_sequence = ir.sequence(list(series))
        assert as_expr.equals(as_sequence)
class TestPandasSchemaInference(unittest.TestCase):
    """pandas dtype -> ibis schema inference on single-column frames.

    Unsigned types are widened to the next larger signed ibis type;
    uint64 has no safe signed equivalent and must raise.
    """

    def test_dtype_bool(self):
        df = pd.DataFrame({'col': [True, False, False]})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'boolean')])
        assert inferred == expected

    def test_dtype_int8(self):
        df = pd.DataFrame({'col': np.int8([-3, 9, 17])})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'int8')])
        assert inferred == expected

    def test_dtype_int16(self):
        df = pd.DataFrame({'col': np.int16([-5, 0, 12])})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'int16')])
        assert inferred == expected

    def test_dtype_int32(self):
        df = pd.DataFrame({'col': np.int32([-12, 3, 25000])})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'int32')])
        assert inferred == expected

    def test_dtype_int64(self):
        df = pd.DataFrame({'col': np.int64([102, 67228734, -0])})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'int64')])
        assert inferred == expected

    def test_dtype_float32(self):
        df = pd.DataFrame({'col': np.float32([45e-3, -0.4, 99.])})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'float')])
        assert inferred == expected

    def test_dtype_float64(self):
        df = pd.DataFrame({'col': np.float64([-3e43, 43., 10000000.])})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'double')])
        assert inferred == expected

    def test_dtype_uint8(self):
        # uint8 widens to int16 to preserve the full value range.
        df = pd.DataFrame({'col': np.uint8([3, 0, 16])})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'int16')])
        assert inferred == expected

    def test_dtype_uint16(self):
        df = pd.DataFrame({'col': np.uint16([5569, 1, 33])})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'int32')])
        assert inferred == expected

    def test_dtype_uint32(self):
        df = pd.DataFrame({'col': np.uint32([100, 0, 6])})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'int64')])
        assert inferred == expected

    def test_dtype_uint64(self):
        # No signed 64-bit type can hold uint64's range: must raise.
        # (Fixed: the result was previously bound to an unused local.)
        df = pd.DataFrame({'col': np.uint64([666, 2, 3])})
        with self.assertRaises(IbisTypeError):
            pandas_to_ibis_schema(df)

    def test_dtype_datetime64(self):
        df = pd.DataFrame({
            'col': [pd.Timestamp('2010-11-01 00:01:00'),
                    pd.Timestamp('2010-11-01 00:02:00.1000'),
                    pd.Timestamp('2010-11-01 00:03:00.300000')]})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'timestamp')])
        assert inferred == expected

    def test_dtype_timedelta64(self):
        # Timedeltas map to their integer (nanosecond) representation.
        df = pd.DataFrame({
            'col': [pd.Timedelta('1 days'),
                    pd.Timedelta('-1 days 2 min 3us'),
                    pd.Timedelta('-2 days +23:57:59.999997')]})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'int64')])
        assert inferred == expected

    def test_dtype_string(self):
        df = pd.DataFrame({'col': ['foo', 'bar', 'hello']})
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'string')])
        assert inferred == expected

    def test_dtype_categorical(self):
        df = pd.DataFrame({'col': ['a', 'b', 'c', 'a']}, dtype='category')
        inferred = pandas_to_ibis_schema(df)
        expected = ibis.schema([('col', 'category')])
        assert inferred == expected
@pytest.mark.e2e
class TestPandasRoundTrip(ImpalaE2E, unittest.TestCase):
    """End-to-end round trips of pandas DataFrames through Impala."""

    def test_round_trip(self):
        pytest.skip('fails')  # known-broken; body kept for reference
        df1 = self.alltypes.execute()
        df2 = self.con.pandas(df1, 'bamboo', database=self.tmp_db).execute()
        assert (df1.columns == df2.columns).all()
        assert (df1.dtypes == df2.dtypes).all()
        assert (df1 == df2).all().all()

    def test_round_trip_non_int_missing_data(self):
        # Round-trip the fixture defined at module level; NULL cells are
        # expected to compare unequal, everything else exactly equal.
        df1 = functional_alltypes_with_nulls
        table = self.con.pandas(df1, 'fawn', database=self.tmp_db)
        df2 = table.execute()
        assert (df1.columns == df2.columns).all()
        assert (df1.dtypes == df2.dtypes).all()
        # bool/int cols should be exact
        assert (df1.bool_col == df2.bool_col).all()
        assert (df1.tinyint_col == df2.tinyint_col).all()
        assert (df1.smallint_col == df2.smallint_col).all()
        assert (df1.int_col == df2.int_col).all()
        assert (df1.bigint_col == df2.bigint_col).all()
        assert (df1.month == df2.month).all()
        assert (df1.year == df2.year).all()
        # string cols should be equal everywhere except for the NULLs
        assert ((df1.string_col == df2.string_col) ==
                [1, 1, 0, 1, 1, 1, 1, 1, 1, 1]).all()
        assert ((df1.date_string_col == df2.date_string_col) ==
                [1, 0, 1, 1, 1, 1, 1, 1, 1, 1]).all()
        # float cols within tolerance, and NULLs should be False
        assert ((df1.double_col - df2.double_col < 1e-9) ==
                [1, 1, 0, 1, 1, 1, 1, 1, 1, 1]).all()
        assert ((df1.float_col - df2.float_col < 1e-9) ==
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]).all()

    def test_round_trip_missing_type_promotion(self):
        pytest.skip('unfinished')  # scaffolding for a future test
        # prepare Impala table with missing ints
        # TODO: switch to self.con.raw_sql once #412 is fixed
        create_query = ('CREATE TABLE {0}.missing_ints '
                        ' (tinyint_col TINYINT, bigint_col BIGINT) '
                        'STORED AS PARQUET'.format(self.tmp_db))
        insert_query = ('INSERT INTO {0}.missing_ints '
                        'VALUES (NULL, 3), (-5, NULL), (19, 444444)'.format(
                            self.tmp_db))
        self.con.con.cursor.execute(create_query)
        self.con.con.cursor.execute(insert_query)
        table = self.con.table('missing_ints', database=self.tmp_db)
        df = table.execute()
        # WHAT NOW?
| |
"""ACME Identifier Validation Challenges."""
import abc
import functools
import hashlib
import logging
import socket
from cryptography.hazmat.primitives import hashes
import OpenSSL
import requests
from acme import errors
from acme import crypto_util
from acme import fields
from acme import jose
from acme import other
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class Challenge(jose.TypedJSONObjectWithFields):
    # _fields_to_partial_json | pylint: disable=abstract-method
    """ACME challenge."""
    TYPES = {}  # registry of concrete challenge classes, keyed by type

    @classmethod
    def from_json(cls, jobj):
        # Unknown challenge types are wrapped in UnrecognizedChallenge
        # instead of propagating jose.UnrecognizedTypeError, so peers may
        # define extra types without breaking deserialization.
        try:
            return super(Challenge, cls).from_json(jobj)
        except jose.UnrecognizedTypeError as error:
            logger.debug(error)
            return UnrecognizedChallenge.from_json(jobj)
class ContinuityChallenge(Challenge): # pylint: disable=abstract-method
"""Client validation challenges."""
# Marker base class: challenges that prove control over a domain name
# (http-01, tls-sni-01, dns).
class DVChallenge(Challenge):  # pylint: disable=abstract-method
    """Domain validation challenges."""
class ChallengeResponse(jose.TypedJSONObjectWithFields):
    # _fields_to_partial_json | pylint: disable=abstract-method
    """ACME challenge response."""
    # Registry used by the ``ChallengeResponse.register`` decorator;
    # presumably keyed by each subclass's ``typ`` value.
    TYPES = {}
    resource_type = 'challenge'
    # Fixed "resource" field carried on every challenge response message.
    resource = fields.Resource(resource_type)
class UnrecognizedChallenge(Challenge):
    """Unrecognized challenge.

    ACME specification defines a generic framework for challenges and
    defines some standard challenges that are implemented in this
    module. However, other implementations (including peers) might
    define additional challenge types, which should be ignored if
    unrecognized.

    :ivar jobj: Original JSON decoded object.
    """

    def __init__(self, jobj):
        super(UnrecognizedChallenge, self).__init__()
        # Parent jose objects are immutable; bypass the overridden
        # __setattr__ to stash the raw JSON object on the instance.
        object.__setattr__(self, "jobj", jobj)

    def to_partial_json(self):
        # pylint: disable=no-member
        # Round-trip: serialize back exactly what was received.
        return self.jobj

    @classmethod
    def from_json(cls, jobj):
        # No field parsing -- keep the payload verbatim.
        return cls(jobj)
class _TokenDVChallenge(DVChallenge):
    """DV Challenge with token.

    :ivar bytes token:
    """
    # Bug fix: use integer division -- under Python 3, ``128 / 8`` is the
    # float 16.0, which would then be passed as ``size`` to the decoder.
    TOKEN_SIZE = 128 // 8  # Based on the entropy value from the spec
    """Minimum size of the :attr:`token` in bytes."""

    # TODO: acme-spec doesn't specify token as base64-encoded value
    token = jose.Field(
        "token", encoder=jose.encode_b64jose, decoder=functools.partial(
            jose.decode_b64jose, size=TOKEN_SIZE, minimum=True))

    # XXX: rename to ~token_good_for_url
    @property
    def good_token(self):  # XXX: @token.decoder
        """Is `token` good?

        .. todo:: acme-spec wants "It MUST NOT contain any non-ASCII
           characters", but it should also warrant that it doesn't
           contain ".." or "/"...
        """
        # TODO: check that path combined with uri does not go above
        # URI_ROOT_PATH!
        return b'..' not in self.token and b'/' not in self.token
class KeyAuthorizationChallengeResponse(ChallengeResponse):
    """Response to Challenges based on Key Authorization.

    :param unicode key_authorization:
    """
    key_authorization = jose.Field("keyAuthorization")
    thumbprint_hash_function = hashes.SHA256

    def verify(self, chall, account_public_key):
        """Verify the key authorization.

        :param KeyAuthorization chall: Challenge that corresponds to
            this response.
        :param JWK account_public_key:

        :return: ``True`` iff verification of the key authorization was
            successful.
        :rtype: bool
        """
        parts = self.key_authorization.split('.')  # pylint: disable=no-member
        if len(parts) != 2:
            logger.debug("Key authorization (%r) is not well formed",
                         self.key_authorization)
            return False

        if parts[0] != chall.encode("token"):
            logger.debug("Mismatching token in key authorization: "
                         "%r instead of %r", parts[0], chall.encode("token"))
            return False

        thumbprint = jose.b64encode(account_public_key.thumbprint(
            hash_function=self.thumbprint_hash_function)).decode()
        if parts[1] != thumbprint:
            # Bug fix: log the thumbprint part (parts[1]); previously the
            # token part (parts[0]) was logged here, producing a confusing
            # "X instead of Y" message comparing unrelated values.
            logger.debug("Mismatching thumbprint in key authorization: "
                         "%r instead of %r", parts[1], thumbprint)
            return False

        return True
class KeyAuthorizationChallenge(_TokenDVChallenge):
    # pylint: disable=abstract-class-little-used,too-many-ancestors
    """Challenge based on Key Authorization.

    :param response_cls: Subclass of `KeyAuthorizationChallengeResponse`
        that will be used to generate `response`.
    """
    __metaclass__ = abc.ABCMeta
    response_cls = NotImplemented
    thumbprint_hash_function = (
        KeyAuthorizationChallengeResponse.thumbprint_hash_function)

    def key_authorization(self, account_key):
        """Generate Key Authorization.

        :param JWK account_key:
        :rtype unicode:
        """
        thumbprint = jose.b64encode(account_key.thumbprint(
            hash_function=self.thumbprint_hash_function)).decode()
        return self.encode("token") + "." + thumbprint

    def response(self, account_key):
        """Generate response to the challenge.

        :param JWK account_key:

        :returns: Response (initialized `response_cls`) to the challenge.
        :rtype: KeyAuthorizationChallengeResponse
        """
        key_auth = self.key_authorization(account_key)
        return self.response_cls(key_authorization=key_auth)

    @abc.abstractmethod
    def validation(self, account_key, **kwargs):
        """Generate validation for the challenge.

        Subclasses must implement this method, but they are likely to
        return completely different data structures, depending on what's
        necessary to complete the challenge. Interepretation of that
        return value must be known to the caller.

        :param JWK account_key:
        :returns: Challenge-specific validation.
        """
        raise NotImplementedError()  # pragma: no cover

    def response_and_validation(self, account_key, *args, **kwargs):
        """Generate response and validation.

        Convenience function that return results of `response` and
        `validation`.

        :param JWK account_key:
        :rtype: tuple
        """
        resp = self.response(account_key)
        valid = self.validation(account_key, *args, **kwargs)
        return resp, valid
@ChallengeResponse.register
class HTTP01Response(KeyAuthorizationChallengeResponse):
    """ACME http-01 challenge response."""
    typ = "http-01"

    PORT = 80
    """Verification port as defined by the protocol.

    You can override it (e.g. for testing) by passing ``port`` to
    `simple_verify`.
    """

    WHITESPACE_CUTSET = "\n\r\t "
    """Whitespace characters which should be ignored at the end of the body."""

    def simple_verify(self, chall, domain, account_public_key, port=None):
        """Simple verify.

        :param challenges.SimpleHTTP chall: Corresponding challenge.
        :param unicode domain: Domain name being verified.
        :param account_public_key: Public key for the key pair
            being authorized. If ``None`` key verification is not
            performed!
        :param JWK account_public_key:
        :param int port: Port used in the validation.

        :returns: ``True`` iff validation is successful, ``False``
            otherwise.
        :rtype: bool
        """
        if not self.verify(chall, account_public_key):
            logger.debug("Verification of key authorization in response failed")
            return False

        # TODO: ACME specification defines URI template that doesn't
        # allow to use a custom port... Make sure port is not in the
        # request URI, if it's standard.
        if port is not None and port != self.PORT:
            logger.warning(
                "Using non-standard port for http-01 verification: %s", port)
            domain += ":{0}".format(port)

        target = chall.uri(domain)
        logger.debug("Verifying %s at %s...", chall.typ, target)
        try:
            reply = requests.get(target)
        except requests.exceptions.RequestException as error:
            logger.error("Unable to reach %s: %s", target, error)
            return False
        logger.debug("Received %s: %s. Headers: %s", reply,
                     reply.text, reply.headers)

        # Trailing whitespace in the served body is tolerated by design.
        served = reply.text.rstrip(self.WHITESPACE_CUTSET)
        if served != self.key_authorization:
            logger.debug("Key authorization from response (%r) doesn't match "
                         "HTTP response (%r)", self.key_authorization,
                         served)
            return False
        return True
@Challenge.register  # pylint: disable=too-many-ancestors
class HTTP01(KeyAuthorizationChallenge):
    """ACME http-01 challenge."""
    response_cls = HTTP01Response
    typ = response_cls.typ

    URI_ROOT_PATH = ".well-known/acme-challenge"
    """URI root path for the server provisioned resource."""

    @property
    def path(self):
        """Path (starting with '/') for provisioned resource.

        :rtype: string
        """
        return "/{0}/{1}".format(self.URI_ROOT_PATH, self.encode('token'))

    def uri(self, domain):
        """Create an URI to the provisioned resource.

        Forms an URI to the HTTPS server provisioned resource
        (containing :attr:`~SimpleHTTP.token`).

        :param unicode domain: Domain name being verified.
        :rtype: string
        """
        return "".join(["http://", domain, self.path])

    def validation(self, account_key, **unused_kwargs):
        """Generate validation.

        :param JWK account_key:
        :rtype: unicode
        """
        return self.key_authorization(account_key)
@ChallengeResponse.register
class TLSSNI01Response(KeyAuthorizationChallengeResponse):
    """ACME tls-sni-01 challenge response."""
    typ = "tls-sni-01"

    DOMAIN_SUFFIX = b".acme.invalid"
    """Domain name suffix."""

    PORT = 443
    """Verification port as defined by the protocol.

    You can override it (e.g. for testing) by passing ``port`` to
    `simple_verify`.
    """

    @property
    def z(self):  # pylint: disable=invalid-name
        """``z`` value used for verification.

        :rtype bytes:
        """
        return hashlib.sha256(
            self.key_authorization.encode("utf-8")).hexdigest().lower().encode()

    @property
    def z_domain(self):
        """Domain name used for verification, generated from `z`.

        :rtype bytes:
        """
        # First 32 hex chars, a dot, last 32 hex chars, then the suffix.
        return self.z[:32] + b'.' + self.z[32:] + self.DOMAIN_SUFFIX

    def gen_cert(self, key=None, bits=2048):
        """Generate tls-sni-01 certificate.

        :param OpenSSL.crypto.PKey key: Optional private key used in
            certificate generation. If not provided (``None``), then
            fresh key will be generated.
        :param int bits: Number of bits for newly generated key.

        :rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey`
        """
        if key is None:
            key = OpenSSL.crypto.PKey()
            key.generate_key(OpenSSL.crypto.TYPE_RSA, bits)
        return crypto_util.gen_ss_cert(key, [
            # z_domain is too big to fit into CN, hence first dummy domain
            'dummy', self.z_domain.decode()], force_san=True), key

    def probe_cert(self, domain, **kwargs):
        """Probe tls-sni-01 challenge certificate.

        :param unicode domain:
        """
        # TODO: domain is not necessary if host is provided
        if "host" not in kwargs:
            host = socket.gethostbyname(domain)
            # Consistency fix: use the module logger instead of the root
            # logger (was ``logging.debug``).
            logger.debug('%s resolved to %s', domain, host)
            kwargs["host"] = host

        kwargs.setdefault("port", self.PORT)
        kwargs["name"] = self.z_domain
        # TODO: try different methods?
        # pylint: disable=protected-access
        return crypto_util.probe_sni(**kwargs)

    def verify_cert(self, cert):
        """Verify tls-sni-01 challenge certificate.

        :param OpenSSL.crypto.X509 cert: Challenge certificate.

        :returns: Whether the certificate was successfully verified.
        :rtype: bool
        """
        # pylint: disable=protected-access
        sans = crypto_util._pyopenssl_cert_or_req_san(cert)
        # Consistency fix: use the module logger instead of the root
        # logger (was ``logging.debug``).
        logger.debug('Certificate %s. SANs: %s', cert.digest('sha1'), sans)
        return self.z_domain.decode() in sans

    def simple_verify(self, chall, domain, account_public_key,
                      cert=None, **kwargs):
        """Simple verify.

        Verify ``validation`` using ``account_public_key``, optionally
        probe tls-sni-01 certificate and check using `verify_cert`.

        :param .challenges.TLSSNI01 chall: Corresponding challenge.
        :param str domain: Domain name being validated.
        :param JWK account_public_key:
        :param OpenSSL.crypto.X509 cert: Optional certificate. If not
            provided (``None``) certificate will be retrieved using
            `probe_cert`.
        :param int port: Port used to probe the certificate.

        :returns: ``True`` iff client's control of the domain has been
            verified, ``False`` otherwise.
        :rtype: bool
        """
        if not self.verify(chall, account_public_key):
            logger.debug("Verification of key authorization in response failed")
            return False

        if cert is None:
            try:
                cert = self.probe_cert(domain=domain, **kwargs)
            except errors.Error as error:
                logger.debug(error, exc_info=True)
                return False

        return self.verify_cert(cert)
@Challenge.register  # pylint: disable=too-many-ancestors
class TLSSNI01(KeyAuthorizationChallenge):
    """ACME tls-sni-01 challenge."""
    response_cls = TLSSNI01Response
    typ = response_cls.typ

    # boulder#962, ietf-wg-acme#22
    #n = jose.Field("n", encoder=int, decoder=int)

    def validation(self, account_key, **kwargs):
        """Generate validation.

        :param JWK account_key:
        :param OpenSSL.crypto.PKey cert_key: Optional private key used
            in certificate generation. If not provided (``None``), then
            fresh key will be generated.

        :rtype: `tuple` of `OpenSSL.crypto.X509` and `OpenSSL.crypto.PKey`
        """
        cert_key = kwargs.get('cert_key')
        response = self.response(account_key)
        return response.gen_cert(key=cert_key)
@Challenge.register
class RecoveryContact(ContinuityChallenge):
    """ACME "recoveryContact" challenge.

    :ivar unicode activation_url:
    :ivar unicode success_url:
    :ivar unicode contact:
    """
    typ = "recoveryContact"

    # All three fields are optional on the wire (omitempty).
    activation_url = jose.Field("activationURL", omitempty=True)
    success_url = jose.Field("successURL", omitempty=True)
    contact = jose.Field("contact", omitempty=True)
@ChallengeResponse.register
class RecoveryContactResponse(ChallengeResponse):
    """ACME "recoveryContact" challenge response.

    :ivar unicode token:
    """
    typ = "recoveryContact"
    # Optional token echoed back to the server.
    token = jose.Field("token", omitempty=True)
@Challenge.register
class ProofOfPossession(ContinuityChallenge):
    """ACME "proofOfPossession" challenge.

    :ivar .JWAAlgorithm alg:
    :ivar bytes nonce: Random data, **not** base64-encoded.
    :ivar hints: Various clues for the client (:class:`Hints`).
    """
    typ = "proofOfPossession"

    # Required length (in bytes) of the decoded nonce.
    NONCE_SIZE = 16

    class Hints(jose.JSONObjectWithFields):
        """Hints for "proofOfPossession" challenge.

        :ivar JWK jwk: JSON Web Key
        :ivar tuple cert_fingerprints: `tuple` of `unicode`
        :ivar tuple certs: Sequence of :class:`acme.jose.ComparableX509`
            certificates.
        :ivar tuple subject_key_identifiers: `tuple` of `unicode`
        :ivar tuple issuers: `tuple` of `unicode`
        :ivar tuple authorized_for: `tuple` of `unicode`
        """
        jwk = jose.Field("jwk", decoder=jose.JWK.from_json)
        cert_fingerprints = jose.Field(
            "certFingerprints", omitempty=True, default=())
        certs = jose.Field("certs", omitempty=True, default=())
        subject_key_identifiers = jose.Field(
            "subjectKeyIdentifiers", omitempty=True, default=())
        serial_numbers = jose.Field("serialNumbers", omitempty=True, default=())
        issuers = jose.Field("issuers", omitempty=True, default=())
        authorized_for = jose.Field("authorizedFor", omitempty=True, default=())

        # NOTE: these decorators must appear after the ``certs`` field --
        # they attach X.509 (de)serialization to that specific field.
        @certs.encoder
        def certs(value):  # pylint: disable=missing-docstring,no-self-argument
            return tuple(jose.encode_cert(cert) for cert in value)

        @certs.decoder
        def certs(value):  # pylint: disable=missing-docstring,no-self-argument
            return tuple(jose.decode_cert(cert) for cert in value)

    alg = jose.Field("alg", decoder=jose.JWASignature.from_json)
    nonce = jose.Field(
        "nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
            jose.decode_b64jose, size=NONCE_SIZE))
    hints = jose.Field("hints", decoder=Hints.from_json)
@ChallengeResponse.register
class ProofOfPossessionResponse(ChallengeResponse):
    """ACME "proofOfPossession" challenge response.

    :ivar bytes nonce: Random data, **not** base64-encoded.
    :ivar acme.other.Signature signature: Signature of this message.
    """
    typ = "proofOfPossession"

    # Must match the nonce size declared on the challenge.
    NONCE_SIZE = ProofOfPossession.NONCE_SIZE

    nonce = jose.Field(
        "nonce", encoder=jose.encode_b64jose, decoder=functools.partial(
            jose.decode_b64jose, size=NONCE_SIZE))
    signature = jose.Field("signature", decoder=other.Signature.from_json)

    def verify(self):
        """Verify the challenge."""
        # self.signature is not Field | pylint: disable=no-member
        return self.signature.verify(self.nonce)
@Challenge.register  # pylint: disable=too-many-ancestors
class DNS(_TokenDVChallenge):
    """ACME "dns" challenge."""
    typ = "dns"

    LABEL = "_acme-challenge"
    """Label clients prepend to the domain name being validated."""

    def gen_validation(self, account_key, alg=jose.RS256, **kwargs):
        """Generate validation.

        :param .JWK account_key: Private account key.
        :param .JWA alg:

        :returns: This challenge wrapped in `.JWS`
        :rtype: .JWS
        """
        return jose.JWS.sign(
            payload=self.json_dumps(sort_keys=True).encode('utf-8'),
            key=account_key, alg=alg, **kwargs)

    def check_validation(self, validation, account_public_key):
        """Check validation.

        :param JWS validation:
        :param JWK account_public_key:
        :rtype: bool
        """
        if not validation.verify(key=account_public_key):
            return False
        try:
            return self == self.json_loads(
                validation.payload.decode('utf-8'))
        except jose.DeserializationError as error:
            logger.debug("Checking validation for DNS failed: %s", error)
            return False

    def gen_response(self, account_key, **kwargs):
        """Generate response.

        :param .JWK account_key: Private account key.
        :param .JWA alg:

        :rtype: DNSResponse
        """
        # Bug fix: ``self`` was previously passed as an explicit first
        # argument (``self.gen_validation(self, account_key, ...)``),
        # which shifted ``account_key`` into the ``alg`` parameter.
        return DNSResponse(validation=self.gen_validation(
            account_key, **kwargs))

    def validation_domain_name(self, name):
        """Domain name for TXT validation record.

        :param unicode name: Domain name being validated.
        """
        return "{0}.{1}".format(self.LABEL, name)
@ChallengeResponse.register
class DNSResponse(ChallengeResponse):
    """ACME "dns" challenge response.

    :param JWS validation:
    """
    typ = "dns"

    validation = jose.Field("validation", decoder=jose.JWS.from_json)

    def check_validation(self, chall, account_public_key):
        """Check validation.

        :param challenges.DNS chall:
        :param JWK account_public_key:
        :rtype: bool
        """
        # Delegate to the challenge, which owns the comparison between
        # the signed payload and itself.
        return chall.check_validation(self.validation, account_public_key)
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a sqoop 1 operator
"""
from airflow.contrib.hooks.sqoop_hook import SqoopHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SqoopOperator(BaseOperator):
    """
    Execute a Sqoop job.

    Documentation for Apache Sqoop can be found here: https://sqoop.apache.org/docs/1.4.2/SqoopUserGuide.html.
    """
    template_fields = ('conn_id', 'cmd_type', 'table', 'query', 'target_dir', 'file_type', 'columns', 'split_by',
                       'where', 'export_dir', 'input_null_string', 'input_null_non_string', 'staging_table',
                       'enclosed_by', 'escaped_by', 'input_fields_terminated_by', 'input_lines_terminated_by',
                       'input_optionally_enclosed_by', 'properties', 'extra_import_options', 'driver',
                       'extra_export_options', 'hcatalog_database', 'hcatalog_table',)
    ui_color = '#7D8CA4'

    @apply_defaults
    def __init__(self,
                 conn_id='sqoop_default',
                 cmd_type='import',
                 table=None,
                 query=None,
                 target_dir=None,
                 append=None,
                 file_type='text',
                 columns=None,
                 num_mappers=None,
                 split_by=None,
                 where=None,
                 export_dir=None,
                 input_null_string=None,
                 input_null_non_string=None,
                 staging_table=None,
                 clear_staging_table=False,
                 enclosed_by=None,
                 escaped_by=None,
                 input_fields_terminated_by=None,
                 input_lines_terminated_by=None,
                 input_optionally_enclosed_by=None,
                 batch=False,
                 direct=False,
                 driver=None,
                 verbose=False,
                 relaxed_isolation=False,
                 properties=None,
                 hcatalog_database=None,
                 hcatalog_table=None,
                 create_hcatalog_table=False,
                 extra_import_options=None,
                 extra_export_options=None,
                 *args,
                 **kwargs):
        """
        :param conn_id: str
        :param cmd_type: str specify command to execute "export" or "import"
        :param table: Table to read
        :param query: Import result of arbitrary SQL query. Instead of using the table,
            columns and where arguments, you can specify a SQL statement with the query
            argument. Must also specify a destination directory with target_dir.
        :param target_dir: HDFS destination directory where the data
            from the rdbms will be written
        :param append: Append data to an existing dataset in HDFS
        :param file_type: "avro", "sequence", "text" Imports data to
            into the specified format. Defaults to text.
        :param columns: <col,col,col> Columns to import from table
        :param num_mappers: Use n mapper tasks to import/export in parallel
        :param split_by: Column of the table used to split work units
        :param where: WHERE clause to use during import
        :param export_dir: HDFS Hive database directory to export to the rdbms
        :param input_null_string: The string to be interpreted as null
            for string columns
        :param input_null_non_string: The string to be interpreted as null
            for non-string columns
        :param staging_table: The table in which data will be staged before
            being inserted into the destination table
        :param clear_staging_table: Indicate that any data present in the
            staging table can be deleted
        :param enclosed_by: Sets a required field enclosing character
        :param escaped_by: Sets the escape character
        :param input_fields_terminated_by: Sets the input field separator
        :param input_lines_terminated_by: Sets the input end-of-line character
        :param input_optionally_enclosed_by: Sets a field enclosing character
        :param batch: Use batch mode for underlying statement execution
        :param direct: Use direct export fast path
        :param driver: Manually specify JDBC driver class to use
        :param verbose: Switch to more verbose logging for debug purposes
        :param relaxed_isolation: use read uncommitted isolation level
        :param hcatalog_database: Specifies the database name for the HCatalog table
        :param hcatalog_table: The argument value for this option is the HCatalog table
        :param create_hcatalog_table: Have sqoop create the hcatalog table passed in or not
        :param properties: additional JVM properties passed to sqoop
        :param extra_import_options: Extra import options to pass as dict.
            If a key doesn't have a value, just pass an empty string to it.
            Don't include prefix of -- for sqoop options.
        :param extra_export_options: Extra export options to pass as dict.
            If a key doesn't have a value, just pass an empty string to it.
            Don't include prefix of -- for sqoop options.
        """
        super(SqoopOperator, self).__init__(*args, **kwargs)
        self.conn_id = conn_id
        self.cmd_type = cmd_type
        self.table = table
        self.query = query
        self.target_dir = target_dir
        self.append = append
        self.file_type = file_type
        self.columns = columns
        self.num_mappers = num_mappers
        self.split_by = split_by
        self.where = where
        self.export_dir = export_dir
        self.input_null_string = input_null_string
        self.input_null_non_string = input_null_non_string
        self.staging_table = staging_table
        self.clear_staging_table = clear_staging_table
        self.enclosed_by = enclosed_by
        self.escaped_by = escaped_by
        self.input_fields_terminated_by = input_fields_terminated_by
        self.input_lines_terminated_by = input_lines_terminated_by
        self.input_optionally_enclosed_by = input_optionally_enclosed_by
        self.batch = batch
        self.direct = direct
        self.driver = driver
        self.verbose = verbose
        self.relaxed_isolation = relaxed_isolation
        self.hcatalog_database = hcatalog_database
        self.hcatalog_table = hcatalog_table
        self.create_hcatalog_table = create_hcatalog_table
        self.properties = properties
        # Bug fix: default the option mappings to empty dicts. execute()
        # mutates extra_import_options when create_hcatalog_table is set,
        # which raised TypeError when the default of None was kept.
        self.extra_import_options = extra_import_options or {}
        self.extra_export_options = extra_export_options or {}

    def execute(self, context):
        """
        Execute sqoop job
        """
        hook = SqoopHook(conn_id=self.conn_id,
                         verbose=self.verbose,
                         num_mappers=self.num_mappers,
                         hcatalog_database=self.hcatalog_database,
                         hcatalog_table=self.hcatalog_table,
                         properties=self.properties)

        if self.cmd_type == 'export':
            hook.export_table(
                table=self.table,
                export_dir=self.export_dir,
                input_null_string=self.input_null_string,
                input_null_non_string=self.input_null_non_string,
                staging_table=self.staging_table,
                clear_staging_table=self.clear_staging_table,
                enclosed_by=self.enclosed_by,
                escaped_by=self.escaped_by,
                input_fields_terminated_by=self.input_fields_terminated_by,
                input_lines_terminated_by=self.input_lines_terminated_by,
                input_optionally_enclosed_by=self.input_optionally_enclosed_by,
                batch=self.batch,
                relaxed_isolation=self.relaxed_isolation,
                extra_export_options=self.extra_export_options)
        elif self.cmd_type == 'import':
            # add create hcatalog table to extra import options if option passed
            # if new params are added to constructor can pass them in here so don't modify sqoop_hook for each param
            if self.create_hcatalog_table:
                self.extra_import_options['create-hcatalog-table'] = ''

            if self.table and self.query:
                raise AirflowException('Cannot specify query and table together. Need to specify either or.')

            if self.table:
                hook.import_table(
                    table=self.table,
                    target_dir=self.target_dir,
                    append=self.append,
                    file_type=self.file_type,
                    columns=self.columns,
                    split_by=self.split_by,
                    where=self.where,
                    direct=self.direct,
                    driver=self.driver,
                    extra_import_options=self.extra_import_options)
            elif self.query:
                hook.import_query(
                    query=self.query,
                    target_dir=self.target_dir,
                    append=self.append,
                    file_type=self.file_type,
                    split_by=self.split_by,
                    direct=self.direct,
                    driver=self.driver,
                    extra_import_options=self.extra_import_options)
            else:
                raise AirflowException(
                    "Provide query or table parameter to import using Sqoop"
                )
        else:
            raise AirflowException("cmd_type should be 'import' or 'export'")
| |
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import operator
import random
import re
import six
from botocore.exceptions import ClientError
from dateutil.parser import parse
from concurrent.futures import as_completed
from c7n.actions import (
ActionRegistry, BaseAction, ModifyVpcSecurityGroupsAction
)
from c7n.filters import (
FilterRegistry, AgeFilter, ValueFilter, Filter, OPERATORS, DefaultVpcBase
)
from c7n.filters.offhours import OffHour, OnHour
from c7n.filters.health import HealthEventFilter
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n import utils
from c7n.utils import type_schema
# Per-resource registries; the EC2 filters/actions defined in this module
# register themselves here and the EC2 resource class attaches them via
# filter_registry/action_registry.
filters = FilterRegistry('ec2.filters')
actions = ActionRegistry('ec2.actions')

filters.register('health-event', HealthEventFilter)
@resources.register('ec2')
class EC2(QueryResourceManager):

    class resource_type(object):
        """Metadata describing EC2 instances for the generic query layer."""
        service = 'ec2'
        type = 'instance'
        enum_spec = ('describe_instances', 'Reservations[].Instances[]', None)
        detail_spec = None
        id = 'InstanceId'
        filter_name = 'InstanceIds'
        filter_type = 'list'
        name = 'PublicDnsName'
        date = 'LaunchTime'
        dimension = 'InstanceId'
        config_type = "AWS::EC2::Instance"
        shape = "Instance"

        default_report_fields = (
            'CustodianDate',
            'InstanceId',
            'tag:Name',
            'InstanceType',
            'LaunchTime',
            'VpcId',
            'PrivateIpAddress',
        )

    filter_registry = filters
    action_registry = actions

    # if we have to do a fallback scenario where tags don't come in describe
    permissions = ('ec2:DescribeTags',)

    def __init__(self, ctx, data):
        super(EC2, self).__init__(ctx, data)
        self.queries = QueryFilter.parse(self.data.get('query', []))

    def resources(self, query=None):
        """Fetch instances, merging any policy-declared query filters."""
        q = self.resource_query()
        if q is not None:
            query = query or {}
            query['Filters'] = q
        return super(EC2, self).resources(query=query)

    def resource_query(self):
        """Build the EC2 Filters list from the policy's query clauses."""
        qf = []
        qf_names = set()
        # allow same name to be specified multiple times and append the queries
        # under the same name
        for q in self.queries:
            qd = q.query()
            if qd['Name'] in qf_names:
                # Bug fix: the inner loop previously rebound ``qf`` itself
                # (``for qf in qf:``), clobbering the accumulated list and
                # breaking any subsequent iteration/append.
                for existing in qf:
                    if qd['Name'] == existing['Name']:
                        existing['Values'].extend(qd['Values'])
            else:
                qf_names.add(qd['Name'])
                qf.append(qd)
        return qf

    def augment(self, resources):
        """EC2 API and AWOL Tags

        While ec2 api generally returns tags when doing describe_x on for
        various resources, it may also silently fail to do so unless a tag
        is used as a filter.

        See footnote on http://goo.gl/YozD9Q for official documentation.

        Apriori we may be using custodian to ensure tags (including
        name), so there isn't a good default to ensure that we will
        always get tags from describe_x calls.
        """
        # First if we're in event based lambda go ahead and skip this,
        # tags can't be trusted in ec2 instances immediately post creation.
        if not resources or self.data.get('mode', {}).get('type', '') in (
                'cloudtrail', 'ec2-instance-state'):
            return resources

        # AWOL detector, so we don't make extraneous api calls.
        resource_count = len(resources)
        # Bug fix: a ~5% sample (capped at 5) was intended; the previous
        # ``resource_count % 0.05`` modulo always yielded a float < 0.05,
        # so exactly one instance was ever sampled.
        search_count = min(int(resource_count * 0.05) + 1, 5)
        if search_count > resource_count:
            search_count = resource_count
        found = False
        for r in random.sample(resources, search_count):
            if 'Tags' in r:
                found = True
                break

        if found:
            return resources

        # Okay go and do the tag lookup
        client = utils.local_session(self.session_factory).client('ec2')
        tag_set = self.retry(
            client.describe_tags,
            Filters=[{'Name': 'resource-type',
                      'Values': ['instance']}])['Tags']
        resource_tags = {}
        for t in tag_set:
            t.pop('ResourceType')
            rid = t.pop('ResourceId')
            resource_tags.setdefault(rid, []).append(t)

        m = self.get_model()
        for r in resources:
            r['Tags'] = resource_tags.get(r[m.id], ())
        return resources
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
    # JMESPath to the security-group ids attached to an instance.
    RelatedIdsExpression = "SecurityGroups[].GroupId"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
    # JMESPath to the subnet id an instance is placed in.
    RelatedIdsExpression = "SubnetId"
# Generic network-location filter (security-group/subnet placement checks).
filters.register('network-location', net_filters.NetworkLocation)
@filters.register('state-age')
class StateTransitionAge(AgeFilter):
    """Age an instance has been in the given state.

    .. code-block: yaml

        policies:
          - name: ec2-state-running-7-days
            resource: ec2
            filters:
              - type: state-age
                op: ge
                days: 7
    """
    # Fix: raw string literal -- "\(" in a plain string is an invalid
    # escape sequence (DeprecationWarning on Python 3.6+, error later).
    RE_PARSE_AGE = re.compile(r"\(.*?\)")

    # this filter doesn't use date_attribute, but needs to define it
    # to pass AgeFilter's validate method
    date_attribute = "dummy"

    schema = type_schema(
        'state-age',
        op={'type': 'string', 'enum': list(OPERATORS.keys())},
        days={'type': 'number'})

    def get_resource_date(self, i):
        """Extract the transition timestamp from StateTransitionReason.

        The reason string typically embeds a parenthesized timestamp;
        return it parsed as a datetime, or None when absent.
        """
        v = i.get('StateTransitionReason')
        if not v:
            return None
        dates = self.RE_PARSE_AGE.findall(v)
        if dates:
            # Strip the surrounding parentheses before parsing.
            return parse(dates[0][1:-1])
        return None
class StateTransitionFilter(object):
    """Filter instances by state.

    Try to simplify construction for policy authors by automatically
    filtering elements (filters or actions) to the instances states
    they are valid for.

    For more details see http://goo.gl/TZH9Q5
    """
    valid_origin_states = ()

    def filter_instance_state(self, instances, states=None):
        """Return only instances whose state name is in ``states``.

        Falls back to :attr:`valid_origin_states` when no state list
        (or a falsy one) is given.
        """
        states = states or self.valid_origin_states
        original_count = len(instances)
        matched = []
        for instance in instances:
            if instance['State']['Name'] in states:
                matched.append(instance)
        self.log.info("%s %d of %d instances" % (
            self.__class__.__name__, len(matched), original_count))
        return matched
@filters.register('ebs')
class AttachedVolume(ValueFilter):
    """EC2 instances with EBS backed volume

    Filters EC2 instances with EBS backed storage devices (non ephemeral)

    :Example:

        .. code-block:: yaml

            policies:
              - name: ec2-encrypted-ebs-volumes
                resource: ec2
                filters:
                  - type: ebs
                    key: encrypted
                    value: true
    """

    schema = type_schema(
        'ebs', rinherit=ValueFilter.schema,
        **{'operator': {'enum': ['and', 'or']},
           'skip-devices': {'type': 'array', 'items': {'type': 'string'}}})

    def get_permissions(self):
        # Volume data comes via the ebs resource manager; borrow its
        # IAM permissions.
        return self.manager.get_resource_manager('ebs').get_permissions()

    def process(self, resources, event=None):
        self.volume_map = self.get_volume_mapping(resources)
        self.skip = self.data.get('skip-devices', [])
        # Map 'or'/'and' to the builtin any/all predicates. Note the
        # legacy ``cond and a or b`` idiom: 'or' selects any(), any other
        # value selects all().
        self.operator = self.data.get(
            'operator', 'or') == 'or' and any or all
        return list(filter(self, resources))

    def get_volume_mapping(self, resources):
        # Build InstanceId -> [volume, ...] for all EBS block devices,
        # resolving volumes in chunks of 200 instances per lookup.
        volume_map = {}
        manager = self.manager.get_resource_manager('ebs')
        for instance_set in utils.chunks(resources, 200):
            volume_ids = []
            for i in instance_set:
                for bd in i.get('BlockDeviceMappings', ()):
                    if 'Ebs' not in bd:
                        continue
                    volume_ids.append(bd['Ebs']['VolumeId'])
            for v in manager.get_resources(volume_ids):
                if not v['Attachments']:
                    continue
                volume_map.setdefault(
                    v['Attachments'][0]['InstanceId'], []).append(v)
        return volume_map

    def __call__(self, i):
        volumes = self.volume_map.get(i['InstanceId'])
        if not volumes:
            return False
        if self.skip:
            # Iterate over a copy since ``volumes`` is mutated in the loop.
            for v in list(volumes):
                for a in v.get('Attachments', []):
                    if a['Device'] in self.skip:
                        volumes.remove(v)
        # Combine per-volume value matches with the configured predicate.
        return self.operator(map(self.match, volumes))
class InstanceImageBase(object):
    """Mixin providing AMI lookup and per-instance caching helpers."""

    def prefetch_instance_images(self, instances):
        """Populate ``self.image_map`` for instances lacking a cached image."""
        wanted = []
        for inst in instances:
            if 'c7n:instance-image' not in inst:
                wanted.append(inst['ImageId'])
        self.image_map = self.get_local_image_mapping(wanted)

    def get_base_image_mapping(self):
        """Map ImageId -> image record for all known AMIs."""
        mapping = {}
        for image in self.manager.get_resource_manager('ami').resources():
            mapping[image['ImageId']] = image
        return mapping

    def get_instance_image(self, instance):
        """Return the AMI record for an instance (cached on the instance)."""
        image = instance.get('c7n:instance-image', None)
        if image:
            return image
        image = self.image_map.get(instance['ImageId'], None)
        instance['c7n:instance-image'] = image
        return image

    def get_local_image_mapping(self, image_ids):
        """Resolve ids against known AMIs, fetching any that are missing."""
        known = self.get_base_image_mapping()
        resolved = {iid: known[iid] for iid in image_ids if iid in known}
        missing = list(set(image_ids) - set(resolved))
        if missing:
            fetched = self.manager.get_resource_manager('ami').get_resources(
                missing, False)
            for image in fetched:
                resolved[image['ImageId']] = image
        return resolved
@filters.register('image-age')
class ImageAge(AgeFilter, InstanceImageBase):
    """EC2 AMI age filter

    Filters EC2 instances based on the age of their AMI image (in days)

    :Example:

    .. code-block: yaml

        policies:
          - name: ec2-ancient-ami
            resource: ec2
            filters:
              - type: image-age
                op: ge
                days: 90
    """

    # AMI attribute compared against the configured day threshold.
    date_attribute = "CreationDate"

    schema = type_schema(
        'image-age',
        op={'type': 'string', 'enum': list(OPERATORS.keys())},
        days={'type': 'number'})

    def get_permissions(self):
        # Ages come from AMI records, so we need the ami manager's perms.
        return self.manager.get_resource_manager('ami').get_permissions()

    def process(self, resources, event=None):
        # Resolve and cache instance AMIs in bulk before per-instance checks.
        self.prefetch_instance_images(resources)
        return super(ImageAge, self).process(resources, event)

    def get_resource_date(self, i):
        image = self.get_instance_image(i)
        if image:
            return parse(image['CreationDate'])
        else:
            # Image has been deregistered; treat the instance as very old.
            return parse("2000-01-01T01:01:01.000Z")
@filters.register('image')
class InstanceImage(ValueFilter, InstanceImageBase):
    """Value filter evaluated against an instance's AMI record."""

    schema = type_schema('image', rinherit=ValueFilter.schema)

    def get_permissions(self):
        return self.manager.get_resource_manager('ami').get_permissions()

    def process(self, resources, event=None):
        # Resolve images in bulk to avoid a lookup per instance.
        self.prefetch_instance_images(resources)
        return super(InstanceImage, self).process(resources, event)

    def __call__(self, i):
        image = self.get_instance_image(i)
        # Finally, if we have no image...
        if not image:
            self.log.warning(
                "Could not locate image for instance:%s ami:%s" % (
                    i['InstanceId'], i["ImageId"]))
            # Match instead on empty skeleton?
            return False
        return self.match(image)
@filters.register('offhour')
class InstanceOffHour(OffHour, StateTransitionFilter):
    """Custodian OffHour filter

    Filters running EC2 instances with the intent to stop at a given hour of
    the day.

    :Example:

    .. code-block: yaml

        policies:
          - name: onhour-evening-stop
            resource: ec2
            filters:
              - type: offhour
                tag: custodian_downtime
                default_tz: et
                offhour: 20
            actions:
              - stop
    """

    # Only running instances are candidates for an off-hours stop.
    valid_origin_states = ('running',)

    def process(self, resources, event=None):
        return super(InstanceOffHour, self).process(
            self.filter_instance_state(resources))
@filters.register('onhour')
class InstanceOnHour(OnHour, StateTransitionFilter):
    """Custodian OnHour filter

    Filters stopped EC2 instances with the intent to start at a given hour of
    the day.

    :Example:

    .. code-block: yaml

        policies:
          - name: onhour-morning-start
            resource: ec2
            filters:
              - type: onhour
                tag: custodian_downtime
                default_tz: et
                onhour: 6
            actions:
              - start
    """

    # Only stopped instances are candidates for an on-hours start.
    valid_origin_states = ('stopped',)

    def process(self, resources, event=None):
        return super(InstanceOnHour, self).process(
            self.filter_instance_state(resources))
@filters.register('ephemeral')
class EphemeralInstanceFilter(Filter):
    """EC2 instances with ephemeral storage

    Filters EC2 instances that have ephemeral storage (an instance-store backed
    root device)

    :Example:

    .. code-block: yaml

        policies:
          - name: ec2-ephemeral-instances
            resource: ec2
            filters:
              - type: ephemeral

    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
    """

    schema = type_schema('ephemeral')

    def __call__(self, i):
        return self.is_ephemeral(i)

    @staticmethod
    def is_ephemeral(i):
        """True unless the root device is an EBS-backed volume."""
        root_devices = ('/dev/sda1', '/dev/xvda')
        for mapping in i.get('BlockDeviceMappings', []):
            if mapping['DeviceName'] in root_devices:
                # Root device found: ephemeral iff it isn't EBS backed.
                return 'Ebs' not in mapping
        # No recognized root device mapping; treated as ephemeral.
        return True
@filters.register('instance-uptime')
class UpTimeFilter(AgeFilter):
    """Filters instances by time since launch (uptime, in days)."""

    # Launch time is the reference point for uptime comparisons.
    date_attribute = "LaunchTime"

    schema = type_schema(
        'instance-uptime',
        op={'type': 'string', 'enum': list(OPERATORS.keys())},
        days={'type': 'number'})
@filters.register('instance-age')
class InstanceAgeFilter(AgeFilter):
    """Filters instances based on their age (in days)

    :Example:

    .. code-block: yaml

        policies:
          - name: ec2-30-days-plus
            resource: ec2
            filters:
              - type: instance-age
                op: ge
                days: 30
    """

    date_attribute = "LaunchTime"
    ebs_key_func = operator.itemgetter('AttachTime')

    schema = type_schema(
        'instance-age',
        op={'type': 'string', 'enum': list(OPERATORS.keys())},
        days={'type': 'number'},
        hours={'type': 'number'},
        minutes={'type': 'number'})

    def get_resource_date(self, i):
        """Prefer the oldest EBS attach time over LaunchTime.

        LaunchTime only reflects the most recent start, so the earliest
        volume attachment is a better proxy for the instance's true age.
        """
        ebs_vols = [
            device['Ebs'] for device in i['BlockDeviceMappings']
            if 'Ebs' in device]
        if not ebs_vols:
            # Ephemeral instance: fall back to the age attribute.
            return super(InstanceAgeFilter, self).get_resource_date(i)
        oldest = min(ebs_vols, key=self.ebs_key_func)
        return oldest['AttachTime']
@filters.register('default-vpc')
class DefaultVpc(DefaultVpcBase):
    """Matches if an ec2 instance is in the default vpc."""

    schema = type_schema('default-vpc')

    def __call__(self, ec2):
        vpc_id = ec2.get('VpcId')
        if not vpc_id:
            return False
        return self.match(vpc_id) or False
@filters.register('singleton')
class SingletonFilter(Filter, StateTransitionFilter):
    """EC2 instances without autoscaling or a recover alarm

    Filters EC2 instances that are not members of an autoscaling group
    and do not have Cloudwatch recover alarms.

    :Example:

    .. code-block: yaml

        policies:
          - name: ec2-recover-instances
            resource: ec2
            filters:
              - singleton
            actions:
              - type: tag
                key: problem
                value: instance is not resilient

    https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-recover.html
    """

    schema = type_schema('singleton')

    permissions = ('cloudwatch:DescribeAlarmsForMetric',)

    valid_origin_states = ('running', 'stopped', 'pending', 'stopping')

    # Matches instances carrying the asg membership tag.
    in_asg = ValueFilter({
        'key': 'tag:aws:autoscaling:groupName',
        'value': 'not-null'}).validate()

    def process(self, instances, event=None):
        return super(SingletonFilter, self).process(
            self.filter_instance_state(instances))

    def __call__(self, i):
        # ASG members are resilient by definition; otherwise require a
        # recover alarm for the instance to be considered non-singleton.
        if self.in_asg(i):
            return False
        return not self.has_recover_alarm(i)

    def has_recover_alarm(self, i):
        """True if a StatusCheckFailed_System alarm with an ec2:recover
        action exists for instance *i*.
        """
        client = utils.local_session(self.manager.session_factory).client('cloudwatch')
        alarms = client.describe_alarms_for_metric(
            MetricName='StatusCheckFailed_System',
            Namespace='AWS/EC2',
            Dimensions=[
                {
                    'Name': 'InstanceId',
                    'Value': i['InstanceId']
                }
            ]
        )
        # FIX: the loop variable previously shadowed the instance
        # parameter ``i``; use distinct names for clarity/safety.
        for alarm in alarms['MetricAlarms']:
            for action in alarm['AlarmActions']:
                if (
                    action.startswith('arn:aws:automate:') and
                    action.endswith(':ec2:recover')
                ):
                    return True
        return False
@actions.register('start')
class Start(BaseAction, StateTransitionFilter):
    """Starts a previously stopped EC2 instance.

    :Example:

    .. code-block: yaml

        policies:
          - name: ec2-start-stopped-instances
            resource: ec2
            query:
              - instance-state-name: stopped
            actions:
              - start

    http://docs.aws.amazon.com/cli/latest/reference/ec2/start-instances.html
    """

    valid_origin_states = ('stopped',)
    schema = type_schema('start')
    permissions = ('ec2:StartInstances',)
    batch_size = 10
    # Last ClientError seen while starting a batch (best-effort semantics).
    exception = None

    def _filter_ec2_with_volumes(self, instances):
        # Instances with no block devices cannot be started.
        return [i for i in instances if len(i['BlockDeviceMappings']) > 0]

    def process(self, instances):
        instances = self._filter_ec2_with_volumes(
            self.filter_instance_state(instances))
        if not len(instances):
            return

        client = utils.local_session(
            self.manager.session_factory).client('ec2')

        # Play nice around aws having insufficient capacity...
        for itype, t_instances in utils.group_by(
                instances, 'InstanceType').items():
            for izone, z_instances in utils.group_by(
                    t_instances, 'AvailabilityZone').items():
                for batch in utils.chunks(z_instances, self.batch_size):
                    self.process_instance_set(client, batch, itype, izone)

        # Raise an exception after all batches process
        if self.exception:
            # BUG FIX: the membership test previously compared against a
            # bare string ('InsufficientInstanceCapacity'), which performs
            # a *substring* check, not tuple membership.
            if self.exception.response['Error']['Code'] not in (
                    'InsufficientInstanceCapacity',):
                self.log.exception(
                    "Error while starting instances error %s", self.exception)
                raise self.exception

    def process_instance_set(self, client, instances, itype, izone):
        """Start one batch, retrying transient capacity/limit errors."""
        # Setup retry with insufficient capacity as well
        retry = utils.get_retry((
            'InsufficientInstanceCapacity',
            'RequestLimitExceeded', 'Client.RequestLimitExceeded'),
            max_attempts=5)
        instance_ids = [i['InstanceId'] for i in instances]
        try:
            retry(client.start_instances, InstanceIds=instance_ids)
        except ClientError as e:
            # Saving exception
            self.exception = e
            self.log.exception(
                ("Could not start instances:%d type:%s"
                 " zone:%s instances:%s error:%s"),
                len(instances), itype, izone,
                ", ".join(instance_ids), e)
            return
@actions.register('resize')
class Resize(BaseAction, StateTransitionFilter):
    """Change an instance's size.

    An instance can only be resized when its stopped, this action
    can optionally restart an instance if needed to effect the instance
    type change. Instances are always left in the run state they were
    found in.

    There are a few caveats to be aware of, instance resizing
    needs to maintain compatibility for architecture, virtualization type
    hvm/pv, and ebs optimization at minimum.

    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-resize.html
    """

    schema = type_schema(
        'resize',
        **{'restart': {'type': 'boolean'},
           'type-map': {'type': 'object'},
           'default': {'type': 'string'}})

    valid_origin_states = ('running', 'stopped')

    def get_permissions(self):
        perms = ('ec2:DescribeInstances', 'ec2:ModifyInstanceAttribute')
        if self.data.get('restart', False):
            perms += ('ec2:StopInstances', 'ec2:StartInstances')
        return perms

    def process(self, resources):
        stopped_instances = self.filter_instance_state(
            resources, ('stopped',))
        running_instances = self.filter_instance_state(
            resources, ('running',))

        if self.data.get('restart') and running_instances:
            # Stop running instances first; resize requires stopped state.
            Stop({'terminate-ephemeral': False},
                 self.manager).process(running_instances)
            client = utils.local_session(
                self.manager.session_factory).client('ec2')
            waiter = client.get_waiter('instance_stopped')
            try:
                waiter.wait(
                    InstanceIds=[r['InstanceId'] for r in running_instances])
            except ClientError as e:
                self.log.exception(
                    "Exception stopping instances for resize:\n %s" % e)

        for instance_set in utils.chunks(itertools.chain(
                stopped_instances, running_instances), 20):
            self.process_resource_set(instance_set)

        if self.data.get('restart') and running_instances:
            client.start_instances(
                InstanceIds=[i['InstanceId'] for i in running_instances])
        return list(itertools.chain(stopped_instances, running_instances))

    def process_resource_set(self, instance_set):
        # BUG FIX: default to an empty mapping; previously a missing
        # 'type-map' key yielded None and crashed on type_map.get().
        type_map = self.data.get('type-map', {})
        default_type = self.data.get('default')

        client = utils.local_session(
            self.manager.session_factory).client('ec2')

        for i in instance_set:
            self.log.debug(
                "resizing %s %s" % (i['InstanceId'], i['InstanceType']))
            new_type = type_map.get(i['InstanceType'], default_type)
            # Skip when no target type is configured or it's unchanged.
            if new_type is None or new_type == i['InstanceType']:
                continue
            try:
                client.modify_instance_attribute(
                    InstanceId=i['InstanceId'],
                    InstanceType={'Value': new_type})
            except ClientError as e:
                self.log.exception(
                    "Exception resizing instance:%s new:%s old:%s \n %s" % (
                        i['InstanceId'], new_type, i['InstanceType'], e))
@actions.register('stop')
class Stop(BaseAction, StateTransitionFilter):
    """Stops a running EC2 instances

    :Example:

    .. code-block: yaml

        policies:
          - name: ec2-stop-running-instances
            resource: ec2
            query:
              - instance-state-name: running
            actions:
              - stop
    """

    valid_origin_states = ('running',)

    schema = type_schema('stop', **{'terminate-ephemeral': {'type': 'boolean'}})

    def get_permissions(self):
        # Terminate permission only needed when ephemeral instances
        # may be terminated instead of stopped.
        perms = ('ec2:StopInstances',)
        if self.data.get('terminate-ephemeral', False):
            perms += ('ec2:TerminateInstances',)
        return perms

    def split_on_storage(self, instances):
        """Partition instances into (instance-store backed, ebs backed)."""
        ephemeral = []
        persistent = []
        for i in instances:
            if EphemeralInstanceFilter.is_ephemeral(i):
                ephemeral.append(i)
            else:
                persistent.append(i)
        return ephemeral, persistent

    def process(self, instances):
        instances = self.filter_instance_state(instances)
        if not len(instances):
            return
        client = utils.local_session(
            self.manager.session_factory).client('ec2')
        # Ephemeral instance can't be stopped.
        ephemeral, persistent = self.split_on_storage(instances)
        if self.data.get('terminate-ephemeral', False) and ephemeral:
            self._run_instances_op(
                client.terminate_instances,
                [i['InstanceId'] for i in ephemeral])
        if persistent:
            self._run_instances_op(
                client.stop_instances,
                [i['InstanceId'] for i in persistent])
        return instances

    def _run_instances_op(self, op, instance_ids):
        # Retry the bulk call, pruning any instance the service reports
        # as being in an incompatible state, until it succeeds or no
        # instances remain.
        while True:
            try:
                return self.manager.retry(op, InstanceIds=instance_ids)
            except ClientError as e:
                if e.response['Error']['Code'] == 'IncorrectInstanceState':
                    msg = e.response['Error']['Message']
                    # The offending instance id is quoted in the message.
                    e_instance_id = msg[msg.find("'") + 1:msg.rfind("'")]
                    instance_ids.remove(e_instance_id)
                    if not instance_ids:
                        return
                    continue
                raise
@actions.register('terminate')
class Terminate(BaseAction, StateTransitionFilter):
    """ Terminate a set of instances.

    While ec2 offers a bulk delete api, any given instance can be configured
    with api deletion termination protection, so we can't use the bulk call
    reliably, we need to process the instances individually. Additionally
    If we're configured with 'force' then we'll turn off instance termination
    protection.

    :Example:

    .. code-block: yaml

        policies:
          - name: ec2-process-termination
            resource: ec2
            filters:
              - type: marked-for-op
                op: terminate
            actions:
              - terminate
    """

    valid_origin_states = ('running', 'stopped', 'pending', 'stopping')

    schema = type_schema('terminate', force={'type': 'boolean'})

    def get_permissions(self):
        permissions = ("ec2:TerminateInstances",)
        if self.data.get('force'):
            permissions += ('ec2:ModifyInstanceAttribute',)
        return permissions

    def process(self, instances):
        instances = self.filter_instance_state(instances)
        if not len(instances):
            return
        if self.data.get('force'):
            self.log.info("Disabling termination protection on instances")
            self.disable_deletion_protection(instances)
        client = utils.local_session(
            self.manager.session_factory).client('ec2')
        # limit batch sizes to avoid api limits
        for batch in utils.chunks(instances, 100):
            self.manager.retry(
                client.terminate_instances,
                # BUG FIX: terminate only the current batch; previously
                # the full instance list was re-sent for every batch.
                InstanceIds=[i['InstanceId'] for i in batch])

    def disable_deletion_protection(self, instances):
        """Best-effort disable of api termination protection in parallel."""

        @utils.worker
        def process_instance(i):
            client = utils.local_session(
                self.manager.session_factory).client('ec2')
            try:
                self.manager.retry(
                    client.modify_instance_attribute,
                    InstanceId=i['InstanceId'],
                    Attribute='disableApiTermination',
                    Value='false')
            except ClientError as e:
                # Instance already transitioning; nothing to protect.
                if e.response['Error']['Code'] == 'IncorrectInstanceState':
                    return
                raise

        with self.executor_factory(max_workers=2) as w:
            list(w.map(process_instance, instances))
@actions.register('snapshot')
class Snapshot(BaseAction):
    """Snapshots volumes attached to an EC2 instance

    :Example:

    .. code-block: yaml

        policies:
          - name: ec2-snapshots
            resource: ec2
            actions:
              - type: snapshot
                copy-tags:
                  - Name
    """

    schema = type_schema(
        'snapshot',
        **{'copy-tags': {'type': 'array', 'items': {'type': 'string'}}})
    permissions = ('ec2:CreateSnapshot', 'ec2:CreateTags',)

    def process(self, resources):
        # PERF FIX: use one executor for all resources. Previously a new
        # thread pool was created (and torn down) per resource, which
        # serialized the snapshot work.
        with self.executor_factory(max_workers=2) as w:
            futures = [
                w.submit(self.process_volume_set, resource)
                for resource in resources]
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception creating snapshot set \n %s" % (
                            f.exception()))

    @utils.worker
    def process_volume_set(self, resource):
        """Create and tag a snapshot for each EBS volume on *resource*."""
        c = utils.local_session(self.manager.session_factory).client('ec2')
        for block_device in resource['BlockDeviceMappings']:
            if 'Ebs' not in block_device:
                continue
            volume_id = block_device['Ebs']['VolumeId']
            description = "Automated,Backup,%s,%s" % (
                resource['InstanceId'],
                volume_id)
            try:
                response = c.create_snapshot(
                    DryRun=self.manager.config.dryrun,
                    VolumeId=volume_id,
                    Description=description)
            except ClientError as e:
                # Volume is detaching/deleting; skip it rather than fail.
                if e.response['Error']['Code'] == 'IncorrectState':
                    self.log.warning(
                        "action:%s volume:%s is incorrect state" % (
                            self.__class__.__name__.lower(),
                            volume_id))
                    continue
                raise
            tags = [
                {'Key': 'Name', 'Value': volume_id},
                {'Key': 'InstanceId', 'Value': resource['InstanceId']},
                {'Key': 'DeviceName', 'Value': block_device['DeviceName']},
                {'Key': 'custodian_snapshot', 'Value': ''}
            ]
            copy_keys = self.data.get('copy-tags', [])
            copy_tags = []
            if copy_keys:
                for t in resource.get('Tags', []):
                    if t['Key'] in copy_keys:
                        copy_tags.append(t)
            # Stay well under the aws tag-count limit.
            if len(copy_tags) + len(tags) > 40:
                self.log.warning(
                    "action:%s volume:%s too many tags to copy" % (
                        self.__class__.__name__.lower(),
                        volume_id))
                copy_tags = []
            tags.extend(copy_tags)
            c.create_tags(
                DryRun=self.manager.config.dryrun,
                Resources=[
                    response['SnapshotId']],
                Tags=tags)
@actions.register('modify-security-groups')
class EC2ModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
    """Modify security groups on an instance."""

    permissions = ("ec2:ModifyNetworkInterfaceAttribute",)

    def process(self, instances):
        if not len(instances):
            return
        client = utils.local_session(
            self.manager.session_factory).client('ec2')

        # handle multiple ENIs
        interfaces = []
        for i in instances:
            for eni in i['NetworkInterfaces']:
                # Propagate matched-group metadata to the ENI so the base
                # action can compute the per-interface group set.
                if i.get('c7n.matched-security-groups'):
                    eni['c7n.matched-security-groups'] = i[
                        'c7n.matched-security-groups']
                interfaces.append(eni)

        # groups[idx] corresponds to interfaces[idx].
        groups = super(EC2ModifyVpcSecurityGroups, self).get_groups(interfaces)
        for idx, i in enumerate(interfaces):
            client.modify_network_interface_attribute(
                NetworkInterfaceId=i['NetworkInterfaceId'],
                Groups=groups[idx])
@actions.register('autorecover-alarm')
class AutorecoverAlarm(BaseAction, StateTransitionFilter):
    """Adds a cloudwatch metric alarm to recover an EC2 instance.

    This action takes effect on instances that are NOT part
    of an ASG.

    :Example:

    .. code-block: yaml

        policies:
          - name: ec2-autorecover-alarm
            resource: ec2
            filters:
              - singleton
            actions:
              - autorecover-alarm

    https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-recover.html
    """

    schema = type_schema('autorecover-alarm')

    permissions = ('ec2:DescribeInstanceStatus',
                   'ec2:RecoverInstances',
                   'ec2:DescribeInstanceRecoveryAttribute')

    valid_origin_states = ('running', 'stopped', 'pending', 'stopping')

    # Matches only instances NOT tagged as asg members.
    filter_asg_membership = ValueFilter({
        'key': 'tag:aws:autoscaling:groupName',
        'value': 'empty'}).validate()

    def process(self, instances):
        instances = self.filter_asg_membership.process(
            self.filter_instance_state(instances))
        if not len(instances):
            return
        client = utils.local_session(
            self.manager.session_factory).client('cloudwatch')
        for i in instances:
            # Alarm fires after two minutes of failed system status checks
            # and triggers the regional ec2:recover automate action.
            client.put_metric_alarm(
                AlarmName='recover-{}'.format(i['InstanceId']),
                AlarmDescription='Auto Recover {}'.format(i['InstanceId']),
                ActionsEnabled=True,
                AlarmActions=[
                    'arn:aws:automate:{}:ec2:recover'.format(
                        i['Placement']['AvailabilityZone'][:-1])
                ],
                MetricName='StatusCheckFailed_System',
                Namespace='AWS/EC2',
                Statistic='Minimum',
                Dimensions=[
                    {
                        'Name': 'InstanceId',
                        'Value': i['InstanceId']
                    }
                ],
                Period=60,
                EvaluationPeriods=2,
                Threshold=0,
                ComparisonOperator='GreaterThanThreshold'
            )
@actions.register('set-instance-profile')
class SetInstanceProfile(BaseAction, StateTransitionFilter):
    """Sets (or removes) the instance profile for a running EC2 instance.

    :Example:

    .. code-block: yaml

        policies:
          - name: set-default-instance-profile
            resource: ec2
            query:
              - IamInstanceProfile: absent
            actions:
              - type: set-instance-profile
                name: default

    https://docs.aws.amazon.com/cli/latest/reference/ec2/associate-iam-instance-profile.html
    https://docs.aws.amazon.com/cli/latest/reference/ec2/disassociate-iam-instance-profile.html
    """

    schema = type_schema(
        'set-instance-profile',
        **{'name': {'type': 'string'}})

    permissions = (
        'ec2:AssociateIamInstanceProfile',
        'ec2:DisassociateIamInstanceProfile',
        'iam:PassRole')

    valid_origin_states = ('running', 'pending')

    def process(self, instances):
        instances = self.filter_instance_state(instances)
        if not len(instances):
            return
        client = utils.local_session(
            self.manager.session_factory).client('ec2')
        profile_name = self.data.get('name', '')

        for i in instances:
            if profile_name:
                # CONSISTENCY FIX: use the hoisted profile_name instead of
                # re-reading self.data inside the loop.
                client.associate_iam_instance_profile(
                    IamInstanceProfile={'Name': profile_name},
                    InstanceId=i['InstanceId'])
            else:
                # No name given: detach any active profile associations.
                response = client.describe_iam_instance_profile_associations(
                    Filters=[
                        {
                            'Name': 'instance-id',
                            'Values': [i['InstanceId']],
                        },
                        {
                            'Name': 'state',
                            'Values': ['associating', 'associated']
                        }
                    ]
                )
                for a in response['IamInstanceProfileAssociations']:
                    client.disassociate_iam_instance_profile(
                        AssociationId=a['AssociationId']
                    )

        return instances
# Valid EC2 Query Filters
# http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeInstances.html
# Maps filter name -> allowed values (tuple) or expected value type.
EC2_VALID_FILTERS = {
    'architecture': ('i386', 'x86_64'),
    'availability-zone': str,
    'iam-instance-profile.arn': str,
    'image-id': str,
    'instance-id': str,
    'instance-lifecycle': ('spot',),
    'instance-state-name': (
        'pending',
        'terminated',
        'running',
        'shutting-down',
        'stopping',
        'stopped'),
    'instance.group-id': str,
    'instance.group-name': str,
    'tag-key': str,
    'tag-value': str,
    'tag:': str,
    'vpc-id': str}
class QueryFilter(object):
    """Wraps and validates a single {name: value} ec2 query filter."""

    @classmethod
    def parse(cls, data):
        """Validate a list of raw filter dicts into QueryFilter objects."""
        results = []
        for entry in data:
            if not isinstance(entry, dict):
                raise ValueError(
                    "EC2 Query Filter Invalid structure %s" % entry)
            results.append(cls(entry).validate())
        return results

    def __init__(self, data):
        self.data = data
        self.key = None
        self.value = None

    def validate(self):
        """Ensure exactly one known filter key with a non-null value."""
        if len(self.data) != 1:
            raise ValueError(
                "EC2 Query Filter Invalid %s" % self.data)
        self.key, self.value = next(iter(self.data.items()))
        if self.key not in EC2_VALID_FILTERS and not self.key.startswith(
                'tag:'):
            raise ValueError(
                "EC2 Query Filter invalid filter name %s" % (self.data))

        if self.value is None:
            raise ValueError(
                "EC2 Query Filters must have a value, use tag-key"
                " w/ tag name as value for tag present checks"
                " %s" % self.data)
        return self

    def query(self):
        """Render the boto3 Filters element for this filter."""
        value = self.value
        if isinstance(self.value, six.string_types):
            value = [self.value]
        return {'Name': self.key, 'Values': value}
| |
# -*- coding: utf-8 -*-
# Create your models here
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.core.validators import MinValueValidator, MaxValueValidator
from froala_editor.fields import FroalaField
from datetime import date
@python_2_unicode_compatible
class Question(models.Model):
    """A poll question with its publication timestamp."""

    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        """True when pub_date falls within the last 24 hours (not future)."""
        now = timezone.now()
        lower_bound = now - datetime.timedelta(days=1)
        return lower_bound <= self.pub_date <= now

    # Admin list display configuration for the method column.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently'
@python_2_unicode_compatible
class Choice(models.Model):
    """An answer option for a Question, with a vote tally."""

    question = models.ForeignKey(Question, on_delete = models.CASCADE)
    # NOTE(review): 'choicet_text' looks like a typo for 'choice_text',
    # but renaming the field requires a schema migration; left as-is.
    choicet_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        return self.choicet_text
'''User Profile Model'''
class UserProfile(models.Model):
    """Extra per-user profile data, linked one-to-one with auth.User."""

    user = models.OneToOneField(User)
    description = models.CharField(max_length=500, default='')
    city = models.CharField(max_length=50, default='')
    university = models.CharField(max_length=50, default='')
    facebook = models.URLField(default='')
    # NOTE(review): IntegerField drops leading zeros in phone numbers;
    # a CharField is usually preferred — changing it needs a migration.
    phone = models.IntegerField(default=0)
    image = models.ImageField(upload_to='profile_image', blank=True)

    def __str__(self):
        return self.user.username
def create_profile(sender, **kwargs):
    """post_save hook: create a UserProfile for every newly created User.

    FIX: the created profile was previously bound to an unused local.
    """
    if kwargs['created']:
        UserProfile.objects.create(user=kwargs['instance'])


# Attach the hook so profiles are created automatically with users.
post_save.connect(create_profile, sender=User)
'''Recruitment Campaign'''
'''Local Committe Model'''
@python_2_unicode_compatible
class LocalCommittee(models.Model):
    """A local committee and its host city."""

    local_committee = models.CharField(max_length=200, default='HN')
    # NOTE(review): 'City' is not snake_case; renaming the field would
    # require a migration, so it is left unchanged.
    City = models.CharField(max_length=200, default='Hanoi')

    def __str__(self):
        return self.local_committee
@python_2_unicode_compatible
class Recruitment(models.Model):
    """A recruitment campaign run by a local committee."""

    local_committee = models.ForeignKey(LocalCommittee, on_delete=models.CASCADE)
    # Staff users administering this campaign.
    member = models.ManyToManyField(User, related_name='recruitment_member',
                                    limit_choices_to={'is_staff': True},)
    name = models.CharField(max_length=200)
    description = models.TextField()
    pub_date = models.DateTimeField('date published')
    content = FroalaField()

    def __str__(self):
        return self.name

    @property
    def status(self):
        # A campaign is 'pending' until its publication time has passed.
        if self.pub_date > timezone.make_aware(datetime.datetime.now()):
            return 'pending'
        else:
            return 'published'
# Progress of an applicant through the recruitment rounds.
STATUS_CHOICES = (
    ('0', 'Not Yet'),
    ('1', 'Round 1'),
    ('2', 'Round 2'),
    ('3', 'Round 3'),
    ('p', 'Pass'),
    ('f', 'Fail'),
)
@python_2_unicode_compatible
class RecruitmentForm(models.Model):
    """An application submitted by a user to a recruitment campaign."""

    user_id = models.ForeignKey(User, on_delete=models.CASCADE,)
    # NOTE(review): 'recruiment_id' looks like a typo for 'recruitment_id';
    # renaming requires a migration, left as-is.
    recruiment_id = models.ForeignKey(Recruitment, on_delete=models.CASCADE)
    student_name = models.CharField(max_length=200)
    description = models.CharField(max_length=200)
    skill = models.CharField(max_length=200)
    university = models.CharField(max_length=200)
    year_program = models.IntegerField()
    status = models.CharField(max_length=1, choices=STATUS_CHOICES, default='0')

    def __str__(self):
        return self.student_name
@python_2_unicode_compatible
class Round(models.Model):
    """A numbered stage of the recruitment process."""

    round_num = models.IntegerField()
    round_name = models.CharField(max_length=200)

    def __str__(self):
        return self.round_name
@python_2_unicode_compatible
class Questionnaire(models.Model):
    """A question/answer pair attached to an application for a round."""

    recruitment_form = models.ForeignKey(RecruitmentForm, on_delete=models.CASCADE)
    round = models.ForeignKey(Round, on_delete=models.CASCADE)
    question_name = models.CharField(max_length=200)
    answer = models.CharField(max_length=200)

    def __str__(self):
        return self.question_name
@python_2_unicode_compatible
class Creatia(models.Model):
    """A scored evaluation criterion (0-100) for a round's questionnaire.

    NOTE(review): the class/field names look like typos for
    'Criteria'/'criteria_name'; renaming requires a migration.
    """

    round = models.ForeignKey(Round, on_delete=models.CASCADE)
    questionnaire = models.ForeignKey(Questionnaire, on_delete=models.CASCADE)
    #recruitment_form = models.ForeignKey(RecruitmentForm, on_delete=models.CASCADE)
    cretia_name = models.CharField(max_length=200)
    point = models.IntegerField(validators=[MinValueValidator(0),
                                            MaxValueValidator(100)])

    def __str__(self):
        return self.cretia_name

    #def get_queryset(self):
    #    return super(Creatia, self).filter(round=1).values('round').annotate(sum=Sum('point'))
"""For Project Management"""
'''Kind of Project'''
@python_2_unicode_compatible
class ProjectOfKind(models.Model):
    """A category/kind label for projects."""

    project_kind = models.CharField(max_length=200)

    def __str__(self):
        return self.project_kind
'''Project Model'''
@python_2_unicode_compatible
class Project(models.Model):
    """A project run by a local committee over a start/end date range."""

    project_kind = models.ForeignKey(ProjectOfKind, on_delete=models.CASCADE)
    local_committee = models.ForeignKey(LocalCommittee, on_delete=models.CASCADE)
    project_name = models.CharField(max_length=200)
    project_description = models.TextField(max_length=1000)
    start_date = models.DateTimeField('Start Date')
    end_date = models.DateTimeField('End Date')

    def __str__(self):
        return self.project_name

    def status(self):
        """Return 'Pending', 'Running' or 'Closed' relative to now.

        FIX: compute the aware "now" once (it was previously recomputed
        three times, with an unused local left behind) and use the
        module-level datetime/timezone imports.
        """
        now = timezone.make_aware(datetime.datetime.now())
        if self.start_date > now and self.end_date > now:
            return "Pending"
        elif self.end_date < now:
            return "Closed"
        return "Running"
'''SubProject Model'''
@python_2_unicode_compatible
class SubProject(models.Model):
    """A sub-project of a Project, staffed by staff users."""

    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    member = models.ManyToManyField(User, related_name='subproject_member',
                                    limit_choices_to={'is_staff': True},)
    subproject_name = models.CharField(max_length=200)
    subproject_description = models.TextField(max_length=1000)
    create_date = models.DateTimeField('Created Date')
    closed_date = models.DateTimeField('Closed Date')

    def __str__(self):
        return self.subproject_name

    def status(self):
        """Return 'Pending', 'Running' or 'Closed' relative to now.

        FIX: compute the aware "now" once (previously recomputed three
        times with an unused local) using the module-level imports.
        """
        now = timezone.make_aware(datetime.datetime.now())
        if self.create_date > now and self.closed_date > now:
            return "Pending"
        elif self.closed_date < now:
            return "Closed"
        return "Running"
'''Event for Project'''
class PublishedEvent(models.Model):
    """A public event belonging to a project, with a publication window."""

    project = models.ForeignKey(Project, on_delete= models.CASCADE)
    event_name = models.CharField(max_length = 200)
    event_description = FroalaField()
    create_date = models.DateTimeField('Created Date')
    # FIX: verbose name typo 'Publised Date'.
    pub_date = models.DateTimeField('Published Date')
    closed_date = models.DateTimeField('Closed Date')
    price = models.IntegerField(default=0)
    place = models.CharField(max_length = 200)
    background_image = models.ImageField(upload_to='profile_image', blank=True)

    def __str__(self):
        return self.event_name

    def status(self):
        """Return 'Pending', 'Published' or 'Closed' relative to now.

        FIX: compute the aware "now" once instead of three times.
        """
        now = timezone.make_aware(datetime.datetime.now())
        if self.create_date < now and self.pub_date > now:
            return "Pending"
        elif self.closed_date < now:
            return "Closed"
        return "Published"
"""Customer Register"""
class RegisterEvent(models.Model):
    """A customer's registration for a published event."""

    event = models.ForeignKey(PublishedEvent, on_delete = models.CASCADE)
    customer = models.ForeignKey(User, on_delete=models.CASCADE)
    # NOTE(review): 'attandance' looks like a typo for 'attendance';
    # renaming the field requires a migration, left as-is.
    attandance = models.BooleanField(default = False)
    payment = models.BooleanField(default=False)
    create_date = models.DateTimeField()
    customer_name = models.CharField(max_length=200)
    customer_phone = models.IntegerField()
    customer_email = models.EmailField(max_length=200)

    def __str__(self):
        return self.customer_name
"""Task and assignTask"""
class Task(models.Model):
    """A work item assigned to staff members within a sub-project."""

    member = models.ManyToManyField(User,
                                    limit_choices_to={'is_staff': True},)
    # NOTE(review): despite the name, this references SubProject.
    project = models.ForeignKey(SubProject,on_delete = models.CASCADE,
                                )
    task_name = models.CharField(max_length = 200)
    requirement = models.TextField(max_length=1000)
    # submit_file = models.FileField(upload_to='profile_image')
    # NOTE(review): 'commemt' looks like a typo for 'comment';
    # renaming requires a migration, left as-is.
    commemt = models.TextField(max_length = 1000, blank=True)
    due_date = models.DateTimeField('Due Date')

    def __str__(self):
        return self.task_name
'''Multiple files'''
class TaskFile(models.Model):
    """A file attachment for a Task (allows multiple files per task)."""

    task = models.ForeignKey(Task, on_delete = models.CASCADE)
    file = models.FileField(upload_to='profile_image')
# Email template categories, keyed to recruitment stages and events.
KIND_EMAIL = (
    ('1', 'Recruitment Round 1'),
    ('2', 'Recruitment Round 2'),
    ('3', 'Recruitment Round 3'),
    ('p', 'Recruitment Pass'),
    ('f', 'Recruitment Fail'),
    ('e', 'Event'),)
'''Email Content'''
@python_2_unicode_compatible
class EmailContent(models.Model):
    """A reusable email template, one per KIND_EMAIL category."""

    email_kind = models.CharField(max_length=1, choices= KIND_EMAIL)
    subject = models.CharField(max_length=50)
    content = models.CharField(max_length=500)

    def __str__(self):
        return self.email_kind
'''Email content for event'''
@python_2_unicode_compatible
class EmailContentEvent(models.Model):
    """A rich-text email template tied to a specific published event."""

    event = models.ForeignKey(PublishedEvent, on_delete = models.CASCADE)
    subject = models.CharField(max_length=50)
    content = FroalaField()

    def __str__(self):
        return self.subject
'''Event Metric (virtual table)'''
class EventMetric(RegisterEvent):
    """Proxy model exposing RegisterEvent rows a second time (e.g. in the
    admin) under an 'Event Metric' label, without a new table."""

    class Meta:
        proxy = True
        verbose_name = 'Event Metric'
        verbose_name_plural = 'Event Metric'
| |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapping from schema to backend properties."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
import collections
import copy
import itertools
import json
class Property(object):
    """A single named, typed property within a schema Registry."""

    def __init__(
            self, name, label, property_type, select_data=None,
            description=None, optional=False, extra_schema_dict_values=None):
        # 'properties' is reserved by the JSON-schema dict generator.
        if name == 'properties':
            raise ValueError('Cannot name a field "properties"; this conflicts '
                             'with the use of "properties" in generating JSON '
                             'schema dictionaries.')
        self._name = name
        self._label = label
        self._property_type = property_type
        self._select_data = select_data
        self._description = description
        self._optional = optional
        self._extra_schema_dict_values = extra_schema_dict_values or {}

    def __str__(self):
        return '{}#{}'.format(self._name, self._property_type)

    @property
    def type(self):
        """The property's declared type string."""
        return self._property_type

    @property
    def name(self):
        return self._name

    @property
    def description(self):
        return self._description

    @property
    def extra_schema_dict_values(self):
        return self._extra_schema_dict_values

    @property
    def label(self):
        return self._label

    def set_select_data(self, select_data):
        """Replace the choices backing a select-type property."""
        self._select_data = select_data

    def get_display_dict(self):
        """Summarize this property for display layers."""
        return dict(
            name=self._name,
            label=self._label,
            repeated=False,
            description=self._description,
        )
class Registry(object):
    """Registry is a collection of Property's."""

    # Separator used in clone_only_items_named() path arguments.
    SCHEMA_PATH_SEPARATOR = '/'

    def __init__(self, title, description=None, extra_schema_dict_values=None):
        self._name = None
        self._title = title
        self._registry = {'id': title, 'type': 'object'}
        self._description = description
        if description:
            self._registry['description'] = description
        self._extra_schema_dict_values = extra_schema_dict_values or {}
        self._properties = []
        # Ordered so schema output preserves registration order.
        self._sub_registries = collections.OrderedDict()

    @property
    def name(self):
        return self._name

    @property
    def title(self):
        return self._title

    @property
    def sub_registries(self):
        return self._sub_registries

    @property
    def properties(self):
        return self._properties

    def add_property(self, schema_field):
        """Add a Property to this Registry."""
        self._properties.append(schema_field)

    def get_property(self, property_name):
        """Return the property with the given name, or None when absent."""
        for prop in self._properties:
            if prop.name == property_name:
                return prop
        return None

    def get_sub_registry(self, sub_registry_name):
        return self._sub_registries.get(sub_registry_name)

    def remove_property(self, property_name):
        """Remove and return the named property; returns None when absent."""
        prop = self.get_property(property_name)
        if prop:
            return self._properties.pop(self._properties.index(prop))

    def add_sub_registry(self, name, title=None, description=None,
                         registry=None, extra_schema_dict_values=None):
        """Add a sub registry to this Registry."""
        if name in self._sub_registries:
            # BUGFIX: corrected typo 'undr' -> 'under' in the error message.
            raise Exception('Already have registry under name %s' % name)
        if not registry:
            registry = self.__class__(title=title, description=description,
                                      extra_schema_dict_values=extra_schema_dict_values)
        registry._name = name  # pylint: disable=protected-access
        self._sub_registries[name] = registry
        return registry

    def has_subregistries(self):
        return bool(self._sub_registries)

    def get_display_dict(self):
        return {
            'title': self._title,
            'properties': [p.get_display_dict() for p in self._properties],
            # .values() is portable across Python 2 and 3 (was .itervalues()).
            'registries': [r.get_display_dict()
                           for r in self._sub_registries.values()],
        }

    def clone_only_items_named(self, paths):
        """Clone only the selected items from a registry.

        Args:
            paths: Each item is a path into the schema, with slashes as
                separators. E.g., "foo" would match things at the top level
                named "foo". Similarly, 'foo/bar/baz' looks in sub-schema
                "foo" for a sub-schema "bar", and within that, "baz." The
                returned schema would include not just the leaf item, but
                sub-registry 'foo' containing 'bar', containing 'baz'.

                NOTE - Schema hierarchy components are stored separately from
                properties, and so "foo" may well match _both_ a subschema
                _and_ a property, if someone were unwise enough to build
                a schema with overloaded names.

                Also note that colons in names are not special to this function,
                though they may well have special meaning to, e.g., the
                course schema mapping to course.yaml dict hierarchy. Picking
                out a single such field would use a name such as
                "registration/course:send_welcome_notifications".
        Returns:
            A schema with only the named items present.
        """
        # Arbitrary depth instantiate-on-reference dict constructor
        treebuilder = lambda: collections.defaultdict(treebuilder)
        # Build a tree of nodes from the given paths.
        root = treebuilder()
        for path in paths:
            parts = path.split(self.SCHEMA_PATH_SEPARATOR)
            node = root
            for part in parts:
                node = node[part]
        registry = copy.deepcopy(self)

        def delete_all_but(registry, node):
            # pylint: disable=protected-access
            # Copy so deleting does not wreck iterator.
            for prop in copy.copy(registry._properties):
                if prop.name not in node:
                    registry._properties.remove(prop)
                # If this is an array of complex types, recurse.
                if (node[prop.name] and
                    isinstance(prop, FieldArray) and
                    isinstance(prop._item_type, Registry)):
                    delete_all_but(prop._item_type, node[prop.name])
            # BUGFIX: snapshot the items before iterating.  The loop below
            # deletes entries, and deleting from a dict while iterating its
            # live iterator (the old .iteritems()) raises RuntimeError as
            # soon as any sub-registry is actually pruned.
            for name, value in list(registry._sub_registries.items()):
                # If this subregistry is not named at all, remove it.
                if name not in node:
                    del registry._sub_registries[name]
                # If the paths-to-save gives sub-entries within this
                # node, then proceed into the node to prune its members.
                # Otherwise, do nothing, leaving the node and all its
                # children in place.
                elif node[name]:
                    delete_all_but(value, node[name])

        delete_all_but(registry, root)
        return registry
class SchemaField(Property):
    """SchemaField defines a simple field."""

    def __init__(
        self, name, label, property_type, select_data=None, description=None,
        optional=False, hidden=False, editable=True, i18n=None,
        extra_schema_dict_values=None, validator=None, default_value=None):
        """Create a simple field.

        Args:
            name, label, property_type, select_data, description, optional,
                extra_schema_dict_values: see Property.__init__.
            hidden: render as a hidden input.
            editable: False renders as an uneditable input.
            i18n: translation marker carried into the JSON schema.
            validator: callable(value, errors) run by validate().
            default_value: value reported when an entity lacks this field.
        """
        Property.__init__(
            self, name, label, property_type, select_data=select_data,
            description=description, optional=optional,
            extra_schema_dict_values=extra_schema_dict_values)
        self._hidden = hidden
        self._editable = editable
        self._validator = validator
        self._i18n = i18n
        self._default_value = default_value

    @property
    def hidden(self):
        return self._hidden

    @property
    def editable(self):
        return self._editable

    @property
    def i18n(self):
        return self._i18n

    @property
    def _override_type(self):
        """The final type, if it differs from the validation type."""
        # An explicit '_type' annotation wins over derived display types.
        if '_type' in self._extra_schema_dict_values:
            return self._extra_schema_dict_values['_type']
        if self._hidden:
            return 'hidden'
        elif not self._editable:
            return 'uneditable'
        elif self._select_data:
            return 'select'
        return None

    def get_display_types(self):
        """List of types needed to render this."""
        return [self._override_type or self.type]

    def get_json_schema_dict(self):
        """Get the JSON schema for this field."""
        prop = {}
        prop['type'] = self._property_type
        if self._optional:
            prop['optional'] = self._optional
        if self._description:
            prop['description'] = self._description
        if self._i18n:
            prop['i18n'] = self._i18n
        return prop

    def _get_schema_dict(self, prefix_key):
        """Get Schema annotation dictionary for this field."""
        # BUGFIX: operate on a copy.  The original aliased
        # self._extra_schema_dict_values and mutated it in place, so every
        # call leaked 'label', '_type', 'choices', etc. back into the field's
        # stored extras (visible through the extra_schema_dict_values
        # property and through _override_type on subsequent calls).
        schema = dict(self._extra_schema_dict_values)
        schema['label'] = self._label
        override_type = self._override_type
        if override_type:
            schema['_type'] = override_type
        if self._property_type == 'date':
            # Default date formats, unless the caller supplied their own.
            if 'dateFormat' not in schema:
                schema['dateFormat'] = 'Y/m/d'
            if 'valueFormat' not in schema:
                schema['valueFormat'] = 'Y/m/d'
        elif self._select_data:
            choices = []
            for value, label in self._select_data:
                choices.append(
                    {'value': value, 'label': unicode(label)})
            schema['choices'] = choices
        if self._description:
            schema['description'] = self._description
        return [(prefix_key + ['_inputex'], schema)]

    def validate(self, value, errors):
        """Run the field's validator (if any) against value."""
        if self._validator:
            self._validator(value, errors)

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__, self.name)
class FieldArray(SchemaField):
    """FieldArray is an array with object or simple items."""

    def __init__(
        self, name, label, description=None, item_type=None,
        optional=False, extra_schema_dict_values=None, select_data=None):
        """Create an array field whose elements are described by item_type."""
        super(FieldArray, self).__init__(
            name, label, 'array', description=description, optional=optional,
            extra_schema_dict_values=extra_schema_dict_values,
            select_data=select_data)
        self._item_type = item_type

    @property
    def item_type(self):
        return self._item_type

    def get_json_schema_dict(self):
        """Array JSON schema: the base dict plus an 'items' sub-schema."""
        json_schema = super(FieldArray, self).get_json_schema_dict()
        json_schema['items'] = self._item_type.get_json_schema_dict()
        return json_schema

    def _get_schema_dict(self, prefix_key):
        """Append the item type's annotations under the 'items' key."""
        dict_list = super(FieldArray, self)._get_schema_dict(prefix_key)
        # pylint: disable=protected-access
        item_dicts = self._item_type._get_schema_dict(prefix_key + ['items'])
        # pylint: enable=protected-access
        return dict_list + item_dicts

    def get_display_dict(self):
        """Like SchemaField's display dict, but marked repeated."""
        display_dict = super(FieldArray, self).get_display_dict()
        display_dict['repeated'] = True
        display_dict['item_type'] = self.item_type.get_display_dict()
        return display_dict

    def get_display_types(self):
        """List of types needed to render this."""
        return itertools.chain(
            super(FieldArray, self).get_display_types(),
            self.item_type.get_display_types())
class FieldRegistry(Registry):
    """FieldRegistry is an object with SchemaField properties."""

    def _iter_fields(self):
        """Iterate fields as (name, field) pairs, like dict.items()."""
        for schema_field in self._properties:
            yield (schema_field.name, schema_field)

    def _iter_sub_registries(self):
        """Iterate sub-registries as (name, registry) pairs."""
        # .items() is portable across Python 2 and 3 (was .iteritems()).
        return iter(self._sub_registries.items())

    def _iter_fields_and_sub_registries(self):
        """Iterate fields and sub-registries as (name, item) pairs."""
        return itertools.chain(self._iter_fields(), self._iter_sub_registries())

    def _deep_iter_fields(self):
        """Iterate fields in this registry and its sub-registries recursively.

        Results look like dict.items(). Keys are just the field names. They
        don't incorporate parent keys.
        """
        # pylint: disable=protected-access
        return itertools.chain(self._iter_fields(),
                               itertools.chain.from_iterable(
                                   item._deep_iter_fields()
                                   for (key, item) in self._iter_sub_registries()))

    def _get_display_type(self):
        # A registry renders as a 'group' unless annotated otherwise.
        return self._extra_schema_dict_values.get('_type', 'group')

    def get_display_types(self):
        """List of types needed to render this."""
        return itertools.chain(
            [self._get_display_type()],
            itertools.chain.from_iterable([
                item.get_display_types()
                for (key, item) in self._deep_iter_fields()]))

    def get_json_schema_dict(self):
        """Build the nested JSON schema dict for this registry."""
        schema_dict = dict(self._registry)
        schema_dict['properties'] = collections.OrderedDict(
            (key, schema_field.get_json_schema_dict())
            for key, schema_field in self._iter_fields_and_sub_registries())
        return schema_dict

    def get_json_schema(self):
        """Get the json schema for this API."""
        return json.dumps(self.get_json_schema_dict())

    def _get_schema_dict(self, prefix_key):
        """Get schema dict for this API."""
        title_key = list(prefix_key)
        title_key.append('title')
        schema_dict = [(title_key, self._title)]

        if self._extra_schema_dict_values:
            key = list(prefix_key)
            key.append('_inputex')
            schema_dict.append([key, self._extra_schema_dict_values])

        base_key = list(prefix_key)
        base_key.append('properties')
        return schema_dict + list(itertools.chain.from_iterable(
            # pylint: disable=protected-access
            item._get_schema_dict(base_key + [key])
            # pylint: enable=protected-access
            for key, item in self._iter_fields_and_sub_registries()))

    def get_schema_dict(self):
        """Get schema dict for this API."""
        return self._get_schema_dict(list())

    @classmethod
    def _add_entry(cls, key_part_list, value, entity):
        """Set value into entity along the (reversed) key path, creating
        intermediate dicts as needed."""
        if len(key_part_list) == 1:
            entity[key_part_list[0]] = value
            return
        key = key_part_list.pop()
        # 'key in entity' is portable; dict.has_key() was removed in Python 3.
        if key not in entity:
            entity[key] = {}
        else:
            # isinstance also admits dict subclasses (e.g. OrderedDict).
            assert isinstance(entity[key], dict)
        cls._add_entry(key_part_list, value, entity[key])

    @classmethod
    def convert_json_to_entity(cls, json_entry, entity):
        """Copy json_entry into entity, expanding colon-separated keys."""
        assert isinstance(json_entry, dict)
        for key in json_entry.keys():
            if isinstance(json_entry[key], dict):
                cls.convert_json_to_entity(json_entry[key], entity)
            else:
                key_parts = key.split(':')
                key_parts.reverse()
                cls._add_entry(key_parts, json_entry[key], entity)

    @classmethod
    def _get_field_name_parts(cls, field_name):
        """Split a colon-separated name and reverse it for _get_field_value."""
        field_name_parts = field_name.split(':')
        field_name_parts.reverse()
        return field_name_parts

    @classmethod
    def _get_field_value(cls, key_part_list, entity, default):
        """Walk entity along the (reversed) key path; default when absent."""
        if len(key_part_list) == 1:
            if isinstance(entity, dict) and key_part_list[0] in entity:
                return entity[key_part_list[0]]
            return default
        key = key_part_list.pop()
        if key in entity:
            return cls._get_field_value(key_part_list, entity[key], default)
        return default

    @classmethod
    def get_field_value(cls, schema_field, entity):
        """Look up schema_field's value in entity, or its default value."""
        return cls._get_field_value(
            cls._get_field_name_parts(schema_field.name), entity,
            schema_field._default_value)  # pylint: disable=protected-access

    def convert_entity_to_json_entity(self, entity, json_entry):
        """Flatten entity into json_entry following this registry's schema."""
        for schema_field in self._properties:
            value = self.get_field_value(schema_field, entity)
            if value is not None:
                json_entry[schema_field.name] = value

        for key in self._sub_registries.keys():
            json_entry[key] = {}
            self._sub_registries[key].convert_entity_to_json_entity(
                entity, json_entry[key])

    def redact_entity_to_schema(self, entity, only_writable=True):
        """Strip entity members not present (or not writable) in the schema."""
        property_names = {p.name: p for p in self._properties}
        registry_names = set(self._sub_registries.keys())
        # Snapshot the keys; the loop deletes from entity while iterating.
        for name in list(entity.keys()):
            if name not in property_names and name not in registry_names:
                del entity[name]
            elif name in registry_names:
                self._sub_registries[name].redact_entity_to_schema(
                    entity[name], only_writable)
                if not entity[name]:
                    del entity[name]
            elif name in property_names:
                prop = property_names[name]
                if not prop.editable and only_writable:
                    del entity[name]
                elif (isinstance(prop, FieldArray) and
                      isinstance(prop.item_type, Registry)):
                    # Redact each complex array element; drop the array if
                    # every element redacts to empty.
                    all_empty = True
                    for item in entity[name]:
                        prop.item_type.redact_entity_to_schema(
                            item, only_writable)
                        if item:
                            all_empty = False
                    if all_empty:
                        del entity[name]

    def validate(self, payload, errors):
        """Validate payload against every field, recursing into sub-registries."""
        for schema_field in self._properties:
            value = self.get_field_value(schema_field, payload)
            schema_field.validate(value, errors)
        for registry in self._sub_registries.values():
            registry.validate(payload, errors)

    @classmethod
    def is_complex_name(cls, name):
        """True when name encodes a colon-separated path."""
        return ':' in name

    @classmethod
    def compute_name(cls, parent_names):
        """Computes non-indexed and indexed entity name given parent names."""
        parts = []
        for parent_name in parent_names:
            # Concrete list indices like '[3]' collapse to '[]' in the
            # non-indexed form.
            if parent_name[0] == '[' and parent_name[-1] == ']':
                parts.append('[]')
            else:
                parts.append(parent_name)
        return ':'.join(parts), ':'.join(parent_names)
class SchemaFieldValue(object):
    """A single field value bound to its schema type and write-back hook."""

    def __init__(self, name, field, value, setter):
        """Bind a named value to its SchemaField and setter.

        Args:
            name: a name of the value
            field: SchemaField object that holds the type
            value: Python object that holds the value
            setter: a function which sets the value in the underlying data
                structure
        """
        self._name = name
        self._field = field
        self._value = value
        self._setter = setter

    @property
    def name(self):
        return self._name

    @property
    def field(self):
        return self._field

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, new_value):
        # Keep the cached copy and the backing data structure in sync.
        self._value = new_value
        self._setter(new_value)
class FieldRegistryIndex(object):
    """Helper class that allows fast access to values and their fields."""

    def __init__(self, registry):
        self._registry = registry
        self._names_in_order = []
        self._complex_name_to_field = {}
        self._computed_name_to_field = {}

    @property
    def registry(self):
        return self._registry

    @property
    def names_in_order(self):
        return self._names_in_order

    def _inspect_registry(self, parent_names, registry):
        """Inspects registry and adds its items to the index."""
        # pylint: disable=protected-access
        for field in registry._properties:
            if registry.is_complex_name(field.name):
                # Complex names already encode their full path.
                indexed_name = field.name
                target = self._complex_name_to_field
                child_parents = [indexed_name, '[]']
            else:
                indexed_name = ':'.join(parent_names + [field.name])
                target = self._computed_name_to_field
                child_parents = parent_names + [field.name, '[]']
            if indexed_name in target:
                raise KeyError('Field already defined: %s.' % indexed_name)
            # TODO(nretallack): arrays of primitive types are not indexed.
            # We will need to fix this if we want to translate them.
            if isinstance(field, FieldArray) and isinstance(
                    field.item_type, FieldRegistry):
                self._inspect_registry(child_parents, field.item_type)
            target[indexed_name] = field
            self._names_in_order.append(indexed_name)
        for child_name, child in registry._sub_registries.items():
            self._inspect_registry(parent_names + [child_name], child)

    def rebuild(self):
        """Build an index."""
        self._inspect_registry([], self._registry)

    def find(self, name):
        """Finds and returns a field given field name."""
        return (self._complex_name_to_field.get(name) or
                self._computed_name_to_field.get(name))
class FieldFilter(object):
    """Filter for collections of schema fields."""

    def __init__(
        self, type_names=None, hidden_values=None, i18n_values=None,
        editable_values=None):
        """Each criterion is a collection of acceptable attribute values;
        None means that attribute is not filtered on."""
        self._type_names = type_names
        self._hidden_values = hidden_values
        self._i18n_values = i18n_values
        self._editable_values = editable_values

    def _matches(self, field):
        """True when field passes every configured criterion."""
        if self._type_names and field.type not in self._type_names:
            return False
        if self._hidden_values and field.hidden not in self._hidden_values:
            return False
        if self._editable_values and (
                field.editable not in self._editable_values):
            return False
        if self._i18n_values and field.i18n not in self._i18n_values:
            return False
        return True

    def _filter(self, named_field_list):
        """Filters a list of name, SchemaField pairs."""
        return {name for name, field in named_field_list
                if self._matches(field)}

    def filter_value_to_type_binding(self, binding):
        """Returns a set of value names that pass the criterion."""
        return self._filter(
            (field_value.name, field_value.field)
            for field_value in binding.value_list)

    def filter_field_registry_index(self, index):
        """Returns the field names in the schema that pass the criterion."""
        return self._filter(
            (name, index.find(name)) for name in index.names_in_order)
class ValueToTypeBinding(object):
    """This class provides mapping of entity attributes to their types."""

    def __init__(self):
        # All SchemaFieldValue instances encountered during decomposition.
        self.value_list = []
        # Field name -> SchemaFieldValue.
        self.name_to_value = {}
        # Field name -> SchemaField.
        self.name_to_field = {}
        # Field names for which no schema mapping was found.
        self.unmapped_names = set()
        # The FieldRegistryIndex used to produce the binding.
        self.index = None

    def find_value(self, name):
        return self.name_to_value[name]

    def find_field(self, name):
        return self.name_to_field[name]

    @classmethod
    def _get_setter(cls, entity, key):
        """Return a closure that writes into entity[key]."""
        def setter(value):
            entity[key] = value
        return setter

    @classmethod
    def _visit_dict(cls, index, parent_names, entity, binding):
        """Visit dict entity."""
        for child_name, child_value in entity.items():
            cls._decompose_entity(
                index, parent_names + [child_name], child_value, binding,
                cls._get_setter(entity, child_name))

    @classmethod
    def _visit_list(cls, index, parent_names, entity, binding, setter):
        """Visit list entity."""
        name_no_index, name = index.registry.compute_name(parent_names)
        array_field = index.find(name_no_index)
        if not array_field:
            assert name not in binding.unmapped_names
            binding.unmapped_names.add(name)
            return
        assert isinstance(array_field, FieldArray)
        assert name not in binding.name_to_field
        binding.name_to_field[name] = array_field
        assert name not in binding.name_to_value, name
        binding.name_to_value[name] = SchemaFieldValue(
            name, array_field, entity, setter)
        for position, element in enumerate(entity):
            cls._decompose_entity(
                index, parent_names + ['[%s]' % position], element, binding,
                cls._get_setter(entity, position))

    @classmethod
    def _visit_attribute(cls, index, parent_names, entity, binding, setter):
        """Visit simple attribute."""
        name_no_index, name = index.registry.compute_name(parent_names)
        leaf_field = index.find(name_no_index)
        if leaf_field:
            leaf_value = SchemaFieldValue(name, leaf_field, entity, setter)
            binding.value_list.append(leaf_value)
            assert name not in binding.name_to_value, name
            binding.name_to_value[name] = leaf_value
            assert name not in binding.name_to_field
            binding.name_to_field[name] = leaf_field
        else:
            assert name not in binding.unmapped_names, name
            binding.unmapped_names.add(name)

    @classmethod
    def _decompose_entity(
        cls, index, parent_names, entity, binding, setter):
        """Recursively decomposes entity."""
        if isinstance(entity, dict):
            cls._visit_dict(index, parent_names, entity, binding)
        elif isinstance(entity, list):
            cls._visit_list(index, parent_names, entity, binding, setter)
        else:
            cls._visit_attribute(index, parent_names, entity, binding, setter)

    @classmethod
    def bind_entity_to_schema(cls, json_dumpable_entity, registry):
        """Connects schema field type information to the entity attributes.

        Args:
            json_dumpable_entity: a Python dict recursively containing other
                dict, list and primitive objects
            registry: a FieldRegistry that holds entity type information

        Returns:
            an instance of ValueToTypeBinding object that maps entity
            attributes to their types
        """
        binding = ValueToTypeBinding()
        index = FieldRegistryIndex(registry)
        index.rebuild()
        cls._decompose_entity(
            index, [], json_dumpable_entity, binding, None)
        binding.index = index
        return binding
| |
# Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import mock
from oslo_config import cfg
from oslo_utils import timeutils
import six
import six.moves.urllib.parse as urlparse
import webob
from cinder.api import extensions
from cinder.api.v2 import volumes
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
from cinder.tests import fake_notifier
from cinder.tests.image import fake as fake_image
from cinder.volume import api as volume_api
# Global oslo.config registry shared with the code under test.
CONF = cfg.CONF
# XML namespace used in block-storage v2 API responses.
NS = '{http://docs.openstack.org/api/openstack-block-storage/2.0/content}'
# The only snapshot id the stub lookup below recognizes.
TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001'
def stub_snapshot_get(self, context, snapshot_id):
    """Fake snapshot lookup: return a canned snapshot for the test UUID.

    Mimics the real API by raising exception.NotFound for any other id.
    """
    if snapshot_id != TEST_SNAPSHOT_UUID:
        raise exception.NotFound
    snapshot = {
        'id': snapshot_id,
        'volume_id': 12,
        'status': 'available',
        'volume_size': 100,
        'created_at': None,
        'name': 'Default name',
        'description': 'Default description',
    }
    return snapshot
class VolumeApiTest(test.TestCase):
    def setUp(self):
        """Wire up a VolumeController against stubbed db and volume APIs."""
        super(VolumeApiTest, self).setUp()
        self.addCleanup(fake_notifier.reset)
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        fake_image.stub_out_image_service(self.stubs)
        self.controller = volumes.VolumeController(self.ext_mgr)
        self.flags(host='fake',
                   notification_driver=[fake_notifier.__name__])
        self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
        self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
        self.stubs.Set(db, 'service_get_all_by_topic',
                       stubs.stub_service_get_all_by_topic)
        # Show full diffs on assertion failures; the expected dicts are large.
        self.maxDiff = None
def test_volume_create(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
vol = {
"size": 100,
"name": "Volume Test Name",
"description": "Volume Test Desc",
"availability_zone": "zone1:host1"
}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
res_dict = self.controller.create(req, body)
ex = {'volume': {'attachments': [],
'availability_zone': 'zone1:host1',
'bootable': 'false',
'consistencygroup_id': None,
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
'description': 'Volume Test Desc',
'id': '1',
'links':
[{'href': 'http://localhost/v2/fakeproject/volumes/1',
'rel': 'self'},
{'href': 'http://localhost/fakeproject/volumes/1',
'rel': 'bookmark'}],
'metadata': {},
'name': 'Volume Test Name',
'replication_status': 'disabled',
'multiattach': False,
'size': 100,
'snapshot_id': None,
'source_volid': None,
'status': 'fakestatus',
'user_id': 'fakeuser',
'volume_type': 'vol_type_name',
'encrypted': False}}
self.assertEqual(res_dict, ex)
    def test_volume_create_with_consistencygroup_invalid_type(self):
        """Creating in a CG with a missing or mismatched type is rejected."""
        ctxt = context.RequestContext('fake', 'fake', auth_token=True)
        vol_type = db.volume_type_create(
            context.get_admin_context(),
            dict(name=CONF.default_volume_type, extra_specs={})
        )
        db_vol_type = db.volume_type_get(context.get_admin_context(),
                                         vol_type.id)
        cg = {
            'id': '1',
            'name': 'cg1',
            'volume_type_id': db_vol_type['id'],
        }
        fake_type = {
            'id': '9999',
            'name': 'fake',
        }
        vol_api = volume_api.API()
        # No volume type given: cannot satisfy the CG's type requirement.
        self.assertRaises(exception.InvalidInput,
                          vol_api.create,
                          ctxt, 1, 'vol1', 'volume 1',
                          consistencygroup=cg)
        # Explicit type that differs from the CG's type.
        self.assertRaises(exception.InvalidInput,
                          vol_api.create,
                          ctxt, 1, 'vol1', 'volume 1',
                          volume_type=fake_type,
                          consistencygroup=cg)
    def test_volume_create_with_type(self):
        """Create accepts a volume type by name or id; bad names give 404."""
        vol_type = db.volume_type_create(
            context.get_admin_context(),
            dict(name=CONF.default_volume_type, extra_specs={})
        )
        db_vol_type = db.volume_type_get(context.get_admin_context(),
                                         vol_type.id)
        vol = {
            "size": 100,
            "name": "Volume Test Name",
            "description": "Volume Test Desc",
            "availability_zone": "zone1:host1",
            "volume_type": "FakeTypeName",
        }
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        # Raise 404 when type name isn't valid
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, body)
        # Use correct volume type name
        vol.update(dict(volume_type=CONF.default_volume_type))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        volume_id = res_dict['volume']['id']
        self.assertEqual(len(res_dict), 1)
        # Use correct volume type id
        vol.update(dict(volume_type=db_vol_type['id']))
        body.update(dict(volume=vol))
        res_dict = self.controller.create(req, body)
        volume_id = res_dict['volume']['id']
        self.assertEqual(len(res_dict), 1)
        self.stubs.Set(volume_api.API, 'get_all',
                       lambda *args, **kwargs:
                       [stubs.stub_volume(volume_id,
                                          volume_type={'name': vol_type})])
        req = fakes.HTTPRequest.blank('/v2/volumes/detail')
        # Smoke check only: the detail listing must not blow up for a typed
        # volume; the response content itself is not asserted on.
        res_dict = self.controller.detail(req)
    def test_volume_creation_fails_with_bad_size(self):
        """A non-numeric size ('') is rejected as InvalidInput."""
        vol = {"size": '',
               "name": "Volume Test Name",
               "description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(exception.InvalidInput,
                          self.controller.create,
                          req,
                          body)
    def test_volume_creation_fails_with_bad_availability_zone(self):
        """An unknown availability zone is rejected as InvalidInput."""
        vol = {"size": '1',
               "name": "Volume Test Name",
               "description": "Volume Test Desc",
               "availability_zone": "zonen:hostn"}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(exception.InvalidInput,
                          self.controller.create,
                          req, body)
    def test_volume_create_with_image_ref(self):
        """Create from an image via 'imageRef' (UUID form) succeeds."""
        self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)

        # The os-image-create extension must be enabled for imageRef to work.
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {"size": '1',
               "name": "Volume Test Name",
               "description": "Volume Test Desc",
               "availability_zone": "nova",
               "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
        ex = {'volume': {'attachments': [],
                         'availability_zone': 'nova',
                         'bootable': 'false',
                         'consistencygroup_id': None,
                         'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
                         'description': 'Volume Test Desc',
                         'encrypted': False,
                         'id': '1',
                         'links':
                         [{'href': 'http://localhost/v2/fakeproject/volumes/1',
                           'rel': 'self'},
                          {'href': 'http://localhost/fakeproject/volumes/1',
                           'rel': 'bookmark'}],
                         'metadata': {},
                         'name': 'Volume Test Name',
                         'replication_status': 'disabled',
                         'multiattach': False,
                         'size': '1',
                         'snapshot_id': None,
                         'source_volid': None,
                         'status': 'fakestatus',
                         'user_id': 'fakeuser',
                         'volume_type': 'vol_type_name'}}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        res_dict = self.controller.create(req, body)
        self.assertEqual(ex, res_dict)
    def test_volume_create_with_image_ref_is_integer(self):
        """A non-string 'imageRef' yields HTTP 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {
            "size": '1',
            "name": "Volume Test Name",
            "description": "Volume Test Desc",
            "availability_zone": "cinder",
            "imageRef": 1234,
        }
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_ref_not_uuid_format(self):
        """A non-UUID 'imageRef' (that matches no image name) yields 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {
            "size": '1',
            "name": "Volume Test Name",
            "description": "Volume Test Desc",
            "availability_zone": "cinder",
            "imageRef": '12345'
        }
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_ref_with_empty_string(self):
        """An empty 'imageRef' yields HTTP 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {"size": 1,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "cinder",
               "imageRef": ''}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_id(self):
        """Create from an image via the legacy 'image_id' key succeeds."""
        self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)

        # The os-image-create extension must be enabled for image_id to work.
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {"size": '1',
               "name": "Volume Test Name",
               "description": "Volume Test Desc",
               "availability_zone": "nova",
               "image_id": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
        ex = {'volume': {'attachments': [],
                         'availability_zone': 'nova',
                         'bootable': 'false',
                         'consistencygroup_id': None,
                         'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
                         'description': 'Volume Test Desc',
                         'encrypted': False,
                         'id': '1',
                         'links':
                         [{'href': 'http://localhost/v2/fakeproject/volumes/1',
                           'rel': 'self'},
                          {'href': 'http://localhost/fakeproject/volumes/1',
                           'rel': 'bookmark'}],
                         'metadata': {},
                         'name': 'Volume Test Name',
                         'replication_status': 'disabled',
                         'multiattach': False,
                         'size': '1',
                         'snapshot_id': None,
                         'source_volid': None,
                         'status': 'fakestatus',
                         'user_id': 'fakeuser',
                         'volume_type': 'vol_type_name'}}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        res_dict = self.controller.create(req, body)
        self.assertEqual(ex, res_dict)
    def test_volume_create_with_image_id_is_integer(self):
        """A non-string 'image_id' yields HTTP 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {
            "size": '1',
            "name": "Volume Test Name",
            "description": "Volume Test Desc",
            "availability_zone": "cinder",
            "image_id": 1234,
        }
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_id_not_uuid_format(self):
        """A non-UUID 'image_id' (that matches no image name) yields 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {
            "size": '1',
            "name": "Volume Test Name",
            "description": "Volume Test Desc",
            "availability_zone": "cinder",
            "image_id": '12345'
        }
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
    def test_volume_create_with_image_id_with_empty_string(self):
        """An empty 'image_id' yields HTTP 400."""
        self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
        self.ext_mgr.extensions = {'os-image-create': 'fake'}
        vol = {"size": 1,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "cinder",
               "image_id": ''}
        body = {"volume": vol}
        req = fakes.HTTPRequest.blank('/v2/volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)
def test_volume_create_with_image_name(self):
    """Creating a volume from an image *name* via imageRef succeeds.

    The fake image service resolves the name to a single image, so the
    create call returns the usual serialized volume view.
    """
    self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
    self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
    self.stubs.Set(fake_image._FakeImageService,
                   "detail",
                   stubs.stub_image_service_detail)
    # Image name (not a UUID) that the stubbed image service knows.
    test_id = "Fedora-x86_64-20-20140618-sda"
    self.ext_mgr.extensions = {'os-image-create': 'fake'}
    vol = {"size": '1',
           "name": "Volume Test Name",
           "description": "Volume Test Desc",
           "availability_zone": "nova",
           "imageRef": test_id}
    # Full expected API view of the created volume.
    ex = {'volume': {'attachments': [],
                     'availability_zone': 'nova',
                     'bootable': 'false',
                     'consistencygroup_id': None,
                     'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
                     'description': 'Volume Test Desc',
                     'encrypted': False,
                     'id': '1',
                     'links':
                     [{'href': 'http://localhost/v2/fakeproject/volumes/1',
                       'rel': 'self'},
                      {'href': 'http://localhost/fakeproject/volumes/1',
                       'rel': 'bookmark'}],
                     'metadata': {},
                     'name': 'Volume Test Name',
                     'replication_status': 'disabled',
                     'multiattach': False,
                     'size': '1',
                     'snapshot_id': None,
                     'source_volid': None,
                     'status': 'fakestatus',
                     'user_id': 'fakeuser',
                     'volume_type': 'vol_type_name'}}
    body = {"volume": vol}
    req = fakes.HTTPRequest.blank('/v2/volumes')
    res_dict = self.controller.create(req, body)
    self.assertEqual(ex, res_dict)
def test_volume_create_with_image_name_has_multiple(self):
    """An ambiguous image name (multiple matches) yields HTTP 400."""
    self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
    self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
    self.stubs.Set(fake_image._FakeImageService,
                   "detail",
                   stubs.stub_image_service_detail)
    self.ext_mgr.extensions = {'os-image-create': 'fake'}
    # The stubbed image service returns more than one image for "multi".
    ambiguous_name = "multi"
    volume_params = {"size": '1',
                     "name": "Volume Test Name",
                     "description": "Volume Test Desc",
                     "availability_zone": "nova",
                     "imageRef": ambiguous_name}
    request = fakes.HTTPRequest.blank('/v2/volumes')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create,
                      request,
                      {"volume": volume_params})
def test_volume_create_with_image_name_no_match(self):
    """An image name with no matching image yields HTTP 400."""
    self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
    self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
    self.stubs.Set(fake_image._FakeImageService,
                   "detail",
                   stubs.stub_image_service_detail)
    self.ext_mgr.extensions = {'os-image-create': 'fake'}
    # The stubbed image service knows no image by this name.
    missing_name = "MissingName"
    volume_params = {"size": '1',
                     "name": "Volume Test Name",
                     "description": "Volume Test Desc",
                     "availability_zone": "nova",
                     "imageRef": missing_name}
    request = fakes.HTTPRequest.blank('/v2/volumes')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create,
                      request,
                      {"volume": volume_params})
def test_volume_update(self):
    """Updating 'name' is reflected in the view and emits two notifications."""
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    updates = {
        "name": "Updated Test Name",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v2/volumes/1')
    # Sanity: no notifications have been emitted before the update call.
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
    res_dict = self.controller.update(req, '1', body)
    expected = {
        'volume': {
            'status': 'fakestatus',
            'description': 'displaydesc',
            'encrypted': False,
            'availability_zone': 'fakeaz',
            'bootable': 'false',
            'consistencygroup_id': None,
            'name': 'Updated Test Name',
            'replication_status': 'disabled',
            'multiattach': False,
            'attachments': [],
            'user_id': 'fakeuser',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
            'size': 1,
            'links': [
                {
                    'href': 'http://localhost/v2/fakeproject/volumes/1',
                    'rel': 'self'
                },
                {
                    'href': 'http://localhost/fakeproject/volumes/1',
                    'rel': 'bookmark'
                }
            ],
        }
    }
    self.assertEqual(res_dict, expected)
    # update.start and update.end notifications.
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_volume_update_deprecation(self):
    """Deprecated keys display_name/display_description still update.

    The legacy v1-style keys are mapped onto name/description in the
    returned view.
    """
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    updates = {
        "display_name": "Updated Test Name",
        "display_description": "Updated Test Description",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v2/volumes/1')
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
    res_dict = self.controller.update(req, '1', body)
    expected = {
        'volume': {
            'status': 'fakestatus',
            'description': 'Updated Test Description',
            'encrypted': False,
            'availability_zone': 'fakeaz',
            'bootable': 'false',
            'consistencygroup_id': None,
            'name': 'Updated Test Name',
            'replication_status': 'disabled',
            'multiattach': False,
            'attachments': [],
            'user_id': 'fakeuser',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
            'size': 1,
            'links': [
                {
                    'href': 'http://localhost/v2/fakeproject/volumes/1',
                    'rel': 'self'
                },
                {
                    'href': 'http://localhost/fakeproject/volumes/1',
                    'rel': 'bookmark'
                }
            ],
        }
    }
    self.assertEqual(res_dict, expected)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_volume_update_deprecation_key_priority(self):
    """Test current update keys have priority over deprecated keys."""
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    # Both the current and the deprecated keys are supplied; the current
    # ones ("name"/"description") must win.
    updates = {
        "name": "New Name",
        "description": "New Description",
        "display_name": "Not Shown Name",
        "display_description": "Not Shown Description",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v2/volumes/1')
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
    res_dict = self.controller.update(req, '1', body)
    expected = {
        'volume': {
            'status': 'fakestatus',
            'description': 'New Description',
            'encrypted': False,
            'availability_zone': 'fakeaz',
            'bootable': 'false',
            'consistencygroup_id': None,
            'name': 'New Name',
            'replication_status': 'disabled',
            'multiattach': False,
            'attachments': [],
            'user_id': 'fakeuser',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
            'size': 1,
            'links': [
                {
                    'href': 'http://localhost/v2/fakeproject/volumes/1',
                    'rel': 'self'
                },
                {
                    'href': 'http://localhost/fakeproject/volumes/1',
                    'rel': 'bookmark'
                }
            ],
        }
    }
    self.assertEqual(res_dict, expected)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_volume_update_metadata(self):
    """Metadata passed on update is merged into the returned view."""
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    updates = {
        "metadata": {"qos_max_iops": 2000}
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v2/volumes/1')
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
    res_dict = self.controller.update(req, '1', body)
    expected = {'volume': {
        'status': 'fakestatus',
        'description': 'displaydesc',
        'encrypted': False,
        'availability_zone': 'fakeaz',
        'bootable': 'false',
        'consistencygroup_id': None,
        'name': 'displayname',
        'replication_status': 'disabled',
        'multiattach': False,
        'attachments': [],
        'user_id': 'fakeuser',
        'volume_type': 'vol_type_name',
        'snapshot_id': None,
        'source_volid': None,
        # New key plus the stubbed admin-visible metadata.
        'metadata': {"qos_max_iops": 2000,
                     "readonly": "False",
                     "attached_mode": "rw"},
        'id': '1',
        'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
        'size': 1,
        'links': [
            {
                'href': 'http://localhost/v2/fakeproject/volumes/1',
                'rel': 'self'
            },
            {
                'href': 'http://localhost/fakeproject/volumes/1',
                'rel': 'bookmark'
            }
        ],
    }}
    self.assertEqual(res_dict, expected)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_volume_update_with_admin_metadata(self):
    """Admin context sees admin metadata (readonly) but not invisible keys.

    Creates a real DB volume with admin metadata and an attachment, then
    updates it as admin and checks the full serialized view.
    """
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    # "invisible_key" must never be exposed through the API view.
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1', }
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(),
                       attachment['id'], stubs.FAKE_UUID, None, '/')
    updates = {
        "name": "Updated Test Name",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v2/volumes/1')
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
    admin_ctx = context.RequestContext('admin', 'fake', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.update(req, '1', body)
    expected = {'volume': {
        'status': 'in-use',
        'description': 'displaydesc',
        'encrypted': False,
        'availability_zone': 'fakeaz',
        'bootable': 'false',
        'consistencygroup_id': None,
        'name': 'Updated Test Name',
        'replication_status': 'disabled',
        'multiattach': False,
        'attachments': [{
            'id': '1',
            'attachment_id': attachment['id'],
            'volume_id': '1',
            'server_id': stubs.FAKE_UUID,
            'host_name': None,
            'device': '/',
        }],
        'user_id': 'fakeuser',
        'volume_type': None,
        'snapshot_id': None,
        'source_volid': None,
        'metadata': {'key': 'value',
                     'readonly': 'True'},
        'id': '1',
        'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
        'size': 1,
        'links': [
            {
                'href': 'http://localhost/v2/fake/volumes/1',
                'rel': 'self'
            },
            {
                'href': 'http://localhost/fake/volumes/1',
                'rel': 'bookmark'
            }
        ],
    }}
    self.assertEqual(expected, res_dict)
    self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
def test_update_empty_body(self):
    """An empty request body on update yields HTTP 400."""
    request = fakes.HTTPRequest.blank('/v2/volumes/1')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.update,
                      request, '1', {})
def test_update_invalid_body(self):
    """A body without the top-level 'volume' key yields HTTP 400."""
    bad_body = {
        'name': 'missing top level volume key'
    }
    request = fakes.HTTPRequest.blank('/v2/volumes/1')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.update,
                      request, '1', bad_body)
def test_update_not_found(self):
    """Updating a volume that cannot be looked up yields HTTP 404."""
    self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
    request = fakes.HTTPRequest.blank('/v2/volumes/1')
    request_body = {"volume": {"name": "Updated Test Name"}}
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.controller.update,
                      request, '1', request_body)
def test_volume_list_summary(self):
    """The index view contains only name, id and links per volume."""
    self.stubs.Set(volume_api.API, 'get_all',
                   stubs.stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    request = fakes.HTTPRequest.blank('/v2/volumes')
    result = self.controller.index(request)
    expected_volume = {
        'name': 'displayname',
        'id': '1',
        'links': [
            {
                'href': 'http://localhost/v2/fakeproject/volumes/1',
                'rel': 'self'
            },
            {
                'href': 'http://localhost/fakeproject/volumes/1',
                'rel': 'bookmark'
            }
        ],
    }
    self.assertEqual(result, {'volumes': [expected_volume]})
    # Finally test that we cached the returned volumes
    self.assertEqual(1, len(request.cached_resource()))
def test_volume_list_detail(self):
    """The detail view returns the full serialized volume and caches it."""
    self.stubs.Set(volume_api.API, 'get_all',
                   stubs.stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    req = fakes.HTTPRequest.blank('/v2/volumes/detail')
    res_dict = self.controller.detail(req)
    expected = {
        'volumes': [
            {
                'status': 'fakestatus',
                'description': 'displaydesc',
                'encrypted': False,
                'availability_zone': 'fakeaz',
                'bootable': 'false',
                'consistencygroup_id': None,
                'name': 'displayname',
                'replication_status': 'disabled',
                'multiattach': False,
                'attachments': [],
                'user_id': 'fakeuser',
                'volume_type': 'vol_type_name',
                'snapshot_id': None,
                'source_volid': None,
                'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
                'id': '1',
                'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
                'size': 1,
                'links': [
                    {
                        'href': 'http://localhost/v2/fakeproject/volumes/'
                                '1',
                        'rel': 'self'
                    },
                    {
                        'href': 'http://localhost/fakeproject/volumes/1',
                        'rel': 'bookmark'
                    }
                ],
            }
        ]
    }
    self.assertEqual(res_dict, expected)
    # Finally test that we cached the returned volumes
    self.assertEqual(1, len(req.cached_resource()))
def test_volume_list_detail_with_admin_metadata(self):
    """Detail as admin exposes readonly admin metadata, hides invisible keys."""
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    # "invisible_key" must never surface in the API view.
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1', }
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(),
                       attachment['id'], stubs.FAKE_UUID, None, '/')
    req = fakes.HTTPRequest.blank('/v2/volumes/detail')
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.detail(req)
    expected = {
        'volumes': [
            {
                'status': 'in-use',
                'description': 'displaydesc',
                'encrypted': False,
                'availability_zone': 'fakeaz',
                'bootable': 'false',
                'consistencygroup_id': None,
                'name': 'displayname',
                'replication_status': 'disabled',
                'multiattach': False,
                'attachments': [
                    {
                        'attachment_id': attachment['id'],
                        'device': '/',
                        'server_id': stubs.FAKE_UUID,
                        'host_name': None,
                        'id': '1',
                        'volume_id': '1'
                    }
                ],
                'user_id': 'fakeuser',
                'volume_type': None,
                'snapshot_id': None,
                'source_volid': None,
                'metadata': {'key': 'value', 'readonly': 'True'},
                'id': '1',
                'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
                'size': 1,
                'links': [
                    {
                        'href': 'http://localhost/v2/fakeproject'
                                '/volumes/1',
                        'rel': 'self'
                    },
                    {
                        'href': 'http://localhost/fakeproject/volumes/1',
                        'rel': 'bookmark'
                    }
                ],
            }
        ]
    }
    self.assertEqual(res_dict, expected)
def test_volume_index_with_marker(self):
    """Index with a marker returns whatever the project query yields."""
    def fake_get_all_by_project(context, project_id, marker, limit,
                                sort_keys=None, sort_dirs=None,
                                filters=None,
                                viewable_admin_meta=False):
        # Always two volumes, regardless of the marker.
        return [stubs.stub_volume(1, display_name='vol1'),
                stubs.stub_volume(2, display_name='vol2')]
    self.stubs.Set(db, 'volume_get_all_by_project',
                   fake_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    request = fakes.HTTPRequest.blank('/v2/volumes?marker=1')
    listed = self.controller.index(request)['volumes']
    self.assertEqual(len(listed), 2)
    self.assertEqual(listed[0]['id'], 1)
    self.assertEqual(listed[1]['id'], 2)
def test_volume_index_limit(self):
    """A limited index returns one volume and a well-formed next link."""
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stubs.stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    req = fakes.HTTPRequest.blank('/v2/volumes'
                                  '?limit=1&name=foo'
                                  '&sort=id1:asc')
    res_dict = self.controller.index(req)
    volumes = res_dict['volumes']
    self.assertEqual(len(volumes), 1)
    # Ensure that the next link is correctly formatted, it should
    # contain the same limit, filter, and sort information as the
    # original request as well as a marker; this ensures that the
    # caller can simply use the "next" link and that they do not
    # need to manually insert the limit and sort information.
    links = res_dict['volumes_links']
    self.assertEqual(links[0]['rel'], 'next')
    href_parts = urlparse.urlparse(links[0]['href'])
    self.assertEqual('/v2/fakeproject/volumes', href_parts.path)
    params = urlparse.parse_qs(href_parts.query)
    self.assertEqual(str(volumes[0]['id']), params['marker'][0])
    self.assertEqual('1', params['limit'][0])
    self.assertEqual('foo', params['name'][0])
    self.assertEqual('id1:asc', params['sort'][0])
def test_volume_index_limit_negative(self):
    """A negative limit on index is rejected."""
    request = fakes.HTTPRequest.blank('/v2/volumes?limit=-1')
    self.assertRaises(exception.Invalid, self.controller.index, request)
def test_volume_index_limit_non_int(self):
    """A non-integer limit on index is rejected."""
    request = fakes.HTTPRequest.blank('/v2/volumes?limit=a')
    self.assertRaises(exception.Invalid, self.controller.index, request)
def test_volume_index_limit_marker(self):
    """Combining marker and limit on index returns a single volume."""
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stubs.stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    request = fakes.HTTPRequest.blank('/v2/volumes?marker=1&limit=1')
    listed = self.controller.index(request)['volumes']
    self.assertEqual(len(listed), 1)
    self.assertEqual(listed[0]['id'], '1')
def test_volume_index_limit_offset(self):
    """Offset skips results; invalid limits raise InvalidInput."""
    def stub_volume_get_all_by_project(context, project_id, marker, limit,
                                       sort_keys=None, sort_dirs=None,
                                       filters=None,
                                       viewable_admin_meta=False):
        return [
            stubs.stub_volume(1, display_name='vol1'),
            stubs.stub_volume(2, display_name='vol2'),
        ]
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    # offset=1 skips the first of the two stubbed volumes.
    req = fakes.HTTPRequest.blank('/v2/volumes?limit=2&offset=1')
    res_dict = self.controller.index(req)
    volumes = res_dict['volumes']
    self.assertEqual(len(volumes), 1)
    self.assertEqual(volumes[0]['id'], 2)
    # Negative and non-integer limits are rejected even with an offset.
    req = fakes.HTTPRequest.blank('/v2/volumes?limit=-1&offset=1')
    self.assertRaises(exception.InvalidInput,
                      self.controller.index,
                      req)
    req = fakes.HTTPRequest.blank('/v2/volumes?limit=a&offset=1')
    self.assertRaises(exception.InvalidInput,
                      self.controller.index,
                      req)
def test_volume_detail_with_marker(self):
    """Detail with a marker returns whatever the project query yields."""
    def fake_get_all_by_project(context, project_id, marker, limit,
                                sort_keys=None, sort_dirs=None,
                                filters=None,
                                viewable_admin_meta=False):
        # Always two volumes, regardless of the marker.
        return [stubs.stub_volume(1, display_name='vol1'),
                stubs.stub_volume(2, display_name='vol2')]
    self.stubs.Set(db, 'volume_get_all_by_project',
                   fake_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    request = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1')
    listed = self.controller.detail(request)['volumes']
    self.assertEqual(len(listed), 2)
    self.assertEqual(listed[0]['id'], 1)
    self.assertEqual(listed[1]['id'], 2)
def test_volume_detail_limit(self):
    """A limited detail listing returns one volume and a valid next link."""
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stubs.stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=1')
    res_dict = self.controller.detail(req)
    volumes = res_dict['volumes']
    self.assertEqual(len(volumes), 1)
    # Ensure that the next link is correctly formatted
    links = res_dict['volumes_links']
    self.assertEqual(links[0]['rel'], 'next')
    href_parts = urlparse.urlparse(links[0]['href'])
    self.assertEqual('/v2/fakeproject/volumes/detail', href_parts.path)
    params = urlparse.parse_qs(href_parts.query)
    # assertIn gives a useful failure message, unlike assertTrue(x in y).
    self.assertIn('marker', params)
    self.assertEqual('1', params['limit'][0])
def test_volume_detail_limit_negative(self):
    """A negative limit on detail is rejected."""
    request = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1')
    self.assertRaises(exception.Invalid, self.controller.detail, request)
def test_volume_detail_limit_non_int(self):
    """A non-integer limit on detail is rejected."""
    request = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a')
    self.assertRaises(exception.Invalid, self.controller.detail, request)
def test_volume_detail_limit_marker(self):
    """Combining marker and limit on detail returns a single volume."""
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stubs.stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    request = fakes.HTTPRequest.blank('/v2/volumes/detail?marker=1&limit=1')
    listed = self.controller.detail(request)['volumes']
    self.assertEqual(len(listed), 1)
    self.assertEqual(listed[0]['id'], '1')
def test_volume_detail_limit_offset(self):
    """Offset skips results in detail; invalid limits raise InvalidInput."""
    def stub_volume_get_all_by_project(context, project_id, marker, limit,
                                       sort_keys=None, sort_dirs=None,
                                       filters=None,
                                       viewable_admin_meta=False):
        return [
            stubs.stub_volume(1, display_name='vol1'),
            stubs.stub_volume(2, display_name='vol2'),
        ]
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    # offset=1 skips the first stubbed volume, for both regular ...
    req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1')
    res_dict = self.controller.detail(req)
    volumes = res_dict['volumes']
    self.assertEqual(len(volumes), 1)
    self.assertEqual(volumes[0]['id'], 2)
    # ... and admin contexts.
    req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=2&offset=1',
                                  use_admin_context=True)
    res_dict = self.controller.detail(req)
    volumes = res_dict['volumes']
    self.assertEqual(len(volumes), 1)
    self.assertEqual(volumes[0]['id'], 2)
    # Negative and non-integer limits are rejected even with an offset.
    req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=-1&offset=1')
    self.assertRaises(exception.InvalidInput,
                      self.controller.detail,
                      req)
    req = fakes.HTTPRequest.blank('/v2/volumes/detail?limit=a&offset=1')
    self.assertRaises(exception.InvalidInput,
                      self.controller.detail,
                      req)
def test_volume_with_limit_zero(self):
    """limit=0 yields an empty volume list, not an error."""
    def fake_get_all(context, marker, limit, **kwargs):
        return []
    self.stubs.Set(db, 'volume_get_all', fake_get_all)
    request = fakes.HTTPRequest.blank('/v2/volumes?limit=0')
    result = self.controller.index(request)
    self.assertEqual(result, {'volumes': []})
def test_volume_default_limit(self):
    """Listing respects osapi_max_limit and adds a next link when truncated.

    Checks, for both the index and detail views:
      * exactly max results and no next link when count == max,
      * no next link when count < max,
      * max results plus a next link when count > max,
      * a limit larger than max is clamped to max.
    """
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)

    def _verify_links(links, url_key):
        '''Verify next link and url.'''
        self.assertEqual(links[0]['rel'], 'next')
        href_parts = urlparse.urlparse(links[0]['href'])
        # BUG FIX: the original body used the enclosing loop variable
        # `key` via closure and left the `url_key` parameter unused;
        # use the parameter so the helper stands on its own.
        self.assertEqual('/v2/fakeproject/%s' % url_key, href_parts.path)

    # Verify both the index and detail queries
    api_keys = ['volumes', 'volumes/detail']
    fns = [self.controller.index, self.controller.detail]

    # Number of volumes equals the max, next link not included
    def stub_volume_get_all(context, marker, limit,
                            sort_keys=None, sort_dirs=None,
                            filters=None,
                            viewable_admin_meta=False):
        vols = [stubs.stub_volume(i)
                for i in xrange(CONF.osapi_max_limit)]
        if limit is None or limit >= len(vols):
            return vols
        return vols[:limit]
    self.stubs.Set(db, 'volume_get_all', stub_volume_get_all)
    for key, fn in zip(api_keys, fns):
        req = fakes.HTTPRequest.blank('/v2/%s?all_tenants=1' % key,
                                      use_admin_context=True)
        res_dict = fn(req)
        self.assertEqual(len(res_dict['volumes']), CONF.osapi_max_limit)
        self.assertNotIn('volumes_links', res_dict)

    # Number of volumes less than max, do not include
    def stub_volume_get_all2(context, marker, limit,
                             sort_keys=None, sort_dirs=None,
                             filters=None,
                             viewable_admin_meta=False):
        vols = [stubs.stub_volume(i)
                for i in xrange(100)]
        if limit is None or limit >= len(vols):
            return vols
        return vols[:limit]
    self.stubs.Set(db, 'volume_get_all', stub_volume_get_all2)
    for key, fn in zip(api_keys, fns):
        req = fakes.HTTPRequest.blank('/v2/%s?all_tenants=1' % key,
                                      use_admin_context=True)
        res_dict = fn(req)
        self.assertEqual(len(res_dict['volumes']), 100)
        self.assertNotIn('volumes_links', res_dict)

    # Number of volumes more than the max, include next link
    def stub_volume_get_all3(context, marker, limit,
                             sort_keys=None, sort_dirs=None,
                             filters=None,
                             viewable_admin_meta=False):
        vols = [stubs.stub_volume(i)
                for i in xrange(CONF.osapi_max_limit + 100)]
        if limit is None or limit >= len(vols):
            return vols
        return vols[:limit]
    self.stubs.Set(db, 'volume_get_all', stub_volume_get_all3)
    for key, fn in zip(api_keys, fns):
        req = fakes.HTTPRequest.blank('/v2/%s?all_tenants=1' % key,
                                      use_admin_context=True)
        res_dict = fn(req)
        self.assertEqual(len(res_dict['volumes']), CONF.osapi_max_limit)
        volumes_links = res_dict['volumes_links']
        _verify_links(volumes_links, key)
    # Pass a limit that is greater than the max and the total number of
    # volumes, ensure only the maximum is returned and that the next
    # link is present.
    for key, fn in zip(api_keys, fns):
        req = fakes.HTTPRequest.blank('/v2/%s?all_tenants=1&limit=%d'
                                      % (key, CONF.osapi_max_limit * 2),
                                      use_admin_context=True)
        res_dict = fn(req)
        self.assertEqual(len(res_dict['volumes']), CONF.osapi_max_limit)
        volumes_links = res_dict['volumes_links']
        _verify_links(volumes_links, key)
def test_volume_list_default_filters(self):
    """Tests that the default filters from volume.api.API.get_all are set.

    1. 'no_migration_status'=True for non-admins and get_all_by_project is
    invoked.
    2. 'no_migration_status' is not included for admins.
    3. When 'all_tenants' is not specified, then it is removed and
    get_all_by_project is invoked for admins.
    3. When 'all_tenants' is specified, then it is removed and get_all
    is invoked for admins.
    """
    # Non-admin, project function should be called with no_migration_status
    def stub_volume_get_all_by_project(context, project_id, marker, limit,
                                       sort_keys=None, sort_dirs=None,
                                       filters=None,
                                       viewable_admin_meta=False):
        self.assertEqual(filters['no_migration_targets'], True)
        self.assertFalse('all_tenants' in filters)
        return [stubs.stub_volume(1, display_name='vol1')]

    def stub_volume_get_all(context, marker, limit,
                            sort_keys=None, sort_dirs=None,
                            filters=None,
                            viewable_admin_meta=False):
        return []
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stub_volume_get_all_by_project)
    self.stubs.Set(db, 'volume_get_all', stub_volume_get_all)

    # all_tenants does not matter for non-admin
    for params in ['', '?all_tenants=1']:
        req = fakes.HTTPRequest.blank('/v2/volumes%s' % params)
        resp = self.controller.index(req)
        self.assertEqual(len(resp['volumes']), 1)
        self.assertEqual(resp['volumes'][0]['name'], 'vol1')

    # Admin, all_tenants is not set, project function should be called
    # without no_migration_status
    def stub_volume_get_all_by_project2(context, project_id, marker, limit,
                                        sort_keys=None, sort_dirs=None,
                                        filters=None,
                                        viewable_admin_meta=False):
        self.assertFalse('no_migration_targets' in filters)
        return [stubs.stub_volume(1, display_name='vol2')]

    def stub_volume_get_all2(context, marker, limit,
                             sort_keys=None, sort_dirs=None,
                             filters=None,
                             viewable_admin_meta=False):
        return []
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stub_volume_get_all_by_project2)
    self.stubs.Set(db, 'volume_get_all', stub_volume_get_all2)
    req = fakes.HTTPRequest.blank('/v2/volumes', use_admin_context=True)
    resp = self.controller.index(req)
    self.assertEqual(len(resp['volumes']), 1)
    self.assertEqual(resp['volumes'][0]['name'], 'vol2')

    # Admin, all_tenants is set, get_all function should be called
    # without no_migration_status
    def stub_volume_get_all_by_project3(context, project_id, marker, limit,
                                        sort_keys=None, sort_dirs=None,
                                        filters=None,
                                        viewable_admin_meta=False):
        return []

    def stub_volume_get_all3(context, marker, limit,
                             sort_keys=None, sort_dirs=None,
                             filters=None,
                             viewable_admin_meta=False):
        self.assertFalse('no_migration_targets' in filters)
        self.assertFalse('all_tenants' in filters)
        return [stubs.stub_volume(1, display_name='vol3')]
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stub_volume_get_all_by_project3)
    self.stubs.Set(db, 'volume_get_all', stub_volume_get_all3)
    req = fakes.HTTPRequest.blank('/v2/volumes?all_tenants=1',
                                  use_admin_context=True)
    resp = self.controller.index(req)
    self.assertEqual(len(resp['volumes']), 1)
    self.assertEqual(resp['volumes'][0]['name'], 'vol3')
def test_volume_show(self):
    """Show returns the full serialized volume and caches it on the request."""
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    req = fakes.HTTPRequest.blank('/v2/volumes/1')
    res_dict = self.controller.show(req, '1')
    expected = {
        'volume': {
            'status': 'fakestatus',
            'description': 'displaydesc',
            'encrypted': False,
            'availability_zone': 'fakeaz',
            'bootable': 'false',
            'consistencygroup_id': None,
            'name': 'displayname',
            'replication_status': 'disabled',
            'multiattach': False,
            'attachments': [],
            'user_id': 'fakeuser',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
            'size': 1,
            'links': [
                {
                    'href': 'http://localhost/v2/fakeproject/volumes/1',
                    'rel': 'self'
                },
                {
                    'href': 'http://localhost/fakeproject/volumes/1',
                    'rel': 'bookmark'
                }
            ],
        }
    }
    self.assertEqual(res_dict, expected)
    # Finally test that we cached the returned volume
    self.assertIsNotNone(req.cached_resource_by_id('1'))
def test_volume_show_no_attachments(self):
    """A detached volume serializes with an empty attachments list."""
    def stub_volume_get(self, context, volume_id, **kwargs):
        # Force the stub volume into the detached state.
        return stubs.stub_volume(volume_id, attach_status='detached')

    self.stubs.Set(volume_api.API, 'get', stub_volume_get)
    req = fakes.HTTPRequest.blank('/v2/volumes/1')
    res_dict = self.controller.show(req, '1')
    expected = {
        'volume': {
            'status': 'fakestatus',
            'description': 'displaydesc',
            'encrypted': False,
            'availability_zone': 'fakeaz',
            'bootable': 'false',
            'consistencygroup_id': None,
            'name': 'displayname',
            'replication_status': 'disabled',
            'multiattach': False,
            'attachments': [],
            'user_id': 'fakeuser',
            'volume_type': 'vol_type_name',
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'readonly': 'False'},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
            'size': 1,
            'links': [
                {
                    'href': 'http://localhost/v2/fakeproject/volumes/1',
                    'rel': 'self'
                },
                {
                    'href': 'http://localhost/fakeproject/volumes/1',
                    'rel': 'bookmark'
                }
            ],
        }
    }
    self.assertEqual(res_dict, expected)
def test_volume_show_no_volume(self):
    """Show on a missing volume yields HTTP 404 and caches nothing."""
    self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
    request = fakes.HTTPRequest.blank('/v2/volumes/1')
    self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                      request, 1)
    # Finally test that nothing was cached
    self.assertIsNone(request.cached_resource_by_id('1'))
def test_volume_show_with_admin_metadata(self):
    """Show as admin exposes readonly admin metadata, hides invisible keys."""
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    # "invisible_key" must never surface in the API view.
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1', }
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(),
                       attachment['id'], stubs.FAKE_UUID, None, '/')
    req = fakes.HTTPRequest.blank('/v2/volumes/1')
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.show(req, '1')
    expected = {
        'volume': {
            'status': 'in-use',
            'description': 'displaydesc',
            'encrypted': False,
            'availability_zone': 'fakeaz',
            'bootable': 'false',
            'consistencygroup_id': None,
            'name': 'displayname',
            'replication_status': 'disabled',
            'multiattach': False,
            'attachments': [
                {
                    'attachment_id': attachment['id'],
                    'device': '/',
                    'server_id': stubs.FAKE_UUID,
                    'host_name': None,
                    'id': '1',
                    'volume_id': '1'
                }
            ],
            'user_id': 'fakeuser',
            'volume_type': None,
            'snapshot_id': None,
            'source_volid': None,
            'metadata': {'key': 'value',
                         'readonly': 'True'},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
            'size': 1,
            'links': [
                {
                    'href': 'http://localhost/v2/fakeproject/volumes/1',
                    'rel': 'self'
                },
                {
                    'href': 'http://localhost/fakeproject/volumes/1',
                    'rel': 'bookmark'
                }
            ],
        }
    }
    self.assertEqual(res_dict, expected)
def test_volume_show_with_encrypted_volume(self):
    """A volume with an encryption key id serializes as encrypted=True."""
    def fake_get(self, context, volume_id, **kwargs):
        return stubs.stub_volume(volume_id, encryption_key_id='fake_id')
    self.stubs.Set(volume_api.API, 'get', fake_get)
    request = fakes.HTTPRequest.blank('/v2/volumes/1')
    result = self.controller.show(request, 1)
    self.assertEqual(result['volume']['encrypted'], True)
def test_volume_show_with_unencrypted_volume(self):
    """A volume without an encryption key id serializes as encrypted=False."""
    def fake_get(self, context, volume_id, **kwargs):
        return stubs.stub_volume(volume_id, encryption_key_id=None)
    self.stubs.Set(volume_api.API, 'get', fake_get)
    request = fakes.HTTPRequest.blank('/v2/volumes/1')
    result = self.controller.show(request, 1)
    self.assertEqual(result['volume']['encrypted'], False)
def test_volume_delete(self):
    """Deleting an existing volume returns HTTP 202 Accepted."""
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    request = fakes.HTTPRequest.blank('/v2/volumes/1')
    response = self.controller.delete(request, 1)
    self.assertEqual(response.status_int, 202)
def test_volume_delete_attached(self):
    """Deleting an attached volume yields HTTP 400 with a clear message."""
    def fake_delete_attached(self, context, volume, force=False):
        raise exception.VolumeAttached(volume_id=volume['id'])
    self.stubs.Set(volume_api.API, "delete", fake_delete_attached)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    request = fakes.HTTPRequest.blank('/v2/volumes/1')
    raised = self.assertRaises(webob.exc.HTTPBadRequest,
                               self.controller.delete,
                               request, 1)
    self.assertEqual("Volume cannot be deleted while in attached state",
                     six.text_type(raised))
def test_volume_delete_no_volume(self):
    """Deleting a missing volume yields HTTP 404."""
    self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
    request = fakes.HTTPRequest.blank('/v2/volumes/1')
    self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                      request, 1)
def test_admin_list_volumes_limited_to_project(self):
    """Without all_tenants, even an admin sees only their project's volumes."""
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stubs.stub_volume_get_all_by_project)
    request = fakes.HTTPRequest.blank('/v2/fake/volumes',
                                      use_admin_context=True)
    result = self.controller.index(request)
    self.assertIn('volumes', result)
    self.assertEqual(1, len(result['volumes']))
def test_admin_list_volumes_all_tenants(self):
    """With all_tenants=1, an admin sees volumes across all projects."""
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stubs.stub_volume_get_all_by_project)
    request = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1',
                                      use_admin_context=True)
    result = self.controller.index(request)
    self.assertIn('volumes', result)
    self.assertEqual(3, len(result['volumes']))
def test_all_tenants_non_admin_gets_all_tenants(self):
    """all_tenants=1 from a non-admin is ignored: only own volumes listed."""
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stubs.stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    request = fakes.HTTPRequest.blank('/v2/fake/volumes?all_tenants=1')
    result = self.controller.index(request)
    self.assertIn('volumes', result)
    self.assertEqual(1, len(result['volumes']))
def test_non_admin_get_by_project(self):
    """A plain (non-admin) listing is scoped to the caller's project."""
    self.stubs.Set(db, 'volume_get_all_by_project',
                   stubs.stub_volume_get_all_by_project)
    self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
    request = fakes.HTTPRequest.blank('/v2/fake/volumes')
    result = self.controller.index(request)
    self.assertIn('volumes', result)
    self.assertEqual(1, len(result['volumes']))
def _create_volume_bad_request(self, body):
    """Helper: POSTing *body* to the create endpoint must raise HTTP 400."""
    request = fakes.HTTPRequest.blank('/v2/fake/volumes')
    request.method = 'POST'
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request, body)
def test_create_no_body(self):
    """Create with no request body is rejected with HTTP 400."""
    self._create_volume_bad_request(body=None)
def test_create_missing_volume(self):
    """Create whose body lacks a 'volume' key is rejected with HTTP 400."""
    bad_body = {'foo': {'a': 'b'}}
    self._create_volume_bad_request(body=bad_body)
def test_create_malformed_entity(self):
    """Create whose 'volume' value is not a dict is rejected with HTTP 400."""
    bad_body = {'volume': 'string'}
    self._create_volume_bad_request(body=bad_body)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_string(self, get_all):
    """A plain-string filter value is forwarded to get_all unchanged."""
    request = mock.MagicMock()
    ctxt = mock.Mock()
    request.environ = {'cinder.context': ctxt}
    request.params = {'display_name': 'Volume-573108026'}
    self.controller._view_builder.detail_list = mock.Mock()
    self.controller._get_volumes(request, True)
    get_all.assert_called_once_with(
        ctxt, None, None,
        sort_keys=['created_at'], sort_dirs=['desc'],
        filters={'display_name': 'Volume-573108026'},
        viewable_admin_meta=True)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_list(self, get_all):
    """A stringified list filter value is parsed into a real list."""
    request = mock.MagicMock()
    ctxt = mock.Mock()
    request.environ = {'cinder.context': ctxt}
    request.params = {'id': "['1', '2', '3']"}
    self.controller._view_builder.detail_list = mock.Mock()
    self.controller._get_volumes(request, True)
    get_all.assert_called_once_with(
        ctxt, None, None,
        sort_keys=['created_at'], sort_dirs=['desc'],
        filters={'id': ['1', '2', '3']}, viewable_admin_meta=True)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_expression(self, get_all):
    """The 'name' query parameter is translated to a display_name filter."""
    request = mock.MagicMock()
    ctxt = mock.Mock()
    request.environ = {'cinder.context': ctxt}
    request.params = {'name': "d-"}
    self.controller._view_builder.detail_list = mock.Mock()
    self.controller._get_volumes(request, True)
    get_all.assert_called_once_with(
        ctxt, None, None,
        sort_keys=['created_at'], sort_dirs=['desc'],
        filters={'display_name': 'd-'}, viewable_admin_meta=True)
class VolumeSerializerTest(test.TestCase):
    """Tests for the XML serializers used by the v2 volumes API."""

    def _verify_volume_attachment(self, attach, tree):
        # Every attachment field is serialized as a string XML attribute.
        for attr in ('id', 'volume_id', 'server_id', 'device'):
            self.assertEqual(str(attach[attr]), tree.get(attr))

    def _verify_volume(self, vol, tree):
        """Check that XML element *tree* matches the volume dict *vol*."""
        self.assertEqual(tree.tag, NS + 'volume')
        for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
                     'name', 'description', 'volume_type', 'bootable',
                     'snapshot_id', 'source_volid'):
            self.assertEqual(str(vol[attr]), tree.get(attr))
        for child in tree:
            self.assertIn(child.tag, (NS + 'attachments', NS + 'metadata'))
            # Bug fix: child tags are namespace-qualified (see the assertIn
            # just above), so the comparisons must include the NS prefix.
            # The previous bare-string comparisons never matched, which left
            # both verification branches below as dead code.
            if child.tag == NS + 'attachments':
                self.assertEqual(1, len(child))
                self.assertEqual(NS + 'attachment', child[0].tag)
                self._verify_volume_attachment(vol['attachments'][0], child[0])
            elif child.tag == NS + 'metadata':
                not_seen = set(vol['metadata'].keys())
                for gr_child in child:
                    self.assertIn(gr_child.get("key"), not_seen)
                    self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
                                     gr_child.text)
                    not_seen.remove(gr_child.get('key'))
                # All metadata entries must have appeared in the XML.
                self.assertEqual(0, len(not_seen))

    def test_volume_show_create_serializer(self):
        """A single volume serializes with all fields round-tripping."""
        serializer = volumes.VolumeTemplate()
        raw_volume = dict(
            id='vol_id',
            status='vol_status',
            size=1024,
            availability_zone='vol_availability',
            bootable=False,
            created_at=timeutils.utcnow(),
            attachments=[
                dict(
                    id='vol_id',
                    volume_id='vol_id',
                    server_id='instance_uuid',
                    device='/foo'
                )
            ],
            name='vol_name',
            description='vol_desc',
            volume_type='vol_type',
            snapshot_id='snap_id',
            source_volid='source_volid',
            metadata=dict(
                foo='bar',
                baz='quux',
            ),
        )
        text = serializer.serialize(dict(volume=raw_volume))
        tree = etree.fromstring(text)
        self._verify_volume(raw_volume, tree)

    def test_volume_index_detail_serializer(self):
        """A list of volumes serializes as a <volumes> element, in order."""
        serializer = volumes.VolumesTemplate()
        raw_volumes = [
            dict(
                id='vol1_id',
                status='vol1_status',
                size=1024,
                availability_zone='vol1_availability',
                bootable=True,
                created_at=timeutils.utcnow(),
                attachments=[
                    dict(
                        id='vol1_id',
                        volume_id='vol1_id',
                        server_id='instance_uuid',
                        device='/foo1'
                    )
                ],
                name='vol1_name',
                description='vol1_desc',
                volume_type='vol1_type',
                snapshot_id='snap1_id',
                source_volid=None,
                metadata=dict(foo='vol1_foo',
                              bar='vol1_bar', ), ),
            dict(
                id='vol2_id',
                status='vol2_status',
                size=1024,
                availability_zone='vol2_availability',
                bootable=False,
                created_at=timeutils.utcnow(),
                attachments=[dict(id='vol2_id',
                                  volume_id='vol2_id',
                                  server_id='instance_uuid',
                                  device='/foo2')],
                name='vol2_name',
                description='vol2_desc',
                volume_type='vol2_type',
                snapshot_id='snap2_id',
                source_volid=None,
                metadata=dict(foo='vol2_foo',
                              bar='vol2_bar', ), )]
        text = serializer.serialize(dict(volumes=raw_volumes))
        tree = etree.fromstring(text)
        self.assertEqual(NS + 'volumes', tree.tag)
        self.assertEqual(len(raw_volumes), len(tree))
        for idx, child in enumerate(tree):
            self._verify_volume(raw_volumes[idx], child)
class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
    """Tests for the XML deserializer used by the v2 volume create API."""

    def setUp(self):
        super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
        self.deserializer = volumes.CreateDeserializer()

    def test_minimal_volume(self):
        """A volume element with only a size attribute deserializes."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        size="1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_name(self):
        """The name attribute is picked up alongside size."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        size="1"
        name="Volume-xml"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "name": "Volume-xml",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_description(self):
        """The description attribute is picked up as well."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        size="1"
        name="Volume-xml"
        description="description"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "name": "Volume-xml",
                "description": "description",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_volume_type(self):
        """The volume_type attribute is picked up as well."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        size="1"
        name="Volume-xml"
        description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
        request = self.deserializer.deserialize(self_request)
        # Bug fix: the expected dict previously listed "name" twice; a
        # duplicate key in a dict literal silently discards the first entry.
        expected = {
            "volume": {
                "size": "1",
                "name": "Volume-xml",
                "description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_availability_zone(self):
        """The availability_zone attribute is picked up as well."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        size="1"
        name="Volume-xml"
        description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
        availability_zone="us-east1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "name": "Volume-xml",
                "description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_metadata(self):
        """Nested <metadata><meta> elements become a metadata dict."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        name="Volume-xml"
        size="1">
        <metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "name": "Volume-xml",
                "size": "1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        self.assertEqual(expected, request['body'])

    def test_full_volume(self):
        """All supported attributes and metadata deserialize together."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        size="1"
        name="Volume-xml"
        description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
        availability_zone="us-east1">
        <metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "name": "Volume-xml",
                "description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        self.assertEqual(expected, request['body'])

    def test_imageref(self):
        """The imageRef attribute is picked up as well."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        size="1"
        name="Volume-xml"
        description="description"
        imageRef="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "name": "Volume-xml",
                "description": "description",
                "imageRef": "4a90189d-d702-4c7c-87fc-6608c554d737",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_snapshot_id(self):
        """The snapshot_id attribute is picked up as well."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        size="1"
        name="Volume-xml"
        description="description"
        snapshot_id="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "name": "Volume-xml",
                "description": "description",
                "snapshot_id": "4a90189d-d702-4c7c-87fc-6608c554d737",
            },
        }
        self.assertEqual(expected, request['body'])

    def test_source_volid(self):
        """The source_volid attribute is picked up as well."""
        self_request = """
<volume xmlns="http://docs.openstack.org/api/openstack-volume/2.0/content"
        size="1"
        name="Volume-xml"
        description="description"
        source_volid="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "name": "Volume-xml",
                "description": "description",
                "source_volid": "4a90189d-d702-4c7c-87fc-6608c554d737",
            },
        }
        self.assertEqual(expected, request['body'])
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import logging
import multiprocessing
import os
import sys
import tempfile
from copy import deepcopy
from functools import cmp_to_key
from functools import partial
import sh
from six.moves import configparser
from dlrn.build import build_worker
from dlrn.config import ConfigOptions
from dlrn.config import getConfigOptions
from dlrn.config import setup_logging
from dlrn.db import CIVote
from dlrn.db import closeSession
from dlrn.db import Commit
from dlrn.db import getCommits
from dlrn.db import getLastBuiltCommit
from dlrn.db import getLastProcessedCommit
from dlrn.db import getSession
from dlrn.db import Project
from dlrn.notifications import sendnotifymail
from dlrn.notifications import submit_review
from dlrn.reporting import genreports
from dlrn.repositories import getsourcebranch
from dlrn.rpmspecfile import RpmSpecCollection
from dlrn.rpmspecfile import RpmSpecFile
from dlrn.rsync import sync_repo
from dlrn.rsync import sync_symlinks
from dlrn.utils import aggregate_repo_files
from dlrn.utils import dumpshas2file
from dlrn.utils import import_object
from dlrn.utils import isknownerror
from dlrn.utils import lock_file
from dlrn.utils import saveYAML_commit
from dlrn.utils import timesretried
from dlrn import version
logger = logging.getLogger("dlrn")
def deprecation():
    """Entry point kept for the legacy 'delorean' console script.

    We will still call main(), but indicate that this way of calling the
    application is deprecated.
    """
    print("Using the 'delorean' command has been deprecated. Please use 'dlrn'"
          " instead.")
    main()
def _add_commits(project_toprocess, toprocess, options, session):
    """Append commits from project_toprocess to toprocess, skipping rebuilds.

    The first entry in the list of commits is a commit we have already
    processed; we only want to process it again in dev mode or if the
    distro hash has changed. We cannot simply check against the last commit
    in the db, as multiple commits can have the same commit date.

    An extra check covers a rare corner case: if two commits A and B share
    the exact same dt_commit, a first pass will build either just the last
    one (if we switched tags) or both. If we built the last one (A), we do
    not want to build the other (B), because B would be a previous commit.
    The only way to prevent this is to verify that we have not built a
    commit with the same dt_commit and the same distro and extended hashes.
    This could only be an issue if, for some reason, we wanted to discard
    commit A and build commit B later, but that can be worked around by
    adding a change to the distgit.
    """
    for candidate in project_toprocess:
        # Dev mode and --run always rebuild; skip the database checks.
        if options.dev is True or options.run:
            toprocess.append(candidate)
            continue
        # Already built with identical commit/distro/extended hashes?
        already_built = session.query(Commit).filter(
            Commit.commit_hash == candidate.commit_hash,
            Commit.distro_hash == candidate.distro_hash,
            Commit.extended_hash == candidate.extended_hash,
            Commit.type == candidate.type,
            Commit.status != "RETRY").all()
        if already_built:
            continue
        # Already built a commit with the same date and the same distro and
        # extended hashes (the corner case described above)?
        same_date_built = session.query(Commit).filter(
            Commit.dt_commit == candidate.dt_commit,
            Commit.distro_hash == candidate.distro_hash,
            Commit.extended_hash == candidate.extended_hash,
            Commit.type == candidate.type,
            Commit.status != "RETRY").all()
        if not same_date_built:
            toprocess.append(candidate)
def main():
    """Command-line entry point for dlrn.

    Parses the command line, computes the list of commits to build for the
    selected packages, builds them (sequentially or through a worker pool)
    and post-processes the results (repo creation, reports, notifications).
    Returns a process exit code (0 on success); some informational modes
    (--status, --recheck) terminate via sys.exit() instead.
    """
    # As a first step, make sure we use the right multiprocessing start method
    # The default fork method used in Linux can lead to issues when mixing
    # multiprocessing and multithreading
    if sys.version_info >= (3, 6):
        if not multiprocessing.get_start_method(allow_none=True):
            multiprocessing.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file',
                        default='projects.ini',
                        help="Config file. Default: projects.ini")
    parser.add_argument('--config-override', action='append',
                        help="Override a configuration option from the"
                             " config file. Specify it as: "
                             "section.option=value. Can be used multiple "
                             "times if more than one override is needed.")
    parser.add_argument('--info-repo',
                        help="use a local distroinfo repo instead of"
                             " fetching the default one. Only applies when"
                             " pkginfo_driver is rdoinfo or downstream in"
                             " projects.ini")
    parser.add_argument('--build-env', action='append',
                        help="Variables for the build environment.")
    parser.add_argument('--local', action="store_true",
                        help="Use local git repos if possible. Only commited"
                             " changes in the local repo will be used in the"
                             " build.")
    parser.add_argument('--head-only', action="store_true",
                        help="Build from the most recent Git commit only.")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--project-name', action='append',
                       help="Build a specific project name only."
                            " Use multiple times to build more than one "
                            "project in a run.")
    group.add_argument('--package-name', action='append',
                       help="Build a specific package name only."
                            " Use multiple times to build more than one "
                            "package in a run.")
    parser.add_argument('--dev', action="store_true",
                        help="Don't reset packaging git repo, force build "
                             "and add public master repo for dependencies "
                             "(dev mode).")
    parser.add_argument('--log-commands', action="store_true",
                        help="Log the commands run by dlrn.")
    parser.add_argument('--use-public', action="store_true",
                        help="Use the public master repo for dependencies "
                             "when doing install verification.")
    parser.add_argument('--order', action="store_true",
                        help="Compute the build order according to the spec "
                             "files instead of the dates of the commits. "
                             "Implies --sequential.")
    parser.add_argument('--sequential', action="store_true",
                        help="Run all actions sequentially, regardless of the"
                             " number of workers specified in projects.ini.")
    parser.add_argument('--status', action="store_true",
                        help="Get the status of packages.")
    parser.add_argument('--recheck', action="store_true",
                        help="Force a rebuild for a particular package. "
                             "Implies --package-name")
    parser.add_argument('--force-recheck', action="store_true",
                        help="Force a rebuild for a particular package, even "
                             "if its last build was successful. Requires setting "
                             "allow_force_rechecks=True in projects.ini. "
                             "Implies --package-name and --recheck")
    parser.add_argument('--version',
                        action='version',
                        version=version.version_info.version_string())
    parser.add_argument('--run',
                        help="Run a program instead of trying to build. "
                             "Implies --head-only")
    parser.add_argument('--stop', action="store_true",
                        help="Stop on error.")
    parser.add_argument('--verbose-build', action="store_true",
                        help="Show verbose output during the package build.")
    parser.add_argument('--verbose-mock', action="store_true",
                        help=argparse.SUPPRESS)
    parser.add_argument('--no-repo', action="store_true",
                        help="Do not generate a repo with all the built "
                             "packages.")
    parser.add_argument('--debug', action='store_true',
                        help="Print debug logs")

    options = parser.parse_args(sys.argv[1:])
    setup_logging(options.debug)

    # --verbose-mock is a hidden, deprecated alias for --verbose-build.
    if options.verbose_mock:
        logger.warning('The --verbose-mock command-line option is deprecated.'
                       ' Please use --verbose-build instead.')
        options.verbose_build = options.verbose_mock
    cp = configparser.RawConfigParser()
    cp.read(options.config_file)

    if options.log_commands is True:
        logging.getLogger("sh.command").setLevel(logging.INFO)
    # Spec-file ordering only makes sense when builds run one at a time.
    if options.order is True:
        options.sequential = True

    config_options = ConfigOptions(cp, overrides=options.config_override)
    if options.dev:
        # Dev mode works against a throw-away sqlite db so the real one
        # is never polluted; it is removed at the end of the run.
        _, tmpdb_path = tempfile.mkstemp()
        logger.info("Using file %s for temporary db" % tmpdb_path)
        config_options.database_connection = "sqlite:///%s" % tmpdb_path
    config_options.verbose_build = options.verbose_build

    session = getSession(config_options.database_connection)
    pkginfo_driver = config_options.pkginfo_driver
    # NOTE: pkginfo is stored as a module-level global because other
    # functions in this module use it after main() sets it up.
    global pkginfo
    pkginfo = import_object(pkginfo_driver, cfg_options=config_options)
    packages = pkginfo.getpackages(local_info_repo=options.info_repo,
                                   tags=config_options.tags,
                                   dev_mode=options.dev)

    if options.project_name:
        pkg_names = [p['name'] for p in packages
                     if p['project'] in options.project_name]
    elif options.package_name:
        pkg_names = options.package_name
    else:
        pkg_names = None

    if options.status is True:
        # Report the last known status per package/build type and exit.
        if not pkg_names:
            pkg_names = [p['name'] for p in packages]
        for name in pkg_names:
            package = [p for p in packages if p['name'] == name][0]
            for build_type in package.get('types', ['rpm']):
                commit = getLastProcessedCommit(
                    session, name, 'invalid status',
                    type=build_type)
                if commit:
                    print("{:>9}".format(build_type), name, commit.status)
                else:
                    print("{:>9}".format(build_type), name, 'NO_BUILD')
        sys.exit(0)

    if pkg_names:
        pkg_name = pkg_names[0]
    else:
        pkg_name = None

    def recheck_commit(commit, force):
        # Remove the last commit record so the next run rebuilds it.
        # Refuses successful commits unless force-rechecks are enabled.
        if commit.status == 'SUCCESS':
            if not force:
                logger.error(
                    "Trying to recheck an already successful commit,"
                    " ignoring. If you want to force it, use --force-recheck"
                    " and set allow_force_rechecks=True in projects.ini")
                sys.exit(1)
            else:
                logger.info("Forcefully rechecking a successfully built "
                            "commit for %s" % commit.project_name)
        elif commit.status == 'RETRY':
            # In this case, we are going to retry anyway, so
            # do nothing and exit
            logger.warning("Trying to recheck a commit in RETRY state,"
                           " ignoring.")
            sys.exit(0)
        # We could set the status to RETRY here, but if we have gone
        # beyond max_retries it wouldn't work as expected. Thus, our
        # only chance is to remove the commit
        session.delete(commit)
        session.commit()
        sys.exit(0)

    if options.recheck is True:
        if not pkg_name:
            logger.error('Please use --package-name or --project-name '
                         'with --recheck.')
            sys.exit(1)
        if options.force_recheck and config_options.allow_force_rechecks:
            force_recheck = True
        else:
            force_recheck = False
        package = [p for p in packages if p['name'] == pkg_name][0]
        for build_type in package.get('types', ['rpm']):
            commit = getLastProcessedCommit(session, pkg_name, type=build_type)
            if commit:
                recheck_commit(commit, force_recheck)
            else:
                logger.error("There are no existing commits for package %s",
                             pkg_name)
                sys.exit(1)

    # when we run a program instead of building we don't care about
    # the commits, we just want to run once per package
    if options.run:
        options.head_only = True

    # Build a list of commits we need to process
    toprocess = []
    skipped_list = []

    if not pkg_name and not pkg_names:
        # Full run: gather per-package commit info in parallel.
        pool = multiprocessing.Pool()   # This will use all the system cpus
        # Use functools.partial to iterate on the packages to process,
        # while keeping a few options fixed
        getinfo_wrapper = partial(getinfo, local=options.local,
                                  dev_mode=options.dev,
                                  head_only=options.head_only,
                                  db_connection=config_options.
                                  database_connection,
                                  branch=config_options.source,
                                  pkginfo=pkginfo)
        iterator = pool.imap(getinfo_wrapper, packages)
        while True:
            try:
                project_toprocess, updated_pkg, skipped = iterator.next()
                # Propagate an upstream URL discovered by the worker back
                # into our local package list.
                for package in packages:
                    if package['name'] == updated_pkg['name']:
                        if package['upstream'] == 'Unknown':
                            package['upstream'] = updated_pkg['upstream']
                            logger.debug(
                                "Updated upstream for package %s to %s",
                                package['name'], package['upstream'])
                        break
                if skipped:
                    skipped_list.append(updated_pkg['name'])
                _add_commits(project_toprocess, toprocess, options, session)
            except StopIteration:
                break
        pool.close()
        pool.join()
    else:
        # Restricted run: only the explicitly selected packages.
        for package in packages:
            if package['name'] in pkg_names:
                project_toprocess, _, skipped = getinfo(
                    package, local=options.local,
                    dev_mode=options.dev,
                    head_only=options.head_only,
                    db_connection=config_options.database_connection,
                    branch=config_options.source,
                    pkginfo=pkginfo)
                if skipped:
                    skipped_list.append(package['name'])
                _add_commits(project_toprocess, toprocess, options, session)
    closeSession(session)   # Close session, will reopen during post_build

    # Store skip list
    datadir = os.path.realpath(config_options.datadir)
    if not os.path.exists(os.path.join(datadir, 'repos')):
        os.makedirs(os.path.join(datadir, 'repos'))
    with open(os.path.join(datadir, 'repos', 'skiplist.txt'), 'w') as fp:
        for pkg in skipped_list:
            fp.write(pkg + '\n')

    # Check if there is any commit at all to process
    if len(toprocess) == 0:
        if not pkg_name:
            # Use a shorter message if this was a full run
            logger.info("No commits to build.")
        else:
            logger.info("No commits to build. If this is not expected, please"
                        " make sure the package name(s) are correct, and that "
                        "any failed commit you want to rebuild has been "
                        "removed from the database.")
        return 0

    # if requested do a sort according to build and install
    # dependencies
    if options.order is True:
        # collect info from all spec files
        logger.info("Reading rpm spec files")
        projects = sorted([c.project_name for c in toprocess])
        speclist = []
        bootstraplist = []
        for project_name in projects:
            # Preprocess spec if needed
            pkginfo.preprocess(package_name=project_name)
            filename = None
            for f in os.listdir(pkginfo.distgit_dir(project_name)):
                if f.endswith('.spec'):
                    filename = f
            if filename:
                specpath = os.path.join(pkginfo.distgit_dir(project_name),
                                        filename)
                speclist.append(sh.rpmspec('-D', 'repo_bootstrap 1',
                                           '-P', specpath))
                # Check if repo_bootstrap is defined in the package.
                # If so, we'll need to rebuild after the whole bootstrap
                rawspec = open(specpath).read(-1)
                if 'repo_bootstrap' in rawspec:
                    bootstraplist.append(project_name)
            else:
                logger.warning("Could not find a spec for package %s" %
                               project_name)
        logger.debug("Packages to rebuild: %s" % bootstraplist)

        specs = RpmSpecCollection([RpmSpecFile(spec)
                                  for spec in speclist])
        # compute order according to BuildRequires
        logger.info("Computing build order")
        orders = specs.compute_order()
        # hack because the package name is not consistent with the directory
        # name and the spec file name
        if 'python-networking_arista' in orders:
            orders.insert(orders.index('python-networking_arista'),
                          'python-networking-arista')

        # sort the commits according to the score of their project and
        # then use the timestamp of the commits as a secondary key
        def my_cmp(a, b):
            if a.project_name == b.project_name:
                _a = a.dt_commit
                _b = b.dt_commit
            else:
                # Projects missing from the computed order sort last.
                _a = orders.index(a.project_name) if a.project_name in \
                    orders else sys.maxsize
                _b = orders.index(b.project_name) if b.project_name in \
                    orders else sys.maxsize
            # cmp is no longer available in python3 so replace it. See Ordering
            # Comparisons on:
            # https://docs.python.org/3.0/whatsnew/3.0.html
            return (_a > _b) - (_a < _b)
        toprocess.sort(key=cmp_to_key(my_cmp))
    else:
        # sort according to the timestamp of the commits
        toprocess.sort()

    exit_code = 0
    if options.sequential is True:
        # Sequential build: one build_worker call per commit, in order.
        toprocess_copy = deepcopy(toprocess)
        for commit in toprocess:
            status = build_worker(packages, commit, run_cmd=options.run,
                                  build_env=options.build_env,
                                  dev_mode=options.dev,
                                  use_public=options.use_public,
                                  order=options.order, sequential=True,
                                  config_options=config_options,
                                  pkginfo=pkginfo)
            exception = status[3]
            consistent = False
            datadir = os.path.realpath(config_options.datadir)
            with lock_file(os.path.join(datadir, 'remote.lck')):
                session = getSession(config_options.database_connection)
                if exception is not None:
                    logger.error("Received exception %s" % exception)
                    failures = 1
                else:
                    if not options.run:
                        failures = post_build(status, packages, session,
                                              build_repo=not options.no_repo)
                        consistent = (failures == 0)
                # NOTE(review): if --run is set and no exception occurred,
                # 'failures' looks like it may be referenced before
                # assignment here — verify against the full module.
                exit_value = process_build_result(status, packages, session,
                                                  toprocess_copy,
                                                  dev_mode=options.dev,
                                                  run_cmd=options.run,
                                                  stop=options.stop,
                                                  build_env=options.build_env,
                                                  head_only=options.head_only,
                                                  consistent=consistent,
                                                  failures=failures)
                closeSession(session)
                if exit_value != 0:
                    exit_code = exit_value
            if options.stop and exit_code != 0:
                return exit_code
    else:
        # Setup multiprocessing pool
        pool = multiprocessing.Pool(config_options.workers)
        # Use functools.partial to iterate on the commits to process,
        # while keeping a few options fixed
        build_worker_wrapper = partial(build_worker, packages,
                                       run_cmd=options.run,
                                       build_env=options.build_env,
                                       dev_mode=options.dev,
                                       use_public=options.use_public,
                                       order=options.order, sequential=False,
                                       config_options=config_options,
                                       pkginfo=pkginfo)
        iterator = pool.imap(build_worker_wrapper, toprocess)
        while True:
            try:
                status = iterator.next()
                exception = status[3]
                consistent = False
                datadir = os.path.realpath(config_options.datadir)
                with lock_file(os.path.join(datadir, 'remote.lck')):
                    session = getSession(config_options.database_connection)
                    if exception is not None:
                        logger.info("Received exception %s" % exception)
                        failures = 1
                    else:
                        # Create repo, build versions.csv file.
                        # This needs to be sequential
                        if not options.run:
                            failures = post_build(
                                status, packages, session,
                                build_repo=not options.no_repo)
                            consistent = (failures == 0)
                    exit_value = process_build_result(
                        status, packages,
                        session, toprocess,
                        dev_mode=options.dev,
                        run_cmd=options.run,
                        stop=options.stop,
                        build_env=options.build_env,
                        head_only=options.head_only,
                        consistent=consistent,
                        failures=failures)
                    closeSession(session)
                    if exit_value != 0:
                        exit_code = exit_value
                if options.stop and exit_code != 0:
                    return exit_code
            except StopIteration:
                break
        pool.close()
        pool.join()

    # If we were bootstrapping, set the packages that required it to RETRY
    session = getSession(config_options.database_connection)
    if options.order is True and not pkg_name:
        for bpackage in bootstraplist:
            commit = getLastProcessedCommit(session, bpackage)
            commit.status = 'RETRY'
            session.add(commit)
            session.commit()
    genreports(packages, options.head_only, session, [])
    closeSession(session)

    if options.dev:
        # Clean up the temporary dev-mode database.
        os.remove(tmpdb_path)
    return exit_code
def process_build_result(status, *args, **kwargs):
    """Dispatch build-result processing based on the commit's build type.

    status[0] is the Commit object produced by the build worker; its
    ``type`` attribute selects the rpm or container handler.
    """
    build_type = status[0].type
    if build_type == "rpm":
        return process_build_result_rpm(status, *args, **kwargs)
    if build_type == "container":
        return process_build_result_container(status, *args, **kwargs)
    raise Exception("Unknown type %s" % build_type)
def process_build_result_container(
        status, packages, session, packages_to_process,
        dev_mode=False, run_cmd=False, stop=False,
        build_env=None, head_only=False, consistent=False,
        failures=0):
    """Placeholder: container build-result processing is not implemented."""
    raise NotImplementedError()
def process_build_result_rpm(
        status, packages, session, packages_to_process,
        dev_mode=False, run_cmd=False, stop=False,
        build_env=None, head_only=False, consistent=False,
        failures=0):
    """Record the outcome of one rpm build and update the repo state.

    status is the 4-tuple from build_worker: (commit, built_rpms, notes,
    exception). Updates the commit's status (SUCCESS/RETRY/FAILED) in the
    db, sends failure notifications and gerrit reviews where configured,
    regenerates reports, syncs the repo, and refreshes the 'current' and
    'consistent' symlinks. Returns an exit code: 0 ok, 1 failure, 2 retry.
    """
    config_options = getConfigOptions()
    commit = status[0]
    built_rpms = status[1]
    notes = status[2]
    exception = status[3]
    commit_hash = commit.commit_hash
    project = commit.project_name
    project_info = session.query(Project).filter(
        Project.project_name == project).first()
    if not project_info:
        project_info = Project(project_name=project, last_email=0)
    exit_code = 0
    if run_cmd:
        # In --run mode there is nothing to record; only report failure.
        if exception is not None:
            exit_code = 1
            if stop:
                return exit_code
        return exit_code
    if exception is None:
        commit.status = "SUCCESS"
        commit.notes = notes
        commit.artifacts = ",".join(built_rpms)
    else:
        logger.error("Received exception %s" % exception)
        datadir = os.path.realpath(config_options.datadir)
        yumrepodir = os.path.join(datadir, "repos",
                                  commit.getshardedcommitdir())
        logfile = os.path.join(yumrepodir,
                               "rpmbuild.log")
        # If the log file hasn't been created we add what we have
        # This happens if the rpm build script didn't run.
        if not os.path.exists(yumrepodir):
            os.makedirs(yumrepodir)
        if not os.path.exists(logfile):
            with open(logfile, "w") as fp:
                fp.write(str(exception))
        # Known transient errors are retried (exit code 2) until the
        # configured maxretries is reached; anything else is a failure.
        if (isknownerror(logfile) and
            (timesretried(project, session, commit_hash, commit.distro_hash) <
             config_options.maxretries)):
            logger.exception("Known error building packages for %s,"
                             " will retry later" % project)
            commit.status = "RETRY"
            commit.notes = str(exception)
            # do not switch from an error exit code to a retry
            # exit code
            if exit_code != 1:
                exit_code = 2
        else:
            exit_code = 1
            if not project_info.suppress_email():
                sendnotifymail(packages, commit)
                project_info.sent_email()
                session.add(project_info)
            # allow to submit a gerrit review only if the last build
            # was successful or non existent to avoid creating a gerrit
            # review for the same problem multiple times.
            if config_options.gerrit is not None:
                if build_env:
                    env_vars = list(build_env)
                else:
                    env_vars = []
                last_build = getLastProcessedCommit(session, project)
                if not last_build or last_build.status == 'SUCCESS':
                    try:
                        submit_review(commit, packages, env_vars)
                    except Exception:
                        logger.error('Unable to create review '
                                     'see review.log')
                else:
                    logger.info('Last build not successful '
                                'for %s' % project)
            commit.status = "FAILED"
            commit.notes = str(exception)
        if stop:
            return exit_code
    # Add commit to the session
    session.add(commit)
    genreports(packages, head_only, session, packages_to_process)
    # Export YAML file containing commit metadata
    export_commit_yaml(commit)
    try:
        sync_repo(commit)
    except Exception as e:
        logger.error('Repo sync failed for project %s' % project)
        consistent = False  # If we were consistent before, we are not anymore
        if exit_code == 0:  # The commit was ok, so marking as failed
            exit_code = 1
            # We need to make the commit status be "failed"
            commit.status = "FAILED"
            commit.notes = str(e)
            session.add(commit)
            # And open a review if needed
            if config_options.gerrit is not None:
                if build_env:
                    env_vars = list(build_env)
                else:
                    env_vars = []
                try:
                    submit_review(commit, packages, env_vars)
                except Exception:
                    logger.error('Unable to create review '
                                 'see review.log')
    session.commit()
    # Generate the current and consistent symlinks
    if exception is None:
        dirnames = ['current']
        datadir = os.path.realpath(config_options.datadir)
        yumrepodir = os.path.join(datadir, "repos",
                                  commit.getshardedcommitdir())
        yumrepodir_abs = os.path.join(datadir, yumrepodir)
        if consistent:
            dirnames.append('consistent')
        else:
            if config_options.use_components:
                logger.info('%d packages not built correctly for component'
                            ' %s: not updating the consistent symlink' %
                            (failures, commit.component))
            else:
                logger.info('%d packages not built correctly: not updating'
                            ' the consistent symlink' % failures)
        for dirname in dirnames:
            if config_options.use_components:
                target_repo_dir = os.path.join(datadir, "repos/component",
                                               commit.component, dirname)
                source_repo_dir = os.path.join(datadir, "repos/component",
                                               commit.component)
            else:
                target_repo_dir = os.path.join(datadir, "repos", dirname)
                source_repo_dir = os.path.join(datadir, "repos")
            # Create the new symlink under a temporary name, then rename
            # over the old one so the switch is atomic.
            os.symlink(os.path.relpath(yumrepodir_abs, source_repo_dir),
                       target_repo_dir + "_")
            os.rename(target_repo_dir + "_", target_repo_dir)
        # If using components, synchronize the upper-level repo files
        if config_options.use_components:
            for dirname in dirnames:
                aggregate_repo_files(dirname, datadir, session,
                                     config_options.reponame,
                                     hashed_dir=True)
            # And synchronize them
            sync_symlinks(commit)
    if dev_mode is False:
        if consistent:
            # We have a consistent repo. Let's create a CIVote entry in the DB
            vote = CIVote(commit_id=commit.id, ci_name='consistent',
                          ci_url='', ci_vote=True, ci_in_progress=False,
                          timestamp=int(commit.dt_build), notes='',
                          component=commit.component)
            session.add(vote)
            session.commit()
    return exit_code
def export_commit_yaml(commit):
    """Export a commit.yaml metadata file into the commit's repo directory."""
    options = getConfigOptions()
    base_dir = os.path.realpath(options.datadir)
    repo_dir = os.path.join(base_dir, "repos", commit.getshardedcommitdir())
    saveYAML_commit(commit, os.path.join(repo_dir, 'commit.yaml'))
def post_build(status, *args, **kwargs):
    """Dispatch post-build processing based on the commit type in status[0].

    :raises Exception: when the commit type is neither "rpm" nor "container".
    """
    commit_type = status[0].type
    if commit_type == "rpm":
        return post_build_rpm(status, *args, **kwargs)
    if commit_type == "container":
        return post_build_container(status, *args, **kwargs)
    raise Exception("Unknown type %s" % commit_type)
def post_build_container(status, packages, session, build_repo=None):
    """Container post-build processing; placeholder, not implemented yet."""
    raise NotImplementedError()
def post_build_rpm(status, packages, session, build_repo=True):
    """Post-process a finished RPM build.

    Writes a versions.csv manifest covering every package represented in the
    repo, symlinks artifacts from other projects' last successful builds into
    the new repo directory, optionally runs createrepo, and writes the .repo
    file pointing at the new repository.

    :param status: tuple of (commit, built_rpms) for the finished build.
    :param packages: list of package definition dicts.
    :param session: database session used to look up other projects' commits.
    :param build_repo: when True, create repo metadata with createrepo and
        write the .repo file.
    :return: number of packages whose last processed build was not SUCCESS.
    """
    config_options = getConfigOptions()
    commit = status[0]
    built_rpms = status[1]
    project_name = commit.project_name
    commit_hash = commit.commit_hash
    datadir = os.path.realpath(config_options.datadir)
    yumrepodir = os.path.join("repos", commit.getshardedcommitdir())
    yumrepodir_abs = os.path.join(datadir, yumrepodir)
    failures = 0
    # Context manager ensures versions.csv is closed (and flushed) even if a
    # DB or filesystem operation below raises -- the original left the handle
    # open on any exception. It is fully closed before createrepo runs.
    with open(os.path.join(yumrepodir_abs, "versions.csv"), "w") as shafile:
        shafile.write("Project,Source Repo,Source Sha,Dist Repo,Dist Sha,"
                      "Status,Last Success Timestamp,Component,Extended Sha,"
                      "Pkg NVR\n")
        for otherproject in packages:
            if (config_options.use_components and
                    'component' in otherproject and
                    otherproject['component'] != commit.component):
                # Only dump information and create symlinks for the same
                # component
                continue
            otherprojectname = otherproject["name"]
            if otherprojectname == project_name:
                # Output sha's this project
                dumpshas2file(shafile, commit, otherproject["upstream"],
                              otherproject["master-distgit"], "SUCCESS",
                              commit.dt_build, commit.component, built_rpms)
                continue
            # Output sha's of all other projects represented in this repo
            last_success = getCommits(session, project=otherprojectname,
                                      with_status="SUCCESS",
                                      type=commit.type).first()
            last_processed = getCommits(session, project=otherprojectname,
                                        type=commit.type).first()
            if last_success:
                if build_repo:
                    # Link the last successful artifacts into the new repo
                    for rpm in last_success.artifacts.split(","):
                        rpm_link_src = os.path.join(yumrepodir_abs,
                                                    os.path.split(rpm)[1])
                        os.symlink(os.path.relpath(
                            os.path.join(datadir, rpm), yumrepodir_abs),
                            rpm_link_src)
                last = last_success
            else:
                last = last_processed
            if last:
                if last.artifacts:
                    rpmlist = last.artifacts.split(",")
                else:
                    rpmlist = []
                upstream = otherproject.get('upstream', '')
                dumpshas2file(shafile, last, upstream,
                              otherproject["master-distgit"],
                              last_processed.status, last.dt_build,
                              commit.component, rpmlist)
                if last_processed.status != 'SUCCESS':
                    failures += 1
            else:
                failures += 1
    if build_repo:
        # Use createrepo_c when available
        try:
            from sh import createrepo_c
            sh.createrepo = createrepo_c
        except ImportError:
            pass
        if config_options.include_srpm_in_repo:
            sh.createrepo(yumrepodir_abs)
        else:
            sh.createrepo('-x', '*.src.rpm', yumrepodir_abs)
        with open(os.path.join(
                yumrepodir_abs, "%s.repo" % config_options.reponame),
                "w") as fp:
            if config_options.use_components:
                repo_id = "%s-component-%s" % (config_options.reponame,
                                               commit.component)
            else:
                repo_id = config_options.reponame
            fp.write("[%s]\nname=%s-%s-%s\nbaseurl=%s/%s\nenabled=1\n"
                     "gpgcheck=0\npriority=1\n" % (
                         repo_id,
                         config_options.reponame,
                         project_name, commit_hash,
                         config_options.baseurl,
                         commit.getshardedcommitdir()))
    return failures
def getinfo(package, local=False, dev_mode=False, head_only=False,
            db_connection=None, branch=None, pkginfo=None, type="rpm"):
    """Return the commits that still need to be built for a package.

    Uses the last processed commit for the project (and its source branch)
    to ask the pkginfo driver for every commit since then.

    :returns: tuple (project_toprocess, package, skipped) where
        project_toprocess is the list of commits to build.
    """
    project = package["name"]
    # "-1" is a sentinel meaning "no usable previous commit": only the most
    # recent change will be kept (see the del at the bottom).
    since = "-1"
    session = getSession(db_connection)
    commit = getLastProcessedCommit(session, project, type=type)
    if commit:
        # If we have switched source branches, we want to behave
        # as if no previous commits had been built, and only build
        # the last one
        if commit.commit_branch == getsourcebranch(package,
                                                   default_branch=branch):
            # This will return all commits since the last handled commit
            # including the last handled commit, remove it later if needed.
            since = "--after=%d" % (commit.dt_commit)
        else:
            # The last processed commit belongs to a different branch. Just
            # in case, let's check if we built a previous commit from the
            # current branch
            commit = getLastBuiltCommit(session, project,
                                        getsourcebranch(package,
                                                        default_branch=branch),
                                        type=type)
            if commit:
                logger.info("Last commit belongs to another branch, but"
                            " we're ok with that")
                since = "--after=%d" % (commit.dt_commit)
                # In any case, we just want to build the last commit, if any
                head_only = True
    project_toprocess, skipped = pkginfo.getinfo(
        project=project, package=package,
        since=since, local=local,
        dev_mode=dev_mode, type=type)
    closeSession(session)
    # If since == -1, then we only want to trigger a build for the
    # most recent change
    if since == "-1" or head_only:
        # Keep only the newest commit (last list element).
        del project_toprocess[:-1]
    return project_toprocess, package, skipped
| |
"""
Contains all the Django fields for Select2.
"""
import logging
logger = logging.getLogger(__name__)
class AutoViewFieldMixin(object):
    """
    Mixin that registers Auto fields with the central AutoResponseView.

    Every Auto field must inherit from this mixin so that a shared instance
    is available to answer the Ajax queries for that field class.

    .. warning:: Do not forget to include ``'django_select2.urls'`` in your
        url conf, else the central view used to serve Auto fields won't be
        available.
    """
    def __init__(self, *args, **kwargs):
        """
        Registers this field instance under a unique key.

        :param auto_id: Registration key for this field. When omitted, the
            fully qualified class name is used, so all instances of one Auto
            field class share a single registered instance for answering
            Ajax queries. Pass a never-before-used value here if an instance
            needs to be isolated.
        :type auto_id: :py:obj:`unicode`
        """
        default_key = u"%s.%s" % (self.__module__, self.__class__.__name__)
        name = kwargs.pop('auto_id', default_key)
        if logger.isEnabledFor(logging.INFO):
            logger.info("Registering auto field: %s", name)
        from . import util
        self.field_id = util.register_field(name, self)
        super(AutoViewFieldMixin, self).__init__(*args, **kwargs)

    def security_check(self, request, *args, **kwargs):
        """
        Hook for rejecting unauthorized Ajax requests.

        :param request: The Ajax request object.
        :type request: :py:class:`django.http.HttpRequest`
        :param args: The ``*args`` passed to :py:meth:`django.views.generic.base.View.dispatch`.
        :param kwargs: The ``**kwargs`` passed to :py:meth:`django.views.generic.base.View.dispatch`.
        :return: A boolean value, signalling if check passed or failed.
        :rtype: :py:obj:`bool`

        .. warning:: Sub-classes should override this; otherwise random
            people making Http requests to your server may get access to
            sensitive information.
        """
        return True

    def get_results(self, request, term, page, context):
        "See :py:meth:`.views.Select2View.get_results`."
        raise NotImplementedError
import copy
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms.models import ModelChoiceIterator
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
from django.core.validators import EMPTY_VALUES
from .widgets import Select2Widget, Select2MultipleWidget,\
HeavySelect2Widget, HeavySelect2MultipleWidget, AutoHeavySelect2Widget, \
AutoHeavySelect2MultipleWidget
from .views import NO_ERR_RESP
from .util import extract_some_key_val
### Light general fields ###
class Select2ChoiceField(forms.ChoiceField):
    """
    Drop-in Select2 replacement for :py:class:`forms.ChoiceField`.
    """
    # Light counterpart of the Heavy fields below: no Ajax data_view is
    # involved, choices behave exactly as in the stock Django field.
    widget = Select2Widget
class Select2MultipleChoiceField(forms.MultipleChoiceField):
    """
    Drop-in Select2 replacement for :py:class:`forms.MultipleChoiceField`.
    """
    # Light counterpart of the Heavy fields below: no Ajax data_view is
    # involved, choices behave exactly as in the stock Django field.
    widget = Select2MultipleWidget
### Model fields related mixins ###
class ModelResultJsonMixin(object):
    """
    Makes ``heavy_data.js`` parsable JSON response for queries on its model.

    On query it uses :py:meth:`.prepare_qs_params` to prepare query attributes
    which it then passes to ``self.queryset.filter()`` to get the results.

    It is expected that sub-classes will define a class field variable
    ``search_fields``, which should be a list of field names to search for.
    """
    def __init__(self, *args, **kwargs):
        """
        Class constructor.

        :param queryset: This can be passed as kwarg here or defined as a
            class field variable, like ``search_fields``.
        :type queryset: :py:class:`django.db.models.query.QuerySet` or None
        :param max_results: Maximum number of results to return per Ajax query.
        :type max_results: :py:obj:`int`
        :param to_field_name: Which field's value should be returned as result
            tuple's value. (Default is ``pk``, i.e. the id field of the model)
        :type to_field_name: :py:obj:`str`
        """
        # Fix: dict.has_key() is deprecated in Python 2 and removed in
        # Python 3; the 'in' operator is equivalent and portable.
        if self.queryset is None and 'queryset' not in kwargs:
            raise ValueError('queryset is required.')
        if not self.search_fields:
            raise ValueError('search_fields is required.')
        self.max_results = getattr(self, 'max_results', None)
        self.to_field_name = getattr(self, 'to_field_name', 'pk')
        super(ModelResultJsonMixin, self).__init__(*args, **kwargs)

    def label_from_instance(self, obj):
        """
        Sub-classes should override this to generate custom label texts for values.

        :param obj: The model object.
        :type obj: :py:class:`django.model.Model`
        :return: The label string.
        :rtype: :py:obj:`unicode`
        """
        return smart_unicode(obj)

    def prepare_qs_params(self, request, search_term, search_fields):
        """
        Prepares queryset parameters to use for searching.

        :param search_term: The search term.
        :type search_term: :py:obj:`str`
        :param search_fields: The list of search fields. This is the same as
            ``self.search_fields``.
        :type search_fields: :py:obj:`list`
        :return: A dictionary of parameters to 'or' and 'and' together,
            shaped like::

                {'or': [Q(...) | Q(...) | ...], 'and': {...}}

            which is then coaxed into ``filter()`` as::

                queryset.filter(*params['or'], **params['and'])

            In this implementation every Q term tests one of the search
            fields against ``search_term``, and the 'and' part is always
            empty. E.g. with ``search_term == 'John'`` and
            ``search_fields == ['first_name__icontains',
            'last_name__icontains']`` the prepared query is::

                {
                    'or': [
                        Q(first_name__icontains='John') |
                        Q(last_name__icontains='John')
                    ],
                    'and': {}
                }

        :rtype: :py:obj:`dict`
        """
        q = None
        for field in search_fields:
            kwargs = {}
            kwargs[field] = search_term
            if q is None:
                q = Q(**kwargs)
            else:
                q = q | Q(**kwargs)
        return {'or': [q], 'and': {},}

    def get_results(self, request, term, page, context):
        """
        See :py:meth:`.views.Select2View.get_results`.

        This implementation takes care of detecting if more results are
        available.
        """
        # NOTE(review): deepcopy of a queryset is heavyweight; kept as-is to
        # avoid behavioral change, but qs.all() would give a fresh clone.
        qs = copy.deepcopy(self.queryset)
        params = self.prepare_qs_params(request, term, self.search_fields)
        if self.max_results:
            min_ = (page - 1) * self.max_results
            # Fetch one extra row to detect whether more pages exist.
            max_ = min_ + self.max_results + 1
            res = list(qs.filter(*params['or'], **params['and'])[min_:max_])
            has_more = len(res) == (max_ - min_)
            if has_more:
                res = res[:-1]
        else:
            res = list(qs.filter(*params['or'], **params['and']))
            has_more = False
        res = [(getattr(obj, self.to_field_name), self.label_from_instance(obj), )
               for obj in res]
        return (NO_ERR_RESP, has_more, res, )
class UnhideableQuerysetType(type):
    """
    Metaclass that keeps the mixins' ``queryset`` property from being hidden.

    Users may declare ``queryset`` as a plain class-level variable on
    sub-classes; left alone, that attribute would shadow the ``queryset``
    property defined by the mixins. It is therefore stashed aside at
    class-creation time and re-injected as the ``queryset`` keyword argument
    on instantiation.
    """
    # TODO check for alternatives. Maybe this hack is not necessary.
    def __new__(cls, name, bases, dct):
        declared = dct.get('queryset', None)
        if declared is not None and not isinstance(declared, property):
            # Move the class-level queryset aside so the property defined
            # by the mixins stays visible on the new class.
            del dct['queryset']
            dct['_subclass_queryset'] = declared
        return type.__new__(cls, name, bases, dct)

    def __call__(cls, *args, **kwargs):
        # Feed the stashed queryset back in unless the caller provided a
        # (truthy) one explicitly.
        if not kwargs.get('queryset', None) and hasattr(cls, '_subclass_queryset'):
            kwargs['queryset'] = cls._subclass_queryset
        return type.__call__(cls, *args, **kwargs)
class ChoiceMixin(object):
    """
    Mixin providing a ``choices`` property.

    Assigning to ``choices`` mirrors the value onto ``self.widget.choices``
    so the field and its widget always agree on the available options.
    """
    def _get_choices(self):
        # Before the first assignment there is no _choices attribute;
        # report an empty choice list in that case.
        return getattr(self, '_choices', [])

    def _set_choices(self, value):
        # The incoming value may be any iterable; materialize it as a list
        # because it will be consumed more than once, and mirror it onto
        # the widget.
        self._choices = self.widget.choices = list(value)

    choices = property(_get_choices, _set_choices)

    def __deepcopy__(self, memo):
        clone = super(ChoiceMixin, self).__deepcopy__(memo)
        clone._choices = copy.deepcopy(self._choices, memo)
        return clone
class QuerysetChoiceMixin(ChoiceMixin):
    """
    Overrides the ``choices`` getter to return an instance of
    :py:class:`.ModelChoiceIterator` instead.
    """
    def _get_choices(self):
        try:
            # A manually assigned value (via the choices setter) wins.
            return self._choices
        except AttributeError:
            # Otherwise hand back a *fresh* ModelChoiceIterator on every
            # access, so the underlying QuerySet is evaluated lazily and is
            # never handed out in a half-consumed state.
            return ModelChoiceIterator(self)

    choices = property(_get_choices, ChoiceMixin._set_choices)
class ModelChoiceFieldMixin(object):
    """
    Common constructor plumbing for the model-backed Select2 fields.

    Filters the keyword arguments down to those understood by Django's model
    choice fields, resolves the widget, and wires up the placeholder when
    the widget supports one.
    """
    def __init__(self, *args, **kwargs):
        queryset = kwargs.pop('queryset', None)
        passthrough = [
            'empty_label', 'cache_choices', 'required', 'label', 'initial',
            'help_text',
        ]
        kargs = extract_some_key_val(kwargs, passthrough)
        kargs['widget'] = kwargs.pop('widget', getattr(self, 'widget', None))
        kargs['to_field_name'] = kwargs.pop('to_field_name', 'pk')
        # HeavySelect2FieldBase may have set _choices already; the model
        # machinery supplies its own choices, so discard it.
        if hasattr(self, '_choices'):
            del self._choices
        super(ModelChoiceFieldMixin, self).__init__(queryset, **kargs)
        if hasattr(self, 'set_placeholder'):
            self.widget.set_placeholder(self.empty_label)

    def _get_queryset(self):
        # Implicitly returns None until a queryset has been assigned.
        if hasattr(self, '_queryset'):
            return self._queryset
### Slightly altered versions of the Django counterparts with the same name in forms module. ###
class ModelChoiceField(ModelChoiceFieldMixin, forms.ModelChoiceField):
    # Re-wire queryset so reads go through the mixin's lazy getter while
    # writes reuse Django's own setter.
    queryset = property(ModelChoiceFieldMixin._get_queryset, forms.ModelChoiceField._set_queryset)
class ModelMultipleChoiceField(ModelChoiceFieldMixin, forms.ModelMultipleChoiceField):
    # Re-wire queryset so reads go through the mixin's lazy getter while
    # writes reuse Django's own setter.
    queryset = property(ModelChoiceFieldMixin._get_queryset, forms.ModelMultipleChoiceField._set_queryset)
### Light Fileds specialized for Models ###
class ModelSelect2Field(ModelChoiceField):
    """
    Light Select2 field, specialized for Models.

    Select2 replacement for :py:class:`forms.ModelChoiceField`.
    """
    widget = Select2Widget
class ModelSelect2MultipleField(ModelMultipleChoiceField):
    """
    Light multiple-value Select2 field, specialized for Models.

    Select2 replacement for :py:class:`forms.ModelMultipleChoiceField`.
    """
    widget = Select2MultipleWidget
### Heavy fields ###
class HeavySelect2FieldBaseMixin(object):
    """
    Base mixin field for all Heavy fields.

    .. note:: Heavy fields accept a ``choices`` parameter like all Django
        choice fields, but since these fields are backed by big data sources,
        ``choices`` can only ever be a subset of all possible values. It is
        accepted because users might expect it to be available.
    """
    def __init__(self, *args, **kwargs):
        """
        Class constructor.

        :param data_view: A :py:class:`~.views.Select2View` sub-class which can
            respond to this widget's Ajax queries.
        :type data_view: :py:class:`django.views.generic.base.View` or None

        :param widget: A widget instance.
        :type widget: :py:class:`django.forms.widgets.Widget` or None

        .. warning:: Either ``data_view`` or ``widget`` must be specified,
            otherwise :py:exc:`ValueError` is raised.
        """
        data_view = kwargs.pop('data_view', None)
        choices = kwargs.pop('choices', [])

        kargs = {}
        if data_view is not None:
            kargs['widget'] = self.widget(data_view=data_view)
        elif kwargs.get('widget', None) is None:
            raise ValueError('data_view is required else you need to provide your own widget instance.')
        kargs.update(kwargs)
        super(HeavySelect2FieldBaseMixin, self).__init__(*args, **kargs)

        # self.widget is an instance by now. Django's Field.__init__ does not
        # call super(), so AutoViewFieldMixin cannot simply sit at the end of
        # the MRO and set field_id on the widget instance itself; propagate
        # it here instead.
        if hasattr(self, 'field_id'):
            self.widget.field_id = self.field_id

        # ModelChoiceField may have replaced choices with a
        # ModelChoiceIterator; prefer that when no choices were passed in.
        if not choices and hasattr(self, 'choices'):
            choices = self.choices
        self.choices = choices
class HeavyChoiceField(ChoiceMixin, forms.Field):
    """
    Reimplements :py:class:`django.forms.TypedChoiceField` in a way which
    suits the use of big data.

    .. note:: Although this field accepts a ``choices`` parameter like all
        Django choice fields, these fields are backed by big data sources,
        so ``choices`` cannot possibly have all the values. It is meant to
        be a subset of all possible choices.
    """
    default_error_messages = {
        'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
    }
    empty_value = u''
    "Sub-classes can set this other value if needed."

    def __init__(self, *args, **kwargs):
        super(HeavyChoiceField, self).__init__(*args, **kwargs)
        # Widget should have been instantiated by now; give it a back
        # reference so it can ask the field for value labels.
        self.widget.field = self

    def to_python(self, value):
        """Coerce ``value``; empty values map to ``self.empty_value``."""
        if value == self.empty_value or value in validators.EMPTY_VALUES:
            return self.empty_value
        try:
            value = self.coerce_value(value)
        except (ValueError, TypeError, ValidationError):
            raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
        return value

    def validate(self, value):
        super(HeavyChoiceField, self).validate(value)
        if value and not self.valid_value(value):
            raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})

    def valid_value(self, value):
        """Accept values present in ``choices`` or vouched for by
        :py:meth:`.validate_value`."""
        uvalue = smart_unicode(value)
        for k, v in self.choices:
            if uvalue == smart_unicode(k):
                return True
        return self.validate_value(value)

    def coerce_value(self, value):
        """
        Coerces ``value`` to a Python data type.

        Sub-classes should override this if they do not want unicode values.
        """
        return smart_unicode(value)

    def validate_value(self, value):
        """
        Sub-classes can override this to validate the value entered against
        the big data.

        :param value: Value entered by the user.
        :type value: As coerced by :py:meth:`.coerce_value`.

        :return: ``True`` means the ``value`` is valid.
        """
        return True

    def _get_val_txt(self, value):
        try:
            value = self.coerce_value(value)
            self.validate_value(value)
        except Exception:
            # Fix: "except Exception, e" is Python 2-only syntax and the
            # bound name was unused anyway; logger.exception() records the
            # active exception with its traceback on its own.
            logger.exception("Exception while trying to get label for value")
            return None
        return self.get_val_txt(value)

    def get_val_txt(self, value):
        """
        If Heavy widgets encounter any value which they can't find in
        ``choices`` then they call this method to get the label for the value.

        :param value: Value entered by the user.
        :type value: As coerced by :py:meth:`.coerce_value`.

        :return: The label for this value.
        :rtype: :py:obj:`unicode` or None (when no possible label could be found)
        """
        return None
class HeavyMultipleChoiceField(HeavyChoiceField):
    """
    Reimplements :py:class:`django.forms.TypedMultipleChoiceField` in a way
    which suits the use of big data.

    .. note:: As with :py:class:`.HeavyChoiceField`, the ``choices`` passed
        in can only be a subset of all possible values of the backing big
        data source.
    """
    hidden_widget = forms.MultipleHiddenInput
    default_error_messages = {
        'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
        'invalid_list': _(u'Enter a list of values.'),
    }

    def to_python(self, value):
        if not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['invalid_list'])
        return [self.coerce_value(item) for item in value]

    def validate(self, value):
        if self.required and not value:
            raise ValidationError(self.error_messages['required'])
        # Each entry must be in self.choices or accepted by the big data
        # backend (i.e. validate_value() returns True).
        for item in value:
            if not self.valid_value(item):
                raise ValidationError(self.error_messages['invalid_choice'] % {'value': item})
class HeavySelect2ChoiceField(HeavySelect2FieldBaseMixin, HeavyChoiceField):
    "Heavy Select2 Choice field."
    # Single-value Ajax-backed field; needs data_view or an explicit widget
    # (see HeavySelect2FieldBaseMixin.__init__).
    widget = HeavySelect2Widget
class HeavySelect2MultipleChoiceField(HeavySelect2FieldBaseMixin, HeavyMultipleChoiceField):
    "Heavy Select2 Multiple Choice field."
    # Multi-value Ajax-backed field; needs data_view or an explicit widget
    # (see HeavySelect2FieldBaseMixin.__init__).
    widget = HeavySelect2MultipleWidget
### Heavy field specialized for Models ###
class HeavyModelSelect2ChoiceField(HeavySelect2FieldBaseMixin, ModelChoiceField):
    "Heavy Select2 Choice field, specialized for Models."
    widget = HeavySelect2Widget

    def __init__(self, *args, **kwargs):
        # Model fields derive their choices from the queryset; any
        # explicitly passed choices are discarded.
        if 'choices' in kwargs:
            del kwargs['choices']
        super(HeavyModelSelect2ChoiceField, self).__init__(*args, **kwargs)
class HeavyModelSelect2MultipleChoiceField(HeavySelect2FieldBaseMixin, ModelMultipleChoiceField):
    "Heavy Select2 Multiple Choice field, specialized for Models."
    widget = HeavySelect2MultipleWidget

    def __init__(self, *args, **kwargs):
        # Model fields derive their choices from the queryset; any
        # explicitly passed choices are discarded.
        if 'choices' in kwargs:
            del kwargs['choices']
        super(HeavyModelSelect2MultipleChoiceField, self).__init__(*args, **kwargs)
### Heavy general field that uses central AutoView ###
class AutoSelect2Field(AutoViewFieldMixin, HeavySelect2ChoiceField):
    """
    Auto Heavy Select2 field.

    This needs to be subclassed. The first instance of a class (sub-class)
    is used to serve all incoming json query requests for that type (class).

    .. warning:: :py:exc:`NotImplementedError` is thrown if
        :py:meth:`get_results` is not implemented.
    """
    widget = AutoHeavySelect2Widget

    def __init__(self, *args, **kwargs):
        # All Ajax queries are routed through the shared central view.
        self.data_view = "django_select2_central_json"
        kwargs['data_view'] = self.data_view
        super(AutoSelect2Field, self).__init__(*args, **kwargs)
class AutoSelect2MultipleField(AutoViewFieldMixin, HeavySelect2MultipleChoiceField):
    """
    Auto Heavy Select2 field for multiple choices.

    This needs to be subclassed. The first instance of a class (sub-class)
    is used to serve all incoming json query requests for that type (class).

    .. warning:: :py:exc:`NotImplementedError` is thrown if
        :py:meth:`get_results` is not implemented.
    """
    widget = AutoHeavySelect2MultipleWidget

    def __init__(self, *args, **kwargs):
        # All Ajax queries are routed through the shared central view.
        self.data_view = "django_select2_central_json"
        kwargs['data_view'] = self.data_view
        super(AutoSelect2MultipleField, self).__init__(*args, **kwargs)
### Heavy field, specialized for Model, that uses central AutoView ###
class AutoModelSelect2Field(ModelResultJsonMixin, AutoViewFieldMixin, HeavyModelSelect2ChoiceField):
    """
    Auto Heavy Select2 field, specialized for Models.

    This needs to be subclassed. The first instance of a class (sub-class)
    is used to serve all incoming json query requests for that type (class).
    """
    # Makes sure a user-defined class-level queryset is replaced by the
    # queryset property the super classes need (see UnhideableQuerysetType).
    __metaclass__ = UnhideableQuerysetType

    widget = AutoHeavySelect2Widget

    def __init__(self, *args, **kwargs):
        # All Ajax queries are routed through the shared central view.
        self.data_view = "django_select2_central_json"
        kwargs['data_view'] = self.data_view
        super(AutoModelSelect2Field, self).__init__(*args, **kwargs)
class AutoModelSelect2MultipleField(ModelResultJsonMixin, AutoViewFieldMixin, HeavyModelSelect2MultipleChoiceField):
    """
    Auto Heavy Select2 field for multiple choices, specialized for Models.

    This needs to be subclassed. The first instance of a class (sub-class)
    is used to serve all incoming json query requests for that type (class).
    """
    # Makes sure a user-defined class-level queryset is replaced by the
    # queryset property the super classes need (see UnhideableQuerysetType).
    __metaclass__ = UnhideableQuerysetType

    widget = AutoHeavySelect2MultipleWidget

    def __init__(self, *args, **kwargs):
        # All Ajax queries are routed through the shared central view.
        self.data_view = "django_select2_central_json"
        kwargs['data_view'] = self.data_view
        super(AutoModelSelect2MultipleField, self).__init__(*args, **kwargs)
| |
#!/usr/bin/env python3
## -*- coding: utf-8 -*-
##
## Jonathan Salwan - 2018-10-26
##
## Description: Solution of the unbreakable challenge from the Google 2016 CTF.
## In this solution, we fully emulate the binary and we solve each branch
## to go through the good path.
##
## Output:
##
## $ time python3 ./solve.py
## [+] Loading 0x400040 - 0x400200
## [+] Loading 0x400200 - 0x40021c
## [+] Loading 0x400000 - 0x403df4
## [+] Loading 0x604000 - 0x604258
## [+] Loading 0x604018 - 0x6041e8
## [+] Loading 0x40021c - 0x400260
## [+] Loading 0x403590 - 0x40378c
## [+] Loading 0x000000 - 0x000000
## [+] Hooking strncpy
## [+] Hooking puts
## [+] Hooking printf
## [+] Hooking __libc_start_main
## [+] Hooking exit
## [+] Starting emulation.
## [+] __libc_start_main hooked
## [+] argv[0] = ./unbreakable-enterprise-product-activation
## [+] argv[1] = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
## [+] strncpy hooked
## [+] puts hooked
## Thank you - product activated!
## [+] exit hooked
## Flag: CTF{0The1Quick2Brown3Fox4Jumped5Over6The7Lazy8Fox9}
## python3 solve.py 8.04s user 0.02s system 99% cpu 8.060 total
##
from __future__ import print_function
from triton import *
import random
import string
import sys
import lief
import os
# Path of the target binary, resolved relative to this script.
TARGET = os.path.join(os.path.dirname(__file__), 'unbreakable-enterprise-product-activation')

# Toggle verbose tracing of the emulation.
DEBUG = True


def debug(s):
    """Print *s* when DEBUG tracing is enabled."""
    if DEBUG:
        print(s)
# Memory mapping for the emulated process layout.
BASE_PLT = 0x10000000    # fake PLT slots for the hooked libc routines (see customRelocation)
BASE_ARGV = 0x20000000   # where argv strings and the argv pointer array are written
BASE_STACK = 0x9fffffff  # stack base; presumably assigned to rsp in setup code not shown here — confirm
# These instruction conditions must set zf to 1.
# Addresses of the comparison instructions along the validation path: when
# execution reaches one of these, emulate() pushes the path constraint
# "zf == 1" so the solver keeps us on the "good" branch.
conditions = [
    0x402819,
    0x402859,
    0x4028A3,
    0x4028F3,
    0x402927,
    0x402969,
    0x4029A9,
    0x4029E0,
    0x402A1F,
    0x402A56,
    0x402A99,
    0x402AD9,
    0x402B07,
    0x402B37,
    0x402B79,
    0x402BA7,
    0x402BD7,
    0x402C22,
    0x402C69,
    0x402CA9,
    0x402CD7,
    0x402D22,
    0x402D73,
    0x402DB0,
    0x402DF9,
    0x402E43,
    0x402E89,
    0x402EC9,
    0x402EF7,
    0x402F30,
    0x402F79,
    0x402FB9,
    0x402FF9,
    0x403039,
    0x403079,
    0x4030C5,
    0x403109,
    0x403149,
    0x403189,
    0x4031B7,
    0x4031F9,
    0x403239,
    0x403270,
    0x4032B0,
    0x403302,
    0x403337,
    0x403379,
    0x4033B9,
    0x4033F0,
    0x403427,
    0x403472,
]
def getMemoryString(ctx, addr):
    """Read a NUL-terminated string from emulated memory.

    Non-printable characters are dropped rather than kept, so the result
    contains only what string.printable admits.
    """
    chars = []
    offset = 0
    while True:
        byte = ctx.getConcreteMemoryValue(addr + offset)
        if not byte:
            break
        ch = chr(byte)
        if ch in string.printable:
            chars.append(ch)
        offset += 1
    return ''.join(chars)
def getFormatString(ctx, addr):
    """Translate the C printf-style format string at *addr* into a Python
    str.format() template.

    "%*s" is stripped entirely; every other handled conversion becomes a
    "{...}" placeholder.
    """
    # Fix: the original expression ended in a stray "\" line continuation,
    # which silently glued the next source line (a comment) onto this
    # statement and would become a syntax error if that line ever changed.
    # NOTE(review): "%02X" maps to lowercase "{:02x}" — possibly
    # intentional; confirm against the target binary's expected output.
    return getMemoryString(ctx, addr) \
        .replace("%s", "{}").replace("%d", "{:d}").replace("%#02x", "{:#02x}") \
        .replace("%#x", "{:#x}").replace("%x", "{:x}").replace("%02X", "{:02x}") \
        .replace("%c", "{:c}").replace("%02x", "{:02x}").replace("%ld", "{:d}") \
        .replace("%*s", "").replace("%lX", "{:x}").replace("%08x", "{:08x}") \
        .replace("%u", "{:d}").replace("%lu", "{:d}")
# Simulate the printf() function
def printfHandler(ctx):
    """Emulated printf: formats up to five integer arguments and returns
    the number of characters produced."""
    debug('[+] printf hooked')

    # x86-64 SysV calling convention: rdi holds the format string pointer,
    # the remaining integer arguments arrive in rsi, rdx, rcx, r8, r9.
    fmt = getFormatString(ctx, ctx.getConcreteRegisterValue(ctx.registers.rdi))
    arg_regs = (ctx.registers.rsi, ctx.registers.rdx, ctx.registers.rcx,
                ctx.registers.r8, ctx.registers.r9)
    values = [ctx.getConcreteRegisterValue(r) for r in arg_regs]

    # One "{" per conversion after getFormatString's translation.
    rendered = fmt.format(*values[:fmt.count("{")])
    if DEBUG:
        sys.stdout.write(rendered)

    # Return value
    return len(rendered)
# Simulate the putchar() function
def putcharHandler(ctx):
    """Emulated putchar: prints the character held in rdi plus a newline."""
    debug('[+] putchar hooked')
    ch = ctx.getConcreteRegisterValue(ctx.registers.rdi)
    sys.stdout.write(chr(ch) + '\n')
    # Return value
    return 2
# Simulate the puts() function
def putsHandler(ctx):
    """Emulated puts: prints the NUL-terminated string at rdi and returns
    the number of characters written, including the trailing newline."""
    debug('[+] puts hooked')
    text = getMemoryString(ctx, ctx.getConcreteRegisterValue(ctx.registers.rdi))
    sys.stdout.write(text + '\n')
    return len(text) + 1
# Simulate the strncpy() function
def strncpyHandler(ctx):
    """Emulated strncpy: copies cnt bytes from rsi to rdi, propagating both
    the concrete value and the symbolic expression of every byte."""
    debug('[+] strncpy hooked')

    dst = ctx.getConcreteRegisterValue(ctx.registers.rdi)
    src = ctx.getConcreteRegisterValue(ctx.registers.rsi)
    cnt = ctx.getConcreteRegisterValue(ctx.registers.rdx)

    # NOTE(review): unlike libc strncpy, this copies all cnt bytes without
    # stopping at a NUL terminator — appears sufficient for this target.
    for off in range(cnt):
        dst_cell = MemoryAccess(dst + off, 1)
        src_cell = MemoryAccess(src + off, 1)
        # Re-assign the source byte's AST to the destination so constraints
        # keep following the copied data.
        node = ctx.getMemoryAst(src_cell)
        expr = ctx.newSymbolicExpression(node, "strncpy byte")
        ctx.setConcreteMemoryValue(dst_cell, node.evaluate())
        ctx.assignSymbolicExpressionToMemory(expr, dst_cell)

    return dst
def exitHandler(ctx):
    """Emulated exit(): solve the accumulated path predicate for the flag.

    Builds a model where argv[1] bytes 0-3 spell 'CTF{', byte 50 is '}' and
    the bytes in between are constrained to a printable-ish range, then
    prints the recovered flag and terminates the script (exit status 0 iff
    the expected flag was found).
    """
    debug('[+] exit hooked')
    # Exit code passed by the program (unused beyond reading it here).
    ret = ctx.getConcreteRegisterValue(ctx.registers.rdi)
    ast = ctx.getAstContext()
    pco = ctx.getPathPredicate()
    # Ask for a new model which set all symbolic variables to ascii printable characters
    # NOTE(review): range(4, 49) is exclusive of 49, so symbolic variable 49
    # is left unconstrained while 50 is pinned to '}' — presumably intended
    # to be range(4, 50); confirm (the solver still recovers the flag).
    mod = ctx.getModel(ast.land(
        [pco] +
        [ast.variable(ctx.getSymbolicVariable(0)) == ord('C')] +
        [ast.variable(ctx.getSymbolicVariable(1)) == ord('T')] +
        [ast.variable(ctx.getSymbolicVariable(2)) == ord('F')] +
        [ast.variable(ctx.getSymbolicVariable(3)) == ord('{')] +
        [ast.variable(ctx.getSymbolicVariable(50)) == ord('}')] +
        [ast.variable(ctx.getSymbolicVariable(x)) >= 0x30 for x in range(4, 49)] +
        [ast.variable(ctx.getSymbolicVariable(x)) <= 0x7a for x in range(4, 49)] +
        [ast.variable(ctx.getSymbolicVariable(x)) != 0x00 for x in range(4, 49)]
    ))
    # Model entries are keyed by variable id; sorting reconstructs the flag
    # bytes in argv[1] order.
    flag = str()
    for k, v in sorted(mod.items()):
        flag += chr(v.getValue())
    print('Flag: %s' %(flag))
    sys.exit(not (flag == 'CTF{0The1Quick2Brown3Fox4Jumped5Over6The7Lazy8Fox9}'))
def libcMainHandler(ctx):
    """Emulated __libc_start_main: set up the stack and argc/argv, then
    arrange for execution to continue at the program's main().

    argv[1] is filled with 70 'a' bytes and its first 51 bytes are
    symbolized so the path constraints collected later can be solved for
    the real activation key.
    """
    debug('[+] __libc_start_main hooked')
    # Get arguments
    main = ctx.getConcreteRegisterValue(ctx.registers.rdi)
    # Push the return value to jump into the main() function
    ctx.setConcreteRegisterValue(ctx.registers.rsp, ctx.getConcreteRegisterValue(ctx.registers.rsp)-CPUSIZE.QWORD)
    ret2main = MemoryAccess(ctx.getConcreteRegisterValue(ctx.registers.rsp), CPUSIZE.QWORD)
    ctx.setConcreteMemoryValue(ret2main, main)
    # Setup argc / argv
    ctx.concretizeRegister(ctx.registers.rdi)
    ctx.concretizeRegister(ctx.registers.rsi)
    argvs = [
        bytes(TARGET.encode('utf-8')), # argv[0]
        bytes(b'a' * 70),              # argv[1]
    ]
    # Define argc / argv
    # First pass: write each NUL-terminated argv string at BASE_ARGV and
    # remember where it landed.
    base = BASE_ARGV
    addrs = list()
    index = 0
    for argv in argvs:
        addrs.append(base)
        ctx.setConcreteMemoryAreaValue(base, argv+b'\x00')
        base += len(argv)+1
        debug('[+] argv[%d] = %s' %(index, argv))
        index += 1
    argc = len(argvs)
    argv = base
    # Second pass: write the argv pointer array right after the strings.
    for addr in addrs:
        ctx.setConcreteMemoryValue(MemoryAccess(base, CPUSIZE.QWORD), addr)
        base += CPUSIZE.QWORD
    ctx.setConcreteRegisterValue(ctx.registers.rdi, argc)
    ctx.setConcreteRegisterValue(ctx.registers.rsi, argv)
    # Symbolize the first 51 bytes of the argv[1]
    argv1 = ctx.getConcreteMemoryValue(MemoryAccess(ctx.getConcreteRegisterValue(ctx.registers.rsi) + 8, CPUSIZE.QWORD))
    for index in range(51):
        var = ctx.symbolizeMemory(MemoryAccess(argv1+index, CPUSIZE.BYTE))
    return 0
# Functions to emulate.
# Each entry is (symbol name, Python handler, fake PLT address); the GOT
# slot for the symbol is pointed at the fake address so hookingHandler()
# can intercept the call.
customRelocation = [
    ('__libc_start_main', libcMainHandler, BASE_PLT + 0),
    ('exit', exitHandler, BASE_PLT + 1),
    ('printf', printfHandler, BASE_PLT + 2),
    ('putchar', putcharHandler, BASE_PLT + 3),
    ('puts', putsHandler, BASE_PLT + 4),
    ('strncpy', strncpyHandler, BASE_PLT + 5),
]
def hookingHandler(ctx):
    """If RIP sits on one of our fake PLT slots, run the Python handler
    for that routine and simulate its `ret` back to the caller."""
    pc = ctx.getConcreteRegisterValue(ctx.registers.rip)
    for name, handler, plt_addr in customRelocation:
        if plt_addr != pc:
            continue
        # Emulate the routine; a non-None result becomes the return value.
        result = handler(ctx)
        if result is not None:
            ctx.setConcreteRegisterValue(ctx.registers.rax, result)
        # Simulate the 'ret': pop the saved return address into RIP.
        rsp = ctx.getConcreteRegisterValue(ctx.registers.rsp)
        ret_addr = ctx.getConcreteMemoryValue(MemoryAccess(rsp, CPUSIZE.QWORD))
        ctx.setConcreteRegisterValue(ctx.registers.rip, ret_addr)
        ctx.setConcreteRegisterValue(ctx.registers.rsp, rsp + CPUSIZE.QWORD)
    return
# Emulate the binary.
def emulate(ctx, pc):
    """Single-step the target from `pc` until HLT or an unsupported opcode.

    Whenever execution reaches one of the `conditions` addresses, a
    ZF == 1 constraint is pushed (forcing that comparison to pass), a
    model is solved for the current path predicate, and the symbolic
    variables are concretized to that model so emulation continues down
    the "passing" branch.
    """
    global conditions

    count = 0
    while pc:
        # Fetch opcodes (16 bytes covers the longest x86 instruction)
        opcodes = ctx.getConcreteMemoryAreaValue(pc, 16)

        # Create the Triton instruction
        instruction = Instruction()
        instruction.setOpcode(opcodes)
        instruction.setAddress(pc)

        # Process
        if ctx.processing(instruction) == False:
            debug('[-] Instruction not supported: %s' %(str(instruction)))
            break

        count += 1

        #print(instruction)

        if instruction.getType() == OPCODE.X86.HLT:
            break

        # Simulate routines
        hookingHandler(ctx)

        if instruction.getAddress() in conditions:
            # Constrain the just-executed comparison to succeed, then
            # concretize the symbolic inputs to a satisfying model.
            zf = ctx.getSymbolicRegister(ctx.registers.zf).getAst()
            ast = ctx.getAstContext()
            ctx.pushPathConstraint(zf == 1)
            mod = ctx.getModel(ctx.getPathPredicate())
            for k,v in list(mod.items()):
                ctx.setConcreteVariableValue(ctx.getSymbolicVariable(v.getId()), v.getValue())

        # Next
        pc = ctx.getConcreteRegisterValue(ctx.registers.rip)

    debug('[+] Instruction executed: %d' %(count))
    return
def loadBinary(ctx, binary):
    """Copy every program segment of the parsed binary into Triton memory."""
    for segment in binary.segments:
        start = segment.virtual_address
        debug('[+] Loading 0x%06x - 0x%06x' %(start, start + segment.physical_size))
        ctx.setConcreteMemoryAreaValue(start, segment.content)
    return
def makeRelocation(ctx, binary):
    """Patch the binary's relocation slots to point at our hook stubs.

    For every PLT/GOT and dynamic relocation whose symbol name matches an
    entry in `customRelocation`, the hook's fake PLT address is written
    into the relocation slot, so calls to that symbol are intercepted by
    hookingHandler().

    A binary may lack either relocation table (lief raises in that case),
    so each table is processed on a best-effort basis.
    """
    def _hook_relocations(relocations):
        # Write the fake PLT address of each matching hook into the slot.
        for rel in relocations:
            symbolName = rel.symbol.name
            symbolRelo = rel.address
            for crel in customRelocation:
                if symbolName == crel[0]:
                    debug('[+] Hooking %s' %(symbolName))
                    ctx.setConcreteMemoryValue(MemoryAccess(symbolRelo, CPUSIZE.QWORD), crel[2])

    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; a missing relocation table remains a silent no-op.
    try:
        _hook_relocations(binary.pltgot_relocations)
    except Exception:
        pass

    try:
        _hook_relocations(binary.dynamic_relocations)
    except Exception:
        pass

    return
def run(ctx, binary):
    """Set up a fake stack and emulate the binary from its entry point."""
    # Both RBP and RSP start at the same fake stack base.
    for reg in (ctx.registers.rbp, ctx.registers.rsp):
        ctx.setConcreteRegisterValue(reg, BASE_STACK)
    debug('[+] Starting emulation.')
    emulate(ctx, binary.entrypoint)
    debug('[+] Emulation done.')
    return
def main():
    """Build a Triton context, load and hook the target, then emulate it.

    Always returns -1: on the success path the emulated exit() handler
    calls sys.exit() itself with the real status, so reaching this return
    means emulation ended without the flag check completing.
    """
    # Get a Triton context
    ctx = TritonContext()

    # Set the architecture
    ctx.setArchitecture(ARCH.X86_64)

    # Set optimization
    ctx.setMode(MODE.ALIGNED_MEMORY, True)
    ctx.setMode(MODE.ONLY_ON_SYMBOLIZED, True)

    # AST representation in SMT syntax
    ctx.setAstRepresentationMode(AST_REPRESENTATION.SMT)

    # Parse the binary
    binary = lief.parse(TARGET)

    # Load the binary
    loadBinary(ctx, binary)

    # Perform our own relocations
    makeRelocation(ctx, binary)

    # Init and emulate
    run(ctx, binary)
    return -1


if __name__ == '__main__':
    retValue = main()
    sys.exit(retValue)
| |
import bintrees
import cooldict
import claripy
import cffi
import cle
from ..errors import SimMemoryError, SimSegfaultError
from .. import sim_options as options
from .memory_object import SimMemoryObject
from claripy.ast.bv import BV
_ffi = cffi.FFI()
import logging
l = logging.getLogger("angr.storage.paged_memory")
class BasePage(object):
    """
    Page object, allowing for more flexibility than just a raw dict.

    Concrete subclasses (TreePage, ListPage) provide the storage; this
    base class carries the page geometry and the permissions bitvector.
    """

    PROT_READ = 1
    PROT_WRITE = 2
    PROT_EXEC = 4

    def __init__(self, page_addr, page_size, permissions=None, executable=False):
        """
        Create a new page object. Carries permissions information.
        Permissions default to RW unless `executable` is True,
        in which case permissions default to RWX.

        :param int page_addr: The base address of the page.
        :param int page_size: The size of the page.
        :param bool executable: Whether the page is executable. Typically,
                            this will depend on whether the binary has an
                            executable stack.
        :param claripy.AST permissions: A 3-bit bitvector setting specific permissions
                            for EXEC, READ, and WRITE
        """
        self._page_addr = page_addr
        self._page_size = page_size

        if permissions is None:
            perms = Page.PROT_READ|Page.PROT_WRITE
            if executable:
                perms |= Page.PROT_EXEC
            self.permissions = claripy.BVV(perms, 3) # 3 bits is enough for PROT_EXEC, PROT_WRITE, PROT_READ, PROT_NONE
        else:
            self.permissions = permissions

    @property
    def concrete_permissions(self):
        # Symbolic permissions are treated as fully permissive (RWX == 7).
        if self.permissions.symbolic:
            return 7
        else:
            return self.permissions.args[0]

    def contains(self, state, idx):
        """Return True if a stored memory object actually covers address `idx`."""
        m = self.load_mo(state, idx)
        return m is not None and m.includes(idx)

    def _resolve_range(self, mo):
        """Clip `mo`'s [base, last_addr] span to this page.

        Returns a half-open (start, end) range. end <= start means no part
        of the object falls within this page; a warning is logged but the
        (empty) range is still returned for the caller to handle.
        """
        start = max(mo.base, self._page_addr)
        end = min(mo.last_addr + 1, self._page_addr + self._page_size)
        if end <= start:
            l.warning("Nothing left of the memory object to store in SimPage.")
        return start, end

    def store_mo(self, state, new_mo, overwrite=True): #pylint:disable=unused-argument
        """
        Stores a memory object.

        :param new_mo: the memory object
        :param overwrite: whether to overwrite objects already in memory (if false, just fill in the holes)
        """
        start, end = self._resolve_range(new_mo)
        if overwrite:
            self.store_overwrite(state, new_mo, start, end)
        else:
            self.store_underwrite(state, new_mo, start, end)

    def copy(self):
        """Return a copy of this page; storage duplication is delegated to
        the subclass via _copy_args()."""
        return Page(
            self._page_addr, self._page_size,
            permissions=self.permissions,
            **self._copy_args()
        )

    #
    # Abstract functions
    #

    def load_mo(self, state, page_idx):
        """
        Loads a memory object from memory.

        :param page_idx: the index into the page
        :returns: a tuple of the object
        """
        raise NotImplementedError()

    def keys(self):
        raise NotImplementedError()

    def replace_mo(self, state, old_mo, new_mo):
        raise NotImplementedError()

    def store_overwrite(self, state, new_mo, start, end):
        raise NotImplementedError()

    def store_underwrite(self, state, new_mo, start, end):
        raise NotImplementedError()

    def load_slice(self, state, start, end): #pylint:disable=unused-argument
        """
        Return the memory objects overlapping with the provided slice.

        :param start: the start address
        :param end: the end address (non-inclusive)
        :returns: tuples of (starting_addr, memory_object)
        """
        raise NotImplementedError()

    def _copy_args(self):
        raise NotImplementedError()
class TreePage(BasePage):
    """
    Page object, implemented with a bintree.

    `_storage` is an AVL tree mapping a start address to the
    SimMemoryObject anchored there.
    """

    def __init__(self, *args, **kwargs):
        storage = kwargs.pop("storage", None)
        super(TreePage, self).__init__(*args, **kwargs)
        self._storage = bintrees.AVLTree() if storage is None else storage

    def keys(self):
        """Return the set of all byte addresses covered by stored objects."""
        if len(self._storage) == 0:
            return set()
        else:
            return set.union(*(set(range(*self._resolve_range(mo))) for mo in self._storage.values()))

    def replace_mo(self, state, old_mo, new_mo):
        """Swap every tree entry pointing at old_mo (identity match) to new_mo."""
        start, end = self._resolve_range(old_mo)
        possible_items = list(self._storage.item_slice(start, end))
        for a,v in possible_items:
            if v is old_mo:
                #assert new_mo.includes(a)
                self._storage[a] = new_mo

    def store_overwrite(self, state, new_mo, start, end):
        """Store new_mo over [start, end), clobbering overlapped objects.

        An object that extends past `end` is re-anchored at `end` so its
        tail survives the overwrite.
        """
        # get a list of items that we will overwrite
        current_items = list(self._storage.item_slice(start, end + 1))
        updates = { start: new_mo }

        # remove the items we are overwriting
        if not current_items:
            # make sure we aren't overwriting an entire item that starts before
            # the write range and extends past the end of it
            try:
                _, floor_value = self._storage.floor_item(start)
                if floor_value.includes(end):
                    updates[end] = floor_value
            except KeyError:
                pass
        else:
            # make sure we're not overwriting an entire item that starts inside
            # the write range and extends past the end of it
            if end < self._page_addr + self._page_size and current_items[-1][1].includes(end):
                updates[end] = current_items[-1][1]

        # remove existing items
        del self._storage[start:end]

        #assert all(m.includes(i) for i,m in updates.items())

        # store the new stuff
        self._storage.update(updates)

    def store_underwrite(self, state, new_mo, start, end):
        """Store new_mo only into the unoccupied holes of [start, end)."""
        # first, get the current items
        current_items = list(self._storage.item_slice(start, end + 1))

        # go through them backwards and fill in the gaps
        last_missing = end - 1
        updates = { }
        for _,mo in reversed(current_items):
            if not mo.includes(last_missing) and not mo.base > last_missing:
                # this mo does not cover up to the end; we need to fill it in
                updates[mo.last_addr+1] = new_mo
            last_missing = mo.base - 1

        # if the beginning is missing, fill it in and make sure we're not
        # overwriting something we shouldn't be
        if last_missing >= start:
            try:
                _, floor_value = self._storage.floor_item(start)
                if not floor_value.includes(last_missing):
                    updates[max(floor_value.last_addr+1, start)] = new_mo
            except KeyError:
                updates[start] = new_mo

        #assert all(m.includes(i) for i,m in updates.items())

        # apply it
        self._storage.update(updates)

    def load_mo(self, state, page_idx):
        """
        Loads a memory object from memory.

        :param page_idx: the index into the page
        :returns: the object anchored at the greatest start address
                  <= page_idx, or None if the tree has no such entry
        """
        try:
            mo = self._storage.floor_item(page_idx)[1]
        except KeyError:
            mo = None

        return mo

    def load_slice(self, state, start, end):
        """
        Return the memory objects overlapping with the provided slice.

        :param start: the start address
        :param end: the end address (non-inclusive)
        :returns: tuples of (starting_addr, memory_object)
        """
        items = list(self._storage.item_slice(start, end))
        if not items or items[0][0] != start:
            # an object anchored before `start` may still cover it
            try:
                _, floor_mo = self._storage.floor_item(start)
                if floor_mo.includes(start):
                    items.insert(0, (start, floor_mo))
            except KeyError:
                pass
        return items

    def _copy_args(self):
        return { 'storage': bintrees.AVLTree(self._storage) }
class ListPage(BasePage):
    """
    Page object, implemented with a list.

    `_storage` holds one slot per byte of the page; `_sinkhole` is an
    optional whole-page default object returned for empty slots.
    """

    def __init__(self, *args, **kwargs):
        storage = kwargs.pop("storage", None)
        self._sinkhole = kwargs.pop("sinkhole", None)

        super(ListPage, self).__init__(*args, **kwargs)
        self._storage = [ None ] * self._page_size if storage is None else storage

    def keys(self):
        """Return the (absolute) addresses that hold an object; all of the
        page if it is sinkholed."""
        if self._sinkhole is not None:
            return range(self._page_addr, self._page_addr + self._page_size)
        else:
            return [ self._page_addr + i for i,v in enumerate(self._storage) if v is not None ]

    def replace_mo(self, state, old_mo, new_mo):
        """Swap old_mo for new_mo wherever it is stored (identity match)."""
        if self._sinkhole is old_mo:
            self._sinkhole = new_mo
        else:
            start, end = self._resolve_range(old_mo)
            for i in range(start, end):
                if self._storage[i-self._page_addr] is old_mo:
                    self._storage[i-self._page_addr] = new_mo

    def store_overwrite(self, state, new_mo, start, end):
        # A full-page store becomes the sinkhole; otherwise fill per-byte slots.
        if start == self._page_addr and end == self._page_addr + self._page_size:
            self._sinkhole = new_mo
            self._storage = [ None ] * self._page_size
        else:
            for i in range(start, end):
                self._storage[i-self._page_addr] = new_mo

    def store_underwrite(self, state, new_mo, start, end):
        # Like store_overwrite, but only fills slots that are still empty.
        if start == self._page_addr and end == self._page_addr + self._page_size:
            self._sinkhole = new_mo
        else:
            for i in range(start, end):
                if self._storage[i-self._page_addr] is None:
                    self._storage[i-self._page_addr] = new_mo

    def load_mo(self, state, page_idx):
        """
        Loads a memory object from memory.

        :param page_idx: the index into the page
        :returns: the stored object (falling back to the sinkhole), or None
        """
        mo = self._storage[page_idx-self._page_addr]
        return self._sinkhole if mo is None else mo

    def load_slice(self, state, start, end):
        """
        Return the memory objects overlapping with the provided slice.

        :param start: the start address
        :param end: the end address (non-inclusive)
        :returns: tuples of (starting_addr, memory_object)
        """
        items = [ ]
        if start > self._page_addr + self._page_size or end < self._page_addr:
            l.warning("Calling load_slice on the wrong page.")
            return items

        for addr in range(max(start, self._page_addr), min(end, self._page_addr + self._page_size)):
            i = addr - self._page_addr
            mo = self._storage[i]
            if mo is None:
                mo = self._sinkhole
            # collapse runs of the same object into one (start, mo) tuple
            if mo is not None and (not items or items[-1][1] is not mo):
                items.append((addr, mo))
        return items

    def _copy_args(self):
        return { 'storage': list(self._storage), 'sinkhole': self._sinkhole }

# The default page implementation used throughout this module.
Page = ListPage
#pylint:disable=unidiomatic-typecheck
class SimPagedMemory(object):
    """
    Represents paged memory.

    Pages are created lazily from a memory backer and managed
    copy-on-write: branch() shares page objects between copies, and
    _get_page(..., write=True) copies a page into this instance before
    its first mutation. Optional reverse mappings (variable name ->
    addresses, expression hash -> addresses, symbolic-byte map) are
    maintained when the corresponding state options are enabled.

    NOTE(review): this is Python 2 code -- `xrange`, `iteritems`, `long`,
    and integer `/` division are used throughout.
    """
    def __init__(self, memory_backer=None, permissions_backer=None, pages=None, initialized=None, name_mapping=None, hash_mapping=None, page_size=None, symbolic_addrs=None, check_permissions=False):
        # Page numbers owned by this instance (safe to mutate in place).
        self._cowed = set()
        # Backing store for never-written memory: dict or cle.Clemory.
        self._memory_backer = { } if memory_backer is None else memory_backer
        self._permissions_backer = permissions_backer # saved for copying
        self._executable_pages = False if permissions_backer is None else permissions_backer[0]
        self._permission_map = { } if permissions_backer is None else permissions_backer[1]
        self._pages = { } if pages is None else pages
        self._initialized = set() if initialized is None else initialized
        self._page_size = 0x1000 if page_size is None else page_size
        self._symbolic_addrs = dict() if symbolic_addrs is None else symbolic_addrs
        self.state = None
        # Address range exempt from existence checks (the stack); empty by default.
        self._preapproved_stack = xrange(0)
        self._check_perms = check_permissions

        # reverse mapping
        self._name_mapping = cooldict.BranchingDict() if name_mapping is None else name_mapping
        self._hash_mapping = cooldict.BranchingDict() if hash_mapping is None else hash_mapping
        self._updated_mappings = set()

    def __getstate__(self):
        # `state` is dropped (pickled as None); `_cowed` and
        # `_updated_mappings` are deliberately omitted.
        return {
            '_memory_backer': self._memory_backer,
            '_permissions_backer': self._permissions_backer,
            '_executable_pages': self._executable_pages,
            '_permission_map': self._permission_map,
            '_pages': self._pages,
            '_initialized': self._initialized,
            '_page_size': self._page_size,
            'state': None,
            '_name_mapping': self._name_mapping,
            '_hash_mapping': self._hash_mapping,
            '_symbolic_addrs': self._symbolic_addrs,
            '_preapproved_stack': self._preapproved_stack,
            '_check_perms': self._check_perms
        }

    def __setstate__(self, s):
        # NOTE(review): `_updated_mappings` is not restored here (only
        # `_cowed` is re-created) -- confirm it is re-initialized before
        # _mark_updated_mapping() is next called on an unpickled instance.
        self._cowed = set()
        self.__dict__.update(s)

    def branch(self):
        """Fork this memory for a new state; pages are shared copy-on-write."""
        new_name_mapping = self._name_mapping.branch() if options.REVERSE_MEMORY_NAME_MAP in self.state.options else self._name_mapping
        new_hash_mapping = self._hash_mapping.branch() if options.REVERSE_MEMORY_HASH_MAP in self.state.options else self._hash_mapping

        new_pages = dict(self._pages)
        m = SimPagedMemory(memory_backer=self._memory_backer,
                           permissions_backer=self._permissions_backer,
                           pages=new_pages,
                           initialized=set(self._initialized),
                           page_size=self._page_size,
                           name_mapping=new_name_mapping,
                           hash_mapping=new_hash_mapping,
                           symbolic_addrs=dict(self._symbolic_addrs),
                           check_permissions=self._check_perms)
        m._preapproved_stack = self._preapproved_stack
        return m

    def __getitem__(self, addr):
        """Return the memory object covering byte `addr`; KeyError if none."""
        page_num = addr / self._page_size
        page_idx = addr
        #print "GET", addr, page_num, page_idx

        try:
            v = self._get_page(page_num).load_mo(self.state, page_idx)
            return v
        except KeyError:
            raise KeyError(addr)

    def __setitem__(self, addr, v):
        """Store memory object `v` at byte `addr`, updating reverse mappings."""
        page_num = addr / self._page_size
        page_idx = addr
        #print "SET", addr, page_num, page_idx

        # NOTE(review): no Page class in this file defines __setitem__ --
        # confirm this path is actually exercised.
        self._get_page(page_num, write=True, create=True)[page_idx] = v
        self._update_mappings(addr, v.object)
        #print "...",id(self._pages[page_num])

    def __delitem__(self, addr):
        raise Exception("For performance reasons, deletion is not supported. Contact Yan if this needs to change.")
        # Specifically, the above is for two reasons:
        #
        #     1. deleting stuff out of memory doesn't make sense
        #     2. if the page throws a key error, the backer dict is accessed. Thus, deleting things would simply
        #        change them back to what they were in the backer dict

    @property
    def allow_segv(self):
        # Permission checks apply only outside privileged (scratch.priv)
        # sections and when STRICT_PAGE_ACCESS is enabled.
        return self._check_perms and not self.state.scratch.priv and options.STRICT_PAGE_ACCESS in self.state.options

    def load_objects(self, addr, num_bytes, ret_on_segv=False):
        """
        Load memory objects from paged memory.

        :param addr: Address to start loading.
        :param num_bytes: Number of bytes to load.
        :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEV is triggered, otherwise
                                 a SimSegfaultError will be raised.
        :return: list of tuples of (addr, memory_object)
        :rtype: tuple
        """

        result = [ ]
        end = addr + num_bytes

        for page_addr in self._containing_pages(addr, end):
            try:
                #print "Getting page %x" % (page_addr / self._page_size)
                page = self._get_page(page_addr / self._page_size)
                #print "... got it"
            except KeyError:
                #print "... missing"
                #print "... SEGV"
                # missing page
                if self.allow_segv:
                    if ret_on_segv:
                        break
                    raise SimSegfaultError(addr, 'read-miss')
                else:
                    continue

            if self.allow_segv and not page.concrete_permissions & Page.PROT_READ:
                #print "... SEGV"
                # page exists but is not readable
                if ret_on_segv:
                    break
                raise SimSegfaultError(addr, 'non-readable')
            result.extend(page.load_slice(self.state, addr, end))

        return result

    #
    # Page management
    #

    def _create_page(self, page_num, permissions=None):
        """Create a fresh, empty Page for `page_num`."""
        return Page(
            page_num*self._page_size, self._page_size,
            executable=self._executable_pages, permissions=permissions
        )

    def _initialize_page(self, n, new_page):
        """Populate page `n` from the memory backer, at most once.

        Returns True iff any backing data was written into `new_page`.
        Three backer shapes are handled: a cle.Clemory, a "small" dict
        (at most one page of entries, keyed by absolute address), and a
        "large" dict indexed from 0.
        """
        if n in self._initialized:
            return False
        self._initialized.add(n)

        new_page_addr = n*self._page_size
        initialized = False

        # run privileged so backer loads bypass permission checks
        if self.state is not None:
            self.state.scratch.push_priv(True)

        if self._memory_backer is None:
            pass

        elif isinstance(self._memory_backer, cle.Clemory):
            # first, find the right clemory backer
            for addr, backer in self._memory_backer.cbackers:
                start_backer = new_page_addr - addr
                if isinstance(start_backer, BV):
                    continue
                if start_backer < 0 and abs(start_backer) >= self._page_size:
                    continue
                if start_backer >= len(backer):
                    continue

                # find permission backer associated with the address
                # fall back to read-write if we can't find any...
                flags = Page.PROT_READ | Page.PROT_WRITE
                for start, end in self._permission_map:
                    if start <= new_page_addr < end:
                        flags = self._permission_map[(start, end)]
                        break

                # snip out just this page's worth of bytes from the backer
                snip_start = max(0, start_backer)
                write_start = max(new_page_addr, addr + snip_start)
                write_size = self._page_size - write_start%self._page_size

                snip = _ffi.buffer(backer)[snip_start:snip_start+write_size]
                mo = SimMemoryObject(claripy.BVV(snip), write_start)
                self._apply_object_to_page(n*self._page_size, mo, page=new_page)

                new_page.permissions = claripy.BVV(flags, 3)
                initialized = True

        elif len(self._memory_backer) <= self._page_size:
            # small dict backer: keys are absolute addresses
            for i in self._memory_backer:
                if new_page_addr <= i and i <= new_page_addr + self._page_size:
                    if isinstance(self._memory_backer[i], claripy.ast.Base):
                        backer = self._memory_backer[i]
                    else:
                        backer = claripy.BVV(self._memory_backer[i])
                    mo = SimMemoryObject(backer, i)
                    self._apply_object_to_page(n*self._page_size, mo, page=new_page)
                    initialized = True

        elif len(self._memory_backer) > self._page_size:
            # large dict backer: indexed from 0, offset into the page
            for i in range(self._page_size):
                try:
                    if isinstance(self._memory_backer[i], claripy.ast.Base):
                        backer = self._memory_backer[i]
                    else:
                        backer = claripy.BVV(self._memory_backer[i])
                    mo = SimMemoryObject(backer, new_page_addr+i)
                    self._apply_object_to_page(n*self._page_size, mo, page=new_page)
                    initialized = True
                except KeyError:
                    pass

        if self.state is not None:
            self.state.scratch.pop_priv()
        return initialized

    def _get_page(self, page_num, write=False, create=False, initialize=True):
        """Fetch page `page_num`, lazily initializing or creating it.

        With write=True the page is copied first unless this instance
        already owns it (copy-on-write).

        :raises KeyError: if the page does not exist and neither `create`
                          nor backer initialization produces it.
        """
        page_addr = page_num * self._page_size
        try:
            page = self._pages[page_num]
        except KeyError:
            if not (initialize or create or page_addr in self._preapproved_stack):
                raise

            page = self._create_page(page_num)
            self._symbolic_addrs[page_num] = set()
            if initialize:
                initialized = self._initialize_page(page_num, page)
                if not initialized and not create and page_addr not in self._preapproved_stack:
                    raise

            self._pages[page_num] = page
            self._cowed.add(page_num)
            return page

        if write and page_num not in self._cowed:
            page = page.copy()
            self._symbolic_addrs[page_num] = set(self._symbolic_addrs[page_num])
            self._cowed.add(page_num)
            self._pages[page_num] = page

        return page

    def __contains__(self, addr):
        try:
            return self.__getitem__(addr) is not None
        except KeyError:
            return False

    def contains_no_backer(self, addr):
        """
        Tests if the address is contained in any page of paged memory, without considering memory backers.

        :param int addr: The address to test.
        :return: True if the address is included in one of the pages, False otherwise.
        :rtype: bool
        """

        for i, p in self._pages.iteritems():
            if i * self._page_size <= addr < (i + 1) * self._page_size:
                # NOTE(review): Page.keys() yields absolute addresses (see
                # ListPage.keys), so subtracting the page base here looks
                # inconsistent -- verify intended key space.
                return addr - (i * self._page_size) in p.keys()
        return False

    def keys(self):
        sofar = set()
        sofar.update(self._memory_backer.keys())

        for i, p in self._pages.items():
            # NOTE(review): Page.keys() already returns absolute addresses,
            # so adding i * page_size again may double-offset -- verify.
            sofar.update([k + i * self._page_size for k in p.keys()])

        return sofar

    def __len__(self):
        return len(self.keys())

    def changed_bytes(self, other):
        """Public wrapper around the name-mangled __changed_bytes()."""
        return self.__changed_bytes(other)

    def __changed_bytes(self, other):
        """
        Gets the set of changed bytes between `self` and `other`.

        :type other: SimPagedMemory
        :returns: A set of differing bytes.
        """
        if self._page_size != other._page_size:
            raise SimMemoryError("SimPagedMemory page sizes differ. This is asking for disaster.")

        our_pages = set(self._pages.keys())
        their_pages = set(other._pages.keys())
        their_additions = their_pages - our_pages
        our_additions = our_pages - their_pages
        common_pages = our_pages & their_pages

        # every address in a page only one side has is a candidate
        candidates = set()
        for p in their_additions:
            candidates.update(other._pages[p].keys())
        for p in our_additions:
            candidates.update(self._pages[p].keys())

        # for shared pages, collect addresses whose stored object differs
        for p in common_pages:
            our_page = self._pages[p]
            their_page = other._pages[p]

            if our_page is their_page:
                continue

            our_keys = set(our_page.keys())
            their_keys = set(their_page.keys())

            changes = (our_keys - their_keys) | (their_keys - our_keys) | {
                i for i in (our_keys & their_keys) if our_page.load_mo(self.state, i) is not their_page.load_mo(self.state, i)
            }

            candidates.update(changes)

        #both_changed = our_changes & their_changes
        #ours_changed_only = our_changes - both_changed
        #theirs_changed_only = their_changes - both_changed
        #both_deleted = their_deletions & our_deletions
        #ours_deleted_only = our_deletions - both_deleted
        #theirs_deleted_only = their_deletions - both_deleted

        # confirm candidates by comparing the actual byte values
        differences = set()
        for c in candidates:
            if c not in self and c in other:
                differences.add(c)
            elif c in self and c not in other:
                differences.add(c)
            else:
                # normalize raw bytes into SimMemoryObjects before comparing
                if type(self[c]) is not SimMemoryObject:
                    self[c] = SimMemoryObject(self.state.se.BVV(ord(self[c]), 8), c)
                if type(other[c]) is not SimMemoryObject:
                    other[c] = SimMemoryObject(self.state.se.BVV(ord(other[c]), 8), c)
                if c in self and self[c] != other[c]:
                    # Try to see if the bytes are equal
                    self_byte = self[c].bytes_at(c, 1)
                    other_byte = other[c].bytes_at(c, 1)
                    if self_byte is not other_byte:
                        #l.debug("%s: offset %x, two different bytes %s %s from %s %s", self.id, c,
                        #        self_byte, other_byte,
                        #        self[c].object.model, other[c].object.model)
                        differences.add(c)
                else:
                    # this means the byte is in neither memory
                    pass

        return differences

    #
    # Memory object management
    #

    def _apply_object_to_page(self, page_base, mo, page=None, overwrite=True):
        """
        Writes a memory object to a `page`

        :param page_base:   The base address of the page.
        :param mo:          The memory object.
        :param page:        (optional) the page to use.
        :param overwrite:   (optional) If False, only write to currently-empty memory.
        """
        page_num = page_base / self._page_size
        try:
            page = self._get_page(page_num,
                                  write=True,
                                  create=not self.allow_segv) if page is None else page
        except KeyError:
            if self.allow_segv:
                raise SimSegfaultError(mo.base, 'write-miss')
            else:
                raise
        if self.allow_segv and not page.concrete_permissions & Page.PROT_WRITE:
            raise SimSegfaultError(mo.base, 'non-writable')

        page.store_mo(self.state, mo, overwrite=overwrite)
        return True

    def _containing_pages(self, mo_start, mo_end):
        """Return the base addresses of every page overlapping [mo_start, mo_end)."""
        page_start = mo_start - mo_start%self._page_size
        page_end = mo_end + (self._page_size - mo_end%self._page_size) if mo_end % self._page_size else mo_end
        return [ b for b in range(page_start, page_end, self._page_size) ]

    def _containing_pages_mo(self, mo):
        """Return the base addresses of every page overlapping memory object `mo`."""
        mo_start = mo.base
        mo_end = mo.base + mo.length
        return self._containing_pages(mo_start, mo_end)

    def store_memory_object(self, mo, overwrite=True):
        """
        This function optimizes a large store by storing a single reference to the :class:`SimMemoryObject` instead of
        one for each byte.

        :param mo: the memory object to store
        """

        for p in self._containing_pages_mo(mo):
            self._apply_object_to_page(p, mo, overwrite=overwrite)

        self._update_range_mappings(mo.base, mo.object, mo.length)

    def replace_memory_object(self, old, new_content):
        """
        Replaces the memory object `old` with a new memory object containing `new_content`.

        :param old:         A SimMemoryObject (i.e., one from :func:`memory_objects_for_hash()` or :func:`
                            memory_objects_for_name()`).
        :param new_content: The content (claripy expression) for the new memory object.
        :returns: the new memory object
        """

        if old.object.size() != new_content.size():
            raise SimMemoryError("memory objects can only be replaced by the same length content")

        new = SimMemoryObject(new_content, old.base)
        for p in self._containing_pages_mo(old):
            self._get_page(p/self._page_size, write=True).replace_mo(self.state, old, new)

        if isinstance(new.object, claripy.ast.BV):
            for b in range(old.base, old.base+old.length):
                self._update_mappings(b, new.object)
        return new

    def replace_all(self, old, new):
        """
        Replaces all instances of expression `old` with expression `new`.

        :param old: A claripy expression. Must contain at least one named variable (to make it possible to use the
                    name index for speedup).
        :param new: The new variable to replace it with.
        """

        if options.REVERSE_MEMORY_NAME_MAP not in self.state.options:
            raise SimMemoryError("replace_all is not doable without a reverse name mapping. Please add "
                                 "sim_options.REVERSE_MEMORY_NAME_MAP to the state options")

        if not isinstance(old, claripy.ast.BV) or not isinstance(new, claripy.ast.BV):
            raise SimMemoryError("old and new arguments to replace_all() must be claripy.BV objects")

        if len(old.variables) == 0:
            raise SimMemoryError("old argument to replace_all() must have at least one named variable")

        # Compute an intersection between sets of memory objects for each unique variable name. The eventual memory
        # object set contains all memory objects that we should update.
        memory_objects = None
        for v in old.variables:
            if memory_objects is None:
                memory_objects = self.memory_objects_for_name(v)
            elif len(memory_objects) == 0:
                # It's a set and it's already empty
                # there is no way for it to go back...
                break
            else:
                memory_objects &= self.memory_objects_for_name(v)

        replaced_objects_cache = { }
        for mo in memory_objects:
            replaced_object = None
            if mo.object in replaced_objects_cache:
                if mo.object is not replaced_objects_cache[mo.object]:
                    replaced_object = replaced_objects_cache[mo.object]

            else:
                replaced_object = mo.object.replace(old, new)
                replaced_objects_cache[mo.object] = replaced_object
                if mo.object is replaced_object:
                    # The replace does not really occur
                    replaced_object = None

            if replaced_object is not None:
                self.replace_memory_object(mo, replaced_object)

    #
    # Mapping bullshit
    #

    def _mark_updated_mapping(self, d, m):
        """Copy-on-write a reverse-mapping entry before its first mutation."""
        if m in self._updated_mappings:
            return

        if options.REVERSE_MEMORY_HASH_MAP not in self.state.options and d is self._hash_mapping:
            #print "ABORTING FROM HASH"
            return
        if options.REVERSE_MEMORY_NAME_MAP not in self.state.options and d is self._name_mapping:
            #print "ABORTING FROM NAME"
            return
        #print m
        #SimSymbolicMemory.wtf += 1
        #print SimSymbolicMemory.wtf

        try:
            d[m] = set(d[m])
        except KeyError:
            d[m] = set()
        self._updated_mappings.add(m)

    def _update_range_mappings(self, actual_addr, cnt, size):
        """Update the reverse mappings for every byte of [actual_addr, actual_addr+size)."""
        if not (options.REVERSE_MEMORY_NAME_MAP in self.state.options or
                options.REVERSE_MEMORY_HASH_MAP in self.state.options or
                options.MEMORY_SYMBOLIC_BYTES_MAP in self.state.options):
            return

        for i in range(actual_addr, actual_addr+size):
            self._update_mappings(i, cnt)

    def _update_mappings(self, actual_addr, cnt):
        """Maintain the symbolic-byte, name->addrs, and hash->addrs maps for one byte."""
        if options.MEMORY_SYMBOLIC_BYTES_MAP in self.state.options:
            page_num = actual_addr / self._page_size
            page_idx = actual_addr
            if self.state.se.symbolic(cnt):
                self._symbolic_addrs[page_num].add(page_idx)
            else:
                self._symbolic_addrs[page_num].discard(page_idx)

        if not (options.REVERSE_MEMORY_NAME_MAP in self.state.options or
                options.REVERSE_MEMORY_HASH_MAP in self.state.options):
            return

        if (options.REVERSE_MEMORY_HASH_MAP not in self.state.options) and \
                len(self.state.se.variables(cnt)) == 0:
            return

        l.debug("Updating mappings at address 0x%x", actual_addr)

        try:
            l.debug("... removing old mappings")

            # remove this address for the old variables
            old_obj = self[actual_addr]
            if isinstance(old_obj, SimMemoryObject):
                old_obj = old_obj.object

            if isinstance(old_obj, claripy.ast.BV):
                if options.REVERSE_MEMORY_NAME_MAP in self.state.options:
                    var_set = self.state.se.variables(old_obj)
                    for v in var_set:
                        self._mark_updated_mapping(self._name_mapping, v)
                        self._name_mapping[v].discard(actual_addr)
                        if len(self._name_mapping[v]) == 0:
                            self._name_mapping.pop(v, None)

                if options.REVERSE_MEMORY_HASH_MAP in self.state.options:
                    h = hash(old_obj)
                    self._mark_updated_mapping(self._hash_mapping, h)
                    self._hash_mapping[h].discard(actual_addr)
                    if len(self._hash_mapping[h]) == 0:
                        self._hash_mapping.pop(h, None)
        except KeyError:
            pass

        l.debug("... adding new mappings")
        if options.REVERSE_MEMORY_NAME_MAP in self.state.options:
            # add the new variables to the mapping
            var_set = self.state.se.variables(cnt)
            for v in var_set:
                self._mark_updated_mapping(self._name_mapping, v)
                if v not in self._name_mapping:
                    self._name_mapping[v] = set()
                self._name_mapping[v].add(actual_addr)

        if options.REVERSE_MEMORY_HASH_MAP in self.state.options:
            # add the new variables to the hash->addrs mapping
            h = hash(cnt)
            self._mark_updated_mapping(self._hash_mapping, h)
            if h not in self._hash_mapping:
                self._hash_mapping[h] = set()
            self._hash_mapping[h].add(actual_addr)

    def get_symbolic_addrs(self):
        """Return the set of all addresses known to hold symbolic bytes."""
        symbolic_addrs = set()
        for page in self._symbolic_addrs:
            symbolic_addrs.update(self._symbolic_addrs[page])
        return symbolic_addrs

    def addrs_for_name(self, n):
        """
        Returns addresses that contain expressions that contain a variable named `n`.

        Stale entries encountered along the way are pruned from the mapping.
        """
        if n not in self._name_mapping:
            return

        self._mark_updated_mapping(self._name_mapping, n)

        to_discard = set()
        for e in self._name_mapping[n]:
            try:
                if n in self[e].object.variables: yield e
                else: to_discard.add(e)
            except KeyError:
                to_discard.add(e)
        self._name_mapping[n] -= to_discard

    def addrs_for_hash(self, h):
        """
        Returns addresses that contain expressions that contain a variable with the hash of `h`.

        Stale entries encountered along the way are pruned from the mapping.
        """
        if h not in self._hash_mapping:
            return

        self._mark_updated_mapping(self._hash_mapping, h)

        to_discard = set()
        for e in self._hash_mapping[h]:
            try:
                if h == hash(self[e].object): yield e
                else: to_discard.add(e)
            except KeyError:
                to_discard.add(e)
        self._hash_mapping[h] -= to_discard

    def memory_objects_for_name(self, n):
        """
        Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the name of
        `n`.

        This is useful for replacing those values in one fell swoop with :func:`replace_memory_object()`, even if
        they have been partially overwritten.
        """
        return set([ self[i] for i in self.addrs_for_name(n)])

    def memory_objects_for_hash(self, n):
        """
        Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the hash
        `n`.
        """
        return set([ self[i] for i in self.addrs_for_hash(n)])

    def permissions(self, addr, permissions=None):
        """
        Returns the permissions for a page at address `addr`.

        If optional argument permissions is given, set page permissions to that prior to returning permissions.
        """

        if self.state.se.symbolic(addr):
            raise SimMemoryError("page permissions cannot currently be looked up for symbolic addresses")

        if isinstance(addr, claripy.ast.bv.BV):
            addr = self.state.se.eval(addr)

        page_num = addr / self._page_size

        try:
            page = self._get_page(page_num)
        except KeyError:
            raise SimMemoryError("page does not exist at given address")

        # Set permissions for the page
        if permissions is not None:
            if isinstance(permissions, (int, long)):
                permissions = claripy.BVV(permissions, 3)

            if not isinstance(permissions,claripy.ast.bv.BV):
                raise SimMemoryError("Unknown permissions argument type of {0}.".format(type(permissions)))

            page.permissions = permissions

        return page.permissions

    def map_region(self, addr, length, permissions, init_zero=False):
        """Map [addr, addr+length) with `permissions`, page-granular.

        Note: `o` is the sim_options alias imported at the bottom of this
        module (to break a circular import).
        """
        if o.TRACK_MEMORY_MAPPING not in self.state.options:
            return

        if self.state.se.symbolic(addr):
            raise SimMemoryError("cannot map region with a symbolic address")

        if isinstance(addr, claripy.ast.bv.BV):
            addr = self.state.se.max_int(addr)

        base_page_num = addr / self._page_size

        # round length up to whole pages
        pages = length / self._page_size
        if length % self._page_size > 0:
            pages += 1

        # this check should not be performed when constructing a CFG
        if self.state.mode != 'fastpath':
            for page in xrange(pages):
                page_id = base_page_num + page
                if page_id * self._page_size in self:
                    err = "map_page received address and length combination which contained mapped page"
                    l.warning(err)
                    raise SimMemoryError(err)

        if isinstance(permissions, (int, long)):
            permissions = claripy.BVV(permissions, 3)

        for page in xrange(pages):
            page_id = base_page_num + page
            self._pages[page_id] = self._create_page(page_id, permissions=permissions)
            if init_zero:
                # fill the fresh page with a single page-sized zero object
                mo = SimMemoryObject(claripy.BVV(0, self._page_size * 8), page_id*self._page_size)
                self._apply_object_to_page(page_id*self._page_size, mo, page=self._pages[page_id])
            self._symbolic_addrs[page_id] = set()

    def unmap_region(self, addr, length):
        """Unmap every page covering [addr, addr+length)."""
        if o.TRACK_MEMORY_MAPPING not in self.state.options:
            return

        if self.state.se.symbolic(addr):
            raise SimMemoryError("cannot unmap region with a symbolic address")

        if isinstance(addr, claripy.ast.bv.BV):
            addr = self.state.se.max_int(addr)

        base_page_num = addr / self._page_size
        pages = length / self._page_size
        if length % self._page_size > 0:
            pages += 1

        # this check should not be performed when constructing a CFG
        if self.state.mode != 'fastpath':
            for page in xrange(pages):
                if base_page_num + page not in self._pages:
                    l.warning("unmap_region received address and length combination is not mapped")
                    return

        for page in xrange(pages):
            del self._pages[base_page_num + page]
            del self._symbolic_addrs[base_page_num + page]
from .. import sim_options as o
| |
import base64
import datetime
import hashlib
import hmac
import logging
import re
import struct
import time
from . import packers, settings
__all__ = ["create_token", "detect_token", "parse_token"]
logger = logging.getLogger("sesame")
TIMESTAMP_OFFSET = 1577836800 # 2020-01-01T00:00:00Z
def pack_timestamp():
    """
    When SESAME_MAX_AGE is enabled, encode the time in seconds since the epoch.

    Return bytes (empty when SESAME_MAX_AGE is disabled).
    """
    if settings.MAX_AGE is None:
        return b""
    now = int(time.time())
    # Offset keeps the value small enough for a signed 32-bit big-endian int.
    return struct.pack("!i", now - TIMESTAMP_OFFSET)
def unpack_timestamp(data):
    """
    When SESAME_MAX_AGE is enabled, extract the timestamp and calculate the age.

    Return an age in seconds or None and the remaining bytes.
    """
    if settings.MAX_AGE is None:
        return None, data
    # struct.error is raised here when data contains fewer than 4 bytes.
    timestamp = struct.unpack("!i", data[:4])[0]
    age = int(time.time()) - TIMESTAMP_OFFSET - timestamp
    return age, data[4:]
# Number of trailing characters of a Django password hash that constitute the
# hash proper (excluding algorithm prefix, parameters, and salt), keyed by
# hasher algorithm name. Used by get_revocation_key() to derive revocation
# material from the hash only, never from the salt.
HASH_SIZES = {
    "pbkdf2_sha256": 44,
    "pbkdf2_sha1": 28,
    "argon2": 22,  # in Argon2 v1.3; previously 86
    "bcrypt_sha256": 31,  # salt (22) + hash (31)
    "bcrypt": 31,  # salt (22) + hash (31)
    "sha1": 40,  # hex, not base64
    "md5": 32,  # hex, not base64
    "crypt": 11,  # salt (2) + hash (11)
}
def get_revocation_key(user):
    """
    When the value returned by this method changes, tokens are revoked.

    It is derived from the hashed password so that changing the password
    revokes tokens. For one-time tokens, it also contains the last login
    datetime so that logging in revokes existing tokens.
    """
    parts = []
    # Tokens generated by django-sesame are more likely to leak than hashed
    # passwords. To minimize what a leaked token could reveal, only the hash
    # portion is used, never the salt, as suggested in issue #40. Since the
    # result is hashed again with a cryptographic hash function, this isn't
    # supposed to make a difference in practice, but it alleviates concerns
    # about sending data derived from hashed passwords into the wild.
    #
    # Hashed passwords may be in various formats:
    # 1. "[<algorithm>$]?[<parameters>$]*[<salt>$?]?<hash>", if set_password()
    #    was called with a built-in hasher. Unfortunately, the bcrypt (and
    #    crypt) hashers don't include a "$" between the salt and the hash, so
    #    we can't split on this marker. Instead, hash lengths are hardcoded
    #    in HASH_SIZES.
    # 2. "!<40 random characters>", if set_unusable_password() was called.
    # 3. Anything else, if set_password() was called with a custom hasher or
    #    if a custom authentication backend is used.
    #
    # An alternative would be user.get_session_auth_hash(), which has the
    # advantage of being a public API. It's a HMAC-SHA256 of the whole
    # password hash, but it's designed for a slightly different purpose, and
    # chaining more cryptographic operations than needed hurts clarity.
    if settings.INVALIDATE_ON_PASSWORD_CHANGE and user.password is not None:
        algorithm = user.password.partition("$")[0]
        hash_size = HASH_SIZES.get(algorithm)
        if hash_size is None:
            # Unknown format: fall back to the full password field.
            parts.append(user.password)
        else:
            parts.append(user.password[-hash_size:])
    if settings.ONE_TIME and user.last_login is not None:
        parts.append(user.last_login.isoformat())
    return "".join(parts).encode()
def sign(data):
    """
    Create a MAC with keyed hashing.
    """
    # A short signature keeps tokens short. The default 10-byte signature
    # still has about 1.2e24 possible values, which is sufficient here.
    mac = hashlib.blake2b(
        data,
        digest_size=settings.SIGNATURE_SIZE,
        key=settings.KEY,
        person=b"sesame.tokens_v2",
    )
    return mac.digest()
def create_token(user, scope=""):
    """
    Create a v2 signed token for a user.
    """
    primary_key = packers.packer.pack_pk(user.pk)
    timestamp = pack_timestamp()
    # The signature covers the revocation key, so when that key changes the
    # signature becomes invalid; the token itself doesn't need to embed it.
    signature = sign(
        primary_key + timestamp + get_revocation_key(user) + scope.encode()
    )
    encoded = base64.urlsafe_b64encode(primary_key + timestamp + signature)
    # Strip padding; parse_token() restores it before decoding.
    return encoded.rstrip(b"=").decode()
def parse_token(token, get_user, scope="", max_age=None):
    """
    Obtain a user from a v2 signed token.

    :param token: token string as produced by create_token().
    :param get_user: callable mapping a primary key to a user, or None.
    :param scope: must match the scope used when the token was created.
    :param max_age: optional override (seconds or timedelta) of
        SESAME_MAX_AGE; ignored when SESAME_MAX_AGE is None.

    Return the user, or None when the token is invalid for any reason.
    """
    token = token.encode()
    # Below, error messages should give a hint to developers debugging apps
    # but remain sufficiently generic for the common situation where tokens
    # get truncated by accident.
    try:
        # Restore the Base64 padding stripped at creation time.
        data = base64.urlsafe_b64decode(token + b"=" * (-len(token) % 4))
    except Exception:
        logger.debug("Bad token: cannot decode token")
        return
    # Extract user primary key, token age, and signature from token.
    try:
        user_pk, timestamp_and_signature = packers.packer.unpack_pk(data)
    except Exception:
        logger.debug("Bad token: cannot extract primary key")
        return
    try:
        # age is None when SESAME_MAX_AGE is disabled.
        age, signature = unpack_timestamp(timestamp_and_signature)
    except Exception:
        logger.debug("Bad token: cannot extract timestamp")
        return
    if len(signature) != settings.SIGNATURE_SIZE:
        logger.debug("Bad token: cannot extract signature")
        return
    # Since we don't include the revocation key in the token, we need to fetch
    # the user in the database before we can verify the signature. Usually,
    # it's best to verify the signature before doing anything with a message.
    # An attacker could craft tokens to fetch arbitrary users by primary key,
    # like they can fetch arbitrary users by username on a login form. I'm not
    # seeing how this would be exploitable. A timing attack to determine if
    # there's a user with a given primary key doesn't look like a major risk.
    # Check if token is expired. This is the fastest check.
    if max_age is None:
        max_age = settings.MAX_AGE
    elif settings.MAX_AGE is None:
        logger.warning(
            "Ignoring max_age argument; "
            "it isn't supported when SESAME_MAX_AGE = None"
        )
    elif isinstance(max_age, datetime.timedelta):
        max_age = max_age.total_seconds()
    if age is not None and age >= max_age:
        logger.debug("Expired token: age = %d seconds", age)
        return
    # Check if user exists and can log in.
    user = get_user(user_pk)
    if user is None:
        logger.debug("Unknown or inactive user: pk = %r", user_pk)
        return
    # Check if signature is valid
    primary_key_and_timestamp = data[: -settings.SIGNATURE_SIZE]
    revocation_key = get_revocation_key(user)
    expected_signature = sign(
        primary_key_and_timestamp + revocation_key + scope.encode()
    )
    # Constant-time comparison to avoid leaking signature bytes via timing.
    if not hmac.compare_digest(signature, expected_signature):
        log_scope = "in default scope" if scope == "" else f"in scope {scope}"
        logger.debug("Invalid token for user %s %s", user, log_scope)
        return
    log_scope = "in default scope" if scope == "" else f"in scope {scope}"
    logger.debug("Valid token for user %s %s", user, log_scope)
    return user
# Tokens are arbitrary Base64-encoded bytestrings. Their size depends on
# SESAME_PACKER, SESAME_MAX_AGE, and SESAME_SIGNATURE_SIZE. Defaults are:
# - without SESAME_MAX_AGE: 4 + 10 = 14 bytes = 19 Base64 characters.
# - with SESAME_MAX_AGE: 4 + 4 + 10 = 18 bytes = 24 Base64 characters.
# Minimum "sensible" size is 1 + 0 + 2 = 3 bytes = 4 Base64 characters.
# The alphabet is URL-safe Base64 without padding, hence "-" and "_".
token_re = re.compile(r"[A-Za-z0-9-_]{4,}")
def detect_token(token):
    """
    Tell whether token may be a v2 signed token.
    """
    # Cheap syntactic check only; parse_token() performs real validation.
    return bool(token_re.fullmatch(token))
| |
"""OutcomeResults API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class OutcomeResultsAPI(BaseCanvasAPI):
    """OutcomeResults API Version 1.0."""
    def __init__(self, *args, **kwargs):
        """Init method for OutcomeResultsAPI."""
        super(OutcomeResultsAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("py3canvas.OutcomeResultsAPI")
    def get_outcome_results(
        self,
        course_id,
        include=None,
        include_hidden=None,
        outcome_ids=None,
        user_ids=None,
    ):
        """
        Get outcome results.
        Gets the outcome results for users and outcomes in the specified context.
        """
        # Accumulators for URL path pieces, form data, and query parameters.
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """
        ID
        """
        path["course_id"] = course_id
        # OPTIONAL - user_ids
        """
        If specified, only the users whose ids are given will be included in the
        results. SIS ids can be used, prefixed by "sis_user_id:".
        It is an error to specify an id for a user who is not a student in
        the context.
        """
        if user_ids is not None:
            params["user_ids"] = user_ids
        # OPTIONAL - outcome_ids
        """
        If specified, only the outcomes whose ids are given will be included in the
        results. it is an error to specify an id for an outcome which is not linked
        to the context.
        """
        if outcome_ids is not None:
            params["outcome_ids"] = outcome_ids
        # OPTIONAL - include
        """
        [String, "alignments"|"outcomes"|"outcomes.alignments"|"outcome_groups"|"outcome_links"|"outcome_paths"|"users"]
        Specify additional collections to be side loaded with the result.
        "alignments" includes only the alignments referenced by the returned
        results.
        "outcomes.alignments" includes all alignments referenced by outcomes in the
        context.
        """
        if include is not None:
            params["include"] = include
        # OPTIONAL - include_hidden
        """
        If true, results that are hidden from the learning mastery gradebook and student rollup
        scores will be included
        """
        if include_hidden is not None:
            params["include_hidden"] = include_hidden
        self.logger.debug(
            "GET /api/v1/courses/{course_id}/outcome_results with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        # GET request: all arguments travel in the query string (no_data=True).
        return self.generic_request(
            "GET",
            "/api/v1/courses/{course_id}/outcome_results".format(**path),
            data=data,
            params=params,
            no_data=True,
        )
    def get_outcome_result_rollups(
        self,
        course_id,
        aggregate=None,
        aggregate_stat=None,
        exclude=None,
        include=None,
        outcome_ids=None,
        sort_by=None,
        sort_order=None,
        sort_outcome_id=None,
        user_ids=None,
    ):
        """
        Get outcome result rollups.
        Gets the outcome rollups for the users and outcomes in the specified
        context.
        """
        # Accumulators for URL path pieces, form data, and query parameters.
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """
        ID
        """
        path["course_id"] = course_id
        # OPTIONAL - aggregate
        """
        If specified, instead of returning one rollup for each user, all the user
        rollups will be combined into one rollup for the course that will contain
        the average (or median, see below) rollup score for each outcome.
        """
        if aggregate is not None:
            self._validate_enum(aggregate, ["course"])
            params["aggregate"] = aggregate
        # OPTIONAL - aggregate_stat
        """
        If aggregate rollups requested, then this value determines what
        statistic is used for the aggregate. Defaults to "mean" if this value
        is not specified.
        """
        if aggregate_stat is not None:
            self._validate_enum(aggregate_stat, ["mean", "median"])
            params["aggregate_stat"] = aggregate_stat
        # OPTIONAL - user_ids
        """
        If specified, only the users whose ids are given will be included in the
        results or used in an aggregate result. it is an error to specify an id
        for a user who is not a student in the context
        """
        if user_ids is not None:
            params["user_ids"] = user_ids
        # OPTIONAL - outcome_ids
        """
        If specified, only the outcomes whose ids are given will be included in the
        results. it is an error to specify an id for an outcome which is not linked
        to the context.
        """
        if outcome_ids is not None:
            params["outcome_ids"] = outcome_ids
        # OPTIONAL - include
        """
        [String, "courses"|"outcomes"|"outcomes.alignments"|"outcome_groups"|"outcome_links"|"outcome_paths"|"users"]
        Specify additional collections to be side loaded with the result.
        """
        if include is not None:
            params["include"] = include
        # OPTIONAL - exclude
        """
        Specify additional values to exclude. "missing_user_rollups" excludes
        rollups for users without results.
        """
        if exclude is not None:
            self._validate_enum(exclude, ["missing_user_rollups"])
            params["exclude"] = exclude
        # OPTIONAL - sort_by
        """
        If specified, sorts outcome result rollups. "student" sorting will sort
        by a user's sortable name. "outcome" sorting will sort by the given outcome's
        rollup score. The latter requires specifying the "sort_outcome_id" parameter.
        By default, the sort order is ascending.
        """
        if sort_by is not None:
            self._validate_enum(sort_by, ["student", "outcome"])
            params["sort_by"] = sort_by
        # OPTIONAL - sort_outcome_id
        """
        If outcome sorting requested, then this determines which outcome to use
        for rollup score sorting.
        """
        if sort_outcome_id is not None:
            params["sort_outcome_id"] = sort_outcome_id
        # OPTIONAL - sort_order
        """
        If sorting requested, then this allows changing the default sort order of
        ascending to descending.
        """
        if sort_order is not None:
            self._validate_enum(sort_order, ["asc", "desc"])
            params["sort_order"] = sort_order
        self.logger.debug(
            "GET /api/v1/courses/{course_id}/outcome_rollups with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        # GET request: all arguments travel in the query string (no_data=True).
        return self.generic_request(
            "GET",
            "/api/v1/courses/{course_id}/outcome_rollups".format(**path),
            data=data,
            params=params,
            no_data=True,
        )
class Outcomeresult(BaseModel):
    """Outcomeresult Model.
    A student's result for an outcome"""
    def __init__(
        self,
        id=None,
        score=None,
        submitted_or_assessed_at=None,
        links=None,
        percent=None,
    ):
        """Init method for Outcomeresult class."""
        self._id = id
        self._score = score
        self._submitted_or_assessed_at = submitted_or_assessed_at
        self._links = links
        self._percent = percent
        self.logger = logging.getLogger("py3canvas.Outcomeresult")
    @property
    def id(self):
        """A unique identifier for this result."""
        return self._id
    @id.setter
    def id(self, value):
        """Setter for id property."""
        # Logger.warn() is a deprecated alias; use warning() instead.
        self.logger.warning(
            "Setting values on id will NOT update the remote Canvas instance."
        )
        self._id = value
    @property
    def score(self):
        """The student's score."""
        return self._score
    @score.setter
    def score(self, value):
        """Setter for score property."""
        self.logger.warning(
            "Setting values on score will NOT update the remote Canvas instance."
        )
        self._score = value
    @property
    def submitted_or_assessed_at(self):
        """The datetime the resulting OutcomeResult was submitted at, or absent that, when it was assessed."""
        return self._submitted_or_assessed_at
    @submitted_or_assessed_at.setter
    def submitted_or_assessed_at(self, value):
        """Setter for submitted_or_assessed_at property."""
        self.logger.warning(
            "Setting values on submitted_or_assessed_at will NOT update the remote Canvas instance."
        )
        self._submitted_or_assessed_at = value
    @property
    def links(self):
        """Unique identifiers of objects associated with this result."""
        return self._links
    @links.setter
    def links(self, value):
        """Setter for links property."""
        self.logger.warning(
            "Setting values on links will NOT update the remote Canvas instance."
        )
        self._links = value
    @property
    def percent(self):
        """score's percent of maximum points possible for outcome, scaled to reflect any custom mastery levels that differ from the learning outcome."""
        return self._percent
    @percent.setter
    def percent(self, value):
        """Setter for percent property."""
        self.logger.warning(
            "Setting values on percent will NOT update the remote Canvas instance."
        )
        self._percent = value
class Outcomerollupscorelinks(BaseModel):
    """Outcomerollupscorelinks Model."""
    def __init__(self, outcome=None):
        """Init method for Outcomerollupscorelinks class."""
        self._outcome = outcome
        self.logger = logging.getLogger("py3canvas.Outcomerollupscorelinks")
    @property
    def outcome(self):
        """The id of the related outcome."""
        return self._outcome
    @outcome.setter
    def outcome(self, value):
        """Setter for outcome property."""
        # Logger.warn() is a deprecated alias; use warning() instead.
        self.logger.warning(
            "Setting values on outcome will NOT update the remote Canvas instance."
        )
        self._outcome = value
class Outcomerollupscore(BaseModel):
    """Outcomerollupscore Model."""
    def __init__(self, score=None, count=None, links=None):
        """Init method for Outcomerollupscore class."""
        self._score = score
        self._count = count
        self._links = links
        self.logger = logging.getLogger("py3canvas.Outcomerollupscore")
    @property
    def score(self):
        """The rollup score for the outcome, based on the student alignment scores related to the outcome. This could be null if the student has no related scores."""
        return self._score
    @score.setter
    def score(self, value):
        """Setter for score property."""
        # Logger.warn() is a deprecated alias; use warning() instead.
        self.logger.warning(
            "Setting values on score will NOT update the remote Canvas instance."
        )
        self._score = value
    @property
    def count(self):
        """The number of alignment scores included in this rollup."""
        return self._count
    @count.setter
    def count(self, value):
        """Setter for count property."""
        self.logger.warning(
            "Setting values on count will NOT update the remote Canvas instance."
        )
        self._count = value
    @property
    def links(self):
        """links."""
        return self._links
    @links.setter
    def links(self, value):
        """Setter for links property."""
        self.logger.warning(
            "Setting values on links will NOT update the remote Canvas instance."
        )
        self._links = value
class Outcomerolluplinks(BaseModel):
    """Outcomerolluplinks Model."""
    def __init__(self, course=None, user=None, section=None):
        """Init method for Outcomerolluplinks class."""
        self._course = course
        self._user = user
        self._section = section
        self.logger = logging.getLogger("py3canvas.Outcomerolluplinks")
    @property
    def course(self):
        """If an aggregate result was requested, the course field will be present. Otherwise, the user and section field will be present (Optional) The id of the course that this rollup applies to."""
        return self._course
    @course.setter
    def course(self, value):
        """Setter for course property."""
        # Logger.warn() is a deprecated alias; use warning() instead.
        self.logger.warning(
            "Setting values on course will NOT update the remote Canvas instance."
        )
        self._course = value
    @property
    def user(self):
        """(Optional) The id of the user that this rollup applies to."""
        return self._user
    @user.setter
    def user(self, value):
        """Setter for user property."""
        self.logger.warning(
            "Setting values on user will NOT update the remote Canvas instance."
        )
        self._user = value
    @property
    def section(self):
        """(Optional) The id of the section the user is in."""
        return self._section
    @section.setter
    def section(self, value):
        """Setter for section property."""
        self.logger.warning(
            "Setting values on section will NOT update the remote Canvas instance."
        )
        self._section = value
class Outcomerollup(BaseModel):
    """Outcomerollup Model."""
    def __init__(self, scores=None, name=None, links=None):
        """Init method for Outcomerollup class."""
        self._scores = scores
        self._name = name
        self._links = links
        self.logger = logging.getLogger("py3canvas.Outcomerollup")
    @property
    def scores(self):
        """an array of OutcomeRollupScore objects."""
        return self._scores
    @scores.setter
    def scores(self, value):
        """Setter for scores property."""
        # Logger.warn() is a deprecated alias; use warning() instead.
        self.logger.warning(
            "Setting values on scores will NOT update the remote Canvas instance."
        )
        self._scores = value
    @property
    def name(self):
        """The name of the resource for this rollup. For example, the user name."""
        return self._name
    @name.setter
    def name(self, value):
        """Setter for name property."""
        self.logger.warning(
            "Setting values on name will NOT update the remote Canvas instance."
        )
        self._name = value
    @property
    def links(self):
        """links."""
        return self._links
    @links.setter
    def links(self, value):
        """Setter for links property."""
        self.logger.warning(
            "Setting values on links will NOT update the remote Canvas instance."
        )
        self._links = value
class Outcomealignment(BaseModel):
    """Outcomealignment Model.
    An asset aligned with this outcome"""
    def __init__(self, id=None, name=None, html_url=None):
        """Init method for Outcomealignment class."""
        self._id = id
        self._name = name
        self._html_url = html_url
        self.logger = logging.getLogger("py3canvas.Outcomealignment")
    @property
    def id(self):
        """A unique identifier for this alignment."""
        return self._id
    @id.setter
    def id(self, value):
        """Setter for id property."""
        # Logger.warn() is a deprecated alias; use warning() instead.
        self.logger.warning(
            "Setting values on id will NOT update the remote Canvas instance."
        )
        self._id = value
    @property
    def name(self):
        """The name of this alignment."""
        return self._name
    @name.setter
    def name(self, value):
        """Setter for name property."""
        self.logger.warning(
            "Setting values on name will NOT update the remote Canvas instance."
        )
        self._name = value
    @property
    def html_url(self):
        """(Optional) A URL for details about this alignment."""
        return self._html_url
    @html_url.setter
    def html_url(self, value):
        """Setter for html_url property."""
        self.logger.warning(
            "Setting values on html_url will NOT update the remote Canvas instance."
        )
        self._html_url = value
class Outcomepath(BaseModel):
    """Outcomepath Model.
    The full path to an outcome"""
    def __init__(self, id=None, parts=None):
        """Init method for Outcomepath class."""
        self._id = id
        self._parts = parts
        self.logger = logging.getLogger("py3canvas.Outcomepath")
    @property
    def id(self):
        """A unique identifier for this outcome."""
        return self._id
    @id.setter
    def id(self, value):
        """Setter for id property."""
        # Logger.warn() is a deprecated alias; use warning() instead.
        self.logger.warning(
            "Setting values on id will NOT update the remote Canvas instance."
        )
        self._id = value
    @property
    def parts(self):
        """an array of OutcomePathPart objects."""
        return self._parts
    @parts.setter
    def parts(self, value):
        """Setter for parts property."""
        self.logger.warning(
            "Setting values on parts will NOT update the remote Canvas instance."
        )
        self._parts = value
class Outcomepathpart(BaseModel):
    """Outcomepathpart Model.
    An outcome or outcome group"""
    def __init__(self, name=None):
        """Init method for Outcomepathpart class."""
        self._name = name
        self.logger = logging.getLogger("py3canvas.Outcomepathpart")
    @property
    def name(self):
        """The title of the outcome or outcome group."""
        return self._name
    @name.setter
    def name(self, value):
        """Setter for name property."""
        # Logger.warn() is a deprecated alias; use warning() instead.
        self.logger.warning(
            "Setting values on name will NOT update the remote Canvas instance."
        )
        self._name = value
| |
"""Viessmann ViCare sensor device."""
from __future__ import annotations
from collections.abc import Callable
from contextlib import suppress
from dataclasses import dataclass
import logging
from PyViCare.PyViCareDevice import Device
from PyViCare.PyViCareUtils import (
PyViCareInvalidDataError,
PyViCareNotSupportedFeatureError,
PyViCareRateLimitError,
)
import requests
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TIME_HOURS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import ViCareRequiredKeysMixin
from .const import (
DOMAIN,
VICARE_API,
VICARE_DEVICE_CONFIG,
VICARE_NAME,
VICARE_UNIT_TO_DEVICE_CLASS,
VICARE_UNIT_TO_UNIT_OF_MEASUREMENT,
)
_LOGGER = logging.getLogger(__name__)
@dataclass
class ViCareSensorEntityDescription(SensorEntityDescription, ViCareRequiredKeysMixin):
    """Describes ViCare sensor entity."""
    # Optional callable that reads the unit of measurement from the PyViCare
    # device at runtime, for sensors whose unit is not known statically.
    unit_getter: Callable[[Device], str | None] | None = None
# Sensor descriptions evaluated once against the device as a whole (as opposed
# to per-circuit, per-burner, or per-compressor sensors below).
# NOTE(review): some keys below use spaces instead of underscores; they are
# kept verbatim because the key feeds into the entity's identity.
GLOBAL_SENSORS: tuple[ViCareSensorEntityDescription, ...] = (
    ViCareSensorEntityDescription(
        key="outside_temperature",
        name="Outside Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getOutsideTemperature(),
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    ViCareSensorEntityDescription(
        key="return_temperature",
        name="Return Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getReturnTemperature(),
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    ViCareSensorEntityDescription(
        key="boiler_temperature",
        name="Boiler Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getBoilerTemperature(),
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    # Gas consumption sensors: the unit is read from the API at runtime via
    # unit_getter rather than declared statically.
    ViCareSensorEntityDescription(
        key="hotwater_gas_consumption_today",
        name="Hot water gas consumption today",
        value_getter=lambda api: api.getGasConsumptionDomesticHotWaterToday(),
        unit_getter=lambda api: api.getGasConsumptionDomesticHotWaterUnit(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="hotwater_gas_consumption_heating_this_week",
        name="Hot water gas consumption this week",
        value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisWeek(),
        unit_getter=lambda api: api.getGasConsumptionDomesticHotWaterUnit(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="hotwater_gas_consumption_heating_this_month",
        name="Hot water gas consumption this month",
        value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisMonth(),
        unit_getter=lambda api: api.getGasConsumptionDomesticHotWaterUnit(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="hotwater_gas_consumption_heating_this_year",
        name="Hot water gas consumption this year",
        value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisYear(),
        unit_getter=lambda api: api.getGasConsumptionDomesticHotWaterUnit(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="gas_consumption_heating_today",
        name="Heating gas consumption today",
        value_getter=lambda api: api.getGasConsumptionHeatingToday(),
        unit_getter=lambda api: api.getGasConsumptionHeatingUnit(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="gas_consumption_heating_this_week",
        name="Heating gas consumption this week",
        value_getter=lambda api: api.getGasConsumptionHeatingThisWeek(),
        unit_getter=lambda api: api.getGasConsumptionHeatingUnit(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="gas_consumption_heating_this_month",
        name="Heating gas consumption this month",
        value_getter=lambda api: api.getGasConsumptionHeatingThisMonth(),
        unit_getter=lambda api: api.getGasConsumptionHeatingUnit(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="gas_consumption_heating_this_year",
        name="Heating gas consumption this year",
        value_getter=lambda api: api.getGasConsumptionHeatingThisYear(),
        unit_getter=lambda api: api.getGasConsumptionHeatingUnit(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="power_production_current",
        name="Power production current",
        native_unit_of_measurement=POWER_WATT,
        value_getter=lambda api: api.getPowerProductionCurrent(),
        device_class=SensorDeviceClass.POWER,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    ViCareSensorEntityDescription(
        key="power_production_today",
        name="Power production today",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerProductionToday(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="power_production_this_week",
        name="Power production this week",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerProductionThisWeek(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="power_production_this_month",
        name="Power production this month",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerProductionThisMonth(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="power_production_this_year",
        name="Power production this year",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerProductionThisYear(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    # NOTE(review): keys below use spaces; kept verbatim (see header note).
    ViCareSensorEntityDescription(
        key="solar storage temperature",
        name="Solar Storage Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getSolarStorageTemperature(),
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    ViCareSensorEntityDescription(
        key="collector temperature",
        name="Solar Collector Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getSolarCollectorTemperature(),
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    ViCareSensorEntityDescription(
        key="solar power production today",
        name="Solar power production today",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getSolarPowerProductionToday(),
        unit_getter=lambda api: api.getSolarPowerProductionUnit(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="solar power production this week",
        name="Solar power production this week",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getSolarPowerProductionThisWeek(),
        unit_getter=lambda api: api.getSolarPowerProductionUnit(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="solar power production this month",
        name="Solar power production this month",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getSolarPowerProductionThisMonth(),
        unit_getter=lambda api: api.getSolarPowerProductionUnit(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="solar power production this year",
        name="Solar power production this year",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getSolarPowerProductionThisYear(),
        unit_getter=lambda api: api.getSolarPowerProductionUnit(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="power consumption today",
        name="Power consumption today",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerConsumptionToday(),
        unit_getter=lambda api: api.getPowerConsumptionUnit(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="power consumption this week",
        name="Power consumption this week",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerConsumptionThisWeek(),
        unit_getter=lambda api: api.getPowerConsumptionUnit(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="power consumption this month",
        name="Power consumption this month",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerConsumptionThisMonth(),
        unit_getter=lambda api: api.getPowerConsumptionUnit(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="power consumption this year",
        name="Power consumption this year",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerConsumptionThisYear(),
        unit_getter=lambda api: api.getPowerConsumptionUnit(),
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
)
# Sensor descriptions evaluated once per heating circuit.
CIRCUIT_SENSORS: tuple[ViCareSensorEntityDescription, ...] = (
    ViCareSensorEntityDescription(
        key="supply_temperature",
        name="Supply Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getSupplyTemperature(),
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
)
# Sensor descriptions evaluated once per burner.
BURNER_SENSORS: tuple[ViCareSensorEntityDescription, ...] = (
    ViCareSensorEntityDescription(
        key="burner_starts",
        name="Burner Starts",
        icon="mdi:counter",
        value_getter=lambda api: api.getStarts(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="burner_hours",
        name="Burner Hours",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHours(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="burner_modulation",
        name="Burner Modulation",
        icon="mdi:percent",
        native_unit_of_measurement=PERCENTAGE,
        value_getter=lambda api: api.getModulation(),
        state_class=SensorStateClass.MEASUREMENT,
    ),
)
# Sensor descriptions evaluated once per heat-pump compressor.
COMPRESSOR_SENSORS: tuple[ViCareSensorEntityDescription, ...] = (
    ViCareSensorEntityDescription(
        key="compressor_starts",
        name="Compressor Starts",
        icon="mdi:counter",
        value_getter=lambda api: api.getStarts(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="compressor_hours",
        name="Compressor Hours",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHours(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    # Load classes break total runtime down by compressor load level.
    ViCareSensorEntityDescription(
        key="compressor_hours_loadclass1",
        name="Compressor Hours Load Class 1",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass1(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="compressor_hours_loadclass2",
        name="Compressor Hours Load Class 2",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass2(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="compressor_hours_loadclass3",
        name="Compressor Hours Load Class 3",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass3(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="compressor_hours_loadclass4",
        name="Compressor Hours Load Class 4",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass4(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key="compressor_hours_loadclass5",
        name="Compressor Hours Load Class 5",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass5(),
        state_class=SensorStateClass.TOTAL_INCREASING,
    ),
)
def _build_entity(name, vicare_api, device_config, sensor):
    """Create a ViCare sensor entity.

    Probes the description's value_getter against the live API object and
    returns None when the backing feature is unsupported, so unsupported
    sensors are never registered.
    """
    _LOGGER.debug("Found device %s", name)
    try:
        sensor.value_getter(vicare_api)
    except PyViCareNotSupportedFeatureError:
        _LOGGER.info("Feature not supported %s", name)
        return None
    except AttributeError:
        _LOGGER.debug("Attribute Error %s", name)
        return None
    _LOGGER.debug("Found entity %s", name)
    return ViCareSensor(name, vicare_api, device_config, sensor)
async def _entities_from_descriptions(
    hass, name, entities, sensor_descriptions, iterables, config_entry
):
    """Create entities from descriptions and list of burners/circuits."""
    # Invariants hoisted out of the nested loops.
    device_config = hass.data[DOMAIN][config_entry.entry_id][VICARE_DEVICE_CONFIG]
    needs_suffix = len(iterables) > 1
    for description in sensor_descriptions:
        for current in iterables:
            # Disambiguate entity names only when there are several components.
            suffix = f" {current.id}" if needs_suffix else ""
            entity = await hass.async_add_executor_job(
                _build_entity,
                f"{name} {description.name}{suffix}",
                current,
                device_config,
                description,
            )
            if entity is not None:
                entities.append(entity)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Create the ViCare sensor devices.

    Builds global sensors first, then the per-component (circuit, burner,
    compressor) sensors, skipping any component group the device does not
    expose.
    """
    name = VICARE_NAME
    api = hass.data[DOMAIN][config_entry.entry_id][VICARE_API]
    entities = []
    for description in GLOBAL_SENSORS:
        entity = await hass.async_add_executor_job(
            _build_entity,
            f"{name} {description.name}",
            api,
            hass.data[DOMAIN][config_entry.entry_id][VICARE_DEVICE_CONFIG],
            description,
        )
        if entity is not None:
            entities.append(entity)

    # The three component groups shared an identical, copy-pasted try/except
    # block; iterate over the (descriptions, api attribute, log message)
    # triples instead. getattr(api, attr) stays inside the try because
    # accessing the component list itself may raise
    # PyViCareNotSupportedFeatureError.
    for sensor_descriptions, api_attr, not_found_message in (
        (CIRCUIT_SENSORS, "circuits", "No circuits found"),
        (BURNER_SENSORS, "burners", "No burners found"),
        (COMPRESSOR_SENSORS, "compressors", "No compressors found"),
    ):
        try:
            await _entities_from_descriptions(
                hass,
                name,
                entities,
                sensor_descriptions,
                getattr(api, api_attr),
                config_entry,
            )
        except PyViCareNotSupportedFeatureError:
            _LOGGER.info(not_found_message)

    async_add_entities(entities)
class ViCareSensor(SensorEntity):
    """Representation of a ViCare sensor."""

    entity_description: ViCareSensorEntityDescription

    def __init__(
        self, name, api, device_config, description: ViCareSensorEntityDescription
    ):
        """Initialize the sensor."""
        self.entity_description = description
        self._attr_name = name
        self._api = api
        self._device_config = device_config
        self._state = None

    @property
    def device_info(self):
        """Return device info for this device."""
        config = self._device_config
        return {
            "identifiers": {(DOMAIN, config.getConfig().serial)},
            "name": config.getModel(),
            "manufacturer": "Viessmann",
            "model": (DOMAIN, config.getModel()),
            "configuration_url": "https://developer.viessmann.com/",
        }

    @property
    def available(self):
        """Return True if entity is available."""
        return self._state is not None

    @property
    def unique_id(self):
        """Return unique ID for this device."""
        base_id = f"{self._device_config.getConfig().serial}-{self.entity_description.key}"
        # Component-level APIs (circuit/burner/compressor) carry an id that
        # distinguishes multiple instances of the same sensor kind.
        if hasattr(self._api, "id"):
            return f"{base_id}-{self._api.id}"
        return base_id

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    def update(self):
        """Update state of sensor."""
        try:
            with suppress(PyViCareNotSupportedFeatureError):
                self._state = self.entity_description.value_getter(self._api)
                unit_getter = self.entity_description.unit_getter
                if unit_getter:
                    vicare_unit = unit_getter(self._api)
                    if vicare_unit is not None:
                        # Derive HA device class and unit from the unit the
                        # ViCare API reports at runtime.
                        self._attr_device_class = VICARE_UNIT_TO_DEVICE_CLASS.get(
                            vicare_unit
                        )
                        self._attr_native_unit_of_measurement = (
                            VICARE_UNIT_TO_UNIT_OF_MEASUREMENT.get(vicare_unit)
                        )
        except requests.exceptions.ConnectionError:
            _LOGGER.error("Unable to retrieve data from ViCare server")
        except ValueError:
            _LOGGER.error("Unable to decode data from ViCare server")
        except PyViCareRateLimitError as limit_exception:
            _LOGGER.error("Vicare API rate limit exceeded: %s", limit_exception)
        except PyViCareInvalidDataError as invalid_data_exception:
            _LOGGER.error("Invalid data from Vicare server: %s", invalid_data_exception)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An executor that schedules and executes applied ptransforms."""
from __future__ import absolute_import
import collections
import logging
import Queue
import sys
import threading
import traceback
from weakref import WeakValueDictionary
from apache_beam.metrics.execution import MetricsContainer
from apache_beam.metrics.execution import ScopedMetricsContainer
class ExecutorService(object):
  """Thread pool for executing tasks in parallel."""
  class CallableTask(object):
    """Interface for a unit of work that can be submitted to the pool."""
    def call(self):
      # Overridden by subclasses with the actual work.
      pass
    @property
    def name(self):
      # Optional display name used when naming the worker thread.
      return None
  class ExecutorServiceWorker(threading.Thread):
    """Worker thread for executing a single task at a time."""
    # Amount to block waiting for getting an item from the queue in seconds.
    TIMEOUT = 5
    def __init__(self, queue, index):
      super(ExecutorService.ExecutorServiceWorker, self).__init__()
      self.queue = queue
      self._index = index
      self._default_name = 'ExecutorServiceWorker-' + str(index)
      self._update_name()
      self.shutdown_requested = False
      # The thread starts running as soon as it is constructed.
      self.start()
    def _update_name(self, task=None):
      # Reflect the currently executing task (if any) in the thread name so
      # thread dumps are easier to read.
      if task and task.name:
        name = task.name
      else:
        name = self._default_name
      self.name = 'Thread: %d, %s (%s)' % (
          self._index, name, 'executing' if task else 'idle')
    def _get_task_or_none(self):
      # Returns the next task, or None after a short timeout.
      try:
        # Do not block indefinitely, otherwise we may not act for a requested
        # shutdown.
        return self.queue.get(
            timeout=ExecutorService.ExecutorServiceWorker.TIMEOUT)
      except Queue.Empty:
        return None
    def run(self):
      # Main worker loop: execute at most one task per iteration until a
      # shutdown is requested.
      while not self.shutdown_requested:
        task = self._get_task_or_none()
        if task:
          try:
            if not self.shutdown_requested:
              self._update_name(task)
              task.call()
              self._update_name()
          finally:
            # Balance queue accounting even if task.call() raised.
            self.queue.task_done()
    def shutdown(self):
      # Ask the worker loop to exit after its current task.
      self.shutdown_requested = True
  def __init__(self, num_workers):
    self.queue = Queue.Queue()
    # Workers begin pulling from the shared queue immediately.
    self.workers = [ExecutorService.ExecutorServiceWorker(
        self.queue, i) for i in range(num_workers)]
    self.shutdown_requested = False
  def submit(self, task):
    """Enqueues a CallableTask unless shutdown has been requested."""
    assert isinstance(task, ExecutorService.CallableTask)
    if not self.shutdown_requested:
      self.queue.put(task)
  def await_completion(self):
    """Blocks until every worker thread has terminated."""
    for worker in self.workers:
      worker.join()
  def shutdown(self):
    """Stops accepting work, drains pending tasks, and stops the workers."""
    self.shutdown_requested = True
    for worker in self.workers:
      worker.shutdown()
    # Consume all the remaining items in the queue
    while not self.queue.empty():
      try:
        self.queue.get_nowait()
        self.queue.task_done()
      except Queue.Empty:
        continue
    # All existing threads will eventually terminate (after they complete their
    # last task).
class TransformEvaluationState(object):
  """Base class tracking executors that are in flight for a transform.

  Work handed to schedule() is recorded in the shared `scheduled` set and
  submitted to the executor service; complete() removes it again.
  """
  def __init__(self, executor_service, scheduled):
    self.executor_service = executor_service
    self.scheduled = scheduled
  def schedule(self, work):
    """Records `work` as in flight and submits it for execution."""
    self.scheduled.add(work)
    self.executor_service.submit(work)
  def complete(self, completed_work):
    """Removes finished work from the in-flight set."""
    self.scheduled.remove(completed_work)
class ParallelEvaluationState(TransformEvaluationState):
  """A TransformEvaluationState with unlimited parallelism.
  Any TransformExecutor scheduled will be immediately submitted to the
  ExecutorService.
  A principal use of this is for evaluators that can generate output bundles
  only using the input bundle (e.g. ParDo).
  """
  # Inherits schedule()/complete() unchanged: every scheduled executor is
  # submitted immediately with no serialization constraint.
  pass
class SerialEvaluationState(TransformEvaluationState):
  """A TransformEvaluationState with a single work queue.
  Any TransformExecutor scheduled will be placed on the work queue. Only one
  item of work will be submitted to the ExecutorService at any time.
  A principal use of this is for evaluators that keeps a global state such as
  GroupByKeyOnly.
  """
  def __init__(self, executor_service, scheduled):
    super(SerialEvaluationState, self).__init__(executor_service, scheduled)
    self.serial_queue = collections.deque()
    # The single piece of work currently submitted; None when idle.
    self.currently_evaluating = None
    self._lock = threading.Lock()
  def complete(self, completed_work):
    # Clear the in-flight slot (possibly promoting queued work) before
    # reporting completion to the base class.
    self._update_currently_evaluating(None, completed_work)
    super(SerialEvaluationState, self).complete(completed_work)
  def schedule(self, new_work):
    # Enqueue only; actual submission happens when nothing is in flight.
    self._update_currently_evaluating(new_work, None)
  def _update_currently_evaluating(self, new_work, completed_work):
    # Callers pass exactly one of new_work / completed_work.
    with self._lock:
      if new_work:
        self.serial_queue.append(new_work)
      if completed_work:
        assert self.currently_evaluating == completed_work
        self.currently_evaluating = None
      if self.serial_queue and not self.currently_evaluating:
        # NOTE(review): append() + pop() makes this a LIFO stack, so queued
        # serial work runs newest-first; popleft() would give FIFO. Confirm
        # the ordering is intentional before changing it.
        next_work = self.serial_queue.pop()
        self.currently_evaluating = next_work
        super(SerialEvaluationState, self).schedule(next_work)
class TransformExecutorServices(object):
  """Schedules and completes TransformExecutors.

  Controls the concurrency as appropriate for the applied transform the
  executor exists for: a shared parallel state, plus one lazily-created
  serial state per step.
  """
  def __init__(self, executor_service):
    self._executor_service = executor_service
    self._scheduled = set()
    self._parallel = ParallelEvaluationState(
        self._executor_service, self._scheduled)
    # Weak values: a serial state lives only as long as someone holds it.
    self._serial_cache = WeakValueDictionary()
  def parallel(self):
    """Returns the shared unlimited-parallelism evaluation state."""
    return self._parallel
  def serial(self, step):
    """Returns the serial evaluation state for `step`, creating on demand."""
    state = self._serial_cache.get(step)
    if state is None:
      state = SerialEvaluationState(self._executor_service, self._scheduled)
      self._serial_cache[step] = state
    return state
  @property
  def executors(self):
    """Immutable snapshot of the currently scheduled executors."""
    return frozenset(self._scheduled)
class _CompletionCallback(object):
  """The default completion callback.
  The default completion callback is used to complete transform evaluations
  that are triggered due to the arrival of elements from an upstream transform,
  or for a source transform.
  """
  def __init__(self, evaluation_context, all_updates, timers=None):
    self._evaluation_context = evaluation_context
    self._all_updates = all_updates
    self._timers = timers
  def handle_result(self, input_committed_bundle, transform_result):
    """Commits a transform result and broadcasts each output bundle."""
    committed_bundles = self._evaluation_context.handle_result(
        input_committed_bundle, self._timers, transform_result)
    for bundle in committed_bundles:
      self._all_updates.offer(
          _ExecutorServiceParallelExecutor.ExecutorUpdate(bundle, None))
    return committed_bundles
  def handle_exception(self, exception):
    """Broadcasts a failed evaluation to the executor's update queue."""
    self._all_updates.offer(
        _ExecutorServiceParallelExecutor.ExecutorUpdate(None, exception))
class TransformExecutor(ExecutorService.CallableTask):
  """TransformExecutor will evaluate a bundle using an applied ptransform.
  A CallableTask responsible for constructing a TransformEvaluator and
  evaluating it on some bundle of input, and registering the result using the
  completion callback.
  """
  def __init__(self, transform_evaluator_registry, evaluation_context,
               input_bundle, applied_transform, completion_callback,
               transform_evaluation_state):
    self._transform_evaluator_registry = transform_evaluator_registry
    self._evaluation_context = evaluation_context
    self._input_bundle = input_bundle
    self._applied_transform = applied_transform
    self._completion_callback = completion_callback
    self._transform_evaluation_state = transform_evaluation_state
    # Side input values resolved so far; preserved across re-invocations.
    self._side_input_values = {}
    self.blocked = False
    self._call_count = 0
  def call(self):
    self._call_count += 1
    # The executor may be re-invoked once per side input (while waiting for
    # each to become available) plus once for the final evaluation.
    assert self._call_count <= (1 + len(self._applied_transform.side_inputs))
    metrics_container = MetricsContainer(self._applied_transform.full_label)
    scoped_metrics_container = ScopedMetricsContainer(metrics_container)
    for side_input in self._applied_transform.side_inputs:
      if side_input not in self._side_input_values:
        has_result, value = (
            self._evaluation_context.get_value_or_schedule_after_output(
                side_input, self))
        if not has_result:
          # Monitor task will reschedule this executor once the side input is
          # available.
          return
        self._side_input_values[side_input] = value
    side_input_values = [self._side_input_values[side_input]
                         for side_input in self._applied_transform.side_inputs]
    try:
      evaluator = self._transform_evaluator_registry.for_application(
          self._applied_transform, self._input_bundle,
          side_input_values, scoped_metrics_container)
      # A root (source) transform has no input bundle.
      if self._input_bundle:
        for value in self._input_bundle.get_elements_iterable():
          evaluator.process_element(value)
      with scoped_metrics_container:
        result = evaluator.finish_bundle()
        result.logical_metric_updates = metrics_container.get_cumulative()
      if self._evaluation_context.has_cache:
        for uncommitted_bundle in result.uncommitted_output_bundles:
          self._evaluation_context.append_to_cache(
              self._applied_transform, uncommitted_bundle.tag,
              uncommitted_bundle.get_elements_iterable())
        undeclared_tag_values = result.undeclared_tag_values
        if undeclared_tag_values:
          # Python 2 dict iteration (module targets Python 2).
          for tag, value in undeclared_tag_values.iteritems():
            self._evaluation_context.append_to_cache(
                self._applied_transform, tag, value)
      self._completion_callback.handle_result(self._input_bundle, result)
      return result
    except Exception as e:  # pylint: disable=broad-except
      logging.warning('Task failed: %s', traceback.format_exc(), exc_info=True)
      self._completion_callback.handle_exception(e)
    finally:
      # Physical metrics and state bookkeeping are committed whether or not
      # the evaluation succeeded.
      self._evaluation_context.metrics().commit_physical(
          self._input_bundle,
          metrics_container.get_cumulative())
      self._transform_evaluation_state.complete(self)
class Executor(object):
  """Public facade delegating to _ExecutorServiceParallelExecutor."""
  def __init__(self, *args, **kwargs):
    self._executor = _ExecutorServiceParallelExecutor(*args, **kwargs)
  def start(self, roots):
    """Begins execution from the given root transforms."""
    self._executor.start(roots)
  def await_completion(self):
    """Blocks until the pipeline finishes; re-raises any failure."""
    self._executor.await_completion()
class _ExecutorServiceParallelExecutor(object):
  """An internal implementation for Executor."""
  # Number of worker threads in the underlying pool; the monitor task shares
  # this pool with transform executors.
  NUM_WORKERS = 1
  def __init__(self, value_to_consumers, transform_evaluator_registry,
               evaluation_context):
    self.executor_service = ExecutorService(
        _ExecutorServiceParallelExecutor.NUM_WORKERS)
    self.transform_executor_services = TransformExecutorServices(
        self.executor_service)
    self.value_to_consumers = value_to_consumers
    self.transform_evaluator_registry = transform_evaluator_registry
    self.evaluation_context = evaluation_context
    # Internal updates: produced bundles / exceptions, drained by the monitor.
    self.all_updates = _ExecutorServiceParallelExecutor._TypedUpdateQueue(
        _ExecutorServiceParallelExecutor.ExecutorUpdate)
    # User-visible updates consumed by await_completion().
    self.visible_updates = _ExecutorServiceParallelExecutor._TypedUpdateQueue(
        _ExecutorServiceParallelExecutor.VisibleExecutorUpdate)
    self.default_completion_callback = _CompletionCallback(
        evaluation_context, self.all_updates)
  def start(self, roots):
    # Kick off execution by scheduling the self-rescheduling monitor task.
    self.root_nodes = frozenset(roots)
    self.executor_service.submit(
        _ExecutorServiceParallelExecutor._MonitorTask(self))
  def await_completion(self):
    # Blocks until a visible update arrives, then re-raises any failure.
    update = self.visible_updates.take()
    try:
      if update.exception:
        t, v, tb = update.exc_info
        # Python 2 three-argument raise: preserves the original traceback.
        raise t, v, tb
    finally:
      self.executor_service.shutdown()
  def schedule_consumers(self, committed_bundle):
    # Fan a newly committed bundle out to every consumer of its PCollection.
    if committed_bundle.pcollection in self.value_to_consumers:
      consumers = self.value_to_consumers[committed_bundle.pcollection]
      for applied_ptransform in consumers:
        self.schedule_consumption(applied_ptransform, committed_bundle,
                                  self.default_completion_callback)
  def schedule_consumption(self, consumer_applied_transform, committed_bundle,
                           on_complete):
    """Schedules evaluation of the given bundle with the transform."""
    assert all([consumer_applied_transform, on_complete])
    # Only root transforms may be scheduled without an input bundle.
    assert committed_bundle or consumer_applied_transform in self.root_nodes
    if (committed_bundle
        and self.transform_evaluator_registry.should_execute_serially(
            consumer_applied_transform)):
      # Transforms with global state get a per-step serial queue.
      transform_executor_service = self.transform_executor_services.serial(
          consumer_applied_transform)
    else:
      transform_executor_service = self.transform_executor_services.parallel()
    transform_executor = TransformExecutor(
        self.transform_evaluator_registry, self.evaluation_context,
        committed_bundle, consumer_applied_transform, on_complete,
        transform_executor_service)
    transform_executor_service.schedule(transform_executor)
  class _TypedUpdateQueue(object):
    """Type checking update queue with blocking and non-blocking operations."""
    def __init__(self, item_type):
      self._item_type = item_type
      self._queue = Queue.Queue()
    def poll(self):
      # Non-blocking take; returns None when empty.
      try:
        item = self._queue.get_nowait()
        self._queue.task_done()
        return item
      except Queue.Empty:
        return None
    def take(self):
      # Blocking take.
      item = self._queue.get()
      self._queue.task_done()
      return item
    def offer(self, item):
      # Enqueue, enforcing the declared item type.
      assert isinstance(item, self._item_type)
      self._queue.put_nowait(item)
  class ExecutorUpdate(object):
    """An internal status update on the state of the executor."""
    def __init__(self, produced_bundle=None, exception=None):
      # Exactly one of them should be not-None
      assert bool(produced_bundle) != bool(exception)
      self.committed_bundle = produced_bundle
      self.exception = exception
      # Capture the active traceback only when it belongs to this exception.
      self.exc_info = sys.exc_info()
      if self.exc_info[1] is not exception:
        # Not the right exception.
        self.exc_info = (exception, None, None)
  class VisibleExecutorUpdate(object):
    """An update of interest to the user.
    Used for awaiting the completion to decide whether to return normally or
    raise an exception.
    """
    def __init__(self, exc_info=(None, None, None)):
      self.finished = exc_info[0] is not None
      self.exception = exc_info[1] or exc_info[0]
      self.exc_info = exc_info
  class _MonitorTask(ExecutorService.CallableTask):
    """MonitorTask continuously runs to ensure that pipeline makes progress."""
    def __init__(self, executor):
      self._executor = executor
    @property
    def name(self):
      return 'monitor'
    def call(self):
      try:
        # Drain pending updates: forward produced bundles to their consumers,
        # surface failures to the user-visible queue.
        update = self._executor.all_updates.poll()
        while update:
          if update.committed_bundle:
            self._executor.schedule_consumers(update.committed_bundle)
          else:
            assert update.exception
            logging.warning('A task failed with exception.\n %s',
                            update.exception)
            self._executor.visible_updates.offer(
                _ExecutorServiceParallelExecutor.VisibleExecutorUpdate(
                    update.exc_info))
          update = self._executor.all_updates.poll()
        self._executor.evaluation_context.schedule_pending_unblocked_tasks(
            self._executor.executor_service)
        self._add_work_if_necessary(self._fire_timers())
      except Exception as e:  # pylint: disable=broad-except
        logging.error('Monitor task died due to exception.\n %s', e)
        self._executor.visible_updates.offer(
            _ExecutorServiceParallelExecutor.VisibleExecutorUpdate(
                sys.exc_info()))
      finally:
        # Reschedule itself unless the pipeline reached a terminal state.
        if not self._should_shutdown():
          self._executor.executor_service.submit(self)
    def _should_shutdown(self):
      """_should_shutdown checks whether pipeline is completed or not.
      It will check for successful completion by checking the watermarks of all
      transforms. If they all reached the maximum watermark it means that
      pipeline successfully reached to completion.
      If the above is not true, it will check that at least one executor is
      making progress. Otherwise pipeline will be declared stalled.
      If the pipeline reached to a terminal state as explained above
      _should_shutdown will request executor to gracefully shutdown.
      Returns:
        True if pipeline reached a terminal state and monitor task could finish.
        Otherwise monitor task should schedule itself again for future
        execution.
      """
      if self._is_executing():
        # There are some bundles still in progress.
        return False
      else:
        if self._executor.evaluation_context.is_done():
          self._executor.visible_updates.offer(
              _ExecutorServiceParallelExecutor.VisibleExecutorUpdate())
        else:
          # Nothing is scheduled for execution, but watermarks incomplete.
          self._executor.visible_updates.offer(
              _ExecutorServiceParallelExecutor.VisibleExecutorUpdate(
                  (Exception('Monitor task detected a pipeline stall.'),
                   None,
                   None)))
        self._executor.executor_service.shutdown()
        return True
    def _fire_timers(self):
      """Schedules triggered consumers if any timers fired.
      Returns:
        True if timers fired.
      """
      fired_timers = self._executor.evaluation_context.extract_fired_timers()
      for applied_ptransform in fired_timers:
        # Use an empty committed bundle. just to trigger.
        empty_bundle = (
            self._executor.evaluation_context.create_empty_committed_bundle(
                applied_ptransform.inputs[0]))
        timer_completion_callback = _CompletionCallback(
            self._executor.evaluation_context, self._executor.all_updates,
            applied_ptransform)
        self._executor.schedule_consumption(
            applied_ptransform, empty_bundle, timer_completion_callback)
      return bool(fired_timers)
    def _is_executing(self):
      """Returns True if there is at least one non-blocked TransformExecutor."""
      executors = self._executor.transform_executor_services.executors
      if not executors:
        # Nothing is executing.
        return False
      # Ensure that at least one of those executors is not blocked.
      for transform_executor in executors:
        if not transform_executor.blocked:
          return True
      return False
    def _add_work_if_necessary(self, timers_fired):
      """Adds more work from the roots if pipeline requires more input.
      If all active TransformExecutors are in a blocked state, add more work
      from root nodes that may have additional work. This ensures that if a
      pipeline has elements available from the root nodes it will add those
      elements when necessary.
      Args:
        timers_fired: True if any timers fired prior to this call.
      """
      # If any timers have fired, they will add more work; No need to add more.
      if timers_fired:
        return
      if self._is_executing():
        # We have at least one executor that can proceed without adding
        # additional work.
        return
      # All current TransformExecutors are blocked; add more work from the
      # roots.
      for applied_transform in self._executor.root_nodes:
        if not self._executor.evaluation_context.is_done(applied_transform):
          self._executor.schedule_consumption(
              applied_transform, None,
              self._executor.default_completion_callback)
| |
from __future__ import unicode_literals
import io
import os
import datetime
import tempfile
import unittest
import profig
# use str for unicode data and bytes for binary data
# Python 2: rebind str to unicode so the tests below can use one name for
# text on both major versions.
if not profig.PY3:
    str = unicode
# Windows only: the registry module was named _winreg on Python 2.
if profig.WIN:
    try:
        import winreg
    except ImportError:
        import _winreg as winreg
class TestBasic(unittest.TestCase):
    """Core Config behavior: keys, sections, values, defaults, and reset."""
    def test_initial_state(self):
        c = profig.Config()
        self.assertEqual(dict(c), {})
        self.assertEqual(c.sources, [])
    def test_root(self):
        # Every section, however deep, reports the top-level Config as root.
        c = profig.Config()
        c['a'] = 1
        self.assertEqual(c.root, c)
        s = c.section('a')
        self.assertEqual(s.root, c)
        self.assertNotEqual(s.root, s)
    def test_formats(self):
        # A format can be selected by name, class, or pre-built instance.
        self.assertIn('ini', profig.Config.known_formats())
        c = profig.Config()
        self.assertIsInstance(c.format, profig.IniFormat)
        c = profig.Config(format='ini')
        self.assertIsInstance(c.format, profig.IniFormat)
        c = profig.Config(format=profig.IniFormat)
        self.assertIsInstance(c.format, profig.IniFormat)
        c = profig.Config()
        c.set_format(profig.IniFormat(c))
        self.assertIsInstance(c.format, profig.IniFormat)
        with self.assertRaises(profig.UnknownFormatError):
            c = profig.Config(format='marshmallow')
    def test_keys(self):
        # Keys may be dotted strings or (nested) tuples; anything else raises.
        c = profig.Config()
        c['a'] = 1
        c['a.a'] = 1
        c[('a', 'a')] = 1
        c[('a', ('a', 'a'))] = 1
        with self.assertRaises(TypeError):
            c[1] = 1
    def test_unicode_keys(self):
        # Byte keys are decoded with the configured encoding (shift-jis here).
        c = profig.Config(encoding='shiftjis')
        c[b'\xdc'] = 1
        c[b'\xdc.\xdc'] = '\uff9c'
        self.assertEqual(c[b'\xdc'], c['\uff9c'], 1)
        self.assertEqual(c[b'\xdc.\xdc'], c['\uff9c.\uff9c'], '\uff9c')
    def test_sync(self):
        # sync() without any configured sources is an error.
        c = profig.Config()
        with self.assertRaises(profig.NoSourcesError):
            c.sync()
    def test_len(self):
        # len() counts direct children only, not nested keys.
        c = profig.Config()
        self.assertEqual(len(c), 0)
        c['a'] = 1
        self.assertEqual(len(c), 1)
        c['a.1'] = 1
        self.assertEqual(len(c), 1)
        self.assertEqual(len(c.section('a')), 1)
    def test_init(self):
        # init() declares a section's default value and type.
        c = profig.Config()
        c.init('a', 1)
        c.init('a.a', 2)
        self.assertEqual(c['a'], 1)
        # Assigning a dict sets the section value ('') and child values.
        c['a'] = {'': 2, 'a': 3}
        self.assertEqual(c['a'], 2)
        self.assertEqual(c['a.a'], 3)
        s = c.section('a')
        s.convert(b'3')
        self.assertEqual(s.value(), 3)
        self.assertEqual(s._value, 3)
        self.assertIs(s._type, int)
        self.assertIs(s.default(), 1)
        self.assertIs(s._default, 1)
    def test_delayed_init(self):
        # Values set before init() stay untyped, then adapt once init() runs.
        c = profig.Config()
        c['a'] = {'': '2', 'a': '3'}
        self.assertEqual(c['a'], '2')
        self.assertEqual(c['a.a'], '3')
        s = c.section('a')
        s.convert(b'3')
        self.assertEqual(s.value(), '3')
        self.assertEqual(s._value, '3')
        self.assertIs(s._type, None)
        self.assertIs(s._default, profig.NoValue)
        with self.assertRaises(profig.NoValueError):
            s.default()
        c.init('a', 1)
        c.init('a.a', 2)
        self.assertEqual(c['a'], 3)
        self.assertEqual(c['a.a'], 3)
        s = c.section('a')
        self.assertEqual(s.value(), 3)
        self.assertEqual(s._value, 3)
        self.assertIs(s._type, int)
        self.assertIs(s.default(), 1)
        self.assertIs(s._default, 1)
    def test_get(self):
        c = profig.Config()
        c['a'] = 1
        c.init('a.1', 1)
        self.assertEqual(c.get('a'), 1)
        self.assertEqual(c.get('a.1'), 1)
        self.assertEqual(c.get('a.2'), None)
        self.assertEqual(c.get('a.2', 2), 2)
    def test_value(self):
        c = profig.Config()
        c['a'] = 1
        c.init('b', 1)
        s = c.section('c')
        with self.assertRaises(profig.NoValueError):
            s.value()
        for key in ['a', 'b']:
            s = c.section(key)
            self.assertEqual(s.value(), 1)
    def test_default(self):
        # Only init() establishes a default; plain assignment does not.
        c = profig.Config()
        c['a'] = 1
        c.init('b', 1)
        s = c.section('a')
        with self.assertRaises(profig.NoValueError):
            s.default()
        s = c.section('b')
        self.assertEqual(s.default(), 1)
    def test_set_value(self):
        c = profig.Config()
        c.init('c', 1)
        c.section('a').set_value(2)
        self.assertEqual(c['a'], 2)
        c.section('b').set_value('3')
        self.assertEqual(c['b'], '3')
        # .init does not enforce types
        c.section('c').set_value('4')
        self.assertEqual(c['c'], '4')
    def test_set_default(self):
        c = profig.Config()
        c.init('c', 1)
        c.section('a').set_default(2)
        self.assertEqual(c['a'], 2)
        c.section('b').set_default('3')
        self.assertEqual(c['b'], '3')
        # .init does not enforce types
        c.section('c').set_default('4')
        self.assertEqual(c['c'], '4')
    def test_section(self):
        # section() creates on demand unless create=False is passed.
        c = profig.Config()
        with self.assertRaises(profig.InvalidSectionError):
            c.section('a', create=False)
        self.assertIs(c.section('a'), c._children['a'])
        c['a.a.a'] = 1
        child = c._children['a']._children['a']._children['a']
        self.assertIs(c.section('a.a.a'), child)
        self.assertIs(c.section('a').section('a').section('a'), child)
    def test_as_dict(self):
        c = profig.Config(dict_type=dict)
        self.assertEqual(c.as_dict(), {})
        c['a'] = 1
        self.assertEqual(c.as_dict(), {'a': 1})
        c['c.a'] = 1
        self.assertEqual(c.as_dict(), {'a': 1, 'c': {'a': 1}})
        c['b'] = 1
        c['a.a'] = 1
        self.assertEqual(c.as_dict(), {'a': {'': 1, 'a': 1}, 'b': 1, 'c': {'a': 1}})
        self.assertEqual(c.as_dict(flat=True), {'a': 1, 'a.a': 1, 'b': 1, 'c.a': 1})
    def test_reset(self):
        # reset() restores defaults, optionally without recursing.
        c = profig.Config(dict_type=dict)
        c.init('a', 1)
        c.init('a.a', 1)
        c['a'] = 2
        c['a.a'] = 2
        self.assertEqual(c.as_dict(flat=True), {'a': 2, 'a.a': 2})
        c.section('a').reset(recurse=False)
        self.assertEqual(c.as_dict(flat=True), {'a': 1, 'a.a': 2})
        c.section('a').reset()
        self.assertEqual(c.as_dict(flat=True), {'a': 1, 'a.a': 1})
        c['a'] = 2
        c['a.a'] = 2
        c.reset()
        self.assertEqual(c.as_dict(flat=True), {'a': 1, 'a.a': 1})
class TestStrictMode(unittest.TestCase):
    """strict=True restricts keys to those declared with init()."""
    def setUp(self):
        self.c = profig.Config(strict=True)
        self.c.init('a', 1)
    def test_set_init(self):
        # Declared keys remain writable.
        self.c['a'] = 3
        self.assertEqual(self.c['a'], 3)
    def test_set_uninit(self):
        # Undeclared keys cannot be set or materialized as sections.
        with self.assertRaises(profig.InvalidSectionError):
            self.c['b'] = 3
        with self.assertRaises(profig.InvalidSectionError):
            self.c.section('b')
    def test_read_uninit(self):
        # Reading an undeclared key from a source raises in 'exception' mode.
        buf = io.BytesIO(b"""\
[a]
a = 1
""")
        self.c.format.error_mode = 'exception'
        with self.assertRaises(profig.InvalidSectionError):
            self.c.read(buf)
    def test_clear_uninit_on_sync(self):
        # sync() drops undeclared keys from the written output.
        buf = io.BytesIO(b"""\
[a]
a = 1
""")
        self.c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[a] = 1
""")
class TestIniFormat(unittest.TestCase):
    """Round-trips through the INI format: ordering, comments, encodings."""
    def setUp(self):
        self.c = profig.Config(format='ini')
        self.c.init('a', 1)
        self.c.init('b', 'value')
        self.c.init('a.1', 2)
    def test_basic(self):
        del self.c['a.1']
        buf = io.BytesIO()
        self.c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[a] = 1
[b] = value
""")
    def test_sync_read_blank(self):
        # Without init(), values read from a source stay as strings.
        c = profig.Config(format='ini')
        buf = io.BytesIO(b"""\
[b] = value
[a] = 1
1 = 2
""")
        c.sync(buf)
        self.assertEqual(c['a'], '1')
        self.assertEqual(c['b'], 'value')
        self.assertEqual(c['a.1'], '2')
    def test_subsection(self):
        buf = io.BytesIO()
        self.c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[a] = 1
1 = 2
[b] = value
""")
    def test_preserve_order(self):
        # sync() keeps the key order found in the existing source.
        buf = io.BytesIO(b"""\
[a] = 1
1 = 2
[b] = value
""")
        self.c['a.1'] = 3
        self.c['a'] = 2
        self.c['b'] = 'test'
        self.c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[a] = 2
1 = 3
[b] = test
""")
    def test_preserve_comments(self):
        # Comments survive a sync; leading ';' comments are normalized with a
        # space except for the trailing one.
        buf = io.BytesIO(b"""\
;a comment
[a] = 1
; another comment
1 = 2
; yet more comments?
[b] = value
;arrrrgh!
""")
        self.c['a.1'] = 3
        self.c['a'] = 2
        self.c['b'] = 'test'
        self.c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
; a comment
[a] = 2
; another comment
1 = 3
; yet more comments?
[b] = test
;arrrrgh!
""")
    def test_binary_read(self):
        # Raw bytes (including NUL and 0xFF) round-trip for bytes-typed keys.
        fd, temppath = tempfile.mkstemp()
        try:
            with io.open(fd, 'wb') as file:
                file.write(b"""\
[a] = \x00binary\xff
b = also\x00binary\xff
""")
            c = profig.Config(temppath, format='ini')
            c.init('a', b'')
            c.init('a.b', b'')
            c.read()
            self.assertEqual(c['a'], b'\x00binary\xff')
            self.assertEqual(c['a.b'], b'also\x00binary\xff')
        finally:
            os.remove(temppath)
    def test_binary_write(self):
        fd, temppath = tempfile.mkstemp()
        try:
            c = profig.Config(temppath, format='ini')
            c['a'] = b'\x00binary\xff'
            c.write()
            with io.open(fd, 'rb') as file:
                result = file.read()
            self.assertEqual(result, b"""\
[a] = \x00binary\xff
""")
        finally:
            os.remove(temppath)
    def test_unicode_read(self):
        # shift-jis byte 0xdc decodes to U+FF9C on read.
        fd, temppath = tempfile.mkstemp()
        try:
            with io.open(fd, 'wb') as file:
                file.write(b"""\
[\xdc] = \xdc
""")
            c = profig.Config(temppath, format='ini', encoding='shiftjis')
            c.read()
            self.assertEqual(c['\uff9c'], '\uff9c')
        finally:
            os.remove(temppath)
    def test_unicode_write(self):
        fd, temppath = tempfile.mkstemp()
        try:
            c = profig.Config(temppath, format='ini', encoding='shiftjis')
            c['\uff9c'] = '\uff9c'
            c.write()
            with io.open(fd, 'rb') as file:
                result = file.read()
            self.assertEqual(result, b"""\
[\xdc] = \xdc
""")
        finally:
            os.remove(temppath)
    def test_repeated_values(self):
        # The last occurrence of a repeated key wins and is the one rewritten.
        c = profig.Config(format='ini')
        buf = io.BytesIO(b"""\
[a]
b = 1
b = 2
""")
        c.sync(buf)
        self.assertEqual(c['a.b'], '2')
        self.assertEqual(buf.getvalue(), b"""\
[a]
b = 2
""")
        c['a.b'] = '3'
        c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[a]
b = 3
""")
    def test_repeated_sections(self):
        # Duplicate sections are merged; the last value wins on write.
        c = profig.Config(format='ini')
        buf = io.BytesIO(b"""\
[a]
b = 1
b = 2
[b]
a = 1
[a]
b = 3
""")
        c.sync(buf)
        self.assertEqual(c['a.b'], '3')
        self.assertEqual(buf.getvalue(), b"""\
[a]
b = 3
[b]
a = 1
""")
class TestCoercer(unittest.TestCase):
    """Adaptation between Python values and their string representations."""
    def test_datetime_date(self):
        c = profig.Config()
        dt = datetime.date(2014, 12, 30)
        c.init('timestamp', dt)
        buf = io.BytesIO()
        c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[timestamp] = 2014-12-30
""")
        # Re-init with a different default, then re-sync: the stored value
        # converts back to the original date.
        c.init('timestamp', datetime.datetime.now().date())
        c.sync(buf)
        self.assertEqual(c['timestamp'], dt)
    def test_datetime_time(self):
        c = profig.Config()
        dt = datetime.time(14, 45, 30, 655)
        c.init('timestamp', dt)
        buf = io.BytesIO()
        c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[timestamp] = 14:45:30.000655
""")
        c.init('timestamp', datetime.datetime.now().time())
        c.sync(buf)
        self.assertEqual(c['timestamp'], dt)
    def test_datetime_datetime(self):
        c = profig.Config()
        dt = datetime.datetime(2014, 12, 30, 14, 45, 30, 655)
        c.init('timestamp', dt)
        buf = io.BytesIO()
        c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[timestamp] = 2014-12-30 14:45:30.000655
""")
        c.init('timestamp', datetime.datetime.now())
        c.sync(buf)
        self.assertEqual(c['timestamp'], dt)
    def test_list_value(self):
        # Lists serialize comma-separated and convert back on read.
        c = profig.Config()
        c.init('colors', ['red', 'blue'])
        buf = io.BytesIO()
        c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[colors] = red, blue
""")
        c.init('colors', [])
        c.sync(buf)
        self.assertEqual(c['colors'], ['red', 'blue'])
    def test_path_value(self):
        # 'path_list' joins entries with the platform's os.pathsep.
        c = profig.Config()
        c.init('paths', ['path1', 'path2'], 'path_list')
        buf = io.BytesIO()
        c.sync(buf)
        self.assertEqual(buf.getvalue(), """\
[paths] = path1{sep}path2
""".format(sep=os.pathsep).encode('ascii'))
        buf = io.BytesIO("""\
[paths] = path1{sep}path2{sep}path3
""".format(sep=os.pathsep).encode('ascii'))
        c.sync(buf)
        self.assertEqual(c['paths'], ['path1', 'path2', 'path3'])
    def test_choice(self):
        # register_choice maps internal values to serialized labels; values
        # outside the mapping fail to adapt on write.
        c = profig.Config()
        c.coercer.register_choice('color', {1: 'red', 2: 'green', 3: 'blue'})
        c.init('color', 1, 'color')
        buf = io.BytesIO()
        c.sync(buf)
        self.assertEqual(buf.getvalue(), b"""\
[color] = red
""")
        buf = io.BytesIO(b"""\
[color] = blue
""")
        c.sync(buf)
        self.assertEqual(c['color'], 3)
        c['color'] = 4
        with self.assertRaises(profig.AdaptError):
            c.write(buf)
    def test_not_exist_error(self):
        # Unregistered type names, unadaptable values, and unconvertible
        # source strings each raise their specific error.
        c = profig.Config()
        c.init('value', [], 'notexist')
        buf = io.BytesIO()
        with self.assertRaises(profig.NotRegisteredError):
            c.write(buf)
        c.init('value', [])
        c['value'] = 3
        with self.assertRaises(profig.AdaptError):
            c.write(buf)
        c.reset(clean=True)
        c.init('value', 1)
        buf = io.BytesIO(b"""[value] = badvalue""")
        with self.assertRaises(profig.ConvertError):
            c.read(buf)
class TestErrors(unittest.TestCase):
    """Error-mode behaviour of the format parsers."""

    def test_FormatError(self):
        """In 'exception' error mode, malformed input raises FormatError."""
        c = profig.Config()
        c.format.error_mode = 'exception'
        buf = io.BytesIO(b"""a""")  # bare token: not a valid INI line
        with self.assertRaises(profig.FormatError):
            c.sync(buf)
class TestMisc(unittest.TestCase):
    """Assorted small API checks."""

    def test_NoValue(self):
        """The NoValue sentinel has a readable repr."""
        self.assertEqual(repr(profig.NoValue), 'NoValue')

    def test_get_source(self):
        """get_source resolves a bare name relative to this module's directory."""
        path = os.path.dirname(__file__)
        self.assertEqual(profig.get_source('test'), os.path.join(path, 'test'))
if profig.WIN:
    class TestRegistryFormat(unittest.TestCase):
        """Windows-registry backend tests (only defined on Windows)."""

        def setUp(self):
            # scratch key under HKCU so the tests never need elevation
            self.base_key = winreg.HKEY_CURRENT_USER
            self.path = r'Software\_profig_test'
            self.key = winreg.CreateKeyEx(self.base_key, self.path, 0,
                winreg.KEY_ALL_ACCESS)
            self.c = profig.Config(self.path, format='registry')

        def tearDown(self):
            # remove the scratch key tree
            self.c.format.delete(self.key)

        def test_basic(self):
            """Dotted keys map to subkeys; 'a' becomes the (Default) value of a\\."""
            c = self.c
            c.init('a', 1)
            c.init('a.a', 2)
            c.init('c', 'str')
            c.sync()
            k = winreg.OpenKeyEx(self.base_key, self.path)
            self.assertEqual(winreg.QueryValueEx(k, 'c')[0], 'str')
            k = winreg.OpenKeyEx(k, 'a')
            self.assertEqual(winreg.QueryValueEx(k, '')[0], 1)
            self.assertEqual(winreg.QueryValueEx(k, 'a')[0], 2)

        def test_sync_read_blank(self):
            """Values written directly to the registry read back uninitialized."""
            key = winreg.CreateKeyEx(self.key, 'a')
            winreg.SetValueEx(key, '', 0, winreg.REG_DWORD, 1)
            winreg.SetValueEx(key, '1', 0, winreg.REG_DWORD, 2)
            key = winreg.CreateKeyEx(self.key, 'b')
            winreg.SetValueEx(key, '', 0, winreg.REG_SZ, 'value')
            c = self.c
            c.read()
            self.assertEqual(c['a'], 1)
            self.assertEqual(c['a.1'], 2)
            self.assertEqual(c['b'], 'value')

        def test_sync_blank(self):
            # in this test, the value for b will be read from
            # '_profig_test\b\(Default)', but will be written back
            # to '_profig_test\b'. 'b' has no children, so it's considered
            # a root-level value.
            key = winreg.CreateKeyEx(self.key, 'a')
            winreg.SetValueEx(key, '', 0, winreg.REG_DWORD, 1)
            winreg.SetValueEx(key, '1', 0, winreg.REG_DWORD, 2)
            key = winreg.CreateKeyEx(self.key, 'b')
            winreg.SetValueEx(key, '', 0, winreg.REG_SZ, 'value')
            c = self.c
            c.sync()
            self.assertEqual(c['a'], 1)
            self.assertEqual(c['a.1'], 2)
            self.assertEqual(c['b'], 'value')

        def test_binary_read(self):
            """REG_BINARY values round-trip as bytes."""
            key = winreg.CreateKeyEx(self.key, 'a')
            winreg.SetValueEx(key, '', 0, winreg.REG_BINARY, b'\x00binary\xff')
            key = winreg.CreateKeyEx(key, 'b')
            winreg.SetValueEx(key, '', 0, winreg.REG_BINARY, b'also\x00binary\xff')
            c = self.c
            c.init('a', b'')
            c.init('a.b', b'')
            c.read()
            self.assertEqual(c['a'], b'\x00binary\xff')
            self.assertEqual(c['a.b'], b'also\x00binary\xff')

        def test_binary_write(self):
            """bytes values are written as REG_BINARY."""
            c = self.c
            c['a'] = b'\x00binary\xff'
            c.write()
            value = winreg.QueryValueEx(self.key, 'a')[0]
            self.assertEqual(value, b'\x00binary\xff')

        def test_unicode_read(self):
            """Non-ASCII key and value names read back intact."""
            key = winreg.CreateKeyEx(self.key, '\uff9c')
            winreg.SetValueEx(key, '', 0, winreg.REG_SZ, '\uff9c')
            c = self.c
            c.init('\uff9c', '')
            c.read()
            self.assertEqual(c['\uff9c'], '\uff9c')

        def test_unicode_write(self):
            """Non-ASCII key and value names write back intact."""
            c = self.c
            c.init('\uff9c', '\uff9c')
            c.write()
            value = winreg.QueryValueEx(self.key, '\uff9c')[0]
            self.assertEqual(value, '\uff9c')

        def test_unsupported_type_read(self):
            """A float stored as REG_SZ is converted on read."""
            key = winreg.CreateKeyEx(self.key, 'a')
            winreg.SetValueEx(key, '', 0, winreg.REG_SZ, '1.11')
            c = self.c
            c.init('a', 1.11)
            c.read()
            self.assertEqual(c['a'], 1.11)

        def test_unsupported_type_write(self):
            """A float has no native registry type and is stored as bytes."""
            c = self.c
            c.init('a', 1.11)
            c.write()
            value = winreg.QueryValueEx(self.key, 'a')[0]
            self.assertEqual(value, b'1.11')
if __name__ == '__main__':
    # silence logging so test output stays readable
    import logging
    logging.basicConfig(level=logging.CRITICAL)
    unittest.main()
| |
#from __future__ import print_function
import re
import ast
import sys
import pytest
from astprint import as_code, as_tree
def unshift(source):
    """Shift *source* to the left so that it starts with zero indentation.

    The common indent is taken from the first non-blank line; every other
    non-empty line must start with that same prefix.

    Raises:
        ValueError: if any non-empty line is indented differently than
            the first line.
    """
    source = source.rstrip("\n ").lstrip("\n")
    # indentation prefix of the first line; group(0) is the whole match
    indent = re.match(r"([ \t])*", source).group(0)
    lines = source.split("\n")
    shifted_lines = []
    for line in lines:
        line = line.rstrip()
        if len(line) > 0:
            if not line.startswith(indent):
                raise ValueError("Inconsistent indent at line " + repr(line))
            shifted_lines.append(line[len(indent):])
        else:
            # keep blank lines (they carry no indent to strip)
            shifted_lines.append(line)
    return "\n".join(shifted_lines)
def assert_ast_equal(test, expected):
    """Assert that two AST nodes are structurally identical.

    Compares ``ast.dump`` output, which ignores formatting differences.
    """
    assert ast.dump(test) == ast.dump(expected)
def check_transformation(source):
    """Round-trip *source* through ``as_code`` and ``as_tree``.

    Parses the (dedented) source, regenerates code and a tree dump from
    the AST, and asserts that re-parsing each yields an equivalent AST.
    """
    source = unshift(source)
    node = ast.parse(source)
    # Check as_code()
    new_source = as_code(node)
    new_node = ast.parse(new_source)
    # Comparing the ASTs to avoid differences in code formatting
    # popping up as errors.
    assert_ast_equal(node, new_node)
    # Check as_tree()
    tree_dump = as_tree(node)
    # the dump is an expression built from ast.* constructors
    new_node = eval(tree_dump, ast.__dict__)
    assert_ast_equal(node, new_node)
def skip_if_after(major, minor):
    """Skip the current test when running on Python >= (major, minor)."""
    if sys.version_info >= (major, minor):
        pytest.skip()


def skip_if_before(major, minor):
    """Skip the current test when running on Python < (major, minor)."""
    if sys.version_info < (major, minor):
        pytest.skip()
def test_str_default():
    """Plain string literals round-trip."""
    check_transformation(
        """
        a = 'abcd'
        """)


def test_str_unicode():
    """u'' literals round-trip (Py2 only)."""
    skip_if_after(3, 0)
    check_transformation(
        """
        a = u'abcd'
        """)


def test_str_bytes():
    """b'' literals round-trip (Py3 only)."""
    skip_if_before(3, 0)
    check_transformation(
        """
        a = b'abcd'
        """)


def test_name_constant():
    # NameConstant class was introduced in Py3.4 to represent some common constants
    # (Name for used before).
    check_transformation(
        """
        a = True
        b = False
        c = None
        """)
def test_dict():
    """Dict literals round-trip."""
    check_transformation(
        """
        a = {}
        a = {'g': 'd'}
        a = {1: 4.5, e: 1j+2}
        """)


def test_set():
    """Set literals round-trip (2.7+)."""
    skip_if_before(2, 7)
    check_transformation(
        """
        a = {'g'}
        a = {1, True}
        """)


def test_list():
    """List literals round-trip."""
    check_transformation(
        """
        a = []
        a = [1]
        a = ['a', None]
        """)


def test_dict_comprehension():
    """Dict comprehensions round-trip (2.7+)."""
    skip_if_before(2, 7)
    check_transformation(
        """
        a = {x:x+1 for x in range(10)}
        a = {x:'a' for x in range(10) if x > 5}
        """)


def test_set_comprehension():
    """Set comprehensions round-trip (2.7+)."""
    skip_if_before(2, 7)
    check_transformation(
        """
        a = {x for x in range(10)}
        a = {x for x in range(10) if x > 5}
        """)


def test_list_comprehension():
    """List comprehensions and generator expressions round-trip."""
    check_transformation(
        """
        a = [x for x in range(10)]
        a = [x for x in range(10) if x > 5]
        a = (x for x in range(10) if x > 5)
        """)
def test_tuple_assign():
    """Tuple-unpacking assignment round-trips."""
    check_transformation(
        """
        (a, b) = (1, 2)
        """)


def test_starred_assign():
    """Starred assignment targets round-trip (Py3 only)."""
    skip_if_before(3, 0)
    check_transformation(
        """
        a, *b = it
        """)


def test_multi_assign():
    """Chained assignment round-trips."""
    check_transformation(
        """
        a = b = 1
        """)


def test_augmented_assign():
    """Augmented assignment round-trips."""
    check_transformation(
        """
        a += 2
        """)


def test_unary_op():
    """Unary operators round-trip."""
    check_transformation(
        """
        a = -b
        b = not a
        """)
def test_slice():
    """Slice expressions, including extended slices, round-trip."""
    check_transformation(
        """
        a = l[:]
        a = l[1:]
        a = l[:1]
        a = l[1:3]
        a = l[::2]
        a = l[1:10:2]
        a = l[1:4,:5]
        """)


def test_yield():
    """yield statements round-trip."""
    check_transformation(
        """
        def gen(x):
            for i in range(x):
                yield i
        """)


def test_yield_from():
    """yield from round-trips (3.3+)."""
    skip_if_before(3, 3)
    check_transformation(
        """
        def gen2(x):
            for i in range(x):
                yield from gen(i)
        """)


def test_lambda():
    """lambda expressions, with varargs and defaults, round-trip."""
    check_transformation(
        """
        func = lambda x: x + 1
        func = lambda x, *args, **kwds: x + 1
        func = lambda x, a=4: x + a
        """)


def test_ellipsis():
    """Ellipsis in subscripts round-trips (Py3 only)."""
    skip_if_before(3, 0)
    check_transformation(
        """
        a = l[1:5,2:6,...,:10]
        """)


def test_del():
    """del statements round-trip."""
    check_transformation(
        """
        del a[0]
        del x, r
        """)
def test_import():
    """All import statement forms round-trip."""
    check_transformation(
        """
        from .mod import a as b
        from ..mod import a as b
        from math import pi as PI
        from math import sqrt
        import math
        import a as b
        """)


def test_function_simple():
    """A basic function definition round-trips."""
    check_transformation(
        """
        def func(x, a=1):
            return x + a
        """)


def test_function_decorator():
    """Stacked decorators round-trip."""
    check_transformation(
        """
        @dec1
        @dec2
        def func(x, a=1):
            return x + a
        """)


def test_function_varargs():
    """*args/**kwds signatures round-trip."""
    check_transformation(
        """
        def func(x, y, *args, **kwds):
            return x + y
        """)


def test_global():
    """global statements round-trip."""
    check_transformation(
        """
        def func(x, y, *args, **kwds):
            global a
            global c, d
        """)


def test_nonlocal():
    """nonlocal statements round-trip (Py3 only)."""
    skip_if_before(3, 0)
    check_transformation(
        """
        def func(x, y, *args, **kwds):
            nonlocal a
            nonlocal c, d
        """)
def test_call():
    """Call expressions with every argument style round-trip."""
    check_transformation(
        """
        func(1)
        func(1, 2)
        func(a, b, c=3)
        func(a, *args)
        func(b, *args, **kwds)
        """)


def test_attribute():
    """Attribute access round-trips."""
    check_transformation(
        """
        foo.bar()
        """)


def test_for():
    """for loops with break/continue round-trip."""
    check_transformation(
        """
        for i, j in range(10):
            do_stuff()
            break
            continue
        """)


def test_while():
    """while loops with break/continue round-trip."""
    check_transformation(
        """
        while r > 0:
            do_stuff()
            break
            continue
        """)


def test_if_else():
    """if/elif/else chains round-trip."""
    check_transformation(
        """
        if a:
            sutff()
        if (b and c):
            foo()
        else:
            bar()
        if (c and d):
            foo()
        elif a:
            bar()
        else:
            blah()
        """)


def test_body_else():
    """for/else round-trips."""
    check_transformation(
        """
        for i in range(5):
            test(i)
        else:
            do_stuff()
        """)
def test_ternary_if_else():
    """Conditional expressions round-trip."""
    check_transformation(
        """
        a = do_this() if e + 5 else do_that()
        """)


def test_try():
    """try/except/finally in all combinations round-trips."""
    check_transformation(
        """
        try:
            do_stuff()
        except:
            raise
        try:
            do_stuff()
        except Exception:
            boo()
        except OtherException as a:
            hoo()
        except:
            raise
        try:
            do_stuff()
        finally:
            clean_up()
        try:
            do_stuff()
        except Exception:
            boo()
        except OtherException as a:
            hoo()
        finally:
            clean_up()
        """)


def test_raise():
    """raise statements (bare, class, call) round-trip."""
    check_transformation(
        """
        try:
            do()
        except:
            raise
        raise Exception
        raise Exception(1, 2)
        """)


def test_raise_py2():
    """Py2 three-expression raise round-trips (Py2 only)."""
    skip_if_after(3, 0)
    check_transformation(
        """
        raise Exception, value
        raise Exception, value, traceback
        """)


def test_raise_from():
    """raise ... from ... round-trips (Py3 only)."""
    skip_if_before(3, 0)
    check_transformation(
        """
        raise Exception() from OtherException()
        """)
def test_with():
    """with statements, with and without 'as', round-trip."""
    check_transformation(
        """
        with open('stuff'):
            do_something()
        with open('stuff') as f:
            do_something()
        """)


def test_multiple_with():
    """Multi-manager with statements round-trip (2.7+)."""
    skip_if_before(2, 7)
    check_transformation(
        """
        with open('stuff') as f, open('other_stuff') as f2:
            do_something()
        """)


def test_assert():
    """assert statements, with and without a message, round-trip."""
    check_transformation(
        """
        assert a == 1
        assert a > 1, 'error message'
        """)


def test_class_def():
    """Class definitions with zero or more bases round-trip."""
    check_transformation(
        """
        class A:
            pass
        class A(B):
            pass
        class A(B, C):
            pass
        """)


def test_class_def_extended():
    """Py3 class definitions with keywords and star-args round-trip."""
    skip_if_before(3, 0)
    check_transformation(
        """
        class A(metaclass=D):
            pass
        class A(*args, **kwds):
            pass
        """)


def test_print():
    """Py2 print statements round-trip (Py2 only)."""
    skip_if_after(3, 0)
    check_transformation(
        """
        print 1
        print a, b
        print a, b,
        print >> stderr, c, d
        """)


def test_repr():
    """Py2 backtick repr round-trips (Py2 only)."""
    skip_if_after(3, 0)
    check_transformation(
        """
        a = `b`
        """)
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from tests.unit import utils
SPACING = np.spacing # pylint: disable=no-member
class Test_vector_close(unittest.TestCase):
    """Tests for helpers.vector_close (relative vector comparison)."""

    @staticmethod
    def _call_function_under_test(vec1, vec2, **kwargs):
        from bezier.hazmat import helpers

        return helpers.vector_close(vec1, vec2, **kwargs)

    def test_identical(self):
        vec1 = np.asfortranarray([0.5, 4.0])
        self.assertTrue(self._call_function_under_test(vec1, vec1))

    def test_far_apart(self):
        vec1 = np.asfortranarray([0.0, 6.0])
        vec2 = np.asfortranarray([1.0, -4.0])
        self.assertFalse(self._call_function_under_test(vec1, vec2))

    def test_close_but_different(self):
        vec1 = np.asfortranarray([2.25, -3.5])
        # perturbation far below the default tolerance
        vec2 = vec1 + np.asfortranarray([-5.0, 12.0]) / 2.0**43
        self.assertTrue(self._call_function_under_test(vec1, vec2))

    def test_custom_epsilon(self):
        vec1 = np.asfortranarray([3.0, 4.0])
        vec2 = np.asfortranarray([2.0, 5.0])
        self.assertTrue(self._call_function_under_test(vec1, vec2, eps=0.5))
        self.assertFalse(self._call_function_under_test(vec1, vec2))

    def test_near_zero(self):
        vec1 = np.asfortranarray([0.0, 0.0])
        vec2 = np.asfortranarray([3.0, 4.0]) / 2.0**45
        self.assertTrue(self._call_function_under_test(vec1, vec2))

    def test_near_zero_fail(self):
        vec1 = np.asfortranarray([1.0, 0.0]) / 2.0**20
        vec2 = np.asfortranarray([0.0, 0.0])
        self.assertFalse(self._call_function_under_test(vec1, vec2))
class Test_in_interval(unittest.TestCase):
    """Tests for helpers.in_interval (closed-interval membership)."""

    @staticmethod
    def _call_function_under_test(value, start, end):
        from bezier.hazmat import helpers

        return helpers.in_interval(value, start, end)

    def test_interior(self):
        self.assertTrue(self._call_function_under_test(1.5, 1.0, 2.0))

    def test_barely_inside(self):
        # pylint: disable=assignment-from-no-return
        local_epsilon = SPACING(1.0)
        # pylint: enable=assignment-from-no-return
        self.assertTrue(
            self._call_function_under_test(1.0 + local_epsilon, 1.0, 2.0)
        )

    def test_barely_outside(self):
        # pylint: disable=assignment-from-no-return
        local_epsilon = SPACING(1.0)
        # pylint: enable=assignment-from-no-return
        self.assertFalse(
            self._call_function_under_test(1.0 - local_epsilon / 2.0, 1.0, 2.0)
        )

    def test_outside(self):
        self.assertFalse(self._call_function_under_test(-1.0, 1.0, 2.0))
class Test_bbox(unittest.TestCase):
    """Tests for helpers.bbox (axis-aligned bounding box of nodes)."""

    @staticmethod
    def _call_function_under_test(nodes):
        from bezier.hazmat import helpers

        return helpers.bbox(nodes)

    def test_it(self):
        nodes = np.asfortranarray([[0.0, 1.0], [5.0, 3.0]])
        left, right, bottom, top = self._call_function_under_test(nodes)
        self.assertEqual(left, 0.0)
        self.assertEqual(right, 1.0)
        self.assertEqual(bottom, 3.0)
        self.assertEqual(top, 5.0)

    def test_lots_of_values(self):
        nodes = np.asfortranarray(
            [[1.0, 2.0, -1.0, 5.0, 4.0, 0.0], [0.0, 1.0, 2.0, -3.0, 4.0, 0.0]]
        )
        left, right, bottom, top = self._call_function_under_test(nodes)
        self.assertEqual(left, -1.0)
        self.assertEqual(right, 5.0)
        self.assertEqual(bottom, -3.0)
        self.assertEqual(top, 4.0)
class Test_contains_nd(unittest.TestCase):
    """Tests for helpers.contains_nd (point inside per-dimension bounds)."""

    @staticmethod
    def _call_function_under_test(nodes, point):
        from bezier.hazmat import helpers

        return helpers.contains_nd(nodes, point)

    def test_below(self):
        nodes = np.asfortranarray([[0.0, 0.5, 1.0], [1.0, 0.0, 2.0]])
        point = np.asfortranarray([-0.5, 1.0])
        self.assertFalse(self._call_function_under_test(nodes, point))

    def test_above(self):
        nodes = np.asfortranarray([[0.0, -1.0], [-4.0, 1.0], [2.0, 3.0]])
        point = np.asfortranarray([-0.5, 2.0, 2.5])
        self.assertFalse(self._call_function_under_test(nodes, point))

    def test_inside(self):
        nodes = np.asfortranarray(
            [[0.0, 1.0], [1.0, -2.0], [2.0, -4.0], [3.0, 1.0]]
        )
        point = np.asfortranarray([0.5, 0.0, 0.0, 2.0])
        self.assertTrue(self._call_function_under_test(nodes, point))

    def test_shape_mismatch(self):
        nodes = np.asfortranarray([[0.0, 1.0, 2.0], [1.0, 3.0, 6.0]])
        point = np.asfortranarray([0.0, 1.5, 1.0])
        with self.assertRaises(ValueError):
            self._call_function_under_test(nodes, point)
class Test_cross_product(utils.NumPyTestCase):
    """Tests for helpers.cross_product (2D cross product scalar)."""

    @staticmethod
    def _call_function_under_test(vec0, vec1):
        from bezier.hazmat import helpers

        return helpers.cross_product(vec0, vec1)

    def test_it(self):
        vec0 = np.asfortranarray([1.0, 7.0]) / 8.0
        vec1 = np.asfortranarray([-11.0, 24.0]) / 32.0
        result = self._call_function_under_test(vec0, vec1)
        # embed in 3D and compare against numpy's full cross product
        vec0_as_3d = np.zeros((3,), order="F")
        vec0_as_3d[:2] = vec0
        vec1_as_3d = np.zeros((3,), order="F")
        vec1_as_3d[:2] = vec1
        actual_cross = np.cross(vec0_as_3d, vec1_as_3d)
        expected = np.asfortranarray([0.0, 0.0, result])
        self.assertEqual(actual_cross, expected)
class Test_matrix_product(utils.NumPyTestCase):
    """Tests for helpers.matrix_product (Fortran-ordered matmul)."""

    @staticmethod
    def _call_function_under_test(mat1, mat2):
        from bezier.hazmat import helpers

        return helpers.matrix_product(mat1, mat2)

    def test_it(self):
        mat1 = np.asfortranarray([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
        mat2 = np.asfortranarray([[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]])
        result = self._call_function_under_test(mat1, mat2)
        expected = np.asfortranarray(
            [[27.0, 30.0, 33.0], [61.0, 68.0, 75.0], [95.0, 106.0, 117.0]]
        )
        self.assertEqual(result, expected)
        # Make sure our data is F-contiguous.
        self.assertTrue(result.flags.f_contiguous)
        self.assertFalse(result.flags.c_contiguous)
        # matrix_product() has the side-effect of returning a "view"
        # since it returns the transpose of a product of transposes.
        self.assertFalse(result.flags.owndata)
class Test_wiggle_interval(unittest.TestCase):
    """Tests for helpers.wiggle_interval (clamp values near [0, 1])."""

    # default wiggle tolerance used by the helper
    WIGGLE = 0.5**44
    MACHINE_EPS = 0.5**52

    @staticmethod
    def _call_function_under_test(value, **kwargs):
        from bezier.hazmat import helpers

        return helpers.wiggle_interval(value, **kwargs)

    def test_at_endpoint(self):
        # Really just making sure the function doesn't raise.
        result, success = self._call_function_under_test(0.0)
        self.assertTrue(success)
        self.assertEqual(result, 0.0)
        result, success = self._call_function_under_test(1.0)
        self.assertTrue(success)
        self.assertEqual(result, 1.0)

    def test_near_endpoint(self):
        _, success = self._call_function_under_test(1.0 + 0.5**20)
        self.assertFalse(success)

    def test_outside_below(self):
        _, success = self._call_function_under_test(-0.25)
        self.assertFalse(success)

    def test_outside_above(self):
        _, success = self._call_function_under_test(1.5)
        self.assertFalse(success)

    def test_valid(self):
        # Really just making sure the function doesn't raise.
        result, success = self._call_function_under_test(0.25)
        self.assertTrue(success)
        self.assertEqual(result, 0.25)

    def test_wiggle_below(self):
        value = -(0.5**60)
        result, success = self._call_function_under_test(value)
        self.assertTrue(success)
        self.assertEqual(result, 0.0)

    def test_wiggle_above(self):
        value = 1.0 + self.MACHINE_EPS
        result, success = self._call_function_under_test(value)
        self.assertTrue(success)
        self.assertEqual(result, 1.0)

    def test_outer_boundary(self):
        # Values near / at the left-hand boundary.
        value = -self.WIGGLE + self.MACHINE_EPS * self.WIGGLE
        self.assertEqual(self._call_function_under_test(value), (0.0, True))
        value = -self.WIGGLE + self.MACHINE_EPS * self.WIGGLE / 2
        self.assertEqual(self._call_function_under_test(value), (0.0, True))
        value = -self.WIGGLE
        _, success = self._call_function_under_test(value)
        self.assertFalse(success)
        value = -self.WIGGLE - self.MACHINE_EPS * self.WIGGLE
        _, success = self._call_function_under_test(value)
        self.assertFalse(success)
        # Values near / at the right-hand boundary.
        value = 1.0 + self.WIGGLE - 2 * self.MACHINE_EPS
        self.assertEqual(self._call_function_under_test(value), (1.0, True))
        value = 1.0 + self.WIGGLE - self.MACHINE_EPS
        self.assertEqual(self._call_function_under_test(value), (1.0, True))
        value = 1.0 + self.WIGGLE
        _, success = self._call_function_under_test(value)
        self.assertFalse(success)
        value = 1.0 + self.WIGGLE + self.MACHINE_EPS
        _, success = self._call_function_under_test(value)
        self.assertFalse(success)

    def test_inner_boundary(self):
        # Values near / at the left-hand boundary.
        value = self.WIGGLE - self.WIGGLE * self.MACHINE_EPS
        self.assertEqual(self._call_function_under_test(value), (0.0, True))
        value = self.WIGGLE - self.WIGGLE * self.MACHINE_EPS / 2
        self.assertEqual(self._call_function_under_test(value), (0.0, True))
        value = self.WIGGLE
        self.assertEqual(self._call_function_under_test(value), (value, True))
        value = self.WIGGLE + self.WIGGLE * self.MACHINE_EPS
        self.assertEqual(self._call_function_under_test(value), (value, True))
        # Values near / at the right-hand boundary.
        value = 1.0 - self.WIGGLE - self.MACHINE_EPS
        self.assertEqual(self._call_function_under_test(value), (value, True))
        value = 1.0 - self.WIGGLE - self.MACHINE_EPS / 2
        self.assertEqual(self._call_function_under_test(value), (value, True))
        value = 1.0 - self.WIGGLE
        self.assertEqual(self._call_function_under_test(value), (value, True))
        value = 1.0 - self.WIGGLE + self.MACHINE_EPS / 2
        self.assertEqual(self._call_function_under_test(value), (1.0, True))
        value = 1.0 - self.WIGGLE + self.MACHINE_EPS
        self.assertEqual(self._call_function_under_test(value), (1.0, True))

    def test_custom_wiggle(self):
        value = 1.25
        _, success = self._call_function_under_test(value)
        self.assertFalse(success)
        result, success = self._call_function_under_test(value, wiggle=0.5)
        self.assertTrue(success)
        self.assertEqual(result, 1.0)
        value = 0.875
        self.assertEqual(self._call_function_under_test(value), (value, True))
        self.assertEqual(
            self._call_function_under_test(value, wiggle=0.25), (1.0, True)
        )
class Test_cross_product_compare(unittest.TestCase):
    """Tests for helpers.cross_product_compare (orientation of three points)."""

    @staticmethod
    def _call_function_under_test(start, candidate1, candidate2):
        from bezier.hazmat import helpers

        return helpers.cross_product_compare(start, candidate1, candidate2)

    def test_it(self):
        start = np.asfortranarray([0.0, 0.0])
        candidate1 = np.asfortranarray([1.0, 0.0])
        candidate2 = np.asfortranarray([1.0, 1.0])
        result = self._call_function_under_test(start, candidate1, candidate2)
        self.assertEqual(result, 1.0)
class Test_in_sorted(utils.NumPyTestCase):
    """Tests for helpers.in_sorted (binary-search membership)."""

    @staticmethod
    def _call_function_under_test(values, value):
        from bezier.hazmat import helpers

        return helpers.in_sorted(values, value)

    def test_inside(self):
        values = [0, 5, 8, 12, 17]
        self.assertFalse(self._call_function_under_test(values, 4))
        self.assertTrue(self._call_function_under_test(values, 5))
        self.assertFalse(self._call_function_under_test(values, 6))
        self.assertFalse(self._call_function_under_test(values, 7))
        self.assertTrue(self._call_function_under_test(values, 8))
        self.assertFalse(self._call_function_under_test(values, 9))
        self.assertFalse(self._call_function_under_test(values, 10))
        self.assertFalse(self._call_function_under_test(values, 11))
        self.assertTrue(self._call_function_under_test(values, 12))
        self.assertFalse(self._call_function_under_test(values, 13))

    def test_left_endpoint(self):
        values = [9, 11, 220]
        self.assertFalse(self._call_function_under_test(values, 7))
        self.assertFalse(self._call_function_under_test(values, 8))
        self.assertTrue(self._call_function_under_test(values, 9))
        self.assertFalse(self._call_function_under_test(values, 10))

    def test_right_endpoint(self):
        values = [16, 18, 20]
        self.assertFalse(self._call_function_under_test(values, 19))
        self.assertTrue(self._call_function_under_test(values, 20))
        self.assertFalse(self._call_function_under_test(values, 21))
        self.assertFalse(self._call_function_under_test(values, 22))
class Test_simple_convex_hull(utils.NumPyTestCase):
    """Tests for helpers.simple_convex_hull (2D convex hull of points)."""

    @staticmethod
    def _call_function_under_test(points):
        from bezier.hazmat import helpers

        return helpers.simple_convex_hull(points)

    def test_triangle_centroid(self):
        points = np.asfortranarray(
            [[0.0, 0.0, 1.0, 3.0, 0.0], [0.0, 3.0, 1.0, 0.0, 3.0]]
        )
        polygon = self._call_function_under_test(points)
        expected = np.asfortranarray([[0.0, 3.0, 0.0], [0.0, 0.0, 3.0]])
        self.assertEqual(expected, polygon)

    def test_two_points(self):
        points = np.asfortranarray([[0.0, 1.0], [0.0, 0.0]])
        polygon = self._call_function_under_test(points)
        expected = np.asfortranarray([[0.0, 1.0], [0.0, 0.0]])
        self.assertEqual(expected, polygon)
        # Switch the order of the points.
        points = np.asfortranarray([[1.0, 0.0], [0.0, 0.0]])
        polygon = self._call_function_under_test(points)
        self.assertEqual(expected, polygon)

    def test_one_point(self):
        points = np.asfortranarray([[2.0], [3.0]])
        polygon = self._call_function_under_test(points)
        expected = points
        self.assertEqual(expected, polygon)

    def test_zero_points(self):
        points = np.empty((2, 0), order="F")
        polygon = self._call_function_under_test(points)
        self.assertEqual(polygon.shape, (2, 0))

    def test_10x10_grid(self):
        points = np.empty((2, 100), order="F")
        index = 0
        for i in range(10):
            for j in range(10):
                points[:, index] = i, j
                index += 1
        polygon = self._call_function_under_test(points)
        expected = np.asfortranarray(
            [[0.0, 9.0, 9.0, 0.0], [0.0, 0.0, 9.0, 9.0]]
        )
        self.assertEqual(expected, polygon)

    def test_almost_linear(self):
        from bezier.hazmat import helpers

        # In a previous implementation, this case broke the algorithm
        # because the middle point of the line was placed in both the
        # upper and lower hull (which used 4 points for the hull when
        # only 3 were allocated).
        points = np.asfortranarray(
            [
                [
                    -0.12878911375710406,
                    -0.08626630936431968,
                    -0.043743504971535306,
                ],
                [
                    -0.05306646729159134,
                    -0.0032018988543520074,
                    0.04666266958288733,
                ],
            ]
        )
        polygon = self._call_function_under_test(points)
        expected = points
        self.assertEqual(expected, polygon)
        # Also verify why the case failed previously.
        point0 = points[:, 0]
        point1 = points[:, 1]
        point2 = points[:, 2]
        compare_lower = helpers.cross_product_compare(point0, point1, point2)
        self.assertGreater(compare_lower, 0.0)
        compare_upper = helpers.cross_product_compare(point2, point1, point0)
        self.assertGreater(compare_upper, 0.0)
class Test_is_separating(unittest.TestCase):
    """Tests for helpers.is_separating (separating-axis check)."""

    @staticmethod
    def _call_function_under_test(direction, polygon1, polygon2):
        from bezier.hazmat import helpers

        return helpers.is_separating(direction, polygon1, polygon2)

    def test_it(self):
        direction = np.asfortranarray([0.0, 1.0])
        polygon1 = np.asfortranarray([[1.0, 3.0, -2.0], [1.0, 4.0, 3.0]])
        polygon2 = np.asfortranarray(
            [[3.5, 6.5, 6.5, 3.5], [3.0, 3.0, 7.0, 7.0]]
        )
        self.assertTrue(
            self._call_function_under_test(direction, polygon1, polygon2)
        )
class Test_polygon_collide(unittest.TestCase):
    """Tests for helpers.polygon_collide (convex polygon intersection)."""

    @staticmethod
    def _call_function_under_test(polygon1, polygon2):
        from bezier.hazmat import helpers

        return helpers.polygon_collide(polygon1, polygon2)

    def test_first_edge(self):
        polygon1 = np.asfortranarray([[1.0, 3.0, -2.0], [1.0, 4.0, 3.0]])
        polygon2 = np.asfortranarray(
            [[3.5, 6.5, 6.5, 3.5], [3.0, 3.0, 7.0, 7.0]]
        )
        self.assertFalse(self._call_function_under_test(polygon1, polygon2))
        # Check with arguments swapped.
        self.assertFalse(self._call_function_under_test(polygon2, polygon1))

    def test_colliding(self):
        polygon1 = np.asfortranarray([[0.0, 3.0, 0.0], [0.0, 0.0, 3.0]])
        polygon2 = np.asfortranarray([[1.0, 4.0, 1.0], [1.0, 1.0, 4.0]])
        self.assertTrue(self._call_function_under_test(polygon1, polygon2))

    def test_non_first_edge_polygon1(self):
        polygon1 = np.asfortranarray(
            [[1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0]]
        )
        polygon2 = np.asfortranarray(
            [[2.0, 3.0, 3.0, 2.0], [0.0, 0.0, 1.0, 1.0]]
        )
        self.assertFalse(self._call_function_under_test(polygon1, polygon2))

    def test_non_first_edge_polygon2(self):
        polygon1 = np.asfortranarray(
            [[0.0, 2.0, 2.0, 0.0], [0.0, 0.0, 2.0, 2.0]]
        )
        polygon2 = np.asfortranarray([[1.0, 4.0, 4.0], [4.0, 1.0, 4.0]])
        self.assertFalse(self._call_function_under_test(polygon1, polygon2))
class Test_solve2x2(unittest.TestCase):
    """Tests for helpers.solve2x2 (2x2 linear solve with pivoting)."""

    @staticmethod
    def _call_function_under_test(lhs, rhs):
        from bezier.hazmat import helpers

        return helpers.solve2x2(lhs, rhs)

    def test_solve_without_row_swap(self):
        lhs = np.asfortranarray([[2.0, 3.0], [1.0, 2.0]])
        rhs = np.asfortranarray([31.0, 19.0])
        singular, x_val, y_val = self._call_function_under_test(lhs, rhs)
        self.assertFalse(singular)
        self.assertEqual(x_val, 5.0)
        self.assertEqual(y_val, 7.0)

    def test_solve_with_row_swap(self):
        lhs = np.asfortranarray([[1.0, 0.0], [4.0, 1.0]])
        rhs = np.asfortranarray([3.0, 13.0])
        singular, x_val, y_val = self._call_function_under_test(lhs, rhs)
        self.assertFalse(singular)
        self.assertEqual(x_val, 3.0)
        self.assertEqual(y_val, 1.0)

    def test_zero_column(self):
        lhs = np.zeros((2, 2), order="F")
        singular, x_val, y_val = self._call_function_under_test(lhs, None)
        self.assertTrue(singular)
        self.assertIsNone(x_val)
        self.assertIsNone(y_val)

    def test_singular_without_row_swap(self):
        lhs = np.asfortranarray([[2.0, 4.0], [1.0, 2.0]])
        singular, x_val, y_val = self._call_function_under_test(lhs, None)
        self.assertTrue(singular)
        self.assertIsNone(x_val)
        self.assertIsNone(y_val)

    def test_singular_with_row_swap(self):
        lhs = np.asfortranarray([[3.0, 1.0], [12.0, 4.0]])
        singular, x_val, y_val = self._call_function_under_test(lhs, None)
        self.assertTrue(singular)
        self.assertIsNone(x_val)
        self.assertIsNone(y_val)
class TestUnsupportedDegree(unittest.TestCase):
    """Tests for the helpers.UnsupportedDegree exception type."""

    @staticmethod
    def _get_target_class():
        from bezier.hazmat import helpers

        return helpers.UnsupportedDegree

    def _make_one(self, *args, **kwargs):
        klass = self._get_target_class()
        return klass(*args, **kwargs)

    def test_inherit_not_implemented(self):
        klass = self._get_target_class()
        self.assertTrue(issubclass(klass, NotImplementedError))

    def test_constructor_defaults(self):
        exc = self._make_one(3)
        self.assertEqual(exc.degree, 3)
        self.assertEqual(exc.supported, ())

    def test_constructor_explicit(self):
        exc = self._make_one(4, supported=(1, 2))
        self.assertEqual(exc.degree, 4)
        self.assertEqual(exc.supported, (1, 2))

    def test___str__zero_supported(self):
        exc = self._make_one(1)
        as_str = str(exc)
        expected = "degree=1"
        self.assertEqual(as_str, expected)

    def test___str__one_supported(self):
        exc = self._make_one(2, supported=(1,))
        as_str = str(exc)
        expected = "The only degree supported at this time is 1 (degree=2)"
        self.assertEqual(as_str, expected)

    def test___str__multiple_supported(self):
        exc = self._make_one(3, supported=(1, 2))
        as_str = str(exc)
        expected = (
            "The only degrees supported at this time are 1 and 2 (degree=3)"
        )
        self.assertEqual(as_str, expected)
        # supported degrees are reported in the order given, not sorted
        exc = self._make_one(4, supported=(1, 3, 2))
        as_str = str(exc)
        expected = (
            "The only degrees supported at this "
            "time are 1, 3 and 2 (degree=4)"
        )
        self.assertEqual(as_str, expected)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
def GenerateNumpyRandomRGB(shape):
  """Return a random array of the given shape with values in [0, 1).

  Only generate floating points that are fractions like n / 256, since they
  are RGB pixels. Some low-precision floating point types in this test can't
  handle arbitrary precision floating points well.
  """
  return np.random.randint(0, 256, shape) / 256.
class RGBToHSVTest(xla_test.XLATestCase):
  """Tests XLA rgb_to_hsv / hsv_to_rgb against batching and colorsys."""

  def testBatch(self):
    # Build an arbitrary RGB image
    np.random.seed(7)
    batch_size = 5
    shape = (batch_size, 2, 7, 3)
    for nptype in self.float_types:
      inp = GenerateNumpyRandomRGB(shape).astype(nptype)
      # Convert to HSV and back, as a batch and individually
      with self.cached_session() as sess:
        batch0 = array_ops.placeholder(nptype, shape=shape)
        with self.test_scope():
          batch1 = image_ops.rgb_to_hsv(batch0)
          batch2 = image_ops.hsv_to_rgb(batch1)
        split0 = array_ops.unstack(batch0)
        with self.test_scope():
          split1 = list(map(image_ops.rgb_to_hsv, split0))
          split2 = list(map(image_ops.hsv_to_rgb, split1))
        join1 = array_ops.stack(split1)
        join2 = array_ops.stack(split2)
        batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2],
                                                {batch0: inp})
      # Verify that processing batch elements together is the same as separate
      self.assertAllClose(batch1, join1)
      self.assertAllClose(batch2, join2)
      self.assertAllCloseAccordingToType(
          batch2, inp, bfloat16_atol=0.03, half_rtol=0.02)

  def testRGBToHSVRoundTrip(self):
    data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    for nptype in self.float_types:
      rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
      with self.cached_session():
        placeholder = array_ops.placeholder(nptype)
        with self.test_scope():
          hsv = image_ops.rgb_to_hsv(placeholder)
          rgb = image_ops.hsv_to_rgb(hsv)
        rgb_tf = rgb.eval(feed_dict={placeholder: rgb_np})
      self.assertAllCloseAccordingToType(rgb_tf, rgb_np, bfloat16_atol=0.03)

  def testRGBToHSVNumpy(self):
    """Tests the RGB to HSV conversion matches a reference implementation."""
    for nptype in self.float_types:
      rgb_flat = GenerateNumpyRandomRGB((64, 3)).astype(nptype)
      rgb_np = rgb_flat.reshape(4, 4, 4, 3)
      hsv_np = np.array([
          colorsys.rgb_to_hsv(
              r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
          for r, g, b in rgb_flat
      ])
      hsv_np = hsv_np.reshape(4, 4, 4, 3)
      with self.cached_session():
        placeholder = array_ops.placeholder(nptype)
        with self.test_scope():
          hsv_op = image_ops.rgb_to_hsv(placeholder)
        hsv_tf = hsv_op.eval(feed_dict={placeholder: rgb_np})
      self.assertAllCloseAccordingToType(hsv_tf, hsv_np)
class AdjustContrastTest(xla_test.XLATestCase):
  """Tests XLA adjust_contrast against fixed goldens and a numpy reference."""

  def _testContrast(self, x_np, y_np, contrast_factor):
    """Runs adjust_contrast on `x_np` and checks the result equals `y_np`."""
    with self.cached_session():
      x = array_ops.placeholder(x_np.dtype, shape=x_np.shape)
      flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
      # Only the adjust_contrast op itself runs under the XLA test scope;
      # dtype conversions happen outside it.
      with self.test_scope():
        y = image_ops.adjust_contrast(flt_x, contrast_factor)
      y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
      y_tf = y.eval({x: x_np})
      self.assertAllClose(y_tf, y_np, 1e-6)

  def testFloatContrast(self):
    x_shape = [1, 2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
    # Golden values; contrast scaling can push floats outside [0, 255].
    y_data = [
        -45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
        134.75, 409.25, -116.5
    ]
    y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
    self._testContrast(x_np, y_np, contrast_factor=2.0)

  def testBatchContrast(self):
    # uint8 inputs are saturated back into [0, 255] on conversion.
    x_shape = [2, 1, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    self._testContrast(x_np, y_np, contrast_factor=2.0)

  def _adjustContrastNp(self, x_np, contrast_factor):
    """Numpy reference: scale deviation from the per-image (H, W) mean."""
    mean = np.mean(x_np, (1, 2), keepdims=True)
    y_np = mean + contrast_factor * (x_np - mean)
    return y_np

  def _adjustContrastTf(self, x_np, contrast_factor):
    """Runs the TF/XLA adjust_contrast op and returns the numpy result."""
    with self.cached_session():
      x = array_ops.placeholder(np.float32)
      with self.test_scope():
        y = image_ops.adjust_contrast(x, contrast_factor)
      y_tf = y.eval({x: x_np})
      return y_tf

  def testRandomContrast(self):
    x_shapes = [
        [1, 2, 2, 3],
        [2, 1, 2, 3],
        [1, 2, 2, 3],
        [2, 5, 5, 3],
        [2, 1, 1, 3],
    ]
    for x_shape in x_shapes:
      x_np = np.random.rand(*x_shape) * 255.
      # Random factor in [0.1, 2.1).
      contrast_factor = np.random.rand() * 2.0 + 0.1
      y_np = self._adjustContrastNp(x_np, contrast_factor)
      y_tf = self._adjustContrastTf(x_np, contrast_factor)
      self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
class AdjustHueTest(xla_test.XLATestCase):
  """Tests XLA adjust_hue against fixed goldens and a colorsys reference."""

  def testAdjustNegativeHue(self):
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    delta = -0.25
    y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session():
      x = array_ops.placeholder(x_np.dtype, shape=x_shape)
      flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
      # Only adjust_hue runs under the XLA test scope.
      with self.test_scope():
        y = gen_image_ops.adjust_hue(flt_x, delta)
      y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
      y_tf = y.eval({x: x_np})
      self.assertAllEqual(y_tf, y_np)

  def testAdjustPositiveHue(self):
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    delta = 0.25
    y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session():
      x = array_ops.placeholder(x_np.dtype, shape=x_shape)
      flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
      with self.test_scope():
        y = gen_image_ops.adjust_hue(flt_x, delta)
      y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
      y_tf = y.eval({x: x_np})
      self.assertAllEqual(y_tf, y_np)

  def testBatchAdjustHue(self):
    # Same pixels as testAdjustPositiveHue but with a leading batch dim.
    x_shape = [2, 1, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    delta = 0.25
    y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session():
      x = array_ops.placeholder(x_np.dtype, shape=x_shape)
      flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
      with self.test_scope():
        y = gen_image_ops.adjust_hue(flt_x, delta)
      y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
      y_tf = y.eval({x: x_np})
      self.assertAllEqual(y_tf, y_np)

  def _adjustHueNp(self, x_np, delta_h):
    """Pixel-wise colorsys reference for adjust_hue."""
    self.assertEqual(x_np.shape[-1], 3)
    x_v = x_np.reshape([-1, 3])
    y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
    channel_count = x_v.shape[0]
    for i in xrange(channel_count):
      r = x_v[i][0]
      g = x_v[i][1]
      b = x_v[i][2]
      h, s, v = colorsys.rgb_to_hsv(r, g, b)
      h += delta_h
      # Wrap the shifted hue back into [0, 1); the +10.0 keeps the fmod
      # argument positive for any delta_h in [-1, 1].
      h = math.fmod(h + 10.0, 1.0)
      r, g, b = colorsys.hsv_to_rgb(h, s, v)
      y_v[i][0] = r
      y_v[i][1] = g
      y_v[i][2] = b
    return y_v.reshape(x_np.shape)

  def _adjustHueTf(self, x_np, delta_h):
    """Runs the TF/XLA adjust_hue op and returns the numpy result."""
    with self.cached_session():
      x = array_ops.placeholder(dtypes.float32)
      with self.test_scope():
        y = gen_image_ops.adjust_hue(x, delta_h)
      y_tf = y.eval({x: x_np})
      return y_tf

  def testAdjustRandomHue(self):
    x_shapes = [
        [2, 2, 3],
        [4, 2, 3],
        [2, 4, 3],
        [2, 5, 3],
        [1000, 1, 3],
    ]
    # Styles force channel equalities to exercise hue edge cases
    # (e.g. gray pixels, ties between channels).
    test_styles = [
        "all_random",
        "rg_same",
        "rb_same",
        "gb_same",
        "rgb_same",
    ]
    for x_shape in x_shapes:
      for test_style in test_styles:
        x_np = np.random.rand(*x_shape) * 255.
        delta_h = np.random.rand() * 2.0 - 1.0
        if test_style == "all_random":
          pass
        elif test_style == "rg_same":
          x_np[..., 1] = x_np[..., 0]
        elif test_style == "rb_same":
          x_np[..., 2] = x_np[..., 0]
        elif test_style == "gb_same":
          x_np[..., 2] = x_np[..., 1]
        elif test_style == "rgb_same":
          x_np[..., 1] = x_np[..., 0]
          x_np[..., 2] = x_np[..., 0]
        else:
          raise AssertionError("Invalid test style: %s" % (test_style))
        y_np = self._adjustHueNp(x_np, delta_h)
        y_tf = self._adjustHueTf(x_np, delta_h)
        self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-4)

  def testInvalidShapes(self):
    fused = False
    if not fused:
      # The tests are known to pass with the fused adjust_hue. We will enable
      # them when the fused implementation is the default.
      return
    # NOTE(review): everything below is dead code until the early return
    # above is removed.
    x_np = np.random.rand(2, 3) * 255.
    delta_h = np.random.rand() * 2.0 - 1.0
    fused = False
    with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
      self._adjustHueTf(x_np, delta_h)
    x_np = np.random.rand(4, 2, 4) * 255.
    delta_h = np.random.rand() * 2.0 - 1.0
    with self.assertRaisesOpError("input must have 3 channels"):
      self._adjustHueTf(x_np, delta_h)
class AdjustSaturationTest(xla_test.XLATestCase):
  """Tests XLA adjust_saturation against goldens and a colorsys reference."""

  def _adjust_saturation(self, image, saturation_factor):
    """Builds the adjust_saturation graph, preserving the input dtype."""
    image = ops.convert_to_tensor(image, name="image")
    orig_dtype = image.dtype
    flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
    # Only the adjust_saturation op runs under the XLA test scope.
    with self.test_scope():
      saturation_adjusted_image = gen_image_ops.adjust_saturation(
          flt_image, saturation_factor)
    return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)

  def testHalfSaturation(self):
    x_shape = [2, 2, 3]
    x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
    saturation_factor = 0.5
    y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
    y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session():
      x = array_ops.placeholder(x_np.dtype, shape=x_shape)
      y = self._adjust_saturation(x, saturation_factor)
      y_tf = y.eval({x: x_np})
      self.assertAllEqual(y_tf, y_np)

  def testTwiceSaturation(self):
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    saturation_factor = 2.0
    y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
    y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
    with self.cached_session():
      x = array_ops.placeholder(x_np.dtype, shape=x_shape)
      y = self._adjust_saturation(x, saturation_factor)
      y_tf = y.eval({x: x_np})
      self.assertAllEqual(y_tf, y_np)

  def _adjustSaturationNp(self, x_np, scale):
    """Pixel-wise colorsys reference; saturation is clamped to [0, 1]."""
    self.assertEqual(x_np.shape[-1], 3)
    x_v = x_np.reshape([-1, 3])
    y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
    channel_count = x_v.shape[0]
    for i in xrange(channel_count):
      r = x_v[i][0]
      g = x_v[i][1]
      b = x_v[i][2]
      h, s, v = colorsys.rgb_to_hsv(r, g, b)
      s *= scale
      s = min(1.0, max(0.0, s))
      r, g, b = colorsys.hsv_to_rgb(h, s, v)
      y_v[i][0] = r
      y_v[i][1] = g
      y_v[i][2] = b
    return y_v.reshape(x_np.shape)

  def testAdjustRandomSaturation(self):
    x_shapes = [
        [2, 2, 3],
        [4, 2, 3],
        [2, 4, 3],
        [2, 5, 3],
        [1000, 1, 3],
    ]
    # Styles force channel equalities to exercise saturation edge cases.
    test_styles = [
        "all_random",
        "rg_same",
        "rb_same",
        "gb_same",
        "rgb_same",
    ]
    with self.cached_session():
      for x_shape in x_shapes:
        for test_style in test_styles:
          x_np = np.random.rand(*x_shape) * 255.
          scale = np.random.rand()
          if test_style == "all_random":
            pass
          elif test_style == "rg_same":
            x_np[..., 1] = x_np[..., 0]
          elif test_style == "rb_same":
            x_np[..., 2] = x_np[..., 0]
          elif test_style == "gb_same":
            x_np[..., 2] = x_np[..., 1]
          elif test_style == "rgb_same":
            x_np[..., 1] = x_np[..., 0]
            x_np[..., 2] = x_np[..., 0]
          else:
            raise AssertionError("Invalid test style: %s" % (test_style))
          y_baseline = self._adjustSaturationNp(x_np, scale)
          x = array_ops.placeholder(dtypes.float32, shape=x_shape)
          with self.test_scope():
            y_fused = self._adjust_saturation(x,
                                              scale).eval(feed_dict={x: x_np})
          self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class ResizeNearestNeighborTest(xla_test.XLATestCase):
  """Tests XLA resize_nearest_neighbor against hand-computed goldens."""

  # TODO(ilch): Wrap each test with `for dtype in self.float_types:`
  # Some work to understand how that should be done was presented here:
  # cl/227850213
  def _assertForwardOpMatchesExpected(self,
                                      image_np,
                                      target_shape,
                                      expected=None,
                                      large_tolerance=False,
                                      align_corners=True):
    """Resizes a 2-D `image_np` (batch/channel dims added here) and compares
    the result against `expected`."""
    if expected is None:
      self.fail("expected must be specified")
    with self.cached_session() as sess, self.test_scope():
      image = array_ops.placeholder(image_np.dtype)
      resized = gen_image_ops.resize_nearest_neighbor(
          image, target_shape, align_corners=align_corners)
      out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
      if large_tolerance:
        self.assertAllClose(
            expected[np.newaxis, :, :, np.newaxis], out, rtol=0.03, atol=0.1)
      else:
        self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)

  def testAlignCorners2x2To1x1(self):
    self._assertForwardOpMatchesExpected(
        np.array([[1, 2], [3, 4]], dtype=np.float32), [1, 1],
        expected=np.array([[1]], dtype=np.float32))

  def testAlignCorners1x1To2x2(self):
    self._assertForwardOpMatchesExpected(
        np.array([[1]], dtype=np.float32), [2, 2],
        expected=np.array([[1, 1], [1, 1]], dtype=np.float32))

  def testAlignCorners1x1To3x3(self):
    self._assertForwardOpMatchesExpected(
        np.array([[1]], dtype=np.float32), [3, 3],
        expected=np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.float32))

  def testAlignCorners2x2To3x3(self):
    self._assertForwardOpMatchesExpected(
        np.array([[1, 2], [3, 4]], dtype=np.float32), [3, 3],
        expected=np.array([[1, 2, 2], [3, 4, 4], [3, 4, 4]], dtype=np.float32))

  def testAlignCorners2x2To4x4(self):
    self._assertForwardOpMatchesExpected(
        np.array([[1, 2], [3, 4]], dtype=np.float32), [4, 4],
        expected=np.array(
            [[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]],
            dtype=np.float32))

  def testAlignCorners3x3To2x2(self):
    self._assertForwardOpMatchesExpected(
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [2, 2],
        expected=np.array([[1, 3], [7, 9]], dtype=np.float32))

  def testAlignCorners4x4To3x3(self):
    self._assertForwardOpMatchesExpected(
        np.array(
            [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
            dtype=np.float32), [3, 3],
        expected=np.array([[1, 3, 4], [9, 11, 12], [13, 15, 16]],
                          dtype=np.float32))

  def testAlignCorners3x3To4x4(self):
    self._assertForwardOpMatchesExpected(
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [4, 4],
        expected=np.array(
            [[1, 2, 2, 3], [4, 5, 5, 6], [4, 5, 5, 6], [7, 8, 8, 9]],
            dtype=np.float32))

  def testAlignCorners3x3To6x6(self):
    self._assertForwardOpMatchesExpected(
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [6, 6],
        expected=np.array(
            [[1, 1, 2, 2, 3, 3], [1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6],
             [4, 4, 5, 5, 6, 6], [7, 7, 8, 8, 9, 9], [7, 7, 8, 8, 9, 9]],
            dtype=np.float32))

  def testAlignCorners3x3To9x9(self):
    # The expected matrix might look uneven in terms of how many of each number
    # there is, but this is an artifact of doing the dilation and convolution
    # iteratively. The behavior is less esoteric in the 3x3To12x12 case below.
    self._assertForwardOpMatchesExpected(
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [9, 9],
        expected=np.array(
            [[1, 2, 2, 2, 2, 3, 3, 3, 3], [4, 5, 5, 5, 5, 6, 6, 6, 6],
             [4, 5, 5, 5, 5, 6, 6, 6, 6], [4, 5, 5, 5, 5, 6, 6, 6, 6],
             [4, 5, 5, 5, 5, 6, 6, 6, 6], [7, 8, 8, 8, 8, 9, 9, 9, 9],
             [7, 8, 8, 8, 8, 9, 9, 9, 9], [7, 8, 8, 8, 8, 9, 9, 9, 9],
             [7, 8, 8, 8, 8, 9, 9, 9, 9]],
            dtype=np.float32))

  def testAlignCorners3x3To12x12(self):
    self._assertForwardOpMatchesExpected(
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [12, 12],
        expected=np.array([[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
                           [1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
                           [1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
                           [4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
                           [4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
                           [4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
                           [4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
                           [4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
                           [4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
                           [7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
                           [7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
                           [7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9]],
                          dtype=np.float32))
class ResizeBilinearTest(xla_test.XLATestCase):
  """Tests XLA resize_bilinear (forward and gradient) against goldens."""

  def _assertForwardOpMatchesExpected(self,
                                      image_np,
                                      target_shape,
                                      expected=None,
                                      large_tolerance=False,
                                      align_corners=True):
    """Resizes a 2-D `image_np` (batch/channel dims added here) and compares
    the result against `expected`."""
    if expected is None:
      self.fail("expected must be specified")
    with self.cached_session() as sess, self.test_scope():
      image = array_ops.placeholder(image_np.dtype)
      resized = gen_image_ops.resize_bilinear(
          image, target_shape, align_corners=align_corners)
      out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
      if large_tolerance:
        self.assertAllClose(
            expected[np.newaxis, :, :, np.newaxis], out, rtol=0.03, atol=0.1)
      else:
        self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)

  def _assertBackwardOpMatchesExpected(self,
                                      grads_np,
                                      input_shape=None,
                                      dtype=None,
                                      expected=None):
    """Runs resize_bilinear_grad for `grads_np` against a zero image of
    `input_shape` and compares against `expected`."""
    if input_shape is None:
      self.fail("input_shape must be specified")
    if expected is None:
      self.fail("expected must be specified")
    with self.cached_session() as sess, self.test_scope():
      dtype = dtype or np.float32
      # NOTE(review): the gradient placeholder is always float32; `dtype`
      # only controls the original-image operand -- confirm this is intended.
      grads = array_ops.placeholder(np.float32)
      resized = gen_image_ops.resize_bilinear_grad(
          grads,
          np.zeros([1, input_shape[0], input_shape[1], 1], dtype=dtype),
          align_corners=True)
      out = sess.run(resized, {grads: grads_np[np.newaxis, :, :, np.newaxis]})
      self.assertAllCloseAccordingToType(expected[np.newaxis, :, :, np.newaxis],
                                         out)

  def testAlignCorners1x2To3x3(self):
    for dtype in self.float_types:
      self._assertForwardOpMatchesExpected(
          np.array([[1, 2]], dtype=dtype), [3, 3],
          expected=np.array([[1, 1.5, 2], [1, 1.5, 2], [1, 1.5, 2]],
                            dtype=np.float32))

  def testAlignCorners1x2To3x3Grad(self):
    for dtype in self.float_types:
      self._assertBackwardOpMatchesExpected(
          np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32),
          input_shape=[1, 2],
          dtype=dtype,
          expected=np.array([[9, 12]], dtype=np.float32))

  def testAlignCorners2x2To1x1(self):
    for dtype in self.float_types:
      self._assertForwardOpMatchesExpected(
          np.array([[1, 2], [3, 4]], dtype=dtype), [1, 1],
          expected=np.array([[1]], dtype=np.float32))

  def testAlignCorners2x2To1x1Grad(self):
    for dtype in self.float_types:
      self._assertBackwardOpMatchesExpected(
          np.array([[7]], dtype=np.float32),
          input_shape=[2, 2],
          dtype=dtype,
          expected=np.array([[7, 0], [0, 0]], dtype=np.float32))

  def testAlignCorners2x2To3x3(self):
    for dtype in self.float_types:
      self._assertForwardOpMatchesExpected(
          np.array([[1, 2], [3, 4]], dtype=dtype), [3, 3],
          expected=np.array([[1, 1.5, 2], [2, 2.5, 3], [3, 3.5, 4]],
                            dtype=np.float32))

  def testAlignCorners2x2To3x3Grad(self):
    self._assertBackwardOpMatchesExpected(
        np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32),
        input_shape=[2, 2],
        expected=np.array([[5.25, 8.25], [14.25, 17.25]], dtype=np.float32))

  def testAlignCorners3x3To2x2(self):
    for dtype in self.float_types:
      self._assertForwardOpMatchesExpected(
          np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype), [2, 2],
          expected=np.array([[1, 3], [7, 9]], dtype=np.float32))

  def testAlignCorners3x3To2x2Grad(self):
    for dtype in self.float_types:
      self._assertBackwardOpMatchesExpected(
          np.array([[7, 13], [22, 4]], dtype=np.float32),
          input_shape=[3, 3],
          dtype=dtype,
          expected=np.array([[7, 0, 13], [0, 0, 0], [22, 0, 4]],
                            dtype=np.float32))

  def testAlignCorners4x4To3x3(self):
    for dtype in self.float_types:
      self._assertForwardOpMatchesExpected(
          np.array(
              [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
              dtype=dtype), [3, 3],
          expected=np.array([[1, 2.5, 4], [7, 8.5, 10], [13, 14.5, 16]],
                            dtype=np.float32))

  def testAlignCorners4x4To3x3Grad(self):
    for dtype in self.float_types:
      self._assertBackwardOpMatchesExpected(
          np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32),
          input_shape=[4, 4],
          dtype=dtype,
          expected=np.array([[1, 1, 1, 3], [2, 1.25, 1.25, 3],
                             [2, 1.25, 1.25, 3], [7, 4, 4, 9]],
                            dtype=np.float32))

  def testAlignCorners3x3To9x9(self):
    for dtype in self.float_types:
      self._assertForwardOpMatchesExpected(
          np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype), [9, 9],
          expected=np.array(
              [[1.0, 1.25, 1.50, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00],
               [1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 3.75],
               [2.50, 2.75, 3.00, 3.25, 3.50, 3.75, 4.00, 4.25, 4.50],
               [3.25, 3.50, 3.75, 4.00, 4.25, 4.50, 4.75, 5.00, 5.25],
               [4.00, 4.25, 4.50, 4.75, 5.00, 5.25, 5.50, 5.75, 6.00],
               [4.75, 5.00, 5.25, 5.50, 5.75, 6.00, 6.25, 6.50, 6.75],
               [5.50, 5.75, 6.00, 6.25, 6.50, 6.75, 7.00, 7.25, 7.50],
               [6.25, 6.50, 6.75, 7.00, 7.25, 7.50, 7.75, 8.00, 8.25],
               [7.00, 7.25, 7.50, 7.75, 8.00, 8.25, 8.50, 8.75, 9.00]],
              dtype=np.float32))

  def testAlignCorners3x3To9x9Grad(self):
    for dtype in self.float_types:
      self._assertBackwardOpMatchesExpected(
          np.array([[1.00, 1.25, 1.50, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00],
                    [1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 3.75],
                    [2.50, 2.75, 3.00, 3.25, 3.50, 3.75, 4.00, 4.25, 4.50],
                    [3.25, 3.50, 3.75, 4.00, 4.25, 4.50, 4.75, 5.00, 5.25],
                    [4.00, 4.25, 4.50, 4.75, 5.00, 5.25, 5.50, 5.75, 6.00],
                    [4.75, 5.00, 5.25, 5.50, 5.75, 6.00, 6.25, 6.50, 6.75],
                    [5.50, 5.75, 6.00, 6.25, 6.50, 6.75, 7.00, 7.25, 7.50],
                    [6.25, 6.50, 6.75, 7.00, 7.25, 7.50, 7.75, 8.00, 8.25],
                    [7.00, 7.25, 7.50, 7.75, 8.00, 8.25, 8.50, 8.75, 9.00]],
                   dtype=np.float32),
          input_shape=[3, 3],
          dtype=dtype,
          expected=np.array(
              [[12.5, 27.5, 21.875], [42.5, 80.0, 57.5], [40.625, 72.5, 50]],
              dtype=np.float32))

  def testAlignCorners4x4To8x8(self):
    # Input/expected are linear ramps, so bilinear interpolation is exact up
    # to the large tolerance.
    self._assertForwardOpMatchesExpected(
        (np.array([[0, 1, 2, 3]], dtype=np.float32) + np.array(
            [[0], [1], [2], [3]], dtype=np.float32)) * 7.0, [8, 8],
        expected=3 *
        (np.array([[0, 1, 2, 3, 4, 5, 6, 7]], dtype=np.float32) + np.array(
            [[0], [1], [2], [3], [4], [5], [6], [7]], dtype=np.float32)),
        large_tolerance=True)

  def testAlignCorners8x8To16x16(self):
    self._assertForwardOpMatchesExpected(
        (np.array([[0, 1, 2, 3, 4, 5, 6, 7]], dtype=np.float32) + np.array(
            [[0], [1], [2], [3], [4], [5], [6], [7]], dtype=np.float32)) * 15.0,
        [16, 16],
        expected=7 *
        (np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]],
                  dtype=np.float32) +
         np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11],
                   [12], [13], [14], [15]],
                  dtype=np.float32)),
        large_tolerance=True)

  def testNonAlignCorners3x2To6x4(self):
    input_data = [[64, 32], [32, 64], [50, 100]]
    expected_data = [[64.0, 48.0, 32.0, 32.0], [48.0, 48.0, 48.0, 48.0],
                     [32.0, 48.0, 64.0, 64.0], [41.0, 61.5, 82.0, 82.0],
                     [50.0, 75.0, 100.0, 100.0], [50.0, 75.0, 100.0, 100.0]]
    for dtype in self.float_types:
      self._assertForwardOpMatchesExpected(
          np.array(input_data, dtype=dtype), [6, 4],
          expected=np.array(expected_data, dtype=np.float32),
          align_corners=False)

  def testNonAlignCorners6x4To3x2(self):
    input_data = [[127, 127, 64, 64], [127, 127, 64, 64], [64, 64, 127, 127],
                  [64, 64, 127, 127], [50, 50, 100, 100], [50, 50, 100, 100]]
    expected_data = [[127, 64], [64, 127], [50, 100]]
    for dtype in self.float_types:
      self._assertForwardOpMatchesExpected(
          np.array(input_data, dtype=dtype), [3, 2],
          expected=np.array(expected_data, dtype=dtype),
          align_corners=False)

  def testNonAlignCorners3x2To6x4Batch2(self):
    # Batched variant; the helper above assumes batch size 1, so this test
    # builds the graph inline.
    input_data = [[[64, 32], [32, 64], [50, 100]], [[32, 16], [16, 32],
                                                    [25, 50]]]
    expected_data = [[[64.0, 48.0, 32.0, 32.0], [48.0, 48.0, 48.0, 48.0],
                      [32.0, 48.0, 64.0, 64.0], [41.0, 61.5, 82.0, 82.0],
                      [50.0, 75.0, 100.0, 100.0], [50.0, 75.0, 100.0, 100.0]],
                     [[32.0, 24.0, 16.0, 16.0], [24.0, 24.0, 24.0, 24.0],
                      [16.0, 24.0, 32.0, 32.0], [20.5, 30.75, 41.0, 41.0],
                      [25.0, 37.5, 50.0, 50.0], [25.0, 37.5, 50.0, 50.0]]]
    for dtype in self.float_types:
      input_image = np.array(input_data, dtype=dtype)
      expected = np.array(expected_data, dtype=dtype)
      with self.cached_session() as sess, self.test_scope():
        image = array_ops.placeholder(input_image.dtype)
        resized = gen_image_ops.resize_bilinear(
            image, [6, 4], align_corners=False)
        out = sess.run(resized, {image: input_image[:, :, :, np.newaxis]})
        self.assertAllClose(expected[:, :, :, np.newaxis], out)
class NonMaxSuppressionTest(xla_test.XLATestCase):
  """Tests for the XLA implementation of padded non-max suppression.

  Each test previously duplicated an identical ~30-line placeholder/feed/run
  scaffold; that scaffold now lives in the private _runNMS helper.
  """

  def _runNMS(self, boxes_np, scores_np, max_output_size, iou_threshold_np,
              score_threshold_np):
    """Builds and runs non_max_suppression_padded on numpy inputs.

    Args:
      boxes_np: float32 ndarray of shape (num_boxes, 4).
      scores_np: float32 ndarray of shape (num_boxes,).
      max_output_size: Python int, maximum number of boxes to select.
      iou_threshold_np: scalar float32 ndarray, IOU suppression threshold.
      score_threshold_np: scalar float32 ndarray, minimum accepted score.

    Returns:
      A (indices, num_valid) tuple: `indices` is padded to exactly
      max_output_size entries (pad_to_max_output_size=True) and only the
      first `num_valid` of them are meaningful.
    """
    with self.cached_session() as sess:
      boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
      scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
      iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
                                            iou_threshold_np.shape)
      score_threshold = array_ops.placeholder(score_threshold_np.dtype,
                                              score_threshold_np.shape)
      # Only the NMS op itself runs under the XLA test scope.
      with self.test_scope():
        selected_indices = image_ops.non_max_suppression_padded(
            boxes=boxes,
            scores=scores,
            max_output_size=max_output_size,
            iou_threshold=iou_threshold,
            score_threshold=score_threshold,
            pad_to_max_output_size=True)
      inputs_feed = {
          boxes: boxes_np,
          scores: scores_np,
          iou_threshold: iou_threshold_np,
          score_threshold: score_threshold_np
      }
      return sess.run(selected_indices, feed_dict=inputs_feed)

  def testNMS128From1024(self):
    # With pad_to_max_output_size=True the op always returns exactly
    # max_output_size indices, padded if fewer boxes survive.
    num_boxes = 1024
    boxes_np = np.random.normal(50, 10, (num_boxes, 4)).astype("f4")
    scores_np = np.random.normal(0.5, 0.1, (num_boxes,)).astype("f4")
    max_output_size = 128
    iou_threshold_np = np.array(0.5, dtype=np.float32)
    score_threshold_np = np.array(0.0, dtype=np.float32)
    (indices_tf, _) = self._runNMS(boxes_np, scores_np, max_output_size,
                                   iou_threshold_np, score_threshold_np)
    self.assertEqual(indices_tf.size, max_output_size)

  def testNMS3From6Boxes(self):
    # Three boxes are selected based on IOU.
    boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    boxes_np = np.array(boxes_data, dtype=np.float32)
    scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    scores_np = np.array(scores_data, dtype=np.float32)
    max_output_size = 3
    iou_threshold_np = np.array(0.5, dtype=np.float32)
    score_threshold_np = np.array(0.0, dtype=np.float32)
    (indices_tf, num_valid) = self._runNMS(boxes_np, scores_np,
                                           max_output_size, iou_threshold_np,
                                           score_threshold_np)
    self.assertEqual(indices_tf.size, max_output_size)
    self.assertEqual(num_valid, 3)
    self.assertAllClose(indices_tf[:num_valid], [3, 0, 5])

  def testNMS3Then2WithScoreThresh(self):
    # Three boxes are selected based on IOU.
    # One is filtered out by score threshold.
    boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    boxes_np = np.array(boxes_data, dtype=np.float32)
    scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    scores_np = np.array(scores_data, dtype=np.float32)
    max_output_size = 3
    iou_threshold_np = np.array(0.5, dtype=np.float32)
    score_threshold_np = np.array(0.4, dtype=np.float32)
    (indices_tf, num_valid) = self._runNMS(boxes_np, scores_np,
                                           max_output_size, iou_threshold_np,
                                           score_threshold_np)
    self.assertEqual(indices_tf.size, max_output_size)
    self.assertEqual(num_valid, 2)
    self.assertAllClose(indices_tf[:num_valid], [3, 0])

  def testNMS3Then1WithScoreMaxThresh(self):
    # Three boxes are selected based on IOU.
    # One is filtered out by score threshold.
    # One is filtered out by max_output_size.
    boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
                  [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
    boxes_np = np.array(boxes_data, dtype=np.float32)
    scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
    scores_np = np.array(scores_data, dtype=np.float32)
    max_output_size = 1
    iou_threshold_np = np.array(0.5, dtype=np.float32)
    score_threshold_np = np.array(0.4, dtype=np.float32)
    (indices_tf, num_valid) = self._runNMS(boxes_np, scores_np,
                                           max_output_size, iou_threshold_np,
                                           score_threshold_np)
    self.assertEqual(indices_tf.size, max_output_size)
    self.assertEqual(num_valid, 1)
    self.assertAllClose(indices_tf[:num_valid], [3])

  def testSelectFromContinuousOverLap(self):
    # Tests that a suppressed box does not itself suppress other boxes.
    boxes_data = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
                  [0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 3]]
    boxes_np = np.array(boxes_data, dtype=np.float32)
    scores_data = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
    scores_np = np.array(scores_data, dtype=np.float32)
    max_output_size = 3
    iou_threshold_np = np.array(0.5, dtype=np.float32)
    score_threshold_np = np.array(0.1, dtype=np.float32)
    (indices_tf, num_valid) = self._runNMS(boxes_np, scores_np,
                                           max_output_size, iou_threshold_np,
                                           score_threshold_np)
    self.assertEqual(indices_tf.size, max_output_size)
    self.assertEqual(num_valid, 3)
    self.assertAllClose(indices_tf[:num_valid], [0, 2, 4])
if __name__ == "__main__":
  # Run all XLA image-op test cases via the TF test runner.
  test.main()
| |
# Standard-library imports (trailing semicolons removed; grouped per PEP 8).
import os
import os.path
import platform
import shutil
import subprocess
import sys
import zipfile

# Project-local helpers: report generation and file comparison utilities.
import report_utils
import utils
arguments = sys.argv[1:]
if 3 > len(arguments):
print 'Usage: ./create_performance_mail_report.py Gpu Recepients Link [Branch Revision]'
exit(1)
gpu = arguments[0]
recipients = arguments[1]
link = arguments[2]
branch = 0
revision = 0
if 5 == len(arguments):
branch = arguments[3]
revision = arguments[4]
# Resolve all working paths relative to the current directory.
currentDir = os.getcwd();
toolDir = os.path.realpath(currentDir + "/../../Tools/Bin/")
data = os.path.realpath(currentDir + "/DataSource/")
# NOTE(review): 'input' shadows the builtin; it is referenced again near the
# end of the script, so renaming it must be done across the whole file.
input = os.path.realpath(currentDir + "/DataSource/TestData/")
output = os.path.realpath(currentDir + "/Data/TestData/")
data_folder = os.path.realpath(currentDir + "/Data")
# NOTE(review): "$process" looks like an unexpanded template placeholder --
# confirm the directory really is literally named "$process".
process = os.path.realpath(currentDir + "/DataSource/$process/")
results = os.path.realpath(currentDir + "/Results/" + gpu)
# Accumulates one per-test result dict under the "Tests" key.
tests_results = {"Tests" : {}}
print "*** DAVA AUTOTEST Cleen up working dirctories ***"
if os.path.exists(currentDir + "/Artifacts/" + gpu):
print "Remove folder " + currentDir + "/Artifacts/" + gpu
shutil.rmtree(currentDir + "/Artifacts/" + gpu)
if os.path.exists(output):
print "Remove folder " + output
shutil.rmtree(output)
if os.path.exists(process):
print "Remove folder " + process
shutil.rmtree(process)
if not os.path.exists(data_folder):
print "Create folder " + data_folder
os.mkdir(data_folder)
print "*** DAVA AUTOTEST Run convert_graphics.py script for %s ***" % gpu
os.chdir(data)
params = [sys.executable, 'convert_graphics.py']
if (len(arguments) > 0):
params = params + ["-gpu", arguments[0]]
print "subprocess.call " + "[%s]" % ", ".join(map(str, params))
f = open(gpu + "_log.txt", 'w')
subprocess.call(params, stdout=f)
f.close()
shutil.move(gpu + "_log.txt", output)
print "*** DAVA AUTOTEST Check result for %s ***" % gpu
os.chdir(currentDir)
os_name = "Windows"
print "Convert DDS files:"
if (platform.system() == "Windows"):
subprocess.call(toolDir + "/ImageUnpacker.exe -folder " + output, shell=True)
else:
os_name = "MacOS"
subprocess.call(toolDir + "/ImageUnpacker -folder " + output, shell=True)
i = 0
# Compare every expected-results folder under Results/<gpu> against the
# freshly generated output; each subdirectory is one test case.
for test in os.listdir(results):
    if(os.path.isdir(os.path.realpath(results + "/" + test))):
        i = i + 1
        result = {}
        print "*** Test#%d %s:" % (i, test)
        result['Name'] = test
        result['Number'] = i
        result['Success'] = True
        result['Error_msg'] = ""
        result['txt_Success'] = True
        result['tex_Success'] = True
        result['img_Success'] = True
        expected = os.path.realpath(results + "/" + test)
        actual = os.path.realpath(output + "/" + test)
        # Check TEXT files
        print "Check TXT files"
        files = filter(lambda x: x[-3:] == "txt", os.listdir(expected))
        if len(files) != 0:
            print files
            for file in files:
                # compare_txt returns None on match, an error message otherwise.
                res = utils.compare_txt(expected + "/" + file, actual + "/" + file)
                if res != None:
                    result['txt_Success'] = False
                    result['Error_msg'] = result['Error_msg'] + str(res) + "\n"
                    print res
        #Check TEX files
        print "Check TEX files"
        files = filter(lambda x: x[-3:] == "tex", os.listdir(expected))
        if len(files) != 0:
            print files
            for file in files:
                res = utils.compare_tex(expected + "/" + file, actual + "/" + file)
                if res != None:
                    result['tex_Success'] = False
                    result['Error_msg'] = result['Error_msg'] + str(res) + "\n"
                    print res
        # Check IMAGE files
        print "Check IMAGE files"
        files = filter(lambda x: x[-3:] == "png", os.listdir(expected))
        if len(files) != 0:
            print files
            for file in files:
                # compare_img returns an error string on failure, otherwise a
                # float difference ratio; ratios above 1% fail the test and a
                # diff image is saved for inspection.
                res = utils.compare_img(expected + "/" + file, actual + "/" + file)
                if isinstance(res, str):
                    result['img_Success'] = False
                    result['Error_msg'] = result['Error_msg'] + str(res) + "\n"
                else:
                    if res > 0.01:
                        result['img_Success'] = False
                        result['Error_msg'] = result['Error_msg'] + "Image %s differce from expected on %f%%.\n" % (actual + "/" + file, res * 100)
                        utils.save_diff(expected + "/" + file, actual + "/" + file)
        result['Success'] = result['tex_Success'] and result['txt_Success'] and result['img_Success']
        if result['Success']:
            print "Test passed!"
        tests_results["Tests"][test] = result
#Check graphics files
print
print
# Make final results
test_num = 0
test_success = 0
tex_failure = 0
txt_failure = 0
img_failure = 0
for test in tests_results["Tests"].values():
test_num = test_num + 1
if test['Success']:
test_success = test_success + 1
if not test['tex_Success']:
tex_failure = tex_failure + 1
if not test['txt_Success']:
txt_failure = txt_failure + 1
if not test['img_Success']:
img_failure = img_failure + 1
tests_results['tests'] = i
tests_results["success"] = test_success
tests_results["tex_failure"] = tex_failure
tests_results["txt_failure"] = txt_failure
tests_results["img_failure"] = img_failure
tests_results['gpu'] = gpu
report_utils.print_result(tests_results)
report_utils.create_html(tests_results, currentDir + "/" + gpu + ".html")
print
print
if tests_results["success"] != tests_results['tests']:
print "*** DAVA AUTOTEST Send letter with info about failures ***"
subject = "[AUTOTEST] Test for resource packer: Platform = %s GPU = %s" % (os_name, gpu)
msg = "Test: runned= %d succes= %d failed= %d <br>" % (tests_results['tests'], tests_results['success'], tests_results['tests'] - tests_results['success'])
msg += "Failures: Txt %d Tex %d Image %d <br>" % (tests_results['txt_failure'], tests_results['tex_failure'], tests_results['img_failure'])
msg += "<br> Link: %s/%s.html" % (link, gpu)
if (branch != 0):
msg += "<br>Framewok: %s %s" % (branch, revision)
utils.call("python", "mail.py", recipients, subject, msg)
print
print
print "*** DAVA AUTOTEST Copy results for artifact storing ***"
print "Copy results for storing in TC %s -> %s" % (output, currentDir + "/Artifacts/" + gpu)
shutil.copytree(output, currentDir + "/Artifacts/" + gpu)
for test in os.listdir(results):
shutil.copytree(input + "/" + test, currentDir + "/Artifacts/" + gpu + "/" + test + "/input/")
shutil.copytree(results + "/" + test, currentDir + "/Artifacts/" + gpu + "/" + test + "/expected_results/")
print "*** DAVA AUTOTEST Zip results for artefacts ***"
os.chdir(currentDir + "/Artifacts/")
utils.zip(gpu, gpu)
| |
# coding: utf-8
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_config import fixture as config_fixture
from oslo_utils import timeutils
from murano.api.v1 import environments
from murano.db import models
from murano.services import states
import murano.tests.unit.api.base as tb
import murano.tests.unit.utils as test_utils
class TestEnvironmentApi(tb.ControllerTest, tb.MuranoApiTestCase):
    """Tests for the murano v1 /environments REST API controller.

    Each test declares the policy rules it needs via _set_policy_rules and
    registers the exact sequence of expected policy checks with
    expect_policy_check; the expectations are order-sensitive.
    """

    def setUp(self):
        super(TestEnvironmentApi, self).setUp()
        self.controller = environments.Controller()
        # Isolated oslo.config fixture so tests may adjust config safely.
        self.fixture = self.useFixture(config_fixture.Config())
        self.fixture.conf(args=[])

    def test_list_empty_environments(self):
        """Check that with no environments an empty list is returned."""
        self._set_policy_rules(
            {'list_environments': '@'}
        )
        self.expect_policy_check('list_environments')
        req = self._get('/environments')
        result = req.get_response(self.api)
        self.assertEqual({'environments': []}, json.loads(result.body))

    def test_list_all_tenants(self):
        """Check whether all_tenants param is taken into account."""
        self._set_policy_rules(
            {'list_environments': '@',
             'create_environment': '@',
             'list_environments_all_tenants': '@'}
        )
        self.expect_policy_check('create_environment')
        # Create one environment owned by a *different* tenant.
        body = {'name': 'my_env'}
        req = self._post('/environments', json.dumps(body), tenant="other")
        req.get_response(self.api)
        # Plain listing must not see it; all_tenants listing must.
        self._check_listing(False, 'list_environments', 0)
        self._check_listing(True, 'list_environments_all_tenants', 1)

    def _check_listing(self, all_tenants, expected_check, expected_count):
        # Helper: GET /environments with the given all_tenants flag, then
        # assert the policy rule consulted and the result count.
        self.expect_policy_check(expected_check)
        req = self._get('/environments', {'all_tenants': all_tenants})
        response = req.get_response(self.api)
        body = json.loads(response.body)
        self.assertEqual(200, response.status_code)
        self.assertEqual(expected_count, len(body['environments']))

    def test_create_environment(self):
        """Create an environment, test environment.show()."""
        self._set_policy_rules(
            {'list_environments': '@',
             'create_environment': '@',
             'show_environment': '@'}
        )
        self.expect_policy_check('create_environment')
        # Freeze "now" so created/updated timestamps are predictable.
        fake_now = timeutils.utcnow()
        timeutils.utcnow.override_time = fake_now
        uuids = ('env_object_id', 'network_id', 'environment_id')
        mock_uuid = self._stub_uuid(uuids)
        expected = {'tenant_id': self.tenant,
                    'id': 'environment_id',
                    'name': 'my_env',
                    'version': 0,
                    # isotime(...)[:-1] drops the trailing timezone suffix
                    'created': timeutils.isotime(fake_now)[:-1],
                    'updated': timeutils.isotime(fake_now)[:-1],
                    }
        body = {'name': 'my_env'}
        req = self._post('/environments', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(expected, json.loads(result.body))
        # Listing additionally reports the environment status.
        expected['status'] = 'ready'
        # Reset the policy expectation
        self.expect_policy_check('list_environments')
        req = self._get('/environments')
        result = req.get_response(self.api)
        self.assertEqual(200, result.status_code)
        self.assertEqual({'environments': [expected]}, json.loads(result.body))
        # show() additionally reports services and the acquiring session.
        expected['services'] = []
        expected['acquired_by'] = None
        # Reset the policy expectation
        self.expect_policy_check('show_environment',
                                 {'environment_id': uuids[-1]})
        req = self._get('/environments/%s' % uuids[-1])
        result = req.get_response(self.api)
        self.assertEqual(expected, json.loads(result.body))
        self.assertEqual(3, mock_uuid.call_count)

    def test_illegal_environment_name_create(self):
        """Check that an illegal env name results in an HTTPClientError."""
        self._set_policy_rules(
            {'list_environments': '@',
             'create_environment': '@',
             'show_environment': '@'}
        )
        self.expect_policy_check('create_environment')
        # Whitespace-only names are rejected.
        body = {'name': ' '}
        req = self._post('/environments', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(400, result.status_code)

    def test_unicode_environment_name_create(self):
        """Check that a unicode env name doesn't raise an HTTPClientError."""
        self._set_policy_rules(
            {'list_environments': '@',
             'create_environment': '@',
             'show_environment': '@'}
        )
        self.expect_policy_check('create_environment')
        body = {'name': u'$yaql \u2665 unicode'}
        req = self._post('/environments', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(200, result.status_code)

    def test_no_environment_name_create(self):
        """Check that no env name provided results in an HTTPBadRequest."""
        self._set_policy_rules(
            {'list_environments': '@',
             'create_environment': '@',
             'show_environment': '@'}
        )
        self.expect_policy_check('create_environment')
        body = {'no_name': 'fake'}
        req = self._post('/environments', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(400, result.status_code)
        result_msg = result.text.replace('\n', '')
        self.assertIn('Please, specify a name of the environment to create',
                      result_msg)

    def test_too_long_environment_name_create(self):
        """Check that a too long env name results in an HTTPBadRequest."""
        self._set_policy_rules(
            {'list_environments': '@',
             'create_environment': '@',
             'show_environment': '@'}
        )
        self.expect_policy_check('create_environment')
        # 256 characters: one over the 255-character limit.
        body = {'name': 'a' * 256}
        req = self._post('/environments', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(400, result.status_code)
        result_msg = result.text.replace('\n', '')
        self.assertIn('Environment name should be 255 characters maximum',
                      result_msg)

    def test_create_environment_with_empty_body(self):
        """Check that empty request body results in an HTTPBadRequest."""
        body = ''
        req = self._post('/environments', body)
        result = req.get_response(self.api)
        self.assertEqual(400, result.status_code)
        result_msg = result.text.replace('\n', '')
        self.assertIn('The server could not comply with the request since it '
                      'is either malformed or otherwise incorrect.',
                      result_msg)

    def test_missing_environment(self):
        """Check that a missing environment results in an HTTPNotFound.

        Environment check will be made in the decorator and raises,
        no need to check policy in this testcase.
        """
        req = self._get('/environments/no-such-id')
        result = req.get_response(self.api)
        self.assertEqual(404, result.status_code)

    def test_update_environment(self):
        """Check that environment rename works."""
        self._set_policy_rules(
            {'show_environment': '@',
             'update_environment': '@'}
        )
        self.expect_policy_check('update_environment',
                                 {'environment_id': '12345'})
        # Freeze "now" so the stored timestamps are deterministic.
        fake_now = timeutils.utcnow()
        timeutils.utcnow.override_time = fake_now
        expected = dict(
            id='12345',
            name='my-env',
            version=0,
            created=fake_now,
            updated=fake_now,
            tenant_id=self.tenant,
            description={
                'Objects': {
                    '?': {'id': '12345'}
                },
                'Attributes': []
            }
        )
        e = models.Environment(**expected)
        test_utils.save_models(e)
        # Re-read "now" and re-freeze for the expected 'updated' value.
        fake_now = timeutils.utcnow()
        timeutils.utcnow.override_time = fake_now
        # Mutate `expected` into the JSON shape returned by show().
        del expected['description']
        expected['services'] = []
        expected['status'] = 'ready'
        expected['name'] = 'renamed_env'
        expected['updated'] = fake_now
        body = {
            'name': 'renamed_env'
        }
        req = self._put('/environments/12345', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(200, result.status_code)
        self.expect_policy_check('show_environment',
                                 {'environment_id': '12345'})
        req = self._get('/environments/12345')
        result = req.get_response(self.api)
        self.assertEqual(200, result.status_code)
        expected['created'] = timeutils.isotime(expected['created'])[:-1]
        expected['updated'] = timeutils.isotime(expected['updated'])[:-1]
        expected['acquired_by'] = None
        self.assertEqual(expected, json.loads(result.body))

    def test_update_environment_with_invalid_name(self):
        """Check that update an invalid env name results in
        an HTTPBadRequest.
        """
        self._set_policy_rules(
            {'update_environment': '@'}
        )
        self._create_fake_environment('env1', '111')
        self.expect_policy_check('update_environment',
                                 {'environment_id': '111'})
        body = {
            'name': ' '
        }
        req = self._put('/environments/111', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(400, result.status_code)
        result_msg = result.text.replace('\n', '')
        msg = ('Environment name must contain at least one '
               'non-white space symbol')
        self.assertIn(msg, result_msg)

    def test_update_environment_with_existing_name(self):
        # Renaming to another environment's name must yield 409 Conflict.
        self._set_policy_rules(
            {'update_environment': '@'}
        )
        self._create_fake_environment('env1', '111')
        self._create_fake_environment('env2', '222')
        self.expect_policy_check('update_environment',
                                 {'environment_id': '111'})
        body = {
            'name': 'env2'
        }
        req = self._put('/environments/111', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(409, result.status_code)

    def test_too_long_environment_name_update(self):
        """Check that update a too long env name results in
        an HTTPBadRequest.
        """
        self._set_policy_rules(
            {'update_environment': '@'}
        )
        self._create_fake_environment('env1', '111')
        self.expect_policy_check('update_environment',
                                 {'environment_id': '111'})
        # 'env1' * 64 is 256 characters - one over the limit.
        new_name = 'env1' * 64
        body = {
            'name': new_name
        }
        req = self._put('/environments/111', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(400, result.status_code)
        result_msg = result.text.replace('\n', '')
        self.assertIn('Environment name should be 255 characters maximum',
                      result_msg)

    def test_delete_environment(self):
        """Test that environment deletion results in the correct rpc call."""
        result = self._test_delete_or_abandon(abandon=False)
        self.assertEqual('', result.body)
        self.assertEqual(200, result.status_code)

    def test_abandon_environment(self):
        """Check that abandon feature works"""
        result = self._test_delete_or_abandon(abandon=True)
        self.assertEqual('', result.body)
        self.assertEqual(200, result.status_code)

    def test_abandon_environment_of_different_tenant(self):
        """Test abandon environment belongs to another tenant."""
        result = self._test_delete_or_abandon(abandon=True, tenant='not_match')
        self.assertEqual(403, result.status_code)
        self.assertTrue(('User is not authorized to access these'
                         ' tenant resources') in result.body)

    def test_get_last_status_of_different_tenant(self):
        """Test get last services status of env belongs to another tenant."""
        self._create_fake_environment('env1', '111')
        req = self._get('/environments/111/lastStatus', tenant='not_match')
        result = req.get_response(self.api)
        self.assertEqual(403, result.status_code)
        self.assertTrue(('User is not authorized to access these'
                         ' tenant resources') in result.body)

    def test_get_environment(self):
        """Test GET request of an environment in ready status"""
        self._set_policy_rules(
            {'show_environment': '@'}
        )
        self.expect_policy_check('show_environment',
                                 {'environment_id': '123'})
        fake_now = timeutils.utcnow()
        timeutils.utcnow.override_time = fake_now
        env_id = '123'
        self._create_fake_environment(env_id=env_id)
        req = self._get('/environments/{0}'.format(env_id))
        result = req.get_response(self.api)
        self.assertEqual(200, result.status_code)
        expected = {'tenant_id': self.tenant,
                    'id': env_id,
                    'name': 'my-env',
                    'version': 0,
                    'created': timeutils.isotime(fake_now)[:-1],
                    'updated': timeutils.isotime(fake_now)[:-1],
                    'acquired_by': None,
                    'services': [],
                    'status': 'ready',
                    }
        self.assertEqual(expected, json.loads(result.body))

    def test_get_environment_acquired(self):
        """Test GET request of an environment in deploying status"""
        self._set_policy_rules(
            {'show_environment': '@'}
        )
        self.expect_policy_check('show_environment',
                                 {'environment_id': '1234'})
        fake_now = timeutils.utcnow()
        timeutils.utcnow.override_time = fake_now
        env_id = '1234'
        self._create_fake_environment(env_id=env_id)
        # A DEPLOYING session marks the environment as acquired by it.
        sess_id = '321'
        expected = dict(
            id=sess_id,
            environment_id=env_id,
            version=0,
            state=states.SessionState.DEPLOYING,
            user_id=self.tenant,
            description={
                'Objects': {
                    '?': {'id': '{0}'.format(env_id)}
                },
                'Attributes': {}
            }
        )
        s = models.Session(**expected)
        test_utils.save_models(s)
        req = self._get('/environments/{0}'.format(env_id))
        result = req.get_response(self.api)
        self.assertEqual(200, result.status_code)
        expected = {'tenant_id': self.tenant,
                    'id': env_id,
                    'name': 'my-env',
                    'version': 0,
                    'created': timeutils.isotime(fake_now)[:-1],
                    'updated': timeutils.isotime(fake_now)[:-1],
                    'acquired_by': sess_id,
                    'services': [],
                    'status': states.EnvironmentStatus.DEPLOYING,
                    }
        self.assertEqual(expected, json.loads(result.body))

    def _create_fake_environment(self, env_name='my-env', env_id='123'):
        # Persist a minimal Environment row directly via the db models.
        fake_now = timeutils.utcnow()
        expected = dict(
            id=env_id,
            name=env_name,
            version=0,
            created=fake_now,
            updated=fake_now,
            tenant_id=self.tenant,
            description={
                'Objects': {
                    '?': {'id': '{0}'.format(env_id)}
                },
                'Attributes': {}
            }
        )
        e = models.Environment(**expected)
        test_utils.save_models(e)

    def _test_delete_or_abandon(self, abandon, env_name='my-env',
                                env_id='123', tenant=None):
        # Shared driver for delete/abandon tests: create an environment and
        # issue DELETE with the abandon flag, optionally as another tenant.
        self._set_policy_rules(
            {'delete_environment': '@'}
        )
        self.expect_policy_check(
            'delete_environment',
            {'environment_id': '{0}'.format(env_id)}
        )
        self._create_fake_environment(env_name, env_id)
        path = '/environments/{0}'.format(env_id)
        req = self._delete(path, params={'abandon': abandon},
                           tenant=tenant or self.tenant)
        result = req.get_response(self.api)
        return result
| |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
r"""
There are two use cases for the ConfigFilter class:
1. Help enforce that a given module does not access options registered
by another module, without first declaring those cross-module
dependencies using import_opt().
2. Prevent private configuration opts from being visible to modules
other than the one which registered it.
Cross-Module Option Dependencies
--------------------------------
When using the global cfg.CONF object, it is quite common for a module
to require the existence of configuration options registered by other
modules.
For example, if module 'foo' registers the 'blaa' option and the module
'bar' uses the 'blaa' option then 'bar' might do::
import foo
print(CONF.blaa)
However, it's completely non-obvious why foo is being imported (is it
unused, can we remove the import) and where the 'blaa' option comes from.
The CONF.import_opt() method allows such a dependency to be explicitly
declared::
CONF.import_opt('blaa', 'foo')
print(CONF.blaa)
However, import_opt() has a weakness - if 'bar' imports 'foo' using the
import builtin and doesn't use import_opt() to import 'blaa', then 'blaa'
can still be used without problems. Similarly, where multiple options
are registered by a module imported via import_opt(), a lazy programmer can
get away with only declaring a dependency on a single option.
The ConfigFilter class provides a way to ensure that options are not
available unless they have been registered in the module or imported using
import_opt() for example with::
CONF = ConfigFilter(cfg.CONF)
CONF.import_opt('blaa', 'foo')
print(CONF.blaa)
no other options other than 'blaa' are available via CONF.
Private Configuration Options
-----------------------------
Libraries which register configuration options typically do not want
users of the library API to access those configuration options. If
API users do access private configuration options, those users will
be disrupted if and when a configuration option is renamed. In other
words, one does not typically wish for the name of the private config
options to be part of the public API.
The ConfigFilter class provides a way for a library to register
options such that they are not visible via the ConfigOpts instance
which the API user supplies to the library. For example::
from __future__ import print_function
from oslo.config.cfg import *
from oslo.config.cfgfilter import *
class Widget(object):
def __init__(self, conf):
self.conf = conf
self._private_conf = ConfigFilter(self.conf)
self._private_conf.register_opt(StrOpt('foo'))
@property
def foo(self):
return self._private_conf.foo
conf = ConfigOpts()
widget = Widget(conf)
print(widget.foo)
print(conf.foo) # raises NoSuchOptError
"""
import collections
import itertools
from oslo.config import cfg
class ConfigFilter(collections.Mapping):
    """A helper class which wraps a ConfigOpts object.

    ConfigFilter enforces the explicit declaration of dependencies on external
    options and allows private options which are not registered with the
    wrapped ConfigOpts object.
    """

    def __init__(self, conf):
        """Construct a ConfigFilter object.

        :param conf: a ConfigOpts object
        """
        self._conf = conf
        # Shadow ConfigOpts holding options registered privately on this
        # filter, i.e. options NOT visible through the wrapped object.
        self._fconf = cfg.ConfigOpts()
        self._sync()
        # Opt names / groups explicitly imported from the wrapped conf.
        self._imported_opts = set()
        self._imported_groups = dict()

    def _sync(self):
        # Mirror the wrapped object's parsed state into the private
        # ConfigOpts whenever the wrapped object has been (re)parsed.
        # NOTE(review): relies on ConfigOpts private attributes
        # (_namespace, _args) - identity comparison detects a re-parse.
        if self._fconf._namespace is not self._conf._namespace:
            self._fconf.clear()
            self._fconf._namespace = self._conf._namespace
            self._fconf._args = self._conf._args

    def __getattr__(self, name):
        """Look up an option value.

        :param name: the opt name (or 'dest', more precisely)
        :returns: the option value (after string substitution) or a GroupAttr
        :raises: NoSuchOptError,ConfigFileValueError,TemplateSubstitutionError
        """
        # Resolution order: imported groups, then imported opts (both read
        # from the wrapped conf), then privately registered opts.
        if name in self._imported_groups:
            return self._imported_groups[name]
        elif name in self._imported_opts:
            return getattr(self._conf, name)
        else:
            self._sync()
            return getattr(self._fconf, name)

    def __getitem__(self, key):
        """Look up an option value."""
        return getattr(self, key)

    def __contains__(self, key):
        """Return True if key is the name of a registered opt or group."""
        return (key in self._fconf or
                key in self._imported_opts or
                key in self._imported_groups)

    def __iter__(self):
        """Iterate over all registered opt and group names."""
        return itertools.chain(self._fconf.keys(),
                               self._imported_opts,
                               self._imported_groups.keys())

    def __len__(self):
        """Return the number of options and option groups."""
        return (len(self._fconf) +
                len(self._imported_opts) +
                len(self._imported_groups))

    @staticmethod
    def _already_registered(conf, opt, group=None):
        # True if `opt` is already known to `conf` (top-level or in `group`).
        group_name = group.name if isinstance(group, cfg.OptGroup) else group
        return ((group_name is None and
                 opt.dest in conf) or
                (group_name is not None and
                 group_name in conf and
                 opt.dest in conf[group_name]))

    def register_opt(self, opt, group=None):
        """Register an option schema.

        :param opt: an instance of an Opt sub-class
        :param group: an optional OptGroup object or group name
        :return: False if the opt was already registered, True otherwise
        :raises: DuplicateOptError
        """
        if self._already_registered(self._conf, opt, group):
            # Raises DuplicateError if there is another opt with the same name
            ret = self._conf.register_opt(opt, group)
            # Already public on the wrapped conf - expose it here too.
            self._import_opt(opt.dest, group)
            return ret
        else:
            # Unknown to the wrapped conf - keep it private to this filter.
            return self._fconf.register_opt(opt, group)

    def register_opts(self, opts, group=None):
        """Register multiple option schemas at once."""
        for opt in opts:
            self.register_opt(opt, group)

    def register_cli_opt(self, opt, group=None):
        """Register a CLI option schema.

        :param opt: an instance of an Opt sub-class
        :param group: an optional OptGroup object or group name
        :return: False if the opt was already registered, True otherwise
        :raises: DuplicateOptError, ArgsAlreadyParsedError
        """
        if self._already_registered(self._conf, opt, group):
            # Raises DuplicateError if there is another opt with the same name
            ret = self._conf.register_cli_opt(opt, group)
            self._import_opt(opt.dest, group)
            return ret
        else:
            return self._fconf.register_cli_opt(opt, group)

    def register_cli_opts(self, opts, group=None):
        """Register multiple CLI option schemas at once."""
        for opt in opts:
            self.register_cli_opt(opt, group)

    def register_group(self, group):
        """Register an option group.

        :param group: an OptGroup object
        """
        self._fconf.register_group(group)

    def import_opt(self, opt_name, module_str, group=None):
        """Import an option definition from a module.

        :param opt_name: the name/dest of the opt
        :param module_str: the name of a module to import
        :param group: an optional OptGroup object or group name
        :raises: NoSuchOptError, NoSuchGroupError
        """
        self._conf.import_opt(opt_name, module_str, group)
        self._import_opt(opt_name, group)

    def import_group(self, group, module_str):
        """Import an option group from a module.

        Note that this allows access to all options registered with
        the group whether or not those options were registered by
        the given module.

        :param group: an optional OptGroup object or group name
        :param module_str: the name of a module to import
        :raises: ImportError, NoSuchGroupError
        """
        self._conf.import_group(group, module_str)
        group = self._import_group(group)
        # Whole-group import: every opt in the group becomes visible.
        group._all_opts = True

    def _import_opt(self, opt_name, group):
        # Record that opt_name (optionally inside `group`) is visible.
        if group is None:
            self._imported_opts.add(opt_name)
            return True
        else:
            group = self._import_group(group)
            return group._import_opt(opt_name)

    def _import_group(self, group_or_name):
        # Return (creating on first use) the GroupAttr wrapper for a group.
        if isinstance(group_or_name, cfg.OptGroup):
            group_name = group_or_name.name
        else:
            group_name = group_or_name
        if group_name in self._imported_groups:
            return self._imported_groups[group_name]
        else:
            group = self.GroupAttr(self._conf, group_name)
            self._imported_groups[group_name] = group
            return group

    class GroupAttr(collections.Mapping):
        """Helper class to wrap a group object.

        Represents the option values of a group as a mapping and attributes.
        """

        def __init__(self, conf, group):
            """Construct a GroupAttr object.

            :param conf: a ConfigOpts object
            :param group: an OptGroup object
            """
            self._conf = conf
            self._group = group
            # Opts imported individually; _all_opts short-circuits the
            # check when the whole group was imported.
            self._imported_opts = set()
            self._all_opts = False

        def __getattr__(self, name):
            """Look up an option value."""
            if not self._all_opts and name not in self._imported_opts:
                raise cfg.NoSuchOptError(name)
            return getattr(self._conf[self._group], name)

        def __getitem__(self, key):
            """Look up an option value."""
            return getattr(self, key)

        def __contains__(self, key):
            """Return True if key is the name of a registered opt or group."""
            return key in self._imported_opts

        def __iter__(self):
            """Iterate over all registered opt and group names."""
            for key in self._imported_opts:
                yield key

        def __len__(self):
            """Return the number of options and option groups."""
            return len(self._imported_opts)

        def _import_opt(self, opt_name):
            # Mark a single opt of this group as visible via the filter.
            self._imported_opts.add(opt_name)
| |
from json import dumps
from datetime import timedelta
from draughtcraft.tests import TestApp, TestAuthenticatedApp
from draughtcraft import model
class TestAuthenticatedUserRecipeLookup(TestAuthenticatedApp):

    def test_lookup(self):
        """The logged-in author of a recipe can open its builder page."""
        slug_names = ('American IPA', 'American IPA (Revised)')
        model.Recipe(
            name='American IPA',
            slugs=[model.RecipeSlug(name=n) for n in slug_names],
            author=model.User.get(1)
        )
        model.commit()

        # Unknown recipe id -> 404.
        resp = self.get('/recipes/500/american-ipa/builder', status=404)
        assert resp.status_int == 404

        # Either registered slug resolves to the builder.
        for path in ('/recipes/1/american-ipa/builder',
                     '/recipes/1/american-ipa-revised/builder'):
            resp = self.get(path)
            assert resp.status_int == 200

        # A slug that was never registered -> 404.
        resp = self.get('/recipes/1/invalid_slug/builder', status=404)
        assert resp.status_int == 404

    def test_unauthorized_lookup_trial_recipe(self):
        """An authorless (trial) recipe is not editable by a logged-in
        user."""
        model.Recipe(
            name='American IPA',
            slugs=[model.RecipeSlug(name='American IPA')]
        )
        model.commit()

        resp = self.get('/recipes/1/american-ipa/builder', status=401)
        assert resp.status_int == 401

    def test_unauthorized_lookup_draft(self):
        """Even the author cannot edit the published form of a recipe."""
        model.Recipe(
            name='American IPA',
            slugs=[model.RecipeSlug(name='American IPA')],
            author=model.User.get(1),
            state='PUBLISHED'
        )
        model.commit()

        resp = self.get('/recipes/1/american-ipa/builder', status=401)
        assert resp.status_int == 401

    def test_unauthorized_lookup_other_user(self):
        """A recipe owned by somebody else is not editable."""
        model.Recipe(
            name='American IPA',
            slugs=[model.RecipeSlug(name='American IPA')],
            author=model.User()
        )
        model.commit()

        resp = self.get('/recipes/1/american-ipa/builder', status=401)
        assert resp.status_int == 401
class TestTrialRecipeLookup(TestApp):

    def test_unauthorized_lookup_trial_user(self):
        """An anonymous visitor cannot edit a recipe that has an author."""
        model.Recipe(
            name='American IPA',
            slugs=[model.RecipeSlug(name='American IPA')],
            author=model.User.get(1)
        )
        model.commit()

        resp = self.get('/recipes/1/american-ipa/builder', status=401)
        assert resp.status_int == 401

    def test_unauthorized_lookup_trial_other_user(self):
        """A trial recipe belonging to a different session is off limits."""
        model.Recipe(
            name='American IPA',
            slugs=[model.RecipeSlug(name='American IPA')]
        )
        model.commit()

        resp = self.get('/recipes/1/american-ipa/builder', status=401)
        assert resp.status_int == 401
class TestRecipeSave(TestAuthenticatedApp):
    """PUT /recipes/<id>/<slug>/builder updates recipe fields from the JSON
    'recipe' form parameter (tunnelled through POST with _method=PUT)."""

    def test_name_update(self):
        # Renaming appends a new slug; the existing slugs are preserved.
        model.Recipe(
            name='American IPA',
            slugs=[
                model.RecipeSlug(name='American IPA'),
                model.RecipeSlug(name='American IPA (Revised)')
            ],
            author=model.User.get(1)
        )
        model.commit()

        response = self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={
                'recipe': dumps({'name': 'Some Recipe'})
            }
        )
        assert response.status_int == 200

        recipe = model.Recipe.query.first()
        assert recipe.name == 'Some Recipe'
        slugs = recipe.slugs
        assert len(slugs) == 3
        assert slugs[0].slug == 'american-ipa'
        assert slugs[1].slug == 'american-ipa-revised'
        assert slugs[2].slug == 'some-recipe'

    def test_volume_update(self):
        model.Recipe(
            name='American IPA',
            slugs=[
                model.RecipeSlug(name='American IPA'),
                model.RecipeSlug(name='American IPA (Revised)')
            ],
            author=model.User.get(1)
        )
        model.commit()

        response = self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={
                'recipe': dumps({'gallons': 10})
            }
        )
        assert response.status_int == 200

        recipe = model.Recipe.query.first()
        assert recipe.gallons == 10

    def test_style_save(self):
        # A style is referenced by its integer id in the payload.
        model.Style(name='Some Style')
        model.Recipe(
            name='American IPA',
            slugs=[
                model.RecipeSlug(name='American IPA'),
                model.RecipeSlug(name='American IPA (Revised)')
            ],
            author=model.User.get(1)
        )
        model.commit()

        response = self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={
                'recipe': dumps({'style': 1})
            }
        )
        assert response.status_int == 200

        recipe = model.Recipe.query.first()
        assert recipe.style.name == 'Some Style'

    def test_style_remove(self):
        # Passing style=None detaches the existing style.
        model.Recipe(
            name='American IPA',
            slugs=[
                model.RecipeSlug(name='American IPA'),
                model.RecipeSlug(name='American IPA (Revised)')
            ],
            style=model.Style(name='Some Style'),
            author=model.User.get(1)
        )
        model.commit()

        response = self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={
                'recipe': dumps({'style': None})
            }
        )
        assert response.status_int == 200

        recipe = model.Recipe.query.first()
        assert recipe.style is None

    def test_mash_settings_update(self):
        model.Recipe(
            name='American IPA',
            slugs=[
                model.RecipeSlug(name='American IPA'),
                model.RecipeSlug(name='American IPA (Revised)')
            ],
            author=model.User.get(1)
        )
        model.commit()

        response = self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={
                'recipe': dumps({
                    'mash_method': 'TEMPERATURE',
                    'mash_instructions': 'Mash for an hour.'
                })
            }
        )
        assert response.status_int == 200

        recipe = model.Recipe.query.first()
        assert recipe.mash_method == 'TEMPERATURE'
        assert recipe.mash_instructions == 'Mash for an hour.'

    def test_boil_settings_update(self):
        model.Recipe(
            name='American IPA',
            slugs=[
                model.RecipeSlug(name='American IPA'),
                model.RecipeSlug(name='American IPA (Revised)')
            ],
            author=model.User.get(1)
        )
        model.commit()

        response = self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={
                'recipe': dumps({
                    'boil_minutes': 90
                })
            }
        )
        assert response.status_int == 200

        recipe = model.Recipe.query.first()
        assert recipe.boil_minutes == 90

    def test_fermentation_steps_update(self):
        # Fermentation steps are replaced wholesale from the payload list.
        model.Recipe(
            name='American IPA',
            slugs=[
                model.RecipeSlug(name='American IPA'),
                model.RecipeSlug(name='American IPA (Revised)')
            ],
            author=model.User.get(1)
        )
        model.commit()

        response = self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={
                'recipe': dumps({
                    'fermentation_steps': [{
                        'step': 'PRIMARY',
                        'days': 7,
                        'fahrenheit': 68
                    }, {
                        'step': 'SECONDARY',
                        'days': 14,
                        'fahrenheit': 62
                    }, {
                        'step': 'TERTIARY',
                        'days': 60,
                        'fahrenheit': 38
                    }]
                })
            }
        )
        assert response.status_int == 200

        recipe = model.Recipe.query.first()
        steps = recipe.fermentation_steps
        assert len(steps) == 3
        assert steps[0].step == 'PRIMARY'
        assert steps[0].days == 7
        assert steps[0].fahrenheit == 68
        assert steps[1].step == 'SECONDARY'
        assert steps[1].days == 14
        assert steps[1].fahrenheit == 62
        assert steps[2].step == 'TERTIARY'
        assert steps[2].days == 60
        assert steps[2].fahrenheit == 38

    def test_notes_update(self):
        model.Recipe(
            name='American IPA',
            slugs=[
                model.RecipeSlug(name='American IPA'),
                model.RecipeSlug(name='American IPA (Revised)')
            ],
            author=model.User.get(1)
        )
        model.commit()

        response = self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={
                'recipe': dumps({'notes': 'ABC123'})
            }
        )
        assert response.status_int == 200

        recipe = model.Recipe.query.first()
        assert recipe.notes == 'ABC123'
class TestMashAdditions(TestAuthenticatedApp):
    """Saving 'mash' additions through the builder PUT endpoint."""

    def test_fermentable(self):
        model.Recipe(
            name='American IPA',
            author=model.User.get(1)
        )
        model.Fermentable(
            name='2-Row',
            origin='US',
            ppg=36,
            lovibond=2
        )
        model.commit()

        # The ingredient is referenced by id + class name in the payload.
        data = {
            u'mash': {
                u'additions': [{
                    u'amount': 5,
                    u'unit': u'POUND',
                    u'ingredient': {
                        u'id': 1,
                        u'class': 'Fermentable'
                    }
                }]
            }
        }
        self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={'recipe': dumps(data)}
        )

        assert model.RecipeAddition.query.count() == 1
        a = model.RecipeAddition.get(1)
        assert a.recipe == model.Recipe.get(1)
        assert a.amount == 5
        assert a.unit == 'POUND'
        assert a.fermentable == model.Fermentable.get(1)

    def test_hop(self):
        model.Recipe(
            name='American IPA',
            author=model.User.get(1)
        )
        model.Hop(name='Cascade', origin='US')
        model.commit()

        data = {
            u'mash': {
                u'additions': [{
                    u'use': u'MASH',
                    u'form': u'PELLET',
                    u'alpha_acid': 8,
                    u'amount': 16,
                    u'duration': 60,
                    u'unit': u'OUNCE',
                    u'ingredient': {
                        u'id': 1,
                        u'class': u'Hop'
                    }
                }]
            }
        }
        self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={'recipe': dumps(data)}
        )

        assert model.HopAddition.query.count() == 1
        a = model.HopAddition.get(1)
        assert a.recipe == model.Recipe.get(1)
        assert a.amount == 16
        # Integer minutes in the payload are stored as a timedelta.
        assert a.duration == timedelta(minutes=60)
        assert a.unit == 'OUNCE'
        assert a.form == 'PELLET'
        assert a.alpha_acid == 8
        assert a.hop == model.Hop.get(1)
class TestHopAdditions(TestAuthenticatedApp):
    """Tests for saving boil-stage ingredient additions via the builder."""

    def test_fermentable(self):
        """A fermentable added to the boil should be stored with its timing."""
        model.Recipe(
            name='American IPA',
            author=model.User.get(1)
        )
        model.Fermentable(
            name='2-Row',
            origin='US',
            ppg=36,
            lovibond=2
        )
        model.commit()

        addition = {
            u'amount': 5,
            u'use': 'BOIL',
            u'duration': 15,
            u'unit': u'POUND',
            u'ingredient': {u'id': 1, u'class': 'Fermentable'}
        }
        self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={'recipe': dumps({u'boil': {u'additions': [addition]}})}
        )

        assert model.RecipeAddition.query.count() == 1
        saved = model.RecipeAddition.get(1)
        assert saved.recipe == model.Recipe.get(1)
        assert saved.amount == 5
        assert saved.use == 'BOIL'
        assert saved.duration == timedelta(minutes=15)
        assert saved.unit == 'POUND'
        assert saved.fermentable == model.Fermentable.get(1)

    def test_hop(self):
        """A hop added to the boil should be stored as a HopAddition."""
        model.Recipe(
            name='American IPA',
            author=model.User.get(1)
        )
        model.Hop(name='Cascade', origin='US')
        model.commit()

        addition = {
            u'use': u'BOIL',
            u'form': u'PELLET',
            u'alpha_acid': 8,
            u'amount': 16,
            u'duration': 60,
            u'unit': u'OUNCE',
            u'ingredient': {u'id': 1, u'class': u'Hop'}
        }
        self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={'recipe': dumps({u'boil': {u'additions': [addition]}})}
        )

        assert model.HopAddition.query.count() == 1
        saved = model.HopAddition.get(1)
        assert saved.recipe == model.Recipe.get(1)
        assert saved.amount == 16
        assert saved.use == 'BOIL'
        assert saved.duration == timedelta(minutes=60)
        assert saved.unit == 'OUNCE'
        assert saved.form == 'PELLET'
        assert saved.alpha_acid == 8
        assert saved.hop == model.Hop.get(1)
class TestFermentationAdditions(TestAuthenticatedApp):
    """Tests for saving fermentation-stage ingredient additions."""

    def test_hop(self):
        """A dry-hop (SECONDARY) addition should be stored as a HopAddition."""
        model.Recipe(
            name='American IPA',
            author=model.User.get(1)
        )
        model.Hop(name='Cascade', origin='US')
        model.commit()
        data = {
            u'fermentation': {
                u'additions': [{
                    u'use': u'SECONDARY',
                    u'form': u'PELLET',
                    u'alpha_acid': 8,
                    u'amount': 16,
                    u'unit': u'OUNCE',
                    u'ingredient': {
                        u'id': 1,
                        u'class': u'Hop'
                    }
                }]
            }
        }
        self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={'recipe': dumps(data)}
        )
        assert model.HopAddition.query.count() == 1
        a = model.HopAddition.get(1)
        assert a.recipe == model.Recipe.get(1)
        assert a.amount == 16
        assert a.use == 'SECONDARY'
        assert a.unit == 'OUNCE'
        assert a.form == 'PELLET'
        assert a.alpha_acid == 8
        assert a.hop == model.Hop.get(1)

    def test_yeast(self):
        """A yeast pitched at PRIMARY should be stored as an addition."""
        model.Recipe(
            name='American IPA',
            author=model.User.get(1)
        )
        model.Yeast(
            name='Wyeast 1056 - American Ale',
            form='LIQUID',
            attenuation=.75
        )
        model.commit()
        # The original payload repeated the u'use' key (u'MASH', then
        # 'PRIMARY'); in a dict literal the later duplicate silently wins,
        # so the first entry was dead code and has been removed.
        # NOTE(review): the payload nests under u'mash' even though this is
        # a fermentation addition -- confirm the builder ignores the section
        # name for yeast, as the assertions below suggest.
        data = {
            u'mash': {
                u'additions': [{
                    u'amount': 1,
                    u'use': 'PRIMARY',
                    u'ingredient': {
                        u'id': 1,
                        u'class': u'Yeast'
                    }
                }]
            }
        }
        self.post(
            '/recipes/1/american-ipa/builder?_method=PUT',
            params={'recipe': dumps(data)}
        )
        assert model.RecipeAddition.query.count() == 1
        a = model.RecipeAddition.get(1)
        assert a.recipe == model.Recipe.get(1)
        assert a.amount == 1
        assert a.use == 'PRIMARY'
        assert a.yeast == model.Yeast.get(1)
class TestRecipePublish(TestAuthenticatedApp):
    """Tests for publishing a draft recipe from the builder."""

    def test_simple_publish(self):
        """POSTing to the publish endpoint should move DRAFT -> PUBLISHED."""
        model.Recipe(name='Rocky Mountain River IPA', author=model.User.get(1))
        model.Fermentable(
            name='2-Row',
            origin='US',
            ppg=36,
            lovibond=2
        )
        model.commit()

        # New recipes start out as drafts.
        assert model.Recipe.query.first().state == "DRAFT"

        publish_url = '/recipes/1/rocky-mountain-river-ipa/builder/publish/'
        self.post(publish_url)

        assert model.Recipe.query.first().state == "PUBLISHED"
| |
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API defaults to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ProviderNotificationSubscriptionsApi(object):
    """
    Client for the provider notification-subscription endpoints
    (subscribe, unsubscribe, and the webhook callback).

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        # Prefer an explicitly supplied client; otherwise reuse (and lazily
        # create) the shared client held by the global Configuration object.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
    def create_provider_notification_subscription(self, **kwargs):
        """
        Subscribe
        Subscribe to receive webhook notifications when providers join or leave a network. The request must include a list of National Provider Index (NPI) numbers for providers, a callback URL where notifications should be sent, and either a plan ID or a network ID. The response will include a `nonce` value. The `nonce` will be included in all webhook notifications originating from this subscription and will be used as the identifier for all subsequent requests. The `network_id` and `plan_id` are mutually exclusive. The request must include a value for one of the fields, but cannot include both. Examples of valid request bodies are as follows: ``` { \"npis\": [\"2712589\", \"8498549\", \"19528190\"], \"plan_id\": 1, \"callback_url\": \"https://example.com/webhook\" } ``` ``` { \"npis\": [\"2712589\", \"8498549\", \"19528190\"], \"network_id\": 1, \"callback_url\": \"https://example.com/webhook\" } ```
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.create_provider_notification_subscription(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param RequestProviderNotificationSubscription root:
        :return: NotificationSubscriptionResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Return only the deserialized body, not the (data, status, headers)
        # tuple produced by the *_with_http_info variant.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            # Asynchronous call: the underlying method returns the request
            # thread, which is handed straight back to the caller.
            return self.create_provider_notification_subscription_with_http_info(**kwargs)
        else:
            (data) = self.create_provider_notification_subscription_with_http_info(**kwargs)
            return data
    def create_provider_notification_subscription_with_http_info(self, **kwargs):
        """
        Subscribe
        Subscribe to receive webhook notifications when providers join or leave a network. The request must include a list of National Provider Index (NPI) numbers for providers, a callback URL where notifications should be sent, and either a plan ID or a network ID. The response will include a `nonce` value. The `nonce` will be included in all webhook notifications originating from this subscription and will be used as the identifier for all subsequent requests. The `network_id` and `plan_id` are mutually exclusive. The request must include a value for one of the fields, but cannot include both. Examples of valid request bodies are as follows: ``` { \"npis\": [\"2712589\", \"8498549\", \"19528190\"], \"plan_id\": 1, \"callback_url\": \"https://example.com/webhook\" } ``` ``` { \"npis\": [\"2712589\", \"8498549\", \"19528190\"], \"network_id\": 1, \"callback_url\": \"https://example.com/webhook\" } ```
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.create_provider_notification_subscription_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param RequestProviderNotificationSubscription root:
        :return: NotificationSubscriptionResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Declared endpoint parameters plus the framework-level options.
        all_params = ['root']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured here by design; the kwargs entries are
        # folded into it below. Reject anything the endpoint does not declare.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_provider_notification_subscription" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        resource_path = '/providers/subscription'.replace('{format}', 'json')
        path_params = {}
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # The subscription request document is sent as the request body.
        if 'root' in params:
            body_params = params['root']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['Vericred-Api-Key']
        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='NotificationSubscriptionResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
    def delete_provider_notification_subscription(self, nonce, **kwargs):
        """
        Unsubscribe
        Unsubscribe from an existing webhook notification.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_provider_notification_subscription(nonce, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str nonce: The nonce value that was included in the response when the subscription was created (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Return only the deserialized body (None for this endpoint).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            # Asynchronous call: hand back the request thread.
            return self.delete_provider_notification_subscription_with_http_info(nonce, **kwargs)
        else:
            (data) = self.delete_provider_notification_subscription_with_http_info(nonce, **kwargs)
            return data
    def delete_provider_notification_subscription_with_http_info(self, nonce, **kwargs):
        """
        Unsubscribe
        Unsubscribe from an existing webhook notification.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_provider_notification_subscription_with_http_info(nonce, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str nonce: The nonce value that was included in the response when the subscription was created (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Declared endpoint parameters plus the framework-level options.
        all_params = ['nonce']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Capture locals, fold kwargs in, and reject undeclared arguments.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_provider_notification_subscription" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'nonce' is set
        if ('nonce' not in params) or (params['nonce'] is None):
            raise ValueError("Missing the required parameter `nonce` when calling `delete_provider_notification_subscription`")
        collection_formats = {}
        resource_path = '/providers/subscription/{nonce}'.replace('{format}', 'json')
        path_params = {}
        # The nonce is interpolated into the URL path, not sent as a body.
        if 'nonce' in params:
            path_params['nonce'] = params['nonce']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept([])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['Vericred-Api-Key']
        return self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
    def notify_provider_notification_subscription(self, **kwargs):
        """
        Webhook
        Webhook notifications are sent when there are events relevant to a subscription. Notifications will be sent to the callback URL that was provided in the original request. The endpoint handling this request should respond with a successful status code (200 <= Status Code < 300). If a successful status code is not returned the notification will be sent again at a regular interval.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.notify_provider_notification_subscription(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param ProviderNetworkEventNotification root:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Return only the deserialized body (None for this endpoint).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            # Asynchronous call: hand back the request thread.
            return self.notify_provider_notification_subscription_with_http_info(**kwargs)
        else:
            (data) = self.notify_provider_notification_subscription_with_http_info(**kwargs)
            return data
    def notify_provider_notification_subscription_with_http_info(self, **kwargs):
        """
        Webhook
        Webhook notifications are sent when there are events relevant to a subscription. Notifications will be sent to the callback URL that was provided in the original request. The endpoint handling this request should respond with a successful status code (200 <= Status Code < 300). If a successful status code is not returned the notification will be sent again at a regular interval.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.notify_provider_notification_subscription_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param ProviderNetworkEventNotification root:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Declared endpoint parameters plus the framework-level options.
        all_params = ['root']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Capture locals, fold kwargs in, and reject undeclared arguments.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method notify_provider_notification_subscription" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        # '/CALLBACK_URL' is a placeholder path: this method documents the
        # outbound webhook contract rather than a real Vericred endpoint.
        resource_path = '/CALLBACK_URL'.replace('{format}', 'json')
        path_params = {}
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # The notification document is sent as the request body.
        if 'root' in params:
            body_params = params['root']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept([])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['Vericred-Api-Key']
        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
| |
#!/usr/bin/env python3
"""Config schema validation"""
# Copyright 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from jsonschema import validate
import jsl
import lib.logger as logger
from lib.exception import UserException, UserCriticalException
def _string_int_field(**kwargs):
    """Schema field that accepts either a string or an integer."""
    alternatives = [jsl.fields.StringField(), jsl.fields.IntField()]
    return jsl.fields.AnyOfField(alternatives, **kwargs)
def _string_int_array_field(**kwargs):
    """Schema field accepting a string, an integer, or an array of either."""
    alternatives = [
        jsl.fields.StringField(),
        jsl.fields.IntField(),
        jsl.fields.ArrayField(_string_int_field()),
    ]
    return jsl.fields.AnyOfField(alternatives, **kwargs)
class Globals(jsl.Document):
    """Schema for the top-level ``globals`` section of the config."""
    introspection = jsl.fields.BooleanField()
    env_variables = jsl.fields.DictField()
    switch_mode_mgmt = jsl.fields.StringField()
    switch_mode_data = jsl.fields.StringField()
    dhcp_lease_time = _string_int_field()
class LocationRacks(jsl.Document):
    """Schema for one entry of ``location.racks``."""
    label = _string_int_field()
    room = _string_int_field()
    row = _string_int_field()
    cell = _string_int_field()
class Location(jsl.Document):
    """Schema for the top-level ``location`` section of the config."""
    time_zone = jsl.fields.StringField()
    data_center = _string_int_field()
    racks = jsl.fields.ArrayField(
        jsl.fields.DocumentField(LocationRacks))
class DeployerNetworks(jsl.Document):
    """Schema for ``deployer.networks``: management and client networks."""
    # Management network entries; only the device name is mandatory.
    mgmt = jsl.fields.ArrayField(jsl.fields.DictField(
        properties={
            'device': jsl.fields.StringField(required=True),
            'interface_ipaddr': jsl.fields.IPv4Field(),
            'container_ipaddr': jsl.fields.IPv4Field(),
            'bridge_ipaddr': jsl.fields.IPv4Field(),
            'vlan': jsl.fields.IntField(),
            'netmask': jsl.fields.IPv4Field(),
            'prefix': jsl.fields.IntField()},
        additional_properties=False,
        required=True))
    # Client network entries; note the stricter required set than ``mgmt``.
    client = jsl.fields.ArrayField(jsl.fields.DictField(
        properties={
            'type': jsl.fields.StringField(required=True),
            'device': jsl.fields.StringField(required=True),
            'container_ipaddr': jsl.fields.IPv4Field(required=True),
            'bridge_ipaddr': jsl.fields.IPv4Field(required=True),
            'vlan': jsl.fields.IntField(required=True),
            'netmask': jsl.fields.IPv4Field(),
            'prefix': jsl.fields.IntField()},
        additional_properties=False,
        required=True))
class Deployer(jsl.Document):
    """Schema for the top-level ``deployer`` section of the config."""
    gateway = jsl.fields.BooleanField()
    # NOTE(review): ``jsl.DocumentField`` (without ``.fields``) -- jsl
    # exposes the same class at package level, so this is equivalent to the
    # ``jsl.fields.DocumentField`` spelling used elsewhere in this module.
    networks = jsl.DocumentField(DeployerNetworks)
class SwitchesMgmtData(object):
    """Shared per-switch schema fragments.

    Plain container class (not a jsl.Document): ``mgmt_data`` is reused by
    both the ``mgmt`` and ``data`` arrays in ``Switches``.
    """
    # One in-band/out-of-band interface on the switch.
    interfaces = jsl.fields.DictField(
        properties={
            'type': jsl.fields.StringField(required=True),
            'ipaddr': jsl.fields.IPv4Field(required=True),
            'vlan': jsl.fields.IntField(),
            'port': _string_int_field(),
            'netmask': jsl.fields.IPv4Field(),
            'prefix': jsl.fields.IntField()},
        additional_properties=False,
        required=True)
    # One cabling link from this switch to a target device.
    links = jsl.fields.DictField(
        properties={
            'target': jsl.fields.StringField(required=True),
            'ports': _string_int_array_field(required=True),
            'ipaddr': jsl.fields.IPv4Field(),
            'vlan': jsl.fields.IntField(),
            'vip': jsl.fields.IPv4Field(),
            'netmask': jsl.fields.IPv4Field(),
            'prefix': jsl.fields.IntField()},
        additional_properties=False,
        required=True)
    # Full per-switch record; only ``label`` is mandatory.
    mgmt_data = jsl.fields.DictField(
        properties={
            'label': jsl.fields.StringField(required=True),
            'hostname': jsl.fields.StringField(),
            'userid': jsl.fields.StringField(),
            'password': jsl.fields.StringField(),
            'ssh_key': jsl.fields.StringField(),
            'class': jsl.fields.StringField(),
            'rack_id': _string_int_field(),
            'rack_eia': _string_int_field(),
            'interfaces': jsl.fields.ArrayField(interfaces),
            'links': jsl.fields.ArrayField(links)},
        additional_properties=False,
        required=True)
class Switches(jsl.Document):
    """Schema for the ``switches`` section; management and data switches
    share the same per-switch schema (see SwitchesMgmtData)."""
    mgmt = jsl.fields.ArrayField(SwitchesMgmtData.mgmt_data)
    data = jsl.fields.ArrayField(SwitchesMgmtData.mgmt_data)
class Interfaces(jsl.Document):
    """Schema for one entry of the top-level ``interfaces`` list.

    The lower-case fields appear to mirror Debian/Ubuntu
    ``/etc/network/interfaces`` keys and the UPPER-CASE fields RHEL-style
    ``ifcfg`` keys -- TODO confirm against the interface file generators.
    """
    label = jsl.fields.StringField()
    description = jsl.fields.StringField()
    iface = jsl.fields.StringField()
    address_start = jsl.fields.IPv4Field()
    address_list = jsl.fields.ArrayField()
    method = jsl.fields.StringField()
    dns_search = jsl.fields.StringField()
    dns_nameservers = jsl.fields.StringField()
    broadcast = jsl.fields.IPv4Field()
    netmask = jsl.fields.IPv4Field()
    gateway = jsl.fields.IPv4Field()
    mtu = jsl.fields.IntField()
    vlan_raw_device = jsl.fields.StringField()
    pre_up = jsl.fields.StringField()
    bridge_stp = jsl.fields.BooleanField()
    bridge_maxage = jsl.fields.IntField()
    bridge_fd = jsl.fields.IntField()
    bridge_ports = jsl.fields.StringField()
    bridge_hello = jsl.fields.IntField()
    bond_primary = jsl.fields.StringField()
    bond_master = jsl.fields.StringField()
    bond_mode = jsl.fields.StringField()
    bond_miimon = jsl.fields.IntField()
    bond_slaves = jsl.fields.StringField()
    DEVICE = jsl.fields.StringField()
    TYPE = jsl.fields.StringField()
    IPADDR_start = jsl.fields.IPv4Field()
    IPADDR_list = jsl.fields.ArrayField()
    BOOTPROTO = jsl.fields.StringField()
    ONBOOT = jsl.fields.BooleanField()
    ONPARENT = jsl.fields.BooleanField()
    SEARCH = jsl.fields.StringField()
    DNS1 = jsl.fields.IPv4Field()
    DNS2 = jsl.fields.IPv4Field()
    NETMASK = jsl.fields.IPv4Field()
    GATEWAY = jsl.fields.IPv4Field()
    BROADCAST = jsl.fields.IPv4Field()
    VLAN = jsl.fields.BooleanField()
    MTU = jsl.fields.IntField()
    STP = jsl.fields.BooleanField()
    MASTER = jsl.fields.StringField()
    SLAVE = jsl.fields.BooleanField()
    BRIDGE = jsl.fields.StringField()
    BONDING_OPTS = jsl.fields.StringField()
    BONDING_MASTER = jsl.fields.BooleanField()
    NM_CONTROLLED = jsl.fields.BooleanField()
class Networks(jsl.Document):
    """Schema for a named group of interface labels."""
    label = jsl.fields.StringField()
    interfaces = jsl.fields.ArrayField(
        jsl.fields.StringField(),
        required=True)
class SoftwareBootstrap(jsl.Document):
    """Schema for one ``software_bootstrap`` command entry."""
    hosts = jsl.fields.StringField()
    executable = jsl.fields.StringField()
    command = jsl.fields.StringField()
class SchemaDefinition(jsl.Document):
    """Root schema for the cluster config file.

    ``version``, ``interfaces``, and ``node_templates`` are mandatory; all
    other sections are optional.
    """
    version = jsl.fields.StringField(required=True)
    globals = jsl.fields.DocumentField(Globals)
    location = jsl.fields.DocumentField(Location)
    # NOTE(review): ``jsl.DocumentField`` (no ``.fields``) -- equivalent,
    # since jsl re-exports the class at package level.
    deployer = jsl.DocumentField(Deployer)
    switches = jsl.fields.DocumentField(Switches)
    interfaces = jsl.fields.ArrayField(
        jsl.fields.DocumentField(Interfaces),
        required=True)
    networks = jsl.fields.ArrayField(jsl.fields.DocumentField(Networks))
    node_templates = jsl.fields.ArrayField(required=True)
    software_bootstrap = jsl.fields.ArrayField(
        jsl.fields.DocumentField(SoftwareBootstrap))
class ValidateConfigSchema(object):
    """Config schema validation

    Args:
        config (object): Config
    """

    def __init__(self, config):
        self.log = logger.getlogger()
        self.config = config

    def validate_config_schema(self):
        """Validate the loaded config against the jsl-generated JSON schema.

        Raises:
            UserException: If the violation is an 'additional properties'
                error (treated as recoverable).
            UserCriticalException: For any other schema violation.
        """
        schema = SchemaDefinition.get_schema(ordered=True)
        try:
            validate(
                self.config, schema, format_checker=jsonschema.FormatChecker())
        except jsonschema.exceptions.ValidationError as error:
            if error.cause is None:
                # Build a dotted/indexed path (e.g. 'switches.mgmt[0].label')
                # to the offending element. error.path mixes string keys and
                # integer array indices; the original code initialized path
                # to None, which raised TypeError ('None += str') whenever
                # the first path element was an integer index. Start from ''
                # instead, which also yields a clean message for an empty
                # path rather than the literal string 'None'.
                path = ''
                for index, element in enumerate(error.path):
                    if isinstance(element, int):
                        path += '[{}]'.format(element)
                    elif index == 0:
                        path = '{}'.format(element)
                    else:
                        path += '.{}'.format(element)
                exc = 'Schema validation failed - {} - {}'.format(
                    path, str(error))
            else:
                # A format checker failure carries the underlying cause.
                exc = 'Schema validation failed - {} - {}'.format(
                    error.cause, str(error))
            if 'Additional properties are not allowed' in str(error):
                raise UserException(exc)
            else:
                raise UserCriticalException(exc)
| |
from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_http_client,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
error_to_compat_str,
ExtractorError,
limit_length,
sanitized_Request,
urlencode_postdata,
get_element_by_id,
clean_html,
)
class FacebookIE(InfoExtractor):
    # Extractor for Facebook-hosted videos: video pages, photo/story pages,
    # embeds, posts (possibly containing several videos) and group permalinks.
    # Also accepts the internal 'facebook:<id>' pseudo-URL scheme.
    _VALID_URL = r'''(?x)
                (?:
                    https?://
                        (?:\w+\.)?facebook\.com/
                        (?:[^#]*?\#!/)?
                        (?:
                            (?:
                                video/video\.php|
                                photo\.php|
                                video\.php|
                                video/embed|
                                story\.php
                            )\?(?:.*?)(?:v|video_id|story_fbid)=|
                            [^/]+/videos/(?:[^/]+/)?|
                            [^/]+/posts/|
                            groups/[^/]+/permalink/
                        )|
                    facebook:
                )
                (?P<id>[0-9]+)
                '''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'

    # Desktop Chrome UA: Facebook serves different (scrapable) markup for it.
    _CHROME_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'

    # Canonical video page; used for 'facebook:' URLs and as extraction fallback.
    _VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'

    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'note': 'Video with DASH manifest',
        'url': 'https://www.facebook.com/video.php?v=957955867617029',
        'md5': '54706e4db4f5ad58fbad82dde1f1213f',
        'info_dict': {
            'id': '957955867617029',
            'ext': 'mp4',
            'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
            'uploader': 'Demy de Zeeuw',
        },
    }, {
        'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
        'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
        'info_dict': {
            'id': '544765982287235',
            'ext': 'mp4',
            'title': '"What are you doing running in the snow?"',
            'uploader': 'FailArmy',
        }
    }, {
        'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
        'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
        'info_dict': {
            'id': '1035862816472149',
            'ext': 'mp4',
            'title': 'What the Flock Is Going On In New Zealand  Credit: ViralHog',
            'uploader': 'S. Saint',
        },
    }, {
        'note': 'swf params escaped',
        'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
        'md5': '97ba073838964d12c70566e0085c2b91',
        'info_dict': {
            'id': '10153664894881749',
            'ext': 'mp4',
            'title': 'Facebook video #10153664894881749',
        },
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }, {
        'url': 'facebook:544765982287235',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/groups/164828000315060/permalink/764967300301124/',
        'only_matching': True,
    }]

    def _login(self):
        """Log in with .netrc/CLI credentials, if provided; best-effort."""
        (useremail, password) = self._get_login_info()
        if useremail is None:
            return

        login_page_req = sanitized_Request(self._LOGIN_URL)
        self._set_cookie('facebook.com', 'locale', 'en_US')
        login_page = self._download_webpage(login_page_req, None,
                                            note='Downloading login page',
                                            errnote='Unable to download login page')
        # Hidden anti-CSRF tokens required by the login form.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                                                   note='Logging in', errnote='unable to fetch login page')
            # If the response still contains a login form, authentication failed.
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                error = self._html_search_regex(
                    r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
                    login_results, 'login error', default=None, group='error')
                if error:
                    raise ExtractorError('Unable to login: %s' % error, expected=True)
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # A 'checkpoint' page may follow a successful login; answer it
            # with "don't save device" to complete the flow.
            fb_dtsg = self._search_regex(
                r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
            h = self._search_regex(
                r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)

            if not fb_dtsg or not h:
                return

            check_form = {
                'fb_dtsg': fb_dtsg,
                'h': h,
                'name_action_selected': 'dont_save',
            }
            check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                                                    note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            # Network failure during login is non-fatal; extraction may still
            # work for public videos.
            self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
            return

    def _real_initialize(self):
        self._login()

    def _extract_from_url(self, url, video_id, fatal_if_no_video=True):
        """Return (webpage, info_dict) for `url`, or (webpage, False) when no
        video data is found and fatal_if_no_video is False."""
        req = sanitized_Request(url)
        req.add_header('User-Agent', self._CHROME_USER_AGENT)
        webpage = self._download_webpage(req, video_id)

        video_data = None

        # First source: JSON blob passed to the legacy SWF player between
        # these two exact JS fragments (possibly with escaped quotes).
        BEFORE = '{swf.addParam(param[0], param[1]);});'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(?:\n|\\\\n)(.*?)' + re.escape(AFTER), webpage)
        if m:
            # Undo JS string escaping before JSON-parsing.
            swf_params = m.group(1).replace('\\\\', '\\').replace('\\"', '"')
            data = dict(json.loads(swf_params))
            params_raw = compat_urllib_parse_unquote(data['params'])
            video_data = json.loads(params_raw)['video_data']

        def video_data_list2dict(video_data):
            # Group format entries by stream type (e.g. 'progressive').
            ret = {}
            for item in video_data:
                format_id = item['stream_type']
                ret.setdefault(format_id, []).append(item)
            return ret

        if not video_data:
            # Second source: a VideoConfig instance inside the
            # handleServerJS(...) bootstrap payload.
            server_js_data = self._parse_json(self._search_regex(
                r'handleServerJS\(({.+})\);', webpage, 'server js data', default='{}'), video_id)
            for item in server_js_data.get('instances', []):
                if item[1][0] == 'VideoConfig':
                    video_data = video_data_list2dict(item[2][0]['videoData'])
                    break

        if not video_data:
            if not fatal_if_no_video:
                return webpage, False
            # Surface Facebook's own unavailability message when present.
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')

        formats = []
        for format_id, f in video_data.items():
            if not f or not isinstance(f, list):
                continue
            for quality in ('sd', 'hd'):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        # Demote progressive streams; bump HD variants.
                        preference = -10 if format_id == 'progressive' else 0
                        if quality == 'hd':
                            preference += 5
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            'preference': preference,
                        })
            dash_manifest = f[0].get('dash_manifest')
            if dash_manifest:
                # Manifest is URL-quoted XML embedded in the JSON.
                formats.extend(self._parse_mpd_formats(
                    compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
        if not formats:
            raise ExtractorError('Cannot find video formats')

        self._sort_formats(formats)

        # Title fallbacks: page header -> photo caption -> generated placeholder.
        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id

        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        info_dict = {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
        }

        return webpage, info_dict

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Resolve 'facebook:<id>' pseudo-URLs to the canonical video page.
        real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
        webpage, info_dict = self._extract_from_url(real_url, video_id, fatal_if_no_video=False)

        if info_dict:
            return info_dict

        if '/posts/' in url:
            # A post may embed several videos; return them as a playlist of
            # 'facebook:' entries handled by this same extractor.
            entries = [
                self.url_result('facebook:%s' % vid, FacebookIE.ie_key())
                for vid in self._parse_json(
                    self._search_regex(
                        r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
                        webpage, 'video ids', group='ids'),
                    video_id)]

            return self.playlist_result(entries, video_id)
        else:
            # Retry against the canonical video page, this time fatally.
            _, info_dict = self._extract_from_url(
                self._VIDEO_PAGE_TEMPLATE % video_id,
                video_id, fatal_if_no_video=True)
            return info_dict
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
import uuid
from .. import models
class SubscriptionInCredentialsOperations(object):
    """SubscriptionInCredentialsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer

        self.config = config

    def _post_subscription(self, url, custom_headers, raw,
                           include_api_version, operation_config):
        """Shared implementation for every POST operation in this class.

        Formats `url` with the credential subscription id, builds the common
        headers/query parameters, sends the request and validates the
        response.

        :param str url: URL template containing '{subscriptionId}'.
        :param dict custom_headers: extra headers merged into the request.
        :param bool raw: when True, return the raw ClientRawResponse.
        :param bool include_api_version: add the 'api-version' query param.
        :param dict operation_config: per-operation overrides.
        :raises: :class:`ErrorException<fixtures.acceptancetestsazurespecials.models.ErrorException>`
         when the service does not return 200.
        """
        # Construct URL
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if include_api_version:
            query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    # NOTE: custom_headers defaults to None rather than a mutable {} shared
    # across calls (classic mutable-default-argument bug in the original).

    def post_method_global_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """
        POST method with subscriptionId modeled in credentials. Set the
        credential subscriptionId to '1234-5678-9012-3456' to succeed

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._post_subscription(
            '/azurespecials/subscriptionId/method/string/none/path/global/1234-5678-9012-3456/{subscriptionId}',
            custom_headers, raw, False, operation_config)

    def post_method_global_null(
            self, custom_headers=None, raw=False, **operation_config):
        """
        POST method with subscriptionId modeled in credentials. Set the
        credential subscriptionId to null, and client-side validation should
        prevent you from making this call

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._post_subscription(
            '/azurespecials/subscriptionId/method/string/none/path/global/null/{subscriptionId}',
            custom_headers, raw, False, operation_config)

    def post_method_global_not_provided_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """
        POST method with subscriptionId modeled in credentials. Set the
        credential subscriptionId to '1234-5678-9012-3456' to succeed

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # This operation is the only one that also sends 'api-version'.
        return self._post_subscription(
            '/azurespecials/subscriptionId/method/string/none/path/globalNotProvided/1234-5678-9012-3456/{subscriptionId}',
            custom_headers, raw, True, operation_config)

    def post_path_global_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """
        POST method with subscriptionId modeled in credentials. Set the
        credential subscriptionId to '1234-5678-9012-3456' to succeed

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._post_subscription(
            '/azurespecials/subscriptionId/path/string/none/path/global/1234-5678-9012-3456/{subscriptionId}',
            custom_headers, raw, False, operation_config)

    def post_swagger_global_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """
        POST method with subscriptionId modeled in credentials. Set the
        credential subscriptionId to '1234-5678-9012-3456' to succeed

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._post_subscription(
            '/azurespecials/subscriptionId/swagger/string/none/path/global/1234-5678-9012-3456/{subscriptionId}',
            custom_headers, raw, False, operation_config)
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.config import cfg
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import log as logging
# Option letting deployers force every ip_lib call through the root helper
# (registered by callers that need it, e.g. when running under XenServer/XCP).
OPTS = [
    cfg.BoolOpt('ip_lib_force_root',
                default=False,
                help=_('Force ip_lib calls to use the root helper')),
]

LOG = logging.getLogger(__name__)

# Device name of the loopback interface.
LOOPBACK_DEVNAME = 'lo'
# NOTE(ethuleau): depending on the version of iproute2, the vlan
# interface details vary.
VLAN_INTERFACE_DETAIL = ['vlan protocol 802.1q',
                         'vlan protocol 802.1Q',
                         'vlan id']
class SubProcessBase(object):
    """Base class for wrappers that shell out to the `ip` command.

    Handles root-helper usage and optional execution inside a network
    namespace.
    """

    def __init__(self, root_helper=None, namespace=None,
                 log_fail_as_error=True):
        self.root_helper = root_helper
        self.namespace = namespace
        self.log_fail_as_error = log_fail_as_error
        try:
            self.force_root = cfg.CONF.ip_lib_force_root
        except cfg.NoSuchOptError:
            # Only callers that need to force use of the root helper
            # need to register the option.
            self.force_root = False

    def _run(self, options, command, args):
        """Run `ip`, escalating to the root helper only when required."""
        if self.namespace:
            # Entering a namespace always requires the root helper.
            return self._as_root(options, command, args)
        elif self.force_root:
            # Force use of the root helper to ensure that commands
            # will execute in dom0 when running under XenServer/XCP.
            return self._execute(options, command, args, self.root_helper,
                                 log_fail_as_error=self.log_fail_as_error)
        else:
            return self._execute(options, command, args,
                                 log_fail_as_error=self.log_fail_as_error)

    def _as_root(self, options, command, args, use_root_namespace=False):
        """Run `ip` via the root helper, optionally in the root namespace."""
        if not self.root_helper:
            raise exceptions.SudoRequired()

        namespace = self.namespace if not use_root_namespace else None

        return self._execute(options,
                             command,
                             args,
                             self.root_helper,
                             namespace,
                             log_fail_as_error=self.log_fail_as_error)

    @classmethod
    def _execute(cls, options, command, args, root_helper=None,
                 namespace=None, log_fail_as_error=True):
        """Build the final command line and execute it.

        `options` become '-<o>' flags; a namespace wraps the call in
        'ip netns exec <ns> ip ...'.
        """
        opt_list = ['-%s' % o for o in options]
        if namespace:
            ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip']
        else:
            ip_cmd = ['ip']
        return utils.execute(ip_cmd + opt_list + [command] + list(args),
                             root_helper=root_helper,
                             log_fail_as_error=log_fail_as_error)

    def set_log_fail_as_error(self, fail_with_error):
        self.log_fail_as_error = fail_with_error
class IPWrapper(SubProcessBase):
    """High-level helper for `ip` operations not tied to a single device."""

    def __init__(self, root_helper=None, namespace=None):
        super(IPWrapper, self).__init__(root_helper=root_helper,
                                        namespace=namespace)
        self.netns = IpNetnsCommand(self)

    def device(self, name):
        """Return an IPDevice bound to this wrapper's namespace."""
        return IPDevice(name, self.root_helper, self.namespace)

    def get_devices(self, exclude_loopback=False):
        """Return an IPDevice for every link in the namespace."""
        retval = []
        # -o -d: one line per device with details (needed to spot VLANs).
        output = self._execute(['o', 'd'], 'link', ('list',),
                               self.root_helper, self.namespace)
        for line in output.split('\n'):
            if '<' not in line:
                continue
            tokens = line.split(' ', 2)
            if len(tokens) == 3:
                # VLAN devices appear as 'name@parent'; others end with ':'.
                if any(v in tokens[2] for v in VLAN_INTERFACE_DETAIL):
                    delimiter = '@'
                else:
                    delimiter = ':'
                name = tokens[1].rpartition(delimiter)[0].strip()

                if exclude_loopback and name == LOOPBACK_DEVNAME:
                    continue

                retval.append(IPDevice(name,
                                       self.root_helper,
                                       self.namespace))
        return retval

    def add_tuntap(self, name, mode='tap'):
        """Create a tun/tap device and return its IPDevice."""
        self._as_root('', 'tuntap', ('add', name, 'mode', mode))
        return IPDevice(name, self.root_helper, self.namespace)

    def add_veth(self, name1, name2, namespace2=None):
        """Create a veth pair; optionally move the peer into namespace2."""
        args = ['add', name1, 'type', 'veth', 'peer', 'name', name2]

        if namespace2 is None:
            namespace2 = self.namespace
        else:
            self.ensure_namespace(namespace2)
            args += ['netns', namespace2]

        self._as_root('', 'link', tuple(args))

        return (IPDevice(name1, self.root_helper, self.namespace),
                IPDevice(name2, self.root_helper, namespace2))

    def del_veth(self, name):
        """Delete a virtual interface between two namespaces."""
        self._as_root('', 'link', ('del', name))

    def ensure_namespace(self, name):
        """Create namespace `name` if missing; return a wrapper for it."""
        if not self.netns.exists(name):
            ip = self.netns.add(name)
            # Bring loopback up so the new namespace is immediately usable.
            lo = ip.device(LOOPBACK_DEVNAME)
            lo.link.set_up()
        else:
            ip = IPWrapper(self.root_helper, name)
        return ip

    def namespace_is_empty(self):
        """True when the namespace holds no devices besides loopback."""
        return not self.get_devices(exclude_loopback=True)

    def garbage_collect_namespace(self):
        """Conditionally destroy the namespace if it is empty."""
        if self.namespace and self.netns.exists(self.namespace):
            if self.namespace_is_empty():
                self.netns.delete(self.namespace)
                return True
        return False

    def add_device_to_namespace(self, device):
        """Move `device` into this wrapper's namespace (no-op if none)."""
        if self.namespace:
            device.link.set_netns(self.namespace)

    def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None,
                  local=None, port=None, proxy=False):
        """Create a vxlan device; `port`, when given, must be (min, max)."""
        cmd = ['add', name, 'type', 'vxlan', 'id', vni]
        if group:
            cmd.extend(['group', group])
        if dev:
            cmd.extend(['dev', dev])
        if ttl:
            cmd.extend(['ttl', ttl])
        if tos:
            cmd.extend(['tos', tos])
        if local:
            cmd.extend(['local', local])
        if proxy:
            cmd.append('proxy')
        # tuple: min,max
        if port and len(port) == 2:
            cmd.extend(['port', port[0], port[1]])
        elif port:
            raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port)
        self._as_root('', 'link', cmd)
        return (IPDevice(name, self.root_helper, self.namespace))

    @classmethod
    def get_namespaces(cls, root_helper):
        """Return the names of all network namespaces on the host."""
        output = cls._execute('', 'netns', ('list',), root_helper=root_helper)
        return [l.strip() for l in output.split('\n')]
class IpRule(IPWrapper):
    """Wrapper around `ip rule` for policy-routing rules."""

    def _check_exists(self, ip, table, rule_pr):
        """Return True if a rule with source `ip` already exists.

        NOTE(review): only 'from <ip>' is matched against `ip rule list`
        output; the `table` and `rule_pr` arguments are currently ignored —
        confirm whether that is intentional.
        """
        cmd = 'from ' + str(ip)
        args = ['list']
        output = self._as_root('', 'rule', tuple(args))
        for line in output.split('\n'):
            if cmd in line:
                LOG.info("_check_exists exists")
                return True
        LOG.info("_check_exists not exists")
        return False

    def add_rule_from(self, ip, table, rule_pr):
        """Add 'from <ip> lookup <table> priority <rule_pr>'."""
        args = ['add', 'from', ip, 'lookup', table, 'priority', rule_pr]
        ip = self._as_root('', 'rule', tuple(args))
        return ip

    def add_rule_from_with_check(self, ip, table, rule_pr):
        """Add the rule only when no rule for this source ip exists yet."""
        exist_flag = self._check_exists(ip, table, rule_pr)
        if exist_flag is True:
            return
        return self.add_rule_from(ip, table, rule_pr)

    def del_rule_from(self, ip, table, rule_pr):
        """Delete the rule added by add_rule_from."""
        args = ['del', 'from', ip, 'lookup', table, 'priority', rule_pr]
        ip = self._as_root('', 'rule', tuple(args))
        return ip

    def delete_rule_priority(self, rule_pr):
        """Delete whatever rule holds priority `rule_pr`."""
        args = ['del', 'priority', rule_pr]
        ip = self._as_root('', 'rule', tuple(args))
        return ip
class IPDevice(SubProcessBase):
    """A single network device, with `ip` subcommand helpers attached."""

    def __init__(self, name, root_helper=None, namespace=None):
        super(IPDevice, self).__init__(root_helper=root_helper,
                                       namespace=namespace)
        self.name = name
        # Per-device `ip` subcommand wrappers.
        self.link = IpLinkCommand(self)
        self.addr = IpAddrCommand(self)
        self.route = IpRouteCommand(self)
        self.neigh = IpNeighCommand(self)

    def __eq__(self, other):
        """Devices are equal when both name and namespace match."""
        if other is None:
            return False
        return self.name == other.name and self.namespace == other.namespace

    def __str__(self):
        return self.name
class IpCommandBase(object):
    """Delegate one `ip` subcommand (COMMAND) to a parent SubProcessBase."""

    # Subclasses set this to the subcommand they wrap, e.g. 'link' or 'addr'.
    COMMAND = ''

    def __init__(self, parent):
        self._parent = parent

    def _run(self, *args, **kwargs):
        """Run the subcommand without escalating privileges."""
        options = kwargs.get('options', [])
        return self._parent._run(options, self.COMMAND, args)

    def _as_root(self, *args, **kwargs):
        """Run the subcommand through the parent's root helper."""
        options = kwargs.get('options', [])
        use_root_ns = kwargs.get('use_root_namespace', False)
        return self._parent._as_root(options, self.COMMAND, args, use_root_ns)
class IpDeviceCommandBase(IpCommandBase):
    """Base for subcommands that operate on a specific device."""

    @property
    def name(self):
        # Delegate to the owning IPDevice so renames stay visible here.
        return self._parent.name
class IpLinkCommand(IpDeviceCommandBase):
    """Wrapper around `ip link` for a single device."""

    COMMAND = 'link'

    def set_address(self, mac_address):
        self._as_root('set', self.name, 'address', mac_address)

    def set_mtu(self, mtu_size):
        self._as_root('set', self.name, 'mtu', mtu_size)

    def set_up(self):
        self._as_root('set', self.name, 'up')

    def set_down(self):
        self._as_root('set', self.name, 'down')

    def set_netns(self, namespace):
        """Move the device into `namespace`; keep the parent wrapper in sync."""
        self._as_root('set', self.name, 'netns', namespace)
        self._parent.namespace = namespace

    def set_name(self, name):
        """Rename the device; keep the parent wrapper in sync."""
        self._as_root('set', self.name, 'name', name)
        self._parent.name = name

    def set_alias(self, alias_name):
        self._as_root('set', self.name, 'alias', alias_name)

    def delete(self):
        self._as_root('delete', self.name)

    @property
    def address(self):
        return self.attributes.get('link/ether')

    @property
    def state(self):
        return self.attributes.get('state')

    @property
    def mtu(self):
        return self.attributes.get('mtu')

    @property
    def qdisc(self):
        return self.attributes.get('qdisc')

    @property
    def qlen(self):
        return self.attributes.get('qlen')

    @property
    def alias(self):
        return self.attributes.get('alias')

    @property
    def attributes(self):
        # -o: single-line output so _parse_line sees the whole record.
        return self._parse_line(self._run('show', self.name, options='o'))

    def _parse_line(self, value):
        """Parse one line of `ip -o link show` output into a key/value dict."""
        if not value:
            return {}

        # Drop continuation backslashes, discard the 'name: <FLAGS' prefix
        # up to '>', then read the rest as alternating key/value tokens.
        device_name, settings = value.replace("\\", '').split('>', 1)
        tokens = settings.split()
        keys = tokens[::2]
        # Numeric values (mtu, qlen, ...) are converted to int.
        values = [int(v) if v.isdigit() else v for v in tokens[1::2]]

        retval = dict(zip(keys, values))
        return retval
class IpAddrCommand(IpDeviceCommandBase):
    """Wrapper around `ip addr` for a single device."""

    COMMAND = 'addr'

    def add(self, ip_version, cidr, broadcast, scope='global'):
        """Add `cidr` to the device with the given broadcast and scope."""
        self._as_root('add',
                      cidr,
                      'brd',
                      broadcast,
                      'scope',
                      scope,
                      'dev',
                      self.name,
                      options=[ip_version])

    def delete(self, ip_version, cidr):
        """Remove `cidr` from the device."""
        self._as_root('del',
                      cidr,
                      'dev',
                      self.name,
                      options=[ip_version])

    def flush(self):
        """Remove all addresses from the device."""
        self._as_root('flush', self.name)

    def list(self, scope=None, to=None, filters=None):
        """Return dicts describing each address on the device.

        Each entry contains: cidr, broadcast, scope, ip_version, dynamic.
        """
        if filters is None:
            filters = []

        retval = []

        if scope:
            filters += ['scope', scope]
        if to:
            filters += ['to', to]

        for line in self._run('show', self.name, *filters).split('\n'):
            line = line.strip()
            if not line.startswith('inet'):
                continue
            parts = line.split()
            if parts[0] == 'inet6':
                version = 6
                scope = parts[3]
                broadcast = '::'
            else:
                version = 4
                if parts[2] == 'brd':
                    broadcast = parts[3]
                    scope = parts[5]
                else:
                    # sometimes output of 'ip a' might look like:
                    # inet 192.168.100.100/24 scope global eth0
                    # and broadcast needs to be calculated from CIDR
                    broadcast = str(netaddr.IPNetwork(parts[1]).broadcast)
                    scope = parts[3]

            retval.append(dict(cidr=parts[1],
                               broadcast=broadcast,
                               scope=scope,
                               ip_version=version,
                               dynamic=('dynamic' == parts[-1])))
        return retval
class IpRouteCommand(IpDeviceCommandBase):
    """Wrapper around `ip route` for a single device."""

    COMMAND = 'route'

    def add_gateway(self, gateway, metric=None, table=None):
        """Set (replace) the default route via `gateway`."""
        args = ['replace', 'default', 'via', gateway]
        if metric:
            args += ['metric', metric]
        args += ['dev', self.name]
        if table:
            args += ['table', table]
        self._as_root(*args)

    def delete_gateway(self, gateway=None, table=None):
        """Delete the default route, optionally matching `gateway`/`table`."""
        args = ['del', 'default']
        if gateway:
            args += ['via', gateway]
        args += ['dev', self.name]
        if table:
            args += ['table', table]
        self._as_root(*args)

    def list_onlink_routes(self):
        """Return link-scope routes on this device that carry no 'src'."""
        def iterate_routes():
            output = self._run('list', 'dev', self.name, 'scope', 'link')
            for line in output.split('\n'):
                line = line.strip()
                if line and not line.count('src'):
                    yield line

        return [x for x in iterate_routes()]

    def add_onlink_route(self, cidr):
        self._as_root('replace', cidr, 'dev', self.name, 'scope', 'link')

    def delete_onlink_route(self, cidr):
        self._as_root('del', cidr, 'dev', self.name, 'scope', 'link')

    def get_gateway(self, scope=None, filters=None):
        """Return {'gateway': ..., 'metric': ...} for the default route,
        or None when the device has no default route."""
        if filters is None:
            filters = []

        retval = None

        if scope:
            filters += ['scope', scope]

        route_list_lines = self._run('list', 'dev', self.name,
                                     *filters).split('\n')
        default_route_line = next((x.strip() for x in
                                   route_list_lines if
                                   x.strip().startswith('default')), None)
        if default_route_line:
            gateway_index = 2
            parts = default_route_line.split()
            retval = dict(gateway=parts[gateway_index])
            if 'metric' in parts:
                metric_index = parts.index('metric') + 1
                retval.update(metric=int(parts[metric_index]))

        return retval

    def pullup_route(self, interface_name):
        """Ensures that the route entry for the interface is before all
        others on the same subnet.
        """
        device_list = []
        device_route_list_lines = self._run('list', 'proto', 'kernel',
                                            'dev', interface_name).split('\n')
        for device_route_line in device_route_list_lines:
            try:
                subnet = device_route_line.split()[0]
            except Exception:
                # Best-effort: skip blank/unparseable lines.
                continue
            subnet_route_list_lines = self._run('list', 'proto', 'kernel',
                                                'match', subnet).split('\n')
            for subnet_route_line in subnet_route_list_lines:
                i = iter(subnet_route_line.split())
                # BUGFIX: use the builtin next() instead of the Python-2-only
                # iterator .next() method; works on Python 2.6+ and 3.x.
                while next(i) != 'dev':
                    pass
                device = next(i)
                try:
                    while next(i) != 'src':
                        pass
                    src = next(i)
                except Exception:
                    # Route entry has no 'src' attribute.
                    src = ''
                if device != interface_name:
                    device_list.append((device, src))
                else:
                    break
            # Re-append the competing routes so the interface's own entry
            # comes first for this subnet.
            for (device, src) in device_list:
                self._as_root('del', subnet, 'dev', device)
                if (src != ''):
                    self._as_root('append', subnet, 'proto', 'kernel',
                                  'src', src, 'dev', device)
                else:
                    self._as_root('append', subnet, 'proto', 'kernel',
                                  'dev', device)

    def add_route(self, cidr, ip, table=None):
        """Add (replace) a route to `cidr` via next-hop `ip`."""
        args = ['replace', cidr, 'via', ip, 'dev', self.name]
        if table:
            args += ['table', table]
        self._as_root(*args)

    def delete_route(self, cidr, ip, table=None):
        """Delete the route to `cidr` via next-hop `ip`."""
        args = ['del', cidr, 'via', ip, 'dev', self.name]
        if table:
            args += ['table', table]
        self._as_root(*args)
class IpNeighCommand(IpDeviceCommandBase):
    """Wrapper around ``ip neigh`` for a single device."""

    COMMAND = 'neigh'

    def add(self, ip_version, ip_address, mac_address):
        """Install a permanent neighbour (ARP/NDP) entry on this device."""
        self._as_root('replace', ip_address,
                      'lladdr', mac_address,
                      'nud', 'permanent',
                      'dev', self.name,
                      options=[ip_version])

    def delete(self, ip_version, ip_address, mac_address):
        """Remove the neighbour entry for *ip_address* on this device."""
        self._as_root('del', ip_address,
                      'lladdr', mac_address,
                      'dev', self.name,
                      options=[ip_version])
class IpNetnsCommand(IpCommandBase):
    """Wrapper around ``ip netns``: create/delete namespaces and run
    commands inside them.
    """

    COMMAND = 'netns'

    def add(self, name):
        """Create namespace *name* and return an IPWrapper scoped to it."""
        self._as_root('add', name, use_root_namespace=True)
        wrapper = IPWrapper(self._parent.root_helper, name)
        # Keep secondary IPs when the primary is removed.
        wrapper.netns.execute(['sysctl', '-w',
                               'net.ipv4.conf.all.promote_secondaries=1'])
        return wrapper

    def delete(self, name):
        """Delete namespace *name*."""
        self._as_root('delete', name, use_root_namespace=True)

    def execute(self, cmds, addl_env=None, check_exit_code=True):
        """Run *cmds* inside this namespace (via ``ip netns exec``).

        Fix: the default for ``addl_env`` was a shared mutable ``{}``
        (mutable-default-argument pitfall); ``None`` is behaviourally
        identical here because the dict is only read, never written.
        """
        ns_params = []
        if self._parent.namespace:
            if not self._parent.root_helper:
                raise exceptions.SudoRequired()
            ns_params = ['ip', 'netns', 'exec', self._parent.namespace]
        env_params = []
        if addl_env:
            env_params = (['env'] +
                          ['%s=%s' % pair for pair in addl_env.items()])
        return utils.execute(
            ns_params + env_params + list(cmds),
            root_helper=self._parent.root_helper,
            check_exit_code=check_exit_code)

    def exists(self, name):
        """Return True if a namespace called *name* is listed."""
        output = self._parent._execute('o', 'netns', ['list'])
        for line in output.split('\n'):
            if name == line.strip():
                return True
        return False
def device_exists(device_name, root_helper=None, namespace=None):
    """Return True if the device exists in the namespace."""
    try:
        device = IPDevice(device_name, root_helper, namespace)
        device.set_log_fail_as_error(False)
        mac = device.link.address
    except RuntimeError:
        return False
    return bool(mac)
def device_exists_with_ip_mac(device_name, ip_cidr, mac, namespace=None,
                              root_helper=None):
    """Return True if the device with the given IP and MAC addresses
    exists in the namespace.
    """
    try:
        device = IPDevice(device_name, root_helper, namespace)
        if device.link.address != mac:
            return False
        configured_cidrs = [addr['cidr'] for addr in device.addr.list()]
        return ip_cidr in configured_cidrs
    except RuntimeError:
        return False
def ensure_device_is_ready(device_name, root_helper=None, namespace=None):
    """Bring the device up; return False if it does not exist."""
    device = IPDevice(device_name, root_helper, namespace)
    device.set_log_fail_as_error(False)
    try:
        # Setting the link up is idempotent; a missing device raises
        # RuntimeError instead.
        device.link.set_up()
    except RuntimeError:
        return False
    else:
        return True
def iproute_arg_supported(command, arg, root_helper=None):
    """Return True if *arg* appears in the command's ``help`` output.

    Fix: copy *command* before appending — the previous ``command +=
    ['help']`` mutated the caller's list in place on every probe.
    """
    help_command = list(command) + ['help']
    # 'ip ... help' prints usage on stderr and exits non-zero, hence
    # check_exit_code=False and scanning stderr.
    stdout, stderr = utils.execute(help_command, root_helper=root_helper,
                                   check_exit_code=False, return_stderr=True)
    return any(arg in line for line in stderr.split('\n'))
| |
# Part of django-hookbox
# Copyright 2011, Duane Griffin <duaneg@dghda.com>
# TODO: We really need to mimic client-side connections to properly test things
from django.conf import settings
from django.contrib.auth.models import User
from django.core.handlers.wsgi import WSGIHandler
from django.core.management.base import CommandError
from django.core.servers import basehttp
from django.core.urlresolvers import reverse
from django.dispatch import receiver
from django.test import TestCase
import djhookbox
from djhookbox.management.commands import runhookbox
import json
import logging
import os
import random
import re
import subprocess
import sys
import threading
import urllib
from testfixtures import LogCapture
# TODO: Set this from something sensible
verbose = False

connect_url = reverse('hookbox_connect')

# HACK: Next port to start the hookbox server on
# Can't start & stop the server quickly on the same port as it doesn't
# use SO_REUSEADDR
nextport = random.randint(10000, 20000)

# Matches hookbox's startup banner.  Raw string so \w/\d/\. reach the
# regex engine explicitly (the previous plain string only worked because
# Python preserves unrecognised escapes verbatim).
HOOKBOX_STARTED = re.compile(r'hookbox - INFO - Listening to hookbox on http://([\w\d\.]+):(\d+)')
# Base code taken from: http://djangosnippets.org/snippets/1570/
class TestServerThread(threading.Thread):
    """
    Thread for running a http server while tests are running.
    Taken from: http://code.djangoproject.com/attachment/ticket/2879/django_live_server_r7936.diff
    with some modifications to avoid patching django.
    """

    def __init__(self, address, port):
        self.address = address
        self.port = port
        self._started = threading.Event()  # set once run() finished setup
        self._stopped = False
        self._error = None                 # setup failure, re-raised in start()/stop()
        super(TestServerThread, self).__init__()

    def start(self):
        """ Start the server thread and wait for it to be ready """
        super(TestServerThread, self).start()
        self._started.wait()
        if self._error:
            raise self._error

    def stop(self):
        """ Stop the server """
        self._stopped = True
        # Send an http request to wake the server
        url = urllib.urlopen('http://%s:%d/en/fake/request/' % (self.address, self.port))
        url.read()
        # Wait for server to finish
        self.join(5)
        if self._error:
            raise self._error

    def run(self):
        """ Sets up test server and database and loops over handling http requests. """
        # Idea taken from: http://djangosnippets.org/snippets/2050/
        class QuietWSGIRequestHandler(basehttp.WSGIRequestHandler):
            def log_message(self, format, *args):
                if verbose:
                    return basehttp.WSGIRequestHandler.log_message(self, format, *args)

        try:
            handler = basehttp.AdminMediaHandler(WSGIHandler())
            server_address = (self.address, self.port)
            httpd = basehttp.WSGIServer(server_address, QuietWSGIRequestHandler)
            httpd.set_app(handler)
        except basehttp.WSGIServerException as e:
            # Record the failure and bail out.  Previously execution fell
            # through to the request loop and crashed with a NameError,
            # because 'httpd' was never bound on this path.
            self._error = e
            return
        finally:
            # Unblock start() whether setup succeeded or failed.
            self._started.set()
        # Loop until we get a stop event.
        while not self._stopped:
            httpd.handle_request()
def server(method):
    """Decorator that starts & stops hookbox for those tests that require it.

    Boots the django test server and a real hookbox subprocess, points
    settings.HOOKBOX_PORT at it, runs the wrapped test, then tears both
    down.  Consumes two ports from the module-global 'nextport' counter
    per invocation (callback port and hookbox port).
    """
    def wrapper(self, *args, **kwargs):
        global nextport
        # Start the test server
        server = TestServerThread('localhost', nextport)
        server.start()
        try:
            nextport += 1
            # Start hookbox
            hookboxcmd = runhookbox.Command()
            hookboxcmd.start_hookbox({
                'executable': os.path.join(os.path.dirname(sys.executable), 'hookbox'),
                'cbport': str(nextport - 1),
                'port': str(nextport),
                'admin-password': 'admin',
            }, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
            # TODO: Retry at different port if cannot bind
            # This will be tricky, however, since hookbox prints the
            # listening message *before* it binds...
            output = hookboxcmd.proc.stdout.readline()
            match = HOOKBOX_STARTED.search(output)
            if not match:
                hookboxcmd.proc.kill()
                raise CommandError('Could not start hookbox server: %s' % output)
            # Update hookbox settings to point to the running server
            settings.HOOKBOX_PORT = nextport
            nextport += 1
            # Perform the tests
            try:
                result = method(self, *args, **kwargs)
            finally:
                # Always stop hookbox, even if the test raised; dump its
                # output when running verbosely.
                hookboxcmd.stop_hookbox()
                if verbose:
                    for line in hookboxcmd.proc.stdout:
                        print line.strip('\n')
            return result
        finally:
            server.stop()
    return wrapper
class DjangoHookboxTest(TestCase):
    """Tests for the django-hookbox webhook views and web API.

    Webhook callback invocations are tallied per-channel in
    ``self.all_calls`` / ``self.create_calls`` so each test can assert
    exactly which callbacks fired.  Tests decorated with ``@server``
    additionally run against a live hookbox subprocess.
    """

    # Counts every webhook callback invocation, keyed by channel
    # ('-' when the callback had no channel).
    def _cb_all(self, op, user, channel = '-', payload = None):
        if channel in self.all_calls:
            self.all_calls[channel] += 1
        else:
            self.all_calls[channel] = 1
        return None

    # 'create' callback fixture: '/a/' gets channel options, '/b/' and
    # '/c/' are denied in two different result formats, anything else
    # falls through.
    def _cb_create(self, op, user, channel = None):
        if channel in self.create_calls:
            self.create_calls[channel] += 1
        else:
            self.create_calls[channel] = 1
        if channel == '/a/':
            return {
                'history_size': 2,
                'reflective': False,
                'presenceful': False,
                'moderated': True,
            }
        elif channel == '/b/':
            return 'denied'
        elif channel == '/c/':
            return [False, {'msg': 'also denied'}]
        else:
            return None

    def setUp(self):
        # Per-test callback counters.
        self.all_calls = {}
        self.create_calls = {}
        # HACK: don't allow other apps to mess with us or vice versa...
        self.old_cbs = djhookbox.views._callbacks
        djhookbox.views._callbacks = []
        djhookbox.whcallback(self._cb_all)
        djhookbox.whcallback('create')(self._cb_create)
        User.objects.create_user('a', 'a@example.com', 'a').save()
        self.logcap = LogCapture()

    def tearDown(self):
        # Restore whatever callbacks were registered before this test.
        djhookbox.views._callbacks = self.old_cbs
        self.logcap.uninstall()

    @server
    def test_create(self):
        # Publishing to a non-existent channel must fail; after create it works.
        self.assertRaises(djhookbox.HookboxError,
                          djhookbox.publish, '/a/', json.dumps({'foo': 'bar'}))
        djhookbox.create('/a/')
        djhookbox.publish('/a/', json.dumps({'foo': 'bar'}))
        # TODO: Test send_hook works
        # TODO: Confirm it actually did something

    @server
    def test_web_api_token(self):
        # A corrupted API token must make web-API calls fail.
        secret = djhookbox.apitoken
        try:
            djhookbox.apitoken += '...not!'
            self.assertRaises(djhookbox.HookboxError,
                              djhookbox.publish, '/a/', json.dumps({'foo': 'bar'}))
            self.assertCreateCalls({})
        finally:
            djhookbox.apitoken = secret

    def test_webhook_secret(self):
        # Correct secret succeeds; missing or wrong secret must be rejected.
        self.client.login(username = 'a', password = 'a')
        response = self.client.post(connect_url, {
            'channel_name': 'a',
            'secret': djhookbox.views.secret,
        })
        self.assertSuccess(response)
        response = self.client.post(connect_url, {
            'channel_name': 'a',
        })
        data = self.decode(response)
        self.assertFalse(data[0], 'webhook secret verification should have failed (forgotton to set settings.HOOKBOX_WEBHOOK_SECRET?)')
        response = self.client.post(connect_url, {
            'channel_name': 'a',
            'secret': djhookbox.views.secret + '...not!',
        })
        data = self.decode(response)
        self.assertFalse(data[0], 'webhook secret verification should have failed')

    def test_signals(self):
        # Each webhook view should emit its signal with the logged-in user
        # as sender and the expected kwargs.
        class Listener(object):
            def __call__(self, *args, **kwargs):
                self.signal = kwargs.get('signal')
                self.sender = kwargs.get('sender').username
                self.kwargs = kwargs
        def doTest(which, params = dict(), **checks):
            listener = Listener()
            djhookbox.views.signals[which].connect(listener)
            self.client.login(username = 'a', password = 'a')
            params['secret'] = djhookbox.views.secret
            response = self.client.post(reverse('hookbox_%s' % which), params)
            self.assertSuccess(response)
            self.assertEquals(listener.sender, 'a')
            for (key, value) in checks.iteritems():
                self.assertEquals(listener.kwargs.get(key), value)
            self.client.logout()
            djhookbox.views.signals[which].disconnect(listener)
        doTest('connect')
        doTest('disconnect')
        doTest('subscribe', {'channel_name': 'b'}, channel = 'b')
        doTest('unsubscribe', {'channel_name': 'b'}, channel = 'b')

    def test_all_cbs(self):
        # The catch-all callback fires once per webhook, keyed by channel.
        self.client.login(username = 'a', password = 'a')
        params = {
            'secret': djhookbox.views.secret,
            'channel_name': 'a',
        }
        response = self.client.post(connect_url, params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1})
        response = self.client.post(reverse('hookbox_subscribe'), params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1, 'a': 1})
        response = self.client.post(reverse('hookbox_publish'), {
            'secret': djhookbox.views.secret,
            'channel_name': 'a',
            'payload': json.dumps(["Hello world"]),
        })
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1, 'a': 2})
        response = self.client.post(reverse('hookbox_destroy_channel'), params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1, 'a': 3})
        response = self.client.post(reverse('hookbox_disconnect'), params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 2, 'a': 3})

    def test_warn_multiple_results(self):
        # Two callbacks both returning results should log a warning per call.
        @djhookbox.whcallback
        def _cb_1(op, user, channel = '-'):
            return [True, {}]
        @djhookbox.whcallback
        def _cb_2(op, user, channel = '-'):
            return [True, {}]
        params = {'secret': djhookbox.views.secret}
        logging.getLogger('djhookbox').setLevel(logging.WARNING)
        response = self.client.post(connect_url, params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 1})
        response = self.client.post(reverse('hookbox_disconnect'), params)
        self.assertSuccess(response)
        self.assertAllCalls({'-': 2})
        self.logcap.check(
            ('djhookbox', 'WARNING', 'multiple results returned from connect callback'),
            ('djhookbox', 'WARNING', 'multiple results returned from disconnect callback'),
        )

    def test_explicit_deny(self):
        # Both deny formats from _cb_create ('denied' string and
        # [False, {...}] pair) must surface as a failure response.
        response = self.client.post(reverse('hookbox_create_channel'), {
            'secret': djhookbox.views.secret,
            'channel_name': '/b/',
        })
        data = self.decode(response)
        self.assertEquals(data[0], False, 'unexpected success')
        self.assertEquals(data[1], {'msg': 'denied'})
        self.assertAllCalls({'/b/': 1})
        response = self.client.post(reverse('hookbox_create_channel'), {
            'secret': djhookbox.views.secret,
            'channel_name': '/c/',
        })
        data = self.decode(response)
        self.assertEquals(data[0], False, 'unexpected success')
        self.assertEquals(data[1], {'msg': 'also denied'})
        self.assertAllCalls({'/b/': 1, '/c/': 1})

    def test_callback_error(self):
        # An exception inside a callback becomes a failure response whose
        # msg is the exception text.
        @djhookbox.whcallback
        def _cb_1(op, user, channel = '-'):
            raise Exception('something bad')
        response = self.client.post(reverse('hookbox_create_channel'), {
            'secret': djhookbox.views.secret,
            'channel_name': '/a/',
        })
        data = self.decode(response)
        self.assertEquals(data[0], False, 'unexpected success')
        self.assertEquals(data[1], {'msg': 'something bad'})
        self.assertAllCalls({'/a/': 1})

    def decode(self, response):
        """Decode and sanity-check a webhook JSON response: [bool, dict]."""
        self.assertEquals(response.status_code, 200)
        self.assert_(('Content-Type', 'application/json') in response.items())
        result = json.loads(response.content)
        self.assert_(isinstance(result, list), 'unexpected result returned from server: %s' % str(result))
        self.assertEquals(len(result), 2)
        self.assert_(isinstance(result[0], bool), 'unexpected result returned from server: %s' % str(result))
        self.assert_(isinstance(result[1], dict), 'unexpected result returned from server: %s' % str(result))
        return result

    def assertSuccess(self, response):
        """Assert the response is a success, failing with its 'msg' if not."""
        data = self.decode(response)
        if not data[0] and 'msg' in data[1]:
            self.fail(data[1]['msg'])
        else:
            self.assert_(data[0])

    def assertAllCalls(self, calls):
        # Expected per-channel counts for the catch-all callback.
        self.assertEquals(self.all_calls, calls)

    def assertCreateCalls(self, calls):
        # Expected per-channel counts for the 'create' callback.
        self.assertEquals(self.create_calls, calls)
| |
# Copyright (c) 2013 The SAYCBridge Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core.position import *
from core.call import Call
from suit import SUITS
import copy
import math
import operator
# I'm not sure this needs to be its own class.
class Vulnerability(object):
    """Which partnerships are vulnerable on a board ('E-W', 'N-S', 'None', 'Both')."""

    def __init__(self, name):
        # FIXME: We should find a better storage system than strings.
        # A falsy name (None, '') normalises to 'None'.
        self.name = name or 'None'
        assert self.name in ('E-W', 'N-S', 'None', 'Both'), "%s is not a valid vulnerability" % self.name

    # Two-letter codes used in compact board identifiers, and the inverse map.
    name_to_identifier = { 'E-W': 'EW', 'N-S': 'NS', 'None': 'NO', 'Both': 'BO' }
    identifier_to_name = dict([(identifier, name) for name, identifier in name_to_identifier.items()])

    @property
    def identifier(self):
        """Two-letter identifier ('EW', 'NS', 'NO', 'BO')."""
        return self.name_to_identifier[self.name]

    @classmethod
    def from_identifier(cls, identifier):
        return cls(cls.identifier_to_name[identifier])

    @classmethod
    def from_string(cls, string):
        return cls(string)

    def gib_name(self):
        """Single-character code used by the GIB engine."""
        return { 'E-W': 'e', 'N-S': 'n', 'None': '-', 'Both': 'b' }[self.name]

    @classmethod
    def from_board_number(cls, board_number):
        """Standard 16-board vulnerability rotation.

        Fix: the first parameter was previously named ``self`` on a
        classmethod; renamed to ``cls`` and used for construction.
        """
        # http://www.jazclass.aust.com/bridge/scoring/score11.htm
        # FIXME: There must be a more compact way to represent this series.
        number_to_vulnerability = {
            0: 'E-W', # board 16
            1: 'None',
            2: 'N-S',
            3: 'E-W',
            4: 'Both',
            5: 'N-S',
            6: 'E-W',
            7: 'Both',
            8: 'None',
            9: 'E-W',
            10: 'Both',
            11: 'None',
            12: 'N-S',
            13: 'Both',
            14: 'None',
            15: 'N-S',
        }
        return cls(number_to_vulnerability[board_number % 16])

    def is_vulnerable(self, position):
        """True if *position* (an object with a .char like 'N'/'E') is vulnerable."""
        if self.name == "None":
            return False
        if self.name == "Both":
            return True
        return position.char in self.name
# FIXME: It's unclear if this class should expose just call_names or Call objects.
class CallHistory(object):
    """An auction: the ordered calls made so far, plus dealer and vulnerability."""

    @classmethod
    def _calls_from_calls_string(cls, calls_string):
        """Parse a space- or comma-separated string of call names into Calls."""
        if not calls_string:
            return []
        if ',' in calls_string:
            calls_string = calls_string.replace(',', ' ')
        calls_string = calls_string.strip()  # Remove any trailing whitespace.
        call_names = calls_string.split(' ')
        # This if exists to support string == ''
        if not call_names or not call_names[0]:
            return []
        # from_string may be more forgiving than we want...
        calls = map(Call.from_string, call_names)
        assert None not in calls, "Failed to parse calls string: '%s'" % calls_string
        return calls

    @classmethod
    def from_string(cls, history_string, dealer_char=None, vulnerability_string=None):
        dealer = Position.from_char(dealer_char) if dealer_char else None
        vulnerability = Vulnerability.from_string(vulnerability_string)
        calls = cls._calls_from_calls_string(history_string)
        return CallHistory(calls, dealer=dealer, vulnerability=vulnerability)

    @classmethod
    def dealer_from_board_number(cls, board_number):
        # It's unclear if this number->dealer/vulnerability knowledge belongs in CallHistory or in Board.
        dealer_index = (board_number + 3) % 4
        return Position.from_index(dealer_index)

    @classmethod
    def from_board_number_and_calls_string(cls, board_number, calls_string):
        vulnerability = Vulnerability.from_board_number(board_number)
        dealer = cls.dealer_from_board_number(board_number)
        calls = cls._calls_from_calls_string(calls_string)
        return CallHistory(calls=calls, dealer=dealer, vulnerability=vulnerability)

    @classmethod
    def empty_for_board_number(cls, board_number):
        return cls.from_board_number_and_calls_string(board_number, '')

    def __init__(self, calls=None, dealer=None, vulnerability=None):
        self.calls = calls or []
        self.dealer = dealer or NORTH
        self.vulnerability = vulnerability or Vulnerability.from_board_number(1)

    def __str__(self):
        return self.calls_string()

    def __len__(self):
        return len(self.calls)

    def can_double(self):
        """True if the side on lead may double the standing contract."""
        # Make sure we haven't already doubled.
        if not self.last_non_pass().is_contract():
            return False
        return not self.declarer().in_partnership_with(self.position_to_call())

    def can_redouble(self):
        """True if the doubled side may redouble."""
        if not self.last_non_pass().is_double():
            return False
        return self.declarer().in_partnership_with(self.position_to_call())

    # This may belong on a separate bridge-rules object?
    def is_legal_call(self, call):
        assert not self.is_complete()
        if call.is_pass():
            return True
        last_contract = self.last_contract()
        if not last_contract:
            return not call.is_double() and not call.is_redouble()
        # Doubles do not have levels.
        if call.level:
            if last_contract.level > call.level:
                return False
            if last_contract.level == call.level and last_contract.strain >= call.strain:
                return False
        if call.is_double() and not self.can_double():
            return False
        if call.is_redouble() and not self.can_redouble():
            return False
        return True

    def copy_appending_call(self, call):
        """Return a deep copy of this history with *call* appended."""
        assert call
        assert self.is_legal_call(call)
        new_call_history = copy.deepcopy(self)
        new_call_history.calls.append(call)
        return new_call_history

    def copy_with_partial_history(self, last_entry):
        partial_history = copy.copy(self)
        partial_history.calls = self.calls[:last_entry]
        return partial_history

    def ascending_partial_histories(self, step):
        """Return prefixes of this history, shortest first, shrinking by *step*."""
        partial_histories = []
        partial_history = self
        while partial_history.calls:  # We only terminate from here if passed in an empty history.
            partial_histories.insert(0, partial_history)
            if len(partial_history.calls) < step:
                break
            partial_history = partial_history.copy_with_partial_history(-step)
        return partial_histories

    @property
    def identifier(self):
        """Compact 'dealer:vulnerability:calls' identifier for this history."""
        return "%s:%s:%s" % (self.dealer.char, self.vulnerability.identifier, self.comma_separated_calls())

    @classmethod
    def from_identifier(cls, identifier):
        components = identifier.split(":")
        if len(components) == 3:
            dealer_char, vulenerability_identifier, calls_identifier = components
        elif len(components) == 2:
            # It's very common to have the last colon in the URL missing.
            dealer_char, vulenerability_identifier = components
            calls_identifier = ""
        else:
            assert False, "Invalid history identifier: %s" % identifier
        dealer = Position.from_char(dealer_char)
        vulnerability = Vulnerability.from_identifier(vulenerability_identifier)
        calls = cls._calls_from_calls_string(calls_identifier)
        return CallHistory(calls=calls, dealer=dealer, vulnerability=vulnerability)

    def pretty_one_line(self):
        return "Deal: %s, Bids: %s" % (self.dealer.char, self.calls_string())

    def calls_string(self):
        return " ".join([call.name for call in self.calls])

    def comma_separated_calls(self):
        return ",".join([call.name for call in self.calls])

    @property
    def last_call(self):
        if not self.calls:
            return None
        return self.calls[-1]

    @property
    def last_to_call(self):
        if not self.calls:
            return None
        return self.dealer.position_after_n_calls(len(self.calls) - 1)

    def last_non_pass(self):
        for call in reversed(self.calls):
            if not call.is_pass():
                return call
        return None

    def last_to_not_pass(self):
        for caller, call in self.enumerate_reversed_calls():
            if not call.is_pass():
                return caller
        return None

    def last_contract(self):
        for call in reversed(self.calls):
            if call.is_contract():
                return call
        return None

    def position_to_call(self):
        # FIXME: Should this return None when is_complete?
        # We'd have to check callers, some may assume it's OK to call position_to_call after is_complete.
        return self.dealer.position_after_n_calls(len(self.calls))

    def calls_by(self, position):
        """Return the calls made by *position*, in order."""
        offset_from_dealer = self.dealer.calls_between(position)
        if len(self.calls) <= offset_from_dealer:
            return []
        return [self.calls[i] for i in range(offset_from_dealer, len(self.calls), 4)]

    def enumerate_calls(self):
        for call_offset, call in enumerate(self.calls):
            yield self.dealer.position_after_n_calls(call_offset), call

    def enumerate_reversed_calls(self):
        # FIXME: This is needlessly complicated.
        for call_offset, call in enumerate(reversed(self.calls)):
            caller_offset = len(self.calls) - 1 - call_offset
            yield self.dealer.position_after_n_calls(caller_offset), call

    # (sic) Misspelling of "competitive" kept: renaming would break callers.
    def competative_auction(self):
        """True if both partnerships have made contract bids."""
        first_caller = None
        for caller, call in self.enumerate_calls():
            if not first_caller and call.is_contract():
                first_caller = caller
            if call.is_contract() and not caller.in_partnership_with(first_caller):
                return True
        return False

    def last_call_by(self, position):
        calls = self.calls_by(position)
        if not calls:
            return None
        return calls[-1]

    def first_call_by(self, position):
        calls = self.calls_by(position)
        if not calls:
            return None
        return calls[0]

    def last_call_by_next_bidder(self):
        next_caller = self.position_to_call()
        return self.last_call_by(next_caller)

    def opener(self):
        for caller, call in self.enumerate_calls():
            if call.is_contract():
                return caller
        return None

    def declarer(self):
        """Return the first player of the winning side to bid the final strain."""
        first_caller = None
        first_call = None
        last_caller = None
        last_call = None
        for caller, call in self.enumerate_reversed_calls():
            if not call.is_contract():
                continue
            if not last_call:
                last_call = call
                last_caller = caller
            if call.strain == last_call.strain and caller.in_partnership_with(last_caller):
                first_call = call
                first_caller = caller
        return first_caller

    def dummy(self):
        """Return declarer's partner.

        Fix: previously returned bare ``declarer.partner``, raising
        NameError — the declarer must be computed via self.declarer().
        """
        return self.declarer().partner

    def contract(self):
        # Maybe we need a Contract object which holds declarer, suit, level, and doubles?
        last_contract = self.last_contract()
        if last_contract:
            last_non_pass = self.last_non_pass()
            double_string = ''
            if last_non_pass.is_double():
                double_string = 'X'
            elif last_non_pass.is_redouble():
                double_string = 'XX'
            return "%s%s" % (last_contract.name, double_string)
        return None

    def is_complete(self):
        """True once three consecutive passes follow at least one call."""
        return len(self.calls) > 3 and self.calls[-1].is_pass() and self.calls[-2].is_pass() and self.calls[-3].is_pass()

    def is_passout(self):
        return self.is_complete() and self.calls[-4].is_pass()
| |
from flask import Flask, request, send_file
import database
import base64
import os
import uuid
import calendar
from fuzzy_search import fuzzy
from image_capture import save_image
# Module-level singletons: the Flask app and the shared database handle.
app = Flask(__name__)
db = database.Database()
# Directory where uploaded videos are written to disk.
UPLOAD_FOLDER = 'static/videos'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/html/<filename>", methods = ['GET'])
def return_html(filename):
    """Serve the HTML template with the given filename."""
    if request.method == 'GET':
        return send_file('./templates/' + filename, mimetype='text/html')


@app.route("/css/<filename>", methods = ['GET'])
def return_css(filename):
    """Serve the CSS stylesheet with the given filename."""
    if request.method == 'GET':
        return send_file('./static/css/' + filename, mimetype='text/css')


@app.route("/js/<filename>", methods = ['GET'])
def return_js(filename):
    """Serve the JavaScript file with the given filename."""
    if request.method == 'GET':
        return send_file('./static/js/' + filename, mimetype='text/js')


@app.route("/favicon.png", methods = ['GET'])
def return_favicon():
    """Serve the favicon image."""
    if request.method == 'GET':
        return send_file('./static/img/favicon.png', mimetype='image/png')
@app.route("/is-available/<video_ID>", methods = ['GET'])
def return_availability(video_ID):
    """Return 'True' if the video ID is present in the database, else 'False'."""
    if request.method == 'GET':
        return str(db.is_available(video_ID))


@app.route("/video/<video_ID>", methods = ['GET'])
def return_video(video_ID):
    """Stream the stored MP4 for the given video ID."""
    if request.method == 'GET':
        return send_file('./static/videos/' + video_ID + '.mp4', mimetype='video/mp4')


@app.route("/image/<video_ID>", methods = ['GET'])
def return_image(video_ID):
    """Serve the thumbnail image for the given video ID."""
    if request.method == 'GET':
        return send_file('./static/images/' + video_ID + '.jpg', mimetype='image/jpg')


@app.route("/title/<video_ID>", methods = ['GET'])
def return_title(video_ID):
    """Return the title of the video with the given video ID."""
    if request.method == 'GET':
        return db.get_video_title(video_ID)


@app.route("/views/<video_ID>", methods = ['GET'])
def return_views(video_ID):
    """Return the view count of the video with the given video ID."""
    if request.method == 'GET':
        return db.get_views(video_ID)


@app.route("/uploader/<video_ID>", methods = ['GET'])
def return_uploader(video_ID):
    """Return the uploader of the video with the given video ID."""
    if request.method == 'GET':
        return db.get_video_uploader(video_ID)


@app.route("/upload-date/<video_ID>", methods = ['GET'])
def return_date(video_ID):
    """Return the upload date formatted like 'Jan 02, 2020'."""
    if request.method == 'GET':
        # Upload dates come back ISO-style (YYYY-MM-DD) — split and reformat.
        date_parts = str(db.get_upload_date(video_ID)).split("-")
        month_name = calendar.month_abbr[int(date_parts[1])]
        return "{} {}, {}".format(month_name, date_parts[2], date_parts[0])
@app.route("/update-count", methods = ['POST'])
def update_count():
    """Increment the view count of the posted video ID."""
    if request.method == 'POST':
        db.update_view_count(request.form['video_ID'])
        return "1"


@app.route("/update-watched", methods = ['POST'])
def update_watched():
    """Record the posted video ID in the posted user's watched list."""
    if request.method == 'POST':
        db.update_watched(request.form['username'], request.form['video_ID'])
        return "1"


@app.route("/random", methods = ['GET'])
def return_random_ID():
    """Return a random video ID."""
    if request.method == 'GET':
        return db.get_random_ID()
@app.route("/fuzzy/<search_key>", methods = ['GET'])
def fuzzy_results(search_key):
    """Return the closest title matches for the search key."""
    if request.method == 'GET':
        video_dict, video_titles = db.video_dict()
        matches = fuzzy(search_key, video_dict, video_titles)
        return str(matches)


@app.route("/get-most-viewed", methods = ['GET'])
def return_most_viewed():
    """Return the list of most viewed videos."""
    if request.method == 'GET':
        most_viewed = db.get_most_viewed()
        return str(most_viewed)
@app.route("/is-valid-user", methods = ['POST'])
def return_is_valid_user():
    """Return 'True' if the posted username/password pair is valid."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        return str(db.is_valid_user(username, password))


@app.route("/is-valid-username/<username>", methods = ["GET"])
def return_is_valid_username(username):
    """Return 'True' if the username is valid."""
    if request.method == 'GET':
        return str(db.is_valid_username(username))


@app.route("/add-user", methods = ['POST'])
def add_user():
    """Store the posted credentials as a new user."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        db.add_user(username, password)
        return "1"


@app.route("/update-password", methods = ['POST'])
def update_password():
    """Change the user's password after verifying the old one."""
    if request.method == 'POST':
        username = request.form['username']
        old_password = request.form['old_password']
        new_password = request.form['new_password']
        # Truthiness test instead of '== True', consistent with how
        # delete_user() in this file already checks is_valid_user().
        if db.is_valid_user(username, old_password):
            db.update_password(username, new_password)
            return "True"
        else:
            return "False"


@app.route("/delete-user", methods = ['POST'])
def delete_user():
    """Delete the user's account after verifying the credentials."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        if db.is_valid_user(username, password):
            db.delete_user(username)
            return "True"
        else:
            return "False"


@app.route("/is-admin/<username>", methods = ['GET'])
def return_is_admin(username):
    """Return 'True' if the user is an administrator."""
    if request.method == 'GET':
        return str(db.is_admin(username))
@app.route("/upload", methods = ['POST'])
def upload_video():
    """Store a posted base64-encoded video, register it, and build its thumbnail.

    Returns the generated video ID.
    """
    if request.method == 'POST':
        video_ID = str(base64.urlsafe_b64encode(str.encode(str(uuid.uuid4().fields[5]))))[2:-1]
        username = request.form['username']
        title = request.form['title']
        encoded_video = request.form['file']
        # 'with' guarantees the handle is closed before the thumbnail is
        # generated (previously the file object was never closed), and the
        # local no longer shadows the builtin name 'file'.
        with open('./static/videos/{}.mp4'.format(video_ID), "wb") as video_file:
            video_file.write(base64.b64decode(encoded_video))
        db.upload_video(video_ID, username, title)
        save_image(video_ID)
        return video_ID
@app.route("/watched/<username>", methods = ['GET'])
def return_watched(username):
    """Return the list of video IDs watched by the user."""
    if request.method == 'GET':
        return str(db.get_watched(username))


@app.route("/uploaded/<username>", methods = ['GET'])
def return_uploaded(username):
    """Return the list of video IDs uploaded by the user."""
    if request.method == 'GET':
        return str(db.get_uploaded(username))


@app.route("/is-user-present/<username>", methods = ['GET'])
def return_user_availability(username):
    """Return 'True' if the user is present in the database."""
    if request.method == 'GET':
        return str(db.is_user_present(username))


@app.route("/delete-video", methods = ['POST'])
def delete_video():
    """Delete a video after verifying the requester's credentials.

    NOTE(review): only credential validity is checked here; ownership of
    the video is not actually verified despite the original comment.
    """
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        video_ID = request.form['video_ID']
        # Truthiness test instead of '== True', consistent with delete_user().
        if db.is_valid_user(username, password):
            db.delete_video(video_ID)
            return str(True)
        else:
            return str(False)
@app.route("/get-random/<video_ID>", methods = ['GET'])
def return_random_video_IDs(video_ID):
    """Return up to five random video IDs, excluding the current one."""
    if request.method == 'GET':
        candidates = db.get_five_random_IDs()
        if video_ID in candidates:
            candidates.remove(video_ID)
        return str(candidates)


@app.route("/flag", methods = ['POST'])
def flag_video_ID():
    """Record in the FLAGS table that the posted user flagged the posted video."""
    if request.method == 'POST':
        flagger = request.form['username']
        flagged_ID = request.form['video_ID']
        db.flag_ID(flagger, flagged_ID)
        return "1"
@app.route("/user-video-count/<username>", methods = ['GET'])
def return_user_video_count(username):
    """
    In GET request
    - Returns number of videos uploaded by the user.
    """
    if request.method == 'GET':
        return str(db.get_user_video_count(username))
@app.route("/user-view-count/<username>", methods = ['GET'])
def return_user_view_count(username):
    """
    In GET request
    - Returns number of views on all videos uploaded by the user.
    """
    if request.method == 'GET':
        return str(db.get_user_view_count(username))
@app.route("/user-best-video/<username>", methods = ['GET'])
def return_user_best_video(username):
    """
    In GET request
    - Returns video ID of the video uploaded by the user with the highest view count.
    """
    if request.method == 'GET':
        return str(db.get_best_video_ID(username))
@app.route("/user-fav-video/<username>", methods = ['GET'])
def return_user_fav_video(username):
    """
    In GET request
    - Returns the user's favourite video ID (docstring previously duplicated
      the best-video wording; exact semantics come from db.get_fav_video_ID
      — TODO confirm against the database layer).
    """
    if request.method == 'GET':
        return str(db.get_fav_video_ID(username))
# ADMIN PART -- administrator-facing endpoints.
@app.route("/add-admin", methods = ['POST'])
def add_admin():
    """
    In POST request
    - Adds the new administrator to the ADMINS table.
    """
    if request.method == 'POST':
        username = request.form['username']
        # NOTE(review): password appears to be forwarded as received;
        # presumably db.add_admin hashes it -- verify, never store plaintext.
        password = request.form['password']
        db.add_admin(username, password)
        return "1"
@app.route("/flagger/<video_ID>", methods = ['GET'])
def return_flagger(video_ID):
    """
    In GET request
    - Returns the username of the user that flagged the video with the corresponding video ID from the FLAGS table.
    """
    if request.method == 'GET':
        return str(db.get_flagger(video_ID))
@app.route("/flagged", methods = ['GET'])
def return_flagged():
    """
    In GET request
    - Returns a list of flagged videos.
    """
    if request.method == 'GET':
        return str(db.get_flagged())
@app.route("/admin-delete-video", methods = ['POST'])
def admin_delete_video():
    """
    In POST request
    - Deletes the video from VIDEOS table.

    NOTE(review): no authentication is performed here -- presumably the
    admin client is trusted; verify this endpoint is access-controlled.
    """
    if request.method == 'POST':
        video_ID = request.form['video_ID']
        # Removed leftover debug print of the video ID.
        db.delete_video(video_ID)
        return "1"
@app.route("/user-list", methods = ['GET'])
def return_users_list():
    """
    In GET request
    - Returns a list of users in the database.
    """
    if request.method == 'GET':
        return str(db.user_list())
@app.route("/num-videos/<username>", methods = ['GET'])
def return_user_video_number(username):
    """
    In GET request
    - Returns the number of videos uploaded by the user with the corresponding username.
    NOTE(review): overlaps with /user-video-count -- presumably backed by a
    different query; confirm whether both endpoints are still needed.
    """
    if request.method == 'GET':
        return str(db.get_video_num(username))
@app.route("/num-flags/<username>", methods = ['GET'])
def return_user_flagged_number(username):
    """
    In GET request
    - Returns the number of videos uploaded by the user that have been flagged by other users.
    """
    if request.method == 'GET':
        return str(db.get_flagged_num(username))
@app.route("/admin-delete-user", methods = ['POST'])
def admin_delete_user():
    """
    In POST request
    - Delete the user with the corresponding username.
    """
    if request.method == 'POST':
        username = request.form['username']
        db.delete_user(username)
        return "1"
@app.route("/user-count", methods = ['GET'])
def return_user_count():
    """
    In GET request
    - Returns number of users in the USERS table.
    """
    if request.method == 'GET':
        return str(db.get_user_count())
@app.route("/video-count", methods = ['GET'])
def return_video_count():
    """
    In GET request
    - Returns number of videos in the VIDEOS table.
    """
    if request.method == 'GET':
        return str(db.get_video_count())
@app.route("/view-count", methods = ['GET'])
def return_view_count():
    """
    In GET request
    - Returns number of views on all videos in the VIDEOS table.
    """
    if request.method == 'GET':
        return str(db.get_total_view_count())
@app.route("/flag-count", methods = ['GET'])
def return_flag_count():
    """
    In GET request
    - Returns number of flagged videos in the VIDEOS table.
    """
    if request.method == 'GET':
        return str(db.get_flag_count())
@app.route("/favourites/<username>", methods = ['GET'])
def return_favourites(username):
    """
    In GET request
    - Returns a list of videos favourited by the user.
    """
    if request.method == 'GET':
        return str(db.get_favourites(username))
@app.route("/remove-flag", methods = ['POST'])
def remove_flag():
    """
    In POST request
    - Removes the flag for the video with the corresponding video ID from the FLAGS table.
    """
    if request.method == 'POST':
        video_ID = request.form['video_ID']
        db.delete_flag(video_ID)
        return "1"
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader;
    # it must be disabled in any non-development deployment.
    app.run(port=8080, debug=True)
| |
from __future__ import unicode_literals
from __future__ import print_function
from moya import tools
from moya.context.missing import Missing
from moya.compat import text_type
from moya.elements.elementbase import ReturnContainer
import pytz
import datetime
import unittest
import io
import os.path
class TestTools(unittest.TestCase):
    """Unit tests for the helpers in ``moya.tools``.

    Code is kept byte-identical; review observations are comments only.
    """
    def test_extract_namespace(self):
        tests = [
            ("{http://moyaproject.com}test1", ("http://moyaproject.com", "test1")),
            ("test2", ("http://moyaproject.com", "test2")),
            (
                "{http://moyaproject.com/db}query",
                ("http://moyaproject.com/db", "query"),
            ),
        ]
        for test, result in tests:
            self.assertEqual(tools.extract_namespace(test), result)
    def test_asint(self):
        assert tools.asint("5") == 5
        assert tools.asint("-5") == -5
        assert tools.asint("foo", 3) == 3
    def test_match_exception(self):
        tests = [
            ("*", "anything", True),
            ("foo", "foo", True),
            ("foo.bar", "foo.bar", True),
            ("foo.*", "foo.bar", True),
            ("foo.*", "foo.bar.baz", True),
            ("bar", "foo", False),
            ("foo.bar.*", "foo.baz.egg", False),
        ]
        for m, exc, result in tests:
            print(exc, m, result)
            assert tools.match_exception(exc, m) == result
    def test_md5_hexdigest(self):
        assert tools.md5_hexdigest("foo") == "acbd18db4cc2f85cedef654fccc4a4d8"
    def test_check_missing(self):
        # A plain value passes; a Missing value must raise ValueError.
        tools.check_missing({"foo": "bar"})
        try:
            tools.check_missing({"foo": Missing("bar")})
        except ValueError:
            pass
        else:
            assert False
    def test_timer(self):
        # Smoke tests only; the last variant writes /tmp/__timertest__ and
        # leaves the file behind.
        with tools.timer("foo"):
            pass
        with tools.timer("foo", ms=True):
            pass
        with tools.timer("foo", write_file="/tmp/__timertest__"):
            pass
    def test_parse_timedelta(self):
        # Results are in milliseconds; a bare number means milliseconds.
        assert tools.parse_timedelta("10") == 10
        assert tools.parse_timedelta("10s") == 10 * 1000
        assert tools.parse_timedelta("1m") == 60 * 1000
        try:
            tools.parse_timedelta("agfdwrg")
        except ValueError:
            assert True
        else:
            assert False
    def test_get_moya_dir(self):
        moya_dir = os.path.join(os.path.dirname(__file__), "moyadir")
        path = os.path.join(moya_dir, "foo")
        assert tools.get_moya_dir(path) == moya_dir
        try:
            tools.get_moya_dir()
        except ValueError:
            assert True
        else:
            assert False
        try:
            tools.get_moya_dir("/")
        except ValueError:
            assert True
        else:
            assert False
    def test_is_moya_dir(self):
        moya_dir = os.path.join(os.path.dirname(__file__), "moyadir")
        path = os.path.join(moya_dir, "foo")
        assert not tools.is_moya_dir(path)
        assert tools.is_moya_dir(moya_dir)
        assert not tools.is_moya_dir("/")
        assert not tools.is_moya_dir()
    def test_file_chunker(self):
        text = b"Hello, World"
        f = io.BytesIO(text)
        chunks = list(tools.file_chunker(f, 1))
        assert chunks == [
            b"H",
            b"e",
            b"l",
            b"l",
            b"o",
            b",",
            b" ",
            b"W",
            b"o",
            b"r",
            b"l",
            b"d",
        ]
        f = io.BytesIO(text)
        chunks = list(tools.file_chunker(f, 2))
        assert chunks == [b"He", b"ll", b"o,", b" W", b"or", b"ld"]
        f = io.BytesIO(text)
        chunks = list(tools.file_chunker(f, 256))
        assert chunks == [text]
    def test_make_id(self):
        assert tools.make_id() != tools.make_id()
    def test_datetime_to_epoch(self):
        # Non-datetime input is passed through unchanged.
        assert tools.datetime_to_epoch(100) == 100
        epoch_start = datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.UTC)
        self.assertEqual(tools.datetime_to_epoch(epoch_start), 0)
    def test_split_commas(self):
        assert tools.split_commas("foo, bar") == ["foo", "bar"]
    def test_summarize_text(self):
        assert tools.summarize_text(None) == ""
        assert tools.summarize_text("hello") == "hello"
        assert tools.summarize_text("hello, world", max_length=5) == "hello[...]"
    def test_get_return(self):
        assert tools.get_return(None) == {}
        assert tools.get_return(100) == 100
        ret = ReturnContainer("foo")
        assert tools.get_return(ret) == "foo"
    def test_as_dict(self):
        assert tools.as_dict({"foo": "bar"}) == {"foo": "bar"}
        # Duck-typed object exposing items()/iteritems() like a mapping.
        class D(object):
            def items(self):
                return [("foo", "bar")]
            def iteritems(self):
                return iter(self.items())
        d = D()
        assert tools.as_dict(d) == {"foo": "bar"}
    def test_quote(self):
        assert tools.quote("hello") == '"hello"'
    def test_squote(self):
        assert tools.squote("hello") == "'hello'"
    def test_textual_list(self):
        assert tools.textual_list(["foo", "bar"]) == "'foo' or 'bar'"
        assert tools.textual_list(["foo", "bar", "baz"]) == "'foo', 'bar' or 'baz'"
        assert tools.textual_list(["foo"]) == "'foo'"
        assert tools.textual_list([], empty="nadda") == "nadda"
    def test_moya_update(self):
        d = {}
        tools.moya_update(d, {"foo": "bar"})
        self.assertEqual(d, {"foo": "bar"})
    def test_url_join(self):
        assert (
            tools.url_join("http://moyaproject.com/", "/foo/")
            == "http://moyaproject.com/foo/"
        )
    def test_remove_padding(self):
        assert tools.remove_padding(" ") == ""
        assert tools.remove_padding("") == ""
        assert tools.remove_padding(" hello ") == " hello "
        assert tools.remove_padding("\n\nhello\n\n") == "hello"
        assert tools.remove_padding("\n\nhello\nworld\n\n") == "hello\nworld"
    def test_unique(self):
        assert tools.unique([]) == []
        assert tools.unique(["foo"]) == ["foo"]
        assert tools.unique(["foo", "bar"]) == ["foo", "bar"]
        assert tools.unique(["foo", "bar", "bar", "bar", "baz"]) == [
            "foo",
            "bar",
            "baz",
        ]
        # Non-iterable input degrades to an empty list.
        assert tools.unique(5) == []
    def test_format_element_type(self):
        assert tools.format_element_type(("foo", "bar")) == "{foo}bar"
        assert tools.format_element_type("foo") == "foo"
    # def test_get_ids(self):
    #     class O(object):
    #         id = 1
    #     assert tools.get_ids([O(), None]) == [1]
    def test_multi_replace(self):
        replacer = tools.MultiReplace({"foo": "bar", "baz": "egg"})
        assert replacer("foo baz foo ok") == "bar egg bar ok"
    def test_dummy_lock(self):
        with tools.DummyLock() as _lock:
            pass
    def test_make_cache_key(self):
        assert tools.make_cache_key(["foo", "bar"]) == "foo.bar"
        assert tools.make_cache_key(["foo", "bar", [1, 2]]) == "foo.bar.1-2"
        assert (
            tools.make_cache_key(["foo", "bar", [1, 2], {"foo": "bar"}])
            == "foo.bar.1-2.foo_bar"
        )
    # NOTE(review): method name typo -- should be "test_nearest_word".
    def test_nearers_word(self):
        assert tools.nearest_word("floo", ["foo", "bar"]) == "foo"
    def test_show_tb(self):
        @tools.show_tb
        def test():
            raise Exception("everything is fine")
        try:
            test()
        # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt;
        # `except Exception:` would be safer here.
        except:
            assert True
        else:
            assert False
    def test_normalize_url_path(self):
        assert tools.normalize_url_path("") == "/"
        assert tools.normalize_url_path("foo") == "/foo/"
        assert tools.normalize_url_path("foo/bar") == "/foo/bar/"
    def test_lazystr(self):
        # lazystr defers evaluation but proxies str conversion, len and
        # attribute access to the produced value.
        s = tools.lazystr(lambda: "foo")
        assert text_type(s) == "foo"
        s = tools.lazystr(lambda: "foo")
        assert len(s) == 3
        s = tools.lazystr(lambda: "foo")
        assert s.upper() == "FOO"
| |
#!/bin/env python
import subprocess, sys, os, random, json, math, datetime as dt
from uuid import uuid4
from StringIO import StringIO
random.seed()  # seed from system entropy; generated data is intentionally random
nameData = {}  # populated from names.json in main(); read by Student.__init__
class EST(dt.tzinfo):
    """Fixed US Eastern Standard Time (UTC-5), no daylight saving."""
    def utcoffset(self, d):
        # Total offset from UTC, constant year-round.
        return dt.timedelta(hours=-5)
    def dst(self, d):
        # Bug fix: the original returned a constant 1-hour DST while
        # utcoffset stayed at -5, which is inconsistent with the tzinfo
        # contract for a fixed-offset zone; report zero DST instead.
        return dt.timedelta(0)
    def tzname(self, d):
        # Added for completeness so isoformat()/str() can name the zone.
        return "EST"
class Student(object):
    """A simulated student with a target average score.

    Reads the module-level nameData dict (loaded in main()) for random
    first/last names.
    """
    def __init__(self, average):
        # average: expected score (0-100-ish) driving answer probability.
        self.firstName = random.choice(nameData['firstNames'])
        self.lastName = random.choice(nameData['lastNames'])
        self.name = u'{} {}'.format(self.firstName, self.lastName)
        self.email = u'{}.{}@myschool.edu'.format(self.firstName.lower(), self.lastName.lower())
        self.average = average
    def answerQuestion(self, difficulty):
        """Return True with probability rising in (average - difficulty).

        Uses an arctan squash mapped onto (0, 1); the +12 bias presumably
        tilts odds slightly in the student's favour -- TODO confirm intent.
        """
        difference = self.average - difficulty
        successProbability = math.atan((difference+12)/10)/math.pi + 0.5
        return random.random() < successProbability
        #return random.normalvariate(self.average,sqrt(self.variance)) >= difficulty
class Class(object):
    """A course section: an instructor plus normally-distributed students."""
    def __init__(self, numStudents, classAverage, classVariance):
        # Hard-coded course id used to build xAPI activity URIs.
        self.id = 'cop-3223'
        # Instructor modelled as a perfect (average=100) Student.
        self.instructor = Student(100)
        self.students = []
        for i in range(numStudents):
            # Draw each student's personal average from N(mean, variance).
            studentAverage = random.normalvariate(classAverage, math.sqrt(classVariance))
            self.students.append( Student(studentAverage) )
    def takeTest(self, test):
        """Have every student answer every question; return TestResults."""
        results = TestResults(len(test))
        # classHandle lets downstream statement generation reach this Class.
        results.classHandle = self
        for questionNum,difficulty in enumerate(test):
            for student in self.students:
                results.logAnswer( student, questionNum, student.answerQuestion(difficulty) )
        return results
class Test(list):
    """A named exam: a list of per-question difficulty values."""

    def __init__(self, name, questions):
        # Initialise the list contents directly from the iterable instead
        # of appending one element at a time.
        super(Test, self).__init__(questions)
        self.name = name
class Battery(object):
    """An ordered collection of tests administered together to a group."""

    def __init__(self):
        self.tests = []

    def run(self, group):
        """Administer every test to *group*; map test name -> its results."""
        return {test.name: group.takeTest(test) for test in self.tests}
class TestResults(list):
    """Per-question mapping of student -> pass/fail for a single test."""

    def __init__(self, numQuestions):
        # One fresh dict per question (the generator yields distinct dicts).
        super(TestResults, self).__init__({} for _ in range(numQuestions))

    def logAnswer(self, student, questionNumber, success):
        """Record whether *student* got *questionNumber* right."""
        self[questionNumber][student] = success
def genStatements(results):
    """Convert Battery results into a flat list of xAPI statement dicts.

    results maps test id -> TestResults (which carries .classHandle, the
    Class that took the test). Output is sorted newest-first by 'stored'.
    """
    xapiStatements = []
    for testid,questions in results.items():
        times = {}
        sums = {}
        # HACK: startTime deliberately survives across loop iterations.
        # The first iteration raises UnboundLocalError and initialises it;
        # every later iteration advances it by 3 hours so consecutive tests
        # appear to happen sequentially.
        try:
            startTime += dt.timedelta(hours=3)
        except UnboundLocalError:
            startTime = dt.datetime.now(EST())
        for qNum,qResults in enumerate(questions):
            for student,result in qResults.items():
                if qNum == 0:
                    # First question also emits an 'attempted' statement.
                    xapiStatements.append( genStatement(questions.classHandle, student,'attempted',testid,startTime) )
                activity = '{}/q{}'.format(testid,qNum)
                # Each answer advances that student's clock by 30-90 seconds.
                times[student] = times.get(student, startTime) + dt.timedelta(seconds=random.randint(30,90))
                sums[student] = sums.get(student,0) + (100 if result else 0)
                xapiStatements.append( genStatement(questions.classHandle, student,'answered',activity,times[student],result) )
        for student,time in times.items():
            xapiStatements.append( genStatement(questions.classHandle, student,'completed',testid,time) )
            # NOTE(review): Python 2 integer division floors the average --
            # presumably intended; revisit if ported to Python 3.
            average = sums[student]/len(questions)
            passed = 'passed' if average >= 60 else 'failed'
            xapiStatements.append( genStatement(questions.classHandle, student,passed,testid,time,average) )
    def sortKey(e):
        return e['stored']
    xapiStatements.sort(key=sortKey, reverse=True)
    return xapiStatements
def genStatement(c, student, verb, activity, time, score=None):
    """Build one xAPI 1.0.1 statement dict.

    c: Class instance supplying the course id and instructor.
    score: bool -> result.success; number -> result.score (raw, 0-100);
           None -> no result section.
    """
    stmt = {
        'actor': {
            'name': student.name,
            'mbox': 'mailto:'+student.email
        },
        'verb': {
            'id': 'http://adlnet.gov/expapi/verbs/'+verb,
            'display': {'en-US': verb}
        },
        'object': {
            'id': 'http://myschool.edu/xapi/{}/{}'.format(c.id,activity),
            'definition': {
                'name': activity
            }
        },
        'context': {
            'instructor': {
                'objectType': 'Agent',
                'name': c.instructor.name,
                'mbox': 'mailto:'+c.instructor.email
            },
            'contextActivities': {
                'grouping': [{'id': 'http://myschool.edu/xapi/'+c.id}]
            }
        },
        'authority': {
            'mbox': 'mailto:admin@myschool.edu',
            'objectType': 'Agent'
        },
        'timestamp': time.isoformat(),
        'stored': time.isoformat(),
        'version': '1.0.1',
        'id': str(uuid4())
    }
    if verb == 'answered':
        # Question activities nest under their parent test activity.
        stmt['context']['contextActivities']['parent'] = [{
            'id': 'http://myschool.edu/xapi/{}/{}'.format(c.id, activity.split('/')[0])
        }]
    # bool must be tested before the numeric branch: bool is an int subclass.
    if isinstance(score, bool):
        stmt['result'] = {
            'success': score
        }
    elif isinstance(score, (int,long,float)):  # NOTE(review): `long` makes this Python-2-only
        stmt['result'] = {
            'score': {
                'raw': score,
                'min': 0,
                'max': 100
            }
        }
    return stmt
def main():
    """CLI entry point: simulate a class taking a test battery and emit
    SCORM-style xAPI statements as JSON (Python 2 only: print statements).
    """
    if not set(sys.argv).isdisjoint(set(['-?','-h','--help'])):
        print 'Generate SCORM-style xAPI statements'
        print 'options:'
        print '  -o <filename> - output to a file instead of the console'
        print '  -p - pack the JSON payload in a compressed javascript file'
        return
    path = os.path.dirname(os.path.realpath(__file__))
    # names.json must sit next to this script; it feeds Student name picks.
    with open(path+'/names.json','r') as names:
        global nameData
        nameData = json.load(names)
    battery = Battery()
    battery.tests.append( Test('test1', [random.randint(65,80) for i in range(50)]) )
    battery.tests.append( Test('test2', [random.randint(65,80) for i in range(50)]) )
    battery.tests.append( Test('test3', [random.randint(65,80) for i in range(50)]) )
    battery.tests.append( Test('test4', [random.randint(65,80) for i in range(50)]) )
    battery.tests.append( Test('final', [random.randint(70,85) for i in range(50)]) )
    #battery.tests.append( Test('test2', [50 for i in range(100)]) )
    myclass = Class(30, 75,20)
    results = battery.run(myclass)
    statements = genStatements(results)
    stmtString = json.dumps(statements, indent=4)
    if '-p' in sys.argv:
        # Pipe the JSON through a node compression helper living next to us.
        p = subprocess.Popen(['node', path+'/compress.js'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path)
        stmtString, err = p.communicate(stmtString)
        if err != '':
            print 'Error', err
    if '-o' in sys.argv:
        i = sys.argv.index('-o')
        try:
            with open(sys.argv[i+1],'w') as outfile:
                outfile.write(stmtString)
        # Missing filename after -o falls back to console output.
        except IndexError:
            print stmtString
    else:
        print stmtString
if __name__ == '__main__':
    main()
| |
"""Test the August config flow."""
from unittest.mock import patch
from yalexs.authenticator import ValidationResult
from homeassistant import config_entries, setup
from homeassistant.components.august.const import (
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DOMAIN,
VERIFICATION_CODE_KEY,
)
from homeassistant.components.august.exceptions import (
CannotConnect,
InvalidAuth,
RequireValidation,
)
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from tests.common import MockConfigEntry
async def test_form(hass):
    """Test we get the form and a successful login creates the entry."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    # Successful authentication (no 2FA) should go straight to create_entry.
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        return_value=True,
    ), patch(
        "homeassistant.components.august.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_LOGIN_METHOD: "email",
                CONF_USERNAME: "my@email.tld",
                CONF_PASSWORD: "test-password",
            },
        )
        await hass.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["title"] == "my@email.tld"
    # Password is intentionally absent from the stored data snapshot here.
    assert result2["data"] == {
        CONF_LOGIN_METHOD: "email",
        CONF_USERNAME: "my@email.tld",
        CONF_INSTALL_ID: None,
        CONF_ACCESS_TOKEN_CACHE_FILE: ".my@email.tld.august.conf",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth: the form is re-shown with an error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        side_effect=InvalidAuth,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_LOGIN_METHOD: "email",
                CONF_USERNAME: "my@email.tld",
                CONF_PASSWORD: "test-password",
            },
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_user_unexpected_exception(hass):
    """Test an unexpected exception maps to the generic 'unknown' error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        side_effect=ValueError,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_LOGIN_METHOD: "email",
                CONF_USERNAME: "my@email.tld",
                CONF_PASSWORD: "test-password",
            },
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "unknown"}
async def test_form_cannot_connect(hass):
    """Test we handle cannot connect error with a dedicated error key."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        side_effect=CannotConnect,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_LOGIN_METHOD: "email",
                CONF_USERNAME: "my@email.tld",
                CONF_PASSWORD: "test-password",
            },
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_needs_validate(hass):
    """Test we present validation when we need to validate (2FA flow)."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # RequireValidation should trigger exactly one verification-code send
    # and move the flow to the "validation" step.
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        side_effect=RequireValidation,
    ), patch(
        "homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
        return_value=True,
    ) as mock_send_verification_code:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_LOGIN_METHOD: "email",
                CONF_USERNAME: "my@email.tld",
                CONF_PASSWORD: "test-password",
            },
        )
    assert len(mock_send_verification_code.mock_calls) == 1
    assert result2["type"] == "form"
    assert result2["errors"] is None
    assert result2["step_id"] == "validation"
    # Try with the WRONG verification code give us the form back again
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        side_effect=RequireValidation,
    ), patch(
        "homeassistant.components.august.gateway.AuthenticatorAsync.async_validate_verification_code",
        return_value=ValidationResult.INVALID_VERIFICATION_CODE,
    ) as mock_validate_verification_code, patch(
        "homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
        return_value=True,
    ) as mock_send_verification_code, patch(
        "homeassistant.components.august.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {VERIFICATION_CODE_KEY: "incorrect"},
        )
    # Make sure we do not resend the code again
    # so they have a chance to retry
    assert len(mock_send_verification_code.mock_calls) == 0
    assert len(mock_validate_verification_code.mock_calls) == 1
    assert result3["type"] == "form"
    assert result3["errors"] is None
    assert result3["step_id"] == "validation"
    # Try with the CORRECT verification code and we setup
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        return_value=True,
    ), patch(
        "homeassistant.components.august.gateway.AuthenticatorAsync.async_validate_verification_code",
        return_value=ValidationResult.VALIDATED,
    ) as mock_validate_verification_code, patch(
        "homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
        return_value=True,
    ) as mock_send_verification_code, patch(
        "homeassistant.components.august.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result4 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {VERIFICATION_CODE_KEY: "correct"},
        )
        await hass.async_block_till_done()
    assert len(mock_send_verification_code.mock_calls) == 0
    assert len(mock_validate_verification_code.mock_calls) == 1
    assert result4["type"] == "create_entry"
    assert result4["title"] == "my@email.tld"
    assert result4["data"] == {
        CONF_LOGIN_METHOD: "email",
        CONF_USERNAME: "my@email.tld",
        CONF_INSTALL_ID: None,
        CONF_ACCESS_TOKEN_CACHE_FILE: ".my@email.tld.august.conf",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_reauth(hass):
    """Test reauthenticate: a new password aborts with reauth_successful."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            CONF_LOGIN_METHOD: "email",
            CONF_USERNAME: "my@email.tld",
            CONF_PASSWORD: "test-password",
            CONF_INSTALL_ID: None,
            CONF_TIMEOUT: 10,
            CONF_ACCESS_TOKEN_CACHE_FILE: ".my@email.tld.august.conf",
        },
        unique_id="my@email.tld",
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=entry.data
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        return_value=True,
    ), patch(
        "homeassistant.components.august.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_PASSWORD: "new-test-password",
            },
        )
        await hass.async_block_till_done()
    # Reauth flows abort (they update the existing entry) rather than create.
    assert result2["type"] == "abort"
    assert result2["reason"] == "reauth_successful"
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_reauth_with_2fa(hass):
    """Test reauthenticate with 2fa: validation step, then abort on success."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            CONF_LOGIN_METHOD: "email",
            CONF_USERNAME: "my@email.tld",
            CONF_PASSWORD: "test-password",
            CONF_INSTALL_ID: None,
            CONF_TIMEOUT: 10,
            CONF_ACCESS_TOKEN_CACHE_FILE: ".my@email.tld.august.conf",
        },
        unique_id="my@email.tld",
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=entry.data
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    # RequireValidation during reauth sends exactly one verification code.
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        side_effect=RequireValidation,
    ), patch(
        "homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
        return_value=True,
    ) as mock_send_verification_code:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_PASSWORD: "new-test-password",
            },
        )
        await hass.async_block_till_done()
    assert len(mock_send_verification_code.mock_calls) == 1
    assert result2["type"] == "form"
    assert result2["errors"] is None
    assert result2["step_id"] == "validation"
    # Try with the CORRECT verification code and we setup
    with patch(
        "homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
        return_value=True,
    ), patch(
        "homeassistant.components.august.gateway.AuthenticatorAsync.async_validate_verification_code",
        return_value=ValidationResult.VALIDATED,
    ) as mock_validate_verification_code, patch(
        "homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
        return_value=True,
    ) as mock_send_verification_code, patch(
        "homeassistant.components.august.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {VERIFICATION_CODE_KEY: "correct"},
        )
        await hass.async_block_till_done()
    assert len(mock_validate_verification_code.mock_calls) == 1
    assert len(mock_send_verification_code.mock_calls) == 0
    assert result3["type"] == "abort"
    assert result3["reason"] == "reauth_successful"
    assert len(mock_setup_entry.mock_calls) == 1
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import webhook
from google.cloud.dialogflowcx_v3beta1.types import webhook as gcdc_webhook
from google.protobuf import empty_pb2 # type: ignore
from .base import WebhooksTransport, DEFAULT_CLIENT_INFO
from .grpc import WebhooksGrpcTransport
class WebhooksGrpcAsyncIOTransport(WebhooksTransport):
"""gRPC AsyncIO backend transport for Webhooks.
Service for managing
[Webhooks][google.cloud.dialogflow.cx.v3beta1.Webhook].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # Delegate to the shared async helper; AUTH_SCOPES and DEFAULT_HOST
        # are class attributes supplied by the WebhooksTransport base class.
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        # Lazily-built per-RPC stubs, keyed by method name (see the
        # property accessors below).
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        # An explicitly provided channel takes precedence over every other
        # TLS/credential option below.
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                # -1 disables gRPC's default message-size limits.
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def list_webhooks(
self,
) -> Callable[
[webhook.ListWebhooksRequest], Awaitable[webhook.ListWebhooksResponse]
]:
r"""Return a callable for the list webhooks method over gRPC.
Returns the list of all webhooks in the specified
agent.
Returns:
Callable[[~.ListWebhooksRequest],
Awaitable[~.ListWebhooksResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_webhooks" not in self._stubs:
self._stubs["list_webhooks"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Webhooks/ListWebhooks",
request_serializer=webhook.ListWebhooksRequest.serialize,
response_deserializer=webhook.ListWebhooksResponse.deserialize,
)
return self._stubs["list_webhooks"]
@property
def get_webhook(
self,
) -> Callable[[webhook.GetWebhookRequest], Awaitable[webhook.Webhook]]:
r"""Return a callable for the get webhook method over gRPC.
Retrieves the specified webhook.
Returns:
Callable[[~.GetWebhookRequest],
Awaitable[~.Webhook]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_webhook" not in self._stubs:
self._stubs["get_webhook"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Webhooks/GetWebhook",
request_serializer=webhook.GetWebhookRequest.serialize,
response_deserializer=webhook.Webhook.deserialize,
)
return self._stubs["get_webhook"]
@property
def create_webhook(
self,
) -> Callable[[gcdc_webhook.CreateWebhookRequest], Awaitable[gcdc_webhook.Webhook]]:
r"""Return a callable for the create webhook method over gRPC.
Creates a webhook in the specified agent.
Returns:
Callable[[~.CreateWebhookRequest],
Awaitable[~.Webhook]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_webhook" not in self._stubs:
self._stubs["create_webhook"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Webhooks/CreateWebhook",
request_serializer=gcdc_webhook.CreateWebhookRequest.serialize,
response_deserializer=gcdc_webhook.Webhook.deserialize,
)
return self._stubs["create_webhook"]
@property
def update_webhook(
self,
) -> Callable[[gcdc_webhook.UpdateWebhookRequest], Awaitable[gcdc_webhook.Webhook]]:
r"""Return a callable for the update webhook method over gRPC.
Updates the specified webhook.
Returns:
Callable[[~.UpdateWebhookRequest],
Awaitable[~.Webhook]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_webhook" not in self._stubs:
self._stubs["update_webhook"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Webhooks/UpdateWebhook",
request_serializer=gcdc_webhook.UpdateWebhookRequest.serialize,
response_deserializer=gcdc_webhook.Webhook.deserialize,
)
return self._stubs["update_webhook"]
@property
def delete_webhook(
self,
) -> Callable[[webhook.DeleteWebhookRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete webhook method over gRPC.
Deletes the specified webhook.
Returns:
Callable[[~.DeleteWebhookRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_webhook" not in self._stubs:
self._stubs["delete_webhook"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.cx.v3beta1.Webhooks/DeleteWebhook",
request_serializer=webhook.DeleteWebhookRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_webhook"]
def close(self):
return self.grpc_channel.close()
__all__ = ("WebhooksGrpcAsyncIOTransport",)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import io
from unittest import mock
from oslo_concurrency import processutils as putils
from cinder import context
from cinder import exception
from cinder.tests.unit.targets import targets_fixture as tf
from cinder import utils
from cinder.volume.targets import iet
class TestIetAdmDriver(tf.TargetDriverFixture):
    """Unit tests for the IET (iSCSI Enterprise Target) admin driver.

    All tests patch ``builtins.open`` and the ``cinder.privsep.targets.iet``
    helpers, so no real iSCSI state or config file is ever touched.
    """

    def setUp(self):
        super(TestIetAdmDriver, self).setUp()
        # Driver under test; configuration comes from the shared fixture.
        self.target = iet.IetAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)

    def test_get_target(self):
        """_get_target parses the tid out of the proc-style sessions file."""
        # Fake contents of IET's session/volume listing.
        tmp_file = io.StringIO()
        tmp_file.write(
            'tid:1 name:iqn.2010-10.org.openstack:'
            'volume-83c2e877-feed-46be-8435-77884fe55b45\n'
            ' sid:844427031282176 initiator:'
            'iqn.1994-05.com.redhat:5a6894679665\n'
            ' cid:0 ip:10.9.8.7 state:active hd:none dd:none')
        tmp_file.seek(0)
        with mock.patch('builtins.open') as mock_open:
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual('1',
                             self.target._get_target(
                                 'iqn.2010-10.org.openstack:'
                                 'volume-83c2e877-feed-46be-8435-77884fe55b45'
                             ))

            # Test the failure case: Failed to handle the config file
            mock_open.side_effect = MemoryError()
            self.assertRaises(MemoryError,
                              self.target._get_target,
                              '')

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=0)
    @mock.patch('cinder.privsep.targets.iet.new_target')
    @mock.patch('cinder.privsep.targets.iet.new_logicalunit')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch.object(iet, 'LOG')
    def test_create_iscsi_target(self, mock_log, mock_chown, mock_exists,
                                 mock_new_logical_unit, mock_new_target,
                                 mock_get_targ):
        """create_iscsi_target drives the privsep helpers and config file."""
        tmp_file = io.StringIO()
        with mock.patch('builtins.open') as mock_open:
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual(
                0,
                self.target.create_iscsi_target(
                    self.test_vol,
                    0,
                    0,
                    self.fake_volumes_dir))
            self.assertTrue(mock_new_target.called)
            self.assertTrue(mock_open.called)
            self.assertTrue(mock_get_targ.called)
            self.assertTrue(mock_new_logical_unit.called)

        # Test the failure case: Failed to chown the config file
        mock_open.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetCreateFailed,
                          self.target.create_iscsi_target,
                          self.test_vol,
                          0,
                          0,
                          self.fake_volumes_dir)

        # Test the failure case: Failed to set new auth
        mock_new_target.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetCreateFailed,
                          self.target.create_iscsi_target,
                          self.test_vol,
                          0,
                          0,
                          self.fake_volumes_dir)

    @mock.patch('cinder.utils.execute')
    @mock.patch('os.path.exists', return_value=True)
    def test_update_config_file_failure(self, mock_exists, mock_execute):
        """update_config_file raises when the conf file cannot be written."""
        # Test the failure case: conf file does not exist
        mock_exists.return_value = False
        mock_execute.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetCreateFailed,
                          self.target.update_config_file,
                          self.test_vol,
                          0,
                          self.fake_volumes_dir,
                          "foo bar")

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=1)
    @mock.patch('cinder.privsep.targets.iet.new_target')
    @mock.patch('cinder.privsep.targets.iet.new_logicalunit')
    def test_create_iscsi_target_already_exists(
            self, mock_new_logical_unit, mock_new_target, mock_get_targ):
        """An existing target (tid already allocated) is returned unchanged."""
        self.assertEqual(
            1,
            self.target.create_iscsi_target(
                self.test_vol,
                1,
                0,
                self.fake_volumes_dir))
        self.assertTrue(mock_get_targ.called)
        self.assertTrue(mock_new_target.called)
        self.assertTrue(mock_new_logical_unit.called)

    @mock.patch('cinder.volume.targets.iet.IetAdm._find_sid_cid_for_target',
                return_value=None)
    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('cinder.privsep.targets.iet.delete_logicalunit')
    @mock.patch('cinder.privsep.targets.iet.delete_target')
    def test_remove_iscsi_target(
            self, mock_delete_target,
            mock_delete_logicalunit, mock_exists, mock_find):
        """remove_iscsi_target deletes the LUN first, then the target."""
        # Test the normal case
        self.target.remove_iscsi_target(1,
                                        0,
                                        self.testvol['id'],
                                        self.testvol['name'])
        mock_delete_logicalunit.assert_called_once_with(1, 0)
        mock_delete_target.assert_called_once_with(1)

        # Test the failure case: putils.ProcessExecutionError
        mock_delete_logicalunit.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetRemoveFailed,
                          self.target.remove_iscsi_target,
                          1,
                          0,
                          self.testvol['id'],
                          self.testvol['name'])

    @mock.patch('cinder.privsep.targets.iet.delete_target')
    def test_find_sid_cid_for_target(self, mock_delete_target):
        """_find_sid_cid_for_target parses (sid, cid) from the session file."""
        tmp_file = io.StringIO()
        tmp_file.write(
            'tid:1 name:iqn.2010-10.org.openstack:'
            'volume-83c2e877-feed-46be-8435-77884fe55b45\n'
            ' sid:844427031282176 initiator:'
            'iqn.1994-05.com.redhat:5a6894679665\n'
            ' cid:0 ip:10.9.8.7 state:active hd:none dd:none')
        tmp_file.seek(0)
        with mock.patch('builtins.open') as mock_open:
            mock_open.return_value = contextlib.closing(tmp_file)
            self.assertEqual(('844427031282176', '0'),
                             self.target._find_sid_cid_for_target(
                                 '1',
                                 'iqn.2010-10.org.openstack:'
                                 'volume-83c2e877-feed-46be-8435-77884fe55b45',
                                 'volume-83c2e877-feed-46be-8435-77884fe55b45'
                             ))

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=1)
    @mock.patch('cinder.privsep.targets.iet.new_target')
    @mock.patch('cinder.privsep.targets.iet.new_logicalunit')
    @mock.patch('cinder.privsep.targets.iet.new_auth')
    @mock.patch.object(iet.IetAdm, '_get_target_chap_auth')
    def test_create_export(
            self, mock_get_chap, mock_new_auth, mock_new_logicalunit,
            mock_new_target, mock_get_targ):
        """create_export returns the portal location plus CHAP credentials."""
        mock_get_chap.return_value = ('QZJbisGmn9AL954FNF4D',
                                      'P68eE7u9eFqDGexd28DQ')
        expected_result = {'location': '10.9.8.7:3260,1 '
                           'iqn.2010-10.org.openstack:testvol 0',
                           'auth': 'CHAP '
                           'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'}
        ctxt = context.get_admin_context()
        self.assertEqual(expected_result,
                         self.target.create_export(ctxt,
                                                   self.testvol,
                                                   self.fake_volumes_dir))
        self.assertTrue(mock_new_logicalunit.called)
        self.assertTrue(mock_new_target.called)

    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target_chap_auth',
                return_value=None)
    @mock.patch('cinder.volume.targets.iet.IetAdm._get_target',
                return_value=1)
    def test_ensure_export(self, mock_get_targetm, mock_get_chap):
        """ensure_export recreates the target with the expected arguments."""
        ctxt = context.get_admin_context()
        with mock.patch.object(self.target, 'create_iscsi_target'):
            self.target.ensure_export(ctxt,
                                      self.testvol,
                                      self.fake_volumes_dir)
            self.target.create_iscsi_target.assert_called_once_with(
                'iqn.2010-10.org.openstack:testvol',
                1, 0, self.fake_volumes_dir, None,
                portals_ips=[self.configuration.target_ip_address],
                portals_port=int(self.configuration.target_port),
                check_exit_code=False,
                old_name=None)
| |
#
# Copyright (c) 2010-2011, Nick Blundell
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Nick Blundell nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Nick Blundell <blundeln [AT] gmail [DOT] com>
# Organisation: www.nickblundell.org.uk
#
#
import inspect
from nbdebug import d, breakpoint, set_indent_function, IN_DEBUG_MODE
from exceptions import *
from containers import *
from readers import *
from util import *
from debug import *
from base_lenses import *
from core_lenses import *
class OneOrMore(Repeat) :
    """A Repeat lens that must match at least once."""

    def __init__(self, *args, **kargs):
        # Default to a minimum of one match unless the caller said otherwise.
        kargs.setdefault("min_count", 1)
        # Careful: pass OneOrMore (not Repeat) to super() here.
        super(OneOrMore, self).__init__(*args, **kargs)

    @staticmethod
    def TESTS() :
        # This is really just to check the lens construction.
        one_or_more_digits = OneOrMore(AnyOf(nums, type=int), type=list)
        assert(one_or_more_digits.get("123") == [1,2,3])
class ZeroOrMore(Repeat) :
    """A Repeat lens that may match any number of times, including none."""

    def __init__(self, *args, **kargs):
        # Only fill in min_count when the caller did not specify one.
        kargs.setdefault("min_count", 0)
        super(ZeroOrMore, self).__init__(*args, **kargs)
class Optional(Or) :
    """An Or lens matching either the given lens or nothing at all."""

    def __init__(self, lens, **kargs):
        # Optionality is expressed as: the lens, or the Empty lens.
        super(Optional, self).__init__(lens, Empty(), **kargs)

    @staticmethod
    def TESTS():
        GlobalSettings.check_consumption = False
        opt_alpha = Optional(AnyOf(alphas, type=str))
        assert(opt_alpha.get("abc") == "a")
        assert(opt_alpha.get("123") == None)
        assert(opt_alpha.put("a") == "a")
        assert(opt_alpha.put(1) == "")
class List(And) :
    """Shortcut lens for delimited lists."""

    def __init__(self, lens, delimiter_lens, **kargs):
        # A delimited list is one item followed by zero or more
        # (delimiter, item) pairs.
        tail = ZeroOrMore(And(delimiter_lens, lens))
        super(List, self).__init__(lens, tail, **kargs)

    @staticmethod
    def TESTS() :
        number_list = List(AnyOf(nums, type=int), ",", type=list)
        d("GET")
        assert(number_list.get("1,2,3") == [1, 2, 3])
        d("PUT")
        assert(number_list.put([6, 2, 6, 7, 4, 8]) == "6,2,6,7,4,8")

        # It was getting flattened due to And within And!
        test_description("Test a bug I found with nested lists.")
        nested_input = "1|2,3|4,5|6"
        nested_list = List(
            List(AnyOf(nums, type=int), "|", name="inner_list", type=list),
            ",", name="outer_list",
            type=list,
        )
        token = nested_list.get(nested_input)
        assert_equal(token, [[1, 2], [3, 4], [5, 6]])
        token.insert(2, [6, 7])
        assert_equal(nested_list.put(token), "1|2,3|4,6|7,5|6")
class NewLine(Or) :
    """Matches a newline char or the end of text, so extends the Or lens."""

    def __init__(self, **kargs) :
        # Either a literal newline or the logical end of the input text.
        super(NewLine, self).__init__("\n", Empty(mode=Empty.END_OF_TEXT), **kargs)
    # TODO: Ensure it puts a \n regardless of being at end of file, to allow
    # appending. Could hook put

    @staticmethod
    def TESTS() :
        newline = NewLine()
        assert(newline.get("\n") == None)
        assert(newline.get("") == None)
        with assert_raises(LensException) :
            newline.get("abc")
        assert(newline.put("\n") == "\n")

NL = NewLine # Abbreviation
class Word(And) :
    """
    Useful for handling keywords of a specific char range.

    Args:
        body_chars: characters permitted in the body of the word.
        init_chars: characters permitted as the first char (defaults to
            body_chars).
        min_count: minimum word length; must be greater than zero.
        max_count: maximum word length, or None for unbounded.
        negate: accepted for interface compatibility; not used here.
    """
    def __init__(self, body_chars, init_chars=None, min_count=1, max_count=None, negate=False, **kargs):
        assert_msg(min_count > 0, "min_count should be more than zero.")

        # For convenience, enable type if label or is_label is set on this lens.
        if "is_label" in kargs or "label" in kargs :
            kargs["type"] = str

        if "type" in kargs and has_value(kargs["type"]):
            assert_msg(kargs["type"] == str, "If set the type of Word should be str.")
            any_of_type = str
            # Ensure the And type is list
            kargs["type"] = list
        else :
            any_of_type = None

        # Ensure chars are combined if this is a STORE lens.
        kargs["combine_chars"] = True

        left_lens = AnyOf(init_chars or body_chars, type=any_of_type)
        # BUG FIX: the previous expression `max_count and max_count-1 or None`
        # evaluated to None when max_count == 1 (because 1-1 == 0 is falsy),
        # silently discarding the upper bound.  A conditional expression
        # preserves max_count is None/0 -> None while keeping 1 -> 0.
        right_max = max_count - 1 if max_count else None
        right_lens = Repeat(AnyOf(body_chars, type=any_of_type), min_count=min_count-1, max_count=right_max)

        super(Word, self).__init__(left_lens, right_lens, **kargs)

    @staticmethod
    def TESTS() :
        GlobalSettings.check_consumption = False
        lens = Word(alphanums, init_chars=alphas, type=str, max_count=5)
        d("GET")
        assert(lens.get("w23dffdf3") == "w23df")
        with assert_raises(LensException) :
            assert(lens.get("1w23dffdf3") == "w23df")
        d("PUT")
        assert(lens.put("R2D2") == "R2D2")
        with assert_raises(LensException) :
            lens.put("2234") == "R2D2"
        # XXX: Should fail if length checking working correctly.
        #with assert_raises(LensException) :
        #    lens.put("TooL0ng")
        d("Test with no type")
        lens = Word(alphanums, init_chars=alphas, max_count=5, default="a123d")
        assert(lens.get("w23dffdf3") == None)
        concrete_input_reader = ConcreteInputReader("ab12_3456")
        assert(lens.put(None, concrete_input_reader) == "ab12")
        assert(concrete_input_reader.get_remaining() == "_3456")
        assert(lens.put() == "a123d")
class Whitespace(Or) :
    """
    Whitespace helper lens, that knows how to handle (logically) continued lines with '\\n'
    or that preclude an indent which are useful for certain config files.

    Args:
        default: the whitespace string produced when PUTting with no input;
            an empty string also makes the lens optional.
        optional: if True, matching no whitespace at all is accepted.
        space_chars: the characters treated as plain whitespace.
        slash_continuation: also accept backslash-newline continued lines.
        indent_continuation: also accept a newline followed by an indent.
    """
    def __init__(self, default=" ", optional=False, space_chars=" \t", slash_continuation=False, indent_continuation=False, **kargs):
        # Ensure default gets passed up to parent class - we use default to
        # determine if this lens is optional
        if "type" in kargs and has_value(kargs["type"]):
            # XXX: Could adapt this for storing spaces, though to be useful would need
            # to construct in such a way as to combine chars.
            assert_msg(False, "This lens cannot be used as a STORE lens")

        # XXX: This could be used later when we wish to make this a STORE lens.
        word_type = None

        # TODO: Could also use default to switch on, say, indent_continuation.

        # Set-up a lens that literally matches space.
        spaces = Word(space_chars, type=word_type, name="spaces")

        or_lenses = []

        # Optionally, augment with a slash continuation lens.
        if slash_continuation :
            or_lenses.append(Optional(spaces) + "\\\n" + Optional(spaces))

        # Optionally, augment with an indent continuation lens.
        if indent_continuation :
            or_lenses.append(Optional(spaces) + "\n" + spaces)

        # Lastly, add the straightforward spaces lens - since otherwise this would match before the others.
        or_lenses.append(spaces)

        # If the user set the default as the empty space, the Empty must also be a valid lens.
        if default == "" or optional:
            or_lenses.append(Empty())

        # Set up kargs for Or.
        kargs["default"] = default
        super(Whitespace, self).__init__(*or_lenses, **kargs)

    @staticmethod
    def TESTS() :
        GlobalSettings.check_consumption = False

        # Simple whitespace.
        lens = Whitespace(" ")
        concrete_input_reader = ConcreteInputReader("  \t  xyz")
        assert(lens.get(concrete_input_reader) == None and concrete_input_reader.get_remaining() == "xyz")
        assert(lens.put() == " ")

        # Test that the Empty lens is valid when the default space is set to empty string (i.e. not space).
        lens = Whitespace("")
        assert(lens.get("xyz") == None)
        assert(lens.put() == "")

        # With slash continuation.
        lens = Whitespace(" ", slash_continuation=True)
        concrete_input_reader = ConcreteInputReader("  \t\\\n  xyz")
        assert(lens.get(concrete_input_reader) == None and concrete_input_reader.get_remaining() == "xyz")

        # With indent continuation.
        lens = Whitespace(" ", indent_continuation=True)
        concrete_input_reader = ConcreteInputReader("  \n xyz")
        assert(lens.get(concrete_input_reader) == None and concrete_input_reader.get_remaining() == "xyz")

WS = Whitespace # Abbreviation.
class NullLens(Lens) :
    """
    When writing new lenses, particularly in a top-down fashion, this lens is
    useful for filling in lens branches that are yet to be completed.
    """
    def _get(self, concrete_input_reader) :
        # Always fails: GET on a placeholder branch is an error by design.
        raise LensException("NullLens always fails, and is useful as a filler for the incremental writing of lenses.")
    def _put(self, abstract_token, concrete_input_reader) :
        # Always fails: PUT on a placeholder branch is an error by design.
        raise LensException("NullLens always fails, and is useful as a filler for the incremental writing of lenses.")
    # He, he. I won't test this one.
class KeyValue(Group) :
    """
    Simply sets up the Group as an auto_list, which is useful when we just wish
    to store a value by a key.
    """
    def __init__(self, *args, **kargs):
        # Default to a list container flattened by auto_list, so a single
        # item passes straight through the lens; explicit settings win.
        kargs.setdefault("type", list)
        kargs.setdefault("auto_list", True)
        super(KeyValue, self).__init__(*args, **kargs)
class BlankLine(And) :
    """Matches a blank line: optional whitespace followed by a NewLine()."""

    def __init__(self, **kargs):
        # WS("") gives whitespace with an empty default, i.e. optional.
        leading_space = WS("")
        super(BlankLine, self).__init__(leading_space, NewLine(), **kargs)
class Keyword(Word) :
    """
    A lens for matching a typical keyword.
    """
    def __init__(self, additional_chars="_", **kargs):
        # Identifier-style: digits allowed in the body but not at the start.
        body = alphanums + additional_chars
        initial = alphas + additional_chars
        super(Keyword, self).__init__(body, init_chars=initial, **kargs)
class AutoGroup(Group):
    """
    Sometimes it may be convenient to not explicitly set a type on an outer lens
    in order to extract one or more items from sub-lenses, so this lens allows an
    outer container to be set automatically, using auto_list such that a single
    item may be passed through the lens. If the enclosed lens has a type, then
    this lens simply becomes a transparent wrapper.
    """
    def __init__(self, lens, **kargs):
        """Note, this replaces __init__ of Group, which checks for a type."""
        # Only supply a container when the enclosed lens has no type of its own.
        if not lens.has_type() :
            kargs["type"] = list
            kargs["auto_list"] = True
        # Deliberately calls super(Group, ...) -- Group's own parent -- so
        # that Group.__init__'s type check is skipped (see docstring above).
        super(Group, self).__init__(**kargs)
        self.extend_sublenses([lens])
class HashComment(And) :
    """A common hash comment."""

    def __init__(self, **kargs):
        # '#', then everything up to the newline, then the newline itself.
        comment_body = Until(NewLine())
        super(HashComment, self).__init__("#", comment_body, NewLine(), **kargs)
| |
# Copyright (c) 2009-2015, Dmitry Vasiliev <dima@hlabs.org>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import errno
import unittest
from erlport.erlproto import Port
from erlport.erlterms import Atom
class TestPortClient(object):
    """Drives a Port over two OS pipes, playing the Erlang side of the link.

    The client's write end feeds the Port's read descriptor and the Port's
    write descriptor feeds the client's read end.
    """

    def __init__(self, **kwargs):
        port_read_end, self.out_d = os.pipe()
        self.in_d, port_write_end = os.pipe()
        self.port = Port(descriptors=(port_read_end, port_write_end), **kwargs)

    def read(self):
        # Large enough to drain anything the Port writes in these tests.
        return os.read(self.in_d, 65536)

    def write(self, data):
        return os.write(self.out_d, data)

    def close(self):
        os.close(self.in_d)
        os.close(self.out_d)
class PortTestCase(unittest.TestCase):
    """Tests for erlport.erlproto.Port: packet framing, pipe/stdio wiring,
    compression and error handling.

    NOTE(review): Python 2 style -- packets are built as ``str`` literals
    handed to ``os.write`` and the legacy ``assert_`` method is used; under
    Python 3 these would need ``bytes``/``assertTrue``. Confirm before porting.
    """

    def test_default_port_read(self):
        client = TestPortClient()
        # 4-byte big-endian length prefix + external-term-format Atom("test").
        self.assertEqual(12, client.write("\0\0\0\10\x83d\0\4test"))
        atom = client.port.read()
        self.assert_(isinstance(atom, Atom))
        self.assertEqual(Atom("test"), atom)

    def test_default_port_write(self):
        client = TestPortClient()
        self.assertEqual(12, client.port.write(Atom("test")))
        self.assertEqual("\0\0\0\10\x83d\0\4test", client.read())

    def test_invalid_packet_value(self):
        # Only packet sizes 1, 2 and 4 are meaningful framing widths.
        self.assertRaises(ValueError, Port, packet=0)
        self.assertRaises(ValueError, Port, packet=3)

    def test_use_stdio(self):
        # stdin/stdout descriptors are the default.
        port = Port()
        self.assertEqual(0, port.in_d)
        self.assertEqual(1, port.out_d)
        port = Port(use_stdio=True)
        self.assertEqual(0, port.in_d)
        self.assertEqual(1, port.out_d)

    def test_nouse_stdio(self):
        # Erlang's "nouse_stdio" convention uses descriptors 3 and 4.
        port = Port(use_stdio=False)
        self.assertEqual(3, port.in_d)
        self.assertEqual(4, port.out_d)

    def test_descriptors(self):
        port = Port(descriptors=(10, 20))
        self.assertEqual(10, port.in_d)
        self.assertEqual(20, port.out_d)

    def test_port_close(self):
        client = TestPortClient()
        client.port.close()
        self.assertRaises(OSError, client.write, "data")
        self.assertEqual("", client.read())

    def test_closed_port(self):
        client = TestPortClient()
        client.close()
        self.assertRaises(EOFError, client.port.read)
        self.assertRaises(EOFError, client.port.write, "data")

    def test_read_multiple_terms(self):
        # Two packets written back to back must be returned one at a time.
        client = TestPortClient()
        atom_data = "\0\0\0\10\x83d\0\4test"
        self.assertEqual(24, client.write(atom_data + atom_data))
        atom = client.port.read()
        self.assert_(isinstance(atom, Atom))
        self.assertEqual(Atom("test"), atom)
        atom = client.port.read()
        self.assert_(isinstance(atom, Atom))
        self.assertEqual(Atom("test"), atom)

    def test_small_buffer_read(self):
        # A 1-byte buffer forces the Port to reassemble the packet piecewise.
        client = TestPortClient(buffer_size=1)
        self.assertEqual(12, client.write("\0\0\0\10\x83d\0\4test"))
        atom = client.port.read()
        self.assert_(isinstance(atom, Atom))
        self.assertEqual(Atom("test"), atom)

    def test_invalid_buffer_size(self):
        self.assertRaises(ValueError, Port, buffer_size=0)

    def test_packet4_port_read(self):
        client = TestPortClient(packet=4)
        self.assertEqual(12, client.write("\0\0\0\10\x83d\0\4test"))
        atom = client.port.read()
        self.assert_(isinstance(atom, Atom))
        self.assertEqual(Atom("test"), atom)

    def test_packet4_port_write(self):
        client = TestPortClient(packet=4)
        self.assertEqual(12, client.port.write(Atom("test")))
        self.assertEqual("\0\0\0\10\x83d\0\4test", client.read())

    def test_packet2_port_read(self):
        # Same payload, 2-byte length prefix.
        client = TestPortClient(packet=2)
        self.assertEqual(10, client.write("\0\10\x83d\0\4test"))
        atom = client.port.read()
        self.assert_(isinstance(atom, Atom))
        self.assertEqual(Atom("test"), atom)

    def test_packet2_port_write(self):
        client = TestPortClient(packet=2)
        self.assertEqual(10, client.port.write(Atom("test")))
        self.assertEqual("\0\10\x83d\0\4test", client.read())

    def test_packet1_port_read(self):
        # Same payload, 1-byte length prefix.
        client = TestPortClient(packet=1)
        self.assertEqual(9, client.write("\10\x83d\0\4test"))
        atom = client.port.read()
        self.assert_(isinstance(atom, Atom))
        self.assertEqual(Atom("test"), atom)

    def test_packet1_port_write(self):
        client = TestPortClient(packet=1)
        self.assertEqual(9, client.port.write(Atom("test")))
        self.assertEqual("\10\x83d\0\4test", client.read())

    def test_compressed_port_read(self):
        # Pre-built zlib-compressed external term: a list of five [46] lists.
        client = TestPortClient(packet=1, compressed=True)
        self.assertEqual(26, client.write("\x19\x83P\0\0\0\x1a\x78\x9c\xcb\x61"
            "\x60\x60\x60\xcd\x66\x60\xd4\x43\xc7\x59\0\x30\x48\3\xde"))
        self.assertEqual([[46], [46], [46], [46], [46]], client.port.read())

    def test_compressed_port_write(self):
        client = TestPortClient(packet=1, compressed=True)
        self.assertEqual(26, client.port.write([[46], [46], [46], [46], [46]]))
        self.assertEqual("\x19\x83P\0\0\0\x1a\x78\x9c\xcb\x61"
            "\x60\x60\x60\xcd\x66\x60\xd4\x43\xc7\x59\0\x30\x48\3\xde",
            client.read())

    def test_slow_write(self):
        # Monkeypatch os.write to dribble one byte per call; Port must loop
        # until the whole packet is out and still report the full length.
        write = os.write
        os.write = lambda d, data: 1
        try:
            port = Port(packet=1)
            self.assertEqual(9, port.write(Atom("test")))
        finally:
            os.write = write

    def test_no_data_written(self):
        # A zero-length write signals a closed peer -> EOFError.
        write = os.write
        os.write = lambda d, data: 0
        try:
            port = Port()
            self.assertRaises(EOFError, port.write, "test")
        finally:
            os.write = write

    def test_error_on_write(self):
        # Generic OSError from os.write must propagate unchanged.
        def test_write(d, data):
            raise OSError()
        write = os.write
        os.write = test_write
        try:
            port = Port()
            self.assertRaises(OSError, port.write, "test")
        finally:
            os.write = write

    def test_error_on_read(self):
        # Generic OSError from os.read must propagate unchanged.
        def test_read(d, buffer_size):
            raise OSError()
        read = os.read
        os.read = test_read
        try:
            port = Port()
            self.assertRaises(OSError, port.read)
        finally:
            os.read = read

    def test_close_on_read(self):
        # EPIPE specifically is translated into EOFError.
        def test_read(d, buffer_size):
            raise OSError(errno.EPIPE, "Pipe closed")
        read = os.read
        os.read = test_read
        try:
            port = Port()
            self.assertRaises(EOFError, port.read)
        finally:
            os.read = read
def get_suite():
    """Assemble and return the test suite for this module."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromTestCase(PortTestCase))
    return suite
| |
from statsmodels.compat.python import (range, StringIO, urlopen, HTTPError, lrange,
cPickle)
import sys
import shutil
from os import environ
from os import makedirs
from os.path import basename
from os.path import expanduser
from os.path import exists
from os.path import expanduser
from os.path import join
import time
import numpy as np
from numpy import genfromtxt, array
from pandas import read_csv
class Dataset(dict):
    """Dict-like dataset container whose keys are also attributes.

    After construction the instance's ``__dict__`` *is* the dict itself,
    so every keyword passed to ``__init__`` is reachable both as
    ``ds['key']`` and ``ds.key``.
    """

    def __init__(self, **kw):
        # define some default attributes, so pylint can find them
        self.endog = None
        self.exog = None
        self.data = None
        self.names = None
        dict.__init__(self, kw)
        # Alias the mapping as the instance __dict__ so keyword entries
        # double as attributes (note: the defaults set above live in the
        # *old* __dict__ and are shadowed by this assignment).
        self.__dict__ = self
        # Some datasets have string variables. If you want a raw_data
        # attribute you must create this in the dataset's load function.
        try:  # some datasets have string variables
            self.raw_data = self.data.view((float, len(self.names)))
        except (AttributeError, TypeError, ValueError):
            # best effort only: missing/None data, string columns, or an
            # incompatible dtype simply means no raw_data is attached
            pass

    def __repr__(self):
        return str(self.__class__)
def process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):
    """Split a record array into endogenous and exogenous arrays.

    Parameters
    ----------
    data : recarray
        Structured array whose fields are the variables.
    endog_idx : int or sequence of int
        Position(s) of the endogenous variable(s) in ``data.dtype.names``.
    exog_idx : sequence of int or None
        Positions of the exogenous variables; all remaining fields if None.
    stack : bool
        If True, column-stack the selected fields into 2-D ndarrays.
    dtype : dtype, optional
        Cast endog/exog to this dtype.

    Returns
    -------
    Dataset
    """
    names = list(data.dtype.names)

    if isinstance(endog_idx, int):
        endog = array(data[names[endog_idx]], dtype=dtype)
        endog_name = names[endog_idx]
        endog_idx = [endog_idx]
    else:
        endog_name = [names[i] for i in endog_idx]
        if stack:
            # pass a list, not a generator: np.column_stack requires a
            # sequence of arrays (generators are rejected by modern numpy)
            endog = np.column_stack([data[field] for field in endog_name])
        else:
            endog = data[endog_name]

    if exog_idx is None:
        exog_name = [names[i] for i in range(len(names))
                     if i not in endog_idx]
    else:
        exog_name = [names[i] for i in exog_idx]

    if stack:
        exog = np.column_stack([data[field] for field in exog_name])
    else:
        exog = data[exog_name]

    if dtype:
        endog = endog.astype(dtype)
        exog = exog.astype(dtype)

    dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
                      endog_name=endog_name, exog_name=exog_name)
    return dataset
def process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None,
                            index_idx=None):
    """Split a record array into pandas endog/exog objects.

    Same contract as :func:`process_recarray` but returns pandas
    Series/DataFrames; ``index_idx`` selects a column to use as the index.
    """
    from pandas import DataFrame

    data = DataFrame(data, dtype=dtype)
    names = data.columns

    if isinstance(endog_idx, int):
        endog_name = names[endog_idx]
        endog = data[endog_name]
        if exog_idx is None:
            exog = data.drop([endog_name], axis=1)
        else:
            exog = data.filter(names[exog_idx])
    else:
        # .ix was removed from pandas; .iloc provides the same positional
        # selection for integer indexers
        endog = data.iloc[:, endog_idx]
        endog_name = list(endog.columns)
        if exog_idx is None:
            exog = data.drop(endog_name, axis=1)
        elif isinstance(exog_idx, int):
            exog = data.filter([names[exog_idx]])
        else:
            exog = data.filter(names[exog_idx])

    if index_idx is not None:  # NOTE: will have to be improved for dates
        from pandas import Index
        endog.index = Index(data.iloc[:, index_idx])
        exog.index = Index(data.iloc[:, index_idx])
        data = data.set_index(names[index_idx])

    exog_name = list(exog.columns)
    dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog,
                      endog_name=endog_name, exog_name=exog_name)
    return dataset
def _maybe_reset_index(data):
"""
All the Rdatasets have the integer row.labels from R if there is no
real index. Strip this for a zero-based index
"""
from pandas import Index
if data.index.equals(Index(lrange(1,len(data)+1))):
data = data.reset_index(drop=True)
return data
def _get_cache(cache):
    """Normalize the ``cache`` flag/path to a cache directory or None."""
    if cache is False:
        # caching disabled entirely
        return None
    if cache is True:
        # True means "use the default data-home directory"
        return get_data_home(None)
    # otherwise treat the argument as an explicit cache path
    return get_data_home(cache)
def _cache_it(data, cache_path):
    """Pickle, compress and write ``data`` to ``cache_path``."""
    if sys.version_info[0] >= 3:
        # for some reason encode("zip") won't work for me in Python 3?
        import zlib
        # use protocol 2 so can open with python 2.x if cached in 3.x
        payload = zlib.compress(cPickle.dumps(data, protocol=2))
    else:
        payload = cPickle.dumps(data).encode("zip")
    # close the file deterministically instead of leaking the handle
    with open(cache_path, "wb") as fobj:
        fobj.write(payload)
def _open_cache(cache_path):
    """Read, decompress and unpickle a cached download from ``cache_path``."""
    if sys.version_info[0] >= 3:
        # NOTE: don't know why but decode('zip') doesn't work on my
        # Python 3 build
        import zlib
        data = zlib.decompress(open(cache_path, 'rb').read())
        # return as bytes object encoded in utf-8 for cross-compat of cached
        # NOTE(review): if the unpickled object is already bytes this
        # .encode('utf-8') call would raise AttributeError -- presumably the
        # cached payload is str here; verify against _urlopen_cached/_cache_it
        data = cPickle.loads(data).encode('utf-8')
    else:
        data = open(cache_path, 'rb').read().decode('zip')
        data = cPickle.loads(data)
    return data
def _urlopen_cached(url, cache):
    """
    Tries to load data from cache location otherwise downloads it. If it
    downloads the data and cache is not None then it will put the downloaded
    data in the cache path.
    """
    from_cache = False
    if cache is not None:
        cache_path = join(cache,
                          url.split("://")[-1].replace('/', ',') + ".zip")
        try:
            data = _open_cache(cache_path)
            from_cache = True
        except Exception:
            # a missing or corrupt cache entry just means we re-download;
            # never let cache problems break the fetch itself (but don't
            # use a bare except, which would also swallow KeyboardInterrupt)
            pass
    # not using the cache or didn't find it in cache
    if not from_cache:
        data = urlopen(url).read()
        if cache is not None:  # then put it in the cache
            _cache_it(data, cache_path)
    return data, from_cache
def _get_data(base_url, dataname, cache, extension="csv"):
    """Download ``dataname`` with the given extension, honoring the cache.

    Returns a ``(StringIO, from_cache)`` pair; raises ValueError when the
    dataset does not exist (HTTP 404).
    """
    url = base_url + (dataname + ".%s") % extension
    try:
        raw, from_cache = _urlopen_cached(url, cache)
    except HTTPError as err:
        # translate a plain 404 into a friendlier error for the caller
        if '404' in str(err):
            raise ValueError("Dataset %s was not found." % dataname)
        raise err
    text = raw.decode('utf-8', 'strict')
    return StringIO(text), from_cache
def _get_dataset_meta(dataname, package, cache):
    """Return the title of ``dataname`` from the Rdatasets master index."""
    # get the index, you'll probably want this cached because you have
    # to download info about all the data to get info about any of the data...
    index_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/master/"
                 "datasets.csv")
    data, _ = _urlopen_cached(index_url, cache)
    # Python 3 -- use version_info like the rest of this module
    # (sys.version is a free-form string and fragile to compare)
    if sys.version_info[0] >= 3:  # pragma: no cover
        data = data.decode('utf-8', 'strict')
    index = read_csv(StringIO(data))
    idx = np.logical_and(index.Item == dataname, index.Package == package)
    # .ix was removed from pandas; .loc handles the boolean mask
    dataset_meta = index.loc[idx]
    return dataset_meta["Title"].item()
def get_rdataset(dataname, package="datasets", cache=False):
    """download and return R dataset

    Parameters
    ----------
    dataname : str
        The name of the dataset you want to download
    package : str
        The package in which the dataset is found. The default is the core
        'datasets' package.
    cache : bool or str
        If True, will download this data into the STATSMODELS_DATA folder.
        The default location is a folder called statsmodels_data in the
        user home folder. Otherwise, you can specify a path to a folder to
        use for caching the data. If False, the data will not be cached.

    Returns
    -------
    dataset : Dataset instance
        A `statsmodels.data.utils.Dataset` instance. This objects has
        attributes::

        * data - A pandas DataFrame containing the data
        * title - The dataset title
        * package - The package from which the data came
        * from_cache - Whether not cached data was retrieved
        * __doc__ - The verbatim R documentation.

    Notes
    -----
    If the R dataset has an integer index. This is reset to be zero-based.
    Otherwise the index is preserved. The caching facilities are dumb. That
    is, no download dates, e-tags, or otherwise identifying information
    is checked to see if the data should be downloaded again or not. If the
    dataset is in the cache, it's used.
    """
    # NOTE: use raw github bc html site might not be most up to date
    data_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
                     "master/csv/"+package+"/")
    docs_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/"
                     "master/doc/"+package+"/rst/")
    # normalize the cache flag/path to a directory (or None = no caching)
    cache = _get_cache(cache)
    data, from_cache = _get_data(data_base_url, dataname, cache)
    data = read_csv(data, index_col=0)
    data = _maybe_reset_index(data)

    # the title comes from the package-wide index, fetched separately
    title = _get_dataset_meta(dataname, package, cache)

    doc, _ = _get_data(docs_base_url, dataname, cache, "rst")

    return Dataset(data=data, __doc__=doc.read(), package=package, title=title,
                   from_cache=from_cache)
### The below function were taken from sklearn
def get_data_home(data_home=None):
    """Return the path of the statsmodels data dir.

    This folder is used by some large dataset loaders to avoid downloading
    the data several times. By default it is a folder named
    'statsmodels_data' in the user home folder; it can also be set via the
    'STATSMODELS_DATA' environment variable or by passing an explicit
    folder path ('~' is expanded). The folder is created if missing.
    """
    path = data_home
    if path is None:
        # fall back to the env var, then to ~/statsmodels_data
        path = environ.get('STATSMODELS_DATA',
                           join('~', 'statsmodels_data'))
    path = expanduser(path)
    if not exists(path):
        makedirs(path)
    return path
def clear_data_home(data_home=None):
    """Delete all the content of the data home cache."""
    # resolve the directory exactly like get_data_home, then remove it
    shutil.rmtree(get_data_home(data_home))
| |
#!/usr/bin/env python
# coding: utf-8
from datetime import datetime
from distutils import spawn
import argparse
import json
import os
import platform
import shutil
import socket
import sys
import urllib
import urllib2
__version__ = '6.2.1'

###############################################################################
# Options
###############################################################################
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    '-d', '--dependencies', dest='install_dependencies', action='store_true',
    help='install virtualenv and python dependencies',
)
PARSER.add_argument(
    '-s', '--start', dest='start', action='store_true',
    help='starts the dev_appserver.py with storage_path pointing to temp',
)
PARSER.add_argument(
    '-o', '--host', dest='host', action='store', default='127.0.0.1',
    help='the host to start the dev_appserver.py',
)
PARSER.add_argument(
    '-p', '--port', dest='port', action='store', default='8080',
    help='the port to start the dev_appserver.py',
)
PARSER.add_argument(
    '--appserver-args', dest='args', nargs=argparse.REMAINDER, default=[],
    help='all following args are passed to dev_appserver.py',
)
PARSER.add_argument(
    '-v', '--version', dest='show_version', action='store_true',
    help='Show gae-init version',
)
# NOTE: args are parsed at import time, so importing this module requires a
# valid command line
ARGS = PARSER.parse_args()

###############################################################################
# Globals
###############################################################################
BAD_ENDINGS = ['pyc', 'pyo', '~']
GAE_PATH = ''  # cached by find_gae_path() on first lookup
IS_WINDOWS = platform.system() == 'Windows'

###############################################################################
# Directories
###############################################################################
DIR_MAIN = 'main'
DIR_TEMP = 'temp'
DIR_VENV = os.path.join(DIR_TEMP, 'venv')
DIR_LIB = os.path.join(DIR_MAIN, 'lib')
DIR_LIBX = os.path.join(DIR_MAIN, 'libx')
FILE_LIB = '%s.zip' % DIR_LIB
FILE_REQUIREMENTS = 'requirements.txt'
FILE_PIP_GUARD = os.path.join(DIR_TEMP, 'pip.guard')
# the venv activation script lives in a different subdir per platform
FILE_VENV = os.path.join(DIR_VENV, 'Scripts', 'activate.bat') \
    if IS_WINDOWS \
    else os.path.join(DIR_VENV, 'bin', 'activate')
DIR_STORAGE = os.path.join(DIR_TEMP, 'storage')
FILE_UPDATE = os.path.join(DIR_TEMP, 'update.json')

###############################################################################
# Other global variables
###############################################################################
CORE_VERSION_URL = 'https://gae-init.appspot.com/_s/version/'
INTERNET_TEST_URL = 'https://www.google.com'
REQUIREMENTS_URL = 'http://docs.gae-init.appspot.com/requirement/'
TRAVIS = 'TRAVIS' in os.environ
###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
    """Print a timestamped, aligned status line for ``script``."""
    stamp = datetime.now().strftime('%H:%M:%S')
    if not filename:
        # no filename: show a dashed ruler and pad the script with dashes
        filename = '-' * 46
        script = script.rjust(12, '-')
    print('[%s] %12s %s' % (stamp, script, filename))
def make_dirs(directory):
    """Create ``directory`` (including parents) if it doesn't exist yet."""
    try:
        os.makedirs(directory)
    except OSError:
        # EAFP avoids the check-then-create race of the old
        # "if not exists: makedirs" pattern; only re-raise when the
        # directory genuinely could not be created
        if not os.path.isdir(directory):
            raise
def listdir(directory, split_ext=False):
    """List directory entries, optionally without extensions; [] on error."""
    try:
        entries = os.listdir(directory)
    except OSError:
        # a missing or unreadable directory behaves like an empty one
        return []
    if split_ext:
        return [os.path.splitext(entry)[0] for entry in entries]
    return entries
def site_packages_path():
    """Return the virtualenv's site-packages directory for this platform."""
    if IS_WINDOWS:
        return os.path.join(DIR_VENV, 'Lib', 'site-packages')
    # POSIX layout embeds the interpreter version, e.g. lib/python2.7
    py_version = 'python%s.%s' % sys.version_info[:2]
    return os.path.join(DIR_VENV, 'lib', py_version, 'site-packages')
def create_virtualenv():
    """Create the project virtualenv once; always returns True.

    On first run this shells out to ``virtualenv``, patches the activate
    script to clear PYTHONPATH, and writes a ``gae.pth`` file so the GAE
    SDK and main/libx are importable from the venv.
    """
    if not os.path.exists(FILE_VENV):
        os.system('virtualenv --no-site-packages -p python2 %s' % DIR_VENV)
        # make activation reset PYTHONPATH so only the venv is on the path
        os.system('echo %s >> %s' % (
            'set PYTHONPATH=' if IS_WINDOWS else 'unset PYTHONPATH', FILE_VENV
        ))
        pth_file = os.path.join(site_packages_path(), 'gae.pth')
        echo_to = 'echo %s >> {pth}'.format(pth=pth_file)
        os.system(echo_to % find_gae_path())
        os.system(echo_to % os.path.abspath(DIR_LIBX))
    return True
def exec_pip_commands(command):
    """Run a pip ``command`` inside the activated virtualenv.

    Builds a small shell script (activate venv, echo the command, export a
    compat flag, run the command) and executes it via os.system; returns
    the shell's exit status.
    """
    script = []
    if create_virtualenv():
        activate_cmd = 'call %s' if IS_WINDOWS else 'source %s'
        activate_cmd %= FILE_VENV
        script.append(activate_cmd)

    script.append('echo %s' % command)
    script.append('%s SKIP_GOOGLEAPICLIENT_COMPAT_CHECK=1' %
                  ('set' if IS_WINDOWS else 'export'))
    script.append(command)
    # cmd.exe chains with '&'; POSIX runs the ';'-joined script in bash
    script = '&'.join(script) if IS_WINDOWS else \
        '/bin/bash -c "%s"' % ';'.join(script)
    return os.system(script)
def make_guard(fname, cmd, spec):
    """Write a marker file recording that ``cmd`` ran against ``spec``."""
    message = 'Prevents %s execution if newer than %s' % (cmd, spec)
    with open(fname, 'w') as handle:
        handle.write(message)
def guard_is_newer(guard, watched):
    """Return True when ``guard`` exists and is newer than ``watched``."""
    if not os.path.exists(guard):
        # no guard file yet: nothing to skip
        return False
    return os.path.getmtime(guard) > os.path.getmtime(watched)
def check_if_pip_should_run():
    """True when requirements.txt changed since the last pip guard write."""
    return not guard_is_newer(FILE_PIP_GUARD, FILE_REQUIREMENTS)
def install_py_libs():
    """Install requirements into the venv and copy packages into main/lib.

    Skips entirely when the pip guard is newer than requirements.txt and
    main/lib already exists. Returns the pip exit status (0 on success).
    """
    return_code = 0
    if not check_if_pip_should_run() and os.path.exists(DIR_LIB):
        return return_code

    make_guard_flag = True
    if TRAVIS:
        # verbose on CI to aid debugging
        return_code = exec_pip_commands('pip install -v -r %s' % FILE_REQUIREMENTS)
    else:
        return_code = exec_pip_commands('pip install -q -r %s' % FILE_REQUIREMENTS)
    if return_code:
        print('ERROR running pip install')
        # leave the guard stale so the next run retries the install
        make_guard_flag = False

    # packages/files that must not be copied into the deployable lib dir
    exclude_ext = ['.pth', '.pyc', '.egg-info', '.dist-info', '.so']
    exclude_prefix = ['setuptools-', 'pip-', 'Pillow-']
    exclude = ['test', 'tests', 'pip', 'setuptools', '_markerlib', 'PIL', 'easy_install.py']

    def _exclude_prefix(pkg):
        # True when pkg matches a blacklisted distribution prefix
        for prefix in exclude_prefix:
            if pkg.startswith(prefix):
                return True
        return False

    def _exclude_ext(pkg):
        # True when pkg has a blacklisted extension
        for ext in exclude_ext:
            if pkg.endswith(ext):
                return True
        return False

    def _get_dest(pkg):
        # destination path inside main/lib (created lazily)
        make_dirs(DIR_LIB)
        return os.path.join(DIR_LIB, pkg)

    site_packages = site_packages_path()
    # anything already present in lib/ or libx/ is not copied again
    dir_libs = listdir(DIR_LIB)
    dir_libs.extend(listdir(DIR_LIBX))
    for dir_ in listdir(site_packages):
        if dir_ in dir_libs or dir_ in exclude:
            continue
        if _exclude_prefix(dir_) or _exclude_ext(dir_):
            continue
        src_path = os.path.join(site_packages, dir_)
        copy = shutil.copy if os.path.isfile(src_path) else shutil.copytree
        copy(src_path, _get_dest(dir_))
    if make_guard_flag:
        make_guard(FILE_PIP_GUARD, 'pip', FILE_REQUIREMENTS)
    return return_code
def install_dependencies():
    """Ensure the temp dir exists, then install python libs; returns status."""
    make_dirs(DIR_TEMP)
    return install_py_libs()
def check_for_update():
    """Query the gae-init version endpoint at most once per day.

    The mtime of FILE_UPDATE acts as the "last checked" marker; the
    response JSON is stored in the same file for print_out_update().
    Network failures are deliberately ignored.
    """
    if os.path.exists(FILE_UPDATE):
        mtime = os.path.getmtime(FILE_UPDATE)
        last = datetime.utcfromtimestamp(mtime).strftime('%Y-%m-%d')
        today = datetime.utcnow().strftime('%Y-%m-%d')
        if last == today:
            # already checked today
            return
    try:
        # touch the marker first so a failed request still counts as checked
        with open(FILE_UPDATE, 'a'):
            os.utime(FILE_UPDATE, None)
        request = urllib2.Request(
            CORE_VERSION_URL,
            urllib.urlencode({'version': __version__}),
        )
        response = urllib2.urlopen(request)
        with open(FILE_UPDATE, 'w') as update_json:
            update_json.write(response.read())
    except (urllib2.HTTPError, urllib2.URLError):
        pass
def print_out_update(force_show=False):
    """Print an update notice when the cached version info is newer.

    NOTE(review): this reaches into pip internals (pip.util.version /
    pip._vendor.distlib.version) for a SemanticVersion class -- fragile
    across pip releases; verify against the installed pip version.
    """
    try:
        import pip
        SemVer = pip.util.version.SemanticVersion
    except AttributeError:
        # newer pip moved/removed pip.util; fall back to the vendored distlib
        import pip._vendor.distlib.version
        SemVer = pip._vendor.distlib.version.SemanticVersion

    try:
        with open(FILE_UPDATE, 'r') as update_json:
            data = json.load(update_json)
        if SemVer(__version__) < SemVer(data['version']) or force_show:
            print_out('UPDATE')
            print_out(data['version'], 'Latest version of gae-init')
            print_out(__version__, 'Your version is a bit behind')
            print_out('CHANGESET', data['changeset'])
    except (ValueError, KeyError):
        # corrupt/partial cache file: drop it so the next run re-fetches
        os.remove(FILE_UPDATE)
    except IOError:
        # no cache file yet -- nothing to report
        pass
###############################################################################
# Doctor
###############################################################################
def internet_on():
    """Best-effort connectivity probe against INTERNET_TEST_URL (2s timeout)."""
    try:
        urllib2.urlopen(INTERNET_TEST_URL, timeout=2)
        return True
    except (urllib2.URLError, socket.timeout):
        return False
def check_requirement(check_func):
    """Run one doctor check and report a failure with its help link.

    ``check_func`` returns ``(ok, name, help_url_id)``; the function
    returns the boolean outcome.
    """
    result, name, help_url_id = check_func()
    if result:
        return True
    print_out('NOT FOUND', name)
    if help_url_id:
        print('Please see %s%s' % (REQUIREMENTS_URL, help_url_id))
    return False
def find_gae_path():
    """Locate the Google App Engine SDK directory, caching the result.

    Searches PATH for dev_appserver.py; when the hit belongs to the Cloud
    SDK (a gcloud executable sits next to it) the bundled
    platform/google_appengine directory is used instead. Returns '' when
    the SDK cannot be found.
    """
    global GAE_PATH
    if GAE_PATH:
        # already resolved on a previous call
        return GAE_PATH
    if IS_WINDOWS:
        gae_path = None
        for path in os.environ['PATH'].split(os.pathsep):
            if os.path.isfile(os.path.join(path, 'dev_appserver.py')):
                gae_path = path
    else:
        gae_path = spawn.find_executable('dev_appserver.py')
        if gae_path:
            gae_path = os.path.dirname(os.path.realpath(gae_path))
    if not gae_path:
        return ''
    gcloud_exec = 'gcloud.cmd' if IS_WINDOWS else 'gcloud'
    if not os.path.isfile(os.path.join(gae_path, gcloud_exec)):
        # standalone SDK install: dev_appserver.py lives in the SDK root
        GAE_PATH = gae_path
    else:
        # Cloud SDK install: the GAE SDK is nested under platform/
        gae_path = os.path.join(gae_path, '..', 'platform', 'google_appengine')
        if os.path.exists(gae_path):
            GAE_PATH = os.path.realpath(gae_path)
    return GAE_PATH
# Each checker returns (ok, display_name, help_url_id) for check_requirement.
def check_internet():
    """Check network connectivity (no help link)."""
    return internet_on(), 'Internet', ''


def check_gae():
    """Check that the Google App Engine SDK is discoverable."""
    return bool(find_gae_path()), 'Google App Engine SDK', '#gae'


def check_git():
    """Check that git is on PATH."""
    return bool(spawn.find_executable('git')), 'Git', '#git'


def check_nodejs():
    """Check that node is on PATH."""
    return bool(spawn.find_executable('node')), 'Node.js', '#nodejs'


def check_pip():
    """Check that pip is on PATH."""
    return bool(spawn.find_executable('pip')), 'pip', '#pip'


def check_virtualenv():
    """Check that virtualenv is on PATH."""
    return bool(spawn.find_executable('virtualenv')), 'virtualenv', '#virtualenv'
def doctor_says_ok():
    """Run every local tool check; exit(1) on failure, then probe internet."""
    checkers = [check_gae, check_git, check_nodejs, check_pip, check_virtualenv]
    results = [check_requirement(check) for check in checkers]
    if not all(results):
        # a missing local tool is fatal
        sys.exit(1)
    return check_requirement(check_internet)
###############################################################################
# Main
###############################################################################
def run_start():
    """Launch dev_appserver.py for main/ with storage under temp/."""
    make_dirs(DIR_STORAGE)
    port = int(ARGS.port)
    run_command = ' '.join(map(str, [
        'dev_appserver.py',
        DIR_MAIN,
        '--host %s' % ARGS.host,
        '--port %s' % port,
        # admin console conventionally sits one port above the app
        '--admin_port %s' % (port + 1),
        '--storage_path=%s' % DIR_STORAGE,
        '--skip_sdk_update_check',
    ] + ARGS.args))
    os.system(run_command)
def run():
    """Entry point: doctor checks, dependency install, update notice, start."""
    return_code = 0
    if len(sys.argv) == 1 or (ARGS.args and not ARGS.start):
        # no action requested (or appserver args without --start): show help
        PARSER.print_help()
        sys.exit(1)
    # operate relative to this script's directory regardless of CWD
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    if doctor_says_ok():
        return_code |= install_dependencies()
        check_for_update()
    if ARGS.show_version:
        print_out_update(force_show=True)
    else:
        print_out_update()
    if ARGS.start:
        run_start()
    if ARGS.install_dependencies:
        return_code |= install_dependencies()
    sys.exit(return_code)
# script entry point
if __name__ == '__main__':
    run()
| |
import sys
import os
import pickle
from itertools import chain
from threading import current_thread
from pulsar import HaltServer, CommandError, MonitorStarted, system
from pulsar.utils.log import WritelnDecorator
from .events import EventHandler
from .proxy import ActorProxy, ActorProxyMonitor, actor_identity
from .mailbox import command_in_context
from .access import get_actor
from .cov import Coverage
from .consts import * # noqa
__all__ = ['is_actor', 'send', 'spawn',
'Actor', 'ACTOR_STATES', 'get_stream']
def is_actor(obj):
    """Return ``True`` when ``obj`` is an :class:`Actor` instance."""
    return isinstance(obj, Actor)
def get_stream(cfg):
    '''Obtain the python stream handler given a config dictionary.
    '''
    # NOTE(review): cfg is currently unused -- stderr is always wrapped;
    # presumably kept for a future config-driven stream choice
    stream = sys.stderr
    return WritelnDecorator(stream)
def send(target, action, *args, **params):
    '''Send a :ref:`message <api-remote_commands>` to ``target``.

    The message asks ``target`` to perform ``action``; the sending actor
    is the one returned by :func:`get_actor`.

    :parameter target: the :class:`Actor` id, an :class:`.ActorProxy` or
        the name of the target actor receiving the message.
    :parameter action: the :ref:`remote command <api-remote_commands>`
        to perform in the ``target`` :class:`Actor`.
    :parameter args: positional arguments for the remote command.
    :parameter params: keyword parameters for the remote command.
    :return: an :class:`~asyncio.Future` if the action acknowledges the
        caller, otherwise `None`.

    Typical example::

        >>> r = send(p,'ping')
        >>> r.result()
        'pong'
    '''
    sender = get_actor()
    # guard clause: messaging requires a running actor context
    if not sender:
        raise RuntimeError('No actor available, cannot send messages')
    return sender.send(target, action, *args, **params)
def spawn(**kwargs):
    '''Spawn a new :class:`.Actor` and return an :class:`.ActorProxyFuture`.

    **Parameter kwargs**

    These optional parameters are:

    * ``aid`` the actor id
    * ``name`` the actor name
    * :ref:`actor hooks <actor-hooks>` such as ``start``, ``stopping``
      and ``periodic_task``

    :return: an :class:`.ActorProxyFuture`.

    A typical usage::

        >>> def do_something(actor):
        ...
        >>> a = spawn(start=do_something, ...)
        >>> a.aid
        'ba42b02b'
        >>> a.called
        True
        >>> p = a.result()
        >>> p.address
        ('127.0.0.1', 46691)
    '''
    spawner = get_actor()
    # guard clause: spawning requires a running actor context
    if not spawner:
        raise RuntimeError('No actor available, cannot spawn')
    return spawner.spawn(**kwargs)
class Actor(EventHandler, Coverage):
    '''The base class for parallel execution in pulsar.

    In computer science, the **Actor model** is a mathematical model
    of concurrent computation that treats *actors* as the universal primitives
    of computation.
    In response to a message that it receives, an actor can make local
    decisions, create more actors, send more messages, and determine how
    to respond to the next message received.
    The current implementation allows for actors to perform specific tasks
    such as listening to a socket, acting as http server, consuming
    a task queue and so forth.

    To spawn a new actor::

        >>> from pulsar import spawn
        >>> a = spawn()
        >>> a.is_alive()
        True

    Here ``a`` is actually a reference to the remote actor, it is
    an :class:`.ActorProxy`.

    **ATTRIBUTES**

    .. attribute:: name

        The name of this :class:`Actor`.

    .. attribute:: aid

        Unique ID for this :class:`Actor`.

    .. attribute:: impl

        The :class:`.Concurrency` implementation for this :class:`Actor`.

    .. attribute:: _loop

        An :ref:`event loop <asyncio-event-loop>` which listen
        for input/output events on sockets or socket-like objects.
        It is the driver of the :class:`Actor`.
        If the :attr:`_loop` stops, the :class:`Actor` stops
        running and goes out of scope.

    .. attribute:: mailbox

        Used to send and receive :ref:`actor messages <tutorials-messages>`.

    .. attribute:: address

        The socket address for this :attr:`Actor.mailbox`.

    .. attribute:: proxy

        Instance of a :class:`.ActorProxy` holding a reference
        to this :class:`Actor`. The proxy is a lightweight representation
        of the actor which can be shared across different processes
        (i.e. it is picklable).

    .. attribute:: state

        The actor :ref:`numeric state <actor-states>`.

    .. attribute:: extra

        A dictionary which can be populated with extra parameters useful
        for other actors. This dictionary is included in the dictionary
        returned by the :meth:`info` method.
        Check the :ref:`info command <actor_info_command>` for how to obtain
        information about an actor.

    .. attribute:: info_state

        Current state description string. One of ``initial``, ``running``,
        ``stopping``, ``closed`` and ``terminated``.

    .. attribute:: next_periodic_task

        The :class:`asyncio.Handle` for the next
        :ref:`actor periodic task <actor-periodic-task>`.

    .. attribute:: stream

        A ``stream`` handler to write information messages without using
        the :attr:`~.AsyncObject.logger`.
    '''
    ONE_TIME_EVENTS = ('start', 'stopping')
    MANY_TIMES_EVENTS = ('on_info', 'on_params', 'periodic_task')

    # class-level defaults so instances/subclasses may override lazily
    exit_code = None
    mailbox = None
    monitor = None
    next_periodic_task = None

    def __init__(self, impl):
        """Initialise the actor from its :class:`.Concurrency` ``impl``."""
        self.state = ACTOR_STATES.INITIAL
        self.__impl = impl
        self.servers = {}
        self.extra = {}
        self.stream = get_stream(self.cfg)
        self.tid = current_thread().ident
        self.pid = os.getpid()
        # pop event hooks out of impl.params before the generic attribute
        # copy below, so they are bound as events rather than attributes
        hooks = []
        for name in chain(self.ONE_TIME_EVENTS, self.MANY_TIMES_EVENTS):
            hook = impl.params.pop(name, None)
            if hook:
                hooks.append((name, hook))
        for name, value in impl.params.items():
            setattr(self, name, value)
        # params are consumed; drop them to avoid accidental reuse
        del impl.params
        super().__init__(impl.setup_event_loop(self))
        for name, hook in hooks:
            self.bind_event(name, hook)
        try:
            self.cfg.post_fork(self)
        except Exception:   # pragma    nocover
            pass

    def __repr__(self):
        return self.impl.unique_name

    def __str__(self):
        return self.__repr__()

    # ############################################################# PROPERTIES
    @property
    def name(self):
        """The name of this actor."""
        return self.__impl.name

    @property
    def aid(self):
        """Unique ID for this actor."""
        return self.__impl.aid

    @property
    def impl(self):
        """The :class:`.Concurrency` implementation for this actor."""
        return self.__impl

    @property
    def cfg(self):
        """The configuration object carried by the implementation."""
        return self.__impl.cfg

    @property
    def proxy(self):
        """A fresh picklable :class:`.ActorProxy` for this actor."""
        return ActorProxy(self)

    @property
    def address(self):
        """The socket address of this actor's mailbox."""
        return self.mailbox.address

    @property
    def info_state(self):
        """Human readable description of the current numeric state."""
        return ACTOR_STATES.DESCRIPTION[self.state]

    @property
    def monitors(self):
        '''Dictionary of monitors or None'''
        return self.__impl.monitors

    @property
    def managed_actors(self):
        '''Dictionary of managed actors or None'''
        return self.__impl.managed_actors

    @property
    def terminated_actors(self):
        '''Dictionary of terminated actors or None'''
        return self.__impl.terminated_actors

    @property
    def registered(self):
        '''Dictionary of registered actors or None'''
        return self.__impl.registered

    #######################################################################
    #    HIGH LEVEL API METHODS
    #######################################################################
    def start(self):
        '''Called after forking to start the actor's life.

        This is where logging is configured, the :attr:`mailbox` is
        registered and the :attr:`_loop` is initialised and
        started. Calling this method more than once does nothing.
        '''
        if self.state == ACTOR_STATES.INITIAL:
            self.__impl.before_start(self)
            self._started = self._loop.time()
            self.state = ACTOR_STATES.STARTING
            self._run()

    def send(self, target, action, *args, **kwargs):
        '''Send a message to ``target`` to perform ``action`` with given
        positional ``args`` and key-valued ``kwargs``.

        Returns a coroutine or a Future.
        '''
        target = self.monitor if target == 'monitor' else target
        mailbox = self.mailbox
        if isinstance(target, ActorProxyMonitor):
            mailbox = target.mailbox
        else:
            actor = self.get_actor(target)
            if isinstance(actor, Actor):
                # this occur when sending a message from arbiter to monitors
                # or vice-versa.
                return command_in_context(action, self, actor, args, kwargs)
            elif isinstance(actor, ActorProxyMonitor):
                mailbox = actor.mailbox
        if hasattr(mailbox, 'request'):
            # if not mailbox.closed:
            return mailbox.request(action, self, target, args, kwargs)
        else:
            raise CommandError('Cannot execute "%s" in %s. Unknown actor %s.'
                               % (action, self, target))

    def spawn(self, **params):
        '''Spawn a new actor
        '''
        return self.__impl.spawn(self, **params)

    def stop(self, exc=None, exit_code=None):
        '''Gracefully stop the :class:`Actor`.

        Implemented by the :meth:`.Concurrency.stop` method of the
        :attr:`impl` attribute.'''
        return self.__impl.stop(self, exc, exit_code)

    def add_monitor(self, monitor_name, **params):
        """Add a new monitor via the concurrency implementation."""
        return self.__impl.add_monitor(self, monitor_name, **params)

    def actorparams(self):
        '''Returns a dictionary of parameters for spawning actors.

        The disctionary is passed to the spawn method when creating new
        actors. Fire the :ref:`on_params actor hook <actor-hooks>`.
        '''
        data = {}
        self.fire_event('on_params', params=data)
        return data

    # ############################################################## STATES
    def is_running(self):
        '''``True`` if actor is running, that is when the :attr:`state`
        is equal to :ref:`ACTOR_STATES.RUN <actor-states>` and the loop is
        running.'''
        return self.state == ACTOR_STATES.RUN and self._loop.is_running()

    def started(self):
        '''``True`` if actor has started.

        It does not necessarily mean it is running.
        Its state is greater or equal :ref:`ACTOR_STATES.RUN <actor-states>`.
        '''
        return self.state >= ACTOR_STATES.RUN

    def closed(self):
        '''``True`` if actor has exited in an clean fashion.

        Its :attr:`state` is :ref:`ACTOR_STATES.CLOSE <actor-states>`.
        '''
        return self.state == ACTOR_STATES.CLOSE

    def stopped(self):
        '''``True`` if actor has exited.

        Its :attr:`state` is greater or equal to
        :ref:`ACTOR_STATES.CLOSE <actor-states>`.
        '''
        return self.state >= ACTOR_STATES.CLOSE

    def is_arbiter(self):
        '''``True`` if ``self`` is the ``arbiter``'''
        return self.__impl.is_arbiter()

    def is_monitor(self):
        '''``True`` if ``self`` is a ``monitor``'''
        return self.__impl.is_monitor()

    def is_process(self):
        '''boolean indicating if this is an actor on a child process.'''
        return self.__impl.is_process()

    def __reduce__(self):
        # actors hold live resources (loop, mailbox) and must never cross
        # process boundaries directly -- use .proxy instead
        raise pickle.PicklingError('{0} - Cannot pickle Actor instances'
                                   .format(self))

    #######################################################################
    #    INTERNALS
    #######################################################################
    def get_actor(self, aid, check_monitor=True):
        '''Given an actor unique id return the actor proxy.
        '''
        aid = actor_identity(aid)
        return self.__impl.get_actor(self, aid, check_monitor=check_monitor)

    def info(self):
        '''Return a nested dictionary of information related to the actor
        status and performance. The dictionary contains the following entries:

        * ``actor`` a dictionary containing information regarding the type of
          actor and its status.
        * ``events`` a dictionary of information about the
          :ref:`event loop <asyncio-event-loop>` running the actor.
        * ``extra`` the :attr:`extra` attribute (you can use it to add stuff).
        * ``system`` system info.

        This method is invoked when you run the
        :ref:`info command <actor_info_command>` from another actor.
        '''
        if not self.started():
            return
        isp = self.is_process()
        actor = {'name': self.name,
                 'state': self.info_state,
                 'actor_id': self.aid,
                 'uptime': self._loop.time() - self._started,
                 'thread_id': self.tid,
                 'process_id': self.pid,
                 'is_process': isp,
                 'age': self.impl.age}
        events = {'callbacks': len(self._loop._ready),
                  'scheduled': len(self._loop._scheduled)}
        data = {'actor': actor,
                'events': events,
                'extra': self.extra}
        if isp:
            data['system'] = system.process_info(self.pid)
        # give listeners a chance to enrich the payload
        self.fire_event('on_info', info=data)
        return data

    def _run(self, initial=True):
        """Hand control to the concurrency implementation's run loop."""
        exc = None
        if initial:
            try:
                self.cfg.when_ready(self)
            except Exception:   # pragma    nocover
                self.logger.exception('Unhandled exception in when_ready hook')
        try:
            # NOTE(review): the return value bound to ``exc`` here is never
            # passed to self.stop() below -- confirm whether run_actor can
            # return an exception that should reach stop()
            exc = self.__impl.run_actor(self)
        except MonitorStarted:
            return
        except (Exception, HaltServer) as exc:
            return self.stop(exc)
        except BaseException:
            # e.g. KeyboardInterrupt/SystemExit: fall through to a clean stop
            pass
        self.stop()

    def _remove_actor(self, actor, log=True):
        """Delegate removal of a managed ``actor`` to the implementation."""
        return self.__impl._remove_actor(self, actor, log=log)
| |
# coding: utf-8
from flask import render_template, redirect, request, url_for, flash, \
jsonify
from flask_login import login_required, current_user
from . import project
from .. import db, flash_errors
from ..models import User, Department, Software, Idc, Project, ProjectSchema, Module, ModuleSchema, \
Environment, EnvironmentSchema
from .forms import AddProjectForm, EditProjectForm, AddModuleForm, \
EditModuleForm, AddEnvironmentForm, EditEnvironmentForm
# Marshmallow (de)serializers: single-object and collection variants for
# each model exposed by this blueprint.
project_schema = ProjectSchema()
projects_schema = ProjectSchema(many=True)
module_schema = ModuleSchema()
modules_schema = ModuleSchema(many=True)
environment_schema = EnvironmentSchema()
environments_schema = EnvironmentSchema(many=True)
@project.route('/')
@login_required
def project_main():
    """Render the project page with its add/edit forms."""
    add_project_form = AddProjectForm()
    edit_project_form = EditProjectForm()
    return render_template('project/project.html', add_project_form=add_project_form,
                           edit_project_form=edit_project_form)
@project.route('/list')
@login_required
def project_list():
    """Return all projects serialized as JSON ({} when none exist)."""
    projects = Project.query.all()
    if not projects:
        return jsonify({})
    else:
        # Serialize the queryset
        # NOTE(review): `.data` implies marshmallow 2.x (dump returns a
        # MarshalResult); in marshmallow 3 dump() returns the data directly
        result = projects_schema.dump(projects)
        return jsonify(result.data)
@project.route('/add', methods=['POST'])
@login_required
def project_add():
    """Create a project from the add form and redirect to the main page."""
    # NOTE(review): form is populated from request.get_json() -- presumably
    # the page posts JSON; verify against the client-side code
    form = AddProjectForm(data=request.get_json())
    if form.validate_on_submit():
        project = Project(name=form.name.data,
                          department=Department.query.get(form.department.data),
                          pm=User.query.get(form.pm.data),
                          sla=form.sla.data,
                          check_point=form.check_point.data,
                          description=form.description.data)
        db.session.add(project)
        db.session.commit()
        flash('project: ' + form.name.data + ' is add.')
    else:
        flash_errors(form)
    return redirect(url_for('.project_main'))
@project.route('/edit', methods=['POST'])
@login_required
def project_edit():
    """Update an existing project from the edit form, then redirect."""
    id = request.form.get('e_id')
    project = Project.query.get_or_404(id)
    form = EditProjectForm(id=id)
    if form.validate_on_submit():
        project.name = form.e_name.data
        project.department = Department.query.get(form.e_department.data)
        project.pm = User.query.get(form.e_pm.data)
        project.sla = form.e_sla.data
        project.check_point = form.e_check_point.data
        project.description = form.e_description.data
        db.session.add(project)
        # project_add/project_del commit explicitly; without a commit here
        # the update could be silently discarded with the session
        db.session.commit()
        flash('project: ' + request.form.get('e_name') + ' is update.')
    else:
        flash_errors(form)
    return redirect(url_for('.project_main'))
@project.route('/del', methods=['POST'])
@login_required
def project_del():
    """Delete the project identified by the `id` form field; flash the outcome."""
    project_id = request.form.get('id')
    target = Project.query.filter_by(id=project_id).first()
    if target is None:
        flash('Non-existent project: ' + request.form.get('name'), 'error')
    else:
        db.session.delete(target)
        db.session.commit()
        flash('project: ' + request.form.get('name') + ' is del.')
    return redirect(url_for('.project_main'))
@project.route('/module')
@login_required
def module_main():
    """Render the module management page with blank add/edit forms."""
    return render_template(
        'project/module.html',
        add_module_form=AddModuleForm(),
        edit_module_form=EditModuleForm(),
    )
@project.route('/module-list')
@login_required
def module_list():
    """Return modules as JSON, optionally filtered by `project` name or `project_id`."""
    if request.args.get('project'):
        modules = Module.query.filter_by(project=Project.query.filter_by(name=request.args.get('project')).first()).all()
    elif request.args.get('project_id'):
        # Fix: the original omitted .all() on this branch, handing a Query
        # object to the serializer and making the `if not modules` emptiness
        # check always truthy (Query has no __bool__).
        modules = Module.query.filter_by(project=Project.query.filter_by(id=request.args.get('project_id')).first()).all()
    else:
        modules = Module.query.all()
    if not modules:
        return jsonify({})
    else:
        # Serialize the queryset
        result = modules_schema.dump(modules)
        return jsonify(result.data)
@project.route('/module-add', methods=['POST'])
@login_required
def module_add():
    """Create a new module from the posted JSON form data, then redirect."""
    form = AddModuleForm(data=request.get_json())
    if form.validate_on_submit():
        module = Module(name=form.name.data,
                        project=Project.query.get(form.project.data),
                        svn=form.svn.data,
                        parent=Module.query.get(form.parent.data),
                        dev=User.query.get(form.dev.data),
                        qa=User.query.get(form.qa.data),
                        ops=User.query.get(form.ops.data),
                        software=Software.query.get(form.software.data),
                        description=form.description.data)
        db.session.add(module)
        db.session.commit()
        # Fix: the original flashed "<name>is add." with no space before
        # "is", unlike every sibling handler in this module.
        flash('module: ' + form.name.data + ' is add.')
    else:
        flash_errors(form)
    return redirect(url_for('.module_main'))
@project.route('/module-edit', methods=['POST'])
@login_required
def module_edit():
    """Update an existing module (looked up by `e_id`, 404 if missing) from the edit form."""
    id = request.form.get('e_id')
    module = Module.query.get_or_404(id)
    form = EditModuleForm(id=id)
    if form.validate_on_submit():
        module.name = form.e_name.data
        module.project = Project.query.get(form.e_project.data)
        module.svn = form.e_svn.data
        module.parent = Module.query.get(form.e_parent.data)
        module.dev = User.query.get(form.e_dev.data)
        module.qa = User.query.get(form.e_qa.data)
        module.ops = User.query.get(form.e_ops.data)
        module.software = Software.query.get(form.e_software.data)
        module.description = form.e_description.data
        db.session.add(module)
        # Fix: persist the change. The add/del handlers commit explicitly;
        # without this the edit was lost unless a teardown hook committed.
        db.session.commit()
        flash('module: ' + form.e_name.data + ' is update.')
    else:
        flash_errors(form)
    return redirect(url_for('.module_main'))
@project.route('/module-del', methods=['POST'])
@login_required
def module_del():
    """Delete the module identified by the `id` form field; flash the outcome."""
    module_id = request.form.get('id')
    target = Module.query.filter_by(id=module_id).first()
    if target is None:
        flash('Non-existent module: ' + request.form.get('name'), 'error')
    else:
        db.session.delete(target)
        db.session.commit()
        flash('module: ' + request.form.get('name') + ' is del.')
    return redirect(url_for('.module_main'))
@project.route('/environment')
@login_required
def environment_main():
    """Render the environment management page with blank add/edit forms."""
    return render_template(
        'project/environment.html',
        add_environment_form=AddEnvironmentForm(),
        edit_environment_form=EditEnvironmentForm(),
    )
@project.route('/environment-list')
@login_required
def environment_list():
    """Return environments as JSON, optionally filtered by `module` name."""
    module_name = request.args.get('module')
    if module_name:
        parent_module = Module.query.filter_by(name=module_name).first()
        environments = Environment.query.filter_by(module=parent_module).all()
    else:
        environments = Environment.query.all()
    if not environments:
        return jsonify({})
    # Serialize the whole queryset with the shared many=True schema.
    serialized = environments_schema.dump(environments)
    return jsonify(serialized.data)
@project.route('/environment-add', methods=['POST'])
@login_required
def environment_add():
    """Create a new environment from the posted JSON form data, then redirect."""
    form = AddEnvironmentForm(data=request.get_json())
    if form.validate_on_submit():
        # Fetch the parent module once and reuse it; the original issued a
        # second identical query just to build the flash message.
        module = Module.query.get(form.module.data)
        environment = Environment(
            module=module,
            idc=Idc.query.get(form.idc.data),
            env=form.env.data,
            check_point1=form.check_point1.data,
            check_point2=form.check_point2.data,
            check_point3=form.check_point3.data,
            deploy_path=form.deploy_path.data,
            server_ip=form.server_ip.data,
            online_since=form.online_since.data,
            domain=form.domain.data)
        db.session.add(environment)
        db.session.commit()
        flash('environment: ' + module.name + ": " + form.env.data + ' is add.')
    else:
        flash_errors(form)
    return redirect(url_for('.environment_main'))
@project.route('/environment-edit', methods=['POST'])
@login_required
def environment_edit():
    """Update an existing environment (looked up by `e_id`, 404 if missing)."""
    id = request.form.get('e_id')
    environment = Environment.query.get_or_404(id)
    form = EditEnvironmentForm(id=id)
    if form.validate_on_submit():
        # Fetch the module once and reuse it; the original issued a second
        # identical query just to build the flash message.
        module = Module.query.get(form.e_module.data)
        environment.module = module
        environment.idc = Idc.query.get(form.e_idc.data)
        environment.env = form.e_env.data
        environment.check_point1 = form.e_check_point1.data
        environment.check_point2 = form.e_check_point2.data
        environment.check_point3 = form.e_check_point3.data
        environment.deploy_path = form.e_deploy_path.data
        environment.server_ip = form.e_server_ip.data
        environment.online_since = form.e_online_since.data
        environment.domain = form.e_domain.data
        db.session.add(environment)
        # Fix: persist the change. The add/del handlers commit explicitly;
        # without this the edit was lost unless a teardown hook committed.
        db.session.commit()
        flash('environment: ' + module.name + ": " + form.e_env.data + ' is update.')
    else:
        flash_errors(form)
    return redirect(url_for('.environment_main'))
@project.route('/environment-del', methods=['POST'])
@login_required
def environment_del():
    """Delete the environment identified by the `id` form field; flash the outcome."""
    id = request.form.get('id')
    environment = Environment.query.filter_by(id=id).first()
    if environment is None:
        # Fix: the original read `' + + request.form.get('module')`, applying
        # unary plus to a str — a TypeError every time this branch ran.
        flash('Non-existent environment: ' + request.form.get('module') + ": " + request.form.get('env'), 'error')
        return redirect(url_for('.environment_main'))
    db.session.delete(environment)
    db.session.commit()
    flash('environment: ' + request.form.get('module') + ": " + request.form.get('env') + ' is del.')
    return redirect(url_for('.environment_main'))
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Migration(DataMigration):
    """South data migration: create Redirect rows mapping each Container's
    legacy pretty URL ("/<channel>/<slug>" and the trailing-slash variant)
    to its new "<path>.html" location."""
    def forwards(self, orm):
        """For every Container, create redirects (with and without trailing
        slash) to the ".html" URL, skipping old_paths already redirected."""
        Redirect = orm['redirects.Redirect']
        # Pre-load every existing old_path so the loop can test membership
        # in memory instead of querying once per container.
        current_redirects = set(
            Redirect.objects.values_list(
                'old_path', flat=True
            )
        )
        for container in orm.Container.objects.all():
            old_path = u'/{0}/{1}'.format(
                container.channel_long_slug,
                container.slug
            )
            if old_path not in current_redirects:
                Redirect.objects.create(
                    old_path=old_path,
                    new_path=u'{0}.html'.format(old_path),
                    site_id=container.site_id
                )
                # Track what we just created so the slash variant below (and
                # later containers with the same path) do not duplicate it.
                current_redirects.add(old_path)
            # Also redirect the trailing-slash form of the same URL.
            old_path2 = u'{0}/'.format(old_path)
            if old_path2 not in current_redirects:
                Redirect.objects.create(
                    old_path=old_path2,
                    new_path=u'{0}.html'.format(old_path),
                    site_id=container.site_id
                )
                current_redirects.add(old_path2)
    def backwards(self, orm):
        """Irreversible: created redirects cannot be told apart from pre-existing ones."""
        raise RuntimeError("Cannot reverse this migration.")
    # Frozen model state captured by South when the migration was generated;
    # used to build the historical `orm` object passed to forwards/backwards.
    # Do not edit by hand.
    models = {
        u'%s.%s' % (User._meta.app_label, User._meta.module_name): {
            'Meta': {'object_name': User.__name__},
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'boxes.queryset': {
            'Meta': {'object_name': 'QuerySet'},
            'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['channels.Channel']", 'null': 'True', 'blank': 'True'}),
            'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'filters': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '7'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'order': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
            'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
        },
        u'channels.channel': {
            'Meta': {'ordering': "['name', 'parent__id', 'published']", 'unique_together': "(('site', 'long_slug', 'slug', 'parent'),)", 'object_name': 'Channel'},
            'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'include_in_main_rss': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'layout': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '250', 'db_index': 'True'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'long_slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'subchannel'", 'null': 'True', 'to': u"orm['channels.Channel']"}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'show_in_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
            'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
        },
        u'containers.container': {
            'Meta': {'ordering': "['-date_available']", 'unique_together': "(('site', 'channel_long_slug', 'slug'),)", 'object_name': 'Container'},
            'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['channels.Channel']"}),
            'channel_long_slug': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'channel_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'child_app_label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'child_class': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'child_module': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '120', 'null': 'True', 'blank': 'True'}),
            'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'hat': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['images.Image']", 'null': 'True', 'through': u"orm['containers.ContainerImage']", 'blank': 'True'}),
            'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'containers_container_mainimage'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['images.Image']"}),
            'main_image_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_containers.container_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'show_on_root_channel': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
            'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
        },
        u'containers.containerbox': {
            'Meta': {'object_name': 'ContainerBox'},
            'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['channels.Channel']"}),
            'channel_long_slug': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'channel_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'containers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'containerbox_containers'", 'to': u"orm['containers.Container']", 'through': u"orm['containers.ContainerBoxContainers']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'queryset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'containerbox_querysets'", 'null': 'True', 'to': u"orm['boxes.QuerySet']"}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
            'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
        },
        u'containers.containerboxcontainers': {
            'Meta': {'ordering': "('order', 'aggregate')", 'object_name': 'ContainerBoxContainers'},
            'aggregate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'container': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Container']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'containerbox': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.ContainerBox']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            'date_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'hat': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'main_image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['images.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'main_image_caption': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        u'containers.containerimage': {
            'Meta': {'ordering': "('order',)", 'object_name': 'ContainerImage'},
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'container': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Container']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['images.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'images.image': {
            'Meta': {'object_name': 'Image'},
            'archive': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
            'crop_example': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'crop_x1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'crop_x2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'crop_y1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'crop_y2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'fit_in': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'flip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'flop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'halign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
            'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
            'smart': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)}),
            'valign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'})
        },
        u'redirects.redirect': {
            'Meta': {'ordering': "('old_path',)", 'unique_together': "(('site', 'old_path'),)", 'object_name': 'Redirect', 'db_table': "'django_redirect'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'old_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"})
        },
        u'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }
    complete_apps = ['containers']
    symmetrical = True
| |
import mido
from threading import Thread, Lock
import time
import math
import re
import numpy
from random import randint
# Standard notes in an octave; a note's list index is its semitone offset
# from C within the octave (used by note_number).
keys_in_octave = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
# For remapping certain note values to more standard note names:
# flats map to their enharmonic sharp, and the E#/Fb/B#/Cb edge cases fold
# onto their natural equivalents.
note_remap = {
    'Db': 'C#',
    'Eb': 'D#',
    'E#': 'F',
    'Fb': 'E',
    'Gb': 'F#',
    'Ab': 'G#',
    'Bb': 'A#',
    'B#': 'C',
    'Cb': 'B'
}
# Relative positions of notes in major/minor scales (e.g. any major scale goes
# base note, whole step, whole step, half step, whole step, whole step, whole
# step, half step)
major_scale_progression = [0, 2, 4, 5, 7, 9, 11, 12]
minor_scale_progression = [0, 2, 3, 5, 7, 8, 10, 12]
# Valid chords that can be transitioned to from some given diatonic triad.
# Taken from http://www.angelfire.com/music/HarpOn/theory2.html
# Keys and values are roman-numeral chord symbols; the interval spelling of
# each symbol lives in triad_notes.
transitions_major = {
    'I': ['I', 'ii', 'iii', 'IV', 'V', 'vi', 'vii0'],
    'ii': ['V', 'vii0'],
    'iii': ['IV', 'vi'],
    'IV': ['I', 'ii', 'V', 'vii0'],
    'V': ['I', 'vi'],
    'vi': ['ii', 'IV', 'V'],
    'vii0': ['I']
}
transitions_minor = {
    'i': ['i', 'ii0', 'III', 'iv', 'V', 'VI', 'VII', 'vii0'],
    'ii0': ['V', 'vii0'],
    'III': ['iv', 'VI'],
    'iv': ['i', 'ii0', 'V', 'vii0'],
    'V': ['i', 'VI'],
    'VI': ['ii0', 'iv', 'V'],
    'VII': ['III'],
    'vii0': ['i', 'V']
}
# Produces the MIDI numbers (in relation to the base note) for a minor
# diatonic triad given the major chord symbol (e.g. major_chord_to_minor('I')
# gives the minor sequence [0, 3, 7])
def major_chord_to_minor(symbol):
    """
    Return the minor-triad interval list for the major chord `symbol` by
    flattening the third (e.g. major_chord_to_minor('I') -> [0, 3, 7]).
    """
    intervals = triad_notes[symbol][:]  # copy; never mutate the shared table
    intervals[1] -= 1
    return intervals
# Defines the MIDI numbers (in relation to the base note) for diatonic triads.
# These can be combined with the base note to produce chords with MIDI. For
# example, the I chord with base note middle C (60) could be produced with the
# MIDI note numbers [60+0, 60+4, 60+7]
triad_notes = {
    'I': [0, 4, 7],
    'II': [2, 6, 9],
    'III': [4, 8, 11],
    'IV': [5, 9, 12],
    'V': [7, 11, 14],
    'VI': [9, 13, 16],
    'VII': [11, 15, 18],
    'ii0': [1, 4, 7, 9],
    'vii0': [0, 4, 7, 10]
}
# Derive the lowercase (minor) symbols from their major counterparts by
# flattening the third of each triad.
triad_notes['i'] = major_chord_to_minor('I')
triad_notes['ii'] = major_chord_to_minor('II')
triad_notes['iii'] = major_chord_to_minor('III')
triad_notes['iv'] = major_chord_to_minor('IV')
triad_notes['v'] = major_chord_to_minor('V')
triad_notes['vi'] = major_chord_to_minor('VI')
triad_notes['vii'] = major_chord_to_minor('VII')
# Open MIDI virtual output device
output = mido.open_output()
# Calling mido is not thread-safe. We segfault if we're trying to play a bunch
# of notes using different threads
# (every output.send below is serialized through this lock in _play).
mido_mutex = Lock()
# We play each note in its own thread. This is inefficient, but it lets us
# deal with note lengths much more easily. Instead of trying to track which
# notes are playing and need to be stopped when, we just start a thread that
# will send the note_off message at the appropriate time. Not the ideal
# solution but it's the easiest to implement
def _play(note, vel, duration, channel=1):
    """Send note_on, sleep `duration` seconds, then send note_off.

    Runs in its own thread (see play()); every mido call is serialized
    through mido_mutex because the backend is not thread-safe. The sleep
    happens outside the lock so concurrent notes can overlap.
    """
    # `with lock:` replaces the manual acquire/try/finally/release; the
    # redundant `msg.channel = channel` was dropped — the constructor
    # already sets it.
    with mido_mutex:
        output.send(mido.Message('note_on', note=note, velocity=vel, channel=channel))
    time.sleep(duration)
    with mido_mutex:
        output.send(mido.Message('note_off', note=note, velocity=vel, channel=channel))
def play(note, vel, duration, channel=1):
    """
    Fire-and-forget playback: spawn a worker thread that plays `note` (a MIDI
    number, e.g. 60 for middle C) at velocity `vel` for `duration` seconds on
    MIDI channel `channel`, then releases it.
    """
    worker = Thread(target=_play, args=(note, vel, duration, channel))
    worker.start()
# major_scales = ['Cb', 'Gb', 'Db', 'Ab', 'Eb', 'Bb', 'F', 'C', 'G', 'D', 'A', 'E', 'B', 'F#', 'C#']
# minor_scales = ['Ab', 'Eb', 'Bb', 'F', 'C', 'G', 'D', 'A', 'E', 'B', 'F#', 'C#', 'G#', 'D#', 'A#']
# def minor_to_major(scale):
# return major_scales[minor_scales.index(scale)]
def note_number(note):
    """
    Return the MIDI note number for a note name such as "C#4".

    The name is a letter (case-insensitive first character), an optional
    accidental (# or b) and an octave number; flats and enharmonic edge
    cases are normalized via note_remap before lookup.

    Raises:
        ValueError: if `note` does not parse as <letter><accidental?><octave>
            (backward compatible: callers catching Exception still match).
    """
    match = re.match(r'^([A-Z][b#]?)(\d+)$', note[0].upper() + note[1:])
    if match is None:
        raise ValueError('Bad note input to note_number %r' % note)
    name, octave = match.groups()
    # Normalize flats/enharmonics (e.g. 'Bb' -> 'A#') to the sharp spelling
    # used by keys_in_octave.
    name = note_remap.get(name, name)
    position_in_octave = keys_in_octave.index(name)
    # MIDI octaves here are offset so that e.g. "C3" maps to 60 (middle C).
    return (int(octave) + 2) * 12 + position_in_octave
def generate_scale(name, octave, major=True):
    """
    Return the MIDI note numbers of a full scale (do re mi fa sol la si do).

    `name` is the tonic note name, `octave` the octave the scale starts in,
    and `major` selects the major or natural-minor interval pattern.
    """
    intervals = major_scale_progression if major else minor_scale_progression
    root = note_number(name + str(octave))
    return [root + step for step in intervals]
def get_chord(symbol, base_note):
    """
    Return the MIDI note numbers of the diatonic triad `symbol` built on
    `base_note` (e.g. get_chord('I', 60) -> [60, 64, 67], the I chord of
    C major starting at middle C).
    """
    # Iterate the interval list directly instead of indexing via
    # range(len(...)).
    return [base_note + interval for interval in triad_notes[symbol]]
def play_progression(*args):
    """
    Plays a chord progression in C major given a series of diatonic triads in
    roman numeral form (e.g. play_progression('iii', 'vi', 'ii', 'V')).
    Each chord sounds for one second.
    """
    root = note_number('C5')  # same root for every chord; hoisted out of the loop
    for symbol in args:
        for midi_note in get_chord(symbol, root):
            play(midi_note, 80, 1)
        time.sleep(1)
def pick_next_chord(seed, transitions):
    """
    Pick a uniformly random successor of the chord `seed` from the
    transition graph `transitions` (see transitions_major /
    transitions_minor).
    """
    candidates = transitions[seed]
    return candidates[randint(0, len(candidates) - 1)]
def generate_progression(bars, major=True, seed=None):
    """
    Generate a `bars`-long chord progression as a list of roman-numeral
    symbols (key-agnostic). `major` selects the major or minor transition
    graph. If `seed` is given, the first chord is a valid successor of
    `seed`; otherwise a random chord starts the progression.
    """
    transitions = transitions_major if major else transitions_minor
    if seed:
        first = pick_next_chord(seed, transitions)
    else:
        # Fix: dict views are not subscriptable in Python 3, so the original
        # `transitions.keys()[randint(...)]` raised TypeError whenever no
        # seed was supplied. Materialize the keys first.
        symbols = list(transitions)
        first = symbols[randint(0, len(symbols) - 1)]
    progression = [first]
    for _ in range(bars - 1):
        progression.append(pick_next_chord(progression[-1], transitions))
    return progression
def generate_melody(key, progression, progression_repeats, major=True):
    """
    Generate a melody spanning len(progression)*progression_repeats bars.

    Args:
        key: Base note name of the key, e.g. 'C'.
        progression: List of roman-numeral chord symbols, one per bar.
        progression_repeats: Number of times the progression is traversed.
        major: Whether the key is major (True) or minor (False).

    Returns:
        A flat list with one entry per eighth-note (0.125-measure) slot,
        8 slots per bar: a (midi_note, velocity, duration_in_measures)
        tuple where a note starts, and None for slots covered by a
        previously started note.
    """
    out = []
    for _ in range(progression_repeats):
        time_used = 0.0 # Number of measures that have been generated so far
        for i, chord in enumerate(progression):
            all_tones = generate_scale(key, 2, major)
            chord_tones = get_chord(chord, note_number(key+'2'))
            # Also allow the same chord tones one octave up.
            chord_tones.extend([x+12 for x in chord_tones])
            # Scale tones (minus the closing octave note) that are not chord
            # tones, again doubled an octave up.
            non_chord_tones = list(set(all_tones[:-1]) - set(chord_tones))
            non_chord_tones.extend([x+12 for x in non_chord_tones])
            last_played = None
            # Generate a sequence of notes to fill a measure for this chord
            while time_used < i + 1:
                # (duration_in_measures, relative_probability_weight) pairs.
                note_vals = [(0.125, 2), (0.25, 4), (0.375, 2), (0.5, 2), (0.75, 1), (1.0, 1), (1.25, 0.5), (1.5, 0.25)]
                # Only allow note lengths that will fit into the len(progression)
                # measures for this chord progression (i.e. don't allow spill
                # of notes into different repetitions of the progression)
                possible_note_vals = [x for x, p in note_vals if time_used + x <= len(progression)]
                note_vals_prob = [p for x, p in note_vals if time_used + x <= len(progression)]
                # Normalize the surviving weights into a distribution.
                note_vals_prob = [x*1.0/sum(note_vals_prob) for x in note_vals_prob]
                # Choose a note length
                note_val = numpy.random.choice(possible_note_vals, p=note_vals_prob)
                # Choose the set of note numbers we could pick from (either
                # the chord tones or non-chord tones)
                # NOTE(review): int(randint(0, 10)/10.0) is 1 only when the
                # draw is exactly 10, so non-chord tones are picked ~1/11 of
                # the time.
                select_from = non_chord_tones if int(randint(0, 10)/10.0) else chord_tones
                # Incentivize choosing notes that are close to the previously
                # played note so that we aren't just jumping all over the
                # place and sounding terribly random
                NEARBY_INCENTIVE = 2.0
                # NOTE(review): a candidate more than 36 semitones from
                # last_played yields a negative weight, which would make
                # numpy.random.choice raise; the two-octave pools make this
                # unlikely but it is not guarded.  Also `if last_played`
                # treats MIDI note 0 as "nothing played" — confirm intended.
                select_from_probabilities = [ (36 - int(math.fabs(last_played - x)))**NEARBY_INCENTIVE if last_played else 1 for x in select_from ]
                select_from_probabilities = [ x * 1.0 / sum(select_from_probabilities) for x in select_from_probabilities ]
                out.append((numpy.random.choice(select_from, p=select_from_probabilities), 80, note_val))
                last_played = out[-1][0]
                # Pad with placeholders so `out` stays aligned to
                # eighth-note slots (one entry per 0.125 measure).
                out.extend([None for x in range(int(note_val/0.125)-1)])
                time_used += note_val
    assert len(out) == 8 * len(progression) * progression_repeats
    return out
if __name__ == '__main__':
    # --- Global song parameters -------------------------------------------
    # Choose tempo, key, and whether or not we have a swing feel
    tempo = randint(100, 200)
    print "Tempo: {}bpm".format(tempo)
    seconds_per_beat = 60.0/tempo
    # major = bool(randint(0, 1))
    major = True
    key = keys_in_octave[randint(0, len(keys_in_octave)-1)]
    print "Key: {} major".format(key)
    swing = bool(randint(0, 1))
    print "Swing feel: {}".format(swing)
    # --- Verse parts: 128 eighth-note slots (4 repeats x 4 bars x 8) ------
    verse_progression = generate_progression(4, major=major)
    print "Verse progression: {}".format(verse_progression)
    # Sustained chords: one whole-note chord per bar, padded with 7 Nones so
    # every part list is addressed by the same eighth-note slot index.
    verse_chords = []
    for _ in range(4):
        for chord in verse_progression:
            verse_chords.append((get_chord(chord, note_number(key+'2')), 80, 1.0))
            verse_chords.extend([None for x in range(7)])
    # Rhythm chords: alternating loud/soft eighth-note stabs on every slot.
    verse_rhythm_chords = []
    for _ in range(4):
        for chord in verse_progression:
            verse_rhythm_chords.extend([(get_chord(chord, note_number(key+'2')), 80, 0.125),
                                        (get_chord(chord, note_number(key+'2')), 60, 0.125)]*4)
    # Arpeggio: chord tones as quarter notes; for triads the 2nd tone is
    # repeated so each bar holds exactly four quarter notes.
    verse_arp = []
    for _ in range(4):
        for chord in verse_progression:
            notes_ = get_chord(chord, note_number(key+'2'))
            for note in (notes_ + [notes_[1]] if len(notes_) == 3 else notes_):
                verse_arp.append((note, 80, 0.25))
                verse_arp.append(None)
    # Prevent ghost notes from getting too messy at high tempos
    # ghost_note_penalty = int((tempo/80.0)**2)
    ghost_note_penalty = 1
    # Snare: quiet random ghost notes, hard accents on beats 2 and 4.
    snare = [ randint(20, 50) if int(randint(0, ghost_note_penalty)*1.0/ghost_note_penalty) else 0 for _ in range(8) ]
    snare[2] = 80
    snare[6] = 80
    # Kick: random hits that never collide with the snare accents.
    bass = [ randint(30, 80) if int(randint(0, 2*ghost_note_penalty)/(2.0*ghost_note_penalty)) and i not in (2, 6) else 0 for i in range(8) ]
    # Repeat the one-bar drum patterns across all 16 bars of the section.
    verse_snare = [ (note_number('G1'), x, 0.03) if x else None for x in snare ] * 16
    verse_bass = [ (note_number('C1'), x, 0.03) if x else None for x in bass ] * 16
    # Hi-hat: 50% chance of a loud/soft eighth pattern, else silent.
    verse_hihat = [(note_number('C#2'), 70, 0.03), (note_number('C#2'), 40, 0.03)]*64 if randint(0, 1) else [None for x in range(128)]
    melody_repeats = 2*int(randint(3, 6)/3.0) # Either 2 or 4
    verse_melody = generate_melody(key, verse_progression, melody_repeats, major)
    if melody_repeats == 2:
        verse_melody = verse_melody * 2
    # --- Chorus parts: same construction with a new progression ----------
    # Seeded so the last verse chord transitions legally into the chorus.
    chorus_progression = generate_progression(4, major=major, seed=verse_progression[-1])
    print "Chorus progression: {}".format(chorus_progression)
    chorus_chords = []
    for _ in range(4):
        for chord in chorus_progression:
            chorus_chords.append((get_chord(chord, note_number(key+'2')), 80, 1.0))
            chorus_chords.extend([None for x in range(7)])
    chorus_arp = []
    for _ in range(4):
        for chord in chorus_progression:
            notes_ = get_chord(chord, note_number(key+'2'))
            for note in (notes_ + [notes_[1]] if len(notes_) == 3 else notes_):
                chorus_arp.append((note, 80, 0.25))
                chorus_arp.append(None)
    snare = [ randint(20, 50) if int(randint(0, ghost_note_penalty)*1.0/ghost_note_penalty) else 0 for _ in range(8) ]
    snare[2] = 80
    snare[6] = 80
    bass = [ randint(30, 80) if int(randint(0, 2*ghost_note_penalty)/(2.0*ghost_note_penalty)) and i not in (2, 6) else 0 for i in range(8) ]
    chorus_snare = [ (note_number('G1'), x, 0.03) if x else None for x in snare ] * 16
    chorus_bass = [ (note_number('C1'), x, 0.03) if x else None for x in bass ] * 16
    chorus_hihat = [(note_number('C#2'), 70, 0.03), (note_number('C#2'), 40, 0.03)]*64 if randint(0, 1) else [None for x in range(128)]
    melody_repeats = 2*int(randint(3, 6)/3.0)
    # NOTE(review): the chorus melody is generated over verse_progression,
    # not chorus_progression — possibly intentional reuse, but verify.
    chorus_melody = generate_melody(key, verse_progression, melody_repeats, major)
    if melody_repeats == 2:
        chorus_melody = chorus_melody * 2
    # --- Instrument / channel assignment ----------------------------------
    # Each entry is a (midi_channel, transpose_in_semitones) pair.
    instruments = [(4, 0), (5, 0), (6, 0), (7, 0), (8, 0)]
    # Only use violin/cello for slower tempos (the sample packs are slow to respond)
    if tempo <= 140:
        instruments.extend([(9, -12), (10, 12)])
    bass_arp_instrument = [3, 11][randint(0, 1)]
    drum_instrument = [2, 12, 13, 14][randint(0, 3)]
    verse_instrument = instruments[randint(0, len(instruments)-1)]
    chorus_instrument = instruments[randint(0, len(instruments)-1)]
    print 'starting playback'
    # Play verse
    # NOTE(review): verse_rhythm_chords is only sounded in this first verse
    # pass, not in the reprise below — confirm intended.
    for i in range(128):
        for note in verse_chords[i][0] if verse_chords[i] else []:
            play(note, verse_chords[i][1], verse_chords[i][2]*seconds_per_beat*4, channel=0)
        for note in verse_rhythm_chords[i][0] if verse_rhythm_chords[i] else []:
            play(note, verse_rhythm_chords[i][1], ((0.167 if i%2 == 0 else 0.083) if swing else 0.125)*seconds_per_beat*4, channel=1)
        # Each event is a ((note, velocity, duration), channel) pair; None
        # events (empty slots) are skipped by the `if event[0]` guard.
        for event in [(verse_arp[i], 1),
                      ((verse_arp[i][0]-12, verse_arp[i][1], verse_arp[i][2]) if verse_arp[i] else None, bass_arp_instrument),
                      (verse_snare[i], drum_instrument),
                      (verse_bass[i], drum_instrument),
                      (verse_hihat[i], drum_instrument),
                      ((verse_melody[i][0]+verse_instrument[1], verse_melody[i][1], verse_melody[i][2]) if verse_melody[i] else None, verse_instrument[0])]:
            if event[0]:
                # Swing lengthens on-beat eighths and shortens off-beat ones.
                if swing and event[0][2] == 0.125:
                    duration = 0.167 if i%2 == 0 else 0.083
                else:
                    duration = event[0][2]
                play(event[0][0], event[0][1], duration*seconds_per_beat*4, channel=event[1])
        time.sleep(((0.667 if i%2==0 else 0.333) if swing else 0.5)*seconds_per_beat)
    # Play chorus
    for i in range(128):
        for note in chorus_chords[i][0] if chorus_chords[i] else []:
            play(note, chorus_chords[i][1], chorus_chords[i][2]*seconds_per_beat*4, channel=0)
        for event in [(chorus_arp[i], 1),
                      ((chorus_arp[i][0]-12, chorus_arp[i][1], chorus_arp[i][2]) if chorus_arp[i] else None, bass_arp_instrument),
                      (chorus_snare[i], drum_instrument),
                      (chorus_bass[i], drum_instrument),
                      (chorus_hihat[i], drum_instrument),
                      ((chorus_melody[i][0]+chorus_instrument[1], chorus_melody[i][1], chorus_melody[i][2]) if chorus_melody[i] else None, chorus_instrument[0])]:
            if event[0]:
                if swing and event[0][2] == 0.125:
                    duration = 0.167 if i%2 == 0 else 0.083
                else:
                    duration = event[0][2]
                play(event[0][0], event[0][1], duration*seconds_per_beat*4, channel=event[1])
        time.sleep(((0.667 if i%2==0 else 0.333) if swing else 0.5)*seconds_per_beat)
    # Play verse
    for i in range(128):
        for note in verse_chords[i][0] if verse_chords[i] else []:
            play(note, verse_chords[i][1], verse_chords[i][2]*seconds_per_beat*4, channel=0)
        for event in [(verse_arp[i], 1),
                      ((verse_arp[i][0]-12, verse_arp[i][1], verse_arp[i][2]) if verse_arp[i] else None, bass_arp_instrument),
                      (verse_snare[i], drum_instrument),
                      (verse_bass[i], drum_instrument),
                      (verse_hihat[i], drum_instrument),
                      ((verse_melody[i][0]+verse_instrument[1], verse_melody[i][1], verse_melody[i][2]) if verse_melody[i] else None, verse_instrument[0])]:
            if event[0]:
                if swing and event[0][2] == 0.125:
                    duration = 0.167 if i%2 == 0 else 0.083
                else:
                    duration = event[0][2]
                play(event[0][0], event[0][1], duration*seconds_per_beat*4, channel=event[1])
        time.sleep(((0.667 if i%2==0 else 0.333) if swing else 0.5)*seconds_per_beat)
    # End the song playing the first chord of the verse
    for note in verse_chords[0][0]:
        play(note, verse_chords[0][1], verse_chords[0][2]*seconds_per_beat*4, channel=0)
    for event in [(verse_arp[0], 1),
                  ((verse_arp[0][0]-12, verse_arp[0][1], verse_arp[0][2]) if verse_arp[0] else None, bass_arp_instrument),
                  (verse_snare[0], drum_instrument),
                  (verse_bass[0], drum_instrument),
                  (verse_hihat[0], drum_instrument),
                  ((note_number('D3'), 90, 0.1), drum_instrument),
                  ((verse_arp[0][0]+verse_instrument[1], verse_melody[0][1], verse_melody[0][2]) if verse_melody[0] else None, verse_instrument[0])]:
        if event[0]:
            # All closing notes are held for one full measure.
            play(event[0][0], event[0][1], seconds_per_beat*4, channel=event[1])
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions called by the generated code to execute an eager-mode op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
  """Execute a TensorFlow operation.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch.
      (Explicitly provided instead of being inferred for performance
      reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    ctx: The value of context.context().
    name: Customized name for the operation.

  Returns:
    List of output Tensor objects. The list is empty if there are no outputs

  Raises:
    An exception on error.
  """
  device_name = ctx.device_name
  # pylint: disable=protected-access
  try:
    ctx.ensure_initialized()
    tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
                                        inputs, attrs, num_outputs)
  except core._NotOkStatusException as e:
    # Append the caller-supplied op name to the error message to aid
    # debugging, then re-raise as the appropriate TF exception type.
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    # raise_from(..., None) deliberately suppresses the original context.
    six.raise_from(core._status_to_exception(e.code, message), None)
  except TypeError as e:
    # A TypeError here may be caused by Keras symbolic (graph) tensors being
    # fed into an eager path; detect that case and raise a clearer error.
    keras_symbolic_tensors = [
        x for x in inputs if ops._is_keras_symbolic_tensor(x)
    ]
    if keras_symbolic_tensors:
      raise core._SymbolicException(
          "Inputs to eager execution function cannot be Keras symbolic "
          "tensors, but found {}".format(keras_symbolic_tensors))
    raise e
  # pylint: enable=protected-access
  return tensors
def execute_with_cancellation(op_name,
                              num_outputs,
                              inputs,
                              attrs,
                              ctx,
                              cancellation_manager,
                              name=None):
  """Execute a TensorFlow operation that can be cancelled mid-flight.

  Args:
    op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
      execute.
    num_outputs: The number of outputs of the operation to fetch. (Explicitly
      provided instead of being inferred for performance reasons).
    inputs: A list of inputs to the operation. Each entry should be a Tensor, or
      a value which can be passed to the Tensor constructor to create one.
    attrs: A tuple with alternating string attr names and attr values for this
      operation.
    ctx: The value of context.context().
    cancellation_manager: a `CancellationManager` object that can be used to
      cancel the operation.
    name: Customized name for the operation.

  Returns:
    List of output Tensor objects. The list is empty if there are no outputs

  Raises:
    An exception on error.
  """
  device_name = ctx.device_name
  # pylint: disable=protected-access
  try:
    ctx.ensure_initialized()
    # Identical to quick_execute except the cancellable C API entry point is
    # used, threading the cancellation manager's native handle through.
    tensors = pywrap_tfe.TFE_Py_ExecuteCancelable(ctx._handle, device_name,
                                                  op_name, inputs, attrs,
                                                  cancellation_manager._impl,
                                                  num_outputs)
  except core._NotOkStatusException as e:
    # Append the caller-supplied op name to the error message to aid
    # debugging, then re-raise as the appropriate TF exception type.
    if name is not None:
      message = e.message + " name: " + name
    else:
      message = e.message
    six.raise_from(core._status_to_exception(e.code, message), None)
  except TypeError as e:
    # Keras symbolic (graph) tensors cannot be executed eagerly; surface a
    # clearer error than the raw TypeError when that is the cause.
    keras_symbolic_tensors = [
        x for x in inputs if ops._is_keras_symbolic_tensor(x)
    ]
    if keras_symbolic_tensors:
      raise core._SymbolicException(
          "Inputs to eager execution function cannot be Keras symbolic "
          "tensors, but found {}".format(keras_symbolic_tensors))
    raise e
  # pylint: enable=protected-access
  return tensors
def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None):
  """Monkey-patch for `execute` that also invokes the context's op callbacks."""
  outputs = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
  for op_callback in ctx.op_callbacks:
    op_callback(op_name, tuple(inputs), attrs, outputs, name)
  return outputs
execute = quick_execute  # Default entry point; callers may rebind this (e.g. to execute_with_callbacks).
def must_record_gradient():
  """Import backprop if you want gradients recorded."""
  # Stub default: gradient recording is disabled.  Presumably replaced when
  # the backprop machinery is imported — confirm against the backprop module.
  return False
def record_gradient(unused_op_name, unused_inputs, unused_attrs,
                    unused_outputs):
  """Import backprop if you want gradients recorded."""
  # Intentional no-op stub; presumably replaced when backprop is imported.
  pass
def make_float(v, arg_name):
  """Validate that `v` is a real number and coerce it to a Python float.

  Raises:
    TypeError: if `v` is not one of compat.real_types; `arg_name` is used in
      the error message.
  """
  if isinstance(v, compat.real_types):
    return float(v)
  raise TypeError("Expected float for argument '%s' not %s." %
                  (arg_name, repr(v)))
def make_int(v, arg_name):
  """Validate that `v` is an int-convertible, non-string value.

  Raises:
    TypeError: if `v` is a string or cannot be converted to int.
  """
  # Strings are rejected up front even though int('3') would succeed.
  if isinstance(v, six.string_types):
    raise TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
  try:
    result = int(v)
  except (ValueError, TypeError):
    raise TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
  return result
def make_str(v, arg_name):
  """Validate that `v` is string-like and return it encoded as bytes."""
  if isinstance(v, compat.bytes_or_text_types):
    return compat.as_bytes(v)  # Convert unicode strings to bytes.
  raise TypeError("Expected string for argument '%s' not %s." %
                  (arg_name, repr(v)))
def make_bool(v, arg_name):
  """Validate that `v` is exactly a bool (no truthiness coercion)."""
  if isinstance(v, bool):
    return v
  raise TypeError("Expected bool for argument '%s' not %s." %
                  (arg_name, repr(v)))
def make_type(v, arg_name):
  """Convert `v` to a DataType enum value (int), validating it is a dtype."""
  try:
    dtype = dtypes.as_dtype(v).base_dtype
  except TypeError:
    raise TypeError("Expected DataType for argument '%s' not %s." %
                    (arg_name, repr(v)))
  return dtype.as_datatype_enum
def make_shape(v, arg_name):
  """Convert v into a list.

  Args:
    v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
    arg_name: String, for error messages.

  Returns:
    None if the rank is unknown, otherwise a list of ints (or Nones in the
    position where the dimension is unknown).
  """
  try:
    shape = tensor_shape.as_shape(v)
  except TypeError as e:
    raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e))
  except ValueError as e:
    raise ValueError("Error converting %s to a TensorShape: %s." % (arg_name,
                                                                    e))
  return None if shape.ndims is None else shape.as_list()
def make_tensor(v, arg_name):
  """Ensure v is a TensorProto."""
  if isinstance(v, tensor_pb2.TensorProto):
    return v
  if isinstance(v, six.string_types):
    # Parse a textual protobuf representation into a TensorProto.
    parsed = tensor_pb2.TensorProto()
    text_format.Merge(v, parsed)
    return parsed
  raise TypeError(
      "Don't know how to convert %s to a TensorProto for argument '%s'." %
      (repr(v), arg_name))
def args_to_matching_eager(l, ctx, allowed_dtypes, default_dtype=None):
  """Convert sequence `l` to eager same-type Tensors.

  Args:
    l: Sequence of values to convert.
    ctx: Eager context used for conversions.
    allowed_dtypes: Dtypes permitted for this op argument; used to validate
      an inferred dtype before committing to it.  May be empty/falsy.
    default_dtype: Preferred dtype when no input pins one down.

  Returns:
    A (datatype_enum, tensors) pair where all tensors share one dtype.
  """
  if (not l) and (default_dtype is not None):
    return default_dtype, []  # List is empty; assume default dtype.
  EagerTensor = ops.EagerTensor  # pylint: disable=invalid-name
  # Fast path: if every input is already an EagerTensor, no conversion is
  # needed — reuse the first tensor's dtype.
  for x in l:
    if not isinstance(x, EagerTensor):
      break
  else:  # note: intentional for-else
    return l[0]._datatype_enum(), l  # pylint: disable=protected-access
  # Is some input already a Tensor with a dtype?
  dtype = None
  for t in l:
    if isinstance(t, EagerTensor):
      dtype = t.dtype
      break
  if dtype is None:
    # Infer a dtype based on the first value, and use that dtype for the
    # remaining values.
    ret = []
    for t in l:
      tensor = None
      # First see if we can get a valid dtype with the default conversion
      # and see if it matches an allowed dtypes. Some ops like ConcatV2 may
      # not list allowed dtypes, in which case we should skip this.
      if dtype is None and allowed_dtypes:
        tensor = ops.convert_to_tensor(t, ctx=ctx)
        # If we did not match an allowed dtype, try again with the default
        # dtype. This could be because we have an empty tensor and thus we
        # picked the wrong type.
        if tensor.dtype not in allowed_dtypes:
          tensor = None
      if tensor is None:
        tensor = ops.convert_to_tensor(
            t, dtype, preferred_dtype=default_dtype, ctx=ctx)
      ret.append(tensor)
      if dtype is None:
        # Lock in the dtype of the first successfully converted value.
        dtype = tensor.dtype
  else:
    ret = [ops.convert_to_tensor(t, dtype, ctx=ctx) for t in l]
  # TODO(slebedev): consider removing this as it leaks a Keras concept.
  # pylint: disable=protected-access
  keras_symbolic_tensors = [x for x in ret if
                            ops._is_keras_symbolic_tensor(x)]
  if keras_symbolic_tensors:
    raise core._SymbolicException(
        "Using symbolic output of a Keras layer during eager execution "
        "{}".format(keras_symbolic_tensors))
  # pylint: enable=protected-access
  return dtype.as_datatype_enum, ret
def convert_to_mixed_eager_tensors(values, ctx):
  """Convert each of `values` to an eager tensor, keeping per-value dtypes."""
  tensors = [ops.convert_to_tensor(value, ctx=ctx) for value in values]
  # pylint: disable=protected-access
  datatype_enums = [tensor._datatype_enum() for tensor in tensors]
  # pylint: enable=protected-access
  return datatype_enums, tensors
def args_to_mixed_eager_tensors(lists, ctx):
  """Converts a list of same-length lists of values to eager tensors.

  Position i of every list is converted to a common dtype: the dtype of the
  first EagerTensor found at that position, or failing that, the inferred
  dtype of lists[0][i].

  Args:
    lists: Two or more equal-length lists of convertible values.
    ctx: Eager context used for conversions.

  Returns:
    A (types, lists_ret) pair: `types` has one datatype enum per position,
    and `lists_ret` mirrors `lists` with every value converted.

  Raises:
    ValueError: if the lists do not all have the same length.
  """
  assert len(lists) > 1

  # Generate an error if len(lists[i]) is not the same for all i.
  lists_ret = []
  for l in lists[1:]:
    if len(l) != len(lists[0]):
      raise ValueError(
          "Expected list arguments to be the same length: %d != %d (%r vs. %r)."
          % (len(lists[0]), len(l), lists[0], l))
    lists_ret.append([])

  # Convert the first element of each list first, then the second element, etc.
  types = []
  for i in range(len(lists[0])):
    dtype = None
    # If any list has a Tensor, use that dtype
    for l in lists:
      if isinstance(l[i], ops.EagerTensor):
        dtype = l[i].dtype
        break
    if dtype is None:
      # Convert the first one and use its dtype.
      lists_ret[0].append(ops.convert_to_tensor(lists[0][i], ctx=ctx))
      dtype = lists_ret[0][i].dtype
      for j in range(1, len(lists)):
        lists_ret[j].append(
            ops.convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
    else:
      # Convert everything to the found dtype.
      for j in range(len(lists)):
        lists_ret[j].append(
            ops.convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
    types.append(dtype.as_datatype_enum)
  return types, lists_ret
| |
"""
==========================
Display images for MEG
==========================
"""
import numpy as np
from expyfun import visual, ExperimentController
from expyfun.io import write_hdf5
import time
from PIL import Image
import os
from os import path as op
import glob
# background color
testing = False
test_trig = [15]
if testing:
    bgcolor = [0., 0., 0., 1.]
else:
    # Mid-gray RGBA background for the real experiment.
    bgcolor = [127/255., 127/255., 127/255., 1]
# Paths to images: try the acquisition PC path first, fall back to the
# analysis server path.
#basedir = '/home/jyeatman/projects/MEG/images/'
basedir = os.path.join('C:\\Users\\neuromag\\Desktop\\jason\\wordStim')
if not os.path.isdir(basedir):
    basedir = os.path.join('/mnt/diskArray/projects/MEG/wordStim')
""" Words, False fonts (Korean), Faces, Objects """
imagedirs = ['falsefont', 'word_c254_p0']
stim_time = 40 # 40 s seq
base_rate = 6 # 6 reps per second
odd_rate = 1.2
n_images = stim_time * base_rate
init_time = 2
end_time = 2 # These are padding in the beginning and end
pad_time = init_time + end_time
#nimages = [30, 30, 30, 30] # number of images in each category
#if len(nimages) == 1: # Does nothing....
#    nimages = np.repeat(nimages, len(imagedirs))
#n_totalimages = sum(nimages)
# ISIs to be used. Must divide evenly into nimages
#isis = np.arange(1., 1.51, 0.1) #np.arange(.62, .84, .02)
#imduration = 0.8 # Image duration 800 ms
s = .5 # Image scale
# Create a vector of ISIs in a random order. One ISI for each image
rng = np.random.RandomState(int(time.time()))
#ISI = np.tile(isis, int(np.ceil(n_totalimages/len(isis)))+1)
#rng.shuffle(ISI)
#ISI = ISI[:(n_totalimages)]
total_time = stim_time + pad_time
# Fixation-dot color changes ("flickers") occur roughly every 500 ms; 20%
# of them are red targets the subject must respond to.
n_flickers = int(total_time*2)+1 # every 500 ms
n_target = int(0.2*n_flickers)
fix_seq = np.zeros(n_flickers)
fix_seq[:n_target] = 1
rng.shuffle(fix_seq)
# Break up back-to-back targets, pushing the second target a few slots later.
# NOTE(review): fix_seq[i+3] / fix_seq[i+4] can index past the end of the
# array when a target pair lands within 4 slots of the end — possible
# IndexError; confirm and guard.
for i in range(0,len(fix_seq)-2):
    if (fix_seq[i] + fix_seq[i+1]) == 2:
        fix_seq[i+1] = 0
        if fix_seq[i+3] == 0:
            fix_seq[i+3] = 1
        elif fix_seq[i+4] == 0:
            fix_seq[i+4] = 1
# Create a vector of dot colors for each ISI: red for targets, otherwise
# cycle through a shuffled set of non-target colors, reshuffling so the same
# color never appears twice in a row across a shuffle boundary.
c = ['g', 'b', 'y', 'c']
k = 0
m = 0  # NOTE(review): never used below.
fix_color = []
for i in range(0,len(fix_seq)):
    if fix_seq[i] == 1:
        fix_color.append('r')
    else:
        fix_color.append(c[k])
        k += 1
        k = np.mod(k,4)
        if k == 0:
            rng.shuffle(c)
            while fix_color[i] == c[0]:
                rng.shuffle(c)
# Build the path structure to each image in each image directory. Each image
# category is an entry into the list. The categories are in sequential order
# matching imorder, but the images within each category are random
# Temporary variable with image names in order
tmp = sorted(glob.glob(os.path.join(basedir, imagedirs[0], '*')))
# Randomly grab nimages from the list
rng.shuffle(tmp)
baselist = tmp
tmp = sorted(glob.glob(os.path.join(basedir, imagedirs[1], '*')))
rng.shuffle(tmp)
oddlist = tmp
templist = []
k = 0
temptype = []
# Interleave oddballs into the base stream: every 5th slot (i+1 divisible by
# 5) is an oddball image.  NOTE(review): the original comment said "every 6
# images" but the code tests np.mod(i+1, 5) — confirm which is intended.
for i in np.arange(0,len(baselist)):
    if not np.mod(i+1,5):
        templist.append(oddlist[k])
        k += 1
        temptype.append('Oddball')
    else:
        templist.append(baselist[i])
        temptype.append('Base')
templist = templist[:n_images]
temptype = temptype[:n_images]
# Reuse the tail of the base images as padding shown before and after the
# main sequence.
paddlist = baselist[len(baselist)-pad_time*base_rate:]
paddtype = []
# NOTE(review): len(paddlist)/2 is float division on Python 3 (this script
# otherwise uses Python 2 print syntax) — confirm target interpreter.
for i in np.arange(0,len(paddlist)/2):
    paddtype.append('Base')
imagelist = np.concatenate((paddlist[:init_time*base_rate],templist,paddlist[len(paddlist)-init_time*base_rate:]))
imtype = np.concatenate((paddtype,temptype,paddtype))
# Start instance of the experiment controller
with ExperimentController('ShowImages', full_screen=True, version='dev') as ec:
    #write_hdf5(op.splitext(ec.data_fname)[0] + '_trials.hdf5',
    #           dict(imorder_shuf=imorder_shuf,
    #                imtype_shuf=imtype_shuf))
    fr = 1/ec.estimate_screen_fs() # Estimate frame rate
    realRR = ec.estimate_screen_fs()
    realRR = round(realRR)  # Rounded refresh rate (Hz)
    adj = fr/2 # Adjustment factor for accurate flip
    # Wait to fill the screen
    ec.set_visible(False)
    # Set the background color to gray
    ec.set_background_color(bgcolor)
    # Frame bookkeeping: one image every img_frames refreshes.
    n_frames = round(total_time * realRR)
    img_frames = int(round(realRR/base_rate))
    frame_img = np.arange(0,n_frames,img_frames)
    start_frame = (len(paddlist)/2-1)*img_frames
    end_frame = start_frame + len(templist)*img_frames
    #    x = np.linspace(0,np.pi,img_frames)
    #    multi_factor = np.sin(x)
    # Jitter each fixation-dot color change by 0-200 ms worth of frames.
    jitter = np.arange(0,realRR*0.2) # 0~200 ms jitter
    temp_flicker = np.arange(0,n_frames,int(realRR/2)) # Get temp_flicker frames: every .5 s
    delay = []
    for i in np.arange(0,len(temp_flicker)):
        rng.shuffle(jitter)
        delay.append(jitter[0])
    frame_flicker = temp_flicker + delay #
    # load up the image stack. The images in img_buffer are in the sequential
    # non-shuffled order
    img = []
    for im in imagelist:
        img_buffer = np.array(Image.open(im), np.uint8) / 255.
        if img_buffer.ndim == 2:
            # Grayscale image: replicate the single channel into RGB.
            img_buffer = np.tile(img_buffer[:, :, np.newaxis], [1, 1, 3])
        img.append(visual.RawImage(ec, img_buffer, scale=s))
        ec.check_force_quit()
    # make a blank image
    blank = visual.RawImage(ec, np.tile(bgcolor[0], np.multiply([s, s, 1], img_buffer.shape)))
    bright = visual.RawImage(ec, np.tile([1.], np.multiply([s, s, 1], img_buffer.shape)))
    # Calculate stimulus size
    d_pix = -np.diff(ec._convert_units([[3., 0.], [3., 0.]], 'deg', 'pix'), axis=-1)
    # do the drawing, then flip
    ec.set_visible(True)
    frametimes = []
    buttons = []
    ec.listen_presses()
    last_flip = -1
    # Create a fixation dot
    fix = visual.FixationDot(ec, colors=('k', 'k'))
    fix.set_radius(4, 0, 'pix')
    fix.draw()
    # Display instruction (7 seconds).
    # They will be different depending on the run number
    if int(ec.session) % 2:
        t = visual.Text(ec,text='Button press when the dot turns red - Ignore images',pos=[0,.1],font_size=40,color='k')
    else:
        t = visual.Text(ec,text='Button press for fake word',pos=[0,.1],font_size=40,color='k')
    t.draw()
    ec.flip()
    ec.wait_secs(5.0)
    # Show images
    count = 0 # This is for testing...
    # Initial blank
    init_blanktime = 1.
    fix.set_colors(colors=('k', 'k'))
    blank.draw(), fix.draw()
    ec.write_data_line('dotcolorFix', 'k')
    last_flip = ec.flip()
    # The iterable 'trial' randomizes the order of everything since it is
    # drawn from imorder_shuf
    trial = 0
    frame = 0
    flicker = 0
    imageframe = []
    t0 = time.time()
    # Main per-refresh loop: each image is shown for the first half of its
    # img_frames slot, then replaced by the blank for the second half.
    while frame < n_frames-1:
        if frame == frame_flicker[flicker]:
            # Time for the next fixation-dot color change.
            fix.set_colors(colors=(fix_color[flicker],fix_color[flicker]))
            ec.write_data_line('dotcolorFix', fix_color[flicker])
            if flicker < len(frame_flicker)-2:
                flicker += 1
        if frame >= frame_img[trial] and frame < frame_img[trial] + int(img_frames/2):
            if frame == frame_img[trial]:
                # First frame of this image: log its category.
                ec.write_data_line('imtype', imtype[trial])
            elif frame == start_frame:
                # Trigger 1 marks the start of the main (non-padding) block.
                ec.write_data_line('Start')
                trig = 1
                ec.stamp_triggers(trig, check='int4', wait_for_last = False)
            elif frame == end_frame:
                # Trigger 11 marks the end of the main block.
                ec.write_data_line('End')
                trig = 11
                ec.stamp_triggers(trig, check='int4', wait_for_last = False)
            fix.set_colors(colors=(fix_color[flicker],fix_color[flicker]))
            img[trial].draw()
            imageframe.append(frame)
            if frame == frame_img[trial] + int(img_frames/2) - 1:
                # Image's on-period is over; advance to the next trial.
                if trial < len(imtype)-2:
                    trial += 1
        else:
            blank.draw()
        fix.draw()
        last_flip = ec.flip()
        frame += 1
        ec.get_presses()
        frametimes.append(last_flip)
        ec.check_force_quit()
    # Now the experiment is over and we show 5 seconds of blank
    print "\n\n Elasped time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60)
    print "\n\n Targeted time: %0.2d mins %0.2d secs" % divmod(total_time, 60)
    blank.draw(), fix.draw()
    ec.flip()
    ec.wait_secs(5.0)
    pressed = ec.get_presses() # relative_to=0.0
| |
import unittest
from datetime import datetime
from django.test import SimpleTestCase, ignore_warnings
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.http import (
base36_to_int, cookie_date, escape_leading_slashes, http_date,
int_to_base36, is_safe_url, is_same_domain, parse_etags, parse_http_date,
quote_etag, urlencode, urlquote, urlquote_plus, urlsafe_base64_decode,
urlsafe_base64_encode, urlunquote, urlunquote_plus,
)
class URLEncodeTests(unittest.TestCase):
    """Tests for django.utils.http.urlencode with various value types."""

    def test_tuples(self):
        self.assertEqual(urlencode((('a', 1), ('b', 2), ('c', 3))), 'a=1&b=2&c=3')

    def test_dict(self):
        result = urlencode({'a': 1, 'b': 2, 'c': 3})
        # Dictionaries are treated as unordered.
        self.assertIn(result, [
            'a=1&b=2&c=3',
            'a=1&c=3&b=2',
            'b=2&a=1&c=3',
            'b=2&c=3&a=1',
            'c=3&a=1&b=2',
            'c=3&b=2&a=1',
        ])

    def test_dict_containing_sequence_not_doseq(self):
        # Without doseq the list is stringified and percent-encoded whole.
        self.assertEqual(urlencode({'a': [1, 2]}, doseq=False), 'a=%5B%271%27%2C+%272%27%5D')

    def test_dict_containing_sequence_doseq(self):
        self.assertEqual(urlencode({'a': [1, 2]}, doseq=True), 'a=1&a=2')

    def test_dict_containing_empty_sequence_doseq(self):
        self.assertEqual(urlencode({'a': []}, doseq=True), '')

    def test_multivaluedict(self):
        result = urlencode(MultiValueDict({
            'name': ['Adrian', 'Simon'],
            'position': ['Developer'],
        }), doseq=True)
        # MultiValueDicts are similarly unordered.
        self.assertIn(result, [
            'name=Adrian&name=Simon&position=Developer',
            'position=Developer&name=Adrian&name=Simon',
        ])

    def test_dict_with_bytes_values(self):
        self.assertEqual(urlencode({'a': b'abc'}, doseq=True), 'a=abc')

    def test_dict_with_sequence_of_bytes(self):
        self.assertEqual(urlencode({'a': [b'spam', b'eggs', b'bacon']}, doseq=True), 'a=spam&a=eggs&a=bacon')

    def test_dict_with_bytearray(self):
        self.assertEqual(urlencode({'a': bytearray(range(2))}, doseq=True), 'a=0&a=1')
        self.assertEqual(urlencode({'a': bytearray(range(2))}, doseq=False), 'a=%5B%270%27%2C+%271%27%5D')

    def test_generator(self):
        # Generators should behave like other sequences under doseq.
        def gen():
            yield from range(2)
        self.assertEqual(urlencode({'a': gen()}, doseq=True), 'a=0&a=1')
        self.assertEqual(urlencode({'a': gen()}, doseq=False), 'a=%5B%270%27%2C+%271%27%5D')
class Base36IntTests(SimpleTestCase):
    """Tests for base36_to_int / int_to_base36 conversions."""

    def test_roundtrip(self):
        for n in [0, 1, 1000, 1000000]:
            self.assertEqual(n, base36_to_int(int_to_base36(n)))

    def test_negative_input(self):
        with self.assertRaisesMessage(ValueError, 'Negative base36 conversion input.'):
            int_to_base36(-1)

    def test_to_base36_errors(self):
        # Only ints are accepted by int_to_base36.
        for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]:
            with self.assertRaises(TypeError):
                int_to_base36(n)

    def test_invalid_literal(self):
        for n in ['#', ' ']:
            with self.assertRaisesMessage(ValueError, "invalid literal for int() with base 36: '%s'" % n):
                base36_to_int(n)

    def test_input_too_large(self):
        # 13 base36 digits is the documented maximum input length.
        with self.assertRaisesMessage(ValueError, 'Base36 input too large'):
            base36_to_int('1' * 14)

    def test_to_int_errors(self):
        for n in [123, {1: 2}, (1, 2, 3), 3.141]:
            with self.assertRaises(TypeError):
                base36_to_int(n)

    def test_values(self):
        for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]:
            self.assertEqual(int_to_base36(n), b36)
            self.assertEqual(base36_to_int(b36), n)
class IsSafeURLTests(unittest.TestCase):
def test_bad_urls(self):
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
r'\\example.com',
r'\\\example.com',
r'/\\/example.com',
r'\\\example.com',
r'\\example.com',
r'\\//example.com',
r'/\/example.com',
r'\/example.com',
r'/\example.com',
'http:///example.com',
r'http:/\//example.com',
r'http:\/example.com',
r'http:/\example.com',
'javascript:alert("XSS")',
'\njavascript:alert(x)',
'\x08//example.com',
r'http://otherserver\@example.com',
r'http:\\testserver\@example.com',
r'http://testserver\me:pass@example.com',
r'http://testserver\@example.com',
r'http:\\testserver\confirm\me@example.com',
'http:999999999',
'ftp:9999999999',
'\n',
'http://[2001:cdba:0000:0000:0000:0000:3257:9652/',
'http://2001:cdba:0000:0000:0000:0000:3257:9652]/',
)
for bad_url in bad_urls:
with self.subTest(url=bad_url):
self.assertIs(is_safe_url(bad_url, allowed_hosts={'testserver', 'testserver2'}), False)
def test_good_urls(self):
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'http://testserver/confirm?email=me@example.com',
'/url%20with%20spaces/',
'path/http:2222222222',
)
for good_url in good_urls:
with self.subTest(url=good_url):
self.assertIs(is_safe_url(good_url, allowed_hosts={'otherserver', 'testserver'}), True)
def test_basic_auth(self):
# Valid basic auth credentials are allowed.
self.assertIs(is_safe_url(r'http://user:pass@testserver/', allowed_hosts={'user:pass@testserver'}), True)
def test_no_allowed_hosts(self):
# A path without host is allowed.
self.assertIs(is_safe_url('/confirm/me@example.com', allowed_hosts=None), True)
# Basic auth without host is not allowed.
self.assertIs(is_safe_url(r'http://testserver\@example.com', allowed_hosts=None), False)
def test_allowed_hosts_str(self):
self.assertIs(is_safe_url('http://good.com/good', allowed_hosts='good.com'), True)
self.assertIs(is_safe_url('http://good.co/evil', allowed_hosts='good.com'), False)
def test_secure_param_https_urls(self):
    """With require_https=True, https URLs (any case) and host-less
    paths are still considered safe."""
    secure_urls = (
        'https://example.com/p',
        'HTTPS://example.com/p',
        '/view/?param=http://example.com',
    )
    for url in secure_urls:
        with self.subTest(url=url):
            self.assertIs(is_safe_url(url, allowed_hosts={'example.com'}, require_https=True), True)
def test_secure_param_non_https_urls(self):
    """With require_https=True, http, ftp, and scheme-relative URLs are
    rejected even for an allowed host."""
    insecure_urls = (
        'http://example.com/p',
        'ftp://example.com/p',
        '//example.com/p',
    )
    for url in insecure_urls:
        with self.subTest(url=url):
            self.assertIs(is_safe_url(url, allowed_hosts={'example.com'}, require_https=True), False)
class URLSafeBase64Tests(unittest.TestCase):
    """Round-trip behaviour of the URL-safe base64 helpers."""

    def test_roundtrip(self):
        """Encoding then decoding returns the original bytes."""
        payload = b'foo'
        roundtripped = urlsafe_base64_decode(urlsafe_base64_encode(payload))
        self.assertEqual(payload, roundtripped)
class URLQuoteTests(unittest.TestCase):
    """Tests for the urlquote/urlunquote (and *_plus) helper wrappers,
    including non-ASCII input and the ``safe`` parameter."""

    def test_quote(self):
        self.assertEqual(urlquote('Paris & Orl\xe9ans'), 'Paris%20%26%20Orl%C3%A9ans')
        self.assertEqual(urlquote('Paris & Orl\xe9ans', safe="&"), 'Paris%20&%20Orl%C3%A9ans')

    def test_unquote(self):
        self.assertEqual(urlunquote('Paris%20%26%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
        self.assertEqual(urlunquote('Paris%20&%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')

    def test_quote_plus(self):
        # The *_plus variants encode spaces as '+'.
        self.assertEqual(urlquote_plus('Paris & Orl\xe9ans'), 'Paris+%26+Orl%C3%A9ans')
        self.assertEqual(urlquote_plus('Paris & Orl\xe9ans', safe="&"), 'Paris+&+Orl%C3%A9ans')

    def test_unquote_plus(self):
        self.assertEqual(urlunquote_plus('Paris+%26+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
        self.assertEqual(urlunquote_plus('Paris+&+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
class IsSameDomainTests(unittest.TestCase):
    """Host vs. pattern matching rules of is_same_domain()."""

    def test_good(self):
        """Exact hosts, dot-prefixed wildcard patterns, and matching
        ports all match."""
        matching = (
            ('example.com', 'example.com'),
            ('example.com', '.example.com'),
            ('foo.example.com', '.example.com'),
            ('example.com:8888', 'example.com:8888'),
            ('example.com:8888', '.example.com:8888'),
            ('foo.example.com:8888', '.example.com:8888'),
        )
        for host, pattern in matching:
            self.assertIs(is_same_domain(host, pattern), True)

    def test_bad(self):
        """Different hosts, subdomains against a bare-domain pattern,
        and mismatched ports must not match."""
        non_matching = (
            ('example2.com', 'example.com'),
            ('foo.example.com', 'example.com'),
            ('example.com:9999', 'example.com:8888'),
        )
        for host, pattern in non_matching:
            self.assertIs(is_same_domain(host, pattern), False)
class ETagProcessingTests(unittest.TestCase):
    """parse_etags() / quote_etag() behaviour for strong, weak, escaped,
    and invalid ETag values."""

    def test_parsing(self):
        self.assertEqual(
            parse_etags(r'"" , "etag", "e\\tag", W/"weak"'),
            ['""', '"etag"', r'"e\\tag"', 'W/"weak"']
        )
        # '*' matches any entity tag.
        self.assertEqual(parse_etags('*'), ['*'])
        # Ignore RFC 2616 ETags that are invalid according to RFC 7232.
        self.assertEqual(parse_etags(r'"etag", "e\"t\"ag"'), ['"etag"'])

    def test_quoting(self):
        self.assertEqual(quote_etag('etag'), '"etag"')  # unquoted
        self.assertEqual(quote_etag('"etag"'), '"etag"')  # quoted
        self.assertEqual(quote_etag('W/"etag"'), 'W/"etag"')  # quoted, weak
class HttpDateProcessingTests(unittest.TestCase):
    """Formatting and parsing of HTTP date strings (RFC 1123, RFC 850,
    and asctime formats)."""

    def test_http_date(self):
        t = 1167616461.0
        self.assertEqual(http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT')

    @ignore_warnings(category=RemovedInDjango30Warning)
    def test_cookie_date(self):
        # cookie_date() is deprecated per the decorator above; its output
        # uses dashes between day/month/year.
        t = 1167616461.0
        self.assertEqual(cookie_date(t), 'Mon, 01-Jan-2007 01:54:21 GMT')

    def test_parsing_rfc1123(self):
        parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
        self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))

    def test_parsing_rfc850(self):
        parsed = parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
        self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))

    def test_parsing_asctime(self):
        parsed = parse_http_date('Sun Nov  6 08:49:37 1994')
        self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
class EscapeLeadingSlashesTests(unittest.TestCase):
    """escape_leading_slashes() percent-encodes a second leading slash so
    the URL cannot be treated as scheme-relative."""

    def test(self):
        cases = {
            '//example.com': '/%2Fexample.com',
            '//': '/%2F',
        }
        for url, expected in cases.items():
            with self.subTest(url=url):
                self.assertEqual(escape_leading_slashes(url), expected)
| |
# -*- coding: utf-8 -*-
from gi.repository import GLib as glib
import logging
import os
from window import Window
from player import Player
from protocol import Protocol
log = logging.getLogger(__name__)
class Controller(object):
    """Glue between the server protocol, the display window and the player.

    Message handlers only record *pending* state (``to_status``,
    ``seek_to``, ``source_to``); the periodic ``run_checks`` tick then
    reconciles ``current_status`` with it on the GLib main loop.

    Status codes (as shown by the transition log messages below):
    0 = stopped, 1 = playing, 2 = paused, 4 = maintenance.

    Fix applied: ``log.warn`` (deprecated alias, removed in Python 3.13)
    replaced by ``log.warning``.
    """

    def __init__(self, url, token, fullscreen):
        self.proto = Protocol(url, token)
        self.window = Window(fullscreen)
        self.is_running = False
        self.to_status = 0       # status the server asked us to move to
        self.current_status = 0  # status we are actually in
        self.got_poke = False    # server poked us since the last tick
        self.seek_to = None     # pending seek target in seconds
        self.source_to = None   # pending media source URL
        # Get the ad image
        base_dir = os.path.dirname(os.path.abspath(__file__))
        resources = '{}{}{}{}'.format(base_dir, os.sep, "resources", os.sep)
        # NOTE(review): with an absolute POSIX path this yields
        # "file:////..." -- players tolerate it, but 'file://{}' would be
        # canonical.  Confirm before changing.
        self.stopped_image_url = 'file:///{}'.format(os.path.join(resources, "standby.png"))
        self.maintenance_image_url = 'file:///{}'.format(os.path.join(resources, "maintenance.png"))
        # Show image at start
        self.player = Player(self.window,
                             self.stopped_image_url,
                             self.player_finished,
                             self.player_error,
                             mode=Player.IMAGE)

    def player_finished(self):
        """Player callback: media ended -> request stop, unless we are in
        maintenance mode (status 4)."""
        if self.to_status != 4:
            self.to_status = 0

    def player_error(self, error):
        """Player callback: playback failed -> request stop, unless we are
        in maintenance mode (status 4)."""
        if self.to_status != 4:
            self.to_status = 0

    def on_unknown_msg(self, query, data, error):
        """Fallback handler for unrecognized message types."""
        log.info(u"Unknown msg: %s: %s (%s)", query, data, error)

    def write_status(self, status):
        """Report *status* back to the server."""
        self.proto.write_msg('playerdev', {'status': status}, 'status_change')

    def on_player_msg(self, query, data, error):
        """Handle a 'playerdev' message by recording pending state that
        run_checks() will act on at its next tick."""
        if error == 1:
            # Fixed: was the deprecated log.warn() alias.
            log.warning(u"Server responded: %s", data['message'])
            return
        # Poke message from server: Something happened on the server that might interest us.
        if query == 'poke':
            self.got_poke = True
            return
        # Sets our status (stopped, playing, paused)
        if query == 'set_status':
            self.to_status = data.get('status')
            return
        # Seek to position (in seconds)
        if query == 'seek':
            self.seek_to = data.get('time')
            return
        # Set our video source
        if query == 'source':
            self.source_to = data.get('url')
            self.to_status = 1
            return

    def on_login_msg(self, query, data, error):
        """Handle the login reply: shut down on error, otherwise report
        our current status to the server."""
        if error == 1:
            # Fixed: was the deprecated log.warn() alias.
            log.warning(u"Server responded: %s", data['message'])
            self.close()
            return
        # Okay, we got in. Let's get on with it.
        log.info(u"Logged in as %s", data['name'])
        # Send initial status
        self.write_status(self.current_status)

    def run_checks(self):
        """One tick of the control loop: drain incoming messages, then
        reconcile current_status with the pending state, and re-arm this
        callback on the GLib main loop."""
        if not self.is_running:
            return
        timeout = 100
        # Handle incoming messages
        d = self.proto.read()
        # Handle all the messages on queue
        while d:
            if d:
                # Handle this message
                mtype = d.get('type', 'unknown')
                query = d.get('query')
                data = d.get('data', {})
                # NOTE: error becomes a bool; the handlers compare it to 1,
                # which works because True == 1 in Python.
                error = d.get('error', 0) == 1
                cbs = {
                    'login': self.on_login_msg,
                    'playerdev': self.on_player_msg,
                    'unknown': self.on_unknown_msg,
                }
                cbs[mtype if mtype in cbs else 'unknown'](query, data, error)
            # Read next
            d = self.proto.read()
        # If we are settings a remote source, go ahead
        if self.source_to and self.to_status == 1:
            # If we are currently playing, stop.
            if self.player:
                self.player.close()
            # Set up statuses
            self.current_status = 1
            self.write_status(self.current_status)
            # Set up a new player
            self.player = Player(self.window, self.source_to, self.player_finished, self.player_error)
            self.player.play()
            # Log and clear remote op
            log.info(u"Switching to %s", self.source_to)
            self.source_to = None
            # Timeout and quit here. We're waiting.
            timeout = 1000
        # Poke the server back, hard. MAKE it give us videos.
        if self.got_poke:
            self.got_poke = False
            # If we are stopped currently, and we're not going to start anything, send status
            if self.to_status == 0 and self.current_status == 0:
                self.write_status(self.current_status)
                # Timeout. We're waiting.
                timeout = 1000
        # If we need to seek, do so now. Only when paused or playing.
        if self.current_status != 0 and self.seek_to:
            self.player.seek(self.seek_to)
            log.info(u"Seeking to %d", self.seek_to)
            self.seek_to = None
        # Check if we need to stop
        if self.current_status != 0 and self.to_status == 0:
            if self.player:
                self.player.close()
            self.write_status(0)
            self.current_status = 0
            # Back to the standby image.
            self.player = Player(self.window,
                                 self.stopped_image_url,
                                 self.player_finished,
                                 self.player_error,
                                 mode=Player.IMAGE)
            log.info(u"Status = Stopped")
        # Check if we need to pause
        if self.current_status == 1 and self.to_status == 2:
            self.player.pause()
            self.write_status(2)
            self.current_status = 2
            log.info(u"Status = Paused")
        # If paused and remote requests start, do so
        elif self.current_status == 2 and self.to_status == 1 and self.player:
            self.player.play()
            self.write_status(1)
            self.current_status = 1
            log.info(u"Status = Playing")
        # If stopped and remote requests start, do so
        elif self.current_status == 0 and self.to_status == 1:
            self.write_status(0)
            self.current_status = 0
            log.info(u"Status = Stopped")
        # If maintenance and remote requests start, do so
        elif self.current_status == 4 and self.to_status == 1:
            self.write_status(0)
            self.current_status = 0
            log.info(u"Status = Stopped")
        # Maintenance
        if self.current_status != 4 and self.to_status == 4:
            if self.player:
                self.player.close()
            self.write_status(4)
            self.current_status = 4
            self.player = Player(self.window,
                                 self.maintenance_image_url,
                                 self.player_finished,
                                 self.player_error,
                                 mode=Player.IMAGE)
            timeout = 100
        # Continue listening
        glib.timeout_add(timeout, self.run_checks)

    def run(self):
        """Start the check loop and enter the window main loop (blocks)."""
        self.is_running = True
        glib.timeout_add(100, self.run_checks)
        self.window.run()

    def close(self):
        """Tear down player, window and protocol exactly once."""
        if self.is_running:
            self.is_running = False
            if self.player:
                self.player.close()
            self.window.close()
            self.proto.close()
| |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
#
"""
Build Docker Artifacts
On build this is loosely modeled after https://github.com/docker/build-push-action
- same in that we auto add labels from github action metadata.
- differs in that we use `dev` for latest.
- differs in that latest refers to last tagged revision.
We also support running functional tests and image cve scanning before pushing.
"""
import logging
import os
import time
import subprocess
import sys
from datetime import datetime
from pathlib import Path
import click
log = logging.getLogger("dockerpkg")
# Dockerfile template fragment: the shared build stage.  Installs distro
# build deps and poetry into a venv under /usr/local, adds the core and
# provider packages, then installs the providers selected by the
# `providers` build ARG.  Placeholders like {base_build_image} are filled
# in by Image.render().
BUILD_STAGE = """\
# Dockerfiles are generated from tools/dev/dockerpkg.py
FROM {base_build_image} as build-env
# pre-requisite distro deps, and build env setup
RUN adduser --disabled-login --gecos "" custodian
RUN apt-get --yes update
RUN apt-get --yes install build-essential curl python3-venv python3-dev --no-install-recommends
RUN python3 -m venv /usr/local
RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3
WORKDIR /src
# Add core & aws packages
ADD pyproject.toml poetry.lock README.md /src/
ADD c7n /src/c7n/
RUN . /usr/local/bin/activate && $HOME/.poetry/bin/poetry install --no-dev
RUN . /usr/local/bin/activate && pip install -q wheel
RUN . /usr/local/bin/activate && pip install -q aws-xray-sdk psutil jsonpatch
# Add provider packages
ADD tools/c7n_gcp /src/tools/c7n_gcp
RUN rm -R tools/c7n_gcp/tests
ADD tools/c7n_azure /src/tools/c7n_azure
RUN rm -R tools/c7n_azure/tests_azure
ADD tools/c7n_kube /src/tools/c7n_kube
RUN rm -R tools/c7n_kube/tests
ADD tools/c7n_openstack /src/tools/c7n_openstack
RUN rm -R tools/c7n_openstack/tests
# Install requested providers
ARG providers="azure gcp kube openstack"
RUN . /usr/local/bin/activate && \\
for pkg in $providers; do cd tools/c7n_$pkg && \\
$HOME/.poetry/bin/poetry install && cd ../../; done
RUN mkdir /output
"""
# Dockerfile template fragment: runtime stage on a full Ubuntu base.
# Copies the venv + sources from build-env and runs as an unprivileged
# `custodian` user.
TARGET_UBUNTU_STAGE = """\
FROM {base_target_image}
LABEL name="{name}" \\
repository="http://github.com/cloud-custodian/cloud-custodian"
COPY --from=build-env /src /src
COPY --from=build-env /usr/local /usr/local
COPY --from=build-env /output /output
RUN DEBIAN_FRONTEND=noninteractive apt-get --yes update \\
&& apt-get --yes install python3 python3-venv --no-install-recommends \\
&& rm -Rf /var/cache/apt \\
&& rm -Rf /var/lib/apt/lists/* \\
&& rm -Rf /var/log/*
RUN adduser --disabled-login --gecos "" custodian
USER custodian
WORKDIR /home/custodian
ENV LC_ALL="C.UTF-8" LANG="C.UTF-8"
VOLUME ["/home/custodian"]
ENTRYPOINT ["{entrypoint}"]
CMD ["--help"]
"""
# Dockerfile template fragment: runtime stage on a distroless base.  The
# user database is copied from build-env since distroless images have no
# adduser.
TARGET_DISTROLESS_STAGE = """\
FROM {base_target_image}
LABEL name="{name}" \\
repository="http://github.com/cloud-custodian/cloud-custodian"
COPY --from=build-env /src /src
COPY --from=build-env /usr/local /usr/local
COPY --from=build-env /etc/passwd /etc/passwd
COPY --from=build-env /etc/group /etc/group
COPY --chown=custodian:custodian --from=build-env /output /output
COPY --chown=custodian:custodian --from=build-env /home/custodian /home/custodian
USER custodian
WORKDIR /home/custodian
ENV LC_ALL="C.UTF-8" LANG="C.UTF-8"
VOLUME ["/home/custodian"]
ENTRYPOINT ["{entrypoint}"]
CMD ["--help"]
"""
# Extra build-stage fragment for the c7n-org image.
BUILD_ORG = """\
# Install c7n-org
ADD tools/c7n_org /src/tools/c7n_org
RUN . /usr/local/bin/activate && cd tools/c7n_org && $HOME/.poetry/bin/poetry install
"""
# Extra build-stage fragment for the c7n-mailer image.
BUILD_MAILER = """\
# Install c7n-mailer
ADD tools/c7n_mailer /src/tools/c7n_mailer
RUN . /usr/local/bin/activate && cd tools/c7n_mailer && $HOME/.poetry/bin/poetry install
"""
# Extra build-stage fragment for the c7n-policystream image: compiles
# libgit2 from source, installs the package, and sanity-checks it with
# pytest inside the build (policystream is not covered in CI).
BUILD_POLICYSTREAM = """\
# Compile libgit2
RUN DEBIAN_FRONTEND=noninteractive apt-get -y install wget cmake libssl-dev libffi-dev git
RUN mkdir build && \\
wget -q https://github.com/libgit2/libgit2/releases/download/v1.0.0/libgit2-1.0.0.tar.gz && \\
cd build && \\
tar xzf ../libgit2-1.0.0.tar.gz && \\
cd libgit2-1.0.0 && \\
mkdir build && cd build && \\
cmake .. && \\
make install && \\
rm -Rf /src/build
# Install c7n-policystream
ADD tools/c7n_policystream /src/tools/c7n_policystream
RUN . /usr/local/bin/activate && cd tools/c7n_policystream && $HOME/.poetry/bin/poetry install
# Verify the install
# - policystream is not in ci due to libgit2 compilation needed
# - as a sanity check to distributing known good assets / we test here
RUN . /usr/local/bin/activate && pytest tools/c7n_policystream
"""
class Image:
    """A named Docker image assembled from Dockerfile template fragments.

    ``build`` and ``target`` are lists of template strings; render()
    concatenates them and expands {placeholders} from ``defaults``
    overlaid with this image's ``metadata``.
    """

    defaults = dict(base_build_image="ubuntu:20.04", base_target_image="ubuntu:20.04")

    def __init__(self, metadata, build, target):
        self.metadata = metadata
        self.build = build
        self.target = target

    @property
    def repo(self):
        """Repository name; falls back to the image name."""
        return self.metadata.get("repo", self.metadata["name"])

    @property
    def tag_prefix(self):
        """Optional prefix applied to every tag (e.g. 'distroless-')."""
        return self.metadata.get("tag_prefix", "")

    def render(self):
        """Return the complete Dockerfile text for this image."""
        fragments = list(self.build) + list(self.target)
        params = {**self.defaults, **self.metadata}
        return "\n".join(fragments).format(**params)

    def clone(self, metadata, target=None):
        """Copy of this image with metadata overrides and an optional
        replacement target stage."""
        merged = {**self.metadata, **metadata}
        return Image(merged, self.build, target or self.target)
# Dockerfile path -> image definition.  `generate` renders these files to
# disk and `build` iterates them; distroless variants are registered at
# import time by the loop further below.
ImageMap = {
    "docker/cli": Image(
        dict(
            name="cli",
            repo="c7n",
            description="Cloud Management Rules Engine",
            entrypoint="/usr/local/bin/custodian",
        ),
        build=[BUILD_STAGE],
        target=[TARGET_UBUNTU_STAGE],
    ),
    "docker/org": Image(
        dict(
            name="org",
            repo="c7n-org",
            description="Cloud Custodian Organization Runner",
            entrypoint="/usr/local/bin/c7n-org",
        ),
        build=[BUILD_STAGE, BUILD_ORG],
        target=[TARGET_UBUNTU_STAGE],
    ),
    "docker/mailer": Image(
        dict(
            name="mailer",
            description="Cloud Custodian Notification Delivery",
            entrypoint="/usr/local/bin/c7n-mailer",
        ),
        build=[BUILD_STAGE, BUILD_MAILER],
        target=[TARGET_UBUNTU_STAGE],
    ),
    "docker/policystream": Image(
        dict(
            name="policystream",
            description="Custodian policy changes streamed from Git",
            entrypoint="/usr/local/bin/c7n-policystream",
        ),
        build=[BUILD_STAGE, BUILD_POLICYSTREAM],
        target=[TARGET_UBUNTU_STAGE],
    ),
}
def human_size(size, precision=2):
    """Format a byte count as a human-readable string (base 1024).

    Args:
        size: number of bytes.
        precision: decimal places in the rendered value.

    Returns:
        A string such as ``"1.50 MB"``.
    """
    # interesting discussion on 1024 vs 1000 as base
    # https://en.wikipedia.org/wiki/Binary_prefix
    suffixes = ["B", "KB", "MB", "GB", "TB", "PB"]
    index = 0
    # Clamp at the last suffix so absurdly large inputs cannot index past
    # the end of the list (the original raised IndexError beyond PB).
    while size > 1024 and index < len(suffixes) - 1:
        index += 1
        size = size / 1024.0
    return "%.*f %s" % (precision, size, suffixes[index])
@click.group()
def cli():
    """Custodian Docker Packaging Tool

    slices, dices, and blends :-)
    """
    # Root of the click group: configure logging once for all subcommands
    # and quiet the chatty docker / urllib3 loggers.  (The docstring above
    # is the CLI --help text; do not edit it casually.)
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s:%(levelname)s %(message)s"
    )
    logging.getLogger("docker").setLevel(logging.INFO)
    logging.getLogger("urllib3").setLevel(logging.INFO)
# Register a distroless variant of every image: built on debian-slim and
# shipped on the gcr.io distroless python3 base.  The list() snapshot
# avoids mutating ImageMap while iterating it.
for name, image in list(ImageMap.items()):
    ImageMap[name + "-distroless"] = image.clone(
        dict(
            tag_prefix="distroless-",
            base_build_image="debian:10-slim",
            base_target_image="gcr.io/distroless/python3-debian10",
        ),
        target=[TARGET_DISTROLESS_STAGE],
    )
@cli.command()
@click.option("-p", "--provider", multiple=True)
@click.option(
    "-r", "--registry", multiple=True, help="Registries for image repo on tag and push"
)
@click.option("-t", "--tag", help="Static tag for the image")
@click.option("--push", is_flag=True, help="Push images to registries")
@click.option(
    "--test", help="Run lightweight functional tests with image", is_flag=True
)
@click.option("--scan", help="scan the image for cve with trivy", is_flag=True)
@click.option("-q", "--quiet", is_flag=True)
@click.option("-i", "--image", multiple=True)
@click.option("-v", "--verbose", is_flag=True)
def build(provider, registry, tag, image, quiet, push, test, scan, verbose):
    """Build custodian docker images...

    python tools/dev/dockerpkg.py --test -i cli -i org -i mailer
    """
    # docker is imported lazily so the `generate` subcommand works without it.
    try:
        import docker
    except ImportError:
        print("python docker client library required")
        sys.exit(1)
    if quiet:
        logging.getLogger().setLevel(logging.WARNING)
    if verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    client = docker.from_env()
    # Nomenclature wise these are the set of version tags, independent
    # of registry / repo name, that will be applied to all images.
    #
    # ie. Build out some common suffixes for the image
    #
    # Note there's a bit of custodian specific logic in how we get env tags.
    # see the function docstring for more details.
    image_tags = get_env_tags(tag)
    build_args = None
    if provider not in (None, ()):
        # Fixed: the original repeated the truthiness test in a ternary and
        # fell back to `[]`, which is not a valid buildargs mapping and was
        # unreachable anyway inside this branch.
        build_args = {"providers": " ".join(sorted(provider))}
    for path, image_def in ImageMap.items():
        _, image_name = path.split("/")
        # Only build images explicitly requested via -i (all when omitted).
        if image and image_name not in image:
            continue
        image_id = build_image(client, image_name, image_def, path, build_args)
        image_refs = tag_image(client, image_id, image_def, registry, image_tags)
        if test:
            test_image(image_id, image_name, provider)
        if scan:
            scan_image(":".join(image_refs[0]))
        if push:
            # Pushes are flaky over the network; retry on RuntimeError.
            retry(3, (RuntimeError,), push_image, client, image_id, image_refs)
def get_labels(image):
    """Build the OCI-standard labels for *image*, preferring GitHub
    Actions metadata and falling back to local git information."""
    # Standard Container Labels / Metadata
    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
    labels = {
        "org.opencontainers.image.created": datetime.utcnow().isoformat(),
        "org.opencontainers.image.licenses": "Apache-2.0",
        "org.opencontainers.image.documentation": "https://cloudcustodian.io/docs",
        "org.opencontainers.image.title": image.metadata["name"],
        "org.opencontainers.image.description": image.metadata["description"],
    }
    # Outside of a GitHub workflow, derive source/revision from git itself.
    env = get_github_env() or get_git_env()
    if env.get("repository"):
        labels["org.opencontainers.image.source"] = env["repository"]
    if env.get("sha"):
        labels["org.opencontainers.image.revision"] = env["sha"]
    return labels
def retry(retry_count, exceptions, func, *args, **kw):
    """Call ``func(*args, **kw)``, retrying up to *retry_count* attempts
    on *exceptions*; the last failure is re-raised.

    Fixes over the original: ``log.warn`` (deprecated alias) replaced by
    ``log.warning``, and the pointless 5s sleep before the final re-raise
    removed (sleep now only happens *between* attempts).
    """
    for attempt in range(1, retry_count + 1):
        try:
            return func(*args, **kw)
        except exceptions:
            log.warning('retrying on %s' % func)
            if attempt == retry_count:
                raise
            time.sleep(5)
def get_github_env():
    """Collect the GitHub Actions variables that are set, keyed by short
    names; returns an empty dict outside of a GitHub workflow."""
    names = {
        "sha": "GITHUB_SHA",
        "event": "GITHUB_EVENT_NAME",
        "repository": "GITHUB_REPOSITORY",
        "workflow": "GITHUB_WORKFLOW",
        "actor": "GITHUB_ACTOR",
        "event_path": "GITHUB_EVENT_PATH",
        "workspace": "GITHUB_WORKSPACE",
        "actions": "GITHUB_ACTIONS",
        "ref": "GITHUB_REF",
    }
    env = {}
    for key, var in names.items():
        value = os.environ.get(var)
        if value:  # drop unset and empty values alike
            env[key] = value
    return env
def get_git_env():
    """Fallback build metadata read from the local git checkout."""
    head_sha = subprocess.check_output(
        ["git", "rev-parse", "HEAD"]).decode("utf8")
    return {
        "sha": head_sha,
        "repository": "https://github.com/cloud-custodian/cloud-custodian",
    }
def get_image_repo_tags(image, registries, tags):
    """Expand a (repo, tag) pair for every registry/tag combination.

    With no registries, a single local (un-prefixed) repo name is used.
    The image's tag_prefix is prepended to every tag.
    """
    # get a local tag with name
    targets = registries or [""]
    pairs = []
    for version in tags:
        for registry in targets:
            repo = f"{registry}/{image.repo}".lstrip("/")
            pairs.append((repo, image.tag_prefix + version))
    return pairs
def get_env_tags(cli_tag):
    """So we're encoding quite a bit of custodian release workflow logic here.

    Github actions product -dev and release images from same action workflow.
    Azure pipelines runs functional tests and produces nightly images.

    End result is intended to be

    |name|label|frequency|mutability|testing|
    |----|-----|---------|----------|-------|
    |c7n |latest |release |mutable |light-functional|
    |c7n |0.9.1 |release |immutable |light-functional|
    |c7n |nightly |daily |mutable |functional|
    |c7n |2020-04-01 |daily |immutable |functional|
    |c7n |dev |per-commit |mutable |light-functional|
    |c7n |distroless-dev |per-commit|mutable |light-functional|
    |c7n |distroless-latest |release |mutable |functional|
    |c7n |distroless-2020-04-01 |daily |immutable |functional|
    |c7n |distroless-0.9.1 |release |immutable |light-functional|

    This function encodes that the github logic by checking github env vars
    if passed --tag=auto on the cli to distinguish dev/release images.

    It also handles the azure workflow by checking for --tag=nightly and
    adding a date tag.
    """
    image_tags = []
    hub_env = get_github_env()
    if "ref" in hub_env and cli_tag == "auto":
        # GITHUB_REF looks like refs/tags/<version> or refs/heads/<branch>.
        _, rtype, rvalue = hub_env["ref"].split("/", 2)
        if rtype == "tags":
            # A release tag also moves the mutable "latest" label.
            image_tags.append("latest")
            image_tags.append(rvalue)
        elif rtype == "heads" and rvalue == "master":
            image_tags.append("dev")
        elif rtype == "heads":  # branch
            image_tags.append(rvalue)
    if cli_tag == "nightly":
        image_tags.append(cli_tag)
        # Immutable date-stamped companion to the mutable "nightly" label.
        image_tags.append(datetime.utcnow().strftime("%Y-%m-%d"))
    if cli_tag not in ("nightly", "auto"):
        # An explicit static tag overrides everything else.
        image_tags = [cli_tag]
    # filter(None, ...) drops a None/empty cli_tag.
    return list(filter(None, image_tags))
def tag_image(client, image_id, image_def, registries, env_tags):
    """Apply every registry/tag combination to the built image.

    Returns the list of (repo, tag) pairs that were applied, for use by
    the push/scan steps.
    """
    refs = get_image_repo_tags(image_def, registries, env_tags)
    target = client.images.get(image_id)
    for repo_name, version in refs:
        target.tag(repo_name, version)
    return refs
def scan_image(image_ref):
    """Run a trivy CVE scan against *image_ref*; raises on a non-zero
    exit status."""
    hub_env = get_github_env()
    if "workspace" in hub_env:
        # In GitHub Actions trivy is vendored into the workspace bin dir.
        trivy = [os.path.join(hub_env["workspace"], "bin", "trivy")]
    else:
        trivy = ["trivy"]
    subprocess.check_call(trivy + [image_ref], stderr=subprocess.STDOUT)
def test_image(image_id, image_name, providers):
    """Run the lightweight docker functional test suite against the image."""
    env = dict(os.environ)
    # e.g. image "cli-distroless" exports CUSTODIAN_CLI_IMAGE=<id tail>.
    image_var = "CUSTODIAN_%s_IMAGE" % image_name.upper().split("-", 1)[0]
    env["TEST_DOCKER"] = "yes"
    env[image_var] = image_id.split(":")[-1]
    if providers not in (None, ()):
        env["CUSTODIAN_PROVIDERS"] = " ".join(providers)
    # Use the pytest that sits next to the running interpreter.
    pytest_bin = Path(sys.executable).parent / "pytest"
    subprocess.check_call(
        [pytest_bin, "-v", "tests/test_docker.py"],
        env=env,
        stderr=subprocess.STDOUT,
    )
def push_image(client, image_id, image_refs):
    """Push every (repo, tag) in *image_refs*, logging in to Docker Hub
    first when HUB_USER/HUB_TOKEN are set.

    Raises:
        RuntimeError: on login failure or any "error" record in the push
            stream (callers wrap this in retry()).
    """
    if "HUB_TOKEN" in os.environ and "HUB_USER" in os.environ:
        log.info("docker hub login %s" % os.environ["HUB_USER"])
        result = client.login(os.environ["HUB_USER"], os.environ["HUB_TOKEN"])
        if result.get("Status", "") != "Login Succeeded":
            raise RuntimeError("Docker Login failed %s" % (result,))
    for (repo, tag) in image_refs:
        log.info(f"Pushing image {repo}:{tag}")
        # Stream the daemon's decoded progress records; abort on "error".
        for line in client.images.push(repo, tag, stream=True, decode=True):
            if "status" in line:
                log.debug("%s id:%s" % (line["status"], line.get("id", "n/a")))
            elif "error" in line:
                log.warning("Push error %s" % (line,))
                raise RuntimeError("Docker Push Failed\n %s" % (line,))
            else:
                log.info("other %s" % (line,))
def build_image(client, image_name, image_def, dfile_path, build_args):
    """Build *image_def* from the Dockerfile at *dfile_path* and return
    the short (12-char) image id.

    Raises:
        RuntimeError: when the docker build stream never reported an
            image id (i.e. the build failed).
    """
    log.info("Building %s image (--verbose for build output)" % image_name)
    labels = get_labels(image_def)
    stream = client.api.build(
        path=os.path.abspath(os.getcwd()),
        dockerfile=dfile_path,
        buildargs=build_args,
        labels=labels,
        rm=True,
        pull=True,
        decode=True,
    )
    built_image_id = None
    for chunk in stream:
        if "stream" in chunk:
            log.debug(chunk["stream"].strip())
        elif "status" in chunk:
            log.debug(chunk["status"].strip())
        elif "aux" in chunk:
            # The final "aux" record carries the built image id.
            built_image_id = chunk["aux"].get("ID")
    if not built_image_id:
        # Fixed: was a bare `assert`, which is stripped under `python -O`.
        raise RuntimeError(
            "docker build for %s produced no image id" % image_name)
    if built_image_id.startswith("sha256:"):
        built_image_id = built_image_id[7:]
    built_image = client.images.get(built_image_id)
    log.info(
        "Built %s image Id:%s Size:%s"
        % (image_name, built_image_id[:12], human_size(built_image.attrs["Size"]),)
    )
    return built_image_id[:12]
@cli.command()
def generate():
    """Generate dockerfiles"""
    # The docstring doubles as the click --help text; keep it unchanged.
    for dockerfile_path, image in ImageMap.items():
        Path(dockerfile_path).write_text(image.render())
if __name__ == "__main__":
cli()
| |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
import datetime
import errno
import graphviz
import json
import logging
import os
import pprint
import re
import requests
import shutil
import sys
import tarfile
import tempfile
import threading
import time
import docker
import git
import jinja2
from oslo_config import cfg
from requests import exceptions as requests_exc
import six
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../..'))
# NOTE(SamYaple): Update the search patch to prefer PROJECT_ROOT as the source
# of packages to import if we are using local tools/build.py
# instead of pip installed kolla-build tool
if PROJECT_ROOT not in sys.path:
sys.path.insert(0, PROJECT_ROOT)
from kolla.common import config as common_config
from kolla.common import task
from kolla.template import filters as jinja_filters
from kolla.template import methods as jinja_methods
from kolla import version
def make_a_logger(conf=None, image_name=None):
    """Return a configured logger for *image_name* (or this module).

    Logs to stdout unless ``conf.logs_dir`` is set and an image name is
    given, in which case a lazily-opened per-image log file is used.
    Level is DEBUG when ``conf.debug`` is truthy, otherwise INFO.
    """
    name = ".".join([__name__, image_name]) if image_name else __name__
    log = logging.getLogger(name)
    if not log.handlers:
        use_file = conf is not None and conf.logs_dir and image_name
        if use_file:
            filename = os.path.join(conf.logs_dir, "%s.log" % image_name)
            handler = logging.FileHandler(filename, delay=True)
        else:
            handler = logging.StreamHandler(sys.stdout)
            # Avoid duplicate lines via ancestor loggers on stdout.
            log.propagate = False
        handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
        log.addHandler(handler)
    debug = conf is not None and conf.debug
    log.setLevel(logging.DEBUG if debug else logging.INFO)
    return log
LOG = make_a_logger()
class KollaDirNotFoundException(Exception):
    """A required kolla directory could not be located (name-derived;
    raise sites live elsewhere in this module)."""
    pass


class KollaUnknownBuildTypeException(Exception):
    """An unrecognized build type was requested."""
    pass


class KollaMismatchBaseTypeException(Exception):
    """Conflicting base image types were configured."""
    pass


class KollaRpmSetupUnknownConfig(Exception):
    """Unknown rpm_setup configuration value."""
    pass
# Image status constants.
#
# TODO(harlowja): use enum lib in the future??
STATUS_CONNECTION_ERROR = 'connection_error'  # docker daemon/registry unreachable
STATUS_PUSH_ERROR = 'push_error'              # unexpected failure during push
STATUS_ERROR = 'error'                        # generic build/source failure
STATUS_PARENT_ERROR = 'parent_error'          # parent image failed, so child skipped
STATUS_BUILT = 'built'
STATUS_BUILDING = 'building'
STATUS_UNMATCHED = 'unmatched'                # filtered out; will not be built
STATUS_MATCHED = 'matched'
STATUS_UNPROCESSED = 'unprocessed'            # initial state

# All error status constants.
STATUS_ERRORS = (STATUS_CONNECTION_ERROR, STATUS_PUSH_ERROR,
                 STATUS_ERROR, STATUS_PARENT_ERROR)
@contextlib.contextmanager
def join_many(threads):
    """Context manager that joins *threads* after the body runs.

    On Ctrl-C it waits for the still-alive threads with a finite join
    timeout (a timeout-less join would swallow a second KeyboardInterrupt);
    pressing Ctrl-C again forces an immediate exit.
    """
    try:
        yield
        for worker in threads:
            worker.join()
    except KeyboardInterrupt:
        try:
            LOG.info('Waiting for daemon threads exit. Push Ctrl + c again to'
                     ' force exit')
            for worker in threads:
                if not worker.is_alive():
                    continue
                LOG.debug('Waiting thread %s to exit', worker.name)
                # NOTE(Jeffrey4l): Python Bug: When join without timeout,
                # KeyboardInterrupt is never sent.
                worker.join(0xffff)
                LOG.debug('Thread %s exits', worker.name)
        except KeyboardInterrupt:
            LOG.warning('Force exits')
class DockerTask(task.Task):
    """Base task that lazily creates and caches a docker client.

    ``docker_kwargs`` is resolved once, at class-definition time, from the
    DOCKER_* environment variables.
    """

    docker_kwargs = docker.utils.kwargs_from_env()

    def __init__(self):
        super(DockerTask, self).__init__()
        self._dc = None  # cached docker client, created on first access

    @property
    def dc(self):
        """The docker client; constructed on first use, then reused."""
        if self._dc is not None:
            return self._dc
        docker_kwargs = self.docker_kwargs.copy()
        self._dc = docker.Client(version='auto', **docker_kwargs)
        return self._dc
class Image(object):
    """One node of the image build graph, tracking status, parentage,
    source definitions and a per-image logger."""

    def __init__(self, name, canonical_name, path, parent_name='',
                 status=STATUS_UNPROCESSED, parent=None,
                 source=None, logger=None):
        self.name = name
        self.canonical_name = canonical_name
        self.path = path
        self.status = status
        self.parent = parent
        self.source = source
        self.parent_name = parent_name
        self.logger = logger if logger is not None else make_a_logger(image_name=name)
        self.children = []
        self.plugins = []

    def copy(self):
        """Duplicate this image; list attributes are shallow-copied so
        the copies can diverge, members themselves are shared."""
        duplicate = Image(self.name, self.canonical_name, self.path,
                          logger=self.logger, parent_name=self.parent_name,
                          status=self.status, parent=self.parent)
        if self.source:
            duplicate.source = self.source.copy()
        if self.children:
            duplicate.children = list(self.children)
        if self.plugins:
            duplicate.plugins = list(self.plugins)
        return duplicate

    def __repr__(self):
        return ("Image(%s, %s, %s, parent_name=%s,"
                " status=%s, parent=%s, source=%s)") % (
            self.name, self.canonical_name, self.path,
            self.parent_name, self.status, self.parent, self.source)
class PushIntoQueueTask(task.Task):
    """Task that pushes some other task into a queue."""

    def __init__(self, push_task, push_queue):
        super(PushIntoQueueTask, self).__init__()
        self.push_task = push_task    # the task to enqueue when run
        self.push_queue = push_queue  # destination queue

    @property
    def name(self):
        # e.g. "PushIntoQueueTask(PushTask(keystone)=><queue>)"
        return 'PushIntoQueueTask(%s=>%s)' % (self.push_task.name,
                                              self.push_queue)

    def run(self):
        """Enqueue the wrapped task; enqueuing itself always succeeds."""
        self.push_queue.put(self.push_task)
        self.success = True
class PushTask(DockerTask):
    """Task that pushes an image to a docker repository."""

    def __init__(self, conf, image):
        super(PushTask, self).__init__()
        self.conf = conf
        self.image = image
        self.logger = image.logger  # log into the image's own logger/file

    @property
    def name(self):
        return 'PushTask(%s)' % self.image.name

    def run(self):
        """Push the image, translating failures into image.status values;
        self.success reflects whether the push went cleanly."""
        image = self.image
        self.logger.info('Trying to push the image')
        try:
            self.push_image(image)
        except requests_exc.ConnectionError:
            self.logger.exception('Make sure Docker is running and that you'
                                  ' have the correct privileges to run Docker'
                                  ' (root)')
            image.status = STATUS_CONNECTION_ERROR
        except Exception:
            self.logger.exception('Unknown error when pushing')
            image.status = STATUS_PUSH_ERROR
        finally:
            # Success is judged from the status the push left behind.
            if (image.status not in STATUS_ERRORS
                    and image.status != STATUS_UNPROCESSED):
                self.logger.info('Pushed successfully')
                self.success = True
            else:
                self.success = False

    def push_image(self, image):
        """Stream the docker push, logging progress; an errorDetail record
        marks the image as errored (no exception is raised for it)."""
        for response in self.dc.push(image.canonical_name,
                                     stream=True,
                                     insecure_registry=True):
            stream = json.loads(response)
            if 'stream' in stream:
                self.logger.info(stream['stream'])
            elif 'errorDetail' in stream:
                image.status = STATUS_ERROR
                self.logger.error(stream['errorDetail']['message'])
class BuildTask(DockerTask):
"""Task that builds out an image."""
def __init__(self, conf, image, push_queue):
    """Build *image*; follow-up tasks may later push it onto *push_queue*."""
    super(BuildTask, self).__init__()
    self.conf = conf
    self.image = image
    self.push_queue = push_queue
    self.nocache = not conf.cache  # disable docker layer cache when conf.cache is off
    self.forcerm = not conf.keep   # remove intermediate containers unless keeping
    self.logger = image.logger
@property
def name(self):
    # Task name used in logs/queues, e.g. "BuildTask(keystone)".
    return 'BuildTask(%s)' % self.image.name
def run(self):
    """Execute the build; the task only succeeds when the image reaches
    STATUS_BUILT."""
    self.builder(self.image)
    if self.image.status == STATUS_BUILT:
        self.success = True
@property
def followups(self):
    """Tasks to schedule after this build: an optional push (when the
    config asks for it and the build succeeded), plus builds of all
    non-filtered child images."""
    followups = []
    if self.conf.push and self.success:
        followups.extend([
            # If we are supposed to push the image into a docker
            # repository, then make sure we do that...
            PushIntoQueueTask(
                PushTask(self.conf, self.image),
                self.push_queue),
        ])
    if self.image.children and self.success:
        for image in self.image.children:
            if image.status == STATUS_UNMATCHED:
                # Child was filtered out of this build run.
                continue
            followups.append(BuildTask(self.conf, image, self.push_queue))
    return followups
def process_source(self, image, source):
    """Fetch *source* (type 'url', 'git', or 'local') into the image's
    build directory as an archive.

    Returns the path of the archive written, or None on failure, in
    which case ``image.status`` is set to STATUS_ERROR.

    Fix applied: the git-failure log call passed ``image.name`` as a
    logging argument with no format placeholder, which made the logging
    module itself error out instead of printing the name.
    """
    dest_archive = os.path.join(image.path, source['name'] + '-archive')
    if source.get('type') == 'url':
        self.logger.debug("Getting archive from %s", source['source'])
        try:
            r = requests.get(source['source'], timeout=self.conf.timeout)
        except requests_exc.Timeout:
            self.logger.exception(
                'Request timed out while getting archive from %s',
                source['source'])
            image.status = STATUS_ERROR
            return
        if r.status_code == 200:
            with open(dest_archive, 'wb') as f:
                f.write(r.content)
        else:
            self.logger.error(
                'Failed to download archive: status_code %s',
                r.status_code)
            image.status = STATUS_ERROR
            return
    elif source.get('type') == 'git':
        clone_dir = '{}-{}'.format(dest_archive,
                                   source['reference'].replace('/', '-'))
        try:
            self.logger.debug("Cloning from %s", source['source'])
            git.Git().clone(source['source'], clone_dir)
            git.Git(clone_dir).checkout(source['reference'])
            reference_sha = git.Git(clone_dir).rev_parse('HEAD')
            self.logger.debug("Git checkout by reference %s (%s)",
                              source['reference'], reference_sha)
        except Exception as e:
            # Fixed: original had no %s placeholder for image.name.
            self.logger.error("Failed to get source from git: %s", image.name)
            self.logger.error("Error: %s", e)
            # clean-up clone folder to retry
            shutil.rmtree(clone_dir)
            image.status = STATUS_ERROR
            return
        with tarfile.open(dest_archive, 'w') as tar:
            tar.add(clone_dir, arcname=os.path.basename(clone_dir))
    elif source.get('type') == 'local':
        self.logger.debug("Getting local archive from %s",
                          source['source'])
        if os.path.isdir(source['source']):
            with tarfile.open(dest_archive, 'w') as tar:
                tar.add(source['source'],
                        arcname=os.path.basename(source['source']))
        else:
            shutil.copyfile(source['source'], dest_archive)
    else:
        self.logger.error("Wrong source type '%s'", source.get('type'))
        image.status = STATUS_ERROR
        return
    # Set time on destination archive to epoch 0 for reproducible builds.
    os.utime(dest_archive, (0, 0))
    return dest_archive
def update_buildargs(self):
buildargs = dict()
if self.conf.build_args:
buildargs = dict(self.conf.build_args)
proxy_vars = ('HTTP_PROXY', 'http_proxy', 'HTTPS_PROXY',
'https_proxy', 'FTP_PROXY', 'ftp_proxy',
'NO_PROXY', 'no_proxy')
for proxy_var in proxy_vars:
if proxy_var in os.environ and proxy_var not in buildargs:
buildargs[proxy_var] = os.environ.get(proxy_var)
if not buildargs:
return None
return buildargs
    def builder(self, image):
        """Run the full build for a single image.

        Skips unmatched images, propagates parent build failures, fetches
        the image source and plugin archives, then invokes the docker
        build, recording the outcome in ``image.status``.

        :param image: the image object to build
        """
        self.logger.debug('Processing')
        if image.status == STATUS_UNMATCHED:
            return
        if (image.parent is not None and
                image.parent.status in STATUS_ERRORS):
            self.logger.error('Parent image error\'d with message "%s"',
                              image.parent.status)
            image.status = STATUS_PARENT_ERROR
            return
        image.status = STATUS_BUILDING
        self.logger.info('Building')
        if image.source and 'source' in image.source:
            # process_source flags failures through image.status.
            self.process_source(image, image.source)
            if image.status in STATUS_ERRORS:
                return
        plugin_archives = list()
        plugins_path = os.path.join(image.path, 'plugins')
        for plugin in image.plugins:
            archive_path = self.process_source(image, plugin)
            if image.status in STATUS_ERRORS:
                return
            plugin_archives.append(archive_path)
        if plugin_archives:
            # Unpack every plugin archive into the plugins directory.
            for plugin_archive in plugin_archives:
                with tarfile.open(plugin_archive, 'r') as plugin_archive_tar:
                    plugin_archive_tar.extractall(path=plugins_path)
        else:
            # No plugins: still create an (empty) plugins directory so the
            # plugins-archive below can always be produced.
            try:
                os.mkdir(plugins_path)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    self.logger.info('Directory %s already exist. Skipping.',
                                     plugins_path)
                else:
                    self.logger.error('Failed to create directory %s: %s',
                                      plugins_path, e)
                    image.status = STATUS_CONNECTION_ERROR
                    return
        with tarfile.open(os.path.join(image.path, 'plugins-archive'),
                          'w') as tar:
            tar.add(plugins_path, arcname='plugins')
        # Pull the latest image for the base distro only
        pull = True if image.parent is None else False
        buildargs = self.update_buildargs()
        try:
            # Stream the docker build output line by line into the logger.
            for response in self.dc.build(path=image.path,
                                          tag=image.canonical_name,
                                          nocache=not self.conf.cache,
                                          rm=True,
                                          pull=pull,
                                          forcerm=self.forcerm,
                                          buildargs=buildargs):
                stream = json.loads(response.decode('utf-8'))
                if 'stream' in stream:
                    for line in stream['stream'].split('\n'):
                        if line:
                            self.logger.info('%s', line)
                if 'errorDetail' in stream:
                    image.status = STATUS_ERROR
                    self.logger.error('Error\'d with the following message')
                    for line in stream['errorDetail']['message'].split('\n'):
                        if line:
                            self.logger.error('%s', line)
                    return
        except docker.errors.DockerException:
            image.status = STATUS_ERROR
            self.logger.exception('Unknown docker error when building')
        except Exception:
            image.status = STATUS_ERROR
            self.logger.exception('Unknown error when building')
        else:
            image.status = STATUS_BUILT
            self.logger.info('Built')
class WorkerThread(threading.Thread):
    """Worker that keeps pulling tasks off a queue until it sees the
    tombstone sentinel.

    Each task is attempted up to ``conf.retries + 1`` times; once a task
    succeeds its follow-up tasks are queued as well.
    """
    #: Object to be put on worker queues to get them to die.
    tombstone = object()

    def __init__(self, conf, queue):
        super(WorkerThread, self).__init__()
        self.conf = conf
        self.queue = queue
        self.should_stop = False

    def run(self):
        while not self.should_stop:
            task = self.queue.get()
            if task is self.tombstone:
                # Put the tombstone back so sibling workers see it too.
                self.queue.put(task)
                break
            try:
                self._run_task(task)
            finally:
                self.queue.task_done()

    def _run_task(self, task):
        """Run a single task with retries, then queue its follow-ups."""
        for attempt in six.moves.range(self.conf.retries + 1):
            if self.should_stop:
                break
            if attempt > 0:
                LOG.info("Attempting to run task %s for the %s time",
                         task.name, attempt + 1)
            else:
                LOG.info("Attempting to run task %s for the first time",
                         task.name)
            try:
                task.run()
                if task.success:
                    break
            except Exception:
                LOG.exception('Unhandled error when running %s',
                              task.name)
            # try again...
            task.reset()
        if task.success and not self.should_stop:
            for next_task in task.followups:
                LOG.info('Added next task %s to queue', next_task.name)
                self.queue.put(next_task)
class KollaWorker(object):
    """Top-level driver for a kolla-build run.

    Locates the Dockerfile templates, renders them into a temporary
    working directory, builds the image dependency tree and collects
    per-image build statuses.
    """
    def __init__(self, conf):
        """Validate the base/install_type combination and cache config.

        :param conf: an oslo.config ConfigOpts instance
        :raises: KollaMismatchBaseTypeException for unsupported
            base/install_type pairs
        :raises: KollaUnknownBuildTypeException for unknown install types
        """
        self.conf = conf
        self.images_dir = self._get_images_dir()
        self.registry = conf.registry
        if self.registry:
            self.namespace = self.registry + '/' + conf.namespace
        else:
            self.namespace = conf.namespace
        self.base = conf.base
        self.base_tag = conf.base_tag
        self.install_type = conf.install_type
        self.tag = conf.tag
        self.images = list()
        if conf.rpm_setup_config:
            rpm_setup_config = filter(None, conf.rpm_setup_config)
        else:
            rpm_setup_config = list()
        self.rpm_setup = self.build_rpm_setup(rpm_setup_config)
        rh_base = ['fedora', 'centos', 'oraclelinux', 'rhel']
        rh_type = ['source', 'binary', 'rdo', 'rhos']
        deb_base = ['ubuntu', 'debian']
        deb_type = ['source', 'binary']
        if not ((self.base in rh_base and self.install_type in rh_type) or
                (self.base in deb_base and self.install_type in deb_type)):
            raise KollaMismatchBaseTypeException(
                '{} is unavailable for {}'.format(self.install_type, self.base)
            )
        if self.base == 'fedora':
            LOG.warning('Fedora images are deprecated since Newton and will '
                        'be removed in the future')
        # 'rdo' and 'rhos' are aliases of 'binary' with a distinct metatype.
        if self.install_type == 'binary':
            self.install_metatype = 'rdo'
        elif self.install_type == 'source':
            self.install_metatype = 'mixed'
        elif self.install_type == 'rdo':
            self.install_type = 'binary'
            self.install_metatype = 'rdo'
        elif self.install_type == 'rhos':
            self.install_type = 'binary'
            self.install_metatype = 'rhos'
        else:
            raise KollaUnknownBuildTypeException(
                'Unknown install type'
            )
        self.image_prefix = self.base + '-' + self.install_type + '-'
        self.include_header = conf.include_header
        self.include_footer = conf.include_footer
        self.regex = conf.regex
        self.image_statuses_bad = dict()
        self.image_statuses_good = dict()
        self.image_statuses_unmatched = dict()
        self.maintainer = conf.maintainer
    def _get_images_dir(self):
        """Locate the directory that holds the Dockerfile templates.

        :returns: the first candidate path containing a 'docker/base' dir
        :raises: KollaDirNotFoundException when none is found
        """
        possible_paths = (
            PROJECT_ROOT,
            os.path.join(sys.prefix, 'share/kolla'),
            os.path.join(sys.prefix, 'local/share/kolla'))
        for path in possible_paths:
            image_path = os.path.join(path, 'docker')
            # NOTE(SamYaple): We explicitly check for the base folder to
            # ensure this is the correct path
            # TODO(SamYaple): Improve this to make this safer
            if os.path.exists(os.path.join(image_path, 'base')):
                LOG.info('Found the docker image folder at %s', image_path)
                return image_path
        else:
            raise KollaDirNotFoundException('Image dir can not be found')
    def build_rpm_setup(self, rpm_setup_config):
        """Generates a list of docker commands based on provided configuration.
        :param rpm_setup_config: A list of .rpm or .repo paths or URLs
        :return: A list of docker commands
        :raises: KollaRpmSetupUnknownConfig for entries that are neither
            .rpm nor .repo
        """
        rpm_setup = list()
        for config in rpm_setup_config:
            if config.endswith('.rpm'):
                # RPM files can be installed with yum from file path or url
                cmd = "RUN yum -y install {}".format(config)
            elif config.endswith('.repo'):
                if config.startswith('http'):
                    # Curl http://url/etc.repo to /etc/yum.repos.d/etc.repo
                    name = config.split('/')[-1]
                    cmd = "RUN curl -L {} -o /etc/yum.repos.d/{}".format(
                        config, name)
                else:
                    # Copy .repo file from filesystem
                    cmd = "COPY {} /etc/yum.repos.d/".format(config)
            else:
                raise KollaRpmSetupUnknownConfig(
                    'RPM setup must be provided as .rpm or .repo files.'
                    ' Attempted configuration was {}'.format(config)
                )
            rpm_setup.append(cmd)
        return rpm_setup
    def copy_apt_files(self):
        """Copy user-supplied apt sources.list and preferences into the
        base image build context, when configured."""
        if self.conf.apt_sources_list:
            shutil.copyfile(
                self.conf.apt_sources_list,
                os.path.join(self.working_dir, "base", "sources.list")
            )
        if self.conf.apt_preferences:
            shutil.copyfile(
                self.conf.apt_preferences,
                os.path.join(self.working_dir, "base", "apt_preferences")
            )
    def setup_working_dir(self):
        """Creates a working directory for use while building"""
        ts = time.time()
        ts = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S_')
        self.temp_dir = tempfile.mkdtemp(prefix='kolla-' + ts)
        self.working_dir = os.path.join(self.temp_dir, 'docker')
        shutil.copytree(self.images_dir, self.working_dir)
        self.copy_apt_files()
        LOG.debug('Created working dir: %s', self.working_dir)
    def set_time(self):
        """Zero atime/mtime on everything in the working dir so Docker's
        build cache keys on content rather than timestamps."""
        for root, dirs, files in os.walk(self.working_dir):
            for file_ in files:
                os.utime(os.path.join(root, file_), (0, 0))
            for dir_ in dirs:
                os.utime(os.path.join(root, dir_), (0, 0))
        LOG.debug('Set atime and mtime to 0 for all content in working dir')
    def _get_filters(self):
        """Return the custom Jinja filters made available to templates."""
        filters = {
            'customizable': jinja_filters.customizable,
        }
        return filters
    def _get_methods(self):
        """Mapping of available Jinja methods
        return a dictionary that maps available function names and their
        corresponding python methods to make them available in jinja templates
        """
        return {
            'debian_package_install': jinja_methods.debian_package_install,
        }
    def create_dockerfiles(self):
        """Render every discovered Dockerfile.j2 template to a Dockerfile."""
        kolla_version = version.version_info.cached_version_string()
        supported_distro_release = common_config.DISTRO_RELEASE.get(
            self.base)
        for path in self.docker_build_paths:
            template_name = "Dockerfile.j2"
            image_name = path.split("/")[-1]
            values = {'base_distro': self.base,
                      'base_image': self.conf.base_image,
                      'base_distro_tag': self.base_tag,
                      'supported_distro_release': supported_distro_release,
                      'install_metatype': self.install_metatype,
                      'image_prefix': self.image_prefix,
                      'install_type': self.install_type,
                      'namespace': self.namespace,
                      'tag': self.tag,
                      'maintainer': self.maintainer,
                      'kolla_version': kolla_version,
                      'image_name': image_name,
                      'rpm_setup': self.rpm_setup}
            env = jinja2.Environment(  # nosec: not used to render HTML
                loader=jinja2.FileSystemLoader(self.working_dir))
            env.filters.update(self._get_filters())
            env.globals.update(self._get_methods())
            tpl_path = os.path.join(
                os.path.relpath(path, self.working_dir),
                template_name)
            template = env.get_template(tpl_path)
            if self.conf.template_override:
                # The override template extends the stock template, which
                # is handed over to it as 'parent_template'.
                template_path = os.path.dirname(self.conf.template_override)
                template_name = os.path.basename(self.conf.template_override)
                values['parent_template'] = template
                env = jinja2.Environment(  # nosec: not used to render HTML
                    loader=jinja2.FileSystemLoader(template_path))
                env.filters.update(self._get_filters())
                env.globals.update(self._get_methods())
                template = env.get_template(template_name)
            if self.include_header:
                with open(self.include_header, 'r') as f:
                    values['include_header'] = f.read()
            if self.include_footer:
                with open(self.include_footer, 'r') as f:
                    values['include_footer'] = f.read()
            content = template.render(values)
            with open(os.path.join(path, 'Dockerfile'), 'w') as f:
                f.write(content)
    def find_dockerfiles(self):
        """Recursive search for Dockerfiles in the working directory"""
        self.docker_build_paths = list()
        path = self.working_dir
        filename = 'Dockerfile.j2'
        for root, dirs, names in os.walk(path):
            if filename in names:
                self.docker_build_paths.append(root)
                LOG.debug('Found %s', root.split(self.working_dir)[1])
        LOG.debug('Found %d Dockerfiles', len(self.docker_build_paths))
    def cleanup(self):
        """Remove temp files"""
        shutil.rmtree(self.temp_dir)
    def filter_images(self):
        """Filter which images to build"""
        filter_ = list()
        if self.regex:
            filter_ += self.regex
        elif self.conf.profile:
            for profile in self.conf.profile:
                if profile not in self.conf.profiles:
                    self.conf.register_opt(cfg.ListOpt(profile,
                                           default=[]),
                                           'profiles')
                if len(self.conf.profiles[profile]) == 0:
                    msg = 'Profile: {} does not exist'.format(profile)
                    raise ValueError(msg)
                else:
                    filter_ += self.conf.profiles[profile]
        if filter_:
            # str.join('()') wraps the alternation in parentheses, e.g.
            # ['a', 'b'] becomes the pattern '(a|b)'.
            patterns = re.compile(r"|".join(filter_).join('()'))
            for image in self.images:
                if image.status == STATUS_MATCHED:
                    continue
                if re.search(patterns, image.name):
                    image.status = STATUS_MATCHED
                    # Parents of a matched image must be built as well.
                    while (image.parent is not None and
                           image.parent.status != STATUS_MATCHED):
                        image = image.parent
                        image.status = STATUS_MATCHED
                        LOG.debug('Image %s matched regex', image.name)
                else:
                    image.status = STATUS_UNMATCHED
        else:
            for image in self.images:
                image.status = STATUS_MATCHED
    def summary(self):
        """Walk the dictionary of images statuses and print results"""
        # For debug we print the logs again if the image error'd. This is to
        # help us debug and it will be extra helpful in the gate.
        for image in self.images:
            if image.status in STATUS_ERRORS:
                LOG.debug("Image %s failed", image.name)
        self.get_image_statuses()
        if self.image_statuses_good:
            LOG.info("=========================")
            LOG.info("Successfully built images")
            LOG.info("=========================")
            for name in self.image_statuses_good.keys():
                LOG.info(name)
        if self.image_statuses_bad:
            LOG.info("===========================")
            LOG.info("Images that failed to build")
            LOG.info("===========================")
            for name, status in self.image_statuses_bad.items():
                LOG.error('%s Failed with status: %s', name, status)
        if self.image_statuses_unmatched:
            LOG.debug("=====================================")
            LOG.debug("Images not matched for build by regex")
            LOG.debug("=====================================")
            for name in self.image_statuses_unmatched.keys():
                LOG.debug(name)
    def get_image_statuses(self):
        """Partition images into bad/good/unmatched status dicts.

        The partition is computed once and then cached in the three
        image_statuses_* attributes.

        :returns: (bad, good, unmatched) dicts keyed on image name
        """
        if any([self.image_statuses_bad,
                self.image_statuses_good,
                self.image_statuses_unmatched]):
            return (self.image_statuses_bad,
                    self.image_statuses_good,
                    self.image_statuses_unmatched)
        for image in self.images:
            if image.status == STATUS_BUILT:
                self.image_statuses_good[image.name] = image.status
            elif image.status == STATUS_UNMATCHED:
                self.image_statuses_unmatched[image.name] = image.status
            else:
                self.image_statuses_bad[image.name] = image.status
        return (self.image_statuses_bad,
                self.image_statuses_good,
                self.image_statuses_unmatched)
    def build_image_list(self):
        """Create an Image object for every rendered Dockerfile."""
        def process_source_installation(image, section):
            # Build the source/plugin installation dict for one config
            # section; empty when the section defines no 'type'.
            installation = dict()
            # NOTE(jeffrey4l): source is not needed when the type is None
            if self.conf._get('type', self.conf._get_group(section)) is None:
                if image.parent_name is None:
                    LOG.debug('No source location found in section %s',
                              section)
            else:
                installation['type'] = self.conf[section]['type']
                installation['source'] = self.conf[section]['location']
                installation['name'] = section
                if installation['type'] == 'git':
                    installation['reference'] = self.conf[section]['reference']
            return installation
        all_sections = (set(six.iterkeys(self.conf._groups)) |
                        set(self.conf.list_all_sections()))
        for path in self.docker_build_paths:
            # Reading parent image name
            with open(os.path.join(path, 'Dockerfile')) as f:
                content = f.read()
            image_name = os.path.basename(path)
            canonical_name = (self.namespace + '/' + self.image_prefix +
                              image_name + ':' + self.tag)
            # The second whitespace token of the Dockerfile is the FROM
            # image, i.e. the parent image name.
            image = Image(image_name, canonical_name, path,
                          parent_name=content.split(' ')[1].split('\n')[0],
                          logger=make_a_logger(self.conf, image_name))
            if self.install_type == 'source':
                # NOTE(jeffrey4l): register the opts if the section didn't
                # register in the kolla/common/config.py file
                if image.name not in self.conf._groups:
                    self.conf.register_opts(common_config.get_source_opts(),
                                            image.name)
                image.source = process_source_installation(image, image.name)
                for plugin in [match.group(0) for match in
                               (re.search('^{}-plugin-.+'.format(image.name),
                                          section) for section in
                                all_sections) if match]:
                    try:
                        self.conf.register_opts(
                            common_config.get_source_opts(),
                            plugin
                        )
                    except cfg.DuplicateOptError:
                        LOG.debug('Plugin %s already registered in config',
                                  plugin)
                    image.plugins.append(
                        process_source_installation(image, plugin))
            self.images.append(image)
    def save_dependency(self, to_file):
        """Write the matched-image dependency graph to to_file as DOT."""
        dot = graphviz.Digraph(comment='Docker Images Dependency')
        dot.body.extend(['rankdir=LR'])
        for image in self.images:
            if image.status not in [STATUS_MATCHED]:
                continue
            dot.node(image.name)
            if image.parent is not None:
                dot.edge(image.parent.name, image.name)
        with open(to_file, 'w') as f:
            f.write(dot.source)
    def list_images(self):
        """Print a 1-based numbered list of all known images."""
        for count, image in enumerate(self.images):
            print(count + 1, ':', image.name)
    def list_dependencies(self):
        """Pretty-print the ancestry tree of matched images."""
        match = False
        for image in self.images:
            if image.status in [STATUS_MATCHED]:
                match = True
            if image.parent is None:
                # NOTE(review): assumes at least one parentless (base)
                # image exists; 'base' would be unbound otherwise — verify.
                base = image
        if not match:
            print('Nothing matched!')
            return
        def list_children(images, ancestry):
            # Recursively fill the single child-list value of 'ancestry'.
            children = six.next(iter(ancestry.values()))
            for image in images:
                if image.status not in [STATUS_MATCHED]:
                    continue
                if not image.children:
                    children.append(image.name)
                else:
                    newparent = {image.name: []}
                    children.append(newparent)
                    list_children(image.children, newparent)
        ancestry = {base.name: []}
        list_children(base.children, ancestry)
        pprint.pprint(ancestry)
    def find_parents(self):
        """Associate all images with parents and children"""
        sort_images = dict()
        for image in self.images:
            sort_images[image.canonical_name] = image
        for parent_name, parent in sort_images.items():
            for image in sort_images.values():
                if image.parent_name == parent_name:
                    parent.children.append(image)
                    image.parent = parent
    def build_queue(self, push_queue):
        """Organizes Queue list
        Return a list of Queues that have been organized into a hierarchy
        based on dependencies
        """
        self.build_image_list()
        self.find_parents()
        self.filter_images()
        queue = six.moves.queue.Queue()
        for image in self.images:
            if image.status == STATUS_UNMATCHED:
                # Don't bother queuing up build tasks for things that
                # were not matched in the first place... (not worth the
                # effort to run them, if they won't be used anyway).
                continue
            if image.parent is None:
                queue.put(BuildTask(self.conf, image, push_queue))
                LOG.info('Added image %s to queue', image.name)
        return queue
def run_build():
    """Build container images.

    :return: A 3-tuple containing bad, good, and unmatched container image
        status dicts, or None if no images were built.
    """
    conf = cfg.ConfigOpts()
    common_config.parse(conf, sys.argv[1:], prog='kolla-build')
    if conf.debug:
        LOG.setLevel(logging.DEBUG)
    kolla = KollaWorker(conf)
    kolla.setup_working_dir()
    kolla.find_dockerfiles()
    kolla.create_dockerfiles()
    if conf.template_only:
        LOG.info('Dockerfiles are generated in %s', kolla.working_dir)
        return
    # We set the atime and mtime to 0 epoch to allow the Docker cache
    # to work like we want. A different size or hash will still force a rebuild
    kolla.set_time()
    if conf.save_dependency:
        kolla.build_image_list()
        kolla.find_parents()
        kolla.filter_images()
        kolla.save_dependency(conf.save_dependency)
        LOG.info('Docker images dependency are saved in %s',
                 conf.save_dependency)
        return
    if conf.list_images:
        kolla.build_image_list()
        kolla.list_images()
        return
    if conf.list_dependencies:
        kolla.build_image_list()
        kolla.find_parents()
        kolla.filter_images()
        kolla.list_dependencies()
        return
    push_queue = six.moves.queue.Queue()
    queue = kolla.build_queue(push_queue)
    workers = []
    with join_many(workers):
        try:
            for x in six.moves.range(conf.threads):
                worker = WorkerThread(conf, queue)
                # The daemon attribute replaces the deprecated
                # setDaemon(); daemon threads don't block shutdown.
                worker.daemon = True
                worker.start()
                workers.append(worker)
            for x in six.moves.range(conf.push_threads):
                worker = WorkerThread(conf, push_queue)
                worker.daemon = True
                worker.start()
                workers.append(worker)
            # sleep until queue is empty
            while queue.unfinished_tasks or push_queue.unfinished_tasks:
                time.sleep(3)
            # ensure all threads exited happily
            push_queue.put(WorkerThread.tombstone)
            queue.put(WorkerThread.tombstone)
        except KeyboardInterrupt:
            for w in workers:
                w.should_stop = True
            push_queue.put(WorkerThread.tombstone)
            queue.put(WorkerThread.tombstone)
            raise
    kolla.summary()
    kolla.cleanup()
    return kolla.get_image_statuses()
# ---- boundary between concatenated source files ----
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Conduct all activity related to bare-metal deployments.
A single instance of :py:class:`ironic.conductor.manager.ConductorManager` is
created within the *ironic-conductor* process, and is responsible for
performing all actions on bare metal resources (Chassis, Nodes, and Ports).
Commands are received via RPC calls. The conductor service also performs
periodic tasks, eg. to monitor the status of active deployments.
Drivers are loaded via entrypoints, by the
:py:class:`ironic.conductor.resource_manager.NodeManager` class. Each driver is
instantiated once and a ref to that singleton is included in each resource
manager, depending on the node's configuration. In this way, a single
ConductorManager may use multiple drivers, and manage heterogeneous hardware.
When multiple :py:class:`ConductorManager` are run on different hosts, they are
all active and cooperatively manage all nodes in the deployment. Nodes are
locked by each conductor when performing actions which change the state of that
node; these locks are represented by the
:py:class:`ironic.conductor.task_manager.TaskManager` class.
"""
from ironic.common import exception
from ironic.common import service
from ironic.common import states
from ironic.conductor import task_manager
from ironic.db import api as dbapi
from ironic.objects import base as objects_base
from ironic.openstack.common import log
MANAGER_TOPIC = 'ironic.conductor_manager'
LOG = log.getLogger(__name__)
class ConductorManager(service.PeriodicService):
    """Ironic Conductor service main class."""

    RPC_API_VERSION = '1.3'

    def __init__(self, host, topic):
        serializer = objects_base.IronicObjectSerializer()
        super(ConductorManager, self).__init__(host, topic,
                                               serializer=serializer)

    def start(self):
        super(ConductorManager, self).start()
        # DB handle is created at service start, not at construction time.
        self.dbapi = dbapi.get_instance()

    def initialize_service_hook(self, service):
        pass

    def process_notification(self, notification):
        LOG.debug(_('Received notification: %r') %
                  notification.get('event_type'))
        # TODO(deva)

    def periodic_tasks(self, context):
        # TODO(deva)
        pass

    def get_node_power_state(self, context, node_id):
        """Get and return the power state for a single node."""
        with task_manager.acquire([node_id], shared=True) as task:
            node = task.resources[0].node
            driver = task.resources[0].driver
            state = driver.power.get_power_state(task, node)
            return state

    def update_node(self, context, node_obj):
        """Update a node with the supplied data.

        This method is the main "hub" for PUT and PATCH requests in the API.
        It ensures that the requested change is safe to perform,
        validates the parameters with the node's driver, if necessary.

        :param context: an admin context
        :param node_obj: a changed (but not saved) node object.
        :raises: IronicException when power_state is part of the change.
        :returns: the saved node object.
        """
        node_id = node_obj.get('uuid')
        LOG.debug(_("RPC update_node called for node %s.") % node_id)
        delta = node_obj.obj_what_changed()
        if 'power_state' in delta:
            raise exception.IronicException(_(
                "Invalid method call: update_node can not change node state."))
        driver_name = node_obj.get('driver') if 'driver' in delta else None
        with task_manager.acquire(node_id,
                                  shared=False,
                                  driver_name=driver_name) as task:
            if 'driver_info' in delta:
                task.driver.deploy.validate(node_obj)
                task.driver.power.validate(node_obj)
                # BUG FIX: the original assigned the bound method object
                # itself instead of calling it, leaving a callable (not a
                # power-state string) in node_obj['power_state'].
                node_obj['power_state'] = task.driver.power.get_power_state(
                    task, node_obj)
            # TODO(deva): Determine what value will be passed by API when
            # instance_uuid needs to be unset, and handle it.
            if 'instance_uuid' in delta:
                if node_obj['power_state'] != states.POWER_OFF:
                    raise exception.NodeInWrongPowerState(
                        node=node_id,
                        pstate=node_obj['power_state'])
            # update any remaining parameters, then save
            node_obj.save(context)
            return node_obj

    def change_node_power_state(self, context, node_obj, new_state):
        """RPC method to encapsulate changes to a node's state.

        Perform actions such as power on, power off. It waits for the power
        action to finish, then if successful, it updates the power_state for
        the node with the new power state.

        :param context: an admin context.
        :param node_obj: an RPC-style node object.
        :param new_state: the desired power state of the node.
        :raises: InvalidParameterValue when the wrong state is specified
            or the wrong driver info is specified.
        :raises: NodeInWrongPowerState when the node is already in the
            requested state.
        :raises: other exceptions from the node's power driver if something
            went wrong during the power action.
        """
        node_id = node_obj.get('uuid')
        LOG.debug(_("RPC change_node_power_state called for node %(node)s. "
                    "The desired new state is %(state)s.")
                  % {'node': node_id, 'state': new_state})
        with task_manager.acquire(node_id, shared=False) as task:
            # an exception will be raised if validate fails.
            task.driver.power.validate(node_obj)
            curr_state = task.driver.power.get_power_state(task, node_obj)
            if curr_state == new_state:
                raise exception.NodeInWrongPowerState(node=node_id,
                                                      pstate=curr_state)
            # set the target_power_state.
            # This will expose to other processes and clients that the work
            # is in progress
            node_obj['target_power_state'] = new_state
            node_obj.save(context)
            # take power action, set the power_state to error if it fails
            try:
                task.driver.power.set_power_state(task, node_obj, new_state)
            except exception.IronicException:
                node_obj['power_state'] = states.ERROR
                node_obj.save(context)
                raise
            # update the node power states
            node_obj['power_state'] = new_state
            node_obj['target_power_state'] = states.NOSTATE
            node_obj.save(context)

    # NOTE(deva): There is a race condition in the RPC API for vendor_passthru.
    # Between the validate_vendor_action and do_vendor_action calls, it's
    # possible another conductor instance may acquire a lock, or change the
    # state of the node, such that validate() succeeds but do() fails.
    # TODO(deva): Implement an intent lock to prevent this race. Do this after
    # we have implemented intelligent RPC routing so that the do() will be
    # guaranteed to land on the same conductor instance that performed
    # validate().
    def validate_vendor_action(self, context, node_id, driver_method, info):
        """Validate driver specific info or get driver status."""
        LOG.debug(_("RPC call_driver called for node %s.") % node_id)
        with task_manager.acquire(node_id, shared=True) as task:
            if getattr(task.driver, 'vendor', None):
                return task.driver.vendor.validate(task.node,
                                                   method=driver_method,
                                                   **info)
            else:
                raise exception.UnsupportedDriverExtension(
                    driver=task.node['driver'],
                    node=node_id,
                    extension='vendor passthru')

    def do_vendor_action(self, context, node_id, driver_method, info):
        """Run driver action asynchronously."""
        with task_manager.acquire(node_id, shared=True) as task:
            task.driver.vendor.vendor_passthru(task, task.node,
                                               method=driver_method, **info)
# ---- boundary between concatenated source files ----
# pylint:disable=C0301
"""
.. _simple_netem_control:
python wrapper for linux commands that provide basic WAN emulations
:module: control
:copyright:
Copyright 2017 Serban Teodorescu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
:contact: serbant@gmail.com
Simple Netem Control
====================
This module contains python classes that expose network (WAN) emulations
control functions on linux hosts
The linux commands exposed by this module are the **tc** command and the **ip**
command.
See
`<http://www.linuxfoundation.org/collaborate/workgroups/networking/netem#Emulating_wide_area_network_delays>`_
for details
Supported WAN Emulations
------------------------
This module can provide any combination of the WAN conditions (emulations)
listed below but only on a per network interface basis. It does not support
per flow emulations.
* packet delay
* packet loss
* packet duplication
* packet corruption
* packet re-ordering
* traffic rate
:Note:
The code in this file is inherently not portable and can only be executed
on a Linux host
"""
# pylint:enable=C0301
from __future__ import (
unicode_literals, print_function, division, absolute_import)
import sys
import subprocess
import shlex
from weakref import WeakSet
import netem_exceptions
import emulations
import config
class Command(object):
    '''
    build and expose all the os command strings as static methods
    '''
    @staticmethod
    def add_emulation(device=None, cmd_opts=None):
        '''
        :returns: the os command to add an emulation to a network device
        :rtype: str
        :arg str device: the network device name
        :arg str cmd_opts: the netem emulation option string
        :raises: netem_exceptions.CommandError when an argument is missing
        '''
        cmd = 'sudo tc qdisc add dev'
        if not device or not cmd_opts:
            raise netem_exceptions.CommandError('device', 'cmd_opts')
        return r'{cmd} {device} root netem {cmd_opts}'.format(
            cmd=cmd, device=device, cmd_opts=cmd_opts)

    @staticmethod
    def remove_emulation(device=None, cmd_opts=None):
        '''
        :returns: the os command to remove an emulation from a network
            device
        :rtype: str
        :arg str device: the network device name
        :arg str cmd_opts: the netem emulation option string
        :raises: netem_exceptions.CommandError when an argument is missing
        '''
        cmd = 'sudo tc qdisc del dev'
        if not device or not cmd_opts:
            # BUG FIX: report the actual parameter name 'cmd_opts'
            # (the original said 'emulations'), matching add_emulation.
            raise netem_exceptions.CommandError('device', 'cmd_opts')
        return r'{cmd} {device} root netem {cmd_opts}'.format(
            cmd=cmd, device=device, cmd_opts=cmd_opts)

    @staticmethod
    def remove_all_emulations(device):
        '''
        :returns: the command to remove all emulations from a network device
        :arg str device: the network device name
        :raises: netem_exceptions.CommandError when device is missing
        '''
        cmd = r'sudo tc qdisc del dev'
        if not device:
            raise netem_exceptions.CommandError('device')
        return r'{cmd} {device} root netem'.format(cmd=cmd, device=device)

    @staticmethod
    def show_emulations(device):
        '''
        :returns: the os command to show the emulations running on a device
        :arg str device: the network device name
        :raises: netem_exceptions.CommandError when device is missing
        '''
        cmd = 'tc -s qdisc show dev'
        if not device:
            raise netem_exceptions.CommandError('device')
        return r'{cmd} {device}'.format(cmd=cmd, device=device)

    @staticmethod
    def ifup(device):
        '''
        :returns: the os command to set a network device in the UP state
        :arg str device: the network device name
        :raises: netem_exceptions.CommandError when device is missing
        '''
        cmd = 'sudo ip link set dev'
        if not device:
            raise netem_exceptions.CommandError('device')
        return r'{cmd} {device} up'.format(cmd=cmd, device=device)

    @staticmethod
    def ifdown(device):
        '''
        :returns: the os command to set a network device in the DOWN state
        :arg str device: the network device name
        :raises: netem_exceptions.CommandError when device is missing
        '''
        cmd = 'sudo ip link set dev'
        if not device:
            raise netem_exceptions.CommandError('device')
        return r'{cmd} {device} down'.format(cmd=cmd, device=device)

    @staticmethod
    def ifshow(device):
        '''
        :returns: the os command to show the info for a network device
        :arg str device: the network device name
        :raises: netem_exceptions.CommandError when device is missing
        '''
        cmd = 'ip link show dev'
        if not device:
            raise netem_exceptions.CommandError('device')
        return r'{cmd} {device}'.format(cmd=cmd, device=device)

    @staticmethod
    def iflist():
        '''
        :returns: the os command to list the network devices on the host
        '''
        cmd = 'ip link show'
        return r'{cmd}'.format(cmd=cmd)
def execute(cmd):
    """
    execute a system command

    :param cmd:
        the command to execute as a string
    :returns:
        a tuple in the format (returncode, stdout, stderr)
    """
    # This module shells out to tc/ip and is therefore Linux-only.
    if 'linux' not in sys.platform:
        return (1,
                'cannot execute {}'.format(cmd),
                'not supported on {}'.format(sys.platform))
    try:
        process = subprocess.Popen(
            shlex.split(cmd), bufsize=-1, stdout=subprocess.PIPE,
            stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = process.communicate()
    except OSError as exc:
        return (1, 'cannot execute {}'.format(cmd), exc)
    except ValueError as exc:
        return (1, 'invalid command {}'.format(cmd), exc)
    except Exception as exc:  # pylint:disable=W0703
        return (1, 'unexpected error on command {}'.format(cmd), exc)
    return (process.returncode, out.decode(), err.decode())
class NetemInterface(object):
"""
class wrapper for the network interface to be controlled
each interface used for network emulations is exposed via an
instance of this class
public members
---------------
* **state:** the state of the application that will be returned via the
heartbeat() : starting|emulating|waiting|blocking|degraded.
* starting: the interface object is initializing
* emulating: there is an active netem policy on the interface
* waiting: the interface is up and running the default policy
(pfifo_fast)
* blocking: the interface is down but not out
* degraded: the interface cannot be used. this is the error state.
the application is running but it cannot be used
"""
    class State(object):
        # pylint:disable=R0903
        '''
        keep the possible states in their own class

        String constants reported as the interface state (see the
        enclosing class docstring for what each state means).
        '''
        ready = 'UP, ready'
        emulating = 'UP, emulating'
        blocking = 'DOWN, blocking'
        starting = 'starting'
@staticmethod
def get_interfaces(xclude_wlan=config.XCLUDE_WLAN,
xclude_loopback=config.XCLUDE_LOOPBACK):
"""
get a list of network interface names from the system
see `<https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L20>`_
for predictable interface names
:param xclude_wlan:
exclude the wireless interfaces, default True
:param xclude_loopback:
exclude the loopback (lo) interfaces
(yes, there can be more than one), default True
:returns:
a dictionary keyed on the network interface system name,
each entry contains the info between the <> following the interface
name
:raises:
NetemNotSupportedException
NetemInsufficientInterfaces
"""
interfaces = dict()
returncode, output, error = execute(Command.iflist())
if returncode:
if 'supported' in error:
raise netem_exceptions.NetemNotSupportedError(
output, error)
else:
raise netem_exceptions.NetemUnexpectedError(error)
for etherface in output.split('\n'):
if xclude_wlan and 'wl' in etherface:
continue
if xclude_loopback and 'lo' in etherface:
continue
if not etherface:
continue
interfaces[''.join(etherface.split(': ')[1:2])] = ''.join(
etherface.split(': ')[2:3]
)
# there may be a '' key, pop it
interfaces.pop('', None)
return interfaces
def __init__(self, interface=None, side=None, logger=None):
"""
:param side:
the position of the interface controlled by this instance relative
to the network
the value of this parameter is arbritrary but from logical
perspective it maps to either a 'host side' or (one or more)
'client sides'
:param interface:
the system name associated with the interface controlled from this
instance by the operating system
this is more or less the 'eth0', 'eth1', etc parameter. it is
configurable because the 'eth0', 'eth1' approach is just a
convention (current fedora or centos distros do not use this
convention anymore).
if interface is not specified, the constructor will assume that:
* the **netem node** has only 2 netem capable interfaces
* the interfaces are named using the eth0, eth1, eth2 convention
* the host side interface is eth0
* the client side interface is eth1
* the value of the :param side: contains the string 'host' for
the host side instance, and the string 'client' for the client
side instance
:raises:
NetemInsufficientInterfaces exception,
NetemInvalidInterfaceException exception
:warnings:
raises a warning if one tries to run more instances than the
number of available interfaces or if each available interface is
already controlled by an instance of this class
"""
if not interface:
raise netem_exceptions.NetemUnexpectedError(
err='must specify a network device')
self.interface = interface
self.side = side or self.interface
self.logger = logger or config.get_logger('local_netem')
interfaces = self.get_interfaces()
# not a multi-homed host, can't run netem
if len(interfaces.keys()) < 2:
raise netem_exceptions.NetemInsufficientInterfaces(
interfaces=dict(interfaces))
# bad interface name
if self.interface not in interfaces.keys():
raise netem_exceptions.NetemInvalidInterface(
self.interface, interfaces.keys())
self._state = '{}: {}'.format(self.interface, self.State.starting)
# and we're good to go
# but let's make sure there's no qdisc already running on this thing
self.remove_emulations()
self.set_interface_up()
self._state = '{}: {}'.format(self.interface, self.State.ready)
self.logger.info(
'netem control server running as %s on network interface %s' %
(self.side, self.interface))
@property
def state(self):
'''
state property getter
:returns: the state of the interface prefixed by the interface name
:rtype: str
'''
return self._state
@property
def ready(self):
'''
:returns:
``True`` if the interface is passing traffic and there are no
active emulations
:rtype: bool
'''
if self.State.ready in self._state:
return True
return False
@property
def emulating(self):
'''
:returns:
``True`` if the interface is passing traffic and there are one or
more active emulations
:rtype: bool
'''
if self.State.emulating in self._state:
return True
return False
@property
def blocking(self):
'''
:returns:
``True`` if the interface is blocking traffic (DOWN)
:rtype: bool
'''
if self.State.blocking in self._state:
return True
return False
def __new__(cls, interface, side=None, logger=None, *args, **kwargs):
"""
there are some restrictions on how instances of the
:classL`<NetemInterface>` are constructed, namely:
* the interface argument value cannot be reused. had that been
allowed, it would be possible to have multiple (and multiple
remote) instances controlling the same network device.
an extreme case of such a situation would be one instance setting
the device in the UP state, and another instance setting the
device in the DOWN state which is really not a good idea.
it is possible to keep track of the device states and prevent
such conflicts but it is much simpler (KISS) to just not allow
more than one instance per network device.
this also makes it easier to identify the instance(s) for
remote access by enforcing a logical "unique" identifier for
each instance
* the side member is intended to provide an easier way to describe
which instance controls which network device. it is sometimes
easier to just say 'i want to delay traffic on the host side'
instead of remembering that the network device controlling
traffic on the host side is enp2s0.
when not specified, the side member is initialized with the value
of the interface argument and the restraint at the previous
bullet point will handle this restraint. but otherwise one must
make sure that the side member respects the same unique
constraint as the interface member
this method updates a class variable each time a new object is
initialized. it will raise an exception if either the interface arg or
the side argument are present in previously defined instances.
:arg str interface: the name of the network device
:arg str side:
the side (symbolic) name by which one identifies the new instance
:raises:
:exceptions:`<netem_exceptions.NetemInterfaceBusyError>` when the
interface constraint kick in
:exceptions:`<netem_exceptions.NetemSideAlreadyDefinedError>` when
the side constraint kicks in
"""
instance = object.__new__(cls, *args, **kwargs)
# first make sure the class variable exists
if 'instances' not in cls.__dict__:
cls.instances = WeakSet()
# look for instances that are already using the interface and/or side
# args but only if this is not the first instance
if len(cls.instances):
if interface in [instance.interface for instance in cls.instances]:
raise netem_exceptions.NetemInterfaceBusyError(interface)
if side in [instance.side for instance in cls.instances]:
raise netem_exceptions.NetemSideAlreadyDefinedError(side)
cls.instances.add(instance)
return instance
@property
def info(self):
'''
:returns: a `dict` with the full information available for this
netem control instance
'''
return dict(side=self.side,
device=self.interface,
server_state=self.state,
device_state=self.interface_info,
active_emulations=self.emulation_info)
@property
def interface_info(self):
"""
:returns:
the output from::
ip link show dev $device_name
"""
return self.__execute__(Command.ifshow(self.interface))
def __execute__(self, cmd):
"""
execute the command prepared by the calling method, log teh results
smooch the returns
:param cmd:
the command
:returns:
the output of the command as returned by the operating system
:raises:
:exception:`<netem_exceptions.NetemCommandError>`
"""
self.logger.debug('executing %s' % cmd)
ret, out, error = execute(cmd)
if ret:
self.logger.error('stderr: %s' % error)
raise netem_exceptions.NetemCommandError(cmd, error)
self.logger.debug('stdout: %s' % out)
return out
@property
def emulation_info(self):
"""
get the netem stats
:returns:
the output from executing::
tc -s qdisc show dev $interface_name
"""
return self.__execute__(Command.show_emulations(self.interface))
@property
def is_interface_up(self):
"""
is the network interface up?
"""
if 'state UP' in self.interface_info:
return True
return False
def set_interface_up(self):
"""
bring up the interface
"""
if not self.is_interface_up:
self.__execute__(Command.ifup(self.interface))
self._state = '{}: {}'.format(self.interface, self.State.ready)
self.logger.info('interface state: %s' % self.interface_info)
def set_interface_down(self):
"""
bring down the interface
"""
if self.is_interface_up:
self.__execute__(Command.ifdown(self.interface))
self._state = '{}: {}'.format(self.interface, self.State.blocking)
self.logger.info('interface state: %s' % self.interface_info)
@property
def is_emulating(self):
"""
is there an active netem discipline applied
"""
if 'netem' in self.emulation_info:
return True
return False
def add_netem_options(self, *netem_options):
'''
apply one or more netem disciplines (netem_options) to the network
device controlled by this instance
:arg *default_netem_options:
use the specified netem_options with the default arguments present
in the netem_options classes
:arg **custom_netem_options:
use netem_options with fully (or partially) defined arguments
obviously, a syntax error is raised if there are conflicts between
*default_netem_options and **netem_options
'''
if not netem_options:
self.logger.exception(
'must specify at least one netem option when adding an'
' emulation')
raise netem_exceptions.NetemOptionsError(
msg='must specify at least one netem option when adding an'
' emulation')
# *args is a tuple, we want a list because it's meaner
netem_options = list(netem_options)
for netem_option in netem_options:
if not isinstance(netem_option, emulations.Emulation):
msg = 'emulation %s: invalid type %s' % (
netem_option, type(netem_option).__name__)
self.logger.exception(msg)
raise emulations.EmulationTypeError(
emulation=netem_option, msg=msg)
if 'emulation' in type(netem_option).__name__.lower():
msg = 'using %s directly is not allowed' % type(
netem_option).__name__
self.logger.exception(msg)
raise emulations.EmulationTypeError(
emulation=netem_option, msg=msg)
try:
emulations.Emulation.has_no_duplicates(netem_options)
except emulations.EmulationValueError as err:
self.logger.exception(err, exc_info=True)
raise err
if emulations.Emulation.has_reorder_without_delay(netem_options):
self.logger.info('found reorder option without delay option.'
' adding default delay option...')
netem_options.append(emulations.Delay())
try:
emulations.Emulation.has_no_multiple_loss_emulations(
netem_options)
except emulations.EmulationValueError as err:
self.logger.exception(err, exc_info=True)
raise err
if not emulations.Emulation.has_limit(netem_options):
self.logger.info('no limit option was specified.'
' adding default limit option...')
netem_options.append(emulations.Limit())
self.__execute__(Command.add_emulation(
self.interface,
' '.join(
[netem_option.emulation for netem_option in netem_options])))
self._state = '{}: {}'.format(self.interface, self.State.emulating)
def remove_emulations(self):
"""
we always assume that qdisc is applied on the device root,
no fancy handles
do a::
sudo tc qdisc del dev self.iface root
"""
if self.is_emulating:
self.__execute__(Command.remove_all_emulations(self.interface))
self._state = '{}: {}'.format(self.interface, self.State.ready)
self.logger.info(
'no emulations running on network device %s' % self.interface)
def remove_emulation(self):
'''
remove a single emulations
:raises: :exception:`<NotImplementedError>`
'''
raise NotImplementedError(
'please use self.remove_all_emulations() and then re-apply'
' any desired emulations')
| |
import testtools as unittest
import basicdb.backends
import basicdb.exceptions as exc
class BaseStorageBackendTests(unittest.TestCase):
    """
    Tests for the abstract :class:`basicdb.backends.StorageBackend`.

    The driver primitives (create/delete domain, attribute value
    manipulation, select, check_expectation) must raise
    ``NotImplementedError``; the composite operations (put/batch/delete
    fan-out) are verified by subclassing the backend and recording the
    primitive calls they perform.
    """

    def test_create_domain_raises_not_implemented(self):
        """create_domain is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError, backend.create_domain, "owner", "domain-name")

    def test_delete_domain_raises_not_implemented(self):
        """delete_domain is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError, backend.delete_domain, "owner", "domain-name")

    def test_domain_metadata_raises_not_implemented(self):
        """domain_metadata is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError, backend.domain_metadata, "owner", "domain-name")

    def test_list_domains_raises_not_implemented(self):
        """list_domains is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError, backend.list_domains, "owner")

    def test_add_attribute_value_raises_not_implemented(self):
        """add_attribute_value is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError, backend.add_attribute_value, "owner", "domain", "item", "attrname", "attrvalue")

    def test_delete_attribute_all_raises_not_implemented(self):
        """delete_attribute_all is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError, backend.delete_attribute_all, "owner", "domain", "item", "attrname")

    def test_delete_attribute_value_raises_not_implemented(self):
        """delete_attribute_value is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError, backend.delete_attribute_value, "owner", "domain", "item", "attrname", "attrvalue")

    def test_put_attributes_raises_exception_if_expectations_are_not_met(self):
        """put_attributes fails when check_expectations reports failure."""
        self.check_expectations_call_args = []
        self.add_attributes_call_args = []
        self.replace_attributes_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def check_expectations(self2, *args):
                # BUG FIX: this was a bare `False` expression (a no-op,
                # so the method implicitly returned None); the intent is
                # to report unmet expectations explicitly
                return False

        backend = TestStoreBackend()
        self.assertRaises(basicdb.exceptions.ConditionalCheckFailed,
                          backend.put_attributes, "owner", "domain", "item",
                          {"attr1": set(["attr1val1", "attr1val2"])},
                          {"attr2": set(["attr2val1"])},
                          [("attr3", True)])

    def test_put_attributes(self):
        """put_attributes checks expectations before adding/replacing."""
        self.check_expectations_call_args = []
        self.add_attributes_call_args = []
        self.replace_attributes_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def check_expectations(self2, *args):
                self.check_expectations_call_args += [args]
                return True

            def add_attributes(self2, *args):
                self.assertTrue(self.check_expectations_call_args,
                                "check_expectations was not called before adding attributes")
                self.add_attributes_call_args += [args]

            def replace_attributes(self2, *args):
                self.assertTrue(self.check_expectations_call_args,
                                "check_expectations was not called before replacing attributes")
                self.replace_attributes_call_args += [args]

        backend = TestStoreBackend()
        backend.put_attributes("owner", "domain", "item",
                               {"attr1": set(["attr1val1", "attr1val2"])},
                               {"attr2": set(["attr2val1"])},
                               [("attr3", True)])
        self.assertIn(("owner", "domain", "item", [("attr3", True)]),
                      self.check_expectations_call_args)
        self.assertIn(("owner", "domain", "item", {"attr1": set(["attr1val1", "attr1val2"])}),
                      self.add_attributes_call_args)
        self.assertIn(("owner", "domain", "item", {"attr2": set(["attr2val1"])}),
                      self.replace_attributes_call_args)

    def test_batch_put_attributes(self):
        """batch_put_attributes fans out to put_attributes per item."""
        self.put_attributes_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def put_attributes(self2, *args):
                self.put_attributes_call_args += [args]

        backend = TestStoreBackend()
        backend.batch_put_attributes("owner", "domain",
                                     {"item1": {"attr1": set(["attr1val1", "attr1val2"])}},
                                     {"item2": {"attr2": set(["attr2val1"])}})
        self.assertIn(("owner", "domain", "item1", {"attr1": set(["attr1val1", "attr1val2"])}, {}),
                      self.put_attributes_call_args)
        self.assertIn(("owner", "domain", "item2", {}, {"attr2": set(["attr2val1"])}),
                      self.put_attributes_call_args)

    def test_batch_delete_attributes(self):
        """batch_delete_attributes fans out to delete_attributes per item."""
        self.delete_attributes_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def delete_attributes(self2, *args):
                self.delete_attributes_call_args += [args]

        backend = TestStoreBackend()
        backend.batch_delete_attributes("owner", "domain",
                                        {"item1": {"attr1": set(["attr1val1", "attr1val2"])},
                                         "item2": {"attr2": set(["attr2val1"])}})
        self.assertIn(("owner", "domain", "item1", {"attr1": set(["attr1val1", "attr1val2"])}),
                      self.delete_attributes_call_args)
        self.assertIn(("owner", "domain", "item2", {"attr2": set(["attr2val1"])}),
                      self.delete_attributes_call_args)

    def test_get_attributes_raises_not_implemented(self):
        """get_attributes is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError, backend.get_attributes, "owner", "domain", "item")

    def test_add_attributes(self):
        """add_attributes fans out to add_attribute per attribute name."""
        self.add_attribute_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def add_attribute(self2, *args):
                self.add_attribute_call_args += [args]

        backend = TestStoreBackend()
        backend.add_attributes("owner", "domain", "item", {"attr1": set(["attr1val1", "attr1val2"]),
                                                           "attr2": set(["attr2val1"])})
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(len(self.add_attribute_call_args), 2)
        self.assertIn(("owner", "domain", "item", "attr1", set(["attr1val1", "attr1val2"])),
                      self.add_attribute_call_args)
        self.assertIn(("owner", "domain", "item", "attr2", set(["attr2val1"])),
                      self.add_attribute_call_args)

    def test_add_attribute(self):
        """add_attribute fans out to add_attribute_value per value."""
        self.add_attribute_value_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def add_attribute_value(self2, *args):
                self.add_attribute_value_call_args += [args]

        backend = TestStoreBackend()
        backend.add_attribute("owner", "domain", "item", "attr1", set(["attr1val1", "attr1val2"]))
        self.assertIn(("owner", "domain", "item", "attr1", "attr1val1"),
                      self.add_attribute_value_call_args)
        self.assertIn(("owner", "domain", "item", "attr1", "attr1val2"),
                      self.add_attribute_value_call_args)

    def test_replace_attributes(self):
        """replace_attributes fans out to replace_attribute per attribute."""
        self.replace_attribute_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def replace_attribute(self2, *args):
                self.replace_attribute_call_args += [args]

        backend = TestStoreBackend()
        backend.replace_attributes("owner", "domain", "item", {"attr1": set(["attr1val1", "attr1val2"]),
                                                               "attr2": set(["attr2val1"])})
        self.assertEqual(len(self.replace_attribute_call_args), 2)
        self.assertIn(("owner", "domain", "item", "attr1", set(["attr1val1", "attr1val2"])),
                      self.replace_attribute_call_args)
        self.assertIn(("owner", "domain", "item", "attr2", set(["attr2val1"])),
                      self.replace_attribute_call_args)

    def test_replace_attribute(self):
        """replace_attribute deletes old values before adding new ones."""
        self.delete_attributes_call_args = []
        self.add_attribute_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def delete_attributes(self2, *args):
                self.delete_attributes_call_args += [args]

            def add_attribute(self2, *args):
                self.assertIsNot(self.delete_attributes_call_args, None,
                                 "Attribute wasn't deleted first")
                self.add_attribute_call_args += [args]

        backend = TestStoreBackend()
        backend.replace_attribute("owner", "domain", "item", "attr1", set(["attr1val1", "attr1val2"]))
        self.assertIn(("owner", "domain", "item", {"attr1": set([basicdb.AllAttributes])}), self.delete_attributes_call_args)
        self.assertIn(("owner", "domain", "item", "attr1", set(["attr1val1", "attr1val2"])),
                      self.add_attribute_call_args)

    def test_delete_attributes(self):
        """delete_attributes fans out to delete_attribute per attribute."""
        self.delete_attribute_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def delete_attribute(self2, *args):
                self.delete_attribute_call_args += [args]

        backend = TestStoreBackend()
        backend.delete_attributes("owner", "domain", "item", {"attr1": set(["attr1val1", "attr1val2"])})
        self.assertIn(("owner", "domain", "item", "attr1", set(["attr1val1", "attr1val2"])),
                      self.delete_attribute_call_args)

    def test_delete_attribute_only_calls_delete_all_if_all_should_be_removed(self):
        """AllAttributes in the value set short-circuits to delete_attribute_all."""
        self.delete_attribute_all_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def delete_attribute_all(self2, *args):
                self.delete_attribute_all_call_args += [args]

            def delete_attribute_value(self2, *args):
                self.fail("Should not have called delete_attribute_value")

        backend = TestStoreBackend()
        backend.delete_attribute("owner", "domain", "item", "attr1", set(["attr1val1", basicdb.AllAttributes, "attr1val2"]))
        self.assertEqual([("owner", "domain", "item", "attr1")], self.delete_attribute_all_call_args)

    def test_delete_attribute(self):
        """delete_attribute fans out to delete_attribute_value per value."""
        self.delete_attribute_value_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def delete_attribute_all(self2, *args):
                self.fail("Should not have called delete_attribute_all")

            def delete_attribute_value(self2, *args):
                self.delete_attribute_value_call_args += [args]

        backend = TestStoreBackend()
        backend.delete_attribute("owner", "domain", "item", "attr1", set(["attr1val1", "attr1val2"]))
        self.assertIn(("owner", "domain", "item", "attr1", "attr1val1"), self.delete_attribute_value_call_args)
        self.assertIn(("owner", "domain", "item", "attr1", "attr1val2"), self.delete_attribute_value_call_args)

    def test_select_raises_not_implemented(self):
        """select is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError, backend.select, "owner", "SELECT somethign FROM somewhere")

    def test_check_expectation_raises_not_implemented(self):
        """check_expectation is a driver primitive."""
        backend = basicdb.backends.StorageBackend()
        self.assertRaises(NotImplementedError,
                          backend.check_expectation, "owner", 'domain', 'item', ('foo', 'bar'))

    def test_check_expectations(self):
        """check_expectations fans out to check_expectation per expectation."""
        self.check_expectation_call_args = []

        class TestStoreBackend(basicdb.backends.StorageBackend):
            def check_expectation(self2, *args):
                self.check_expectation_call_args += [args]

        backend = TestStoreBackend()
        backend.check_expectations("owner", "domain", "item", [("attr1", "val1"), ("attr1", "val2"), ("attr2", "val3")])
        self.assertIn(("owner", "domain", "item", ("attr1", "val1")), self.check_expectation_call_args)
        self.assertIn(("owner", "domain", "item", ("attr1", "val2")), self.check_expectation_call_args)
        self.assertIn(("owner", "domain", "item", ("attr2", "val3")), self.check_expectation_call_args)
class _GenericBackendDriverTest(object):
    """
    Backend-agnostic functional test suite.

    Mixed into a concrete ``unittest.TestCase`` subclass whose ``setUp``
    provides ``self.backend`` (see the driver test classes below).
    All deprecated ``assertEquals`` calls were replaced with
    ``assertEqual``.
    """

    def test_create_list_delete_domain(self):
        """Domains can be created, listed and deleted."""
        self.assertEqual(self.backend.list_domains("owner"), [])
        self.backend.create_domain("owner", "domain1")
        self.assertEqual(set(self.backend.list_domains("owner")), set(["domain1"]))
        self.backend.create_domain("owner", "domain2")
        self.assertEqual(set(self.backend.list_domains("owner")), set(["domain1", "domain2"]))
        self.backend.delete_domain("owner", "domain1")
        self.assertEqual(set(self.backend.list_domains("owner")), set(["domain2"]))
        self.backend.delete_domain("owner", "domain2")
        self.assertEqual(self.backend.list_domains("owner"), [])

    def test_domain_metadata(self):
        """domain_metadata can be fetched for an existing domain."""
        self.backend.create_domain("owner", "domain1")
        self.backend.domain_metadata("owner", "domain1")

    def test_batch_put_attributes(self):
        """Batch puts add and replace attributes across items."""
        self.backend.create_domain("owner", "domain1")
        self.backend.batch_put_attributes("owner", "domain1",
                                          {"item1": {"a": set(["b", "c"])},
                                           "item2": {"d": set(["e", "f"])}},
                                          {})
        self.assertEqual(self.backend.get_attributes("owner", "domain1", "item1"),
                         {"a": set(["b", "c"])})
        self.assertEqual(self.backend.get_attributes("owner", "domain1", "item2"),
                         {"d": set(["e", "f"])})
        self.backend.batch_put_attributes("owner", "domain1",
                                          {"item1": {"a": set(["e"])}},
                                          {"item2": {"d": set(["a", "b"])}})
        self.assertEqual(self.backend.get_attributes("owner", "domain1", "item1"),
                         {"a": set(["b", "c", "e"])})
        self.assertEqual(self.backend.get_attributes("owner", "domain1", "item2"),
                         {"d": set(["a", "b"])})

    def test_put_get_attributes(self):
        """put_attributes merges additions and applies replacements."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {"a": set(["b", "c"])},
                                    {"d": set(["e"])})
        self.assertEqual(self.backend.get_attributes("owner", "domain1", "item1"),
                         {"a": set(["b", "c"]), "d": set(["e"])})
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {"d": set(["f", "g"])},
                                    {"a": set(["h"])})
        self.assertEqual(self.backend.get_attributes("owner", "domain1", "item1"),
                         {"a": set(["h"]), "d": set(["e", "f", "g"])})

    def test_delete_attributes_non_existant_item(self):
        """Deleting attributes of a missing item is a no-op, not an error."""
        self.backend.create_domain("owner", "domain1")
        self.backend.delete_attributes("owner", "domain1", "item1",
                                       {"a": set(["b"]),
                                        "b": set([basicdb.AllAttributes])})

    def test_delete_attributes_non_existant_domain(self):
        """Deleting attributes in a missing domain is a no-op, not an error."""
        self.backend.delete_attributes("owner", "domain1", "item1",
                                       {"a": set(["b"]),
                                        "b": set([basicdb.AllAttributes])})

    def test_delete_attributes(self):
        """delete_attributes removes whole attributes and single values."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {"a": set(["b", "c"]),
                                     "d": set(["e"]),
                                     "f": set(["g"])},
                                    {})
        self.backend.delete_attributes("owner", "domain1", "item1",
                                       {"a": set([basicdb.AllAttributes]),
                                        "d": set(["f"]),
                                        "f": set(["g"])})
        self.assertEqual(self.backend.get_attributes("owner", "domain1", "item1"),
                         {"d": set(["e"])})

    def test_batch_delete_attributes(self):
        """Batch deletes apply the per-item delete semantics."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {"a": set(["b", "c"]),
                                     "d": set(["e"]),
                                     "f": set(["g"])},
                                    {})
        self.backend.put_attributes("owner", "domain1", "item2",
                                    {"h": set(["i"]),
                                     "j": set(["k"])},
                                    {})
        self.backend.batch_delete_attributes("owner", "domain1",
                                             {"item1":
                                              {"a": set([basicdb.AllAttributes]),
                                               "d": set(["f"]),
                                               "f": set(["g"])},
                                              "item2":
                                              {"j": set(["k"])}})
        self.assertEqual(self.backend.get_attributes("owner", "domain1", "item1"),
                         {"d": set(["e"])})
        self.assertEqual(self.backend.get_attributes("owner", "domain1", "item2"),
                         {"h": set(["i"])})

    def _load_sample_query_data_set(self):
        """Load the SimpleDB-style sample data used by the select tests."""
        self.backend.create_domain('owner', 'mydomain')
        self.backend.put_attributes('owner', 'mydomain', "0385333498",
                                    {"Title": set(["The Sirens of Titan"]),
                                     "Author": set(["Kurt Vonnegut"]),
                                     "Year": set(["1959"]),
                                     "Pages": set(["00336"]),
                                     "Keyword": set(["Book", "Paperback"]),
                                     "Rating": set(["*****", "5 stars", "Excellent"])}, {})
        self.backend.put_attributes('owner', 'mydomain', "0802131786",
                                    {"Title": set(["Tropic of Cancer"]),
                                     "Author": set(["Henry Miller"]),
                                     "Year": set(["1934"]),
                                     "Pages": set(["00318"]),
                                     "Keyword": set(["Book"]),
                                     "Rating": set(["****"])}, {})
        self.backend.put_attributes('owner', 'mydomain', "1579124585",
                                    {"Title": set(["The Right Stuff"]),
                                     "Author": set(["Tom Wolfe"]),
                                     "Year": set(["1979"]),
                                     "Pages": set(["00304"]),
                                     "Keyword": set(["Book", "Hardcover", "American"]),
                                     "Rating": set(["****", "4 stars"])}, {})
        self.backend.put_attributes('owner', 'mydomain', "B000T9886K",
                                    {"Title": set(["In Between"]),
                                     "Author": set(["Paul Van Dyk"]),
                                     "Year": set(["2007"]),
                                     "Keyword": set(["CD", "Trance"]),
                                     "Rating": set(["4 stars"])}, {})
        self.backend.put_attributes('owner', 'mydomain', "B00005JPLW",
                                    {"Title": set(["300"]),
                                     "Author": set(["Zack Snyder"]),
                                     "Year": set(["2007"]),
                                     "Keyword": set(["DVD", "Action", "Frank Miller"]),
                                     "Rating": set(["***", "3 stars", "Not bad"])}, {})
        self.backend.put_attributes('owner', 'mydomain', "B000SF3NGK",
                                    {"Title": set(["Heaven's Gonna Burn Your Eyes"]),
                                     "Author": set(["Thievery Corporation"]),
                                     "Year": set(["2002"]),
                                     "Rating": set(["*****"])}, {})

    def test_select2(self):
        """Exercise the select grammar against the sample data set."""
        self._load_sample_query_data_set()

        def f(expr, items, ordered=False):
            # compare as a list when ordering matters, as a set otherwise
            if not ordered:
                wrap = set
            else:
                wrap = list
            self.assertEqual(wrap(self.backend.select_wrapper("owner", expr)[0]),
                             wrap(items), expr)

        f("select * from mydomain where Title = 'The Right Stuff'",
          ["1579124585"])
        f("select * from mydomain where Year > '1985'",
          ["B000T9886K", "B00005JPLW", "B000SF3NGK"])
        f("select * from mydomain where Rating like '****%'",
          ["0385333498", "1579124585", "0802131786", "B000SF3NGK"])
        f("select * from mydomain where Pages < '00320'",
          ["1579124585", "0802131786"])
        f("select * from mydomain where Year > '1975' and Year < '2008'",
          ["1579124585", "B000T9886K", "B00005JPLW", "B000SF3NGK"])
        f("select * from mydomain where Year between '1975' and '2008'",
          ["1579124585", "B000T9886K", "B00005JPLW", "B000SF3NGK"])
        f("select * from mydomain where Rating = '***' or Rating = '*****'",
          ["0385333498", "B00005JPLW", "B000SF3NGK"])
        f("select * from mydomain where (Year > '1950' and Year < '1960') "
          "or Year like '193%' or Year = '2007'",
          ["0385333498", "0802131786", "B000T9886K", "B00005JPLW"])
        f("select * from mydomain where (Year > '1950' and Year < '1960') "
          "or Year like '193%' or Year = '2007'",
          ["0385333498", "0802131786", "B000T9886K", "B00005JPLW"])
        f("select * from mydomain where Rating = '4 stars' or Rating = '****'",
          ["1579124585", "0802131786", "B000T9886K"])
        f("select * from mydomain where Rating in ('4 stars', '****')",
          ["1579124585", "0802131786", "B000T9886K"])
        f("select * from mydomain where Keyword = 'Book' and Keyword = 'Hardcover'",
          [])
        f("select * from mydomain where every(Keyword) in ('Book', 'Paperback')",
          ["0385333498", "0802131786"])
        f("select * from mydomain where Keyword = 'Book' intersection Keyword = 'Hardcover'",
          ["1579124585"])
        f("select * from mydomain where Year < '1980' order by Year asc",
          ["0802131786", "0385333498", "1579124585"], ordered=True)
        f("select * from mydomain where Year < '1980' order by Year",
          ["0802131786", "0385333498", "1579124585"], ordered=True)
        f("select * from mydomain where Year = '2007' intersection Author is not null order by Author desc",
          ["B00005JPLW", "B000T9886K"], ordered=True)
        self.assertRaises(exc.InvalidSortExpressionException, f, "select * from mydomain order by Year asc", [])
        f("select * from mydomain where Year < '1980' order by Year limit 2",
          ["0802131786", "0385333498"])
        f("select itemName() from mydomain where itemName() like 'B000%' order by itemName()",
          ["B00005JPLW", "B000SF3NGK", "B000T9886K"])

        def g(expr, expected):
            # count(*) queries return a single pseudo item keyed 'count'
            order, results = self.backend.select_wrapper("owner", expr)
            self.assertEqual(order, ['mydomain'])
            self.assertEqual(results['mydomain']['count'].pop(), expected)

        g("select count(*) from mydomain where Title = 'The Right Stuff'", "1")
        g("select count(*) from mydomain where Year > '1985'", "3")
        g("select count(*) from mydomain limit 500", "6")
        g("select count(*) from mydomain limit 4", "4")

    def test_select(self):
        """Basic projection and WHERE filtering through select_wrapper."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {"shape": set(["square", "triangle"]),
                                     "colour": set(["Blue"])}, {})
        self.backend.put_attributes("owner", "domain1", "item2",
                                    {"colour": set(["Blue"])}, {})
        self.backend.put_attributes("owner", "domain1", "item3",
                                    {"shape": set(["round"]),
                                     "colour": set(["Red"])}, {})
        self.assertEqual(self.backend.select_wrapper("owner", "SELECT * FROM domain1")[1],
                         {"item1": {"shape": set(["square", "triangle"]),
                                    "colour": set(["Blue"])},
                          "item2": {"colour": set(["Blue"])},
                          "item3": {"shape": set(["round"]),
                                    "colour": set(["Red"])}})
        self.assertEqual(self.backend.select_wrapper("owner", "SELECT shape FROM domain1")[1],
                         {"item1": {"shape": set(["square", "triangle"])},
                          "item3": {"shape": set(["round"])}})
        self.assertEqual(self.backend.select_wrapper("owner", "SELECT shape FROM domain1 WHERE colour LIKE 'Blue'")[1],
                         {"item1": {"shape": set(["square", "triangle"])}})
        self.assertEqual(self.backend.select_wrapper("owner", "SELECT shape FROM domain1 WHERE shape = 'triangle'")[1],
                         {"item1": {"shape": set(["square", "triangle"])}})

    def test_unknown_attribute_condition_raises_attribute_does_not_exist(self):
        """Expecting a value for a missing attribute fails."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1",
                                    "item1",
                                    {'attr1': set(['attr1val1'])},
                                    {}, [])
        self.assertRaises(basicdb.exceptions.AttributeDoesNotExist, self.backend.put_attributes, "owner", "domain1", "item1", {'attr2': set(["attr2val1"])}, {}, [('attr3', 'attr3val1')])

    def test_multi_valued_attribute_raises_multi_valued_attribute(self):
        """Single-value expectations cannot target multi-valued attributes."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {'attr1': set(['attr1val1', 'attr1val2'])},
                                    {}, [])
        self.assertRaises(basicdb.exceptions.MultiValuedAttribute, self.backend.put_attributes, "owner", "domain1", "item1", {'attr2': set(["attr2val1"])}, {}, [('attr1', 'attr1val1')])

    def test_wrong_value_raises_conditional_check_failed(self):
        """A mismatched expected value fails the conditional put."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {'attr1': set(['attr1val1'])},
                                    {}, [])
        self.assertRaises(basicdb.exceptions.WrongValueFound, self.backend.put_attributes, "owner", "domain1", "item1", {'attr2': set(["attr2val1"])}, {}, [('attr1', 'attr1val2')])

    def test_unexpected_attribute_raises_unexpected_value(self):
        """Expecting absence (False) of a present attribute fails."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {'attr1': set(['attr1val1'])},
                                    {}, [])
        self.assertRaises(basicdb.exceptions.FoundUnexpectedAttribute, self.backend.put_attributes, "owner", "domain1", "item1", {'attr2': set(["attr2val1"])}, {}, [('attr1', False)])

    def test_expected_attribute_value_succeeds(self):
        """A matching expected value lets the conditional put proceed."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {'attr1': set(['attr1val1'])},
                                    {}, [])
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {'attr2': set(["attr2val1"])}, {},
                                    [('attr1', 'attr1val1')])

    def test_expected_attribute_succeeds(self):
        """Expecting presence (True) of a present attribute succeeds."""
        self.backend.create_domain("owner", "domain1")
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {'attr1': set(['attr1val1'])},
                                    {}, [])
        self.backend.put_attributes("owner", "domain1", "item1",
                                    {'attr2': set(["attr2val1"])}, {},
                                    [('attr1', True)])
class FakeBackendDriverTest(_GenericBackendDriverTest, unittest.TestCase):
    """Runs the generic backend test suite against the in-memory fake driver."""
    def setUp(self):
        super(FakeBackendDriverTest, self).setUp()
        self.backend = basicdb.backends.fake.driver()
    def tearDown(self):
        super(FakeBackendDriverTest, self).tearDown()
        # Drop all state so the next test starts from an empty backend.
        self.backend._reset()
class FilesystemBackendDriverTest(_GenericBackendDriverTest, unittest.TestCase):
    """Runs the generic backend test suite against the filesystem driver."""
    def setUp(self):
        super(FilesystemBackendDriverTest, self).setUp()
        # Imported lazily so other suites don't pay for this backend's imports.
        import basicdb.backends.filesystem
        self.backend = basicdb.backends.filesystem.driver()
    def tearDown(self):
        super(FilesystemBackendDriverTest, self).tearDown()
        # Remove on-disk state so the next test starts from an empty backend.
        self.backend._reset()
class RiakBackendDriverTest(_GenericBackendDriverTest, unittest.TestCase):
    """Runs the generic backend test suite against the Riak driver.

    These tests need a reachable Riak server, so they are opt-in: set the
    ENABLE_RIAK_TESTS environment variable to run them.
    """
    def setUp(self):
        import os
        if 'ENABLE_RIAK_TESTS' not in os.environ:
            # BUG FIX: unittest.TestCase has no ``skip`` method; calling
            # ``self.skip(...)`` raised AttributeError instead of skipping.
            # ``skipTest`` is the supported way to skip from inside setUp.
            self.skipTest("Riak tests not enabled (set ENABLE_RIAK_TESTS "
                          "env to enable)")
        super(RiakBackendDriverTest, self).setUp()
        import basicdb.backends.riak
        import uuid
        # A random bucket name isolates each run from leftover server data.
        self.backend = basicdb.backends.riak.driver(base_bucket='testbucket%s' % (uuid.uuid4().hex,))
    def tearDown(self):
        super(RiakBackendDriverTest, self).tearDown()
        self.backend._reset()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tokenize
import ddt
import six
from tests.hacking import checks
from tests.unit import test
@ddt.ddt
class HackingTestCase(test.TestCase):
def test__parse_assert_mock_str(self):
pos, method, obj = checks._parse_assert_mock_str(
"mock_clients.fake().quotas.delete.assert_called_once()")
self.assertEqual("assert_called_once", method)
self.assertEqual("mock_clients.fake().quotas.delete", obj)
def test__parse_assert_mock_str_no_assert(self):
pos, method, obj = checks._parse_assert_mock_str(
"mock_clients.fake().quotas.delete.")
self.assertIsNone(pos)
self.assertIsNone(method)
self.assertIsNone(obj)
    @ddt.data(
        {"line": "fdafadfdas # noqa", "result": []},
        {"line": " # fdafadfdas", "result": []},
        {"line": " ", "result": []},
        {"line": "otherstuff", "result": [42]}
    )
    @ddt.unpack
    def test_skip_ignored_lines(self, line, result):
        """skip_ignored_lines must suppress checks on noqa/comment/blank lines.

        The wrapped generator unconditionally yields 42; the decorator is
        expected to swallow that yield for ignored lines and let it through
        for normal code lines.
        """
        @checks.skip_ignored_lines
        def any_gen(physical_line, logical_line, file_name):
            yield 42
        self.assertEqual(result, list(any_gen(line, line, "f")))
def test_correct_usage_of_assert_from_mock(self):
correct_method_names = ["assert_any_call", "assert_called_once_with",
"assert_called_with", "assert_has_calls"]
for name in correct_method_names:
line = "some_mock.%s(asd)" % name
self.assertEqual(0, len(
list(checks.check_assert_methods_from_mock(
line, line, "./tests/fake/test"))))
def test_wrong_usage_of_broad_assert_from_mock(self):
fake_method = "rtfm.assert_something()"
actual_number, actual_msg = next(checks.check_assert_methods_from_mock(
fake_method, fake_method, "./tests/fake/test"))
self.assertEqual(4, actual_number)
self.assertTrue(actual_msg.startswith("N301"))
def test_wrong_usage_of_assert_called_from_mock(self):
fake_method = "rtfm.assert_called()"
actual_number, actual_msg = next(checks.check_assert_methods_from_mock(
fake_method, fake_method, "./tests/fake/test"))
self.assertEqual(4, actual_number)
self.assertTrue(actual_msg.startswith("N302"))
def test_wrong_usage_of_assert_called_once_from_mock(self):
fake_method = "rtfm.assert_called_once()"
actual_number, actual_msg = next(checks.check_assert_methods_from_mock(
fake_method, fake_method, "./tests/fake/test"))
self.assertEqual(4, actual_number)
self.assertTrue(actual_msg.startswith("N303"))
def _assert_good_samples(self, checker, samples, module_file="f"):
for s in samples:
self.assertEqual([], list(checker(s, s, module_file)), s)
def _assert_bad_samples(self, checker, samples, module_file="f"):
for s in samples:
self.assertEqual(1, len(list(checker(s, s, module_file))), s)
    def test_check_wrong_logging_import(self):
        """Only rally.common.logging may be imported for logging.

        Direct oslo_log/stdlib logging imports are flagged everywhere except
        inside rally/common/logging.py itself, which is exempt.
        """
        bad_imports = ["from oslo_log import log",
                       "import oslo_log",
                       "import logging"]
        good_imports = ["from rally.common import logging",
                       "from rally.common.logging",
                       "import rally.common.logging"]
        for bad in bad_imports:
            checkres = checks.check_import_of_logging(bad, bad, "fakefile")
            self.assertIsNotNone(next(checkres))
        # The wrapper module itself is allowed to import the real backends.
        for bad in bad_imports:
            checkres = checks.check_import_of_logging(
                bad, bad, "./rally/common/logging.py")
            self.assertEqual([], list(checkres))
        for good in good_imports:
            checkres = checks.check_import_of_logging(good, good, "fakefile")
            self.assertEqual([], list(checkres))
    def test_no_translate_debug_logs(self):
        """Debug-level log messages must not be wrapped in _() translation."""
        bad_samples = ["LOG.debug(_('foo'))"]
        self._assert_bad_samples(checks.no_translate_debug_logs, bad_samples)
        # Translation is fine at info level and above.
        good_samples = ["LOG.debug('foo')", "LOG.info(_('foo'))"]
        self._assert_good_samples(checks.no_translate_debug_logs, good_samples)
    def test_no_use_conf_debug_check(self):
        """CONF.debug must not be consulted directly; use logging.is_debug()."""
        bad_samples = [
            "if CONF.debug:",
            "if cfg.CONF.debug"
        ]
        self._assert_bad_samples(checks.no_use_conf_debug_check, bad_samples)
        good_samples = ["if logging.is_debug()"]
        self._assert_good_samples(checks.no_use_conf_debug_check, good_samples)
    @ddt.data(
        {
            "line": "self.assertTrue(isinstance(e, exception.BuildAbortExc))",
            "result": 1
        },
        {
            "line": "self.assertTrue()",
            "result": 0
        }
    )
    @ddt.unpack
    def test_assert_true_instance(self, line, result):
        """assertTrue(isinstance(...)) is flagged; plain assertTrue is not."""
        self.assertEqual(
            result, len(list(checks.assert_true_instance(line, line, "f"))))
    @ddt.data(
        {
            "line": "self.assertEqual(type(als['QuicAssist']), list)",
            "result": 1
        },
        {
            "line": "self.assertTrue()",
            "result": 0
        }
    )
    @ddt.unpack
    def test_assert_equal_type(self, line, result):
        """assertEqual(type(...), T) is flagged in favor of assertIsInstance."""
        self.assertEqual(
            len(list(checks.assert_equal_type(line, line, "f"))), result)
    @ddt.data(
        {"line": "self.assertEqual(A, None)", "result": 1},
        {"line": "self.assertEqual(None, A)", "result": 1},
        {"line": "self.assertIsNone()", "result": 0}
    )
    @ddt.unpack
    def test_assert_equal_none(self, line, result):
        """assertEqual against None (either argument order) is flagged."""
        self.assertEqual(
            len(list(checks.assert_equal_none(line, line, "f"))), result)
    @ddt.data(
        {"line": "self.assertNotEqual(A, None)", "result": 1},
        {"line": "self.assertNotEqual(None, A)", "result": 1},
        {"line": "self.assertIsNotNone()", "result": 0}
    )
    @ddt.unpack
    def test_assert_not_equal_none(self, line, result):
        """assertNotEqual against None (either argument order) is flagged."""
        self.assertEqual(
            len(list(checks.assert_not_equal_none(line, line, "f"))), result)
    def test_assert_true_or_false_with_in_or_not_in(self):
        """assertTrue/assertFalse over a bare ``in`` test should use assertIn.

        Membership tests buried inside a larger boolean expression (any(...),
        ``and``-joined conditions) are legitimate and must not be flagged.
        """
        good_lines = [
            "self.assertTrue(any(A > 5 for A in B))",
            "self.assertTrue(any(A > 5 for A in B), 'some message')",
            "self.assertFalse(some in list1 and some2 in list2)"
        ]
        self._assert_good_samples(checks.assert_true_or_false_with_in,
                                  good_lines)
        bad_lines = [
            "self.assertTrue(A in B)",
            "self.assertFalse(A in B)",
            "self.assertTrue(A not in B)",
            "self.assertFalse(A not in B)",
            "self.assertTrue(A in B, 'some message')",
            "self.assertFalse(A in B, 'some message')",
            "self.assertTrue(A not in B, 'some message')",
            "self.assertFalse(A not in B, 'some message')",
            "self.assertTrue(A in 'some string with spaces')",
            "self.assertTrue(A in 'some string with spaces')",
            "self.assertTrue(A in ['1', '2', '3'])",
            "self.assertTrue(A in [1, 2, 3])"
        ]
        self._assert_bad_samples(checks.assert_true_or_false_with_in,
                                 bad_lines)
    def test_assert_equal_in(self):
        """assertEqual(x in y, True/False) in either order should use assertIn.

        any(...) expressions compared to True/False are acceptable and must
        not be flagged.
        """
        good_lines = [
            "self.assertEqual(any(a==1 for a in b), True)",
            "self.assertEqual(True, any(a==1 for a in b))",
            "self.assertEqual(any(a==1 for a in b), False)",
            "self.assertEqual(False, any(a==1 for a in b))"
        ]
        self._assert_good_samples(checks.assert_equal_in, good_lines)
        bad_lines = [
            "self.assertEqual(a in b, True)",
            "self.assertEqual(a not in b, True)",
            "self.assertEqual('str' in 'string', True)",
            "self.assertEqual('str' not in 'string', True)",
            "self.assertEqual(True, a in b)",
            "self.assertEqual(True, a not in b)",
            "self.assertEqual(True, 'str' in 'string')",
            "self.assertEqual(True, 'str' not in 'string')",
            "self.assertEqual(a in b, False)",
            "self.assertEqual(a not in b, False)",
            "self.assertEqual('str' in 'string', False)",
            "self.assertEqual('str' not in 'string', False)",
            "self.assertEqual(False, a in b)",
            "self.assertEqual(False, a not in b)",
            "self.assertEqual(False, 'str' in 'string')",
            "self.assertEqual(False, 'str' not in 'string')",
        ]
        self._assert_bad_samples(checks.assert_equal_in, bad_lines)
    def test_check_no_direct_rally_objects_import(self):
        """Submodules of rally.common.objects must not be imported directly.

        The objects package __init__ itself is exempt, since it is the one
        place that re-exports the submodules.
        """
        bad_imports = ["from rally.common.objects import task",
                       "import rally.common.objects.task"]
        self._assert_bad_samples(checks.check_no_direct_rally_objects_import,
                                 bad_imports)
        # Same lines are fine inside the package's own __init__.py.
        self._assert_good_samples(
            checks.check_no_direct_rally_objects_import,
            bad_imports,
            module_file="./rally/common/objects/__init__.py")
        good_imports = ["from rally.common import objects"]
        self._assert_good_samples(checks.check_no_direct_rally_objects_import,
                                  good_imports)
    def test_check_no_oslo_deprecated_import(self):
        """Deprecated ``oslo.*`` namespace imports are flagged.

        The oslo libraries moved to the ``oslo_*`` naming scheme; every old
        dotted form below must produce exactly one error.
        """
        bad_imports = ["from oslo.config",
                       "import oslo.config",
                       "from oslo.db",
                       "import oslo.db",
                       "from oslo.i18n",
                       "import oslo.i18n",
                       "from oslo.serialization",
                       "import oslo.serialization",
                       "from oslo.utils",
                       "import oslo.utils"]
        self._assert_bad_samples(checks.check_no_oslo_deprecated_import,
                                 bad_imports)
    def test_check_quotas(self):
        """String literals should use double quotes; single quotes are flagged.

        Literals whose content contains the other quote character, and lines
        marked ``# noqa``, are exempt.
        """
        bad_lines = [
            "a = '1'",
            "a = \"a\" + 'a'",
            "'",
            "\"\"\"\"\"\" + ''''''"
        ]
        self._assert_bad_samples(checks.check_quotes, bad_lines)
        good_lines = [
            "\"'a'\" + \"\"\"a'''fdfd'''\"\"\"",
            "\"fdfdfd\" + \"''''''\"",
            "a = '' # noqa "
        ]
        self._assert_good_samples(checks.check_quotes, good_lines)
    def test_check_no_constructor_data_struct(self):
        """dict()/list() constructor calls are flagged in favor of literals."""
        bad_struct = [
            "= dict()",
            "= list()"
        ]
        self._assert_bad_samples(checks.check_no_constructor_data_struct,
                                 bad_struct)
        good_struct = [
            "= []",
            "= {}",
        ]
        self._assert_good_samples(checks.check_no_constructor_data_struct,
                                  good_struct)
    def test_check_dict_formatting_in_string(self):
        """Malformed %(key)s dict-formatting across tokens is detected.

        Each sample is wrapped in a print() call and tokenized, because the
        checker operates on a token stream rather than on raw source lines.
        """
        bad = [
            "\"%(a)s\" % d",
            "\"Split across \"\n\"multiple lines: %(a)f\" % d",
            "\"%(a)X split across \"\n\"multiple lines\" % d",
            "\"%(a)-5.2f: Split %(\"\n\"a)#Lu stupidly\" % d",
            "\"Comment between \" # wtf\n\"split lines: %(a) -6.2f\" % d",
            "\"Two strings\" + \" added: %(a)-6.2f\" % d",
            "\"half legit (%(a)s %(b)s)\" % d + \" half bogus: %(a)s\" % d",
            "(\"Parenthesized: %(a)s\") % d",
            "(\"Parenthesized \"\n\"concatenation: %(a)s\") % d",
            "(\"Parenthesized \" + \"addition: %(a)s\") % d",
            "\"Complete %s\" % (\"foolisness: %(a)s%(a)s\" % d)",
            "\"Modulus %(a)s\" % {\"a\": (5 % 3)}"
        ]
        for sample in bad:
            sample = "print(%s)" % sample
            tokens = tokenize.generate_tokens(
                six.moves.StringIO(sample).readline)
            self.assertEqual(
                1,
                len(list(checks.check_dict_formatting_in_string(sample,
                                                                tokens))))
        # Two independent offences on one line must be reported twice.
        sample = "print(\"%(a)05.2lF\" % d + \" added: %(a)s\" % d)"
        tokens = tokenize.generate_tokens(six.moves.StringIO(sample).readline)
        self.assertEqual(
            2,
            len(list(checks.check_dict_formatting_in_string(sample, tokens))))
        good = [
            "\"This one is okay: %(a)s %(b)s\" % d",
            "\"So is %(a)s\"\n\"this one: %(b)s\" % d"
        ]
        for sample in good:
            sample = "print(%s)" % sample
            tokens = tokenize.generate_tokens(
                six.moves.StringIO(sample).readline)
            self.assertEqual(
                [],
                list(checks.check_dict_formatting_in_string(sample, tokens)))
    @ddt.data(
        "text = unicode('sometext')",
        "text = process(unicode('sometext'))"
    )
    def test_check_using_unicode(self, line):
        """Any use of the Python 2 unicode() builtin is reported exactly once."""
        checkres = checks.check_using_unicode(line, line, "fakefile")
        self.assertIsNotNone(next(checkres))
        # The generator must be exhausted after the single report.
        self.assertEqual([], list(checkres))
    def test_check_raises(self):
        """Docstrings must use ':raises Exc: reason', not ':raises: Exc ...'."""
        checkres = checks.check_raises(
            "text = :raises: Exception if conditions", "fakefile")
        self.assertIsNotNone(checkres)
        checkres = checks.check_raises(
            "text = :raises Exception: if conditions", "fakefile")
        self.assertIsNone(checkres)
    def test_check_db_imports_of_cli(self):
        """Importing rally.common.db is flagged only inside the cli package."""
        line = "from rally.common import db"
        # Inside rally/cli: the generator must yield at least one report.
        next(checks.check_db_imports_in_cli(
            line, line, "./rally/cli/filename"))
        # Outside rally/cli: the generator must be empty.
        checkres = checks.check_db_imports_in_cli(
            line, line, "./filename")
        self.assertRaises(StopIteration, next, checkres)
    def test_check_objects_imports_of_cli(self):
        """Importing rally.common.objects is flagged only inside the cli package."""
        line = "from rally.common import objects"
        # Inside rally/cli: the generator must yield at least one report.
        next(checks.check_objects_imports_in_cli(
            line, line, "./rally/cli/filename"))
        # Outside rally/cli: the generator must be empty.
        checkres = checks.check_objects_imports_in_cli(
            line, line, "./filename")
        self.assertRaises(StopIteration, next, checkres)
    @ddt.data(
        "class Oldstype():",
        "class Oldstyle:"
    )
    def test_check_old_type_class(self, line):
        """Class definitions without an explicit base class are reported once."""
        # NOTE(review): "Oldstype" in the first sample looks like a typo for
        # "Oldstyle", but it is harmless test data — any class name triggers
        # the check the same way.
        checkres = checks.check_old_type_class(line, line, "fakefile")
        self.assertIsNotNone(next(checkres))
        self.assertEqual([], list(checkres))
def test_check_datetime_alias(self):
lines = ["import datetime as date",
"import datetime",
"import datetime as dto",
"from datetime import datetime as dtime"]
for line in lines:
checkres = checks.check_datetime_alias(line, line, "fakefile")
self.assertIsNotNone(next(checkres))
self.assertEqual([], list(checkres))
line = "import datetime as dt"
checkres = checks.check_datetime_alias(line, line, "fakefile")
| |
import logging
import os
import socket
from lvsm import genericdirector, utils
from lvsm.modules import kaparser

logger = logging.getLogger('lvsm')

# needed for testing the code on non-Linux platforms
try:
    from snimpy import manager
    from snimpy import mib
    from snimpy import snmp
except ImportError:
    # logger.warn is a deprecated alias of logger.warning; also fixed the
    # "availble" typo and a stray empty adjacent string literal in the
    # original message.
    logger.warning("Python module 'snimpy' not found, loading a dummy module.")
    logger.warning("'enable' and 'disable' commands will not be available.")
    from lvsm.snimpy_dummy import manager
    from lvsm.snimpy_dummy import mib
    from lvsm.snimpy_dummy import snmp
class Keepalived(genericdirector.GenericDirector):
    """
    Implements Keepalived specific functions. Stub for now.

    Real servers are enabled/disabled by writing their SNMP weight via the
    KEEPALIVED-MIB; the original weight is cached as a file under
    ``self.cache_dir`` so it can be restored on enable.
    """
    def __init__(self, ipvsadm, configfile='', restart_cmd='', nodes='', args=dict()):
        # args: dict of keepalived-specific settings; the keys read below
        # ('keepalived-mib', 'snmp_community', 'snmp_host', 'snmp_user',
        # 'snmp_password', 'cache_dir') are all required to be present.
        # NOTE(review): args=dict() is a mutable default argument; it is only
        # read here, but confirm no caller mutates it.
        super(Keepalived, self).__init__(ipvsadm,
                                         configfile,
                                         restart_cmd,
                                         nodes,
                                         args)
        # Now handle args
        self.mib = args['keepalived-mib']
        self.snmp_community = args['snmp_community']
        self.snmp_host = args['snmp_host']
        if args['snmp_user']:
            self.snmp_user = args['snmp_user']
        else:
            self.snmp_user = None
        if args['snmp_password']:
            self.snmp_password = args['snmp_password']
        else:
            self.snmp_password = None
        self.cache_dir = args['cache_dir']
    def rmfile(self, filepath):
        """
        A safe way to remove files in keepalived

        Errors are logged rather than raised, so callers continue even when
        the cache file cannot be deleted.
        """
        try:
            logger.debug("Keeplived.rmfile(): removing file %s" % filepath)
            os.unlink(filepath)
        except OSError as e:
            logger.error(e)
            logger.error('Please make sure %s is writable!' % self.cache_dir)
            logger.error('%s needs to be manually deleted to avoid future problems.' % filepath)
    def disable(self, protocol, host, port='', vhost='', vport='', reason=''):
        """
        Disable a real server in keepalived. This command rellies on snimpy
        and will set the weight of the real server to 0.
        The reason is not used in this case.

        Returns True if at least one matching real server was disabled,
        False on any validation, SNMP, or file error.
        """
        found = False
        hostips = utils.gethostbyname_ex(host)
        if not hostips:
            logger.error('Real server %s is not valid!' % host)
            return False
        # Here we only use the first IP if the host has more than one
        hostip = hostips[0]
        if port:
            # check that it's a valid port
            portnum = utils.getportnum(port)
            if portnum == -1:
                logger.error('Port %s is not valid!' % port)
                return False
        if vhost:
            vipnums = utils.gethostbyname_ex(vhost)
            if not vipnums:
                logger.error('Virtual host %s not valid!' % vhost)
                return False
            # only take the first ip address if host has more than one
            vipnum = vipnums[0]
        if vport:
            vportnum = utils.getportnum(vport)
            if vportnum == -1:
                logger.error('Virtual port %s is not valid!' % vport)
                return False
        try:
            manager.load(self.mib)
            m = manager.Manager(self.snmp_host, self.snmp_community)
            # Not compatible with earlier
            # versions of snimpy
            #secname=self.snmp_user,
            #authpassword=self.snmp_password)
        except (snmp.SNMPException, mib.SMIException) as e:
            logger.error(e)
            logger.error("Unable to perfrom action!")
            return False
        # iterate through the virtual servers
        # and disable the matching real server
        try:
            for i in m.virtualServerAddress:
                hexip = m.virtualServerAddress[i]
                vip = socket.inet_ntoa(hexip)
                logger.debug("Keepalived.disable(): Checking VIP: %s" % vip)
                logger.debug("Keepalived.disable(): Protocol: %s" % str(m.virtualServerProtocol[i]))
                if m.virtualServerProtocol[i] == protocol:
                    if not vhost or vipnum == vip:
                        vp = m.virtualServerPort[i]
                        if not vport or vportnum == vp:
                            # iterate over the realservers in
                            # the specific virtual
                            j = m.virtualServerRealServersTotal[i]
                            idx = 1
                            while idx <= j:
                                hexip = m.realServerAddress[i,idx]
                                rip = socket.inet_ntoa(hexip)
                                rp = m.realServerPort[i,idx]
                                if hostip == rip:
                                    if not port or (port and portnum == rp):
                                        logger.debug('Keepalived.disable(): Disabling %s:%s on VIP %s:%s' % (rip, rp, vip, vp))
                                        # 'found' is used to keep track of
                                        # matching real servers to disable
                                        found = True
                                        # Record the original weight
                                        # before disabling it
                                        # It'll be used when enabling
                                        weight = m.realServerWeight[i,idx]
                                        logger.debug('Keepalived.disable(): Current weight: %s' % weight)
                                        if weight == 0:
                                            logger.warning("Real server %s:%s is already disabled on VIP %s:%s" % (rip, rp, vip, vp))
                                            idx += 1
                                            continue
                                        filename = "realServerWeight.%s.%s" % (i, idx)
                                        fullpath = '%s/%s' % (self.cache_dir, filename)
                                        rfilename = "realServerReason.%s.%s" % (i, idx)
                                        rfullpath = '%s/%s' % (self.cache_dir, rfilename)
                                        try:
                                            # Create a file with the original weight
                                            logger.info('Creating file: %s' % fullpath)
                                            f = open(fullpath, 'w')
                                            f.write(str(weight))
                                            f.close()
                                            # Create a file with the disable reason
                                            logger.info('Creating file: %s' % rfullpath)
                                            f = open(rfullpath, 'w')
                                            f.write(str(reason))
                                            f.close()
                                        except IOError as e:
                                            logger.error(e)
                                            logger.error('Please make sure %s is writable before proceeding!' % self.cache_dir)
                                            return False
                                        # Copy the file to the other nodes
                                        # In case of a switch lvsm will have
                                        # the weight info on all nodes
                                        self.filesync_nodes('copy', fullpath)
                                        # set the weight to zero
                                        community = "private"
                                        cmd_example = "snmpset -v2c -c %s localhost KEEPALIVED-MIB::%s = 0" % (community, filename)
                                        logger.info("Running equivalent command to: %s" % cmd_example)
                                        m.realServerWeight[i,idx] = 0
                                        print "Disabled %s:%s on VIP %s:%s (%s). Weight set to 0." % (rip, rp, vip, vp, protocol)
                                idx += 1
        except snmp.SNMPException as e:
            logger.error(e)
            logger.error("Unable to complete the command successfully! Please verify manually.")
            return False
        if not found:
            logger.error('No matching real servers were found!')
            return False
        else:
            return True
    def enable(self, protocol, rhost, rport='',vhost='', vport=''):
        """
        Enable a real server in keepalived. This command rellies on snimpy
        and will set the weight of the real server back to its original weight.
        Assumption: original weight is stored in self.cache_dir/realServerWeight.x.y
        The reason is not used in this case.

        Returns True when the scan completes (even if nothing matched),
        False on any validation or SNMP error.
        """
        hostips = utils.gethostbyname_ex(rhost)
        if not hostips:
            logger.error('Real server %s is not valid!' % rhost)
            return False
        # Here we only use the first IP if the host has more than one
        hostip = hostips[0]
        if rport:
            # check that it's a valid port
            portnum = utils.getportnum(rport)
            if portnum == -1:
                logger.error('Port %s is not valid!' % rport)
                return False
        if vhost:
            # NOTE(review): disable() resolves vhost with
            # utils.gethostbyname_ex(...)[0] and compares against the dotted
            # IP; here utils.gethostname() is used instead, so the
            # ``vipnum == vip`` comparison below may never match — confirm
            # against the utils module and align with disable().
            vipnum = utils.gethostname(vhost)
            if not vipnum:
                logger.error('Virtual host %s not valid!' % vhost)
                return False
        if vport:
            vportnum = utils.getportnum(vport)
            if vportnum == -1:
                logger.error('Virtual port %s is not valid!' % vport)
                return False
        try:
            manager.load(self.mib)
            m = manager.Manager(self.snmp_host, self.snmp_community)
            # Not compatible with earlier
            # versions of snimpy
            # secname=self.snmp_user,
            # authpassword=self.snmp_password)
        except (snmp.SNMPException, mib.SMIException) as e:
            logger.error(e)
            logger.error("Unable to perfrom action!")
            return False
        # iterate through the virtual servers
        # and enable the matching real server
        # if the weight is zero.
        # Note: if file is not found in the cache_dir (i.e. /var/cache/lvsm)
        # we set the weight 1 (keepalived default)
        try:
            for i in m.virtualServerAddress:
                hexip = m.virtualServerAddress[i]
                vip = socket.inet_ntoa(hexip)
                logger.debug("Keepalived.enable(): Checking VIP: %s" % vip)
                logger.debug("Keepalived.enable(): Protocol: %s" % str(m.virtualServerProtocol[i]))
                if m.virtualServerProtocol[i] == protocol:
                    if not vhost or vipnum == vip:
                        vp = m.virtualServerPort[i]
                        if not vport or vportnum == vp:
                            # iterate over the realservers in
                            # the specific virtual host
                            j = m.virtualServerRealServersTotal[i]
                            idx = 1
                            while idx <= j:
                                hexip = m.realServerAddress[i,idx]
                                rip = socket.inet_ntoa(hexip)
                                logger.debug("Keepalived.enable(): RIP: %s" % rip)
                                rp = m.realServerPort[i,idx]
                                if hostip == rip:
                                    if not rport or (rport and portnum == rp):
                                        # Record the original weight somewhere before disabling it
                                        # Will be used when enabling the server
                                        weight = m.realServerWeight[i,idx]
                                        logger.debug('Keepalived.enable(): Current weight: %s' % weight)
                                        if weight > 0:
                                            msg = "Real server %s:%s on VIP %s:%s is already enabled with a weight of %s" % (rip, rp, vip, vp, weight)
                                            logger.warning(msg)
                                            idx += 1
                                            continue
                                        filename = "realServerWeight.%s.%s" % (i, idx)
                                        fullpath = '%s/%s' % (self.cache_dir, filename)
                                        logger.debug('Keepalived.enable(): Enabling %s:%s on VIP %s:%s' % (rip, rp, vip, vp))
                                        try:
                                            logger.debug('Keepalived.enable(): Reading server weight from file: %s' % fullpath)
                                            f = open(fullpath, 'r')
                                            str_weight = f.readline().rstrip()
                                            f.close()
                                            # make sure the weight is a valid int
                                            orig_weight = int(str_weight)
                                        except IOError as e:
                                            logger.warning("%s. Using 1 as default weight!" % e)
                                            logger.warning("To ensure the correct wieght is set, please restart Keepalived.")
                                            orig_weight = 1
                                        # set the weight to zero
                                        community = "private"
                                        cmd_example = "snmpset -v2c -c %s localhost KEEPALIVED-MIB::%s = %s" % (community, filename, orig_weight)
                                        logger.info("Running equivalent command to: %s" % cmd_example)
                                        m.realServerWeight[i,idx] = orig_weight
                                        print "Enabled %s:%s on VIP %s:%s (%s). Weight set to %s." % (rip, rp, vip, vp, protocol, orig_weight)
                                        # Now remove the placeholder file locally
                                        try:
                                            logger.debug("Keeplived.enable(): removing placehodler file")
                                            os.unlink(fullpath)
                                        except OSError as e:
                                            logger.error(e)
                                            logger.error('Please make sure %s is writable!' % self.cache_dir)
                                            logger.error('%s needs to be manually deleted to avoid future problems.' % fullpath)
                                        # Try removing the reason file
                                        rfilename = "realServerReason.%s.%s" % (i, idx)
                                        rfullpath = '%s/%s' % (self.cache_dir, rfilename)
                                        try:
                                            logger.debug("Keeplived.enable(): removing reason file")
                                            os.unlink(rfullpath)
                                        except OSError as e:
                                            logger.error(e)
                                            logger.error('Please make sure %s is writable!' % self.cache_dir)
                                            logger.error('%s needs to be manually deleted to avoid future problems.' % rfullpath)
                                        # remove the placeholder file in other nodes
                                        self.filesync_nodes('remove', fullpath)
                                idx += 1
        except snmp.SNMPException as e:
            logger.error(e)
            logger.error("Unable to complete the command successfully! Please verify manually.")
            return False
        # NOTE(review): unlike disable(), no 'found' flag is tracked here, so
        # True is returned even when no real server matched — confirm this is
        # intentional.
        return True
    def show_real_disabled(self, host, port, numeric):
        """show status of disabled real server across multiple VIPs

        Scans the cached ipvs table for real servers whose weight is "0" and
        returns one formatted line per match, appending the cached disable
        reason (if a realServerReason.x.y file exists) to each line.
        """
        logger.debug("Keepalived.show_real_disabled(): host:%s" % host)
        logger.debug("Keepalived.show_real_disabled(): port:%s" % port)
        output = list()
        # update the ipvs table
        self.build_ipvs()
        for i, v in enumerate(self.virtuals):
            for j, r in enumerate(v.realServers):
                # weight is held as a string in the parsed ipvs table
                if r.weight == "0":
                    if not host or utils.gethostbyname_ex(host)[0] == r.ip:
                        if not port or utils.getportnum(port) == r.port:
                            if numeric:
                                output.append("%s:%s" % (r.ip, r.port))
                            else:
                                # Fall back to the raw IP/port if reverse
                                # lookup or service-name lookup fails.
                                try:
                                    host, aliaslist, ipaddrlist = socket.gethostbyaddr(r.ip)
                                except socket.herror as e:
                                    host = r.ip
                                try:
                                    portname = socket.getservbyport(int(r.port))
                                except socket.error as e:
                                    portname = r.port
                                output.append("%s:%s" % (host, portname))
                            try:
                                # Reason files are keyed by 1-based SNMP
                                # virtual/real indices, hence i+1 / j+1.
                                filename = "%s/realServerReason.%d.%d" % (self.cache_dir, i+1, j+1)
                                f = open(filename)
                                reason = 'Reason: ' + f.readline()
                                logger.debug("Keepalived.show_real_disabled(): %s" % reason)
                                f.close()
                            except IOError as e:
                                logger.error(e)
                                reason = ''
                            output[-1] = output[-1] + "\t\t" + reason
        return output
    def parse_config(self, configfile):
        """Read the config file and validate configuration syntax

        Returns True when the keepalived config tokenizes successfully,
        False when the file cannot be read or tokenizing yields nothing.
        """
        try:
            f = open(configfile)
        except IOError as e:
            logger.error(e)
            return False
        conf = "".join(f.readlines())
        tokens = kaparser.tokenize_config(conf)
        if tokens:
            return True
        else:
            return False
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SmsCommandList(ListResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """
    def __init__(self, version):
        """
        Initialize the SmsCommandList
        :param Version version: Version that contains the resource
        :returns: twilio.rest.supersim.v1.sms_command.SmsCommandList
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandList
        """
        super(SmsCommandList, self).__init__(version)
        # Path Solution
        self._solution = {}
        self._uri = '/SmsCommands'.format(**self._solution)
    def create(self, sim, payload, callback_method=values.unset,
               callback_url=values.unset):
        """
        Create the SmsCommandInstance
        :param unicode sim: The sid or unique_name of the SIM to send the SMS Command to
        :param unicode payload: The message body of the SMS Command
        :param unicode callback_method: The HTTP method we should use to call callback_url
        :param unicode callback_url: The URL we should call after we have sent the command
        :returns: The created SmsCommandInstance
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandInstance
        """
        # values.of() drops any parameter left at values.unset.
        data = values.of({
            'Sim': sim,
            'Payload': payload,
            'CallbackMethod': callback_method,
            'CallbackUrl': callback_url,
        })
        # NOTE: ``payload`` is deliberately rebound here from the request body
        # string to the API response dict (generated-code convention).
        payload = self._version.create(method='POST', uri=self._uri, data=data, )
        return SmsCommandInstance(self._version, payload, )
    def stream(self, sim=values.unset, status=values.unset, direction=values.unset,
               limit=None, page_size=None):
        """
        Streams SmsCommandInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.
        :param unicode sim: The SID or unique name of the Sim resource that SMS Command was sent to or from.
        :param SmsCommandInstance.Status status: The status of the SMS Command
        :param SmsCommandInstance.Direction direction: The direction of the SMS Command
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.supersim.v1.sms_command.SmsCommandInstance]
        """
        limits = self._version.read_limits(limit, page_size)
        page = self.page(sim=sim, status=status, direction=direction, page_size=limits['page_size'], )
        return self._version.stream(page, limits['limit'])
    def list(self, sim=values.unset, status=values.unset, direction=values.unset,
             limit=None, page_size=None):
        """
        Lists SmsCommandInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.
        :param unicode sim: The SID or unique name of the Sim resource that SMS Command was sent to or from.
        :param SmsCommandInstance.Status status: The status of the SMS Command
        :param SmsCommandInstance.Direction direction: The direction of the SMS Command
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.supersim.v1.sms_command.SmsCommandInstance]
        """
        # Eager variant of stream(): materializes the generator into a list.
        return list(self.stream(
            sim=sim,
            status=status,
            direction=direction,
            limit=limit,
            page_size=page_size,
        ))
    def page(self, sim=values.unset, status=values.unset, direction=values.unset,
             page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of SmsCommandInstance records from the API.
        Request is executed immediately
        :param unicode sim: The SID or unique name of the Sim resource that SMS Command was sent to or from.
        :param SmsCommandInstance.Status status: The status of the SMS Command
        :param SmsCommandInstance.Direction direction: The direction of the SMS Command
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50
        :returns: Page of SmsCommandInstance
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandPage
        """
        data = values.of({
            'Sim': sim,
            'Status': status,
            'Direction': direction,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })
        response = self._version.page(method='GET', uri=self._uri, params=data, )
        return SmsCommandPage(self._version, response, self._solution)
    def get_page(self, target_url):
        """
        Retrieve a specific page of SmsCommandInstance records from the API.
        Request is executed immediately
        :param str target_url: API-generated URL for the requested results page
        :returns: Page of SmsCommandInstance
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandPage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )
        return SmsCommandPage(self._version, response, self._solution)
    def get(self, sid):
        """
        Constructs a SmsCommandContext
        :param sid: The SID that identifies the resource to fetch
        :returns: twilio.rest.supersim.v1.sms_command.SmsCommandContext
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandContext
        """
        return SmsCommandContext(self._version, sid=sid, )
    def __call__(self, sid):
        """
        Constructs a SmsCommandContext
        :param sid: The SID that identifies the resource to fetch
        :returns: twilio.rest.supersim.v1.sms_command.SmsCommandContext
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandContext
        """
        # Same as get(); allows sms_commands(sid) call syntax.
        return SmsCommandContext(self._version, sid=sid, )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Supersim.V1.SmsCommandList>'
class SmsCommandPage(Page):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, response, solution):
        """
        Initialize the SmsCommandPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :returns: twilio.rest.supersim.v1.sms_command.SmsCommandPage
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandPage
        """
        super(SmsCommandPage, self).__init__(version, response)
        # Path solution carried over from the owning list resource.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of SmsCommandInstance

        :param dict payload: Payload response from the API
        :returns: twilio.rest.supersim.v1.sms_command.SmsCommandInstance
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandInstance
        """
        instance = SmsCommandInstance(self._version, payload, )
        return instance

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Supersim.V1.SmsCommandPage>'
class SmsCommandContext(InstanceContext):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, sid):
        """
        Initialize the SmsCommandContext.

        :param Version version: Version that contains the resource
        :param sid: The SID that identifies the resource to fetch
        :returns: twilio.rest.supersim.v1.sms_command.SmsCommandContext
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandContext
        """
        super(SmsCommandContext, self).__init__(version)

        # Path solution used to derive the resource URI below.
        self._solution = {'sid': sid}
        self._uri = '/SmsCommands/{sid}'.format(sid=sid)

    def fetch(self):
        """
        Fetch the SmsCommandInstance from the API.

        :returns: The fetched SmsCommandInstance
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri)
        return SmsCommandInstance(self._version, payload, sid=self._solution['sid'])

    def __repr__(self):
        """
        Provide a friendly, machine-readable representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(*item) for item in self._solution.items())
        return '<Twilio.Supersim.V1.SmsCommandContext {}>'.format(details)
class SmsCommandInstance(InstanceResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    class Status(object):
        QUEUED = "queued"
        SENT = "sent"
        DELIVERED = "delivered"
        RECEIVED = "received"
        FAILED = "failed"

    class Direction(object):
        TO_SIM = "to_sim"
        FROM_SIM = "from_sim"

    def __init__(self, version, payload, sid=None):
        """
        Initialize the SmsCommandInstance from a raw API payload.

        :returns: twilio.rest.supersim.v1.sms_command.SmsCommandInstance
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandInstance
        """
        super(SmsCommandInstance, self).__init__(version)

        # Marshal the plain string fields straight off the payload ...
        self._properties = {
            key: payload.get(key)
            for key in ('sid', 'account_sid', 'sim_sid', 'payload',
                        'status', 'direction', 'url')
        }
        # ... and deserialize the two ISO 8601 timestamps.
        self._properties['date_created'] = deserialize.iso8601_datetime(
            payload.get('date_created'))
        self._properties['date_updated'] = deserialize.iso8601_datetime(
            payload.get('date_updated'))

        # Context is built lazily by _proxy; fall back to the payload sid
        # when an explicit sid was not supplied.
        self._context = None
        self._solution = {'sid': sid or self._properties.get('sid')}

    @property
    def _proxy(self):
        """
        Lazily build (and cache) a SmsCommandContext for this instance.
        All instance actions are proxied to that context.

        :returns: SmsCommandContext for this SmsCommandInstance
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandContext
        """
        if self._context is None:
            self._context = SmsCommandContext(self._version, sid=self._solution['sid'])
        return self._context

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties.get('sid')

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties.get('account_sid')

    @property
    def sim_sid(self):
        """
        :returns: The SID of the SIM that this SMS Command was sent to or from
        :rtype: unicode
        """
        return self._properties.get('sim_sid')

    @property
    def payload(self):
        """
        :returns: The message body of the SMS Command sent to or from the SIM
        :rtype: unicode
        """
        return self._properties.get('payload')

    @property
    def status(self):
        """
        :returns: The status of the SMS Command
        :rtype: SmsCommandInstance.Status
        """
        return self._properties.get('status')

    @property
    def direction(self):
        """
        :returns: The direction of the SMS Command
        :rtype: SmsCommandInstance.Direction
        """
        return self._properties.get('direction')

    @property
    def date_created(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was created
        :rtype: datetime
        """
        return self._properties.get('date_created')

    @property
    def date_updated(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was last updated
        :rtype: datetime
        """
        return self._properties.get('date_updated')

    @property
    def url(self):
        """
        :returns: The absolute URL of the SMS Command resource
        :rtype: unicode
        """
        return self._properties.get('url')

    def fetch(self):
        """
        Fetch the SmsCommandInstance via its context.

        :returns: The fetched SmsCommandInstance
        :rtype: twilio.rest.supersim.v1.sms_command.SmsCommandInstance
        """
        return self._proxy.fetch()

    def __repr__(self):
        """
        Provide a friendly, machine-readable representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(*item) for item in self._solution.items())
        return '<Twilio.Supersim.V1.SmsCommandInstance {}>'.format(details)
| |
#!/usr/bin/env python3
import unittest
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q, GRE, ERSPAN
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from framework import VppTestCase, VppTestRunner
from util import Host, ppp
from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint, VppDot1ADSubint
from vpp_gre_interface import VppGreInterface
from collections import namedtuple
from vpp_papi import VppEnum
# A single 802.1 VLAN tag: its TPID ("dot1") plus the VLAN id.
Tag = namedtuple('Tag', 'dot1 vlan')

# Ethertypes for 802.1ad (QinQ outer tag) and plain 802.1Q tags.
DOT1AD = 0x88A8
DOT1Q = 0x8100
class TestSpan(VppTestCase):
    """ SPAN Test Case

    Exercises VPP port mirroring (SPAN) at device and L2 level, including
    mirroring into VXLAN, GRE/ERSPAN and VLAN-rewritten sub-interfaces.
    Traffic is sent pg0 <-> pg1 and the mirror destination is pg2.
    """

    @classmethod
    def setUpClass(cls):
        super(TestSpan, cls).setUpClass()
        # Test variables
        cls.pkts_per_burst = 257    # Number of packets per burst
        # create 3 pg interfaces
        cls.create_pg_interfaces(range(3))
        cls.bd_id = 55
        cls.sub_if = VppDot1QSubint(cls, cls.pg0, 100)
        cls.vlan_sub_if = VppDot1QSubint(cls, cls.pg2, 300)
        cls.vlan_sub_if.set_vtr(L2_VTR_OP.L2_POP_1, tag=300)
        cls.qinq_sub_if = VppDot1ADSubint(cls, cls.pg2, 33, 400, 500)
        cls.qinq_sub_if.set_vtr(L2_VTR_OP.L2_POP_2, outer=500, inner=400)
        # packet flows mapping pg0 -> pg1, pg2 -> pg3, etc.
        cls.flows = dict()
        cls.flows[cls.pg0] = [cls.pg1]
        cls.flows[cls.pg1] = [cls.pg0]
        # packet sizes
        cls.pg_if_packet_sizes = [64, 512, 1518]  # , 9018]
        # setup all interfaces
        for i in cls.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()
        cls.vxlan = cls.vapi.vxlan_add_del_tunnel(
            src_address=cls.pg2.local_ip4n, dst_address=cls.pg2.remote_ip4n,
            is_add=1, vni=1111)

    def setUp(self):
        super(TestSpan, self).setUp()
        self.reset_packet_infos()

    def tearDown(self):
        super(TestSpan, self).tearDown()

    def show_commands_at_teardown(self):
        self.logger.info(self.vapi.ppcli("show interface span"))

    def xconnect(self, a, b, is_add=1):
        """Add/remove a bi-directional L2 cross-connect between a and b."""
        self.vapi.sw_interface_set_l2_xconnect(a, b, enable=is_add)
        self.vapi.sw_interface_set_l2_xconnect(b, a, enable=is_add)

    def bridge(self, sw_if_index, is_add=1):
        """Add/remove an interface to/from the test bridge domain."""
        self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=sw_if_index,
                                             bd_id=self.bd_id, enable=is_add)

    def _remove_tag(self, packet, vlan, tag_type):
        """Strip one VLAN tag in place, checking TPID and VLAN id."""
        self.assertEqual(packet.type, tag_type)
        payload = packet.payload
        self.assertEqual(payload.vlan, vlan)
        inner_type = payload.type
        payload = payload.payload
        packet.remove_payload()
        packet.add_payload(payload)
        packet.type = inner_type

    def remove_tags(self, packet, tags):
        """Strip the given sequence of Tag(dot1, vlan) from the packet."""
        for t in tags:
            self._remove_tag(packet, t.vlan, t.dot1)
        return packet

    def decap_gre(self, pkt):
        """
        Decapsulate the original payload frame by removing GRE header
        """
        self.assertEqual(pkt[Ether].src, self.pg2.local_mac)
        self.assertEqual(pkt[Ether].dst, self.pg2.remote_mac)

        self.assertEqual(pkt[IP].src, self.pg2.local_ip4)
        self.assertEqual(pkt[IP].dst, self.pg2.remote_ip4)

        return pkt[GRE].payload

    def decap_erspan(self, pkt, session):
        """
        Decapsulate the original payload frame by removing ERSPAN header
        """
        self.assertEqual(pkt[Ether].src, self.pg2.local_mac)
        self.assertEqual(pkt[Ether].dst, self.pg2.remote_mac)

        self.assertEqual(pkt[IP].src, self.pg2.local_ip4)
        self.assertEqual(pkt[IP].dst, self.pg2.remote_ip4)

        self.assertEqual(pkt[ERSPAN].ver, 1)
        self.assertEqual(pkt[ERSPAN].vlan, 0)
        self.assertEqual(pkt[ERSPAN].cos, 0)
        self.assertEqual(pkt[ERSPAN].en, 3)
        self.assertEqual(pkt[ERSPAN].t, 0)
        self.assertEqual(pkt[ERSPAN].session_id, session)
        self.assertEqual(pkt[ERSPAN].reserved, 0)
        self.assertEqual(pkt[ERSPAN].index, 0)

        return pkt[ERSPAN].payload

    def decap_vxlan(self, pkt):
        """
        Decapsulate the original payload frame by removing VXLAN header
        """
        self.assertEqual(pkt[Ether].src, self.pg2.local_mac)
        self.assertEqual(pkt[Ether].dst, self.pg2.remote_mac)

        self.assertEqual(pkt[IP].src, self.pg2.local_ip4)
        self.assertEqual(pkt[IP].dst, self.pg2.remote_ip4)

        return pkt[VXLAN].payload

    def create_stream(self, src_if, packet_sizes, do_dot1=False, bcast=False):
        """Build a burst of UDP packets from src_if towards its flow peer."""
        pkts = []
        dst_if = self.flows[src_if][0]
        dst_mac = src_if.remote_mac
        if bcast:
            dst_mac = "ff:ff:ff:ff:ff:ff"

        for i in range(0, self.pkts_per_burst):
            payload = "span test"
            size = packet_sizes[int((i / 2) % len(packet_sizes))]
            p = (Ether(src=src_if.local_mac, dst=dst_mac) /
                 IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4) /
                 UDP(sport=10000 + src_if.sw_if_index * 1000 + i, dport=1234) /
                 Raw(payload))
            if do_dot1:
                p = self.sub_if.add_dot1_layer(p)
            self.extend_packet(p, size)
            pkts.append(p)
        return pkts

    def verify_capture(self, cap1, cap2):
        """Check that cap2 contains exactly the (mirrored) packets of cap1."""
        self.assertEqual(len(cap1), len(cap2),
                         "Different number of sent and mirrored packets :"
                         "%u != %u" % (len(cap1), len(cap2)))

        # BUGFIX: the previous version asserted
        #     self.assertEqual(pkts1.sort(), pkts2.sort())
        # but list.sort() returns None, so the comparison was always
        # None == None and the content check was vacuous.  Serialize the
        # Ether/IP/UDP headers to bytes (scapy packets define no ordering)
        # and compare the sorted multisets instead.
        pkts1 = sorted(bytes(pkt[Ether] / pkt[IP] / pkt[UDP]) for pkt in cap1)
        pkts2 = sorted(bytes(pkt[Ether] / pkt[IP] / pkt[UDP]) for pkt in cap2)
        self.assertEqual(pkts1, pkts2)

    def test_device_span(self):
        """ SPAN device rx mirror """

        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.pg0.sw_if_index, self.pg1.sw_if_index)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(self.pg0, self.pg_if_packet_sizes)
        self.pg0.add_stream(pkts)

        # Enable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.pg0.sw_if_index, self.pg2.sw_if_index)

        self.logger.info(self.vapi.ppcli("show interface span"))
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)

        # Disable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.pg0.sw_if_index, self.pg2.sw_if_index, state=0)
        self.xconnect(self.pg0.sw_if_index, self.pg1.sw_if_index, is_add=0)

        self.verify_capture(pg1_pkts, pg2_pkts)

    def test_span_l2_rx(self):
        """ SPAN l2 rx mirror """

        self.sub_if.admin_up()

        self.bridge(self.pg2.sw_if_index)
        # Create bi-directional cross-connects between pg0 subif and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)

        # Enable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.pg2.sw_if_index, is_l2=1)

        self.logger.info(self.vapi.ppcli("show interface span"))
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        pg2_expected = len(pkts)
        pg1_pkts = self.pg1.get_capture(pg2_expected)
        pg2_pkts = self.pg2.get_capture(pg2_expected)
        self.bridge(self.pg2.sw_if_index, is_add=0)

        # Disable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.pg2.sw_if_index, state=0, is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)

        self.verify_capture(pg1_pkts, pg2_pkts)

    def test_span_l2_rx_dst_vxlan(self):
        """ SPAN l2 rx mirror into vxlan """

        self.sub_if.admin_up()
        self.vapi.sw_interface_set_flags(self.vxlan.sw_if_index,
                                         flags=1)

        self.bridge(self.vxlan.sw_if_index, is_add=1)
        # Create bi-directional cross-connects between pg0 subif and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)

        # Enable SPAN on pg0 sub if (mirrored to vxlan)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.vxlan.sw_if_index, is_l2=1)

        self.logger.info(self.vapi.ppcli("show interface span"))
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = [self.decap_vxlan(p) for p in self.pg2.get_capture(n_pkts)]

        self.bridge(self.vxlan.sw_if_index, is_add=0)
        # Disable SPAN on pg0 sub if (mirrored to vxlan)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.vxlan.sw_if_index, state=0, is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)

        self.verify_capture(pg1_pkts, pg2_pkts)

    def test_span_l2_rx_dst_gre_erspan(self):
        """ SPAN l2 rx mirror into gre-erspan """

        self.sub_if.admin_up()

        gre_if = VppGreInterface(self, self.pg2.local_ip4,
                                 self.pg2.remote_ip4,
                                 session=543,
                                 type=(VppEnum.vl_api_gre_tunnel_type_t.
                                       GRE_API_TUNNEL_TYPE_ERSPAN))

        gre_if.add_vpp_config()
        gre_if.admin_up()

        self.bridge(gre_if.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=1)

        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)

        # Enable SPAN on pg0 sub if (mirrored to gre-erspan)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, gre_if.sw_if_index, is_l2=1)

        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)

        def decap(p): return self.decap_erspan(p, session=543)
        pg2_decaped = [decap(p) for p in pg2_pkts]

        self.bridge(gre_if.sw_if_index, is_add=0)

        # Disable SPAN on pg0 sub if
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, gre_if.sw_if_index, state=0, is_l2=1)
        gre_if.remove_vpp_config()
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)

        self.verify_capture(pg1_pkts, pg2_decaped)

    def test_span_l2_rx_dst_gre_subif_vtr(self):
        """ SPAN l2 rx mirror into gre-subif+vtr """

        self.sub_if.admin_up()

        gre_if = VppGreInterface(self, self.pg2.local_ip4,
                                 self.pg2.remote_ip4,
                                 type=(VppEnum.vl_api_gre_tunnel_type_t.
                                       GRE_API_TUNNEL_TYPE_TEB))

        gre_if.add_vpp_config()
        gre_if.admin_up()

        gre_sub_if = VppDot1QSubint(self, gre_if, 500)
        gre_sub_if.set_vtr(L2_VTR_OP.L2_POP_1, tag=500)
        gre_sub_if.admin_up()

        self.bridge(gre_sub_if.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=1)

        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)

        # Enable SPAN on pg0 sub if (mirrored to gre sub if)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, gre_sub_if.sw_if_index, is_l2=1)

        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)

        def decap(p): return self.remove_tags(
            self.decap_gre(p), [Tag(dot1=DOT1Q, vlan=500)])
        pg2_decaped = [decap(p) for p in pg2_pkts]

        self.bridge(gre_sub_if.sw_if_index, is_add=0)

        # Disable SPAN on pg0 sub if
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, gre_sub_if.sw_if_index, state=0, is_l2=1)
        gre_if.remove_vpp_config()
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)

        self.verify_capture(pg1_pkts, pg2_decaped)

    def test_span_l2_rx_dst_1q_vtr(self):
        """ SPAN l2 rx mirror into 1q subif+vtr """

        self.sub_if.admin_up()
        self.vlan_sub_if.admin_up()

        self.bridge(self.vlan_sub_if.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=1)

        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)

        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.vlan_sub_if.sw_if_index, is_l2=1)

        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)
        pg2_untagged = [self.remove_tags(p, [Tag(dot1=DOT1Q, vlan=300)])
                        for p in pg2_pkts]

        self.bridge(self.vlan_sub_if.sw_if_index, is_add=0)
        # Disable SPAN on pg0 sub if (mirrored to vxlan)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.vlan_sub_if.sw_if_index, state=0,
            is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)

        self.verify_capture(pg1_pkts, pg2_untagged)

    def test_span_l2_rx_dst_1ad_vtr(self):
        """ SPAN l2 rx mirror into 1ad subif+vtr """

        self.sub_if.admin_up()
        self.qinq_sub_if.admin_up()

        self.bridge(self.qinq_sub_if.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=1)

        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)

        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.qinq_sub_if.sw_if_index, is_l2=1)

        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)
        pg2_untagged = [self.remove_tags(p, [Tag(dot1=DOT1AD, vlan=400),
                                             Tag(dot1=DOT1Q, vlan=500)])
                        for p in pg2_pkts]

        self.bridge(self.qinq_sub_if.sw_if_index, is_add=0)
        # Disable SPAN on pg0 sub if (mirrored to vxlan)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.qinq_sub_if.sw_if_index, state=0,
            is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)

        self.verify_capture(pg1_pkts, pg2_untagged)

    def test_l2_tx_span(self):
        """ SPAN l2 tx mirror """

        self.sub_if.admin_up()
        self.bridge(self.pg2.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index)
        # Create incoming packet streams for packet-generator interfaces
        pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pkts)

        # Enable SPAN on pg1 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.pg1.sw_if_index, self.pg2.sw_if_index, is_l2=1, state=2)

        self.logger.info(self.vapi.ppcli("show interface span"))
        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        n_pkts = len(pkts)
        pg1_pkts = self.pg1.get_capture(n_pkts)
        pg2_pkts = self.pg2.get_capture(n_pkts)

        self.bridge(self.pg2.sw_if_index, is_add=0)
        # Disable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.pg1.sw_if_index, self.pg2.sw_if_index, state=0, is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)

        self.verify_capture(pg1_pkts, pg2_pkts)

    def test_l2_rx_tx_span(self):
        """ SPAN l2 rx tx mirror """

        self.sub_if.admin_up()
        self.bridge(self.pg2.sw_if_index)
        # Create bi-directional cross-connects between pg0 and pg1
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index)

        # Create incoming packet streams for packet-generator interfaces
        pg0_pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True)
        self.pg0.add_stream(pg0_pkts)
        pg1_pkts = self.create_stream(
            self.pg1, self.pg_if_packet_sizes, do_dot1=False)
        self.pg1.add_stream(pg1_pkts)

        # Enable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.pg2.sw_if_index, is_l2=1, state=3)
        self.logger.info(self.vapi.ppcli("show interface span"))

        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        pg0_expected = len(pg1_pkts)
        pg1_expected = len(pg0_pkts)
        pg2_expected = pg0_expected + pg1_expected

        pg0_pkts = self.pg0.get_capture(pg0_expected)
        pg1_pkts = self.pg1.get_capture(pg1_expected)
        pg2_pkts = self.pg2.get_capture(pg2_expected)

        self.bridge(self.pg2.sw_if_index, is_add=0)
        # Disable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.pg2.sw_if_index, state=0, is_l2=1)
        self.xconnect(self.sub_if.sw_if_index, self.pg1.sw_if_index, is_add=0)

        self.verify_capture(pg0_pkts + pg1_pkts, pg2_pkts)

    def test_l2_bcast_mirror(self):
        """ SPAN l2 broadcast mirror """

        self.sub_if.admin_up()
        self.bridge(self.pg2.sw_if_index)

        # Create bi-directional cross-connects between pg0 and pg1
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=self.sub_if.sw_if_index, bd_id=99, enable=1)
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=self.pg1.sw_if_index, bd_id=99, enable=1)

        # Create incoming packet streams for packet-generator interfaces
        pg0_pkts = self.create_stream(
            self.pg0, self.pg_if_packet_sizes, do_dot1=True, bcast=True)
        self.pg0.add_stream(pg0_pkts)
        pg1_pkts = self.create_stream(
            self.pg1, self.pg_if_packet_sizes, do_dot1=False, bcast=True)
        self.pg1.add_stream(pg1_pkts)

        # Enable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.pg2.sw_if_index, is_l2=1, state=3)
        self.logger.info(self.vapi.ppcli("show interface span"))

        # Enable packet capturing and start packet sending
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # Verify packets outgoing packet streams on mirrored interface (pg2)
        pg0_expected = len(pg1_pkts)
        pg1_expected = len(pg0_pkts)
        pg2_expected = pg0_expected + pg1_expected

        pg0_pkts = self.pg0.get_capture(pg0_expected)
        pg1_pkts = self.pg1.get_capture(pg1_expected)
        pg2_pkts = self.pg2.get_capture(pg2_expected)

        self.bridge(self.pg2.sw_if_index, is_add=0)
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=self.sub_if.sw_if_index, bd_id=99, enable=0)
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=self.pg1.sw_if_index, bd_id=99, enable=0)

        # Disable SPAN on pg0 (mirrored to pg2)
        self.vapi.sw_interface_span_enable_disable(
            self.sub_if.sw_if_index, self.pg2.sw_if_index, state=0, is_l2=1)

        self.verify_capture(pg0_pkts + pg1_pkts, pg2_pkts)
if __name__ == '__main__':
    # Run the SPAN test suite with VPP's custom result-collecting runner.
    unittest.main(testRunner=VppTestRunner)
| |
from consts import *
import websocket
import thread
import threading
import time
from time import sleep
import json
import hmac
import hashlib
class PyfinexWebsocket:
    """Python 2 client for the Bitfinex websocket API.

    Opens the websocket on a daemon thread at construction time and
    dispatches incoming channel messages to user-supplied callbacks
    registered via the subscribe_* methods.  Callbacks are stored as
    instance attributes and only invoked when present (hasattr checks).
    """

    def __init__(self, debug = False):
        # When debug is on, websocket-client traces every frame to stdout.
        self.debug = debug
        if (self.debug):
            websocket.enableTrace(True)
        self.ws = websocket.WebSocketApp(BFX_WEBSOCKET_ADDRESS, on_message = self.__on_message, on_error = self.__on_error, on_close = self.__on_close)
        self.ws.on_open = self.__on_open
        # Run the websocket event loop on a daemon thread so it dies
        # together with the main program.
        self.wst = threading.Thread(target = self.ws.run_forever)
        self.wst.daemon = True
        self.wst.start()
        # block main thread for at most 5 seconds or until websocket is connected
        connection_timeout = 5
        while not self.ws.sock.connected and connection_timeout:
            sleep(1)
            connection_timeout -= 1

    def subscribe_book(self, callback):
        """Subscribe to the BTCUSD order book.

        callback is invoked as callback(price, count, amount, is_snapshot_marker).
        """
        print "subscribing to BTCUSD book"
        self.ws.send(BOOK_SUBSCRIBE_STRING);
        self.book_callback = callback

    def subscribe_ticker(self, callback):
        """Subscribe to the BTCUSD ticker; see __update_ticker for the
        callback signature."""
        print "subscribing to BTCUSD ticker"
        self.ws.send(TICKER_SUBSCRIBE_STRING);
        self.ticker_callback = callback

    def subscribe_trades(self, callback):
        """Subscribe to BTCUSD trades.

        callback is invoked as callback(sequence_id, timestamp, price, amount).
        """
        print "subscribing to BTCUSD trades"
        self.ws.send(TRADES_SUBSCRIBE_STRING);
        self.trades_callback = callback

    def subscribe_private(self, api_key, api_secret, callbacks):
        """Authenticate on the private channel using an HMAC-SHA384
        signature over an 'AUTH<millis>' payload.

        NOTE(review): the callbacks argument is currently unused here;
        private callbacks appear to be set elsewhere — confirm.
        """
        ### JS authentication example from API docs ###
        # var
        # crypto = require('crypto'),
        # api_key = 'API_KEY',
        # api_secret = 'API_SECRET',
        # payload = 'AUTH' + (new Date().getTime()),
        # signature = crypto.createHmac("sha384", api_secret).update(payload).digest('hex');
        # w.send(JSON.stringify({
        # event: "auth",
        # apiKey: api_key,
        # authSig: signature,
        # authPayload: payload
        # }));
        payload = "AUTH" + str(int(time.time() * 1000))
        signature = hmac.new(api_secret, payload, hashlib.sha384).hexdigest()
        auth_request = json.dumps({"event": "auth", "apiKey": api_key, "authSig": signature, "authPayload": payload})
        if (self.debug):
            print "API Key: " + api_key
            print "Signature Payload: " + payload
            print "Signature: " + signature
            print auth_request
        self.ws.send(auth_request)

    def __update_book(self, update_object):
        """Forward one [price, count, amount] book row to book_callback."""
        if (self.debug):
            print update_object
        if (hasattr(self, "book_callback")):
            price = update_object[0]
            count = update_object[1]
            amount = update_object[2]
            self.book_callback(price, count, amount, False)

    def __parse_book_message(self, message_object):
        """Dispatch a book channel message.

        A list at index 1 is a snapshot: signal it with a (None, None,
        None, True) callback, then replay each row.  Otherwise the
        message itself (minus the channel id) is a single update.
        """
        if (len(message_object) > 1):
            if (type(message_object[1]) is list):
                book_updates = message_object[1]
                if (hasattr(self, "book_callback")):
                    self.book_callback(None, None, None, True)
                for update_object in book_updates:
                    self.__update_book(update_object)
            else:
                message_object.pop(0)
                self.__update_book(message_object)

    def __update_ticker(self, update_object):
        """Unpack a 10-field ticker row and forward it to ticker_callback."""
        if (self.debug):
            print update_object
        if (hasattr(self, "ticker_callback")):
            bid = update_object[0]
            bid_size = update_object[1]
            ask = update_object[2]
            ask_size = update_object[3]
            daily_change = update_object[4]
            daily_change_percentage = update_object[5]
            last_price = update_object[6]
            volume = update_object[7]
            high = update_object[8]
            low = update_object[9]
            self.ticker_callback(bid, bid_size, ask, ask_size, daily_change, daily_change_percentage, last_price, volume, high, low)

    def __parse_ticker_message(self, message_object):
        """Normalize ticker messages to 10 fields.

        After dropping the channel id, a 10-field message carries
        high/low at [8]/[9]; an 8-field message does not, so the
        locally tracked high/low (updated from last_price) are appended
        before dispatch.
        """
        message_object.pop(0)
        if (len(message_object) == 10):
            self.high = message_object[8]
            self.low = message_object[9]
            self.__update_ticker(message_object)
        elif (len(message_object) == 8):
            last_price = message_object[6]
            if (hasattr(self, "high")):
                if (last_price > self.high):
                    self.high = last_price
            else:
                self.high = last_price
            if (hasattr(self, "low")):
                if (last_price < self.low):
                    self.low = last_price
            else:
                self.low = last_price
            message_object.append(self.high)
            message_object.append(self.low)
            self.__update_ticker(message_object)

    def __update_trades(self, update_object):
        """Forward one trade row to trades_callback."""
        if (self.debug):
            print update_object
        if (hasattr(self, "trades_callback")):
            sequence_id = update_object[0]
            timestamp = update_object[1]
            price = update_object[2]
            amount = update_object[3]
            self.trades_callback(sequence_id, timestamp, price, amount)

    def __parse_trades_message(self, message_object):
        """Dispatch a trades channel message (snapshot list or single row)."""
        if (len(message_object) > 1):
            if (type(message_object[1]) is list):
                trade_updates = message_object[1]
                for update_object in trade_updates:
                    self.__update_trades(update_object)
            else:
                message_object.pop(0)
                self.__update_trades(message_object)

    def __update_private_wallet(self, update_object):
        """Forward one wallet row to private_wallet_callback."""
        if (self.debug):
            print update_object
        if (hasattr(self, "private_wallet_callback")):
            name = update_object[0]
            currency = update_object[1]
            balance = update_object[2]
            unsettled_interest = update_object[3]
            self.private_wallet_callback(name, currency, balance, unsettled_interest)

    def __update_private_position(self, update_object):
        """Forward one position row to private_position_callback."""
        if (self.debug):
            print update_object
        if (hasattr(self, "private_position_callback")):
            pair = update_object[0]
            status = update_object[1]
            amount = update_object[2]
            base_price = update_object[3]
            margin_funding = update_object[4]
            margin_funding_type = update_object[5]
            self.private_position_callback(pair, status, amount, base_price, margin_funding, margin_funding_type)

    def __update_private_order(self, update_object):
        """Forward one order row to private_order_callback."""
        if (self.debug):
            print update_object
        if (hasattr(self, "private_order_callback")):
            order_id = update_object[0]
            pair = update_object[1]
            amount = update_object[2]
            original_amount = update_object[3]
            order_type = update_object[4]
            status = update_object[5]
            price = update_object[6]
            price_average = update_object[7]
            created_at = update_object[8]
            notify = update_object[9]
            hidden = update_object[10]
            self.private_order_callback(order_id, pair, amount, original_amount, order_type, status, price, price_average, created_at, notify, hidden)

    def __parse_private_message(self, message_object):
        """Route a private channel message by its two-letter type code:

        ws/wu = wallet snapshot/update, ps/pn/pu/pc = positions,
        os/on/ou/oc = orders; trade codes (ts/te/tu) are ignored.
        Snapshot codes carry a list of rows, update codes a single row.
        """
        print message_object
        message_object.pop(0)
        message_type = message_object.pop(0)
        if (message_type == "ws"):
            for update_object in message_object[0]:
                self.__update_private_wallet(update_object)
        elif (message_type == "wu"):
            update_object = message_object[0]
            self.__update_private_wallet(update_object)
        elif (message_type == "ps"):
            for update_object in message_object[0]:
                self.__update_private_position(update_object)
        elif (message_type == "pn" or message_type == "pu" or message_type == "pc"):
            update_object = message_object[0]
            self.__update_private_position(update_object)
        elif (message_type == "os"):
            for update_object in message_object[0]:
                self.__update_private_order(update_object)
        elif (message_type == "on" or message_type == "ou" or message_type == "oc"):
            update_object = message_object[0]
            self.__update_private_order(update_object)
        elif (message_type == "ts"):
            pass
        elif (message_type == "te"):
            pass
        elif (message_type == "tu"):
            pass

    def __on_message(self, ws, message):
        """websocket-client message hook.

        JSON objects are protocol events (subscription acks store the
        channel ids; auth acks store the private channel id).  JSON
        arrays are channel data, routed to the matching parser by the
        channel id at index 0.
        """
        obj = json.loads(message);
        if (type(obj) is dict):
            if KEY_EVENT in obj.keys():
                if (obj[KEY_EVENT] == EVENT_SUBSCRIBED):
                    channel = obj[KEY_CHANNEL_NAME]
                    if (channel == "book"):
                        self.book_channel_id = obj[KEY_CHANNEL_ID];
                        print "subscribed to the orderbook"
                    elif (channel == "ticker"):
                        self.ticker_channel_id = obj[KEY_CHANNEL_ID];
                        print "subscribed to the ticker"
                    elif (channel == "trades"):
                        self.trades_channel_id = obj[KEY_CHANNEL_ID];
                        print "subscribed to the trades"
                elif (obj[KEY_EVENT] == "auth"):
                    status = obj["status"]
                    if (status == "OK"):
                        # should always be channel id 0
                        self.private_channel_id = obj[KEY_CHANNEL_ID]
                    elif (status == "FAIL"):
                        # should throw exception here
                        # NOTE(review): obj["code"] may be numeric; string
                        # concatenation would raise TypeError — confirm.
                        print "Error: " + obj["code"]
        elif (type(obj) is list):
            if (len(obj) > 0):
                channel_id = obj[0]
                if (hasattr(self, "book_channel_id")):
                    if (channel_id == self.book_channel_id):
                        self.__parse_book_message(obj)
                if (hasattr(self, "ticker_channel_id")):
                    if (channel_id == self.ticker_channel_id):
                        self.__parse_ticker_message(obj)
                if (hasattr(self, "trades_channel_id")):
                    if (channel_id == self.trades_channel_id):
                        self.__parse_trades_message(obj)
                if (hasattr(self, "private_channel_id")):
                    if (channel_id == self.private_channel_id):
                        self.__parse_private_message(obj)

    def __on_error(self, ws, error):
        """websocket-client error hook: just log the error."""
        print error

    def __on_close(self, ws):
        """websocket-client close hook."""
        print "### closed connection to " + BFX_WEBSOCKET_ADDRESS + " ###"

    def __on_open(self, ws):
        """websocket-client open hook."""
        print "### opened connection to " + BFX_WEBSOCKET_ADDRESS + " ###"
| |
import fnmatch
import os
import re
import logging
import time
from sys import platform as _platform
from django.http import HttpResponse
from django.http import JsonResponse
from django.views.generic import TemplateView
from django.db.models.expressions import F, RawSQL
from django.db.models import Avg, FloatField, Max, Min, Sum
from controller.models import TestRunning, TestRunningData
from analyzer.models import Project
from django.shortcuts import render
logger = logging.getLogger(__name__)

MONITORING_DIR = ""  # legacy name; the scan below uses MONITORING_DIRS

# Create your views here.
# Directories scanned for running JMeter result (.jtl) files.
# NOTE: on platforms other than linux/win32 MONITORING_DIRS stays undefined.
if _platform in ("linux", "linux2"):
    MONITORING_DIRS = ["/var/lib/jenkins/jobs/", "/tmp/jltc/"]
elif _platform == "win32":
    MONITORING_DIRS = ["C:\work\monitoring"]
def tests_list(request):
    """Scan the monitoring directories for JMeter .jtl result files,
    register newly discovered running tests, drop tests whose result file
    disappeared and return the still-running tests as JSON.
    """
    data = []
    for monitoring_dir in MONITORING_DIRS:  # renamed: avoid shadowing module-level MONITORING_DIR
        for root, dirs, files in os.walk(monitoring_dir):
            # Only Jenkins workspace/result folders contain live test output.
            if "workspace" in root or "results" in root:
                for f in fnmatch.filter(files, '*.jtl'):
                    # Skip empty files: the test has not produced data yet.
                    if os.stat(os.path.join(root, f)).st_size > 0:
                        result_file_dest = os.path.join(root, f)
                        if not TestRunning.objects.filter(
                                result_file_dest=result_file_dest).exists():
                            # Project name comes from the Jenkins job path:
                            # .../<project_name>/workspace/...
                            project_name = re.search(
                                r'/([^/]+)/workspace', root).group(1)
                            p = Project.objects.get(project_name=project_name)
                            # Fix: the placeholder was never formatted, so the
                            # log line used to print a literal "{}".
                            logger.debug(
                                "Adding new running test to database: {}".format(
                                    result_file_dest))
                            t = TestRunning(
                                result_file_dest=result_file_dest,
                                project_id=p.id,
                                is_running=True,
                                start_time=int(
                                    time.time() * 1000)  # REMOVE IT SOON
                            )
                            t.save()
    # Remove tests whose result file disappeared; report the rest.
    for test_running in list(TestRunning.objects.values()):
        result_file_dest = test_running["result_file_dest"]
        if not os.path.exists(result_file_dest):
            logger.debug("Remove running test from database: {}".format(
                result_file_dest))
            running = TestRunning.objects.get(
                result_file_dest=result_file_dest)
            TestRunningData.objects.filter(
                test_running_id=running.id).delete()
            running.delete()
        else:
            data.append({
                "id": test_running["id"],
                "result_file_dest": result_file_dest,
                "project_name": Project.objects.get(
                    id=test_running['project_id']).project_name,
            })
    return JsonResponse(data, safe=False)
def online_test_success_rate(request, test_running_id):
    """Return the current fail/success percentages of a running test as JSON.

    NOTE(review): TestRunning.objects.get() raises DoesNotExist rather than
    returning None for an unknown id, so the error branch below is
    effectively dead; kept for interface compatibility.
    """
    test_running = TestRunning.objects.get(id=test_running_id)
    if test_running is None:  # idiom fix: was `== None`
        response = {
            "message": {
                "text": "Running test with this id does not exists",
                "type": "danger",
                "msg_params": {
                    "test_running_id": test_running_id
                }
            }
        }
        return JsonResponse(response, safe=False)
    # Sum request/error counters over all 'data_over_time' rows; the
    # counters live inside a JSON 'data' column, hence the raw SQL casts.
    data = TestRunningData.objects.filter(
        name='data_over_time', test_running_id=test_running_id). \
        annotate(errors=RawSQL("((data->>%s)::numeric)", ('errors',))). \
        annotate(count=RawSQL("((data->>%s)::numeric)", ('count',))). \
        aggregate(count_sum=Sum(F('count'), output_field=FloatField()),
                  errors_sum=Sum(F('errors'), output_field=FloatField()))
    errors_percentage = round(data['errors_sum'] * 100 / data['count_sum'],
                              1)
    response = [{
        "fail_%": errors_percentage,
        "success_%": 100 - errors_percentage
    }]
    return JsonResponse(response, safe=False)
def online_test_response_codes(request, test_running_id):
    """Return per-response-code request counts for a running test as JSON."""
    test_running = TestRunning.objects.get(id=test_running_id)
    if test_running is None:  # idiom fix: was `== None` (branch likely dead: .get() raises)
        response = {
            "message": {
                "text": "Running test with this id does not exists",
                "type": "danger",
                "msg_params": {
                    "test_running_id": test_running_id
                }
            }
        }
        return JsonResponse(response, safe=False)
    # 'response_codes' data is a mapping: code -> {'count': n, ...}
    test_response_codes = TestRunningData.objects.get(
        name='response_codes', test_running_id=test_running_id).data
    response = [{
        'response_code': code,
        'count': test_response_codes[code]['count'],
    } for code in test_response_codes]
    return JsonResponse(response, safe=False)
def online_test_aggregate(request, test_running_id):
    """Render the aggregate-table HTML fragment for a running test."""
    context = {'aggregate_table': get_test_running_aggregate(test_running_id)}
    return render(request, 'online_aggregate_table.html', context)
def get_test_running_aggregate(test_running_id):
    """Return the stored 'aggregate_table' payload for a running test.

    Returns the raw JSON data on success, or a JsonResponse error envelope
    for an unknown id (mixed return types kept for caller compatibility;
    note that .get() actually raises DoesNotExist, so the error branch is
    effectively dead).
    """
    test_running = TestRunning.objects.get(id=test_running_id)
    if test_running is None:  # idiom fix: was `== None`
        response = {
            "message": {
                "text": "Running test with this id does not exists",
                "type": "danger",
                "msg_params": {
                    "test_running_id": test_running_id
                }
            }
        }
        return JsonResponse(response, safe=False)
    return TestRunningData.objects.get(
        name='aggregate_table', test_running_id=test_running_id).data
def online_test_rps(request, test_running_id):
    """Return the current requests-per-second estimate for a running test."""
    test_running = TestRunning.objects.get(id=test_running_id)
    if test_running is None:  # idiom fix: was `== None` (branch likely dead: .get() raises)
        response = {
            "message": {
                "text": "Running test with this id does not exists",
                "type": "danger",
                "msg_params": {
                    "test_running_id": test_running_id
                }
            }
        }
        return JsonResponse(response, safe=False)
    data = list(
        TestRunningData.objects.filter(
            name='data_over_time', test_running_id=test_running_id)
        .annotate(count=RawSQL("((data->>%s)::numeric)", ('count',)))
        .order_by('-id').values('count'))
    # Use the second-newest minute: the newest one is still being filled.
    # Guard added: the original indexed data[1] unconditionally and raised
    # IndexError for a test with fewer than two samples.
    rps = int(data[1]['count'] / 60) if len(data) > 1 else 0
    response = [{
        "rps": rps,
    }]
    return JsonResponse(response, safe=False)
def online_test_rtot(request, test_running_id):
    """Return the per-minute timeline (time, rps, avg response time,
    errors) for a running test as JSON."""
    test_running = TestRunning.objects.get(id=test_running_id)
    if test_running is None:  # idiom fix: was `== None` (branch likely dead: .get() raises)
        response = {
            "message": {
                "text": "Running test with this id does not exists",
                "type": "danger",
                "msg_params": {
                    "test_running_id": test_running_id
                }
            }
        }
        return JsonResponse(response, safe=False)
    response = []
    rows = list(
        TestRunningData.objects.filter(
            name='data_over_time', test_running_id=test_running_id).values(
                'data'))
    for row in rows:
        d = row['data']
        response.append({
            # 'timestamp' is an ISO-like string; keep only date+time (19 chars).
            'time': d['timestamp'][:19],
            # Each row covers one minute, so count/60 approximates rps.
            'rps': d['count'] / 60,
            'avg': d['avg'],
            'errors': d['errors'],
        })
    return JsonResponse(response, safe=False)
def update(request, test_running_id):
    """Refresh the stored data frame of a running test and report the
    outcome as a JSON message envelope."""
    test_running = TestRunning.objects.get(id=test_running_id)
    if test_running is None:  # idiom fix: was `== None` (branch likely dead: .get() raises)
        response = {
            "message": {
                "text": "Running test with this id does not exists",
                "type": "danger",
                "msg_params": {
                    "test_running_id": test_running_id
                }
            }
        }
    else:
        test_running.update_data_frame()
        response = {
            "message": {
                "text": "Running test data was updated",
                "type": "success",
                "msg_params": {
                    "test_running_id": test_running_id
                }
            }
        }
    return JsonResponse(response, safe=False)
class OnlinePage(TemplateView):
    """Top-level "online" page listing the currently running tests."""

    def get(self, request, **kwargs):
        # Static template; all live data is fetched by the page through
        # the JSON views in this module.
        return render(request, 'online_page.html', context=None)
| |
# The MIT License (MIT)
#
# Copyright (c) 2016 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from kervi.hal import I2CSensorDeviceDriver
TCS34725_ADDRESS = 0x29
TCS34725_ID = 0x12 # 0x44 = TCS34721/TCS34725, 0x4D = TCS34723/TCS34727
TCS34725_COMMAND_BIT = 0x80
TCS34725_ENABLE = 0x00
TCS34725_ENABLE_AIEN = 0x10 # RGBC Interrupt Enable
TCS34725_ENABLE_WEN = 0x08 # Wait enable - Writing 1 activates the wait timer
TCS34725_ENABLE_AEN = 0x02 # RGBC Enable - Writing 1 actives the ADC, 0 disables it
TCS34725_ENABLE_PON = 0x01 # Power on - Writing 1 activates the internal oscillator, 0 disables it
TCS34725_ATIME = 0x01 # Integration time
TCS34725_WTIME = 0x03 # Wait time (if TCS34725_ENABLE_WEN is asserted)
TCS34725_WTIME_2_4MS = 0xFF # WLONG0 = 2.4ms WLONG1 = 0.029s
TCS34725_WTIME_204MS = 0xAB # WLONG0 = 204ms WLONG1 = 2.45s
TCS34725_WTIME_614MS = 0x00 # WLONG0 = 614ms WLONG1 = 7.4s
TCS34725_AILTL = 0x04 # Clear channel lower interrupt threshold
TCS34725_AILTH = 0x05
TCS34725_AIHTL = 0x06 # Clear channel upper interrupt threshold
TCS34725_AIHTH = 0x07
TCS34725_PERS = 0x0C # Persistence register - basic SW filtering mechanism for interrupts
TCS34725_PERS_NONE = 0b0000 # Every RGBC cycle generates an interrupt
TCS34725_PERS_1_CYCLE = 0b0001 # 1 clean channel value outside threshold range generates an interrupt
TCS34725_PERS_2_CYCLE = 0b0010 # 2 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_3_CYCLE = 0b0011 # 3 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_5_CYCLE = 0b0100 # 5 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_10_CYCLE = 0b0101 # 10 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_15_CYCLE = 0b0110 # 15 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_20_CYCLE = 0b0111 # 20 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_25_CYCLE = 0b1000 # 25 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_30_CYCLE = 0b1001 # 30 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_35_CYCLE = 0b1010 # 35 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_40_CYCLE = 0b1011 # 40 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_45_CYCLE = 0b1100 # 45 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_50_CYCLE = 0b1101 # 50 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_55_CYCLE = 0b1110 # 55 clean channel values outside threshold range generates an interrupt
TCS34725_PERS_60_CYCLE = 0b1111 # 60 clean channel values outside threshold range generates an interrupt
TCS34725_CONFIG = 0x0D
TCS34725_CONFIG_WLONG = 0x02 # Choose between short and long (12x) wait times via TCS34725_WTIME
TCS34725_CONTROL = 0x0F # Set the gain level for the sensor
TCS34725_ID = 0x12 # 0x44 = TCS34721/TCS34725, 0x4D = TCS34723/TCS34727
TCS34725_STATUS = 0x13
TCS34725_STATUS_AINT = 0x10 # RGBC Clean channel interrupt
TCS34725_STATUS_AVALID = 0x01 # Indicates that the RGBC channels have completed an integration cycle
TCS34725_CDATAL = 0x14 # Clear channel data
TCS34725_CDATAH = 0x15
TCS34725_RDATAL = 0x16 # Red channel data
TCS34725_RDATAH = 0x17
TCS34725_GDATAL = 0x18 # Green channel data
TCS34725_GDATAH = 0x19
TCS34725_BDATAL = 0x1A # Blue channel data
TCS34725_BDATAH = 0x1B
TCS34725_INTEGRATIONTIME_2_4MS = 0xFF # 2.4ms - 1 cycle - Max Count: 1024
TCS34725_INTEGRATIONTIME_24MS = 0xF6 # 24ms - 10 cycles - Max Count: 10240
TCS34725_INTEGRATIONTIME_50MS = 0xEB # 50ms - 20 cycles - Max Count: 20480
TCS34725_INTEGRATIONTIME_101MS = 0xD5 # 101ms - 42 cycles - Max Count: 43008
TCS34725_INTEGRATIONTIME_154MS = 0xC0 # 154ms - 64 cycles - Max Count: 65535
TCS34725_INTEGRATIONTIME_700MS = 0x00 # 700ms - 256 cycles - Max Count: 65535
TCS34725_GAIN_1X = 0x00 # No gain
TCS34725_GAIN_4X = 0x01 # 4x gain (original comment said "2x", which contradicts the constant name and datasheet)
TCS34725_GAIN_16X = 0x02 # 16x gain
TCS34725_GAIN_60X = 0x03 # 60x gain
# Lookup table for integration time delays.
INTEGRATION_TIME_DELAY = {
0xFF: 0.0024, # 2.4ms - 1 cycle - Max Count: 1024
0xF6: 0.024, # 24ms - 10 cycles - Max Count: 10240
0xEB: 0.050, # 50ms - 20 cycles - Max Count: 20480
0xD5: 0.101, # 101ms - 42 cycles - Max Count: 43008
0xC0: 0.154, # 154ms - 64 cycles - Max Count: 65535
0x00: 0.700 # 700ms - 256 cycles - Max Count: 65535
}
# Utility methods:
def calculate_color_temperature(r, g, b):
    """Convert raw R/G/B readings to a correlated color temperature (Kelvin).

    Returns None for total darkness or when the chromaticity denominator
    degenerates.
    """
    # Map RGB to CIE XYZ tristimulus values. Coefficients were fitted
    # against 6500K/3000K fluorescent and 60W incandescent sources;
    # Y corresponds to illuminance (lux).
    x_val = (-0.14282 * r) + (1.54924 * g) + (-0.95641 * b)
    y_val = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)
    z_val = (-0.68202 * r) + (0.77073 * g) + (0.56332 * b)

    total = x_val + y_val + z_val
    if total == 0:
        # Total darkness: chromaticity is undefined.
        return None

    # Chromaticity coordinates.
    xc = x_val / total
    yc = y_val / total

    denominator = 0.1858 - yc
    if denominator == 0:
        return None

    # McCamy's approximation for correlated color temperature.
    n = (xc - 0.3320) / denominator
    cct = (449.0 * n ** 3.0) + (3525.0 * n ** 2.0) + (6823.3 * n) + 5520.33
    return int(cct)
def calculate_lux(r, g, b):
    """Convert raw R/G/B readings to illuminance in lux (truncated int).

    Uses the Y (illuminance) row of the same empirical RGB->XYZ mapping
    as the color-temperature conversion.
    """
    return int((-0.32466 * r) + (1.57837 * g) + (-0.73191 * b))
class TCS34725DeviceDriver(I2CSensorDeviceDriver):
    """TCS34725 I2C color sensor driver (kervi sensor device driver).

    Register reads/writes go through the kervi I2C helper with the
    TCS34725 command bit OR'd into the register address.
    """
    def __init__(self, integration_time=TCS34725_INTEGRATIONTIME_2_4MS,
            gain=TCS34725_GAIN_4X, address=TCS34725_ADDRESS, bus=None, **kwargs):
        """Initialize the TCS34725 sensor.

        :param integration_time: ATIME register code (one of the
            TCS34725_INTEGRATIONTIME_* constants).
        :param gain: CONTROL register code (TCS34725_GAIN_* constant).
        :param address: I2C address (default 0x29).
        :param bus: optional I2C bus identifier passed to the base class.
        """
        # Setup I2C interface for the device.
        I2CSensorDeviceDriver.__init__(self, address, bus)
        # Sanity check: 0x44 identifies TCS34721/TCS34725.
        # NOTE(review): TCS34723/TCS34727 report 0x4D and are rejected here.
        chip_id = self._readU8(TCS34725_ID)
        if chip_id != 0x44:
            raise RuntimeError('Failed to read TCS34725 chip ID, check your wiring.')
        # Set default integration time and gain.
        self.set_integration_time(integration_time)
        self.set_gain(gain)
        # Enable the device (by default, the device is in power down mode on bootup).
        self.enable()
    @property
    def dimensions(self):
        # NOTE(review): reports 1 while dimension_labels lists three
        # channels ("r", "g", "b") — confirm which the framework expects.
        return 1
    @property
    def dimension_labels(self):
        return ["r","g", "b"]
    @property
    def type(self):
        return "color"
    @property
    def value_type(self):
        return "color"
    @property
    def unit(self):
        # Raw channel counts: unitless.
        return ""
    @property
    def max(self):
        # Channels are 16-bit counters.
        return 0xffff
    @property
    def min(self):
        return 0
    def _valid(self):
        # Check if the status bit is set and the chip is ready.
        # NOTE(review): reads TCS34725_STATUS without the command bit,
        # unlike _readU8 below — confirm this is intended.
        return bool(self.i2c.read_U8(TCS34725_STATUS) & 0x01)
    def _readU8(self, reg):
        """Read an unsigned 8-bit register."""
        return self.i2c.read_U8(TCS34725_COMMAND_BIT | reg)
    def _readU16LE(self, reg):
        """Read a 16-bit little endian register."""
        return self.i2c.read_U16LE(TCS34725_COMMAND_BIT | reg)
    def _write8(self, reg, value):
        """Write a 8-bit value to a register."""
        self.i2c.write8(TCS34725_COMMAND_BIT | reg, value)
    def enable(self):
        """Enable the chip."""
        # Flip on the power and enable bits. Power-on must settle before
        # the ADC enable bit is set, hence the short sleep.
        self._write8(TCS34725_ENABLE, TCS34725_ENABLE_PON)
        time.sleep(0.01)
        self._write8(TCS34725_ENABLE, (TCS34725_ENABLE_PON | TCS34725_ENABLE_AEN))
    def disable(self):
        """Disable the chip (power down)."""
        # Flip off the power on and enable bits, preserving other flags.
        reg = self._readU8(TCS34725_ENABLE)
        reg &= ~(TCS34725_ENABLE_PON | TCS34725_ENABLE_AEN)
        self._write8(TCS34725_ENABLE, reg)
    def set_integration_time(self, integration_time):
        """Sets the integration time for the TC34725. Provide one of these
        constants:
         - TCS34725_INTEGRATIONTIME_2_4MS  = 2.4ms - 1 cycle    - Max Count: 1024
         - TCS34725_INTEGRATIONTIME_24MS   = 24ms  - 10 cycles  - Max Count: 10240
         - TCS34725_INTEGRATIONTIME_50MS   = 50ms  - 20 cycles  - Max Count: 20480
         - TCS34725_INTEGRATIONTIME_101MS  = 101ms - 42 cycles  - Max Count: 43008
         - TCS34725_INTEGRATIONTIME_154MS  = 154ms - 64 cycles  - Max Count: 65535
         - TCS34725_INTEGRATIONTIME_700MS  = 700ms - 256 cycles - Max Count: 65535
        """
        # Cached for the polling delay in read_value().
        self._integration_time = integration_time
        self._write8(TCS34725_ATIME, integration_time)
    def get_integration_time(self):
        """Return the current integration time value.  This will be one of the
        constants specified in the set_integration_time doc string.
        """
        return self._readU8(TCS34725_ATIME)
    def set_gain(self, gain):
        """Adjusts the gain on the TCS34725 (adjusts the sensitivity to light).
        Use one of the following constants:
         - TCS34725_GAIN_1X   = No gain
         - TCS34725_GAIN_4X   = 4x gain
         - TCS34725_GAIN_16X  = 16x gain
         - TCS34725_GAIN_60X  = 60x gain
        """
        self._write8(TCS34725_CONTROL, gain)
    def get_gain(self):
        """Return the current gain value.  This will be one of the constants
        specified in the set_gain doc string.
        """
        return self._readU8(TCS34725_CONTROL)
    def read_value(self):
        """Read the raw red, green and blue channel values and return them
        as a list [r, g, b] of unsigned 16-bit numbers.

        NOTE(review): the original docstring promised a 4-tuple including
        the clear channel; the clear reading is only used by the dead
        gamma computation below.
        """
        # Busy-wait until the RGBC integration cycle completes.
        while not self._valid():
            # NOTE(review): _integration_time holds the ATIME register
            # code (e.g. 0xFF), not milliseconds — confirm this delay.
            time.sleep((self._integration_time + 0.9)/1000.0)
        # Read each color register.
        r = self._readU16LE(TCS34725_RDATAL)
        g = self._readU16LE(TCS34725_GDATAL)
        b = self._readU16LE(TCS34725_BDATAL)
        c = self._readU16LE(TCS34725_CDATAL)
        # NOTE(review): the gamma-corrected values below are computed but
        # never used, and divide by c (ZeroDivisionError when c == 0).
        red = int(pow((int((r/c) * 256) / 255), 2.5) * 255)
        green = int(pow((int((g/c) * 256) / 255), 2.5) * 255)
        blue = int(pow((int((b/c) * 256) / 255), 2.5) * 255)
        return [r, g, b]
    def set_interrupt(self, enabled):
        """Enable or disable interrupts by setting enabled to True or False."""
        enable_reg = self._readU8(TCS34725_ENABLE)
        if enabled:
            enable_reg |= TCS34725_ENABLE_AIEN
        else:
            enable_reg &= ~TCS34725_ENABLE_AIEN
        self._write8(TCS34725_ENABLE, enable_reg)
        # NOTE(review): a full 1s settle delay — confirm it is required.
        time.sleep(1)
    def clear_interrupt(self):
        """Clear interrupt.

        NOTE(review): write8 is called with a single argument, while every
        other call site passes (reg, value); this likely raises TypeError.
        The intent appears to be a raw special-function write of 0x66
        (command bit | clear-channel interrupt clear) — confirm against
        the kervi I2C API.
        """
        self.i2c.write8(0x66 & 0xff)
    def set_interrupt_limits(self, low, high):
        """Set the low/high clear-channel interrupt thresholds to the
        provided unsigned 16-bit values.
        """
        # NOTE(review): these writes target raw register addresses
        # 0x04-0x07 without TCS34725_COMMAND_BIT, unlike every other
        # register access in this driver — confirm intended.
        self.i2c.write8(0x04, low & 0xFF)
        self.i2c.write8(0x05, low >> 8)
        self.i2c.write8(0x06, high & 0xFF)
        self.i2c.write8(0x07, high >> 8)
| |
"""slightly modified from http://deeplearning.net/tutorial/code/imdb.py"""
from __future__ import print_function
from six.moves import xrange
import six.moves.cPickle as pickle
import csv
import gzip
import os
import numpy
def NextMiniBatch(x, labels, idx, batch_size):
    """Return the idx-th minibatch of (x, labels).

    Returns (None, None) when idx is negative or the batch would run past
    the end of x.
    """
    start = idx * batch_size
    stop = start + batch_size
    if idx < 0 or stop > x.shape[0]:
        return None, None
    return x[start:stop, :], labels[start:stop]
def prepare_data(seqs, labels, maxlen=None):
    """Build a right-aligned, zero-padded matrix from a list of sequences.

    Each sequence is left-padded with zeros up to the length of the
    longest kept sequence (or ``maxlen`` when given). When ``maxlen`` is
    set, sequences longer than ``maxlen`` are dropped entirely (not
    truncated), together with their labels.

    Args:
        seqs: list of sequences (lists of numbers).
        labels: list of labels parallel to ``seqs``.
        maxlen: optional maximum sequence length used for filtering.

    Returns:
        ``(x, labels)`` where ``x`` is a float32 array of shape
        ``(n_samples, maxlen)`` and ``labels`` a float32 vector, or
        ``(None, None)`` when no sequence survives the filtering.
        (Arity fix: the original returned three Nones on the empty path
        but two values on success.)
    """
    lengths = [len(s) for s in seqs]

    if maxlen is not None:
        # Drop sequences longer than maxlen, keeping labels in sync.
        kept = [(l, s, y)
                for l, s, y in zip(lengths, seqs, labels) if l <= maxlen]
        lengths = [l for l, _, _ in kept]
        seqs = [s for _, s, _ in kept]
        labels = [y for _, _, y in kept]

    if len(lengths) < 1:
        return None, None

    n_samples = len(seqs)
    maxlen = maxlen or numpy.max(lengths)

    x = numpy.zeros((n_samples, maxlen)).astype('float32')
    labels_arr = numpy.zeros((n_samples,)).astype('float32')
    for idx, s in enumerate(seqs):
        # Right-align each sequence: padding zeros go at the front.
        x[idx, -(lengths[idx]):] = s
    for idx, l in enumerate(labels):
        labels_arr[idx] = l
    return x, labels_arr
def get_dataset_file(dataset, default_dataset, origin):
    '''Resolve *dataset* to a usable path.

    Tries the given path first, then the sibling ``../data`` directory;
    when the default dataset is requested but still missing, it is
    downloaded from *origin*.
    '''
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Bare filename that does not exist here: look in ../data.
        candidate = os.path.join(
            os.path.split(__file__)[0], "..", "data", dataset)
        if os.path.isfile(candidate) or data_file == default_dataset:
            dataset = candidate
    if data_file == default_dataset and not os.path.isfile(dataset):
        from six.moves import urllib
        print('Downloading data from %s' % origin)
        urllib.request.urlretrieve(origin, dataset)
    return dataset
def load_data(path="imdb.pkl",
              n_words=100000,
              valid_portion=0.1,
              maxlen=None,
              sort_by_len=True):
    '''Load the IMDB dataset and return (train, valid, test) splits.

    :type path: String
    :param path: The path to the dataset (here IMDB)
    :type n_words: int
    :param n_words: The number of words to keep in the vocabulary.
        All extra words are mapped to unknown (1).
    :type valid_portion: float
    :param valid_portion: The proportion of the full train set used for
        the validation set.
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the train/valid set.
    :type sort_by_len: bool
    :param sort_by_len: Sort by the sequence length for the train,
        valid and test set. This allows faster execution as it causes
        less padding per minibatch. Another mechanism must be used to
        shuffle the train set at each epoch.

    :returns: three (x, y) tuples: train, valid, test.
    '''
    #############
    # LOAD DATA #
    #############
    # Resolve (and, for the default dataset, possibly download) the file,
    # then pick gzip vs. plain open based on the extension.
    path = get_dataset_file(
        path, "imdb.pkl", "http://www.iro.umontreal.ca/~lisa/deep/data/imdb.pkl")
    if path.endswith(".gz"):
        f = gzip.open(path, 'rb')
    else:
        f = open(path, 'rb')
    # NOTE(review): unpickling data — only safe for trusted sources.
    # Two consecutive pickles are stored in the same file.
    train_set = pickle.load(f)
    test_set = pickle.load(f)
    f.close()
    if maxlen:
        # Drop training examples whose sequence is maxlen or longer.
        new_train_set_x = []
        new_train_set_y = []
        for x, y in zip(train_set[0], train_set[1]):
            if len(x) < maxlen:
                new_train_set_x.append(x)
                new_train_set_y.append(y)
        train_set = (new_train_set_x, new_train_set_y)
        del new_train_set_x, new_train_set_y
    # split training set into validation set
    train_set_x, train_set_y = train_set
    n_samples = len(train_set_x)
    # Random permutation so the validation split is unbiased.
    # NOTE(review): uses the global numpy RNG state; not reproducible
    # unless the caller seeds it.
    sidx = numpy.random.permutation(n_samples)
    n_train = int(numpy.round(n_samples * (1. - valid_portion)))
    valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
    valid_set_y = [train_set_y[s] for s in sidx[n_train:]]
    train_set_x = [train_set_x[s] for s in sidx[:n_train]]
    train_set_y = [train_set_y[s] for s in sidx[:n_train]]
    train_set = (train_set_x, train_set_y)
    valid_set = (valid_set_x, valid_set_y)
    def remove_unk(x):
        # Map every word id >= n_words to 1 (the "unknown" token).
        return [[1 if w >= n_words else w for w in sen] for sen in x]
    test_set_x, test_set_y = test_set
    valid_set_x, valid_set_y = valid_set
    train_set_x, train_set_y = train_set
    train_set_x = remove_unk(train_set_x)
    valid_set_x = remove_unk(valid_set_x)
    test_set_x = remove_unk(test_set_x)
    def len_argsort(seq):
        # Indices that would sort `seq` by sequence length (ascending).
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))
    if sort_by_len:
        # Sorting by length reduces padding waste per minibatch.
        sorted_index = len_argsort(test_set_x)
        test_set_x = [test_set_x[i] for i in sorted_index]
        test_set_y = [test_set_y[i] for i in sorted_index]
        sorted_index = len_argsort(valid_set_x)
        valid_set_x = [valid_set_x[i] for i in sorted_index]
        valid_set_y = [valid_set_y[i] for i in sorted_index]
        sorted_index = len_argsort(train_set_x)
        train_set_x = [train_set_x[i] for i in sorted_index]
        train_set_y = [train_set_y[i] for i in sorted_index]
    train = (train_set_x, train_set_y)
    valid = (valid_set_x, valid_set_y)
    test = (test_set_x, test_set_y)
    return train, valid, test
def load_dictionary(path='../data/imdb.dict.pkl'):
    """Load and return the pickled word dictionary stored at *path*."""
    # TODO(wenjie): download this file from 'http://www.iro.umontreal.ca/~lisa/deep/data/imdb.dict.pkl.gz'
    # Fix: pickle data is binary — open with 'rb' (text mode 'r' fails on
    # Python 3 and can corrupt reads on Windows). Context manager
    # guarantees the file is closed even on error.
    with open(path, 'rb') as f:
        return pickle.load(f)
def load_dictionary_key_idx(path='../data/imdb.dict.pkl'):
    """Load the pickled dictionary at *path* inverted to index -> word."""
    # TODO(wenjie): download this file from 'http://www.iro.umontreal.ca/~lisa/deep/data/imdb.dict.pkl.gz'
    # Fix: pickle data is binary — open with 'rb' (text mode 'r' fails on
    # Python 3). Context manager guarantees the file is closed.
    with open(path, 'rb') as f:
        dictionary = pickle.load(f)
    return {v: k for k, v in dictionary.items()}
def PrintSequence(dictionary, idx_seq):
    """Render an index sequence as a space-joined string of words.

    Indices 0 and 1 (padding / unknown) render as empty strings; other
    indices are truncated to int and looked up in *dictionary*.
    """
    words = []
    for idx in idx_seq:
        words.append('' if idx in [0, 1] else dictionary[int(idx)])
    return ' '.join(words)
def PrintSequenceSeq(dictionary, idx_seq_seq):
    """Render several index sequences, one rendered sequence per line."""
    rendered = [PrintSequence(dictionary, seq) for seq in idx_seq_seq]
    return '\n'.join(rendered)
def WriteSequenceToCSV(path, repeat, dictionary, idx_seq):
    """Write the rendered word sequence to *path* as CSV.

    The sequence becomes one row (indices 0 and 1 map to empty cells),
    repeated *repeat* times.
    """
    row = ['' if idx in [0, 1] else dictionary[int(idx)] for idx in idx_seq]
    with open(path, 'w') as out:
        csv.writer(out).writerows([row for _ in range(repeat)])
| |
import time
import logging
import struct
import socket
from mesos.interface.mesos_pb2 import TASK_LOST, MasterInfo
from .messages_pb2 import (
RegisterFrameworkMessage, ReregisterFrameworkMessage,
DeactivateFrameworkMessage, UnregisterFrameworkMessage,
ResourceRequestMessage, ReviveOffersMessage, LaunchTasksMessage, KillTaskMessage,
StatusUpdate, StatusUpdateAcknowledgementMessage, FrameworkToExecutorMessage,
ReconcileTasksMessage
)
from .process import UPID, Process, async
logger = logging.getLogger(__name__)
class MesosSchedulerDriver(Process):
    """Scheduler-side Mesos driver implemented as a libprocess-style Process.

    Registers *framework* with the Mesos master located via *master_uri*
    (either a plain host[:port] or a zk://... ZooKeeper URI), keeps the
    registration alive across disconnects, and translates protobuf
    messages between the master and the user-supplied *sched* callback
    object (registered / resourceOffers / statusUpdate / ...).
    """
    def __init__(self, sched, framework, master_uri):
        Process.__init__(self, 'scheduler')
        self.sched = sched
        #self.executor_info = executor_info
        self.master_uri = master_uri
        self.framework = framework
        # Give the master time to fail over before it drops the framework.
        self.framework.failover_timeout = 100
        self.framework_id = framework.id
        self.master = None
        self.detector = None
        self.connected = False
        # offer_id.value -> {slave_id.value: slave UPID}, filled on offers.
        self.savedOffers = {}
        # slave_id.value -> slave UPID, for direct framework->executor sends.
        self.savedSlavePids = {}
    @async # called by detector
    def onNewMasterDetectedMessage(self, data):
        try:
            # Binary-encoded MasterInfo (ZooKeeper detector path).
            info = MasterInfo()
            info.ParseFromString(data)
            ip = socket.inet_ntoa(struct.pack('<I', info.ip))
            master = UPID('master@%s:%s' % (ip, info.port))
        except:
            # NOTE(review): bare except; falls back to treating `data` as
            # a plain "master@host:port" UPID string (see start()).
            master = UPID(data)
        self.connected = False
        self.register(master)
    @async # called by detector
    def onNoMasterDetectedMessage(self):
        self.connected = False
        self.master = None
    def register(self, master):
        """(Re-)register the framework, retrying every 2s until connected."""
        if self.connected or self.aborted:
            return
        if master:
            if not self.framework_id.value:
                # First registration: master assigns the framework id.
                msg = RegisterFrameworkMessage()
                msg.framework.MergeFrom(self.framework)
            else:
                # Already have an id: re-register (failover path).
                msg = ReregisterFrameworkMessage()
                msg.framework.MergeFrom(self.framework)
                msg.failover = True
            self.send(master, msg)
        # Retry until onFrameworkRegisteredMessage flips self.connected.
        self.delay(2, lambda:self.register(master))
    def onFrameworkRegisteredMessage(self, framework_id, master_info):
        self.framework_id = framework_id
        self.framework.id.MergeFrom(framework_id)
        self.connected = True
        self.master = UPID('master@%s:%s' % (socket.inet_ntoa(struct.pack('<I', master_info.ip)), master_info.port))
        # Get notified when the link to the master breaks.
        self.link(self.master, self.onDisconnected)
        self.sched.registered(self, framework_id, master_info)
    def onFrameworkReregisteredMessage(self, framework_id, master_info):
        assert self.framework_id == framework_id
        self.connected = True
        self.master = UPID('master@%s:%s' % (socket.inet_ntoa(struct.pack('<I', master_info.ip)), master_info.port))
        self.link(self.master, self.onDisconnected)
        self.sched.reregistered(self, master_info)
    def onDisconnected(self):
        self.connected = False
        logger.warning("disconnected from master")
        # Try to reconnect to the last known master after a short pause.
        self.delay(5, lambda:self.register(self.master))
    def onResourceOffersMessage(self, offers, pids):
        # Remember the slave UPID behind each offer so launched tasks can
        # later be messaged directly (see sendFrameworkMessage).
        for offer, pid in zip(offers, pids):
            self.savedOffers.setdefault(offer.id.value, {})[offer.slave_id.value] = UPID(pid)
        self.sched.resourceOffers(self, list(offers))
    def onRescindResourceOfferMessage(self, offer_id):
        self.savedOffers.pop(offer_id.value, None)
        self.sched.offerRescinded(self, offer_id)
    def onStatusUpdateMessage(self, update, pid=''):
        # Only accept updates from the current leading master.
        if self.sender.addr != self.master.addr:
            logger.warning("ignore status update message from %s instead of leader %s", self.sender, self.master)
            return
        assert self.framework_id == update.framework_id
        self.sched.statusUpdate(self, update.status)
        # Acknowledge so the slave stops retrying this update.
        if not self.aborted and self.sender.addr and pid:
            reply = StatusUpdateAcknowledgementMessage()
            reply.framework_id.MergeFrom(self.framework_id)
            reply.slave_id.MergeFrom(update.slave_id)
            reply.task_id.MergeFrom(update.status.task_id)
            reply.uuid = update.uuid
            try: self.send(self.master, reply)
            except IOError: pass
    def onLostSlaveMessage(self, slave_id):
        self.sched.slaveLost(self, slave_id)
    def onExecutorToFrameworkMessage(self, slave_id, framework_id, executor_id, data):
        self.sched.frameworkMessage(self, executor_id, slave_id, data)
    def onFrameworkErrorMessage(self, message, code=0):
        self.sched.error(self, message)
    def start(self):
        Process.start(self)
        uri = self.master_uri
        if uri.startswith('zk://') or uri.startswith('zoo://'):
            # Discover the leading master through ZooKeeper.
            from .detector import MasterDetector
            self.detector = MasterDetector(uri[uri.index('://') + 3:], self)
            self.detector.start()
        else:
            # Direct master address; default Mesos master port is 5050.
            if not ':' in uri:
                uri += ':5050'
            self.onNewMasterDetectedMessage('master@%s' % uri)
    def abort(self):
        # Deactivate (but keep registered) so the master can fail over.
        if self.connected:
            msg = DeactivateFrameworkMessage()
            msg.framework_id.MergeFrom(self.framework_id)
            self.send(self.master, msg)
        Process.abort(self)
    def stop(self, failover=False):
        # Unregister unless the caller wants the framework to survive
        # (failover=True keeps the registration alive).
        if self.connected and not failover:
            msg = UnregisterFrameworkMessage()
            msg.framework_id.MergeFrom(self.framework_id)
            self.send(self.master, msg)
            if self.detector:
                self.detector.stop()
        Process.stop(self)
    @async
    def requestResources(self, requests):
        if not self.connected:
            return
        msg = ResourceRequestMessage()
        msg.framework_id.MergeFrom(self.framework_id)
        for req in requests:
            msg.requests.add().MergeFrom(req)
        self.send(self.master, msg)
    @async
    def reviveOffers(self):
        if not self.connected:
            return
        msg = ReviveOffersMessage()
        msg.framework_id.MergeFrom(self.framework_id)
        self.send(self.master, msg)
    @async
    def reconcileTasks(self, statuses=None):
        if not self.connected:
            return
        msg = ReconcileTasksMessage()
        msg.framework_id.MergeFrom(self.framework_id)
        if statuses is not None:
            # NOTE(review): protobuf repeated fields normally reject direct
            # assignment (use msg.statuses.extend(statuses)); confirm this
            # line has ever executed successfully.
            msg.statuses = statuses
        self.send(self.master, msg)
    def launchTasks(self, offer_id, tasks, filters):
        # When disconnected or the offer is unknown, synthesize TASK_LOST
        # updates locally instead of sending anything to the master.
        if not self.connected or offer_id.value not in self.savedOffers:
            for task in tasks:
                update = StatusUpdate()
                update.framework_id.MergeFrom(self.framework_id)
                update.status.task_id.MergeFrom(task.task_id)
                update.status.state = TASK_LOST
                update.status.message = 'Master disconnected' if not self.connected else "invalid offer_id"
                update.timestamp = time.time()
                update.uuid = ''
                self.onStatusUpdateMessage(update)
            return
        msg = LaunchTasksMessage()
        msg.framework_id.MergeFrom(self.framework_id)
        msg.offer_ids.add().MergeFrom(offer_id)
        msg.filters.MergeFrom(filters)
        for task in tasks:
            msg.tasks.add().MergeFrom(task)
            # Cache the slave UPID for later direct executor messages.
            pid = self.savedOffers.get(offer_id.value, {}).get(task.slave_id.value)
            if pid and task.slave_id.value not in self.savedSlavePids:
                self.savedSlavePids[task.slave_id.value] = pid
        self.savedOffers.pop(offer_id.value)
        self.send(self.master, msg)
    def declineOffer(self, offer_id, filters=None):
        # Declining = launching zero tasks on the offer.
        # NOTE(review): not decorated @async, unlike the sibling actions —
        # confirm whether that is intentional.
        if not self.connected:
            return
        msg = LaunchTasksMessage()
        msg.framework_id.MergeFrom(self.framework_id)
        msg.offer_ids.add().MergeFrom(offer_id)
        if filters:
            msg.filters.MergeFrom(filters)
        self.send(self.master, msg)
    @async
    def killTask(self, task_id):
        if not self.connected:
            return
        msg = KillTaskMessage()
        msg.framework_id.MergeFrom(self.framework_id)
        msg.task_id.MergeFrom(task_id)
        self.send(self.master, msg)
    @async
    def sendFrameworkMessage(self, executor_id, slave_id, data):
        if not self.connected:
            return
        msg = FrameworkToExecutorMessage()
        msg.framework_id.MergeFrom(self.framework_id)
        msg.executor_id.MergeFrom(executor_id)
        msg.slave_id.MergeFrom(slave_id)
        msg.data = data
        slave = self.savedSlavePids.get(slave_id.value, self.master) # can not send to slave directly
        self.send(slave, msg)
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 - 2015 -- Lars Heuer <heuer[at]semagia.com>
# All rights reserved.
#
# License: BSD, see LICENSE.txt for more details.
#
"""\
This module provides some global constants.
:author: Lars Heuer (heuer[at]semagia.com)
:organization: Semagia - <http://www.semagia.com/>
:license: BSD license
"""
import re
# Min/max length of station identifiers
# (derived from the shortest and longest entries in _STATIONS below)
MIN_ORIGIN_LENGTH = len(u'ROME')
MAX_ORIGIN_LENGTH = len(u'BANDARSERIBEGAWAN')
# Reference kind constants
# Classify the medium a cable reference points to (another cable,
# an e-mail, a book, a phone call, ...).
REF_KIND_UNKNOWN = 0
REF_KIND_CABLE = 1
REF_KIND_EMAIL = 2
REF_KIND_BOOK = 3
REF_KIND_TEL = 4
REF_KIND_REPORT = 5
REF_KIND_FAX = 6
REF_KIND_MEMO = 7
REF_KIND_MEETING = 8
REF_KIND_WEB = 9
# TAG constants
# Classify cable TAGs (subject, person, program, geographic, organization).
TAG_KIND_UNKNOWN = 0
TAG_KIND_SUBJECT = 1
TAG_KIND_PERSON = 2
TAG_KIND_PROGRAM = 3
TAG_KIND_GEO = 4
TAG_KIND_ORG = 5
# Min/max cable serial number length (number of digits);
# interpolated into REFERENCE_ID_PATTERN below.
MIN_SERIAL_LENGTH = 1
MAX_SERIAL_LENGTH = 7
# Valid station identifiers
#
# Known cable origins (city names with whitespace removed), grouped by
# first letter.  A few non-station strings that appear in cable
# references are included and marked inline.  Joined with '|' to build
# REFERENCE_ID_PATTERN below.
_STATIONS = (
    # A
    u'ABIDJAN', u'ABUDHABI', u'ABUJA', u'ACCRA', u'ADDISABABA',
    u'AITTAIPEI', u'ALGIERS', u'AMMAN', u'AMSTERDAM', u'ANKARA',
    u'ASHGABAT', u'ASMARA', u'ASTANA', u'ASUNCION', u'ATHENS',
    u'ADANA', u'ALMATY', u'APIA', u'AUCKLAND', u'ANTANANARIVO',
    u'ALEXANDRIA',
    # B
    u'BAGHDAD', u'BAKU', u'BAMAKO', u'BANDARSERIBEGAWAN', u'BANGKOK',
    u'BANJUL', u'BARCELONA', u'BASRAH', u'BEIJING', u'BEIRUT',
    u'BELGRADE', u'BERLIN', u'BERN', u'BISHKEK', u'BOGOTA',
    u'BRASILIA', u'BRATISLAVA', u'BRIDGETOWN', u'BRUSSELS', u'BUCHAREST',
    u'BUDAPEST', u'BUENOSAIRES', u'BUJUMBURA', u'BRAZZAVILLE', u'BELIZE',
    u'BELFAST', u'BELMOPAN', u'BONN', u'BANGUI',
    u'BENIN', # found in cable refs
    # C
    u'CAIRO', u'CALCUTTA', u'CANBERRA', u'CAPETOWN', u'CARACAS',
    u'CASABLANCA', u'CHENNAI', u'CHISINAU', u'CIUDADJUAREZ', u'COLOMBO',
    u'CONAKRY', u'COPENHAGEN', u'CURACAO', u'CALGARY', u'CHIANGMAI',
    u'CHENGDU', u'COTONOU', u'CDGENEVA', u'CHARLESTON',
    u'CDCATLANTAGA', # found in cable refs "Centers for Disease Control and Prevention"
    u'CDC', # found in cable refs
    # D
    u'DAKAR', u'DAMASCUS', u'DARESSALAAM', u'DHAKA', u'DJIBOUTI',
    u'DOHA', u'DUBAI', u'DUBLIN', u'DUSHANBE', u'DHAHRAN', u'DILI',
    u'DURBAN', u'DAMASCCUS', u'DUSSELDORF',
    u'USDOJ', # found in cable refs
    # F
    u'FREETOWN', u'FUKUOKA', u'FSINFATC', u'FRANKFURT', u'FLORENCE', u'FESTTWO',
    # G
    u'GABORONE', u'GENEVA', u'GUATEMALA', u'GUADALAJARA', u'GUAYAQUIL',
    u'GUANGZHOU', u'GEORGETOWN', u'GRENADA',
    # H
    u'HAMBURG', u'HANOI', u'HARARE', u'HAVANA', u'HAMILTON', u'HELSINKI', u'HERMOSILLO',
    u'HALIFAX', u'HOCHIMINHCITY', u'HONGKONG', u'HILLAH', u'HYDERABAD',
    # I
    u'IRANRPODUBAI', u'ISLAMABAD', u'ISTANBUL', u'IZMIR',
    # J
    u'JEDDAH', u'JERUSALEM', u'JAKARTA', u'JOHANNESBURG',
    # K
    u'KABUL', u'KAMPALA', u'KATHMANDU', u'KHARTOUM', u'KIEV', u'KIGALI',
    u'KINSHASA', u'KUALALUMPUR', u'KUWAIT', u'KYIV', u'KOLKATA', u'KINGSTON',
    u'KARACHI', u'KRAKOW', u'KOLONIA', u'KIRKUK', u'KOROR', u'KADUNA',
    # L
    u'LAGOS', u'LAPAZ', u'LAHORE', u'LILONGWE', u'LIMA', u'LISBON', u'LJUBLJANA',
    u'LONDON', u'LUANDA', u'LUXEMBOURG', u'LIBREVILLE', u'LUSAKA', u'LEIPZIG',
    u'LENINGRAD', u'LOME',
    # M
    u'MALABO', u'MADRID', u'MANAGUA', u'MANAMA', u'MAPUTO', u'MBABANE', u'MEXICO',
    u'MILAN', u'MINSK', u'MONROVIA', u'MONTERREY', u'MONTEVIDEO', u'MONTREAL',
    u'MOSCOW', u'MUMBAI', u'MUNICH', u'MUSCAT', u'MELBOURNE', u'MANILA',
    u'MATAMOROS', u'MASERU', u'MOGADISHU', u'MARSEILLE', u'MERIDA', u'MAJURO', u'MOSUL',
    u'MADRAS',
    u'MONTEREY', # Found in cable refs
    # N
    u'NAIROBI', u'NAPLES', u'NASSAU', u'NEWDELHI', u'NIAMEY', u'NICOSIA',
    u'NDJAMENA', u'NAHA', u'NUEVOLAREDO', u'NAGOYA', u'NOUAKCHOTT', u'NOGALES',
    # O
    u'OSLO', u'OTTAWA', u'OUAGADOUGOU', u'OSAKAKOBE',
    # P
    u'PANAMA', u'PARAMARIBO', u'PARIS', u'PARTO', u'PESHAWAR',
    u'PHNOMPENH', u'PORTAUPRINCE', u'PRAGUE', u'PRETORIA', u'PRISTINA',
    u'PORTLOUIS', u'PORTOFSPAIN', u'PODGORICA', u'PORTMORESBY', u'PERTH',
    u'PONTADELGADA', u'PRAIA',
    u'PARISFR', # Used for US Mission UNESCO, see also UNESCOPARISFR
    # Q
    u'QUITO', u'QUEBEC',
    # R
    u'RABAT', u'RANGOON', u'RECIFE', u'REYKJAVIK', u'RIGA',
    u'RIODEJANEIRO', u'RIYADH', u'ROME', u'RPODUBAI',
    # S
    u'SANAA', u'SANJOSE', u'SANSALVADOR', u'SANTIAGO', u'SANTODOMINGO',
    u'SAOPAULO', u'SARAJEVO', u'SEOUL', u'SHANGHAI', u'SHENYANG', u'SINGAPORE',
    u'SKOPJE', u'SOFIA', u'STATE', u'STOCKHOLM', u'STRASBOURG', u'STPETERSBURG',
    u'SUVA', u'SAPPORO', u'SECDEF', u'SYDNEY', u'SURABAYA',
    # T
    u'TALLINN', u'TASHKENT', u'TAIPEI', u'TBILISI', u'TEGUCIGALPA', u'TEHRAN',
    u'TELAVIV', u'THEHAGUE', u'TIJUANA', u'TOKYO', u'TRIPOLI', u'TUNIS',
    u'TORONTO', u'THESSALONIKI', u'TIRANA',
    # U
    u'ULAANBAATAR', u'UNVIEVIENNA', u'USNATO', u'USUNNEWYORK', u'USEUBRUSSELS',
    u'USOSCE', u'UNROME', u'USTRGENEVA',
    u'USDAFAS', # Found in cable references and stands for "U.S. Department of Agriculture"
    u'USDOC', # Found in REFerences and stands for "United States Department of Commerce"
    u'USCBP', # Found in refs and stands for "U.S. Customs and Border Protection"
    u'UNESCOPARISFR', # Same as PARISFR
    u'UNESCOPARIS', # Same as PARISFR
    # V
    u'VATICAN', u'VIENNA', u'VILNIUS', u'VLADIVOSTOK', u'VALLETTA', u'VANCOUVER',
    u'VIENTIANE',
    # W
    u'WARSAW', u'WELLINGTON', u'WINDHOEK', u'WASHDC',
    u'WHITEHOUSE', # Found in cable refs
    # Y
    u'YAOUNDE', u'YEREVAN', u'YEKATERINBURG',
    # Z
    u'ZAGREB'
)
# Matches a canonical cable identifier: a 2-digit year, one of the
# station identifiers above, and a serial number of
# MIN_SERIAL_LENGTH..MAX_SERIAL_LENGTH digits.  Groups: (year, station, serial).
REFERENCE_ID_PATTERN = re.compile(r'^([0-9]{2})(%s)([0-9]{%d,%d})$' % ('|'.join(_STATIONS), MIN_SERIAL_LENGTH, MAX_SERIAL_LENGTH), re.UNICODE)
# Wrong WikiLeaks cable identifiers
# These cable identifiers are cables which exist in two versions: One with the
# correct cable identifier and one with the incorrect cable id.
# This dict maps the wrong cable id to the correct one.
# Most malformed ids are artifacts of "SECTION nn OF nn" / "EFTO" fragments
# or character drops fused into the identifier.
MALFORMED_CABLE_IDS = {
    # Format:
    # Invalid cable id: correct cable id
    u'08SCTION02OF02SAOPAULO335': u'08SAOPAULO335',
    u'07SETION02OF02BAKU1501': u'07BAKU1501',
    u'08ECTION02OF02ATHENS959': u'08ATHENS959',
    u'08SECTON01OF02BEIRUT896': u'08BEIRUT896',
    u'07SOIA828': u'07SOFIA828',
    u'06PORTOFPAIN568': u'06PORTOFSPAIN568',
    u'07SECTION02OF03EIJING483': u'07BEIJING483',
    u'09BRUSSLS1332': u'09BRUSSELS1332',
    u'09NDJAENA423': u'09NDJAMENA423',
    u'08IHARTOUM1126': u'08KHARTOUM1126',
    u'10AQNA272': u'10ASTANA272',
    u'09SQCTION02OF02DUSHANBE143': u'09DUSHANBE143',
    u'09SIFIEDABUJA1673': u'09ABUJA1673',
    u'08SECTIN03OF03KABUL3036': u'08KABUL3036',
    u'09SECTIOQ1OF06HARARE876': u'09HARARE876',
    u'09BAU339': u'09BAKU339',
    u'08SECTION01GF02BISHIEK21': u'08BISHKEK1021',
    u'06NDJAENA1382': u'06NDJAMENA1382',
    u'08SANODOMINGO1611': u'08SANTODOMINGO1611',
    u'09SECION02OF02NAIROBI417': u'09NAIROBI417',
    u'06BRAILIA1079': u'06BRASILIA1079',
    u'07POTAUPRINCE943': u'07PORTAUPRINCE943',
    u'10EFTOKABUL668': u'10KABUL668',
    u'07QXICO3307': u'07MEXICO3307',
    u'09SECTION0QF05HANOI297': u'09HANOI297',
    u'07ANILA1702': u'07MANILA1702',
    u'08SECTIN01OF02BUDAPEST836': u'08BUDAPEST836',
    u'09NSSAU504': u'09NASSAU504',
    u'06ANOI582': u'06HANOI582',
    u'08INSHASA1164': u'08KINSHASA1164',
    u'09EFTORIYADH1110': u'09RIYADH1110',
    u'08SECTIN02OF02PORTOFSPAIN546': u'08PORTOFSPAIN546',
    u'07THEHAGE742': u'07THEHAGUE742',
    u'09EFTOSANAA433': u'09SANAA433',
    u'10SECION03OF08VIENNA176': u'10VIENNA176',
    u'07GEORGETON514': u'07GEORGETOWN514',
    u'09SECTION02F02BRUSSELS1639': u'09BRUSSELS1639',
    u'092OF5': u'09STATE126780',
    u'07EFTOSANAA2300': u'07SANAA2300',
    u'08SECTON02OF02TIRANA398': u'08TIRANA398',
    u'09SCTION08OF09NAIROBI809': u'09NAIROBI809',
    u'08ECTION01OF02MANAMA492': u'08MANAMA492',
    u'07BRASIIA1568': u'07BRASILIA1568',
    u'09KINHASA1056': u'09KINSHASA1056',
    u'09COPENHAEN13': u'09COPENHAGEN13',
    u'08SECTIO01OF02JERUSALEM1847': u'08JERUSALEM1847',
    u'09SECTION02OF03QRIPOLI583': u'09TRIPOLI583',
    u'06MILSK1226': u'06MINSK1226',
    u'06MAILA1222': u'06MANILA1222',
    u'08SANTOOMINGO1959': u'08SANTODOMINGO1959',
    u'06BELGADE856': u'06BELGRADE856',
    u'07RUSSELS1548': u'07BRUSSELS1548',
    u'09ECTION03OF05MANILA924': u'09MANILA924',
    u'09SCTION02OF02PRAGUE383': u'09PRAGUE383',
    u'06HONGKOG2054': u'06HONGKONG2054',
    u'06MINSI225': u'06MINSK225',
    u'06MADRID002583ZFR2585': u'06MADRID2583',
    u'06LINSK1234': u'06MINSK1234',
    u'06AITTAIPIE2654': u'06TAIPEI2654',
    u'07OSLO000160ZDK164': u'07OSLO160',
    u'06SARAEVO2307': u'06SARAJEVO2307',
    u'08SCTION02OF04NAIROBI1373': u'08NAIROBI1373',
    u'08SECTION01OF03CAIRO1416': u'08CAIRO1416',
    u'08SECTION01OF04HARARE754': u'08HARARE754',
    u'09NOUKKCHOTT514': u'09NOUAKCHOTT514',
    u'09SECTON02OF02BRUSSELS579': u'09BRUSSELS579',
    u'85NOFORNMOGADISHU1643': u'85MOGADISHU1643',
    u'08SECTIO04OF07BEIJING3049': u'08BEIJING3049',
    u'09SECTION01OF03BRUSSELS1234': u'09USEUBRUSSELS1234',
    u'09TILISI1526': u'09TBILISI1526',
    u'06MAAMA2067': u'06MANAMA2067',
    u'09BRUSSQLS1235': u'09BRUSSELS1235',
    u'09SECTIO01OF03BRUSSELS1363': u'09BRUSSELS1363',
    u'09BEIRT96': u'09BEIRUT96',
    u'08MADRID000762ZFR763': u'08MADRID762',
    u'09BUSSELS1666': u'09BRUSSELS1666',
    u'07ARAJEVO753': u'07SARAJEVO753',
    u'08SECTINN02OF02JAKARTA1486': u'08JAKARTA1486',
    u'08BEIJIG3760': u'08BEIJING3760',
    u'06ASTANA': u'06ASTANA204', # 06ASTANA is the incomplete version of 06ASTANA204
}
# Wrong WikiLeaks cable identifiers w/o a valid equivalent
# Unlike MALFORMED_CABLE_IDS, these wrong ids have no correctly-named
# duplicate in the release; the mapping gives the id that *would* be correct.
INVALID_CABLE_IDS = {
    # Format:
    # Invalid cable ID: Cable ID which would be correct
    u'06EFTOBAKU1165': u'06BAKU1165',
    u'09EFTOASMARA373': u'09ASMARA373',
    u'09EFTOLONDON2468': u'09LONDON2468',
    u'07EFTORABAT521': u'07RABAT521',
    u'09AMEMBASSYHANOI1284': u'09HANOI1284',
    u'06EFTOISLAMABAD17875': u'06ISLAMABAD17875',
    u'09EFTOASMARA34': u'09ASMARA34',
    u'08THEHAGU799': u'08THEHAGUE799',
    u'08EFTOBUENOSAIRES648': u'08BUENOSAIRES648',
    u'09EFTOLONDON2858': u'09LONDON2858',
    u'08EFTOLONDON2883': u'08LONDON2883',
    u'10EFTOLONDON16': u'10LONDON16',
    u'07EFTOATHENS404': u'07ATHENS404',
    u'09EFTOLONDON2618': u'09LONDON2618',
    u'09GUATEMLA692': u'09GUATEMALA692',
    u'08EFTOJAKARTA2073': u'08JAKARTA2073',
    u'09AMEMBASSYHANOI1234': u'09HANOI1234',
    u'06EFTOATHENS2950': u'06ATHENS2950',
    u'10EFTOLONDON223': u'10LONDON223',
    u'10EFTOLONDON224': u'10LONDON224',
    u'06KINSHAA1386': u'06KINSHASA1386',
    u'09EFTOUSUNNEWYORK584': u'09USUNNEWYORK584',
    u'09EFTOLONDON2688': u'09LONDON2688',
    u'07EFTOSANAA588': u'07SANAA588',
    u'08EFTODAMASCUS487': u'08DAMASCUS487',
    u'06EFTOSANAA1621': u'06SANAA1621',
    u'06EFTOSANAA1996': u'06SANAA1996',
    u'06SECTIO03OF03MINSK1128': u'06MINSK1128',
    u'07EFTOBAGHDAD1098': u'07BAGHDAD1098',
    u'07SECTON03OF04DAKAR269': u'07DAKAR269',
    u'06EFTOPORTMORESBY194': u'06PORTMORESBY194',
    u'09EFTOLONDON2884': u'09LONDON2884',
    u'06EFTOPORTMORESBY197': u'06PORTMORESBY197',
    u'09EFTOLONDON2187': u'09LONDON2187',
    u'06EFTOBAKU1149': u'06BAKU1149',
    u'07EFTOBUENOSAIRES1049': u'07BUENOSAIRES1049',
    u'07EFTOATHENS781': u'07ATHENS781',
    u'06EFTOKABUL5893': u'06KABUL5893',
    u'07EFTOATHENS543': u'07ATHENS543',
    u'09EFTOLONDON2240': u'09LONDON2240',
    u'06EFTORABAT1713': u'06RABAT1713',
    u'06EFTOBAKU1420': u'06BAKU1420',
    u'07EFTOUSUNNEWYORK181': u'07USUNNEWYORK181',
    u'10EFTOBANDARSERIBEGAWAN24': u'10BANDARSERIBEGAWAN24',
    u'08FSCCHARLESTON1712': u'08CHARLESTON1712',
    u'06SECTIKN01OF03MINSK1223': u'06MINSK1223',
    u'06EFTOSKOPJE206': u'06SKOPJE206',
    u'09EFTOYEREVAN678': u'09YEREVAN678',
    u'07EFTOBAGHDAD867': u'07BAGHDAD867',
    u'08EFTOUSUNNEWYORK457': u'08USUNNEWYORK457',
    u'09EFTOYEREVAN677': u'09YEREVAN677',
    u'09EFTOYEREVAN540': u'09YEREVAN540',
    u'10EFTOKABUL597': u'10KABUL597',
    u'07EFTOATHENS174': u'07ATHENS174',
    u'09SECTION01OF03SANJOSE525': u'09SANJOSE525',
    u'06EFTOPORTMORESBY364': u'06PORTMORESBY364',
    u'07KAPALA518': u'07KAMPALA518',
    u'07EFTORABAT171': u'07RABAT171',
    u'08EFTOMONTEVIDEO541': u'08MONTEVIDEO541',
    u'06EFTOANKARA5010': u'06ANKARA5010',
    u'06EFTOCARACAS2252': u'06CARACAS2252',
    u'08EFTOMONTEVIDEO718': u'08MONTEVIDEO718',
    u'09BRUSELS1292': u'09USEUBRUSEELS1292', # NOTE(review): "BRUSEELS" looks misspelled (expected "BRUSSELS") -- verify intended value
    u'09AMEMBASSYHANOI1246': u'09HANOI1246',
    u'07EFTOLAPAZ1740': u'07LAPAZ1740',
    u'07EFTOATHENS373': u'07ATHENS373',
    u'09EFTOMONTEVIDEO137': u'09MONTEVIDEO137',
    u'09EFTOYEREVAN559': u'09YEREVAN559',
    u'06EFTOANKARA5097': u'06ANKARA5097',
    u'06EFTOCARACAS943': u'06CARACAS943',
    u'09EFTOLONDON2211': u'09LONDON2211',
    u'09EFTOLONDON2521': u'09LONDON2521',
    u'07SECTION01OF03ANKARA365': u'07ANKARA365',
    u'06EFTOPORTMORESBY350': u'06PORTMORESBY350',
    u'07EFTOATHENS298': u'07ATHENS298',
    u'07EFTOATHENS299': u'07ATHENS299',
    u'07EFTOSANAA784': u'07SANAA784',
    u'06EFTOBAKU1204': u'06BAKU1204',
    u'09AMEMBASSYHANOI1274': u'09HANOI1274',
    u'08ACCRA001382SUSPECTEDDUPLICATE1392': u'08ACCRA1392',
    u'06EFTOUSUNNEWYORK1560': u'06USUNNEWYORK1560',
    u'09EFTOHELSINKI235': u'09HELSINKI235',
    u'09EFTOTRIPOLI704': u'09TRIPOLI704',
    u'06EFTOANKARA4972': u'06ANKARA4972',
    u'06EFTOATHENS1738': u'06ATHENS1738',
    u'06EFTOCAIRO6192': u'06CAIRO6192',
    u'08EFTOPHNOMPENH416': u'08PHNOMPENH416',
    u'07EFTOBAGHDAD1116': u'07BAGHDAD1116',
    u'09EFTOLONDON2363': u'09LONDON2363',
    u'07EFTORABAT264': u'07RABAT264',
    u'09EFTOLONDON2239': u'09LONDON2239',
    u'08BANGOK1382': u'08BANGKOK1382',
    u'07EFTOOTTAWA1217': u'07OTTAWA1217',
    u'09EFTOYEREVAN874': u'09YEREVAN874',
    u'06EFTOMAPUTO981': u'06MAPUTO981',
    u'07BUENOSQRES633': u'07BUENOSAIRES633',
    u'06EFTORANGOON1092': u'06RANGOON1092',
    u'09AMEMBASSYHANOI1290': u'09HANOI1290',
    u'09AMEMBASSYHANOI1292': u'09HANOI1292',
    u'06EFTOSKOPJE971': u'06SKOPJE971',
    u'06EFTOBRUSSELS3952': u'06BRUSSELS3952',
    u'06ATANANARIVO1320': u'06ANTANANARIVO1320',
    u'09EFTOLONDON2905': u'09LONDON2905',
    u'08AITTAIPIE1698': u'08AITTAIPEI1698',
    u'06EFTOBAKU1453': u'06BAKU1453',
    u'09AMEMBASSYHANOI909': u'09HANOI909',
    u'09AMEMBASSYHANOI903': u'09HANOI903',
    u'07DULIN903': u'07DUBLIN903',
    u'0901OF02RPODUBAI288': u'09DUBAI288',
    u'90STATE255577TOSEC140323': u'90STATE255577',
    u'90STATE255723TOSEC140328': u'90STATE255723',
    u'7575LIBREVILLE1895': u'75LIBREVILLE1895',
    u'09AMEMBASSYHANOI911': u'09HANOI911',
    u'08SECTION01F03BEIRUT1568': u'08BEIRUT1568',
    u'08SECTION01OF02CHISINAU976': u'08CHISINAU976',
    u'06SECION01OF02HARARE628': u'06HARARE628',
    u'94STATE183691TOSEC140193': u'94STATE183691',
    u'90STATE255291TOSEC140303': u'90STATE255291',
    u'90STATE255195TOSEC140301': u'90STATE255195',
    u'06SECTION01O02HONGKONG2311': u'06HONGKONG2311',
    u'09AMEMBASSYHANOI917': u'09HANOI917',
    u'09AMEMBASSYHANOI913': u'09HANOI913',
    u'09SECTIO01OF02NDJAMENA530': u'09NDJAMENA530',
}
| |
import sys
from PyQt4 import QtCore, QtGui, uic
from random import *
import os
#import gaea
import globals
import repo
import commit
import remote
#import clint libraries
from clint.arguments import Args
from clint.textui import puts, colored, indent
# Directory the app was launched from; all .ui files are resolved against it.
gaeaDir = os.getcwd()
form_class = uic.loadUiType(os.path.join(gaeaDir, "git.ui"))[0] # Load the UI
# Path of the currently opened repository; set by MyWindowClass.openFile().
projectDir = None
class InitPromptWindow(QtGui.QMainWindow):
    """Dialog that collects name/password/root-password and initialises a repo."""

    parent = None  # owning main window; set in __init__

    def __init__(self, parent):
        QtGui.QMainWindow.__init__(self)
        uic.loadUi(os.path.join(gaeaDir, 'initui.ui'),self)
        self.setWindowTitle("Init Prompt")
        self.resize(350,400)
        self.move(500, 500)
        self.but.clicked.connect(self.init)
        # Mask both password fields.
        self.editPassword.setEchoMode(QtGui.QLineEdit.Password)
        self.editRootPassword.setEchoMode(QtGui.QLineEdit.Password)
        self.parent = parent

    def init(self):
        """Validate the form and initialise a new repository via repo.init()."""
        print "button clicked"
        name = str(self.editName.text())
        print "name is ", name
        Password = str(self.editPassword.text())
        print "password is ", Password
        RootPassword = str(self.editRootPassword.text())
        print "password is ", RootPassword
        if (name.strip() and Password.strip() and RootPassword.strip()):
            try:
                repo.init(RootPassword, name, Password)
                # Refresh the main window, then close this dialog.
                self.parent.load()
                self.close()
            except Exception, e:
                print "error"
                self.errorMessage(str(e))
        else :
            # Report the first empty field.
            if not name.strip():
                self.errorMessage("Name field cannot be empty")
            elif not Password.strip():
                self.errorMessage("Password field cannot be empty")
            elif not RootPassword.strip():
                self.errorMessage("RootPassword field cannot be empty")
            print "error"

    def errorMessage(self,str):
        # NOTE(review): parameter name shadows the builtin ``str``.
        """Show a modal error box with the given text."""
        print "error"
        msgBox = QtGui.QMessageBox()
        msgBox.setWindowTitle("Error!")
        msgBox.setText(str)
        msgBox.exec_()
class CommitPromptWindow(QtGui.QMainWindow):
    """Dialog asking for a commit message; creates a (soft) snapshot on confirm."""

    t = 0          # leftover toggle for the disabled soft/hard radio buttons
    parent = None  # owning main window; set in __init__

    def __init__(self, parent = None):
        QtGui.QMainWindow.__init__(self)
        uic.loadUi(os.path.join(gaeaDir, 'commitui.ui'),self)
        self.setWindowTitle("Commit Prompt")
        self.resize(350,200)
        self.move(500, 500)
        self.but.clicked.connect(self.commit)
        # self.radioSoft.toggled.connect(self.softClick)
        self.parent = parent

    def commit(self):
        """Create a 'soft' snapshot with the entered message and refresh the parent."""
        print "button clicked"
        message = str(self.editMessage.text())
        print "msg is ", message
        try:
            #if self.t == 0:
            # Only 'soft' commits are currently exposed in the UI.
            commit.snap('soft', message)
            # elif self.t == 1:
            # commit.snap('hard', message)
            self.parent.load()
            self.close()
        except Exception, e:
            print e
            print "error"
            msgBox = QtGui.QMessageBox()
            msgBox.setWindowTitle("Error!")
            msgBox.setText(str(e))
            msgBox.exec_()

    # def softClick(self):
    # self.t = 1 - self.t
    # print "toggle ", t
    # # self.radioSoft.setChecked(True)
    # # self.radioHard.setChecked(False)
    # def hardClick(self):
    # self.t = 1 - self.t
    # print "toggle ", self.t
    # # self.radioSoft.setChecked(False)
    # # self.radioHard.setChecked(True)
class CloneWindow(QtGui.QMainWindow):
    """Dialog collecting the remote details needed to clone a repository."""

    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        uic.loadUi(os.path.join(gaeaDir, 'prompt_clone.ui'),self)
        self.setWindowTitle("Clone")
        self.resize(350,400)
        self.move(500, 500)
        self.but.clicked.connect(self.clone)
        # Mask every password field.
        self.editPassword.setEchoMode(QtGui.QLineEdit.Password)
        self.editNewPassword.setEchoMode(QtGui.QLineEdit.Password)
        self.editRootPassword.setEchoMode(QtGui.QLineEdit.Password)

    def clone(self):
        """Validate the form and clone from the given peer via remote.clone()."""
        print "button clicked"
        IP = str(self.editIP.text())
        # print "ip is ", IP
        Path = str(self.editPath.text())
        #print "path is ", Path
        Name = str(self.editName.text())
        #print "name is ", Name
        Password = str(self.editPassword.text())
        #print "password is ", Password
        NewName = str(self.editNewName.text())
        #print "name is ", NewName
        NewPassword = str(self.editNewPassword.text())
        #print "name is ", NewPassword
        RootPassword = str(self.editRootPassword.text())
        #print "name is ", RootPassword
        # NOTE(review): the New*/Root fields are not validated before use.
        if (IP.strip() and Path.strip() and Name.strip() and Password.strip()):
            try:
                remote.clone(IP, Path, Name, Password, False, RootPassword, NewName, NewPassword)
                self.close()
            except Exception, e:
                print "error"
                self.errorMessage(str(e))
        else:
            # Report the first empty required field.
            if not IP.strip():
                self.errorMessage("IP field cannot be empty")
            elif not Path.strip():
                self.errorMessage("Path field cannot be empty")
            elif not Name.strip():
                self.errorMessage("Name field cannot be empty")
            elif not Password.strip():
                self.errorMessage("Password field cannot be empty")
            print "error"

    def errorMessage(self,str):
        # NOTE(review): parameter name shadows the builtin ``str``.
        """Show a modal error box with the given text."""
        print "error"
        msgBox = QtGui.QMessageBox()
        msgBox.setWindowTitle("Error!")
        msgBox.setText(str)
        msgBox.exec_()
class PeerWindow(QtGui.QMainWindow):
    """Peer management dialog: list, add, delete and pull from known peers."""

    # NOTE(review): class-level mutable defaults; reset per window in
    # populatePeers()/__init__.
    PeerSelected = []  # row indexes of the currently checked peers
    parent = None      # owning main window

    def __init__(self, parent):
        QtGui.QMainWindow.__init__(self)
        uic.loadUi(os.path.join(gaeaDir, 'peers.ui'),self)
        self.setWindowTitle("Peer Details")
        self.resize(350,600)
        self.move(500, 500)
        self.but_add.clicked.connect(self.add)
        self.but_delete.clicked.connect(self.delete)
        self.but_delete.setEnabled(False)
        self.but_pull.clicked.connect(self.pull)
        self.but_pull.setEnabled(False)
        self.but_pullAll.clicked.connect(self.pullAll)
        self.list_peers.clicked.connect(self.listClicked1)
        self.populatePeers()
        self.parent = parent

    def populatePeers(self): #get PEER_INFO and populate it
        """Rebuild the peer list model from globals.PEERINFO and clear selection."""
        self.PeerSelected = []
        model = QtGui.QStandardItemModel()
        for key in globals.PEERINFO['peers'].keys():
            # Display as user@ip:path.
            item = QtGui.QStandardItem(globals.PEERINFO['peers'][key]['username'] + '@'+key+':'+globals.PEERINFO['peers'][key]['path'])
            check = QtCore.Qt.Unchecked
            item.setCheckState(check)
            item.setCheckable(True)
            item.setEditable(False)
            model.appendRow(item)
        self.list_peers.setModel(model)

    @QtCore.pyqtSlot(QtCore.QModelIndex)
    def listClicked1(self, index):
        """Toggle the clicked row's checked state and track it in PeerSelected."""
        print 'called '
        print index.row()
        print index.data().toString()
        if index.row() in self.PeerSelected:
            self.PeerSelected.remove(index.row())
            index.model().item(index.row()).setCheckState(QtCore.Qt.Unchecked)
            print "removed "
            print self.PeerSelected
        elif index.row() not in self.PeerSelected:
            self.PeerSelected.append(index.row())
            index.model().item(index.row()).setCheckState(QtCore.Qt.Checked)
            print "appended "
            print self.PeerSelected
        # Delete/pull only make sense with at least one peer checked.
        if(len(self.PeerSelected) == 0):
            self.but_delete.setEnabled(False)
            self.but_pull.setEnabled(False)
        else:
            self.but_delete.setEnabled(True)
            self.but_pull.setEnabled(True)

    def add(self):
        """Open the add-peer dialog."""
        print "add button clicked"
        self.myOtherWindow = AddPeerWindow(self)
        self.myOtherWindow.show()

    def delete(self):
        """Delete the first selected peer after confirmation."""
        print "del button clicked"
        del_msg = "Are you sure you want to delete " + str(self.PeerSelected)[1:-1]
        reply = QtGui.QMessageBox.question(self, 'Message', del_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            print "delete", self.PeerSelected
            try:
                # NOTE(review): only the first checked peer is deleted even
                # when several are checked; relies on dict key order matching
                # the list rows built in populatePeers().
                ip_del = globals.PEERINFO['peers'].keys()[self.PeerSelected[0]]
                remote.deletePeer(ip_del)
                self.populatePeers()
            except Exception, e:
                print e
                self.errorMessage(str(e))

    def pullAll(self):
        """Pull from every known peer, then refresh the main window."""
        print "pull all button pressed"
        try:
            remote.pullAll()
            self.parent.load()
        except Exception, e:
            print e
            self.parent.load()
            # NOTE(review): passes the exception object instead of str(e);
            # QMessageBox.setText() expects a string.
            self.errorMessage(e)

    def pull(self):
        """Pull from the single selected peer after confirmation."""
        print "pull button pressed"
        pull_msg = "Are you sure you want to pull from " + str(self.PeerSelected)[1:-1]
        reply = QtGui.QMessageBox.question(self, 'Message', pull_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            print "pull from ", self.PeerSelected
            if len(self.PeerSelected) == 1:
                try:
                    ip = globals.PEERINFO['peers'].keys()[self.PeerSelected[0]]
                    name = globals.PEERINFO['peers'][ip]['username']
                    path = globals.PEERINFO['peers'][ip]['path']
                    password = globals.PEERINFO['peers'][ip]['password']
                    print ip, name, path, password
                    remote.pull(ip, path, name, password)
                    self.parent.load()
                except Exception, e:
                    print e
                    self.errorMessage(str(e))

    def errorMessage(self,str):
        # NOTE(review): parameter name shadows the builtin ``str``.
        """Show a modal error box with the given text."""
        print "error"
        msgBox = QtGui.QMessageBox()
        msgBox.setWindowTitle("Error!")
        msgBox.setText(str)
        msgBox.exec_()
class AddPeerWindow(QtGui.QMainWindow):
    """Dialog collecting IP/path/name/password for a new peer."""

    parent = None  # owning PeerWindow; set in __init__

    def __init__(self, parent):
        QtGui.QMainWindow.__init__(self)
        uic.loadUi(os.path.join(gaeaDir, 'prompt.ui'),self)
        self.setWindowTitle("Add Peer")
        self.resize(350,400)
        self.move(500, 500)
        self.but.clicked.connect(self.addPeer)
        self.editPassword.setEchoMode(QtGui.QLineEdit.Password)
        self.parent = parent

    def addPeer(self):
        """Validate the form, register the peer and refresh the parent's list."""
        print "button clicked"
        IP = str(self.editIP.text())
        print "ip is ", IP
        Path = str(self.editPath.text())
        print "path is ", Path
        Name = str(self.editName.text())
        print "user name is ", Name
        Password = str(self.editPassword.text())
        print "password is ", Password
        if (IP.strip() and Path.strip() and Name.strip() and Password.strip()):
            try:
                remote.addPeer(IP, Path, Name, Password)
                self.parent.populatePeers()
                self.close()
            except Exception, e:
                print "error"
                self.errorMessage(str(e))
        else:
            # Report the first empty field.
            if not IP.strip():
                self.errorMessage("IP field cannot be empty")
            elif not Path.strip():
                self.errorMessage("Path field cannot be empty")
            elif not Name.strip():
                self.errorMessage("Name field cannot be empty")
            elif not Password.strip():
                self.errorMessage("Password field cannot be empty")
            print "error"

    def errorMessage(self,str):
        # NOTE(review): parameter name shadows the builtin ``str``.
        """Show a modal error box with the given text."""
        print "error"
        msgBox = QtGui.QMessageBox()
        msgBox.setWindowTitle("Error!")
        msgBox.setText(str)
        msgBox.exec_()
class MyWindowClass(QtGui.QMainWindow, form_class):
all_logs = []
diffList = []
model_log = QtGui.QStandardItemModel()
model_diff = QtGui.QStandardItemModel()
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.but_openFile.clicked.connect(self.openFile)
self.but_name.clicked.connect(self.setName)
self.but_email.clicked.connect(self.setEmail)
self.but_diff.clicked.connect(self.diff)
# self.but_remote.clicked.connect(self.setRemote)
# self.but_push.clicked.connect(self.push)
# self.but_pullAll.clicked.connect(self.pullAll)
# self.but_pull.clicked.connect(self.pull)
self.but_commit.clicked.connect(self.snap)
self.but_init.clicked.connect(self.init)
# self.but_log.clicked.connect(self.log)
self.but_restore.clicked.connect(self.restore)
self.but_clone.clicked.connect(self.clone)
self.but_managePeers.clicked.connect(self.managePeers)
self.but_delete.clicked.connect(self.delete)
self.diffList = []
# qtable_history = self.table_history
# qtable_history.setColumnCount(4)
# listOfLables = ['Commit Id','Message', 'Author', 'Time']
# header = qtable_history.horizontalHeader()
# header.setResizeMode(QtGui.QHeaderView.Stretch)
# qtable_history.setHorizontalHeaderLabels(listOfLables)
# self.populateTable(qtable_history)
# self.displayDiff()
# model = QtGui.QStandardItemModel()
# for i in range(10):
# item = QtGui.QStandardItem('Item %s' % randint(1, 100))
# check = QtCore.Qt.Unchecked
# item.setCheckState(check)
# item.setCheckable(True)
# item.setEditable(False)
# model.appendRow(item)
# self.list_commit.setModel(model)
# self.list_commit.clicked.connect(self.listClicked)
self.disableButtons()
self.list_commit.clicked.connect(self.listClicked)
# QtCore.QObject.connect(self.list_commit,QtCore.SIGNAL("clicked(QModelIndex)"), self.list_commit, QtCore.SLOT("ItemClicked(QModelIndex)"))
self.actionOpne_Project.setShortcut('Ctrl+O')
self.actionOpne_Project.triggered.connect(self.openFile)
self.actionExit.setShortcut('Alt+F4')
self.actionExit.triggered.connect(self.exit)
#print "cwd is " + globals.ROOT
#############
#functions to be called at when a repo is selected
# diff, log
@QtCore.pyqtSlot(QtCore.QModelIndex)
def listClicked(self, index):
print 'called '
print index.row()
print index.data().toString()
#index.setCheckState(True)
# print self.list_commit.item(index.row())
#print index.child(0, 0)
if index.row() in self.diffList:
self.diffList.remove(index.row())
print "removed "
print self.diffList
index.model().item(index.row()).setCheckState(QtCore.Qt.Unchecked)
elif index.row() not in self.diffList:
self.diffList.append(index.row())
index.model().item(index.row()).setCheckState(QtCore.Qt.Checked)
print "appended "
print self.diffList
# itms = self.assetList.selectedIndexes()
# for it in itms:
# print 'selected item index found at %s' % it.row()
# @pyqtSlot("QModelIndex")
# def ItemClicked(self,index):
# QMessageBox.information(None,"Hello!","You Clicked: \n"+index.data().toString())
def openFile(self):
global projectDir
# fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file', '/home')
fname = QtGui.QFileDialog.getExistingDirectory(self, 'Open Directory', '\home')
self.txt_path1.setText(fname)
globals.changeCWD(fname)
print "ROOT name changed to ", fname
projectDir = str(fname)
print globals.ROOT
os.chdir(globals.ROOT)
self.disableButtons()
self.load()
# clone and init enable, else disableButtons
def load(self):
try:
self.model_log.clear()
self.model_diff.clear()
print "load called"
self.list_commit.setModel(None)
self.diffList = []
repo.LoadRepo() ### already a repo
self.enableButtons()
self.log()
self.diff()
self.but_init.setEnabled(False)
self.but_clone.setEnabled(False)
except Exception, e:
print e
self.but_init.setEnabled(True)
self.but_clone.setEnabled(True)
def exit(self):
quit_msg = "Are you sure you want to exit the program?"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
sys.exit(0)
def setName(self):
text, ok = QtGui.QInputDialog.getText(self, 'Input Dialog', 'Enter your name:')
if ok:
print "name is ", str(text)
try:
repo.LoadRepo()
repo.setAuthor(str(text))
except Exception,e:
print e
self.errorMessage(str(e))
def setEmail(self):
text, ok = QtGui.QInputDialog.getText(self, 'Input Dialog', 'Enter your email address:')
if ok:
print "email is ", str(text)
try:
repo.LoadRepo()
repo.setEmail(str(text))
except Exception, e:
print e
self.errorMessage(str(e))
# def setRemote(self):
# name, ok = QtGui.QInputDialog.getText(self, 'Input Dialog', 'Enter remote name:')
# if ok:
# # print "name is ", str(name)
# try:
# address, ok = QtGui.QInputDialog.getText(self, 'Input Dialog', 'Enter remote address:')
# if ok:
# print "name is ", str(name), " address is ", str(address)
# except Exception, e:
# print e
def populateDiffInList(self,difference):
print difference
self.model_diff = QtGui.QStandardItemModel()
for line in iter(difference.splitlines()):
item = QtGui.QStandardItem(line)
if len(line) > 0:
if line[0]=='+' and (len(line) <2 or line[1] != '+'):
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0)) #Green
elif line[0]=='-' and (len(line) <2 or line[1] != '-'):
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0)) #Red
else:
brush = QtGui.QBrush(QtGui.QColor(0,0,0)) #black
brush.setStyle(QtCore.Qt.SolidPattern)
item.setForeground(brush)
item.setEditable(False)
self.model_diff.appendRow(item)
self.list_diff.setModel(self.model_diff)
def diff(self):
print "diff called"
print "diff list is ", self.diffList
difference = ''
if(len(self.diffList) == 1):
print "diff of current state and ", self.diffList[0]
difference = repo.diff(id1= self.all_logs[self.diffList[0]][0])
self.populateDiffInList(difference)
elif(len(self.diffList) == 2):
print "diff between ", self.diffList[0], " and ", self.diffList[1]
difference = repo.diff(id1= self.all_logs[self.diffList[0]][0], id2 = self.all_logs[self.diffList[1]][0])
self.populateDiffInList(difference)
elif(len(self.diffList) == 0):
print "diff from last commit"
difference = repo.diff()
self.populateDiffInList(difference)
# diff = repo.diff()
#
else:
print "error"
self.errorMessage("You have selected more than two commits for diff.")
# msgBox = QtGui.QMessageBox()
# msgBox.setWindowTitle("Error!")
# msgBox.setText("You have selected more than two commits for diff.")
# msgBox.exec_()
def populateTable(self, qtable):
array = [["mohit", "kumar", "garg", "cse"], ["gaurav", "gautam", "-", "phy"]]
print "populateTable called"
for row in range(2):
qtable.insertRow(row)
for column in range(4):
qtable.setItem(row, column, QtGui.QTableWidgetItem(QtCore.QString("%1").arg(array[row][column])))
def snap(self):
print "commit button pressed"
try:
self.myOtherWindow = CommitPromptWindow(self)
self.myOtherWindow.show()
print "commit function called"
except Exception, e:
print e
self.errorMessage(str(e))
# this function has been shifted to another window
# def pull(self):
# print "pull button pressed"
def delete(self):
print "delete button pressed"
if(len(self.diffList) == 0):
print "select atleast one"
msgBox = QtGui.QMessageBox()
msgBox.setWindowTitle("Error!")
msgBox.setText("Please select the commit you want to delete.")
msgBox.exec_()
elif(len(self.diffList) > 1):
print "select atleast one"
msgBox = QtGui.QMessageBox()
msgBox.setWindowTitle("Error!")
msgBox.setText("You have selected more than one commits. Please delete one by one.")
msgBox.exec_()
elif(len(self.diffList) == 1):
del_msg = "Are you sure you want to delete " + str(self.diffList[0])
reply = QtGui.QMessageBox.question(self, 'Message', del_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
print "del", self.diffList[0]
try:
commit.delete(self.all_logs[self.diffList[0]][0])
print "delete done", self.all_logs[self.diffList[0]][0]
self.load()
except Exception, e:
print e
self.errorMessage(str(e))
# try:
# repo.LoadRepo()
# commit.delete( convert self.diffList[0] to commit id)
# except Exception,e:
# print e
# This function has been shifted to another window
# def pullAll(self):
# print "pull all button pressed"
def displayDiff(self):
try:
repo.LoadRepo()
diff = repo.diff()
for line in iter(diff.splitlines()):
if line[0]=='+' and line[1]!='+':
puts(colored.red(line))
elif line[0]=='-' and line[2]!='-':
puts(colored.green(line))
else:
puts(colored.white(line, False, True))
except Exception, e:
print e
self.errorMessage(str(e))
self.edit_diff.setText("diff of two commits comes here")
def init(self):
try:
self.myOtherWindow = InitPromptWindow(self)
self.myOtherWindow.show()
self.load()
# repo.init()
print "init function called"
except Exception, e:
print e
self.errorMessage(str(e))
def log(self):
print "log button pressed"
try:
repo.LoadRepo()
print "log starts"
self.all_logs = repo.log()
model_log = QtGui.QStandardItemModel()
for i in self.all_logs:
item = QtGui.QStandardItem( i[0] +' ' +i[1] + ' '+ i[2] + ' '+ i[3])
if i[0] == globals.REPOINFO['HEAD']:
brush = QtGui.QBrush(QtGui.QColor(0,255,0)) #green
brush.setStyle(QtCore.Qt.SolidPattern)
item.setForeground(brush)
check = QtCore.Qt.Unchecked
item.setCheckState(check)
item.setCheckable(True)
item.setEditable(False)
model_log.appendRow(item)
self.list_commit.setModel(model_log)
except Exception, e:
print e
self.errorMessage(str(e))
def clone(self):
print "clone button pressed"
try:
self.myOtherWindow = CloneWindow()
self.myOtherWindow.show()
except Exception, e:
print e
self.errorMessage(str(e))
def managePeers(self):
print "peer button pressed"
try:
self.load()
self.myOtherWindow = PeerWindow(self)
self.myOtherWindow.show()
except Exception, e:
print e
self.errorMessage(str(e))
def restore(self):
    """Restore the working tree to the single selected commit.

    Exactly one commit must be checked in the list; otherwise an error
    dialog is shown. Asks for confirmation before restoring.
    """
    if len(self.diffList) == 0:
        self.errorMessage("Please select the commit you want to restore.")
    elif len(self.diffList) > 1:
        # Previously this branch printed "select atleast one" (copy-pasted
        # from the branch above); the message now matches the condition.
        self.errorMessage("You have selected more than one commits.")
    else:
        restore_msg = "Are you sure you want to restore " + str(self.diffList[0])
        reply = QtGui.QMessageBox.question(self, 'Message', restore_msg,
                                           QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            print("restore " + str(self.diffList[0]))
            try:
                commit.restore(self.all_logs[self.diffList[0]][0])
                self.load()
            except Exception as e:
                print(e)
                self.errorMessage(str(e))
def enableButtons(self):
    """Enable the controls that require a loaded repository."""
    for button in (self.but_name, self.but_email, self.but_restore,
                   self.but_delete, self.but_commit, self.but_diff,
                   self.but_managePeers):
        button.setEnabled(True)
def disableButtons(self):
    """Disable all repository controls (also init/clone, unlike enableButtons)."""
    for button in (self.but_name, self.but_email, self.but_init,
                   self.but_restore, self.but_delete, self.but_commit,
                   self.but_diff, self.but_clone, self.but_managePeers):
        button.setEnabled(False)
def errorMessage(self, message):
    """Show *message* in a modal error dialog.

    The parameter was previously named ``str``, shadowing the builtin;
    callers pass it positionally, so the rename is backward compatible.
    """
    print("error")
    msgBox = QtGui.QMessageBox()
    msgBox.setWindowTitle("Error!")
    msgBox.setText(message)
    msgBox.exec_()
# Application entry: remember the chosen project directory globally, then
# create the main window and run the Qt event loop until it exits.
globals.ROOT = projectDir
app = QtGui.QApplication(sys.argv)
myWindow = MyWindowClass(None)
myWindow.show()
app.exec_()
| |
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.textTools import safeEval
from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
from .otBase import ValueRecordFactory
def buildConverters(tableSpec, tableNamespace):
    """Given a table spec from otData.py, build a converter object for each
    field of the table. This is called for each table in otData.py, and
    the results are assigned to the corresponding class in otTables.py.

    Returns a (converters, convertersByName) pair: the ordered list of
    converters plus a name->converter mapping used when round-tripping XML.
    """
    converters = []
    convertersByName = {}
    for tp, name, repeat, aux, descr in tableSpec:
        tableName = name
        if name.startswith("ValueFormat"):
            # GPOS "ValueFormat1"/"ValueFormat2" fields get the special
            # converter that also installs a ValueRecordFactory.
            assert tp == "uint16"
            converterClass = ValueFormat
        elif name.endswith("Count") or name.endswith("LookupType"):
            # Computed fields: derived from the data rather than stored.
            assert tp == "uint16"
            converterClass = ComputedUShort
        elif name == "SubTable":
            converterClass = SubTable
        elif name == "ExtSubTable":
            converterClass = ExtSubTable
        elif name == "FeatureParams":
            converterClass = FeatureParams
        else:
            if not tp in converterMapping:
                # Unknown type name: treat it as an inline struct of that type.
                tableName = tp
                converterClass = Struct
            else:
                converterClass = converterMapping[tp]
        tableClass = tableNamespace.get(tableName)
        conv = converterClass(name, repeat, aux, tableClass)
        if name in ["SubTable", "ExtSubTable"]:
            conv.lookupTypes = tableNamespace['lookupTypes']
            # also create reverse mapping
            for t in conv.lookupTypes.values():
                for cls in t.values():
                    convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
        if name == "FeatureParams":
            conv.featureParamTypes = tableNamespace['featureParamTypes']
            conv.defaultFeatureParams = tableNamespace['FeatureParams']
            for cls in conv.featureParamTypes.values():
                convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
        converters.append(conv)
        # Field names must be unique within one table spec.
        assert name not in convertersByName, name
        convertersByName[name] = conv
    return converters, convertersByName
class BaseConverter(object):
    """Base class for converter objects. Apart from the constructor, this
    is an abstract class."""
    def __init__(self, name, repeat, aux, tableClass):
        # name: field name from otData.py; repeat: name of the count field
        # for array-valued fields (or None); aux: converter-specific extra
        # argument; tableClass: otTables class for table-valued fields.
        self.name = name
        self.repeat = repeat
        self.aux = aux
        self.tableClass = tableClass
        # Flags consulted by the (de)compile driver code.
        self.isCount = name.endswith("Count")
        self.isLookupType = name.endswith("LookupType")
        # Values propagated from parent to child tables during (de)compile.
        self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag"]
    def read(self, reader, font, tableDict):
        """Read a value from the reader."""
        raise NotImplementedError(self)
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        """Write a value to the writer."""
        raise NotImplementedError(self)
    def xmlRead(self, attrs, content, font):
        """Read a value from XML."""
        raise NotImplementedError(self)
    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        """Write a value to XML."""
        raise NotImplementedError(self)
class SimpleValue(BaseConverter):
    """Converter for scalar values serialized to a 'value' XML attribute."""
    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.simpletag(name, attrs + [("value", value)])
        xmlWriter.newline()
    def xmlRead(self, attrs, content, font):
        return attrs["value"]
class IntValue(SimpleValue):
    """Integer value; base 0 so XML may use decimal, hex (0x..) or octal."""
    def xmlRead(self, attrs, content, font):
        return int(attrs["value"], 0)
class Long(IntValue):
    """Signed 32-bit integer."""
    def read(self, reader, font, tableDict):
        return reader.readLong()
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer.writeLong(value)
class Version(BaseConverter):
    """16.16 fixed-point table version, e.g. 1.0 stored as 0x00010000."""
    def read(self, reader, font, tableDict):
        value = reader.readLong()
        assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
        return fi2fl(value, 16)
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        # Accept either a float version (e.g. 1.0) or a raw fixed-point int.
        if value < 0x10000:
            value = fl2fi(value, 16)
        value = int(round(value))
        assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
        writer.writeLong(value)
    def xmlRead(self, attrs, content, font):
        value = attrs["value"]
        # Only an explicit "0x" prefix marks a raw fixed-point value. The
        # previous startswith("0") test wrongly sent decimal strings such
        # as "0.5" into int(value, 0), which raises ValueError.
        value = float(int(value, 0)) if value.startswith("0x") else float(value)
        if value >= 0x10000:
            value = fi2fl(value, 16)
        return value
    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        if value >= 0x10000:
            value = fi2fl(value, 16)
        if value % 1 != 0:
            # Write as hex
            value = "0x%08x" % fl2fi(value, 16)
        xmlWriter.simpletag(name, attrs + [("value", value)])
        xmlWriter.newline()
class Short(IntValue):
    """Signed 16-bit integer."""
    def read(self, reader, font, tableDict):
        return reader.readShort()
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer.writeShort(value)
class UShort(IntValue):
    """Unsigned 16-bit integer."""
    def read(self, reader, font, tableDict):
        return reader.readUShort()
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer.writeUShort(value)
class UInt24(IntValue):
    """Unsigned 24-bit integer."""
    def read(self, reader, font, tableDict):
        return reader.readUInt24()
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer.writeUInt24(value)
class ComputedUShort(UShort):
    """uint16 whose value is computed on compile; emitted only as an XML
    comment so hand-edited XML cannot get it out of sync."""
    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.comment("%s=%s" % (name, value))
        xmlWriter.newline()
class Tag(SimpleValue):
    """4-byte OpenType tag (e.g. script/feature tags)."""
    def read(self, reader, font, tableDict):
        return reader.readTag()
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer.writeTag(value)
class GlyphID(SimpleValue):
    """Glyph index, exposed to Python/XML as the glyph *name*."""
    def read(self, reader, font, tableDict):
        value = reader.readUShort()
        # Translate numeric glyph id into the font's glyph name.
        value = font.getGlyphName(value)
        return value
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        value = font.getGlyphID(value)
        writer.writeUShort(value)
class FloatValue(SimpleValue):
    """Floating-point value parsed from the 'value' XML attribute."""
    def xmlRead(self, attrs, content, font):
        return float(attrs["value"])
class DeciPoints(FloatValue):
    """Value stored as tenths of a point in a uint16 (true division via
    the __future__ import at the top of the module)."""
    def read(self, reader, font, tableDict):
        value = reader.readUShort()
        return value / 10
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer.writeUShort(int(round(value * 10)))
class Struct(BaseConverter):
    """Converter for an inline (not offset-referenced) sub-table value."""
    def read(self, reader, font, tableDict):
        table = self.tableClass()
        table.decompile(reader, font)
        return table
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        value.compile(writer, font)
    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        if value is None:
            if attrs:
                # If there are attributes (probably index), then
                # don't drop this even if it's NULL.  It will mess
                # up the array indices of the containing element.
                xmlWriter.simpletag(name, attrs + [("empty", 1)])
                xmlWriter.newline()
            else:
                pass # NULL table, ignore
        else:
            value.toXML(xmlWriter, font, attrs, name=name)
    def xmlRead(self, attrs, content, font):
        if "empty" in attrs and safeEval(attrs["empty"]):
            # Placeholder written by xmlWrite above for a NULL array slot.
            return None
        table = self.tableClass()
        Format = attrs.get("Format")
        if Format is not None:
            table.Format = int(Format)
        for element in content:
            if isinstance(element, tuple):
                # NOTE: rebinds the attrs/content parameters; harmless, as
                # the originals are not needed past this point.
                name, attrs, content = element
                table.fromXML(name, attrs, content, font)
            else:
                pass  # character data between child elements: ignore
        return table
class Table(Struct):
    """Converter for a sub-table referenced through a 16-bit offset."""
    longOffset = False  # subclasses set True for 32-bit offsets
    def readOffset(self, reader):
        return reader.readUShort()
    def writeNullOffset(self, writer):
        if self.longOffset:
            writer.writeULong(0)
        else:
            writer.writeUShort(0)
    def read(self, reader, font, tableDict):
        offset = self.readOffset(reader)
        if offset == 0:
            # NULL offset: no sub-table present.
            return None
        if offset <= 3:
            # XXX hack to work around buggy pala.ttf
            print("*** Warning: offset is not 0, yet suspiciously low (%s). table: %s" \
                    % (offset, self.tableClass.__name__))
            return None
        table = self.tableClass()
        reader = reader.getSubReader(offset)
        if font.lazy:
            # Defer decompilation until the table is actually accessed.
            table.reader = reader
            table.font = font
        else:
            table.decompile(reader, font)
        return table
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        if value is None:
            self.writeNullOffset(writer)
        else:
            subWriter = writer.getSubWriter()
            subWriter.longOffset = self.longOffset
            subWriter.name = self.name
            if repeatIndex is not None:
                subWriter.repeatIndex = repeatIndex
            # Reserve the offset slot now; the actual offset is filled in
            # when the parent writer assembles its data.
            writer.writeSubTable(subWriter)
            value.compile(subWriter, font)
class LTable(Table):
    """Sub-table referenced through a 32-bit offset."""
    longOffset = True
    def readOffset(self, reader):
        return reader.readULong()
class SubTable(Table):
    """Lookup sub-table; the concrete class depends on the lookup type."""
    def getConverter(self, tableType, lookupType):
        # lookupTypes is attached to the instance by buildConverters().
        tableClass = self.lookupTypes[tableType][lookupType]
        return self.__class__(self.name, self.repeat, self.aux, tableClass)
class ExtSubTable(LTable, SubTable):
    """Extension lookup sub-table: 32-bit offset plus type dispatch."""
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer.Extension = 1 # actually, mere presence of the field flags it as an Ext Subtable writer.
        Table.write(self, writer, font, tableDict, value, repeatIndex)
class FeatureParams(Table):
    """FeatureParams table; the concrete class depends on the feature tag."""
    def getConverter(self, featureTag):
        # featureParamTypes/defaultFeatureParams are attached by buildConverters().
        tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams)
        return self.__class__(self.name, self.repeat, self.aux, tableClass)
class ValueFormat(IntValue):
    """GPOS ValueFormat field: stores the uint16 and installs a matching
    ValueRecordFactory on the reader/writer for later ValueRecord fields."""
    def __init__(self, name, repeat, aux, tableClass):
        BaseConverter.__init__(self, name, repeat, aux, tableClass)
        # Select the "ValueFormat1" or "ValueFormat2" slot from the name.
        self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1")
    def read(self, reader, font, tableDict):
        format = reader.readUShort()
        reader[self.which] = ValueRecordFactory(format)
        return format
    def write(self, writer, font, tableDict, format, repeatIndex=None):
        writer.writeUShort(format)
        writer[self.which] = ValueRecordFactory(format)
class ValueRecord(ValueFormat):
    """GPOS ValueRecord field; uses the factory stashed by ValueFormat."""
    def read(self, reader, font, tableDict):
        return reader[self.which].readValueRecord(reader, font)
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer[self.which].writeValueRecord(writer, font, value)
    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        if value is None:
            pass # NULL table, ignore
        else:
            value.toXML(xmlWriter, font, self.name, attrs)
    def xmlRead(self, attrs, content, font):
        # Local import; shadows this converter class name, which is fine
        # since the converter class itself is not referenced below.
        from .otBase import ValueRecord
        value = ValueRecord()
        value.fromXML(None, attrs, content, font)
        return value
class DeltaValue(BaseConverter):
    """Device-table delta array: values packed 2, 4 or 8 bits each
    (DeltaFormat 1/2/3) into a stream of uint16 words."""
    def read(self, reader, font, tableDict):
        StartSize = tableDict["StartSize"]
        EndSize = tableDict["EndSize"]
        DeltaFormat = tableDict["DeltaFormat"]
        assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
        nItems = EndSize - StartSize + 1
        nBits = 1 << DeltaFormat           # bits per delta: 2, 4 or 8
        minusOffset = 1 << nBits
        mask = (1 << nBits) - 1
        signMask = 1 << (nBits - 1)
        DeltaValue = []
        tmp, shift = 0, 0
        for i in range(nItems):
            if shift == 0:
                # Buffer exhausted: refill with the next 16-bit word.
                tmp, shift = reader.readUShort(), 16
            shift = shift - nBits
            value = (tmp >> shift) & mask
            if value & signMask:
                value = value - minusOffset  # sign-extend
            DeltaValue.append(value)
        return DeltaValue
    def write(self, writer, font, tableDict, value, repeatIndex=None):
        StartSize = tableDict["StartSize"]
        EndSize = tableDict["EndSize"]
        DeltaFormat = tableDict["DeltaFormat"]
        DeltaValue = value
        assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
        nItems = EndSize - StartSize + 1
        nBits = 1 << DeltaFormat
        assert len(DeltaValue) == nItems
        mask = (1 << nBits) - 1
        tmp, shift = 0, 16
        for value in DeltaValue:
            shift = shift - nBits
            tmp = tmp | ((value & mask) << shift)
            if shift == 0:
                # Word full: flush it and start a new one.
                writer.writeUShort(tmp)
                tmp, shift = 0, 16
        if shift != 16:
            # Flush the final, partially filled word.
            writer.writeUShort(tmp)
    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.simpletag(name, attrs + [("value", value)])
        xmlWriter.newline()
    def xmlRead(self, attrs, content, font):
        return safeEval(attrs["value"])
# Mapping from otData.py type names to converter classes; consulted by
# buildConverters() for all fields without special-cased names.
converterMapping = {
    # type         class
    "int16":       Short,
    "uint16":      UShort,
    "uint24":      UInt24,
    "Version":     Version,
    "Tag":         Tag,
    "GlyphID":     GlyphID,
    "DeciPoints":  DeciPoints,
    "struct":      Struct,
    "Offset":      Table,
    "LOffset":     LTable,
    "ValueRecord": ValueRecord,
    "DeltaValue":  DeltaValue,
}
| |
#===----------------------------------------------------------------------===##
#
# The LLVM Compiler Infrastructure
#
# This file is dual licensed under the MIT and the University of Illinois Open
# Source Licenses. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===##
import ast
import distutils.spawn
import sys
import re
import libcxx.util
def read_syms_from_list(slist):
    """Parse one symbol per string out of ``slist``.

    Each entry is expected to be the repr() of a symbol dict, as written
    by write_syms(); it is evaluated safely with ast.literal_eval.
    """
    symbols = []
    for entry in slist:
        symbols.append(ast.literal_eval(entry))
    return symbols
def read_syms_from_file(filename):
    """Read a list of symbols in from a file, one symbol repr per line."""
    with open(filename, 'r') as f:
        contents = f.read()
    return read_syms_from_list(contents.splitlines())
def read_blacklist(filename):
    """Return the non-empty, non-comment lines of ``filename``, stripped."""
    with open(filename, 'r') as f:
        raw_lines = f.read().splitlines()
    stripped = (l.strip() for l in raw_lines)
    return [l for l in stripped if l and not l.startswith('#')]
def write_syms(sym_list, out=None, names_only=False):
    """
    Write a list of symbols to the file named by out (stdout when out is
    None), sorted by symbol name. When ``names_only`` is true, only the
    names are written.

    Unlike the previous version, the input list is no longer sorted in
    place, so the caller's data is left untouched.
    """
    out_list = sorted(sym_list, key=lambda x: x['name'])
    if names_only:
        out_list = [sym['name'] for sym in out_list]
    out_str = ''.join('%s\n' % sym for sym in out_list)
    if out is None:
        sys.stdout.write(out_str)
    else:
        with open(out, 'w') as f:
            f.write(out_str)
# Locate c++filt once at import time; demangling silently degrades to a
# no-op when the tool is not installed.
_cppfilt_exe = distutils.spawn.find_executable('c++filt')
def demangle_symbol(symbol):
    """Demangle ``symbol`` via c++filt, returning it unchanged on failure."""
    if _cppfilt_exe is None:
        return symbol
    out, _, exit_code = libcxx.util.executeCommandVerbose(
        [_cppfilt_exe], input=symbol)
    if exit_code != 0:
        return symbol
    return out
def is_elf(filename):
    """True when ``filename`` begins with the 4-byte ELF magic."""
    with open(filename, 'rb') as f:
        return f.read(4) == b'\x7fELF'
def is_mach_o(filename):
    """True when ``filename`` starts with any Mach-O or fat-binary magic."""
    with open(filename, 'rb') as f:
        magic_bytes = f.read(4)
    # The magics must be bytes literals: the file is opened in binary
    # mode, and under Python 3 bytes never compare equal to str, so the
    # previous str-literal list made this function always return False.
    return magic_bytes in [
        b'\xfe\xed\xfa\xce',  # MH_MAGIC
        b'\xce\xfa\xed\xfe',  # MH_CIGAM
        b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
        b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
        b'\xca\xfe\xba\xbe',  # FAT_MAGIC
        b'\xbe\xba\xfe\xca'   # FAT_CIGAM
    ]
def is_library_file(filename):
    """Dispatch to the platform-appropriate binary-format check."""
    if sys.platform == 'darwin':
        return is_mach_o(filename)
    return is_elf(filename)
def extract_or_load(filename):
    """Get symbols from ``filename``: extract them if it is a library
    binary, otherwise load it as a previously written symbol listing."""
    # Imported lazily to avoid a hard dependency for list-only workflows.
    import libcxx.sym_check.extract
    if is_library_file(filename):
        return libcxx.sym_check.extract.extract_symbols(filename)
    return read_syms_from_file(filename)
def adjust_mangled_name(name):
    """Canonicalise a mangled name by dropping the extra leading underscore
    (Mach-O prepends one, giving '__Z...' instead of '_Z...')."""
    return name[1:] if name.startswith('__Z') else name
# Mangled names of the global operator new/delete family, which the C++
# runtime library always provides.
new_delete_std_symbols = [
    '_Znam',
    '_Znwm',
    '_ZdaPv',
    '_ZdaPvm',
    '_ZdlPv',
    '_ZdlPvm'
]
# Symbols the C++ ABI library must provide: two ABI entry points plus the
# typeinfo ('_ZTI*') and typeinfo-name ('_ZTS*') objects for fundamental
# types. The suffix series runs: the two-letter 'D' codes, their pointer
# (P) and pointer-to-const (PK) variants, then PK/P/bare variants of the
# single-letter fundamental type codes. Order matches the original
# hand-written list.
_d_codes = ['Di', 'Dn', 'Ds']
_fundamental_codes = ['a', 'b', 'c', 'd', 'e', 'f', 'h', 'i', 'j',
                      'l', 'm', 's', 't', 'v', 'w', 'x', 'y']
_rtti_suffixes = (
    _d_codes
    + ['P' + c for c in _d_codes]
    + ['PK' + c for c in _d_codes]
    + ['PK' + c for c in _fundamental_codes]
    + ['P' + c for c in _fundamental_codes]
    + _fundamental_codes
)
cxxabi_symbols = (
    ['___dynamic_cast', '___gxx_personality_v0']
    + ['_ZTI' + s for s in _rtti_suffixes]
    + ['_ZTS' + s for s in _rtti_suffixes]
)
def is_stdlib_symbol_name(name):
    """Heuristically decide whether ``name`` belongs to the C++ standard
    library (Itanium mangling, known new/delete and cxxabi names)."""
    name = adjust_mangled_name(name)
    # Versioned glibc/gcc symbols are never part of the C++ library.
    if re.search("@GLIBC|@GCC", name):
        return False
    if re.search('(St[0-9])|(__cxa)|(__cxxabi)', name):
        return True
    if name in new_delete_std_symbols or name in cxxabi_symbols:
        return True
    return name.startswith('_Z')
def filter_stdlib_symbols(syms):
    """Split ``syms`` into a (stdlib_symbols, other_symbols) pair.

    Any non-stdlib symbol is expected to be an undefined reference; a
    defined non-std symbol means symbol extraction went wrong.
    """
    stdlib_symbols = []
    other_symbols = []
    for s in syms:
        canon_name = adjust_mangled_name(s['name'])
        if not is_stdlib_symbol_name(canon_name):
            # The old form `assert cond and "msg"` never displayed the
            # message (it was just part of the boolean expression); the
            # assert statement takes the message after a comma.
            assert not s['is_defined'], "found defined non-std symbol"
            other_symbols.append(s)
        else:
            stdlib_symbols.append(s)
    return stdlib_symbols, other_symbols
| |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
import shutil
import sys
# Absolute directory containing this conf.py; derived via inspect because
# Sphinx execfile()'s this file, so __file__ may not be reliable.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try:  # for Sphinx >= 1.7
    from sphinx.ext import apidoc
except ImportError:
    from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/panoptes")
# Remove any stale generated API docs; ignore a missing directory.
try:
    shutil.rmtree(output_dir)
except FileNotFoundError:
    pass
# Best-effort apidoc run: a failure only prints a message so the rest of
# the Sphinx build can proceed.
try:
    import sphinx
    from pkg_resources import parse_version
    cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
    cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
    args = cmd_line.split(" ")
    if parse_version(sphinx.__version__) >= parse_version('1.7'):
        # apidoc.main() no longer expects the program name as argv[0].
        args = args[1:]
    apidoc.main(args)
except Exception as e:
    print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'matplotlib.sphinxext.plot_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst']
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'POCS'
copyright = u'2020, Project PANOPTES'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# 'sidebar_width': '300px',
# 'page_width': '1200px'
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# Prefer the installed package's version string when available; otherwise
# fall back silently to the defaults set earlier (filled by `setup.py docs`).
try:
    from panoptes.pocs import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
html_logo = '_static/pan-title-black-transparent.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pocs-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'POCS Documentation',
u'Project PANOPTES', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('https://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('https://scikit-learn.org/stable', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'astropy': ('https://docs.astropy.org/en/stable/', None),
'astroplan': ('https://astroplan.readthedocs.io/en/latest/', None),
'panoptes.utils': ('https://panoptes-utils.readthedocs.io/en/latest/', None),
}
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
def skip(app, what, name, obj, would_skip, options):
    """autodoc-skip-member hook: always document ``__init__`` methods."""
    return False if name == "__init__" else would_skip
def setup(app):
    """Sphinx extension entry point: register the autodoc skip hook."""
    app.connect("autodoc-skip-member", skip)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from monty.json import MSONable
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
from pymatgen.util.coord import get_linear_interpolated_value
"""
This module defines classes to represent any type of spectrum, essentially any
x y value pairs.
"""
__author__ = "Chen Zheng"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Chen Zheng"
__email__ = "chz022@ucsd.edu"
__date__ = "Aug 9, 2017"
class Spectrum(MSONable):
    """
    Base class for any type of xas, essentially just x, y values. Examples
    include XRD patterns, XANES, EXAFS, NMR, DOS, etc.
    Implements basic tools like application of smearing, normalization, addition
    multiplication, etc.
    Subclasses should extend this object and ensure that super is called with
    ALL args and kwargs. That ensures subsequent things like add and mult work
    properly.
    """
    XLABEL = "x"
    YLABEL = "y"

    def __init__(self, x, y, *args, **kwargs):
        """
        Args:
            x (ndarray): A ndarray of N values.
            y (ndarray): A ndarray of N x k values. The first dimension must be
                the same as that of x. Each of the k values are interpreted as
            \\*args: All subclasses should provide args other than x and y
                when calling super, e.g., super().__init__(
                x, y, arg1, arg2, kwarg1=val1, ..). This guarantees the +, -, *,
                etc. operators work properly.
            \\*\\*kwargs: Same as that for \\*args.
        """
        self.x = np.array(x)
        self.y = np.array(y)
        # ydim caches y's shape; used to distinguish 1-D from N x k data.
        self.ydim = self.y.shape
        if self.x.shape[0] != self.ydim[0]:
            raise ValueError("x and y values have different first dimension!")
        # Stored so arithmetic operators can reconstruct subclass instances.
        self._args = args
        self._kwargs = kwargs

    def __getattr__(self, item):
        # Allow subclass-defined axis labels (e.g. "energy", "intensity")
        # to be used as attribute aliases for x and y.
        if item == self.XLABEL.lower():
            return self.x
        elif item == self.YLABEL.lower():
            return self.y
        else:
            raise AttributeError("Invalid attribute name %s" % str(item))

    def __len__(self):
        return self.ydim[0]

    def normalize(self, mode="max", value=1):
        """
        Normalize the spectrum with respect to the maximum or the sum of
        intensity.

        Args:
            mode (str): Normalization mode. Supported modes are "max" (set the
                max y value to value, e.g., in XRD patterns), "sum" (set the
                sum of y to a value, i.e., like a probability density).
            value (float): Value to normalize to. Defaults to 1.
        """
        if mode.lower() == "sum":
            factor = np.sum(self.y, axis=0)
        elif mode.lower() == "max":
            factor = np.max(self.y, axis=0)
        else:
            raise ValueError("Unsupported normalization mode %s!" % mode)
        # Rebind instead of in-place division: `self.y /= ...` raises a
        # casting error for integer-dtype y arrays.
        self.y = self.y / (factor / value)

    def smear(self, sigma):
        """
        Apply Gaussian smearing to spectrum y value.

        Args:
            sigma: Std dev for Gaussian smear function, in x-axis units.
        """
        # Convert sigma from x units to number of steps using the average
        # x spacing (the x grid is assumed approximately uniform).
        avg_x_per_step = np.mean(np.diff(self.x))
        if len(self.ydim) == 1:
            self.y = gaussian_filter1d(self.y, sigma / avg_x_per_step)
        else:
            # Smear each of the k columns independently.
            self.y = np.array([
                gaussian_filter1d(self.y[:, k], sigma / avg_x_per_step)
                for k in range(self.ydim[1])]).T

    def get_interpolated_value(self, x):
        """
        Returns an interpolated y value for a particular x value.

        Args:
            x: x value to return the y value for

        Returns:
            Value of y at x (a list of k values for N x k data).
        """
        if len(self.ydim) == 1:
            return get_linear_interpolated_value(self.x, self.y, x)
        else:
            return [get_linear_interpolated_value(self.x, self.y[:, k], x)
                    for k in range(self.ydim[1])]

    def copy(self):
        """
        Returns:
            Copy of Spectrum object.
        """
        return self.__class__(self.x, self.y, *self._args, **self._kwargs)

    def __add__(self, other):
        """
        Add two Spectrum object together. Checks that x scales are the same.
        Otherwise, a ValueError is thrown.

        Args:
            other: Another Spectrum object

        Returns:
            Sum of the two Spectrum objects
        """
        # array_equal handles mismatched lengths gracefully (returns False)
        # where elementwise comparison would raise a broadcast error.
        if not np.array_equal(self.x, other.x):
            raise ValueError("X axis values are not compatible!")
        return self.__class__(self.x, self.y + other.y, *self._args,
                              **self._kwargs)

    def __sub__(self, other):
        """
        Substract one Spectrum object from another. Checks that x scales are
        the same. Otherwise, a ValueError is thrown.

        Args:
            other: Another Spectrum object

        Returns:
            Substraction of the two Spectrum objects
        """
        if not np.array_equal(self.x, other.x):
            raise ValueError("X axis values are not compatible!")
        return self.__class__(self.x, self.y - other.y, *self._args,
                              **self._kwargs)

    def __mul__(self, other):
        """
        Scale the Spectrum's y values

        Args:
            other: scalar, The scale amount

        Returns:
            Spectrum object with y values scaled
        """
        return self.__class__(self.x, other * self.y, *self._args,
                              **self._kwargs)
    __rmul__ = __mul__

    def __truediv__(self, other):
        """
        True division of y

        Args:
            other: The divisor

        Returns:
            Spectrum object with y values divided
        """
        return self.__class__(self.x, self.y.__truediv__(other), *self._args,
                              **self._kwargs)

    def __floordiv__(self, other):
        """
        Floor division of y.

        Args:
            other: The divisor

        Returns:
            Spectrum object with y values floor-divided
        """
        return self.__class__(self.x, self.y.__floordiv__(other), *self._args,
                              **self._kwargs)
    __div__ = __truediv__

    def __str__(self):
        """
        Returns a string containing values and labels of spectrum object for
        plotting.
        """
        return "\n".join([self.__class__.__name__,
                          "%s: %s" % (self.XLABEL, self.x),
                          "%s: %s" % (self.YLABEL, self.y)])

    def __repr__(self):
        """
        Returns a printable representation of the class
        """
        return self.__str__()
| |
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
try:
import mock # Python 2
except ImportError:
from unittest import mock # Python 3
from nose.tools import raises
from ryu.services.protocols.bgp import bgpspeaker
LOG = logging.getLogger(__name__)
class Test_BGPSpeaker(unittest.TestCase):
    """
    Test case for bgp.bgpspeaker.BGPSpeaker

    Each test patches BGPSpeaker.__init__ (so no real BGP state is set up)
    and the module-level ``call`` function, then checks exactly which
    keyword arguments ``evpn_prefix_add``/``evpn_prefix_del`` forward to
    ``call`` for the various EVPN route types.
    """
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_mac_ip_adv(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MAC_IP_ADV_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        next_hop = '10.0.0.1'
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            'esi': esi,
            'ethernet_tag_id': ethernet_tag_id,
            'mac_addr': mac_addr,
            'ip_addr': ip_addr,
            'next_hop': next_hop,
        }
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
            next_hop=next_hop,
        )
        # Check
        mock_call.assert_called_with(
            'evpn_prefix.add_local', **expected_kwargs)
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_mac_ip_adv_vni(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MAC_IP_ADV_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        vni = 500
        next_hop = '10.0.0.1'
        tunnel_type = bgpspeaker.TUNNEL_TYPE_VXLAN
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            'esi': esi,
            'ethernet_tag_id': ethernet_tag_id,
            'mac_addr': mac_addr,
            'ip_addr': ip_addr,
            'vni': vni,
            'next_hop': next_hop,
            'tunnel_type': tunnel_type,
        }
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
            vni=vni,
            next_hop=next_hop,
            tunnel_type=tunnel_type,
        )
        # Check
        mock_call.assert_called_with(
            'evpn_prefix.add_local', **expected_kwargs)
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_multicast_etag(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        next_hop = '10.0.0.1'
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            # 'esi': esi,  # should be ignored
            'ethernet_tag_id': ethernet_tag_id,
            # 'mac_addr': mac_addr,  # should be ignored
            'ip_addr': ip_addr,
            'next_hop': next_hop,
        }
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
            next_hop=next_hop,
        )
        # Check
        mock_call.assert_called_with(
            'evpn_prefix.add_local', **expected_kwargs)
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_multicast_etag_no_next_hop(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        next_hop = '0.0.0.0'  # the default value
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            # 'esi': esi,  # should be ignored
            'ethernet_tag_id': ethernet_tag_id,
            # 'mac_addr': mac_addr,  # should be ignored
            'ip_addr': ip_addr,
            'next_hop': next_hop,
        }
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
            # next_hop=next_hop,  # omitted
        )
        # Check
        mock_call.assert_called_with(
            'evpn_prefix.add_local', **expected_kwargs)
    @raises(ValueError)
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_invalid_route_type(self, mock_call):
        # Prepare test data
        route_type = 'foobar'  # Invalid EVPN route type
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        next_hop = '10.0.0.1'
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
            next_hop=next_hop,
        )
        # Check
        # NOTE(review): never reached — evpn_prefix_add raises ValueError
        # above and @raises consumes it; this assert is dead code.
        mock_call.assert_called_with(
            'evpn_prefix.add_local', 'Invalid arguments detected')
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_del_mac_ip_adv(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MAC_IP_ADV_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            'esi': esi,
            'ethernet_tag_id': ethernet_tag_id,
            'mac_addr': mac_addr,
            'ip_addr': ip_addr,
        }
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_del(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
        )
        # Check
        mock_call.assert_called_with(
            'evpn_prefix.delete_local', **expected_kwargs)
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_del_multicast_etag(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            # 'esi': esi,  # should be ignored
            'ethernet_tag_id': ethernet_tag_id,
            # 'mac_addr': mac_addr,  # should be ignored
            'ip_addr': ip_addr,
        }
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_del(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
        )
        # Check
        mock_call.assert_called_with(
            'evpn_prefix.delete_local', **expected_kwargs)
    @raises(ValueError)
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_del_invalid_route_type(self, mock_call):
        # Prepare test data
        route_type = 'foobar'  # Invalid EVPN route type
        route_dist = '65000:100'
        esi = 0  # denotes single-homed
        ethernet_tag_id = 200
        mac_addr = 'aa:bb:cc:dd:ee:ff'
        ip_addr = '192.168.0.1'
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_del(
            route_type=route_type,
            route_dist=route_dist,
            esi=esi,
            ethernet_tag_id=ethernet_tag_id,
            mac_addr=mac_addr,
            ip_addr=ip_addr,
        )
        # Check
        # NOTE(review): never reached — evpn_prefix_del raises ValueError
        # above and @raises consumes it; this assert is dead code.
        mock_call.assert_called_with(
            'evpn_prefix.delete_local', 'Invalid arguments detected')
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
                mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_pmsi_no_tunnel_info(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
        route_dist = '65000:100'
        ethernet_tag_id = 200
        next_hop = '0.0.0.0'
        ip_addr = '192.168.0.1'
        pmsi_tunnel_type = bgpspeaker.PMSI_TYPE_NO_TUNNEL_INFO
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            'ethernet_tag_id': ethernet_tag_id,
            'next_hop': next_hop,
            'ip_addr': ip_addr,
            'pmsi_tunnel_type': pmsi_tunnel_type,
        }
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            ethernet_tag_id=ethernet_tag_id,
            ip_addr=ip_addr,
            pmsi_tunnel_type=pmsi_tunnel_type,
        )
        # Check
        mock_call.assert_called_with(
            'evpn_prefix.add_local', **expected_kwargs)
    @mock.patch(
        'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
        mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_pmsi_ingress_rep(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
        route_dist = '65000:100'
        ethernet_tag_id = 200
        next_hop = '0.0.0.0'
        ip_addr = '192.168.0.1'
        pmsi_tunnel_type = bgpspeaker.PMSI_TYPE_INGRESS_REP
        expected_kwargs = {
            'route_type': route_type,
            'route_dist': route_dist,
            'ethernet_tag_id': ethernet_tag_id,
            'next_hop': next_hop,
            'ip_addr': ip_addr,
            'pmsi_tunnel_type': pmsi_tunnel_type,
        }
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            ethernet_tag_id=ethernet_tag_id,
            ip_addr=ip_addr,
            pmsi_tunnel_type=pmsi_tunnel_type,
        )
        # Check
        mock_call.assert_called_with(
            'evpn_prefix.add_local', **expected_kwargs)
    @raises(ValueError)
    @mock.patch(
        'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
        mock.MagicMock(return_value=None))
    @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
    def test_evpn_prefix_add_invalid_pmsi_tunnel_type(self, mock_call):
        # Prepare test data
        route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
        route_dist = '65000:100'
        ethernet_tag_id = 200
        next_hop = '0.0.0.0'
        ip_addr = '192.168.0.1'
        pmsi_tunnel_type = 1
        # Test
        speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
        speaker.evpn_prefix_add(
            route_type=route_type,
            route_dist=route_dist,
            ethernet_tag_id=ethernet_tag_id,
            ip_addr=ip_addr,
            pmsi_tunnel_type=pmsi_tunnel_type,
        )
        # Check
        # NOTE(review): never reached — evpn_prefix_add raises ValueError
        # above and @raises consumes it; this assert is dead code.
        mock_call.assert_called_with(
            'evpn_prefix.add_local', 'Invalid arguments detected')
| |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ORStools
A QGIS plugin
QGIS client to query openrouteservice
-------------------
begin : 2017-02-01
git sha : $Format:%H$
copyright : (C) 2021 by HeiGIT gGmbH
email : support@openrouteservice.heigit.org
***************************************************************************/
This plugin provides access to openrouteservice API functionalities
(https://openrouteservice.org), developed and
maintained by the openrouteservice team of HeiGIT gGmbH, Germany. By using
this plugin you agree to the ORS terms of service
(https://openrouteservice.org/terms-of-service/).
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.core import (QgsWkbTypes,
QgsCoordinateReferenceSystem,
QgsProcessing,
QgsProcessingParameterField,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
)
from ORStools.common import directions_core, PROFILES, PREFERENCES
from ORStools.utils import transform, exceptions, logger
from .base_processing_algorithm import ORSBaseProcessingAlgorithm
# noinspection PyPep8Naming
class ORSDirectionsPointsLayersAlgo(ORSBaseProcessingAlgorithm):
    """QGIS processing algorithm that requests openrouteservice directions
    between the features of two point layers, either pairing features
    row-by-row or combining them all-by-all.
    """
    def __init__(self):
        # Declare algorithm identity and the names of all input parameters;
        # the parameter objects themselves are built in self.PARAMETERS.
        super().__init__()
        self.ALGO_NAME = 'directions_from_points_2_layers'
        self.GROUP = "Directions"
        # Pairing strategies between start and end layers.
        self.MODE_SELECTION: list = ['Row-by-Row', 'All-by-All']
        self.IN_START = "INPUT_START_LAYER"
        self.IN_START_FIELD = "INPUT_START_FIELD"
        self.IN_SORT_START_BY = "INPUT_SORT_START_BY"
        self.IN_END = "INPUT_END_LAYER"
        self.IN_END_FIELD = "INPUT_END_FIELD"
        self.IN_SORT_END_BY = "INPUT_SORT_END_BY"
        self.IN_PROFILE = "INPUT_PROFILE"
        self.IN_PREFERENCE = "INPUT_PREFERENCE"
        self.IN_MODE = "INPUT_MODE"
        self.PARAMETERS = [
            QgsProcessingParameterFeatureSource(
                name=self.IN_START,
                description="Input Start Point layer",
                types=[QgsProcessing.TypeVectorPoint],
            ),
            QgsProcessingParameterField(
                name=self.IN_START_FIELD,
                description="Start ID Field (can be used for joining)",
                parentLayerParameterName=self.IN_START,
                defaultValue=None,
                optional=True,
            ),
            QgsProcessingParameterField(
                name=self.IN_SORT_START_BY,
                description="Sort Start Points by",
                parentLayerParameterName=self.IN_START,
                defaultValue=None,
                optional=True
            ),
            QgsProcessingParameterFeatureSource(
                name=self.IN_END,
                description="Input End Point layer",
                types=[QgsProcessing.TypeVectorPoint],
            ),
            QgsProcessingParameterField(
                name=self.IN_END_FIELD,
                description="End ID Field (can be used for joining)",
                parentLayerParameterName=self.IN_END,
                defaultValue=None,
                optional=True,
            ),
            QgsProcessingParameterField(
                name=self.IN_SORT_END_BY,
                description="Sort End Points by",
                parentLayerParameterName=self.IN_END,
                defaultValue=None,
                optional=True
            ),
            QgsProcessingParameterEnum(
                self.IN_PROFILE,
                "Travel mode",
                PROFILES,
                defaultValue=PROFILES[0]
            ),
            QgsProcessingParameterEnum(
                self.IN_PREFERENCE,
                "Travel preference",
                PREFERENCES,
                defaultValue=PREFERENCES[0]
            ),
            QgsProcessingParameterEnum(
                self.IN_MODE,
                "Layer mode",
                self.MODE_SELECTION,
                defaultValue=self.MODE_SELECTION[0]
            )
        ]
    # TODO: preprocess parameters to options the range cleanup below:
    # https://www.qgis.org/pyqgis/master/core/Processing/QgsProcessingAlgorithm.html#qgis.core.QgsProcessingAlgorithm.preprocessParameters
    def processAlgorithm(self, parameters, context, feedback):
        """Request a route per start/end pairing and write the results to
        the output line-layer sink (EPSG:4326). Per-route API errors are
        reported and logged, then skipped, so one failure does not abort
        the whole run.
        """
        ors_client = self._get_ors_client_from_provider(parameters[self.IN_PROVIDER], feedback)
        # Enum parameters arrive as indices; map them back to their labels.
        profile = dict(enumerate(PROFILES))[parameters[self.IN_PROFILE]]
        preference = dict(enumerate(PREFERENCES))[parameters[self.IN_PREFERENCE]]
        mode = dict(enumerate(self.MODE_SELECTION))[parameters[self.IN_MODE]]
        # Get parameter values
        source = self.parameterAsSource(
            parameters,
            self.IN_START,
            context
        )
        source_field_name = parameters[self.IN_START_FIELD]
        source_field = source.fields().field(source_field_name) if source_field_name else None
        sort_start_by = parameters[self.IN_SORT_START_BY]
        # Sort key: the chosen attribute if given, otherwise feature id.
        if sort_start_by:
            def sort_start(f): return f.attribute(sort_start_by)
        else:
            def sort_start(f): return f.id()
        destination = self.parameterAsSource(
            parameters,
            self.IN_END,
            context
        )
        destination_field_name = parameters[self.IN_END_FIELD]
        destination_field = destination.fields().field(destination_field_name) if destination_field_name else None
        sort_end_by = parameters[self.IN_SORT_END_BY]
        if sort_end_by:
            def sort_end(f): return f.attribute(sort_end_by)
        else:
            def sort_end(f): return f.id()
        route_dict = self._get_route_dict(
            source,
            source_field,
            sort_start,
            destination,
            destination_field,
            sort_end
        )
        # Number of expected routes, used only for progress reporting.
        if mode == 'Row-by-Row':
            route_count = min([source.featureCount(), destination.featureCount()])
        else:
            route_count = source.featureCount() * destination.featureCount()
        # get types of set ID fields
        field_types = dict()
        if source_field:
            field_types.update({"from_type": source_field.type()})
        if destination_field:
            field_types.update({"to_type": destination_field.type()})
        sink_fields = directions_core.get_fields(**field_types)
        (sink, dest_id) = self.parameterAsSink(parameters, self.OUT, context, sink_fields,
                                               QgsWkbTypes.LineString,
                                               QgsCoordinateReferenceSystem.fromEpsgId(4326))
        counter = 0
        for coordinates, values in directions_core.get_request_point_features(route_dict, mode):
            # Stop the algorithm if cancel button has been clicked
            if feedback.isCanceled():
                break
            params = directions_core.build_default_parameters(preference, coordinates=coordinates)
            try:
                response = ors_client.request('/v2/directions/' + profile + '/geojson', {}, post_json=params)
            except (exceptions.ApiError,
                    exceptions.InvalidKey,
                    exceptions.GenericServerError) as e:
                # Report and skip this pair; continue with the remaining routes.
                msg = f"Route from {values[0]} to {values[1]} caused a {e.__class__.__name__}:\n{str(e)}"
                feedback.reportError(msg)
                logger.log(msg)
                continue
            sink.addFeature(directions_core.get_output_feature_directions(
                response,
                profile,
                preference,
                from_value=values[0],
                to_value=values[1]
            ))
            counter += 1
            feedback.setProgress(int(100.0 / route_count * counter))
        return {self.OUT: dest_id}
    @staticmethod
    def _get_route_dict(source, source_field, sort_start, destination, destination_field, sort_end):
        """
        Compute route_dict from input layer.
        :param source: Input from layer
        :type source: QgsProcessingParameterFeatureSource
        :param source_field: ID field from layer.
        :type source_field: QgsField
        :param destination: Input to layer.
        :type destination: QgsProcessingParameterFeatureSource
        :param destination_field: ID field to layer.
        :type destination_field: QgsField
        :returns: route_dict with coordinates and ID values
        :rtype: dict
        """
        route_dict = dict()
        # Geometries are reprojected to WGS84, as required by the ORS API.
        source_feats = sorted(list(source.getFeatures()), key=sort_start)
        x_former_source = transform.transformToWGS(source.sourceCrs())
        route_dict['start'] = dict(
            geometries=[x_former_source.transform(feat.geometry().asPoint()) for feat in source_feats],
            values=[feat.attribute(source_field.name()) if source_field else feat.id() for feat in source_feats],
        )
        destination_feats = sorted(list(destination.getFeatures()), key=sort_end)
        x_former_destination = transform.transformToWGS(destination.sourceCrs())
        route_dict['end'] = dict(
            geometries=[x_former_destination.transform(feat.geometry().asPoint()) for feat in destination_feats],
            values=[feat.attribute(destination_field.name()) if destination_field else feat.id() for feat in
                    destination_feats
                    ],
        )
        return route_dict
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
import configparser
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
import logging
# Exit status codes recognised from the individual test scripts.
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
    # Scripts that are run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'wallet-hd.py',
    'walletbackup.py',
    # vv Tests less than 5m vv
    'p2p-fullblocktest.py',
    'fundrawtransaction.py',
    'p2p-compactblocks.py',
    'segwit.py',
    # vv Tests less than 2m vv
    'wallet.py',
    'wallet-accounts.py',
    'p2p-segwit.py',
    'wallet-dump.py',
    'listtransactions.py',
    # vv Tests less than 60s vv
    'sendheaders.py',
    'zapwallettxes.py',
    'importmulti.py',
    'mempool_limit.py',
    'merkle_blocks.py',
    'receivedby.py',
    'abandonconflict.py',
    'bip68-112-113-p2p.py',
    'rawtransactions.py',
    'reindex.py',
    # vv Tests less than 30s vv
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'getchaintips.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'httpbasics.py',
    'multi_rpc.py',
    'proxy_test.py',
    'signrawtransactions.py',
    'nodehandling.py',
    'decodescript.py',
    'blockchain.py',
    'disablewallet.py',
    'keypool.py',
    'p2p-mempool.py',
    'prioritise_transaction.py',
    'invalidblockrequest.py',
    'invalidtxrequest.py',
    'p2p-versionbits-warning.py',
    'preciousblock.py',
    'importprunedfunds.py',
    'signmessages.py',
    'nulldummy.py',
    'import-rescan.py',
    'bumpfee.py',
    'rpcnamedargs.py',
    'listsinceblock.py',
    'p2p-leaktests.py',
]
ZMQ_SCRIPTS = [
    # ZMQ test can only be run if bitcoin was built with zmq-enabled.
    # call test_runner.py with -nozmq to explicitly exclude these tests.
    'zmq_test.py']
EXTENDED_SCRIPTS = [
    # These tests are not run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'pruning.py',
    # vv Tests less than 20m vv
    'smartfees.py',
    # vv Tests less than 5m vv
    'maxuploadtarget.py',
    'mempool_packages.py',
    # vv Tests less than 2m vv
    'bip68-sequence.py',
    'getblocktemplate_longpoll.py',
    'p2p-timeouts.py',
    # vv Tests less than 60s vv
    'bip9-softforks.py',
    'p2p-feefilter.py',
    'rpcbind_test.py',
    # vv Tests less than 30s vv
    'assumevalid.py',
    'bip65-cltv.py',
    'bip65-cltv-p2p.py',
    'bipdersig-p2p.py',
    'bipdersig.py',
    'getblocktemplate_proposals.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    'forknotify.py',
    'invalidateblock.py',
    'maxblocksinflight.py',
    'p2p-acceptblock.py',
    'replace-by-fee.py',
]
# Entries may carry per-script arguments (e.g. 'txn_clone.py --mineblock').
ALL_SCRIPTS = BASE_SCRIPTS + ZMQ_SCRIPTS + EXTENDED_SCRIPTS
NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]
def main():
    """Parse the command line, build the list of test scripts to run, and
    hand the list to run_tests().

    Unknown positional arguments select individual tests by name (with or
    without the .py extension); unknown '--' arguments are passed through
    unchanged to every test script."""
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
    Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    # Fixed typo: "comma-seperated" -> "comma-separated" in user-facing help.
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude. Do not include the .py extension in the name.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
    parser.add_argument('--nozmq', action='store_true', help='do not run the zmq tests')
    args, unknown_args = parser.parse_known_args()
    # Create a set to store arguments and create the passon string
    tests = set(arg for arg in unknown_args if arg[:2] != "--")
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
    # Read config generated by configure.
    config = configparser.ConfigParser()
    config.read_file(open(os.path.dirname(__file__) + "/config.ini"))
    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)
    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
    enable_zmq = config["components"].getboolean("ENABLE_ZMQ") and not args.nozmq
    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
        print("Tests currently disabled on Windows by default. Use --force option to enable")
        sys.exit(0)
    if not (enable_wallet and enable_utils and enable_bitcoind):
        print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
        print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
        sys.exit(0)
    # python3-zmq may not be installed. Handle this gracefully and with some helpful info
    if enable_zmq:
        try:
            import zmq
        except ImportError:
            print("ERROR: \"import zmq\" failed. Use -nozmq to run without the ZMQ tests."
                  "To run zmq tests, see dependency info in /test/README.md.")
            raise
    # Build list of tests
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        test_list = [t for t in ALL_SCRIPTS if
                     (t in tests or re.sub(".py$", "", t) in tests)]
    else:
        # No individual tests have been specified. Run base tests, and
        # optionally ZMQ tests and extended tests.
        test_list = BASE_SCRIPTS
        if enable_zmq:
            test_list += ZMQ_SCRIPTS
        if args.extended:
            test_list += EXTENDED_SCRIPTS
        # TODO: BASE_SCRIPTS and EXTENDED_SCRIPTS are sorted by runtime
        # (for parallel running efficiency). This combined list is no
        # longer sorted.
    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        for exclude_test in args.exclude.split(','):
            if exclude_test + ".py" in test_list:
                test_list.remove(exclude_test + ".py")
    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)
    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
        sys.exit(0)
    check_script_list(config["environment"]["SRCDIR"])
    run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], args.jobs, args.coverage, passon_args)
def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=False, args=[]):
    """Run every script in test_list (up to `jobs` in parallel), print a
    per-test summary table and exit the process with a non-zero status if
    any test failed.

    NOTE(review): `args=[]` is a mutable default argument, but it is only
    read (never mutated) below, so it is harmless here.
    """
    BOLD = ("","")
    if os.name == 'posix':
        # primitive formatting on supported
        # terminal via ANSI escape sequences:
        BOLD = ('\033[0m', '\033[1m')
    #Set env vars
    if "BITCOIND" not in os.environ:
        os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext
    tests_dir = src_dir + '/test/functional/'
    flags = ["--srcdir={}/src".format(build_dir)] + args
    flags.append("--cachedir=%s/test/cache" % build_dir)
    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None
    if len(test_list) > 1 and jobs > 1:
        # Populate cache
        subprocess.check_output([tests_dir + 'create_cache.py'] + flags)
    #Run Tests
    all_passed = True
    time_sum = 0
    time0 = time.time()
    job_queue = TestHandler(jobs, tests_dir, test_list, flags)
    max_len_name = len(max(test_list, key=len))
    results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS  ", "DURATION") + BOLD[0]
    # Collect one result per scheduled test; get_next() blocks until the
    # next subprocess finishes.
    for _ in range(len(test_list)):
        (name, stdout, stderr, status, duration) = job_queue.get_next()
        all_passed = all_passed and status != "Failed"
        time_sum += duration
        if status == "Passed":
            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], name, BOLD[0], duration))
        elif status == "Skipped":
            logging.debug("\n%s%s%s skipped" % (BOLD[1], name, BOLD[0]))
        else:
            # Failures always print, even in --quiet mode.
            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], name, BOLD[0], duration))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
        results += "%s | %s | %s s\n" % (name.ljust(max_len_name), status.ljust(7), duration)
    results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(7), time_sum) + BOLD[0]
    print(results)
    print("\nRuntime: %s s" % (int(time.time() - time0)))
    if coverage:
        coverage.report_rpc_coverage()
        logging.debug("Cleaning up coverage data")
        coverage.cleanup()
    # Exit status 0 only if every test passed.
    sys.exit(not all_passed)
class TestHandler:
    """
    Trigger the test scripts passed in via the list, keeping up to
    num_tests_parallel subprocesses running at once.
    """
    def __init__(self, num_tests_parallel, tests_dir, test_list=None, flags=None):
        assert(num_tests_parallel >= 1)
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        # In case there is a graveyard of zombie bitcoinds, we can apply a
        # pseudorandom offset to hopefully jump over them.
        # (625 is PORT_RANGE/MAX_NODES)
        self.portseed_offset = int(time.time() * 1000) % 625
        self.jobs = []
    def get_next(self):
        # Top up the pool of running subprocesses before waiting.
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            t = self.test_list.pop(0)
            port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = t.split()
            self.jobs.append((t,
                              time.time(),
                              subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + port_seed,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        # Poll until any running subprocess terminates, then classify its
        # result by exit code and captured stderr.
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for j in self.jobs:
                (name, time0, proc, log_out, log_err) = j
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(j)
                    return name, stdout, stderr, status, int(time.time() - time0)
            print('.', end='', flush=True)
def check_script_list(src_dir):
    """Check scripts directory.

    Check that there are no scripts in the functional tests directory which
    are not being run by test_runner.py (this file). Exits with status 1 if
    any unlisted script is found, so CI fails loudly instead of silently
    skipping tests."""
    script_dir = src_dir + '/test/functional/'
    python_files = {t for t in os.listdir(script_dir) if t.endswith(".py")}
    # Strip per-script arguments (e.g. 'txn_clone.py --mineblock') before comparing.
    run_scripts = {s.split()[0] for s in ALL_SCRIPTS + NON_SCRIPTS}
    missed_tests = list(python_files - run_scripts)
    if len(missed_tests) != 0:
        print("The following scripts are not being run:" + str(missed_tests))
        print("Check the test lists in test_runner.py")
        sys.exit(1)
class RPCCoverage(object):
    """
    Coverage reporting utilities for test_runner.

    Each test script subprocess writes coverage files into a shared
    temporary directory. Those files list the RPC commands invoked during
    testing; the directory also holds a reference list of all RPC commands
    per `bitcoin-cli help` (`rpc_interface.txt`). After all tests complete,
    the invoked commands are combined and diff'd against the reference list
    to calculate uncovered RPC commands.

    See also: test/functional/test_framework/coverage.py
    """
    def __init__(self):
        # Scratch directory shared with the test subprocesses via self.flag.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        missing = self._get_uncovered_rpc_commands()
        if missing:
            print("Uncovered RPC commands:")
            print("".join((" - %s\n" % cmd) for cmd in sorted(missing)))
        else:
            print("All RPC commands covered.")

    def cleanup(self):
        # Remove the scratch directory and everything in it.
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test-framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'
        ref_path = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(ref_path):
            raise RuntimeError("No coverage reference found")
        with open(ref_path, 'r') as ref_file:
            all_cmds = set(line.strip() for line in ref_file.readlines())
        coverage_paths = set()
        for root, dirs, files in os.walk(self.dir):
            for fname in files:
                if fname.startswith(coverage_file_prefix):
                    coverage_paths.add(os.path.join(root, fname))
        covered_cmds = set()
        for path in coverage_paths:
            with open(path, 'r') as cov_file:
                covered_cmds.update(line.strip() for line in cov_file.readlines())
        return all_cmds - covered_cmds
# Script entry point.
if __name__ == '__main__':
    main()
| |
#! /usr/bin/env python
'''
IKFast Plugin Generator for MoveIt!
Creates a kinematics plugin using the output of IKFast from OpenRAVE.
This plugin and the move_group node can be used as a general
kinematics service, from within the moveit planning environment, or in
your own ROS node.
Author: Dave Coleman, CU Boulder
Based heavily on the arm_kinematic_tools package by Jeremy Zoss, SwRI
and the arm_navigation plugin generator by David Butterworth, KAIST
Date: March 2013
'''
'''
Copyright (c) 2013, Jeremy Zoss, SwRI
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Willow Garage, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import glob
import sys
import roslib
import re
import os
import yaml
from lxml import etree
import shutil
plugin_gen_pkg = 'moveit_ikfast' # package containing this file
# Allowed search modes, see SEARCH_MODE enum in template file
search_modes = ['OPTIMIZE_MAX_JOINT', 'OPTIMIZE_FREE_JOINT' ]
if __name__ == '__main__':
    # Overall flow: parse CLI args -> locate the moveit_config and plugin
    # packages -> read the SRDF and pick the planning group -> copy the
    # IKFast solver + header into the plugin package -> generate plugin
    # source, plugin-description XML and CMakeLists from templates ->
    # patch package.xml and kinematics.yaml -> write an update script.
    # Check input arguments
    try:
        robot_name = sys.argv[1]
        planning_group_name = sys.argv[2]
        moveit_plugin_pkg = sys.argv[3]
        if len(sys.argv) == 6:
            # 5 user args: explicit search mode given before the output path.
            ikfast_output_file = sys.argv[5]
            search_mode = sys.argv[4]
            if search_mode not in search_modes:
                print 'Invalid search mode. Allowed values: ', search_modes
                raise Exception()
        elif len(sys.argv) == 5:
            # 4 user args: fall back to the default search mode.
            search_mode = search_modes[0];
            print "Warning: The default search has changed from OPTIMIZE_FREE_JOINT to now %s!" % (search_mode)
            ikfast_output_file = sys.argv[4]
        else:
            raise Exception()
    # NOTE(review): bare 'except' deliberately turns any argument problem
    # into the usage message, but it also hides unexpected tracebacks.
    except:
        print("\nUsage: create_ikfast_plugin.py <yourrobot_name> <planning_group_name> <moveit_plugin_pkg> [<search_mode>] <ikfast_output_path>\n")
        sys.exit(-1)
    print '\nIKFast Plugin Generator'
    # Setup key package directories
    try:
        #plan_pkg = robot_name + '_moveit_config'
        # NOTE(review): the package name is hard-coded to 'moveit_config'
        # rather than derived from robot_name (see commented line above) —
        # confirm this is intentional for this workspace layout.
        plan_pkg = 'moveit_config'
        plan_pkg_dir = roslib.packages.get_pkg_dir(plan_pkg)
        print 'Loading robot from \''+plan_pkg+'\' package ... '
    except:
        print '\nERROR: can\'t find package '+plan_pkg+'\n'
        sys.exit(-1)
    try:
        plugin_pkg = moveit_plugin_pkg
        plugin_pkg_dir = roslib.packages.get_pkg_dir(plugin_pkg)
        print 'Creating plugin in \''+plugin_pkg+'\' package ... '
    except:
        print '\nERROR: can\'t find package '+plugin_pkg+'\n'
        sys.exit(-1)
    # Check for at least 1 planning group
    try:
        # If exactly one SRDF exists use it, otherwise expect one named
        # after the robot.
        srdf_files = glob.glob(plan_pkg_dir+'/config/*.srdf')
        if (len(srdf_files) == 1):
            srdf_file_name = srdf_files[0]
        else:
            srdf_file_name = plan_pkg_dir + '/config/' + robot_name + '.srdf'
        srdf = etree.parse(srdf_file_name).getroot()
    except:
        print("\nERROR: unable to parse robot configuration file\n" + srdf_file_name + "\n")
        sys.exit(-1)
    try:
        # Sanity-check that the SRDF really describes the requested robot.
        if (robot_name != srdf.get('name')):
            print '\nERROR: non-matching robot name found in ' + srdf_file_name + '.' \
                + ' Expected \'' + robot_name + '\',' + ' found \''+srdf.get('name')+'\''
            raise
        groups = srdf.findall('group')
        if(len(groups) < 1) : # No groups
            raise
        if groups[0].get('name') == None: # Group name is blank
            raise
    except:
        print("\nERROR: need at least 1 planning group in robot planning description ")
        print srdf_file_name + '\n'
        sys.exit(-1)
    print ' found ' + str(len(groups)) + ' planning groups: ' \
        + ", ".join([g.get('name') for g in groups])
    # Select manipulator arm group (matched case-insensitively)
    planning_group = None
    for g in groups:
        foundName = (g.get('name').lower() == planning_group_name.lower())
        if (foundName):
            planning_group = g
            break
    if planning_group is None:
        print '\nERROR: could not find planning group ' + planning_group_name + ' in SRDF.\n'
        sys.exit(-1)
    print ' found group \'' + planning_group_name + '\''
    # Create src and include folders in target package
    plugin_pkg_src_dir = plugin_pkg_dir+'/src/'
    plugin_pkg_include_dir = plugin_pkg_dir+'/include/'
    if not os.path.exists(plugin_pkg_src_dir):
        os.makedirs(plugin_pkg_src_dir)
    if not os.path.exists(plugin_pkg_include_dir):
        os.makedirs(plugin_pkg_include_dir)
    # Check for source code generated by IKFast
    if not os.path.exists(ikfast_output_file):
        print '\nERROR: can\'t find IKFast source code at \'' + \
            ikfast_output_file + '\'\n'
        print 'Make sure this input argument is correct \n'
        sys.exit(-1)
    # Copy the source code generated by IKFast into our src folder
    solver_file_name = plugin_pkg_dir+'/src/'+robot_name+'_'+planning_group_name+'_ikfast_solver.cpp'
    # Check if they are the same file - if so, skip
    skip = False
    # NOTE(review): '&' only works here because both operands are bools;
    # 'and' is the conventional (short-circuiting) operator.
    if os.path.exists(ikfast_output_file) & os.path.exists(solver_file_name):
        if os.path.samefile(ikfast_output_file, solver_file_name):
            print 'Skipping copying ' + solver_file_name + ' since it is already in the correct location'
            skip = True
    if not skip:
        shutil.copy2(ikfast_output_file,solver_file_name)
    # Error check
    if not os.path.exists(solver_file_name):
        print '\nERROR: Unable to copy IKFast source code from \'' + ikfast_output_file + '\'' + ' to \'' + solver_file_name + '\''
        print 'Manually copy the source file generated by IKFast to this location \n'
        sys.exit(-1)
    # Detect version of IKFast used to generate solver code by scanning for
    # the '/// ikfast version N generated ...' banner comment.
    solver_version = 0
    with open(solver_file_name,'r') as src:
        for line in src:
            if line.startswith('/// ikfast version'):
                line_search = re.search('ikfast version (.*) generated', line)
                if line_search:
                    # base 0: accepts decimal, hex (0x..) or octal literals
                    solver_version = int(line_search.group(1), 0)
                    break
    print ' found source code generated by IKFast version ' + str(solver_version)
    # Get template folder location
    try:
        plugin_gen_dir = roslib.packages.get_pkg_dir(plugin_gen_pkg)
    except:
        print '\nERROR: can\'t find package '+plugin_gen_pkg+' \n'
        sys.exit(-1)
    # Chose template depending on IKFast version
    if solver_version >= 56:
        template_version = 61
    else:
        print '\nERROR this converter is not made for IKFast 54 or anything but 61'
        sys.exit(-1)
    # Check if IKFast header file exists
    template_header_file = plugin_gen_dir + '/templates/ikfast.h'
    if not os.path.exists(template_header_file):
        print '\nERROR: can\'t find ikfast header file at \'' + template_header_file + '\'\n'
        sys.exit(-1)
    # Copy the IKFast header file into the include directory
    header_file_name = plugin_pkg_dir+'/include/ikfast.h'
    shutil.copy2(template_header_file,header_file_name)
    if not os.path.exists(header_file_name):
        print '\nERROR: Unable to copy IKFast header file from \'' + \
            template_header_file + '\'' + ' to \'' + header_file_name + '\' \n'
        print 'Manually copy ikfast.h to this location \n'
        sys.exit(-1)
    # Check if template exists
    template_file_name = plugin_gen_dir + '/templates/ikfast' + str(template_version) + '_moveit_plugin_template.cpp'
    if not os.path.exists(template_file_name):
        print '\nERROR: can\'t find template file at \'' + template_file_name + '\'\n'
        sys.exit(-1)
    # Create plugin source from template by substituting the placeholder
    # tokens with the concrete robot/group/search-mode values.
    # NOTE(review): template_file_data is never closed explicitly; harmless
    # for a short-lived script but 'with open(...)' would be cleaner.
    template_file_data = open(template_file_name, 'r')
    template_text = template_file_data.read()
    template_text = re.sub('_ROBOT_NAME_', robot_name, template_text)
    template_text = re.sub('_GROUP_NAME_', planning_group_name, template_text)
    template_text = re.sub('_SEARCH_MODE_', search_mode, template_text)
    plugin_file_base = robot_name + '_' + planning_group_name + '_ikfast_moveit_plugin.cpp'
    plugin_file_name = plugin_pkg_dir + '/src/' + plugin_file_base
    with open(plugin_file_name,'w') as f:
        f.write(template_text)
    print '\nCreated plugin file at \'' + plugin_file_name + '\''
    # Create plugin definition .xml file
    ik_library_name = robot_name + "_" + planning_group_name + "_moveit_ikfast_plugin"
    plugin_name = robot_name + '_' + planning_group_name + \
        '_kinematics/IKFastKinematicsPlugin'
    plugin_def = etree.Element("library", path="lib/lib"+ik_library_name)
    cl = etree.SubElement(plugin_def, "class")
    cl.set("name", plugin_name)
    cl.set("type", 'ikfast_kinematics_plugin::IKFastKinematicsPlugin')
    cl.set("base_class_type", "kinematics::KinematicsBase")
    desc = etree.SubElement(cl, "description")
    desc.text = 'IKFast'+str(template_version)+' plugin for closed-form kinematics'
    # Write plugin definition to file
    def_file_base = ik_library_name + "_description.xml"
    def_file_name = plugin_pkg_dir + "/" + def_file_base
    with open(def_file_name,'w') as f:
        etree.ElementTree(plugin_def).write(f, xml_declaration=True, pretty_print=True)
    print '\nCreated plugin definition at: \''+def_file_name+'\''
    # Check if CMakeLists file exists
    cmake_template_file = plugin_gen_dir+"/templates/CMakeLists.txt"
    if not os.path.exists(cmake_template_file):
        print '\nERROR: can\'t find CMakeLists template file at \'' + cmake_template_file + '\'\n'
        sys.exit(-1)
    # Create new CMakeLists file
    cmake_file = plugin_pkg_dir+'/CMakeLists.txt'
    # Create plugin source from template
    template_file_data = open(cmake_template_file, 'r')
    template_text = template_file_data.read()
    template_text = re.sub('_ROBOT_NAME_', robot_name, template_text)
    template_text = re.sub('_GROUP_NAME_', planning_group_name, template_text)
    template_text = re.sub('_PACKAGE_NAME_', moveit_plugin_pkg, template_text)
    template_text = re.sub('_LIBRARY_NAME_', ik_library_name, template_text)
    with open(cmake_file,'w') as f:
        f.write(template_text)
    print '\nOverwrote CMakeLists file at \'' + cmake_file + '\''
    # Add plugin export to package manifest
    parser = etree.XMLParser(remove_blank_text=True)
    package_file_name = plugin_pkg_dir+"/package.xml"
    package_xml = etree.parse(package_file_name, parser)
    # Make sure at least all required dependencies are in the depends lists
    build_deps = ["liblapack-dev", "moveit_core", "pluginlib", "roscpp", "tf_conversions"]
    run_deps = ["liblapack-dev", "moveit_core", "pluginlib", "roscpp", "tf_conversions"]
    def update_deps(reqd_deps, req_type, e_parent):
        """Ensure every name in reqd_deps has a <req_type> element under
        e_parent, adding any that are missing.

        Returns the set of dependency names that had to be added (empty
        set, i.e. falsy, when the manifest was already complete)."""
        curr_deps = [e.text for e in e_parent.findall(req_type)]
        missing_deps = set(reqd_deps) - set(curr_deps)
        for d in missing_deps:
            etree.SubElement(e_parent, req_type).text = d
        return missing_deps
    # empty sets evaluate to false
    modified_pkg = update_deps(build_deps, "build_depend", package_xml.getroot())
    modified_pkg |= update_deps(run_deps, "run_depend", package_xml.getroot())
    if modified_pkg:
        with open(package_file_name,"w") as f:
            package_xml.write(f, xml_declaration=True, pretty_print=True)
        print '\nModified package.xml at \''+package_file_name+'\''
    # Check that plugin definition file is in the export list
    new_export = etree.Element("moveit_core", \
        plugin="${prefix}/"+def_file_base)
    export_element = package_xml.getroot().find("export")
    if export_element == None:
        export_element = etree.SubElement(package_xml.getroot(), "export")
    found = False
    # Compare serialized forms to detect an identical pre-existing export.
    for el in export_element.findall("moveit_core"):
        found = (etree.tostring(new_export) == etree.tostring(el))
        if found: break
    if not found:
        export_element.append(new_export)
        with open(package_file_name,"w") as f:
            package_xml.write(f, xml_declaration=True, pretty_print=True)
        print '\nModified package.xml at \''+package_file_name+'\''
    # Modify kinematics.yaml file so the group uses the new IKFast solver
    kin_yaml_file_name = plan_pkg_dir+"/config/kinematics.yaml"
    with open(kin_yaml_file_name, 'r') as f:
        kin_yaml_data = yaml.safe_load(f)
    kin_yaml_data[planning_group_name]["kinematics_solver"] = plugin_name
    with open(kin_yaml_file_name, 'w') as f:
        yaml.dump(kin_yaml_data, f,default_flow_style=False)
    print '\nModified kinematics.yaml at ' + kin_yaml_file_name
    # Create a script for easily updating the plugin in the future in case the plugin needs to be updated
    easy_script_file_name = "update_ikfast_plugin.sh"
    easy_script_file_path = plugin_pkg_dir + "/" + easy_script_file_name
    with open(easy_script_file_path,'w') as f:
        f.write("rosrun moveit_ikfast create_ikfast_moveit_plugin.py"
            + " " + robot_name
            + " " + planning_group_name
            + " " + plugin_pkg
            + " " + solver_file_name )
    print '\nCreated update plugin script at '+easy_script_file_path
| |
# A set of functions helpful for analysing the OD matrices produced from tools
# such as OpenTripPlanner, NetView, etc.
# Patrick Sunter, 2013-2014.
# Uses OGR library for shape file manipulation aspects.
import csv
import os
import operator
import itertools
import numpy
import taz_files
### General utility functions - OTP
def readLatLons(otpfilename, nPoints):
    """Read a set of latitude and longitude points for a set of
    Travel Analysis Zones (TAZs) - from the first columns of an OTP O-D
    matrix file.

    :param otpfilename: path to the OTP CSV (one header row, then rows
        whose first three columns are TAZ ID, lat, lon).
    :param nPoints: number of TAZs; IDs are assumed to lie in 1..nPoints.
    :returns: numpy array of shape (nPoints+1, 2) where the primary index
        is the ID of each TAZ (row 0 is unused and stays zero).
    """
    latlons = numpy.zeros((nPoints + 1, 2))
    # 'with' guarantees the file is closed even if a row fails to parse
    # (the original leaked the handle on exceptions).
    with open(otpfilename) as otpfile:
        otpreader = csv.reader(otpfile, delimiter=',')
        print("Reading Lat-lons from OTP CSV ... ")
        next(otpreader)  # skip header row
        for row in otpreader:
            originID = int(row[0])
            latlons[originID] = (float(row[1]), float(row[2]))
    print("Done.")
    return latlons
### General utility functions - Netview
def readNVRouteIDs(nvfilename, nroutes):
    """Return an (nroutes, 2) array containing the OriginID, DestID pair
    for each route in the Netview CSV file.

    IDs in the file carry a one-character prefix (e.g. 'C12'), which is
    stripped before conversion to int.

    Fixes vs. original: the description string was assigned to a local
    variable instead of being the docstring, the parameter 'nroutes' was
    clobbered by a dead 'nroutes = 0' assignment, and the file handle was
    never closed on error.
    """
    nvroutes = numpy.zeros((nroutes, 2))
    with open(nvfilename) as nvfile:
        nvreader = csv.reader(nvfile, delimiter=';')
        # Skip the three mostly-blank lines at the start, then the header.
        for _ in range(4):
            next(nvreader)
        # OK, now process the remaining rows
        for ii, row in enumerate(nvreader):
            originID = int(row[0][1:])
            destID = int(row[1][1:])
            nvroutes[ii] = [originID, destID]
    return nvroutes
### Entire OD Matrix reading
def readOTPMatrix(otpfilename, mat):
    """Read in an OD matrix from results created by OpenTripPlanner, and
    then post-processed by the make_od_matrix.py script.

    :param otpfilename: path to the post-processed OTP CSV. The header row
        lists destination IDs from column 4 onward; each data row starts
        with the origin ID and carries times in the matching columns.
    :param mat: numpy array, already sized to hold every origin and
        destination ID; filled in place with times rounded to the nearest
        whole second.
    """
    with open(otpfilename) as otpfile:
        otpreader = csv.reader(otpfile, delimiter=',')
        print("Reading OTP O-D matrix from CSV ...")
        # Build the destination-ID lookup from the header row.
        header_row = next(otpreader)
        destlookups = header_row[3:]
        for ii, row in enumerate(otpreader):
            if ii % 100 == 0:
                print("Reading %dth row of O-Ds" % ii)
            originID = int(row[0])
            for jj, time in enumerate(row[3:]):
                # +0.5 rounds the (possibly fractional) seconds to nearest int.
                mat[originID, int(destlookups[jj])] = int(float(time) + 0.5)
    print("Done.")
    return
def readNVMatrix(nvfilename, mat):
    """Read in an OD matrix in the format created by the Netview routing
    tool. Matrix must be of the correct size.

    Note: converts the Netview output (in minutes) into seconds.

    :param nvfilename: Netview CSV (';'-delimited, 3 mostly blank lines,
        a header row, then one row per route; column 10 is time in minutes).
    :param mat: numpy array filled in place, indexed [originID, destID].
    :returns: number of routes read.
    """
    nroutes = 0
    with open(nvfilename) as nvfile:
        nvreader = csv.reader(nvfile, delimiter=';')
        print("Reading Netview O-D matrix ...")
        # Skip the three mostly-blank lines at the start, then the header.
        for _ in range(4):
            next(nvreader)
        # OK, now process the remaining rows
        for ii, row in enumerate(nvreader):
            if ii % 1000 == 0:
                print("Reading and processing %dth row ... " % ii)
            originID = int(row[0][1:])  # IDs carry a one-char prefix, e.g. 'C12'
            destID = int(row[1][1:])
            mat[originID, destID] = float(row[9]) * 60.0  # minutes -> seconds
            nroutes += 1
    print("Done.")
    return nroutes
# High-level analysis functions.
def saveComparisonFile(routesArray, od_mat_1, od_mat_2, compfilename,
        case_names):
    """Write a CSV comparing per-route travel times between two O-D matrices.

    Each output row holds the origin/destination IDs, the two times, and
    their absolute/relative differences. Times of 0, -1 or -2 are treated
    as invalid (e.g. null OTP results) and the difference columns are
    written as "NA".

    :param routesArray: iterable of (originID, destID) pairs.
    :param od_mat_1: first O-D time matrix (numpy, indexed [orig, dest]).
    :param od_mat_2: second O-D time matrix.
    :param compfilename: output CSV path.
    :param case_names: pair of short labels for the two cases.
    """
    # 'with' closes the output file even if a matrix lookup raises.
    with open(compfilename, "w") as compfile:
        compwriter = csv.writer(compfile, delimiter=',')
        # Header row
        compwriter.writerow(['OriginID', 'DestID', '%s Time (s)' % case_names[0],
            '%s Time (s)' % case_names[1], 'Difference (s)', 'Abs. Diff (s)',
            'Abs. Diff (%)', 'Diff (%)'])
        for route in routesArray:
            originID = route[0]
            destID = route[1]
            time_1 = int(od_mat_1[originID, destID])
            time_2 = int(od_mat_2[originID, destID])
            # Checking for OTP times that are null for some reason.
            # NB: ideally would be good to keep some info with a matrix so
            # we can interpret if it was created by OTP etc how to handle.
            if time_1 in (0, -1, -2) or time_2 in (0, -1, -2):
                diff = "NA"
                diff_percent = "NA"
                absdiff = "NA"
                absdiff_percent = "NA"
            else:
                diff = time_1 - time_2
                # NOTE(review): headers say '(%)' but these are fractions
                # (0..1), preserved from the original behaviour.
                diff_percent = diff / float(time_1)
                absdiff = abs(diff)
                absdiff_percent = absdiff / float(time_1)
            compwriter.writerow([originID, destID, time_1, time_2, diff,
                absdiff, absdiff_percent, diff_percent])
    return
def readComparisonFile(compfilename):
    """Read in a comparison file created by saveComparisonFile().

    Returns a tuple containing 3 sequences:- the first being the routes
    in terms of TAZ O-D pairs, the second being times for those routes
    in the first case, the third being times in the second case.
    Requires format of saved comparison file's first 4 columns to be origin
    ID, dest ID, time in case 1, time in case 2
    (e.g. case 1 being OTP, case 2 being Netview).

    Fixes vs. original: the first pass that merely counted rows (into an
    unused variable) has been removed, so the file is read once instead of
    twice, and the handle is closed even on parse errors.
    """
    routesArray = []
    case1Times = []
    case2Times = []
    with open(compfilename) as compfile:
        compreader = csv.reader(compfile, delimiter=',')
        next(compreader)  # skip headers
        for row in compreader:
            routesArray.append((int(row[0]), int(row[1])))
            case1Times.append(int(row[2]))
            case2Times.append(int(row[3]))
    return routesArray, case1Times, case2Times
def createShapefile(routesArray, lonlats, case1Times, case2Times, caseNames,
        shapefilename):
    """Creates a Shape file stating the difference between times in two
    OD matrices, which have been 'unrolled' as large arrays listing
    travel time between OD pairs. 'caseNames' should be short strings
    describing the cases, eg. 'OTP' and 'NV'.
    Saves results to a shapefile determined by shapefilename.
    N.B. :- thanks for overall strategy here are due to author of
    https://github.com/glennon/FlowpyGIS"""
    # Imported here (not at module level) so the OGR dependency is only
    # required when shapefile output is actually requested.
    from osgeo import ogr
    import sys  # BUG FIX: 'sys' was used below but never imported in this module
    print("Creating shapefile of route lines with time attributes to file"
        " %s ..." % (shapefilename))
    driver = ogr.GetDriverByName('ESRI Shapefile')
    # create a new data source and layer; delete any existing file first.
    if os.path.exists(shapefilename):
        driver.DeleteDataSource(shapefilename)
    ds = driver.CreateDataSource(shapefilename)
    if ds is None:
        print('Could not create file')
        sys.exit(1)
    c1TimeFieldName = 't %s' % caseNames[0]
    c2TimeFieldName = 't %s' % caseNames[1]
    layer = ds.CreateLayer('routeinfos', geom_type=ogr.wkbLineString)
    # One attribute column per field of interest.
    for fname in ('OriginID', 'DestID', c1TimeFieldName, c2TimeFieldName,
            'Diff'):
        layer.CreateField(ogr.FieldDefn(fname, ogr.OFTReal))
    # END setup creation of shapefile
    featureDefn = layer.GetLayerDefn()  # loop-invariant, hoisted
    for ii, routePair in enumerate(routesArray):
        originID = routePair[0]
        destID = routePair[1]
        case1time = case1Times[ii]
        case2time = case2Times[ii]
        # Straight line between the origin and destination TAZ centroids.
        linester = ogr.Geometry(ogr.wkbLineString)
        linester.AddPoint(lonlats[originID][0], lonlats[originID][1])
        linester.AddPoint(lonlats[destID][0], lonlats[destID][1])
        feature = ogr.Feature(featureDefn)
        feature.SetGeometry(linester)
        feature.SetField('OriginID', originID)
        feature.SetField('DestID', destID)
        feature.SetField(c1TimeFieldName, case1time)
        feature.SetField(c2TimeFieldName, case2time)
        if case1time in (0, -1, -2) or case2time in (0, -1, -2):
            diff = 0  # sentinel times mark invalid routes: no meaningful diff
        else:
            diff = case1time - case2time
        feature.SetField('Diff', diff)
        layer.CreateFeature(feature)
        # Destroy per-iteration objects promptly; the original only
        # destroyed the final pair after the loop, leaking the rest.
        feature.Destroy()
        linester.Destroy()
    # close the data source, flushing features to disk
    ds.Destroy()
    print("Done.")
    return
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `traverse` visitor for processing documentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import six
class DocGeneratorVisitor(object):
  """A visitor that generates docs for a python object when __call__ed."""
  def __init__(self, root_name=''):
    """Make a visitor.
    As this visitor is starting its traversal at a module or class, it will not
    be told the name of that object during traversal. `root_name` is the name it
    should use for that object, effectively prefixing all names with
    "root_name.".
    Args:
      root_name: The name of the root module/class.
    """
    self._root_name = root_name or ''
    self._prefix = (root_name + '.') if root_name else ''
    self._index = {}  # full name -> python object, filled by __call__
    self._tree = {}   # full name -> list of member names, filled by __call__
    # The three fields below are computed lazily by _maybe_find_duplicates();
    # None means "not computed yet".
    self._reverse_index = None
    self._duplicates = None
    self._duplicate_of = None
  @property
  def index(self):
    """A map from fully qualified names to objects to be documented.
    The index is filled when the visitor is passed to `traverse`.
    Returns:
      The index filled by traversal.
    """
    return self._index
  @property
  def tree(self):
    """A map from fully qualified names to all its child names for traversal.
    The full name to member names map is filled when the visitor is passed to
    `traverse`.
    Returns:
      The full name to member name map filled by traversal.
    """
    return self._tree
  @property
  def reverse_index(self):
    """A map from `id(object)` to the preferred fully qualified name.
    This map only contains non-primitive objects (no numbers or strings) present
    in `index` (for primitive objects, `id()` doesn't quite do the right thing).
    It is computed when it, `duplicate_of`, or `duplicates` are first accessed.
    Returns:
      The `id(object)` to full name map.
    """
    self._maybe_find_duplicates()
    return self._reverse_index
  @property
  def duplicate_of(self):
    """A map from duplicate full names to a preferred fully qualified name.
    This map only contains names that are not themselves a preferred name.
    It is computed when it, `reverse_index`, or `duplicates` are first accessed.
    Returns:
      The map from duplicate name to preferred name.
    """
    self._maybe_find_duplicates()
    return self._duplicate_of
  @property
  def duplicates(self):
    """A map from preferred full names to a list of all names for this symbol.
    This function returns a map from preferred (master) name for a symbol to a
    lexicographically sorted list of all aliases for that name (incl. the master
    name). Symbols without duplicate names do not appear in this map.
    It is computed when it, `reverse_index`, or `duplicate_of` are first
    accessed.
    Returns:
      The map from master name to list of all duplicate names.
    """
    self._maybe_find_duplicates()
    return self._duplicates
  def __call__(self, parent_name, parent, children):
    """Visitor interface, see `tensorflow/tools/common:traverse` for details.
    This method is called for each symbol found in a traversal using
    `tensorflow/tools/common:traverse`. It should not be called directly in
    user code.
    Args:
      parent_name: The fully qualified name of a symbol found during traversal.
      parent: The Python object referenced by `parent_name`.
      children: A list of `(name, py_object)` pairs enumerating, in alphabetical
        order, the children (as determined by `inspect.getmembers`) of `parent`.
        `name` is the local name of `py_object` in `parent`.
    Raises:
      RuntimeError: If this visitor is called with a `parent` that is not a
        class or module.
    """
    # An empty parent_name means we are at the traversal root itself.
    parent_name = self._prefix + parent_name if parent_name else self._root_name
    self._index[parent_name] = parent
    self._tree[parent_name] = []
    if inspect.ismodule(parent):
      print('module %s: %r' % (parent_name, parent))
    elif inspect.isclass(parent):
      print('class %s: %r' % (parent_name, parent))
    else:
      raise RuntimeError('Unexpected type in visitor -- %s: %r' %
                         (parent_name, parent))
    for name, child in children:
      full_name = '.'.join([parent_name, name]) if parent_name else name
      self._index[full_name] = child
      self._tree[parent_name].append(name)
  def _maybe_find_duplicates(self):
    """Compute data structures containing information about duplicates.
    Find duplicates in `index` and decide on one to be the "master" name.
    Computes a reverse_index mapping each object id to its master name.
    Also computes a map `duplicate_of` from aliases to their master name (the
    master name itself has no entry in this map), and a map `duplicates` from
    master names to a lexicographically sorted list of all aliases for that name
    (incl. the master name).
    All these are computed and set as fields if they haven't been already.
    """
    if self._reverse_index is not None:
      return
    # Maps the id of a symbol to its fully qualified name. For symbols that have
    # several aliases, this map contains the first one found.
    # We use id(py_object) to get a hashable value for py_object. Note all
    # objects in _index are in memory at the same time so this is safe.
    reverse_index = {}
    # Make a preliminary duplicates map. For all sets of duplicate names, it
    # maps the first name found to a list of all duplicate names.
    raw_duplicates = {}
    for full_name, py_object in six.iteritems(self._index):
      # We cannot use the duplicate mechanism for some constants, since e.g.,
      # id(c1) == id(c2) with c1=1, c2=1. This is unproblematic since constants
      # have no usable docstring and won't be documented automatically.
      if py_object is not None and not isinstance(
          py_object, six.integer_types + six.string_types +
          (six.binary_type, six.text_type, float, complex, bool)):
        object_id = id(py_object)
        if object_id in reverse_index:
          master_name = reverse_index[object_id]
          if master_name in raw_duplicates:
            raw_duplicates[master_name].append(full_name)
          else:
            raw_duplicates[master_name] = [master_name, full_name]
        else:
          reverse_index[object_id] = full_name
    # Decide on master names, rewire duplicates and make a duplicate_of map
    # mapping all non-master duplicates to the master name. The master symbol
    # does not have an entry in this map.
    duplicate_of = {}
    # Duplicates maps the main symbols to the set of all duplicates of that
    # symbol (incl. itself).
    duplicates = {}
    for names in raw_duplicates.values():
      names = sorted(names)
      # Choose the lexicographically first name with the minimum number of
      # submodules. This will prefer highest level namespace for any symbol.
      master_name = min(names, key=lambda name: name.count('.'))
      duplicates[master_name] = names
      for name in names:
        if name != master_name:
          duplicate_of[name] = master_name
      # Set the reverse index to the canonical name.
      reverse_index[id(self._index[master_name])] = master_name
    self._duplicate_of = duplicate_of
    self._duplicates = duplicates
    self._reverse_index = reverse_index
| |
# coding: utf-8
# pylint: disable = invalid-name, W0105, C0111, C0301
"""Scikit-Learn Wrapper interface for LightGBM."""
from __future__ import absolute_import
import numpy as np
from .basic import Dataset, LightGBMError
from .compat import (SKLEARN_INSTALLED, LGBMClassifierBase, LGBMDeprecated,
LGBMLabelEncoder, LGBMModelBase, LGBMRegressorBase, argc_,
range_)
from .engine import train
def _objective_function_wrapper(func):
    """Decorate an objective function.

    Note: for multi-class task, the y_pred is group by class_id first, then group by row_id
    if you want to get i-th row y_pred in j-th class, the access way is y_pred[j*num_data+i]
    and you should group grad and hess in this way as well

    Parameters
    ----------
    func: callable
        Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)``:
            y_true: array_like of shape [n_samples]
                The target values
            y_pred: array_like of shape [n_samples] or shape[n_samples * n_class] (for multi-class)
                The predicted values
            group: array_like
                group/query data, used for ranking task

    Returns
    -------
    new_func: callable
        The new objective function as expected by ``lightgbm.engine.train``.
        The signature is ``new_func(preds, dataset)``:
            preds: array_like, shape [n_samples] or shape[n_samples * n_class]
                The predicted values
            dataset: ``dataset``
                The training set from which the labels will be extracted using
                ``dataset.get_label()``
    """
    def inner(preds, dataset):
        """Call ``func`` with the right arity and weight its grad/hess output."""
        labels = dataset.get_label()
        argc = argc_(func)
        if argc == 2:
            grad, hess = func(labels, preds)
        elif argc == 3:
            grad, hess = func(labels, preds, dataset.get_group())
        else:
            raise TypeError("Self-defined objective function should have 2 or 3 arguments, got %d" % argc)
        # Apply per-sample weights to the objective's output, if the
        # dataset carries any. (The original used bare string literals as
        # comments here — no-op expression statements — now real comments.)
        weight = dataset.get_weight()
        if weight is not None:
            if len(weight) == len(grad):
                # Single-class case: one weight per grad/hess entry.
                grad = np.multiply(grad, weight)
                hess = np.multiply(hess, weight)
            else:
                # Multi-class case: grad/hess are laid out class-major
                # (idx = class * num_data + row), so each sample weight
                # applies once per class.
                num_data = len(weight)
                num_class = len(grad) // num_data
                if num_class * num_data != len(grad):
                    raise ValueError("Length of grad and hess should equal to num_class * num_data")
                for k in range_(num_class):
                    for i in range_(num_data):
                        idx = k * num_data + i
                        grad[idx] *= weight[i]
                        hess[idx] *= weight[i]
        return grad, hess
    return inner
def _eval_function_wrapper(func):
    """Adapt a user-supplied eval metric to LightGBM's internal signature.

    ``func`` may take (y_true, y_pred), (y_true, y_pred, weight) or
    (y_true, y_pred, weight, group) and must return a tuple of
    (eval_name: str, eval_result: float, is_bigger_better: bool).
    For multi-class tasks y_pred is laid out class-major: the prediction
    for row i in class j lives at y_pred[j * num_data + i].

    Returns
    -------
    callable with signature ``(preds, dataset)`` as expected by
    ``lightgbm.engine.train``; labels, weights and groups are pulled from
    the ``dataset`` argument via its getters.
    """
    def inner(preds, dataset):
        """Translate (preds, dataset) into the user metric's signature."""
        labels = dataset.get_label()
        n_args = argc_(func)
        if n_args == 2:
            return func(labels, preds)
        if n_args == 3:
            return func(labels, preds, dataset.get_weight())
        if n_args == 4:
            return func(labels, preds, dataset.get_weight(), dataset.get_group())
        raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % n_args)
    return inner
class LGBMModel(LGBMModelBase):
    def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
                 learning_rate=0.1, n_estimators=10, max_bin=255,
                 subsample_for_bin=50000, objective=None,
                 min_split_gain=0, min_child_weight=5, min_child_samples=10,
                 subsample=1, subsample_freq=1, colsample_bytree=1,
                 reg_alpha=0, reg_lambda=0, seed=0, nthread=-1, silent=True, **kwargs):
        """
        Implementation of the Scikit-Learn API for LightGBM.
        Parameters
        ----------
        boosting_type : string
            gbdt, traditional Gradient Boosting Decision Tree
            dart, Dropouts meet Multiple Additive Regression Trees
        num_leaves : int
            Maximum tree leaves for base learners.
        max_depth : int
            Maximum tree depth for base learners, -1 means no limit.
        learning_rate : float
            Boosting learning rate
        n_estimators : int
            Number of boosted trees to fit.
        max_bin : int
            Number of bucketed bin for feature values
        subsample_for_bin : int
            Number of samples for constructing bins.
        objective : string or callable
            Specify the learning task and the corresponding learning objective or
            a custom objective function to be used (see note below).
            default: binary for LGBMClassifier, lambdarank for LGBMRanker
        min_split_gain : float
            Minimum loss reduction required to make a further partition on a leaf node of the tree.
        min_child_weight : int
            Minimum sum of instance weight(hessian) needed in a child(leaf)
        min_child_samples : int
            Minimum number of data need in a child(leaf)
        subsample : float
            Subsample ratio of the training instance.
        subsample_freq : int
            frequency of subsample, <=0 means no enable
        colsample_bytree : float
            Subsample ratio of columns when constructing each tree.
        reg_alpha : float
            L1 regularization term on weights
        reg_lambda : float
            L2 regularization term on weights
        seed : int
            Random number seed.
        nthread : int
            Number of parallel threads
        silent : boolean
            Whether to print messages while running boosting.
        **kwargs : other parameters
            Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.
            Note: **kwargs is not supported in sklearn, it may cause unexpected issues.
        Note
        ----
        A custom objective function can be provided for the ``objective``
        parameter. In this case, it should have the signature
        ``objective(y_true, y_pred) -> grad, hess``
        or ``objective(y_true, y_pred, group) -> grad, hess``:
            y_true: array_like of shape [n_samples]
                The target values
            y_pred: array_like of shape [n_samples] or shape[n_samples * n_class]
                The predicted values
            group: array_like
                group/query data, used for ranking task
            grad: array_like of shape [n_samples] or shape[n_samples * n_class]
                The value of the gradient for each sample point.
            hess: array_like of shape [n_samples] or shape[n_samples * n_class]
                The value of the second derivative for each sample point
        for multi-class task, the y_pred is group by class_id first, then group by row_id
        if you want to get i-th row y_pred in j-th class, the access way is y_pred[j*num_data+i]
        and you should group grad and hess in this way as well
        """
        if not SKLEARN_INSTALLED:
            raise LightGBMError('Scikit-learn is required for this module')
        self.boosting_type = boosting_type
        # Pick a task-appropriate default objective based on the concrete
        # subclass being constructed.
        # NOTE(review): storing the resolved value (rather than the 'objective'
        # argument as passed) deviates from the usual sklearn convention that
        # __init__ stores params verbatim — confirm clone()/get_params round-trips.
        if objective is None:
            if isinstance(self, LGBMRegressor):
                self.objective = "regression"
            elif isinstance(self, LGBMClassifier):
                self.objective = "binary"
            elif isinstance(self, LGBMRanker):
                self.objective = "lambdarank"
            else:
                raise TypeError("Unknown LGBMModel type.")
        else:
            self.objective = objective
        self.num_leaves = num_leaves
        self.max_depth = max_depth
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.max_bin = max_bin
        self.subsample_for_bin = subsample_for_bin
        self.min_split_gain = min_split_gain
        self.min_child_weight = min_child_weight
        self.min_child_samples = min_child_samples
        self.subsample = subsample
        self.subsample_freq = subsample_freq
        self.colsample_bytree = colsample_bytree
        self.reg_alpha = reg_alpha
        self.reg_lambda = reg_lambda
        self.seed = seed
        self.nthread = nthread
        self.silent = silent
        # Fitted state, populated by fit():
        self._Booster = None        # underlying Booster once trained
        self.evals_result = None    # per-eval-set metric history
        self.best_iteration = -1    # set when early stopping triggers
        self.best_score = {}
        # A callable objective is wrapped into the (preds, dataset) form
        # that lightgbm.engine.train expects; built-in names pass through.
        if callable(self.objective):
            self.fobj = _objective_function_wrapper(self.objective)
        else:
            self.fobj = None
        # Extra **kwargs are remembered separately so get_params() can
        # report them alongside the named parameters.
        self.other_params = {}
        self.set_params(**kwargs)
def get_params(self, deep=True):
params = super(LGBMModel, self).get_params(deep=deep)
params.update(self.other_params)
return params
# minor change to support `**kwargs`
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
self.other_params[key] = value
return self
    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None,
            eval_metric=None,
            early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto',
            callbacks=None):
        """Fit the gradient boosting model.

        Parameters
        ----------
        X : array_like
            Feature matrix.
        y : array_like
            Labels.
        sample_weight : array_like
            Weight of training data.
        init_score : array_like
            Init score of training data.
        group : array_like
            Group/query data of training data, used for ranking task.
        eval_set : list, optional
            A list of (X, y) tuple pairs to use as a validation set for
            early-stopping.
        eval_names : list of str
            Names of eval_set.
        eval_sample_weight : list of array
            Weight of eval data.
        eval_init_score : list of array
            Init score of eval data.
        eval_group : list of array
            Group data of eval data.
        eval_metric : str, list of str, callable, optional
            If a str, should be a built-in evaluation metric to use.
            If callable, a custom evaluation metric; see note below.
        early_stopping_rounds : int
            Rounds without eval improvement before training stops.
        verbose : bool
            If `verbose` and an evaluation set is used, writes the evaluation
            progress.
        feature_name : list of str, or 'auto'
            Feature names.  If 'auto' and data is pandas DataFrame, use data
            columns name.
        categorical_feature : list of str or int, or 'auto'
            Categorical features; type int represents index, type str
            represents feature names (need to specify feature_name as well).
            If 'auto' and data is pandas DataFrame, use pandas categorical
            columns.
        callbacks : list of callable
            List of callback functions that are applied at each iteration.
            See Callbacks in Python-API.md for more information.

        Note
        ----
        A custom eval function expects a callable with signature
        ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
        ``func(y_true, y_pred, weight, group)``, returning
        (eval_name, eval_result, is_bigger_better) or a list of such
        tuples.  For multi-class tasks, y_pred is grouped by class_id
        first, then by row_id: the i-th row's prediction for class j is
        ``y_pred[j * num_data + i]``.
        """
        evals_result = {}
        params = self.get_params()
        # user can set verbose with kwargs, it has higher priority
        if 'verbose' not in params and self.silent:
            params['verbose'] = -1
        # silent/n_estimators are sklearn-wrapper concepts, not booster params;
        # n_estimators is passed to train() separately below.
        params.pop('silent', None)
        params.pop('n_estimators', None)
        if hasattr(self, 'n_classes_') and self.n_classes_ > 2:
            params['num_class'] = self.n_classes_
        if hasattr(self, 'eval_at'):
            params['ndcg_eval_at'] = self.eval_at
        if self.fobj:
            params['objective'] = 'None'  # objective = nullptr for unknown objective
        if callable(eval_metric):
            feval = _eval_function_wrapper(eval_metric)
        else:
            feval = None
            params['metric'] = eval_metric
        def _construct_dataset(X, y, sample_weight, init_score, group, params):
            # Wrap the raw arrays into a LightGBM Dataset with init scores.
            ret = Dataset(X, label=y, max_bin=self.max_bin, weight=sample_weight, group=group, params=params)
            ret.set_init_score(init_score)
            return ret
        train_set = _construct_dataset(X, y, sample_weight, init_score, group, params)
        valid_sets = []
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            for i, valid_data in enumerate(eval_set):
                """reduce cost for prediction training data"""
                # Identity (not equality) check: reuse the training Dataset
                # when the eval pair is literally the training data.
                if valid_data[0] is X and valid_data[1] is y:
                    valid_set = train_set
                else:
                    def get_meta_data(collection, i):
                        # Fetch the i-th per-eval-set metadata entry from a
                        # list or dict; None when absent.
                        if collection is None:
                            return None
                        elif isinstance(collection, list):
                            return collection[i] if len(collection) > i else None
                        elif isinstance(collection, dict):
                            return collection.get(i, None)
                        else:
                            raise TypeError('eval_sample_weight, eval_init_score, and eval_group should be dict or list')
                    valid_weight = get_meta_data(eval_sample_weight, i)
                    valid_init_score = get_meta_data(eval_init_score, i)
                    valid_group = get_meta_data(eval_group, i)
                    valid_set = _construct_dataset(valid_data[0], valid_data[1], valid_weight, valid_init_score, valid_group, params)
                valid_sets.append(valid_set)
        self._Booster = train(params, train_set,
                              self.n_estimators, valid_sets=valid_sets, valid_names=eval_names,
                              early_stopping_rounds=early_stopping_rounds,
                              evals_result=evals_result, fobj=self.fobj, feval=feval,
                              verbose_eval=verbose, feature_name=feature_name,
                              categorical_feature=categorical_feature,
                              callbacks=callbacks)
        if evals_result:
            self.evals_result = evals_result
        if early_stopping_rounds is not None:
            self.best_iteration = self._Booster.best_iteration
        self.best_score = self._Booster.best_score
        # free dataset
        self.booster_.free_dataset()
        del train_set, valid_sets
        return self
def predict(self, X, raw_score=False, num_iteration=0):
"""
Return the predicted value for each sample.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
num_iteration : int
Limit number of iterations in the prediction; defaults to 0 (use all trees).
Returns
-------
predicted_result : array_like, shape=[n_samples] or [n_samples, n_classes]
"""
return self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration)
def apply(self, X, num_iteration=0):
"""
Return the predicted leaf every tree for each sample.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
num_iteration : int
Limit number of iterations in the prediction; defaults to 0 (use all trees).
Returns
-------
X_leaves : array_like, shape=[n_samples, n_trees]
"""
return self.booster_.predict(X, pred_leaf=True, num_iteration=num_iteration)
@property
def booster_(self):
"""Get the underlying lightgbm Booster of this model."""
if self._Booster is None:
raise LightGBMError('No booster found. Need to call fit beforehand.')
return self._Booster
@property
def evals_result_(self):
"""Get the evaluation results."""
if self.evals_result is None:
raise LightGBMError('No results found. Need to call fit with eval set beforehand.')
return self.evals_result
@property
def feature_importances_(self):
"""Get normailized feature importances."""
importace_array = self.booster_.feature_importance().astype(np.float32)
return importace_array / importace_array.sum()
    @LGBMDeprecated('Use attribute booster_ instead.')
    def booster(self):
        # Deprecated accessor kept for backward compatibility; the
        # LGBMDeprecated decorator emits the warning, then we proxy to
        # the booster_ property.
        return self.booster_
    @LGBMDeprecated('Use attribute feature_importances_ instead.')
    def feature_importance(self):
        # Deprecated accessor kept for backward compatibility; proxies to
        # the feature_importances_ property.
        return self.feature_importances_
class LGBMRegressor(LGBMModel, LGBMRegressorBase):
    """LightGBM regressor with a scikit-learn compatible interface."""

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None,
            eval_metric="l2",
            early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto', callbacks=None):
        """Fit the regressor; delegates to LGBMModel.fit without ranking args."""
        super(LGBMRegressor, self).fit(
            X, y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            early_stopping_rounds=early_stopping_rounds,
            verbose=verbose,
            feature_name=feature_name,
            categorical_feature=categorical_feature,
            callbacks=callbacks)
        return self
class LGBMClassifier(LGBMModel, LGBMClassifierBase):
    """LightGBM classifier with a scikit-learn compatible interface."""

    # Populated by fit(); the class-level defaults let classes_/n_classes_
    # raise a helpful LightGBMError (instead of AttributeError) when they
    # are accessed before fit() has been called.
    classes = None
    n_classes = None

    def fit(self, X, y,
            sample_weight=None, init_score=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None,
            eval_metric="logloss",
            early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto',
            callbacks=None):
        """Fit the classifier.

        Labels are encoded with LGBMLabelEncoder; the objective and the
        eval metric aliases are switched between binary and multiclass
        depending on the number of classes found in ``y``.
        """
        self._le = LGBMLabelEncoder().fit(y)
        _y = self._le.transform(y)
        self.classes = self._le.classes_
        self.n_classes = len(self.classes_)
        if self.n_classes > 2:
            # Switch to using a multiclass objective in the underlying LGBM instance
            self.objective = "multiclass"
            if eval_metric in ('logloss', 'binary_logloss'):
                eval_metric = "multi_logloss"
            elif eval_metric in ('error', 'binary_error'):
                eval_metric = "multi_error"
        else:
            if eval_metric in ('logloss', 'multi_logloss'):
                eval_metric = 'binary_logloss'
            elif eval_metric in ('error', 'multi_error'):
                eval_metric = 'binary_error'
        if eval_set is not None:
            if isinstance(eval_set, tuple):
                eval_set = [eval_set]
            # Build a fresh list instead of mutating the caller's eval_set
            # in place; validation labels must go through the same encoder.
            encoded_eval_set = []
            for valid_x, valid_y in eval_set:
                if valid_x is X and valid_y is y:
                    encoded_eval_set.append((valid_x, _y))
                else:
                    encoded_eval_set.append(
                        (valid_x, self._le.transform(valid_y)))
            eval_set = encoded_eval_set
        super(LGBMClassifier, self).fit(X, _y, sample_weight=sample_weight,
                                        init_score=init_score, eval_set=eval_set,
                                        eval_names=eval_names,
                                        eval_sample_weight=eval_sample_weight,
                                        eval_init_score=eval_init_score,
                                        eval_metric=eval_metric,
                                        early_stopping_rounds=early_stopping_rounds,
                                        verbose=verbose, feature_name=feature_name,
                                        categorical_feature=categorical_feature,
                                        callbacks=callbacks)
        return self

    def predict(self, X, raw_score=False, num_iteration=0):
        """Return the predicted class label for each sample."""
        class_probs = self.predict_proba(X, raw_score, num_iteration)
        class_index = np.argmax(class_probs, axis=1)
        return self._le.inverse_transform(class_index)

    def predict_proba(self, X, raw_score=False, num_iteration=0):
        """
        Return the predicted probability for each class for each sample.

        Parameters
        ----------
        X : array_like, shape=[n_samples, n_features]
            Input features matrix.
        raw_score : bool
            Passed through to ``Booster.predict``.
        num_iteration : int
            Limit number of iterations in the prediction; defaults to 0 (use all trees).

        Returns
        -------
        predicted_probability : array_like, shape=[n_samples, n_classes]
        """
        class_probs = self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration)
        if self.n_classes > 2:
            return class_probs
        # Binary task: the booster returns a single column; stack the
        # complement to produce a two-column [P(0), P(1)] matrix.
        return np.vstack((1. - class_probs, class_probs)).transpose()

    @property
    def classes_(self):
        """Get class label array."""
        if self.classes is None:
            raise LightGBMError('No classes found. Need to call fit beforehand.')
        return self.classes

    @property
    def n_classes_(self):
        """Get number of classes."""
        if self.n_classes is None:
            raise LightGBMError('No classes found. Need to call fit beforehand.')
        return self.n_classes
class LGBMRanker(LGBMModel):
    """LightGBM ranker with a scikit-learn style interface."""

    def fit(self, X, y,
            sample_weight=None, init_score=None, group=None,
            eval_set=None, eval_names=None, eval_sample_weight=None,
            eval_init_score=None, eval_group=None,
            eval_metric='ndcg', eval_at=1,
            early_stopping_rounds=None, verbose=True,
            feature_name='auto', categorical_feature='auto',
            callbacks=None):
        """
        Most arguments like common methods except following:
        eval_at : list of int
            The evaluation positions of NDCG
        """
        # Ranking requires group/query information for the training data
        # and for every evaluation dataset.
        if group is None:
            raise ValueError("Should set group for ranking task")
        if eval_set is not None:
            if eval_group is None:
                raise ValueError("Eval_group cannot be None when eval_set is not None")
            if len(eval_group) != len(eval_set):
                raise ValueError("Length of eval_group should equal to eval_set")
            if isinstance(eval_group, dict):
                group_missing = any(idx not in eval_group or eval_group[idx] is None
                                    for idx in range_(len(eval_group)))
            elif isinstance(eval_group, list):
                group_missing = any(item is None for item in eval_group)
            else:
                group_missing = False
            if group_missing:
                raise ValueError("Should set group for all eval dataset for ranking task; if you use dict, the index should start from 0")
        if eval_at is not None:
            self.eval_at = eval_at
        super(LGBMRanker, self).fit(X, y, sample_weight=sample_weight,
                                    init_score=init_score, group=group,
                                    eval_set=eval_set, eval_names=eval_names,
                                    eval_sample_weight=eval_sample_weight,
                                    eval_init_score=eval_init_score, eval_group=eval_group,
                                    eval_metric=eval_metric,
                                    early_stopping_rounds=early_stopping_rounds,
                                    verbose=verbose, feature_name=feature_name,
                                    categorical_feature=categorical_feature,
                                    callbacks=callbacks)
        return self
| |
#!/usr/bin/env python
"""Software Carpentry Windows Installer
Helps mimic a *nix environment on Windows with as little work as possible.
The script:
* Installs GNU Make and makes it accessible from msysGit
* Installs nano and makes it accessible from msysGit
* Installs SQLite and makes it accessible from msysGit
* Creates a ~/nano.rc with links to syntax highlighting configs
* Provides standard nosetests behavior for msysGit
* Adds R's bin directory to the path (if we can find it)
To use:
1. Install Python, IPython, and Nose. An easy way to do this is with
the Anaconda Python distribution
http://continuum.io/downloads
2. Install msysGit
https://github.com/msysgit/msysgit/releases
3. Install R (if your workshop uses R)
http://cran.r-project.org/bin/windows/base/rw-FAQ.html#Installation-and-Usage
4. Run swc-windows-installer.py.
You should be able to simply double click the file in Windows
"""
import glob
import hashlib
try: # Python 3
from io import BytesIO as _BytesIO
except ImportError: # Python 2
from StringIO import StringIO as _BytesIO
import logging
import os
import re
import sys
import tarfile
try: # Python 3
from urllib.request import urlopen as _urlopen
except ImportError: # Python 2
from urllib2 import urlopen as _urlopen
import zipfile
__version__ = '0.2'
LOG = logging.getLogger('swc-windows-installer')
LOG.addHandler(logging.StreamHandler())
LOG.setLevel(logging.INFO)
if sys.version_info >= (3, 0):  # Python 3
    # Python 3's builtin open already accepts the newline argument.
    open3 = open
else:
    def open3(file, mode='r', newline=None):
        """Python 2 shim for open() supporting a limited newline argument."""
        if newline:
            if newline != '\n':
                raise NotImplementedError(newline)
            return open(file, mode + 'b')
        return open(file, mode)
def download(url, sha1):
    """Download a file and verify its SHA-1 hash.

    Raises ValueError when the downloaded content does not match the
    expected hash; returns the raw bytes otherwise.
    """
    LOG.debug('download {}'.format(url))
    response = _urlopen(url)
    payload = response.read()
    actual_sha1 = hashlib.sha1(payload).hexdigest()
    if actual_sha1 != sha1:
        raise ValueError(
            'downloaded {!r} has the wrong SHA-1 hash: {} != {}'.format(
                url, actual_sha1, sha1))
    LOG.debug('SHA-1 for {} matches the expected {}'.format(url, sha1))
    return payload
def splitall(path):
    """Split a path into a list of components

    >>> splitall('nano-2.2.6/doc/Makefile.am')
    ['nano-2.2.6', 'doc', 'Makefile.am']
    """
    reversed_parts = []
    remainder = path
    while True:
        head, tail = os.path.split(remainder)
        if tail:
            reversed_parts.append(tail)
            remainder = head
        elif head:
            # Absolute-path root (e.g. '/'), which split can't reduce further.
            reversed_parts.append(head)
            break
        else:
            break
    return list(reversed(reversed_parts))
def transform(tarinfo, strip_components=0):
    """Strip leading path components from a TarInfo for extraction.

    Returns the modified TarInfo, or None when stripping consumes the
    whole path (the caller should drop such members).
    """
    components = splitall(tarinfo.name)
    remaining = components[strip_components:]
    if not remaining:
        return None
    tarinfo.name = os.path.join(*remaining)
    return tarinfo
def tar_install(url, sha1, install_directory, compression='*',
                strip_components=0):
    """Download and install a tar bundle"""
    if os.path.isdir(install_directory):
        LOG.info('existing installation at {}'.format(install_directory))
        return
    tar_bytes = download(url=url, sha1=sha1)
    tar_io = _BytesIO(tar_bytes)
    filename = os.path.basename(url)
    mode = 'r:{}'.format(compression)
    tar_file = tarfile.open(filename, mode, tar_io)
    LOG.info('installing {} into {}'.format(url, install_directory))
    os.makedirs(install_directory)
    # transform() returns None for members fully consumed by
    # strip_components; those are dropped before extraction.
    stripped = (transform(tarinfo=tarinfo, strip_components=strip_components)
                for tarinfo in tar_file)
    tar_file.extractall(
        path=install_directory,
        members=[member for member in stripped if member is not None])
def zip_install(url, sha1, install_directory):
    """Download and install a zipped bundle"""
    if os.path.isdir(install_directory):
        LOG.info('existing installation at {}'.format(install_directory))
        return
    archive_bytes = download(url=url, sha1=sha1)
    archive = zipfile.ZipFile(_BytesIO(archive_bytes))
    LOG.info('installing {} into {}'.format(url, install_directory))
    os.makedirs(install_directory)
    archive.extractall(install_directory)
def install_msysgit_binary(name, sha1, install_directory,
                           tag='Git-1.9.4-preview20140815'):
    """Download and install a binary from msysGit's bin directory.

    Fetches ``name`` from the given msysGit release tag on GitHub,
    verifies its SHA-1, and writes it into ``install_directory``.
    """
    # Named 'content' (not 'bytes') to avoid shadowing the builtin type.
    content = download(
        url='https://github.com/msysgit/msysgit/raw/{}/bin/{}'.format(
            tag, name),
        sha1=sha1)
    LOG.info('installing {} into {}'.format(name, install_directory))
    with open(os.path.join(install_directory, name), 'wb') as f:
        f.write(content)
def install_nano(install_directory):
    """Download and install the nano text editor"""
    nano_url = 'http://www.nano-editor.org/dist/v2.2/NT/nano-2.2.6.zip'
    nano_sha1 = 'f5348208158157060de0a4df339401f36250fe5b'
    zip_install(url=nano_url, sha1=nano_sha1,
                install_directory=install_directory)
def install_nanorc(install_directory):
    """Download and install nano syntax highlighting"""
    tar_install(
        url='http://www.nano-editor.org/dist/v2.2/nano-2.2.6.tar.gz',
        sha1='f2a628394f8dda1b9f28c7e7b89ccb9a6dbd302a',
        install_directory=install_directory,
        strip_components=1)
    home = os.path.expanduser('~')
    nanorc = os.path.join(home, 'nano.rc')
    if os.path.isfile(nanorc):
        # Never clobber an existing user configuration.
        return
    syntax_dir = os.path.join(install_directory, 'doc', 'syntax')
    LOG.info('include nanorc from {} in {}'.format(syntax_dir, nanorc))
    with open3(nanorc, 'w', newline='\n') as f:
        for filename in os.listdir(syntax_dir):
            if not filename.endswith('.nanorc'):
                continue
            syntax_path = os.path.join(syntax_dir, filename)
            rel_path = os.path.relpath(syntax_path, home)
            include_path = make_posix_path(os.path.join('~', rel_path))
            f.write('include {}\n'.format(include_path))
def install_sqlite(install_directory):
    """Download and install the SQLite shell"""
    sqlite_url = 'https://sqlite.org/2014/sqlite-shell-win32-x86-3080403.zip'
    sqlite_sha1 = '1a8ab0ca9f4c51afeffeb49bd301e1d7f64741bb'
    zip_install(url=sqlite_url, sha1=sqlite_sha1,
                install_directory=install_directory)
def create_nosetests_entry_point(python_scripts_directory):
    """Creates a terminal-based nosetests entry point for msysGit"""
    contents = '\n'.join([
        '#!/usr/bin/env python',  # env takes the interpreter as an argument
        'import sys',
        'import nose',
        "if __name__ == '__main__':",
        '    sys.exit(nose.core.main())',
        '',
        ])
    if not os.path.isdir(python_scripts_directory):
        os.makedirs(python_scripts_directory)
    path = os.path.join(python_scripts_directory, 'nosetests')
    LOG.info('create nosetests entrypoint {}'.format(path))
    with open(path, 'w') as f:
        f.write(contents)
def get_r_bin_directory():
    """Locate the R bin directory (if R is installed)

    Scans the standard Program Files locations for R-x.y.z installs
    and returns the bin directory of the newest version found, or
    None when no install is detected.
    """
    # Raw string avoids invalid-escape warnings for \d in newer Pythons.
    version_re = re.compile(r'^R-(\d+)[.](\d+)[.](\d+)$')
    paths = {}
    for pf in [
            os.environ.get('ProgramW6432', r'c:\Program Files'),
            os.environ.get('ProgramFiles', r'c:\Program Files'),
            os.environ.get('ProgramFiles(x86)', r'c:\Program Files(x86)'),
            ]:
        bin_glob = os.path.join(pf, 'R', 'R-[0-9]*.[0-9]*.[0-9]*', 'bin')
        for path in glob.glob(bin_glob):
            version_dir = os.path.basename(os.path.dirname(path))
            version_match = version_re.match(version_dir)
            # First match wins for a given version tuple.
            if version_match and version_match.groups() not in paths:
                paths[version_match.groups()] = path
    if not paths:
        LOG.info('no R installation found under {}'.format(pf))
        return
    LOG.debug('detected R installs:\n* {}'.format('\n* '.join(
        v for _, v in sorted(paths.items()))))
    # Version tuples sort lexicographically; take the newest.
    version = sorted(paths.keys())[-1]
    LOG.info('using R v{} bin directory at {}'.format(
        '.'.join(version), paths[version]))
    return paths[version]
def update_bash_profile(extra_paths=()):
    """Create or append to a .bash_profile for Software Carpentry

    Adds nano to the path, sets the default editor to nano, and adds
    additional paths for other executables.
    """
    posix_paths = ':'.join(make_posix_path(path) for path in extra_paths)
    profile_lines = [
        '',
        '# Add paths for Software-Carpentry-installed scripts and executables',
        'export PATH="$PATH:{}"'.format(posix_paths),
        '',
        '# Make nano the default editor',
        'export EDITOR=nano',
        '',
    ]
    config_path = os.path.join(os.path.expanduser('~'), '.bash_profile')
    LOG.info('update bash profile at {}'.format(config_path))
    LOG.debug('extra paths:\n* {}'.format('\n* '.join(extra_paths)))
    with open(config_path, 'a') as f:
        f.write('\n'.join(profile_lines))
def make_posix_path(windows_path):
    """Convert a Windows path to a posix path"""
    # Backslashes become forward slashes, then a leading C: drive letter
    # becomes the msysGit-style /c prefix.
    posix_path = windows_path.replace('\\', '/')
    posix_path = re.sub('^[Cc]:', '/c', posix_path)
    return posix_path
def main():
    """Install make, nano, nanorc, SQLite, and a nosetests entry point
    under ~/.swc, then add their directories to the user's PATH."""
    # Everything lives under ~/.swc so nothing touches system directories.
    swc_dir = os.path.join(os.path.expanduser('~'), '.swc')
    bin_dir = os.path.join(swc_dir, 'bin')
    nano_dir = os.path.join(swc_dir, 'lib', 'nano')
    nanorc_dir = os.path.join(swc_dir, 'share', 'nanorc')
    sqlite_dir = os.path.join(swc_dir, 'lib', 'sqlite')
    create_nosetests_entry_point(python_scripts_directory=bin_dir)
    install_msysgit_binary(
        name='make.exe', sha1='ad11047985c33ff57074f8e09d347fe122e047a4',
        install_directory=bin_dir)
    install_nano(install_directory=nano_dir)
    install_nanorc(install_directory=nanorc_dir)
    install_sqlite(install_directory=sqlite_dir)
    # Order matters: earlier entries get PATH precedence in the profile.
    paths = [nano_dir, sqlite_dir, bin_dir]
    r_dir = get_r_bin_directory()
    if r_dir:
        paths.append(r_dir)
    update_bash_profile(extra_paths=paths)
if __name__ == '__main__':
    import argparse

    # Command-line interface: only verbosity and version reporting.
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        '-v', '--verbose',
        choices=['critical', 'error', 'warning', 'info', 'debug'],
        help='Verbosity (defaults to {!r})'.format(
            logging.getLevelName(LOG.level).lower()))
    arg_parser.add_argument(
        '--version', action='version',
        version='%(prog)s {}'.format(__version__))
    arguments = arg_parser.parse_args()
    if arguments.verbose:
        LOG.setLevel(getattr(logging, arguments.verbose.upper()))
    LOG.info('Preparing your Software Carpentry awesomeness!')
    LOG.info('installer version {}'.format(__version__))
    main()
    LOG.info('Installation complete.')
| |
"""
Support for MQTT message handling..
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/
"""
import logging
import os
import socket
import time
from homeassistant.config import load_yaml_config_file
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util as util
from homeassistant.helpers import template
from homeassistant.helpers import validate_config
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt"
MQTT_CLIENT = None
SERVICE_PUBLISH = 'publish'
EVENT_MQTT_MESSAGE_RECEIVED = 'mqtt_message_received'
REQUIREMENTS = ['paho-mqtt==1.1']
CONF_BROKER = 'broker'
CONF_PORT = 'port'
CONF_CLIENT_ID = 'client_id'
CONF_KEEPALIVE = 'keepalive'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
CONF_CERTIFICATE = 'certificate'
CONF_PROTOCOL = 'protocol'
PROTOCOL_31 = '3.1'
PROTOCOL_311 = '3.1.1'
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_QOS = 0
DEFAULT_RETAIN = False
DEFAULT_PROTOCOL = PROTOCOL_311
ATTR_TOPIC = 'topic'
ATTR_PAYLOAD = 'payload'
ATTR_PAYLOAD_TEMPLATE = 'payload_template'
ATTR_QOS = 'qos'
ATTR_RETAIN = 'retain'
MAX_RECONNECT_WAIT = 300 # seconds
def _build_publish_data(topic, qos, retain):
    """Build the arguments for the publish service without the payload."""
    service_data = {ATTR_TOPIC: topic}
    # Only include optional keys the caller actually supplied.
    for key, value in ((ATTR_QOS, qos), (ATTR_RETAIN, retain)):
        if value is not None:
            service_data[key] = value
    return service_data
def publish(hass, topic, payload, qos=None, retain=None):
    """Publish message to an MQTT topic."""
    service_data = _build_publish_data(topic, qos, retain)
    service_data[ATTR_PAYLOAD] = payload
    hass.services.call(DOMAIN, SERVICE_PUBLISH, service_data)
def publish_template(hass, topic, payload_template, qos=None, retain=None):
    """Publish message to an MQTT topic using a template payload."""
    service_data = _build_publish_data(topic, qos, retain)
    service_data[ATTR_PAYLOAD_TEMPLATE] = payload_template
    hass.services.call(DOMAIN, SERVICE_PUBLISH, service_data)
def subscribe(hass, topic, callback, qos=DEFAULT_QOS):
    """Subscribe to an MQTT topic."""
    def mqtt_topic_subscriber(event):
        """Invoke callback for received messages matching the subscription."""
        event_topic = event.data[ATTR_TOPIC]
        if not _match_topic(topic, event_topic):
            return
        callback(event_topic, event.data[ATTR_PAYLOAD],
                 event.data[ATTR_QOS])
    hass.bus.listen(EVENT_MQTT_MESSAGE_RECEIVED, mqtt_topic_subscriber)
    MQTT_CLIENT.subscribe(topic, qos)
def setup(hass, config):
    """Start the MQTT protocol service.

    Validates the configuration, connects the module-global MQTT client,
    and registers the publish service plus start/stop event hooks.
    Returns False when configuration is invalid or the broker is
    unreachable.
    """
    if not validate_config(config, {DOMAIN: ['broker']}, _LOGGER):
        return False
    conf = config[DOMAIN]
    broker = conf[CONF_BROKER]
    port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
    client_id = util.convert(conf.get(CONF_CLIENT_ID), str)
    keepalive = util.convert(conf.get(CONF_KEEPALIVE), int, DEFAULT_KEEPALIVE)
    username = util.convert(conf.get(CONF_USERNAME), str)
    password = util.convert(conf.get(CONF_PASSWORD), str)
    certificate = util.convert(conf.get(CONF_CERTIFICATE), str)
    protocol = util.convert(conf.get(CONF_PROTOCOL), str, DEFAULT_PROTOCOL)
    if protocol not in (PROTOCOL_31, PROTOCOL_311):
        _LOGGER.error('Invalid protocol specified: %s. Allowed values: %s, %s',
                      protocol, PROTOCOL_31, PROTOCOL_311)
        return False
    # For cloudmqtt.com, secured connection, auto fill in certificate
    # (cloudmqtt's TLS ports fall in the 20000-29999 range).
    if certificate is None and 19999 < port < 30000 and \
       broker.endswith('.cloudmqtt.com'):
        certificate = os.path.join(os.path.dirname(__file__),
                                   'addtrustexternalcaroot.crt')
    global MQTT_CLIENT
    try:
        MQTT_CLIENT = MQTT(hass, broker, port, client_id, keepalive, username,
                           password, certificate, protocol)
    except socket.error:
        _LOGGER.exception("Can't connect to the broker. "
                          "Please check your settings and the broker "
                          "itself.")
        return False
    def stop_mqtt(event):
        """Stop MQTT component."""
        MQTT_CLIENT.stop()
    def start_mqtt(event):
        """Launch MQTT component when Home Assistant starts up."""
        MQTT_CLIENT.start()
        # The stop hook is only registered once the client has started.
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_mqtt)
    def publish_service(call):
        """Handle MQTT publish service calls."""
        msg_topic = call.data.get(ATTR_TOPIC)
        payload = call.data.get(ATTR_PAYLOAD)
        payload_template = call.data.get(ATTR_PAYLOAD_TEMPLATE)
        qos = call.data.get(ATTR_QOS, DEFAULT_QOS)
        retain = call.data.get(ATTR_RETAIN, DEFAULT_RETAIN)
        # A literal payload takes precedence over a template.
        if payload is None:
            if payload_template is None:
                _LOGGER.error(
                    "You must set either '%s' or '%s' to use this service",
                    ATTR_PAYLOAD, ATTR_PAYLOAD_TEMPLATE)
                return
            try:
                payload = template.render(hass, payload_template)
            except template.jinja2.TemplateError as exc:
                _LOGGER.error(
                    "Unable to publish to '%s': rendering payload template of "
                    "'%s' failed because %s.",
                    msg_topic, payload_template, exc)
                return
        if msg_topic is None or payload is None:
            return
        MQTT_CLIENT.publish(msg_topic, payload, qos, retain)
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_mqtt)
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))
    hass.services.register(DOMAIN, SERVICE_PUBLISH, publish_service,
                           descriptions.get(SERVICE_PUBLISH))
    return True
# pylint: disable=too-many-arguments
class MQTT(object):
    """Home Assistant MQTT client.

    Wraps a paho-mqtt client, tracking subscriptions so they can be
    restored after a reconnect and firing Home Assistant bus events for
    received messages.
    """
    def __init__(self, hass, broker, port, client_id, keepalive, username,
                 password, certificate, protocol):
        """Initialize Home Assistant MQTT client."""
        import paho.mqtt.client as mqtt
        self.hass = hass
        # topic -> granted qos (None while the subscribe is still in flight)
        self.topics = {}
        # paho message id -> topic, for in-flight (un)subscribe requests
        self.progress = {}
        if protocol == PROTOCOL_31:
            proto = mqtt.MQTTv31
        else:
            proto = mqtt.MQTTv311
        if client_id is None:
            self._mqttc = mqtt.Client(protocol=proto)
        else:
            self._mqttc = mqtt.Client(client_id, protocol=proto)
        if username is not None:
            self._mqttc.username_pw_set(username, password)
        if certificate is not None:
            self._mqttc.tls_set(certificate)
        self._mqttc.on_subscribe = self._mqtt_on_subscribe
        self._mqttc.on_unsubscribe = self._mqtt_on_unsubscribe
        self._mqttc.on_connect = self._mqtt_on_connect
        self._mqttc.on_disconnect = self._mqtt_on_disconnect
        self._mqttc.on_message = self._mqtt_on_message
        # Connect synchronously; may raise socket.error (handled in setup()).
        self._mqttc.connect(broker, port, keepalive)
    def publish(self, topic, payload, qos, retain):
        """Publish a MQTT message."""
        self._mqttc.publish(topic, payload, qos, retain)
    def start(self):
        """Run the MQTT client in its own network thread."""
        self._mqttc.loop_start()
    def stop(self):
        """Disconnect and stop the MQTT client's network thread."""
        self._mqttc.disconnect()
        self._mqttc.loop_stop()
    def subscribe(self, topic, qos):
        """Subscribe to a topic (no-op if already subscribed)."""
        assert isinstance(topic, str)
        if topic in self.topics:
            return
        result, mid = self._mqttc.subscribe(topic, qos)
        _raise_on_error(result)
        # Granted qos is recorded by _mqtt_on_subscribe once acknowledged.
        self.progress[mid] = topic
        self.topics[topic] = None
    def unsubscribe(self, topic):
        """Unsubscribe from topic."""
        result, mid = self._mqttc.unsubscribe(topic)
        _raise_on_error(result)
        self.progress[mid] = topic
    def _mqtt_on_connect(self, _mqttc, _userdata, _flags, result_code):
        """On connect callback.

        Resubscribe to all topics we were subscribed to.
        """
        if result_code != 0:
            _LOGGER.error('Unable to connect to the MQTT broker: %s', {
                1: 'Incorrect protocol version',
                2: 'Invalid client identifier',
                3: 'Server unavailable',
                4: 'Bad username or password',
                5: 'Not authorised'
            }.get(result_code, 'Unknown reason'))
            self._mqttc.disconnect()
            return
        # Keep only in-flight entries; re-issue subscribes for the rest.
        old_topics = self.topics
        self.topics = {key: value for key, value in self.topics.items()
                       if value is None}
        for topic, qos in old_topics.items():
            # qos is None if we were in process of subscribing
            if qos is not None:
                self.subscribe(topic, qos)
    def _mqtt_on_subscribe(self, _mqttc, _userdata, mid, granted_qos):
        """Subscribe successful callback."""
        topic = self.progress.pop(mid, None)
        if topic is None:
            return
        self.topics[topic] = granted_qos[0]
    def _mqtt_on_message(self, _mqttc, _userdata, msg):
        """Message received callback."""
        self.hass.bus.fire(EVENT_MQTT_MESSAGE_RECEIVED, {
            ATTR_TOPIC: msg.topic,
            ATTR_QOS: msg.qos,
            ATTR_PAYLOAD: msg.payload.decode('utf-8'),
        })
    def _mqtt_on_unsubscribe(self, _mqttc, _userdata, mid, granted_qos):
        """Unsubscribe successful callback."""
        topic = self.progress.pop(mid, None)
        if topic is None:
            return
        self.topics.pop(topic, None)
    def _mqtt_on_disconnect(self, _mqttc, _userdata, result_code):
        """Disconnected callback; retries with capped exponential backoff."""
        self.progress = {}
        # NOTE(review): the dict comprehension and the loop below both drop
        # None-valued (in-flight) topics — the second pass is redundant.
        self.topics = {key: value for key, value in self.topics.items()
                       if value is not None}
        # Remove None values from topic list
        for key in list(self.topics):
            if self.topics[key] is None:
                self.topics.pop(key)
        # When disconnected because of calling disconnect()
        if result_code == 0:
            return
        tries = 0
        wait_time = 0
        while True:
            try:
                if self._mqttc.reconnect() == 0:
                    _LOGGER.info('Successfully reconnected to the MQTT server')
                    break
            except socket.error:
                pass
            # Exponential backoff capped at MAX_RECONNECT_WAIT seconds.
            wait_time = min(2**tries, MAX_RECONNECT_WAIT)
            _LOGGER.warning(
                'Disconnected from MQTT (%s). Trying to reconnect in %ss',
                result_code, wait_time)
            # It is ok to sleep here as we are in the MQTT thread.
            time.sleep(wait_time)
            tries += 1
def _raise_on_error(result):
    """Raise HomeAssistantError when *result* is a non-zero MQTT error code."""
    if result == 0:
        return
    raise HomeAssistantError('Error talking to MQTT: {}'.format(result))
def _match_topic(subscription, topic):
"""Test if topic matches subscription."""
if subscription.endswith('#'):
return (subscription[:-2] == topic or
topic.startswith(subscription[:-1]))
sub_parts = subscription.split('/')
topic_parts = topic.split('/')
return (len(sub_parts) == len(topic_parts) and
all(a == b for a, b in zip(sub_parts, topic_parts) if a != '+'))
| |
from unittest import TestCase
from django.test import TestCase as DjangoTestCase
from django.urls import reverse
from wagtail.admin.search import SearchArea
from wagtail.admin.ui.sidebar import (
CustomBrandingModule, LinkMenuItem, MainMenuModule, PageExplorerMenuItem, SearchModule,
SubMenuItem, WagtailBrandingModule)
from wagtail.core.telepath import JSContext
from wagtail.tests.utils import WagtailTestUtils
class TestAdaptLinkMenuItem(TestCase):
    """Telepath serialisation of LinkMenuItem."""

    def test_adapt(self):
        result = JSContext().pack(LinkMenuItem('link', "Link", '/link/'))
        expected = {
            '_type': 'wagtail.sidebar.LinkMenuItem',
            '_args': [
                {
                    'classnames': '',
                    'icon_name': '',
                    'label': 'Link',
                    'name': 'link',
                    'url': '/link/'
                }
            ]
        }
        self.assertEqual(result, expected)

    def test_adapt_with_classnames_and_icon(self):
        result = JSContext().pack(
            LinkMenuItem('link', "Link", '/link/',
                         icon_name='link-icon', classnames='some classes'))
        expected = {
            '_type': 'wagtail.sidebar.LinkMenuItem',
            '_args': [
                {
                    'classnames': 'some classes',
                    'icon_name': 'link-icon',
                    'label': 'Link',
                    'name': 'link',
                    'url': '/link/'
                }
            ]
        }
        self.assertEqual(result, expected)
class TestAdaptSubMenuItem(TestCase):
    """Telepath serialisation of SubMenuItem, with and without footer text."""

    # Expected serialisation of the nested LinkMenuItem used in both tests.
    EXPECTED_LINK = {
        '_type': 'wagtail.sidebar.LinkMenuItem',
        '_args': [
            {
                'name': 'link',
                'label': 'Link',
                'icon_name': 'link-icon',
                'classnames': '',
                'url': '/link/'
            }
        ]
    }

    def _pack_sub_menu(self, **kwargs):
        # Build and serialise a SubMenuItem containing one link child.
        return JSContext().pack(
            SubMenuItem('sub-menu', "Sub menu", [
                LinkMenuItem('link', "Link", '/link/', icon_name='link-icon'),
            ], **kwargs)
        )

    def test_adapt(self):
        result = self._pack_sub_menu(footer_text='Footer text')
        self.assertEqual(result, {
            '_type': 'wagtail.sidebar.SubMenuItem',
            '_args': [
                {
                    'name': 'sub-menu',
                    'label': 'Sub menu',
                    'icon_name': '',
                    'classnames': '',
                    'footer_text': 'Footer text'
                },
                [self.EXPECTED_LINK]
            ]
        })

    def test_adapt_without_footer_text(self):
        result = self._pack_sub_menu()
        self.assertEqual(result, {
            '_type': 'wagtail.sidebar.SubMenuItem',
            '_args': [
                {
                    'name': 'sub-menu',
                    'label': 'Sub menu',
                    'icon_name': '',
                    'classnames': '',
                    'footer_text': ''
                },
                [self.EXPECTED_LINK]
            ]
        })
class TestAdaptPageExplorerMenuItem(TestCase):
    """PageExplorerMenuItem packs like a LinkMenuItem plus a start page id."""

    def test_adapt(self):
        item = PageExplorerMenuItem('pages', "Pages", '/pages/', 1)
        packed = JSContext().pack(item)
        expected_attrs = {
            'classnames': '',
            'icon_name': '',
            'label': 'Pages',
            'name': 'pages',
            'url': '/pages/',
        }
        # Second _args entry is the root page id passed to the explorer.
        self.assertEqual(packed, {
            '_type': 'wagtail.sidebar.PageExplorerMenuItem',
            '_args': [expected_attrs, 1],
        })
class TestAdaptWagtailBrandingModule(TestCase):
    """WagtailBrandingModule packs the home URL plus the logo image set."""

    def test_adapt(self):
        packed = JSContext().pack(WagtailBrandingModule())
        self.assertEqual(packed['_type'], 'wagtail.sidebar.WagtailBrandingModule')
        self.assertEqual(len(packed['_args']), 2)
        home_url, logo_images = packed['_args']
        self.assertEqual(home_url, reverse('wagtailadmin_home'))
        # Only the key set is asserted; the values are static asset URLs.
        expected_keys = {
            'desktopLogoBody',
            'desktopLogoEyeClosed',
            'desktopLogoEyeOpen',
            'desktopLogoTail',
            'mobileLogo',
        }
        self.assertEqual(logo_images.keys(), expected_keys)
class TestAdaptCustomBrandingModule(TestCase):
    """CustomBrandingModule packs its HTML and the collapsible flag."""

    html = '<h1>My custom branding</h1>'

    def _check(self, module, collapsible):
        self.assertEqual(JSContext().pack(module), {
            '_type': 'wagtail.sidebar.CustomBrandingModule',
            '_args': [self.html, collapsible],
        })

    def test_adapt(self):
        # collapsible defaults to False.
        self._check(CustomBrandingModule(self.html), False)

    def test_collapsible(self):
        self._check(CustomBrandingModule(self.html, collapsible=True), True)
class TestAdaptSearchModule(TestCase):
    """SearchModule packs only the search area's URL."""

    def test_adapt(self):
        packed = JSContext().pack(SearchModule(SearchArea("Search", '/search/')))
        expected = {
            '_type': 'wagtail.sidebar.SearchModule',
            '_args': ['/search/'],
        }
        self.assertEqual(packed, expected)
class TestAdaptMainMenuModule(DjangoTestCase, WagtailTestUtils):
    # Needs the Django test DB (create_user), hence DjangoTestCase.
    def test_adapt(self):
        """MainMenuModule packs [main menu items, account menu items, user info]."""
        main_menu = [
            LinkMenuItem('pages', "Pages", '/pages/'),
        ]
        account_menu = [
            LinkMenuItem('account', "Account", reverse('wagtailadmin_account'), icon_name='user'),
            LinkMenuItem('logout', "Logout", reverse('wagtailadmin_logout'), icon_name='logout'),
        ]
        user = self.create_user(username='admin')
        packed = JSContext().pack(MainMenuModule(main_menu, account_menu, user))
        self.assertEqual(packed, {
            '_type': 'wagtail.sidebar.MainMenuModule',
            '_args': [
                [
                    {
                        '_type': 'wagtail.sidebar.LinkMenuItem',
                        '_args': [
                            {'name': 'pages', 'label': 'Pages', 'icon_name': '', 'classnames': '', 'url': '/pages/'}
                        ]
                    }
                ],
                [
                    {
                        '_type': 'wagtail.sidebar.LinkMenuItem',
                        '_args': [
                            {'name': 'account', 'label': 'Account', 'icon_name': 'user', 'classnames': '', 'url': reverse('wagtailadmin_account')}
                        ]
                    },
                    {
                        '_type': 'wagtail.sidebar.LinkMenuItem',
                        '_args': [
                            {'name': 'logout', 'label': 'Logout', 'icon_name': 'logout', 'classnames': '', 'url': reverse('wagtailadmin_logout')}
                        ]
                    }
                ],
                {
                    # Falls back to the username since the fixture has no first name.
                    'name': user.first_name or user.get_username(),
                    # presumably the gravatar hash of the fixture user's email — verify if this fails
                    'avatarUrl': '//www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=100&d=mm'
                }
            ]
        })
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import logging
import gin
import tensorflow as tf
import tensorflow_addons.optimizers as tfa_optimizers
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Polynomially warms up the learning rate, then delegates to a decay schedule.

  For global_step < warmup_steps the rate is
  init_lr * (step / warmup_steps) ** power; afterwards the wrapped
  decay_schedule_fn takes over.
  """

  def __init__(self,
               initial_learning_rate,
               decay_schedule_fn,
               warmup_steps,
               power=1.0,
               name=None):
    super(WarmUp, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.warmup_steps = warmup_steps
    self.power = power
    self.decay_schedule_fn = decay_schedule_fn
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or 'WarmUp') as name:
      # Fraction of warmup completed; with power=1 this is a linear ramp
      # from 0 up to initial_learning_rate.
      step_f = tf.cast(step, tf.float32)
      warmup_f = tf.cast(self.warmup_steps, tf.float32)
      fraction_done = step_f / warmup_f
      warmup_lr = (self.initial_learning_rate *
                   tf.math.pow(fraction_done, self.power))
      return tf.cond(
          step_f < warmup_f,
          lambda: warmup_lr,
          lambda: self.decay_schedule_fn(step),
          name=name)

  def get_config(self):
    # Serialize everything needed to rebuild this schedule.
    keys = ('initial_learning_rate', 'decay_schedule_fn', 'warmup_steps',
            'power', 'name')
    return {key: getattr(self, key) for key in keys}
@gin.configurable
def create_optimizer(init_lr,
                     num_train_steps,
                     num_warmup_steps,
                     end_lr=0.0,
                     optimizer_type='adamw'):
  """Creates an optimizer with a linear-decay learning rate schedule.

  Args:
    init_lr: Peak learning rate (reached after warmup, if any).
    num_train_steps: Total number of training steps; the decay horizon.
    num_warmup_steps: Number of warmup steps; 0/None disables warmup.
    end_lr: Learning rate at the end of the decay.
    optimizer_type: 'adamw' or 'lamb'.

  Returns:
    A configured optimizer instance.

  Raises:
    ValueError: If `optimizer_type` is not supported.
  """
  # Implements linear decay of the learning rate.
  lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
      initial_learning_rate=init_lr,
      decay_steps=num_train_steps,
      end_learning_rate=end_lr)
  if num_warmup_steps:
    lr_schedule = WarmUp(
        initial_learning_rate=init_lr,
        decay_schedule_fn=lr_schedule,
        warmup_steps=num_warmup_steps)
  # Both optimizers share identical hyper-parameters; keep them in one place
  # so they cannot drift apart.
  common_kwargs = dict(
      learning_rate=lr_schedule,
      weight_decay_rate=0.01,
      beta_1=0.9,
      beta_2=0.999,
      epsilon=1e-6,
      exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
  if optimizer_type == 'adamw':
    logging.info('using Adamw optimizer')
    optimizer = AdamWeightDecay(**common_kwargs)
  elif optimizer_type == 'lamb':
    logging.info('using Lamb optimizer')
    optimizer = tfa_optimizers.LAMB(**common_kwargs)
  else:
    # Bug fix: previously raised ValueError('...: ', optimizer_type), which
    # produced a two-element tuple as the error message.
    raise ValueError('Unsupported optimizer type: {}'.format(optimizer_type))
  return optimizer
class AdamWeightDecay(tf.keras.optimizers.Adam):
  """Adam enables L2 weight decay and clip_by_global_norm on gradients.

  Just adding the square of the weights to the loss function is *not* the
  correct way of using L2 regularization/weight decay with Adam, since that will
  interact with the m and v parameters in strange ways.

  Instead we want to decay the weights in a manner that doesn't interact with
  the m/v parameters. This is equivalent to adding the square of the weights to
  the loss with plain (non-momentum) SGD.
  """

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               amsgrad=False,
               weight_decay_rate=0.0,
               include_in_weight_decay=None,
               exclude_from_weight_decay=None,
               name='AdamWeightDecay',
               **kwargs):
    # include/exclude are lists of regex patterns matched against variable
    # names; include takes precedence over exclude (see _do_use_weight_decay).
    # weight_decay_rate == 0 disables decay entirely.
    super(AdamWeightDecay, self).__init__(learning_rate, beta_1, beta_2,
                                          epsilon, amsgrad, name, **kwargs)
    self.weight_decay_rate = weight_decay_rate
    self._include_in_weight_decay = include_in_weight_decay
    self._exclude_from_weight_decay = exclude_from_weight_decay

  @classmethod
  def from_config(cls, config):
    """Creates an optimizer from its config with WarmUp custom object."""
    custom_objects = {'WarmUp': WarmUp}
    return super(AdamWeightDecay, cls).from_config(
        config, custom_objects=custom_objects)

  def _prepare_local(self, var_device, var_dtype, apply_state):
    # Stash the decay rate next to Adam's own per-(device, dtype)
    # coefficients so _decay_weights_op can look it up.
    super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype,
                                                apply_state)
    apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
        self.weight_decay_rate, name='adam_weight_decay_rate')

  def _decay_weights_op(self, var, learning_rate, apply_state):
    # Decoupled weight decay: var -= lr * decay_rate * var, run before the
    # Adam step (see _resource_apply_dense/_resource_apply_sparse).
    # NOTE(review): indexes apply_state directly — assumes it is non-None
    # here even though _resource_apply_* accept apply_state=None; confirm.
    do_decay = self._do_use_weight_decay(var.name)
    if do_decay:
      return var.assign_sub(
          learning_rate * var *
          apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'],
          use_locking=self._use_locking)
    return tf.no_op()

  def apply_gradients(self,
                      grads_and_vars,
                      name=None,
                      experimental_aggregate_gradients=True):
    grads, tvars = list(zip(*grads_and_vars))
    if experimental_aggregate_gradients:
      # when experimental_aggregate_gradients = False, apply_gradients() no
      # longer implicitly allreduce gradients, users manually allreduce gradient
      # and passed the allreduced grads_and_vars. For now, the
      # clip_by_global_norm will be moved to before the explicit allreduce to
      # keep the math the same as TF 1 and pre TF 2.2 implementation.
      (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    return super(AdamWeightDecay, self).apply_gradients(
        zip(grads, tvars),
        name=name,
        experimental_aggregate_gradients=experimental_aggregate_gradients)

  def _get_lr(self, var_device, var_dtype, apply_state):
    """Retrieves the learning rate with the given state."""
    if apply_state is None:
      return self._decayed_lr_t[var_dtype], {}
    apply_state = apply_state or {}
    coefficients = apply_state.get((var_device, var_dtype))
    if coefficients is None:
      # Populate the per-(device, dtype) coefficients on first use.
      coefficients = self._fallback_apply_state(var_device, var_dtype)
      apply_state[(var_device, var_dtype)] = coefficients
    return coefficients['lr_t'], dict(apply_state=apply_state)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    # Apply decoupled weight decay first, then the regular Adam update.
    lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
    decay = self._decay_weights_op(var, lr_t, apply_state)
    with tf.control_dependencies([decay]):
      return super(AdamWeightDecay,
                   self)._resource_apply_dense(grad, var, **kwargs)

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    # Sparse counterpart: decay the full variable, then apply Adam on the
    # gradient slices.
    lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
    decay = self._decay_weights_op(var, lr_t, apply_state)
    with tf.control_dependencies([decay]):
      return super(AdamWeightDecay,
                   self)._resource_apply_sparse(grad, var, indices, **kwargs)

  def get_config(self):
    # Extend Adam's config with the decay rate so from_config round-trips.
    config = super(AdamWeightDecay, self).get_config()
    config.update({
        'weight_decay_rate': self.weight_decay_rate,
    })
    return config

  def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if self.weight_decay_rate == 0:
      return False
    # Include patterns win over exclude patterns.
    if self._include_in_weight_decay:
      for r in self._include_in_weight_decay:
        if re.search(r, param_name) is not None:
          return True
    if self._exclude_from_weight_decay:
      for r in self._exclude_from_weight_decay:
        if re.search(r, param_name) is not None:
          return False
    return True
| |
#!/usr/bin/env python2
# MIT License
#
# Copyright (c) 2016 Zhiang Chen
'''
Receive the cropped image from "cropped_box_image/numpy", and publish the class prediction and angle prediction onto "prediction"
'''
from __future__ import print_function
import rospy
import roslib
import cv2
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import random
import operator
import time
import os
import sys
from rospy.numpy_msg import numpy_msg
from rospy_tutorials.msg import Floats
from depthnet.msg import PredictionMSG
import math
# Mapping from short object code to integer class label (25 classes total).
name2value = {'v8':0,'duck':1,'stapler':2,'pball':3,'tball':4,'sponge':5,'bclip':6,'tape':7,'gstick':8,'cup':9,
              'pen':10,'calc':11,'blade':12,'bottle':13,'cpin':14,'scissors':15,'stape':16,'gball':17,'orwidg':18,
              'glue':19,'spoon':20,'fork':21,'nerf':22,'eraser':23,'empty':24}
# Short code -> human-readable name used in the published prediction message.
name2string = {'v8':'v8 can','duck':'ducky','stapler':'stapler','pball':'ping pang ball','tball':'tennis ball','sponge':'sponge',
               'bclip':'binder clip','tape':'big tape','gstick':'glue stick','cup':'cup','pen':'pen','calc':'calculator',
               'blade':'razor','bottle':'bottle','cpin':'clothespin','scissors':'scissors','stape':'small tape','gball':'golf ball',
               'orwidg':'orange thing','glue':'glue','spoon':'spoon','fork':'fork','nerf':'nerf gun','eraser':'eraser',
               'empty':'empty plate'}
# Inverse mapping: integer class label -> short code.
value2name = dict((value,name) for name,value in name2value.items())
image_size = 80  # input patch is 80x80
num_labels = 25
angle_bias = -15  # degrees; offset applied to every angle-bin centre
nm_classes = 25
nm_angles = 10
num_channels = 1  # grayscale input
batch_size = 30
patch_size = 5  # conv kernel size
kernel_size = 2  # max-pool window
depth1 = 6
depth2 = 16
depth3 = 10
F7_classes = 120
F8_classes = 84
F9_classes = nm_classes
F7_angles = 120
F8_angles = 84
F9_angles = nm_angles
# NOTE(review): the dropout keep-probabilities below are defined but not
# referenced in the inference graph in this file.
keep_prob1 = 0.5
keep_prob2_classes = 0.8
keep_prob2_angles = 0.5
# Angle-bin centres in degrees: -15, 3, 21, ... (10 bins, 18 degrees apart).
angles_list = np.asarray([i*18 + angle_bias for i in range(10)]).astype(np.float32)
# Build the inference graph once at import time; test_model() below reads
# these Variables as globals.
graph = tf.Graph()
with graph.as_default():
    # Variables(weights and biases)
    C1_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, num_channels, depth1], stddev=0.1))
    # convolution's weights are called filter in tensorflow
    # it is a tensor of shape [kernel_hight,kernel_width,in_channels,out_channels]
    C1_biases = tf.Variable(tf.zeros([depth1]))
    # S1_weights # Sub-sampling doesn't need weights and biases
    # S1_biases
    C3_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, depth1, depth2], stddev=0.1))
    C3_biases = tf.Variable(tf.constant(1.0, shape=[depth2]))
    # S4_weights
    # S4_biases
    C5_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, depth2, depth3], stddev=0.1))
    C5_biases = tf.Variable(tf.constant(1.0, shape=[depth3]))
    # S6_weights
    # S6_biases
    # Two fully-connected heads share the conv trunk: one for the 25 object
    # classes, one for the 10 angle bins.
    F7_classes_weights = tf.Variable(tf.truncated_normal([6 * 6 * depth3, F7_classes], stddev=0.1))
    F7_classes_biases = tf.Variable(tf.constant(1.0, shape=[F7_classes]))
    F7_angles_weights = tf.Variable(tf.truncated_normal([6 * 6 * depth3, F7_angles], stddev=0.1))
    F7_angles_biases = tf.Variable(tf.constant(1.0, shape=[F7_angles]))
    F8_classes_weights = tf.Variable(tf.truncated_normal([F7_classes,F8_classes], stddev=0.1))
    F8_classes_biases = tf.Variable(tf.constant(1.0, shape=[F8_classes]))
    F8_angles_weights = tf.Variable(tf.truncated_normal([F7_angles,F8_angles], stddev=0.1))
    F8_angles_biases = tf.Variable(tf.constant(1.0, shape=[F8_angles]))
    F9_classes_weights = tf.Variable(tf.truncated_normal([F8_classes,F9_classes], stddev=0.1))
    F9_classes_biases = tf.Variable(tf.constant(1.0, shape=[F9_classes]))
    F9_angles_weights = tf.Variable(tf.truncated_normal([F8_angles,F9_angles], stddev=0.1))
    F9_angles_biases = tf.Variable(tf.constant(1.0, shape=[F9_angles]))
    # Saver must be created inside the graph context so it captures the
    # Variables above for restoring from the checkpoint.
    saver = tf.train.Saver()
# Model
def test_model(data):
    """Forward pass of the two-headed CNN.

    Uses the module-level Variables defined in `graph`. `data` is expected to
    be a (batch, 80, 80, 1) float tensor — TODO confirm against the caller.
    Returns (class_logits, angle_logits).
    """
    # Trunk: three conv -> relu -> 2x2 max-pool stages (LeNet-like).
    conv = tf.nn.conv2d(data, C1_weights, [1, 1, 1, 1], padding='VALID')
    hidden = tf.nn.relu(conv + C1_biases)
    max_pool = tf.nn.max_pool(hidden,[1,kernel_size,kernel_size,1],[1,2,2,1],'VALID')
    # NOTE(review): ReLU is applied both before and after pooling; redundant
    # (ReLU is idempotent) but harmless.
    hidden = tf.nn.relu(max_pool)
    conv = tf.nn.conv2d(hidden, C3_weights, [1, 1, 1, 1], padding='VALID')
    hidden = tf.nn.relu(conv + C3_biases)
    max_pool = tf.nn.max_pool(hidden,[1,kernel_size,kernel_size,1],[1,2,2,1],'VALID')
    hidden = tf.nn.relu(max_pool)
    conv = tf.nn.conv2d(hidden,C5_weights, [1,1,1,1], padding = 'VALID')
    hidden = tf.nn.relu(conv + C5_biases)
    max_pool = tf.nn.max_pool(hidden,[1,kernel_size,kernel_size,1],[1,2,2,1],'VALID')
    hidden = tf.nn.relu(max_pool)
    # Flatten to (batch, 6*6*depth3) for the fully-connected heads.
    shape = hidden.get_shape().as_list()
    reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
    # Two parallel FC stacks (F7 -> F8 -> F9): one per prediction task.
    hidden_classes = tf.nn.relu(tf.matmul(reshape, F7_classes_weights) + F7_classes_biases)
    hidden_angles = tf.nn.relu(tf.matmul(reshape, F7_angles_weights) + F7_angles_biases)
    fc_classes = tf.matmul(hidden_classes,F8_classes_weights)
    fc_angles = tf.matmul(hidden_angles,F8_angles_weights)
    hidden_classes = tf.nn.relu(fc_classes + F8_classes_biases)
    hidden_angles = tf.nn.relu(fc_angles + F8_angles_biases)
    fc_classes = tf.matmul(hidden_classes,F9_classes_weights)
    fc_angles = tf.matmul(hidden_angles,F9_angles_weights)
    # Raw logits; softmax is applied by the caller.
    output_classes = fc_classes + F9_classes_biases
    output_angles = fc_angles + F9_angles_biases
    return output_classes, output_angles
# Create one long-lived session for the inference graph and restore the
# trained weights from the checkpoint in the working directory.
config = tf.ConfigProto()
#config.log_device_placement = True
session = tf.Session(graph=graph, config = config)
saver.restore(session, "model1.ckpt")
def accuracy_classes(predictions, labels):
    """Percentage of rows whose argmax prediction equals the argmax label."""
    correct = np.argmax(predictions, 1) == np.argmax(labels, 1)
    return 100.0 * np.sum(correct) / predictions.shape[0]
class evaluator:
    """ROS glue: subscribes to cropped depth images, runs the CNN in the
    module-level TF session, and publishes class/angle predictions."""

    def __init__(self):
        #Initialize ros publisher, subscriber
        self.pub1 = rospy.Publisher('prediction',PredictionMSG,queue_size=1)
        self.sub1 = rospy.Subscriber('cropped_box_image/numpy',numpy_msg(Floats),self.callback,queue_size=1)
        self.pub2 = rospy.Publisher('cropped_box_image/image',Image, queue_size=1)
        self.pub3 = rospy.Publisher('predicted_class', String, queue_size=1)
        self.bridge = CvBridge()
        # Endpoints of a horizontal segment (pixel offsets from the 80x80
        # patch centre) that can be rotated by the predicted angle for the
        # (currently commented-out) visualization line.
        self.pt1x = -40.0
        self.pt1y = 0.0
        self.pt2x = 40.0
        self.pt2y = 0.0
        rospy.loginfo("Initialized!")

    def callback(self,data):
        """Run the network on one incoming image and publish the results."""
        with session.as_default():
            assert tf.get_default_session() is session
            # Flat float array -> (1, 80, 80, 1); flipud matches the training
            # orientation — presumably; verify against the image publisher.
            input_image = np.flipud(data.data.reshape(image_size,image_size).astype(np.float32)).reshape(-1,image_size,image_size,1)
            out_class, out_angle = test_model(input_image)
            pre_class = tf.nn.softmax(out_class)
            pre_angle = tf.nn.softmax(out_angle).eval()
            # Expected angle: probability-weighted mean of the bin centres.
            angle = np.sum(np.multiply(pre_angle, angles_list))/np.sum(pre_angle)
            # Rank classes by softmax probability; take the top two.
            pre_dict = dict(zip(list(range(num_labels)),pre_class.eval()[0]))
            sorted_pre_dict = sorted(pre_dict.items(), key=operator.itemgetter(1))
            name1 = value2name[sorted_pre_dict[-1][0]]
            name1 = name2string[name1]
            self.pub3.publish(name1)
            #name1 = name2string[name1]
            value1 = str(sorted_pre_dict[-1][1])
            name2 = value2name[sorted_pre_dict[-2][0]]
            name2 = name2string[name2]
            value2 = str(sorted_pre_dict[-2][1])
            # Publish top-2 classes with confidences plus the estimated angle.
            pre = PredictionMSG()
            pre.name1, pre.value1, pre.name2, pre.value2, pre.angle = name1, float(value1), name2, float(value2), angle
            self.pub1.publish(pre)
            # Re-publish the (de-normalized) input patch for visualization;
            # the +0.65 offset presumably undoes training normalization — verify.
            image = ((input_image.reshape(image_size,image_size) + 0.65)*255).astype(np.uint8)
            # Rotate the reference segment by the predicted angle (unused
            # while the cv2.line call below stays commented out).
            pt1x = int(self.pt1x * math.cos(math.radians(angle)) + self.pt1y * -math.sin(math.radians(angle))) + 40
            pt1y = int(self.pt1x * math.sin(math.radians(angle)) + self.pt1y * math.cos(math.radians(angle))) + 40
            pt2x = int(self.pt2x * math.cos(math.radians(angle)) + self.pt2y * -math.sin(math.radians(angle))) + 40
            pt2y = int(self.pt2x * math.sin(math.radians(angle)) + self.pt2y * math.cos(math.radians(angle))) + 40
            #cv2.line(image,(pt1x,pt1y),(pt2x,pt2y),255,2)
            ros_image = self.bridge.cv2_to_imgmsg(image, encoding="mono8")
            self.pub2.publish(ros_image)
            sys.stdout.write(".")
            sys.stdout.flush()
# Script entry point: start the ROS node and spin until shutdown, then
# release the TF session.
if __name__ == '__main__':
    rospy.init_node('depthnet',anonymous=True)
    ev = evaluator()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down ROS node evaluate_image")
    # Always close the session once spin() returns (normal shutdown included).
    session.close()
    print("Shutting down ROS node evaluate_image")
| |
# -*- encoding: utf-8 -*-
"""
Group-by operations on an H2OFrame.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import h2o
from h2o.expr import ExprNode
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import is_type
class GroupBy(object):
    """
    A class that represents the group by operation on an H2OFrame.

    The returned groups are sorted by the natural group-by column sort.

    :param H2OFrame fr: H2OFrame that you want the group by operation to be performed on.
    :param by: by can be a column name (str) or an index (int) of a single column, or a list for multiple columns
        denoting the set of columns to group by.

    Sample usage:

    >>> my_frame = ...  # some existing H2OFrame
    >>> grouped = my_frame.group_by(by=["C1", "C2"])
    >>> grouped.sum(col="X1", na="all").mean(col="X5", na="all").max()
    >>> grouped.get_frame()

    Any number of aggregations may be chained together in this manner. Note that once the aggregation operations
    are complete, calling the GroupBy object with a new set of aggregations will yield no effect. You must generate
    a new GroupBy object in order to apply a new aggregation on it. In addition, certain aggregations are only
    defined for numerical or categorical columns. An error will be thrown for calling aggregation on the wrong
    data types.

    If no arguments are given to the aggregation (e.g. "max" in the above example), then it is assumed that the
    aggregation should apply to all columns but the group by columns.

    All GroupBy aggregations take parameter na, which controls treatment of NA values during the calculation.
    It can be one of:

    - "all" (default) -- any NAs are used in the calculation as-is; which usually results in the final result
      being NA too.
    - "ignore" -- NA entries are not included in calculations, but the total number of entries is taken as the
      total number of rows. For example, mean([1, 2, 3, nan], na="ignore") will produce 1.5. In addition,
      median([1, 2, 3, nan], na="ignore") will first sort the row as [nan, 1, 2, 3]. Next, the median is the
      mean of the two middle values in this case producing a median of 1.5.
    - "rm" entries are skipped during the calculations, reducing the total effective count of entries. For
      example, mean([1, 2, 3, nan], na="rm") will produce 2. The median in this case will be 2 as the middle
      value.

    Variance (var) and standard deviation (sd) are the sample (not population) statistics.
    """

    def __init__(self, fr, by):
        """
        Return a new ``GroupBy`` object using the H2OFrame specified in fr and the desired grouping columns
        specified in by. The original H2O frame will be stored as member _fr. Information on the new grouping
        of the original frame is described in a new H2OFrame in member frame.

        The returned groups are sorted by the natural group-by column sort.

        :param H2OFrame fr: H2OFrame that you want the group by operation to be performed on.
        :param by: can be a column name (str) or an index (int) of a single column, or a list for multiple columns
            denoting the set of columns to group by.
        """
        self._fr = fr  # IN
        self._by = by  # IN
        self._aggs = {}  # IN
        self._res = None  # OUT
        # Normalize `by` to a list of column indices.
        if is_type(by, str):
            # Single column name -> resolve to its index.
            self._by = [self._fr.names.index(by)]
        elif is_type(by, list, tuple):
            # Mixed list of names/indices -> resolve names, keep ints as-is.
            self._by = [self._fr.names.index(b) if is_type(b, str) else b for b in by]
        else:
            # Anything else (e.g. a single int index) is wrapped in a list.
            self._by = [self._by]

    def min(self, col=None, na="all"):
        """
        Calculate the minimum of each column specified in col for each group of a GroupBy object. If no col is
        given, compute the minimum among all numeric columns other than those being grouped on.

        :param col: col can be None (default), a column name (str) or an index (int) of a single column, or a
            list for multiple columns denoting the set of columns to group by.
        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        return self._add_agg("min", col, na)

    def max(self, col=None, na="all"):
        """
        Calculate the maximum of each column specified in col for each group of a GroupBy object. If no col is
        given, compute the maximum among all numeric columns other than those being grouped on.

        :param col: col can be None (default), a column name (str) or an index (int) of a single column, or a
            list for multiple columns
        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        return self._add_agg("max", col, na)

    def mean(self, col=None, na="all"):
        """
        Calculate the mean of each column specified in col for each group of a GroupBy object. If no col is
        given, compute the mean among all numeric columns other than those being grouped on.

        :param col: col can be None (default), a column name (str) or an index (int) of a single column, or a
            list for multiple columns
        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        return self._add_agg("mean", col, na)

    def count(self, na="all"):
        """
        Count the number of rows in each group of a GroupBy object.

        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        # Backend op name is "nrow"; _add_agg special-cases the col for it.
        return self._add_agg("nrow", None, na)

    def sum(self, col=None, na="all"):
        """
        Calculate the sum of each column specified in col for each group of a GroupBy object. If no col is given,
        compute the sum among all numeric columns other than those being grouped on.

        :param col: col can be None (default), a column name (str) or an index (int) of a single column, or a
            list for multiple columns
        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        return self._add_agg("sum", col, na)

    def sd(self, col=None, na="all"):
        """
        Calculate the standard deviation of each column specified in col for each group of a GroupBy object. If no
        col is given, compute the standard deviation among all numeric columns other than those being grouped on.

        :param col: col can be None (default), a column name (str) or an index (int) of a single column, or a
            list for multiple columns
        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        return self._add_agg("sdev", col, na)

    def var(self, col=None, na="all"):
        """
        Calculate the variance of each column specified in col for each group of a GroupBy object. If no col is
        given, compute the variance among all numeric columns other than those being grouped on.

        :param col: col can be None (default), a column name (str) or an index (int) of a single column, or a
            list for multiple columns
        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        return self._add_agg("var", col, na)

    def ss(self, col=None, na="all"):
        """
        Calculate the sum of squares of each column specified in col for each group of a GroupBy object. If no col
        is given, compute the sum of squares among all numeric columns other than those being grouped on.

        :param col: col can be None (default), a column name (str) or an index (int) of a single column, or a
            list for multiple columns
        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        return self._add_agg("sumSquares", col, na)

    def mode(self, col=None, na="all"):
        """
        Calculate the mode of each column specified in col for each group of a GroupBy object. If no col is given,
        compute the mode among all categorical columns other than those being grouped on.

        :param col: col can be None (default), a column name (str) or an index (int) of a single column, or a
            list for multiple columns
        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        return self._add_agg("mode", col, na)

    def median(self, col=None, na="all"):
        """
        Calculate the median of each column specified in col for each group of a GroupBy object. If no col is given,
        compute the median among all numeric columns other than those being grouped on.

        :param col: col can be None (default), a column name (str) or an index (int) of a single column, or a
            list for multiple columns
        :param str na: one of 'rm', 'ignore' or 'all' (default).
        :return: the original GroupBy object (self), for ease of constructing chained operations.
        """
        return self._add_agg("median", col, na)

    @property
    def frame(self):
        """
        same as get_frame().
        """
        return self.get_frame()

    def get_frame(self):
        """
        Return the resulting H2OFrame containing the result(s) of aggregation(s) of the group by.

        The number of rows denote the number of groups generated by the group by operation.

        The number of columns depend on the number of aggregations performed, the number of columns specified in
        the col parameter. Generally, expect the number of columns to be

        (len(col) of aggregation 0 + len(col) of aggregation 1 +...+ len(col) of aggregation n) x
        (number of groups of the GroupBy object) +1 (for group-by group names).

        Note:

        - the count aggregation only generates one column;
        - if col is a str or int, len(col) = 1.
        """
        if self._res is None:
            # Lazily build and cache the result: flatten the accumulated
            # [op, col, na] triples into a single "GB" expression.
            aggs = []
            for k in self._aggs: aggs += (self._aggs[k])
            self._res = h2o.H2OFrame._expr(expr=ExprNode("GB", self._fr, self._by, *aggs))
        return self._res

    def _add_agg(self, op, col, na):
        # Record one aggregation. Recurses to expand col=None (all non-group
        # columns) and list inputs into per-column entries.
        if op == "nrow": col = 0
        if col is None:
            for i in range(self._fr.ncol):
                if i not in self._by: self._add_agg(op, i, na)
            return self
        elif is_type(col, str):
            cidx = self._fr.names.index(col)
        elif is_type(col, int):
            cidx = col
        elif is_type(col, list, tuple):
            for i in col:
                self._add_agg(op, i, na)
            return self
        else:
            raise ValueError("col must be a column name or index.")
        # Keyed by "op_colname", so repeating the same op on the same column
        # overwrites (dedupes) rather than duplicating the aggregation.
        name = "{}_{}".format(op, self._fr.names[cidx])
        self._aggs[name] = [op, cidx, na]
        return self

    def __repr__(self):
        # NOTE(review): prints the summary as a side effect and returns the
        # empty string, rather than returning the representation.
        print("GroupBy: ")
        print("  Frame: {}; by={}".format(self._fr.frame_id, str(self._by)))
        print("  Aggregates: {}".format(str(self._aggs.keys())))
        print("*** Use get_frame() to get groupby frame ***")
        return ""
| |
from PIL import Image
from PIL import ImageDraw
import math, random, sys, codecs
from database import GamesDatabase
from imglib import createImages
from imglib import compareImages
from imglib import drawSpiral
from sklearn.decomposition import PCA
from shapely.geometry import Point
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class Screen(object):
    """One game screenshot: feature vector, image file location and cluster info."""

    # Class-level defaults; instances overwrite most of these in __init__.
    code = ""
    cluster = 0
    img = None
    values = None
    distance = -1
    goid = None
    platform = -1

    def __init__(self, values, path, code, gid, cls=None, tks=None):
        """Store the feature vector and image bookkeeping; no I/O happens here.

        values: numeric feature vector for this screenshot.
        path:   directory containing the image file.
        code:   image file name stem (file is <path><code>.jpg).
        gid:    owning game object id.
        cls:    cluster id (optional).
        tks:    tokens (optional).
        """
        self.values = values
        self.tokens = tks
        self.goid = gid
        self.path = path  # Path for image file
        self.code = code  # Code to image
        self.cluster = cls  # Cluster ID

    def createImage(self, thumb_size):
        """Load the screenshot from disk and keep a thumbnail copy on self.img."""
        try:
            temp = Image.open(self.path + self.code + ".jpg")
            self.img = temp.copy()
            temp.close()
            self.img.thumbnail((thumb_size, thumb_size), Image.ANTIALIAS)
        except Exception:
            # Bug fix: was a bare `except:` (which also traps SystemExit/
            # KeyboardInterrupt) and a py2-only print statement. Loading stays
            # best-effort: failures leave self.img as-is.
            print("no create image")

    def compare(self, avgValues):
        """Set self.distance to the Euclidean distance from avgValues."""
        dist = 0
        for i in range(len(self.values)):
            dist += (avgValues[i] - self.values[i]) ** 2
        self.distance = math.sqrt(dist)

    def delImage(self):
        """Release the thumbnail image handle."""
        self.img.close()
class Centroid(object):
    """A k-means centroid read from a TSV line, plus the Screen samples
    assigned to it and the rendering helpers that plot them.

    NOTE(review): the class-level ``values``/``images`` lists are mutable
    class attributes; __init__ rebinds ``images`` per instance but relies on
    that rebinding -- confirm no instance shares the class-level lists.
    """
    values = []          # centroid coordinates (floats)
    grid = None          # unused here
    images = []          # Screen objects assigned to this centroid
    canvas = None        # PIL canvas built by plotManovich()
    average_image = None # set by organizeByAverageGames()
    image_width = 0
    image_height = 0
    _id = -1             # centroid index
    def __init__(self,line,c):
        """Parse one tab-separated line of floats as centroid *c*."""
        self._id = c
        self.values = line.split('\t')
        for i in range(len(self.values)):
            self.values[i] = float(self.values[i])
        self.images = []
    def add(self,im):
        """Assign a Screen to this centroid."""
        #i = im.thumbnail((100,100),Image.ANTIALIAS)
        self.images.append(im)
    def distance(self, ivalues):
        """Euclidean distance from this centroid to the vector *ivalues*."""
        s = 0
        for i in range(0,len(self.values)):
            s += (float(ivalues[i])-float(self.values[i]))**2
        return math.sqrt(s)
    def findDistances(self):
        """Set img.distance on every assigned Screen.

        NOTE(review): ``val``/``ind``/``i`` are dead locals (nothing reads
        them) -- leftover from an earlier min-search version.
        """
        val = sys.float_info.max
        ind = -1
        i = 0
        for img in self.images:
            v = self.distance(img.values)
            img.distance = v
    def defineIndex(self, size):
        """Return the smallest i such that i*i >= size (square grid side)."""
        print "s "+str(size)
        d = False
        i = 1
        while not d:
            if size <= i**2:
                d = True
            else:
                i += 1
        print "i "+str(i)
        return i
    def plotManovich(self,colorsByCol):
        """Render this centroid's images as a Manovich-style PCA scatter grid
        and save it to "<id>.png".

        The feature vectors are projected to 2D with PCA, binned onto a
        100x100 grid, and each image is pasted into the nearest free cell.
        NOTE(review): uses the deprecated pandas APIs DataFrame.as_matrix()
        and DataFrame.append() -- pinned to an old pandas; confirm version.
        """
        if len(self.images) == 0:
            return
        if len(self.images) == 1:
            # Degenerate cluster: just save the single thumbnail.
            im = Image.open(self.images[0].path+self.images[0].code+".jpg")
            im.thumbnail((100,100),Image.ANTIALIAS)
            im.save(str(self._id)+".png")
            return
        print colorsByCol
        tmp = []
        for i in self.images:
            tmp.append(i.values)
        df = pd.DataFrame(tmp)
        p = PCA(n_components=2)
        X = df.as_matrix()
        p.fit(X)
        subspace = pd.DataFrame(p.fit_transform(X),columns=["x","y"])
        num_bins = 100
        # Append two sentinel points at 1.5x the extremes so the binning
        # leaves a margin; they are dropped again after pd.cut below.
        x = [subspace.x.min()*1.5,subspace.x.max()*1.5]
        y = [subspace.y.min()*1.5,subspace.y.max()*1.5]
        tmp = pd.DataFrame(x,columns=["x"])
        tmp["y"] = y
        subspace = subspace.append(tmp)
        subspace['x_bin'] = pd.cut(subspace['x'],num_bins,labels=False)
        subspace['y_bin'] = pd.cut(subspace['y'],num_bins,labels=False)
        subspace = subspace[:-2]
        factor = 1
        subspace["x_grid"] = subspace.x_bin * factor
        subspace["y_grid"] = subspace.y_bin * factor
        centroid_point = []
        n = len(subspace.index)
        for j in range(n):
            tx = subspace.x_grid.loc[j].astype(float)
            ty = subspace.y_grid.loc[j].astype(float)
            centroid_point.append(Point(tx,ty))
        subspace['centroid_point'] = centroid_point
        grid_side = num_bins * factor
        # All candidate cell coordinates of the square grid.
        x,y = range(grid_side) * grid_side, np.repeat(range(grid_side),grid_side)
        grid_list = pd.DataFrame(x,columns=['x'])
        grid_list['y'] = y
        point = []
        n = len(grid_list.index)
        for i in range(n):
            point.append(Point(grid_list.x.loc[i],grid_list.y.loc[i]))
        grid_list['point'] = point
        open_grid = list(grid_list.point)
        centroids = list(subspace.centroid_point)
        collection = pd.DataFrame()
        n = len(collection.index)
        local_path = []
        cls = []
        dst = []
        for i in self.images:
            local_path.append(i.path+i.code+".jpg")
            cls.append(i.cluster)
            dst.append(i.distance)
        collection['local_path'] = local_path
        collection['clusters'] = cls
        collection['cluster_dist'] = dst
        thumb_side = 100
        px_w = thumb_side * grid_side
        px_h = thumb_side * grid_side
        self.canvas = Image.new('RGBA',(px_w,px_h),(50,50,50,0))
        n = len(subspace.index)
        print n,len(self.images),self._id
        for i in self.images:
            # NOTE(review): .loc[0] always takes the FIRST projected point,
            # not the point of image ``i`` -- looks like a bug; confirm.
            centroid = subspace.centroid_point.loc[0]
            try:
                # again, a workaround for indexing difference
                candidates = collection[collection.clusters==i.cluster]
                candidates.sort_values("cluster_dist",inplace=True)
                best = candidates.iloc[0]
                im = Image.open(best.local_path)
                im.thumbnail((thumb_side,thumb_side),Image.ANTIALIAS)
                i.img = im
                self._paintCols(5,colorsByCol,i,2)
                closest_open = min(open_grid,key=lambda x: centroid.distance(x))
                x = int(closest_open.x) * thumb_side
                y = int(closest_open.y) * thumb_side
                self.canvas.paste(im,(x,y))
                idx = collection[collection.local_path==best.local_path].index
                collection.drop(idx,inplace=True)
                open_grid.remove(closest_open)
            except:
                print "cluster empty"
        self.canvas.save(str(self._id)+".png")
    def filterGames(self,maxGames,thumb=100):
        """Deduplicate self.images by game id, keep at most maxGames of them,
        write "nomes<id>.txt" with game names, and load their thumbnails.

        When maxGames != -1 the screen code is swapped for the game's
        average-RGB image when one exists in the database.
        """
        f = []
        s = []
        for i in self.images:
            if i.goid not in f:
                f.append(i.goid)
                s.append(i.code)
        if maxGames != -1:
            lim = min(maxGames,len(f))
        else:
            lim = len(f)
        gdb = GamesDatabase()
        self.images = self.images[:lim]
        arq = codecs.open('nomes'+str(self._id)+".txt",'w',"utf-8")
        for i in range(lim):
            print f[i]
            if maxGames != -1:
                c = gdb.getImageAverage(f[i])
                if c!= None:
                    self.images[i].code = "/avg_rgb/"+c
                else:
                    self.images[i].code = s[i]
            g = gdb.getGameByObject(f[i])
            w = g['name'].replace(u'\xe3', u' ')
            arq.write(w+"\t"+self.images[i].code+"\t"+str(g['_id'])+'\n')
            self.images[i].createImage(thumb)
        arq.close()
    def selectGames(self,target,maxGames,thumb):
        """Not implemented yet; placeholder kept for the planned API."""
        for i in self.images:
            pass
    def organizeByAverageGames(self,thumb=100):
        # each centroid should fetch its average image from the database
        """Build this centroid's average image from its games' thumbnails and
        save it as "saida_media_<id>.png"."""
        self.findDistances()
        self.images.sort(key=lambda image: image.distance)
        self.filterGames(-1,thumb)
        temp = []
        for i in range(len(self.images)):
            temp.append(self.images[i].img)
        # compute the average using those average images
        if len(temp)>0:
            self.average_image = createImages(temp)
            self.average_image.save("saida_media_"+str(self._id)+".png")
        # pull the kj's toward this average image
        # when plotting, paint the main average plus the kj's average images
    def organizeByGames(self,colorsByCol,maxGames):
        """Sort by distance, keep one screen per game (up to maxGames) and
        plot the Manovich grid."""
        self.findDistances()
        print "tot.images: "+str(len(self.images))
        self.images.sort(key=lambda image: image.distance)
        self.filterGames(maxGames)
        self.plotManovich(colorsByCol)
    def organize(self,colorsByCol,maxImgs=-1):
        """Plot the Manovich grid over ALL assigned screens (no dedup).

        maxImgs is currently ignored (the truncation below is commented out).
        """
        self.findDistances()
        self.plotManovich(colorsByCol)
        #print "tot.images: "+str(len(self.images))
        #sorted(self.images, key=lambda image: Screen.distance)
        #if maxImgs != -1:
        #    self.images = self.images[:maxImgs]
    def _paintCols(self,size,colorsByCol,s,offset):
        """Draw one colored dot per tracked column on screen *s*'s thumbnail,
        using the per-column value->RGB map in colorsByCol."""
        px = 0
        ws = size
        d = ImageDraw.Draw(s.img)
        for i in colorsByCol.keys():
            color = colorsByCol[i][s.tokens[i]]
            d.ellipse((px, 0, px+ws, ws), fill = color, outline =color)
            px += (ws + offset)
        del d
class ImageCreator(object):
    """Loads centroids and screen samples from files, assigns every screen to
    its nearest centroid, and drives the various per-cluster renderings.

    NOTE(review): ``centroids``/``images``/``colorsByCol`` are CLASS-level
    mutable attributes that init() mutates in place, so every ImageCreator
    instance shares them. Safe only while a single instance exists (as in
    main() below) -- confirm before creating more than one.
    """
    centroids = []    # Centroid objects, indexed by cluster id
    images = []       # all accepted Screen objects
    colorsByCol = {}  # column index -> {value: (r, g, b)}
    genres = None     # optional genre whitelist (see setGenres)
    def initNoClasses(self,centroids,clusters,path, colsPrint,colsDist=None):
        """Same as init() but without a precomputed class-assignment file."""
        self.init(centroids,None,clusters,path, colsPrint,colsDist)
    def setGenres(self, g):
        """Restrict init() to games whose genre list intersects *g*."""
        self.genres = g
    def getClass(self,vls2):
        """Return (index, distance) of the centroid nearest to vector vls2."""
        mx = sys.float_info.max
        ind = -1
        ic = 0
        for c in self.centroids:
            i = 0
            tot = 0
            for v in range(len(vls2)):
                tot += (float(vls2[v]) - float(c.values[i]))**2
                i += 1
            tot = math.sqrt(tot)
            if tot < mx:
                mx = tot
                ind = ic
            ic += 1
        return ind,mx
    def breakLine(self,line,colsDist):
        """Split a TSV line; return (all tokens, floats of columns listed in
        colsDist)."""
        tokens = line.split('\t')
        vls = []
        i = 0
        for t in tokens:
            if i in colsDist:
                vls.append(float(t))
            i+=1
        return tokens,vls
    def rep(self,dic):
        """Replace non-breaking spaces in every entry, in place.

        NOTE(review): despite the name, the integer indexing ``dic[c]``
        means this expects a LIST of strings (the genres list), not a dict.
        """
        c = 0
        for i in dic:
            s = i.replace(u'\xa0', u' ')
            dic[c] = s
            c += 1
    def init(self,centroids,cls,samples,path, colsPrint,colsDist):
        """Load centroids and samples, classify each sample, and attach the
        resulting Screens to their centroids.

        centroids: TSV file of centroid vectors, one per line.
        cls:       optional file of precomputed class ids (one int per line).
        samples:   TSV file of samples; first line is a header and skipped.
        path:      directory holding the screenshot JPEGs.
        colsPrint: column indexes whose values get color dots (addValue).
        colsDist:  column indexes forming the distance feature vector.
        Side effect: writes 'clusters_para_tese.txt' with one
        "<cluster> <code> <gid>" line per accepted sample.
        """
        gdb = GamesDatabase()
        # Read the centroids
        arq = open(centroids, 'r')
        lines = arq.readlines()
        code = 0
        for line in lines:
            c = Centroid(line,code)
            self.centroids.append(c)
            code += 1
        arq.close()
        if cls != None:
            # Read the classes assigned to the examples
            arq = open(cls, 'r')
            classes = []
            lines = arq.readlines()
            for line in lines:
                classes.append(int(line))
            arq.close()
        # Create the screens from the samples file
        arq = open(samples, 'r')
        arq.readline()
        lines = arq.readlines()
        i = 0
        out = open('clusters_para_tese.txt','w')
        for line in lines:
            img = None
            if cls != None:
                img = Screen(line,path,classes[i])
            else:
                tokens,vls = self.breakLine(line,colsDist)
                code= tokens[1]
                gid = tokens[0]
                # Applying the genre filter
                gm = gdb.getGameByObject(gid)
                if gm.has_key('genres'):
                    self.rep(gm['genres'])
                    if self.genres != None:
                        # Keep the sample only if one of its genres is
                        # whitelisted; classify at most once (break).
                        for g in self.genres:
                            if any(ig == g for ig in gm['genres']):
                                c,mx = self.getClass(vls)
                                out.write(str(c)+" ")
                                out.write(code+" ")
                                out.write(gid+"\n")
                                img = Screen(vls,path,code,gid,c,tokens)
                                img.distance = mx
                                self.addValue(img.tokens,colsPrint)
                                self.images.append(img)
                                print i
                                i += 1
                                break
                    else:
                        # No genre filter: accept every sample with genres.
                        c,mx = self.getClass(vls)
                        out.write(str(c)+" ")
                        out.write(code+" ")
                        out.write(gid+"\n")
                        img = Screen(vls,path,code,gid,c,tokens)
                        img.distance = mx
                        self.addValue(img.tokens,colsPrint)
                        self.images.append(img)
                        print i
                        i += 1
        arq.close()
        out.close()
        # Attach the screens to their centroids
        for i in self.images:
            self.centroids[i.cluster].add(i)
    def addValue(self,vls,colsPrint):
        """Assign a random RGB color to each newly seen value of each painted
        column (stable for repeated values within this run)."""
        # For each column to be painted
        for c in colsPrint:
            # If the column has not been added yet
            if c not in self.colorsByCol.keys():
                # add the column with no values
                self.colorsByCol[c] = {}
            # if this column value has not been added yet
            if vls[c] not in self.colorsByCol[c].keys():
                self.colorsByCol[c][vls[c]] = (random.randint(0,255),random.randint(0,255),random.randint(0,255))
    def createAverageImageByGames(self,maxImgs,thumb=100):
        """Build every centroid's average image, then the global average
        "saida_media.png" from those.

        NOTE(review): maxImgs is accepted but never used here.
        """
        temp = []
        for c in self.centroids:
            c.organizeByAverageGames(thumb)
            if c.average_image != None:
                temp.append(c.average_image)
        if len(temp)>0:
            average_image = createImages(temp)
            average_image.save("saida_media.png")
    def createImagesByGames(self,rgb,offset,maxGames=-1):
        """Plot one Manovich grid per centroid, deduplicated per game.

        NOTE(review): colorsByCol is overwritten with a hard-coded
        decade->color map for column 10; rgb/offset are currently unused.
        """
        i = 0
        self.colorsByCol = {10: {'1990': (207, 25, 49), '2000': (135, 82, 128), '2010': (57, 75, 79), '1980': (79, 36, 118), '1970': (103, 249, 226)}}
        for c in self.centroids:
            c.organizeByGames(self.colorsByCol,maxGames)
            #c.drawSpiral(rgb,offset,self.colorsByCol,"centro"+str(i)+".png")
            #i += 1
    def createImages(self,rgb,offset,maxImgs=-1):
        """Plot one Manovich grid per centroid over ALL screens (no dedup).

        Same hard-coded decade color map caveat as createImagesByGames.
        """
        i = 0
        self.colorsByCol = {10: {'1990': (207, 25, 49), '2000': (135, 82, 128), '2010': (57, 75, 79), '1980': (79, 36, 118), '1970': (103, 249, 226)}}
        for c in self.centroids:
            c.organize(self.colorsByCol,maxImgs)
            #c.drawSpiral(rgb,offset,self.colorsByCol,"centro"+str(i)+".png")
            #i += 1
    def createImage(self,offset,filename,dim,rgb):
        """Tile the per-centroid canvases into one dim[0] x dim[1] mosaic and
        save it to *filename* with *offset* pixels of padding."""
        px = 0
        py = offset
        height = 0
        width = 0
        hmax = 0
        wmax =0
        i = 0
        # First pass: find the largest canvas so every cell is equal-sized.
        for r in range(dim[0]):
            tw = 0
            th = 0
            for c in range(dim[1]):
                tw += self.centroids[i].image_width+offset
                temp = self.centroids[i].image_height+offset
                if temp > th:
                    th = temp
                if self.centroids[i].image_width>wmax:
                    wmax = self.centroids[i].image_width
                if self.centroids[i].image_height>hmax:
                    hmax = self.centroids[i].image_height
                i += 1
            height += th
            if tw > width:
                width = tw
        out = Image.new("RGB", (wmax*dim[0]+((dim[0]+1)*offset),hmax*dim[1]+((dim[1]+1)*offset)), rgb)
        i = 0
        # Second pass: paste each canvas into its grid cell.
        for r in range(dim[0]):
            px = offset
            for c in range(dim[1]):
                out.paste(self.centroids[i].canvas,(px,py))
                px += wmax+offset
                i+=1
            py += hmax+offset
        out.save(filename)
    def selectGames(self,maxGames,thumb=100):
        """Pick the maxGames games closest to the mean of all centroids,
        write 'jogos.txt' with their names, and draw them as a spiral
        ('jogos.png') using each game's average-RGB image."""
        allImages = []
        centers = []
        for c in self.centroids:
            allImages = allImages + c.images
            centers.append(c.values)
        a = np.array(centers)
        avgCentroids = np.mean(a,axis=0)
        print avgCentroids
        pass
        filtered = []
        for i in allImages:
            i.compare(avgCentroids)
            filtered.append(i)
        filtered.sort(key=lambda image: image.distance)
        # Keep only the first (closest) screen of each distinct game.
        f = []
        g = []
        for i in filtered:
            if i.goid not in g:
                f.append(i)
                g.append(i.goid)
        lim = min(maxGames,len(f))
        gdb = GamesDatabase()
        h = []
        arq = codecs.open('jogos.txt','w',"utf-8")
        for i in range(lim):
            g = gdb.getGameByObject(f[i].goid)
            print f[i].values
            w = g['name'].replace(u'\xe3', u' ')
            arq.write(w+"\t"+str(g['_id'])+'\n')
            c = gdb.getImageAverage(f[i].goid)
            f[i].code = "/avg_rgb/"+c
            f[i].createImage(thumb)
            h.append(f[i])
        arq.close()
        drawSpiral((128,128,128),h,50,'jogos.png',thumb)
    def _paintCols(self,size,img,tokens,offset):
        """Draw one colored dot per tracked column directly on *img* (same
        scheme as Centroid._paintCols, but takes the image and tokens)."""
        px = 0
        ws = size
        d = ImageDraw.Draw(img)
        for i in self.colorsByCol.keys():
            color = self.colorsByCol[i][tokens[i]]
            d.ellipse((px, 0, px+ws, ws), fill = color, outline =color)
            px += (ws + offset)
        del d
#5795 examples
def main():
    """Entry point: load the thesis dataset and render the 10 games closest
    to the centroid mean as 600px thumbnails (see selectGames).

    The commented lines below are alternative, previously used pipelines
    (genre filters, Weka/SBGames runs, Galloway figures) kept for reference.
    """
    ic = ImageCreator()
    #ic.setGenres(['Puzzle','Artgame','Compilation'])
    #ic.setGenres(['Adventure'])
    #ic.setGenres(['Action'])
    #ic.setGenres(['Strategy','Tactics','Role-Playing (RPG)','Simulation'])
    #ic.setGenres(['Puzzle','Artgame'])
    #Thesis usage
    ic.initNoClasses('clusters_tese.txt','nova4_v2.csv','/Users/jrbitt/Dropbox/full2/',[10],[4,5,6,7,8,9,12])
    #Weka SBGames usage
    #ic.initNoClasses('centroides_galloway.txt','nova3.csv','/Users/jrbitt/Dropbox/full2/',[10],##[4,5,6,7,8,9])
    #ic.plot()
    #Used to build the Galloway images
    #ic.initNoClasses('centroides_galloway.txt','exemplo5.csv','/Users/jrbitt/Dropbox/full2/',[23])
    #ic.init('centroides_shooter_25.txt','classes_shooter_25.txt','base_shooter.csv','/Users/jrbitt/gamesresearch/games/games/spiders/screens/full/',[0])
    #Generate the images
    #ic.createImages((128,128,128),10)
    #Generate images for each cluster using the game's average image
    #ic.createImagesByGames((128,128,128),10,25)
    #Generate one image of the k clusters
    #ic.createAverageImageByGames(10,600)
    ic.selectGames(10,600)
    #ic.createImage(50,"clusters_galloway.png",(3,1),(128,128,128))
if __name__ == '__main__': main()
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the utility functions used by the placement API."""
import fixtures
from oslo_middleware import request_id
import webob
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement import util
from nova import objects
from nova import test
from nova.tests import uuidsentinel
class TestCheckAccept(test.NoDBTestCase):
    """Confirm behavior of util.check_accept."""
    @staticmethod
    @util.check_accept('application/json', 'application/vnd.openstack')
    def handler(req):
        """Fake handler to test decorator."""
        return True
    def test_fail_no_match(self):
        """A single non-matching Accept type raises 406 with a clear message."""
        req = webob.Request.blank('/')
        req.accept = 'text/plain'
        error = self.assertRaises(webob.exc.HTTPNotAcceptable,
                                  self.handler, req)
        self.assertEqual(
            'Only application/json, application/vnd.openstack is provided',
            str(error))
    def test_fail_complex_no_match(self):
        """A multi-type Accept header with no acceptable entry raises 406."""
        req = webob.Request.blank('/')
        req.accept = 'text/html;q=0.9,text/plain,application/vnd.aws;q=0.8'
        error = self.assertRaises(webob.exc.HTTPNotAcceptable,
                                  self.handler, req)
        self.assertEqual(
            'Only application/json, application/vnd.openstack is provided',
            str(error))
    def test_success_no_accept(self):
        """No Accept header at all is treated as acceptable."""
        req = webob.Request.blank('/')
        self.assertTrue(self.handler(req))
    def test_success_simple_match(self):
        """An exact Accept match passes through to the handler."""
        req = webob.Request.blank('/')
        req.accept = 'application/json'
        self.assertTrue(self.handler(req))
    def test_success_complex_any_match(self):
        """A wildcard (*/*) entry in Accept matches any provided type."""
        req = webob.Request.blank('/')
        req.accept = 'application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
        self.assertTrue(self.handler(req))
    def test_success_complex_lower_quality_match(self):
        """A matching type is accepted even at a lower quality value."""
        req = webob.Request.blank('/')
        req.accept = 'application/xml;q=0.9,application/vnd.openstack;q=0.8'
        self.assertTrue(self.handler(req))
class TestExtractJSON(test.NoDBTestCase):
    """Confirm util.extract_json parses and schema-validates request bodies."""
    # Although the intent of this test class is not to test that
    # schemas work, we may as well use a real one to ensure that
    # behaviors are what we expect.
    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "uuid": {"type": "string", "format": "uuid"}
        },
        "required": ["name"],
        "additionalProperties": False
    }
    def test_not_json(self):
        """A plain non-JSON string is rejected as malformed."""
        error = self.assertRaises(webob.exc.HTTPBadRequest,
                                  util.extract_json,
                                  'I am a string',
                                  self.schema)
        self.assertIn('Malformed JSON', str(error))
    def test_malformed_json(self):
        """Syntactically broken JSON is rejected as malformed."""
        error = self.assertRaises(webob.exc.HTTPBadRequest,
                                  util.extract_json,
                                  '{"my bytes got left behind":}',
                                  self.schema)
        self.assertIn('Malformed JSON', str(error))
    def test_schema_mismatch(self):
        """Valid JSON missing a required property fails validation."""
        error = self.assertRaises(webob.exc.HTTPBadRequest,
                                  util.extract_json,
                                  '{"a": "b"}',
                                  self.schema)
        self.assertIn('JSON does not validate', str(error))
    def test_type_invalid(self):
        """A property of the wrong JSON type fails validation."""
        error = self.assertRaises(webob.exc.HTTPBadRequest,
                                  util.extract_json,
                                  '{"name": 1}',
                                  self.schema)
        self.assertIn('JSON does not validate', str(error))
    def test_format_checker(self):
        """The "uuid" format checker is enforced, not just the type."""
        error = self.assertRaises(webob.exc.HTTPBadRequest,
                                  util.extract_json,
                                  '{"name": "hello", "uuid": "not a uuid"}',
                                  self.schema)
        self.assertIn('JSON does not validate', str(error))
    def test_no_additional_properties(self):
        """Properties outside the schema are rejected."""
        error = self.assertRaises(webob.exc.HTTPBadRequest,
                                  util.extract_json,
                                  '{"name": "hello", "cow": "moo"}',
                                  self.schema)
        self.assertIn('JSON does not validate', str(error))
    def test_valid(self):
        """A conforming document is returned as a parsed dict."""
        data = util.extract_json(
            '{"name": "cow", '
            '"uuid": "%s"}' % uuidsentinel.rp_uuid,
            self.schema)
        self.assertEqual('cow', data['name'])
        self.assertEqual(uuidsentinel.rp_uuid, data['uuid'])
class TestJSONErrorFormatter(test.NoDBTestCase):
    """Confirm behavior of util.json_error_formatter."""
    def setUp(self):
        super(TestJSONErrorFormatter, self).setUp()
        self.environ = {}
        # TODO(jaypipes): Remove this when we get more than a single version
        # in the placement API. The fact that we only had a single version was
        # masking a bug in the utils code.
        _versions = [
            '1.0',
            '1.1',
        ]
        mod_str = 'nova.api.openstack.placement.microversion.VERSIONS'
        self.useFixture(fixtures.MonkeyPatch(mod_str, _versions))
    def test_status_to_int_code(self):
        """The textual WSGI status is reduced to its integer code."""
        body = ''
        status = '404 Not Found'
        title = ''
        result = util.json_error_formatter(
            body, status, title, self.environ)
        self.assertEqual(404, result['errors'][0]['status'])
    def test_strip_body_tags(self):
        """HTML markup in the body is stripped from the error detail."""
        body = '<h1>Big Error!</h1>'
        status = '400 Bad Request'
        title = ''
        result = util.json_error_formatter(
            body, status, title, self.environ)
        self.assertEqual('Big Error!', result['errors'][0]['detail'])
    def test_request_id_presence(self):
        """request_id appears in the error iff it is in the WSGI environ."""
        body = ''
        status = '400 Bad Request'
        title = ''
        # no request id in environ, none in error
        result = util.json_error_formatter(
            body, status, title, self.environ)
        self.assertNotIn('request_id', result['errors'][0])
        # request id in environ, request id in error
        self.environ[request_id.ENV_REQUEST_ID] = 'stub-id'
        result = util.json_error_formatter(
            body, status, title, self.environ)
        self.assertEqual('stub-id', result['errors'][0]['request_id'])
    def test_microversion_406_handling(self):
        """min/max version info is added only for microversion-caused 406s."""
        body = ''
        status = '400 Bad Request'
        title = ''
        # Not a 406, no version info required.
        result = util.json_error_formatter(
            body, status, title, self.environ)
        self.assertNotIn('max_version', result['errors'][0])
        self.assertNotIn('min_version', result['errors'][0])
        # A 406 but not because of microversions (microversion
        # parsing was successful), no version info
        # required.
        status = '406 Not Acceptable'
        version_obj = microversion.parse_version_string('2.3')
        self.environ[microversion.MICROVERSION_ENVIRON] = version_obj
        result = util.json_error_formatter(
            body, status, title, self.environ)
        self.assertNotIn('max_version', result['errors'][0])
        self.assertNotIn('min_version', result['errors'][0])
        # Microversion parsing failed, status is 406, send version info.
        del self.environ[microversion.MICROVERSION_ENVIRON]
        result = util.json_error_formatter(
            body, status, title, self.environ)
        self.assertEqual(microversion.max_version_string(),
                         result['errors'][0]['max_version'])
        self.assertEqual(microversion.min_version_string(),
                         result['errors'][0]['min_version'])
class TestRequireContent(test.NoDBTestCase):
    """Confirm behavior of util.require_content."""
    @staticmethod
    @util.require_content('application/json')
    def handler(req):
        """Fake handler to test decorator."""
        return True
    def _assert_unsupported(self, request, expected_detail):
        """Call the handler and check the 415 raised for *request*."""
        caught = self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
                                   self.handler, request)
        self.assertEqual(expected_detail, str(caught))
    def test_fail_no_content_type(self):
        """A request without any content-type is rejected with 415."""
        self._assert_unsupported(
            webob.Request.blank('/'),
            'The media type None is not supported, use application/json')
    def test_fail_wrong_content_type(self):
        """A request with a non-JSON content-type is rejected with 415."""
        request = webob.Request.blank('/')
        request.content_type = 'text/plain'
        self._assert_unsupported(
            request,
            'The media type text/plain is not supported, use application/json')
    def test_success_content_type(self):
        """A JSON content-type reaches the wrapped handler."""
        request = webob.Request.blank('/')
        request.content_type = 'application/json'
        self.assertTrue(self.handler(request))
class TestPlacementURLs(test.NoDBTestCase):
    """Confirm the URL-construction helpers honor the WSGI SCRIPT_NAME."""
    def setUp(self):
        super(TestPlacementURLs, self).setUp()
        # Minimal objects carrying just the fields the URL helpers read.
        self.resource_provider = objects.ResourceProvider(
            name=uuidsentinel.rp_name,
            uuid=uuidsentinel.rp_uuid)
        self.resource_class = objects.ResourceClass(
            name='CUSTOM_BAREMETAL_GOLD',
            id=1000)
    def test_resource_provider_url(self):
        """Without a prefix, the URL is rooted at /resource_providers."""
        environ = {}
        expected_url = '/resource_providers/%s' % uuidsentinel.rp_uuid
        self.assertEqual(expected_url, util.resource_provider_url(
            environ, self.resource_provider))
    def test_resource_provider_url_prefix(self):
        """SCRIPT_NAME is prepended to the provider URL."""
        # SCRIPT_NAME represents the mount point of a WSGI
        # application when it is hosted at a path/prefix.
        environ = {'SCRIPT_NAME': '/placement'}
        expected_url = ('/placement/resource_providers/%s'
                        % uuidsentinel.rp_uuid)
        self.assertEqual(expected_url, util.resource_provider_url(
            environ, self.resource_provider))
    def test_inventories_url(self):
        """Without a resource class, the inventories collection URL is built."""
        environ = {}
        expected_url = ('/resource_providers/%s/inventories'
                        % uuidsentinel.rp_uuid)
        self.assertEqual(expected_url, util.inventory_url(
            environ, self.resource_provider))
    def test_inventory_url(self):
        """With a resource class, the single-inventory URL is built."""
        resource_class = 'DISK_GB'
        environ = {}
        expected_url = ('/resource_providers/%s/inventories/%s'
                        % (uuidsentinel.rp_uuid, resource_class))
        self.assertEqual(expected_url, util.inventory_url(
            environ, self.resource_provider, resource_class))
    def test_resource_class_url(self):
        """Without a prefix, the URL is rooted at /resource_classes."""
        environ = {}
        expected_url = '/resource_classes/CUSTOM_BAREMETAL_GOLD'
        self.assertEqual(expected_url, util.resource_class_url(
            environ, self.resource_class))
    def test_resource_class_url_prefix(self):
        """SCRIPT_NAME is prepended to the resource class URL."""
        # SCRIPT_NAME represents the mount point of a WSGI
        # application when it is hosted at a path/prefix.
        environ = {'SCRIPT_NAME': '/placement'}
        expected_url = '/placement/resource_classes/CUSTOM_BAREMETAL_GOLD'
        self.assertEqual(expected_url, util.resource_class_url(
            environ, self.resource_class))
class TestNormalizeResourceQsParam(test.NoDBTestCase):
    """Confirm parsing of the ?resources= query string parameter."""
    def setUp(self):
        super(TestNormalizeResourceQsParam, self).setUp()
    def _assert_400(self, qs):
        """The given query string must raise HTTPBadRequest."""
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            util.normalize_resources_qs_param,
            qs,
        )
    def test_success(self):
        """Single and multiple CLASS:amount pairs parse to an int-valued dict."""
        cases = [
            ("VCPU:1", {'VCPU': 1}),
            ("VCPU:1,MEMORY_MB:1024,DISK_GB:100",
             {'VCPU': 1, 'MEMORY_MB': 1024, 'DISK_GB': 100}),
        ]
        for qs, expected in cases:
            self.assertEqual(
                expected, util.normalize_resources_qs_param(qs))
    def test_400_empty_string(self):
        self._assert_400("")
    def test_400_bad_int(self):
        self._assert_400("VCPU:foo")
    def test_400_no_amount(self):
        self._assert_400("VCPU")
    def test_400_zero_amount(self):
        self._assert_400("VCPU:0")
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
try:
import PIL # pylint:disable=g-import-not-at-top
except ImportError:
PIL = None
def _generate_test_images(img_w=20, img_h=20, num_images=8):
  """Create matching lists of random RGB and grayscale PIL test images.

  Generalized from the original hard-coded 20x20/8-image version; the
  defaults keep the original behavior for existing callers.

  Args:
    img_w: Image width in pixels.
    img_h: Image height in pixels.
    num_images: Number of images generated per list.

  Returns:
    A two-element list [rgb_images, gray_images]; each entry is a list of
    `num_images` PIL images drawn from per-pixel random bias/variance so
    featurewise statistics are non-trivial.
  """
  rgb_images = []
  gray_images = []
  for _ in range(num_images):
    # Per-pixel bias in [0, 64) and variance in [0, 191) so the pixel
    # distribution is neither constant nor saturated.
    bias = np.random.rand(img_w, img_h, 1) * 64
    variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
    imarray = np.random.rand(img_w, img_h, 3) * variance + bias
    im = keras.preprocessing.image.array_to_img(imarray, scale=False)
    rgb_images.append(im)
    imarray = np.random.rand(img_w, img_h, 1) * variance + bias
    im = keras.preprocessing.image.array_to_img(imarray, scale=False)
    gray_images.append(im)
  return [rgb_images, gray_images]
class TestImage(test.TestCase):
  """Exercises keras.preprocessing.image generators, iterators and utils."""

  def test_image_data_generator(self):
    """flow() on a fitted, fully-augmenting generator keeps image shape."""
    if PIL is None:
      return # Skip test if PIL is not available.

    for test_images in _generate_test_images():
      img_list = []
      for im in test_images:
        img_list.append(keras.preprocessing.image.img_to_array(im)[None, ...])

      images = np.vstack(img_list)
      generator = keras.preprocessing.image.ImageDataGenerator(
          featurewise_center=True,
          samplewise_center=True,
          featurewise_std_normalization=True,
          samplewise_std_normalization=True,
          zca_whitening=True,
          rotation_range=90.,
          width_shift_range=0.1,
          height_shift_range=0.1,
          shear_range=0.5,
          zoom_range=0.2,
          channel_shift_range=0.,
          fill_mode='nearest',
          cval=0.5,
          horizontal_flip=True,
          vertical_flip=True)
      # Basic test before fit
      x = np.random.random((32, 10, 10, 3))
      generator.flow(x)

      # Fit
      generator.fit(images, augment=True)

      for x, _ in generator.flow(
          images,
          np.arange(images.shape[0]),
          shuffle=True):
        self.assertEqual(x.shape[1:], images.shape[1:])
        break

  def test_image_data_generator_invalid_data(self):
    """fit/flow reject wrong ranks; bad config raises at construction."""
    generator = keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=True,
        featurewise_std_normalization=True,
        samplewise_std_normalization=True,
        zca_whitening=True,
        data_format='channels_last')
    # Test fit with invalid data
    with self.assertRaises(ValueError):
      x = np.random.random((3, 10, 10))
      generator.fit(x)
    # Test flow with invalid data
    with self.assertRaises(ValueError):
      generator.flow(np.arange(5))
    # Invalid number of channels: will work but raise a warning
    x = np.random.random((32, 10, 10, 5))
    generator.flow(x)

    with self.assertRaises(ValueError):
      generator = keras.preprocessing.image.ImageDataGenerator(
          data_format='unknown')

    generator = keras.preprocessing.image.ImageDataGenerator(
        zoom_range=(2, 2))
    with self.assertRaises(ValueError):
      generator = keras.preprocessing.image.ImageDataGenerator(
          zoom_range=(2, 2, 2))

  def test_image_data_generator_fit(self):
    """fit() accepts grayscale and RGB batches in both data formats."""
    generator = keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=True,
        featurewise_std_normalization=True,
        samplewise_std_normalization=True,
        zca_whitening=True,
        data_format='channels_last')
    # Test grayscale
    x = np.random.random((32, 10, 10, 1))
    generator.fit(x)
    # Test RBG
    x = np.random.random((32, 10, 10, 3))
    generator.fit(x)
    generator = keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=True,
        samplewise_center=True,
        featurewise_std_normalization=True,
        samplewise_std_normalization=True,
        zca_whitening=True,
        data_format='channels_first')
    # Test grayscale
    x = np.random.random((32, 1, 10, 10))
    generator.fit(x)
    # Test RBG
    x = np.random.random((32, 3, 10, 10))
    generator.fit(x)

  def test_directory_iterator(self):
    """flow_from_directory finds all images across nested class folders."""
    if PIL is None:
      return # Skip test if PIL is not available.

    num_classes = 2
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)

    # create folders and subfolders
    paths = []
    for cl in range(num_classes):
      class_directory = 'class-{}'.format(cl)
      classpaths = [
          class_directory, os.path.join(class_directory, 'subfolder-1'),
          os.path.join(class_directory, 'subfolder-2'), os.path.join(
              class_directory, 'subfolder-1', 'sub-subfolder')
      ]
      for path in classpaths:
        os.mkdir(os.path.join(temp_dir, path))
      paths.append(classpaths)

    # save the images in the paths
    count = 0
    filenames = []
    for test_images in _generate_test_images():
      for im in test_images:
        # rotate image class
        im_class = count % num_classes
        # rotate subfolders
        classpaths = paths[im_class]
        filename = os.path.join(classpaths[count % len(classpaths)],
                                'image-{}.jpg'.format(count))
        filenames.append(filename)
        im.save(os.path.join(temp_dir, filename))
        count += 1

    # Test image loading util
    fname = os.path.join(temp_dir, filenames[0])
    _ = keras.preprocessing.image.load_img(fname)
    _ = keras.preprocessing.image.load_img(fname, grayscale=True)
    _ = keras.preprocessing.image.load_img(fname, target_size=(10, 10))

    # create iterator
    generator = keras.preprocessing.image.ImageDataGenerator()
    dir_iterator = generator.flow_from_directory(temp_dir)

    # check number of classes and images
    self.assertEqual(len(dir_iterator.class_indices), num_classes)
    self.assertEqual(len(dir_iterator.classes), count)
    self.assertEqual(sorted(dir_iterator.filenames), sorted(filenames))
    _ = dir_iterator.next()

  def test_img_utils(self):
    """array_to_img/img_to_array round-trip shapes in both data formats."""
    if PIL is None:
      return # Skip test if PIL is not available.

    height, width = 10, 8

    # Test channels_first data format
    x = np.random.random((3, height, width))
    img = keras.preprocessing.image.array_to_img(
        x, data_format='channels_first')
    self.assertEqual(img.size, (width, height))
    x = keras.preprocessing.image.img_to_array(
        img, data_format='channels_first')
    self.assertEqual(x.shape, (3, height, width))
    # Test 2D
    x = np.random.random((1, height, width))
    img = keras.preprocessing.image.array_to_img(
        x, data_format='channels_first')
    self.assertEqual(img.size, (width, height))
    x = keras.preprocessing.image.img_to_array(
        img, data_format='channels_first')
    self.assertEqual(x.shape, (1, height, width))

    # Test channels_last data format
    x = np.random.random((height, width, 3))
    img = keras.preprocessing.image.array_to_img(x, data_format='channels_last')
    self.assertEqual(img.size, (width, height))
    x = keras.preprocessing.image.img_to_array(img, data_format='channels_last')
    self.assertEqual(x.shape, (height, width, 3))
    # Test 2D
    x = np.random.random((height, width, 1))
    img = keras.preprocessing.image.array_to_img(x, data_format='channels_last')
    self.assertEqual(img.size, (width, height))
    x = keras.preprocessing.image.img_to_array(img, data_format='channels_last')
    self.assertEqual(x.shape, (height, width, 1))

  def test_img_transforms(self):
    """Random transform helpers run on channels_first data; bad zoom raises."""
    x = np.random.random((3, 200, 200))
    _ = keras.preprocessing.image.random_rotation(x, 20)
    _ = keras.preprocessing.image.random_shift(x, 0.2, 0.2)
    _ = keras.preprocessing.image.random_shear(x, 2.)
    _ = keras.preprocessing.image.random_zoom(x, (0.5, 0.5))
    with self.assertRaises(ValueError):
      keras.preprocessing.image.random_zoom(x, (0, 0, 0))
    _ = keras.preprocessing.image.random_channel_shift(x, 2.)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
import sys
import pickle
import inspect
import operator
import collections
from pstats import Stats
from cProfile import Profile
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import sqlalchemy
from sqlalchemy import Table, Column, String, Integer, ForeignKey
from sqlalchemy import create_engine, func
from sqlalchemy.orm import Session, Query as QueryBase, relationship, aliased
from sqlalchemy.orm import subqueryload, scoped_session, sessionmaker
from sqlalchemy.sql.operators import ColumnOperators
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
PY3 = sys.version_info[0] == 3
SQLA_ge_08 = sqlalchemy.__version__ >= '0.8'
SQLA_ge_09 = sqlalchemy.__version__ >= '0.9'
if PY3:
_range = range
else:
_range = xrange
if SQLA_ge_08:
from sqlalchemy.util import KeyedTuple
else:
from sqlalchemy.util import NamedTuple as KeyedTuple
from sqlconstruct import Construct, Object, apply_, if_, define, QueryMixin
from sqlconstruct import ConstructQuery, bind, map_, get_, _Scope, _QueryPlan
from sqlconstruct import ObjectSubQuery, CollectionSubQuery
from sqlconstruct import RelativeObjectSubQuery, RelativeCollectionSubQuery
if SQLA_ge_09:
class Query(QueryBase):
pass
else:
class Query(QueryMixin, QueryBase):
pass
def capitalize(s):
    """Return *s* with its first character upper-cased and the rest lowered.

    Thin wrapper over str.capitalize, kept as a named callable for use as a
    mapping function in the tests. Replaces the original
    ``capitalize = lambda s: ...`` assignment (PEP 8 E731) with a proper
    ``def``; behavior and call signature are identical.
    """
    return s.capitalize()
@define
def defined_func(a, b, extra_id=0, extra_name=''):
    """doc"""
    # sqlconstruct's @define appears to split a definition into a pure
    # ``body`` function plus the list of column expressions / extra values
    # it needs; ``body`` then receives the fetched values in that same
    # order. NOTE(review): semantics inferred from the return shape --
    # confirm against sqlconstruct.define's documentation.
    def body(a_id, a_name, b_id, b_name, extra_id, extra_name):
        return a_id, b_id, extra_id, a_name, b_name, extra_name
    return body, [a.id, a.name, b.id, b.name, extra_id, extra_name]
def columns_set(processable):
    """Return the set of columns *processable* would query.

    Runs the processable's processor against a fresh scope and collects
    the columns registered on the scope's query plan.
    """
    plan_scope = _Scope(_QueryPlan())
    processable.__processor__(plan_scope)
    queried = plan_scope.query_plan.query_columns(None)
    return set(queried)
def proceed(processable, mapping):
    """Evaluate *processable* against fake row data taken from *mapping*.

    Builds a processor in a fresh scope, looks up each column the plan
    requests in *mapping*, and feeds the resulting single-row result set
    to the processor.
    """
    plan_scope = _Scope(_QueryPlan())
    process = processable.__processor__(plan_scope)
    row = []
    for column in plan_scope.query_plan.query_columns(None):
        row.append(mapping[column])
    return process({0: row})
class BaseMeta(DeclarativeMeta):
    """Declarative metaclass supplying default __tablename__ and id columns.

    Lets the tests declare models without boilerplate: the table name
    defaults to the lower-cased class name and an integer primary key
    named ``id`` is added when the class does not define one.
    """
    def __new__(mcs, name, bases, attrs):
        if '__tablename__' not in attrs:
            attrs['__tablename__'] = name.lower()
        if 'id' not in attrs:
            attrs['id'] = Column(Integer, primary_key=True)
        return DeclarativeMeta.__new__(mcs, name, bases, attrs)
class TestConstruct(unittest.TestCase):
def setUp(self):
engine = create_engine('sqlite://')
base_cls = declarative_base(metaclass=BaseMeta)
self.a_cls = type('A', (base_cls,), dict(
name=Column(String),
))
self.b_cls = type('B', (base_cls,), dict(
name=Column(String),
a_id=Column(Integer, ForeignKey('a.id')),
a=relationship('A'),
))
base_cls.metadata.create_all(engine)
self.session = Session(engine, query_cls=Query)
self.session.add_all([
self.b_cls(name='b1', a=self.a_cls(name='a1')),
self.b_cls(name='b2', a=self.a_cls(name='a2')),
])
def test_object_interface(self):
    """Object is a read-only mapping with attribute access to its keys."""
    obj = Object({'a': 1, 'b': 2})
    self.assertEqual(repr(obj), 'Object({})'.format(repr({'a': 1, 'b': 2})))
    # NOTE(review): collections.Mapping was removed in Python 3.10;
    # collections.abc.Mapping is the long-term spelling — confirm the
    # supported interpreter range before changing (file still supports Py2).
    self.assertTrue(isinstance(obj, collections.Mapping), type(obj))
    self.assertEqual(obj.a, 1)
    self.assertEqual(obj['a'], 1)
    self.assertEqual(obj.b, 2)
    self.assertEqual(obj['b'], 2)
    self.assertEqual(dict(obj), {'a': 1, 'b': 2})
    # Missing keys raise through both access styles.
    with self.assertRaises(KeyError):
        _ = obj['foo']
    with self.assertRaises(AttributeError):
        _ = obj.foo
def test_object_pickling(self):
    """Object must survive a pickle round-trip, keeping type and payload.

    The original spelled the same three-line check out once per protocol;
    a data-driven loop removes the duplication without changing coverage.
    """
    ref = {'a': 1, 'b': 2}
    # Protocols 0..2 are the ones available on both Py2 and Py3.
    for protocol in (0, 1, 2):
        restored = pickle.loads(pickle.dumps(Object(ref), protocol))
        self.assertIs(type(restored), Object)
        self.assertEqual(dict(restored), ref)
def test_object_immutability(self):
    """Every mutating operation on Object must raise TypeError.

    The original repeated nine near-identical assertRaises blocks; a loop
    over mutation callables checks the same operations in the same order.
    """
    obj = Object({'foo': 'bar'})
    mutations = [
        lambda: setattr(obj, 'foo', 'baz'),      # obj.foo = 'baz'
        lambda: obj.__setitem__('foo', 'baz'),   # obj['foo'] = 'baz'
        lambda: delattr(obj, 'foo'),             # del obj.foo
        lambda: obj.__delitem__('foo'),          # del obj['foo']
        lambda: obj.clear(),
        lambda: obj.pop('foo', None),
        lambda: obj.popitem(),
        lambda: obj.setdefault('foo', 'baz'),
        lambda: obj.update({'foo': 'baz'}),
    ]
    for mutate in mutations:
        with self.assertRaises(TypeError):
            mutate()
def test_scalar_construct(self):
    """A Construct of plain constants needs no row data at all."""
    scalar = Construct({'foo': 1, 'bar': '2'})
    produced = scalar._from_row([])
    self.assertEqual(produced.foo, 1)
    self.assertEqual(produced.bar, '2')
def test_basic_construct(self):
    """Plain column references are collected and consumed positionally."""
    struct = Construct({
        'a_id': self.a_cls.id,
        'a_name': self.a_cls.name,
    })
    id_col = self.a_cls.__table__.c.id
    name_col = self.a_cls.__table__.c.name
    self.assertEqual(set(struct._columns), {id_col, name_col})
    # Build a row in whatever order the Construct requested its columns.
    values = {id_col: 1, name_col: 'a1'}
    produced = struct._from_row([values[col] for col in struct._columns])
    self.assertEqual(produced.a_id, 1)
    self.assertEqual(produced.a_name, 'a1')
def test_nested_construct(self):
    """apply_ wrappers still surface the raw columns they depend on."""
    struct = Construct({
        'a_id': apply_(operator.add, [self.a_cls.id, 5]),
        'a_name': apply_(operator.concat, [self.a_cls.name, '-test']),
    })
    id_col = self.a_cls.__table__.c.id
    name_col = self.a_cls.__table__.c.name
    self.assertEqual(set(struct._columns), {id_col, name_col})
    values = {id_col: 1, name_col: 'a1'}
    produced = struct._from_row([values[col] for col in struct._columns])
    # The wrapped callables run in Python, on top of the fetched values.
    self.assertEqual(produced.a_id, 1 + 5)
    self.assertEqual(produced.a_name, 'a1' + '-test')
def test_apply(self):
    """apply_ supports positional, keyword and mixed argument passing."""
    add = lambda a, b, c=30, d=400: a + b + c + d
    # (applied expression, expected sum) — constants only, so no columns.
    cases = [
        (apply_(add, [1, 2]), 1 + 2 + 30 + 400),
        (apply_(add, [], {'a': 1, 'b': 2}), 1 + 2 + 30 + 400),
        (apply_(add, [1, 2, 33, 444]), 1 + 2 + 33 + 444),
        (apply_(add, [], {'a': 1, 'b': 2, 'c': 33, 'd': 444}),
         1 + 2 + 33 + 444),
        (apply_(add, [1, 2], {'c': 33, 'd': 444}), 1 + 2 + 33 + 444),
    ]
    for applied, expected in cases:
        self.assertEqual(columns_set(applied), set())
        self.assertEqual(proceed(applied, {}), expected)
def test_apply_with_columns(self):
    """apply_ accepts instrumented attributes, raw columns and SQL funcs.

    All three argument flavors must report their columns and evaluate.
    """
    f1 = self.a_cls.id
    f2 = self.b_cls.id
    c1 = self.a_cls.__table__.c.id
    c2 = self.b_cls.__table__.c.id
    fn1 = func.count(self.a_cls.id)
    fn2 = func.count(self.b_cls.id)
    add = lambda a, b: a + b
    # Instrumented attributes normalize down to their table columns.
    apl1 = apply_(add, [f1], {'b': f2})
    self.assertEqual(columns_set(apl1), {c1, c2})
    self.assertEqual(proceed(apl1, {c1: 3, c2: 4}), 3 + 4)
    # Raw table columns pass through unchanged.
    apl2 = apply_(add, [c1], {'b': c2})
    self.assertEqual(columns_set(apl2), {c1, c2})
    # BUG FIX: this previously called proceed(apl1, ...) again, so apl2's
    # evaluation was never actually exercised.
    self.assertEqual(proceed(apl2, {c1: 4, c2: 5}), 4 + 5)
    # SQL function expressions are themselves the requested "columns".
    apl3 = apply_(add, [fn1], {'b': fn2})
    self.assertEqual(columns_set(apl3), {fn1, fn2})
    self.assertEqual(proceed(apl3, {fn1: 5, fn2: 6}), 5 + 6)
def test_nested_apply(self):
    """Columns surface from arbitrarily deep apply_ nesting."""
    c1 = self.a_cls.__table__.c.id
    c2 = self.b_cls.__table__.c.id
    add = lambda a, b: a + b
    # The tree sums the constants 0..9, with c1 standing in for 4 and
    # c2 standing in for 5.
    apl = apply_(add, [
        apply_(add, [
            apply_(add, [
                0,
                1,
            ]),
            apply_(add, [
                2,
                apply_(add, [
                    3,
                    c1,  # 4
                ]),
            ]),
        ]),
        apply_(add, [
            apply_(add, [
                apply_(add, [
                    c2,  # 5
                    6,
                ]),
                7,
            ]),
            apply_(add, [
                8,
                9,
            ]),
        ]),
    ])
    self.assertEqual(columns_set(apl), {c1, c2})
    self.assertEqual(proceed(apl, {c1: 4, c2: 5}), sum(range(10)))
def test_if(self):
    """if_ selects then_/else_ by condition and reports all referenced columns."""
    add = lambda a, b: a + b
    cond_col = self.a_cls.__table__.c.id
    then_col = self.a_cls.__table__.c.name
    else_col = self.b_cls.__table__.c.id
    extra_col = self.b_cls.__table__.c.name
    # Constant conditions need no columns at all.
    true_branch = if_(True, then_=1, else_=2)
    self.assertEqual(columns_set(true_branch), set())
    self.assertEqual(proceed(true_branch, {}), 1)
    false_branch = if_(False, then_=1, else_=2)
    self.assertEqual(columns_set(false_branch), set())
    self.assertEqual(proceed(false_branch, {}), 2)
    # Column condition: columns from BOTH branches are always requested.
    simple = if_(cond_col, then_=then_col, else_=else_col)
    self.assertEqual(columns_set(simple), {cond_col, then_col, else_col})
    self.assertEqual(proceed(simple, {cond_col: 0, then_col: 3, else_col: 6}), 6)
    self.assertEqual(proceed(simple, {cond_col: 1, then_col: 3, else_col: 6}), 3)
    nested = if_(cond_col,
                 then_=apply_(add, [then_col, else_col]),
                 else_=apply_(add, [else_col, extra_col]))
    self.assertEqual(columns_set(nested),
                     {cond_col, then_col, else_col, extra_col})
    self.assertEqual(
        proceed(nested, {cond_col: 0, then_col: 2, else_col: 3, extra_col: 4}),
        3 + 4)
    self.assertEqual(
        proceed(nested, {cond_col: 1, then_col: 2, else_col: 3, extra_col: 4}),
        2 + 3)
def test_defined_signatures(self):
    """@define preserves the declared signature on the wrapper and .defn."""
    # NOTE(review): inspect.getargspec was removed in Python 3.11;
    # getfullargspec/signature is the modern spelling — confirm the
    # supported interpreter range before changing (file still supports Py2).
    obj_spec = inspect.getargspec(defined_func)
    self.assertEqual(obj_spec.args, ['a', 'b', 'extra_id', 'extra_name'])
    self.assertEqual(obj_spec.varargs, None)
    self.assertEqual(obj_spec.keywords, None)
    self.assertEqual(obj_spec.defaults, (0, ''))
    defn_spec = inspect.getargspec(defined_func.defn)
    self.assertEqual(defn_spec.args, ['a', 'b', 'extra_id', 'extra_name'])
    self.assertEqual(defn_spec.varargs, None)
    self.assertEqual(defn_spec.keywords, None)
    self.assertEqual(defn_spec.defaults, (0, ''))
    # .func exposes the flat column-level body with no defaults.
    func_spec = inspect.getargspec(defined_func.func)
    self.assertEqual(func_spec.args, ['a_id', 'a_name', 'b_id', 'b_name',
                                      'extra_id', 'extra_name'])
    self.assertEqual(func_spec.varargs, None)
    self.assertEqual(func_spec.keywords, None)
    self.assertEqual(func_spec.defaults, None)
def test_defined_calls(self):
    """All three call forms of a defined function produce consistent results."""
    c1 = self.a_cls.__table__.c.id
    c2 = self.a_cls.__table__.c.name
    c3 = self.b_cls.__table__.c.id
    c4 = self.b_cls.__table__.c.name
    # Direct call with ORM instances.
    self.assertEqual(
        defined_func(self.a_cls(id=1, name='foo'),
                     self.b_cls(id=2, name='bar'),
                     extra_id=3,
                     extra_name='baz'),
        (1, 2, 3, 'foo', 'bar', 'baz'),
    )
    # A None instance yields None for each of its columns.
    self.assertEqual(
        defined_func(self.a_cls(id=1, name='foo'),
                     None,
                     extra_id=3,
                     extra_name='baz'),
        (1, None, 3, 'foo', None, 'baz'),
    )
    self.assertEqual(
        defined_func(None,
                     self.b_cls(id=2, name='bar'),
                     extra_id=3,
                     extra_name='baz'),
        (None, 2, 3, None, 'bar', 'baz'),
    )
    # .defn builds an apply_ over the models' columns; extras may be
    # constants, columns, or further apply_ expressions.
    apl1 = defined_func.defn(self.a_cls, self.b_cls,
                             extra_id=3, extra_name='baz')
    self.assertTrue(isinstance(apl1, apply_), type(apl1))
    self.assertEqual(columns_set(apl1), {c1, c2, c3, c4})
    self.assertEqual(
        proceed(apl1, {c1: 1, c2: 'foo', c3: 2, c4: 'bar'}),
        (1, 2, 3, 'foo', 'bar', 'baz'),
    )
    apl2 = defined_func.defn(self.a_cls, self.b_cls,
                             extra_id=c1, extra_name=c2)
    self.assertTrue(isinstance(apl2, apply_), type(apl2))
    self.assertEqual(columns_set(apl2), {c1, c2, c3, c4})
    self.assertEqual(
        proceed(apl2, {c1: 1, c2: 'foo', c3: 2, c4: 'bar'}),
        (1, 2, 1, 'foo', 'bar', 'foo'),
    )
    apl3 = defined_func.defn(self.a_cls, self.b_cls,
                             extra_id=apply_(operator.add, [c1, c3]),
                             extra_name=apply_(operator.concat, [c2, c4]))
    self.assertTrue(isinstance(apl3, apply_), type(apl3))
    self.assertEqual(columns_set(apl3), {c1, c2, c3, c4})
    self.assertEqual(
        proceed(apl3, {c1: 1, c2: 'foo', c3: 2, c4: 'bar'}),
        (1, 2, (1 + 2), 'foo', 'bar', ('foo' + 'bar')),
    )
    # .func is the flat positional implementation.
    self.assertEqual(
        defined_func.func(1, 'foo', 2, 'bar', 3, 'baz'),
        (1, 2, 3, 'foo', 'bar', 'baz'),
    )
def test_defined_meta(self):
    """@define keeps the wrapped function's docstring and module."""
    self.assertEqual(defined_func.__doc__, "doc")
    # NOTE(review): assumes this module is importable as "tests"; the
    # assertion fails if the test module is renamed or run under another name.
    self.assertEqual(defined_func.__module__, "tests")
def test_pipe_operator(self):
    """The | operator pipes an applied expression into a plain callable."""
    struct = Construct({
        'a_name_hash': apply_(capitalize, [self.a_cls.name]) | hash,
    })
    first, second = self.session.query(struct).order_by(self.a_cls.name).all()
    self.assertEqual(first.a_name_hash, hash('A1'))
    self.assertEqual(second.a_name_hash, hash('A2'))
def test_query_count(self):
    """count() works on a query whose sole entity is a Construct."""
    construct = Construct({'a_id': self.a_cls.id,
                           'a_name': self.a_cls.name})
    self.assertEqual(self.session.query(construct).count(), 2)
def test_query_expr(self):
    """SQL expressions inside a Construct are evaluated by the database."""
    construct = Construct({'a_id': self.a_cls.id,
                           'a_name': func.upper(self.a_cls.name)})
    query = self.session.query(construct)
    first, second = query.order_by(self.a_cls.name.asc()).all()
    self.assertEqual(first.a_name, 'A1')
    self.assertEqual(second.a_name, 'A2')
def test_query_single_entity(self):
    """Querying only a Construct yields bare Object rows, not tuples."""
    query = self.session.query(
        Construct({'a_id': self.a_cls.id,
                   'a_name': self.a_cls.name}),
    )
    rows = query.all()
    self.assertEqual(len(rows), 2)
    for expected_id, expected_name, row in zip((1, 2), ('a1', 'a2'), rows):
        self.assertTrue(isinstance(row, Object), type(row))
        self.assertEqual(row.a_id, expected_id)
        self.assertEqual(row.a_name, expected_name)
def test_query_row(self):
    """A Construct mixed with plain entities appears inside the KeyedTuple."""
    query = self.session.query(
        self.a_cls.id,
        Construct({'a_id': self.a_cls.id,
                   'a_name': self.a_cls.name}),
        self.a_cls.name,
    )
    r1, r2 = query.all()
    # Position 1 of each row is the Construct's Object; the flanking
    # columns keep their usual KeyedTuple attribute access.
    self.assertTrue(isinstance(r1, KeyedTuple), type(r1))
    self.assertEqual(r1.id, 1)
    self.assertEqual(r1.name, 'a1')
    self.assertTrue(isinstance(r1[1], Object), type(r1[1]))
    self.assertEqual(r1[1].a_id, 1)
    self.assertEqual(r1[1].a_name, 'a1')
    self.assertTrue(isinstance(r2, KeyedTuple), type(r2))
    self.assertEqual(r2.id, 2)
    self.assertEqual(r2.name, 'a2')
    self.assertTrue(isinstance(r2[1], Object), type(r2[1]))
    self.assertEqual(r2[1].a_id, 2)
    self.assertEqual(r2[1].a_name, 'a2')

def test_query_aliased_models(self):
    """Aliased models produce alias-prefixed labels and correct values."""
    a1_cls = aliased(self.a_cls, name='A1')
    a2_cls = aliased(self.a_cls, name='A2')
    # Self-join: pair each A row with the A row whose id is one greater.
    query = (
        self.session.query(
            Construct({'a1_id': a1_cls.id,
                       'a1_name': a1_cls.name,
                       'a2_id': a2_cls.id,
                       'a2_name': a2_cls.name}),
        )
        .select_from(a1_cls)
        .join(a2_cls, a2_cls.id == a1_cls.id + 1)
    )
    # The generated SQL labels columns by alias name.
    statement = str(query)
    self.assertIn('"A1".id AS "A1_id"', statement)
    self.assertIn('"A1".name AS "A1_name"', statement)
    self.assertIn('"A2".id AS "A2_id"', statement)
    self.assertIn('"A2".name AS "A2_name"', statement)
    s, = query.all()
    self.assertTrue(isinstance(s, Object), type(s))
    self.assertEqual(s.a1_id, 1)
    self.assertEqual(s.a1_name, 'a1')
    self.assertEqual(s.a2_id, 2)
    self.assertEqual(s.a2_name, 'a2')

def test_query_labeled_columns(self):
    """Explicit .label() names win over the alias-derived labels."""
    a1_cls = aliased(self.a_cls, name='A1')
    a2_cls = aliased(self.a_cls, name='A2')
    query = (
        self.session.query(
            Construct({'a1_id': a1_cls.id.label('__a1_id__'),
                       'a1_name': a1_cls.name.label('__a1_name__'),
                       'a2_id': a2_cls.id.label('__a2_id__'),
                       'a2_name': a2_cls.name.label('__a2_name__')}),
        )
        .select_from(a1_cls)
        .join(a2_cls, a2_cls.id == a1_cls.id + 1)
    )
    statement = str(query)
    self.assertIn('"A1".id AS __a1_id__', statement)
    self.assertIn('"A1".name AS __a1_name__', statement)
    self.assertIn('"A2".id AS __a2_id__', statement)
    self.assertIn('"A2".name AS __a2_name__', statement)
    s, = query.all()
    self.assertTrue(isinstance(s, Object), type(s))
    self.assertEqual(s.a1_id, 1)
    self.assertEqual(s.a1_name, 'a1')
    self.assertEqual(s.a2_id, 2)
    self.assertEqual(s.a2_name, 'a2')
def test_query_with_explicit_join(self):
    """A Construct spanning two models works with an explicit join."""
    query = (
        self.session.query(
            Construct({'a_id': self.a_cls.id,
                       'a_name': self.a_cls.name,
                       'b_id': self.b_cls.id,
                       'b_name': self.b_cls.name}),
        )
        .join(self.b_cls.a)
    )
    rows = query.all()
    self.assertEqual(len(rows), 2)
    # Fixture data pairs a<i> with b<i>, ids counting from 1.
    for i, row in enumerate(rows, 1):
        self.assertTrue(isinstance(row, Object), type(row))
        self.assertEqual(row.a_id, i)
        self.assertEqual(row.a_name, 'a%d' % i)
        self.assertEqual(row.b_id, i)
        self.assertEqual(row.b_name, 'b%d' % i)
@unittest.skipIf(SQLA_ge_09, 'SQLAlchemy < 0.9')
def test_query_with_implicit_join_lt_09(self):
    """Pre-0.9, a Construct gives the query no FROM entity to join from.

    The original duplicated the whole try block for each model; looping
    over the two join targets performs the identical checks.
    """
    from sqlalchemy.exc import InvalidRequestError
    for target in (self.a_cls, self.b_cls):
        with self.assertRaises(InvalidRequestError) as ctx:
            (
                self.session.query(
                    Construct({'a_id': self.a_cls.id,
                               'a_name': self.a_cls.name,
                               'b_id': self.b_cls.id,
                               'b_name': self.b_cls.name}),
                )
                .join(target)
            )
        # The message wording changed in SQLAlchemy 0.8.
        if SQLA_ge_08:
            self.assertIn("Don't know how to join", ctx.exception.args[0])
        else:
            self.assertEqual(ctx.exception.args[0],
                             "Could not find a FROM clause to join from")
@unittest.skip('optional')
def test_performance(self):
    """Compare call counts of Construct._from_row vs a hand-written builder.

    Skipped by default; enable locally to print the two cProfile reports.
    The final assertion documents the goal: Construct should not cost more
    calls than the manual equivalent.
    """
    @define
    def test_func(a, b):
        def body(a_id, a_name, b_id, b_name):
            pass
        return body, [a.id, a.name, b.id, b.name]
    struct = Construct({
        'r1': if_(self.a_cls.id,
                  then_=test_func.defn(self.a_cls, self.b_cls)),
        'r2': if_(self.a_cls.name,
                  then_=test_func.defn(self.a_cls, self.b_cls)),
        'r3': if_(self.b_cls.id,
                  then_=test_func.defn(self.a_cls, self.b_cls)),
        'r4': if_(self.b_cls.name,
                  then_=test_func.defn(self.a_cls, self.b_cls)),
    })
    row = (
        self.session.query(*struct._columns)
        .join(self.b_cls.a)
        .first()
    )
    # warm-up
    for _ in _range(5000):
        struct._from_row(row)
    profile1 = Profile()
    profile1.enable()
    for _ in _range(5000):
        struct._from_row(row)
    profile1.disable()
    out1 = StringIO()
    stats1 = Stats(profile1, stream=out1)
    stats1.strip_dirs()
    stats1.sort_stats('calls').print_stats(10)
    print(out1.getvalue().lstrip())
    out1.close()
    # The manual baseline: labeled columns and an explicit Object build.
    row = (
        self.session.query(
            self.a_cls.id.label('a_id'),
            self.a_cls.name.label('a_name'),
            self.b_cls.id.label('b_id'),
            self.b_cls.name.label('b_name'),
        )
        .join(self.b_cls.a)
        .first()
    )
    def make_object(row):
        Object(dict(
            r1=(
                test_func.func(row.a_id, row.a_name, row.b_id, row.b_name)
                if row.a_id else None
            ),
            r2=(
                test_func.func(row.a_id, row.a_name, row.b_id, row.b_name)
                if row.a_name else None
            ),
            r3=(
                test_func.func(row.a_id, row.a_name, row.b_id, row.b_name)
                if row.b_id else None
            ),
            r4=(
                test_func.func(row.a_id, row.a_name, row.b_id, row.b_name)
                if row.b_name else None
            ),
        ))
    # warm-up
    for _ in _range(5000):
        make_object(row)
    profile2 = Profile()
    profile2.enable()
    for _ in _range(5000):
        make_object(row)
    profile2.disable()
    out2 = StringIO()
    stats2 = Stats(profile2, stream=out2)
    stats2.strip_dirs()
    stats2.sort_stats('calls').print_stats(10)
    print(out2.getvalue().lstrip())
    out2.close()
    self.assertEqual(stats1.total_calls, stats2.total_calls)
class TestSubQueries(unittest.TestCase):
    """Tests for the various sub-query expressions (get_/map_/bind)."""

    def setUp(self):
        # Tests declare their own model classes, then call init() to create
        # the tables and obtain a scoped session bound to this engine.
        self.engine = create_engine('sqlite://')
        self.base_cls = declarative_base(metaclass=BaseMeta)

    def init(self):
        """Create all declared tables and return a configured scoped session."""
        self.base_cls.metadata.create_all(self.engine)
        session = scoped_session(sessionmaker())
        session.configure(bind=self.engine)
        return session
def test_many_to_one(self):
    """get_ follows a many-to-one relation; if_ handles the missing side."""
    class A(self.base_cls):
        name = Column(String)
        b_id = Column(Integer, ForeignKey('b.id'))
        b = relationship('B')
    class B(self.base_cls):
        name = Column(String)
    session = self.init()
    b1, b2, b3 = B(name='b1'), B(name='b2'), B(name='b3')
    # Some A rows deliberately have no B.
    session.add_all([
        A(name='a1', b=b1), A(name='a2', b=b1), A(name='a3'),
        A(name='a4', b=b2), A(name='a5'), A(name='a6', b=b2),
        A(name='a7'), A(name='a8', b=b3), A(name='a9', b=b3),
    ])
    session.commit()
    query = (
        ConstructQuery({
            'a_name': A.name,
            'b_name': get_(if_(B.id, apply_(capitalize, [B.name]), '~'),
                           A.b),
        })
        .with_session(session.registry())
    )
    self.assertEqual(
        tuple(dict(obj) for obj in query.all()),
        ({'a_name': 'a1', 'b_name': 'B1'},
         {'a_name': 'a2', 'b_name': 'B1'},
         {'a_name': 'a3', 'b_name': '~'},
         {'a_name': 'a4', 'b_name': 'B2'},
         {'a_name': 'a5', 'b_name': '~'},
         {'a_name': 'a6', 'b_name': 'B2'},
         {'a_name': 'a7', 'b_name': '~'},
         {'a_name': 'a8', 'b_name': 'B3'},
         {'a_name': 'a9', 'b_name': 'B3'}),
    )

def test_one_to_one(self):
    """get_ works across a one-to-one (uselist=False) relation."""
    class A(self.base_cls):
        name = Column(String)
        b = relationship('B', uselist=False)
    class B(self.base_cls):
        name = Column(String)
        a_id = Column(Integer, ForeignKey('a.id'))
    session = self.init()
    # b2 is intentionally orphaned; a2 has no B.
    session.add_all([
        A(name='a1', b=B(name='b1')),
        A(name='a2'),
        B(name='b2'),
        A(name='a3', b=B(name='b3')),
    ])
    session.commit()
    query = (
        ConstructQuery({
            'a_name': A.name,
            'b_name': get_(if_(B.id, apply_(capitalize, [B.name]), '~'),
                           A.b),
        })
        .with_session(session.registry())
    )
    self.assertEqual(
        tuple(dict(obj) for obj in query.all()),
        ({'a_name': 'a1', 'b_name': 'B1'},
         {'a_name': 'a2', 'b_name': '~'},
         {'a_name': 'a3', 'b_name': 'B3'}),
    )
def test_one_to_many(self):
    """map_ collects one value per related row across a one-to-many."""
    class A(self.base_cls):
        name = Column(String)
        b_list = relationship('B')
    class B(self.base_cls):
        name = Column(String)
        a_id = Column(Integer, ForeignKey('a.id'))
    session = self.init()
    session.add_all([
        A(name='a1', b_list=[B(name='b1'), B(name='b2'), B(name='b3')]),
        A(name='a2', b_list=[B(name='b4'), B(name='b5'), B(name='b6')]),
        A(name='a3', b_list=[B(name='b7'), B(name='b8'), B(name='b9')]),
    ])
    session.commit()
    query = (
        ConstructQuery({
            'a_name': A.name,
            'b_names': map_(apply_(capitalize, [B.name]), A.b_list),
        })
        .with_session(session.registry())
    )
    self.assertEqual(
        tuple(dict(obj) for obj in query.all()),
        ({'a_name': 'a1', 'b_names': ['B1', 'B2', 'B3']},
         {'a_name': 'a2', 'b_names': ['B4', 'B5', 'B6']},
         {'a_name': 'a3', 'b_names': ['B7', 'B8', 'B9']}),
    )

def test_many_to_many(self):
    """map_ traverses a secondary-table relation from either side."""
    ab_table = Table(
        'a_b',
        self.base_cls.metadata,
        Column('a_id', Integer, ForeignKey('a.id')),
        Column('b_id', Integer, ForeignKey('b.id'))
    )
    class A(self.base_cls):
        name = Column(String)
        b_list = relationship('B', secondary=ab_table)
    class B(self.base_cls):
        name = Column(String)
        a_list = relationship('A', secondary=ab_table)
    session = self.init()
    # Each a<i> links to every b except b<i> (and vice versa).
    a1, a2, a3, a4 = A(name='a1'), A(name='a2'), A(name='a3'), A(name='a4')
    b1, b2, b3, b4 = B(name='b1'), B(name='b2'), B(name='b3'), B(name='b4')
    a1.b_list = [b2, b3, b4]
    a2.b_list = [b1, b3, b4]
    a3.b_list = [b1, b2, b4]
    a4.b_list = [b1, b2, b3]
    session.add_all([a1, a2, a3, a4])
    session.commit()
    # Collection order is not guaranteed — compare as sets.
    q1 = (
        ConstructQuery({
            'a_name': A.name,
            'b_names': map_(apply_(capitalize, [B.name]), A.b_list),
        })
        .with_session(session.registry())
        .order_by(A.name)
    )
    self.assertEqual(
        tuple((obj.a_name, set(obj.b_names)) for obj in q1.all()),
        (
            ('a1', {'B2', 'B3', 'B4'}),
            ('a2', {'B1', 'B3', 'B4'}),
            ('a3', {'B1', 'B2', 'B4'}),
            ('a4', {'B1', 'B2', 'B3'}),
        )
    )
    q2 = (
        ConstructQuery({
            'b_name': B.name,
            'a_names': map_(apply_(capitalize, [A.name]), B.a_list),
        })
        .with_session(session.registry())
        .order_by(B.name)
    )
    self.assertEqual(
        tuple((obj.b_name, set(obj.a_names)) for obj in q2.all()),
        (
            ('b1', {'A2', 'A3', 'A4'}),
            ('b2', {'A1', 'A3', 'A4'}),
            ('b3', {'A1', 'A2', 'A4'}),
            ('b4', {'A1', 'A2', 'A3'}),
        ),
    )
def test_non_empty_in_op_in_relative_subqueries(self):
    """Relative sub-queries must never issue an empty IN () clause."""
    in_op = ColumnOperators.in_
    class EmptyInOpError(Exception):
        pass
    # Patch in_ so any empty-values call surfaces as an exception instead
    # of silently producing an empty IN clause.
    def wrapper(self, values):
        if not values:
            raise EmptyInOpError
        return in_op(self, values)
    patcher = patch.object(ColumnOperators, 'in_', wrapper)
    class A(self.base_cls):
        value = Column(String)
    class B(self.base_cls):
        value = Column(String)
    session = self.init()
    # All A.value are NULL, so the sub-queries have nothing to match on.
    session.add_all([A(), A(), A()])
    session.commit()
    obj_sq = RelativeObjectSubQuery(A.value, B.value)
    obj_query = ConstructQuery({'a_id': A.id,
                                'b_id': get_(B.id, obj_sq)},
                               session)
    with patcher:
        obj_query.all()
    list_sq = RelativeCollectionSubQuery(A.value, B.value)
    list_query = ConstructQuery({'a_id': A.id,
                                 'b_id': map_(B.id, list_sq)},
                                session)
    with patcher:
        list_query.all()
def test_nested(self):
    """Nested get_/map_ compositions over the relation graph:

    A <- B -> C -> D <- E
    """
    class A(self.base_cls):
        name = Column(String)
    class B(self.base_cls):
        name = Column(String)
        a_id = Column('a_id', Integer, ForeignKey('a.id'))
        a = relationship('A', backref='b_list')
        c_id = Column('c_id', Integer, ForeignKey('c.id'))
        c = relationship('C', backref='b_list')
    class C(self.base_cls):
        name = Column(String)
        d_id = Column('d_id', Integer, ForeignKey('d.id'))
        d = relationship('D', backref='c_list')
    class D(self.base_cls):
        name = Column(String)
    class E(self.base_cls):
        name = Column(String)
        d_id = Column('d_id', Integer, ForeignKey('d.id'))
        d = relationship('D', backref='e_list')
    session = self.init()
    a1, a2, a3 = A(name='a1'), A(name='a2'), A(name='a3')
    # One D fanning out to three Cs, nine Bs (some without an A), three Es.
    d1 = D(name='d1',
           c_list=[C(name='c1',
                     b_list=[B(name='b1'),
                             B(name='b2', a=a2),
                             B(name='b3', a=a3)]),
                   C(name='c2',
                     b_list=[B(name='b4', a=a1),
                             B(name='b5'),
                             B(name='b6', a=a3)]),
                   C(name='c3',
                     b_list=[B(name='b7', a=a1),
                             B(name='b8', a=a2),
                             B(name='b9')])],
           e_list=[E(name='e1'), E(name='e2'), E(name='e3')])
    session.add_all([a1, a2, a3, d1])
    session.commit()
    # A <- B -> C
    r1 = tuple(dict(obj) for obj in ConstructQuery({
        'a_name': A.name,
        'b_names': map_(B.name, A.b_list),
        'c_names': map_(get_(C.name, B.c), A.b_list)
    }).with_session(session.registry()).order_by(A.name).all())
    self.assertEqual(r1, (
        {'a_name': 'a1', 'b_names': ['b4', 'b7'], 'c_names': ['c2', 'c3']},
        {'a_name': 'a2', 'b_names': ['b2', 'b8'], 'c_names': ['c1', 'c3']},
        {'a_name': 'a3', 'b_names': ['b3', 'b6'], 'c_names': ['c1', 'c2']},
    ))
    # B -> C -> D
    r2 = tuple(dict(obj) for obj in ConstructQuery({
        'b_name': B.name,
        'c_name': get_(C.name, B.c),
        'd_name': get_(get_(D.name, C.d), B.c),
    }).with_session(session.registry()).order_by(B.name).all())
    self.assertEqual(r2, (
        {'b_name': 'b1', 'c_name': 'c1', 'd_name': 'd1'},
        {'b_name': 'b2', 'c_name': 'c1', 'd_name': 'd1'},
        {'b_name': 'b3', 'c_name': 'c1', 'd_name': 'd1'},
        {'b_name': 'b4', 'c_name': 'c2', 'd_name': 'd1'},
        {'b_name': 'b5', 'c_name': 'c2', 'd_name': 'd1'},
        {'b_name': 'b6', 'c_name': 'c2', 'd_name': 'd1'},
        {'b_name': 'b7', 'c_name': 'c3', 'd_name': 'd1'},
        {'b_name': 'b8', 'c_name': 'c3', 'd_name': 'd1'},
        {'b_name': 'b9', 'c_name': 'c3', 'd_name': 'd1'},
    ))
    # C -> D <- E
    r3 = tuple(dict(obj) for obj in ConstructQuery({
        'c_name': C.name,
        'd_name': get_(D.name, C.d),
        'e_names': get_(map_(E.name, D.e_list), C.d),
    }).with_session(session.registry()).order_by(C.name).all())
    self.assertEqual(r3, (
        {'c_name': 'c1', 'd_name': 'd1', 'e_names': ['e1', 'e2', 'e3']},
        {'c_name': 'c2', 'd_name': 'd1', 'e_names': ['e1', 'e2', 'e3']},
        {'c_name': 'c3', 'd_name': 'd1', 'e_names': ['e1', 'e2', 'e3']},
    ))
    # D <- C <- B (collection order unspecified — compare as sets)
    r4 = dict(ConstructQuery({
        'd_name': D.name,
        'c_names': map_(C.name, D.c_list),
        'b_names': map_(map_(B.name, C.b_list), D.c_list),
    }).with_session(session.registry()).order_by(D.name).one())
    self.assertEqual(r4['d_name'], 'd1')
    self.assertEqual(set(r4['c_names']), {'c1', 'c2', 'c3'})
    self.assertEqual(set(map(frozenset, r4['b_names'])), {
        frozenset({'b1', 'b2', 'b3'}),
        frozenset({'b4', 'b5', 'b6'}),
        frozenset({'b7', 'b8', 'b9'}),
    })
def test_with_define(self):
    """A defined function's .defn can span a model and its relation."""
    class A(self.base_cls):
        name = Column(String)
        b_id = Column(Integer, ForeignKey('b.id'))
        b = relationship('B')
    class B(self.base_cls):
        name = Column(String)
    @define
    def full_name(a, b):
        def body(a_name, b_name):
            return ' '.join((a_name.capitalize(), b_name.capitalize()))
        return body, [a.name, b.name]
    session = self.init()
    b1, b2, b3 = B(name='b1'), B(name='b2'), B(name='b3')
    session.add_all([
        A(name='a1', b=b1), A(name='a2', b=b1), A(name='a3', b=b1),
        A(name='a4', b=b2), A(name='a5', b=b2), A(name='a6', b=b2),
        A(name='a7', b=b3), A(name='a8', b=b3), A(name='a9', b=b3),
    ])
    session.commit()
    query = (
        ConstructQuery({
            'full_name': full_name.defn(A, A.b),
        })
        .with_session(session.registry())
        .order_by(A.name)
    )
    self.assertEqual(
        tuple(dict(obj) for obj in query.all()),
        ({'full_name': 'A1 B1'},
         {'full_name': 'A2 B1'},
         {'full_name': 'A3 B1'},
         {'full_name': 'A4 B2'},
         {'full_name': 'A5 B2'},
         {'full_name': 'A6 B2'},
         {'full_name': 'A7 B3'},
         {'full_name': 'A8 B3'},
         {'full_name': 'A9 B3'}),
    )
def test_with_scoped_session(self):
    """ConstructQuery needs a session — supplied up front or via with_session."""
    class A(self.base_cls):
        name = Column(String)
    session = self.init()
    session.add_all([A(name='a1'), A(name='a2'), A(name='a3')])
    session.commit()
    bound = ConstructQuery({'name': A.name}, session)
    self.assertEqual({obj.name for obj in bound.all()}, {'a1', 'a2', 'a3'})
    unbound = ConstructQuery({'name': A.name})
    with self.assertRaises(AttributeError):
        unbound.all()
def test_with_custom_queries(self):
    """Explicit (non-relative) sub-queries join every outer row to the
    same independent result set."""
    class A(self.base_cls):
        name = Column(String)
    class B(self.base_cls):
        name = Column(String)
    session = self.init()
    session.add_all([
        A(name='a1'), A(name='a2'), A(name='a3'),
        B(name='b1'), B(name='b2'), B(name='b3'),
    ])
    session.commit()
    # Collection sub-query: each A gets the full, ordered list of B names.
    sq1 = CollectionSubQuery(B).order_by(B.name.desc())
    q1 = (
        ConstructQuery({'a_name': A.name, 'b_list': map_(B.name, sq1)},
                       session)
        .order_by(A.name)
    )
    self.assertEqual({(obj.a_name, tuple(obj.b_list)) for obj in q1.all()},
                     {('a1', ('b3', 'b2', 'b1')),
                      ('a2', ('b3', 'b2', 'b1')),
                      ('a3', ('b3', 'b2', 'b1'))})
    # Object sub-query: each A gets the first B by the same ordering.
    sq2 = ObjectSubQuery(B).order_by(B.name.desc())
    q2 = (
        ConstructQuery({'a_name': A.name, 'b_name': get_(B.name, sq2)},
                       session)
        .order_by(A.name)
    )
    self.assertEqual({(obj.a_name, obj.b_name) for obj in q2.all()},
                     {('a1', 'b3'), ('a2', 'b3'), ('a3', 'b3')})
def test_query_entities_modification(self):
    """A ConstructQuery's entity list is fixed at construction time."""
    class A(self.base_cls):
        name = Column(String)
    session = self.init()
    session.add_all([A(name='a1'), A(name='a2'), A(name='a3')])
    session.commit()
    self.assertEqual(ConstructQuery({'name': A.name}, session).count(), 3)
    # Adding columns or entities after the fact is explicitly unsupported.
    with self.assertRaises(NotImplementedError):
        ConstructQuery({'id': A.id}).add_columns(A.name)
    with self.assertRaises(NotImplementedError):
        ConstructQuery({'id': A.id}).add_entity(A)
def test_bound_to_query_expressions(self):
    """bind() evaluates an expression inside a relative sub-query."""
    class A(self.base_cls):
        name = Column(String)
        b_list = relationship('B')
    class B(self.base_cls):
        name = Column(String)
        a_id = Column(Integer, ForeignKey('a.id'))
    session = self.init()
    session.add_all([
        A(name='a1', b_list=[B(name='b1')]),
        A(name='a2', b_list=[B(name='b4'), B(name='b5')]),
        A(name='a3', b_list=[B(name='b7'), B(name='b8'), B(name='b9')]),
    ])
    session.commit()
    # Per-A aggregate: count B rows grouped by their a_id.
    sq = (
        RelativeObjectSubQuery.from_relation(A.b_list)
        .group_by(B.a_id)
    )
    query = (
        ConstructQuery({
            'a_name': A.name,
            'b_count': get_(bind(func.count(), sq), sq),
        })
        .order_by(A.name.asc())
        .with_session(session.registry())
    )
    self.assertEqual(
        [dict(obj) for obj in query.all()],
        [{'a_name': 'a1', 'b_count': 1},
         {'a_name': 'a2', 'b_count': 2},
         {'a_name': 'a3', 'b_count': 3}],
    )
def test_bound_to_query_reference(self):
    """Sub-queries can chain: the inner one references an expression bound
    to the outer one (abs(B.ref) == abs(C.ref))."""
    class A(self.base_cls):
        name = Column(String)
        b_list = relationship('B')
    class B(self.base_cls):
        name = Column(String)
        a_id = Column(Integer, ForeignKey('a.id'))
        ref = Column(Integer)
    class C(self.base_cls):
        name = Column(String)
        ref = Column(Integer)
    session = self.init()
    # Each B.ref matches two C rows by absolute value (c pairs +n/-n).
    session.add_all([
        A(name='a1', b_list=[B(name='b1', ref=-1), B(name='b2', ref=2)]),
        A(name='a2', b_list=[B(name='b3', ref=-3), B(name='b4', ref=4)]),
        C(name='c1', ref=1), C(name='c2', ref=-1),
        C(name='c3', ref=2), C(name='c4', ref=-2),
        C(name='c5', ref=3), C(name='c6', ref=-3),
        C(name='c7', ref=4), C(name='c8', ref=-4),
    ])
    session.commit()
    sq1 = (
        RelativeCollectionSubQuery.from_relation(A.b_list)
        .order_by(B.name.asc())
    )
    sq2 = (
        RelativeCollectionSubQuery(bind(func.abs(B.ref), sq1), func.abs(C.ref))
        .order_by(C.name.asc())
    )
    query = (
        ConstructQuery({
            'a_name': A.name,
            'c_names': map_(map_(C.name, sq2), sq1),
        })
        .order_by(A.name.asc())
        .with_session(session.registry())
    )
    self.assertEqual(
        [dict(obj) for obj in query.all()],
        [
            {'a_name': 'a1', 'c_names': [['c1', 'c2'], ['c3', 'c4']]},
            {'a_name': 'a2', 'c_names': [['c5', 'c6'], ['c7', 'c8']]},
        ],
    )
def test_bound_to_query_entities(self):
    """bind(Entity, None) references an entity joined in the OUTER query
    from inside a relative sub-query's defined function."""
    class A(self.base_cls):
        name = Column(String)
        c_id = Column(Integer, ForeignKey('c.id'))
        c = relationship('C')
    class B(self.base_cls):
        name = Column(String)
        a_id = Column(Integer, ForeignKey('a.id'))
        c_id = Column(Integer, ForeignKey('c.id'))
        a = relationship('A', backref='b_list')
        c = relationship('C')
    class C(self.base_cls):
        name = Column(String)
    session = self.init()
    session.add_all([
        A(name='a1', c=C(name='c11'), b_list=[
            B(name='b1', c=C(name='c21')),
            B(name='b2', c=C(name='c22')),
        ]),
        A(name='a2', c=C(name='c12'), b_list=[
            B(name='b3', c=C(name='c23')),
            B(name='b4', c=C(name='c24')),
        ]),
        A(name='a3', c=C(name='c13'), b_list=[
            B(name='b5', c=C(name='c25')),
            B(name='b6', c=C(name='c26')),
        ]),
    ])
    session.commit()
    @define
    def concat_names(a, c1, b, c2):
        def body(a_name, c1_name, b_name, c2_name):
            return ' '.join((a_name, c1_name, b_name, c2_name))
        return body, [a.name, c1.name, b.name, c2.name]
    sq = (
        RelativeCollectionSubQuery.from_relation(A.b_list)
        .outerjoin(B.c)
        .order_by(B.name.asc())
    )
    # bind(C, None) is A's C (joined below); the bare C is B's C from sq.
    query = (
        ConstructQuery({
            'names': map_(concat_names.defn(A, bind(C, None), B, C), sq),
        })
        .outerjoin(A.c)
        .order_by(A.name.asc())
        .with_session(session.registry())
    )
    self.assertEqual(
        [dict(obj) for obj in query.all()],
        [{'names': ['a1 c11 b1 c21', 'a1 c11 b2 c22']},
         {'names': ['a2 c12 b3 c23', 'a2 c12 b4 c24']},
         {'names': ['a3 c13 b5 c25', 'a3 c13 b6 c26']}],
    )
@unittest.skip('optional')
def test_performance(self):
    """Profile ConstructQuery against a manual tuples query and an ORM
    objects query.

    Skipped by default; enable locally to compare the printed cProfile
    reports. Ends with a deliberate 1/0 so the runner surfaces the output.
    """
    class A(self.base_cls):
        name = Column(String)
        b_list = relationship('B')
    class B(self.base_cls):
        name = Column(String)
        a_id = Column(Integer, ForeignKey('a.id'))
    session = self.init()
    session.add_all([A(name='a_%d' % i,
                       b_list=[B(name='b_%d_%d' % (i, j))
                               for j in range(10)])
                     for i in _range(20)])
    session.commit()
    session.remove()
    @define
    def b_name(a, b):
        def body(a_id, a_name, b_id, b_name):
            return '%d - %s - %d - %s' % (a_id, a_name, b_id, b_name)
        return body, [a.id, a.name, b.id, b.name]
    # Baseline 1: plain column tuples plus a hand-rolled grouping pass.
    tuples_query = session.query(A.id, A.name)
    tuples_subquery = session.query(B.id, B.a_id, B.name)
    def perform_tuples_query():
        a_rows = tuples_query.all()
        _tuples_subquery = (
            tuples_subquery
            .filter(B.a_id.in_({a_row.id for a_row in a_rows}))
        )
        b_rows_map = collections.defaultdict(list)
        for row in _tuples_subquery.all():
            b_rows_map[row.a_id].append(row)
        return [{'a_name': a_row.name,
                 'b_names': [b_name.func(a_row.id, a_row.name,
                                         b_row.id, b_row.name)
                             for b_row in b_rows_map[a_row.id]]}
                for a_row in a_rows]
    # Candidate: the same shape via ConstructQuery.
    construct_query = (
        ConstructQuery({
            'a_name': A.name,
            'b_names': map_(b_name.defn(A, B), A.b_list),
        }, session)
    )
    def perform_construct_query():
        return construct_query.all()
    # Baseline 2: full ORM objects with subqueryload.
    def perform_objects_query():
        session.remove()
        query = session.query(A).options(subqueryload(A.b_list))
        return [{'a_name': a.name,
                 'b_names': [b_name(a, b) for b in a.b_list]}
                for a in query.all()]
    def do(impl, count):
        # warm-up
        _res = [impl() for _ in _range(count)]
        profile = Profile()
        profile.enable()
        res = [impl() for _ in _range(count)]
        profile.disable()
        out = StringIO()
        stats = Stats(profile, stream=out)
        stats.strip_dirs()
        stats.sort_stats('calls').print_stats(10)
        print(out.getvalue().lstrip())
        out.close()
        return _res, res
    res = []
    res.extend(do(perform_tuples_query, 1000))
    res.extend(do(perform_construct_query, 1000))
    res.extend(do(perform_objects_query, 1000))
    # Intentional failure: keeps the profiling output visible in the report.
    1/0
class TestBugs(unittest.TestCase):
    """Regression tests for previously fixed sqlconstruct bugs."""

    def setUp(self):
        # Same fixture style as TestSubQueries: tests declare their own
        # models, then init() creates tables and returns a scoped session.
        self.engine = create_engine('sqlite://')
        self.base_cls = declarative_base(metaclass=BaseMeta)

    def init(self):
        """Create all declared tables and return a configured scoped session."""
        self.base_cls.metadata.create_all(self.engine)
        session = scoped_session(sessionmaker())
        session.configure(bind=self.engine)
        return session
def test_nested_get_with_null_value_in_outer_expr(self):
    """Nested get_ must yield None, not fail, when an outer link is NULL."""
    class A(self.base_cls):
        name = Column(String)
        b_id = Column('b_id', Integer, ForeignKey('b.id'))
        b = relationship('B', backref='a_list')
    class B(self.base_cls):
        name = Column(String)
        c_id = Column('c_id', Integer, ForeignKey('c.id'))
        c = relationship('C', backref='b_list')
    class C(self.base_cls):
        name = Column(String)
    session = self.init()
    # a2's B has no C; a3 has no B at all — both must surface as None.
    c1 = C(name='c1')
    b1, b2 = B(name='b1', c=c1), B(name='b2')
    a1, a2, a3 = A(name='a1', b=b1), A(name='a2', b=b2), A(name='a3')
    session.add_all([a1, a2, a3, b1, b2, c1])
    session.commit()
    res = tuple(dict(obj) for obj in ConstructQuery({
        'a_name': A.name,
        'b_name': get_(B.name, A.b),
        'c_name': get_(get_(C.name, B.c), A.b),
    }).with_session(session.registry()).order_by(A.name).all())
    self.assertEqual(res, (
        {'a_name': 'a1', 'b_name': 'b1', 'c_name': 'c1'},
        {'a_name': 'a2', 'b_name': 'b2', 'c_name': None},
        {'a_name': 'a3', 'b_name': None, 'c_name': None},
    ))
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import shutil
from collections import defaultdict
import requests
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir
from pants.contrib.go.subsystems.fetchers import Fetchers
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_task import GoTask
class GoFetch(GoTask):
  """Fetches third-party Go libraries."""

  @classmethod
  def global_subsystems(cls):
    return super(GoFetch, cls).global_subsystems() + (Fetchers,)

  @classmethod
  def product_types(cls):
    return ['go_remote_lib_src']

  @property
  def cache_target_dirs(self):
    # TODO(John Sirois): See TODO in _transitive_download_remote_libs, re-consider how artifact
    # caching works for fetches.
    return True

  def execute(self):
    """Download all remote Go libraries in the target roots (and their transitive deps)."""
    self.context.products.safe_create_data('go_remote_lib_src', lambda: defaultdict(str))
    go_remote_libs = self.context.targets(self.is_remote_lib)
    if not go_remote_libs:
      return
    undeclared_deps = self._transitive_download_remote_libs(set(go_remote_libs))
    if undeclared_deps:
      self._log_undeclared_deps(undeclared_deps)
      raise TaskError('Failed to resolve transitive Go remote dependencies.')

  def _log_undeclared_deps(self, undeclared_deps):
    """Log each dependee's missing go_remote_library declarations."""
    for dependee, deps in undeclared_deps.items():
      self.context.log.error('{address} has remote dependencies which require local declaration:'
                             .format(address=dependee.address.reference()))
      for dep_import_path, address in deps:
        self.context.log.error('\t--> {import_path} (expected go_remote_library declaration '
                               'at {address})'.format(import_path=dep_import_path,
                                                      address=address.reference()))

  def _get_fetcher(self, import_path):
    return Fetchers.global_instance().get_fetcher(import_path)

  @classmethod
  def _check_for_meta_tag(cls, import_path):
    """Looks for go-import meta tags for the provided import_path.

    Returns three values. First is the import prefix which designates where the
    root of the repo should be set up. Next is the version control system that
    must be used to copy down the repository. Finally is the URL to access the
    repository.

    If the meta tag is not found in the page's source, None is returned for all
    three values.

    More info: https://golang.org/cmd/go/#hdr-Remote_import_paths
    """
    session = requests.session()
    # Override default http adapters with a retriable one.
    retriable_http_adapter = requests.adapters.HTTPAdapter(max_retries=2)
    session.mount("http://", retriable_http_adapter)
    session.mount("https://", retriable_http_adapter)
    try:
      page_data = session.get('http://{import_path}?go-get=1'.format(import_path=import_path))
    except requests.ConnectionError:
      return None, None, None

    if not page_data:
      return None, None, None

    root, vcs, url = cls._find_meta_tag(page_data.text)
    if root and vcs and url:
      # Check to make sure returned root is an exact match to the provided import path. If it is
      # not then run a recursive check on the returned root and return the values provided by
      # that call.
      if root == import_path:
        return root, vcs, url
      elif import_path.startswith(root):
        # BUG FIX: was `import_path.starts_with(root)` -- str has no `starts_with`
        # method, so this branch always raised AttributeError.
        return cls._check_for_meta_tag(root)
    return None, None, None

  @classmethod
  def _find_meta_tag(cls, page_html):
    """Returns the (root, vcs, url) content of the go-import meta tag if found
    inside of the provided HTML, else (None, None, None)."""
    meta_import_regex = re.compile(r'<meta\s+name="go-import"\s+content="(?P<root>[^\s]+)\s+(?P<vcs>[^\s]+)\s+(?P<url>[^\s]+)"\s*>')
    matched = meta_import_regex.search(page_html)
    if matched:
      return matched.groups()
    # BUG FIX: previously returned bare None, which made the 3-way tuple unpack
    # in _check_for_meta_tag raise TypeError whenever no meta tag was present.
    return None, None, None

  def _transitive_download_remote_libs(self, go_remote_libs, all_known_addresses=None):
    """Recursively attempt to resolve / download all remote transitive deps of go_remote_libs.

    Returns a dict<GoRemoteLibrary, set<tuple<str, Address>>>, which maps a go remote library to a
    set of unresolved remote dependencies, each dependency expressed as a tuple containing the
    the import path of the dependency and the expected target address. If all transitive
    dependencies were successfully resolved, returns an empty dict.

    Downloads as many invalidated transitive dependencies as possible, and returns as many
    undeclared dependencies as possible. However, because the dependencies of a remote library
    can only be determined _after_ it has been downloaded, a transitive dependency of an undeclared
    remote library will never be detected.

    Because go_remote_libraries do not declare dependencies (rather, they are inferred), injects
    all successfully resolved transitive dependencies into the build graph.
    """
    if not go_remote_libs:
      return {}

    all_known_addresses = all_known_addresses or set()
    all_known_addresses.update(lib.address for lib in go_remote_libs)

    resolved_remote_libs = set()
    undeclared_deps = defaultdict(set)
    go_remote_lib_src = self.context.products.get_data('go_remote_lib_src')

    with self.invalidated(go_remote_libs) as invalidation_check:
      for vt in invalidation_check.all_vts:
        go_remote_lib = vt.target
        gopath = vt.results_dir
        fetcher = self._get_fetcher(go_remote_lib.import_path)

        if not vt.valid:
          # Find the root the fetcher should operate on, preferring a go-import
          # meta-tag redirect when one is published for this import path.
          meta_root, meta_protocol, meta_repo_url = self._check_for_meta_tag(go_remote_lib.import_path)
          if meta_root:
            root = fetcher.root(meta_root)
          else:
            root = fetcher.root(go_remote_lib.import_path)
          fetch_dir = os.path.join(self.workdir, 'fetches')
          root_dir = os.path.join(fetch_dir, root)

          # Only fetch each remote root once.
          if not os.path.exists(root_dir):
            with temporary_dir() as tmp_fetch_root:
              fetcher.fetch(go_remote_lib.import_path, dest=tmp_fetch_root,
                            rev=go_remote_lib.rev, meta_repo_url=meta_repo_url)
              safe_mkdir(root_dir)
              for path in os.listdir(tmp_fetch_root):
                shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))

          # TODO(John Sirois): Circle back and get rid of this symlink tree.
          # GoWorkspaceTask will further symlink a single package from the tree below into a
          # target's workspace when it could just be linking from the fetch_dir. The only thing
          # standing in the way is a determination of what we want to artifact cache. If we don't
          # want to cache fetched zips, linking straight from the fetch_dir works simply. Otherwise
          # thought needs to be applied to using the artifact cache directly or synthesizing a
          # canonical owner target for the fetched files that 'child' targets (subpackages) can
          # depend on and share the fetch from.
          dest_dir = os.path.join(gopath, 'src', root)
          # We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
          # chroot to avoid collision; thus `clean=True`.
          safe_mkdir(dest_dir, clean=True)
          for path in os.listdir(root_dir):
            os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path))

        # Map the fetched remote sources.
        pkg = go_remote_lib.import_path
        go_remote_lib_src[go_remote_lib] = os.path.join(gopath, 'src', pkg)

        for remote_import_path in self._get_remote_import_paths(pkg, gopath=gopath):
          fetcher = self._get_fetcher(remote_import_path)
          remote_root = fetcher.root(remote_import_path)
          spec_path = os.path.join(go_remote_lib.target_base, remote_root)

          package_path = GoRemoteLibrary.remote_package_path(remote_root, remote_import_path)
          target_name = package_path or os.path.basename(remote_root)

          address = Address(spec_path, target_name)
          if address not in all_known_addresses:
            try:
              # If we've already resolved a package from this remote root, its ok to define an
              # implicit synthetic remote target for all other packages in the same remote root.
              implicit_ok = any(spec_path == a.spec_path for a in all_known_addresses)

              remote_lib = self._resolve(go_remote_lib, address, package_path, implicit_ok)
              resolved_remote_libs.add(remote_lib)
              all_known_addresses.add(address)
            except self.UndeclaredRemoteLibError as e:
              undeclared_deps[go_remote_lib].add((remote_import_path, e.address))
          self.context.build_graph.inject_dependency(go_remote_lib.address, address)

    # Recurse after the invalidated block, so the libraries we downloaded are now "valid"
    # and thus we don't try to download a library twice.
    trans_undeclared_deps = self._transitive_download_remote_libs(resolved_remote_libs,
                                                                  all_known_addresses)
    undeclared_deps.update(trans_undeclared_deps)

    return undeclared_deps

  class UndeclaredRemoteLibError(Exception):
    """Raised when a remote dep has no BUILD declaration and cannot be synthesized."""

    def __init__(self, address):
      self.address = address

  def _resolve(self, dependent_remote_lib, address, pkg, implicit_ok):
    """Resolves the GoRemoteLibrary at `address` defining the given `pkg`.

    If `implicit_ok` is True, then a GoRemoteLibrary to own `pkg` is always synthesized if it does
    not already exist; otherwise the address must already exist in the build graph (a BUILD file
    must exist on disk that owns the given `pkg` and declares a `rev` for it).

    :param dependent_remote_lib: The remote library that depends on the remote `pkg`.
    :type: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
    :param address: The address of the remote library that should own `pkg`.
    :type: :class:`pants.base.Address`
    :param string pkg: The remote package path whose owning target needs to be resolved.
    :param bool implicit_ok: `False` if the given `address` must be defined in a BUILD file on disk;
                             otherwise a remote library to own `pkg` will always be created and
                             returned.
    :returns: The resulting resolved remote library after injecting it in the build graph.
    :rtype: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
    :raises: :class:`GoFetch.UndeclaredRemoteLibError`: If no BUILD file exists for the remote root
             `pkg` lives in.
    """
    try:
      self.context.build_graph.inject_address_closure(address)
    except AddressLookupError:
      if implicit_ok:
        self.context.add_new_target(address=address,
                                    target_base=dependent_remote_lib.target_base,
                                    target_type=GoRemoteLibrary,
                                    pkg=pkg)
      else:
        raise self.UndeclaredRemoteLibError(address)
    return self.context.build_graph.get_target(address)

  @staticmethod
  def _is_relative(import_path):
    return import_path.startswith('.')

  def _get_remote_import_paths(self, pkg, gopath=None):
    """Returns the remote import paths declared by the given remote Go `pkg`.

    NB: This only includes production code imports, no test code imports.
    """
    import_listing = self.import_oracle.list_imports(pkg, gopath=gopath)
    return [imp for imp in import_listing.imports
            if (not self.import_oracle.is_go_internal_import(imp) and
                # We assume relative imports are local to the package and skip attempts to
                # recursively resolve them.
                not self._is_relative(imp))]
| |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Support for running 'shell commands'
"""
import sys
import os
import signal
import types
import re
import subprocess
import traceback
import stat
from collections import deque
from tempfile import NamedTemporaryFile
from twisted.python import runtime, log
from twisted.python.win32 import quoteArguments
from twisted.internet import reactor, defer, protocol, task, error
from buildslave import util
from buildslave.exceptions import AbandonChain
if runtime.platformType == 'posix':
from twisted.internet.process import Process
def shell_quote(cmd_list):
    # attempt to quote cmd_list such that a shell will properly re-interpret
    # it.  The pipes module is only available on UNIX, and Windows "shell"
    # quoting is indescribably convoluted - so much so that it's not clear it's
    # reversible.  Also, the quote function is undocumented (although it looks
    # like it will be documented soon: http://bugs.python.org/issue9723).
    # Finally, it has a nasty bug in some versions where an empty string is not
    # quoted.
    #
    # So:
    #  - use pipes.quote on UNIX, handling '' as a special case
    #  - use Python's repr() on Windows, as a best effort
    if runtime.platformType == 'win32':
        # FIX: was the Python-2-only backtick syntax ``e`` (removed in
        # Python 3); repr() is its exact, explicit equivalent.
        return " ".join([repr(e) for e in cmd_list])
    else:
        import pipes

        def quote(e):
            if not e:
                return '""'
            return pipes.quote(e)
        return " ".join([quote(e) for e in cmd_list])
class LogFileWatcher:
    """Polls a logfile written by the running command and streams any new data
    back to the master via RunProcess.addLogfile()."""

    POLL_INTERVAL = 2  # seconds between polls of the watched file

    def __init__(self, command, name, logfile, follow=False):
        """
        @param command: the owning RunProcess; receives addLogfile(name, data)
        @param name: the log name to report data under
        @param logfile: path of the file to watch
        @param follow: if True, only report data appended after watching began
        """
        self.command = command
        self.name = name
        self.logfile = logfile

        log.msg("LogFileWatcher created to watch %s" % logfile)
        # we are created before the ShellCommand starts. If the logfile we're
        # supposed to be watching already exists, record its size and
        # ctime/mtime so we can tell when it starts to change.
        self.old_logfile_stats = self.statFile()
        self.started = False

        # follow the file, only sending back lines
        # added since we started watching
        self.follow = follow

        # every 2 seconds we check on the file again
        self.poller = task.LoopingCall(self.poll)

    def start(self):
        self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)

    def _cleanupPoll(self, err):
        log.err(err, msg="Polling error")
        self.poller = None

    def stop(self):
        # do one final poll so trailing data is not lost, then shut down
        self.poll()
        if self.poller is not None:
            self.poller.stop()
        if self.started:
            self.f.close()

    def statFile(self):
        """Return (ctime, mtime, size) for the logfile, or None if it doesn't exist."""
        if os.path.exists(self.logfile):
            s = os.stat(self.logfile)
            return (s[stat.ST_CTIME], s[stat.ST_MTIME], s[stat.ST_SIZE])
        return None

    def poll(self):
        # First phase: wait until the file's stat changes from its initial
        # snapshot, then open it (optionally seeking past pre-existing data).
        if not self.started:
            s = self.statFile()
            if s == self.old_logfile_stats:
                return  # not started yet
            if not s:
                # the file was there, but now it's deleted. Forget about the
                # initial state, clearly the process has deleted the logfile
                # in preparation for creating a new one.
                self.old_logfile_stats = None
                return  # no file to work with
            self.f = open(self.logfile, "rb")
            # if we only want new lines, seek to
            # where we stat'd so we only find new
            # lines
            if self.follow:
                self.f.seek(s[2], 0)
            self.started = True
        # re-seek to the current position to clear any EOF condition, then
        # drain everything appended since the last poll.
        self.f.seek(self.f.tell(), 0)
        while True:
            data = self.f.read(10000)
            if not data:
                return
            self.command.addLogfile(self.name, data)
if runtime.platformType == 'posix':
    class ProcGroupProcess(Process):
        """Simple subclass of Process to also make the spawned process a process
        group leader, so we can kill all members of the process group."""

        def _setupChild(self, *args, **kwargs):
            # Runs in the forked child, after twisted's own fd/uid setup.
            Process._setupChild(self, *args, **kwargs)

            # this will cause the child to be the leader of its own process group;
            # it's also spelled setpgrp() on BSD, but this spelling seems to work
            # everywhere
            os.setpgid(0, 0)
class RunProcessPP(protocol.ProcessProtocol):
    """ProcessProtocol that forwards the child's stdout/stderr to a RunProcess
    and reports the final (signal, exit code) pair when the process ends."""

    debug = False

    def __init__(self, command):
        self.command = command
        self.pending_stdin = ""
        self.stdin_finished = False
        self.killed = False

    def setStdin(self, data):
        # only legal before the process has been spawned
        assert not self.connected
        self.pending_stdin = data

    def connectionMade(self):
        if self.debug:
            log.msg("RunProcessPP.connectionMade")

        if self.command.useProcGroup:
            if self.debug:
                log.msg(" recording pid %d as subprocess pgid"
                        % (self.transport.pid,))
            # the child was made a process-group leader, so its pid is the pgid
            self.transport.pgid = self.transport.pid

        # deliver any buffered stdin, then close the pipe
        if self.pending_stdin:
            if self.debug:
                log.msg(" writing to stdin")
            self.transport.write(self.pending_stdin)
        if self.debug:
            log.msg(" closing stdin")
        self.transport.closeStdin()

    def outReceived(self, data):
        if self.debug:
            log.msg("RunProcessPP.outReceived")
        self.command.addStdout(data)

    def errReceived(self, data):
        if self.debug:
            log.msg("RunProcessPP.errReceived")
        self.command.addStderr(data)

    def processEnded(self, status_object):
        if self.debug:
            log.msg("RunProcessPP.processEnded", status_object)
        # status_object is a Failure wrapped around an
        # error.ProcessTerminated or and error.ProcessDone.
        # requires twisted >= 1.0.4 to overcome a bug in process.py
        exit_signal = status_object.value.signal
        exit_code = status_object.value.exitCode

        # sometimes, even when we kill a process, GetExitCodeProcess will still return
        # a zero exit status. So we force it. See
        # http://stackoverflow.com/questions/2061735/42-passed-to-terminateprocess-sometimes-getexitcodeprocess-returns-0
        if self.killed and exit_code == 0:
            log.msg("process was killed, but exited with status 0; faking a failure")
            # windows returns '1' even for signalled failures, while POSIX returns -1
            exit_code = 1 if runtime.platformType == 'win32' else -1
        self.command.finished(exit_signal, exit_code)
class RunProcess:
"""
This is a helper class, used by slave commands to run programs in a child
shell.
"""
notreally = False
BACKUP_TIMEOUT = 5
interruptSignal = "KILL"
CHUNK_LIMIT = 128*1024
# Don't send any data until at least BUFFER_SIZE bytes have been collected
# or BUFFER_TIMEOUT elapsed
BUFFER_SIZE = 64*1024
BUFFER_TIMEOUT = 5
# For sending elapsed time:
startTime = None
elapsedTime = None
# For scheduling future events
_reactor = reactor
# I wish we had easy access to CLOCK_MONOTONIC in Python:
# http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html
# Then changes to the system clock during a run wouldn't effect the "elapsed
# time" results.
def __init__(self, builder, command,
workdir, environ=None,
sendStdout=True, sendStderr=True, sendRC=True,
timeout=None, maxTime=None, initialStdin=None,
keepStdout=False, keepStderr=False,
logEnviron=True, logfiles={}, usePTY="slave-config",
useProcGroup=True):
"""
@param keepStdout: if True, we keep a copy of all the stdout text
that we've seen. This copy is available in
self.stdout, which can be read after the command
has finished.
@param keepStderr: same, for stderr
@param usePTY: "slave-config" -> use the SlaveBuilder's usePTY;
otherwise, true to use a PTY, false to not use a PTY.
@param useProcGroup: (default True) use a process group for non-PTY
process invocations
"""
self.builder = builder
# We need to take unicode commands and arguments and encode them using
# the appropriate encoding for the slave. This is mostly platform
# specific, but can be overridden in the slave's buildbot.tac file.
#
# Encoding the command line here ensures that the called executables
# receive arguments as bytestrings encoded with an appropriate
# platform-specific encoding. It also plays nicely with twisted's
# spawnProcess which checks that arguments are regular strings or
# unicode strings that can be encoded as ascii (which generates a
# warning).
def to_str(cmd):
if isinstance(cmd, (tuple, list)):
for i, a in enumerate(cmd):
if isinstance(a, unicode):
cmd[i] = a.encode(self.builder.unicode_encoding)
elif isinstance(cmd, unicode):
cmd = cmd.encode(self.builder.unicode_encoding)
return cmd
self.command = to_str(util.Obfuscated.get_real(command))
self.fake_command = to_str(util.Obfuscated.get_fake(command))
self.sendStdout = sendStdout
self.sendStderr = sendStderr
self.sendRC = sendRC
self.logfiles = logfiles
self.workdir = workdir
self.process = None
if not os.path.exists(workdir):
os.makedirs(workdir)
if environ:
for key, v in environ.iteritems():
if isinstance(v, list):
# Need to do os.pathsep translation. We could either do that
# by replacing all incoming ':'s with os.pathsep, or by
# accepting lists. I like lists better.
# If it's not a string, treat it as a sequence to be
# turned in to a string.
environ[key] = os.pathsep.join(environ[key])
if environ.has_key('PYTHONPATH'):
environ['PYTHONPATH'] += os.pathsep + "${PYTHONPATH}"
# do substitution on variable values matching pattern: ${name}
p = re.compile('\${([0-9a-zA-Z_]*)}')
def subst(match):
return os.environ.get(match.group(1), "")
newenv = {}
for key in os.environ.keys():
# setting a key to None will delete it from the slave environment
if key not in environ or environ[key] is not None:
newenv[key] = os.environ[key]
for key, v in environ.iteritems():
if v is not None:
if not isinstance(v, basestring):
raise RuntimeError("'env' values must be strings or "
"lists; key '%s' is incorrect" % (key,))
newenv[key] = p.sub(subst, v)
self.environ = newenv
else: # not environ
self.environ = os.environ.copy()
self.initialStdin = initialStdin
self.logEnviron = logEnviron
self.timeout = timeout
self.timer = None
self.maxTime = maxTime
self.maxTimer = None
self.keepStdout = keepStdout
self.keepStderr = keepStderr
self.buffered = deque()
self.buflen = 0
self.buftimer = None
if usePTY == "slave-config":
self.usePTY = self.builder.usePTY
else:
self.usePTY = usePTY
# usePTY=True is a convenience for cleaning up all children and
# grandchildren of a hung command. Fall back to usePTY=False on systems
# and in situations where ptys cause problems. PTYs are posix-only,
# and for .closeStdin to matter, we must use a pipe, not a PTY
if runtime.platformType != "posix" or initialStdin is not None:
if self.usePTY and usePTY != "slave-config":
self.sendStatus({'header': "WARNING: disabling usePTY for this command"})
self.usePTY = False
# use an explicit process group on POSIX, noting that usePTY always implies
# a process group.
if runtime.platformType != 'posix':
useProcGroup = False
elif self.usePTY:
useProcGroup = True
self.useProcGroup = useProcGroup
self.logFileWatchers = []
for name,filevalue in self.logfiles.items():
filename = filevalue
follow = False
# check for a dictionary of options
# filename is required, others are optional
if type(filevalue) == dict:
filename = filevalue['filename']
follow = filevalue.get('follow', False)
w = LogFileWatcher(self, name,
os.path.join(self.workdir, filename),
follow=follow)
self.logFileWatchers.append(w)
    def __repr__(self):
        # show the obfuscated command, never the real (possibly secret) one
        return "<%s '%s'>" % (self.__class__.__name__, self.fake_command)
    def sendStatus(self, status):
        """Forward a status dictionary to the master via our builder."""
        self.builder.sendUpdate(status)
def start(self):
# return a Deferred which fires (with the exit code) when the command
# completes
if self.keepStdout:
self.stdout = ""
if self.keepStderr:
self.stderr = ""
self.deferred = defer.Deferred()
try:
self._startCommand()
except:
log.msg("error in RunProcess._startCommand")
log.err()
self._addToBuffers('stderr', "error in RunProcess._startCommand\n")
self._addToBuffers('stderr', traceback.format_exc())
self._sendBuffers()
# pretend it was a shell error
self.deferred.errback(AbandonChain(-1))
return self.deferred
    def _startCommand(self):
        """Build the argv, log the command header to the master, and spawn the
        child process (plus timeout timers and logfile watchers)."""
        # ensure workdir exists
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)
        log.msg("RunProcess._startCommand")
        if self.notreally:
            # dry-run mode: just report what would have run, then "succeed"
            self._addToBuffers('header', "command '%s' in dir %s" % \
                               (self.fake_command, self.workdir))
            self._addToBuffers('header', "(not really)\n")
            self.finished(None, 0)
            return

        self.pp = RunProcessPP(self)

        self.using_comspec = False
        # a string command is run through a shell; a list is exec'd directly
        if type(self.command) in types.StringTypes:
            if runtime.platformType == 'win32':
                argv = os.environ['COMSPEC'].split()  # allow %COMSPEC% to have args
                if '/c' not in argv: argv += ['/c']
                argv += [self.command]
                self.using_comspec = True
            else:
                # for posix, use /bin/sh. for other non-posix, well, doesn't
                # hurt to try
                argv = ['/bin/sh', '-c', self.command]
            display = self.fake_command
        else:
            # On windows, CreateProcess requires an absolute path to the executable.
            # When we call spawnProcess below, we pass argv[0] as the executable.
            # So, for .exe's that we have absolute paths to, we can call directly
            # Otherwise, we should run under COMSPEC (usually cmd.exe) to
            # handle path searching, etc.
            if runtime.platformType == 'win32' and not \
                    (self.command[0].lower().endswith(".exe") and os.path.isabs(self.command[0])):
                argv = os.environ['COMSPEC'].split()  # allow %COMSPEC% to have args
                if '/c' not in argv: argv += ['/c']
                argv += list(self.command)
                self.using_comspec = True
            else:
                argv = self.command
            # Attempt to format this for use by a shell, although the process isn't perfect
            display = shell_quote(self.fake_command)

        # $PWD usually indicates the current directory; spawnProcess may not
        # update this value, though, so we set it explicitly here. This causes
        # weird problems (bug #456) on msys, though..
        if not self.environ.get('MACHTYPE', None) == 'i686-pc-msys':
            self.environ['PWD'] = os.path.abspath(self.workdir)

        # self.stdin is handled in RunProcessPP.connectionMade

        # first, the command header line
        log.msg(" " + display)
        self._addToBuffers('header', display+"\n")

        # then comes the secondary information
        msg = " in dir %s" % (self.workdir,)
        if self.timeout:
            if self.timeout == 1:
                unit = "sec"
            else:
                unit = "secs"
            msg += " (timeout %d %s)" % (self.timeout, unit)
        if self.maxTime:
            if self.maxTime == 1:
                unit = "sec"
            else:
                unit = "secs"
            msg += " (maxTime %d %s)" % (self.maxTime, unit)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        msg = " watching logfiles %s" % (self.logfiles,)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        # then the obfuscated command array for resolving unambiguity
        msg = " argv: %s" % (self.fake_command,)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        # then the environment, since it sometimes causes problems
        if self.logEnviron:
            msg = " environment:\n"
            env_names = self.environ.keys()
            env_names.sort()
            for name in env_names:
                msg += " %s=%s\n" % (name, self.environ[name])
            log.msg(" environment: %s" % (self.environ,))
            self._addToBuffers('header', msg)

        if self.initialStdin:
            msg = " writing %d bytes to stdin" % len(self.initialStdin)
            log.msg(" " + msg)
            self._addToBuffers('header', msg+"\n")

        msg = " using PTY: %s" % bool(self.usePTY)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        # put data into stdin and close it, if necessary. This will be
        # buffered until connectionMade is called
        if self.initialStdin:
            self.pp.setStdin(self.initialStdin)

        self.startTime = util.now(self._reactor)

        # start the process
        self.process = self._spawnProcess(
            self.pp, argv[0], argv,
            self.environ,
            self.workdir,
            usePTY=self.usePTY)

        # set up timeouts
        if self.timeout:
            self.timer = self._reactor.callLater(self.timeout, self.doTimeout)
        if self.maxTime:
            self.maxTimer = self._reactor.callLater(self.maxTime, self.doMaxTimeout)

        for w in self.logFileWatchers:
            w.start()
def _spawnProcess(self, processProtocol, executable, args=(), env={},
path=None, uid=None, gid=None, usePTY=False, childFDs=None):
"""private implementation of reactor.spawnProcess, to allow use of
L{ProcGroupProcess}"""
# use the ProcGroupProcess class, if available
if runtime.platformType == 'posix':
if self.useProcGroup and not usePTY:
return ProcGroupProcess(reactor, executable, args, env, path,
processProtocol, uid, gid, childFDs)
# fall back
if self.using_comspec:
return self._spawnAsBatch(processProtocol, executable, args, env,
path, usePTY=usePTY)
else:
return reactor.spawnProcess(processProtocol, executable, args, env,
path, usePTY=usePTY)
    def _spawnAsBatch(self, processProtocol, executable, args, env,
                      path, usePTY):
        """A cheat that routes around the impedance mismatch between
        twisted and cmd.exe with respect to escaping quotes

        Writes the command into a temporary .bat file and runs that via
        %COMSPEC%; the file is unlinked when self.deferred fires.
        """
        # delete=False: cmd.exe must be able to open the file after we close it
        tf = NamedTemporaryFile(dir='.',suffix=".bat",delete=False)
        #echo off hides this cheat from the log files.
        tf.write( "@echo off\n" )
        if type(self.command) in types.StringTypes:
            tf.write( self.command )
        else:
            def maybe_escape_pipes(arg):
                # escape '|' inside arguments, but keep a bare '|' as a real pipe
                if arg != '|':
                    return arg.replace('|','^|')
                else:
                    return '|'
            cmd = [maybe_escape_pipes(arg) for arg in self.command]
            tf.write( quoteArguments(cmd) )
        tf.close()

        argv = os.environ['COMSPEC'].split()  # allow %COMSPEC% to have args
        if '/c' not in argv: argv += ['/c']
        argv += [tf.name]

        def unlink_temp(result):
            # NOTE(review): if spawnProcess raises before the deferred fires,
            # the .bat file is leaked -- confirm whether that path matters.
            os.unlink(tf.name)
            return result
        self.deferred.addBoth(unlink_temp)

        return reactor.spawnProcess(processProtocol, executable, argv, env,
                                    path, usePTY=usePTY)
def _chunkForSend(self, data):
"""
limit the chunks that we send over PB to 128k, since it has a hardwired
string-size limit of 640k.
"""
LIMIT = self.CHUNK_LIMIT
for i in range(0, len(data), LIMIT):
yield data[i:i+LIMIT]
def _collapseMsg(self, msg):
"""
Take msg, which is a dictionary of lists of output chunks, and
concatentate all the chunks into a single string
"""
retval = {}
for log in msg:
data = "".join(msg[log])
if isinstance(log, tuple) and log[0] == 'log':
retval['log'] = (log[1], data)
else:
retval[log] = data
return retval
def _sendMessage(self, msg):
"""
Collapse and send msg to the master
"""
if not msg:
return
msg = self._collapseMsg(msg)
self.sendStatus(msg)
    def _bufferTimeout(self):
        # BUFFER_TIMEOUT expired without a flush: send whatever is queued
        self.buftimer = None
        self._sendBuffers()
    def _sendBuffers(self):
        """
        Send all the content in our buffers.

        Chunks are drained in arrival order; a partial message is flushed
        whenever the log name changes or the accumulated size reaches
        CHUNK_LIMIT, so data from different logs is never mixed in one message.
        """
        msg = {}
        msg_size = 0
        lastlog = None
        # NB: after the loop, ``logdata`` doubles as the "anything still
        # unsent?" flag for the final flush below.
        logdata = []
        while self.buffered:
            # Grab the next bits from the buffer
            logname, data = self.buffered.popleft()

            # If this log is different than the last one, then we have to send
            # out the message so far. This is because the message is
            # transferred as a dictionary, which makes the ordering of keys
            # unspecified, and makes it impossible to interleave data from
            # different logs. A future enhancement could be to change the
            # master to support a list of (logname, data) tuples instead of a
            # dictionary.
            # On our first pass through this loop lastlog is None
            if lastlog is None:
                lastlog = logname
            elif logname != lastlog:
                self._sendMessage(msg)
                msg = {}
                msg_size = 0
            lastlog = logname

            logdata = msg.setdefault(logname, [])

            # Chunkify the log data to make sure we're not sending more than
            # CHUNK_LIMIT at a time
            for chunk in self._chunkForSend(data):
                if len(chunk) == 0: continue
                logdata.append(chunk)
                msg_size += len(chunk)
                if msg_size >= self.CHUNK_LIMIT:
                    # We've gone beyond the chunk limit, so send out our
                    # message. At worst this results in a message slightly
                    # larger than (2*CHUNK_LIMIT)-1
                    self._sendMessage(msg)
                    msg = {}
                    logdata = msg.setdefault(logname, [])
                    msg_size = 0
        self.buflen = 0
        if logdata:
            self._sendMessage(msg)

        # everything is flushed; a pending buffer timer is no longer needed
        if self.buftimer:
            if self.buftimer.active():
                self.buftimer.cancel()
            self.buftimer = None
def _addToBuffers(self, logname, data):
"""
Add data to the buffer for logname
Start a timer to send the buffers if BUFFER_TIMEOUT elapses.
If adding data causes the buffer size to grow beyond BUFFER_SIZE, then
the buffers will be sent.
"""
n = len(data)
self.buflen += n
self.buffered.append((logname, data))
if self.buflen > self.BUFFER_SIZE:
self._sendBuffers()
elif not self.buftimer:
self.buftimer = self._reactor.callLater(self.BUFFER_TIMEOUT, self._bufferTimeout)
    def addStdout(self, data):
        """Buffer stdout for the master and/or keep a local copy; any output
        resets the inactivity timeout."""
        if self.sendStdout:
            self._addToBuffers('stdout', data)
        if self.keepStdout:
            self.stdout += data
        if self.timer:
            self.timer.reset(self.timeout)
    def addStderr(self, data):
        """Buffer stderr for the master and/or keep a local copy; any output
        resets the inactivity timeout."""
        if self.sendStderr:
            self._addToBuffers('stderr', data)
        if self.keepStderr:
            self.stderr += data
        if self.timer:
            self.timer.reset(self.timeout)
    def addLogfile(self, name, data):
        """Buffer data from a watched logfile; counts as activity for the
        inactivity timeout."""
        self._addToBuffers( ('log', name), data)
        if self.timer:
            self.timer.reset(self.timeout)
    def finished(self, sig, rc):
        """The child exited: flush buffers, report rc (forced to -1 when killed
        by a signal), cancel all timers, and fire the deferred with rc."""
        self.elapsedTime = util.now(self._reactor) - self.startTime
        log.msg("command finished with signal %s, exit code %s, elapsedTime: %0.6f" % (sig,rc,self.elapsedTime))
        for w in self.logFileWatchers:
            # this will send the final updates
            w.stop()
        self._sendBuffers()

        # a signal death overrides whatever exit code the OS reported
        if sig is not None:
            rc = -1
        if self.sendRC:
            if sig is not None:
                self.sendStatus(
                    {'header': "process killed by signal %d\n" % sig})
            self.sendStatus({'rc': rc})
        self.sendStatus({'header': "elapsedTime=%0.6f\n" % self.elapsedTime})
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.maxTimer:
            self.maxTimer.cancel()
            self.maxTimer = None
        if self.buftimer:
            self.buftimer.cancel()
            self.buftimer = None
        # clear self.deferred first so a re-entrant call sees it as consumed
        d = self.deferred
        self.deferred = None
        if d:
            d.callback(rc)
        else:
            log.msg("Hey, command %s finished twice" % self)
def failed(self, why):
self._sendBuffers()
log.msg("RunProcess.failed: command failed: %s" % (why,))
if self.timer:
self.timer.cancel()
self.timer = None
if self.maxTimer:
self.maxTimer.cancel()
self.maxTimer = None
if self.buftimer:
self.buftimer.cancel()
self.buftimer = None
d = self.deferred
self.deferred = None
if d:
d.errback(why)
else:
log.msg("Hey, command %s finished twice" % self)
    def doTimeout(self):
        """Inactivity timeout fired: no output for self.timeout seconds."""
        self.timer = None
        msg = "command timed out: %d seconds without output" % self.timeout
        self.kill(msg)
    def doMaxTimeout(self):
        """Hard wall-clock limit fired: the command ran for self.maxTime seconds."""
        self.maxTimer = None
        msg = "command timed out: %d seconds elapsed" % self.maxTime
        self.kill(msg)
def kill(self, msg):
    """Attempt to kill the child process, escalating through several
    mechanisms, and arrange for the command to finish one way or another.

    :param msg: human-readable reason, sent to the status log
    """
    # This may be called by the timeout, or when the user has decided to
    # abort this build.
    self._sendBuffers()
    # cancel all timers first so none of them fires during/after the kill
    if self.timer:
        self.timer.cancel()
        self.timer = None
    if self.maxTimer:
        self.maxTimer.cancel()
        self.maxTimer = None
    if self.buftimer:
        self.buftimer.cancel()
        self.buftimer = None
    msg += ", attempting to kill"
    log.msg(msg)
    self.sendStatus({'header': "\n" + msg + "\n"})
    # let the PP know that we are killing it, so that it can ensure that
    # the exit status comes out right
    self.pp.killed = True
    # keep track of whether we believe we've successfully killed something
    hit = 0
    # try signalling the process group
    if not hit and self.useProcGroup and runtime.platformType == "posix":
        sig = getattr(signal, "SIG"+ self.interruptSignal, None)
        if sig is None:
            log.msg("signal module is missing SIG%s" % self.interruptSignal)
        elif not hasattr(os, "kill"):
            log.msg("os module is missing the 'kill' function")
        elif self.process.pgid is None:
            log.msg("self.process has no pgid")
        else:
            log.msg("trying to kill process group %d" %
                    (self.process.pgid,))
            try:
                # negative pid signals the whole process group
                os.kill(-self.process.pgid, sig)
                log.msg(" signal %s sent successfully" % sig)
                self.process.pgid = None
                hit = 1
            except OSError:
                log.msg('failed to kill process group (ignored): %s' %
                        (sys.exc_info()[1],))
                # probably no-such-process, maybe because there is no process
                # group
                pass
    elif runtime.platformType == "win32":
        # NOTE(review): `== None` should be `is None` per PEP 8; kept as-is.
        if self.interruptSignal == None:
            log.msg("self.interruptSignal==None, only pretending to kill child")
        elif self.process.pid is not None:
            log.msg("using TASKKILL /F PID /T to kill pid %s" % self.process.pid)
            subprocess.check_call("TASKKILL /F /PID %s /T" % self.process.pid)
            log.msg("taskkill'd pid %s" % self.process.pid)
            hit = 1
    # try signalling the process itself (works on Windows too, sorta)
    if not hit:
        try:
            log.msg("trying process.signalProcess('%s')" % (self.interruptSignal,))
            self.process.signalProcess(self.interruptSignal)
            log.msg(" signal %s sent successfully" % (self.interruptSignal,))
            hit = 1
        except OSError:
            log.err("from process.signalProcess:")
            # could be no-such-process, because they finished very recently
            pass
        except error.ProcessExitedAlready:
            log.msg("Process exited already - can't kill")
            # the process has already exited, and likely finished() has
            # been called already or will be called shortly
            pass
    if not hit:
        log.msg("signalProcess/os.kill failed both times")
    if runtime.platformType == "posix":
        # we only do this under posix because the win32eventreactor
        # blocks here until the process has terminated, while closing
        # stderr. This is weird.
        self.pp.transport.loseConnection()
    if self.deferred:
        # finished ought to be called momentarily. Just in case it doesn't,
        # set a timer which will abandon the command.
        self.timer = self._reactor.callLater(self.BACKUP_TIMEOUT,
                                             self.doBackupTimeout)
def doBackupTimeout(self):
    """Last resort: kill() did not make the process die, so give up and
    fail the command with a fake return code."""
    log.msg("we tried to kill the process, and it wouldn't die.."
            " finish anyway")
    self.timer = None
    self.sendStatus({'header': "SIGKILL failed to kill process\n"})
    if self.sendRC:
        # report a synthetic failure code since no real one will arrive
        self.sendStatus({'header': "using fake rc=-1\n"})
        self.sendStatus({'rc': -1})
    self.failed(RuntimeError("SIGKILL failed to kill process"))
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from textwrap import dedent
from pants.backend.core.register import build_file_aliases as register_core
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.core.targets.resources import Resources
from pants.backend.jvm.register import build_file_aliases as register_jvm
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.java_tests import JavaTests
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.project_info.tasks.export import Export
from pants.backend.python.register import build_file_aliases as register_python
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.exceptions import TaskError
from pants.base.source_root import SourceRoot
from pants_test.subsystem.subsystem_util import subsystem_instance
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class ExportTest(ConsoleTaskTestBase):
    """Tests for the `export` console task, which emits a JSON description of
    the build graph (targets, libraries, source roots, globs, platforms)."""

    @classmethod
    def task_type(cls):
        # The task under test.
        return Export

    @property
    def alias_groups(self):
        # BUILD-file aliases needed by the targets created in setUp().
        return register_core().merge(register_jvm()).merge(register_python())

    def setUp(self):
        super(ExportTest, self).setUp()
        self.set_options_for_scope('jvm-platform',
                                   default_platform='java6',
                                   platforms={
                                       'java6': {'source': '1.6', 'target': '1.6'}
                                   })
        # ScalaPlatform must be instantiated while the scala targets below are
        # being defined.
        with subsystem_instance(ScalaPlatform):
            self.make_target(':scala-library',
                             JarLibrary,
                             jars=[JarDependency('org.scala-lang', 'scala-library', '2.10.5')])

            self.make_target(
                'project_info:first',
                target_type=Dependencies,
            )

            jar_lib = self.make_target(
                'project_info:jar_lib',
                target_type=JarLibrary,
                jars=[JarDependency('org.apache', 'apache-jar', '12.12.2012')],
            )

            self.make_target(
                'java/project_info:java_lib',
                target_type=JavaLibrary,
                sources=['com/foo/Bar.java', 'com/foo/Baz.java'],
            )

            self.make_target(
                'project_info:third',
                target_type=ScalaLibrary,
                dependencies=[jar_lib],
                java_sources=['java/project_info:java_lib'],
                sources=['com/foo/Bar.scala', 'com/foo/Baz.scala'],
            )

            self.make_target(
                'project_info:globular',
                target_type=ScalaLibrary,
                dependencies=[jar_lib],
                java_sources=['java/project_info:java_lib'],
                sources=['com/foo/*.scala'],
            )

            self.make_target(
                'project_info:jvm_app',
                target_type=JvmApp,
                dependencies=[jar_lib],
            )

            self.make_target(
                'project_info:jvm_target',
                target_type=ScalaLibrary,
                dependencies=[jar_lib],
                sources=['this/is/a/source/Foo.scala', 'this/is/a/source/Bar.scala'],
            )

            test_resource = self.make_target(
                'project_info:test_resource',
                target_type=Resources,
                sources=['y_resource', 'z_resource'],
            )

            self.make_target(
                'project_info:java_test',
                target_type=JavaTests,
                dependencies=[jar_lib],
                sources=['this/is/a/test/source/FooTest.scala'],
                resources=[test_resource.address.spec],
            )

            jvm_binary = self.make_target(
                'project_info:jvm_binary',
                target_type=JvmBinary,
                dependencies=[jar_lib],
            )

            self.make_target(
                'project_info:top_dependency',
                target_type=Dependencies,
                dependencies=[jvm_binary],
            )

            src_resource = self.make_target(
                'project_info:resource',
                target_type=Resources,
                sources=['a_resource', 'b_resource'],
            )

            self.make_target(
                'project_info:target_type',
                target_type=ScalaLibrary,
                dependencies=[jvm_binary],
                resources=[src_resource.address.spec],
            )

            # JvmTarget itself is not an exportable leaf type; see
            # test_unrecognized_target_type below.
            self.make_target(
                'project_info:unrecognized_target_type',
                target_type=JvmTarget,
            )

            SourceRoot.register(os.path.realpath(os.path.join(self.build_root, 'src')),
                                PythonLibrary)

            self.add_to_build_file('src/x/BUILD', '''
python_library(name="x", sources=globs("*.py"))
'''.strip())

            self.add_to_build_file('src/y/BUILD', dedent('''
python_library(name="y", sources=rglobs("*.py"))
python_library(name="y2", sources=rglobs("subdir/*.py"))
python_library(name="y3", sources=rglobs("Test*.py"))
'''))

            self.add_to_build_file('src/z/BUILD', '''
python_library(name="z", sources=zglobs("**/*.py"))
'''.strip())

            self.add_to_build_file('src/exclude/BUILD', '''
python_library(name="exclude", sources=globs("*.py", exclude=[['foo.py']]))
'''.strip())

    def execute_export(self, *specs):
        # Run the export task over the given target specs and return its
        # console output lines.
        context = self.context(target_roots=[self.target(spec) for spec in specs])
        context.products.safe_create_data('compile_classpath', init_func=ClasspathProducts)
        return self.execute_console_task_given_context(context=context)

    def execute_export_json(self, *specs):
        # Convenience wrapper: parse the export output as JSON.
        return json.loads(''.join(self.execute_export(*specs)))

    def test_source_globs_py(self):
        self.set_options(globs=True)
        result = self.execute_export_json('src/x')
        self.assertEqual(
            {'globs': ['src/x/*.py']},
            result['targets']['src/x:x']['globs']
        )

    def test_source_globs_java(self):
        self.set_options(globs=True)
        result = self.execute_export_json('project_info:globular')
        self.assertEqual(
            {'globs' : ['project_info/com/foo/*.scala']},
            result['targets']['project_info:globular']['globs']
        )

    def test_without_dependencies(self):
        result = self.execute_export_json('project_info:first')
        self.assertEqual({}, result['libraries'])

    def test_version(self):
        # Export format version emitted by the task.
        result = self.execute_export_json('project_info:first')
        self.assertEqual('1.0.4', result['version'])

    def test_sources(self):
        self.set_options(sources=True)
        result = self.execute_export_json('project_info:third')
        self.assertEqual(
            ['project_info/com/foo/Bar.scala',
             'project_info/com/foo/Baz.scala',
             ],
            sorted(result['targets']['project_info:third']['sources'])
        )

    def test_with_dependencies(self):
        result = self.execute_export_json('project_info:third')

        self.assertEqual(
            sorted([
                ':scala-library',
                'java/project_info:java_lib',
                'project_info:jar_lib'
            ]),
            sorted(result['targets']['project_info:third']['targets'])
        )
        self.assertEqual(sorted(['org.scala-lang:scala-library:2.10.5',
                                 'org.apache:apache-jar:12.12.2012']),
                         sorted(result['targets']['project_info:third']['libraries']))

        self.assertEqual(1, len(result['targets']['project_info:third']['roots']))
        source_root = result['targets']['project_info:third']['roots'][0]
        self.assertEqual('com.foo', source_root['package_prefix'])
        self.assertEqual(
            '{0}/project_info/com/foo'.format(self.build_root),
            source_root['source_root']
        )

    def test_jvm_app(self):
        result = self.execute_export_json('project_info:jvm_app')
        self.assertEqual(['org.apache:apache-jar:12.12.2012'],
                         result['targets']['project_info:jvm_app']['libraries'])

    def test_jvm_target(self):
        self.maxDiff = None
        result = self.execute_export_json('project_info:jvm_target')
        jvm_target = result['targets']['project_info:jvm_target']
        expected_jvm_target = {
            'excludes': [],
            'globs': {'globs': ['project_info/this/is/a/source/Foo.scala',
                                'project_info/this/is/a/source/Bar.scala']},
            'libraries': ['org.apache:apache-jar:12.12.2012', 'org.scala-lang:scala-library:2.10.5'],
            'is_code_gen': False,
            'targets': ['project_info:jar_lib', ':scala-library'],
            'roots': [
                {
                    'source_root': '{root}/project_info/this/is/a/source'.format(root=self.build_root),
                    'package_prefix': 'this.is.a.source'
                },
            ],
            'target_type': 'SOURCE',
            'pants_target_type': 'scala_library',
            'platform': 'java6',
        }
        self.assertEqual(jvm_target, expected_jvm_target)

    def test_no_libraries(self):
        self.set_options(libraries=False)
        result = self.execute_export_json('project_info:java_test')
        self.assertEqual([],
                         result['targets']['project_info:java_test']['libraries'])

    def test_java_test(self):
        result = self.execute_export_json('project_info:java_test')
        self.assertEqual('TEST', result['targets']['project_info:java_test']['target_type'])
        self.assertEqual(['org.apache:apache-jar:12.12.2012'],
                         result['targets']['project_info:java_test']['libraries'])
        self.assertEqual('TEST_RESOURCE',
                         result['targets']['project_info:test_resource']['target_type'])

    def test_jvm_binary(self):
        result = self.execute_export_json('project_info:jvm_binary')
        self.assertEqual(['org.apache:apache-jar:12.12.2012'],
                         result['targets']['project_info:jvm_binary']['libraries'])

    def test_top_dependency(self):
        result = self.execute_export_json('project_info:top_dependency')
        self.assertEqual([], result['targets']['project_info:top_dependency']['libraries'])
        self.assertEqual(['project_info:jvm_binary'],
                         result['targets']['project_info:top_dependency']['targets'])

    def test_format_flag(self):
        self.set_options(formatted=False)
        result = self.execute_export('project_info:third')
        # confirms only one line of output, which is what -format should produce
        self.assertEqual(1, len(result))

    def test_target_types(self):
        result = self.execute_export_json('project_info:target_type')
        self.assertEqual('SOURCE',
                         result['targets']['project_info:target_type']['target_type'])
        self.assertEqual('RESOURCE', result['targets']['project_info:resource']['target_type'])

    def test_target_platform(self):
        result = self.execute_export_json('project_info:target_type')
        self.assertEqual('java6',
                         result['targets']['project_info:target_type']['platform'])

    def test_output_file(self):
        outfile = os.path.join(self.build_root, '.pants.d', 'test')
        self.set_options(output_file=outfile)
        self.execute_export('project_info:target_type')
        self.assertTrue(os.path.exists(outfile))

    def test_output_file_error(self):
        # Passing a directory as the output file must fail cleanly.
        self.set_options(output_file=self.build_root)
        with self.assertRaises(TaskError):
            self.execute_export('project_info:target_type')

    def test_unrecognized_target_type(self):
        with self.assertRaises(TaskError):
            self.execute_export('project_info:unrecognized_target_type')

    def test_source_exclude(self):
        self.set_options(globs=True)
        result = self.execute_export_json('src/exclude')

        self.assertEqual(
            {'globs': ['src/exclude/*.py'],
             'exclude': [{
                 'globs': ['src/exclude/foo.py']
             }],
             },
            result['targets']['src/exclude:exclude']['globs']
        )

    def test_source_rglobs(self):
        self.set_options(globs=True)
        result = self.execute_export_json('src/y')

        self.assertEqual(
            {'globs': ['src/y/**/*.py', 'src/y/*.py']},
            result['targets']['src/y:y']['globs']
        )

    def test_source_rglobs_subdir(self):
        self.set_options(globs=True)
        result = self.execute_export_json('src/y:y2')

        self.assertEqual(
            {'globs': ['src/y/subdir/**/*.py', 'src/y/subdir/*.py']},
            result['targets']['src/y:y2']['globs']
        )

    def test_source_rglobs_noninitial(self):
        self.set_options(globs=True)
        result = self.execute_export_json('src/y:y3')

        self.assertEqual(
            {'globs': ['src/y/Test*.py']},
            result['targets']['src/y:y3']['globs']
        )

    def test_source_zglobs(self):
        self.set_options(globs=True)
        result = self.execute_export_json('src/z')

        self.assertEqual(
            {'globs': ['src/z/**/*.py']},
            result['targets']['src/z:z']['globs']
        )
| |
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Extract control flow and parse graphs to DOT graph descriptions and generate
PNGs of them
"""
import p4_hlir.hlir.p4 as p4
import os
import subprocess
import argparse
import dependency_graph
def get_call_name(node, exit_node=None):
    """Return ``node.name``, or ``exit_node`` when node is falsy (i.e. the
    control flow leaves the pipeline)."""
    return node.name if node else exit_node
def dump_table(node, exit_node, visited=None):
    """Return DOT node/edge declarations for the match-action control flow
    reachable from ``node``, recursing over unvisited successors.

    Args:
        node: a p4.p4_table or p4.p4_conditional_node.
        exit_node: DOT name used for edges that leave the pipeline.
        visited: set of already-dumped nodes, threaded through the recursion.

    Returns:
        str: DOT statements (without the surrounding digraph wrapper).
    """
    # TODO: careful about tables with names with reserved DOT keywords
    p = ""
    # Fix: compare against None with `is`, not `==` (PEP 8); also avoids
    # invoking any custom __eq__ on HLIR objects.
    if visited is None:
        visited = set()
    visited.add(node)
    if type(node) is p4.p4_table:
        p += " %s [shape=ellipse];\n" % node.name
    elif type(node) is p4.p4_conditional_node:
        p += " %s [shape=box label=\"%s\"];\n" % (get_call_name(node), str(node.condition))
    for label, next_node in node.next_.items():
        if type(node) is p4.p4_table:
            arrowhead = "normal"
            # Edge labels are action names (objects) or plain strings.
            if type(label) is str:
                label_str = " label=\"%s\"" % label
            else:
                label_str = " label=\"%s\"" % label.name
        elif type(node) is p4.p4_conditional_node:
            label_str = ""
            # True branch gets a filled dot, False branch an open dot.
            arrowhead = "dot" if label else "odot"
        p += " %s -> %s [arrowhead=%s%s];\n" % (get_call_name(node),
                                                get_call_name(next_node, exit_node),
                                                arrowhead, label_str)
        if next_node and next_node not in visited:
            p += dump_table(next_node, exit_node, visited)
    if len(node.next_) == 0:
        # A node with no successors falls through to the pipeline exit.
        p += " %s -> %s;\n" % (node.name, exit_node)
    return p
def dump_parser(node, visited=None):
    """Return DOT statements for the parse graph reachable from parse state
    ``node``, recursing into unvisited successor states.

    NOTE(review): uses the Python 2-only ``long`` builtin; this module is
    Python 2 code (see the `print` statements elsewhere in the file).
    """
    if not visited:
        visited = set()
    visited.add(node.name)
    p = ""
    p += " %s [shape=record label=\"{" % node.name
    p += node.name
    if node.branch_on:
        # List the fields / lookahead expressions this state branches on.
        p += " | {"
        for elem in node.branch_on:
            elem_name = str(elem).replace("instances.","")
            if type(elem) is tuple:
                # assumes a tuple is a current(offset, width) lookahead —
                # TODO confirm against the HLIR
                elem_name = "current"+elem_name
            p += elem_name + " | "
        # Drop the trailing " | " separator.
        p = p[0:-3]
        p+="}"
    p += "}\"];\n"
    for case, target in node.branch_to.items():
        label = ""
        if type(case) is not list:
            case = [case]
        for caseval in case:
            if type(caseval) is int or type(caseval) is long:
                label += hex(caseval) + ", "
            elif caseval == p4.P4_DEFAULT:
                label += "default, "
            elif type(caseval) == p4.p4_parse_value_set:
                label += "set("+caseval.name+"), "
        # Drop the trailing ", " separator.
        label = label[0:-2]
        dst_name = target.name
        if type(target) is p4.p4_table:
            # Tables get a prefix so their DOT ids do not clash with states.
            dst_name = "__table_"+dst_name
        p += " %s -> %s [label=\"%s\"];\n" % (node.name, dst_name, label)
    for _, target in node.branch_to.items():
        if type(target) is p4.p4_parse_state and target.name not in visited:
            p += dump_parser(target, visited)
    return p
def generate_graph_png(dot, out):
    """Render the DOT file ``dot`` to a PNG image written at path ``out``."""
    cmd = ["dot", "-Tpng", dot]
    with open(out, 'w') as out_file:
        subprocess.check_call(cmd, stdout=out_file)
def generate_graph_eps(dot, out):
    """Render the DOT file ``dot`` to an EPS image written at path ``out``."""
    cmd = ["dot", "-Teps", dot]
    with open(out, 'w') as out_file:
        subprocess.check_call(cmd, stdout=out_file)
def export_parse_graph(hlir, filebase, gen_dir):
    """Write <filebase>.parser.dot under ``gen_dir`` and render it to PNG,
    falling back to EPS when PNG rendering fails."""
    program_str = "digraph g {\n"
    # 'wire' is the synthetic entry point feeding the 'start' parse state.
    program_str += " wire [shape=doublecircle];\n"
    for entry_point in hlir.p4_ingress_ptr:
        program_str += " %s [label=%s shape=doublecircle];\n" % ("__table_"+entry_point.name, entry_point.name)
    sub_str = dump_parser(hlir.p4_parse_states["start"])
    program_str += " wire -> start\n"
    program_str += sub_str
    program_str += "}\n"
    filename_dot = os.path.join(gen_dir, filebase + ".parser.dot")
    with open(filename_dot, "w") as dotf:
        dotf.write(program_str)
    filename_png = os.path.join(gen_dir, filebase + ".parser.png")
    filename_eps = os.path.join(gen_dir, filebase + ".parser.eps")
    # NOTE(review): the bare `except:` hides the real error from `dot`;
    # kept to preserve behaviour, but narrowing it would be safer.
    try:
        generate_graph_png(filename_dot, filename_png)
    except:
        print 'Generating eps'
        generate_graph_eps(filename_dot, filename_eps)
def export_table_graph(hlir, filebase, gen_dir, predecessors=False):
    """Write <filebase>.tables.dot under ``gen_dir`` (ingress and, when
    present, egress table flow) and render it to PNG, falling back to EPS.

    :param predecessors: when True, also draw the parse states that invoke
                         each ingress entry point
    """
    program_str = "digraph g {\n"
    # 'buffer' separates ingress from egress; 'egress' is the pipeline exit.
    program_str += " buffer [shape=doublecircle];\n"
    program_str += " egress [shape=doublecircle];\n"
    for entry_point, invokers in hlir.p4_ingress_ptr.items():
        if predecessors:
            for invoker in invokers:
                program_str += " %s [label=%s shape=doublecircle];\n" % ("__parser_"+invoker.name, invoker.name)
                program_str += " %s -> %s\n" % ("__parser_"+invoker.name, get_call_name(entry_point))
        program_str += dump_table(entry_point, "buffer")
    if hlir.p4_egress_ptr:
        program_str += " buffer -> %s\n" % get_call_name(hlir.p4_egress_ptr)
        program_str += dump_table(hlir.p4_egress_ptr, "egress")
    else:
        # No egress pipeline: connect the buffer straight to the exit.
        program_str += " buffer -> egress [arrowhead=normal]\n"
    program_str += "}\n"
    filename_dot = os.path.join(gen_dir, filebase + ".tables.dot")
    with open(filename_dot, "w") as dotf:
        dotf.write(program_str)
    filename_png = os.path.join(gen_dir, filebase + ".tables.png")
    filename_eps = os.path.join(gen_dir, filebase + ".tables.eps")
    # NOTE(review): bare except kept to preserve the PNG -> EPS fallback.
    try:
        generate_graph_png(filename_dot, filename_png)
    except:
        print 'Generating eps'
        generate_graph_eps(filename_dot, filename_eps)
def export_table_dependency_graph(hlir, filebase, gen_dir, show_conds = False):
    """Build ingress/egress table dependency graphs, print the minimum
    number of pipeline stages each requires, write the DOT files under
    ``gen_dir`` and render them (PNG, falling back to EPS)."""
    print
    print "TABLE DEPENDENCIES..."
    print
    print "INGRESS PIPELINE"
    filename_dot = os.path.join(gen_dir, filebase + ".ingress.tables_dep.dot")
    graph = dependency_graph.build_table_graph_ingress(hlir)
    min_stages = graph.count_min_stages(show_conds = show_conds)
    print "pipeline ingress requires at least", min_stages, "stages"
    with open(filename_dot, 'w') as dotf:
        graph.generate_dot(out = dotf)
    filename_png = os.path.join(gen_dir, filebase + ".ingress.tables_dep.png")
    filename_eps = os.path.join(gen_dir, filebase + ".ingress.tables_dep.eps")
    # NOTE(review): bare except kept to preserve the PNG -> EPS fallback.
    try:
        generate_graph_png(filename_dot, filename_png)
    except:
        print 'Generating eps'
        generate_graph_eps(filename_dot, filename_eps)
    print
    print "EGRESS PIPELINE"
    if hlir.p4_egress_ptr:
        filename_dot = os.path.join(gen_dir, filebase + ".egress.tables_dep.dot")
        graph = dependency_graph.build_table_graph_egress(hlir)
        min_stages = graph.count_min_stages(show_conds = show_conds)
        print "pipeline egress requires at least", min_stages, "stages"
        with open(filename_dot, 'w') as dotf:
            graph.generate_dot(out = dotf)
        filename_png = os.path.join(gen_dir, filebase + ".egress.tables_dep.png")
        filename_eps = os.path.join(gen_dir, filebase + ".egress.tables_dep.eps")
        try:
            generate_graph_png(filename_dot, filename_png)
        except:
            print 'Generating eps'
            generate_graph_eps(filename_dot, filename_eps)
    else:
        print "Egress pipeline is empty"
    print
| |
# Copyright 2014 Mirantis Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Thread-safe connection pool for python-memcached."""
# NOTE(yorik-sar): this file is copied between keystone and keystonemiddleware
# and should be kept in sync until we can use external library for this.
import collections
import contextlib
import itertools
import logging
import threading
import time
import memcache
from oslo_log import log
from six.moves import queue
from keystone import exception
from keystone.i18n import _
LOG = log.getLogger(__name__)
# This 'class' is taken from http://stackoverflow.com/a/22520633/238308
# Don't inherit client from threading.local so that we can reuse clients in
# different threads
_MemcacheClient = type('_MemcacheClient', (object,),
                       dict(memcache.Client.__dict__))

# Pool entries pair a connection with the absolute timestamp (not a duration)
# after which the idle connection should be reaped.
_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])
class ConnectionPool(queue.Queue):
    """Base connection pool class

    This class implements the basic connection pool logic as an abstract base
    class.
    """

    def __init__(self, maxsize, unused_timeout, conn_get_timeout=None):
        """Initialize the connection pool.

        :param maxsize: maximum number of client connections for the pool
        :type maxsize: int
        :param unused_timeout: idle time to live for unused clients (in
                               seconds). If a client connection object has been
                               in the pool and idle for longer than the
                               unused_timeout, it will be reaped. This is to
                               ensure resources are released as utilization
                               goes down.
        :type unused_timeout: int
        :param conn_get_timeout: maximum time in seconds to wait for a
                                 connection. If set to `None` timeout is
                                 indefinite.
        :type conn_get_timeout: int
        """
        # Direct base-class call rather than super() to stay compatible with
        # both stdlib and eventlet Queue implementations.
        queue.Queue.__init__(self, maxsize)
        self._unused_timeout = unused_timeout
        self._connection_get_timeout = conn_get_timeout
        # Number of connections currently checked out of the pool.
        self._acquired = 0

    def _create_connection(self):
        """Returns a connection instance.

        This is called when the pool needs another instance created.

        :returns: a new connection instance
        """
        raise NotImplementedError

    def _destroy_connection(self, conn):
        """Destroy and cleanup a connection instance.

        This is called when the pool wishes to get rid of an existing
        connection. This is the opportunity for a subclass to free up
        resources and cleaup after itself.

        :param conn: the connection object to destroy
        """
        raise NotImplementedError

    def _debug_logger(self, msg, *args, **kwargs):
        # Cheap guard: skip string building entirely when DEBUG is disabled.
        if LOG.isEnabledFor(logging.DEBUG):
            thread_id = threading.current_thread().ident
            args = (id(self), thread_id) + args
            prefix = 'Memcached pool %s, thread %s: '
            LOG.debug(prefix + msg, *args, **kwargs)

    @contextlib.contextmanager
    def acquire(self):
        # Check a connection out of the pool for the duration of the `with`
        # block, returning (or reaping) it afterwards.
        self._debug_logger('Acquiring connection')
        try:
            conn = self.get(timeout=self._connection_get_timeout)
        except queue.Empty:
            raise exception.UnexpectedError(
                _('Unable to get a connection from pool id %(id)s after '
                  '%(seconds)s seconds.') %
                {'id': id(self), 'seconds': self._connection_get_timeout})
        self._debug_logger('Acquired connection %s', id(conn))
        try:
            yield conn
        finally:
            self._debug_logger('Releasing connection %s', id(conn))
            self._drop_expired_connections()
            try:
                # block=False: a full pool means this connection is surplus.
                super(ConnectionPool, self).put(conn, block=False)
            except queue.Full:
                self._debug_logger('Reaping exceeding connection %s', id(conn))
                self._destroy_connection(conn)

    def _qsize(self):
        # Report free capacity so Queue.get blocks only when all
        # maxsize connections are checked out.
        if self.maxsize:
            return self.maxsize - self._acquired
        else:
            # A value indicating there is always a free connection
            # if maxsize is None or 0
            return 1

    # NOTE(dstanek): stdlib and eventlet Queue implementations
    # have different names for the qsize method. This ensures
    # that we override both of them.
    if not hasattr(queue.Queue, '_qsize'):
        qsize = _qsize

    def _get(self):
        # Reuse the most recently returned connection (LIFO) or lazily
        # create a new one when the pool is empty.
        if self.queue:
            conn = self.queue.pop().connection
        else:
            conn = self._create_connection()
        self._acquired += 1
        return conn

    def _drop_expired_connections(self):
        """Drop all expired connections from the right end of the queue."""
        now = time.time()
        # Oldest entries sit at the left; stop at the first unexpired one.
        while self.queue and self.queue[0].ttl < now:
            conn = self.queue.popleft().connection
            self._debug_logger('Reaping connection %s', id(conn))
            self._destroy_connection(conn)

    def _put(self, conn):
        # Stamp the connection with its idle expiry time on the way in.
        self.queue.append(_PoolItem(
            ttl=time.time() + self._unused_timeout,
            connection=conn,
        ))
        self._acquired -= 1
class MemcacheClientPool(ConnectionPool):
    """Connection pool of python-memcached clients that shares dead-host
    state between all pooled clients."""

    def __init__(self, urls, arguments, **kwargs):
        ConnectionPool.__init__(self, **kwargs)
        self.urls = urls
        self._arguments = arguments
        # NOTE(morganfainberg): The host objects expect an int for the
        # deaduntil value. Initialize this at 0 for each host with 0 indicating
        # the host is not dead.
        self._hosts_deaduntil = [0] * len(urls)

    def _create_connection(self):
        # _MemcacheClient is a non-thread-local clone of memcache.Client so
        # connections can be shared across threads (see module comment).
        return _MemcacheClient(self.urls, **self._arguments)

    def _destroy_connection(self, conn):
        conn.disconnect_all()

    def _get(self):
        conn = ConnectionPool._get(self)
        try:
            # Propagate host state known to us to this client's list
            now = time.time()
            for deaduntil, host in zip(self._hosts_deaduntil, conn.servers):
                if deaduntil > now and host.deaduntil <= now:
                    host.mark_dead('propagating death mark from the pool')
                    host.deaduntil = deaduntil
        except Exception:
            # We need to be sure that connection doesn't leak from the pool.
            # This code runs before we enter context manager's try-finally
            # block, so we need to explicitly release it here
            ConnectionPool._put(self, conn)
            raise
        return conn

    def _put(self, conn):
        try:
            # If this client found that one of the hosts is dead, mark it as
            # such in our internal list
            now = time.time()
            for i, host in zip(itertools.count(), conn.servers):
                deaduntil = self._hosts_deaduntil[i]
                # Do nothing if we already know this host is dead
                if deaduntil <= now:
                    if host.deaduntil > now:
                        self._hosts_deaduntil[i] = host.deaduntil
                        self._debug_logger(
                            'Marked host %s dead until %s',
                            self.urls[i], host.deaduntil)
                    else:
                        self._hosts_deaduntil[i] = 0
            # If all hosts are dead we should forget that they're dead. This
            # way we won't get completely shut off until dead_retry seconds
            # pass, but will be checking servers as frequent as we can (over
            # way smaller socket_timeout)
            if all(deaduntil > now for deaduntil in self._hosts_deaduntil):
                self._debug_logger('All hosts are dead. Marking them as live.')
                self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil)
        finally:
            ConnectionPool._put(self, conn)
| |
#coding: utf-8
"""For Drupal site deployment and management"""
from __future__ import print_function
from bs4 import BeautifulSoup as Soup
try:
from http.cookiejar import CookieJar
except ImportError:
from cookielib import CookieJar
from os import remove as rm
from os.path import isdir, join as path_join, realpath, basename
from pipes import quote as shell_quote
from sh import tar, unzip
from shlex import split as shell_split
from shutil import copy2 as copy_file, rmtree as rmdir_force
import json
import os
import re
import subprocess as sp
try:
from urllib.parse import urlencode
from urllib.request import build_opener, HTTPCookieProcessor
except ImportError:
from urllib import urlencode
from urllib2 import build_opener, HTTPCookieProcessor
from osext.filesystem import sync as dir_sync, isfile
from osext.pushdcontext import pushd
import httpext as http
import langutil.php as php
# Upstream download URIs (pinned versions) for third-party libraries required
# by various Drupal modules; consumed by the _install_* callbacks below.
CKEDITOR_URI = 'http://download.cksource.com/CKEditor/CKEditor/' + \
               'CKEditor%203.6.6.1/ckeditor_3.6.6.1.tar.gz'
FANCYBOX_URI = 'https://github.com/fancyapps/fancyBox/zipball/v2.1.5'
JQ_COLOR_PICKER_URI = 'http://www.eyecon.ro/colorpicker/colorpicker.zip'
JQ_CYCLE_URI = 'http://malsup.github.com/jquery.cycle.all.js'
PREDIS_URI = 'https://github.com/nrk/predis/zipball/v0.8.4'
SIMPLEPIE_URI = 'http://simplepie.org/downloads/simplepie_1.3.1.compiled.php'
def _install_ckeditor(stdout=None):
    """Callback to install necessary library for the IMCE module.

    Keyword Arguments:
    stdout -- optional writable stream; when given, tar runs verbosely and
        its output is echoed there.
    """
    arg = 'xf'
    if stdout:
        arg += 'v'
    http.dl(CKEDITOR_URI, 'ckeditor.tar.gz')
    # Bug fix: the verbosity flag computed above was previously ignored —
    # tar was always invoked with the literal 'xf'.
    output = tar(arg, 'ckeditor.tar.gz')
    if stdout and output:
        stdout.write(str(output).strip() + '\n')
    rm('ckeditor.tar.gz')
def _install_jquery_colorpicker(stdout=None):
    """Callback to fetch and unpack the jQuery colorpicker library."""
    os.makedirs('./colorpicker')
    http.dl(JQ_COLOR_PICKER_URI, './colorpicker/colorpicker.zip')
    with pushd('./colorpicker'):
        unzip_output = unzip('colorpicker.zip')
        # Echo unzip's output when a stream was supplied.
        if stdout and unzip_output:
            stdout.write('%s\n' % str(unzip_output).strip())
        rm('colorpicker.zip')
def _install_fancybox(stdout=None):
    """Callback to fetch and unpack the library for the Fancybox module."""
    archive = 'fancybox.zip'
    http.dl(FANCYBOX_URI, archive)
    unzip_output = unzip(archive)
    # Echo unzip's output when a stream was supplied.
    if stdout and unzip_output:
        stdout.write('%s\n' % str(unzip_output).strip())
    # The GitHub zipball unpacks into a commit-named directory; normalise it.
    os.rename('fancyapps-fancyBox-18d1712', 'fancybox')
    rm(archive)
def _install_jquery_cycle(stdout=None):
    """Callback to fetch the library for the Views Cycle module."""
    # Single unpackaged JS file; download it straight into its own directory.
    os.makedirs('./jquery.cycle')
    http.dl(JQ_CYCLE_URI, './jquery.cycle/jquery.cycle.all.js')
def _install_predis(stdout=None):
    """Callback to fetch and unpack Predis for the Redis module."""
    archive = 'predis.zip'
    http.dl(PREDIS_URI, archive)
    unzip_output = unzip(archive)
    # Echo unzip's output when a stream was supplied.
    if stdout and unzip_output:
        stdout.write('%s\n' % str(unzip_output).strip())
    # The GitHub zipball unpacks into a commit-named directory; normalise it.
    os.rename('nrk-predis-d02e2e1', 'predis')
    rm(archive)
def _install_simplepie(stdout=None):
    """Callback to install necessary file for the Simplepie module."""
    # Feeds expects the compiled SimplePie file inside its libraries dir,
    # relative to the current (libraries) working directory.
    rel_parts = ('..', 'modules', 'contrib', 'feeds', 'libraries',
                 'simplepie.compiled.php')
    target = realpath(path_join(os.getcwd(), *rel_parts))
    http.dl(SIMPLEPIE_URI, target)
# All callbacks must have stdout=None as the signature
# Maps library directory name -> installer callback, consulted when a Drupal
# module's third-party library needs to be fetched.
_lib_hooks = {
    'ckeditor': _install_ckeditor,
    'colorpicker': _install_jquery_colorpicker,
    'fancybox': _install_fancybox,
    'jquery.cycle': _install_jquery_cycle,
    'predis': _install_predis,
    'simplepie': _install_simplepie,
}
class DrushError(Exception):
    """Error type for drush-related failures."""
    pass
class DrupalError(Exception):
    """Error type for Drupal-related failures."""
    pass
class Drush:
    """Interface to Drush from Python"""
    # Real path to the target Drupal installation (set in __init__).
    _path = None
    # Whether the drush command runs in verbose mode.
    _verbose = False
    # Stream for command output; None means the current terminal.
    _stdout = None
    # Site URIs discovered under sites/.
    # NOTE(review): defined at class level, so this mutable list is shared by
    # every instance unless __init__ rebinds it — confirm this is intended.
    _uris = []
    # Cookie processor for authenticated HTTP requests, if any.
    _cookie_processor = None
def __init__(self, path, verbose=False, stdout=None):
    """
    Arguments:
    path -- Path to target Drupal installation. Does not have to exist as
        init_dir() can be used to initialise

    Keyword Arguments:
    verbose -- If verbose mode should be used with the drush command.
    stdout -- Where standard output should be written to. None for
        current terminal.
    """
    self._path = realpath(path)
    self._verbose = verbose
    self._stdout = stdout
    # Bug fix: rebind _uris per instance. Previously every instance appended
    # to the single class-level list, so site URIs leaked between Drush
    # objects (and accumulated across repeated construction).
    self._uris = []
    if isdir(self._path) and isdir(path_join(self._path, 'sites')):
        with pushd(self._path):
            # Multisite installs keep one directory per site hostname under
            # sites/; 'all' and 'default' are not site hostnames.
            site_dirs = [x
                         for x in os.listdir('sites')
                         if isdir(os.path.join('sites', x)) and
                         x not in ['all', 'default']]
        for item in site_dirs:
            self._uris.append('http://%s' % (item))
        self._uris.sort()
def command(self, string_as_is, ignore_errors=False, once=False):
"""Runs a drush command string. If the class is not in verbose mode,
-q argument will be added
ignore_errors may want to be used for commands that exit with
non-zero status but are not always errors (like reverting a view
that may not exist)
command('en -y module_name')
command('views-revert my_nonexisting_view', ignore_errors=True)
"""
with pushd(self._path):
split = shell_split(string_as_is)
command_line = ['drush']
if not self._verbose:
command_line.append('-q')
command_line.extend(split)
if self._verbose and self._stdout:
self._stdout.write(' '.join(command_line) + '\n')
try:
sp.check_call(command_line, stdout=self._stdout)
except sp.CalledProcessError as e:
if not ignore_errors:
raise e
if once:
return
for uri in set(self._uris):
if re.match(r'^https?\://default$', uri):
continue
command_line = ['drush', '--uri=%s' % (uri)]
if not self._verbose:
command_line.append('-q')
command_line.extend(split)
if self._verbose and self._stdout:
self._stdout.write(' '.join(command_line) + '\n')
try:
sp.check_call(command_line, stdout=self._stdout)
except sp.CalledProcessError as e:
if not ignore_errors:
raise e
def command_output(self, string_as_is, once=False):
"""Like command() but returns the output, in a list format:
[(site_name, stripped_data)]"""
with pushd(self._path):
split = shell_split(string_as_is)
command_line = ['drush']
ret = []
if not self._verbose:
command_line.append('-q')
command_line.extend(split)
if self._verbose and self._stdout:
self._stdout.write(' '.join(command_line) + '\n')
ret.append(('default',
sp.Popen(command_line, stdout=sp.PIPE).communicate()[0].strip(),))
if once:
return ret
for uri in set(self._uris):
if re.match(r'^https?\://default$', uri):
continue
command_line = ['drush', '--uri=%s' % (uri)]
if not self._verbose:
command_line.append('-q')
command_line.extend(split)
if self._verbose and self._stdout:
self._stdout.write(' '.join(command_line) + '\n')
ret.append((uri,
sp.Popen(command_line, stdout=sp.PIPE).communicate()[0].strip(),))
return ret
def add_uri(self, uri):
if uri in self._uris:
return
self._uris.append(uri)
def init_dir(self, major_version=7, minor_version=25, cache=True):
"""Initialises a Drupal root with a version specified.
Kwargs:
``major_version`` (int): major version of Drupal\n
``minor_version`` (int): minor version of Drupal\n
cache (bool): if Drush's cache should be used
"""
with pushd(realpath(path_join(self._path, '..'))):
module_name = 'drupal-%d.%s' % (major_version, minor_version)
dir_name = module_name
if minor_version == 'x':
dir_name += '-dev'
if isdir(dir_name):
rmdir_force(dir_name)
command_line = ['drush', 'dl', '-y']
if cache:
command_line.append('--cache')
if not self._verbose:
command_line.append('-q')
command_line.append(module_name)
sp.check_call(command_line)
if not isdir(dir_name):
raise Exception('Failed to download Drupal 7.x correctly')
os.rename(dir_name, self._path)
with pushd(self._path):
os.makedirs('./sites/all/modules/contrib')
os.makedirs('./sites/all/themes/contrib')
os.makedirs(path_join(self._path, 'sites', 'default', 'files',
'tmp'),
504) # 0770, or 0o770 in Python 3
def create_libraries_dir(self):
"""Creates the sites/all/libraries directory. Root path must exist."""
path = path_join(self._path, 'sites', 'all', 'libraries')
if isdir(path):
return
os.makedirs(path_join(self._path, 'sites', 'all', 'libraries'))
def _handle_dl(self, command_line, ignore_errors=False):
try:
sp.check_call(command_line)
except sp.CalledProcessError as e:
# Most of the time this is caused by a bad checksum
if ignore_errors:
pass
raise DrushError('Non-zero status %d. Run in verbose mode and'
'check output for [error] line' %
(e.returncode))
def dl(self, module_names, cache=True, ignore_errors=False):
"""Downloads modules.
Arguments:
module_names -- str or list, a module name or a list of module names
Keyword Arguments:
cache -- boolean, if Drush's cache should be used
ignore_errors -- boolean, ignore hash errors
Raises DrushError if an error occurs when downloading, unless
ignore_errors is True.
"""
if type(module_names) is str:
module_names = shell_split(module_names)
command_line = ['drush', 'dl', '-y']
if cache:
command_line.append('--cache')
if not self._verbose:
command_line.append('-q')
command_line.extend(module_names)
dir_exceptions = [
'registry_rebuild',
]
if len(module_names) == 1 and \
module_names[0].lower() in dir_exceptions:
return self._handle_dl(command_line, ignore_errors)
with pushd(self._path):
self._handle_dl(command_line, ignore_errors)
def rr(self):
"""Rebuild registry front-end method."""
return self.command('rr')
def cc(self, which='all'):
"""Cache-clear front-end method.
:param which: Type of cache to clear.
:type which: ``str``.
:returns: ``int`` -- the return code.
"""
return self.command('cc %s' % (which))
def vset(self, variable_name, value):
"""Variable set front-end method. If the type of the value is str,
then --format=string will be used. Otherwise the value will be
converted to json and --format=json will be used.
Arguments:
variable_name -- str, variable name to set
value -- Any type that is JSON-encodable or str.
"""
if type(value) is str:
format = 'string'
else:
format = 'json'
value = json.dumps(value)
args = (format, shell_quote(variable_name), shell_quote(value))
return self.command('vset --exact -y --format=%s %s %s' % args)
def vset_many(self, dict_of_vars, mysql_connection):
"""Set many variables at once using a MySQL connection object and a
dictionary of values. Use this instead of vset() when many values
need to be set quickly.
Arguments:
dict_of_vars -- dict, dictionary of variables to values to set
mysql_connection -- MySQLdb.connection, open connection to MySQL to
correct Drupal database
"""
c = mysql_connection.cursor()
for (key, value) in dict_of_vars.items():
value = php.serialize(value)
c.execute('INSERT INTO variable (name, value) VALUES '
'(%s, %s) ON DUPLICATE KEY UPDATE value=%s',
args=(key, value, value,))
mysql_connection.commit()
def updb(self):
"""Update database front-end method. Use with caution."""
return self.command('updb -y')
def en(self, module_name):
"""Enable module front-end method.
Arguments:
module_name -- str, system module name
"""
return self.command('en -y %s' % (module_name))
def dis(self, module_name):
"""Disable module front-end method.
Arguments:
module_name -- str, system module name
"""
self.command('dis -y %s' % (module_name))
def install_lib(self, library_name, stdout=None):
"""Installs a library into sites/all/libraries (usually).
Arguments:
library_name -- library name, must be registerd as a hook in
_lib_hooks. This name is also the same as the directory name in
sites/all/libraries.
Keyword Arguments:
stdout -- None or file handle
"""
self.create_libraries_dir()
with pushd(path_join(self._path, 'sites', 'all', 'libraries')):
# library_name is also supposed to be the directory target
if isdir(library_name):
rmdir_force(library_name)
_lib_hooks[library_name](stdout=stdout)
def fix_registry_table(self,
connection,
search='sites/all/modules',
replace='sites/all/modules/contrib',
like='sites/all/modules/%',
not_like='%/contrib%'):
"""Not intended for general use, but called with default arguments this
method fixes an older database that may have modules' registered
files with paths at sites/all/modules instead of the more proper
path sites/all/modules/contrib.
Arguments:
connection -- MySQLdb connection object
Keyword Arguments:
search -- str, path to search for
replace -- str, replacement string
like -- str, filter
"""
c = connection.cursor()
args = (
search,
replace,
like,
not_like,
)
c.execute('UPDATE registry SET filename = REPLACE(filename, %s, %s)'
'WHERE filename LIKE %s AND filename NOT LIKE %s', args=args)
connection.commit()
c.close()
def sync_assets(self, remote_path, domain='default', local_domain=None):
"""Sync assets from a remote or local directory. This is intended to
sync sites/all/default/files or sites/all/somedomain.com/files
directories.
Arguments:
remote_path -- str, remote path in SSH format or a local path
domain -- directory in sites to write to
local_domain -- ???
"""
if local_domain:
domain_path = path_join(self._path, 'sites', local_domain, 'files')
else:
domain_path = path_join(self._path, 'sites', domain, 'files')
if not isdir(domain_path):
os.makedirs(domain_path)
os.makedirs(path_join(domain_path, 'tmp'))
with pushd(domain_path):
dir_sync(remote_path, domain_path)
def install_favicon(self, favicon_path):
"""Installs favicon into the root and at /misc/favicon.ico
Arguments:
favicon_path - Path to favicon.
"""
favicon_filename = basename(favicon_path)
paths = [
path_join(self._path, favicon_filename),
path_join(self._path, 'misc', favicon_filename),
]
for target_path in paths:
if isfile(target_path):
os.remove(target_path)
copy_file(favicon_path, target_path)
def remove_extraneous_files(self):
"""Mainly for production. Removes junk files from the root. For the
most part this should not be necessary as a server configuration
on production should block access to *.txt, *.config by default
anyway.
"""
extra_files = [
'CHANGELOG.txt',
'COPYRIGHT.txt',
'INSTALL.txt',
'INSTALL.mysql.txt',
'INSTALL.pgsql.txt',
'INSTALL.sqlite.txt',
'LICENSE.txt',
'MAINTAINERS.txt',
'README.txt',
'UPGRADE.txt',
'web.config',
]
for file_name in extra_files:
path = path_join(self._path, file_name)
os.remove(path)
#def _init_browser():
#if self._cookie_processor:
#return
#cookie_jar = CookieJar()
#self._cookie_processor = HTTPCookieProcessor(cookie_jar)
#def get_urlopener(self):
#self._init_browser()
#return build_opener(self._cookie_processor)
#def _get_session(self,
#username,
#password,
#protocol='http',
#add_headers=None):
#opener = self.get_urlopener()
#opener.add_headers = headers
## Login data
#data = [
#('name', username),
#('pass', password),
#('op', 'Log in'),
#]
## Get CSRF
#response = opener.open('%s://%s/user' % (protocol, host))
#soup = Soup(response.read(), 'html5lib')
#hiddens = soup.body.sellect('#user-login input[type="hidden"]')
#for field in hiddens:
#data.append((field['name'], field['value']))
## Perform login
#data = urlencode(data).encode('utf-8')
#response = opener.open('%s://%s/user', data=data)
#soup = Soup(response.read(), 'html5lib')
#try:
#if soup.body.select('#page-title')[0].contents[0] != username:
#raise DrupalError('Failed to log in')
#except IndexError:
#raise DrupalError('Failed to log in')
#def post_form(self,
#username,
#password,
#path,
#data=None,
#uris=None,
#add_headers=None):
#self._get_session(add_headers=add_headers)
def is_production(info_file='/etc/node_type'):
    """Based on DRUPAL_ENV environment variable, determines if the server is in
    production mode.
    Also can read the first line from a specified file to determine if the
    string (lower-cased) == 'prod'.
    Arguments:
    info_file -- File to read from for environment type. For this function to
        return ``True``, the file's first line must say ``prod`` exactly. If the
        file cannot be opened, the environment variable ``DRUPAL_ENV`` will be
        checked for.
    """
    try:
        # The original opened the hard-coded '/etc/node_type' path here,
        # silently ignoring the info_file argument.
        with open(info_file) as f:
            # Only the first line matters; an empty file falls through to
            # the environment-variable check, as before.
            for line in f:
                return line.strip().lower() == 'prod'
    except IOError:
        pass
    return os.environ.get('DRUPAL_ENV') == 'prod'
def generate_settings_files(data):
    """Generates settings.php files
    Arguments:
    data -- Configuration hash. Each key in the hash is a domain. So there
        should be at least the 'default' key. From there are three
        hashes: databases, conf, ini_set.
    Example:
    data = {
        'default': {
            'databases': {
                'default': {
                    'database': 'my database',
                    ...
                },
            },
            'conf': {
                # Anything that goes in the $conf array
                '404_fast_paths_exclude': '/\/(?:styles)\//',
                'blocked_ips': [list of IPs],
            },
            # Do note any scalar value (non-array, non-object) can be used here
            # and it will become a global variable as in this one:
            # $drupal_hash_salt = 'salt string';
            'drupal_hash_salt': 'salt string',
            'ini_set': { # Any ini_set() directives
                'session.gc_probability': 1,
            }
        }
    }
    Returns a list of tuples: (path (str), data (PHP code, str))
    Raises DrupalError if the 'default' key is missing.
    """
    if 'default' not in data:
        raise DrupalError('"default" key must exist')
    ret = []
    for site_name, settings in data.items():
        php_code = ''
        for key in ('databases', 'conf',):
            # NOTE(review): assumes php.generate_array does not emit a
            # trailing ';' (generate_scalar below does not) -- the
            # original omitted the statement terminator here, producing
            # invalid PHP like "$databases = array(...)\n".
            php_code += '$%s = %s;\n' % (key, php.generate_array(settings[key]))
        for ini_name, ini_setting in settings['ini_set'].items():
            php_code += 'ini_set(%s, %s);\n' % (
                php.generate_scalar(ini_name),
                php.generate_scalar(ini_setting))
        # Every remaining key becomes a plain PHP global variable.
        for key, value in settings.items():
            if key in ('databases', 'conf', 'ini_set'):
                continue
            php_code += '$%s = %s;\n' % (key, php.generate_scalar(value))
        file_name = path_join('sites', site_name, 'settings.php')
        # Tuples, as the docstring promises (the original appended lists).
        ret.append((file_name, php_code.strip()))
    return ret