gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
"""
In this file we define the "Right Hand Side" of the dynamics for the
Magneplane concept to be used by the pointer framework, and create an
OpenMDAO problem to solve the optimal control problem using Pointer.
"""
import numpy as np
from openmdao.core.component import Component
try:
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
except ImportError:
pyOptSparseDriver = None
from openmdao.drivers.scipy_optimizer import ScipyOptimizer
from pointer.components import Problem, Trajectory, RHS, \
EOMComp, CollocationPhase
class MagneplaneEOM(EOMComp):
    r""" The equations of motion for the Magneplane concept.

    In this implementation we assume a Magneplane vehicle is constrained
    to move tangentially to some 'track' defined in cartesian 3-space.
    We assume that this cartesian 3-space is inertial (the rotation of the
    Earth is ignored). The inertial frame coordinates are specified in a
    'North-East-Down' (NED) coordinate frame $\hat{e}$ such that:

    $\hat{e}_0$ (North) lies tangential to the Earth surface and points North
    $\hat{e}_1$ (East) lies tangential to the Earth surface and points East
    $\hat{e}_2$ (Down) is perpendicular to the Earth surface and is
    nadir-pointing

    Instead of using latitude/longitude we will assume some 'km per deg' of
    latitude and longitude and specify the track path using an x-y set in the
    NED frame. The altitude of the track will be specified ground level at a
    given position. Using digital elevation maps from the USGS, we will then
    be able to determine the entire path of the tube/track in 3-space.

    The motion of the pod along the track is constrained to be tangential
    with the track. The simplified 3DOF equations of motion for the pod
    (as implemented in solve_nonlinear below) are:

    ..math::

        \frac{dv}{dt} = -g \cdot \sin \theta + \frac{T - D}{m}
        \frac{dx}{dt} = v \cdot \cos \theta \cos \psi
        \frac{dy}{dt} = v \cdot \cos \theta \sin \psi
        \frac{dz}{dt} = -v \cdot \sin \theta

    Here $\psi$, $\theta$ and $\phi$ are the 3-2-1 Euler angle set used
    to define the pod axial frame in the NED frame.

    $\psi$ (azimuth angle) is a rotation about $\hat{e}_2$ that rotates the pod
    x-axis to the proper heading.
    $\theta$ (elevation angle) is a rotation about $\hat{e}'_1$ that rotates
    the pod x-axis to the proper elevation.
    $\phi$ (roll angle) is a rotation about $\hat{e}''_1$ that rotates the pod
    within the tube.

    In general the roll angle $\phi$ will typically be zero or small. It
    may be necessary to bank the pod in turns to achieve a comfortable
    g-loading on passengers.

    Note that in the presence of no thrust or drag/friction, these equations
    essentially describe the Brachistochrone problem. We can therefore use
    the known analytic solution to the brachistochrone problem as a
    test of these equations in the absence of thrust and drag on the pod.
    """

    def __init__(self, grid_data):
        super(MagneplaneEOM, self).__init__(grid_data, time_units='s')

        # Analytic partials are supplied by linearize() below.
        self.deriv_options['type'] = 'user'

        # EOM states: inertial NED position and path speed.
        self.add_param('x',
                       desc='north component of position',
                       units='m',
                       eom_state=True)
        self.add_param('y',
                       desc='east component of position',
                       units='m',
                       eom_state=True)
        self.add_param('z',
                       desc='down component of position',
                       units='m',
                       eom_state=True)
        self.add_param('v', desc='pod velocity', units='m/s', eom_state=True)

        # Controls / parameters of the dynamics.
        self.add_param('g', desc='gravitational acceleration', units='m/s/s')
        self.add_param('psi', desc='azimuth angle', units='rad')
        self.add_param('theta', desc='elevation angle', units='rad')
        #self.add_param('phi',desc='roll angle',units='rad')
        self.add_param('T', desc='thrust force', units='N')
        self.add_param('D', desc='drag/friction force', units='N')
        self.add_param('mass', desc='pod mass', units='kg')

        # Jacobian: node-wise dynamics couple only like-indexed nodes, so
        # each sub-Jacobian is diagonal; the diagonals are filled in by
        # linearize() each iteration.
        self._J = {}

        # Partials of dx/dt
        self._J['dXdt:x', 'v'] = np.eye(self.num_nodes)
        self._J['dXdt:x', 'theta'] = np.eye(self.num_nodes)
        self._J['dXdt:x', 'psi'] = np.eye(self.num_nodes)

        # Partials of dy/dt
        self._J['dXdt:y', 'v'] = np.eye(self.num_nodes)
        self._J['dXdt:y', 'theta'] = np.eye(self.num_nodes)
        self._J['dXdt:y', 'psi'] = np.eye(self.num_nodes)

        # Partials of dz/dt
        self._J['dXdt:z', 'v'] = np.eye(self.num_nodes)
        self._J['dXdt:z', 'theta'] = np.eye(self.num_nodes)

        # Partials of dv/dt (dv/dt does not depend on v, x, y or z)
        self._J['dXdt:v', 'g'] = np.eye(self.num_nodes)
        self._J['dXdt:v', 'T'] = np.eye(self.num_nodes)
        self._J['dXdt:v', 'D'] = np.eye(self.num_nodes)
        self._J['dXdt:v', 'mass'] = np.eye(self.num_nodes)
        self._J['dXdt:v', 'theta'] = np.eye(self.num_nodes)

    def solve_nonlinear(self, params, unknowns, resids):
        """Evaluate the state rates at every node (vectorized)."""
        v = params['v']
        g = params['g']
        theta = params['theta']
        psi = params['psi']
        T = params['T']
        D = params['D']
        mass = params['mass']
        unknowns['dXdt:x'][:] = v * np.cos(theta) * np.cos(psi)
        unknowns['dXdt:y'][:] = v * np.cos(theta) * np.sin(psi)
        unknowns['dXdt:z'][:] = -v * np.sin(theta)
        unknowns['dXdt:v'][:] = -g * np.sin(theta) + (T - D) / mass

    def linearize(self, params, unknowns, resids):
        """Return the analytic Jacobian of the state rates.

        Only the diagonals of the pre-allocated sub-Jacobians are updated;
        each entry is the pointwise partial derivative of the corresponding
        rate in solve_nonlinear.
        """
        v = params['v']
        g = params['g']
        theta = params['theta']
        psi = params['psi']
        T = params['T']
        D = params['D']
        mass = params['mass']
        np.fill_diagonal(self._J['dXdt:x', 'v'], np.cos(theta) * np.cos(psi))
        np.fill_diagonal(self._J['dXdt:x', 'theta'], -v * np.sin(theta) *
                         np.cos(psi))
        np.fill_diagonal(self._J['dXdt:x', 'psi'], -v * np.cos(theta) *
                         np.sin(psi))
        np.fill_diagonal(self._J['dXdt:y', 'v'], np.cos(theta) * np.sin(psi))
        np.fill_diagonal(self._J['dXdt:y', 'theta'], -v * np.sin(theta) *
                         np.sin(psi))
        np.fill_diagonal(self._J['dXdt:y', 'psi'], v * np.cos(theta) *
                         np.cos(psi))
        np.fill_diagonal(self._J['dXdt:z', 'v'], -np.sin(theta))
        np.fill_diagonal(self._J['dXdt:z', 'theta'], -v * np.cos(theta))
        np.fill_diagonal(self._J['dXdt:v', 'g'], -np.sin(theta))
        np.fill_diagonal(self._J['dXdt:v', 'T'], 1.0 / mass)
        np.fill_diagonal(self._J['dXdt:v', 'D'], -1.0 / mass)
        np.fill_diagonal(self._J['dXdt:v', 'mass'], (D - T) / mass**2)
        np.fill_diagonal(self._J['dXdt:v', 'theta'], -g * np.cos(theta))
        return self._J
class AngularVelocityComp(EOMComp):
    """ Component to compute the angular velocity of the pod in
    the inertial NED frame. This is from the definition of the
    angular velocity vector based on euler angles and rates in a
    3-2-1 Euler angle sequence.
    """

    def __init__(self, grid_data):
        super(AngularVelocityComp, self).__init__(grid_data=grid_data,
                                                  time_units='s')
        # Partials for this component are approximated by finite difference.
        self.deriv_options['type'] = 'fd'

        # Euler angles and their time rates (rates are inputs named with
        # the pointer 'dUdt:' convention).
        self.add_param('psi', desc='azimuth angle', units='rad')
        self.add_param('theta', desc='elevation angle', units='rad')
        self.add_param('phi', desc='roll angle', units='rad')
        self.add_param('dUdt:psi', desc='azimuth angle rate', units='rad/s')
        self.add_param('dUdt:theta',
                       desc='elevation angle rate',
                       units='rad/s')
        self.add_param('dUdt:phi', desc='roll angle rate', units='rad/s')

        # One angular-velocity value per collocation node.
        nn = grid_data['num_nodes']
        self.add_output('omega_x',
                        shape=(nn, ),
                        desc='omega about north',
                        units='rad/s')
        self.add_output('omega_y',
                        shape=(nn, ),
                        desc='omega about east',
                        units='rad/s')
        self.add_output('omega_z',
                        shape=(nn, ),
                        desc='omega about down',
                        units='rad/s')

    def solve_nonlinear(self, params, unknowns, resids):
        """Evaluate the angular-velocity components at every node.

        Uses the standard 3-2-1 Euler-angle kinematic relations between
        (psi_dot, theta_dot, phi_dot) and the angular velocity components.
        """
        psi = params['psi']
        theta = params['theta']
        phi = params['phi']
        psi_dot = params['dUdt:psi']
        theta_dot = params['dUdt:theta']
        phi_dot = params['dUdt:phi']
        unknowns['omega_x'][:] = phi_dot - psi_dot * np.sin(theta)
        unknowns['omega_y'][:] = theta_dot * np.cos(phi) + psi_dot * np.sin(
            phi) * np.cos(theta)
        unknowns['omega_z'][:] = -theta_dot * np.sin(phi) + psi_dot * np.cos(
            phi) * np.cos(theta)
class MagneplaneRHS(RHS):
    """ Right-hand side of the Magneplane dynamics for the pointer
    framework: combines the 3DOF equations of motion with the
    angular-velocity outputs, with all variables promoted to a common
    namespace.
    """

    def __init__(self, grid_data, dynamic_controls=None, static_controls=None):
        super(MagneplaneRHS, self).__init__(grid_data, dynamic_controls,
                                            static_controls)
        self.add(name='eom', system=MagneplaneEOM(grid_data), promotes=['*'])
        self.add(name='omega',
                 system=AngularVelocityComp(grid_data),
                 promotes=['*'])
        # Required by the pointer RHS API once all subsystems are added.
        self.complete_init()
def magneplane_brachistochrone(solver='SLSQP', num_seg=3, seg_ncn=3):
    """ Build the Magneplane brachistochrone optimal control problem.

    Parameters
    ----------
    solver : str
        Optimizer name. 'SNOPT' selects the pyOptSparse driver; any other
        value is passed to ScipyOptimizer (e.g. 'SLSQP', 'COBYLA').
    num_seg : int
        Number of collocation segments in the phase.
    seg_ncn : int
        Number of cardinal nodes per segment.

    Returns
    -------
    Problem
        The configured (not yet set-up) pointer Problem. The objective is
        the final time of phase0.

    Raises
    ------
    ValueError
        If 'SNOPT' is requested but pyoptsparse is not installed.
    """
    prob = Problem()
    traj = prob.add_traj(Trajectory("traj0"))

    if solver == 'SNOPT':
        if pyOptSparseDriver is None:
            raise ValueError(
                'Requested SNOPT but pyoptsparse is not available')
        driver = pyOptSparseDriver()
        driver.options['optimizer'] = solver
        driver.opt_settings['Major iterations limit'] = 1000
        driver.opt_settings['iSumm'] = 6
        driver.opt_settings['Major step limit'] = 0.5
        driver.opt_settings["Major feasibility tolerance"] = 1.0E-6
        driver.opt_settings["Major optimality tolerance"] = 1.0E-6
        driver.opt_settings["Minor feasibility tolerance"] = 1.0E-4
        driver.opt_settings['Verify level'] = 3
    else:
        driver = ScipyOptimizer()
        # Fix: the requested scipy algorithm was previously ignored and the
        # driver always used its default; honor the 'solver' argument.
        driver.options['optimizer'] = solver
        driver.options['tol'] = 1.0E-6
        driver.options['disp'] = True
        driver.options['maxiter'] = 500
    prob.driver = driver

    # Controls that vary across collocation nodes. All are held fixed
    # (opt=False below) except theta, which the optimizer shapes.
    dynamic_controls = [{'name': 'g', 'units': 'm/s**2'},
                        {'name': 'T', 'units': 'N'},
                        {'name': 'D', 'units': 'N'},
                        {'name': 'mass', 'units': 'kg'},
                        {'name': 'psi', 'units': 'rad'},
                        {'name': 'theta', 'units': 'rad'},
                        {'name': 'phi', 'units': 'rad'}]

    phase0 = CollocationPhase(name='phase0',
                              rhs_class=MagneplaneRHS,
                              num_seg=num_seg,
                              seg_ncn=seg_ncn,
                              rel_lengths="equal",
                              dynamic_controls=dynamic_controls,
                              static_controls=None)
    traj.add_phase(phase0)

    # Boundary conditions: start at rest at (0, 0, -10), finish at
    # (10, 0, -5); final speed is left free.
    phase0.set_state_options('x',
                             lower=0,
                             upper=10,
                             ic_val=0,
                             ic_fix=True,
                             fc_val=10,
                             fc_fix=True,
                             defect_scaler=0.1)
    phase0.set_state_options('y',
                             lower=0,
                             upper=0,
                             ic_val=0,
                             ic_fix=True,
                             fc_val=0,
                             fc_fix=True,
                             defect_scaler=0.1)
    phase0.set_state_options('z',
                             lower=-10,
                             upper=0,
                             ic_val=-10,
                             ic_fix=True,
                             fc_val=-5,
                             fc_fix=True,
                             defect_scaler=0.1)
    phase0.set_state_options('v',
                             lower=0,
                             upper=np.inf,
                             ic_val=0.0,
                             ic_fix=True,
                             fc_val=10.0,
                             fc_fix=False,
                             defect_scaler=0.1)

    # Only the elevation angle is optimized; all other controls are fixed
    # node-space profiles (no thrust, no drag, constant g and mass).
    phase0.set_dynamic_control_options(name='psi',
                                       val=phase0.node_space(0.0, 0.0),
                                       opt=False)
    phase0.set_dynamic_control_options(name='theta',
                                       val=phase0.node_space(-.46, -.46),
                                       opt=True,
                                       lower=-1.57,
                                       upper=1.57,
                                       scaler=1.0)
    phase0.set_dynamic_control_options(name='phi',
                                       val=phase0.node_space(0.0, 0.0),
                                       opt=False)
    phase0.set_dynamic_control_options(name='g',
                                       val=phase0.node_space(9.80665, 9.80665),
                                       opt=False)
    phase0.set_dynamic_control_options(name='T',
                                       val=phase0.node_space(0.0, 0.0),
                                       opt=False)
    phase0.set_dynamic_control_options(name='D',
                                       val=phase0.node_space(0.0, 0.0),
                                       opt=False)
    phase0.set_dynamic_control_options(name='mass',
                                       val=phase0.node_space(1000.0, 1000.0),
                                       opt=False)

    # Initial time fixed at 0; phase duration bounded in [0.5, 10] s.
    phase0.set_time_options(t0_val=0,
                            t0_lower=0,
                            t0_upper=0,
                            tp_val=2.0,
                            tp_lower=0.5,
                            tp_upper=10.0)

    # Minimize elapsed time at the end of the phase.
    traj.add_objective(name="t", phase="phase0", place="end", scaler=1.0)
    return prob
if __name__ == "__main__":
    # Solve the brachistochrone check case with SNOPT, then simulate the
    # optimized trajectory and plot it against the collocation solution.
    prob = magneplane_brachistochrone('SNOPT', num_seg=10, seg_ncn=2)
    prob.setup()
    # np.set_printoptions(linewidth=1024)
    # with open('check_partials.txt','wb') as f:
    # prob.check_partial_derivatives(out_stream=f)
    #
    # exit(0)
    prob.run()
    # Explicit integration of the optimized controls as a sanity check on
    # the collocation result.
    simout = prob.trajectories['traj0'].simulate(dt=0.01)
    import matplotlib.pyplot as plt
    # x-z track profile: collocation nodes (red dots) vs. simulation (line).
    plt.plot(prob['traj0.phase0.rhs_c.x'], prob['traj0.phase0.rhs_c.z'], 'ro')
    plt.plot(simout['phase0']['x'], simout['phase0']['z'])
    # 'z' is positive-down (NED), so invert the axis for a natural view.
    plt.gca().invert_yaxis()
    #plt.plot(simout['phase0']['t'],-simout['phase0']['z'])
    plt.figure()
    # Angular velocity components at the collocation nodes vs. time.
    plt.plot(prob['traj0.phase0.rhs_c.t'], prob['traj0.phase0.rhs_c.omega_x'],
             'ro')
    plt.plot(prob['traj0.phase0.rhs_c.t'], prob['traj0.phase0.rhs_c.omega_y'],
             'bo')
    plt.plot(prob['traj0.phase0.rhs_c.t'], prob['traj0.phase0.rhs_c.omega_z'],
             'go')
    #plt.plot(simout['phase0']['t'],simout['phase0']['omega_y'])
    #plt.plot(simout['phase0']['t'],simout['phase0']['omega_z'])
    plt.show()
#
|
|
#!/usr/bin/python
#### workflow ####
# 1. get all datatsets within date range
# 2. put dataset pairs into input_queue
# 3. start worker threads with input_queue
# 3a. load dataset into prefix trees pt0 and pt1
# 3.b calc stats for prefix trees pt0 and pt1
# 3.c calc diffs for prefix treees (pt0,pt1)
# 4. start output thread and write results back to database
##################s
from __future__ import print_function
import argparse
import gzip
import os
import psycopg2
import radix
import re
import sys
import multiprocessing as mp
from collections import OrderedDict
from datetime import datetime, timedelta
from netaddr import IPSet, IPNetwork
# Output-verbosity switches; overwritten from the command-line flags in main().
verbose = False
warning = False
logging = False

# Filename/path patterns for RouteViews (rv) and RIPE RIS (rr) RIB dumps.
re_file_rv = re.compile('rib.(\d+).(\d\d\d\d).bz2')
re_file_rr = re.compile('bview.(\d+).(\d\d\d\d).gz')
re_path_rv = re.compile('.*/([a-z0-9\.-]+)/bgpdata/\d\d\d\d.\d\d/RIBS.*')
re_path_rr = re.compile('.*/(rrc\d\d)/\d\d\d\d.\d\d.*')

# IPv4 space that should never appear in the global routing table; used to
# split announced space into valid vs. bogus in get_stat().
reserved_ipv4 = IPSet (['0.0.0.0/8', # host on this network (RFC1122)
                        '10.0.0.0/8','172.16.0.0/12','192.168.0.0/16', # private address space (RFC1918)
                        '100.64.0.0/10', # shared address space (RFC6598)
                        '127.0.0.0/8', # loopback (RFC1122)
                        '169.254.0.0/16', # linklocal (RFC3927)
                        '192.0.0.0/24', # special purpose (RFC6890)
                        '192.0.0.0/29', # DS-lite (RFC6333)
                        '192.0.2.0/24','198.51.100.0/24','203.0.113.0/24', # test net 1-3 (RFC5737)
                        '224.0.0.0/4', # multicast address space
                        '240.0.0.0/4', # future use (RFC1122)
                        '255.255.255.255/32' # limited broadcast
                        ])
# Total number of non-reserved IPv4 addresses (denominator for coverage).
all_ips_valid = len(IPSet(['0.0.0.0/0']) - reserved_ipv4)
## helper function ##
def prefixlen(prefix):
    """Return the CIDR mask length of a prefix string.

    'a.b.c.d/len' yields int(len); a bare address without a '/' is treated
    as a host route and yields 32.

    Fix: the bare ``except:`` that swallowed every error (including
    programming errors such as passing a non-string) is narrowed to the
    ValueError raised by a failed unpack of ``split('/')``.
    """
    try:
        network, length = prefix.split('/')
    except ValueError:
        # No '/' (or more than one): assume a plain IPv4 host address.
        return 32
    return int(length)
def print_log(*objs):
    # Log-level output: enabled by --logging or --verbose.
    if logging or verbose:
        print("[LOGS] .", *objs, file=sys.stdout)

def print_info(*objs):
    # Info-level output: only with --verbose.
    if verbose:
        print("[INFO] ..", *objs, file=sys.stdout)

def print_warn(*objs):
    # Warnings go to stderr; enabled by --warning or --verbose.
    if warning or verbose:
        print("[WARN] ", *objs, file=sys.stderr)

def print_error(*objs):
    # Errors are always printed, to stderr.
    print("[ERROR] ", *objs, file=sys.stderr)
def valid_date(s):
    """Argparse type: parse a YYYY-MM-DD string into a datetime.

    Raises argparse.ArgumentTypeError on malformed input so argparse can
    report it as a usage error.
    """
    try:
        return datetime.strptime(s, "%Y-%m-%d")
    except ValueError:
        raise argparse.ArgumentTypeError(
            "Not a valid date: '{0}'.".format(s))
## public and thread funtions ##
def get_origins(dbconnstr, did, ts_str):
    """Load the prefix -> [origin ASNs] mapping for dataset `did`.

    ts_str selects the month-partitioned origins table t_origins_YYYY_MM
    (assumes ts_str is a datetime-like object with strftime — TODO confirm
    against the caller, which passes the 'ts' column of t_datasets).
    Returns a dict mapping prefix string to a list of distinct origin ASNs;
    on query failure the partial/empty dict is returned.
    """
    print_log ("CALL get_origins (%s, %s, %s)" % (dbconnstr, did, ts_str))
    # NOTE(review): table name and dataset id are interpolated directly into
    # the SQL; safe only because both come from our own database — confirm.
    query_origins = ("SELECT p.prefix, o.asn FROM "
                     "(SELECT * FROM %s WHERE dataset_id = '%s') AS o "
                     "LEFT JOIN t_prefixes AS p ON o.prefix_id = p.id")
    ym_str = ts_str.strftime("%Y_%m")
    table = "t_origins_"+ym_str
    ptree = dict()
    try:
        con = psycopg2.connect(dbconnstr)
    except Exception, e:
        # A worker cannot proceed without a database connection.
        print_error("get_origins: connecting to database")
        print_error("failed with: %s" % ( e.message))
        sys.exit(1)
    cur = con.cursor()
    # get origins of dataset
    try:
        print_info("get_origins: execute query")
        query = query_origins % (table, did)
        cur.execute(query)
        rs = cur.fetchall()
    except Exception, e:
        print_error("QUERY: %s ; failed with: %s" % (query, e.message))
        con.rollback()
    else:
        print_info("get_origins: process response")
        # update timestamps of prefix origin association
        for row in rs:
            prefix = str(row[0])
            origin = int(row[1])
            if prefix not in ptree:
                ptree[prefix] = list()
            if origin not in ptree[prefix]:
                ptree[prefix].append(origin)
    return ptree
def get_stat(pt):
    """Compute summary statistics for a prefix->origins mapping.

    Returns a list:
    [num_asn, num_ips_valid, num_ips_bogus, ipspace_fraction,
     num_pfx, num_pfx_moas, comma-joined per-length prefix counts (/1../32)]
    """
    print_log("CALL get_stat")
    covered = IPSet(pt.keys())
    total_ips = len(covered)
    valid_ips = len(covered - reserved_ipv4)
    bogus_ips = total_ips - valid_ips
    # Fraction of the non-reserved IPv4 space covered by this dataset.
    share = float(valid_ips) / all_ips_valid

    prefixes_by_len = dict()
    origin_asns = set()
    moas_count = 0
    for pfx, origins in pt.items():
        origin_asns.update(origins)
        # A prefix announced by more than one origin is a MOAS case.
        if len(origins) > 1:
            moas_count += 1
        prefixes_by_len.setdefault(prefixlen(pfx), list()).append(pfx)

    # Per-length counts for /1 through /32, zero-filled.
    length_counts = dict((i, 0) for i in range(1, 33))
    for length, members in prefixes_by_len.items():
        length_counts[length] = len(members)
    str_pfx_len = ','.join(str(length_counts[i]) for i in range(1, 33))

    return [len(origin_asns), valid_ips, bogus_ips, share,
            len(pt.keys()), moas_count, str_pfx_len]
def worker(dbconnstr, queue):
    """Worker process loop.

    Consumes (dataset_id, ts) pairs from `queue` until the 'DONE' sentinel
    is received; for each dataset, loads the origins, computes statistics
    and writes the result row back to the database.
    """
    print_log ("START worker")
    for data in iter(queue.get, 'DONE'):
        try:
            did = data[0]
            ts = data[1]
            origins = get_origins(dbconnstr, did, ts)
            print_info ("%s get_origins done ..." % (mp.current_process().name))
            stat = get_stat(origins)
            print_info ("%s get_stat done ..." % (mp.current_process().name))
            # Output row: dataset id followed by the statistics columns.
            odata = list()
            odata.append(did)
            odata.extend(stat)
            output(dbconnstr, odata)
            print_info ("%s output done ..." % (mp.current_process().name))
        except Exception, e:
            # Keep the worker alive on per-dataset failures: log and move on.
            print_error("%s failed with: %s" %
                        (mp.current_process().name, e.message))
    return True
def output(dbconnstr, odata):
    """Insert one statistics row (odata) into t_origin_stats."""
    try:
        con = psycopg2.connect(dbconnstr)
    except Exception, e:
        print_error("output: connecting to database")
        print_error("failed with: %s" % ( e.message))
        sys.exit(1)
    cur = con.cursor()
    # NOTE(review): values are spliced into the SQL string directly; this is
    # only safe because odata is internally generated numeric data — a
    # parameterized cur.execute(sql, odata) would be more robust. Confirm.
    insert_stat = "INSERT INTO t_origin_stats VALUES (%s,%s,%s,%s,%s,%s,%s,'%s')"
    sql_insert = insert_stat % tuple(odata)
    try:
        print_info("output: insert stats")
        cur.execute(sql_insert)
        con.commit()
    except Exception, e:
        print_error("INSERT: %s ; failed with: %s" % (sql_insert, e.message))
        con.rollback()
    else:
        print_info ("STAT: " + ';'.join( str(x) for x in odata))
    return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--logging',
help='Ouptut logging.',
action='store_true')
parser.add_argument('-w', '--warning',
help='Output warnings.',
action='store_true')
parser.add_argument('-v', '--verbose',
help='print everything.',
action='store_true')
parser.add_argument('-p', '--postgres',
help='Use PostgresqlDB for input and output.',
required=True)
parser.add_argument('-b', '--begin',
help='Begin date (inclusive), format: yyyy-mm-dd',
type=valid_date, required=True)
parser.add_argument('-u', '--until',
help='Until date (exclusive), format: yyyy-mm-dd',
type=valid_date, required=True)
parser.add_argument('-t', '--type',
help='Type of data source (show all: ?).',
type=str, required=True)
parser.add_argument('-s', '--subtype',
help='Subtype of data source (show all: ?)',
type=str, required=True)
parser.add_argument('-n', '--numthreads',
help='Set number of threads.',
type=int, default=2)
args = vars(parser.parse_args())
global verbose
verbose = args['verbose']
global warning
warning = args['warning']
global logging
logging = args['logging']
# run
start_time = datetime.now()
print_log("START: " + start_time.strftime('%Y-%m-%d %H:%M:%S'))
begin = args['begin']
until = args['until']
maptype = args['type']
subtype = args['subtype']
dbconnstr = args['postgres']
workers = args['numthreads']
if not workers:
workers = mp.cpu_count() / 2
# prepare some vars
input_queue = mp.Queue()
# get all matching datasets
try:
con = psycopg2.connect(dbconnstr)
except Exception, e:
print_error("origin_ttl_postgres: connecting to database")
print_error("failed with: %s" % ( e.message))
sys.exit(1)
cur = con.cursor()
query_datasets = ("SELECT id, ts FROM t_datasets WHERE ts >= '%s' "
"AND ts < '%s' AND maptype = '%s' "
"AND subtype = '%s' ORDER BY ts")
datasets = OrderedDict()
query = query_datasets % (begin,until,maptype,subtype)
try:
cur.execute(query)
rs = cur.fetchall()
datasets = OrderedDict((row[0], row[1]) for row in rs)
except Exception, e:
print_error("QUERY: %s ; failed with: %s" % (query, e.message))
con.rollback()
print_log ("FOUND %s datasets." % str(len(datasets)))
# fill input_queue
print_info ("fill input queue")
for i in datasets.items():
input_queue.put(i)
# start workers
print_info("start workers")
processes = []
for w in xrange(workers):
p = mp.Process(target=worker,
args=(dbconnstr,input_queue))
p.start()
processes.append(p)
input_queue.put('DONE')
print_info("wait for workers")
for p in processes:
p.join()
end_time = datetime.now()
print_log("FINISH: " + end_time.strftime('%Y-%m-%d %H:%M:%S'))
done_time = end_time - start_time
print_log(" processing time [s]: " + str(done_time.total_seconds()))
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
"""Dependency graph implementation."""
from __future__ import absolute_import, print_function, unicode_literals
from collections import Counter
from textwrap import dedent
from kombu.utils.encoding import bytes_to_str, safe_str
from celery.five import items, python_2_unicode_compatible
__all__ = ('DOT', 'CycleError', 'DependencyGraph', 'GraphFormatter')
class DOT:
    """Constants related to the dot format."""

    # Opening of a graph definition. {IN}/{INp} are indentation strings
    # supplied by GraphFormatter; the doubled braces emit literal '{'.
    HEAD = dedent("""
        {IN}{type} {id} {{
        {INp}graph [{attrs}]
    """)
    # Single attribute, node and edge templates.
    ATTR = '{name}={value}'
    NODE = '{INp}"{0}" [{attrs}]'
    EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]'
    ATTRSEP = ', '
    # Edge connector by graph kind: undirected vs. directed.
    DIRS = {'graph': '--', 'digraph': '->'}
    # Closing brace of the graph definition.
    TAIL = '{IN}}}'
class CycleError(Exception):
    """A cycle was detected in an acyclic graph."""
@python_2_unicode_compatible
class DependencyGraph(object):
    """A directed acyclic graph of objects and their dependencies.

    Supports a robust topological sort
    to detect the order in which they must be handled.

    Takes an optional iterator of ``(obj, dependencies)``
    tuples to build the graph from.

    Warning:
        Does not support cycle detection.
    """

    def __init__(self, it=None, formatter=None):
        self.formatter = formatter or GraphFormatter()
        # Adjacency list: node -> list of nodes it depends on.
        self.adjacent = {}
        if it is not None:
            self.update(it)

    def add_arc(self, obj):
        """Add an object to the graph."""
        self.adjacent.setdefault(obj, [])

    def add_edge(self, A, B):
        """Add an edge from object ``A`` to object ``B``.

        I.e. ``A`` depends on ``B``.
        """
        self[A].append(B)

    def connect(self, graph):
        """Add nodes from another graph."""
        self.adjacent.update(graph.adjacent)

    def topsort(self):
        """Sort the graph topologically.

        Returns:
            List: of objects in the order in which they must be handled.
        """
        # Condense strongly connected components into single nodes first,
        # then run Kahn's algorithm on the component graph.
        graph = DependencyGraph()
        components = self._tarjan72()

        # Map each node to the component (tuple) that contains it.
        NC = {
            node: component for component in components for node in component
        }
        for component in components:
            graph.add_arc(component)
        for node in self:
            node_c = NC[node]
            for successor in self[node]:
                successor_c = NC[successor]
                if node_c != successor_c:
                    graph.add_edge(node_c, successor_c)
        # Each sorted element is a component tuple; report its first member.
        return [t[0] for t in graph._khan62()]

    def valency_of(self, obj):
        """Return the valency (degree) of a vertex in the graph."""
        # Direct dependency count plus, recursively, that of each
        # dependency; unknown nodes contribute zero.
        try:
            l = [len(self[obj])]
        except KeyError:
            return 0
        for node in self[obj]:
            l.append(self.valency_of(node))
        return sum(l)

    def update(self, it):
        """Update graph with data from a list of ``(obj, deps)`` tuples."""
        tups = list(it)
        # Register every node first so edges can reference nodes that
        # appear later in the input.
        for obj, _ in tups:
            self.add_arc(obj)
        for obj, deps in tups:
            for dep in deps:
                self.add_edge(obj, dep)

    def edges(self):
        """Return generator yielding every node with at least one edge."""
        return (obj for obj, adj in items(self) if adj)

    def _khan62(self):
        """Perform Khan's simple topological sort algorithm from '62.

        See https://en.wikipedia.org/wiki/Topological_sorting
        """
        count = Counter()
        result = []

        # count[n] = number of nodes that depend on n (in-degree here).
        for node in self:
            for successor in self[node]:
                count[successor] += 1
        ready = [node for node in self if not count[node]]

        while ready:
            node = ready.pop()
            result.append(node)

            for successor in self[node]:
                count[successor] -= 1
                if count[successor] == 0:
                    ready.append(successor)
        # Dependencies must come before their dependents.
        result.reverse()
        return result

    def _tarjan72(self):
        """Perform Tarjan's algorithm to find strongly connected components.

        See Also:
            :wikipedia:`Tarjan%27s_strongly_connected_components_algorithm`
        """
        result, stack, low = [], [], {}

        def visit(node):
            if node in low:
                return
            num = len(low)
            low[node] = num
            stack_pos = len(stack)
            stack.append(node)

            for successor in self[node]:
                visit(successor)
                low[node] = min(low[node], low[successor])

            if num == low[node]:
                # node is the root of a component: everything pushed since
                # stack_pos belongs to that component.
                component = tuple(stack[stack_pos:])
                stack[stack_pos:] = []
                result.append(component)
                for item in component:
                    # Mark members as finished (larger than any index).
                    low[item] = len(self)

        for node in self:
            visit(node)

        return result

    def to_dot(self, fh, formatter=None):
        """Convert the graph to DOT format.

        Arguments:
            fh (IO): A file, or a file-like object to write the graph to.
            formatter (celery.utils.graph.GraphFormatter): Custom graph
                formatter to use.
        """
        seen = set()
        draw = formatter or self.formatter

        def P(s):
            print(bytes_to_str(s), file=fh)

        def if_not_seen(fun, obj):
            # Emit each node declaration at most once, keyed on its label.
            if draw.label(obj) not in seen:
                P(fun(obj))
                seen.add(draw.label(obj))

        P(draw.head())
        for obj, adjacent in items(self):
            if not adjacent:
                if_not_seen(draw.terminal_node, obj)
            for req in adjacent:
                if_not_seen(draw.node, obj)
                P(draw.edge(obj, req))
        P(draw.tail())

    def format(self, obj):
        """Apply the formatter to obj, or pass it through unchanged."""
        return self.formatter(obj) if self.formatter else obj

    def __iter__(self):
        return iter(self.adjacent)

    def __getitem__(self, node):
        return self.adjacent[node]

    def __len__(self):
        return len(self.adjacent)

    def __contains__(self, obj):
        return obj in self.adjacent

    def _iterate_items(self):
        return items(self.adjacent)
    items = iteritems = _iterate_items

    def __repr__(self):
        return '\n'.join(self.repr_node(N) for N in self)

    def repr_node(self, obj, level=1, fmt='{0}({1})'):
        # One line per node (with its valency), children indented below.
        output = [fmt.format(obj, self.valency_of(obj))]
        if obj in self:
            for other in self[obj]:
                d = fmt.format(other, self.valency_of(other))
                output.append(' ' * level + d)
                output.extend(self.repr_node(other, level + 1).split('\n')[1:])
        return '\n'.join(output)
class GraphFormatter(object):
    """Format dependency graphs."""

    _attr = DOT.ATTR.strip()
    _node = DOT.NODE.strip()
    _edge = DOT.EDGE.strip()
    _head = DOT.HEAD.strip()
    _tail = DOT.TAIL.strip()
    _attrsep = DOT.ATTRSEP
    _dirs = dict(DOT.DIRS)

    # Default attribute schemes; class-level values are merged with
    # user-supplied overrides in __init__ / attrs().
    scheme = {
        'shape': 'box',
        'arrowhead': 'vee',
        'style': 'filled',
        'fontname': 'HelveticaNeue',
    }
    edge_scheme = {
        'color': 'darkseagreen4',
        'arrowcolor': 'black',
        'arrowsize': 0.7,
    }
    node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'}
    term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'}
    graph_scheme = {'bgcolor': 'mintcream'}

    def __init__(self, root=None, type=None, id=None,
                 indent=0, inw=' ' * 4, **scheme):
        self.id = id or 'dependencies'
        self.root = root
        self.type = type or 'digraph'
        # '--' for undirected graphs, '->' for digraphs.
        self.direction = self._dirs[self.type]
        # IN / INp: indentation for the graph header and nested lines.
        self.IN = inw * (indent or 0)
        self.INp = self.IN + inw
        self.scheme = dict(self.scheme, **scheme)
        self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root))

    def attr(self, name, value):
        """Render a single name="value" attribute."""
        value = '"{0}"'.format(value)
        return self.FMT(self._attr, name=name, value=value)

    def attrs(self, d, scheme=None):
        """Render an attribute dict, layering d over scheme over self.scheme."""
        d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d)
        return self._attrsep.join(
            safe_str(self.attr(k, v)) for k, v in items(d)
        )

    def head(self, **attrs):
        """Render the opening of the graph definition."""
        return self.FMT(
            self._head, id=self.id, type=self.type,
            attrs=self.attrs(attrs, self.graph_scheme),
        )

    def tail(self):
        """Render the closing brace of the graph definition."""
        return self.FMT(self._tail)

    def label(self, obj):
        """Return the display label for obj (identity by default)."""
        return obj

    def node(self, obj, **attrs):
        """Render a regular node."""
        return self.draw_node(obj, self.node_scheme, attrs)

    def terminal_node(self, obj, **attrs):
        """Render a terminal (no outgoing edges) node."""
        return self.draw_node(obj, self.term_scheme, attrs)

    def edge(self, a, b, **attrs):
        """Render an edge from a to b."""
        return self.draw_edge(a, b, **attrs)

    def _enc(self, s):
        # All output is produced as UTF-8 encoded bytes.
        return s.encode('utf-8', 'ignore')

    def FMT(self, fmt, *args, **kwargs):
        """Format fmt with args/kwargs plus the indentation placeholders."""
        return self._enc(fmt.format(
            *args, **dict(kwargs, IN=self.IN, INp=self.INp)
        ))

    def draw_edge(self, a, b, scheme=None, attrs=None):
        # NOTE: 'scheme' is accepted but the edge_scheme default is used.
        return self.FMT(
            self._edge, self.label(a), self.label(b),
            dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme),
        )

    def draw_node(self, obj, scheme=None, attrs=None):
        return self.FMT(
            self._node, self.label(obj), attrs=self.attrs(attrs, scheme),
        )
|
|
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Superclass of all HS2 tests containing commonly used functions.
from getpass import getuser
from TCLIService import TCLIService
from ImpalaService import ImpalaHiveServer2Service
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport
from thrift.protocol import TBinaryProtocol
from tests.common.impala_test_suite import ImpalaTestSuite, IMPALAD_HS2_HOST_PORT
def needs_session(protocol_version=
                  TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6,
                  conf_overlay=None):
    """Return a decorator that wraps a test method with HS2 session
    open/close, using the given protocol version and optional session
    configuration overlay."""
    def session_decorator(fn):
        """Decorator that establishes a session and sets self.session_handle. When the test is
        finished, the session is closed.
        """
        def add_session(self):
            open_session_req = TCLIService.TOpenSessionReq()
            open_session_req.username = getuser()
            open_session_req.configuration = dict()
            if conf_overlay is not None:
                open_session_req.configuration = conf_overlay
            open_session_req.client_protocol = protocol_version
            resp = self.hs2_client.OpenSession(open_session_req)
            HS2TestSuite.check_response(resp)
            self.session_handle = resp.sessionHandle
            # The server may negotiate downward but must never report a
            # newer protocol than was requested.
            assert protocol_version <= resp.serverProtocolVersion
            try:
                fn(self)
            finally:
                # Always close the session, even when the test failed.
                close_session_req = TCLIService.TCloseSessionReq()
                close_session_req.sessionHandle = resp.sessionHandle
                HS2TestSuite.check_response(self.hs2_client.CloseSession(close_session_req))
                self.session_handle = None
        return add_session
    return session_decorator
def operation_id_to_query_id(operation_id):
    """Convert an HS2 operation id into the Impala query-id string.

    The 16-byte GUID is split into two 8-byte halves; each half is rendered
    as byte-reversed (little-endian) uppercase hex and the halves are joined
    with ':'.
    """
    def _hex_reversed(segment):
        # Hex-encode the segment with its byte order reversed.
        return ''.join('%02X' % ord(ch) for ch in reversed(segment))

    guid = operation_id.guid
    return '{0}:{1}'.format(_hex_reversed(guid[:8]), _hex_reversed(guid[8:]))
class HS2TestSuite(ImpalaTestSuite):
TEST_DB = 'hs2_db'
HS2_V6_COLUMN_TYPES = ['boolVal', 'stringVal', 'byteVal', 'i16Val', 'i32Val', 'i64Val',
'doubleVal', 'binaryVal']
def setup(self):
    """Open a fresh HS2 connection to the impalad and drop any leftover
    test database before each test."""
    self.cleanup_db(self.TEST_DB)
    host, port = IMPALAD_HS2_HOST_PORT.split(":")
    self.socket = TSocket(host, port)
    # Buffered thrift transport over a raw socket, binary protocol.
    self.transport = TBufferedTransport(self.socket)
    self.transport.open()
    self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
    self.hs2_client = ImpalaHiveServer2Service.Client(self.protocol)
def teardown(self):
    """Drop the test database and close the HS2 socket."""
    self.cleanup_db(self.TEST_DB)
    if self.socket:
        self.socket.close()
@staticmethod
def check_response(response,
                   expected_status_code = TCLIService.TStatusCode.SUCCESS_STATUS,
                   expected_error_prefix = None):
    """Assert that an HS2 response carries the expected status code and,
    for non-success codes with a supplied prefix, that the error message
    starts with that prefix."""
    assert response.status.statusCode == expected_status_code
    expecting_success = \
        expected_status_code == TCLIService.TStatusCode.SUCCESS_STATUS
    if not expecting_success and expected_error_prefix is not None:
        assert response.status.errorMessage.startswith(expected_error_prefix)
def close(self, op_handle):
    """Close the given operation handle and assert the server accepted it."""
    close_op_req = TCLIService.TCloseOperationReq()
    close_op_req.operationHandle = op_handle
    close_op_resp = self.hs2_client.CloseOperation(close_op_req)
    assert close_op_resp.status.statusCode == TCLIService.TStatusCode.SUCCESS_STATUS
def get_num_rows(self, result_set):
    """Return the number of rows in a fetched result set, handling both
    row-oriented and columnar (HS2 V6) layouts."""
    # rows will always be set, so the only way to tell if we should use it
    # is to see if any columns are set.
    if result_set.columns is None or len(result_set.columns) == 0:
        return len(result_set.rows)
    # Columnar layout: exactly one of the typed-column members of the first
    # column is populated; its values list gives the row count.
    for col_type in HS2TestSuite.HS2_V6_COLUMN_TYPES:
        typed_col = getattr(result_set.columns[0], col_type)
        # Fix: compare against None with 'is not' rather than '!='.
        if typed_col is not None:
            return len(typed_col.values)
    assert False, "Columnar result set with no recognized column type"
def fetch_at_most(self, handle, orientation, size, expected_num_rows = None):
    """Fetches at most size number of rows from the query identified by the
    given operation handle. Uses the given fetch orientation. Asserts that
    the fetch returns a success status, and that the number of rows returned
    is equal to given expected_num_rows (if given). It is only safe for
    expected_num_rows to be 0 or 1: Impala does not guarantee that a larger
    result set will be returned in one go. Use fetch_until() for repeated
    fetches."""
    assert expected_num_rows is None or expected_num_rows in (0, 1)
    fetch_results_req = TCLIService.TFetchResultsReq()
    fetch_results_req.operationHandle = handle
    fetch_results_req.orientation = orientation
    fetch_results_req.maxRows = size
    fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
    HS2TestSuite.check_response(fetch_results_resp)
    # Fix: removed the dead local 'num_rows = size' — it was never read.
    if expected_num_rows is not None:
        assert self.get_num_rows(fetch_results_resp.results) == expected_num_rows
    return fetch_results_resp
  def fetch_until(self, handle, orientation, size, expected_num_rows = None):
    """Tries to fetch exactly 'size' rows from the given query handle, with the given
    fetch orientation, by repeatedly issuing fetch(size - num rows already fetched)
    calls. Returns fewer than 'size' rows if either a fetch() returns 0 rows (indicating
    EOS) or 'expected_num_rows' rows are returned. If 'expected_num_rows' is set to None,
    it defaults to 'size', so that the effect is to both ask for and expect the same
    number of rows."""
    assert expected_num_rows is None or (size >= expected_num_rows)
    fetch_results_req = TCLIService.TFetchResultsReq()
    fetch_results_req.operationHandle = handle
    fetch_results_req.orientation = orientation
    fetch_results_req.maxRows = size
    fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
    HS2TestSuite.check_response(fetch_results_resp)
    num_rows_fetched = self.get_num_rows(fetch_results_resp.results)
    if expected_num_rows is None: expected_num_rows = size
    while num_rows_fetched < expected_num_rows:
      # Always try to fetch at most 'size'
      fetch_results_req.maxRows = size - num_rows_fetched
      # Follow-up fetches must continue from where the last one stopped.
      fetch_results_req.orientation = TCLIService.TFetchOrientation.FETCH_NEXT
      fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
      HS2TestSuite.check_response(fetch_results_resp)
      last_fetch_size = self.get_num_rows(fetch_results_resp.results)
      # NOTE(review): the docstring promises fewer rows may be returned on EOS,
      # but this assert makes a 0-row fetch fail outright — confirm intent.
      assert last_fetch_size > 0
      num_rows_fetched += last_fetch_size
    assert num_rows_fetched == expected_num_rows
def fetch_fail(self, handle, orientation, expected_error_prefix):
"""Attempts to fetch rows from the query identified by the given operation handle.
Asserts that the fetch returns an error with an error message matching the given
expected_error_prefix."""
fetch_results_req = TCLIService.TFetchResultsReq()
fetch_results_req.operationHandle = handle
fetch_results_req.orientation = orientation
fetch_results_req.maxRows = 100
fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
HS2TestSuite.check_response(fetch_results_resp, TCLIService.TStatusCode.ERROR_STATUS,
expected_error_prefix)
return fetch_results_resp
def result_metadata(self, handle):
""" Gets the schema for the query identified by the handle """
req = TCLIService.TGetResultSetMetadataReq()
req.operationHandle = handle
resp = self.hs2_client.GetResultSetMetadata(req)
HS2TestSuite.check_response(resp)
return resp
def column_results_to_string(self, columns):
"""Quick-and-dirty way to get a readable string to compare the output of a
columnar-oriented query to its expected output"""
formatted = ""
num_rows = 0
# Determine the number of rows by finding the type of the first column
for col_type in HS2TestSuite.HS2_V6_COLUMN_TYPES:
typed_col = getattr(columns[0], col_type)
if typed_col != None:
num_rows = len(typed_col.values)
break
for i in xrange(num_rows):
row = []
for c in columns:
for col_type in HS2TestSuite.HS2_V6_COLUMN_TYPES:
typed_col = getattr(c, col_type)
if typed_col != None:
indicator = ord(typed_col.nulls[i / 8])
if indicator & (1 << (i % 8)):
row.append("NULL")
else:
row.append(str(typed_col.values[i]))
break
formatted += (", ".join(row) + "\n")
return (num_rows, formatted)
|
|
# -*- coding: utf-8 -*-
"""Contains helper functions for generating correctly
formatted hgrid list/folders.
"""
import datetime
import hurry.filesize
from modularodm import Q
from framework.auth.decorators import Auth
from website.util import paths
from website.util import sanitize
from website.settings import (
ALL_MY_PROJECTS_ID, ALL_MY_REGISTRATIONS_ID, ALL_MY_PROJECTS_NAME,
ALL_MY_REGISTRATIONS_NAME, DISK_SAVING_MODE
)
FOLDER = 'folder'
FILE = 'file'
KIND = 'kind'
# TODO: Validate the JSON schema, esp. for addons
DEFAULT_PERMISSIONS = {
'view': True,
'edit': False,
}
def format_filesize(size):
    """Return a human-readable string for ``size`` bytes (hurry.filesize
    'alternative' system, e.g. '10 KB')."""
    return hurry.filesize.size(size, system=hurry.filesize.alternative)
def default_urls(node_api, short_name):
    """Build the default hgrid 'fetch'/'upload' URLs for an addon.

    :param node_api: base API URL of the owning node (with trailing slash)
    :param short_name: the addon's short name
    :return dict: {'fetch': ..., 'upload': ...}
    """
    base = u'{node_api}{addonshort}/'.format(node_api=node_api, addonshort=short_name)
    return {
        'fetch': base + u'hgrid/',
        'upload': base,
    }
def to_hgrid(node, auth, **data):
    """Converts a node into a rubeus grid format
    :param Node node: the node to be parsed
    :param Auth auth: the user authorization object
    :returns: rubeus-formatted dict
    """
    collector = NodeFileCollector(node, auth, **data)
    return collector.to_hgrid()
def to_project_hgrid(node, auth, **data):
    """Converts a node into a rubeus grid format
    :param node Node: the node to be parsed
    :param auth Auth: the user authorization object
    :returns: rubeus-formatted dict
    """
    collector = NodeProjectCollector(node, auth, **data)
    return collector.to_hgrid()
def to_project_root(node, auth, **data):
    """Return the rubeus representation of ``node`` itself (organizer root)."""
    collector = NodeProjectCollector(node, auth, **data)
    return collector.get_root()
def build_addon_root(node_settings, name, permissions=None,
                     urls=None, extra=None, buttons=None, user=None,
                     **kwargs):
    """Builds the root or "dummy" folder for an addon.

    :param addonNodeSettingsBase node_settings: Addon settings
    :param String name: Additional information for the folder title
        eg. Repo name for Github or bucket name for S3
    :param dict or Auth permissions: Dictionary of permissions for the addon's
        content, or an Auth for use in the node's can_view/can_edit methods
    :param dict urls: Hgrid related urls
    :param String extra: Html to be appended to the addon folder name
        eg. Branch switcher for github
    :param list of dicts buttons: List of buttons to appear in HGrid row. Each
        dict must have 'text', a string that will appear on the button, and
        'action', the name of a function in HGrid.Actions
    :param User user: requesting user, used for per-user upload limits
    :param dict kwargs: Any additional information to add to the root folder
    :return dict: Hgrid formatted dictionary for the addon root folder
    """
    permissions = permissions or DEFAULT_PERMISSIONS
    if name:
        name = u'{0}: {1}'.format(node_settings.config.full_name, name)
    else:
        name = node_settings.config.full_name
    # Addon-configured urls win over both the caller's and the defaults.
    if hasattr(node_settings.config, 'urls') and node_settings.config.urls:
        urls = node_settings.config.urls
    if urls is None:
        urls = default_urls(node_settings.owner.api_url, node_settings.config.short_name)
    # In disk-saving mode, osfstorage uploads are disabled.
    forbid_edit = DISK_SAVING_MODE if node_settings.config.short_name == 'osfstorage' else False
    if isinstance(permissions, Auth):
        # An Auth was passed instead of a permissions dict: derive permissions
        # from the owning node.
        auth = permissions
        permissions = {
            'view': node_settings.owner.can_view(auth),
            'edit': (node_settings.owner.can_edit(auth)
                     and not node_settings.owner.is_registration
                     and not forbid_edit),
        }
    max_size = node_settings.config.max_file_size
    # Users tagged with 'high_upload_limit' get the addon's raised cap.
    if user and 'high_upload_limit' in user.system_tags:
        max_size = node_settings.config.high_max_file_size
    ret = {
        'provider': node_settings.config.short_name,
        'addonFullname': node_settings.config.full_name,
        'name': name,
        'iconUrl': node_settings.config.icon_url,
        KIND: FOLDER,
        'extra': extra,
        'buttons': buttons,
        'isAddonRoot': True,
        'permissions': permissions,
        'accept': {
            'maxSize': max_size,
            'acceptedFiles': node_settings.config.accept_extensions,
        },
        'urls': urls,
        'isPointer': False,
        'nodeId': node_settings.owner._id,
        'nodeUrl': node_settings.owner.url,
        'nodeApiUrl': node_settings.owner.api_url,
    }
    # Caller-supplied extras may override any of the defaults above.
    ret.update(kwargs)
    return ret
def build_addon_button(text, action, title=""):
    """Build an action button to be rendered in HGrid.

    :param str text: A string or html to appear on the button itself
    :param str action: The name of the HGrid action for the button to call.
        The callback for the HGrid action must be defined as a member of
        HGrid.Actions
    :param str title: Optional tooltip text
    :return dict: Hgrid formatted dictionary for custom buttons
    """
    button = {'text': text, 'action': action}
    if title:
        button['attributes'] = 'title="{title}" data-toggle="tooltip" data-placement="right" '.format(title=title)
    return button
def sort_by_name(hgrid_data):
    """Return ``hgrid_data`` sorted case-insensitively by each item's 'name'.

    ``None`` input is passed through unchanged.
    """
    if hgrid_data is None:
        return hgrid_data
    return sorted(hgrid_data, key=lambda item: item['name'].lower())
class NodeProjectCollector(object):
    """A utility class for creating rubeus formatted node data for project
    organization (the "project organizer" grid)."""
    def __init__(self, node, auth, just_one_level=False, **kwargs):
        # node: root Node to serialize; auth: Auth of the requesting user.
        # Extra kwargs are forwarded to addon hgrid-data providers.
        self.node = node
        self.auth = auth
        self.extra = kwargs
        self.can_view = node.can_view(auth)
        # Registrations are read-only regardless of user permissions.
        self.can_edit = node.can_edit(auth) and not node.is_registration
        self.just_one_level = just_one_level
    def _collect_components(self, node, visited):
        """Serialize the viewable, non-deleted children of ``node``."""
        rv = []
        for child in reversed(node.nodes):  # (child.resolve()._id not in visited or node.is_folder) and
            if child is not None and not child.is_deleted and child.resolve().can_view(auth=self.auth) and node.can_view(self.auth):
                # visited.append(child.resolve()._id)
                rv.append(self._serialize_node(child, visited=None, parent_is_folder=node.is_folder))
        return rv
    def collect_all_projects_smart_folder(self):
        """Build the "All my projects" smart folder; its child count is the
        user's top-level projects plus their orphaned components."""
        contributed = self.auth.user.node__contributed
        all_my_projects = contributed.find(
            Q('category', 'eq', 'project') &
            Q('is_deleted', 'eq', False) &
            Q('is_registration', 'eq', False) &
            Q('is_folder', 'eq', False) &
            # parent is not in the nodes list
            Q('__backrefs.parent.node.nodes', 'eq', None)
        )
        comps = contributed.find(
            # components only
            Q('category', 'ne', 'project') &
            # parent is not in the nodes list
            Q('__backrefs.parent.node.nodes', 'nin', all_my_projects.get_keys()) &
            # exclude deleted nodes
            Q('is_deleted', 'eq', False) &
            # exclude registrations
            Q('is_registration', 'eq', False)
        )
        children_count = all_my_projects.count() + comps.count()
        return self.make_smart_folder(ALL_MY_PROJECTS_NAME, ALL_MY_PROJECTS_ID, children_count)
    def collect_all_registrations_smart_folder(self):
        """Build the "All my registrations" smart folder (registration
        analogue of collect_all_projects_smart_folder)."""
        contributed = self.auth.user.node__contributed
        all_my_registrations = contributed.find(
            Q('category', 'eq', 'project') &
            Q('is_deleted', 'eq', False) &
            Q('is_registration', 'eq', True) &
            Q('is_folder', 'eq', False) &
            # parent is not in the nodes list
            Q('__backrefs.parent.node.nodes', 'eq', None)
        )
        comps = contributed.find(
            # components only
            Q('category', 'ne', 'project') &
            # parent is not in the nodes list
            Q('__backrefs.parent.node.nodes', 'nin', all_my_registrations.get_keys()) &
            # exclude deleted nodes
            Q('is_deleted', 'eq', False) &
            # include only registrations
            Q('is_registration', 'eq', True)
        )
        children_count = all_my_registrations.count() + comps.count()
        return self.make_smart_folder(ALL_MY_REGISTRATIONS_NAME, ALL_MY_REGISTRATIONS_ID, children_count)
    def make_smart_folder(self, title, node_id, children_count=0):
        """Return the static hgrid dict for a read-only 'smart folder'."""
        return_value = {
            'name': title,
            'kind': FOLDER,
            'permissions': {
                'edit': False,
                'view': True,
                'copyable': False,
                'movable': False,
                'acceptsDrops': False,
            },
            'urls': {
                'upload': None,
                'fetch': None,
            },
            'children': [],
            'type': 'smart-folder',
            'expand': False,
            'isPointer': False,
            'isFolder': True,
            'isSmartFolder': True,
            'dateModified': None,
            'modifiedDelta': 0,
            'modifiedBy': None,
            'parentIsFolder': True,
            'isDashboard': False,
            'contributors': [],
            'node_id': node_id,
            'childrenCount': children_count,
        }
        return return_value
    def get_root(self):
        """Serialize self.node itself as the organizer root folder."""
        root = self._serialize_node(self.node, visited=None, parent_is_folder=False)
        return root
    def to_hgrid(self):
        """Return the Rubeus.JS representation of the node's children, not including addons
        """
        root = self._collect_components(self.node, visited=None)
        # This will be important when we mix files and projects together: self._collect_addons(self.node) +
        if self.node.is_dashboard:
            # Smart folders only appear at the top of the user dashboard.
            root.insert(0, self.collect_all_projects_smart_folder())
            root.insert(0, self.collect_all_registrations_smart_folder())
        return root
    def _serialize_node(self, node, visited=None, parent_is_folder=False):
        """Returns the rubeus representation of a node folder for the project organizer.
        """
        visited = visited or []
        visited.append(node.resolve()._id)
        can_edit = node.can_edit(auth=self.auth) and not node.is_registration
        expanded = node.is_expanded(user=self.auth.user)
        can_view = node.can_view(auth=self.auth)
        children = []
        modified_delta = delta_date(node.date_modified)
        date_modified = node.date_modified.isoformat()
        contributors = []
        for contributor in node.contributors:
            if contributor._id in node.visible_contributor_ids:
                # Display name preference: family name, then given, then full.
                contributor_name = [
                    contributor.family_name,
                    contributor.given_name,
                    contributor.fullname,
                ]
                contributors.append({
                    'name': next(name for name in contributor_name if name),
                    'url': contributor.url,
                })
        try:
            user = node.logs[-1].user
            modified_by = user.family_name or user.given_name
        except (AttributeError, IndexError):
            # No logs yet, or the latest log has no user attached.
            modified_by = ''
        child_nodes = node.nodes
        readable_children = []
        for child in child_nodes:
            if child is not None:
                resolved = child.resolve()
                if resolved.can_view(auth=self.auth) and not resolved.is_deleted:
                    readable_children.append(child)
        children_count = len(readable_children)
        is_pointer = not node.primary
        is_component = node.category != 'project'
        is_project = node.category == 'project'
        is_file = False
        # Later assignments win: folder/component override pointer/project.
        type_ = 'project'
        if is_file:
            type_ = 'file'
        if is_pointer and not parent_is_folder:
            type_ = 'pointer'
        if node.is_folder:
            type_ = 'folder'
        if is_component:
            type_ = 'component'
        if node.is_dashboard:
            to_expand = True
        elif not is_pointer or parent_is_folder:
            to_expand = expanded
        else:
            to_expand = False
        return {
            # TODO: Remove safe_unescape_html when mako html safe comes in
            'name': sanitize.safe_unescape_html(node.title) if can_view else u'Private Component',
            'kind': FOLDER,
            'category': node.category,
            # Once we get files into the project organizer, files would be kind of FILE
            'permissions': {
                'edit': can_edit,
                'view': can_view,
                'copyable': not node.is_folder,
                'movable': parent_is_folder,
                'acceptsFolders': node.is_folder,
                'acceptsMoves': node.is_folder,
                'acceptsCopies': node.is_folder or is_project,
                'acceptsComponents': node.is_folder,
            },
            'urls': {
                'upload': None,
                'fetch': node.url if not node.is_folder else None,
            },
            'type': type_,
            'children': children,
            'expand': to_expand,
            # TODO: (bgeiger) replace these flags with a Kind property or something
            'isProject': is_project,
            'isPointer': is_pointer,
            'isComponent': is_component,
            'isFolder': node.is_folder,
            'isDashboard': node.is_dashboard,
            'isFile': is_file,
            'dateModified': date_modified,
            'modifiedDelta': max(1, modified_delta),
            'modifiedBy': modified_by,
            'parentIsFolder': parent_is_folder,
            'contributors': contributors,
            'node_id': node.resolve()._id,
            'isSmartFolder': False,
            'apiURL': node.api_url,
            'isRegistration': node.is_registration,
            'description': node.description,
            'registeredMeta': node.registered_meta,
            'childrenCount': children_count,
            'nodeType': node.project_or_component,
            'archiving': node.archive_job and not node.archive_job.done,
        }
    def _collect_addons(self, node):
        """Serialize hgrid data from every file-bearing addon on ``node``."""
        return_value = []
        for addon in node.get_addons():
            if addon.config.has_hgrid_files:
                temp = self._upgrade_addon_meta(addon.config.get_hgrid_data(addon, self.auth, **self.extra))
                for item in temp:
                    item["node_id"] = node._id
                    item["apiURL"] = node.api_url
                return_value.extend(temp or [])
        return return_value
    def _upgrade_addon_meta(self, data):
        """Fill in the organizer-specific flags that raw addon hgrid data
        lacks, so addon rows render like regular nodes.  Mutates and returns
        ``data``."""
        for datum in data:
            datum["expand"] = False
            datum["isProject"] = False
            datum["isPointer"] = False
            datum["isComponent"] = False
            datum["isFolder"] = False
            datum["isDashboard"] = False
            datum["isFile"] = True
            datum["dateModified"] = None
            datum["modifiedDelta"] = 0
            datum["modifiedBy"] = ""
            datum["parentIsFolder"] = False
            datum["contributors"] = []
            datum["isSmartFolder"] = False
            datum["isRegistration"] = False
            datum["description"] = ""
            datum["registeredMeta"] = {}
            datum["permissions"]["copyable"] = False
            datum["permissions"]["movable"] = False
            datum["permissions"]["acceptsFolders"] = False
            datum["permissions"]["acceptsMoves"] = False
            datum["permissions"]["acceptsCopies"] = False
            datum["permissions"]["acceptsComponents"] = False
        return data
class NodeFileCollector(object):
    """A utility class for creating rubeus formatted node data (files view)."""
    def __init__(self, node, auth, **kwargs):
        # Extra kwargs are forwarded to addon hgrid-data providers.
        self.node = node
        self.auth = auth
        self.extra = kwargs
        self.can_view = node.can_view(auth)
        # Registrations are read-only regardless of user permissions.
        self.can_edit = node.can_edit(auth) and not node.is_registration
    def to_hgrid(self):
        """Return the Rubeus.JS representation of the node's file data, including
        addons and components
        """
        root = self._serialize_node(self.node)
        return [root]
    def _collect_components(self, node, visited):
        """Serialize children of ``node`` not yet serialized ('visited' guards
        against pointer cycles)."""
        rv = []
        for child in node.nodes:
            if child.resolve()._id not in visited and not child.is_deleted and node.can_view(self.auth):
                visited.append(child.resolve()._id)
                rv.append(self._serialize_node(child, visited=visited))
        return rv
    def _get_node_name(self, node):
        """Input node object, return the project name to be displayed.
        Private nodes get a generic label based on their type."""
        can_view = node.can_view(auth=self.auth)
        if can_view:
            node_name = u'{0}: {1}'.format(node.project_or_component.capitalize(), sanitize.safe_unescape_html(node.title))
        elif node.is_registration:
            node_name = u'Private Registration'
        elif node.is_fork:
            node_name = u'Private Fork'
        elif not node.primary:
            node_name = u'Private Link'
        else:
            node_name = u'Private Component'
        return node_name
    def _serialize_node(self, node, visited=None):
        """Returns the rubeus representation of a node folder.
        """
        visited = visited or []
        visited.append(node.resolve()._id)
        can_view = node.can_view(auth=self.auth)
        if can_view:
            children = self._collect_addons(node) + self._collect_components(node, visited)
        else:
            # Hide the contents of nodes the user may not view.
            children = []
        return {
            # TODO: Remove safe_unescape_html when mako html safe comes in
            'name': self._get_node_name(node),
            'category': node.category,
            'kind': FOLDER,
            'permissions': {
                'edit': node.can_edit(self.auth) and not node.is_registration,
                'view': can_view,
            },
            'urls': {
                'upload': None,
                'fetch': None,
            },
            'children': children,
            'isPointer': not node.primary,
            'isSmartFolder': False,
            'nodeType': node.project_or_component,
            'nodeID': node.resolve()._id,
        }
    def _collect_addons(self, node):
        """Collect hgrid data for each file-bearing addon, sorted by name."""
        rv = []
        for addon in node.get_addons():
            if addon.config.has_hgrid_files:
                # WARNING: get_hgrid_data can return None if the addon is added but has no credentials.
                temp = addon.config.get_hgrid_data(addon, self.auth, **self.extra)
                rv.extend(sort_by_name(temp) or [])
        return rv
# TODO: these might belong in addons module
def collect_addon_assets(node):
    """Return a dictionary containing lists of JS and CSS assets for a node's
    addons.

    :rtype: {'tree_js': <list of JS scripts>, 'tree_css': <list of CSS files>}
    """
    js_assets = collect_addon_js(node)
    css_assets = collect_addon_css(node)
    return {
        'tree_js': list(js_assets),
        'tree_css': list(css_assets),
    }
# TODO: Abstract static collectors
def collect_addon_js(node, visited=None, filename='files.js', config_entry='files'):
    """Collect JavaScript includes for all add-ons implementing HGrid views.

    :param node: node whose addons (and descendants' addons) are scanned
    :param visited: accumulator of node ids already processed (cycle guard)
    :param filename: webpack bundle filename to resolve per addon
    :param config_entry: key into each addon config's ``include_js`` mapping
    :return set: JavaScript include paths
    """
    # NOTE: must coerce to list so it is JSON-serializable
    visited = visited or []
    visited.append(node._id)
    js = set()
    for addon in node.get_addons():
        # JS modules configured in each addon's __init__ file
        js = js.union(addon.config.include_js.get(config_entry, []))
        # Webpack bundle
        js_path = paths.resolve_addon_path(addon.config, filename)
        if js_path:
            js.add(js_path)
    for each in node.nodes:
        if each._id not in visited:
            visited.append(each._id)
            # Bug fix: propagate 'filename' and 'config_entry' so descendants
            # are collected with the same settings as the root call (the
            # recursion previously fell back to the defaults silently).
            js = js.union(collect_addon_js(each, visited=visited,
                                           filename=filename,
                                           config_entry=config_entry))
    return js
def collect_addon_css(node, visited=None):
    """Collect CSS includes for all add-ons implementing Hgrid views.

    :return: set of CSS include paths ('visited' guards against revisiting
        nodes reachable via more than one path)
    """
    visited = visited or []
    visited.append(node._id)
    css = set()
    for addon in node.get_addons():
        css |= set(addon.config.include_css.get('files', []))
    for child in node.nodes:
        if child._id not in visited:
            visited.append(child._id)
            css |= collect_addon_css(child, visited=visited)
    return css
def delta_date(d):
    """Return the signed number of seconds from now (naive UTC) until ``d``.

    Negative for datetimes in the past.
    """
    return (d - datetime.datetime.utcnow()).total_seconds()
|
|
import tensorflow as tf
import numpy as np
import random
from datetime import datetime
from neuralnet.cross_cnn import cross_cnn
from neuralnet.single_cnn import single_cnn
from neuralnet.single_bi_lstm import single_bi_lstm
from neuralnet.cross_bi_lstm import cross_bi_lstm
from neuralnet.single_cnn_lstm import single_cnn_lstm
from neuralnet.train_transw import train_transw
class train_nn():
def __init__(self,
emotion_list = [],
target_dic_path = "",
source_dic_path = "",
target_path = "",
source_path = "",
transfer_path = "",
part = 2,
model = "cnn",
sequence_length = 150,
cross_lingual = True,
embedding_dim = 128,
filter_sizes = [3, 4, 5],
num_filters = 128,
dropout_keep_prob = 0.5,
l2_reg_lambda = 0.00,
batch_size = 64,
num_epochs = 500,
evaluate_every = 10,
checkpoint_every = 100,
random_train = False,
op_step = 1e-3,
word_vec_target = "",
word_vec_source = "",
tf_df_target = "",
tf_df_source = "",
max_iter = 320,
vec_type = "static"):
self.emotion_list = emotion_list
self.target_dic_path = target_dic_path
self.source_dic_path = source_dic_path
self.target_path = target_path
self.source_path = source_path
self.transfer_path = transfer_path
self.part = part
self.model = model
self.sequence_length = sequence_length
self.cross_lingual = cross_lingual
self.embedding_dim = embedding_dim
self.filter_sizes = filter_sizes
self.num_filters = num_filters
self.dropout_keep_prob = dropout_keep_prob
self.l2_reg_lambda = l2_reg_lambda
self.batch_size = batch_size
self.num_epochs = num_epochs
self.evaluate_every = evaluate_every
self.checkpoint_every = checkpoint_every
self.pre_step = 100
self._seed = 10
self.random_train = random_train
self.op_step = op_step
self.word_vec_target = word_vec_target
self.word_vec_source = word_vec_source
self.tf_df_target = tf_df_target
self.tf_df_source = tf_df_source
self.max_iter = max_iter
self.vec_type = vec_type
    def run(self):
        """Entry point: train and return the best test accuracy observed.

        Cross-lingual mode first learns a translation matrix with
        train_transw, then loads bilingual data and runs cross_training();
        otherwise loads monolingual data and runs single_training().
        """
        print("emotions: " + str(self.emotion_list))
        if self.cross_lingual:
            tt = train_transw(
                target_dic_path = self.target_dic_path,
                source_dic_path = self.source_dic_path,
                transfer_path = self.transfer_path,
                word_vec_target = self.word_vec_target,
                word_vec_source = self.word_vec_source,
                tf_df_target = self.tf_df_target,
                tf_df_source = self.tf_df_source,
                weight_kind = "df",
                embedding_size = 128,
                trainable = False,
                max_iter = 6000)
            tt()
            # Learned translation parameters and vocab/weight lookups, reused
            # by the cross-lingual models below.
            self.trans_w = tt.trans_w
            self.trans_b = tt.trans_b
            self.sour_dic = tt.sour_dic
            self.tar_dic = tt.tar_dic
            self.wei_dic = tt.wei_dic
            self.load_cross_data()
            return self.cross_training()
        else:
            self.load_single_data()
            return self.single_training()
def load_data(self, path, add_len = 0):
data_label = []
data_feature = []
with open(path) as f:
for line in f:
line = line.strip()
label, line = line.split("\t")
if label not in self.emotion_list:
continue
labelfeature = [0 for ii in range(len(self.emotion_list))]
pos = self.emotion_list.index(label)
labelfeature[pos] = 1
data_label.append(labelfeature)
ll = line.split(" ")
data_feature.append([(int(x) + add_len + 1) for x in ll])
return data_label, data_feature
def load_dic_len(self, path):
dic_len = 0;
with open(path) as f:
for line in f:
dic_len += 1
return dic_len
def load_transfer(self, path, add_len):
res = {}
with open(path) as f:
for line in f:
line = line.strip()
index_en, index_cn, _, _ = line.split("\t")
res[int(index_en) + 1] = int(index_cn) + 1 + add_len
return res
def shuffle_data(self, label, feature, part = 1):
assert(len(label) == len(feature))
if self.random_train:
random.seed(self._seed)
shuffle_indices = np.random.permutation(np.arange(len(label)))
feature_shuffled = [feature[i] for i in shuffle_indices]
label_shuffled = [label[i] for i in shuffle_indices]
if part == 1:
return feature_shuffled, label_shuffled
elif part > 1:
train = int(len(label) * 1.0 / part)
return feature_shuffled[:train], feature_shuffled[train:], label_shuffled[:train], label_shuffled[train:]
else:
train = int(len(label) * 1.0 * part)
return feature_shuffled[:train], feature_shuffled[train:], label_shuffled[:train], label_shuffled[train:]
def batch_iter(self, data, batch_size, shuffle = False):
data_size = len(data)
num_batches = int(len(data)/batch_size) + 1
if shuffle:
if self.random_train:
np.random.seed(self._seed)
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = [data[x] for x in shuffle_indices]
else:
shuffled_data = data
data_len = [len(x[1]) for x in shuffled_data]
for batch_num in range(num_batches):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
if start_index < end_index:
yield shuffled_data[start_index:end_index]
def gene_pad_seq(self, batches):
seq_len = self.sequence_length
batch_seq_len = []
batch_pad = []
for b in batches:
if len(b) < seq_len:
feature = b + [0 for i in range(seq_len - len(b))]
batch_seq_len.append(len(b))
else:
feature = b[:seq_len]
batch_seq_len.append(seq_len)
batch_pad.append(feature)
return batch_pad, batch_seq_len
def load_word_vec(self, dic, vec_path, add_len):
with open(vec_path) as f:
for line in f:
ll = line.strip().split(" ")
if len(ll) < 129 or ll[0] == "</s>":
continue
word = int(ll[0])
embedding = [float(x) for x in ll[1:]]
dic[word + add_len] = embedding
    def load_cross_data(self):
        """Load bilingual training data and pre-build all training batches.

        Source-language word indices occupy [1, source_dic_len]; target
        indices are shifted above them.  Index 0 is the padding vector.
        Populates self.vocab_size, self.target_vec_dic, self.transform_dic,
        the target train/test split, and self.all_batches.
        """
        print("Loading data...")
        target_dic_len = self.load_dic_len(self.target_dic_path)
        print("target dic " + self.target_dic_path + " len: " + str(target_dic_len))
        source_dic_len = self.load_dic_len(self.source_dic_path)
        print("source dic " + self.source_dic_path + " len: " + str(source_dic_len))
        # +1 for the reserved padding index 0.
        self.vocab_size = target_dic_len + source_dic_len + 1
        self.target_label, self.target_feature = self.load_data(self.target_path, source_dic_len)
        print("target_path: " + self.target_path + " || len: " + str(len(self.target_label)))
        source_label, source_feature = self.load_data(self.source_path)
        print("source_path: " + self.source_path + " || len: " + str(len(source_label)))
        # Embedding table: index 0 is the zero padding vector; source vectors
        # first, then target vectors shifted past them.
        self.target_vec_dic = {}
        self.target_vec_dic[0] = [0 for x in range(self.embedding_dim)]
        self.load_word_vec(self.target_vec_dic, self.word_vec_source, 1)
        self.load_word_vec(self.target_vec_dic, self.word_vec_target, 1 + source_dic_len)
        # Convert {index: vector} into a dense list ordered by index.
        self.target_vec_dic = [x for _, x in list(sorted(self.target_vec_dic.items(), key = lambda x:x[0]))]
        self.transform_dic = self.load_transfer(self.transfer_path, source_dic_len)
        target_train_feature, self.target_test_feature, target_train_label, self.target_test_label = \
            self.shuffle_data(self.target_label, self.target_feature, self.part)
        self.source_train_feature, source_train_label = self.shuffle_data(source_label, source_feature)
        print("Target Train/Test split: {:d}/{:d}".format(len(target_train_feature), len(self.target_test_feature)))
        print("Source: {:d}".format(len(self.source_train_feature)))
        # Pre-materialize every epoch's batches; each item carries two flags
        # (arr_1/arr_0) marking whether it comes from the target or source set.
        self.all_batches = []
        for i in range(self.num_epochs):
            if self.part >= 1:
                part_epochs = self.part
            else:
                part_epochs = 1
            batches = []
            for j in range(part_epochs):
                arr_1 = [1 for k in range(len(target_train_label))]
                arr_0 = [0 for k in range(len(target_train_label))]
                target_batches = self.batch_iter(list(zip(target_train_label, target_train_feature, arr_1, arr_0)), self.batch_size)
                batches += target_batches
                # NOTE(review): arr_0/arr_1 are sized by the *target* train set
                # but zipped with the *source* lists here — zip truncates to
                # the shorter input, silently dropping source examples when the
                # source set is larger.  Confirm whether this is intended.
                source_batches = self.batch_iter(list(zip(source_train_label, self.source_train_feature, arr_0, arr_1)), self.batch_size)
                batches += source_batches
            if self.random_train:
                random.seed(self._seed)
            random.shuffle(batches)
            self.all_batches += batches
    def cross_training(self):
        """Train the cross-lingual model over self.all_batches and return the
        best test-set accuracy observed.

        Builds the model selected by self.model ("cnn" or "bi_lstm"), trains
        with Adam (rate halved after self.pre_step steps, hard stop at
        self.max_iter), and evaluates on the target test set every
        self.evaluate_every steps.
        """
        with tf.Graph().as_default():
            session_conf = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False)#, intra_op_parallelism_threads = 24)
            sess = tf.Session(config=session_conf)
            with sess.as_default():
                if self.model == "cnn":
                    #print(len(self.trans_w))
                    #print(self.trans_w[0])
                    #print(len(self.tar_dic))
                    #print(self.tar_dic[0])
                    #print(self.wei_dic[:20])
                    print("model cross cnn")
                    cross_model = cross_cnn(
                        target_vec_dic = self.target_vec_dic,
                        sequence_length = self.sequence_length,
                        num_classes = len(self.emotion_list),
                        vocab_size = self.vocab_size,
                        embedding_size = self.embedding_dim,
                        filter_sizes = self.filter_sizes,
                        num_filters = self.num_filters,
                        l2_reg_lambda = self.l2_reg_lambda,
                        trans_w = self.trans_w,
                        trans_b = self.trans_b,
                        sour_dic = self.sour_dic,
                        tar_dic = self.tar_dic,
                        wei_dic = self.wei_dic)
                elif self.model == "bi_lstm":
                    print("model cross bi_lstm")
                    cross_model = cross_bi_lstm(
                        target_vec_dic = self.target_vec_dic,
                        sequence_length = self.sequence_length,
                        num_classes = len(self.emotion_list),
                        vocab_size = self.vocab_size,
                        embedding_size = self.embedding_dim,
                        filter_sizes = self.filter_sizes,
                        num_filters = self.num_filters,
                        dropout_keep_prob = self.dropout_keep_prob,
                        l2_reg_lambda = self.l2_reg_lambda,
                        trans_w = self.trans_w,
                        trans_b = self.trans_b,
                        sour_dic = self.sour_dic,
                        tar_dic = self.tar_dic,
                        wei_dic = self.wei_dic,
                        vec_type = self.vec_type)
                else:
                    # NOTE(review): an unknown self.model falls through and
                    # 'cross_model' is unbound below — confirm callers only
                    # pass "cnn" or "bi_lstm".
                    pass
                # Define Training procedure
                global_step = tf.Variable(0, name="global_step", trainable=False)
                optimizer = tf.train.AdamOptimizer(learning_rate = cross_model.learning_rate)
                grads_and_vars = optimizer.compute_gradients(cross_model.loss)
                train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
                sess.run(tf.global_variables_initializer())
                max_accu = 0
                #training and test
                target_test_feature_pad, target_test_feature_seq_len = self.gene_pad_seq(self.target_test_feature)
                for batch in self.all_batches:
                    # f_cn/f_en flag whether this batch is target- or
                    # source-language data (constant within a batch).
                    y_batch, x_batch, f_cn, f_en = zip(*batch)
                    x_batch_pad, x_batch_seq_len = self.gene_pad_seq(x_batch)
                    # train
                    current_step = tf.train.global_step(sess, global_step)
                    # Halve the learning rate once warm-up steps are done.
                    if current_step < self.pre_step:
                        rate = self.op_step
                    else:
                        rate = self.op_step / 2.0
                    feed_dict = {
                        cross_model.seq_len: x_batch_seq_len,
                        cross_model.input_f_en: f_en[0],
                        cross_model.input_f_cn: f_cn[0],
                        cross_model.input_trans_en: self.sour_dic,
                        cross_model.input_trans_cn: self.tar_dic,
                        cross_model.input_x: x_batch_pad,
                        cross_model.input_y: y_batch,
                        cross_model.dropout_keep_prob: self.dropout_keep_prob,
                        cross_model.cn_weight: 1,
                        cross_model.en_weight: 1,
                        cross_model.trans_weight: 1e-3,
                        cross_model.learning_rate: rate
                    }
                    _, step, loss, kl, accuracy = sess.run(
                        [train_op, global_step, cross_model.loss, cross_model.kl, cross_model.accuracy],
                        feed_dict)
                    time_str = datetime.now().isoformat()
                    #print("train {}: step {}, loss {:g}, kl {:g}, acc {:g}".format(time_str, step, loss, kl, accuracy))
                    # test
                    #current_step = tf.train.global_step(sess, global_step)
                    if current_step > self.max_iter:
                        break
                    if current_step % self.evaluate_every == 0:
                        # Evaluation: no dropout, target-language only, no
                        # translation loss contribution.
                        feed_dict = {
                            cross_model.seq_len: target_test_feature_seq_len,
                            cross_model.input_f_en: 0,
                            cross_model.input_f_cn: 1,
                            cross_model.input_trans_en: [0],
                            cross_model.input_trans_cn: [0],
                            cross_model.input_x: target_test_feature_pad,
                            cross_model.input_y: self.target_test_label,
                            cross_model.dropout_keep_prob: 1,
                            cross_model.cn_weight: 1,
                            cross_model.en_weight: 0,
                            cross_model.trans_weight: 0,
                            cross_model.learning_rate: rate}
                        step, loss, kl, accuracy = sess.run(
                            [global_step, cross_model.loss, cross_model.kl, cross_model.accuracy],
                            feed_dict)
                        if accuracy > max_accu:
                            max_accu = accuracy
                        time_str = datetime.now().isoformat()
                        print("eval {}: step {}, loss {:g}, kl {:g}, acc {:g}, max_accu {:g}".format(time_str, step, loss, kl, accuracy, max_accu))
                        #print("eval {}: loss {:g}".format(time_str, loss))
                return max_accu
    def load_single_data(self):
        """Load monolingual (target-language) data and pre-build all batches.

        Populates self.vocab_size, self.target_vec_dic, the train/test split,
        and self.all_batches.
        """
        print("Loading data...")
        target_dic_len = self.load_dic_len(self.target_dic_path)
        print("target dic " + self.target_dic_path + " len: " + str(target_dic_len))
        # +1 for the reserved padding index 0.
        self.vocab_size = target_dic_len + 1
        self.target_vec_dic = {}
        self.target_vec_dic[0] = [0 for x in range(self.embedding_dim)]
        self.load_word_vec(self.target_vec_dic, self.word_vec_target, 1)
        # Convert {index: vector} into a dense list ordered by index.
        self.target_vec_dic = [x for _, x in list(sorted(self.target_vec_dic.items(), key = lambda x:x[0]))]
        #print(len(self.target_vec_dic))
        #print(len(self.target_vec_dic[0]))
        target_label, target_feature = self.load_data(self.target_path)
        target_train_feature, self.target_test_feature, target_train_label, self.target_test_label = \
            self.shuffle_data(target_label, target_feature, self.part)
        print("Target Train/Test split: {:d}/{:d}".format(len(target_train_feature), len(self.target_test_feature)))
        self.all_batches = []
        for i in range(self.num_epochs):
            batches = []
            if self.part >= 1:
                # NOTE(review): hard-coded 4 looks like a debugging leftover —
                # load_cross_data uses self.part here.  Confirm intent.
                part_epochs = 4#self.part
            else:
                part_epochs = 1
            for j in range(part_epochs):
                target_batches = self.batch_iter(list(zip(target_train_label, target_train_feature)), self.batch_size)
                batches += target_batches
            if self.random_train:
                random.seed(self._seed)
            random.shuffle(batches)
            self.all_batches += batches
def single_training(self):
with tf.Graph().as_default():
session_conf = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False)
sess = tf.Session(config = session_conf)
with sess.as_default():
if self.model == "cnn":
single_model = single_cnn(
target_vec_dic = self.target_vec_dic,
sequence_length = self.sequence_length,
num_classes = len(self.emotion_list),
vocab_size = self.vocab_size,
embedding_size = self.embedding_dim,
filter_sizes = self.filter_sizes,
num_filters = self.num_filters,
l2_reg_lambda = self.l2_reg_lambda)
elif self.model == "bi_lstm":
single_model = single_bi_lstm(
target_vec_dic = self.target_vec_dic,
sequence_length = self.sequence_length,
num_classes = len(self.emotion_list),
vocab_size = self.vocab_size,
embedding_size = self.embedding_dim,
filter_sizes = self.filter_sizes,
num_filters = self.num_filters,
l2_reg_lambda = self.l2_reg_lambda,
dropout_keep_prob = 0.5)
elif self.model == "cnn_lstm":
single_model = single_cnn_lstm(
target_vec_dic = self.target_vec_dic,
sequence_length = self.sequence_length,
num_classes = len(self.emotion_list),
vocab_size = self.vocab_size,
embedding_size = self.embedding_dim,
filter_sizes = self.filter_sizes,
num_filters = self.num_filters,
l2_reg_lambda = self.l2_reg_lambda,
dropout_keep_prob = 0.5)
else:
pass
# Define Training procedure
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(self.op_step)
grads_and_vars = optimizer.compute_gradients(single_model.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
sess.run(tf.global_variables_initializer())
max_accu = 0
#training and test
for batch in self.all_batches:
y_batch, x_batch = zip(*batch)
x_batch_pad, x_batch_seq_len = self.gene_pad_seq(x_batch)
feed_dict = {
single_model.input_x: x_batch_pad,
single_model.input_y: y_batch,
single_model.seq_len: x_batch_seq_len,
single_model.batch_size: len(y_batch),
single_model.dropout_keep_prob: 0.5,
}
_, step, loss, kl, accuracy = sess.run(
[train_op, global_step, single_model.loss, single_model.kl, single_model.accuracy],
feed_dict)
time_str = datetime.now().isoformat()
#print("train {}: step {}, loss {:g}, kl {:g}, acc {:g}".format(time_str, step, loss, kl, accuracy))
# test
current_step = tf.train.global_step(sess, global_step)
if current_step > self.max_iter:
break
if current_step % self.evaluate_every == 0:
target_test_feature_pad, target_test_feature_seq_len = self.gene_pad_seq(self.target_test_feature)
feed_dict = {
single_model.input_x: target_test_feature_pad,
single_model.input_y: self.target_test_label,
single_model.seq_len: target_test_feature_seq_len,
single_model.batch_size: len(y_batch),
single_model.dropout_keep_prob: 1,
}
step, loss, kl, accuracy = sess.run(
[global_step, single_model.loss, single_model.kl, single_model.accuracy],
feed_dict)
if accuracy > max_accu:
max_accu = accuracy
time_str = datetime.now().isoformat()
print("eval {}: step {}, loss {:g}, kl {:g}, acc {:g}, max_accu {:g}".format(time_str, step, loss, kl, accuracy, max_accu))
return max_accu
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._ip_firewall_rules_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_workspace_request, build_replace_all_request_initial
# Generic payload type produced by a custom ``cls`` response callback.
T = TypeVar('T')
# Signature of the optional ``cls`` hook every operation accepts: it receives
# the raw pipeline response, the deserialized body, and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IpFirewallRulesOperations:
    """IpFirewallRulesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.synapse.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list_by_workspace(
        self,
        resource_group_name: str,
        workspace_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.IpFirewallRuleInfoListResult"]:
        """Returns a list of firewall rules.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either IpFirewallRuleInfoListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.IpFirewallRuleInfoListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IpFirewallRuleInfoListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page: build the canonical list URL from the operation
            # metadata. Subsequent pages: reuse the server-provided next link.
            if not next_link:
                request = build_list_by_workspace_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    template_url=self.list_by_workspace.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_workspace_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("IpFirewallRuleInfoListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page; anything but HTTP 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/firewallRules'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        workspace_name: str,
        rule_name: str,
        ip_firewall_rule_info: "_models.IpFirewallRuleInfo",
        **kwargs: Any
    ) -> Optional["_models.IpFirewallRuleInfo"]:
        """Send the initial PUT of the create-or-update long-running operation
        and return the deserialized body (None for a 202 Accepted)."""
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.IpFirewallRuleInfo"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(ip_firewall_rule_info, 'IpFirewallRuleInfo')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            rule_name=rule_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200/201 carry a body; 202 (accepted, still running) leaves it None.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('IpFirewallRuleInfo', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('IpFirewallRuleInfo', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/firewallRules/{ruleName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        workspace_name: str,
        rule_name: str,
        ip_firewall_rule_info: "_models.IpFirewallRuleInfo",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.IpFirewallRuleInfo"]:
        """Creates or updates a firewall rule.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param rule_name: The IP firewall rule name.
        :type rule_name: str
        :param ip_firewall_rule_info: IP firewall rule properties.
        :type ip_firewall_rule_info: ~azure.mgmt.synapse.models.IpFirewallRuleInfo
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either IpFirewallRuleInfo or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.synapse.models.IpFirewallRuleInfo]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IpFirewallRuleInfo"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial request; the identity ``cls``
            # keeps the raw pipeline response for the poller.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                rule_name=rule_name,
                ip_firewall_rule_info=ip_firewall_rule_info,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            response = pipeline_response.http_response
            deserialized = self._deserialize('IpFirewallRuleInfo', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/firewallRules/{ruleName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        workspace_name: str,
        rule_name: str,
        **kwargs: Any
    ) -> Optional[Any]:
        """Send the initial DELETE of the delete long-running operation and
        return the deserialized body (None for 202/204)."""
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional[Any]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            rule_name=rule_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('object', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/firewallRules/{ruleName}'}  # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        workspace_name: str,
        rule_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[Any]:
        """Deletes a firewall rule.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param rule_name: The IP firewall rule name.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either any or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[any]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[Any]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                rule_name=rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('object', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/firewallRules/{ruleName}'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        workspace_name: str,
        rule_name: str,
        **kwargs: Any
    ) -> "_models.IpFirewallRuleInfo":
        """Get a firewall rule.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param rule_name: The IP firewall rule name.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: IpFirewallRuleInfo, or the result of cls(response)
        :rtype: ~azure.mgmt.synapse.models.IpFirewallRuleInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IpFirewallRuleInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            rule_name=rule_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('IpFirewallRuleInfo', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/firewallRules/{ruleName}'}  # type: ignore
    async def _replace_all_initial(
        self,
        resource_group_name: str,
        workspace_name: str,
        request: "_models.ReplaceAllIpFirewallRulesRequest",
        **kwargs: Any
    ) -> Optional["_models.ReplaceAllFirewallRulesOperationResponse"]:
        """Send the initial POST of the replace-all long-running operation and
        return the deserialized body (None for a 202 Accepted)."""
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ReplaceAllFirewallRulesOperationResponse"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(request, 'ReplaceAllIpFirewallRulesRequest')
        # NOTE: generated code rebinds the ``request`` parameter (the model)
        # to the outgoing HTTP request from here on.
        request = build_replace_all_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            json=_json,
            template_url=self._replace_all_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ReplaceAllFirewallRulesOperationResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _replace_all_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/replaceAllIpFirewallRules'}  # type: ignore
    @distributed_trace_async
    async def begin_replace_all(
        self,
        resource_group_name: str,
        workspace_name: str,
        request: "_models.ReplaceAllIpFirewallRulesRequest",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ReplaceAllFirewallRulesOperationResponse"]:
        """Replaces firewall rules.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param request: Replace all IP firewall rules request.
        :type request: ~azure.mgmt.synapse.models.ReplaceAllIpFirewallRulesRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either
         ReplaceAllFirewallRulesOperationResponse or the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.synapse.models.ReplaceAllFirewallRulesOperationResponse]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ReplaceAllFirewallRulesOperationResponse"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._replace_all_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                request=request,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('ReplaceAllFirewallRulesOperationResponse', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_replace_all.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/replaceAllIpFirewallRules'}  # type: ignore
|
|
import warnings
from contextlib import contextmanager
from copy import copy
from django.utils.deprecation import RemovedInDjango20Warning
# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)
# Sentinel distinguishing "current_app was not passed" from an explicit None.
_current_app_undefined = object()
class ContextPopException(Exception):
    """Raised when pop() has been called more times than push()."""
class ContextDict(dict):
    """A dict layer pushed onto a context's stack at construction time.

    Acts as a context manager: leaving the ``with`` block pops the layer
    back off the owning context.
    """

    def __init__(self, context, *args, **kwargs):
        super(ContextDict, self).__init__(*args, **kwargs)
        # Register this layer on top of the owner's stack and remember the
        # owner so __exit__ can pop it off again.
        self.context = context
        context.dicts.append(self)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.context.pop()
class BaseContext(object):
    """A stack of dicts with upward name resolution.

    The bottom layer always holds the template builtins (``True``,
    ``False``, ``None``); layers pushed later shadow earlier ones.
    """

    def __init__(self, dict_=None):
        self._reset_dicts(dict_)

    def _reset_dicts(self, value=None):
        # Bottom of the stack: the template-language builtins.
        self.dicts = [{'True': True, 'False': False, 'None': None}]
        if value is not None:
            self.dicts.append(value)

    def __copy__(self):
        # Copy through the parent proxy so subclass attributes survive, then
        # give the clone its own (shallow) copy of the layer list.
        clone = copy(super(BaseContext, self))
        clone.dicts = list(self.dicts)
        return clone

    def __repr__(self):
        return repr(self.dicts)

    def __iter__(self):
        # Most recently pushed layer first.
        return iter(reversed(self.dicts))

    def push(self, *args, **kwargs):
        return ContextDict(self, *args, **kwargs)

    def pop(self):
        # The builtins layer at the bottom must never be popped.
        if len(self.dicts) == 1:
            raise ContextPopException
        return self.dicts.pop()

    def __setitem__(self, key, value):
        "Set a variable in the current (top) layer."
        self.dicts[-1][key] = value

    def __getitem__(self, key):
        "Resolve a variable from the top of the stack downward."
        for layer in reversed(self.dicts):
            try:
                return layer[key]
            except KeyError:
                pass
        raise KeyError(key)

    def __delitem__(self, key):
        "Delete a variable from the current (top) layer."
        del self.dicts[-1][key]

    def has_key(self, key):
        return any(key in layer for layer in self.dicts)

    def __contains__(self, key):
        return self.has_key(key)

    def get(self, key, otherwise=None):
        for layer in reversed(self.dicts):
            if key in layer:
                return layer[key]
        return otherwise

    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def new(self, values=None):
        """
        Returns a new context with the same properties, but with only the
        values given in 'values' stored.
        """
        fresh = copy(self)
        fresh._reset_dicts(values)
        return fresh

    def flatten(self):
        """
        Returns self.dicts as one dictionary
        """
        merged = {}
        for layer in self.dicts:
            merged.update(layer)
        return merged

    def __eq__(self, other):
        """
        Compares two contexts by comparing theirs 'dicts' attributes.
        """
        if not isinstance(other, BaseContext):
            # Not comparable with non-contexts.
            return False
        # Layer ordering may differ, so compare the effective (flattened)
        # mapping, exactly as template rendering would see it.
        return self.flatten() == other.flatten()
class Context(BaseContext):
    "A stack container for variable context"
    def __init__(self, dict_=None, autoescape=True,
            current_app=_current_app_undefined,
            use_l10n=None, use_tz=None):
        # current_app defaults to a sentinel so an explicit None can be told
        # apart from "argument not passed" when deciding to warn.
        if current_app is not _current_app_undefined:
            warnings.warn(
                "The current_app argument of Context is deprecated. Use "
                "RequestContext and set the current_app attribute of its "
                "request instead.", RemovedInDjango20Warning, stacklevel=2)
        self.autoescape = autoescape
        self._current_app = current_app
        self.use_l10n = use_l10n
        self.use_tz = use_tz
        self.template_name = "unknown"
        self.render_context = RenderContext()
        # Set to the original template -- as opposed to extended or included
        # templates -- during rendering, see bind_template.
        self.template = None
        super(Context, self).__init__(dict_)
    @property
    def current_app(self):
        # Hide the sentinel from callers: report None when it was never set.
        return None if self._current_app is _current_app_undefined else self._current_app
    @contextmanager
    def bind_template(self, template):
        """Bind this context to *template* for the duration of a render."""
        if self.template is not None:
            raise RuntimeError("Context is already bound to a template")
        self.template = template
        try:
            yield
        finally:
            self.template = None
    def __copy__(self):
        # The render context must be copied too, so renders of the copy do
        # not share per-template node state with the original.
        duplicate = super(Context, self).__copy__()
        duplicate.render_context = copy(self.render_context)
        return duplicate
    def update(self, other_dict):
        "Pushes other_dict to the stack of dictionaries in the Context"
        if not hasattr(other_dict, '__getitem__'):
            raise TypeError('other_dict must be a mapping (dictionary-like) object.')
        return ContextDict(self, other_dict)
class RenderContext(BaseContext):
    """
    A stack container for storing Template state.
    RenderContext simplifies the implementation of template Nodes by providing a
    safe place to store state between invocations of a node's `render` method.
    The RenderContext also provides scoping rules that are more sensible for
    'template local' variables. The render context stack is pushed before each
    template is rendered, creating a fresh scope with nothing in it. Name
    resolution fails if a variable is not found at the top of the RequestContext
    stack. Thus, variables are local to a specific template and don't affect the
    rendering of other templates as they would if they were stored in the normal
    template context.
    """

    # Unlike BaseContext, every lookup below consults ONLY the top layer of
    # the stack: state never leaks between templates.

    def __iter__(self):
        return iter(self.dicts[-1])

    def has_key(self, key):
        return key in self.dicts[-1]

    def get(self, key, otherwise=None):
        return self.dicts[-1].get(key, otherwise)

    def __getitem__(self, key):
        return self.dicts[-1][key]
class RequestContext(Context):
    """
    This subclass of template.Context automatically populates itself using
    the processors defined in the engine's configuration.
    Additional processors can be specified as a list of callables
    using the "processors" keyword argument.
    """
    def __init__(self, request, dict_=None, processors=None,
            current_app=_current_app_undefined,
            use_l10n=None, use_tz=None):
        # current_app isn't passed here to avoid triggering the deprecation
        # warning in Context.__init__.
        super(RequestContext, self).__init__(
            dict_, use_l10n=use_l10n, use_tz=use_tz)
        if current_app is not _current_app_undefined:
            warnings.warn(
                "The current_app argument of RequestContext is deprecated. "
                "Set the current_app attribute of its request instead.",
                RemovedInDjango20Warning, stacklevel=2)
        self._current_app = current_app
        self.request = request
        self._processors = () if processors is None else tuple(processors)
        # Remember which stack slot belongs to the context processors so
        # bind_template can fill/empty it in place.
        self._processors_index = len(self.dicts)
        self.update({})  # placeholder for context processors output
    @contextmanager
    def bind_template(self, template):
        """Bind to *template* and populate the processor layer for the render."""
        if self.template is not None:
            raise RuntimeError("Context is already bound to a template")
        self.template = template
        # Set context processors according to the template engine's settings.
        processors = (template.engine.template_context_processors +
                      self._processors)
        updates = {}
        for processor in processors:
            updates.update(processor(self.request))
        # Fill the reserved slot in place so explicitly-pushed layers above
        # it keep shadowing processor output.
        self.dicts[self._processors_index] = updates
        try:
            yield
        finally:
            self.template = None
            # Unset context processors.
            self.dicts[self._processors_index] = {}
    def new(self, values=None):
        new_context = super(RequestContext, self).new(values)
        # This is for backwards-compatibility: RequestContexts created via
        # Context.new don't include values from context processors.
        if hasattr(new_context, '_processors_index'):
            del new_context._processors_index
        return new_context
def make_context(context, request=None):
    """
    Create a suitable Context from a plain dict and optionally an HttpRequest.
    """
    if request is None:
        return Context(context)
    # Build the request-bound context first, then push the caller's dict on
    # top so its values override anything the context processors contribute.
    result = RequestContext(request)
    if context:
        result.push(context)
    return result
|
|
import math
from time import gmtime, strftime
import os
import shutil
import random
import string
from os import listdir
import StringIO
import gzip
from os.path import exists,isfile, join
import socket
import sys
from utils.rsacrypt import RSACrypt
from utils.colors import bcolors
from utils.colors import update_progress
from client_config import client_conf
from Crypto.Hash import MD5
# Module-wide client configuration, loaded once at import time.
conf = client_conf()
class ServerConnection:
    """Authenticated, RSA-encrypted TCP channel to the Centinel server.

    Wire protocol (Python 2 byte strings throughout):
      * fixed message   - raw bytes of a length known to both sides
      * dynamic message - a 10-digit zero-padded decimal length header
                          followed by the payload (send_dyn/receive_dyn)
      * crypt message   - a chunk count, an MD5 digest of the plaintext,
                          then RSA-encrypted 256-byte chunks
    Single-letter commands: "x" close, "r" submit results, "i" initialize,
    "b" heartbeat; server replies "a" ack, "e" error, "c" complete.
    """

    def __init__(self, server_address = conf.c['server_address'], server_port = int(conf.c['server_port'])):
        # NOTE(review): the defaults are evaluated once, at import time,
        # from the module-level `conf` object.
        self.serversocket = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM)
        self.server_address = server_address
        self.server_port = server_port

    def connect(self, do_login = True):
        """Open the TCP connection, load the RSA key files, optionally log in.

        Returns True on success, False when the TCP connect fails.
        Sets self.connected and self.logged_in.
        """
        try:
            self.serversocket.connect((self.server_address, self.server_port))
        except socket.error, (value,message):
            if self.serversocket:
                self.serversocket.close()
            print bcolors.FAIL + "Could not connect to server (%s:%s): " %(self.server_address, self.server_port) + message + bcolors.ENDC
            self.connected = False
            return False
        try:
            # Server public key, client public key, client private key,
            # all PEM text read from the paths in the config.
            kf = open(conf.c['server_public_rsa'])
            self.server_public_key = kf.read()
            kf.close()
            kf = open(conf.c['client_public_rsa'])
            self.my_public_key = kf.read()
            kf.close()
            kf = open(conf.c['client_private_rsa'])
            self.my_private_key = kf.read()
            kf.close()
        except:
            # NOTE(review): key-load failure is only warned about; the
            # connection is still marked usable below, and later crypto
            # calls will fail on the missing attributes — confirm intended.
            print bcolors.WARNING + "Error loading key files." + bcolors.ENDC
        self.connected = True
        # Don't wait more than 15 seconds for the server.
        self.serversocket.settimeout(15)
        print bcolors.OKBLUE + strftime("%Y-%m-%d %H:%M:%S") + ": Server connection successful." + bcolors.ENDC
        if do_login:
            self.logged_in = self.login()
        else:
            self.logged_in = False
        self.connected = True
        return True

    def disconnect(self):
        """Best-effort polite shutdown: send the close command, then close."""
        if not self.connected:
            return True
        if self.serversocket:
            print bcolors.WARNING + strftime("%Y-%m-%d %H:%M:%S") + ": Closing connection to the server." + bcolors.ENDC
            try:
                #no need to authenticate when closing...
                self.send_dyn("unauthorized")
                self.send_fixed("x")
            except:
                pass
            self.serversocket.close()

    def send_fixed(self, data):
        """Send raw bytes; raises Exception when not connected or on error."""
        if not self.connected:
            print bcolors.FAIL + "Server not connected!" + bcolors.ENDC
            raise Exception("Not connected.")
            return False  # NOTE(review): unreachable after the raise above
        try:
            # NOTE(review): socket.send may transmit fewer than len(data)
            # bytes; sendall would guarantee full delivery — confirm.
            sent = self.serversocket.send(data)
        except socket.error, (value,message):
            if self.serversocket:
                self.serversocket.close()
            raise Exception("Could not send data to server (%s:%s): " %(self.server_address, self.server_port) + message)
            return False  # NOTE(review): unreachable after the raise above
        #print "Sent %d bytes to the server." %(sent)
        return True

    def send_dyn(self, data):
        """Send a length-prefixed message: 10-digit length, then payload."""
        if not self.connected:
            print bcolors.FAIL + "Server not connected!" + bcolors.ENDC
            return False
        self.send_fixed(str(len(data)).zfill(10))
        self.send_fixed(data)

    def receive_fixed(self, message_len):
        """Receive exactly message_len bytes, reading in chunks of <= 2048."""
        if not self.connected:
            print bcolors.FAIL + "Server not connected!" + bcolors.ENDC
            raise Exception("Not connected.")
            return False  # NOTE(review): unreachable after the raise above
        chunks = []
        bytes_recd = 0
        while bytes_recd < message_len:
            chunk = self.serversocket.recv(min(message_len - bytes_recd, 2048))
            if chunk == '':
                # Empty recv means the peer closed the connection.
                raise Exception("Socket connection broken (%s:%s): " %(self.server_address, self.server_port))
            chunks.append(chunk)
            bytes_recd = bytes_recd + len(chunk)
        #print ''.join(chunks)
        return ''.join(chunks)

    def receive_dyn(self):
        """Receive one length-prefixed message (counterpart of send_dyn)."""
        msg_size = self.receive_fixed(10)
        msg = self.receive_fixed(int(msg_size))
        return msg

    def receive_crypt(self, decryption_key, show_progress=True):
        """Receive an RSA-chunked message and verify its MD5 digest.

        Returns the decrypted payload, or False if the digest check fails.
        """
        crypt = RSACrypt()
        crypt.import_public_key(decryption_key)
        chunk_count = int(self.receive_dyn())
        received_digest = self.receive_dyn()
        org = chunk_count  # remember the total for progress reporting
        chunk_size = 256
        decrypted_results = ""
        if show_progress:
            print bcolors.OKBLUE + "Progress: "
        while chunk_count > 0:
            encrypted_chunk = self.receive_dyn()
            decrypted_results = decrypted_results + crypt.public_key_decrypt(encrypted_chunk)
            chunk_count = chunk_count - 1
            if show_progress:
                update_progress( int(100 * float(org - chunk_count) / float(org)) )
        if show_progress:
            print bcolors.ENDC
        # Integrity check: digest of the reassembled plaintext must match
        # the digest sent ahead of the chunks.
        calculated_digest = MD5.new(decrypted_results).digest()
        if calculated_digest == received_digest:
            return decrypted_results
        else:
            print bcolors.FAIL + "Data integrity check failed." + bcolors.ENDC
            return False

    def send_crypt(self, data, encryption_key):
        """RSA-encrypt data in 256-byte chunks; send count, digest, chunks."""
        crypt = RSACrypt()
        crypt.import_public_key(encryption_key)
        chunk_size = 256
        chunk_count = int(math.ceil(len(data) / float(chunk_size)))
        digest = MD5.new(data).digest()
        self.send_dyn(str(chunk_count))
        self.send_dyn(digest)
        ch = 0
        bytes_encrypted = 0
        encrypted_data = ""
        while bytes_encrypted < len(data):
            ch = ch + 1
            # public_key_encrypt returns a sequence; element 0 is the
            # ciphertext — presumably the PyCrypto tuple form; verify.
            encrypted_chunk = crypt.public_key_encrypt(data[bytes_encrypted:min(bytes_encrypted+chunk_size, len(data))])
            bytes_encrypted = bytes_encrypted + chunk_size
            self.send_dyn(encrypted_chunk[0])

    def sync_results(self):
        """Upload every file in results_dir; archive files sent successfully."""
        successful = 0
        total = 0
        if not os.path.exists(conf.c['results_archive_dir']):
            print "Creating results directory in %s" % (conf.c['results_archive_dir'])
            os.makedirs(conf.c['results_archive_dir'])
        for result_name in listdir(conf.c['results_dir']):
            if isfile(join(conf.c['results_dir'],result_name)):
                print bcolors.OKBLUE + "Submitting \"" + result_name + "\"..." + bcolors.ENDC
                total = total + 1
                if self.submit_results(result_name, join(conf.c['results_dir'],result_name)):
                    try:
                        # Move sent files out of the queue so they are not re-sent.
                        shutil.move(os.path.join(conf.c['results_dir'], result_name), os.path.join(conf.c['results_archive_dir'], result_name))
                        print bcolors.OKBLUE + "Moved \"" + result_name + "\" to the archive." + bcolors.ENDC
                    except:
                        print bcolors.FAIL + "There was an error while moving \"" + result_name + "\" to the archive. This will be re-sent the next time!" + bcolors.ENDC
                    successful = successful + 1
                else:
                    print bcolors.FAIL + "There was an error while sending \"" + result_name + "\". Will retry later." + bcolors.ENDC
        print bcolors.OKBLUE + "Sync complete (%d/%d were successful)." %(successful, total) + bcolors.ENDC

    def login(self):
        """Challenge-response login: send tag, echo back the decrypted token.

        Returns True on server ack "a", False on any error.
        """
        try:
            self.send_dyn(conf.c['client_tag'])
            if conf.c['client_tag'] <> "unauthorized":
                # Decrypt the server's token with our private key and send
                # it back encrypted under the server's public key.
                received_token = self.receive_crypt(self.my_private_key, show_progress=False)
                self.send_crypt(received_token, self.server_public_key)
            # NOTE(review): if the tag IS "unauthorized" and the server
            # sends no status byte, server_response below may be read
            # before assignment — confirm server behavior.
            server_response = self.receive_fixed(1)
        except Exception:
            print bcolors.FAIL + "Can't log in: " + bcolors.ENDC, sys.exc_info()[0]
            return False
        if server_response == "a":
            print bcolors.OKGREEN + "Authentication successful." + bcolors.ENDC
        elif server_response == "e":
            try:
                error_message = self.receive_dyn()
                print bcolors.FAIL + "Authentication error: " + error_message + bcolors.ENDC
            except Exception:
                print bcolors.FAIL + "Authentication error (could not receive error details from the server)." + bcolors.ENDC
            return False
        else:
            print bcolors.FAIL + "Unknown server response \"" + server_response + "\"" + bcolors.ENDC
            return False
        return True

    def submit_results(self, name, results_file_path):
        """Send one results file ("r" command). Returns True on final "c" ack."""
        if not self.connected:
            print bcolors.FAIL + "Server not connected!" + bcolors.ENDC
            return False
        if conf.c['client_tag'] == 'unauthorized':
            print bcolors.FAIL + "Client not authorized to send results." + bcolors.ENDC
            return False
        if not self.logged_in:
            print bcolors.FAIL + "Client not logged in." + bcolors.ENDC
            return False
        try:
            self.send_fixed("r")
            server_response = self.receive_fixed(1)
        except Exception:
            print bcolors.FAIL + "Can't submit results." + bcolors.ENDC
            return False
        if server_response == "a":
            print bcolors.OKGREEN + "Server ack received." + bcolors.ENDC
        elif server_response == "e":
            try:
                error_message = self.receive_dyn()
                print bcolors.FAIL + "Server error: " + error_message + bcolors.ENDC
            except Exception:
                print bcolors.FAIL + "Server error (could not receive error details from the server)." + bcolors.ENDC
            return False
        else:
            print bcolors.FAIL + "Unknown server response \"" + server_response + "\"" + bcolors.ENDC
            return False
        try:
            try:
                data_file = open(results_file_path, 'r')
            except:
                print bcolors.FAIL + "Can not open results file!" + bcolors.ENDC
                return False
            # Name first (plain), then the file body encrypted to the server.
            self.send_dyn(name)
            data = data_file.read()
            self.send_crypt(data, self.server_public_key)
            server_response = self.receive_fixed(1)
        except Exception:
            print bcolors.FAIL + "Error sending data to server." + bcolors.ENDC
            return False
        if server_response == "e":
            try:
                error_message = self.receive_dyn()
                print bcolors.FAIL + "Error sending data to server: " + error_message + bcolors.ENDC
                return False
            except:
                print bcolors.FAIL + "Server error (could not receive error details from the server)." + bcolors.ENDC
                return False
        elif server_response == "c":
            print bcolors.OKGREEN + "Data successfully sent." + bcolors.ENDC
            return True
        else:
            print bcolors.FAIL + "Unknown server response \"" + server_response + "\"" + bcolors.ENDC
            return False
        return True

    def initialize_client(self):
        """First-time handshake ("i"): obtain a tag, exchange keys, save config.

        Writes the client key pair, server public key and a fresh config
        file to the paths in `conf`. Returns False on error; no explicit
        return (None) on success.
        """
        try:
            self.send_dyn("unauthorized")
            self.receive_fixed(1)
            self.send_fixed("i")
            server_response = self.receive_fixed(1)
        except Exception:
            print bcolors.FAIL + "Can\'t initialize." + bcolors.ENDC
            return False
        if server_response == "a":
            print bcolors.OKGREEN + "Server ack received." + bcolors.ENDC
        elif server_response == "e":
            try:
                error_message = self.receive_dyn()
                print bcolors.FAIL + "Server error: " + error_message + bcolors.ENDC
            except Exception:
                print bcolors.FAIL + "Server error (could not receive error details from the server)." + bcolors.ENDC
            return False
        else:
            print bcolors.FAIL + "Unknown server response \"" + server_response + "\"" + bcolors.ENDC
            return False
        new_identity = self.receive_dyn() #identities are usually of length 5
        # Generate a fresh client key pair and swap public keys with the server.
        crypt = RSACrypt()
        my_public_key = crypt.public_key_string()
        self.server_public_key = self.receive_dyn()
        self.send_crypt(my_public_key, self.server_public_key)
        server_response = self.receive_fixed(1)
        # Persist keys and the newly assigned identity before checking the
        # final status byte.
        pkf = open(conf.c['client_public_rsa'], "w")
        pkf.write(crypt.public_key_string())
        pkf.close()
        pkf = open(conf.c['client_private_rsa'], "w")
        pkf.write(crypt.private_key_string())
        pkf.close()
        pkf = open(conf.c['server_public_rsa'], "w")
        pkf.write(self.server_public_key)
        pkf.close()
        pkf = open(conf.c['config_file'], "w")
        pkf.write("[CentinelClient]\n")
        pkf.write("client_tag="+new_identity)
        pkf.close()
        conf.c['client_tag'] = new_identity
        if server_response == "c":
            print bcolors.OKGREEN + "Server key negotiation and handshake successful. New tag: " + new_identity + bcolors.ENDC
        elif server_response == "e":
            try:
                error_message = self.receive_dyn()
                print bcolors.FAIL + "Server error: " + error_message + bcolors.ENDC
            except Exception:
                print bcolors.FAIL + "Server error (could not receive error details from the server)." + bcolors.ENDC
            return False
        else:
            print bcolors.FAIL + "Unknown server response \"" + server_response + "\"" + bcolors.ENDC
            return False

    def beat(self):
        """Heartbeat ("b"). Returns "beat", a decrypted server message, or False."""
        if not self.connected:
            print bcolors.FAIL + "Not connected to the server." + bcolors.ENDC
            return False
        if not self.logged_in:
            print bcolors.FAIL + "Unauthorized hearts don't beat! " + bcolors.ENDC
            return False
        self.send_fixed('b')
        server_response = self.receive_fixed(1)
        if server_response == 'b':
            return "beat"
        elif server_response == 'c':
            # Server has a payload for us; receive it encrypted to our key.
            return self.receive_crypt(self.my_private_key)
        else:
            return False

    def sync_exp(self):
        """Experiment sync — only the login guard is implemented (rest stubbed)."""
        if not self.logged_in:
            print bcolors.FAIL + "Unauthorized hearts don't beat! " + bcolors.ENDC
            return False
        #self.send_fixed("s")
        #py_exp_pairs = [ MD5.new(data).digest()
|
|
import astropy.cosmology as co
aa=co.Planck15
import astropy.io.fits as fits
import matplotlib.pyplot as p
import numpy as n
import os
import sys
# global cosmo quantities
# Redshift slice taken from the command line: z_min z_max.
z_min = float(sys.argv[1])
z_max = float(sys.argv[2])
#imf = 'kroupa'
#lO2_min = float(sys.argv[3]) # 'salpeter'
SNlimit = 5
# Double-Schechter stellar mass function (Ilbert et al. 2013 form):
# phi(M) dM with characteristic mass M_star and two (phi*, alpha) pairs.
smf_ilbert13 = lambda M, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s : ( phi_1s * (M/M_star) ** alpha_1s + phi_2s * (M/M_star) ** alpha_2s ) * n.e ** (-M/M_star) * (M/ M_star)
# Input catalog locations, all rooted at $DATA_DIR.
ff_dir = os.path.join(os.environ['DATA_DIR'], 'spm', 'firefly')
ll_dir = os.path.join(os.environ['DATA_DIR'], 'spm', 'literature')
co_dir = os.path.join(os.environ['DATA_DIR'], 'COSMOS' )
sdss_dir = os.path.join(os.environ['DATA_DIR'], 'SDSS')
spiders_dir = os.path.join(os.environ['DATA_DIR'], 'spiders')
out_dir = os.path.join(os.environ['DATA_DIR'], 'spm', 'results')
path_2_cosmos_cat = os.path.join( co_dir, "photoz_vers2.0_010312.fits")
#path_2_sdss_cat = os.path.join( ff_dir, "FireflyGalaxySdssDR14.fits" )
#path_2_eboss_cat = os.path.join( ff_dir, "FireflyGalaxyEbossDR14.fits" )
path_2_spall_sdss_dr12_cat = os.path.join( sdss_dir, "specObj-SDSS-dr12.fits" )
path_2_spall_sdss_dr14_cat = os.path.join( sdss_dir, "specObj-SDSS-dr14.fits" )
path_2_spall_boss_dr12_cat = os.path.join( sdss_dir, "specObj-BOSS-dr12.fits" )
path_2_spall_boss_dr14_cat = os.path.join( sdss_dir, "specObj-BOSS-dr14.fits" )
path_2_spall_spiders_dr14_cat = os.path.join( spiders_dir, "cluster_statistics_2016-11-08-DR14_spm.fits" )
#print "SDSS spAll DR14", len(fits.open(path_2_spall_sdss_dr14_cat)[1].data)
#print "BOSS spAll DR14",len(fits.open(path_2_spall_boss_dr14_cat)[1].data)
# NOTE(review): path_2_cosmos_cat is assigned twice with the same value.
path_2_cosmos_cat = os.path.join( co_dir, "photoz_vers2.0_010312.fits")
path_2_vvdsW_cat = os.path.join( ff_dir, "VVDS_WIDE_summary.v1.spm.fits" )
path_2_vipers_cat = os.path.join( ff_dir, "VIPERS_W14_summary_v2.1.linesFitted.spm.fits" )
path_2_vvdsD_cat = os.path.join( ff_dir, "VVDS_DEEP_summary.v1.spm.fits" )
path_2_deep2_cat = os.path.join( ff_dir, "zcat.deep2.dr4.v4.LFcatalogTC.Planck15.spm.v2.fits" )
# Load the survey catalogs (HDU 1 record arrays).
cosmos = fits.open(path_2_cosmos_cat)[1].data
deep2 = fits.open(path_2_deep2_cat)[1].data
vvdsD = fits.open(path_2_vvdsD_cat)[1].data
vvdsW = fits.open(path_2_vvdsW_cat)[1].data
vipers = fits.open(path_2_vipers_cat)[1].data
spiders = fits.open(path_2_spall_spiders_dr14_cat)[1].data
path_2_sdss_cat = os.path.join( ff_dir, "FireflyGalaxySdss26.fits" )
path_2_eboss_cat = os.path.join( ff_dir, "FireflyGalaxyEbossDR14.fits" )
# Portsmouth stellar-mass catalogs: star-forming/passive, Salpeter/Kroupa IMFs.
path_2_pS_salpeter_cat = os.path.join( ll_dir, "portsmouth_stellarmass_starforming_salp-26.fits.gz" )
path_2_pB_salpeter_cat = os.path.join( ll_dir, "portsmouth_stellarmass_starforming_salp-DR12-boss.fits.gz" )
path_2_pS_kroupa_cat = os.path.join( ll_dir, "portsmouth_stellarmass_starforming_krou-26.fits.gz" )
path_2_pB_kroupa_cat = os.path.join( ll_dir, "portsmouth_stellarmass_starforming_krou-DR12-boss.fits.gz" )
path_2_ppS_kroupa_cat = os.path.join( ll_dir, "portsmouth_stellarmass_passive_krou-26.fits")
path_2_ppB_kroupa_cat = os.path.join( ll_dir, "portsmouth_stellarmass_passive_krou-DR12.fits")
path_2_F16_cat = os.path.join( sdss_dir, "RA_DEC_z_w_fluxOII_Mstar_grcol_Mr_lumOII.dat" )
RA, DEC, z, weigth, O2flux, M_star, gr_color, Mr_5logh, O2luminosity = n.loadtxt(path_2_F16_cat, unpack=True)
# NOTE(review): cosmos is re-loaded here; the earlier load is redundant.
cosmos = fits.open(path_2_cosmos_cat)[1].data
sdss = fits.open(path_2_sdss_cat)[1].data
boss = fits.open(path_2_eboss_cat)[1].data
sdss_12_portSF_kr = fits.open(path_2_pS_kroupa_cat)[1].data
boss_12_portSF_kr = fits.open(path_2_pB_kroupa_cat)[1].data
sdss_12_portPA_kr = fits.open(path_2_ppS_kroupa_cat)[1].data
boss_12_portPA_kr = fits.open(path_2_ppB_kroupa_cat)[1].data
sdss_12_portSF_sa = fits.open(path_2_pS_salpeter_cat)[1].data
boss_12_portSF_sa = fits.open(path_2_pB_salpeter_cat)[1].data
# Ilbert+13 mass-function fit parameters; M_star here overwrites the F16
# M_star column loaded above.
path_ilbert13_SMF = os.path.join(ll_dir, "ilbert_2013_mass_function_params.txt")
zmin, zmax, N, M_comp, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s, log_rho_s = n.loadtxt(os.path.join( ll_dir, "ilbert_2013_mass_function_params.txt"), unpack=True)
#smfs_ilbert13 = n.array([lambda mass : smf_ilbert13( mass , 10**M_star[ii], phi_1s[ii]*10**(-3), alpha_1s[ii], phi_2s[ii]*10**(-3), alpha_2s[ii] ) for ii in range(len(M_star)) ])
# SMF for the first redshift bin (0.2 < z < 0.5 per the plot labels below).
smf01 = lambda mass : smf_ilbert13( mass , 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0] )
print 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0]
# Comoving volume per square degree in the chosen z slice
# (full sky = 4*pi sr = 129600/pi deg^2).
volume_per_deg2 = ( aa.comoving_volume(z_max) - aa.comoving_volume(z_min) ) * n.pi / 129600.
volume_per_deg2_val = volume_per_deg2.value
# global spm quantities
# stat functions
# Count of True entries in a boolean selection array.
ld = lambda selection : len(selection.nonzero()[0])
# Survey footprints in square degrees.
area_sdss = 7900.
area_boss = 10000.
area_cosmos = 1.52
def get_basic_stat_anyCat(catalog_name, z_name, z_err_name, name, zflg_val):
    """Print selection statistics for a FITS catalog as a LaTeX table row.

    Opens catalog_name (HDU 1), counts entries with a good redshift
    (positive error, z above its error) and entries passing the
    Portsmouth-style LOGMASS sanity cuts (0 < LOGMASS < 14, tight
    MIN/MAX bracket of width < 0.4 dex). Only prints; the return is
    commented out.
    """
    catalog = fits.open(catalog_name)[1].data
    catalog_zOk =(catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name])
    # No z_min/z_max cut here, unlike get_basic_stat_DR12.
    catalog_stat = (catalog_zOk) #& (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
    catalog_sel = (catalog_stat) & (catalog['LOGMASS'] < 14. ) & (catalog['LOGMASS'] > 0 ) & (catalog['MAXLOGMASS'] - catalog['MINLOGMASS'] <0.4) & (catalog['LOGMASS'] < catalog['MAXLOGMASS'] ) & (catalog['LOGMASS'] > catalog['MINLOGMASS'] )
    m_catalog = catalog['LOGMASS']
    w_catalog = n.ones_like(catalog['LOGMASS'])
    print catalog_name, "& - & $", ld(catalog_zOk),"$ & $", ld(catalog_sel),"$ \\\\"
    #return catalog_sel, m_catalog, w_catalog
def get_basic_stat_DR12(catalog, z_name, z_err_name, name, zflg_val):
    """Select DR12 (Portsmouth) galaxies in the global [z_min, z_max] slice.

    Prints a LaTeX table row with the good-redshift and selected counts,
    then returns (selection mask, log10 masses, unit weights).
    zflg_val is accepted for signature parity but unused here.
    """
    catalog_zOk =(catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name])
    catalog_stat = (catalog_zOk) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
    # Mass sanity: 0 < LOGMASS < 14 and a MIN/MAX bracket tighter than 0.4 dex.
    catalog_sel = (catalog_stat) & (catalog['LOGMASS'] < 14. ) & (catalog['LOGMASS'] > 0 ) & (catalog['MAXLOGMASS'] - catalog['MINLOGMASS'] <0.4) & (catalog['LOGMASS'] < catalog['MAXLOGMASS'] ) & (catalog['LOGMASS'] > catalog['MINLOGMASS'] )
    m_catalog = catalog['LOGMASS']
    w_catalog = n.ones_like(catalog['LOGMASS'])
    print name, "& - & $", ld(catalog_zOk),"$ & $", ld(catalog_sel),"$ \\\\"
    return catalog_sel, m_catalog, w_catalog
def get_basic_stat_DR14(catalog, z_name, z_err_name, class_name, zwarning, name, zflg_val, prefix):
    """Select DR14 FIREFLY galaxies in the global [z_min, z_max] slice.

    Requires class GALAXY, ZWARNING == zflg_val and a good redshift; the
    mass cut uses linear-mass columns '<prefix>stellar_mass[_low/_up]'
    with an error bracket tighter than 0.4 dex. Prints a LaTeX row and
    returns (selection mask, log10 masses, unit weights).
    """
    catalog_zOk =(catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name]) & (catalog[class_name]=='GALAXY') & (catalog[zwarning]==zflg_val)
    catalog_stat = (catalog_zOk) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
    catalog_sel = (catalog_stat) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low']) + n.log10(catalog[prefix+'stellar_mass_up']) < 0.4 )
    m_catalog = n.log10(catalog[prefix+'stellar_mass'])
    w_catalog = n.ones_like(catalog[prefix+'stellar_mass'])
    print name, '& $',len(catalog), "$ & $", ld(catalog_zOk),"$ & $", ld(catalog_sel),"$ \\\\"
    return catalog_sel, m_catalog, w_catalog
def get_hist(masses, weights, mbins):
    """Weighted histogram with Poisson-style error bars.

    Returns (bin centers, weighted counts, weighted counts / sqrt(raw
    counts)). Bins with zero raw counts produce inf/nan errors, exactly
    as the plain numpy expression does.
    """
    raw_counts = n.histogram(masses, mbins)[0]
    weighted_counts = n.histogram(masses, mbins, weights=weights)[0]
    centers = (mbins[1:] + mbins[:-1]) / 2.
    return centers, weighted_counts, weighted_counts * raw_counts ** (-0.5)
# Mass binning: 0.25 dex bins over log10(M/Msun) = 8 to 12.5.
dlog10m = 0.25
mbins = n.arange(8,12.5,dlog10m)
def plot_smf_b(IMF="Chabrier_ELODIE_"):
    """Overlay the BOSS/eBOSS stellar mass function for one IMF+library prefix.

    Uses the module-level `boss` catalog and converts weighted counts to
    a comoving number density per dex via the BOSS area and the volume of
    the [z_min, z_max] slice.
    """
    boss_sel, boss_m, boss_w = get_basic_stat_DR14(boss, 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO', IMF+' & BOSS & 14 ', 0., IMF)
    x, y, ye = get_hist(boss_m[boss_sel], weights = boss_w[boss_sel]/(dlog10m*n.log(10)*area_boss*volume_per_deg2_val), mbins = mbins)
    p.errorbar(x, y, yerr = ye, label=IMF+'BOSS', lw=1)
def plot_smf_s(IMF="Chabrier_ELODIE_"):
    """Overlay the SDSS stellar mass function for one IMF+library prefix.

    NOTE(review): the normalization uses area_boss, not area_sdss, even
    though the `sdss` catalog is plotted — confirm whether intentional.
    """
    boss_sel, boss_m, boss_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', IMF+' & BOSS & 14 ', 0., IMF)
    x, y, ye = get_hist(boss_m[boss_sel], weights = boss_w[boss_sel]/(dlog10m*n.log(10)*area_boss*volume_per_deg2_val), mbins = mbins)
    p.errorbar(x, y, yerr = ye, label=IMF+'SDSS', lw=1)
def plot_smf_spiders(IMF="Chabrier_ELODIE_"):
    """Overlay the SPIDERS-cluster stellar mass function for one IMF prefix.

    NOTE(review): normalized with area_boss, not a SPIDERS footprint —
    confirm whether intentional.
    """
    boss_sel, boss_m, boss_w = get_basic_stat_DR14(spiders, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', IMF+' & BOSS & 14 ', 0., IMF)
    x, y, ye = get_hist(boss_m[boss_sel], weights = boss_w[boss_sel]/(dlog10m*n.log(10)*area_boss*volume_per_deg2_val), mbins = mbins)
    p.errorbar(x, y, yerr = ye, label=IMF+'SPIDERS', lw=1)
# ---- Figure 1: BOSS/eBOSS SMFs for all nine IMF x library combinations,
# compared with the Ilbert+13 fit.
p.figure(1, (8,8))
p.plot(mbins, smf01(10**mbins), label='Ilbert 13, 0.2<z<0.5', ls='dashed')
plot_smf_b("Chabrier_ELODIE_")
plot_smf_b("Chabrier_MILES_")
plot_smf_b("Chabrier_STELIB_")
plot_smf_b("Kroupa_ELODIE_")
plot_smf_b("Kroupa_MILES_")
plot_smf_b("Kroupa_STELIB_")
plot_smf_b("Salpeter_ELODIE_")
plot_smf_b("Salpeter_MILES_")
plot_smf_b("Salpeter_STELIB_")
plot_smf_spiders("Chabrier_ELODIE_")
p.title(str(z_min)+'<z<'+str(z_max)+' BOSS+eBOSS')
p.xlabel(r"$\log_{10}$ (M / $M_\odot$ )")
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((1e-8, 1e-2))
p.xlim((9.5, 12.5))
p.grid()
p.savefig(os.path.join(out_dir, "firefly_SMFs_BOSS_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
# ---- Figure 2: same layout for SDSS.
p.figure(1, (8,8))
p.plot(mbins, smf01(10**mbins), label='Ilbert 13, 0.2<z<0.5', ls='dashed')
plot_smf_s("Chabrier_ELODIE_")
plot_smf_s("Chabrier_MILES_")
plot_smf_s("Chabrier_STELIB_")
plot_smf_s("Kroupa_ELODIE_")
plot_smf_s("Kroupa_MILES_")
plot_smf_s("Kroupa_STELIB_")
plot_smf_s("Salpeter_ELODIE_")
plot_smf_s("Salpeter_MILES_")
plot_smf_s("Salpeter_STELIB_")
plot_smf_spiders("Chabrier_ELODIE_")
p.title(str(z_min)+'<z<'+str(z_max)+' SDSS')
p.xlabel(r'$\log_{10}$(M / $M_\odot$ )')
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((1e-8, 1e-2))
p.xlim((9.5, 12.5))
p.grid()
p.savefig(os.path.join(out_dir, "firefly_SMFs_SDSS_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
# The script intentionally stops here; everything below is dead code.
sys.exit()
# NOTE(review): unreachable — retained table-row printing for the DR12/DR14
# catalog comparison.
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(boss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth SF Kroupa & BOSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(boss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Passive Kroupa & BOSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(boss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth SF Salpeter & BOSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Chabrier ELODIE & SDSS & 14 ', 0., "Chabrier_ELODIE_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Chabrier MILES & SDSS & 14 ', 0., "Chabrier_MILES_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Chabrier STELIB & SDSS & 14 ', 0., "Chabrier_STELIB_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Kroupa ELODIE & SDSS & 14 ', 0., "Kroupa_ELODIE_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Kroupa MILES & SDSS & 14 ', 0., "Kroupa_MILES_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Kroupa STELIB & SDSS & 14 ', 0., "Kroupa_STELIB_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Salpeter ELODIE & SDSS & 14 ', 0., "Salpeter_ELODIE_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Salpeter MILES & SDSS & 14 ', 0., "Salpeter_MILES_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Salpeter STELIB & SDSS & 14 ', 0., "Salpeter_STELIB_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(sdss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth SF Kroupa & SDSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(sdss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Passive Kroupa & SDSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(sdss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth SF Salpeter & SDSS & 12 ', 0.)
# NOTE(review): boss14_m / boss14_sel / area_boss14 / imf are never defined
# in this file; this block would raise NameError if it were ever reached.
x, y, ye = get_hist(boss14_m[boss14_sel], weights = boss14_w[boss14_sel]/(dlog10m*n.log(10)*area_boss14*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='BOSS14', lw=0.5)
p.title(str(z_min)+'<z<'+str(z_max))
p.xlabel(r'$\log_{10}$ (stellar mass '+imf+r" / $M_\odot$ )")
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((1e-8, 1e-2))
p.xlim((9.5, 12.5))
p.grid()
p.savefig(os.path.join(out_dir, "SDSS_SMF_"+imf+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
sys.exit()
def plotMF_raw(imf='kroupa'):
    """Legacy DR12-vs-DR14 mass-function and mass-comparison plots.

    NOTE(review): dead code — the module calls sys.exit() long before this
    definition, and it references helpers/globals (get_basic_stat_FF,
    sdss14, boss14, sdss12, boss12, area_* variants) that are not defined
    in this file. Kept for reference only.
    """
    sdss14_sel, sdss14_m, sdss14_w = get_basic_stat_FF(sdss14, 'Z', 'Z_ERR', 'ZWARNING', 'SDSS14', 0., imf=imf)
    boss14_sel, boss14_m, boss14_w = get_basic_stat_FF(boss14, 'Z_NOQSO', 'Z_ERR_NOQSO', 'ZWARNING_NOQSO', 'BOSS14', 0., imf=imf)
    sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(sdss12, 'Z', 'Z_ERR', 'SDSS12', 0.)
    boss12_sel, boss12_m, boss12_w = get_basic_stat_DR12(boss12, 'Z', 'Z_ERR', 'BOSS12', 0.)
    dlog10m = 0.25
    mbins = n.arange(8,12.5,dlog10m)
    # Raw mass functions from all four catalogs against Ilbert+13.
    p.figure(1, (8,8))
    p.plot(mbins, smf01(10**mbins), label='Ilbert 13, 0.2<z<0.5', ls='dashed')
    x, y, ye = get_hist(sdss14_m[sdss14_sel], weights = sdss14_w[sdss14_sel]/(dlog10m*n.log(10)*area_sdss14*volume_per_deg2_val), mbins = mbins)
    p.errorbar(x, y, yerr = ye, label='SDSS14', lw=1)
    x, y, ye = get_hist(sdss12_m[sdss12_sel], weights = sdss12_w[sdss12_sel]/(dlog10m*n.log(10)*area_sdss12*volume_per_deg2_val), mbins = mbins)
    p.errorbar(x, y, yerr = ye, label='SDSS12', lw=1)
    x, y, ye = get_hist(boss12_m[boss12_sel], weights = boss12_w[boss12_sel]/(dlog10m*n.log(10)*area_boss12*volume_per_deg2_val), mbins = mbins)
    p.errorbar(x, y, yerr = ye, label='BOSS12', lw=0.5)
    x, y, ye = get_hist(boss14_m[boss14_sel], weights = boss14_w[boss14_sel]/(dlog10m*n.log(10)*area_boss14*volume_per_deg2_val), mbins = mbins)
    p.errorbar(x, y, yerr = ye, label='BOSS14', lw=0.5)
    p.title(str(z_min)+'<z<'+str(z_max))
    p.xlabel(r'$\log_{10}$ (stellar mass '+imf+r" / $M_\odot$ )")
    p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
    p.yscale('log')
    p.legend(loc=0, frameon = False)
    p.ylim((1e-8, 1e-2))
    p.xlim((9.5, 12.5))
    p.grid()
    p.savefig(os.path.join(out_dir, "SDSS_SMF_"+imf+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
    p.clf()
    # Cross-match DR12 to DR14 on (PLATE, MJD, FIBERID) for the first 5000
    # DR12 rows, then compare the two mass estimates.
    import time
    t0 = time.time()
    id14 = n.arange(len(sdss14))
    id12_2_14 = n.array([ id14[(sdss14['PLATE'] == sdss12[id12]['PLATE'])&(sdss14['MJD'] == sdss12[id12]['MJD'])&(sdss14['FIBERID'] == sdss12[id12]['FIBERID'])][0] for id12 in n.arange(5000)]) # len(sdss12)) ])
    print time.time() - t0
    m14_i = sdss14['stellar_mass_'+imf][id12_2_14]
    m12_i = sdss12['LOGMASS'][n.arange(5000)]
    # Keep only physically plausible masses on both sides.
    ok = (m12_i >8 )&(m12_i < 13 )&(m14_i >8 )&(m14_i < 13 )
    m14 = m14_i[ok]
    m12 = m12_i[ok]
    mms = n.arange(8,13,0.02)
    # Linear and quadratic fits to the DR14-DR12 mass offset.
    outP = n.polyfit(m12, m14-m12, deg=1)
    p.plot(mms, n.polyval(outP, mms), 'm')
    outP = n.polyfit(m12, m14-m12, deg=2)
    p.plot(mms, n.polyval(outP, mms), 'm')
    p.plot(m12, m14-m12, 'b,')
    p.axhline(n.mean(m14-m12), color='k')
    p.xlabel('log(mass) dr12')
    p.ylabel(r'$\Delta \log$(mass) dr14-dr12')
    p.xlim((8,13.))
    p.ylim((-2, 2))
    p.grid()
    p.savefig(os.path.join(out_dir, "SDSS_mass_comparison"+imf+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
    p.show()
    # for each object in the catalog, assign the corresponding stellar mass from firefly
    # match in plate mjd fiberid using the SDSS DR12 as a reference i.e. make a DR12+DR14 catalog.
    # fraction of objects with a more accurate stellar mass estimates : error is smaller.
    # match DR14 wiht DR12
    # extract mean SN per pixel in a spectrum and select objects that should have a better nass estimate.
    #
plotMF_raw(imf='salpeter')
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class TextLineDatasetTest(test.TestCase):
  """Tests for readers.TextLineDataset over plain, GZIP and ZLIB files."""

  def _lineText(self, f, l):
    """Deterministic line payload for file index f, line index l."""
    return compat.as_bytes("%d: %d" % (f, l))

  def _createFiles(self,
                   num_files,
                   num_lines,
                   crlf=False,
                   compression_type=None):
    """Write num_files text files of num_lines lines; return their paths.

    compression_type: None/"" (plain), "GZIP" or "ZLIB". crlf selects
    \r\n line endings. Only file 0 ends with a trailing newline, so both
    terminator conventions are exercised.
    """
    filenames = []
    for i in range(num_files):
      fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
      filenames.append(fn)
      contents = []
      for j in range(num_lines):
        contents.append(self._lineText(i, j))
        # Always include a newline after the record unless it is
        # at the end of the file, in which case we include it
        if j + 1 != num_lines or i == 0:
          contents.append(b"\r\n" if crlf else b"\n")
      contents = b"".join(contents)
      if not compression_type:
        with open(fn, "wb") as f:
          f.write(contents)
      elif compression_type == "GZIP":
        with gzip.GzipFile(fn, "wb") as f:
          f.write(contents)
      elif compression_type == "ZLIB":
        contents = zlib.compress(contents)
        with open(fn, "wb") as f:
          f.write(contents)
      else:
        raise ValueError("Unsupported compression_type", compression_type)
    return filenames

  def _testTextLineDataset(self, compression_type=None):
    """End-to-end read checks: per-file, multi-file, repeated, batched."""
    test_filenames = self._createFiles(
        2, 5, crlf=True, compression_type=compression_type)
    filenames = array_ops.placeholder(dtypes.string, shape=[None])
    num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
    batch_size = array_ops.placeholder(dtypes.int64, shape=[])
    repeat_dataset = readers.TextLineDataset(
        filenames, compression_type=compression_type).repeat(num_epochs)
    batch_dataset = repeat_dataset.batch(batch_size)
    # One reinitializable iterator shared by both pipeline shapes.
    iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
    init_op = iterator.make_initializer(repeat_dataset)
    init_batch_op = iterator.make_initializer(batch_dataset)
    get_next = iterator.get_next()
    with self.test_session() as sess:
      # Basic test: read from file 0.
      sess.run(
          init_op, feed_dict={filenames: [test_filenames[0]],
                              num_epochs: 1})
      for i in range(5):
        self.assertEqual(self._lineText(0, i), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Basic test: read from file 1.
      sess.run(
          init_op, feed_dict={filenames: [test_filenames[1]],
                              num_epochs: 1})
      for i in range(5):
        self.assertEqual(self._lineText(1, i), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Basic test: read from both files.
      sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
      for j in range(2):
        for i in range(5):
          self.assertEqual(self._lineText(j, i), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Test repeated iteration through both files.
      sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
      for _ in range(10):
        for j in range(2):
          for i in range(5):
            self.assertEqual(self._lineText(j, i), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Test batched and repeated iteration through both files.
      sess.run(
          init_batch_op,
          feed_dict={filenames: test_filenames,
                     num_epochs: 10,
                     batch_size: 5})
      for _ in range(10):
        self.assertAllEqual([self._lineText(0, i) for i in range(5)],
                            sess.run(get_next))
        self.assertAllEqual([self._lineText(1, i) for i in range(5)],
                            sess.run(get_next))

  def testTextLineDatasetNoCompression(self):
    self._testTextLineDataset()

  def testTextLineDatasetGzipCompression(self):
    self._testTextLineDataset(compression_type="GZIP")

  def testTextLineDatasetZlibCompression(self):
    self._testTextLineDataset(compression_type="ZLIB")

  def testTextLineDatasetBuffering(self):
    """Same contents must come back when a small read buffer is used."""
    test_filenames = self._createFiles(2, 5, crlf=True)
    repeat_dataset = readers.TextLineDataset(test_filenames, buffer_size=10)
    iterator = repeat_dataset.make_one_shot_iterator()
    with self.test_session() as sess:
      for j in range(2):
        for i in range(5):
          self.assertEqual(self._lineText(j, i), sess.run(iterator.get_next()))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(iterator.get_next())
class FixedLengthRecordReaderTest(test.TestCase):
  def setUp(self):
    """Fix the file/record geometry shared by every test in this case."""
    super(FixedLengthRecordReaderTest, self).setUp()
    self._num_files = 2      # number of record files created
    self._num_records = 7    # records per file
    self._header_bytes = 5   # leading bytes to skip per file
    self._record_bytes = 3   # payload size of each record
    self._footer_bytes = 2   # trailing bytes to skip per file
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
  def _createFiles(self):
    """Write the fixture files (header + records + footer); return paths.

    Layout per file: self._header_bytes of "H", then self._num_records
    fixed-size records from self._record, then self._footer_bytes of "F".
    """
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
      filenames.append(fn)
      with open(fn, "wb") as f:
        f.write(b"H" * self._header_bytes)
        for j in range(self._num_records):
          f.write(self._record(i, j))
        f.write(b"F" * self._footer_bytes)
    return filenames
  def testFixedLengthRecordDataset(self):
    """Covers single-file, multi-file, repeated, and batched reads."""
    test_filenames = self._createFiles()
    filenames = array_ops.placeholder(dtypes.string, shape=[None])
    num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
    batch_size = array_ops.placeholder(dtypes.int64, shape=[])

    repeat_dataset = (readers.FixedLengthRecordDataset(
        filenames, self._record_bytes, self._header_bytes, self._footer_bytes)
                      .repeat(num_epochs))
    batch_dataset = repeat_dataset.batch(batch_size)

    # One reinitializable iterator serves both the flat and batched pipeline.
    iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
    init_op = iterator.make_initializer(repeat_dataset)
    init_batch_op = iterator.make_initializer(batch_dataset)
    get_next = iterator.get_next()

    with self.test_session() as sess:
      # Basic test: read from file 0.
      sess.run(
          init_op, feed_dict={filenames: [test_filenames[0]],
                              num_epochs: 1})
      for i in range(self._num_records):
        self.assertEqual(self._record(0, i), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Basic test: read from file 1.
      sess.run(
          init_op, feed_dict={filenames: [test_filenames[1]],
                              num_epochs: 1})
      for i in range(self._num_records):
        self.assertEqual(self._record(1, i), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Basic test: read from both files.
      sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
      for j in range(self._num_files):
        for i in range(self._num_records):
          self.assertEqual(self._record(j, i), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test repeated iteration through both files.
      sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
      for _ in range(10):
        for j in range(self._num_files):
          for i in range(self._num_records):
            self.assertEqual(self._record(j, i), sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test batched and repeated iteration through both files.
      # batch_size == records-per-file, so each batch is exactly one file.
      sess.run(
          init_batch_op,
          feed_dict={
              filenames: test_filenames,
              num_epochs: 10,
              batch_size: self._num_records
          })
      for _ in range(10):
        for j in range(self._num_files):
          self.assertAllEqual(
              [self._record(j, i) for i in range(self._num_records)],
              sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
def testFixedLengthRecordDatasetBuffering(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
  def testFixedLengthRecordDatasetWrongSize(self):
    """A body length that is not a multiple of record_bytes must error."""
    test_filenames = self._createFiles()
    dataset = readers.FixedLengthRecordDataset(
        test_filenames,
        self._record_bytes + 1,  # Incorrect record length.
        self._header_bytes,
        self._footer_bytes,
        buffer_size=10)
    iterator = dataset.make_one_shot_iterator()

    with self.test_session() as sess:
      # The regex below mirrors the kernel's exact error message (header 5,
      # footer 2, body 21 = 7 records * 3 bytes, requested length 4).
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"Excluding the header \(5 bytes\) and footer \(2 bytes\), input "
          r"file \".*fixed_length_record.0.txt\" has body length 21 bytes, "
          r"which is not an exact multiple of the record length \(4 bytes\)."):
        sess.run(iterator.get_next())
def _iterator_checkpoint_path(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _save_op(self, iterator_resource):
iterator_state_variant = gen_dataset_ops.serialize_iterator(
iterator_resource)
save_op = io_ops.write_file(
self._iterator_checkpoint_path(),
parsing_ops.serialize_tensor(iterator_state_variant))
return save_op
def _restore_op(self, iterator_resource):
iterator_state_variant = parsing_ops.parse_tensor(
io_ops.read_file(self._iterator_checkpoint_path()), dtypes.variant)
restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
iterator_state_variant)
return restore_op
def _build_iterator_graph(self, num_epochs):
filenames = self._createFiles()
dataset = (readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes, self._footer_bytes)
.repeat(num_epochs))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next_op = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next_op, save_op, restore_op
def _restore_iterator(self):
output_types = dtypes.string
output_shapes = tensor_shape.scalar()
iterator = iterator_ops.Iterator.from_structure(output_types, output_shapes)
get_next = iterator.get_next()
restore_op = self._restore_op(iterator._iterator_resource)
return restore_op, get_next
  def testSaveRestore(self):
    """Saves iterator state mid-stream and resumes from it in a fresh graph."""
    num_epochs = 10
    epoch_break = 5
    file_break = self._num_files // 2
    record_break = self._num_records // 2

    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        for epoch in range(num_epochs):
          for f in range(self._num_files):
            for r in range(self._num_records):
              # Save exactly at the break point; the else/continue/break
              # ladder below propagates the break out of all three loops.
              if (epoch == epoch_break and f == file_break and
                  r == record_break):
                sess.run(save_op)
                break
              self.assertEqual(self._record(f, r), sess.run(get_next_op))
            else:
              continue
            break
          else:
            continue
          break
        else:
          # Only reached if the break point was never hit, i.e. the whole
          # stream was consumed.
          with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next_op)

    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(restore_op)
        for epoch in range(num_epochs):
          for f in range(self._num_files):
            for r in range(self._num_records):
              # Skip everything that was already consumed before the save.
              if (epoch < epoch_break or
                  (epoch == epoch_break and f < file_break) or
                  (epoch == epoch_break and f == file_break and
                   r < record_break)):
                continue
              self.assertEqual(self._record(f, r), sess.run(get_next_op))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next_op)
  def testInitThenRestore(self):
    # Note: Calling init_op before restore_op is redundant. This test just makes
    # sure we do not fail if restore is called on an already initialized
    # iterator resource.
    num_epochs = 10
    epoch_break = 5
    file_break = self._num_files // 2
    record_break = self._num_records // 2

    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        for epoch in range(num_epochs):
          for f in range(self._num_files):
            for r in range(self._num_records):
              # Save at the break point; the else/continue/break ladder
              # propagates the break out of all three loops.
              if (epoch == epoch_break and f == file_break and
                  r == record_break):
                sess.run(save_op)
                break
              self.assertEqual(self._record(f, r), sess.run(get_next_op))
            else:
              continue
            break
          else:
            continue
          break
        else:
          with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next_op)

    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        # init first, then restore: the restore must win.
        sess.run(init_op)
        sess.run(restore_op)
        for epoch in range(num_epochs):
          for f in range(self._num_files):
            for r in range(self._num_records):
              # Skip everything consumed before the checkpoint.
              if (epoch < epoch_break or
                  (epoch == epoch_break and f < file_break) or
                  (epoch == epoch_break and f == file_break and
                   r < record_break)):
                continue
              self.assertEqual(self._record(f, r), sess.run(get_next_op))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next_op)
  def testRestoreInModifiedGraph(self):
    """Restores a checkpoint into a graph built with different num_epochs.

    NOTE(review): the second graph requests num_epochs_1 = 20, yet the test
    expects the stream to end after the original 10 epochs — presumably the
    saved iterator state carries the repeat progress; confirm against the
    iterator serialization semantics.
    """
    num_epochs = 10
    num_epochs_1 = 20
    epoch_break = 5
    file_break = self._num_files // 2
    record_break = self._num_records // 2

    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        for epoch in range(num_epochs):
          for f in range(self._num_files):
            for r in range(self._num_records):
              # Save at the break point; the else/continue/break ladder
              # propagates the break out of all three loops.
              if (epoch == epoch_break and f == file_break and
                  r == record_break):
                sess.run(save_op)
                break
              self.assertEqual(self._record(f, r), sess.run(get_next_op))
            else:
              continue
            break
          else:
            continue
          break
        else:
          with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next_op)

    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs_1)
      with self.test_session(graph=g) as sess:
        sess.run(restore_op)
        for epoch in range(num_epochs):
          for f in range(self._num_files):
            for r in range(self._num_records):
              # Skip everything consumed before the checkpoint.
              if (epoch < epoch_break or
                  (epoch == epoch_break and f < file_break) or
                  (epoch == epoch_break and f == file_break and
                   r < record_break)):
                continue
              self.assertEqual(self._record(f, r), sess.run(get_next_op))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next_op)
  def testRestoreWithoutBuildingDatasetGraph(self):
    """Restores into a bare iterator with no dataset graph attached."""
    num_epochs = 10
    epoch_break = 5
    file_break = self._num_files // 2
    record_break = self._num_records // 2

    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        for epoch in range(num_epochs):
          for f in range(self._num_files):
            for r in range(self._num_records):
              # Save at the break point; the else/continue/break ladder
              # propagates the break out of all three loops.
              if (epoch == epoch_break and f == file_break and
                  r == record_break):
                sess.run(save_op)
                break
              self.assertEqual(self._record(f, r), sess.run(get_next_op))
            else:
              continue
            break
          else:
            continue
          break
        else:
          with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next_op)

    with ops.Graph().as_default() as g:
      # Second graph contains only a structure-typed iterator: no dataset.
      restore_op, get_next_op = self._restore_iterator()
      with self.test_session(graph=g) as sess:
        sess.run(restore_op)
        for epoch in range(num_epochs):
          for f in range(self._num_files):
            for r in range(self._num_records):
              # Skip everything consumed before the checkpoint.
              if (epoch < epoch_break or
                  (epoch == epoch_break and f < file_break) or
                  (epoch == epoch_break and f == file_break and
                   r < record_break)):
                continue
              self.assertEqual(self._record(f, r), sess.run(get_next_op))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next_op)
  def testRestoreUnusedIterator(self):
    """Restoring a never-advanced iterator replays the full stream."""
    num_epochs = 10
    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        # Save unused iterator.
        sess.run(save_op)
    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(restore_op)
        # All epochs * files * records elements must still be available.
        for _ in range(num_epochs * self._num_files * self._num_records):
          sess.run(get_next_op)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next_op)
  def testRestoreExhaustedIterator(self):
    """Restoring a fully-consumed iterator yields OutOfRange immediately."""
    num_epochs = 10
    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(init_op)
        # Note: There is no checkpoint saved currently so a NotFoundError is
        # raised.
        with self.assertRaises(errors.NotFoundError):
          sess.run(restore_op)
        # Drain the iterator completely, then save its exhausted state.
        for _ in range(num_epochs):
          for f in range(self._num_files):
            for r in range(self._num_records):
              self.assertEqual(self._record(f, r), sess.run(get_next_op))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next_op)
        sess.run(save_op)
    with ops.Graph().as_default() as g:
      init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
          num_epochs=num_epochs)
      with self.test_session(graph=g) as sess:
        sess.run(restore_op)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next_op)
class TFRecordDatasetTest(test.TestCase):
  def setUp(self):
    """Builds the shared feedable filenames/epochs/batch-size pipeline."""
    super(TFRecordDatasetTest, self).setUp()
    self._num_files = 2
    self._num_records = 7

    self.test_filenames = self._createFiles()

    self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
    # num_epochs and compression_type default when not fed.
    self.num_epochs = array_ops.placeholder_with_default(
        constant_op.constant(1, dtypes.int64), shape=[])
    self.compression_type = array_ops.placeholder_with_default("", shape=[])
    self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])

    repeat_dataset = readers.TFRecordDataset(self.filenames,
                                             self.compression_type).repeat(
                                                 self.num_epochs)
    batch_dataset = repeat_dataset.batch(self.batch_size)

    # One reinitializable iterator serves both the flat and batched dataset.
    iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
    self.init_op = iterator.make_initializer(repeat_dataset)
    self.init_batch_op = iterator.make_initializer(batch_dataset)
    self.get_next = iterator.get_next()
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
  def testReadOneEpoch(self):
    """Single-epoch reads: file 0 alone, file 1 alone, then both in order."""
    with self.test_session() as sess:
      # Basic test: read from file 0.
      sess.run(
          self.init_op,
          feed_dict={
              self.filenames: [self.test_filenames[0]],
              self.num_epochs: 1
          })
      for i in range(self._num_records):
        self.assertAllEqual(self._record(0, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)

      # Basic test: read from file 1.
      sess.run(
          self.init_op,
          feed_dict={
              self.filenames: [self.test_filenames[1]],
              self.num_epochs: 1
          })
      for i in range(self._num_records):
        self.assertAllEqual(self._record(1, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)

      # Basic test: read from both files.
      sess.run(
          self.init_op,
          feed_dict={self.filenames: self.test_filenames,
                     self.num_epochs: 1})
      for j in range(self._num_files):
        for i in range(self._num_records):
          self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)
def testReadTenEpochs(self):
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
  def testReadTenEpochsOfBatches(self):
    """Batched reads: each sess.run yields exactly one file's records."""
    with self.test_session() as sess:
      sess.run(
          self.init_batch_op,
          feed_dict={
              self.filenames: self.test_filenames,
              self.num_epochs: 10,
              self.batch_size: self._num_records
          })
      for _ in range(10):
        for j in range(self._num_files):
          values = sess.run(self.get_next)
          self.assertAllEqual(
              [self._record(j, i) for i in range(self._num_records)], values)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)
  def testReadZlibFiles(self):
    """Compresses the fixture files with zlib and reads them back."""
    zlib_files = []
    for i, fn in enumerate(self.test_filenames):
      with open(fn, "rb") as f:
        cdata = zlib.compress(f.read())

        zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
        with open(zfn, "wb") as f:
          f.write(cdata)
        zlib_files.append(zfn)

    with self.test_session() as sess:
      sess.run(
          self.init_op,
          feed_dict={self.filenames: zlib_files,
                     self.compression_type: "ZLIB"})
      for j in range(self._num_files):
        for i in range(self._num_records):
          self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)
  def testReadGzipFiles(self):
    """Compresses the fixture files with gzip and reads them back."""
    gzip_files = []
    for i, fn in enumerate(self.test_filenames):
      with open(fn, "rb") as f:
        gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
        with gzip.GzipFile(gzfn, "wb") as gzf:
          gzf.write(f.read())
        gzip_files.append(gzfn)

    with self.test_session() as sess:
      sess.run(
          self.init_op,
          feed_dict={self.filenames: gzip_files,
                     self.compression_type: "GZIP"})
      for j in range(self._num_files):
        for i in range(self._num_records):
          self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.get_next)
def testReadWithBuffer(self):
one_mebibyte = 2**20
d = readers.TFRecordDataset(self.test_filenames, buffer_size=one_mebibyte)
iterator = d.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkVirtualAppliancesOperations(object):
"""NetworkVirtualAppliancesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Wires the operation group to the shared pipeline client, config and (de)serializers."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        network_virtual_appliance_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified Network Virtual Appliance.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_virtual_appliance_name: The name of Network Virtual Appliance.
        :type network_virtual_appliance_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming a saved poller.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_virtual_appliance_name=network_virtual_appliance_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # DELETE returns no body; only invoke the caller's cls hook.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # The service signals LRO completion via the 'location' header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        network_virtual_appliance_name,  # type: str
        expand=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.NetworkVirtualAppliance"
        """Gets the specified Network Virtual Appliance.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_virtual_appliance_name: The name of Network Virtual Appliance.
        :type network_virtual_appliance_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkVirtualAppliance, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_08_01.models.NetworkVirtualAppliance
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkVirtualAppliance"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # '$expand' is only sent when the caller asked for expansion.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'}  # type: ignore
    def update_tags(
        self,
        resource_group_name,  # type: str
        network_virtual_appliance_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.NetworkVirtualAppliance"
        """Updates a Network Virtual Appliance.

        :param resource_group_name: The resource group name of Network Virtual Appliance.
        :type resource_group_name: str
        :param network_virtual_appliance_name: The name of Network Virtual Appliance being updated.
        :type network_virtual_appliance_name: str
        :param parameters: Parameters supplied to Update Network Virtual Appliance Tags.
        :type parameters: ~azure.mgmt.network.v2020_08_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkVirtualAppliance, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_08_01.models.NetworkVirtualAppliance
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkVirtualAppliance"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update_tags.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Tags-only payload sent via PATCH.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'}  # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
parameters, # type: "_models.NetworkVirtualAppliance"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkVirtualAppliance"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkVirtualAppliance')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        network_virtual_appliance_name,  # type: str
        parameters,  # type: "_models.NetworkVirtualAppliance"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.NetworkVirtualAppliance"]
        """Creates or updates the specified Network Virtual Appliance.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_virtual_appliance_name: The name of Network Virtual Appliance.
        :type network_virtual_appliance_name: str
        :param parameters: Parameters supplied to the create or update Network Virtual Appliance.
        :type parameters: ~azure.mgmt.network.v2020_08_01.models.NetworkVirtualAppliance
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either NetworkVirtualAppliance or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.NetworkVirtualAppliance]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop the LRO-specific options first so the remaining kwargs can be
        # forwarded untouched to the initial PUT request below.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkVirtualAppliance"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial request.  The lambda
            # passed as 'cls' makes the initial call return the raw pipeline
            # response, which the poller needs to drive the operation.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_virtual_appliance_name=network_virtual_appliance_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These keys were only meaningful for the initial request; drop them
        # so they are not forwarded to the polling method constructed below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the model type, or
            # hand the raw response to the caller-supplied 'cls' callback.
            deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # polling may be True (default ARM poller), False (no polling) or a
        # caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume an existing poller from a saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'}  # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkVirtualApplianceListResult"]
"""Lists all Network Virtual Appliances in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkVirtualApplianceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.NetworkVirtualApplianceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualApplianceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkVirtualApplianceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkVirtualApplianceListResult"]
"""Gets all Network Virtual Appliances in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkVirtualApplianceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.NetworkVirtualApplianceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualApplianceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkVirtualApplianceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualAppliances'} # type: ignore
|
|
import unittest
from _pydev_imps import _pydev_thread
try:
import Queue
except:
import queue as Queue #@UnresolvedImport
from _pydevd_bundle.pydevd_constants import * #@UnusedWildImport
from _pydev_runfiles import pydev_runfiles_xml_rpc
import time
import os
#=======================================================================================================================
# flatten_test_suite
#=======================================================================================================================
def flatten_test_suite(test_suite, ret):
    """Recursively collect every TestCase nested inside *test_suite* into *ret*.

    Suites may nest arbitrarily deep; objects that are neither a TestSuite
    nor a TestCase are silently ignored.  Results are appended to *ret* in
    the order the suite yields them.
    """
    if isinstance(test_suite, unittest.TestCase):
        ret.append(test_suite)
    elif isinstance(test_suite, unittest.TestSuite):
        for child in test_suite._tests:
            flatten_test_suite(child, ret)
#=======================================================================================================================
# execute_tests_in_parallel
#=======================================================================================================================
def execute_tests_in_parallel(tests, jobs, split, verbosity, coverage_files, coverage_include):
    '''
    @param tests: list(PydevTestSuite)
        A list with the suites to be run

    @param jobs: int
        Number of parallel client processes to spawn.

    @param split: str
        Either 'module' or 'tests': controls whether one batch holds all the
        tests of a module or a single test.

    @param verbosity: int
        Verbosity level forwarded to each client process.

    @param coverage_files: list(file)
        A list with the files that should be used for giving coverage information (if empty, coverage information
        should not be gathered).

    @param coverage_include: str
        The pattern that should be included in the coverage.

    @return: bool
        Returns True if the tests were actually executed in parallel. If the tests were not executed because only 1
        should be used (e.g.: 2 jobs were requested for running 1 test), False will be returned and no tests will be
        run.

        It may also return False if in debug mode (in which case, multi-processes are not accepted)
    '''
    try:
        # Parallel runs are disabled while debugging: subprocesses would not
        # be attached to the debugger.
        from _pydevd_bundle.pydevd_comm import get_global_debugger
        if get_global_debugger() is not None:
            return False
    except:
        pass #Ignore any error here.

    #This queue will receive the tests to be run. Each entry in a queue is a list with the tests to be run together When
    #split == 'tests', each list will have a single element, when split == 'module', each list will have all the tests
    #from a given module.
    tests_queue = []

    queue_elements = []
    if split == 'module':
        # Group the flattened test cases by their (file, module) pair so each
        # batch contains every test of one module.
        module_to_tests = {}
        for test in tests:
            lst = []
            flatten_test_suite(test, lst)
            for test in lst:
                key = (test.__pydev_pyfile__, test.__pydev_module_name__)
                module_to_tests.setdefault(key, []).append(test)

        for key, tests in module_to_tests.items():
            queue_elements.append(tests)

        if len(queue_elements) < jobs:
            #Don't create jobs we will never use.
            jobs = len(queue_elements)

    elif split == 'tests':
        # One batch per individual test case.
        for test in tests:
            lst = []
            flatten_test_suite(test, lst)
            for test in lst:
                queue_elements.append([test])

        if len(queue_elements) < jobs:
            #Don't create jobs we will never use.
            jobs = len(queue_elements)

    else:
        raise AssertionError('Do not know how to handle: %s' % (split,))

    # Convert each batch of test cases into the 'filename|Class.testName'
    # wire format understood by the client processes.
    for test_cases in queue_elements:
        test_queue_elements = []
        for test_case in test_cases:
            try:
                test_name = test_case.__class__.__name__+"."+test_case._testMethodName
            except AttributeError:
                #Support for jython 2.1 (__testMethodName is pseudo-private in the test case)
                test_name = test_case.__class__.__name__+"."+test_case._TestCase__testMethodName

            test_queue_elements.append(test_case.__pydev_pyfile__+'|'+test_name)

        tests_queue.append(test_queue_elements)

    if jobs < 2:
        # Not enough work to justify parallelism; caller falls back to a
        # sequential run.
        return False

    sys.stdout.write('Running tests in parallel with: %s jobs.\n' %(jobs,))

    queue = Queue.Queue()
    for item in tests_queue:
        queue.put(item, block=False)

    # One CommunicationThread (XML-RPC provider) + one ClientThread
    # (subprocess launcher) per job.
    providers = []
    clients = []
    for i in range(jobs):
        test_cases_provider = CommunicationThread(queue)
        providers.append(test_cases_provider)

        test_cases_provider.start()
        port = test_cases_provider.port

        if coverage_files:
            # Each client consumes one coverage output file from the list.
            clients.append(ClientThread(i, port, verbosity, coverage_files.pop(0), coverage_include))
        else:
            clients.append(ClientThread(i, port, verbosity))

    for client in clients:
        client.start()

    # Poll until every client process has finished.
    client_alive = True
    while client_alive:
        client_alive = False
        for client in clients:
            #Wait for all the clients to exit.
            if not client.finished:
                client_alive = True
                time.sleep(.2)
                break

    for provider in providers:
        provider.shutdown()

    return True
#=======================================================================================================================
# CommunicationThread
#=======================================================================================================================
class CommunicationThread(threading.Thread):
    """XML-RPC server thread that hands out batches of tests to one client
    job and relays the client's notifications back through
    pydev_runfiles_xml_rpc.
    """

    def __init__(self, tests_queue):
        # tests_queue: shared Queue.Queue whose entries are lists of
        # 'filename|TestClass.testName' strings (one batch per get()).
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.queue = tests_queue
        self.finished = False
        from _pydev_bundle.pydev_imports import SimpleXMLRPCServer

        # This is a hack to patch slow socket.getfqdn calls that
        # BaseHTTPServer (and its subclasses) make.
        # See: http://bugs.python.org/issue6085
        # See: http://www.answermysearches.com/xmlrpc-server-slow-in-python-how-to-fix/2140/
        try:
            import BaseHTTPServer
            def _bare_address_string(self):
                host, port = self.client_address[:2]
                return '%s' % host
            BaseHTTPServer.BaseHTTPRequestHandler.address_string = _bare_address_string
        except:
            pass
        # End hack.

        # Create server on localhost with an OS-assigned free port; the
        # chosen port is published via self.port for the client process.
        from _pydev_bundle import pydev_localhost
        server = SimpleXMLRPCServer((pydev_localhost.get_localhost(), 0), logRequests=False)

        server.register_function(self.GetTestsToRun)
        server.register_function(self.notifyStartTest)
        server.register_function(self.notifyTest)
        server.register_function(self.notifyCommands)
        self.port = server.socket.getsockname()[1]
        self.server = server

    def GetTestsToRun(self, job_id):
        '''
        @param job_id:

        @return: list(str)
            Each entry is a string in the format: filename|Test.testName
        '''
        try:
            ret = self.queue.get(block=False)
            return ret
        except: #Any exception getting from the queue (empty or not) means we finished our work on providing the tests.
            self.finished = True
            return []

    def notifyCommands(self, job_id, commands):
        #Batch notification: each command is (method_name, args, kwargs) and is
        #dispatched to the corresponding notify* method on this instance.
        for command in commands:
            getattr(self, command[0])(job_id, *command[1], **command[2])

        return True

    def notifyStartTest(self, job_id, *args, **kwargs):
        # Forward to the IDE-side XML-RPC notification channel.
        pydev_runfiles_xml_rpc.notifyStartTest(*args, **kwargs)
        return True

    def notifyTest(self, job_id, *args, **kwargs):
        pydev_runfiles_xml_rpc.notifyTest(*args, **kwargs)
        return True

    def shutdown(self):
        # Newer SimpleXMLRPCServer has a shutdown(); otherwise fall back to
        # the flag checked by the handle_request loop in run().
        if hasattr(self.server, 'shutdown'):
            self.server.shutdown()
        else:
            self._shutdown = True

    def run(self):
        if hasattr(self.server, 'shutdown'):
            self.server.serve_forever()
        else:
            self._shutdown = False
            while not self._shutdown:
                self.server.handle_request()
#=======================================================================================================================
# Client
#=======================================================================================================================
class ClientThread(threading.Thread):
    """Launches one pydev_runfiles_parallel_client subprocess and waits for it.

    The subprocess connects back to the CommunicationThread XML-RPC server on
    *port* to pull the test batches it should run.  self.finished is set when
    the subprocess exits (always, via finally), which is what
    execute_tests_in_parallel polls on.
    """

    def __init__(self, job_id, port, verbosity, coverage_output_file=None, coverage_include=None):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.port = port
        self.job_id = job_id
        self.verbosity = verbosity
        self.finished = False
        self.coverage_output_file = coverage_output_file
        self.coverage_include = coverage_include

    def _reader_thread(self, pipe, target):
        # Relay the subprocess's output to our own stream, one byte at a time.
        while True:
            target.write(pipe.read(1))

    def run(self):
        try:
            from _pydev_runfiles import pydev_runfiles_parallel_client
            #TODO: Support Jython:
            #
            #For jython, instead of using sys.executable, we should use:
            #r'D:\bin\jdk_1_5_09\bin\java.exe',
            #'-classpath',
            #'D:/bin/jython-2.2.1/jython.jar',
            #'org.python.util.jython',

            args = [
                sys.executable,
                pydev_runfiles_parallel_client.__file__,
                str(self.job_id),
                str(self.port),
                str(self.verbosity),
            ]

            # Coverage arguments are only forwarded when both are available.
            if self.coverage_output_file and self.coverage_include:
                args.append(self.coverage_output_file)
                args.append(self.coverage_include)

            import subprocess
            if False:
                # Debug-only branch (flip 'if False' to capture the client's
                # output through reader threads instead of inheriting stdio).
                proc = subprocess.Popen(args, env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

                # Fixed: start_new_thread takes positional (function, args);
                # the previous keyword form (target=..., args=...) raised
                # TypeError and was inconsistent with the stdout call above.
                _pydev_thread.start_new_thread(self._reader_thread, (proc.stdout, sys.stdout))
                _pydev_thread.start_new_thread(self._reader_thread, (proc.stderr, sys.stderr))
            else:
                proc = subprocess.Popen(args, env=os.environ, shell=False)
            proc.wait()
        finally:
            self.finished = True
|
|
#!/usr/bin/python
__author__ = 'ejk'
''' The bootstrap-salt.sh script here is a direct copy of github.bom/saltstack/salt-bootstrap
you can find the authors of that script here
https://github.com/saltstack/salt-bootstrap/blob/develop/AUTHORS.rst
all credit to them for that fine piece of work'''
import subprocess
from os import path, system
from time import sleep
import glob

# Interactive VIRL installer (Python 2 script: raw_input / print statements).
# Defaults for the salt minion configuration; each menu option below lets the
# operator override one of these values before salt is bootstrapped.
salt_master = 'salt-master.cisco.com'
salt_name = 'virl'
salt_append_domain = 'virl.info'
while_exit = 0  # set to 1 by menu option 11 to leave the main loop
cwd = path.realpath('./')
proxy = 'None'  # the literal string 'None' means "no proxy configured"
hostname = 'virl'
domain = 'virl.info'

# Detect Ubuntu 16.04 (xenial): grep exits 0 when the release string matches.
if system('grep 16.04 /etc/issue.net') == 0:
    xenial = True
else:
    xenial = False

# nano lives in /bin on xenial, /usr/bin on older releases.
if xenial:
    nano_path = '/bin/nano'
else:
    nano_path = '/usr/bin/nano'
# Main interactive menu loop: print the menu, read one numbered choice and
# perform the matching step.  The choices are independent `if` statements
# (not elif), but each iteration handles exactly one integer choice.
while not while_exit:
    print (30 * '-')
    print (" V I R L - I N S T A L L - M E N U")
    print (30 * '-')
    print ("1. Change salt master from {0} ".format(salt_master))
    print ("2. Change salt id from {0} or salt domain from {1}".format(salt_name, salt_append_domain))
    print ("3. Change hostname from {0} or domain name {1}".format(hostname, domain))
    print ("4. Write out extra.conf")
    print ("5. Change http proxy from {0}".format(proxy))
    print ("6. install salt without preseed keys")
    print ("7. install salt with preseed keys in {0}".format(cwd + '/preseed_keys'))
    print ("8. Test if you are connected to salt-master")
    print ("9. Install virl installer and settings")
    print ("10. Edit /etc/virl.ini")
    print ("11. Exit")
    print (30 * '-')
    choice = raw_input('Which step are you on [1-11] : ')
    choice = int(choice)
    if choice == 1:
        # Empty input selects the hard-coded default, not the current value.
        salt_master = raw_input('Salt master [%s] ' % salt_master) or 'salt-master.cisco.com'
    if choice == 2:
        salt_name = raw_input('Salt id [%s] ' % salt_name) or 'virl'
        salt_append_domain = raw_input('Salt domain name [%s] ' % salt_append_domain) or 'virl.info'
    if choice == 3:
        hostname = raw_input('System hostname [%s] ' % hostname) or 'virl'
        domain = raw_input('System Domain name [%s] ' % domain) or 'virl.info'
    if choice == 4:
        # Write the minion configuration to /etc/salt/minion.d/extra.conf.
        if not path.exists('/etc/salt/virl'):
            subprocess.check_output(['mkdir', '-p', '/etc/salt/virl'])
        if not path.exists('/etc/salt/minion.d'):
            subprocess.check_output(['mkdir', '-p', '/etc/salt/minion.d'])
        with open(("/etc/salt/minion.d/extra.conf"), "w") as extra:
            extra.write("""master: [{salt_master}]\n""".format(salt_master=salt_master))
            extra.write("""id: {salt_name}\n""".format(salt_name=salt_name))
            extra.write("""append_domain: {salt_append_domain}\n""".format(salt_append_domain=salt_append_domain))
            if salt_master == 'masterless':
                # Masterless minions pull states straight from the git fileserver.
                extra.write("""file_client: local
fileserver_backend:
- git
- roots
gitfs_provider: Dulwich
gitfs_remotes:
- https://github.com/Snergster/virl-salt.git\n""")
            else:
                if len(salt_master.split(',')) >= 2:
                    # Comma-separated list of masters: configure failover.
                    extra.write("""master_type: failover \n""")
                    extra.write("""verify_master_pubkey_sign: True \n""")
                    extra.write("""master_shuffle: True \n""")
                    extra.write("""master_alive_interval: 180 \n""")
        ##TODO waiting for salt to put this back in
        # extra.write("""grains_dirs:\n""")
        # extra.write("""  - /etc/salt/virl\n""")
        with open(("/etc/salt/minion.d/stateagg.conf"), "w") as stateagg:
            stateagg.write("""state_aggregate: False \n""")
    if choice == 5:
        proxy = raw_input('Http proxy [%s] ' % proxy) or 'None'
        if not proxy == 'None':
            if not path.exists('/etc/salt'):
                subprocess.check_output(['mkdir', '-p', '/etc/salt'])
            with open(("/etc/salt/grains"), "w") as grains:
                grains.write("""proxy: True\n""")
                grains.write("""http_proxy: {proxy}\n""".format(proxy=proxy))
        else:
            with open(("/etc/salt/grains"), "w") as grains:
                grains.write("""proxy: False\n""")
    if choice == 6:
        # Bootstrap salt without preseeded minion keys.
        subprocess.call(['mkdir', '-p','/etc/salt/pki/minion'])
        subprocess.call(['cp', './master_sign.pub', '/etc/salt/pki/minion'])
        if salt_master == 'masterless':
            subprocess.call(['git', 'clone', '--depth', '1', 'https://github.com/Snergster/virl-salt.git', '/srv/salt'])
            if not proxy == 'None':
                subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-P', '-H', '{proxy}'.format(proxy=proxy), '-X', '-P', 'stable'])
            else:
                subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-X', '-P', 'stable'])
        else:
            if not proxy == 'None':
                subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-P', '-H', '{proxy}'.format(proxy=proxy), '-X', '-P', 'stable'])
            else:
                subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-P', 'stable'])
    if choice == 7:
        # Bootstrap salt reusing preseeded minion keys from ./preseed_keys.
        subprocess.call(['mkdir', '-p','/etc/salt/pki/minion'])
        subprocess.call(['cp', './master_sign.pub', '/etc/salt/pki/minion'])
        subprocess.call(['rm', '-f', './preseed_keys/minion.pem'])
        # Take whatever *.pem is present as the minion key, then derive the
        # public key from it with openssl.
        for file in glob.glob(r'{0}/preseed_keys/*.pem'.format(cwd)):
            subprocess.call(['cp', '{0}'.format(file), '{0}/preseed_keys/minion.pem'.format(cwd)])
        subprocess.call('openssl rsa -in {0}/preseed_keys/minion.pem -pubout > {0}/preseed_keys/minion.pub'.format(cwd), shell=True)
        subprocess.call(['cp', '-f', '{0}/preseed_keys/minion.pem'.format(cwd), '/etc/salt/pki/minion/minion.pem'])
        subprocess.call(['cp', '-f', '{0}/preseed_keys/minion.pub'.format(cwd), '/etc/salt/pki/minion/minion.pub'])
        subprocess.call(['chmod', '400', '/etc/salt/pki/minion/minion.pem'])
        if salt_master == 'masterless':
            subprocess.call(['git', 'clone', 'https://github.com/Snergster/virl-salt.git', '/srv/salt'])
            if not proxy == 'None':
                subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-P', '-H', '{proxy}'.format(proxy=proxy), '-X', '-P', 'stable'])
            else:
                subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-X', '-P', 'stable'])
        else:
            if not proxy == 'None':
                subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh','-P', '-H', '{proxy}'.format(proxy=proxy), '-X', '-P', 'stable'])
            else:
                subprocess.call(['sh', '/home/virl/virl-bootstrap/bootstrap-salt.sh', '-P', 'stable'])
    if choice == 8:
        if salt_master == 'masterless':
            print "Running in masterless mode skipping ping."
        else:
            subprocess.call(['salt-call', 'test.ping'])
    if choice == 9:
        # Record the OpenStack release as grains, then apply the bootstrap
        # ('zero') states either locally (masterless) or against the master.
        if xenial:
            subprocess.call(['salt-call', '--local', 'grains.setval', 'mitaka', 'true'])
            subprocess.call(['salt-call', '--local', 'grains.setval', 'kilo', 'false'])
        else:
            subprocess.call(['salt-call', '--local', 'grains.setval', 'kilo', 'true'])
        if salt_master == 'masterless':
            if xenial:
                subprocess.call(['salt-call', '--local', 'state.sls', 'common.xenial-pip'])
            subprocess.call(['salt-call', '--local', 'state.sls', 'zero'])
        else:
            if xenial:
                subprocess.call(['salt-call', '-l', 'debug', 'state.sls', 'common.xenial-pip'])
            subprocess.call(['salt-call', '-l', 'debug', 'state.sls', 'zero'])
    if choice == 10:
        # Persist the collected answers into /etc/virl.ini via crudini, then
        # open the file in an editor for a final manual review.
        if not path.exists('/etc/virl.ini'):
            subprocess.call(['cp', './vsettings.ini', '/etc/virl.ini'])
        subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                         'salt_master', salt_master])
        subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                         'salt_id', salt_name])
        subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                         'salt_domain', salt_append_domain])
        if salt_master == 'masterless':
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'salt_masterless', 'true'])
        if not proxy == 'None':
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'proxy', 'True'])
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'http_proxy', proxy])
        else:
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'proxy', 'False'])
        if not hostname == 'virl' or not domain == 'virl.info':
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'hostname', hostname ])
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'domain_name', domain])
        if xenial:
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'mitaka', 'True'])
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'kilo', 'False'])
        else:
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'mitaka', 'False'])
            subprocess.call(['crudini', '--set','/etc/virl.ini', 'DEFAULT',
                             'kilo', 'True'])
        subprocess.call([nano_path, '/etc/virl.ini'])
    if choice == 11:
        # Final step: apply the core salt states and exit the menu loop.
        if path.isfile('/etc/salt/grains'):
            subprocess.call(['rm', '/etc/salt/grains'])
        subprocess.call(['/usr/local/bin/vinstall', 'salt'])
        sleep(5)
        subprocess.call(['salt-call', '-l', 'debug', 'state.sls', 'common.virl'])
        subprocess.call(['salt-call', '-l', 'debug', 'state.sls', 'virl.basics'])
        subprocess.call(['salt-call', '-l', 'debug', 'saltutil.sync_all'])
        subprocess.call(['salt-call', '-l', 'debug', 'state.sls', 'common.virl,virl.web'])
        subprocess.call(['/usr/local/bin/vinstall', 'salt'])
        subprocess.call(['salt-call', '-l', 'debug', 'state.sls', 'virl.openrc'])
        if xenial:
            subprocess.call(['salt-call', '-l', 'debug', 'state.sls', 'common.xenial-bootstrap'])
        print 'Please validate the contents of /etc/network/interfaces before rebooting!'
        while_exit = 1
|
|
#! /usr/bin/env python
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from pkg_resources import resource_filename
import nose
from nose.tools import assert_equal, assert_equals, assert_false, \
assert_true, assert_is_instance
import numpy as np
import sknano.core.atoms
from sknano.core.atoms import StructureAtom, StructureAtoms
from sknano.generators import SWNTGenerator
from sknano.io import DATAReader
from sknano.structures import compute_Natoms
from sknano.testing import AtomsTestFixture, generate_atoms
class TestCase(AtomsTestFixture):
    """Tests for StructureAtom/StructureAtoms neighbor analysis and POAVs.

    Consistency fix: the module previously mixed ``assert_equals`` (a
    deprecated nose.tools alias) with ``assert_equal``; all assertions now
    use ``assert_equal``.
    """

    def test1(self):
        """CN defaults to 0, is settable, and equals kNN after update_attrs."""
        atoms = self.atoms
        atoms.kNN = 6
        atoms.NNrc = 9.0
        for atom in atoms:
            assert_equal(atom.CN, 0)
            atom.CN = 3
            assert_equal(atom.CN, 3)

        atoms.update_attrs()
        # Keep only interior atoms so edge effects don't lower the CN.
        atoms = atoms.filtered((atoms.z >= -5) & (atoms.z <= 5))
        print('Natoms: {}'.format(atoms.Natoms))
        for atom in atoms:
            assert_equal(atom.CN, atoms.kNN)

    def test2(self):
        """Smoke test: bond lengths are available after update_attrs."""
        atoms = self.atoms
        atoms.kNN = 3
        atoms.NNrc = 2.0
        atoms.update_attrs()
        print(atoms.ids)
        for atom in atoms:
            print('atom: {}, bond.lengths: {}'.format(
                atom.id, atom.bonds.lengths))

    def test3(self):
        """Copying atoms preserves the kNN attribute."""
        atoms = generate_atoms(elements='periodic_table')
        atoms.assign_unique_ids()
        atoms.kNN = 3
        atoms_cp = atoms.copy()
        assert_equal(atoms.kNN, atoms_cp.kNN)

    def test4(self):
        """Generated SWNT atom count matches compute_Natoms and the last id."""
        atoms = \
            generate_atoms(generator_class='SWNTGenerator', n=3, m=0, nz=5)
        assert_equal(compute_Natoms((3, 0), nz=5), atoms.Natoms)
        assert_equal(atoms.Natoms, atoms.ids[-1])

    def test5(self):
        """atom_tree mirrors coords; neighbor/CN lists cover every atom."""
        atoms = self.atoms
        assert_true(np.allclose(atoms.coords, atoms.atom_tree.data))
        atoms.kNN = 3
        atoms.NNrc = 2.0
        atoms.update_attrs()
        assert_equal(len(atoms.nearest_neighbors), atoms.Natoms)
        assert_equal(len(atoms.coordination_numbers), atoms.Natoms)

    def test6(self):
        """coordination_counts agrees with filtering on coordination_numbers."""
        atoms = self.atoms
        atoms.update_attrs()
        assert_equal(atoms.filtered(atoms.coordination_numbers == 1).Natoms,
                     atoms.coordination_counts[1])
        assert_equal(atoms.filtered(atoms.coordination_numbers == 3).Natoms,
                     atoms.coordination_counts[3])

    def test7(self):
        """Coordination numbers equal neighbor counts within the 2.0 cutoff."""
        atoms = self.atoms
        atoms.update_attrs()
        assert_true(np.allclose(atoms.coordination_numbers,
                                atoms.neighbor_counts(2.0)))

    def test8(self):
        """Bond lengths and neighbor distances are consistent."""
        atoms = self.atoms
        atoms.update_attrs()
        assert_true(np.allclose(atoms.bonds.lengths,
                                atoms.neighbor_distances))

    def test9(self):
        """Atoms volume matches the bounding-region volume."""
        atoms = self.atoms
        atoms.update_attrs()
        assert_true(np.allclose(atoms.volume, atoms.bounds.volume))

    def test10(self):
        """A standalone StructureAtom has a settable CN starting at 0."""
        atom = StructureAtom(element='C')
        assert_equal(atom.CN, 0)
        atom.CN = 3
        assert_equal(atom.CN, 3)

    def test_list_methods(self):
        """Sorting by Z makes a descending-built list equal an ascending one."""
        atoms1 = StructureAtoms()
        for Z in range(100, 0, -1):
            atoms1.append(StructureAtom(Z=Z))
        atoms1.sort(key=lambda a: a.Z)
        atoms2 = StructureAtoms()
        for Z in range(1, 101):
            atoms2.append(StructureAtom(Z=Z))
        assert_equal(atoms1, atoms2)

    def test_atom_bonds(self):
        """Per-atom bond vectors/lengths agree with the aggregate bonds."""
        atoms = \
            generate_atoms(generator_class='SWNTGenerator', n=10, m=5, nz=1)
        atoms.kNN = 3
        atoms.NNrc = 2.0
        atoms.compute_POAVs()
        bonds = atoms.bonds
        assert_equal(len(bonds), atoms.coordination_numbers.sum())
        assert_equal(bonds.Nbonds, atoms.coordination_numbers.sum())

        for i, atom in enumerate(atoms):
            if atom.bonds.Nbonds > 1:
                print('atom.bonds.angles:\n'
                      '{}'.format(np.degrees(atom.bonds.angles)))
                for j, bond in enumerate(atom.bonds):
                    assert_true(np.allclose(bond.vector,
                                            atom.bonds.vectors[j]))
                    assert_equal(bond.length, atom.bonds.lengths[j])

    def test_structure_data(self):
        """Generated SWNT and the reference LAMMPS data file agree on Natoms."""
        fname = resource_filename('sknano', 'data/nanotubes/1005_5cells.data')
        swnt = SWNTGenerator(n=10, m=5, nz=5)
        swnt_atoms = swnt.atoms
        swnt_atoms.compute_POAVs()
        data = DATAReader(fname)
        atoms = data.atoms
        atoms.compute_POAVs()
        assert_equal(swnt_atoms.Natoms, atoms.Natoms)

    def test_POAVs(self):
        """POAV1/POAV2/POAVR angles exist and are not all-NaN for a 3-CN atom."""
        atoms = \
            generate_atoms(generator_class='SWNTGenerator', n=5, m=0, nz=5)
        atoms.compute_POAVs()
        atoms.filter(atoms.coordination_numbers == 3)
        atom = atoms[10]
        assert_equal(atom.bonds.Nbonds, 3)
        for POAV in ('POAV1', 'POAV2', 'POAVR'):
            atom_POAV = getattr(atom, POAV)
            assert_is_instance(atom_POAV, getattr(sknano.core.atoms, POAV))

            sigma_pi_angles = np.degrees(atom_POAV.sigma_pi_angles)
            assert_false(np.all(np.isclose(sigma_pi_angles, 3 * [np.nan],
                                           equal_nan=True)))
            pyramidalization_angles = \
                np.degrees(atom_POAV.pyramidalization_angles)
            assert_false(np.all(np.isclose(pyramidalization_angles,
                                           3 * [np.nan],
                                           equal_nan=True)))
            misalignment_angles = \
                np.degrees(atom_POAV.misalignment_angles)
            assert_false(np.all(np.isclose(misalignment_angles,
                                           3 * [np.nan],
                                           equal_nan=True)))
            print(getattr(atom, POAV))

    def test_POAV_angles(self):
        """POAV angle arrays are defined (not all-NaN) for every atom with a POAV."""
        atoms = \
            generate_atoms(generator_class='SWNTGenerator', n=10, m=0, nz=2)
        atoms.assign_unique_ids()
        atoms.compute_POAVs()

        for i, atom in enumerate(atoms):
            print('atom{}: {}'.format(atom.id, atom))
            for POAV in ('POAV1', 'POAV2', 'POAVR'):
                if getattr(atom, POAV) is not None:
                    atom_POAV = getattr(atom, POAV)
                    sigma_pi_angles = np.degrees(atom_POAV.sigma_pi_angles)
                    assert_false(np.all(np.isclose(sigma_pi_angles,
                                                   3 * [np.nan],
                                                   equal_nan=True)))
                    print('atom{}.{}.sigma_pi_angles:\n{}'.format(
                        atom.id, POAV, sigma_pi_angles))
                    pyramidalization_angles = \
                        np.degrees(atom_POAV.pyramidalization_angles)
                    print('atom{}.{}.pyramidalization_angles:\n{}'.format(
                        atom.id, POAV, pyramidalization_angles))
                    assert_false(np.all(np.isclose(pyramidalization_angles,
                                                   3 * [np.nan],
                                                   equal_nan=True)))
                    misalignment_angles = \
                        np.degrees(atom_POAV.misalignment_angles)
                    print('atom{}.{}.misalignment_angles:\n{}\n'.format(
                        atom.id, POAV, misalignment_angles))
                    assert_false(np.all(np.isclose(misalignment_angles,
                                                   3 * [np.nan],
                                                   equal_nan=True)))
if __name__ == '__main__':
    # Allow running this test module directly via nose.
    nose.runmodule()
|
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
pytest fixtures for the entity test suite.
"""
from pytest import fixture # pylint: disable=E0611
from pytest import yield_fixture # pylint: disable=E0611
from everest.entities.utils import get_root_aggregate
from everest.entities.utils import slug_from_string
from everest.repositories.rdb.session import ScopedSessionMaker as Session
from everest.repositories.rdb.testing import RdbContextManager
from thelma.tools.semiconstants import ITEM_STATUS_NAMES
from thelma.tools.semiconstants import PIPETTING_SPECS_NAMES
from thelma.tools.semiconstants import RESERVOIR_SPECS_NAMES
from thelma.tools.iso.lab.base import DILUENT_INFO
from thelma.interfaces import IChemicalStructureType
from thelma.interfaces import IExperimentMetadataType
from thelma.interfaces import IItemStatus
from thelma.interfaces import IMoleculeType
from thelma.interfaces import IOrganization
from thelma.interfaces import IPipettingSpecs
from thelma.interfaces import IPlateSpecs
from thelma.interfaces import IRackShape
from thelma.interfaces import IReservoirSpecs
from thelma.interfaces import ISpecies
from thelma.interfaces import ITubeRackSpecs
from thelma.interfaces import ITubeSpecs
from thelma.interfaces import IUser
from thelma.entities.chemicalstructure import CHEMICAL_STRUCTURE_TYPE_IDS
from thelma.entities.chemicalstructure import CompoundChemicalStructure
from thelma.entities.chemicalstructure import NucleicAcidChemicalStructure
from thelma.entities.container import TubeLocation
from thelma.entities.container import Tube
from thelma.entities.container import TubeSpecs
from thelma.entities.container import WellSpecs
from thelma.entities.device import Device
from thelma.entities.device import DeviceType
from thelma.entities.experiment import EXPERIMENT_METADATA_TYPES
from thelma.entities.experiment import Experiment
from thelma.entities.experiment import ExperimentDesign
from thelma.entities.experiment import ExperimentDesignRack
from thelma.entities.experiment import ExperimentMetadata
from thelma.entities.experiment import ExperimentRack
from thelma.entities.gene import Gene
from thelma.entities.iso import IsoAliquotPlate
from thelma.entities.iso import IsoJobPreparationPlate
from thelma.entities.iso import IsoJobStockRack
from thelma.entities.iso import IsoPreparationPlate
from thelma.entities.iso import IsoSectorPreparationPlate
from thelma.entities.iso import IsoSectorStockRack
from thelma.entities.iso import IsoStockRack
from thelma.entities.iso import LabIso
from thelma.entities.iso import LabIsoRequest
from thelma.entities.iso import StockSampleCreationIso
from thelma.entities.iso import StockSampleCreationIsoRequest
from thelma.entities.job import ExperimentJob
from thelma.entities.job import IsoJob
from thelma.entities.library import LibraryPlate
from thelma.entities.library import MoleculeDesignLibrary
from thelma.entities.liquidtransfer import ExecutedRackSampleTransfer
from thelma.entities.liquidtransfer import ExecutedSampleDilution
from thelma.entities.liquidtransfer import ExecutedSampleTransfer
from thelma.entities.liquidtransfer import ExecutedWorklist
from thelma.entities.liquidtransfer import PipettingSpecs
from thelma.entities.liquidtransfer import PlannedRackSampleTransfer
from thelma.entities.liquidtransfer import PlannedSampleDilution
from thelma.entities.liquidtransfer import PlannedSampleTransfer
from thelma.entities.liquidtransfer import PlannedWorklist
from thelma.entities.liquidtransfer import ReservoirSpecs
from thelma.entities.liquidtransfer import TRANSFER_TYPES
from thelma.entities.liquidtransfer import WorklistSeries
from thelma.entities.liquidtransfer import WorklistSeriesMember
from thelma.entities.location import BarcodedLocation
from thelma.entities.location import BarcodedLocationType
from thelma.entities.moleculedesign import CompoundDesign
from thelma.entities.moleculedesign import MoleculeDesignPool
from thelma.entities.moleculedesign import MoleculeDesignPoolSet
from thelma.entities.moleculedesign import MoleculeDesignSet
from thelma.entities.moleculedesign import SiRnaDesign
from thelma.entities.moleculetype import MoleculeType
from thelma.entities.organization import Organization
from thelma.entities.project import Project
from thelma.entities.rack import Plate
from thelma.entities.rack import PlateSpecs
from thelma.entities.rack import RackPosition
from thelma.entities.rack import RackPositionSet
from thelma.entities.rack import TubeRack
from thelma.entities.rack import TubeRackSpecs
from thelma.entities.rack import rack_shape_from_rows_columns
from thelma.entities.racklayout import RackLayout
from thelma.entities.sample import Molecule
from thelma.entities.sample import Sample
from thelma.entities.sample import SampleMolecule
from thelma.entities.sample import StockSample
from thelma.entities.species import Species
from thelma.entities.status import ItemStatus
from thelma.entities.stockinfo import StockInfo
from thelma.entities.subproject import Subproject
from thelma.entities.tagging import Tag
from thelma.entities.tagging import Tagged
from thelma.entities.tagging import TaggedRackPositionSet
from thelma.entities.tubetransfer import TubeTransfer
from thelma.entities.tubetransfer import TubeTransferWorklist
from thelma.entities.user import User
from thelma.entities.user import UserPreferences
from thelma.entities.barcode import BarcodePrintJob
from thelma.interfaces import IRackPosition
from thelma.interfaces import IDeviceType
from thelma.interfaces import ILocation
__docformat__ = 'reStructuredText en'
__all__ = []
# By design, we have to use fixtures defined in the module scope as
# parameters in other fixture function declarations.
# pylint: disable=W0621
@fixture
def barcode_print_job_fac(test_object_fac):
    """Factory fixture producing :class:`BarcodePrintJob` test objects."""
    defaults = {'barcodes': '02480532',
                'labels': 'TestBarcode',
                'printer': 'DUMMY',
                'type': 'UNIRACK'}
    return test_object_fac(BarcodePrintJob, kw=defaults)
@fixture
def barcoded_location_fac(test_object_fac, barcoded_location_type_fac,
                          device_fac):
    """Factory fixture producing :class:`BarcodedLocation` test objects."""
    defaults = {'name': 'test barcoded location',
                'label': 'Test Barcoded Location',
                'type': barcoded_location_type_fac(),
                'barcode': '09999999',
                'device': device_fac()}
    return test_object_fac(BarcodedLocation, kw=defaults)
@fixture
def barcoded_location_type_fac(test_object_fac):
kw = dict(name='testbcloctp',
)
return test_object_fac(BarcodedLocationType, kw=kw)
@fixture
def compound_chemical_structure_fac(test_object_fac):
    """Factory fixture producing :class:`CompoundChemicalStructure` objects."""
    return test_object_fac(CompoundChemicalStructure,
                           kw={'representation': 'COMPOUND'})
@fixture
def compound_molecule_design_fac(test_object_fac, molecule_type_compound,
compound_chemical_structure_fac):
kw = dict(molecule_type=molecule_type_compound,
chemical_structures=[compound_chemical_structure_fac()]
)
return test_object_fac(CompoundDesign, kw=kw)
@fixture
def container_location_fac(test_object_fac, tube_fac, tube_rack_fac,
                           rack_position_fac):
    """Factory fixture producing :class:`TubeLocation` test objects."""
    defaults = {'rack': tube_rack_fac(),
                'position': rack_position_fac(),
                'container': tube_fac()}
    return test_object_fac(TubeLocation, kw=defaults)
@fixture
def device_fac(test_object_fac, device_type_fac, organization_fac):
    """Factory fixture producing :class:`Device` test objects."""
    defaults = {'name': 'TEST_DEVICE',
                'label': 'test device',
                'type': device_type_fac(),
                'model': 'test device model',
                'manufacturer': organization_fac()}
    return test_object_fac(Device, kw=defaults)
@fixture
def device_type_fac(test_object_fac):
kw = dict(name='TEST_DEVICE_TYPE',
label='test device type')
return test_object_fac(DeviceType, kw=kw)
@fixture
def executed_rack_sample_transfer_fac(test_object_fac, plate_fac,
planned_rack_sample_transfer_fac,
user_cenixadm):
kw = dict(source_rack=plate_fac(),
target_rack=plate_fac(),
planned_rack_sample_transfer=planned_rack_sample_transfer_fac(),
user=user_cenixadm
)
return test_object_fac(ExecutedRackSampleTransfer, kw=kw)
@fixture
def executed_sample_dilution_fac(test_object_fac, well_fac, rack_position_fac,
                                 reservoir_specs_fac,
                                 planned_sample_dilution_fac, user_cenixadm):
    """Factory fixture producing :class:`ExecutedSampleDilution` objects."""
    # Row 7 / column 11 is the bottom-right position of an 8x12 plate.
    target = rack_position_fac(row_index=7, column_index=11)
    defaults = {'target_container': well_fac(position=target),
                'reservoir_specs': reservoir_specs_fac(),
                'planned_sample_dilution': planned_sample_dilution_fac(),
                'user': user_cenixadm}
    return test_object_fac(ExecutedSampleDilution, kw=defaults)
@fixture
def executed_sample_transfer_fac(test_object_fac, well_fac, rack_position_fac,
                                 planned_sample_transfer_fac, user_cenixadm):
    """Factory fixture producing :class:`ExecutedSampleTransfer` objects."""
    target = rack_position_fac(row_index=7, column_index=10)
    # A distinct target well is created via the factory's .new() so the
    # source and target containers are separate objects.
    defaults = {'source_container': well_fac(),
                'target_container': well_fac.new(position=target),
                'planned_sample_transfer': planned_sample_transfer_fac(),
                'user': user_cenixadm}
    return test_object_fac(ExecutedSampleTransfer, kw=defaults)
@fixture
def executed_worklist_fac(test_object_fac, planned_worklist_fac):
kw = dict(planned_worklist=planned_worklist_fac())
return test_object_fac(ExecutedWorklist, kw=kw)
@fixture
def experiment_fac(test_object_fac, experiment_design_fac):
kw = dict(label='test experiment',
experiment_design=experiment_design_fac())
return test_object_fac(Experiment, kw=kw)
@fixture
def experiment_design_fac(test_object_fac, rack_shape_8x12,
experiment_metadata_fac, experiment_design_rack_fac):
kw = dict(rack_shape=rack_shape_8x12,
experiment_metadata=experiment_metadata_fac(),
experiment_design_racks=[experiment_design_rack_fac()])
return test_object_fac(ExperimentDesign, kw=kw)
@fixture
def experiment_design_rack_fac(test_object_fac, rack_layout_fac):
kw = dict(label='test experiment design rack',
rack_layout=rack_layout_fac())
return test_object_fac(ExperimentDesignRack, kw=kw)
@fixture
def experiment_job_fac(test_object_fac, experiment_fac, user_cenixadm):
kw = dict(label='test experiment job',
user=user_cenixadm,
experiments=[experiment_fac()])
return test_object_fac(ExperimentJob, kw=kw)
@fixture
def experiment_metadata_fac(test_object_fac, subproject_fac,
experiment_metadata_type_manual):
kw = dict(label='test experiment metadata',
subproject=subproject_fac(),
number_replicates=1,
experiment_metadata_type=experiment_metadata_type_manual,
ticket_number=9999)
return test_object_fac(ExperimentMetadata, kw=kw)
@fixture
def experiment_rack_fac(test_object_fac, experiment_design_rack_fac,
plate_fac, experiment_fac):
kw = dict(design_rack=experiment_design_rack_fac(),
rack=plate_fac(),
experiment=experiment_fac())
return test_object_fac(ExperimentRack, kw=kw)
@fixture
def gene_fac(test_object_fac, species_human):
kw = dict(accession='99999',
locus_name='XXXXX',
species=species_human)
return test_object_fac(Gene, kw=kw)
@fixture
def iso_aliquot_plate_fac(test_object_fac, lab_iso_fac, plate_fac):
kw = dict(iso=lab_iso_fac(),
rack=plate_fac())
return test_object_fac(IsoAliquotPlate, kw=kw)
@fixture
def iso_job_fac(test_object_fac, user_cenixadm, lab_iso_fac):
kw = dict(label='test iso job',
user=user_cenixadm,
isos=[lab_iso_fac()],
number_stock_racks=0
)
return test_object_fac(IsoJob, kw=kw)
@fixture
def iso_job_preparation_plate_fac(test_object_fac, iso_job_fac, plate_fac,
rack_layout_fac):
kw = dict(iso_job=iso_job_fac(),
rack=plate_fac(),
rack_layout=rack_layout_fac())
return test_object_fac(IsoJobPreparationPlate, kw=kw)
@fixture
def iso_job_stock_rack_fac(test_object_fac, iso_job_fac, plate_fac,
rack_layout_fac, worklist_series_fac):
kw = dict(iso_job=iso_job_fac(),
label='testiso#j0',
rack=plate_fac(),
rack_layout=rack_layout_fac(),
worklist_series=worklist_series_fac())
return test_object_fac(IsoJobStockRack, kw=kw)
@fixture
def iso_preparation_plate_fac(test_object_fac, lab_iso_fac, plate_fac,
rack_layout_fac):
kw = dict(iso=lab_iso_fac(),
rack=plate_fac(),
rack_layout=rack_layout_fac())
return test_object_fac(IsoPreparationPlate, kw=kw)
@fixture
def iso_sector_preparation_plate_fac(test_object_fac,
stock_sample_creation_iso_fac, plate_fac,
rack_layout_fac):
kw = dict(iso=stock_sample_creation_iso_fac(),
rack=plate_fac(),
sector_index=0,
rack_layout=rack_layout_fac())
return test_object_fac(IsoSectorPreparationPlate, kw=kw)
@fixture
def iso_sector_stock_rack_fac(test_object_fac, lab_iso_fac, plate_fac,
rack_layout_fac, worklist_series_fac):
kw = dict(iso=lab_iso_fac(),
sector_index=0,
label='testiso#00',
rack=plate_fac(),
rack_layout=rack_layout_fac(),
worklist_series=worklist_series_fac(),
)
return test_object_fac(IsoSectorStockRack, kw=kw)
@fixture
def iso_stock_rack_fac(test_object_fac, lab_iso_fac, plate_fac,
rack_layout_fac, worklist_series_fac):
kw = dict(iso=lab_iso_fac(),
label='testiso#0',
rack=plate_fac(),
rack_layout=rack_layout_fac(),
worklist_series=worklist_series_fac(),
)
return test_object_fac(IsoStockRack, kw=kw)
@fixture
def item_status_fac(test_object_fac):
kw = dict(id='TESTIS',
name='test item status',
description='test item status description')
return test_object_fac(ItemStatus, kw=kw)
@fixture
def lab_iso_fac(test_object_fac, rack_layout_fac, lab_iso_request_fac):
    """Factory fixture producing :class:`LabIso` test objects."""
    defaults = {'label': 'test iso',
                'number_stock_racks': 4,
                # .new() forces a fresh rack layout per created ISO.
                'rack_layout': rack_layout_fac.new(),
                'iso_request': lab_iso_request_fac()}
    return test_object_fac(LabIso, kw=defaults)
@fixture
def lab_iso_request_fac(test_object_fac, user_cenixadm, rack_layout_fac,
reservoir_specs_std96):
kw = dict(label='test lab iso request',
requester=user_cenixadm,
rack_layout=rack_layout_fac(),
iso_plate_reservoir_specs=reservoir_specs_std96,
)
return test_object_fac(LabIsoRequest, kw=kw)
@fixture
def library_plate_fac(test_object_fac, molecule_design_library_fac, plate_fac):
kw = dict(molecule_design_library=molecule_design_library_fac(),
rack=plate_fac(),
layout_number=1)
return test_object_fac(LibraryPlate, kw=kw)
@fixture
def molecule_fac(test_object_fac, sirna_molecule_design_fac,
organization_fac):
kw = dict(molecule_design=sirna_molecule_design_fac(),
supplier=organization_fac()
)
return test_object_fac(Molecule, kw=kw)
@fixture
def molecule_design_library_fac(test_object_fac, molecule_design_pool_set_fac,
rack_layout_fac):
kw = dict(molecule_design_pool_set=molecule_design_pool_set_fac(),
label='testlib',
final_volume=8e-6,
final_concentration=3e-6,
number_layouts=10,
rack_layout=rack_layout_fac(),
)
return test_object_fac(MoleculeDesignLibrary, kw=kw)
@fixture
def molecule_design_pool_fac(test_object_fac, sirna_molecule_design_fac,
                             nucleic_acid_chemical_structure_fac):
    """Factory fixture producing a :class:`MoleculeDesignPool` of two designs."""
    structures = [nucleic_acid_chemical_structure_fac(representation=rep)
                  for rep in ('CCCCC', 'GGGGG')]
    designs = set([sirna_molecule_design_fac(),
                   sirna_molecule_design_fac(chemical_structures=structures)])
    return test_object_fac(MoleculeDesignPool,
                           kw={'molecule_designs': designs})
@fixture
def molecule_design_pool_set_fac(test_object_fac, molecule_type_sirna,
molecule_design_pool_fac):
kw = dict(molecule_type=molecule_type_sirna,
molecule_design_pools=set([molecule_design_pool_fac()]))
return test_object_fac(MoleculeDesignPoolSet, kw=kw)
@fixture
def molecule_design_set_fac(test_object_fac, sirna_molecule_design_fac,
compound_molecule_design_fac):
md1 = sirna_molecule_design_fac()
md2 = compound_molecule_design_fac()
kw = dict(molecule_designs=set([md1, md2]))
return test_object_fac(MoleculeDesignSet, kw=kw)
@fixture
def molecule_type_fac(test_object_fac):
kw = dict(name='testmoltpe',
default_stock_concentration=5e-5)
return test_object_fac(MoleculeType, kw=kw)
@fixture
def nucleic_acid_chemical_structure_fac(test_object_fac):
kw = dict(representation='CTATAUGACTAGATCGATUUT')
return test_object_fac(NucleicAcidChemicalStructure, kw=kw)
@fixture
def organization_fac(test_object_fac):
kw = dict(name='test organization')
return test_object_fac(Organization, kw=kw)
@fixture
def pipetting_specs_fac(test_object_fac):
kw = dict(name='t p specs',
min_transfer_volume=1e-6,
max_transfer_volume=5e-5,
max_dilution_factor=20,
has_dynamic_dead_volume=True,
is_sector_bound=True)
return test_object_fac(PipettingSpecs, kw=kw)
@fixture
def planned_rack_sample_transfer_fac(test_object_fac):
kw = dict(volume=5e-6,
number_sectors=4,
source_sector_index=0,
target_sector_index=1)
return test_object_fac(PlannedRackSampleTransfer.get_entity, kw=kw)
@fixture
def planned_sample_dilution_fac(test_object_fac, rack_position_fac):
kw = dict(volume=5e-6,
diluent_info=DILUENT_INFO,
target_position=rack_position_fac()
)
return test_object_fac(PlannedSampleDilution.get_entity, kw=kw)
@fixture
def planned_sample_transfer_fac(test_object_fac, rack_position_fac):
kw = dict(volume=5e-6,
source_position=rack_position_fac(),
target_position=rack_position_fac()
)
return test_object_fac(PlannedSampleTransfer.get_entity, kw=kw)
@fixture
def planned_worklist_fac(test_object_fac, pipetting_specs_biomek,
planned_sample_dilution_fac):
kw = dict(label='test planned worklist',
transfer_type=TRANSFER_TYPES.SAMPLE_DILUTION,
pipetting_specs=pipetting_specs_biomek,
planned_liquid_transfers=[planned_sample_dilution_fac()]
)
return test_object_fac(PlannedWorklist, kw=kw)
@fixture
def plate_fac(test_object_fac, plate_specs_fac, item_status_managed):
kw = dict(label='test plate',
specs=plate_specs_fac(),
status=item_status_managed)
return test_object_fac(Plate, kw=kw)
@fixture
def plate_specs_fac(test_object_fac, rack_shape_8x12, well_specs_fac):
kw = dict(label='test plate specs',
shape=rack_shape_8x12,
well_specs=well_specs_fac())
return test_object_fac(PlateSpecs, kw=kw)
@fixture
def project_fac(test_object_fac, user_cenixadm, organization_cenix):
kw = dict(label='test project',
leader=user_cenixadm,
customer=organization_cenix,
)
return test_object_fac(Project, kw=kw)
@fixture
def rack_layout_fac(test_object_fac, rack_shape_8x12,
tagged_rack_position_set_fac):
kw = dict(shape=rack_shape_8x12,
tagged_rack_position_sets=[tagged_rack_position_set_fac()])
return test_object_fac(RackLayout, kw=kw)
@fixture
def rack_position_fac(test_object_fac):
    """Factory fixture producing :class:`RackPosition` objects (default A1)."""
    return test_object_fac(RackPosition.from_indices,
                           kw={'row_index': 0, 'column_index': 0})
@fixture
def rack_position_set_fac(test_object_fac, rack_position_fac):
    """Factory fixture producing a :class:`RackPositionSet` of five positions."""
    coords = [(0, 1), (0, 2), (1, 0), (1, 1), (1, 3)]
    positions = set(rack_position_fac(row_index=row, column_index=col)
                    for row, col in coords)
    return test_object_fac(RackPositionSet.from_positions,
                           kw={'positions': positions})
@fixture
def rack_shape_fac(test_object_fac):
    """Factory fixture producing an 8x12 rack shape."""
    return test_object_fac(rack_shape_from_rows_columns,
                           kw={'number_columns': 12, 'number_rows': 8})
@fixture
def reservoir_specs_fac(test_object_fac, rack_shape_8x12):
kw = dict(name='test rsv specs',
description='test reservoir specs description',
rack_shape=rack_shape_8x12,
max_volume=1e-4,
min_dead_volume=1e-6,
max_dead_volume=5e-6)
return test_object_fac(ReservoirSpecs, kw=kw)
@fixture
def sample_fac(test_object_fac, well_fac):
kw = dict(volume=1e-4,
container=well_fac())
return test_object_fac(Sample, kw=kw)
@fixture
def sample_molecule_fac(test_object_fac, molecule_fac, sample_fac):
kw = dict(molecule=molecule_fac(),
concentration=5e-5,
sample=sample_fac()
)
return test_object_fac(SampleMolecule, kw=kw)
@fixture
def sirna_molecule_design_fac(test_object_fac, molecule_type_sirna,
nucleic_acid_chemical_structure_fac):
kw = dict(molecule_type=molecule_type_sirna,
chemical_structures=[nucleic_acid_chemical_structure_fac(**kw)
for kw in [dict(representation='TTTTT'),
dict(representation='AAAAA')]],
)
return test_object_fac(SiRnaDesign, kw=kw)
@fixture
def species_fac(test_object_fac):
kw = dict(genus_name='Bufo',
species_name='bufo',
common_name='kroete',
acronym='BB',
ncbi_tax_id=999)
return test_object_fac(Species, kw=kw)
@fixture
def stock_info_fac(test_object_fac, molecule_design_pool_fac,
molecule_type_sirna):
kw = dict(molecule_design_pool=molecule_design_pool_fac(),
molecule_type=molecule_type_sirna,
concentration=5e-5,
total_tubes=5,
total_volume=1e-4,
minimum_volume=1e-5,
maximum_volume=1e-5)
return test_object_fac(StockInfo, kw=kw)
@fixture
def stock_sample_fac(test_object_fac, tube_fac, molecule_design_pool_fac,
organization_fac, molecule_type_sirna):
kw = dict(volume=1e-4,
container=tube_fac(),
molecule_design_pool=molecule_design_pool_fac(),
supplier=organization_fac(),
molecule_type=molecule_type_sirna,
concentration=5e-5)
return test_object_fac(StockSample, kw=kw)
@fixture
def stock_sample_creation_iso_fac(test_object_fac, rack_layout_fac,
stock_sample_creation_iso_request_fac):
kw = dict(label='test iso',
number_stock_racks=4,
rack_layout=rack_layout_fac.new(),
ticket_number=99999,
layout_number=1,
iso_request=stock_sample_creation_iso_request_fac())
return test_object_fac(StockSampleCreationIso, kw=kw)
@fixture
def stock_sample_creation_iso_request_fac(test_object_fac):
kw = dict(label='test lab iso request',
stock_volume=5e-5,
stock_concentration=1e-4,
preparation_plate_volume=4e-5,
number_designs=3,
)
return test_object_fac(StockSampleCreationIsoRequest, kw=kw)
@fixture
def subproject_fac(test_object_fac, project_fac):
kw = dict(label='test subproject',
project=project_fac())
return test_object_fac(Subproject, kw=kw)
@fixture
def tag_fac(test_object_fac):
kw = dict(domain='test_domain',
predicate='test_predicate',
value='test_value')
return test_object_fac(Tag, kw=kw)
@fixture
def tagged_fac(test_object_fac, tag_fac, user_cenixadm):
kw = dict(tags=set([tag_fac(domain='rackshapes',
predicate='default',
value='true'),
tag_fac(domain='audit',
predicate='creator',
value='someone')
]),
user=user_cenixadm
)
return test_object_fac(Tagged, kw=kw)
@fixture
def tagged_rack_position_set_fac(test_object_fac, tag_fac,
rack_position_set_fac, user_cenixadm):
return test_object_fac(TaggedRackPositionSet,
kw=dict(tags=set([tag_fac(value='value%d' % cnt)
for cnt in range(3)]),
rack_position_set=rack_position_set_fac(),
user=user_cenixadm))
@fixture
def tube_fac(test_object_fac, tube_specs_matrix, item_status_managed):
kw = dict(specs=tube_specs_matrix,
status=item_status_managed,
barcode='1019999999')
return test_object_fac(Tube, kw=kw)
@fixture
def tube_rack_fac(test_object_fac, tube_rack_specs_fac, item_status_managed):
kw = dict(label='test tube rack',
specs=tube_rack_specs_fac(),
status=item_status_managed)
return test_object_fac(TubeRack, kw=kw)
@fixture
def tube_rack_specs_fac(test_object_fac, rack_shape_8x12):
kw = dict(label='test tube rack specs',
shape=rack_shape_8x12)
return test_object_fac(TubeRackSpecs, kw=kw)
@fixture
def tube_specs_fac(test_object_fac, organization_fac, tube_rack_specs_fac):
kw = dict(label='test tube specs',
max_volume=1e-4,
dead_volume=5e-6,
manufacturer=organization_fac(),
tube_rack_specs=[tube_rack_specs_fac()])
return test_object_fac(TubeSpecs, kw=kw)
@fixture
def tube_transfer_fac(test_object_fac, tube_fac, tube_rack_fac,
rack_position_fac):
kw = dict(tube=tube_fac(),
source_rack=tube_rack_fac(),
source_position=rack_position_fac(),
target_rack=tube_rack_fac.new(),
target_position=rack_position_fac())
return test_object_fac(TubeTransfer, kw=kw)
@fixture
def tube_transfer_worklist_fac(test_object_fac, user_cenixadm,
tube_transfer_fac):
kw = dict(user=user_cenixadm,
tube_transfers=[tube_transfer_fac()],
)
return test_object_fac(TubeTransferWorklist, kw=kw)
@fixture
def user_fac(test_object_fac):
kw = dict(username='test user',
directory_user_id='testuser')
return test_object_fac(User, kw=kw)
@fixture
def user_preferences_fac(test_object_fac, user_cenixadm):
kw = dict(user=user_cenixadm,
app_name='test app',
preferences='test preferences')
return test_object_fac(UserPreferences, kw=kw)
@fixture
def well_fac(test_object_fac, well_specs_fac, item_status_managed, plate_fac,
             rack_position_fac):
    """Factory fixture producing wells.

    Wells are instantiated by their plate's constructor, so instead of
    calling a class we look the well up on the given rack at the given
    position and patch its status.
    """
    def _fetch_well(**kw):
        # Retrieve the pre-built well from the plate, then apply the
        # requested status.
        well = kw['rack'].container_positions[kw['position']]
        well.status = kw['status']
        return well
    defaults = {'specs': well_specs_fac(),
                'status': item_status_managed,
                'rack': plate_fac(),
                'position': rack_position_fac()}
    return test_object_fac(_fetch_well, kw=defaults)
@fixture
def well_specs_fac(test_object_fac, organization_fac):
    """Factory fixture producing :class:`WellSpecs` test objects.

    Fix: the default label previously read 'test tube specs' — a
    copy-paste from ``tube_specs_fac`` — which made well specs
    indistinguishable from tube specs by label.
    """
    kw = dict(label='test well specs',
              max_volume=1e-4,
              dead_volume=5e-6,
              plate_specs=None,
              manufacturer=organization_fac())
    return test_object_fac(WellSpecs, kw=kw)
@fixture
def worklist_series_fac(test_object_fac):
    """Factory fixture producing empty :class:`WorklistSeries` objects."""
    return test_object_fac(WorklistSeries, kw={})
@fixture
def worklist_series_member_fac(test_object_fac, planned_worklist_fac,
                               worklist_series_fac):
    """Factory fixture producing :class:`WorklistSeriesMember` objects."""
    defaults = {'planned_worklist': planned_worklist_fac(),
                'worklist_series': worklist_series_fac(),
                'index': 0}
    return test_object_fac(WorklistSeriesMember, kw=defaults)
# pylint: enable=W0621
# Constants fixtures.
@fixture
def barcoded_location_c127s8():
    """Return the 'c127s8' barcoded location from the root aggregate."""
    return get_root_aggregate(ILocation).get_by_slug('c127s8')
@fixture
def chemical_structure_type_nucleic_acid():
    """Return the nucleic-acid chemical structure type semiconstant."""
    slug = CHEMICAL_STRUCTURE_TYPE_IDS.NUCLEIC_ACID.lower()
    return get_root_aggregate(IChemicalStructureType).get_by_slug(slug)
@fixture
def device_type_printer():
agg = get_root_aggregate(IDeviceType)
return agg.get_by_slug('printer')
@fixture
def experiment_metadata_type_manual():
agg = get_root_aggregate(IExperimentMetadataType)
return agg.get_by_slug(EXPERIMENT_METADATA_TYPES.MANUAL.lower())
@fixture
def item_status_destroyed():
    """Return the 'destroyed' :class:`ItemStatus` semiconstant.

    Fix: this fixture previously looked up the MANAGED status (a
    copy-paste of ``item_status_managed``), so tests requesting a
    destroyed status silently received a managed one.
    """
    agg = get_root_aggregate(IItemStatus)
    return agg.get_by_slug(ITEM_STATUS_NAMES.DESTROYED.lower())
@fixture
def item_status_managed():
    """Return the 'managed' :class:`ItemStatus` semiconstant."""
    return get_root_aggregate(IItemStatus).get_by_slug(
        ITEM_STATUS_NAMES.MANAGED.lower())
@fixture
def molecule_type_sirna():
agg = get_root_aggregate(IMoleculeType)
return agg.get_by_slug('sirna')
@fixture
def molecule_type_compound():
agg = get_root_aggregate(IMoleculeType)
return agg.get_by_slug('compound')
@fixture
def organization_cenix():
agg = get_root_aggregate(IOrganization)
return agg.get_by_slug('cenix')
@fixture
def pipetting_specs_biomek():
agg = get_root_aggregate(IPipettingSpecs)
return agg.get_by_slug(PIPETTING_SPECS_NAMES.BIOMEK.lower())
@fixture
def plate_specs_std96():
agg = get_root_aggregate(IPlateSpecs)
return agg.get_by_slug('std96')
@fixture
def rack_position_a1():
agg = get_root_aggregate(IRackPosition)
return agg.get_by_slug('a1')
@fixture
def rack_shape_8x12():
agg = get_root_aggregate(IRackShape)
return agg.get_by_slug('8x12')
@fixture
def rack_shape_16x24():
agg = get_root_aggregate(IRackShape)
return agg.get_by_slug('16x24')
@fixture
def reservoir_specs_std96():
agg = get_root_aggregate(IReservoirSpecs)
return agg.get_by_slug(
slug_from_string(RESERVOIR_SPECS_NAMES.STANDARD_96))
@fixture
def species_human():
agg = get_root_aggregate(ISpecies)
return agg.get_by_slug('human')
@fixture
def tube_rack_specs_matrix():
agg = get_root_aggregate(ITubeRackSpecs)
return agg.get_by_slug('matrix0500')
@fixture
def tube_specs_matrix():
agg = get_root_aggregate(ITubeSpecs)
return agg.get_by_slug('matrix0500')
@fixture
def user_cenixadm():
agg = get_root_aggregate(IUser)
return agg.get_by_slug('cenixadm')
class SessionContextManager(object):
    """Context manager giving each ``with`` block a fresh scoped session.

    The scoped-session registry is cleared both on entry and on exit so
    that no session state leaks between tests.
    """
    def __enter__(self):
        # Drop any session left over from a previous test.
        Session.remove()
        # Returning self (previously None) lets callers use
        # ``with SessionContextManager() as cm``; existing bare
        # ``with SessionContextManager():`` uses are unaffected.
        return self
    def __exit__(self, ext_type, value, tb):
        # Discard the session used inside the block.
        Session.remove()
@yield_fixture
def session():
    """Yield fixture wrapping the test in a fresh scoped-session context."""
    with SessionContextManager():
        yield
@yield_fixture
def nested_session():
    """Yield an RDB session inside a rollback-on-exit context manager."""
    with RdbContextManager() as sess:
        yield sess
|
|
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_rpc_base
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import routerrule_db
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch import version
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(n_rpc.RpcProxy,
                       sg_rpc.SecurityGroupAgentRpcApiMixin):
    """RPC proxy used by the plugin to notify L2 agents of port changes."""
    BASE_RPC_API_VERSION = '1.1'
    def __init__(self, topic):
        """Set up the proxy and precompute the port-update fanout topic."""
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.topic_port_update = topics.get_topic_name(
            topic, topics.PORT, topics.UPDATE)
    def port_update(self, context, port):
        """Fan out a 'port_update' message carrying *port* to all agents."""
        self.fanout_cast(context,
                         self.make_msg('port_update',
                                       port=port),
                         topic=self.topic_port_update)
class RestProxyCallbacks(n_rpc.RpcCallback,
                         sg_rpc_base.SecurityGroupServerRpcCallbackMixin):
    """Server-side RPC callbacks answering security-group queries from agents."""
    RPC_API_VERSION = '1.1'
    def get_port_from_device(self, device):
        """Resolve an agent-side device name to a port dict with SG info."""
        # Agents report the tap interface name; stripping the "tap"
        # prefix leaves a (possibly truncated) port id.
        port_id = re.sub(r"^tap", "", device)
        port = self.get_port_and_sgs(port_id)
        if port:
            port['device'] = device
        return port
    def get_port_and_sgs(self, port_id):
        """Get port from database with security group info."""
        LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
        session = db.get_session()
        sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
        with session.begin(subtransactions=True):
            query = session.query(
                models_v2.Port,
                sg_db.SecurityGroupPortBinding.security_group_id
            )
            query = query.outerjoin(sg_db.SecurityGroupPortBinding,
                                    models_v2.Port.id == sg_binding_port)
            # Prefix match because port_id may be truncated by the agent.
            query = query.filter(models_v2.Port.id.startswith(port_id))
            port_and_sgs = query.all()
            if not port_and_sgs:
                return
            port = port_and_sgs[0][0]
            plugin = manager.NeutronManager.get_plugin()
            port_dict = plugin._make_port_dict(port)
            port_dict['security_groups'] = [
                sg_id for port_, sg_id in port_and_sgs if sg_id]
            port_dict['security_group_rules'] = []
            port_dict['security_group_source_groups'] = []
            port_dict['fixed_ips'] = [ip['ip_address']
                                      for ip in port['fixed_ips']]
            return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
                             external_net_db.External_net_db_mixin,
                             routerrule_db.RouterRule_db_mixin):
    """Shared helpers for mirroring Neutron state to the backend controller."""

    supported_extension_aliases = ["binding"]
    # Connection pool to the backend controllers; set up by users of this base.
    servers = None
    def _get_all_data(self, get_ports=True, get_floating_ips=True,
                      get_routers=True):
        """Build a full topology snapshot for the backend controller.

        Returns a dict with a 'networks' list (each optionally carrying
        'floatingips' and 'ports') and, when get_routers is True, a
        'routers' list with per-router 'interfaces'.
        """
        admin_context = qcontext.get_admin_context()
        networks = []
        # this method is used by the ML2 driver so it can't directly invoke
        # the self.get_(ports|networks) methods
        plugin = manager.NeutronManager.get_plugin()
        all_networks = plugin.get_networks(admin_context) or []
        for net in all_networks:
            mapped_network = self._get_mapped_network_with_subnets(net)
            flips_n_ports = mapped_network
            if get_floating_ips:
                flips_n_ports = self._get_network_with_floatingips(
                    mapped_network)
            if get_ports:
                ports = []
                net_filter = {'network_id': [net.get('id')]}
                net_ports = plugin.get_ports(admin_context,
                                             filters=net_filter) or []
                for port in net_ports:
                    mapped_port = self._map_state_and_status(port)
                    # 'attachment' is the shape the controller expects.
                    mapped_port['attachment'] = {
                        'id': port.get('device_id'),
                        'mac': port.get('mac_address'),
                    }
                    mapped_port = self._extend_port_dict_binding(admin_context,
                                                                 mapped_port)
                    ports.append(mapped_port)
                flips_n_ports['ports'] = ports
            if flips_n_ports:
                networks.append(flips_n_ports)
        data = {'networks': networks}
        if get_routers:
            routers = []
            all_routers = self.get_routers(admin_context) or []
            for router in all_routers:
                interfaces = []
                mapped_router = self._map_state_and_status(router)
                # Only router-interface ports owned by this router.
                router_filter = {
                    'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
                    'device_id': [router.get('id')]
                }
                router_ports = self.get_ports(admin_context,
                                              filters=router_filter) or []
                for port in router_ports:
                    net_id = port.get('network_id')
                    subnet_id = port['fixed_ips'][0]['subnet_id']
                    intf_details = self._get_router_intf_details(admin_context,
                                                                 net_id,
                                                                 subnet_id)
                    interfaces.append(intf_details)
                mapped_router['interfaces'] = interfaces
                routers.append(mapped_router)
            data.update({'routers': routers})
        return data
def _send_all_data(self, send_ports=True, send_floating_ips=True,
send_routers=True, timeout=None,
triggered_by_tenant=None):
"""Pushes all data to network ctrl (networks/ports, ports/attachments).
This gives the controller an option to re-sync it's persistent store
with neutron's current view of that data.
"""
data = self._get_all_data(send_ports, send_floating_ips, send_routers)
data['triggered_by_tenant'] = triggered_by_tenant
errstr = _("Unable to update remote topology: %s")
return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
data, errstr, timeout=timeout)
def _get_network_with_floatingips(self, network, context=None):
if context is None:
context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
fl_ips = self.get_floatingips(context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id, context=None):
if context is None:
context = qcontext.get_admin_context()
# start a sub-transaction to avoid breaking parent transactions
with context.session.begin(subtransactions=True):
subnets = self._get_subnets_by_network(context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network, context=None):
# if context is not provided, admin context is used
if context is None:
context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'],
context)
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[external_net.EXTERNAL] = self._network_is_external(
context, network['id'])
# include ML2 segmentation types
network['segmentation_types'] = getattr(self, "segmentation_types", "")
return network
def _send_create_network(self, network, context=None):
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
self.servers.rest_create_network(tenant_id, mapped_network)
def _send_update_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
net_fl_ips = self._get_network_with_floatingips(mapped_network,
context)
self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
def _send_delete_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
self.servers.rest_delete_network(tenant_id, net_id)
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
resource.pop('status', None)
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported "
"in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the "
"plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = self.get_network(context, net_id)
subnet = self.get_subnet(context, subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _extend_port_dict_binding(self, context, port):
cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
if not cfg_vif_type in (portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_IVS):
LOG.warning(_("Unrecognized vif_type in configuration "
"[%s]. Defaulting to ovs."),
cfg_vif_type)
cfg_vif_type = portbindings.VIF_TYPE_OVS
# In ML2, the host_id is already populated
if portbindings.HOST_ID in port:
hostid = port[portbindings.HOST_ID]
else:
hostid = porttracker_db.get_port_hostid(context, port['id'])
if hostid:
port[portbindings.HOST_ID] = hostid
override = self._check_hostvif_override(hostid)
if override:
cfg_vif_type = override
port[portbindings.VIF_TYPE] = cfg_vif_type
port[portbindings.VIF_DETAILS] = {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases,
portbindings.OVS_HYBRID_PLUG: True
}
return port
def _check_hostvif_override(self, hostid):
for v in cfg.CONF.NOVA.vif_types:
if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
return v
return False
def _get_port_net_tenantid(self, context, port):
net = super(NeutronRestProxyV2Base,
self).get_network(context, port["network_id"])
return net['tenant_id']
def async_port_create(self, tenant_id, net_id, port):
try:
self.servers.rest_create_port(tenant_id, net_id, port)
except servermanager.RemoteRestError as e:
# 404 should never be received on a port create unless
# there are inconsistencies between the data in neutron
# and the data in the backend.
# Run a sync to get it consistent.
if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
e.status == httplib.NOT_FOUND and
servermanager.NXNETWORK in e.reason):
LOG.error(_("Iconsistency with backend controller "
"triggering full synchronization."))
# args depend on if we are operating in ML2 driver
# or as the full plugin
topoargs = self.servers.get_topo_function_args
self._send_all_data(
send_ports=topoargs['get_ports'],
send_floating_ips=topoargs['get_floating_ips'],
send_routers=topoargs['get_routers'],
triggered_by_tenant=tenant_id
)
# If the full sync worked, the port will be created
# on the controller so it can be safely marked as active
else:
# Any errors that don't result in a successful auto-sync
# require that the port be placed into the error state.
LOG.error(
_("NeutronRestProxyV2: Unable to create port: %s"), e)
try:
self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
except exceptions.PortNotFound:
# If port is already gone from DB and there was an error
# creating on the backend, everything is already consistent
pass
return
new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
else const.PORT_STATUS_DOWN)
try:
self._set_port_status(port['id'], new_status)
except exceptions.PortNotFound:
# This port was deleted before the create made it to the controller
# so it now needs to be deleted since the normal delete request
# would have deleted an non-existent port.
self.servers.rest_delete_port(tenant_id, net_id, port['id'])
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
def _set_port_status(self, port_id, status):
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.flush()
except sqlexc.NoResultFound:
raise exceptions.PortNotFound(port_id=port_id)
def put_context_in_serverpool(f):
    """Decorator recording the request context on self.servers before *f*."""
    @functools.wraps(f)
    def _with_context(self, context, *args, **kwargs):
        self.servers.set_context(context)
        return f(self, context, *args, **kwargs)
    return _with_context
class NeutronRestProxyV2(NeutronRestProxyV2Base,
                         addr_pair_db.AllowedAddressPairsMixin,
                         extradhcpopt_db.ExtraDhcpOptMixin,
                         agentschedulers_db.DhcpAgentSchedulerDbMixin,
                         sg_rpc_base.SecurityGroupServerRpcMixin):
    """Neutron core plugin that proxies all state to a Big Switch controller."""

    _supported_extension_aliases = ["external-net", "router", "binding",
                                    "router_rules", "extra_dhcp_opt", "quotas",
                                    "dhcp_agent_scheduler", "agent",
                                    "security-group", "allowed-address-pairs"]

    @property
    def supported_extension_aliases(self):
        # Computed lazily so configuration can strip the security-group
        # extension; the result is cached on the instance.
        if not hasattr(self, '_aliases'):
            aliases = self._supported_extension_aliases[:]
            sg_rpc.disable_security_group_extension_by_config(aliases)
            self._aliases = aliases
        return self._aliases
    def __init__(self):
        """Wire up config, the controller server pool, RPC and schedulers."""
        super(NeutronRestProxyV2, self).__init__()
        LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
                 version.version_string_with_vcs())
        pl_config.register_config()
        # Green thread pool used for async port creation on the backend.
        self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
        # Include the Big Switch Extensions path in the api_extensions
        neutron_extensions.append_api_extensions_path(extensions.__path__)
        self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
        # init network ctrl connections
        self.servers = servermanager.ServerPool()
        # The server pool calls back into _get_all_data for full re-syncs.
        self.servers.get_topo_function = self._get_all_data
        self.servers.get_topo_function_args = {'get_ports': True,
                                               'get_floating_ips': True,
                                               'get_routers': True}
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )
        # setup rpc for security and DHCP agents
        self._setup_rpc()
        if cfg.CONF.RESTPROXY.sync_data:
            self._send_all_data()
        LOG.debug(_("NeutronRestProxyV2: initialization done"))
    def _setup_rpc(self):
        """Create the RPC connection, notifiers and callback endpoints."""
        self.conn = n_rpc.create_connection(new=True)
        self.topic = topics.PLUGIN
        self.notifier = AgentNotifierApi(topics.AGENT)
        # init dhcp agent support
        self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
            self._dhcp_agent_notifier
        )
        self.endpoints = [RestProxyCallbacks(),
                          dhcp_rpc.DhcpRpcCallback(),
                          agents_db.AgentExtRpcCallback()]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        # Consume from all consumers in threads
        self.conn.consume_in_threads()
    @put_context_in_serverpool
    def create_network(self, context, network):
        """Create a network.
        Network represents an L2 network segment which can have a set of
        subnets and ports associated with it.
        :param context: neutron api request context
        :param network: dictionary describing the network
        :returns: a sequence of mappings with the following signature:
        {
            "id": UUID representing the network.
            "name": Human-readable name identifying the network.
            "tenant_id": Owner of network. NOTE: only admin user can specify
                         a tenant_id other than its own.
            "admin_state_up": Sets admin state of network.
                              if down, network does not forward packets.
            "status": Indicates whether network is currently operational
                      (values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "subnets": Subnets associated with this network.
        }
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: create_network() called"))
        self._warn_on_state_status(network['network'])
        with context.session.begin(subtransactions=True):
            self._ensure_default_security_group(
                context,
                network['network']["tenant_id"]
            )
            # create network in DB
            new_net = super(NeutronRestProxyV2, self).create_network(context,
                                                                     network)
            self._process_l3_create(context, new_net, network['network'])
            # create network on the network controller; a REST failure here
            # rolls back the enclosing DB transaction
            self._send_create_network(new_net, context)
        # return created network
        return new_net
    @put_context_in_serverpool
    def update_network(self, context, net_id, network):
        """Updates the properties of a particular Virtual Network.
        :param context: neutron api request context
        :param net_id: uuid of the network to update
        :param network: dictionary describing the updates
        :returns: a sequence of mappings with the following signature:
        {
            "id": UUID representing the network.
            "name": Human-readable name identifying the network.
            "tenant_id": Owner of network. NOTE: only admin user can
                         specify a tenant_id other than its own.
            "admin_state_up": Sets admin state of network.
                              if down, network does not forward packets.
            "status": Indicates whether network is currently operational
                      (values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "subnets": Subnets associated with this network.
        }
        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2.update_network() called"))
        self._warn_on_state_status(network['network'])
        session = context.session
        with session.begin(subtransactions=True):
            new_net = super(NeutronRestProxyV2, self).update_network(
                context, net_id, network)
            self._process_l3_update(context, new_net, network['network'])
            # update network on network controller; a REST failure here
            # rolls back the enclosing DB transaction
            self._send_update_network(new_net, context)
        return new_net
    # NOTE(kevinbenton): workaround for eventlet/mysql deadlock
    @utils.synchronized('bsn-port-barrier')
    @put_context_in_serverpool
    def delete_network(self, context, net_id):
        """Delete a network.
        :param context: neutron api request context
        :param id: UUID representing the network to delete.
        :returns: None
        :raises: exceptions.NetworkInUse
        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: delete_network() called"))
        # Validate args
        orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
        with context.session.begin(subtransactions=True):
            self._process_l3_delete(context, net_id)
            ret_val = super(NeutronRestProxyV2, self).delete_network(context,
                                                                     net_id)
            # backend delete happens inside the transaction so a REST
            # failure rolls back the DB removal
            self._send_delete_network(orig_net, context)
        return ret_val
    @put_context_in_serverpool
    def create_port(self, context, port):
        """Create a port, which is a connection point of a device
        (e.g., a VM NIC) to attach an L2 Neutron network.
        :param context: neutron api request context
        :param port: dictionary describing the port
        :returns:
        {
            "id": uuid representing the port.
            "network_id": uuid of network.
            "tenant_id": tenant_id
            "mac_address": mac address to use on this port.
            "admin_state_up": Sets admin state of port. if down, port
                              does not forward packets.
            "status": dicates whether port is currently operational
                      (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "fixed_ips": list of subnet IDs and IP addresses to be used on
                         this port
            "device_id": identifies the device (e.g., virtual server) using
                         this port.
        }
        :raises: exceptions.NetworkNotFound
        :raises: exceptions.StateInvalid
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: create_port() called"))
        # Update DB in new session so exceptions rollback changes
        with context.session.begin(subtransactions=True):
            self._ensure_default_security_group_on_port(context, port)
            sgids = self._get_security_groups_on_port(context, port)
            # non-router port status is set to pending. it is then updated
            # after the async rest call completes. router ports are synchronous
            if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
                port['port']['status'] = const.PORT_STATUS_ACTIVE
            else:
                port['port']['status'] = const.PORT_STATUS_BUILD
            dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
            new_port = super(NeutronRestProxyV2, self).create_port(context,
                                                                   port)
            self._process_port_create_security_group(context, new_port, sgids)
            # Track the binding host when the request carries one.
            if (portbindings.HOST_ID in port['port']
                and 'id' in new_port):
                host_id = port['port'][portbindings.HOST_ID]
                porttracker_db.put_port_hostid(context, new_port['id'],
                                               host_id)
            new_port[addr_pair.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, new_port,
                    port['port'].get(addr_pair.ADDRESS_PAIRS)))
            self._process_port_create_extra_dhcp_opts(context, new_port,
                                                      dhcp_opts)
            new_port = self._extend_port_dict_binding(context, new_port)
        net = super(NeutronRestProxyV2,
                    self).get_network(context, new_port["network_id"])
        if self.add_meta_server_route:
            if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
                destination = METADATA_SERVER_IP + '/32'
                self._add_host_route(context, destination, new_port)
        # create on network ctrl
        mapped_port = self._map_state_and_status(new_port)
        # ports have to be created synchronously when creating a router
        # port since adding router interfaces is a multi-call process
        if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
            self.servers.rest_create_port(net["tenant_id"],
                                          new_port["network_id"],
                                          mapped_port)
        else:
            self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
                                new_port["network_id"], mapped_port)
        self.notify_security_groups_member_updated(context, new_port)
        return new_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
    @put_context_in_serverpool
    def update_port(self, context, port_id, port):
        """Update values of a port.
        :param context: neutron api request context
        :param id: UUID representing the port to update.
        :param port: dictionary with keys indicating fields to update.
        :returns: a mapping sequence with the following signature:
        {
            "id": uuid representing the port.
            "network_id": uuid of network.
            "tenant_id": tenant_id
            "mac_address": mac address to use on this port.
            "admin_state_up": sets admin state of port. if down, port
                              does not forward packets.
            "status": dicates whether port is currently operational
                      (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "fixed_ips": list of subnet IDs and IP addresses to be used on
                         this port
            "device_id": identifies the device (e.g., virtual server) using
                         this port.
        }
        :raises: exceptions.StateInvalid
        :raises: exceptions.PortNotFound
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: update_port() called"))
        self._warn_on_state_status(port['port'])
        # Validate Args
        orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
        with context.session.begin(subtransactions=True):
            # Update DB
            new_port = super(NeutronRestProxyV2,
                             self).update_port(context, port_id, port)
            # ctrl_update_required tracks whether the backend controller
            # needs to see this change at all.
            ctrl_update_required = False
            if addr_pair.ADDRESS_PAIRS in port['port']:
                ctrl_update_required |= (
                    self.update_address_pairs_on_port(context, port_id, port,
                                                      orig_port, new_port))
            self._update_extra_dhcp_opts_on_port(context, port_id, port,
                                                 new_port)
            old_host_id = porttracker_db.get_port_hostid(context,
                                                         orig_port['id'])
            if (portbindings.HOST_ID in port['port']
                and 'id' in new_port):
                host_id = port['port'][portbindings.HOST_ID]
                porttracker_db.put_port_hostid(context, new_port['id'],
                                               host_id)
                if old_host_id != host_id:
                    ctrl_update_required = True
            if (new_port.get("device_id") != orig_port.get("device_id") and
                orig_port.get("device_id")):
                ctrl_update_required = True
            if ctrl_update_required:
                # tenant_id must come from network in case network is shared
                net_tenant_id = self._get_port_net_tenantid(context, new_port)
                new_port = self._extend_port_dict_binding(context, new_port)
                mapped_port = self._map_state_and_status(new_port)
                self.servers.rest_update_port(net_tenant_id,
                                              new_port["network_id"],
                                              mapped_port)
            agent_update_required = self.update_security_group_on_port(
                context, port_id, port, orig_port, new_port)
        agent_update_required |= self.is_security_group_member_updated(
            context, orig_port, new_port)
        # return new_port
        return new_port
    # NOTE(kevinbenton): workaround for eventlet/mysql deadlock
    @utils.synchronized('bsn-port-barrier')
    @put_context_in_serverpool
    def delete_port(self, context, port_id, l3_port_check=True):
        """Delete a port.
        :param context: neutron api request context
        :param id: UUID representing the port to delete.
        :raises: exceptions.PortInUse
        :raises: exceptions.PortNotFound
        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug(_("NeutronRestProxyV2: delete_port() called"))
        # if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
        if l3_port_check:
            self.prevent_l3_port_deletion(context, port_id)
        with context.session.begin(subtransactions=True):
            router_ids = self.disassociate_floatingips(
                context, port_id, do_notify=False)
            self._delete_port_security_group_bindings(context, port_id)
            port = super(NeutronRestProxyV2, self).get_port(context, port_id)
            # Tenant ID must come from network in case the network is shared
            tenid = self._get_port_net_tenantid(context, port)
            self._delete_port(context, port_id)
            self.servers.rest_delete_port(tenid, port['network_id'], port_id)
        # now that we've left db transaction, we are safe to notify
        self.notify_routers_updated(context, router_ids)
@put_context_in_serverpool
def create_subnet(self, context, subnet):
LOG.debug(_("NeutronRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# create subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).create_subnet(context, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
@put_context_in_serverpool
def update_subnet(self, context, id, subnet):
LOG.debug(_("NeutronRestProxyV2: update_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# update subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).update_subnet(context, id, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
    # NOTE(kevinbenton): workaround for eventlet/mysql deadlock
    @utils.synchronized('bsn-port-barrier')
    @put_context_in_serverpool
    def delete_subnet(self, context, id):
        """Delete a subnet and push the updated network to the controller."""
        LOG.debug(_("NeutronRestProxyV2: delete_subnet() called"))
        orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
        net_id = orig_subnet['network_id']
        with context.session.begin(subtransactions=True):
            # delete subnet in DB
            super(NeutronRestProxyV2, self).delete_subnet(context, id)
            orig_net = super(NeutronRestProxyV2, self).get_network(context,
                                                                   net_id)
            # update network on network controller - exception will rollback
            self._send_update_network(orig_net, context)
def _get_tenant_default_router_rules(self, tenant):
rules = cfg.CONF.ROUTER.tenant_default_router_rule
defaultset = []
tenantset = []
for rule in rules:
items = rule.split(':')
if len(items) == 5:
(tenantid, source, destination, action, nexthops) = items
elif len(items) == 4:
(tenantid, source, destination, action) = items
nexthops = ''
else:
continue
parsedrule = {'source': source,
'destination': destination, 'action': action,
'nexthops': nexthops.split(',')}
if parsedrule['nexthops'][0] == '':
parsedrule['nexthops'] = []
if tenantid == '*':
defaultset.append(parsedrule)
if tenantid == tenant:
tenantset.append(parsedrule)
if tenantset:
return tenantset
return defaultset
    @put_context_in_serverpool
    def create_router(self, context, router):
        """Create a router in the DB and mirror it to the controller."""
        LOG.debug(_("NeutronRestProxyV2: create_router() called"))
        self._warn_on_state_status(router['router'])
        tenant_id = self._get_tenant_id_for_create(context, router["router"])
        # set default router rules
        rules = self._get_tenant_default_router_rules(tenant_id)
        router['router']['router_rules'] = rules
        with context.session.begin(subtransactions=True):
            # create router in DB
            new_router = super(NeutronRestProxyV2, self).create_router(context,
                                                                       router)
            mapped_router = self._map_state_and_status(new_router)
            # mirror to the controller inside the transaction so a REST
            # failure rolls back the DB creation
            self.servers.rest_create_router(tenant_id, mapped_router)
        # return created router
        return new_router
    @put_context_in_serverpool
    def update_router(self, context, router_id, router):
        """Update a router in the DB and mirror it to the controller."""
        LOG.debug(_("NeutronRestProxyV2.update_router() called"))
        self._warn_on_state_status(router['router'])
        orig_router = super(NeutronRestProxyV2, self).get_router(context,
                                                                 router_id)
        tenant_id = orig_router["tenant_id"]
        with context.session.begin(subtransactions=True):
            new_router = super(NeutronRestProxyV2,
                               self).update_router(context, router_id, router)
            router = self._map_state_and_status(new_router)
            # update router on network controller
            self.servers.rest_update_router(tenant_id, router, router_id)
        # return updated router
        return new_router
    # NOTE(kevinbenton): workaround for eventlet/mysql deadlock.
    # delete_router ends up calling _delete_port instead of delete_port.
    @utils.synchronized('bsn-port-barrier')
    @put_context_in_serverpool
    def delete_router(self, context, router_id):
        """Delete a router after checking it is unused; mirror the delete.

        :raises: l3.RouterInUse when floating IPs or interface ports remain.
        """
        LOG.debug(_("NeutronRestProxyV2: delete_router() called"))
        with context.session.begin(subtransactions=True):
            orig_router = self._get_router(context, router_id)
            tenant_id = orig_router["tenant_id"]
            # Ensure that the router is not used
            router_filter = {'router_id': [router_id]}
            fips = self.get_floatingips_count(context.elevated(),
                                              filters=router_filter)
            if fips:
                raise l3.RouterInUse(router_id=router_id)
            device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
            device_filter = {'device_id': [router_id],
                             'device_owner': [device_owner]}
            ports = self.get_ports_count(context.elevated(),
                                         filters=device_filter)
            if ports:
                raise l3.RouterInUse(router_id=router_id)
            ret_val = super(NeutronRestProxyV2,
                            self).delete_router(context, router_id)
            # delete from network ctrl
            self.servers.rest_delete_router(tenant_id, router_id)
        return ret_val
    @put_context_in_serverpool
    def add_router_interface(self, context, router_id, interface_info):
        """Attach a subnet/port to a router and mirror to the controller."""
        LOG.debug(_("NeutronRestProxyV2: add_router_interface() called"))
        # Validate args
        router = self._get_router(context, router_id)
        tenant_id = router['tenant_id']
        with context.session.begin(subtransactions=True):
            # create interface in DB
            new_intf_info = super(NeutronRestProxyV2,
                                  self).add_router_interface(context,
                                                             router_id,
                                                             interface_info)
            port = self._get_port(context, new_intf_info['port_id'])
            net_id = port['network_id']
            subnet_id = new_intf_info['subnet_id']
            # we will use the port's network id as interface's id
            interface_id = net_id
            intf_details = self._get_router_intf_details(context,
                                                         interface_id,
                                                         subnet_id)
            # create interface on the network controller
            self.servers.rest_add_router_interface(tenant_id, router_id,
                                                   intf_details)
        return new_intf_info
    @put_context_in_serverpool
    def remove_router_interface(self, context, router_id, interface_info):
        """Detach a subnet/port from a router and mirror to the controller.

        :raises: exceptions.BadRequest when neither subnet_id nor port_id
                 is supplied.
        """
        LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called"))
        # Validate args
        router = self._get_router(context, router_id)
        tenant_id = router['tenant_id']
        # we will first get the interface identifier before deleting in the DB
        if not interface_info:
            msg = _("Either subnet_id or port_id must be specified")
            raise exceptions.BadRequest(resource='router', msg=msg)
        if 'port_id' in interface_info:
            port = self._get_port(context, interface_info['port_id'])
            interface_id = port['network_id']
        elif 'subnet_id' in interface_info:
            subnet = self._get_subnet(context, interface_info['subnet_id'])
            interface_id = subnet['network_id']
        else:
            msg = _("Either subnet_id or port_id must be specified")
            raise exceptions.BadRequest(resource='router', msg=msg)
        with context.session.begin(subtransactions=True):
            # remove router in DB
            del_ret = super(NeutronRestProxyV2,
                            self).remove_router_interface(context,
                                                          router_id,
                                                          interface_info)
            # remove the interface on the network controller
            self.servers.rest_remove_router_interface(tenant_id, router_id,
                                                      interface_id)
        return del_ret
    @put_context_in_serverpool
    def create_floatingip(self, context, floatingip):
        """Create a floating IP and mirror it to the controller.

        Falls back to a full external-network update when the controller
        does not advertise the 'floatingip' capability.
        """
        LOG.debug(_("NeutronRestProxyV2: create_floatingip() called"))
        with context.session.begin(subtransactions=True):
            # create floatingip in DB
            new_fl_ip = super(NeutronRestProxyV2,
                              self).create_floatingip(context, floatingip)
            # create floatingip on the network controller
            try:
                if 'floatingip' in self.servers.get_capabilities():
                    self.servers.rest_create_floatingip(
                        new_fl_ip['tenant_id'], new_fl_ip)
                else:
                    self._send_floatingip_update(context)
            except servermanager.RemoteRestError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _("NeutronRestProxyV2: Unable to create remote "
                          "floating IP: %s"), e)
        # return created floating IP
        return new_fl_ip
    @put_context_in_serverpool
    def update_floatingip(self, context, id, floatingip):
        """Update a floating IP and mirror the change to the controller."""
        LOG.debug(_("NeutronRestProxyV2: update_floatingip() called"))
        with context.session.begin(subtransactions=True):
            # update floatingip in DB
            new_fl_ip = super(NeutronRestProxyV2,
                              self).update_floatingip(context, id, floatingip)
            # update network on network controller
            if 'floatingip' in self.servers.get_capabilities():
                self.servers.rest_update_floatingip(new_fl_ip['tenant_id'],
                                                    new_fl_ip, id)
            else:
                # controller lacks the 'floatingip' capability; re-send
                # the whole external network instead
                self._send_floatingip_update(context)
        return new_fl_ip
    @put_context_in_serverpool
    def delete_floatingip(self, context, id):
        """Delete a floating IP and mirror the removal to the controller."""
        LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called"))
        with context.session.begin(subtransactions=True):
            # delete floating IP in DB
            old_fip = super(NeutronRestProxyV2, self).get_floatingip(context,
                                                                     id)
            super(NeutronRestProxyV2, self).delete_floatingip(context, id)
            # update network on network controller
            if 'floatingip' in self.servers.get_capabilities():
                self.servers.rest_delete_floatingip(old_fip['tenant_id'], id)
            else:
                # controller lacks the 'floatingip' capability; re-send
                # the whole external network instead
                self._send_floatingip_update(context)
@put_context_in_serverpool
def disassociate_floatingips(self, context, port_id, do_notify=True):
LOG.debug(_("NeutronRestProxyV2: diassociate_floatingips() called"))
router_ids = super(NeutronRestProxyV2, self).disassociate_floatingips(
context, port_id, do_notify=do_notify)
self._send_floatingip_update(context)
return router_ids
    # overriding method from l3_db as original method calls
    # self.delete_floatingip() which in turn calls self.delete_port() which
    # is locked with 'bsn-port-barrier'
    @put_context_in_serverpool
    def delete_disassociated_floatingips(self, context, network_id):
        """Delete floating IPs on *network_id* with no port/router attached."""
        query = self._model_query(context, l3_db.FloatingIP)
        query = query.filter_by(floating_network_id=network_id,
                                fixed_port_id=None,
                                router_id=None)
        for fip in query:
            context.session.delete(fip)
            # remove the internal port backing the floating IP
            self._delete_port(context.elevated(), fip['floating_port_id'])
    def _send_floatingip_update(self, context):
        """Re-send the external network to the controller, if one exists."""
        try:
            ext_net_id = self.get_external_network_id(context)
            if ext_net_id:
                # Use the elevated state of the context for the ext_net query
                admin_context = context.elevated()
                ext_net = super(NeutronRestProxyV2,
                                self).get_network(admin_context, ext_net_id)
                # update external network on network controller
                self._send_update_network(ext_net, admin_context)
        except exceptions.TooManyExternalNetworks:
            # get_external_network can raise errors when multiple external
            # networks are detected, which isn't supported by the Plugin
            LOG.error(_("NeutronRestProxyV2: too many external networks"))
    def _add_host_route(self, context, destination, port):
        """Route *destination* through each of *port*'s fixed IPs.

        For every fixed IP on the port, replace the owning subnet's
        host_routes with a single route that sends *destination* via
        that IP, then notify the DHCP agent of the subnet update.
        """
        subnet = {}
        for fixed_ip in port['fixed_ips']:
            subnet_id = fixed_ip['subnet_id']
            nexthop = fixed_ip['ip_address']
            # NOTE(review): host_routes is overwritten (not appended to)
            # on each iteration -- confirm pre-existing routes on the
            # subnet are meant to be discarded.
            subnet['host_routes'] = [{'destination': destination,
                                      'nexthop': nexthop}]
            updated_subnet = self.update_subnet(context,
                                                subnet_id,
                                                {'subnet': subnet})
            payload = {'subnet': updated_subnet}
            self._dhcp_agent_notifier.notify(context, payload,
                                             'subnet.update.end')
            LOG.debug(_("Adding host route: "))
            LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"),
                      {'dst': destination, 'next': nexthop})
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe module to ensure a checkout is consistant on a bot."""
from recipe_engine import recipe_api
class BotUpdateApi(recipe_api.RecipeApi):
  """Recipe API wrapping the bot_update.py resource script."""

  def __init__(self, issue, patchset, repository, gerrit_ref, rietveld,
               revision, parent_got_revision, deps_revision_overrides,
               fail_patch, *args, **kwargs):
    # All of these come from build properties and serve as defaults for
    # ensure_checkout() arguments that are not passed explicitly.
    self._issue = issue
    self._patchset = patchset
    self._repository = repository
    self._gerrit_ref = gerrit_ref
    self._rietveld = rietveld
    self._revision = revision
    self._parent_got_revision = parent_got_revision
    self._deps_revision_overrides = deps_revision_overrides
    self._fail_patch = fail_patch
    # Properties dict emitted by the most recent bot_update run; exposed
    # via the last_returned_properties property below.
    self._last_returned_properties = {}
    super(BotUpdateApi, self).__init__(*args, **kwargs)

  def __call__(self, name, cmd, **kwargs):
    """Wrapper for easy calling of bot_update."""
    assert isinstance(cmd, (list, tuple))
    bot_update_path = self.resource('bot_update.py')
    kwargs.setdefault('infra_step', True)
    kwargs.setdefault('env', {})
    kwargs['env'].setdefault('PATH', '%(PATH)s')
    # Append the recipe package's repo root to PATH so bot_update can
    # locate its helper tools.
    kwargs['env']['PATH'] = self.m.path.pathsep.join([
        kwargs['env']['PATH'], str(self._module.PACKAGE_REPO_ROOT)])
    return self.m.python(name, bot_update_path, cmd, **kwargs)

  @property
  def last_returned_properties(self):
    # Properties (e.g. got_revision) from the last bot_update step run.
    return self._last_returned_properties

  # DO NOT USE.
  # The below method will be removed after there are no more callers of
  # tryserver.maybe_apply_issue (skbug.com/5588).
  def apply_gerrit_ref(self, root, gerrit_no_reset=False,
                       gerrit_no_rebase_patch_ref=False, **kwargs):
    """Apply the configured Gerrit ref on top of *root* (deprecated)."""
    apply_gerrit_path = self.resource('apply_gerrit.py')
    kwargs.setdefault('infra_step', True)
    kwargs.setdefault('env', {}).setdefault('PATH', '%(PATH)s')
    kwargs['env']['PATH'] = self.m.path.pathsep.join([
        kwargs['env']['PATH'], str(self._module.PACKAGE_REPO_ROOT)])
    cmd = [
        '--gerrit_repo', self._repository,
        '--gerrit_ref', self._gerrit_ref or '',
        '--root', str(root),
    ]
    if gerrit_no_reset:
      cmd.append('--gerrit_no_reset')
    if gerrit_no_rebase_patch_ref:
      cmd.append('--gerrit_no_rebase_patch_ref')
    return self.m.python('apply_gerrit', apply_gerrit_path, cmd, **kwargs)

  def ensure_checkout(self, gclient_config=None, suffix=None,
                      patch=True, update_presentation=True,
                      patch_root=None, no_shallow=False,
                      with_branch_heads=False, refs=None,
                      patch_oauth2=False, use_site_config_creds=True,
                      output_manifest=True, clobber=False,
                      root_solution_revision=None, rietveld=None, issue=None,
                      patchset=None, gerrit_no_reset=False,
                      gerrit_no_rebase_patch_ref=False, **kwargs):
    """
    Args:
      use_site_config_creds: If the oauth2 credentials are in the buildbot
        site_config. See crbug.com/624212 for more information.
      gclient_config: The gclient configuration to use when running bot_update.
        If omitted, the current gclient configuration is used.
      rietveld: The rietveld server to use. If omitted, will infer from
        the 'rietveld' property.
      issue: The rietveld issue number to use. If omitted, will infer from
        the 'issue' property.
      patchset: The rietveld issue patchset to use. If omitted, will infer from
        the 'patchset' property.
    """
    refs = refs or []
    # We can re-use the gclient spec from the gclient module, since all the
    # data bot_update needs is already configured into the gclient spec.
    cfg = gclient_config or self.m.gclient.c
    assert cfg is not None, (
        'missing gclient_config or forgot api.gclient.set_config(...) before?')
    # Construct our bot_update command.  This basically has to be inclusive
    # of everything required for bot_update to know:
    root = patch_root
    if root is None:
      root = self.m.gclient.calculate_patch_root(
          self.m.properties.get('patch_project'), cfg)
    if patch:
      issue = issue or self._issue
      patchset = patchset or self._patchset
      gerrit_repo = self._repository
      gerrit_ref = self._gerrit_ref
    else:
      # The trybot recipe sometimes wants to de-apply the patch. In which case
      # we pretend the issue/patchset never existed.
      issue = patchset = email_file = key_file = None
      gerrit_repo = gerrit_ref = None
    # Issue and patchset must come together.
    if issue:
      assert patchset
    if patchset:
      assert issue
    # The gerrit_ref and gerrit_repo must be together or not at all. If one is
    # missing, clear both of them.
    if not gerrit_ref or not gerrit_repo:
      gerrit_repo = gerrit_ref = None
    # NOTE(review): 'is not None' would be the idiomatic comparison here.
    assert (gerrit_ref != None) == (gerrit_repo != None)
    # Point to the oauth2 auth files if specified.
    # These paths are where the bots put their credential files.
    if patch_oauth2:
      # TODO(martiniss): remove this hack :(. crbug.com/624212
      if use_site_config_creds:
        email_file = self.m.path['build'].join(
            'site_config', '.rietveld_client_email')
        key_file = self.m.path['build'].join(
            'site_config', '.rietveld_secret_key')
      else: #pragma: no cover
        #TODO(martiniss): make this use path.join, so it works on windows
        email_file = '/creds/rietveld/client_email'
        key_file = '/creds/rietveld/secret_key'
    else:
      email_file = key_file = None
    # Allow patch_project's revision if necessary.
    # This is important for projects which are checked out as DEPS of the
    # gclient solution.
    self.m.gclient.set_patch_project_revision(
        self.m.properties.get('patch_project'), cfg)
    rev_map = cfg.got_revision_mapping.as_jsonish()
    flags = [
        # What do we want to check out (spec/root/rev/rev_map).
        ['--spec', self.m.gclient.config_to_pythonish(cfg)],
        ['--patch_root', root],
        ['--revision_mapping_file', self.m.json.input(rev_map)],
        ['--git-cache-dir', cfg.cache_dir],
        # How to find the patch, if any (issue/patchset).
        ['--issue', issue],
        ['--patchset', patchset],
        ['--rietveld_server', rietveld or self._rietveld],
        ['--gerrit_repo', gerrit_repo],
        ['--gerrit_ref', gerrit_ref],
        ['--apply_issue_email_file', email_file],
        ['--apply_issue_key_file', key_file],
        # Hookups to JSON output back into recipes.
        ['--output_json', self.m.json.output()],]
    # Collect all fixed revisions to simulate them in the json output.
    # Fixed revision are the explicit input revisions of bot_update.py, i.e.
    # every command line parameter "--revision name@value".
    fixed_revisions = {}
    revisions = {}
    for solution in cfg.solutions:
      if solution.revision:
        revisions[solution.name] = solution.revision
      elif solution == cfg.solutions[0]:
        # The root solution falls back to the build's revision properties.
        revisions[solution.name] = (
            self._parent_got_revision or
            self._revision or
            'HEAD')
    if self.m.gclient.c and self.m.gclient.c.revisions:
      revisions.update(self.m.gclient.c.revisions)
    if cfg.solutions and root_solution_revision:
      revisions[cfg.solutions[0].name] = root_solution_revision
    # Allow for overrides required to bisect into rolls.
    revisions.update(self._deps_revision_overrides)
    for name, revision in sorted(revisions.items()):
      fixed_revision = self.m.gclient.resolve_revision(revision)
      if fixed_revision:
        fixed_revisions[name] = fixed_revision
        flags.append(['--revision', '%s@%s' % (name, fixed_revision)])
    # Add extra fetch refspecs.
    for ref in refs:
      flags.append(['--refs', ref])
    # Filter out flags that are None.
    cmd = [item for flag_set in flags
           for item in flag_set if flag_set[1] is not None]
    if clobber:
      cmd.append('--clobber')
    if no_shallow:
      cmd.append('--no_shallow')
    if output_manifest:
      cmd.append('--output_manifest')
    if with_branch_heads or cfg.with_branch_heads:
      cmd.append('--with_branch_heads')
    if gerrit_no_reset:
      cmd.append('--gerrit_no_reset')
    if gerrit_no_rebase_patch_ref:
      cmd.append('--gerrit_no_rebase_patch_ref')
    # Inject Json output for testing.
    first_sln = cfg.solutions[0].name
    step_test_data = lambda: self.test_api.output_json(
        root, first_sln, rev_map, self._fail_patch,
        output_manifest=output_manifest, fixed_revisions=fixed_revisions)
    # Add suffixes to the step name, if specified.
    name = 'bot_update'
    if not patch:
      name += ' (without patch)'
    if suffix:
      name += ' - %s' % suffix
    # Ah hah! Now that everything is in place, lets run bot_update!
    step_result = None
    try:
      # 87 and 88 are the 'patch failure' codes for patch download and patch
      # apply, respectively. We don't actually use the error codes, and instead
      # rely on emitted json to determine cause of failure.
      step_result = self(name, cmd, step_test_data=step_test_data,
                         ok_ret=(0, 87, 88), **kwargs)
    except self.m.step.StepFailure as f:
      step_result = f.result
      raise
    finally:
      if step_result:
        self._last_returned_properties = step_result.json.output.get(
            'properties', {})
        if update_presentation:
          # Set properties such as got_revision.
          # NOTE(review): iteritems() is Python 2 only.
          for prop_name, prop_value in (
              self.last_returned_properties.iteritems()):
            step_result.presentation.properties[prop_name] = prop_value
        # Add helpful step description in the step UI.
        if 'step_text' in step_result.json.output:
          step_text = step_result.json.output['step_text']
          step_result.presentation.step_text = step_text
        # Add log line output.
        if 'log_lines' in step_result.json.output:
          for log_name, log_lines in step_result.json.output['log_lines']:
            step_result.presentation.logs[log_name] = log_lines.splitlines()
        # Set the "checkout" path for the main solution.
        # This is used by the Chromium module to figure out where to look for
        # the checkout.
        # If there is a patch failure, emit another step that said things
        # failed.
        if step_result.json.output.get('patch_failure'):
          return_code = step_result.json.output.get('patch_apply_return_code')
          if return_code == 3:
            # This is download failure, hence an infra failure.
            # Sadly, python.failing_step doesn't support kwargs.
            self.m.python.inline(
                'Patch failure - Try Rebasing',
                ('import sys;'
                 'print "Patch download failed. See bot_update step for'
                 ' details";sys.exit(1)'),
                infra_step=True,
                step_test_data=lambda: self.m.raw_io.test_api.output(
                    'Patch download failed. See bot_update step for details',
                    retcode=1)
            )
          else:
            # This is actual patch failure.
            self.m.tryserver.set_patch_failure_tryjob_result()
            self.m.python.failing_step(
                'Patch failure', 'Check the bot_update step for details')
        # bot_update actually just sets root to be the folder name of the
        # first solution.
        if step_result.json.output['did_run']:
          co_root = step_result.json.output['root']
          cwd = kwargs.get('cwd', self.m.path['slave_build'])
          if 'checkout' not in self.m.path:
            self.m.path['checkout'] = cwd.join(*co_root.split(self.m.path.sep))
    return step_result
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SVG transformation list parser
#
# Copyright 2010 Louis Simard
#
# This file is part of Scour, http://www.codedread.com/scour/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Small recursive descent parser for SVG transform="" data.
In [1]: from svg_transform import svg_transform_parser
In [3]: svg_transform_parser.parse('translate(50, 50)')
Out[3]: [('translate', [50.0, 50.0])]
In [4]: svg_transform_parser.parse('translate(50)')
Out[4]: [('translate', [50.0])]
In [5]: svg_transform_parser.parse('rotate(36 50,50)')
Out[5]: [('rotate', [36.0, 50.0, 50.0])]
In [6]: svg_transform_parser.parse('rotate(36)')
Out[6]: [('rotate', [36.0])]
In [7]: svg_transform_parser.parse('skewX(20)')
Out[7]: [('skewX', [20.0])]
In [8]: svg_transform_parser.parse('skewY(40)')
Out[8]: [('skewY', [40.0])]
In [9]: svg_transform_parser.parse('scale(2 .5)')
Out[9]: [('scale', [2.0, 0.5])]
In [10]: svg_transform_parser.parse('scale(.5)')
Out[10]: [('scale', [0.5])]
In [11]: svg_transform_parser.parse('matrix(1 0 50 0 1 80)')
Out[11]: [('matrix', [1.0, 0.0, 50.0, 0.0, 1.0, 80.0])]
Multiple transformations are supported:
In [12]: svg_transform_parser.parse('translate(30 -30) rotate(36)')
Out[12]: [('translate', [30.0, -30.0]), ('rotate', [36.0])]
"""
from __future__ import absolute_import
import re
from decimal import Decimal
from functools import partial
from six.moves import range
# Sentinel.
class _EOF(object):
def __repr__(self):
return 'EOF'
# Shared end-of-input sentinel yielded by Lexer.lex as the final token.
EOF = _EOF()
# Token definitions for SVG transform data.  Group order matters:
# 'float' is listed before 'int' so numbers with a '.' or an exponent
# are claimed by the float pattern first.
lexicon = [
    ('float', r'[-+]?(?:(?:[0-9]*\.[0-9]+)|(?:[0-9]+\.?))(?:[Ee][-+]?[0-9]+)?'),
    ('int', r'[-+]?[0-9]+'),
    ('command', r'(?:matrix|translate|scale|rotate|skew[XY])'),
    ('coordstart', r'\('),
    ('coordend', r'\)'),
]
class Lexer(object):
    """Tokenizer for SVG transform data.

    All (name, pattern) pairs of the lexicon are combined into one
    alternation of named groups, and the input is scanned greedily --
    matching the SVG requirement that tokens be maximal (Python regexes
    are greedy by default).

    Implementation style inspired by:
    http://www.gooli.org/blog/a-simple-lexer-in-python/
    """

    def __init__(self, lexicon):
        self.lexicon = lexicon
        # One named group per token type, joined into a single pattern.
        self.regex_string = '|'.join(
            '(?P<%s>%s)' % pair for pair in lexicon)
        self.regex = re.compile(self.regex_string)

    def lex(self, text):
        """Yield (token_type, str_data) tokens.

        The final token is always (EOF, None), where EOF is the
        module-level sentinel.
        """
        for found in self.regex.finditer(text):
            # Exactly one named group matched; find it and emit it.
            for token_name, _ in self.lexicon:
                data = found.group(token_name)
                if data is not None:
                    yield (token_name, data)
                    break
        yield (EOF, None)
# Module-level lexer instance configured with the SVG transform lexicon.
svg_lexer = Lexer(lexicon)
class SVGTransformationParser(object):
    """ Parse SVG transform="" data into a list of commands.
    Each distinct command will take the form of a tuple (type, data). The
    `type` is the character string that defines the type of transformation in the
    transform data, so either of "translate", "rotate", "scale", "matrix",
    "skewX" and "skewY". Data is always a list of numbers contained within the
    transformation's parentheses.
    See the SVG documentation for the interpretation of the individual elements
    for each transformation.
    The main method is `parse(text)`. It can only consume actual strings, not
    filelike objects or iterators.
    """
    def __init__(self, lexer=svg_lexer):
        self.lexer = lexer
        # Maps each transformation name to the rule parsing its arguments.
        self.command_dispatch = {
            'translate': self.rule_1or2numbers,
            'scale': self.rule_1or2numbers,
            'skewX': self.rule_1number,
            'skewY': self.rule_1number,
            'rotate': self.rule_1or3numbers,
            'matrix': self.rule_6numbers,
        }
        # self.number_tokens = set(['int', 'float'])
        self.number_tokens = list(['int', 'float'])
    def parse(self, text):
        """ Parse a string of SVG transform="" data.
        """
        gen = self.lexer.lex(text)
        # Bind the generator's next() once; each call consumes one token.
        next_val_fn = partial(next, *(gen,))
        commands = []
        token = next_val_fn()
        while token[0] is not EOF:
            command, token = self.rule_svg_transform(next_val_fn, token)
            commands.append(command)
        return commands
    def rule_svg_transform(self, next_val_fn, token):
        """Parse one 'name(numbers)' group; return ((name, numbers), token)."""
        if token[0] != 'command':
            raise SyntaxError("expecting a transformation type; got %r" % (token,))
        command = token[1]
        rule = self.command_dispatch[command]
        token = next_val_fn()
        if token[0] != 'coordstart':
            raise SyntaxError("expecting '('; got %r" % (token,))
        numbers, token = rule(next_val_fn, token)
        if token[0] != 'coordend':
            raise SyntaxError("expecting ')'; got %r" % (token,))
        token = next_val_fn()
        return (command, numbers), token
    def rule_1or2numbers(self, next_val_fn, token):
        """Parse 1 or 2 numbers (translate/scale argument lists)."""
        numbers = []
        # 1st number is mandatory
        token = next_val_fn()
        number, token = self.rule_number(next_val_fn, token)
        numbers.append(number)
        # 2nd number is optional
        number, token = self.rule_optional_number(next_val_fn, token)
        if number is not None:
            numbers.append(number)
        return numbers, token
    def rule_1number(self, next_val_fn, token):
        """Parse exactly 1 number (skewX/skewY argument lists)."""
        # this number is mandatory
        token = next_val_fn()
        number, token = self.rule_number(next_val_fn, token)
        numbers = [number]
        return numbers, token
    def rule_1or3numbers(self, next_val_fn, token):
        """Parse 1 or 3 numbers (rotate's angle plus optional center)."""
        numbers = []
        # 1st number is mandatory
        token = next_val_fn()
        number, token = self.rule_number(next_val_fn, token)
        numbers.append(number)
        # 2nd number is optional
        number, token = self.rule_optional_number(next_val_fn, token)
        if number is not None:
            # but, if the 2nd number is provided, the 3rd is mandatory.
            # we can't have just 2.
            numbers.append(number)
            number, token = self.rule_number(next_val_fn, token)
            numbers.append(number)
        return numbers, token
    def rule_6numbers(self, next_val_fn, token):
        """Parse exactly 6 numbers (matrix argument list)."""
        numbers = []
        token = next_val_fn()
        # all numbers are mandatory
        for i in range(6):
            number, token = self.rule_number(next_val_fn, token)
            numbers.append(number)
        return numbers, token
    def rule_number(self, next_val_fn, token):
        """Consume a mandatory number token; return (Decimal, next token)."""
        if token[0] not in self.number_tokens:
            raise SyntaxError("expecting a number; got %r" % (token,))
        # NOTE(review): the '* 1' looks like a deliberate Decimal
        # arithmetic touch -- confirm whether Decimal(token[1]) alone
        # would behave identically.
        x = Decimal(token[1]) * 1
        token = next_val_fn()
        return x, token
    def rule_optional_number(self, next_val_fn, token):
        """Like rule_number, but returns (None, token) on a non-number."""
        if token[0] not in self.number_tokens:
            return None, token
        else:
            x = Decimal(token[1]) * 1
            token = next_val_fn()
            return x, token
# Module-level parser instance; use svg_transform_parser.parse(text).
svg_transform_parser = SVGTransformationParser()
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import excutils
from tempest.openstack.common import log as logging
import tempest.test
# Shortcuts to the global tempest configuration and the module logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseComputeTest(tempest.test.BaseTestCase):
    """Base test case class for all Compute API tests."""

    _api_version = 2
    force_tenant_isolation = False

    @classmethod
    def resource_setup(cls):
        """Set up service clients, config shortcuts and resource registries."""
        cls.set_network_resources()
        super(BaseComputeTest, cls).resource_setup()
        # TODO(andreaf) WE should care also for the alt_manager here
        # but only once client lazy load in the manager is done
        cls.os = cls.get_client_manager()
        cls.multi_user = cls.check_multi_user()
        cls.build_interval = CONF.compute.build_interval
        cls.build_timeout = CONF.compute.build_timeout
        cls.ssh_user = CONF.compute.ssh_user
        cls.image_ref = CONF.compute.image_ref
        cls.image_ref_alt = CONF.compute.image_ref_alt
        cls.flavor_ref = CONF.compute.flavor_ref
        cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
        cls.image_ssh_user = CONF.compute.image_ssh_user
        cls.image_ssh_password = CONF.compute.image_ssh_password
        # Registries of created resources, torn down in resource_cleanup().
        cls.servers = []
        cls.images = []
        cls.security_groups = []
        cls.server_groups = []
        if cls._api_version == 2:
            cls.servers_client = cls.os.servers_client
            cls.flavors_client = cls.os.flavors_client
            cls.images_client = cls.os.images_client
            cls.extensions_client = cls.os.extensions_client
            cls.floating_ips_client = cls.os.floating_ips_client
            cls.keypairs_client = cls.os.keypairs_client
            cls.security_groups_client = cls.os.security_groups_client
            cls.quotas_client = cls.os.quotas_client
            # NOTE(mriedem): os-quota-class-sets is v2 API only
            cls.quota_classes_client = cls.os.quota_classes_client
            # NOTE(mriedem): os-networks is v2 API only
            cls.networks_client = cls.os.networks_client
            cls.limits_client = cls.os.limits_client
            cls.volumes_extensions_client = cls.os.volumes_extensions_client
            cls.volumes_client = cls.os.volumes_client
            cls.interfaces_client = cls.os.interfaces_client
            cls.fixed_ips_client = cls.os.fixed_ips_client
            cls.availability_zone_client = cls.os.availability_zone_client
            cls.agents_client = cls.os.agents_client
            cls.aggregates_client = cls.os.aggregates_client
            cls.services_client = cls.os.services_client
            cls.instance_usages_audit_log_client = \
                cls.os.instance_usages_audit_log_client
            cls.hypervisor_client = cls.os.hypervisor_client
            cls.certificates_client = cls.os.certificates_client
            cls.migrations_client = cls.os.migrations_client
            cls.security_group_default_rules_client = (
                cls.os.security_group_default_rules_client)
        else:
            msg = ("Unexpected API version is specified (%s)" %
                   cls._api_version)
            raise exceptions.InvalidConfiguration(message=msg)

    @classmethod
    def check_multi_user(cls):
        """Verify the credential provider can supply multiple users."""
        # We have a list of accounts now, so just checking if the list is gt 2
        if not cls.isolated_creds.is_multi_user():
            msg = "Not enough users available for multi-user testing"
            raise exceptions.InvalidConfiguration(msg)
        return True

    @classmethod
    def clear_servers(cls):
        """Delete registered servers, then wait for each to terminate."""
        LOG.debug('Clearing servers: %s', ','.join(
            server['id'] for server in cls.servers))
        for server in cls.servers:
            try:
                cls.servers_client.delete_server(server['id'])
            except exceptions.NotFound:
                # Something else already cleaned up the server, nothing to be
                # worried about
                pass
            except Exception:
                LOG.exception('Deleting server %s failed' % server['id'])
        for server in cls.servers:
            try:
                cls.servers_client.wait_for_server_termination(server['id'])
            except Exception:
                LOG.exception('Waiting for deletion of server %s failed'
                              % server['id'])

    @classmethod
    def server_check_teardown(cls):
        """Check the shared server is clean enough for the next test.

        Deletes the shared server when it is dirty (not ACTIVE); the
        setUp method is then responsible for creating a new server.
        Exceptions raised in tearDown fail the test case, so this
        method is meant to be used only by tearDown methods, when the
        shared server's id is stored in the class's ``server_id``.
        """
        if getattr(cls, 'server_id', None) is not None:
            try:
                cls.servers_client.wait_for_server_status(cls.server_id,
                                                          'ACTIVE')
            except Exception as exc:
                LOG.exception(exc)
                cls.servers_client.delete_server(cls.server_id)
                cls.servers_client.wait_for_server_termination(cls.server_id)
                cls.server_id = None
                raise

    @classmethod
    def clear_images(cls):
        """Delete every image registered during the tests."""
        LOG.debug('Clearing images: %s', ','.join(cls.images))
        for image_id in cls.images:
            try:
                cls.images_client.delete_image(image_id)
            except exceptions.NotFound:
                # The image may have already been deleted which is OK.
                pass
            except Exception:
                LOG.exception('Exception raised deleting image %s' % image_id)

    @classmethod
    def clear_security_groups(cls):
        """Delete every security group registered during the tests."""
        LOG.debug('Clearing security groups: %s', ','.join(
            str(sg['id']) for sg in cls.security_groups))
        for sg in cls.security_groups:
            try:
                cls.security_groups_client.delete_security_group(sg['id'])
            except exceptions.NotFound:
                # The security group may have already been deleted which is OK.
                pass
            except Exception as exc:
                LOG.info('Exception raised deleting security group %s',
                         sg['id'])
                LOG.exception(exc)

    @classmethod
    def clear_server_groups(cls):
        """Delete every server group registered during the tests."""
        LOG.debug('Clearing server groups: %s', ','.join(cls.server_groups))
        for server_group_id in cls.server_groups:
            try:
                cls.servers_client.delete_server_group(server_group_id)
            except exceptions.NotFound:
                # The server-group may have already been deleted which is OK.
                pass
            except Exception:
                LOG.exception('Exception raised deleting server-group %s',
                              server_group_id)

    @classmethod
    def resource_cleanup(cls):
        """Tear down all resources created through the class helpers."""
        cls.clear_images()
        cls.clear_servers()
        cls.clear_security_groups()
        cls.clear_server_groups()
        super(BaseComputeTest, cls).resource_cleanup()

    @classmethod
    def create_test_server(cls, **kwargs):
        """Wrapper utility that returns a test server."""
        name = data_utils.rand_name(cls.__name__ + "-instance")
        if 'name' in kwargs:
            name = kwargs.pop('name')
        flavor = kwargs.get('flavor', cls.flavor_ref)
        image_id = kwargs.get('image_id', cls.image_ref)
        resp, body = cls.servers_client.create_server(
            name, image_id, flavor, **kwargs)
        # handle the case of multiple servers
        servers = [body]
        if 'min_count' in kwargs or 'max_count' in kwargs:
            # Get servers created which name match with name param.
            r, b = cls.servers_client.list_servers()
            servers = [s for s in b['servers'] if s['name'].startswith(name)]
        if 'wait_until' in kwargs:
            for server in servers:
                try:
                    cls.servers_client.wait_for_server_status(
                        server['id'], kwargs['wait_until'])
                except Exception:
                    # Best-effort cleanup of every spawned server before
                    # re-raising the original wait failure.
                    with excutils.save_and_reraise_exception():
                        if ('preserve_server_on_error' not in kwargs
                            or kwargs['preserve_server_on_error'] is False):
                            for server in servers:
                                try:
                                    cls.servers_client.delete_server(
                                        server['id'])
                                except Exception:
                                    pass
        cls.servers.extend(servers)
        return resp, body

    @classmethod
    def create_security_group(cls, name=None, description=None):
        """Create (and register for cleanup) a security group."""
        if name is None:
            name = data_utils.rand_name(cls.__name__ + "-securitygroup")
        if description is None:
            description = data_utils.rand_name('description-')
        body = \
            cls.security_groups_client.create_security_group(name,
                                                             description)
        cls.security_groups.append(body)
        return body

    @classmethod
    def create_test_server_group(cls, name="", policy=None):
        """Create (and register for cleanup) a server group."""
        if not name:
            name = data_utils.rand_name(cls.__name__ + "-Server-Group")
        if policy is None:
            policy = ['affinity']
        resp, body = cls.servers_client.create_server_group(name, policy)
        cls.server_groups.append(body['id'])
        return resp, body

    def wait_for(self, condition):
        """Repeatedly calls condition() until a timeout."""
        start_time = int(time.time())
        while True:
            try:
                condition()
            except Exception:
                pass
            else:
                return
            if int(time.time()) - start_time >= self.build_timeout:
                # Final attempt: let the exception (if any) propagate.
                condition()
                return
            time.sleep(self.build_interval)

    @staticmethod
    def _delete_volume(volumes_client, volume_id):
        """Deletes the given volume and waits for it to be gone."""
        try:
            volumes_client.delete_volume(volume_id)
            # TODO(mriedem): We should move the wait_for_resource_deletion
            # into the delete_volume method as a convenience to the caller.
            volumes_client.wait_for_resource_deletion(volume_id)
        except exceptions.NotFound:
            LOG.warn("Unable to delete volume '%s' since it was not found. "
                     "Maybe it was already deleted?" % volume_id)

    @classmethod
    def prepare_instance_network(cls):
        """Request full network resources when SSH-over-floating is used."""
        if (CONF.compute.ssh_auth_method != 'disabled' and
                CONF.compute.ssh_connect_method == 'floating'):
            cls.set_network_resources(network=True, subnet=True, router=True,
                                      dhcp=True)

    @classmethod
    def create_image_from_server(cls, server_id, **kwargs):
        """Wrapper utility that returns an image created from the server."""
        name = data_utils.rand_name(cls.__name__ + "-image")
        if 'name' in kwargs:
            name = kwargs.pop('name')
        image = cls.images_client.create_image(server_id, name)
        image_id = data_utils.parse_image_id(image.response['location'])
        cls.images.append(image_id)
        if 'wait_until' in kwargs:
            cls.images_client.wait_for_image_status(image_id,
                                                    kwargs['wait_until'])
            image = cls.images_client.get_image(image_id)
            if kwargs['wait_until'] == 'ACTIVE':
                if kwargs.get('wait_for_server', True):
                    cls.servers_client.wait_for_server_status(server_id,
                                                              'ACTIVE')
        return image

    @classmethod
    def rebuild_server(cls, server_id, **kwargs):
        # Destroy an existing server and creates a new one
        if server_id:
            try:
                cls.servers_client.delete_server(server_id)
                cls.servers_client.wait_for_server_termination(server_id)
            except Exception:
                LOG.exception('Failed to delete server %s' % server_id)
        resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
        cls.password = server['adminPass']
        return server['id']

    @classmethod
    def delete_volume(cls, volume_id):
        """Deletes the given volume and waits for it to be gone."""
        cls._delete_volume(cls.volumes_extensions_client, volume_id)
class BaseV2ComputeTest(BaseComputeTest):
    """Base class for Compute v2 API tests using the JSON interface."""
    _api_version = 2
    _interface = "json"
class BaseComputeAdminTest(BaseComputeTest):
    """Base test case class for Compute Admin API tests."""
    _interface = "json"

    @classmethod
    def resource_setup(cls):
        """Create an admin client manager; skip when admin creds are missing."""
        super(BaseComputeAdminTest, cls).resource_setup()
        try:
            creds = cls.isolated_creds.get_admin_creds()
            cls.os_adm = clients.Manager(
                credentials=creds, interface=cls._interface)
        except NotImplementedError:
            msg = ("Missing Compute Admin API credentials in configuration.")
            raise cls.skipException(msg)
        cls.availability_zone_admin_client = (
            cls.os_adm.availability_zone_client)
class BaseV2ComputeAdminTest(BaseComputeAdminTest):
    """Base test case class for Compute Admin V2 API tests."""
    # Pin the admin tests to the v2 compute API.
    _api_version = 2
|
|
"""
kombu.transport.zmq
===================
ZeroMQ transport.
"""
from __future__ import absolute_import
import errno
import os
import socket
try:
import zmq
from zmq import ZMQError
except ImportError:
zmq = ZMQError = None # noqa
from kombu.five import Empty
from kombu.log import get_logger
from kombu.serialization import pickle
from kombu.utils import cached_property
from kombu.utils.eventio import poll, READ
from . import virtual
logger = get_logger('kombu.transport.zmq')
# Default TCP port for the sink/vent sockets.
DEFAULT_PORT = 5555
# Default high-water mark (max queued messages) for the vent socket.
DEFAULT_HWM = 128
# Default multiplier used by Channel._prepare_port for per-channel ports.
DEFAULT_INCR = 1
# Messages are serialized with pickle.
dumps, loads = pickle.dumps, pickle.loads
class MultiChannelPoller(object):
    """Poll multiple channels' sockets through a single poller."""

    # Event mask used when registering channel fds (read readiness).
    eventflags = READ

    def __init__(self):
        # active channels
        self._channels = set()
        # file descriptor -> channel map
        self._fd_to_chan = {}
        # poll implementation (epoll/kqueue/select)
        self.poller = poll()

    def close(self):
        """Unregister every fd and drop all channel references."""
        for fd in self._fd_to_chan:
            try:
                self.poller.unregister(fd)
            except KeyError:
                pass
        self._channels.clear()
        self._fd_to_chan.clear()
        self.poller = None

    def add(self, channel):
        """Start polling *channel* from the next poll cycle on."""
        self._channels.add(channel)

    def discard(self, channel):
        """Stop tracking *channel* and forget its fd mapping."""
        self._channels.discard(channel)
        self._fd_to_chan.pop(channel.client.connection.fd, None)

    def _register(self, channel):
        # (Re-)register the channel's socket fd for read events.
        conn = channel.client.connection
        self._fd_to_chan[conn.fd] = channel
        self.poller.register(conn.fd, self.eventflags)

    def on_poll_start(self):
        """Register all active channels before entering poll."""
        for channel in self._channels:
            self._register(channel)

    def on_readable(self, fileno):
        """Drain the channel behind *fileno*; return (message, channel)."""
        chan = self._fd_to_chan[fileno]
        return chan.drain_events(), chan

    def get(self, timeout=None):
        """Poll once and return the first readable event, else raise Empty."""
        self.on_poll_start()
        events = self.poller.poll(timeout)
        for fileno, _ in events or []:
            return self.on_readable(fileno)
        raise Empty()

    @property
    def fds(self):
        # Mapping of currently registered fds to their channels.
        return self._fd_to_chan
class Client(object):
    """PUSH/PULL messaging client.

    Outgoing messages are distributed over a PUSH ('vent') socket
    connected to every configured endpoint; incoming messages arrive
    on a bound PULL ('sink') socket when *enable_sink* is true.
    """

    def __init__(self, uri='tcp://127.0.0.1', port=DEFAULT_PORT,
                 hwm=DEFAULT_HWM, swap_size=None, enable_sink=True,
                 context=None):
        try:
            scheme, parts = uri.split('://')
        except ValueError:
            # No scheme in the URI: treat it as bare host(s) over TCP.
            scheme = 'tcp'
            parts = uri
        # Multiple endpoints may be given separated by ';'.
        endpoints = parts.split(';')
        self.port = port
        if scheme != 'tcp':
            raise NotImplementedError('Currently only TCP can be used')
        self.context = context or zmq.Context.instance()
        if enable_sink:
            self.sink = self.context.socket(zmq.PULL)
            self.sink.bind('tcp://*:{0.port}'.format(self))
        else:
            self.sink = None
        self.vent = self.context.socket(zmq.PUSH)
        # SNDHWM replaced HWM in newer pyzmq releases; support both.
        if hasattr(zmq, 'SNDHWM'):
            self.vent.setsockopt(zmq.SNDHWM, hwm)
        else:
            self.vent.setsockopt(zmq.HWM, hwm)
        if swap_size:
            # NOTE(review): zmq.SWAP only exists in old (0MQ 2.x)
            # bindings -- confirm before enabling swap on modern pyzmq.
            self.vent.setsockopt(zmq.SWAP, swap_size)
        for endpoint in endpoints:
            # Fill in the default port for endpoints given without one.
            if scheme == 'tcp' and ':' not in endpoint:
                endpoint += ':' + str(DEFAULT_PORT)
            endpoint = ''.join([scheme, '://', endpoint])
            self.connect(endpoint)

    def connect(self, endpoint):
        """Connect the vent (PUSH) socket to *endpoint*."""
        self.vent.connect(endpoint)

    def get(self, queue=None, timeout=None):
        """Receive one raw message from the sink.

        A zmq EAGAIN (e.g. receive timeout) is translated into
        socket.error so callers can treat it like a regular
        non-blocking socket miss.
        """
        sink = self.sink
        try:
            if timeout is not None:
                # Temporarily apply the receive timeout, then restore it.
                prev_timeout, sink.RCVTIMEO = sink.RCVTIMEO, timeout
                try:
                    return sink.recv()
                finally:
                    sink.RCVTIMEO = prev_timeout
            else:
                return sink.recv()
        except ZMQError as exc:
            if exc.errno == zmq.EAGAIN:
                raise socket.error(errno.EAGAIN, exc.strerror)
            else:
                raise

    def put(self, queue, message, **kwargs):
        """Send *message* on the vent socket (*queue* is ignored)."""
        return self.vent.send(message)

    def close(self):
        """Close both sockets if still open."""
        if self.sink and not self.sink.closed:
            self.sink.close()
        if not self.vent.closed:
            self.vent.close()

    @property
    def connection(self):
        # The socket whose fd gets polled: prefer the sink when present.
        if self.sink:
            return self.sink
        return self.vent
class Channel(virtual.Channel):
    """Virtual channel backed by a zeromq :class:`Client`."""

    Client = Client
    hwm = DEFAULT_HWM
    swap_size = None
    enable_sink = True
    port_incr = DEFAULT_INCR

    # Channel attributes overridable via connection transport_options.
    from_transport_options = (
        virtual.Channel.from_transport_options +
        ('hwm', 'swap_size', 'enable_sink', 'port_incr')
    )

    def __init__(self, *args, **kwargs):
        super_ = super(Channel, self)
        super_.__init__(*args, **kwargs)
        # Evaluate socket: touching the property forces eager creation of
        # the client and its sockets instead of on first use.
        self.client.connection.closed
        self.connection.cycle.add(self)
        self.connection_errors = self.connection.connection_errors

    def _get(self, queue, timeout=None):
        """Receive and unpickle one message; raise Empty on timeout."""
        try:
            return loads(self.client.get(queue, timeout))
        except socket.error as exc:
            if exc.errno == errno.EAGAIN and timeout != 0:
                raise Empty()
            else:
                raise

    def _put(self, queue, message, **kwargs):
        """Pickle (protocol -1, i.e. highest) and send *message*."""
        self.client.put(queue, dumps(message, -1), **kwargs)

    def _purge(self, queue):
        # Purging is not supported by this transport; report zero removed.
        return 0

    def _poll(self, cycle, timeout=None):
        return cycle.get(timeout=timeout)

    def close(self):
        """Remove the channel from the poll cycle and close its client."""
        if not self.closed:
            self.connection.cycle.discard(self)
            try:
                # Only close if the 'client' cached_property was created.
                self.__dict__['client'].close()
            except KeyError:
                pass
        super(Channel, self).close()

    def _prepare_port(self, port):
        # NOTE(review): this multiplies the whole sum by port_incr,
        # i.e. (port + channel_id - 1) * port_incr -- confirm this is
        # intended rather than port + (channel_id - 1) * port_incr.
        return (port + self.channel_id - 1) * self.port_incr

    def _create_client(self):
        conninfo = self.connection.client
        port = self._prepare_port(conninfo.port or DEFAULT_PORT)
        return self.Client(uri=conninfo.hostname or 'tcp://127.0.0.1',
                           port=port,
                           hwm=self.hwm,
                           swap_size=self.swap_size,
                           enable_sink=self.enable_sink,
                           context=self.connection.context)

    @cached_property
    def client(self):
        # Lazily created Client, cached for the channel's lifetime.
        return self._create_client()
class Transport(virtual.Transport):
    """ZeroMQ transport for kombu's virtual-transport machinery."""

    Channel = Channel

    can_parse_url = True
    default_port = DEFAULT_PORT
    driver_type = 'zeromq'
    driver_name = 'zmq'

    connection_errors = virtual.Transport.connection_errors + (ZMQError, )

    # Integrates with the async event loop (hub); no busy polling needed.
    supports_ev = True
    polling_interval = None

    def __init__(self, *args, **kwargs):
        if zmq is None:
            raise ImportError('The zmq library is not installed')
        super(Transport, self).__init__(*args, **kwargs)
        # One poller shared by every channel of this transport.
        self.cycle = MultiChannelPoller()

    def driver_version(self):
        return zmq.__version__

    def register_with_event_loop(self, connection, loop):
        """Hook the shared channel poller into the hub's event loop."""
        cycle = self.cycle
        cycle.poller = loop.poller
        # Bind hot-path lookups to locals for the per-tick closure.
        add_reader = loop.add_reader
        on_readable = self.on_readable
        cycle_poll_start = cycle.on_poll_start

        def on_poll_start():
            cycle_poll_start()
            # (Re-)register every pollable fd on each loop tick.
            [add_reader(fd, on_readable, fd) for fd in cycle.fds]
        loop.on_tick.add(on_poll_start)

    def on_readable(self, fileno):
        self._handle_event(self.cycle.on_readable(fileno))

    def drain_events(self, connection, timeout=None):
        """Drain one round of events from every channel.

        Raises ``socket.error(EAGAIN)`` when no channel had anything to
        read, mirroring non-blocking socket semantics.
        """
        more_to_read = False
        for channel in connection.channels:
            try:
                evt = channel.cycle.get(timeout=timeout)
            except socket.error as exc:
                if exc.errno == errno.EAGAIN:
                    continue
                raise
            else:
                connection._handle_event((evt, channel))
                more_to_read = True
        if not more_to_read:
            raise socket.error(errno.EAGAIN, os.strerror(errno.EAGAIN))

    def _handle_event(self, evt):
        # evt is ((message, queue), channel) as produced by drain_events.
        item, channel = evt
        message, queue = item
        if not queue or queue not in self._callbacks:
            raise KeyError(
                'Message for queue {0!r} without consumers: {1}'.format(
                    queue, message))
        self._callbacks[queue](message)

    def establish_connection(self):
        # Touch .closed to force creation of the lazy zmq context.
        self.context.closed
        return super(Transport, self).establish_connection()

    def close_connection(self, connection):
        super(Transport, self).close_connection(connection)
        try:
            # Terminate the context only if it was ever materialized.
            connection.__dict__['context'].term()
        except KeyError:
            pass

    @cached_property
    def context(self):
        # Lazily-created zmq Context with a single I/O thread.
        return zmq.Context(1)
|
|
"""
An example of `qconcurrency.widgets.SessionList`
This is a heavily modified `QListWidget` that
facilitates storing all changes in memory until
you want to perform a database-save operation.
For example:
* load 5x items from the database
* change 1x item
* add 2x items
* delete 1x item
* save all of these changes now, back to database.
The widget supports using colours to indicate the various
statuses (new, changed, deleted, editable).
Run this script, and play around with the widget
to get a better understanding of this widget's intended usage.
"""
import os
import sys
qconcurrency_path = '/'.join(os.path.realpath(__file__).replace('\\','/').split('/')[:-2])
sys.path.insert(0, qconcurrency_path )
from qconcurrency import QBaseWindow
from qconcurrency.widgets import SessionList
from Qt import QtCore, QtWidgets, QtGui
import time
class FakeDBCursor(object):
    """A stand-in database cursor for this example.

    ``execute`` simply prints the formatted SQL and advances
    ``lastrowid`` to mimic inserted-row ids.
    """

    def __init__(self):
        # Pretend rows up to id 100 already exist.
        self.lastrowid = 100

    def execute(self, sql, **kwds):
        """Print *sql* formatted with *kwds* and bump ``lastrowid``."""
        print(sql.format(**kwds))
        self.lastrowid += 1
class GroceryList( QtWidgets.QWidget ):
    """
    An example SessionWidget: a grocery list whose adds, edits and
    deletes accumulate in memory until a single save writes them to a
    (fake) database.
    """
    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        self._initui()

    def _initui(self):
        # Foreground/background colours SessionList uses to flag item status.
        colours = {
            'changed':{
                'fg':QtGui.QColor(30,30,30),
                'bg':QtGui.QColor(170,70,50),
            },
            'new':{
                'fg':QtGui.QColor(30,30,30),
                'bg':QtGui.QColor(180,140,50),
            },
            'editable':{
                'fg':QtGui.QColor(30,30,30),
                'bg':QtGui.QColor(60,120,140),
            },
        }

        # Create Widgets
        layout = QtWidgets.QVBoxLayout()
        actionbar = QtWidgets.QHBoxLayout()
        self._list = SessionList( colours )
        add_btn = QtWidgets.QPushButton('+ Add Item')
        del_btn = QtWidgets.QPushButton('- Del Item')
        self._save_btn = QtWidgets.QPushButton('Save')

        # Position Widgets
        self.setLayout( layout )
        layout.addWidget( self._list )
        layout.addLayout( actionbar )
        actionbar.addStretch(1)
        actionbar.addWidget( del_btn )
        actionbar.addWidget( add_btn )
        actionbar.addWidget( self._save_btn )

        # Widget Attrs: allow multi-select so several items can be deleted.
        self._list.setSelectionMode(
            QtWidgets.QAbstractItemView.ExtendedSelection
        )

        # Connections
        self._list.save_requested.connect(
            self._handle_save_changes
        )
        del_btn.clicked.connect(
            self._handle_delete_item
        )
        add_btn.clicked.connect(
            self._handle_add_item
        )
        self._save_btn.clicked.connect(
            self._handle_save
        )
        self._list.changes_exist.connect(
            self._handle_changes_exist
        )

    def load(self):
        """
        Loads 3x items, representing saved data
        """
        self._list.clear()
        fake_database_items = {
            1 : 'Rice',
            2 : 'Chicken',
            3 : 'Soup Stock',
        }
        for _id in fake_database_items:
            text = fake_database_items[ _id ]
            # saved_val marks the item as already persisted (no colour flag).
            self._list.add_item( text, _id=_id, saved_val=text )

    def _handle_save_changes(self, changes):
        """
        Called when :py:meth:`save_changes` is emitted.
        This example will:
            * update our fake SQL database
            * update widget colours
            * update self._delitems, self._newitems, self._changeditems
              as information is `saved` to our fake database
        """
        # a fake database cursor
        cursor = FakeDBCursor()

        # Saved Items
        # ===========
        for _id in changes['new'].keys():
            widget = changes['new'][_id]
            cursor.execute((
                'INSERT INTO groceryTable \n'
                ' ( food_type ) \n'
                'VALUES ({food_type}) \n'),
                food_type = widget.text()
            )
            # update internaldata, UI colours; adopt the db-assigned row id
            widget.set_saved( _id=cursor.lastrowid )

        # Changed Items
        # =============
        for _id in changes['changed'].keys():
            widget = changes['changed'][_id]
            cursor.execute((
                'UPDATE groceryTable \n'
                'SET food_type = {food_type} \n'
                'WHERE food_Id = {food_Id} \n'),
                food_type = widget.text(),
                food_Id = _id,
            )
            # update internaldata, UI colours
            # NOTE(review): this reassigns the widget's id to
            # cursor.lastrowid even though an UPDATE keeps the row id;
            # set_saved(_id=_id) looks intended — confirm against
            # SessionList's contract.
            widget.set_saved( _id=cursor.lastrowid )

        # Deleted Items
        # =============
        if changes['deleted']:
            cursor.execute((
                'DELETE FROM groceryTable \n'
                'WHERE food_Id IN ( %s ) \n'
                ) % ','.join([str(_id) for _id in changes['deleted'] ])
            )
            # NOTE(review): _delitems is not defined elsewhere in this
            # class — presumably SessionList state; verify.
            self._delitems = set()

    def _handle_delete_item(self):
        # Remove every selected row from the session (deletion is deferred
        # until save).
        selitems = self._list.selectedItems()
        if not selitems:
            return
        for widget in selitems:
            self._list.remove_item( widget.id() )

    def _handle_add_item(self):
        # New items start empty and are flagged with the 'new' colour.
        self._list.add_item( '' )

    def _handle_save(self):
        self._list.save_changes()

    def _handle_changes_exist(self, status):
        """
        Enables/Disables the `Save` button
        based on whether or not there are unsaved changes
        """
        if status:
            self._save_btn.setEnabled(True)
        else:
            self._save_btn.setEnabled(False)
if __name__ == '__main__':
    from qconcurrency import QApplication

    # Demo entry point: show a GroceryList pre-populated with fake
    # previously-saved data so status colours can be explored.
    with QApplication():
        grocery_list = GroceryList()
        grocery_list.load()   # load some bogus saved-data
        grocery_list.show()
|
|
import json
import sys
import glob
import os
import argparse
import re
import base64
import gzip
class AppSvcsBuilder:
_apl_validators = {
'ipaddr':'IpAddress',
'port':'PortNumber',
'number':'NonNegativeNumber',
'allnumber':'Number',
'iporfqdn':'IpOrFqdn',
'fqdn':'FQDN'
}
options = {
'preso': os.path.join('src','presentation_layer.json'),
'impl': os.path.join('src','implementation_layer.tcl'),
'workingdir': os.getcwd(),
'tempdir': 'tmp',
'bundledir': 'bundled',
'outfile': None,
'docsdir': 'docs',
'append': "",
'roottmpl': os.path.join('src','master.template'),
'debug':False,
'github_root':'https://www.github.com/0xHiteshPatel/appsvcs_integration_iapp/',
'github_tag':'',
'github_url':'',
'debug':False
}
def __init__(self, **kwargs):
self._debug("in __init__")
self.options.update(**kwargs)
self.options["preso_fn"] = os.path.join(self.options["workingdir"],self.options["preso"])
self.options["impl_fn"] = os.path.join(self.options["workingdir"],self.options["impl"])
if len(self.options["append"]) > 0:
self.options["append"] = '_' + self.options["append"]
self._debug("options=%s" % self.options)
self.buildinfo = {
"pres_rev":"",
"impl_major": "",
"impl_minor": "",
"allvars": [],
"allvarsTCL": "",
"github_tag":"",
"github_url":""
}
self._load_info()
if not self.options["outfile"]:
self.options["outfile_fn"] = "%s/appsvcs_integration_v%s-%s_%s%s.tmpl" \
% (self.options["workingdir"],
self.buildinfo["impl_major"],
self.buildinfo["impl_minor"],
self.buildinfo["pres_rev"],
self.options["append"])
else:
self.options["outfile_fn"] = os.path.join(self.options["workingdir"], self.options["outfile"])
# if self.options["append"]:
# print "Appending \"%s\" to template name" % self.options["append"]
self.options['github_url'] = self.options['github_root'] + 'tree/' + self.options['github_tag'] + '/'
self.buildinfo['github_root'] = self.options['github_root']
self.buildinfo['github_url'] = self.options['github_url']
self.buildinfo['github_tag'] = self.options['github_tag']
self._debug("buildinfo=%s" % self.buildinfo)
#self._debug(self._stringify_modes([1,2,3,4]))
#self._debug(self._search_test_cases("pool__port"))
# self._debug(self._doc_RST_section("Test Test XXX","="))
# self._debug(self._doc_RST_links(["include_defaults.json","test_vs_ipother.json"], "test/"))
# for section in self.pres_data["sections"]:
# if section["name"] == "pool":
# for field in section["fields"]:
# if field["name"] == "addr":
# self._debug(self._doc_RST_print_field(field, "pool__addr", 0))
# self._debug(self._doc_RST_print_field(field, "pool__addr", 1))
def _debug(self, msg):
if(self.options["debug"]):
sys.stderr.write("[debug] %s\n" % msg)
def _load_info(self):
impl = self._safe_open(self.options["impl_fn"])
pres = self._safe_open(self.options["preso_fn"])
self.pres_data = self._load_json(pres)
pres.close()
for line in impl:
implmajormatch = re.match( r'^set IMPLMAJORVERSION \"(.*)\"', line)
implminormatch = re.match( r'^set IMPLMINORVERSION \"(.*)\"', line)
if implmajormatch: self.buildinfo["impl_major"] = implmajormatch.group(1)
if implminormatch: self.buildinfo["impl_minor"] = implminormatch.group(1)
if ( self.buildinfo["impl_major"] and self.buildinfo["impl_minor"] ):
break
impl.close()
if self.pres_data["tmpl_majorversion"] != self.buildinfo["impl_major"]:
print "[fatal] IMPLVERSION_MAJOR (%s) in %s does not match 'tmpl_majorversion' (%s) in %s" \
% (self.buildinfo["impl_major"],
self.options["impl_fn"],
self.pres_data["tmpl_majorversion"],
self.options["preso_fn"])
sys.exit(1)
self.buildinfo["pres_rev"] = str(self.pres_data["pres_revision"])
for section in self.pres_data["sections"]:
if section["name"] == "intro":
continue
for field in section["fields"]:
self.buildinfo["allvars"].append("%s__%s" % (section["name"], field["name"]))
self.buildinfo["allvarsTCL"] = "\n".join(map(' {0} \\\\'.format, self.buildinfo["allvars"]))
ver = self._safe_open(os.path.join(self.options["workingdir"], 'VERSION'))
self.options['github_tag'] = str(ver.readline()).rstrip()
ver.close
def _safe_open(self, filename, mode="r", exit=True):
self._debug("_safe_open: %s %s" % (mode, filename))
try:
fh = open(filename, mode)
except IOError as error:
print "[fatal] Open of file '%s' failed: %s" % (filename, error)
if exit: sys.exit(1)
return fh
def _load_json(self, fh, exit=True):
self._debug("_load_json: %s" % (fh.name))
try:
json_data = json.load(fh)
except (ValueError, NameError) as error:
print "[fatal] JSON format error in '%s': %s" % (fh.name, error)
if exit: sys.exit(1)
return json_data
def _stringify_modes(self, modes):
return ", ".join([self.pres_data["modes"][str(x)] for x in modes])
def _search_test_cases(self, string):
self._debug("in _search_test_cases")
ret = ["include_defaults.json"]
test_templates = sorted(glob.glob(self.options["workingdir"] + os.sep + os.path.join("test","test_*.json")))
for template in test_templates:
f = self._safe_open(template)
for line in f:
if string in line:
ret.append(template.split(os.path.sep)[-1])
f.close()
break
f.close()
return ret
def _doc_RST_section(self, string, char):
return("%s\n%s\n\n" % (string, (char * len(string))))
def _doc_RST_inline_ref(self, ref, prepend=""):
return '%s.. _%s:\n\n' % (prepend, ref)
def _doc_RST_anon_ref(self, list, path):
return ['`{0} <{1}{2}{0}>`__'.format(x, self.options['github_url'], path) for x in list]
def _doc_RST_generate(self, fh):
for section in self.pres_data["sections"]:
if section["name"] == "intro":
continue
fh.write(self._doc_RST_section("Section: %s" % section["name"], '-'))
for field in section["fields"]:
fh.write(self._doc_RST_inline_ref("preso-%s-%s" % (section["name"], field["name"])))
if field["type"] != "table":
fh.write(self._doc_RST_section("Field: %s__%s" % (section["name"], field["name"]), '^'))
fh.write(".. csv-table::\n")
fh.write("\t:stub-columns: 1\n")
fh.write("\t:widths: 10 80\n\n")
self._doc_RST_generate_field(field, "%s__%s" % (section["name"], field["name"]), fh, 0)
fh.write("\n")
else:
table_name = "%s__%s" % (section["name"], field["name"])
fh.write(self._doc_RST_section("Table: %s__%s" % (section["name"], field["name"]), '^'))
fh.write("%s\n\n" % field["help"])
fh.write(".. csv-table::\n")
fh.write('\t:header: "Column","Details"\n')
fh.write("\t:stub-columns: 1\n")
fh.write("\t:widths: 10 80\n\n")
for table_field in field["fields"]:
fh.write(self._doc_RST_inline_ref("preso-%s-%s-%s" % (section["name"], field["name"], table_field["name"]), '\t"%s","' % table_field["name"]))
self._doc_RST_generate_field(table_field, "", fh, 1)
fh.write('\t"\n')
fh.write('\t"Examples",')
test_files = self._search_test_cases("%s__%s" % (section["name"], field["name"]))
fh.write('"%s"\n\n' % " | ".join(self._doc_RST_anon_ref(test_files, "test/")))
def _doc_RST_generate_field(self, field, search, fh, mode=0):
self._debug("field=%s" % field)
reqstr = "No"
defstr = ""
if 'required' in field.keys() and field["required"] == True: reqstr = "Yes"
if 'default' in field.keys(): defstr = "%s" % field["default"]
if 'help' in field.keys():
descrstr = field["help"]
else:
descrstr = field["description"]
strings = []
strings.append({"Display Name": field["description"]})
strings.append({"Description": descrstr})
strings.append({"Modes": self._stringify_modes(field["modes"])})
strings.append({"Type": field["type"]})
strings.append({"Default": defstr})
strings.append({"Min. Version": field["minver"]})
if "choice" in field["type"] and "choices" in field.keys():
choicestr = ", ".join(field["choices"])
choicestr = choicestr.replace("<", "<")
choicestr = choicestr.replace(">", ">")
strings.append({"Choices": choicestr})
if len(search) > 0:
test_files = self._search_test_cases(search)
strings.append({"Examples": " | ".join(self._doc_RST_anon_ref(test_files, "test/"))})
if mode:
strformat = '\t:{0}: {1}\n'
else:
strformat = '\t"{0}","{1}"\n'
ret = ""
self._debug("strings=%s" % strings)
for string in strings:
self._debug("string=%s" % string)
name = string.keys()[0]
value = string.values()[0]
fh.write(strformat.format(name, value))
return ret
def _tmpl_process_file(self, fin, fout, prepend=""):
print "%sfound insertfile, file=%s" % (prepend, fin.name)
ret = []
for line in fin:
line = re.sub(r'%IMPLVERSION_MAJOR%', self.buildinfo["impl_major"], line)
line = re.sub(r'%IMPLVERSION_MINOR%', self.buildinfo["impl_minor"], line)
line = re.sub(r'%PRESENTATION_REV%', self.buildinfo["pres_rev"], line)
line = re.sub(r'%PRESENTATION_TCL_ALLVARS%', self.buildinfo["allvarsTCL"], line)
line = re.sub(r'%NAME_APPEND%', self.options["append"], line)
line = re.sub(r'%TMPL_NAME%', "appsvcs_integration_v%s_%s%s" % (self.buildinfo["impl_major"], self.buildinfo["pres_rev"], self.options["append"]), line)
line = re.sub(r'%TEMP_DIR%', self.options["tempdir"], line)
match = re.match( r'(.*)\%insertfile:(.*)\%(.*)', line)
if match:
insertfile = self._safe_open("%s/%s" % (self.options["workingdir"], match.group(2)))
insert = self._tmpl_process_file(insertfile, fout, '%s ' % prepend)
insertfile.close()
line = "%s%s%s" % (match.group(1), ''.join(insert), match.group(3))
ret.append(line)
return ret
def _apl_generate_field_text(self, field, tab=""):
return "\t%smessage %s" % (tab, field["name"])
def _apl_generate_field_boolean(self, field, tab=""):
if 'default' in field.keys():
if field["default"] == True:
field["default"] = "enabled"
else:
field["default"] = "disabled"
field["_apl_defstr"] = " default \"%s\"" % field["default"]
return "\t%schoice %s%s%s {\n\t\t%s\"enabled\",\n\t\t%s\"disabled\"\n\t%s}" % (tab, field["name"], field["_apl_defstr"], field["_apl_reqstr"], tab, tab, tab)
def _apl_generate_field_string(self, field, tab=""):
self._debug("VALIDATOR field=%s" % field)
valstr = ""
if '_apl_validator' in field.keys():
valstr = ' validator "%s"' % field["_apl_validator"]
return "\t%sstring %s%s%s%s%s" % (tab, field["name"], field["_apl_reqstr"], field["_apl_dispstr"], valstr, field["_apl_defstr"])
def _apl_generate_field_choice(self, field, tab=""):
tclstr = ""
retstr = ""
if 'create_list' in field.keys():
tclstr = """ tcl {
tmsh::cd /
set results ""
set cmds [list %s]
foreach cmd $cmds {
set objs [list]
set objs_status [catch {tmsh::get_config $cmd recursive} objs]
if { $objs_status == 1 } { continue }
foreach obj $objs {
set name [string map {"\\\"" ""} [tmsh::get_name $obj]]
if { $name ne "" } {
append results \"/$name\"
append results \"\\n\"
}
}
}
return $results
}
""" % ' '.join(field["create_list"])
if 'glob' in field.keys():
types = {}
filenames = []
field['choices'] = []
files = []
for globitem in field["glob"]:
if os.sep != "/":
globitem["path"] = globitem["path"].replace("/","\\")
files = glob.glob("%s%s%s" % (self.options["bundledir"], os.sep, globitem["path"]))
for filename in files:
name_parts = filename.split(os.sep)
file_parts = name_parts[-1].split('.')
filenames.append(file_parts[0])
types[file_parts[0]] = globitem["prefix"]
if len(filenames) > 0:
for choice in filenames:
field['choices'].append('%s%s' % (types.get(choice), choice))
if len(tclstr) > 0:
return "\t%s%s %s%s%s%s%s" % (tab, field["type"], field["name"], field["_apl_reqstr"], field["_apl_dispstr"], field["_apl_defstr"], tclstr)
else:
if len(field['choices']) == 0:
field['choices'].append('** no bundled items **')
retstr += "\t%s%s %s%s%s%s {\n" % (tab, field["type"], field["name"], field["_apl_reqstr"], field["_apl_dispstr"], field["_apl_defstr"])
retstr += ",\n".join(['\t\t{0}"{1}"'.format(tab, x) for x in field["choices"]])
retstr += "\n\t%s}" % tab
return retstr
def _apl_generate_field_editchoice(self, field, tab=""):
return self._apl_generate_field_choice(field, tab)
def _apl_generate_field_dynamic_filelist_multi(self, field, tab=""):
types = {}
filenames = []
retstr = ""
for globitem in field["glob"]:
if os.sep != "/":
globitem["path"] = globitem["path"].replace("/","\\")
files = glob.glob("%s%s%s" % (self.options["bundledir"], os.sep, globitem["path"]))
for filename in files:
name_parts = filename.split(os.sep)
file_parts = name_parts[-1].split('.')
filenames.append(file_parts[0])
types[file_parts[0]] = globitem["prefix"]
retstr += "\t%smultichoice %s%s%s%s {" % (tab, field["name"], field["_apl_reqstr"], field["_apl_dispstr"], field["_apl_defstr"])
if len(filenames) > 0:
for choice in filenames[:-1]:
retstr += "\t\t%s\"%s%s\"," % (tab, types.get('%s' % choice), choice)
else:
retstr += "\t\t%s\"%s%s\"" % (tab, types.get('%s' % filenames[-1]), filenames[-1])
else:
retstr += "\t\t%s\"** no bundled items **\"" % tab
retstr += "\t%s}" % tab
return retstr
def _apl_generate_field (self, field, section, tab):
apl_field = ""
if ('uivisible' not in field.keys()):
field["uivisible"] = True
field["_apl_reqstr"] = ""
if ('required' in field.keys() and field["required"] == True):
field["_apl_reqstr"] = " required"
field["_apl_defstr"] = ""
if 'default' in field.keys():
field["_apl_defstr"] = " default \"%s\"" % field["default"]
field["_apl_dispstr"] = ""
field["_apl_dispstr"] = " display \"large\" "
if 'display' in field.keys():
field["_apl_dispstr"] = " display \"%s\"" % field["display"]
self._debug("VALIDATOR type=%s keys=%s" % (field["type"], self._apl_validators.keys()))
if field["type"] in self._apl_validators.keys():
field["_apl_validator"] = self._apl_validators[field["type"]]
field["type"] = 'string'
self._debug("VALIDATOR pre field=%s v=%s" % (field["type"], field["_apl_validator"]))
if field["type"] == "text":
field["_apl_text"] = ("\t%s.%s \"%s\" \"%s\"\n" % (section, field["name"], field["description"], field["text"]))
else:
field["_apl_text"] = ("\t%s.%s \"%s\"\n" % (section, field["name"], field["description"]))
func_name = '_apl_generate_field_%s' % field["type"]
self._debug("field=%s" % field)
if hasattr(AppSvcsBuilder, func_name):
apl_field = getattr(AppSvcsBuilder, func_name)(self, field, tab)
else:
print "Invalid APL Field Type: %s field=%s section=%s" % (field["type"], field["name"], section)
sys.exit(1)
if field['uivisible'] == True:
return apl_field + '\n'
else:
return "optional (\"dont\" == \"show\") {\n %s \n}\n" % apl_field
def createBundledResources(self):
resources = []
print "Building bundled resources:"
fh = self._safe_open(os.path.join(self.options["workingdir"], self.options["tempdir"],'bundler.build'), "wt")
print " Adding iRules (%sirules/*.irule)..." % (self.options["bundledir"])
files = glob.glob(os.path.join(self.options["bundledir"],'irules','*.irule'))
print " Adding ASM policies (%sasm_policies/*.xml)..." % (self.options["bundledir"])
files += glob.glob(os.path.join(self.options["bundledir"], 'asm_policies','*.xml'))
print " Adding APM policies (%sapm_policies/*.tar.gz)..." % (self.options["bundledir"])
files += glob.glob(os.path.join(self.options["bundledir"],'apm_policies','*.tar.gz'))
if len(files) == 0:
print " no files found"
fh.write("\n")
return
fh.write("array set bundler_objects {}\n")
fh.write("array set bundler_data {}\n")
for filename in files:
print " Processing file: %s" % filename
filetype = ""
apm_bip_version = ['1']
name_parts = filename.split(os.sep)
file_parts = name_parts[-1].split('.')
just_name = file_parts[0]
if re.match( r'.*irules.*irule$', filename): filetype = 'irule'
if re.match( r'.*asm.*xml$', filename): filetype = 'asm'
if re.match( r'.*apm.*.tar.gz$', filename): filetype = 'apm'
if filetype == "":
print "[fatal] Could not determine the type of object for bundled file '%s'" % filename
sys.exit(1)
if filetype == "apm":
with gzip.open(filename, 'rb') as gz:
apm_raw = gz.read()
apm_bip_version = re.findall(r'^\#F5\[Version:(.*)\]', apm_raw, re.MULTILINE)
if len(apm_bip_version) != 1:
print "[fatal] Could not determine BIG-IP TMOS version for bundled APM file '%s'" % filename
sys.exit(1)
gz.close()
print " Found BIG-IP Version: %s" % apm_bip_version[0]
infile = self._safe_open(filename, "rb")
key = "%s:%s" % (filetype, just_name)
resources.append({
"key":key,
"ver":apm_bip_version[0],
"data":base64.b64encode(infile.read())
})
#self._debug("resources=%s" % resources)
for r in resources:
fh.write("set bundler_objects(%s) %s\n" % (r["key"], r["ver"]))
fh.write('\n')
for r in resources:
fh.write("set bundler_data(%s) {%s}\n" % (r["key"], r["data"]))
fh.close()
def buildDocVersion(self, **kwargs):
self._debug("in buildDocVersion")
if bool(kwargs):
self.__init__(**kwargs)
self._debug("buildDocVersion options=%s" % self.options)
ver = self._safe_open(os.path.join(self.options["workingdir"],self.options["docsdir"],'VERSION'), "wt")
ver.write(json.dumps(self.buildinfo))
ver.close()
def buildDoc(self, **kwargs):
self._debug("in buildDoc")
if bool(kwargs):
self.__init__(**kwargs)
self._debug("buildDoc options=%s" % self.options)
self.buildDocVersion(**kwargs)
fh = self._safe_open(os.path.join(self.options["workingdir"],self.options["docsdir"],'presoref.rst'), "wt")
fh.write("Presentation Layer Reference\n")
fh.write("============================\n\n")
self._doc_RST_generate(fh)
def buildAPL(self, **kwargs):
self._debug("in buildAPL")
if bool(kwargs):
self.__init__(**kwargs)
self._debug("buildAPL options=%s" % self.options)
fh = self._safe_open(os.path.join(self.options["workingdir"], self.options["tempdir"],'apl.build'), "wt")
for section in self.pres_data["sections"]:
fh.write("section %s {\n" % section["name"])
section["_apl_text"] = "\t%s \"%s\"\n" % (section["name"], section["description"])
for field in section["fields"]:
if field["type"] != "table":
fh.write(self._apl_generate_field(field, section["name"], ""))
else:
field["_apl_text"] = "\t%s.%s \"%s\"\n" % (section["name"], field["name"], field["description"])
if ('uivisible' in field.keys() and field["uivisible"] == False):
fh.write("optional (\"dont\" == \"show\") {\ntable %s {\n" % field["name"])
else:
fh.write("\ttable %s {\n" % field["name"])
for table_field in field["fields"]:
fh.write(self._apl_generate_field(table_field, "%s.%s" % (section["name"], field["name"]), "\t"))
if ('uivisible' in field.keys() and field["uivisible"] == False):
fh.write("\t}\n}\n")
else:
fh.write("\t}\n")
fh.write("}\n\n")
#text.append("")
fh.write("\ntext {\n")
#for descr in text:
# print "%s" % descr
for section in self.pres_data["sections"]:
fh.write(section["_apl_text"])
for field in section["fields"]:
fh.write(field["_apl_text"])
if field["type"] == "table":
for table_field in field["fields"]:
fh.write(table_field["_apl_text"])
fh.write("\n")
fh.write("}\n")
def buildTemplate(self, **kwargs):
self._debug("in buildTmpl")
if bool(kwargs):
self.__init__(**kwargs)
self._debug("buildTemplate options=%s" % self.options)
self.createBundledResources()
print "Writing to file: %s" % self.options["outfile_fn"]
out = self._safe_open(self.options["outfile_fn"], "wt")
main = self._safe_open(os.path.join(self.options["workingdir"],self.options["roottmpl"]))
final = self._tmpl_process_file(main, out)
out.write(''.join(final))
out.close()
main.close()
|
|
import bisect
import difflib
import sys
import warnings
import rope.base.oi.doa
import rope.base.oi.objectinfo
import rope.base.oi.soa
from rope.base import ast, exceptions, taskhandle, utils, stdmods
from rope.base.exceptions import ModuleNotFoundError
from rope.base.pyobjectsdef import PyModule, PyPackage, PyClass
import rope.base.resources
import rope.base.resourceobserver
from rope.base import builtins
class PyCore(object):
def __init__(self, project):
self.project = project
self._init_resource_observer()
self.cache_observers = []
self.module_cache = _ModuleCache(self)
self.extension_cache = _ExtensionCache(self)
self.object_info = rope.base.oi.objectinfo.ObjectInfoManager(project)
self._init_python_files()
self._init_automatic_soa()
self._init_source_folders()
def _init_python_files(self):
self.python_matcher = None
patterns = self.project.prefs.get('python_files', None)
if patterns is not None:
self.python_matcher = rope.base.resources._ResourceMatcher()
self.python_matcher.set_patterns(patterns)
def _init_resource_observer(self):
callback = self._invalidate_resource_cache
observer = rope.base.resourceobserver.ResourceObserver(
changed=callback, moved=callback, removed=callback)
self.observer = rope.base.resourceobserver.FilteredResourceObserver(observer)
self.project.add_observer(self.observer)
def _init_source_folders(self):
self._custom_source_folders = []
for path in self.project.prefs.get('source_folders', []):
folder = self.project.get_resource(path)
self._custom_source_folders.append(folder)
def _init_automatic_soa(self):
if not self.automatic_soa:
return
callback = self._file_changed_for_soa
observer = rope.base.resourceobserver.ResourceObserver(
changed=callback, moved=callback, removed=callback)
self.project.add_observer(observer)
@property
def automatic_soa(self):
auto_soa = self.project.prefs.get('automatic_soi', None)
return self.project.prefs.get('automatic_soa', auto_soa)
def _file_changed_for_soa(self, resource, new_resource=None):
old_contents = self.project.history.\
contents_before_current_change(resource)
if old_contents is not None:
perform_soa_on_changed_scopes(self.project, resource, old_contents)
def is_python_file(self, resource):
if resource.is_folder():
return False
if self.python_matcher is None:
return resource.name.endswith('.py')
return self.python_matcher.does_match(resource)
def get_module(self, name, folder=None):
"""Returns a `PyObject` if the module was found."""
# check if this is a builtin module
pymod = self._builtin_module(name)
if pymod is not None:
return pymod
module = self.find_module(name, folder)
if module is None:
raise ModuleNotFoundError('Module %s not found' % name)
return self.resource_to_pyobject(module)
def _builtin_submodules(self, modname):
result = {}
for extension in self.extension_modules:
if extension.startswith(modname + '.'):
name = extension[len(modname) + 1:]
if '.' not in name:
result[name] = self._builtin_module(extension)
return result
def _builtin_module(self, name):
return self.extension_cache.get_pymodule(name)
def get_relative_module(self, name, folder, level):
module = self.find_relative_module(name, folder, level)
if module is None:
raise ModuleNotFoundError('Module %s not found' % name)
return self.resource_to_pyobject(module)
def get_string_module(self, code, resource=None, force_errors=False):
"""Returns a `PyObject` object for the given code
If `force_errors` is `True`, `exceptions.ModuleSyntaxError` is
raised if module has syntax errors. This overrides
``ignore_syntax_errors`` project config.
"""
return PyModule(self, code, resource, force_errors=force_errors)
def get_string_scope(self, code, resource=None):
"""Returns a `Scope` object for the given code"""
return self.get_string_module(code, resource).get_scope()
def _invalidate_resource_cache(self, resource, new_resource=None):
for observer in self.cache_observers:
observer(resource)
def _find_module_in_folder(self, folder, modname):
module = folder
packages = modname.split('.')
for pkg in packages[:-1]:
if module.is_folder() and module.has_child(pkg):
module = module.get_child(pkg)
else:
return None
if module.is_folder():
if module.has_child(packages[-1]) and \
module.get_child(packages[-1]).is_folder():
return module.get_child(packages[-1])
elif module.has_child(packages[-1] + '.py') and \
not module.get_child(packages[-1] + '.py').is_folder():
return module.get_child(packages[-1] + '.py')
def get_python_path_folders(self):
import rope.base.project
result = []
for src in self.project.prefs.get('python_path', []) + sys.path:
try:
src_folder = rope.base.project.get_no_project().get_resource(src)
result.append(src_folder)
except rope.base.exceptions.ResourceNotFoundError:
pass
return result
def find_module(self, modname, folder=None):
"""Returns a resource corresponding to the given module
returns None if it can not be found
"""
return self._find_module(modname, folder)
def find_relative_module(self, modname, folder, level):
for i in range(level - 1):
folder = folder.parent
if modname == '':
return folder
else:
return self._find_module_in_folder(folder, modname)
def _find_module(self, modname, folder=None):
"""Return `modname` module resource"""
for src in self.get_source_folders():
module = self._find_module_in_folder(src, modname)
if module is not None:
return module
for src in self.get_python_path_folders():
module = self._find_module_in_folder(src, modname)
if module is not None:
return module
if folder is not None:
module = self._find_module_in_folder(folder, modname)
if module is not None:
return module
return None
# INFO: It was decided not to cache source folders, since:
# - Does not take much time when the root folder contains
# packages, that is most of the time
# - We need a separate resource observer; `self.observer`
# does not get notified about module and folder creations
@utils.memoize
def get_source_folders(self):
"""Returns project source folders"""
if self.project.root is None:
return []
result = list(self._custom_source_folders)
result.extend(self._find_source_folders(self.project.root))
return result
def resource_to_pyobject(self, resource, force_errors=False):
    """Return the (cached) pymodule object for `resource`."""
    return self.module_cache.get_pymodule(resource, force_errors)
def get_python_files(self):
    """Returns all python files available in the project"""
    return [res for res in self.project.get_files() if self.is_python_file(res)]
def _is_package(self, folder):
    """Tell whether `folder` is a python package (holds an __init__.py file)."""
    if not folder.has_child('__init__.py'):
        return False
    return not folder.get_child('__init__.py').is_folder()
@utils.memoize
def _find_source_folders(self, folder):
    """Recursively collect the folders that act as source roots.

    A folder directly containing a package is itself the single source
    folder for its subtree; otherwise a folder holding at least one
    ``.py`` file is included and its sub-folders are scanned too.
    """
    if any(self._is_package(child) for child in folder.get_folders()):
        return [folder]
    result = []
    if any(child.name.endswith('.py') for child in folder.get_files()):
        result.append(folder)
    for child in folder.get_folders():
        result.extend(self._find_source_folders(child))
    return result
def run_module(self, resource, args=None, stdin=None, stdout=None):
    """Run `resource` module

    Returns a `rope.base.oi.doa.PythonFileRunner` object for
    controlling the process.
    """
    # Dynamic object analysis (DOA) preference; the older `perform_doi`
    # spelling is honoured as a fallback for backward compatibility.
    perform_doa = self.project.prefs.get('perform_doi', True)
    perform_doa = self.project.prefs.get('perform_doa', perform_doa)
    receiver = self.object_info.doa_data_received
    if not perform_doa:
        receiver = None
    runner = rope.base.oi.doa.PythonFileRunner(
        self, resource, args, stdin, stdout, receiver)
    # Cached conclusions may be invalidated by the run's collected data.
    runner.add_finishing_observer(self.module_cache.forget_all_data)
    runner.run()
    return runner
def analyze_module(self, resource, should_analyze=lambda py: True,
                   search_subscopes=lambda py: True, followed_calls=None):
    """Analyze `resource` module for static object inference

    This function forces rope to analyze this module to collect
    information about function calls.  `should_analyze` is a
    function that is called with a `PyDefinedObject` argument.  If
    it returns `True` the element is analyzed.  If it is `None` or
    returns `False` the element is not analyzed.

    `search_subscopes` is like `should_analyze`; The difference is
    that if it returns `False` the sub-scopes are all ignored.
    That is it is assumed that `should_analyze` returns `False`
    for all of its subscopes.

    `followed_calls` override the value of ``soa_followed_calls``
    project config.
    """
    if followed_calls is None:
        followed_calls = self.project.prefs.get('soa_followed_calls', 0)
    pymodule = self.resource_to_pyobject(resource)
    # Previously concluded data may be stale once new SOA results land.
    self.module_cache.forget_all_data()
    rope.base.oi.soa.analyze_module(
        self, pymodule, should_analyze, search_subscopes, followed_calls)
def get_classes(self, task_handle=taskhandle.NullTaskHandle()):
    """Deprecated; retained for API compatibility, always returns []."""
    message = '`PyCore.get_classes()` is deprecated'
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return []
def __str__(self):
    """Summarize the module cache followed by the object-info store."""
    return '%s%s' % (self.module_cache, self.object_info)
def modname(self, resource):
    """Return the dotted module name for `resource`.

    Handles package folders, ``__init__.py`` files and plain modules,
    then walks enclosing packages upwards to build the full name.
    """
    if resource.is_folder():
        name, package = resource.name, resource.parent
    elif resource.name == '__init__.py':
        name, package = resource.parent.name, resource.parent.parent
    else:
        name, package = resource.name[:-3], resource.parent
    while package != package.parent and package.has_child('__init__.py'):
        name = package.name + '.' + name
        package = package.parent
    return name
@property
@utils.cacheit
def extension_modules(self):
    # Names of extension (binary) modules rope should treat specially;
    # taken from the `extension_modules` pref, optionally extended with
    # the stdlib's dynamically-loaded modules.
    result = set(self.project.prefs.get('extension_modules', []))
    if self.project.prefs.get('import_dynload_stdmods', False):
        result.update(stdmods.dynload_modules())
    return result
class _ModuleCache(object):
    # Cache of PyModule/PyPackage objects keyed by resource; entries are
    # invalidated through pycore.cache_observers when a resource changes.

    def __init__(self, pycore):
        self.pycore = pycore
        self.module_map = {}
        self.pycore.cache_observers.append(self._invalidate_resource)
        self.observer = self.pycore.observer

    def _invalidate_resource(self, resource):
        # Drop the cached module for `resource`; all concluded data is
        # forgotten since other modules may hold conclusions about it.
        if resource in self.module_map:
            self.forget_all_data()
            self.observer.remove_resource(resource)
            del self.module_map[resource]

    def get_pymodule(self, resource, force_errors=False):
        # Return a (possibly cached) pymodule for `resource`; modules
        # with syntax errors are returned but deliberately not cached.
        if resource in self.module_map:
            return self.module_map[resource]
        if resource.is_folder():
            result = PyPackage(self.pycore, resource,
                               force_errors=force_errors)
        else:
            result = PyModule(self.pycore, resource=resource,
                              force_errors=force_errors)
            if result.has_errors:
                return result
        self.module_map[resource] = result
        self.observer.add_resource(resource)
        return result

    def forget_all_data(self):
        # Clear per-module concluded (inferred) data; keeps the modules.
        for pymodule in self.module_map.values():
            pymodule._forget_concluded_data()

    def __str__(self):
        return 'PyCore caches %d PyModules\n' % len(self.module_map)
class _ExtensionCache(object):
    """Lazily built BuiltinModule wrappers for allowed extension modules."""

    def __init__(self, pycore):
        self.pycore = pycore
        self.extensions = {}

    def get_pymodule(self, name):
        """Return a pymodule for extension `name`, or None if not allowed."""
        if name == '__builtin__':
            return builtins.builtins
        if name not in self.extensions:
            if name in self.pycore.extension_modules:
                self.extensions[name] = builtins.BuiltinModule(
                    name, self.pycore)
        return self.extensions.get(name)
def perform_soa_on_changed_scopes(project, resource, old_contents):
    # Re-run static object analysis, restricted to the scopes whose text
    # actually changed relative to `old_contents`.
    pycore = project.pycore
    if resource.exists() and pycore.is_python_file(resource):
        try:
            new_contents = resource.read()
            # detecting changes in new_contents relative to old_contents
            # NOTE: the detector's first argument is the text whose line
            # numbers get reported -- here the *new* contents, so scope
            # line ranges from the fresh parse match up.
            detector = _TextChangeDetector(new_contents, old_contents)
            def search_subscopes(pydefined):
                # Descend into a scope only if something inside changed.
                scope = pydefined.get_scope()
                return detector.is_changed(scope.get_start(), scope.get_end())
            def should_analyze(pydefined):
                # Analyze each changed scope once, consuming its lines.
                scope = pydefined.get_scope()
                start = scope.get_start()
                end = scope.get_end()
                return detector.consume_changes(start, end)
            pycore.analyze_module(resource, should_analyze, search_subscopes)
        except exceptions.ModuleSyntaxError:
            pass
class _TextChangeDetector(object):
def __init__(self, old, new):
self.old = old
self.new = new
self._set_diffs()
def _set_diffs(self):
differ = difflib.Differ()
self.lines = []
lineno = 0
for line in differ.compare(self.old.splitlines(True),
self.new.splitlines(True)):
if line.startswith(' '):
lineno += 1
elif line.startswith('-'):
lineno += 1
self.lines.append(lineno)
def is_changed(self, start, end):
"""Tell whether any of start till end lines have changed
The end points are inclusive and indices start from 1.
"""
left, right = self._get_changed(start, end)
if left < right:
return True
return False
def consume_changes(self, start, end):
"""Clear the changed status of lines from start till end"""
left, right = self._get_changed(start, end)
if left < right:
del self.lines[left:right]
return left < right
def _get_changed(self, start, end):
left = bisect.bisect_left(self.lines, start)
right = bisect.bisect_right(self.lines, end)
return left, right
|
|
''' Finding Strongly-Connected Components of a graph
Kosaraju's Two-Pass Algorithm
'''
import sys
from collections import defaultdict, deque
def load_data(filename):
    """Load a directed graph from `filename`.

    Each line holds ``tail head`` as integers.  Returns a defaultdict
    mapping vertex -> list of head vertices, plus a special ``'max'``
    key holding the largest tail vertex seen.
    """
    graph = defaultdict(list)
    max_node = 0  # renamed: the old local shadowed the builtin `max`
    with open(filename) as stream:
        # Iterate the file directly instead of a manual readline loop.
        for line in stream:
            parts = [int(x) for x in line.split()]
            if not parts:
                continue  # tolerate blank lines (the old code crashed)
            graph[parts[0]].append(parts[1])
            if parts[0] > max_node:
                max_node = parts[0]
    graph['max'] = max_node
    # Graph is now a dict of:
    #
    # { vertex: [tail1, tail2, ... tailn], }
    #
    # for all edges vertex, {tails}
    return graph
def reverse_arcs(g):
    ''' Return a new graph based on g with all arcs reversed.
    '''
    rev = defaultdict(list)
    # `range` instead of `xrange`: identical results on Python 2 and
    # makes the function work on Python 3 (xrange was removed).
    for u in range(1, g['max'] + 1):
        for v in g[u]:
            rev[v].append(u)
    rev['max'] = g['max']
    return rev
def cmp_len(a, b):
    ''' Sort comparer for lengths of items.

    Returns 1, -1 or 0 as len(a) is greater than, less than or equal
    to len(b).
    '''
    la, lb = len(a), len(b)
    return (la > lb) - (la < lb)
# State for Kosaraju's versions of DFS_loop() and DFS()
def make_state():
    ''' Return a fresh state container.
    '''
    return {
        't': 0,           # finishing-time counter
        's': None,        # leader (source vertex) of the current DFS
        'explored': [],   # vertices already visited
        'finishing': {},  # finishing[t] = node with finishing time t
        'leaders': {},    # leaders[i] = [list of nodes where i is the leader]
    }
def kosaraju(g):
    ''' Return a list containing the SCCs in g. Each entry is a list of nodes.
    '''
    # (Removed two stale `global finishing/leaders` statements: those
    # globals were never defined or assigned anywhere.)
    # First pass: DFS on the reversed graph computes finishing times.
    g_rev = reverse_arcs(g)
    first_run = make_state()
    DFS_loop(g_rev, first_run)
    # Second pass: DFS on g in decreasing finishing-time order; each
    # DFS tree of this pass is one strongly-connected component.
    second_run = make_state()
    DFS_loop(g, second_run, indexes=first_run['finishing'])
    # Collect SCCs grouped by their leader, largest first.
    # `key=len` replaces the Python-2-only `cmp=cmp_len`; both order by
    # length and both sorts are stable, so the result is identical.
    sccs = list(second_run['leaders'].values())
    sccs.sort(key=len, reverse=True)
    return sccs
def DFS_loop(g, state, indexes=None):
    ''' Loop over graph g doing depth-first search on unexplored nodes.

    When `indexes` is given it maps a position to a node and fixes the
    visiting order (second Kosaraju pass); otherwise nodes n..1 are
    visited directly.
    '''
    n = g['max']
    if indexes is None:
        items = range(n, 0, -1)
    else:
        # `range` instead of `xrange` for Python 2/3 compatibility.
        items = [indexes[x] for x in range(n, 0, -1)]
    for i in items:
        if i not in state['explored']:
            state['s'] = i
            DFS_recursive(g, i, state)
            #DFS_iter(g, i, state)
            #DFS_iter2(g, i, state)
def DFS_iter2(g, i, state):
    ''' Non-recursive depth-first search on g starting at i.

    list nodes_to_visit = {root};
    while( nodes_to_visit isn't empty ) {
        currentnode = nodes_to_visit.first();
        nodes_to_visit.prepend( currentnode.children );
        //do something
    }

    NOTE(review): finishing times here are recorded when a node is
    first visited (pre-order), not when its subtree completes as in
    DFS_recursive() -- fine for plain traversal, not for Kosaraju,
    which is presumably why this variant is commented out in DFS_loop.
    '''
    to_visit = deque([i])
    while len(to_visit) > 0:
        current = to_visit.popleft()
        if current in state['explored']:
            continue
        state['explored'].append(current)
        # print() as a function keeps this runnable on Python 2 and 3.
        print('visit %s' % (current))
        if state['s'] in state['leaders']:
            state['leaders'][state['s']].append(current)
        else:
            state['leaders'][state['s']] = [current, ]
        for j in reversed(g[current]):
            if j not in state['explored']:
                to_visit.appendleft(j)
        state['t'] += 1
        state['finishing'][state['t']] = current
def DFS_iter(g, i, state):
    ''' Non-recursive depth-first search on g starting at i.
    '''
    # print() as a function keeps this runnable on Python 2 and 3.
    print("%s/%s explored" % (len(state['explored']), len(g)))
    nodes_to_visit = deque([i])
    while nodes_to_visit:
        visit = nodes_to_visit.popleft()
        if visit in state['explored']:
            continue
        print("visit %s" % (visit))
        state['explored'].append(visit)
        if state['s'] in state['leaders']:
            state['leaders'][state['s']].append(visit)
        else:
            state['leaders'][state['s']] = [visit, ]
        if visit not in g:
            # In case there are holes in the input data.
            g[visit] = []
        for j in reversed(g[visit]):
            if j not in state['explored']:
                nodes_to_visit.appendleft(j)
        state['t'] += 1
        # BUG FIX: record the node just visited, not the DFS root `i`;
        # the old code stamped every finishing entry with the root.
        state['finishing'][state['t']] = visit
def DFS_recursive(g, i, state):
    ''' Do a depth-first search on graph g starting at node i.

    Records i under the current leader and assigns post-order
    finishing times in state['finishing'].
    '''
    explored = state['explored']
    explored.append(i)
    state['leaders'].setdefault(state['s'], []).append(i)
    for j in g[i]:
        if j not in explored:
            DFS_recursive(g, j, state)
    state['t'] += 1
    state['finishing'][state['t']] = i
def main(argv):
    """Compute and report the SCCs of each graph file named in `argv`."""
    sys.setrecursionlimit(1000000) # remember: ulimit -s 65532
    for arg in argv:
        graph = load_data(arg)
        # print() as a function keeps this runnable on Python 2 and 3;
        # output is byte-identical to the old print statements.
        print('%s graph has %s nodes' % (arg, graph['max']))
        sccs = kosaraju(graph)
        print("Five largest SCCs: " + ','.join([str(len(x)) for x in sccs[0:5]]))
if __name__ == "__main__":
    # Each command-line argument is an edge-list file to analyze.
    #sys.argv.append('/Users/chrish/Dropbox/courses/algs-1/week4/SCC.txt')
    main(sys.argv[1:])
|
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
# $URI:$
__version__=''' $Id: utils.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__='''Gazillions of miscellaneous internal utility functions'''
import os, sys, imp, time
import base64
import pickle
from io import BytesIO
import hashlib
from reportlab.lib.logger import warnOnce
from reportlab.lib.rltempfile import get_rl_tempfile, get_rl_tempdir, _rl_getuid
def UniChr(v):
    '''Return the unicode character for codepoint v (Python 2/3 shim).'''
    if sys.version_info[0] != 3:
        return unichr(v)
    return chr(v)
def isStrType(v):
    '''True when v is a text string for this Python version.'''
    string_types = str if sys.version_info[0] == 3 else basestring
    return isinstance(v, string_types)
def isBytesType(v):
    '''True when v is a byte string for this Python version.'''
    bytes_type = bytes if sys.version_info[0] == 3 else str
    return isinstance(v, bytes_type)
def isUnicodeType(v):
    '''True when v is a unicode string for this Python version.'''
    if sys.version_info[0] != 3:
        return isinstance(v, unicode)
    return isinstance(v, str)
def isFunctionType(v):
    '''True when v is a plain python function (exact type match).'''
    return type(v) == type(lambda: None)
def isClassType(v):
    '''True when v is a class (old-style classes on Python 2).'''
    if sys.version_info[0] != 3:
        import types
        return isinstance(v, types.ClassType)
    return isinstance(v, type)
def isMethodType(v):
    '''True when v has the type of a method declared on a class.'''
    class _Probe:
        def probe(self):
            pass
    return type(v) == type(_Probe.probe)
def isModuleType(v):
    '''True when v is a module object (exact type match).'''
    module_type = type(sys)
    return type(v) == module_type
def isSeqType(v, _st=(tuple, list)):
    '''True when v is a list or tuple (the `_st` default types).'''
    return isinstance(v, _st)
def _findFiles(dirList,ext='.ttf'):
from os.path import isfile, isdir, join as path_join
from os import listdir
ext = ext.lower()
R = []
A = R.append
for D in dirList:
if not isdir(D): continue
for fn in listdir(D):
fn = path_join(D,fn)
if isfile(fn) and (not ext or fn.lower().endswith(ext)): A(fn)
return R
# Base class for CIDict below: use `dict` directly where available and
# fall back to the old UserDict class on ancient Pythons.
try:
    _UserDict = dict
except:
    from UserDict import UserDict as _UserDict
class CIDict(_UserDict):
    '''Dictionary with case-insensitive keys.

    String keys are folded to lower case on every access; keys without
    a ``lower()`` method are stored unchanged.
    '''

    def __init__(self, *args, **kwds):
        for arg in args:
            self.update(arg)
        self.update(kwds)

    @staticmethod
    def _fold(k):
        # Lower-case string keys; leave anything else untouched.
        try:
            return k.lower()
        except:
            return k

    def update(self, D):
        for k, v in D.items():
            self[k] = v

    def __setitem__(self, k, v):
        _UserDict.__setitem__(self, self._fold(k), v)

    def __getitem__(self, k):
        return _UserDict.__getitem__(self, self._fold(k))

    def __delitem__(self, k):
        return _UserDict.__delitem__(self, self._fold(k))

    def get(self, k, dv=None):
        try:
            return self[k]
        except KeyError:
            return dv

    def __contains__(self, k):
        try:
            self[k]
        except:
            return False
        return True

    def pop(self, k, *a):
        return _UserDict.pop(*((self, self._fold(k)) + a))

    def setdefault(self, k, *a):
        return _UserDict.setdefault(*((self, self._fold(k)) + a))
if os.name == 'mac':
    #with the Mac, we need to tag the file in a special
    #way so the system knows it is a PDF file.
    #This supplied by Joe Strout
    import macfs, macostools
    # Upper-cased file extension -> (creator, type) codes understood by
    # the classic MacOS Finder.
    _KNOWN_MAC_EXT = {
        'BMP' : ('ogle','BMP '),
        'EPS' : ('ogle','EPSF'),
        'EPSF': ('ogle','EPSF'),
        'GIF' : ('ogle','GIFf'),
        'JPG' : ('ogle','JPEG'),
        'JPEG': ('ogle','JPEG'),
        'PCT' : ('ttxt','PICT'),
        'PICT': ('ttxt','PICT'),
        'PNG' : ('ogle','PNGf'),
        'PPM' : ('ogle','.PPM'),
        'TIF' : ('ogle','TIFF'),
        'TIFF': ('ogle','TIFF'),
        'PDF' : ('CARO','PDF '),
        'HTML': ('MSIE','TEXT'),
        }
    def markfilename(filename,creatorcode=None,filetype=None,ext='PDF'):
        # Tag `filename` with Mac creator/type codes looked up from its
        # extension; unknown extensions and macfs failures are ignored
        # (best effort only).
        try:
            if creatorcode is None or filetype is None and ext is not None:
                try:
                    creatorcode, filetype = _KNOWN_MAC_EXT[ext.upper()]
                except:
                    return
            macfs.FSSpec(filename).SetCreatorType(creatorcode,filetype)
            macostools.touched(filename)
        except:
            pass
else:
    def markfilename(filename,creatorcode=None,filetype=None):
        # No-op everywhere except classic MacOS.
        # NOTE(review): this branch lacks the `ext` parameter the mac
        # variant accepts -- callers passing ext= would fail here; confirm.
        pass
import reportlab
# Absolute directory of the installed reportlab package; __file__ may be
# relative, so normalize before other code depends on it.
__RL_DIR=os.path.dirname(reportlab.__file__) #possibly relative
_RL_DIR=os.path.isabs(__RL_DIR) and __RL_DIR or os.path.abspath(__RL_DIR)
del reportlab
#Attempt to detect if this copy of reportlab is running in a
#file system (as opposed to mostly running in a zip or McMillan
#archive or Jar file). This is used by test cases, so that
#we can write test cases that don't get activated in a compiled
try:
    __file__
except:
    # Frozen/embedded interpreters may not define __file__;
    # fall back to the script path.
    __file__ = sys.argv[0]
import glob, fnmatch
# If __loader__ is set we are running from a zip-style archive; record
# the archive paths so name/glob lookups can be redirected into it.
# Otherwise the except-branch installs plain filesystem fallbacks.
try:
    _isFSD = not __loader__
    _archive = os.path.normcase(os.path.normpath(__loader__.archive))
    _archivepfx = _archive + os.sep
    _archivedir = os.path.dirname(_archive)
    _archivedirpfx = _archivedir + os.sep
    _archivepfxlen = len(_archivepfx)
    _archivedirpfxlen = len(_archivedirpfx)
    def __startswith_rl(fn,
                        _archivepfx=_archivepfx,
                        _archivedirpfx=_archivedirpfx,
                        _archive=_archive,
                        _archivedir=_archivedir,
                        os_path_normpath=os.path.normpath,
                        os_path_normcase=os.path.normcase,
                        os_getcwd=os.getcwd,
                        os_sep=os.sep,
                        os_sep_len = len(os.sep)):
        '''if the name starts with a known prefix strip it off'''
        # Returns (in_archive_or_relative_flag, stripped_name).
        fn = os_path_normpath(fn.replace('/',os_sep))
        nfn = os_path_normcase(fn)
        if nfn in (_archivedir,_archive): return 1,''
        if nfn.startswith(_archivepfx): return 1,fn[_archivepfxlen:]
        if nfn.startswith(_archivedirpfx): return 1,fn[_archivedirpfxlen:]
        cwd = os_path_normcase(os_getcwd())
        n = len(cwd)
        if nfn.startswith(cwd):
            if fn[n:].startswith(os_sep): return 1, fn[n+os_sep_len:]
            if n==len(fn): return 1,''
        return not os.path.isabs(fn),fn
    def _startswith_rl(fn):
        # Public helper: just the stripped name.
        return __startswith_rl(fn)[1]
    def rl_glob(pattern,glob=glob.glob,fnmatch=fnmatch.fnmatch, _RL_DIR=_RL_DIR,pjoin=os.path.join):
        # glob() that also matches members stored inside the archive.
        c, pfn = __startswith_rl(pattern)
        r = glob(pfn)
        if c or r==[]:
            r += map(lambda x,D=_archivepfx,pjoin=pjoin: pjoin(_archivepfx,x),filter(lambda x,pfn=pfn,fnmatch=fnmatch: fnmatch(x,pfn),__loader__._files.keys()))
        return r
except:
    # Normal filesystem installation: no archive handling required.
    _isFSD = os.path.isfile(__file__)   #slight risk of wrong path
    __loader__ = None
    def _startswith_rl(fn):
        return fn
    def rl_glob(pattern,glob=glob.glob):
        return glob(pattern)
del glob, fnmatch
# Source distro: the .py next to this (possibly compiled) module exists.
_isFSSD = _isFSD and os.path.isfile(os.path.splitext(__file__)[0] +'.py')
def isFileSystemDistro():
    '''return truth if a file system distribution'''
    # _isFSD is computed once at import time from __loader__/__file__.
    return _isFSD
def isCompactDistro():
    '''return truth if not a file system distribution'''
    # Compact = running from a zip/McMillan-style archive.
    return not _isFSD
def isSourceDistro():
    '''return truth if a source file system distribution'''
    # True when the matching .py source file is present on disk.
    return _isFSSD
# Prefer the C-accelerated fp_str; fall back to a pure-python version.
try:
    #raise ImportError
    ### NOTE! FP_STR SHOULD PROBABLY ALWAYS DO A PYTHON STR() CONVERSION ON ARGS
    ### IN CASE THEY ARE "LAZY OBJECTS". ACCELLERATOR DOESN'T DO THIS (YET)
    try:
        from _rl_accel import fp_str                # in case of builtin version
    except ImportError:
        from reportlab.lib._rl_accel import fp_str  # specific
except ImportError:
    from math import log
    _log_10 = lambda x,log=log,_log_e_10=log(10.0): log(x)/_log_e_10
    _fp_fmts = "%.0f", "%.1f", "%.2f", "%.3f", "%.4f", "%.5f", "%.6f"
    import re
    _tz_re = re.compile('0+$')  # trailing zeros to strip
    del re
    def fp_str(*a):
        '''convert separate arguments (or single sequence arg) into space separated numeric strings'''
        if len(a)==1 and isSeqType(a[0]): a = a[0]
        s = []
        A = s.append
        for i in a:
            sa =abs(i)
            # Values at or below 1e-7 are emitted as plain '0'.
            if sa<=1e-7: A('0')
            else:
                # Pick a precision keeping roughly 6 significant digits.
                l = sa<=1 and 6 or min(max(0,(6-int(_log_10(sa)))),6)
                n = _fp_fmts[l]%i
                if l:
                    n = _tz_re.sub('',n)  # drop trailing zeros
                    try:
                        if n[-1]=='.': n = n[:-1]
                    except:
                        print(i, n)
                        raise
                # Drop a leading '0' from values like '0.5' -> '.5'.
                A((n[0]!='0' or len(n)==1) and n or n[1:])
        return ' '.join(s)

#hack test for comma users
# Locales with decimal commas must be normalized to dots for PDF output.
if ',' in fp_str(0.25):
    _FP_STR = fp_str
    def fp_str(*a):
        return _FP_STR(*a).replace(',','.')
def recursiveGetAttr(obj, name):
    "Can call down into e.g. object1.object2[4].attr"
    # SECURITY NOTE: `name` is evaluated with eval() against obj's
    # namespace to support indexing syntax -- never pass untrusted
    # strings here.
    return eval(name, obj.__dict__)
def recursiveSetAttr(obj, name, value):
    "Can call down into e.g. object1.object2[4].attr = value"
    # Split off the final attribute; everything before it is resolved
    # via recursiveGetAttr.
    head, sep, last = name.rpartition('.')
    if not sep:
        setattr(obj, name, value)
    else:
        setattr(recursiveGetAttr(obj, head), last, value)
def import_zlib():
    '''Return the zlib module, or None (optionally warning) if missing.'''
    try:
        import zlib
        return zlib
    except ImportError:
        from reportlab.rl_config import ZLIB_WARNINGS
        if ZLIB_WARNINGS:
            warnOnce('zlib not available')
        return None
# Image Capability Detection. Set a flag haveImages
# to tell us if either PIL or Java imaging libraries present.
# define PIL_Image as either None, or an alias for the PIL.Image
# module, as there are 2 ways to import it
if sys.platform[0:4] == 'java':
    # Jython: use the Java imaging stack when present.
    try:
        import javax.imageio
        import java.awt.image
        haveImages = 1
    except:
        haveImages = 0
else:
    try:
        from PIL import Image
    except ImportError:
        try:
            import Image  # very old standalone PIL layout
        except ImportError:
            Image = None
    haveImages = Image is not None
def getBytesIO(buf=None):
    '''unified StringIO instance interface'''
    return BytesIO(buf) if buf else BytesIO()
class ArgvDictValue:
    '''A type to allow clients of getArgvDict to specify a conversion function'''

    def __init__(self, value, func):
        self.value = value   # the default value
        self.func = func     # converter applied to the argv override
def getArgvDict(**kw):
    ''' Builds a dictionary from its keyword arguments with overrides from sys.argv.
    Attempts to be smart about conversions, but the value can be an instance
    of ArgDictValue to allow specifying a conversion function.
    '''
    def handleValue(v,av,func):
        # Convert the command-line string `av` to the type of the default
        # `v`, or through the explicit converter `func` when supplied.
        if func:
            v = func(av)
        else:
            if isStrType(v):
                v = av
            elif isinstance(v,float):
                v = float(av)
            elif isinstance(v,int):
                v = int(av)
            elif isinstance(v,list):
                # SECURITY NOTE: eval() of a command-line value.
                v = list(eval(av))
            elif isinstance(v,tuple):
                v = tuple(eval(av))
            else:
                raise TypeError("Can't convert string %r to %s" % (av,type(v)))
        return v
    A = sys.argv[1:]
    R = {}
    for k, v in kw.items():
        if isinstance(v,ArgvDictValue):
            v, func = v.value, v.func
        else:
            func = None
        handled = 0
        ke = k+'='
        for a in A:
            if a.find(ke)==0:
                # Found a "key=value" argument: consume it and convert.
                av = a[len(ke):]
                A.remove(a)
                R[k] = handleValue(v,av,func)
                handled = 1
                break
        # No override on the command line: keep the default.
        if not handled: R[k] = handleValue(v,v,func)
    return R
def getHyphenater(hDict=None):
    # Return a pyHnj Hyphen instance using `hDict` (or the bundled
    # 'hyphen.mashed' dictionary), or None when pyHnj is not installed.
    try:
        from reportlab.lib.pyHnj import Hyphen
        if hDict is None: hDict=os.path.join(os.path.dirname(__file__),'hyphen.mashed')
        return Hyphen(hDict)
    except ImportError as errMsg:
        # Only swallow the "pyHnj missing" case; re-raise anything else.
        if str(errMsg)!='No module named pyHnj': raise
        return None
def _className(self):
'''Return a shortened class name'''
try:
name = self.__class__.__name__
i=name.rfind('.')
if i>=0: return name[i+1:]
return name
except AttributeError:
return str(self)
def open_for_read_by_name(name,mode='b'):
    # Open `name` for reading; when running from an archive, fall back
    # to fetching the bytes through __loader__ and wrapping them.
    if 'r' not in mode: mode = 'r'+mode
    try:
        return open(name,mode)
    except IOError:
        if _isFSD or __loader__ is None: raise
        #we have a __loader__, perhaps the filename starts with
        #the dirname(reportlab.__file__) or is relative
        name = _startswith_rl(name)
        s = __loader__.get_data(name)
        # Text mode: normalize platform line endings to '\n'.
        if 'b' not in mode and os.linesep!='\n': s = s.replace(os.linesep,'\n')
        return getBytesIO(s)
def open_for_read(name,mode='b', urlopen=None):
    '''attempt to open a file or URL for reading'''
    # File-like objects pass straight through untouched.
    if hasattr(name,'read'): return name
    if not urlopen:
        # Python 2 and Python 3 spell urlopen differently.
        try:
            import urllib2
            urlopen=urllib2.urlopen
        except ImportError:
            import urllib.request
            urlopen=urllib.request.urlopen
    try:
        return open_for_read_by_name(name,mode)
    except:
        # Not a readable file: try treating `name` as a URL.
        try:
            return getBytesIO(urlopen(name).read())
        except:
            raise IOError('Cannot open resource "%s"' % name)
def open_and_read(name, mode='b'):
    '''Return the full contents of the file or URL `name`.'''
    stream = open_for_read(name, mode)
    return stream.read()
def open_and_readlines(name, mode='t'):
    '''Return the contents of `name` split into a list of lines.'''
    return open_and_read(name, mode).split('\n')
def rl_isfile(fn,os_path_isfile=os.path.isfile):
    # isfile() that also sees members packed inside the zip archive;
    # file-like objects count as files.
    if hasattr(fn,'read'): return True
    if os_path_isfile(fn): return True
    if _isFSD or __loader__ is None: return False
    fn = _startswith_rl(fn)
    return fn in __loader__._files.keys()
def rl_isdir(pn,os_path_isdir=os.path.isdir,os_path_normpath=os.path.normpath):
    # isdir() that also sees directories packed inside the zip archive.
    if os_path_isdir(pn): return True
    if _isFSD or __loader__ is None: return False
    pn = _startswith_rl(os_path_normpath(pn))
    if not pn.endswith(os.sep): pn += os.sep
    # `any` instead of len(filter(...)) > 0: same truth value on
    # Python 2 and fixes Python 3, where filter() returns an iterator
    # that has no len().
    return any(x.startswith(pn) for x in __loader__._files.keys())
def rl_listdir(pn,os_path_isdir=os.path.isdir,os_path_normpath=os.path.normpath,os_listdir=os.listdir):
    # listdir() that also works on directories inside the zip archive;
    # returns archive member names relative to `pn`.
    if os_path_isdir(pn) or _isFSD or __loader__ is None: return os_listdir(pn)
    pn = _startswith_rl(os_path_normpath(pn))
    if not pn.endswith(os.sep): pn += os.sep
    return [x[len(pn):] for x in __loader__._files.keys() if x.startswith(pn)]
def rl_getmtime(pn,os_path_isfile=os.path.isfile,os_path_normpath=os.path.normpath,os_path_getmtime=os.path.getmtime,time_mktime=time.mktime):
    # getmtime() that also works for archive members by decoding the
    # DOS time/date fields stored in the zipimporter's file table.
    if os_path_isfile(pn) or _isFSD or __loader__ is None: return os_path_getmtime(pn)
    p = _startswith_rl(os_path_normpath(pn))
    try:
        e = __loader__._files[p]
    except KeyError:
        return os_path_getmtime(pn)
    s = e[5]  # DOS time field (hours/minutes/2-second units)
    d = e[6]  # DOS date field (years since 1980/month/day)
    return time_mktime((((d>>9)&0x7f)+1980,(d>>5)&0xf,d&0x1f,(s>>11)&0x1f,(s>>5)&0x3f,(s&0x1f)<<1,0,0,0))
def rl_get_module(name,dir):
    # Import module `name` from directory `dir` without disturbing any
    # already-imported module of the same name (it is put back on exit).
    if name in sys.modules:
        om = sys.modules[name]
        del sys.modules[name]
    else:
        om = None
    try:
        f = None
        try:
            f, p, desc= imp.find_module(name,[dir])
            return imp.load_module(name,f,p,desc)
        except:
            if isCompactDistro():
                #attempt a load from inside the zip archive
                import zipimport
                dir = _startswith_rl(dir)
                dir = (dir=='.' or not dir) and _archive or os.path.join(_archive,dir.replace('/',os.sep))
                zi = zipimport.zipimporter(dir)
                return zi.load_module(name)
            raise ImportError('%s[%s]' % (name,dir))
    finally:
        # Restore the displaced module and close the file handle that
        # imp.find_module may have opened.
        if om: sys.modules[name] = om
        del om
        if f: f.close()
def _isPILImage(im):
    '''True when `im` is a PIL Image instance (always False without PIL).'''
    if not haveImages:
        return False
    try:
        return isinstance(im, Image.Image)
    except AttributeError:
        return False
class ImageReader(object):
    "Wraps up either PIL or Java to get data from bitmaps"
    # Class-level cache of interned image bytes keyed by md5 digest;
    # only used when imageReaderFlags requests it.
    _cache={}
    def __init__(self, fileName,ident=None):
        if isinstance(fileName,ImageReader):
            # Share state with the existing reader instead of re-reading.
            self.__dict__ = fileName.__dict__   #borgize
            return
        self._ident = ident
        #start wih lots of null private fields, to be populated by
        #the relevant engine.
        self.fileName = fileName
        self._image = None
        self._width = None
        self._height = None
        self._transparent = None
        self._data = None
        if _isPILImage(fileName):
            # Caller handed us an already-open PIL image.
            self._image = fileName
            self.fp = getattr(fileName,'fp',None)
            try:
                self.fileName = self._image.fileName
            except AttributeError:
                self.fileName = 'PILIMAGE_%d' % id(self)
        else:
            try:
                from reportlab.rl_config import imageReaderFlags
                self.fp = open_for_read(fileName,'b')
                if isinstance(self.fp,BytesIO): imageReaderFlags=0 #avoid messing with already internal files
                if imageReaderFlags>0:  #interning
                    data = self.fp.read()
                    if imageReaderFlags&2:  #autoclose
                        try:
                            self.fp.close()
                        except:
                            pass
                    if imageReaderFlags&4:  #cache the data
                        if not self._cache:
                            from rl_config import register_reset
                            register_reset(self._cache.clear)
                        data=self._cache.setdefault(hashlib.md5(data).digest(),data)
                    self.fp=getBytesIO(data)
                elif imageReaderFlags==-1 and isinstance(fileName,(str,unicode)):
                    #try Ralf Schmitt's re-opening technique of avoiding too many open files
                    # NOTE(review): `unicode` is Python-2 only -- this
                    # branch would NameError on Python 3; confirm.
                    self.fp.close()
                    del self.fp #will become a property in the next statement
                    self.__class__=LazyImageReader
                if haveImages:
                    #detect which library we are using and open the image
                    if not self._image:
                        self._image = self._read_image(self.fp)
                    if getattr(self._image,'format',None)=='JPEG': self.jpeg_fh = self._jpeg_fh
                else:
                    # No imaging library available: only JPEGs can be
                    # handled, via the PDF-level pass-through.
                    from reportlab.pdfbase.pdfutils import readJPEGInfo
                    try:
                        self._width,self._height,c=readJPEGInfo(self.fp)
                    except:
                        annotateException('\nImaging Library not available, unable to import bitmaps only jpegs\nfileName=%r identity=%s'%(fileName,self.identity()))
                    self.jpeg_fh = self._jpeg_fh
                    self._data = self.fp.read()
                    self._dataA=None
                    self.fp.seek(0)
            except:
                annotateException('\nfileName=%r identity=%s'%(fileName,self.identity()))
    def identity(self):
        '''try to return information that will identify the instance'''
        fn = self.fileName
        if not isStrType(fn):
            fn = getattr(getattr(self,'fp',None),'name',None)
        ident = self._ident
        return '[%s@%s%s%s]' % (self.__class__.__name__,hex(id(self)),ident and (' ident=%r' % ident) or '',fn and (' filename=%r' % fn) or '')
    def _read_image(self,fp):
        # Open the bitmap with whichever imaging backend is in use.
        if sys.platform[0:4] == 'java':
            from javax.imageio import ImageIO
            return ImageIO.read(fp)
        else:
            return Image.open(fp)
    def _jpeg_fh(self):
        # For JPEGs the raw file can be embedded in the PDF directly;
        # installed per-instance as `jpeg_fh` when a JPEG is detected.
        fp = self.fp
        fp.seek(0)
        return fp
    def jpeg_fh(self):
        # Default: not a JPEG, no direct file handle available.
        return None
    def getSize(self):
        # Lazily determine (width, height) from the underlying image.
        if (self._width is None or self._height is None):
            if sys.platform[0:4] == 'java':
                self._width = self._image.getWidth()
                self._height = self._image.getHeight()
            else:
                self._width, self._height = self._image.size
        return (self._width, self._height)
    def getRGBData(self):
        "Return byte array of RGB data as string"
        try:
            if self._data is None:
                self._dataA = None
                if sys.platform[0:4] == 'java':
                    import jarray
                    from java.awt.image import PixelGrabber
                    width, height = self.getSize()
                    buffer = jarray.zeros(width*height, 'i')
                    pg = PixelGrabber(self._image, 0,0,width,height,buffer,0,width)
                    pg.grabPixels()
                    # there must be a way to do this with a cast not a byte-level loop,
                    # I just haven't found it yet...
                    pixels = []
                    a = pixels.append
                    for i in range(len(buffer)):
                        rgb = buffer[i]
                        a(chr((rgb>>16)&0xff))
                        a(chr((rgb>>8)&0xff))
                        a(chr(rgb&0xff))
                    self._data = ''.join(pixels)
                    self.mode = 'RGB'
                else:
                    im = self._image
                    mode = self.mode = im.mode
                    if mode=='RGBA':
                        # Keep the alpha channel separately as a reader.
                        if Image.VERSION.startswith('1.1.7'): im.load()
                        self._dataA = ImageReader(im.split()[3])
                        im = im.convert('RGB')
                        self.mode = 'RGB'
                    elif mode not in ('L','RGB','CMYK'):
                        im = im.convert('RGB')
                        self.mode = 'RGB'
                    # NOTE(review): Image.tostring() was removed in newer
                    # Pillow releases (tobytes replaces it) -- confirm the
                    # supported PIL version.
                    self._data = im.tostring()
            return self._data
        except:
            annotateException('\nidentity=%s'%self.identity())
    def getImageData(self):
        # Convenience wrapper: (width, height, rgb-bytes).
        width, height = self.getSize()
        return width, height, self.getRGBData()
    def getTransparent(self):
        # Return the palette RGB triple marked transparent, or None.
        if sys.platform[0:4] == 'java':
            return None
        else:
            if "transparency" in self._image.info:
                transparency = self._image.info["transparency"] * 3
                palette = self._image.palette
                try:
                    palette = palette.palette
                except:
                    palette = palette.data
                if sys.version_info[0] == 3:
                    return palette[transparency:transparency+3]
                else:
                    return [ord(c) for c in palette[transparency:transparency+3]]
            else:
                return None
class LazyImageReader(ImageReader):
    # Variant of ImageReader that re-opens the file and re-reads the
    # image on each access instead of holding them open -- avoids
    # keeping too many files open at once.
    def fp(self):
        return open_for_read(self.fileName, 'b')
    fp=property(fp)
    def _image(self):
        return self._read_image(self.fp)
    _image=property(_image)
def getImageData(imageFileName):
    "Get width, height and RGB pixels from image file. Wraps Java/PIL"
    try:
        # Already a reader (or reader-compatible) object?
        return imageFileName.getImageData()
    except AttributeError:
        # Plain filename/stream: wrap it in an ImageReader first.
        return ImageReader(imageFileName).getImageData()
class DebugMemo:
'''Intended as a simple report back encapsulator
Typical usages:
1. To record error data::
dbg = DebugMemo(fn='dbgmemo.dbg',myVar=value)
dbg.add(anotherPayload='aaaa',andagain='bbb')
dbg.dump()
2. To show the recorded info::
dbg = DebugMemo(fn='dbgmemo.dbg',mode='r')
dbg.load()
dbg.show()
3. To re-use recorded information::
dbg = DebugMemo(fn='dbgmemo.dbg',mode='r')
dbg.load()
myTestFunc(dbg.payload('myVar'),dbg.payload('andagain'))
In addition to the payload variables the dump records many useful bits
of information which are also printed in the show() method.
'''
def __init__(self,fn='rl_dbgmemo.dbg',mode='w',getScript=1,modules=(),capture_traceback=1, stdout=None, **kw):
import time, socket
self.fn = fn
if not stdout:
self.stdout = sys.stdout
else:
if hasattr(stdout,'write'):
self.stdout = stdout
else:
self.stdout = open(stdout,'w')
if mode!='w': return
self.store = store = {}
if capture_traceback and sys.exc_info() != (None,None,None):
import traceback
s = getBytesIO()
traceback.print_exc(None,s)
store['__traceback'] = s.getvalue()
cwd=os.getcwd()
lcwd = os.listdir(cwd)
pcwd = os.path.dirname(cwd)
lpcwd = pcwd and os.listdir(pcwd) or '???'
exed = os.path.abspath(os.path.dirname(sys.argv[0]))
project_version='???'
md=None
try:
import marshal
md=marshal.loads(__loader__.get_data('meta_data.mar'))
project_version=md['project_version']
except:
pass
env = os.environ
K=env.keys()
store.update({ 'gmt': time.asctime(time.gmtime(time.time())),
'platform': sys.platform,
'version': sys.version,
'hexversion': hex(sys.hexversion),
'executable': sys.executable,
'exec_prefix': sys.exec_prefix,
'prefix': sys.prefix,
'path': sys.path,
'argv': sys.argv,
'cwd': cwd,
'hostname': socket.gethostname(),
'lcwd': lcwd,
'lpcwd': lpcwd,
'byteorder': sys.byteorder,
'maxint': sys.maxint,
'maxint': getattr(sys,'maxunicode','????'),
'api_version': getattr(sys,'api_version','????'),
'version_info': getattr(sys,'version_info','????'),
'winver': getattr(sys,'winver','????'),
'environment': '\n\t\t\t'.join(['']+['%s=%r' % (k,env[k]) for k in K]),
'__loader__': repr(__loader__),
'project_meta_data': md,
'project_version': project_version,
})
for M,A in (
(sys,('getwindowsversion','getfilesystemencoding')),
(os,('uname', 'ctermid', 'getgid', 'getuid', 'getegid',
'geteuid', 'getlogin', 'getgroups', 'getpgrp', 'getpid', 'getppid',
)),
):
for a in A:
if hasattr(M,a):
try:
store[a] = getattr(M,a)()
except:
pass
if exed!=cwd:
try:
store.update({'exed': exed, 'lexed': os.listdir(exed),})
except:
pass
if getScript:
fn = os.path.abspath(sys.argv[0])
if os.path.isfile(fn):
try:
store['__script'] = (fn,open(fn,'r').read())
except:
pass
module_versions = {}
for n,m in sys.modules.items():
if n=='reportlab' or n=='rlextra' or n[:10]=='reportlab.' or n[:8]=='rlextra.':
v = [getattr(m,x,None) for x in ('__version__','__path__','__file__')]
if filter(None,v):
v = [v[0]] + filter(None,v[1:])
module_versions[n] = tuple(v)
store['__module_versions'] = module_versions
self.store['__payload'] = {}
self._add(kw)
def _add(self,D):
payload = self.store['__payload']
for k, v in D.items():
payload[k] = v
def add(self,**kw):
self._add(kw)
def _dump(self,f):
try:
pos=f.tell()
pickle.dump(self.store,f)
except:
S=self.store.copy()
ff=getBytesIO()
for k,v in S.iteritems():
try:
pickle.dump({k:v},ff)
except:
S[k] = '<unpicklable object %r>' % v
f.seek(pos,0)
pickle.dump(S,f)
def dump(self):
f = open(self.fn,'wb')
try:
self._dump(f)
finally:
f.close()
def dumps(self):
f = getBytesIO()
self._dump(f)
return f.getvalue()
def _load(self,f):
self.store = pickle.load(f)
def load(self):
f = open(self.fn,'rb')
try:
self._load(f)
finally:
f.close()
def loads(self,s):
self._load(getBytesIO(s))
def _show_module_versions(self,k,v):
self._writeln(k[2:])
K = v.keys()
K.sort()
for k in K:
vk = vk0 = v[k]
if isinstance(vk,tuple): vk0 = vk[0]
try:
__import__(k)
m = sys.modules[k]
d = getattr(m,'__version__',None)==vk0 and 'SAME' or 'DIFFERENT'
except:
m = None
d = '??????unknown??????'
self._writeln(' %s = %s (%s)' % (k,vk,d))
    def _banner(self,k,what):
        # Shared banner line used by the Start/Finish wrappers below.
        self._writeln('###################%s %s##################' % (what,k[2:]))
    def _start(self,k):
        self._banner(k,'Start ')
    def _finish(self,k):
        self._banner(k,'Finish ')
    def _show_lines(self,k,v):
        # Dump a preformatted text blob between Start/Finish banners.
        self._start(k)
        self._writeln(v)
        self._finish(k)
    def _show_file(self,k,v):
        # v is a (filename, contents) pair captured at construction time.
        k = '%s %s' % (k,os.path.basename(v[0]))
        self._show_lines(k,v[1])
    def _show_payload(self,k,v):
        # Pretty-print the user payload, but only if it is non-empty.
        if v:
            import pprint
            self._start(k)
            pprint.pprint(v,self.stdout)
            self._finish(k)
def _show_extensions(self):
for mn in ('_rl_accel','_renderPM','sgmlop','pyRXP','pyRXPU','_imaging','Image'):
try:
A = [mn].append
__import__(mn)
m = sys.modules[mn]
A(m.__file__)
for vn in ('__version__','VERSION','_version','version'):
if hasattr(m,vn):
A('%s=%r' % (vn,getattr(m,vn)))
except:
A('not found')
self._writeln(' '+' '.join(A.__self__))
specials = {'__module_versions': _show_module_versions,
'__payload': _show_payload,
'__traceback': _show_lines,
'__script': _show_file,
}
def show(self):
K = self.store.keys()
K.sort()
for k in K:
if k not in self.specials.keys(): self._writeln('%-15s = %s' % (k,self.store[k]))
for k in K:
if k in self.specials.keys(): self.specials[k](self,k,self.store[k])
self._show_extensions()
    def payload(self,name):
        """Return a single named value from the stored payload."""
        return self.store['__payload'][name]
    def __setitem__(self,name,value):
        # Dict-style access writes straight through to the payload.
        self.store['__payload'][name] = value
    def __getitem__(self,name):
        return self.store['__payload'][name]
    def _writeln(self,msg):
        # All output funnels through here so self.stdout can be redirected.
        self.stdout.write(msg+'\n')
def _flatten(L,a):
    # Recursive worker for flatten(); `a` is the accumulator's append method.
    # NOTE(review): relies on module-level isSeqType() (defined earlier in
    # this file) to decide whether to recurse into an element.
    for x in L:
        if isSeqType(x): _flatten(x,a)
        else: a(x)
def flatten(L):
    '''recursively flatten the list or tuple L'''
    out = []
    _flatten(L, out.append)
    return out
def find_locals(func,depth=0):
    '''apply func to the locals at each stack frame till func returns a non false value'''
    # depth 0 is find_locals' own frame; each iteration walks one frame
    # further up the call stack until func() yields something truthy.
    while True:
        found = func(sys._getframe(depth).f_locals)
        if found:
            return found
        depth += 1
class _FmtSelfDict:
    '''Mapping used by FmtSelfDict._fmt: lookups try the override
    arguments first, then the wrapped object's __dict__, then its class
    attributes/properties via getattr.'''
    def __init__(self,obj,overrideArgs):
        self.obj = obj
        self._overrideArgs = overrideArgs
    def __getitem__(self,k):
        try:
            return self._overrideArgs[k]
        except KeyError:
            pass
        D = self.obj.__dict__
        if k in D:
            return D[k]
        return getattr(self.obj,k)
class FmtSelfDict:
    '''mixin to provide the _fmt method'''
    def _fmt(self,fmt,**overrideArgs):
        """%-format fmt from instance attributes, overridden by kwargs."""
        D = _FmtSelfDict(self, overrideArgs)
        return fmt % D
def _simpleSplit(txt,mW,SW):
    """Greedy word-wrap: pack whitespace-split words of txt into lines no
    wider than mW, with widths measured by the callable SW.  A line always
    holds at least one word, even if that word alone exceeds mW."""
    lines = []
    space_w = SW(' ')
    cur = []            # words on the line being built
    cur_w = -space_w    # running width; first word contributes no space
    for word in txt.split():
        word_w = SW(word)
        if cur and cur_w + space_w + word_w > mW:
            # current line is full: flush it and start a new one
            lines.append(' '.join(cur))
            cur = [word]
            cur_w = word_w
        else:
            cur.append(word)
            cur_w += space_w + word_w
    if cur:
        lines.append(' '.join(cur))
    return lines
def simpleSplit(text,fontName,fontSize,maxWidth):
    """Split text into lines that fit maxWidth at the given font/size.

    Splits on newlines first, then greedily word-wraps each resulting line
    (via _simpleSplit) using reportlab's stringWidth metrics.  If maxWidth
    is falsy, the text is only split on newlines.
    """
    from reportlab.pdfbase.pdfmetrics import stringWidth
    lines = text.split('\n')
    SW = lambda text, fN=fontName, fS=fontSize: stringWidth(text, fN, fS)
    if maxWidth:
        L = []
        for l in lines:
            # Bug fix: was `L[-1:-1] = ...`, which inserts each line's
            # fragments *before* the last element already in L, scrambling
            # the order for multi-line input.  Append in order instead.
            L.extend(_simpleSplit(l,maxWidth,SW))
        lines = L
    return lines
def escapeTextOnce(text):
"Escapes once only"
from xml.sax.saxutils import escape
if text is None:
return text
text = escape(text)
text = text.replace('&amp;', '&')
text = text.replace('&gt;', '>')
text = text.replace('&lt;', '<')
return text
import itertools
def prev_this_next(items):
    """
    Loop over a collection with look-ahead and look-back.
    From Thomas Guest,
    http://wordaligned.org/articles/zippy-triples-served-with-python
    Seriously useful looping tool (Google "zippy triples")
    lets you loop a collection and see the previous and next items,
    which get set to None at the ends.
    To be used in layout algorithms where one wants a peek at the
    next item coming down the pipe.

    NOTE(review): Python 2 only as written -- it uses iterator.next()
    and itertools.izip; Python 3 would need next(it) and zip().
    """
    extend = itertools.chain([None], items, [None])
    prev, this, next = itertools.tee(extend, 3)
    try:
        # advance `this` by one and `next` by two so the three tee'd
        # iterators yield (previous, current, following) in lockstep
        this.next()
        next.next()
        next.next()
    except StopIteration:
        pass
    return itertools.izip(prev, this, next)
def commasplit(s):
    '''
    Splits the string s at every unescaped comma and returns the result as a list.
    To escape a comma, double it. Individual items are stripped.
    To avoid the ambiguity of 3 successive commas to denote a comma at the beginning
    or end of an item, add a space between the item separator and the escaped comma.
    >>> commasplit('a,b,c')
    ['a', 'b', 'c']
    >>> commasplit('a,, , b , c ')
    ['a,', 'b', 'c']
    >>> commasplit('a, ,,b, c')
    ['a', ',b', 'c']
    '''
    last = len(s) - 1
    padded = s + ' '            # sentinel so padded[pos+1] is always valid
    parts = ['']
    pos = 0
    while pos <= last:
        ch = padded[pos]
        if ch == ',':
            if padded[pos + 1] == ',':
                # doubled comma: literal ',' inside the current item
                parts[-1] += ','
                pos += 1
            else:
                # item separator: close the current item
                parts[-1] = parts[-1].strip()
                if pos != last:
                    parts.append('')
        else:
            parts[-1] += ch
        pos += 1
    parts[-1] = parts[-1].strip()
    return parts
def commajoin(l):
    '''
    Inverse of commasplit, except that whitespace around items is not conserved.
    Adds more whitespace than needed for simplicity and performance.
    >>> commasplit(commajoin(['a', 'b', 'c']))
    ['a', 'b', 'c']
    >>> commasplit(commajoin(['a,', ' b ', 'c']))
    ['a,', 'b', 'c']
    >>> commasplit(commajoin(['a ', ',b', 'c']))
    ['a', ',b', 'c']
    '''
    # each item is space-padded (disambiguates escaped commas at the item
    # edges) and internal commas are doubled, matching commasplit's escape
    return ','.join([ ' ' + i.replace(',', ',,') + ' ' for i in l ])
def findInPaths(fn,paths,isfile=True,fail=False):
    '''search for relative files in likely places'''
    # pick the existence test: files by default, directories otherwise
    exists = os.path.isfile if isfile else os.path.isdir
    if exists(fn):
        return fn
    if not os.path.isabs(fn):
        join = os.path.join
        for d in paths:
            candidate = join(d,fn)
            if exists(candidate):
                return candidate
    if fail:
        raise ValueError('cannot locate %r with paths=%r' % (fn,paths))
    # not found: hand back the original name unchanged
    return fn
def annotateException(msg,enc='utf8'):
    '''add msg to the args of an existing exception

    Must be called from inside an except block.  The message is appended
    to the first string argument of the active exception (or to strerror
    for IOError), then the exception is re-raised with its traceback.
    '''
    # Bug fix: this line read `rise`, a NameError typo for `raise`.
    if not msg: raise
    t,v,b=sys.exc_info()
    if not hasattr(v,'args'): raise
    e = -1
    A = list(v.args)
    # find the first string-like argument to append to
    for i,a in enumerate(A):
        if isStrType(a):
            e = i
            break
    if e>=0:
        if sys.version_info[0] != 3:
            # Python 2: make msg's type (str/unicode) agree with the arg
            # it is appended to, avoiding implicit-coercion UnicodeErrors.
            if isinstance(a,unicode):
                if not isinstance(msg,unicode):
                    msg=msg.decode(enc)
            else:
                if isinstance(msg,unicode):
                    msg=msg.encode(enc)
        else:
            msg = str(msg)
        if isinstance(v,IOError) and getattr(v,'strerror',None):
            v.strerror = msg+'\n'+str(v.strerror)
        else:
            A[e] += msg
    else:
        # no string argument found: just add the message as a new arg
        A.append(msg)
    v.args = tuple(A)
    # re-raise as the same exception type, carrying the original traceback
    e = t(v)
    e.__traceback__ = b
    raise e
def escapeOnce(data):
    """Ensure XML output is escaped just once, irrespective of input
    >>> escapeOnce('A & B')
    'A &amp; B'
    >>> escapeOnce('C &amp; D')
    'C &amp; D'
    >>> escapeOnce('E &amp;amp; F')
    'E &amp; F'
    """
    # Escape every ampersand...  (these replacement strings were mangled
    # by HTML un-escaping into no-op identity replaces; reconstructed to
    # match the docstring's stated behavior.)
    data = data.replace("&", "&amp;")
    #...but if it was already escaped, make sure it
    # is not done twice....this will turn any tags
    # back to how they were at the start.
    data = data.replace("&amp;amp;", "&amp;")
    data = data.replace("&amp;gt;", "&gt;")
    data = data.replace("&amp;lt;", "&lt;")
    data = data.replace("&amp;#", "&#")
    #..and just in case someone had double-escaped it, do it again
    data = data.replace("&amp;amp;", "&amp;")
    data = data.replace("&amp;gt;", "&gt;")
    data = data.replace("&amp;lt;", "&lt;")
    return data
def encode_label(args):
    """Pickle args and base64-encode the result into a native-string label."""
    # NOTE(review): base64.encodestring is the Py2-era API (removed in
    # Python 3.9); isStrType is this module's str-type compat helper.
    s = base64.encodestring(pickle.dumps(args)).strip()
    if not isStrType(s):
        s = s.decode('utf-8')
    return s
def decode_label(label):
    """Inverse of encode_label: base64-decode, then unpickle.

    NOTE(review): pickle.loads on untrusted label data can execute
    arbitrary code -- only call this with trusted input.
    """
    if isUnicodeType(label):
        label = label.encode('utf-8')
    v = pickle.loads(base64.decodestring(label))
    return v
class IdentStr(str):
    '''useful for identifying things that get split'''
    def __new__(cls,value):
        """Build 'base[n]'; rebuilding from an IdentStr bumps n by one."""
        if isinstance(value,IdentStr):
            prev = value.__inc
            # strip the trailing '[<prev>]' to recover the base text
            value = value[:-(2+len(str(prev)))]
            counter = prev + 1
        else:
            counter = 0
        obj = str.__new__(cls,'%s[%d]' % (value,counter))
        obj.__inc = counter
        return obj
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing `LinearOperator` and sub-classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.linalg.python.ops import linear_operator_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
@six.add_metaclass(abc.ABCMeta)  # pylint: disable=no-init
class LinearOperatorDerivedClassTest(test.TestCase):
  """Tests for derived classes.
  Subclasses should implement every abstractmethod, and this will enable all
  test methods to work.
  """
  # Absolute/relative tolerance for tests, keyed by dtype.
  _atol = {
      dtypes.float16: 1e-3,
      dtypes.float32: 1e-6,
      dtypes.float64: 1e-12,
      dtypes.complex64: 1e-6,
      dtypes.complex128: 1e-12
  }
  _rtol = {
      dtypes.float16: 1e-3,
      dtypes.float32: 1e-6,
      dtypes.float64: 1e-12,
      dtypes.complex64: 1e-6,
      dtypes.complex128: 1e-12
  }
  def assertAC(self, x, y):
    """Derived classes can set _atol, _rtol to get different tolerance."""
    dtype = dtypes.as_dtype(x.dtype)
    atol = self._atol[dtype]
    rtol = self._rtol[dtype]
    self.assertAllClose(x, y, atol=atol, rtol=rtol)
  @property
  def _dtypes_to_test(self):
    # TODO(langmore) Test tf.float16 once tf.matrix_solve works in 16bit.
    return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
  @abc.abstractproperty
  def _shapes_to_test(self):
    """Returns list of tuples, each is one shape that will be tested."""
    raise NotImplementedError("shapes_to_test has not been implemented.")
  @abc.abstractmethod
  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    """Build a batch matrix and an Operator that should have similar behavior.
    Every operator acts like a (batch) matrix.  This method returns both
    together, and is used by tests.
    Args:
      shape: List-like of Python integers giving full shape of operator.
      dtype: Numpy dtype.  Data type of returned array/operator.
      use_placeholder: Python bool.  If True, initialize the operator with a
        placeholder of undefined shape and correct dtype.
    Returns:
      operator: `LinearOperator` subclass instance.
      mat: `Tensor` representing operator.
      feed_dict: Dictionary.
        If use_placeholder is True, this must contain everything needed to be
        fed to sess.run calls at runtime to make the operator work.
    """
    # Create a matrix as a numpy array with desired shape/dtype.
    # Create a LinearOperator that should have the same behavior as the matrix.
    raise NotImplementedError("Not implemented yet.")
  @abc.abstractmethod
  def _make_rhs(self, operator, adjoint):
    """Make a rhs appropriate for calling operator.solve(rhs).
    Args:
      operator: A `LinearOperator`
      adjoint: Python `bool`.  If `True`, we are making a 'rhs' value for the
        adjoint operator.
    Returns:
      A `Tensor`
    """
    raise NotImplementedError("_make_rhs is not defined.")
  @abc.abstractmethod
  def _make_x(self, operator, adjoint):
    """Make an 'x' appropriate for calling operator.apply(x).
    Args:
      operator: A `LinearOperator`
      adjoint: Python `bool`.  If `True`, we are making an 'x' value for the
        adjoint operator.
    Returns:
      A `Tensor`
    """
    raise NotImplementedError("_make_x is not defined.")
  @property
  def _tests_to_skip(self):
    """List of test names to skip."""
    # Subclasses should over-ride if they want to skip some tests.
    # To skip "test_foo", add "foo" to this list.
    return []
  def _skip_if_tests_to_skip_contains(self, test_name):
    """If self._tests_to_skip contains test_name, raise SkipTest exception.
    See tests below for usage.
    Args:
      test_name: String name corresponding to a test.
    Raises:
      SkipTest Exception, if test_name is in self._tests_to_skip.
    """
    if test_name in self._tests_to_skip:
      # Bug fix: the "%s" placeholder was never filled in, so the skip
      # message printed a literal "%s" instead of the test name.
      self.skipTest(
          "%s skipped because it was added to self._tests_to_skip." % test_name)
  def test_to_dense(self):
    self._skip_if_tests_to_skip_contains("to_dense")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_dense = operator.to_dense()
            if not use_placeholder:
              self.assertAllEqual(shape, op_dense.get_shape())
            op_dense_v, mat_v = sess.run([op_dense, mat], feed_dict=feed_dict)
            self.assertAC(op_dense_v, mat_v)
  def test_det(self):
    self._skip_if_tests_to_skip_contains("det")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          if dtype.is_complex:
            self.skipTest(
                "tf.matrix_determinant does not work with complex, so this "
                "test is being skipped.")
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_det = operator.determinant()
            if not use_placeholder:
              self.assertAllEqual(shape[:-2], op_det.get_shape())
            op_det_v, mat_det_v = sess.run(
                [op_det, linalg_ops.matrix_determinant(mat)],
                feed_dict=feed_dict)
            self.assertAC(op_det_v, mat_det_v)
  def test_log_abs_det(self):
    self._skip_if_tests_to_skip_contains("log_abs_det")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          if dtype.is_complex:
            self.skipTest(
                "tf.matrix_determinant does not work with complex, so this "
                "test is being skipped.")
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_log_abs_det = operator.log_abs_determinant()
            mat_log_abs_det = math_ops.log(
                math_ops.abs(linalg_ops.matrix_determinant(mat)))
            if not use_placeholder:
              self.assertAllEqual(shape[:-2], op_log_abs_det.get_shape())
            op_log_abs_det_v, mat_log_abs_det_v = sess.run(
                [op_log_abs_det, mat_log_abs_det],
                feed_dict=feed_dict)
            self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
  def test_apply(self):
    self._skip_if_tests_to_skip_contains("apply")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          for adjoint in False, True:
            for adjoint_arg in False, True:
              with self.test_session(graph=ops.Graph()) as sess:
                sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                    shape, dtype, use_placeholder=use_placeholder)
                x = self._make_x(operator, adjoint=adjoint)
                # If adjoint_arg, compute A X^H^H = A X.
                if adjoint_arg:
                  op_apply = operator.apply(
                      linear_operator_util.matrix_adjoint(x),
                      adjoint=adjoint, adjoint_arg=adjoint_arg)
                else:
                  op_apply = operator.apply(x, adjoint=adjoint)
                mat_apply = math_ops.matmul(mat, x, adjoint_a=adjoint)
                if not use_placeholder:
                  self.assertAllEqual(
                      op_apply.get_shape(), mat_apply.get_shape())
                op_apply_v, mat_apply_v = sess.run([op_apply, mat_apply],
                                                   feed_dict=feed_dict)
                self.assertAC(op_apply_v, mat_apply_v)
  def test_solve(self):
    self._skip_if_tests_to_skip_contains("solve")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          for adjoint in False, True:
            for adjoint_arg in False, True:
              with self.test_session(graph=ops.Graph()) as sess:
                sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                    shape, dtype, use_placeholder=use_placeholder)
                rhs = self._make_rhs(operator, adjoint=adjoint)
                # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
                if adjoint_arg:
                  op_solve = operator.solve(
                      linear_operator_util.matrix_adjoint(rhs),
                      adjoint=adjoint, adjoint_arg=adjoint_arg)
                else:
                  op_solve = operator.solve(
                      rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
                mat_solve = linalg_ops.matrix_solve(mat, rhs, adjoint=adjoint)
                if not use_placeholder:
                  self.assertAllEqual(
                      op_solve.get_shape(), mat_solve.get_shape())
                op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve],
                                                   feed_dict=feed_dict)
                self.assertAC(op_solve_v, mat_solve_v)
  def test_add_to_tensor(self):
    self._skip_if_tests_to_skip_contains("add_to_tensor")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_plus_2mat = operator.add_to_tensor(2 * mat)
            if not use_placeholder:
              self.assertAllEqual(shape, op_plus_2mat.get_shape())
            op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat],
                                             feed_dict=feed_dict)
            # operator + 2*mat should equal 3*mat since operator ~ mat.
            self.assertAC(op_plus_2mat_v, 3 * mat_v)
  def test_diag_part(self):
    self._skip_if_tests_to_skip_contains("diag_part")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_diag_part = operator.diag_part()
            mat_diag_part = array_ops.matrix_diag_part(mat)
            if not use_placeholder:
              self.assertAllEqual(
                  mat_diag_part.get_shape(), op_diag_part.get_shape())
            op_diag_part_, mat_diag_part_ = sess.run(
                [op_diag_part, mat_diag_part], feed_dict=feed_dict)
            self.assertAC(op_diag_part_, mat_diag_part_)
@six.add_metaclass(abc.ABCMeta)
class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
  """Base test class appropriate for square operators.
  Sub-classes must still define all abstractmethods from
  LinearOperatorDerivedClassTest that are not defined here.
  """
  @property
  def _shapes_to_test(self):
    # One non-batch shape plus several batched ones, all square.
    return [(0, 0), (1, 1), (1, 3, 3), (3, 4, 4), (2, 1, 4, 4)]
  def _make_rhs(self, operator, adjoint):
    # Square operator: a rhs has the same shape as an x, so reuse _make_x.
    # Flip `adjoint` purely to be pedantic; the shape is unaffected.
    return self._make_x(operator, adjoint=not adjoint)
  def _make_x(self, operator, adjoint):
    """Return a random [B1,...,Bb, N, R] matrix for operator.apply(x)."""
    # Number of systems R (1 or 2); adjoint is irrelevant for square ops.
    num_rhs = self._get_num_systems(operator)
    if operator.shape.is_fully_defined():
      # Static shape known: assemble the shape as a Python list.
      dims = operator.batch_shape.as_list()
      dims += [operator.domain_dimension.value, num_rhs]
      return random_normal(dims, dtype=operator.dtype)
    # Dynamic shape: assemble it as a tensor at graph-run time.
    dyn_shape = array_ops.concat(
        (operator.batch_shape_tensor(),
         [operator.domain_dimension_tensor(), num_rhs]), 0)
    return random_normal(dyn_shape, dtype=operator.dtype)
  def _get_num_systems(self, operator):
    """Get some number, either 1 or 2, depending on operator."""
    rank = operator.tensor_rank
    return 1 if rank is None or rank % 2 else 2
@six.add_metaclass(abc.ABCMeta)
class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
  """Base test class appropriate for generic rectangular operators.
  Square shapes are never tested by this class, so if you want to test your
  operator with a square shape, create two test classes, the other subclassing
  SquareLinearOperatorFullMatrixTest.
  Sub-classes must still define all abstractmethods from
  LinearOperatorDerivedClassTest that are not defined here.
  """
  @property
  def _tests_to_skip(self):
    # Rectangular operators have no determinant and no (square) solve.
    return ["solve", "det", "log_abs_det"]
  @property
  def _shapes_to_test(self):
    # Strictly rectangular shapes, batched and unbatched.
    return [(2, 1), (1, 2), (1, 3, 2), (3, 3, 4), (2, 1, 2, 4)]
  def _make_rhs(self, operator, adjoint):
    # TODO(langmore) Add once we're testing solve_ls.
    raise NotImplementedError(
        "_make_rhs not implemented because we don't test solve")
  def _make_x(self, operator, adjoint):
    """Return a random [B1,...,Bb, N, R] matrix for operator.apply(x)."""
    num_rhs = self._get_num_systems(operator)
    if operator.shape.is_fully_defined():
      # Static shapes: use range dimension when applying the adjoint,
      # domain dimension otherwise.
      if adjoint:
        n = operator.range_dimension.value
      else:
        n = operator.domain_dimension.value
      dims = operator.batch_shape.as_list() + [n, num_rhs]
      return random_normal(dims, dtype=operator.dtype)
    # Dynamic shapes: same choice, but via tensors at graph-run time.
    if adjoint:
      n = operator.range_dimension_tensor()
    else:
      n = operator.domain_dimension_tensor()
    dyn_shape = array_ops.concat(
        (operator.batch_shape_tensor(), [n, num_rhs]), 0)
    return random_normal(dyn_shape, dtype=operator.dtype)
  def _get_num_systems(self, operator):
    """Get some number, either 1 or 2, depending on operator."""
    rank = operator.tensor_rank
    return 1 if rank is None or rank % 2 else 2
def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False):
  """[batch] positive definite matrix.
  Args:
    shape: `TensorShape` or Python list.  Shape of the returned matrix.
    dtype: `TensorFlow` `dtype` or Python dtype.
    force_well_conditioned: Python bool.  If `True`, returned matrix has
      eigenvalues with modulus in `(1, 4)`.  Otherwise, eigenvalues are
      chi-squared random variables.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  if not contrib_tensor_util.is_tensor(shape):
    shape = tensor_shape.TensorShape(shape)
    # Matrix must be square.
    shape[-1].assert_is_compatible_with(shape[-2])
  with ops.name_scope("random_positive_definite_matrix"):
    tril = random_tril_matrix(
        shape, dtype, force_well_conditioned=force_well_conditioned)
    # tril @ tril^H is Hermitian positive (semi-)definite by construction.
    return math_ops.matmul(tril, tril, adjoint_b=True)
def random_tril_matrix(shape,
                       dtype,
                       force_well_conditioned=False,
                       remove_upper=True):
  """[batch] lower triangular matrix.
  Args:
    shape: `TensorShape` or Python `list`.  Shape of the returned matrix.
    dtype: `TensorFlow` `dtype` or Python dtype
    force_well_conditioned: Python `bool`.  If `True`, returned matrix will
      have eigenvalues with modulus in `(1, 2)`.  Otherwise, eigenvalues are
      unit normal random variables.
    remove_upper: Python `bool`.
      If `True`, zero out the strictly upper triangle.
      If `False`, the lower triangle of returned matrix will have desired
      properties, but will not not have the strictly upper triangle zero'd out.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  with ops.name_scope("random_tril_matrix"):
    # Totally random matrix.  Has no nice properties.
    tril = random_normal(shape, dtype=dtype)
    if remove_upper:
      tril = array_ops.matrix_band_part(tril, -1, 0)
    # Create a diagonal with entries having modulus in [1, 2].
    # (A triangular matrix's eigenvalues are its diagonal entries, so
    # bounding the diagonal away from zero controls the conditioning.)
    if force_well_conditioned:
      maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype)
      diag = random_sign_uniform(
          shape[:-1], dtype=dtype, minval=1., maxval=maxval)
      tril = array_ops.matrix_set_diag(tril, diag)
    return tril
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.
  Samples are distributed like
  ```
  N(mean, stddev^2), if dtype is real,
  X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```
  Args:
    shape: `TensorShape` or Python list.  Shape of the returned tensor.
    mean: `Tensor` giving mean of normal to sample from.
    stddev: `Tensor` giving stdev of normal to sample from.
    dtype: `TensorFlow` `dtype` or numpy dtype
    seed: Python integer seed for the RNG.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_normal"):
    samples = random_ops.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        # Offset the seed so the imaginary part gets a different (but
        # still deterministic) stream than the real part.
        seed += 1234
      more_samples = random_ops.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
def random_uniform(shape,
                   minval=None,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None):
  """Tensor with (possibly complex) Uniform entries.
  Samples are distributed like
  ```
  Uniform[minval, maxval], if dtype is real,
  X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```
  Args:
    shape: `TensorShape` or Python list.  Shape of the returned tensor.
    minval: `0-D` `Tensor` giving the minimum values.
    maxval: `0-D` `Tensor` giving the maximum values.
    dtype: `TensorFlow` `dtype` or Python dtype
    seed: Python integer seed for the RNG.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_uniform"):
    samples = random_ops.random_uniform(
        shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        # Distinct deterministic stream for the imaginary part.
        seed += 12345
      more_samples = random_ops.random_uniform(
          shape,
          dtype=dtype.real_dtype,
          minval=minval,
          maxval=maxval,
          seed=seed)
      samples = math_ops.complex(samples, more_samples)
    return samples
def random_sign_uniform(shape,
                        minval=None,
                        maxval=None,
                        dtype=dtypes.float32,
                        seed=None):
  """Tensor with (possibly complex) random entries from a "sign Uniform".
  Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
  Samples from this `Op` are distributed like
  ```
  Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
  Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```
  Args:
    shape: `TensorShape` or Python list.  Shape of the returned tensor.
    minval: `0-D` `Tensor` giving the minimum values.
    maxval: `0-D` `Tensor` giving the maximum values.
    dtype: `TensorFlow` `dtype` or Python dtype
    seed: Python integer seed for the RNG.
  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_sign_uniform"):
    unsigned_samples = random_uniform(
        shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
    if seed is not None:
      # Distinct deterministic stream for the sign draw.
      seed += 12
    # sign() of a Uniform[-1, 1] draw gives +/-1 with equal probability.
    signs = math_ops.sign(
        random_ops.random_uniform(
            shape, minval=-1., maxval=1., seed=seed))
    return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)
def random_normal_correlated_columns(
    shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, eps=1e-4, seed=None):
  """Batch matrix with (possibly complex) Gaussian entries and correlated cols.
  Returns random batch matrix `A` with specified element-wise `mean`, `stddev`,
  living close to an embedded hyperplane.
  Suppose `shape[-2:] = (M, N)`.
  If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries.
  If `M >= N`, then the colums of `A` will be made almost dependent as follows:
  ```
  L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1)
  B = random normal M x N-1 matrix, mean = 0, stddev = stddev.
  G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane
  E = a random normal M x N matrix, mean = 0, stddev = eps
  mu = a constant M x N matrix, equal to the argument "mean"
  A = G + E + mu
  ```
  Args:
    shape: Python list of integers.
      Shape of the returned tensor.  Must be at least length two.
    mean: `Tensor` giving mean of normal to sample from.
    stddev: `Tensor` giving stdev of normal to sample from.
    dtype: `TensorFlow` `dtype` or numpy dtype
    eps: Distance each column is perturbed from the low-dimensional subspace.
    seed: Python integer seed for the RNG.
  Returns:
    `Tensor` with desired shape and dtype.
  Raises:
    ValueError: If `shape` is not at least length 2.
  """
  dtype = dtypes.as_dtype(dtype)
  if len(shape) < 2:
    raise ValueError(
        "Argument shape must be at least length 2. Found: %s" % shape)
  # Shape is the final shape, e.g. [..., M, N]
  shape = list(shape)
  batch_shape = shape[:-2]
  m, n = shape[-2:]
  # If there is only one column, "they" are by definition correlated.
  # NOTE(review): the docstring says the iid branch is for M < N, but this
  # condition triggers it when N < M -- the comparison may be inverted
  # relative to the docstring; confirm before relying on either.
  if n < 2 or n < m:
    return random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
  # Shape of the matrix with only n - 1 columns that we will embed in higher
  # dimensional space.
  smaller_shape = batch_shape + [m, n - 1]
  # Shape of the embedding matrix, mapping batch matrices
  # from [..., N-1, M] to [..., N, M]
  embedding_mat_shape = batch_shape + [n, n - 1]
  # This stddev for the embedding_mat ensures final result has correct stddev.
  # NOTE(review): the docstring gives stddev 1/sqrt(N-1) to L and `stddev` to
  # B, but here the M x (N-1) matrix gets stddev_mat while the embedding uses
  # the default stddev of 1 -- verify which matrix plays which role.
  stddev_mat = 1 / np.sqrt(n - 1)
  with ops.name_scope("random_normal_correlated_columns"):
    smaller_mat = random_normal(
        smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed)
    if seed is not None:
      seed += 1287
    embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed)
    embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True)
    embedded = array_ops.matrix_transpose(embedded_t)
    mean_mat = array_ops.ones_like(embedded) * mean
    # Perturb off the hyperplane by eps and shift by the requested mean.
    return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat
|
|
# NOTE: stray, unresolved VCS merge-conflict markers ("<<<<<<< HEAD") removed here.
"""
Tests common to genericpath, macpath, ntpath and posixpath
"""
import genericpath
import os
import sys
import unittest
import warnings
from test import support
def safe_rmdir(dirname):
    """Best-effort rmdir: ignore OSError (missing, non-empty, etc.)."""
    try:
        os.rmdir(dirname)
    except OSError:
        pass
class GenericTest:
common_attributes = ['commonprefix', 'getsize', 'getatime', 'getctime',
'getmtime', 'exists', 'isdir', 'isfile']
attributes = []
    def test_no_argument(self):
        """Each path function must raise TypeError when called with no args."""
        for attr in self.common_attributes + self.attributes:
            with self.assertRaises(TypeError):
                getattr(self.pathmodule, attr)()
                # Only reached if the call above did NOT raise: self.fail()
                # raises AssertionError, which escapes assertRaises(TypeError)
                # and fails the test with a useful message.
                raise self.fail("{}.{}() did not raise a TypeError"
                                .format(self.pathmodule.__name__, attr))
    def test_commonprefix(self):
        """commonprefix() on str, bytes and colon-separated paths, plus an
        exhaustive pairwise maximality check."""
        commonprefix = self.pathmodule.commonprefix
        # empty input and slash-separated str paths
        self.assertEqual(
            commonprefix([]),
            ""
        )
        self.assertEqual(
            commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
            "/home/swen"
        )
        self.assertEqual(
            commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
            "/home/swen/"
        )
        self.assertEqual(
            commonprefix(["/home/swen/spam", "/home/swen/spam"]),
            "/home/swen/spam"
        )
        # mac-style (colon-separated) paths -- commonprefix is purely
        # character-wise, so the separator does not matter
        self.assertEqual(
            commonprefix(["home:swenson:spam", "home:swen:spam"]),
            "home:swen"
        )
        self.assertEqual(
            commonprefix([":home:swen:spam", ":home:swen:eggs"]),
            ":home:swen:"
        )
        self.assertEqual(
            commonprefix([":home:swen:spam", ":home:swen:spam"]),
            ":home:swen:spam"
        )
        # the same cases with bytes arguments
        self.assertEqual(
            commonprefix([b"/home/swenson/spam", b"/home/swen/spam"]),
            b"/home/swen"
        )
        self.assertEqual(
            commonprefix([b"/home/swen/spam", b"/home/swen/eggs"]),
            b"/home/swen/"
        )
        self.assertEqual(
            commonprefix([b"/home/swen/spam", b"/home/swen/spam"]),
            b"/home/swen/spam"
        )
        self.assertEqual(
            commonprefix([b"home:swenson:spam", b"home:swen:spam"]),
            b"home:swen"
        )
        self.assertEqual(
            commonprefix([b":home:swen:spam", b":home:swen:eggs"]),
            b":home:swen:"
        )
        self.assertEqual(
            commonprefix([b":home:swen:spam", b":home:swen:spam"]),
            b":home:swen:spam"
        )
        # exhaustive check: result is a prefix of both inputs and is
        # maximal (the characters just past it differ)
        testlist = ['', 'abc', 'Xbcd', 'Xb', 'XY', 'abcd',
                    'aXc', 'abd', 'ab', 'aX', 'abcX']
        for s1 in testlist:
            for s2 in testlist:
                p = commonprefix([s1, s2])
                self.assertTrue(s1.startswith(p))
                self.assertTrue(s2.startswith(p))
                if s1 != s2:
                    n = len(p)
                    self.assertNotEqual(s1[n:n+1], s2[n:n+1])
def test_getsize(self):
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertEqual(self.pathmodule.getsize(support.TESTFN), 3)
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
    def test_time(self):
        """After appending to a file, getctime() <= getmtime()."""
        f = open(support.TESTFN, "wb")
        try:
            f.write(b"foo")
            f.close()
            # reopen in append mode so the modification time moves forward
            f = open(support.TESTFN, "ab")
            f.write(b"bar")
            f.close()
            # sanity check that both writes landed
            f = open(support.TESTFN, "rb")
            d = f.read()
            f.close()
            self.assertEqual(d, b"foobar")
            self.assertLessEqual(
                self.pathmodule.getctime(support.TESTFN),
                self.pathmodule.getmtime(support.TESTFN)
            )
        finally:
            if not f.closed:
                f.close()
            support.unlink(support.TESTFN)
def test_exists(self):
self.assertIs(self.pathmodule.exists(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.exists(support.TESTFN), True)
if not self.pathmodule == genericpath:
self.assertIs(self.pathmodule.lexists(support.TESTFN),
True)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
    @unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
    def test_exists_fd(self):
        """exists() accepts a file descriptor; False once the fd is closed."""
        r, w = os.pipe()
        try:
            self.assertTrue(self.pathmodule.exists(r))
        finally:
            os.close(r)
            os.close(w)
        self.assertFalse(self.pathmodule.exists(r))
def test_isdir(self):
self.assertIs(self.pathmodule.isdir(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.isdir(support.TESTFN), False)
os.remove(support.TESTFN)
os.mkdir(support.TESTFN)
self.assertIs(self.pathmodule.isdir(support.TESTFN), True)
os.rmdir(support.TESTFN)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
safe_rmdir(support.TESTFN)
def test_isfile(self):
self.assertIs(self.pathmodule.isfile(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.isfile(support.TESTFN), True)
os.remove(support.TESTFN)
os.mkdir(support.TESTFN)
self.assertIs(self.pathmodule.isfile(support.TESTFN), False)
os.rmdir(support.TESTFN)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
safe_rmdir(support.TESTFN)
@staticmethod
def _create_file(filename):
with open(filename, 'wb') as f:
f.write(b'foo')
def test_samefile(self):
try:
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
self.assertTrue(self.pathmodule.samefile(test_fn, test_fn))
self.assertRaises(TypeError, self.pathmodule.samefile)
finally:
os.remove(test_fn)
    @support.skip_unless_symlink
    def test_samefile_on_symlink(self):
        # samefile() must treat a symlink and its target as the same file.
        self._test_samefile_on_link_func(os.symlink)
    def test_samefile_on_link(self):
        # Same check across a hard link.
        self._test_samefile_on_link_func(os.link)
def _test_samefile_on_link_func(self, func):
try:
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
func(test_fn1, test_fn2)
self.assertTrue(self.pathmodule.samefile(test_fn1, test_fn2))
os.remove(test_fn2)
self._create_file(test_fn2)
self.assertFalse(self.pathmodule.samefile(test_fn1, test_fn2))
finally:
os.remove(test_fn1)
os.remove(test_fn2)
def test_samestat(self):
try:
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
test_fns = [test_fn]*2
stats = map(os.stat, test_fns)
self.assertTrue(self.pathmodule.samestat(*stats))
finally:
os.remove(test_fn)
    @support.skip_unless_symlink
    def test_samestat_on_symlink(self):
        # samestat() must match stat results across a symlink.
        self._test_samestat_on_link_func(os.symlink)
    def test_samestat_on_link(self):
        # Same check across a hard link.
        self._test_samestat_on_link_func(os.link)
def _test_samestat_on_link_func(self, func):
try:
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
test_fns = (test_fn1, test_fn2)
func(*test_fns)
stats = map(os.stat, test_fns)
self.assertTrue(self.pathmodule.samestat(*stats))
os.remove(test_fn2)
self._create_file(test_fn2)
stats = map(os.stat, test_fns)
self.assertFalse(self.pathmodule.samestat(*stats))
self.assertRaises(TypeError, self.pathmodule.samestat)
finally:
os.remove(test_fn1)
os.remove(test_fn2)
def test_sameopenfile(self):
fname = support.TESTFN + "1"
with open(fname, "wb") as a, open(fname, "wb") as b:
self.assertTrue(self.pathmodule.sameopenfile(
a.fileno(), b.fileno()))
class TestGenericTest(GenericTest, unittest.TestCase):
    # Issue 16852: GenericTest can't inherit from unittest.TestCase
    # for test discovery purposes; CommonTest inherits from GenericTest
    # and is only meant to be inherited by others.
    pathmodule = genericpath  # module under test for this concrete run
# Following TestCase is not supposed to be run from test_genericpath.
# It is inherited by other test modules (macpath, ntpath, posixpath).
class CommonTest(GenericTest):
common_attributes = GenericTest.common_attributes + [
# Properties
'curdir', 'pardir', 'extsep', 'sep',
'pathsep', 'defpath', 'altsep', 'devnull',
# Methods
'normcase', 'splitdrive', 'expandvars', 'normpath', 'abspath',
'join', 'split', 'splitext', 'isabs', 'basename', 'dirname',
'lexists', 'islink', 'ismount', 'expanduser', 'normpath', 'realpath',
]
def test_normcase(self):
normcase = self.pathmodule.normcase
# check that normcase() is idempotent
for p in ["FoO/./BaR", b"FoO/./BaR"]:
p = normcase(p)
self.assertEqual(p, normcase(p))
self.assertEqual(normcase(''), '')
self.assertEqual(normcase(b''), b'')
# check that normcase raises a TypeError for invalid types
for path in (None, True, 0, 2.5, [], bytearray(b''), {'o','o'}):
self.assertRaises(TypeError, normcase, path)
def test_splitdrive(self):
# splitdrive for non-NT paths
splitdrive = self.pathmodule.splitdrive
self.assertEqual(splitdrive("/foo/bar"), ("", "/foo/bar"))
self.assertEqual(splitdrive("foo:bar"), ("", "foo:bar"))
self.assertEqual(splitdrive(":foo:bar"), ("", ":foo:bar"))
self.assertEqual(splitdrive(b"/foo/bar"), (b"", b"/foo/bar"))
self.assertEqual(splitdrive(b"foo:bar"), (b"", b"foo:bar"))
self.assertEqual(splitdrive(b":foo:bar"), (b"", b":foo:bar"))
    def test_expandvars(self):
        """Exercise expandvars() substitution rules for str and bytes input.

        With foo=bar, "{foo"=baz1 and "{foo}"=baz2 in the environment:
        $foo and ${foo} expand; unknown names, malformed references and
        non-name characters are left untouched verbatim.
        """
        if self.pathmodule.__name__ == 'macpath':
            self.skipTest('macpath.expandvars is a stub')
        expandvars = self.pathmodule.expandvars
        with support.EnvironmentVarGuard() as env:
            env.clear()
            env["foo"] = "bar"
            env["{foo"] = "baz1"
            env["{foo}"] = "baz2"
            # str inputs
            self.assertEqual(expandvars("foo"), "foo")
            self.assertEqual(expandvars("$foo bar"), "bar bar")
            self.assertEqual(expandvars("${foo}bar"), "barbar")
            self.assertEqual(expandvars("$[foo]bar"), "$[foo]bar")
            self.assertEqual(expandvars("$bar bar"), "$bar bar")
            self.assertEqual(expandvars("$?bar"), "$?bar")
            self.assertEqual(expandvars("$foo}bar"), "bar}bar")
            self.assertEqual(expandvars("${foo"), "${foo")
            self.assertEqual(expandvars("${{foo}}"), "baz1}")
            self.assertEqual(expandvars("$foo$foo"), "barbar")
            self.assertEqual(expandvars("$bar$bar"), "$bar$bar")
            # bytes inputs must behave identically
            self.assertEqual(expandvars(b"foo"), b"foo")
            self.assertEqual(expandvars(b"$foo bar"), b"bar bar")
            self.assertEqual(expandvars(b"${foo}bar"), b"barbar")
            self.assertEqual(expandvars(b"$[foo]bar"), b"$[foo]bar")
            self.assertEqual(expandvars(b"$bar bar"), b"$bar bar")
            self.assertEqual(expandvars(b"$?bar"), b"$?bar")
            self.assertEqual(expandvars(b"$foo}bar"), b"bar}bar")
            self.assertEqual(expandvars(b"${foo"), b"${foo")
            self.assertEqual(expandvars(b"${{foo}}"), b"baz1}")
            self.assertEqual(expandvars(b"$foo$foo"), b"barbar")
            self.assertEqual(expandvars(b"$bar$bar"), b"$bar$bar")
    @unittest.skipUnless(support.FS_NONASCII, 'need support.FS_NONASCII')
    def test_expandvars_nonascii(self):
        """expandvars() must handle non-ASCII variable names and values."""
        if self.pathmodule.__name__ == 'macpath':
            self.skipTest('macpath.expandvars is a stub')
        expandvars = self.pathmodule.expandvars
        def check(value, expected):
            # Helper: one substitution case.
            self.assertEqual(expandvars(value), expected)
        with support.EnvironmentVarGuard() as env:
            env.clear()
            nonascii = support.FS_NONASCII
            env['spam'] = nonascii
            env[nonascii] = 'ham' + nonascii
            # str inputs
            check(nonascii, nonascii)
            check('$spam bar', '%s bar' % nonascii)
            check('${spam}bar', '%sbar' % nonascii)
            check('${%s}bar' % nonascii, 'ham%sbar' % nonascii)
            check('$bar%s bar' % nonascii, '$bar%s bar' % nonascii)
            check('$spam}bar', '%s}bar' % nonascii)
            # bytes inputs, via the filesystem encoding
            check(os.fsencode(nonascii), os.fsencode(nonascii))
            check(b'$spam bar', os.fsencode('%s bar' % nonascii))
            check(b'${spam}bar', os.fsencode('%sbar' % nonascii))
            check(os.fsencode('${%s}bar' % nonascii),
                  os.fsencode('ham%sbar' % nonascii))
            check(os.fsencode('$bar%s bar' % nonascii),
                  os.fsencode('$bar%s bar' % nonascii))
            check(b'$spam}bar', os.fsencode('%s}bar' % nonascii))
    def test_abspath(self):
        """abspath() embeds the given name and preserves str/bytes type."""
        self.assertIn("foo", self.pathmodule.abspath("foo"))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            self.assertIn(b"foo", self.pathmodule.abspath(b"foo"))
        # Abspath returns bytes when the arg is bytes
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            for path in (b'', b'foo', b'f\xf2\xf2', b'/foo', b'C:\\'):
                self.assertIsInstance(self.pathmodule.abspath(path), bytes)
def test_realpath(self):
self.assertIn("foo", self.pathmodule.realpath("foo"))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertIn(b"foo", self.pathmodule.realpath(b"foo"))
def test_normpath_issue5827(self):
# Make sure normpath preserves unicode
for path in ('', '.', '/', '\\', '///foo/.//bar//'):
self.assertIsInstance(self.pathmodule.normpath(path), str)
    def test_abspath_issue3426(self):
        # Check that abspath returns unicode when the arg is unicode
        # with both ASCII and non-ASCII cwds.
        abspath = self.pathmodule.abspath
        for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'):
            self.assertIsInstance(abspath(path), str)
        # Repeat the check from inside a non-ASCII working directory,
        # but only if the filesystem encoding can represent it.
        unicwd = '\xe7w\xf0'
        try:
            os.fsencode(unicwd)
        except (AttributeError, UnicodeEncodeError):
            # FS encoding is probably ASCII
            pass
        else:
            with support.temp_cwd(unicwd):
                for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'):
                    self.assertIsInstance(abspath(path), str)
    def test_nonascii_abspath(self):
        """Re-run test_abspath from a cwd with a non-ASCII (or undecodable) name."""
        if (support.TESTFN_UNDECODABLE
        # Mac OS X denies the creation of a directory with an invalid
        # UTF-8 name. Windows allows to create a directory with an
        # arbitrary bytes name, but fails to enter this directory
        # (when the bytes name is used).
        and sys.platform not in ('win32', 'darwin')):
            name = support.TESTFN_UNDECODABLE
        elif support.TESTFN_NONASCII:
            name = support.TESTFN_NONASCII
        else:
            self.skipTest("need support.TESTFN_NONASCII")
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            with support.temp_cwd(name):
                self.test_abspath()
# Script entry point: run the discovered tests.
if __name__ == "__main__":
    unittest.main()
=======
"""
Tests common to genericpath, macpath, ntpath and posixpath
"""
import genericpath
import os
import sys
import unittest
import warnings
from test import support
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class GenericTest:
common_attributes = ['commonprefix', 'getsize', 'getatime', 'getctime',
'getmtime', 'exists', 'isdir', 'isfile']
attributes = []
def test_no_argument(self):
for attr in self.common_attributes + self.attributes:
with self.assertRaises(TypeError):
getattr(self.pathmodule, attr)()
raise self.fail("{}.{}() did not raise a TypeError"
.format(self.pathmodule.__name__, attr))
def test_commonprefix(self):
commonprefix = self.pathmodule.commonprefix
self.assertEqual(
commonprefix([]),
""
)
self.assertEqual(
commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
"/home/swen"
)
self.assertEqual(
commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
"/home/swen/"
)
self.assertEqual(
commonprefix(["/home/swen/spam", "/home/swen/spam"]),
"/home/swen/spam"
)
self.assertEqual(
commonprefix(["home:swenson:spam", "home:swen:spam"]),
"home:swen"
)
self.assertEqual(
commonprefix([":home:swen:spam", ":home:swen:eggs"]),
":home:swen:"
)
self.assertEqual(
commonprefix([":home:swen:spam", ":home:swen:spam"]),
":home:swen:spam"
)
self.assertEqual(
commonprefix([b"/home/swenson/spam", b"/home/swen/spam"]),
b"/home/swen"
)
self.assertEqual(
commonprefix([b"/home/swen/spam", b"/home/swen/eggs"]),
b"/home/swen/"
)
self.assertEqual(
commonprefix([b"/home/swen/spam", b"/home/swen/spam"]),
b"/home/swen/spam"
)
self.assertEqual(
commonprefix([b"home:swenson:spam", b"home:swen:spam"]),
b"home:swen"
)
self.assertEqual(
commonprefix([b":home:swen:spam", b":home:swen:eggs"]),
b":home:swen:"
)
self.assertEqual(
commonprefix([b":home:swen:spam", b":home:swen:spam"]),
b":home:swen:spam"
)
testlist = ['', 'abc', 'Xbcd', 'Xb', 'XY', 'abcd',
'aXc', 'abd', 'ab', 'aX', 'abcX']
for s1 in testlist:
for s2 in testlist:
p = commonprefix([s1, s2])
self.assertTrue(s1.startswith(p))
self.assertTrue(s2.startswith(p))
if s1 != s2:
n = len(p)
self.assertNotEqual(s1[n:n+1], s2[n:n+1])
def test_getsize(self):
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertEqual(self.pathmodule.getsize(support.TESTFN), 3)
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
def test_time(self):
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
f = open(support.TESTFN, "ab")
f.write(b"bar")
f.close()
f = open(support.TESTFN, "rb")
d = f.read()
f.close()
self.assertEqual(d, b"foobar")
self.assertLessEqual(
self.pathmodule.getctime(support.TESTFN),
self.pathmodule.getmtime(support.TESTFN)
)
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
def test_exists(self):
self.assertIs(self.pathmodule.exists(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.exists(support.TESTFN), True)
if not self.pathmodule == genericpath:
self.assertIs(self.pathmodule.lexists(support.TESTFN),
True)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_exists_fd(self):
r, w = os.pipe()
try:
self.assertTrue(self.pathmodule.exists(r))
finally:
os.close(r)
os.close(w)
self.assertFalse(self.pathmodule.exists(r))
def test_isdir(self):
self.assertIs(self.pathmodule.isdir(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.isdir(support.TESTFN), False)
os.remove(support.TESTFN)
os.mkdir(support.TESTFN)
self.assertIs(self.pathmodule.isdir(support.TESTFN), True)
os.rmdir(support.TESTFN)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
safe_rmdir(support.TESTFN)
def test_isfile(self):
self.assertIs(self.pathmodule.isfile(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.isfile(support.TESTFN), True)
os.remove(support.TESTFN)
os.mkdir(support.TESTFN)
self.assertIs(self.pathmodule.isfile(support.TESTFN), False)
os.rmdir(support.TESTFN)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
safe_rmdir(support.TESTFN)
@staticmethod
def _create_file(filename):
with open(filename, 'wb') as f:
f.write(b'foo')
def test_samefile(self):
try:
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
self.assertTrue(self.pathmodule.samefile(test_fn, test_fn))
self.assertRaises(TypeError, self.pathmodule.samefile)
finally:
os.remove(test_fn)
@support.skip_unless_symlink
def test_samefile_on_symlink(self):
self._test_samefile_on_link_func(os.symlink)
def test_samefile_on_link(self):
self._test_samefile_on_link_func(os.link)
def _test_samefile_on_link_func(self, func):
try:
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
func(test_fn1, test_fn2)
self.assertTrue(self.pathmodule.samefile(test_fn1, test_fn2))
os.remove(test_fn2)
self._create_file(test_fn2)
self.assertFalse(self.pathmodule.samefile(test_fn1, test_fn2))
finally:
os.remove(test_fn1)
os.remove(test_fn2)
def test_samestat(self):
try:
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
test_fns = [test_fn]*2
stats = map(os.stat, test_fns)
self.assertTrue(self.pathmodule.samestat(*stats))
finally:
os.remove(test_fn)
@support.skip_unless_symlink
def test_samestat_on_symlink(self):
self._test_samestat_on_link_func(os.symlink)
def test_samestat_on_link(self):
self._test_samestat_on_link_func(os.link)
def _test_samestat_on_link_func(self, func):
try:
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
test_fns = (test_fn1, test_fn2)
func(*test_fns)
stats = map(os.stat, test_fns)
self.assertTrue(self.pathmodule.samestat(*stats))
os.remove(test_fn2)
self._create_file(test_fn2)
stats = map(os.stat, test_fns)
self.assertFalse(self.pathmodule.samestat(*stats))
self.assertRaises(TypeError, self.pathmodule.samestat)
finally:
os.remove(test_fn1)
os.remove(test_fn2)
def test_sameopenfile(self):
fname = support.TESTFN + "1"
with open(fname, "wb") as a, open(fname, "wb") as b:
self.assertTrue(self.pathmodule.sameopenfile(
a.fileno(), b.fileno()))
class TestGenericTest(GenericTest, unittest.TestCase):
# Issue 16852: GenericTest can't inherit from unittest.TestCase
# for test discovery purposes; CommonTest inherits from GenericTest
# and is only meant to be inherited by others.
pathmodule = genericpath
# Following TestCase is not supposed to be run from test_genericpath.
# It is inherited by other test modules (macpath, ntpath, posixpath).
class CommonTest(GenericTest):
common_attributes = GenericTest.common_attributes + [
# Properties
'curdir', 'pardir', 'extsep', 'sep',
'pathsep', 'defpath', 'altsep', 'devnull',
# Methods
'normcase', 'splitdrive', 'expandvars', 'normpath', 'abspath',
'join', 'split', 'splitext', 'isabs', 'basename', 'dirname',
'lexists', 'islink', 'ismount', 'expanduser', 'normpath', 'realpath',
]
def test_normcase(self):
normcase = self.pathmodule.normcase
# check that normcase() is idempotent
for p in ["FoO/./BaR", b"FoO/./BaR"]:
p = normcase(p)
self.assertEqual(p, normcase(p))
self.assertEqual(normcase(''), '')
self.assertEqual(normcase(b''), b'')
# check that normcase raises a TypeError for invalid types
for path in (None, True, 0, 2.5, [], bytearray(b''), {'o','o'}):
self.assertRaises(TypeError, normcase, path)
def test_splitdrive(self):
# splitdrive for non-NT paths
splitdrive = self.pathmodule.splitdrive
self.assertEqual(splitdrive("/foo/bar"), ("", "/foo/bar"))
self.assertEqual(splitdrive("foo:bar"), ("", "foo:bar"))
self.assertEqual(splitdrive(":foo:bar"), ("", ":foo:bar"))
self.assertEqual(splitdrive(b"/foo/bar"), (b"", b"/foo/bar"))
self.assertEqual(splitdrive(b"foo:bar"), (b"", b"foo:bar"))
self.assertEqual(splitdrive(b":foo:bar"), (b"", b":foo:bar"))
def test_expandvars(self):
if self.pathmodule.__name__ == 'macpath':
self.skipTest('macpath.expandvars is a stub')
expandvars = self.pathmodule.expandvars
with support.EnvironmentVarGuard() as env:
env.clear()
env["foo"] = "bar"
env["{foo"] = "baz1"
env["{foo}"] = "baz2"
self.assertEqual(expandvars("foo"), "foo")
self.assertEqual(expandvars("$foo bar"), "bar bar")
self.assertEqual(expandvars("${foo}bar"), "barbar")
self.assertEqual(expandvars("$[foo]bar"), "$[foo]bar")
self.assertEqual(expandvars("$bar bar"), "$bar bar")
self.assertEqual(expandvars("$?bar"), "$?bar")
self.assertEqual(expandvars("$foo}bar"), "bar}bar")
self.assertEqual(expandvars("${foo"), "${foo")
self.assertEqual(expandvars("${{foo}}"), "baz1}")
self.assertEqual(expandvars("$foo$foo"), "barbar")
self.assertEqual(expandvars("$bar$bar"), "$bar$bar")
self.assertEqual(expandvars(b"foo"), b"foo")
self.assertEqual(expandvars(b"$foo bar"), b"bar bar")
self.assertEqual(expandvars(b"${foo}bar"), b"barbar")
self.assertEqual(expandvars(b"$[foo]bar"), b"$[foo]bar")
self.assertEqual(expandvars(b"$bar bar"), b"$bar bar")
self.assertEqual(expandvars(b"$?bar"), b"$?bar")
self.assertEqual(expandvars(b"$foo}bar"), b"bar}bar")
self.assertEqual(expandvars(b"${foo"), b"${foo")
self.assertEqual(expandvars(b"${{foo}}"), b"baz1}")
self.assertEqual(expandvars(b"$foo$foo"), b"barbar")
self.assertEqual(expandvars(b"$bar$bar"), b"$bar$bar")
@unittest.skipUnless(support.FS_NONASCII, 'need support.FS_NONASCII')
def test_expandvars_nonascii(self):
if self.pathmodule.__name__ == 'macpath':
self.skipTest('macpath.expandvars is a stub')
expandvars = self.pathmodule.expandvars
def check(value, expected):
self.assertEqual(expandvars(value), expected)
with support.EnvironmentVarGuard() as env:
env.clear()
nonascii = support.FS_NONASCII
env['spam'] = nonascii
env[nonascii] = 'ham' + nonascii
check(nonascii, nonascii)
check('$spam bar', '%s bar' % nonascii)
check('${spam}bar', '%sbar' % nonascii)
check('${%s}bar' % nonascii, 'ham%sbar' % nonascii)
check('$bar%s bar' % nonascii, '$bar%s bar' % nonascii)
check('$spam}bar', '%s}bar' % nonascii)
check(os.fsencode(nonascii), os.fsencode(nonascii))
check(b'$spam bar', os.fsencode('%s bar' % nonascii))
check(b'${spam}bar', os.fsencode('%sbar' % nonascii))
check(os.fsencode('${%s}bar' % nonascii),
os.fsencode('ham%sbar' % nonascii))
check(os.fsencode('$bar%s bar' % nonascii),
os.fsencode('$bar%s bar' % nonascii))
check(b'$spam}bar', os.fsencode('%s}bar' % nonascii))
def test_abspath(self):
self.assertIn("foo", self.pathmodule.abspath("foo"))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertIn(b"foo", self.pathmodule.abspath(b"foo"))
# Abspath returns bytes when the arg is bytes
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for path in (b'', b'foo', b'f\xf2\xf2', b'/foo', b'C:\\'):
self.assertIsInstance(self.pathmodule.abspath(path), bytes)
def test_realpath(self):
self.assertIn("foo", self.pathmodule.realpath("foo"))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertIn(b"foo", self.pathmodule.realpath(b"foo"))
def test_normpath_issue5827(self):
# Make sure normpath preserves unicode
for path in ('', '.', '/', '\\', '///foo/.//bar//'):
self.assertIsInstance(self.pathmodule.normpath(path), str)
def test_abspath_issue3426(self):
# Check that abspath returns unicode when the arg is unicode
# with both ASCII and non-ASCII cwds.
abspath = self.pathmodule.abspath
for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'):
self.assertIsInstance(abspath(path), str)
unicwd = '\xe7w\xf0'
try:
os.fsencode(unicwd)
except (AttributeError, UnicodeEncodeError):
# FS encoding is probably ASCII
pass
else:
with support.temp_cwd(unicwd):
for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'):
self.assertIsInstance(abspath(path), str)
def test_nonascii_abspath(self):
if (support.TESTFN_UNDECODABLE
# Mac OS X denies the creation of a directory with an invalid
# UTF-8 name. Windows allows to create a directory with an
# arbitrary bytes name, but fails to enter this directory
# (when the bytes name is used).
and sys.platform not in ('win32', 'darwin')):
name = support.TESTFN_UNDECODABLE
elif support.TESTFN_NONASCII:
name = support.TESTFN_NONASCII
else:
self.skipTest("need support.TESTFN_NONASCII")
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with support.temp_cwd(name):
self.test_abspath()
if __name__=="__main__":
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""
Tests common to genericpath, macpath, ntpath and posixpath
"""
import genericpath
import os
import sys
import unittest
import warnings
from test import support
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class GenericTest:
common_attributes = ['commonprefix', 'getsize', 'getatime', 'getctime',
'getmtime', 'exists', 'isdir', 'isfile']
attributes = []
def test_no_argument(self):
for attr in self.common_attributes + self.attributes:
with self.assertRaises(TypeError):
getattr(self.pathmodule, attr)()
raise self.fail("{}.{}() did not raise a TypeError"
.format(self.pathmodule.__name__, attr))
def test_commonprefix(self):
commonprefix = self.pathmodule.commonprefix
self.assertEqual(
commonprefix([]),
""
)
self.assertEqual(
commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
"/home/swen"
)
self.assertEqual(
commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
"/home/swen/"
)
self.assertEqual(
commonprefix(["/home/swen/spam", "/home/swen/spam"]),
"/home/swen/spam"
)
self.assertEqual(
commonprefix(["home:swenson:spam", "home:swen:spam"]),
"home:swen"
)
self.assertEqual(
commonprefix([":home:swen:spam", ":home:swen:eggs"]),
":home:swen:"
)
self.assertEqual(
commonprefix([":home:swen:spam", ":home:swen:spam"]),
":home:swen:spam"
)
self.assertEqual(
commonprefix([b"/home/swenson/spam", b"/home/swen/spam"]),
b"/home/swen"
)
self.assertEqual(
commonprefix([b"/home/swen/spam", b"/home/swen/eggs"]),
b"/home/swen/"
)
self.assertEqual(
commonprefix([b"/home/swen/spam", b"/home/swen/spam"]),
b"/home/swen/spam"
)
self.assertEqual(
commonprefix([b"home:swenson:spam", b"home:swen:spam"]),
b"home:swen"
)
self.assertEqual(
commonprefix([b":home:swen:spam", b":home:swen:eggs"]),
b":home:swen:"
)
self.assertEqual(
commonprefix([b":home:swen:spam", b":home:swen:spam"]),
b":home:swen:spam"
)
testlist = ['', 'abc', 'Xbcd', 'Xb', 'XY', 'abcd',
'aXc', 'abd', 'ab', 'aX', 'abcX']
for s1 in testlist:
for s2 in testlist:
p = commonprefix([s1, s2])
self.assertTrue(s1.startswith(p))
self.assertTrue(s2.startswith(p))
if s1 != s2:
n = len(p)
self.assertNotEqual(s1[n:n+1], s2[n:n+1])
def test_getsize(self):
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertEqual(self.pathmodule.getsize(support.TESTFN), 3)
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
def test_time(self):
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
f = open(support.TESTFN, "ab")
f.write(b"bar")
f.close()
f = open(support.TESTFN, "rb")
d = f.read()
f.close()
self.assertEqual(d, b"foobar")
self.assertLessEqual(
self.pathmodule.getctime(support.TESTFN),
self.pathmodule.getmtime(support.TESTFN)
)
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
def test_exists(self):
self.assertIs(self.pathmodule.exists(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.exists(support.TESTFN), True)
if not self.pathmodule == genericpath:
self.assertIs(self.pathmodule.lexists(support.TESTFN),
True)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_exists_fd(self):
r, w = os.pipe()
try:
self.assertTrue(self.pathmodule.exists(r))
finally:
os.close(r)
os.close(w)
self.assertFalse(self.pathmodule.exists(r))
def test_isdir(self):
self.assertIs(self.pathmodule.isdir(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.isdir(support.TESTFN), False)
os.remove(support.TESTFN)
os.mkdir(support.TESTFN)
self.assertIs(self.pathmodule.isdir(support.TESTFN), True)
os.rmdir(support.TESTFN)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
safe_rmdir(support.TESTFN)
def test_isfile(self):
self.assertIs(self.pathmodule.isfile(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.isfile(support.TESTFN), True)
os.remove(support.TESTFN)
os.mkdir(support.TESTFN)
self.assertIs(self.pathmodule.isfile(support.TESTFN), False)
os.rmdir(support.TESTFN)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
safe_rmdir(support.TESTFN)
@staticmethod
def _create_file(filename):
with open(filename, 'wb') as f:
f.write(b'foo')
def test_samefile(self):
try:
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
self.assertTrue(self.pathmodule.samefile(test_fn, test_fn))
self.assertRaises(TypeError, self.pathmodule.samefile)
finally:
os.remove(test_fn)
@support.skip_unless_symlink
def test_samefile_on_symlink(self):
self._test_samefile_on_link_func(os.symlink)
def test_samefile_on_link(self):
self._test_samefile_on_link_func(os.link)
def _test_samefile_on_link_func(self, func):
try:
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
func(test_fn1, test_fn2)
self.assertTrue(self.pathmodule.samefile(test_fn1, test_fn2))
os.remove(test_fn2)
self._create_file(test_fn2)
self.assertFalse(self.pathmodule.samefile(test_fn1, test_fn2))
finally:
os.remove(test_fn1)
os.remove(test_fn2)
def test_samestat(self):
try:
test_fn = support.TESTFN + "1"
self._create_file(test_fn)
test_fns = [test_fn]*2
stats = map(os.stat, test_fns)
self.assertTrue(self.pathmodule.samestat(*stats))
finally:
os.remove(test_fn)
@support.skip_unless_symlink
def test_samestat_on_symlink(self):
self._test_samestat_on_link_func(os.symlink)
def test_samestat_on_link(self):
self._test_samestat_on_link_func(os.link)
def _test_samestat_on_link_func(self, func):
try:
test_fn1 = support.TESTFN + "1"
test_fn2 = support.TESTFN + "2"
self._create_file(test_fn1)
test_fns = (test_fn1, test_fn2)
func(*test_fns)
stats = map(os.stat, test_fns)
self.assertTrue(self.pathmodule.samestat(*stats))
os.remove(test_fn2)
self._create_file(test_fn2)
stats = map(os.stat, test_fns)
self.assertFalse(self.pathmodule.samestat(*stats))
self.assertRaises(TypeError, self.pathmodule.samestat)
finally:
os.remove(test_fn1)
os.remove(test_fn2)
def test_sameopenfile(self):
fname = support.TESTFN + "1"
with open(fname, "wb") as a, open(fname, "wb") as b:
self.assertTrue(self.pathmodule.sameopenfile(
a.fileno(), b.fileno()))
class TestGenericTest(GenericTest, unittest.TestCase):
# Issue 16852: GenericTest can't inherit from unittest.TestCase
# for test discovery purposes; CommonTest inherits from GenericTest
# and is only meant to be inherited by others.
pathmodule = genericpath
# Following TestCase is not supposed to be run from test_genericpath.
# It is inherited by other test modules (macpath, ntpath, posixpath).
class CommonTest(GenericTest):
common_attributes = GenericTest.common_attributes + [
# Properties
'curdir', 'pardir', 'extsep', 'sep',
'pathsep', 'defpath', 'altsep', 'devnull',
# Methods
'normcase', 'splitdrive', 'expandvars', 'normpath', 'abspath',
'join', 'split', 'splitext', 'isabs', 'basename', 'dirname',
'lexists', 'islink', 'ismount', 'expanduser', 'normpath', 'realpath',
]
def test_normcase(self):
normcase = self.pathmodule.normcase
# check that normcase() is idempotent
for p in ["FoO/./BaR", b"FoO/./BaR"]:
p = normcase(p)
self.assertEqual(p, normcase(p))
self.assertEqual(normcase(''), '')
self.assertEqual(normcase(b''), b'')
# check that normcase raises a TypeError for invalid types
for path in (None, True, 0, 2.5, [], bytearray(b''), {'o','o'}):
self.assertRaises(TypeError, normcase, path)
def test_splitdrive(self):
# splitdrive for non-NT paths
splitdrive = self.pathmodule.splitdrive
self.assertEqual(splitdrive("/foo/bar"), ("", "/foo/bar"))
self.assertEqual(splitdrive("foo:bar"), ("", "foo:bar"))
self.assertEqual(splitdrive(":foo:bar"), ("", ":foo:bar"))
self.assertEqual(splitdrive(b"/foo/bar"), (b"", b"/foo/bar"))
self.assertEqual(splitdrive(b"foo:bar"), (b"", b"foo:bar"))
self.assertEqual(splitdrive(b":foo:bar"), (b"", b":foo:bar"))
def test_expandvars(self):
if self.pathmodule.__name__ == 'macpath':
self.skipTest('macpath.expandvars is a stub')
expandvars = self.pathmodule.expandvars
with support.EnvironmentVarGuard() as env:
env.clear()
env["foo"] = "bar"
env["{foo"] = "baz1"
env["{foo}"] = "baz2"
self.assertEqual(expandvars("foo"), "foo")
self.assertEqual(expandvars("$foo bar"), "bar bar")
self.assertEqual(expandvars("${foo}bar"), "barbar")
self.assertEqual(expandvars("$[foo]bar"), "$[foo]bar")
self.assertEqual(expandvars("$bar bar"), "$bar bar")
self.assertEqual(expandvars("$?bar"), "$?bar")
self.assertEqual(expandvars("$foo}bar"), "bar}bar")
self.assertEqual(expandvars("${foo"), "${foo")
self.assertEqual(expandvars("${{foo}}"), "baz1}")
self.assertEqual(expandvars("$foo$foo"), "barbar")
self.assertEqual(expandvars("$bar$bar"), "$bar$bar")
self.assertEqual(expandvars(b"foo"), b"foo")
self.assertEqual(expandvars(b"$foo bar"), b"bar bar")
self.assertEqual(expandvars(b"${foo}bar"), b"barbar")
self.assertEqual(expandvars(b"$[foo]bar"), b"$[foo]bar")
self.assertEqual(expandvars(b"$bar bar"), b"$bar bar")
self.assertEqual(expandvars(b"$?bar"), b"$?bar")
self.assertEqual(expandvars(b"$foo}bar"), b"bar}bar")
self.assertEqual(expandvars(b"${foo"), b"${foo")
self.assertEqual(expandvars(b"${{foo}}"), b"baz1}")
self.assertEqual(expandvars(b"$foo$foo"), b"barbar")
self.assertEqual(expandvars(b"$bar$bar"), b"$bar$bar")
@unittest.skipUnless(support.FS_NONASCII, 'need support.FS_NONASCII')
def test_expandvars_nonascii(self):
if self.pathmodule.__name__ == 'macpath':
self.skipTest('macpath.expandvars is a stub')
expandvars = self.pathmodule.expandvars
def check(value, expected):
self.assertEqual(expandvars(value), expected)
with support.EnvironmentVarGuard() as env:
env.clear()
nonascii = support.FS_NONASCII
env['spam'] = nonascii
env[nonascii] = 'ham' + nonascii
check(nonascii, nonascii)
check('$spam bar', '%s bar' % nonascii)
check('${spam}bar', '%sbar' % nonascii)
check('${%s}bar' % nonascii, 'ham%sbar' % nonascii)
check('$bar%s bar' % nonascii, '$bar%s bar' % nonascii)
check('$spam}bar', '%s}bar' % nonascii)
check(os.fsencode(nonascii), os.fsencode(nonascii))
check(b'$spam bar', os.fsencode('%s bar' % nonascii))
check(b'${spam}bar', os.fsencode('%sbar' % nonascii))
check(os.fsencode('${%s}bar' % nonascii),
os.fsencode('ham%sbar' % nonascii))
check(os.fsencode('$bar%s bar' % nonascii),
os.fsencode('$bar%s bar' % nonascii))
check(b'$spam}bar', os.fsencode('%s}bar' % nonascii))
def test_abspath(self):
self.assertIn("foo", self.pathmodule.abspath("foo"))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertIn(b"foo", self.pathmodule.abspath(b"foo"))
# Abspath returns bytes when the arg is bytes
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for path in (b'', b'foo', b'f\xf2\xf2', b'/foo', b'C:\\'):
self.assertIsInstance(self.pathmodule.abspath(path), bytes)
def test_realpath(self):
self.assertIn("foo", self.pathmodule.realpath("foo"))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertIn(b"foo", self.pathmodule.realpath(b"foo"))
    def test_normpath_issue5827(self):
        """normpath must return str for str input (issue #5827)."""
        # Make sure normpath preserves unicode
        for path in ('', '.', '/', '\\', '///foo/.//bar//'):
            self.assertIsInstance(self.pathmodule.normpath(path), str)
    def test_abspath_issue3426(self):
        """abspath must return str for str input, regardless of cwd encoding
        (issue #3426)."""
        # Check that abspath returns unicode when the arg is unicode
        # with both ASCII and non-ASCII cwds.
        abspath = self.pathmodule.abspath
        for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'):
            self.assertIsInstance(abspath(path), str)
        unicwd = '\xe7w\xf0'
        try:
            os.fsencode(unicwd)
        except (AttributeError, UnicodeEncodeError):
            # FS encoding is probably ASCII
            pass
        else:
            # only run the non-ASCII-cwd half when the name is encodable
            with support.temp_cwd(unicwd):
                for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'):
                    self.assertIsInstance(abspath(path), str)
    def test_nonascii_abspath(self):
        """Re-run the abspath checks from a non-ASCII (or undecodable) cwd."""
        if (support.TESTFN_UNDECODABLE
                # Mac OS X denies the creation of a directory with an invalid
                # UTF-8 name. Windows allows to create a directory with an
                # arbitrary bytes name, but fails to enter this directory
                # (when the bytes name is used).
                and sys.platform not in ('win32', 'darwin')):
            name = support.TESTFN_UNDECODABLE
        elif support.TESTFN_NONASCII:
            name = support.TESTFN_NONASCII
        else:
            self.skipTest("need support.TESTFN_NONASCII")
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            with support.temp_cwd(name):
                self.test_abspath()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
|
#! /usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2015 creon (creon.nu@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import json
import hmac
import time
import urllib
import urllib2
import random
import hashlib
import httplib
import threading
import datetime
class Exchange(object):
    """Base class for exchange adapters.

    Holds the exchange's trading fee and generates strictly increasing
    nonces for authenticated API requests, applying a clock shift that
    subclasses may resynchronize via adjust().
    """

    def __init__(self, fee):
        self.fee = fee      # trading fee fraction charged by the exchange
        self._shift = 1     # clock offset applied when generating nonces
        self._nonce = 0     # last nonce handed out

    def adjust(self, error):
        """Nudge the clock shift after a server-side nonce rejection.

        Transport-level failures (marked 'exception caught:') are ignored;
        genuine API errors walk the shift through -92, 15, -78, 29, ...
        """
        if 'exception caught:' in error:
            return
        self._shift = ((self._shift + 7) % 200) - 100

    def nonce(self, factor=1000.0):
        """Return a strictly increasing nonce from the shifted clock.

        *factor* scales seconds (1000.0 gives millisecond resolution); if
        the clock has not advanced past the previous nonce, step it by 10.
        """
        candidate = int((time.time() + self._shift) * float(factor))
        if candidate <= self._nonce:
            candidate = self._nonce + 10
        self._nonce = candidate
        return candidate
class Bittrex(Exchange):
    """Adapter for the Bittrex v1.1 REST API (unit-NBT markets)."""
    def __init__(self):
        super(Bittrex, self).__init__(0.0025)
        # orders placed per key: {key: {unit: {'bid': uuid|False, 'ask': uuid|False}}}
        self.placed = {}
        # uuids already observed as closed (cached to skip re-queries)
        self.closed = []
    def __repr__(self):
        return "bittrex"
    def adjust(self, error):
        # Bittrex nonces never need a clock adjustment.
        pass
    def post(self, method, params, key, secret, throttle=5):
        """Signed GET request; retries up to *throttle* times when rate-limited."""
        data = 'https://bittrex.com/api/v1.1' + method + '?apikey=%s&nonce=%d&' % (
            key, self.nonce()) + urllib.urlencode(params)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        headers = {'apisign': sign}
        connection = httplib.HTTPSConnection('bittrex.com', timeout=10)
        connection.request('GET', data, headers=headers)
        response = json.loads(connection.getresponse().read())
        if throttle > 0 and not response['success'] and 'THROTTLED' in response['message']:
            time.sleep(2)
            return self.post(method, params, key, secret, throttle - 1)
        return response
    def get(self, method, params):
        """Unauthenticated GET request to the public API."""
        data = 'https://bittrex.com/api/v1.1' + method + '?' + urllib.urlencode(params)
        connection = httplib.HTTPSConnection('bittrex.com', timeout=10)
        connection.request('GET', data, headers={})
        return json.loads(connection.getresponse().read())
    def cancel_orders(self, unit, side, key, secret):
        """Cancel open orders on *side* ('bid'/'ask'/'all') of the unit-NBT market.

        Returns the open-order response augmented with 'removed' (cancelled
        uuids) and 'amount' (total cancelled quantity); sets 'error' on failure.
        """
        response = self.post('/market/getopenorders', {'market': "%s-NBT" % unit.upper()}, key, secret)
        if not response['success']:
            response['error'] = response['message']
            return response
        if not response['result']:
            response['result'] = []
        response['removed'] = []
        response['amount'] = 0.0
        for order in response['result']:
            if side == 'all' or (side == 'bid' and 'BUY' in order['OrderType']) or (
                    side == 'ask' and 'SELL' in order['OrderType']):
                ret = self.post('/market/cancel', {'uuid': order['OrderUuid']}, key, secret)
                if not ret['success'] and ret['message'] != "ORDER_NOT_OPEN":
                    # NOTE(review): this replaces the whole response dict,
                    # discarding 'removed'/'amount' collected so far -- verify.
                    if not 'error' in response: response = {'error': ""}
                    response['error'] += "," + ret['message']
                else:
                    response['removed'].append(order['OrderUuid'])
                    response['amount'] += order['Quantity']
        # forget the tracked order ids once successfully cancelled
        if not 'error' in response and key in self.placed and unit in self.placed[key]:
            if side == 'all':
                self.placed[key][unit]['bid'] = False
                self.placed[key][unit]['ask'] = False
            else:
                self.placed[key][unit][side] = False
        return response
    def place_order(self, unit, side, key, secret, amount, price):
        """Cancel same-side orders, then place a limit order.

        On success response['id'] holds the order uuid; on failure
        response['residual'] carries the amount freed by the cancellation.
        """
        ret = self.cancel_orders(unit, side, key, secret)
        if 'error' in ret: return ret
        amount += ret['amount']
        if side == 'bid':
            amount *= (1.0 - self.fee)
        params = {'market': "%s-NBT" % unit.upper(), "rate": price, "quantity": amount}
        response = self.post('/market/buylimit' if side == 'bid' else '/market/selllimit', params, key, secret)
        if response['success']:
            response['id'] = response['result']['uuid']
            if not key in self.placed:
                self.placed[key] = {}
            if not unit in self.placed[key]:
                self.placed[key][unit] = {'bid': False, 'ask': False}
            self.placed[key][unit][side] = response['id']
        else:
            response['error'] = response['message']
            response['residual'] = ret['amount']
        return response
    def get_balance(self, unit, key, secret):
        """Return the available balance for *unit* in response['balance']."""
        response = self.post('/account/getbalance', {'currency': unit.upper()}, key, secret)
        if response['success']:
            try:
                response['balance'] = float(response['result']['Available'])
            except:
                # 'Available' may be missing/None for an empty wallet
                response['balance'] = 0.0
        else:
            response['error'] = response['message']
        return response
    def get_price(self, unit):
        """Return the current best bid/ask of the unit-NBT market."""
        response = self.get('/public/getticker', {'market': '%s-NBT' % unit})
        if response['success']:
            response.update({'bid': response['result']['Bid'], 'ask': response['result']['Ask']})
        else:
            response['error'] = response['message']
        return response
    def create_request(self, unit, key=None, secret=None):
        """Build pre-signed 'getorder' URLs for the orders tracked in
        self.placed, for later third-party validation."""
        if not secret or not key:
            return None, None
        uuids = []
        if key in self.placed and unit in self.placed[key]:
            if self.placed[key][unit]['bid']:
                uuids.append(self.placed[key][unit]['bid'])
            if self.placed[key][unit]['ask']:
                uuids.append(self.placed[key][unit]['ask'])
        requests = []
        signatures = []
        for uuid in uuids:
            data = 'https://bittrex.com/api/v1.1/account/getorder?apikey=%s&nonce=%d&uuid=%s' % (
                key, self.nonce(), uuid)
            requests.append(data)
            signatures.append(hmac.new(secret, data, hashlib.sha512).hexdigest())
        return {'requests': json.dumps(requests), 'signs': json.dumps(signatures)}, None
    def validate_request(self, key, unit, data, signs):
        """Replay the pre-signed requests and return a list of order dicts
        (id/price/type/amount/opened/closed), or {'error': ...}."""
        orders = []
        last_error = ""
        requests = json.loads(data['requests'])
        signs = json.loads(data['signs'])
        if len(requests) != len(signs):
            return {
                'error': 'missmatch between requests and signatures (%d vs %d)' % (len(data['requests']), len(signs))}
        if len(requests) > 2:
            return {'error': 'too many requests received: %d' % len(requests)}
        connection = httplib.HTTPSConnection('bittrex.com', timeout=5)
        for data, sign in zip(requests, signs):
            uuid = data.split('=')[-1]
            if not uuid in self.closed:
                headers = {'apisign': sign}
                connection.request('GET', data, headers=headers)
                response = json.loads(connection.getresponse().read())
                if response['success']:
                    # convert the exchange's ISO timestamps to unix seconds;
                    # fall back to 0 / maxint when a field is absent
                    try:
                        opened = int(
                            datetime.datetime.strptime(response['result']['Opened'], '%Y-%m-%dT%H:%M:%S.%f').strftime(
                                "%s"))
                    except:
                        opened = 0
                    try:
                        closed = int(
                            datetime.datetime.strptime(response['result']['Closed'], '%Y-%m-%dT%H:%M:%S.%f').strftime(
                                "%s"))
                    except:
                        closed = sys.maxint
                    # remember orders closed more than a minute ago
                    if closed < time.time() - 60:
                        self.closed.append(uuid)
                    orders.append({
                        'id': response['result']['OrderUuid'],
                        'price': response['result']['Limit'],
                        'type': 'ask' if 'SELL' in response['result']['Type'] else 'bid',
                        'amount': response['result']['QuantityRemaining'],
                        # if not closed == sys.maxint else response['result']['Quantity'],
                        'opened': opened,
                        'closed': closed,
                    })
                else:
                    last_error = response['message']
        if not orders and last_error != "":
            return {'error': last_error}
        return orders
class Cryptsy(Exchange):
    """Adapter for the Cryptsy authenticated API (NBT/unit markets)."""
    def __init__(self):
        super(Cryptsy, self).__init__(0.002)
        self.market_codes = {}    # cached 'LABEL' -> marketid mapping
        self.currency_codes = {}  # cached iso -> currency_id mapping
        self.key = None           # last credentials seen, reused by get_price
        self.secret = None
    def __repr__(self):
        return "cryptsy"
    def adjust(self, error):
        # Cryptsy nonces never need a clock adjustment.
        pass
    def post(self, params, key, secret):
        """Signed POST to the trading API; remembers key/secret for get_price."""
        self.key = key
        self.secret = secret
        request = {'nonce': self.nonce()}
        request.update(params)
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        headers = {'Sign': sign, 'Key': key}
        return json.loads(urllib2.urlopen(urllib2.Request(
            url='https://api.cryptsy.com/api', data=data,
            headers=headers)).read())
    def get(self, method, params, key, secret):
        """Signed GET request (parameters passed in the query string)."""
        self.key = key
        self.secret = secret
        request = {'nonce': self.nonce()}
        request.update(params)
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        headers = {'Sign': sign, 'Key': key}
        return json.loads(urllib2.urlopen(urllib2.Request(
            url='https://api.cryptsy.com/api/{0}?{1}'.format(method, data),
            headers=headers)).read())
    def get_market_codes(self, key, secret):
        """Fetch and cache the label -> marketid table (single API call)."""
        if self.market_codes:
            return self.market_codes
        market_data = self.post({'method': 'getmarkets'}, key, secret)
        for market in market_data['return']:
            self.market_codes[market['label']] = market['marketid']
        return self.market_codes
    def get_market_id(self, unit, key, secret):
        """Return the market id of the NBT/unit pair."""
        codes = self.get_market_codes(key, secret)
        return codes['NBT/{0}'.format(unit.upper())]
    def cancel_orders(self, unit, side, key, secret):
        """Cancel open orders on *side* ('bid'/'ask'/'all') of NBT/unit."""
        response = self.post({'method': 'myorders',
                              'marketid': self.get_market_id(unit, key, secret)}, key,
                             secret)
        if int(response['success']) == 0:
            return response
        for order in response['return']:
            if side == 'all' or (side == 'bid' and order['ordertype'] == 'Buy') or (
                    side == 'ask' and order['ordertype'] == 'Sell'):
                ret = self.post({'method': 'cancelorder', 'orderid': order['orderid']},
                                key, secret)
                if int(ret['success']) == 0:
                    # NOTE(review): when response is a dict without an 'error'
                    # key this '+=' raises KeyError -- verify error paths.
                    if isinstance(response, list):
                        response = {'error': ""}
                    response['error'] += "," + ret['error']
        return response
    def get_fee(self, unit, side, amount, price, key, secret):
        """Ask the API for the fee of a prospective order; returns it as a
        fraction of *price* (falls back to self.fee on error)."""
        params = {'method': 'calculatefees',
                  'ordertype': 'Buy' if side == 'bid' else 'Sell',
                  'quantity': amount,
                  'price': price,
                  'marketid': self.get_market_id(unit, key, secret)}
        response = self.post(params, key, secret)
        if int(response['success']) == 0:
            return self.fee
        return (float(response['return']['fee']) / float(price)) / 100
    def place_order(self, unit, side, key, secret, amount, price):
        """Place a limit order; on success response['id'] holds the order id.
        For bids the fee is subtracted from the quantity up front."""
        params = {'method': 'createorder',
                  'marketid': self.get_market_id(unit, key, secret),
                  'ordertype': 'Buy' if side == 'bid' else 'Sell',
                  'quantity':
                      (str(float(amount) - float(self.get_fee(unit, side, amount,
                                                              price, key, secret)))) if
                      side == 'bid' else amount,
                  'price': price}
        response = self.post(params, key, secret)
        if int(response['success']) > 0:
            response['id'] = int(response['orderid'])
        return response
    def get_balance(self, unit, key, secret):
        """Return the available balance for *unit* in response['balance']."""
        response = self.post({'method': 'getinfo'}, key, secret)
        if int(response['success']) > 0:
            response['balance'] = float(
                response['return']['balances_available'][unit.upper()])
        return response
    def get_price(self, unit):
        """Best bid/ask from the depth endpoint (reuses the credentials most
        recently passed to post/get)."""
        response = self.post({'method': 'depth', 'marketid': self.get_market_id(
            unit,
            self.key,
            self.secret)}, self.key, self.secret)
        if int(response['success']) > 0:
            response.update({'bid': None, 'ask': None})
            if response['return']['buy']:
                response['bid'] = float(response['return']['buy'][0][0])
            if response['return']['sell']:
                response['ask'] = float(response['return']['sell'][0][0])
        return response
    def create_request(self, unit, key=None, secret=None):
        """Build a signed 'myorders' request for third-party validation."""
        if not secret:
            return None, None
        request = {'method': 'myorders',
                   'marketid': self.get_market_id(unit, key, secret),
                   'nonce': self.nonce()}
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        return request, sign
    def validate_request(self, key, unit, data, sign):
        """Replay a signed request and normalize the resulting open orders."""
        headers = {'Sign': sign, 'Key': key}
        ret = urllib2.urlopen(urllib2.Request('https://api.cryptsy.com/api',
                                              urllib.urlencode(data), headers), timeout=5)
        response = json.loads(ret.read())
        if int(response['success']) == 0:
            return response
        return [{'id': int(order['orderid']),
                 'price': float(order['price']),
                 'type': 'ask' if order['ordertype'] == 'Sell' else 'bid',
                 'amount': float(order['quantity'])} for order in response['return']]
class SouthXChange(Exchange):
    """Adapter for the SouthXchange API.

    Markets are listed as unit/NBT (opposite of the other adapters), so
    sides, amounts and prices are inverted when talking to the exchange.
    """
    def __init__(self):
        super(SouthXChange, self).__init__(0.002)
    def __repr__(self):
        return 'southxchange'
    def adjust(self, error):
        # nonce errors are not handled for this exchange
        pass
    def post(self, method, key, secret, params=None):
        """Signed JSON POST to the private API."""
        data = {'nonce': self.nonce(), 'key': key}
        if params is not None:
            data.update(params)
        data = json.dumps(data)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        headers = {'Hash': sign, 'Content-Type': 'application/json'}
        url = 'https://www.southxchange.com/api/{0}/'.format(method)
        try:
            request = urllib2.Request(url=url, data=data, headers=headers)
        except urllib2.HTTPError as e:
            # NOTE(review): Request() does not perform I/O, so HTTPError is
            # unlikely to be raised here -- the urlopen below is unguarded.
            if e.code == 500:
                return {'error': 'API returned 500 error'}
            return {'error': 'API error'}
        return json.loads(urllib2.urlopen(request).read())
    def get(self, method, unit=None):
        """Unauthenticated GET; *unit* selects the UNIT/NBT market."""
        url = 'https://www.southxchange.com/api/{0}/'.format(method)
        if unit is not None:
            url += unit.upper() + "/NBT"
        try:
            request = urllib2.Request(url=url)
        except urllib2.HTTPError as e:
            if e.code == 500:
                return {'error': 'API returned 500 error'}
            return {'error': 'API error'}
        return json.loads(urllib2.urlopen(request).read())
    def get_price(self, unit):
        """Return the market's Bid/Ask under the common 'bid'/'ask' keys."""
        response = self.get('price', unit)
        if 'error' not in response:
            response.update({'bid': None, 'ask': None})
            response['bid'] = response['Bid']
            response['ask'] = response['Ask']
        return response
    def place_order(self, unit, side, key, secret, amount, price):
        """Place a limit order.

        Because the exchange lists unit/NBT, a 'bid' here maps to a sell of
        *unit* and the limit is the inverted price (1/price) -- inferred from
        this conversion code; confirm against the SouthXchange API docs.
        """
        method = 'placeOrder'
        params = {'listingCurrency': unit.upper(),
                  'referenceCurrency': 'NBT',
                  'type': 'sell' if side == 'bid' else 'buy',
                  'amount': (round(float(amount) * float(price), 4)) if side == 'bid'
                  else (round(float(amount) / float(price), 4)),
                  'limitPrice': round(1 / float(price), 4)}
        response = self.post(method, key, secret, params)
        if 'error' in response:
            return response
        return {'id': response}
    def cancel_orders(self, unit, side, key, secret):
        """Cancel open orders on *side* ('bid'/'ask'/'all') of unit/NBT."""
        response = self.post('listOrders', key, secret)
        if 'error' in response:
            return response
        for order in response:
            # sides are swapped: our 'bid' is the exchange's 'sell'
            if side == 'all' or (side == 'bid' and order['Type'] == 'sell') or \
                    (side == 'ask' and order['Type'] == 'buy'):
                if order['ListingCurrency'] == unit.upper() and \
                        order['ReferenceCurrency'] == 'NBT':
                    ret = self.post('cancelOrder', key, secret, {'orderCode': order['Code']})
                    if 'error' in ret:
                        if isinstance(response, list):
                            response = {'error': ""}
                        response['error'] += "," + ret['error']
        print response  # NOTE(review): debugging leftover -- consider removing
        return response
    def get_balance(self, unit, key, secret):
        """Return {'balance': ...} for *unit* (0 when absent or on error)."""
        response = self.post('listBalances', key, secret)
        if 'error' in response:
            # fall through to the 0-balance default below
            response = []
        unit = unit.upper()
        for balance in response:
            if balance['Currency'] == unit:
                return {'balance': balance['Available']}
        return {'balance': 0}
    def create_request(self, unit, key=None, secret=None):
        """Build a signed listOrders payload for third-party validation."""
        if not secret and not key:
            return None, None
        request = {'nonce': self.nonce(), 'key': key}
        data = json.dumps(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        return {'data': data}, sign
    def validate_request(self, key, unit, data, sign):
        """Replay a signed listOrders request; convert the orders back into
        the NBT-quoted convention used by the rest of the bot."""
        url = 'https://www.southxchange.com/api/listOrders'
        headers = {'Hash': sign, 'Content-Type': 'application/json'}
        request = urllib2.Request(url=url, data=data['data'], headers=headers)
        response = json.loads(urllib2.urlopen(request).read())
        return [{
            'id': order['Code'],
            'price': round(1 / float(order['LimitPrice']), 8),
            'type': 'bid' if order['Type'] == 'sell' else 'ask',
            'amount': round(float(order['Amount']) * float(order['LimitPrice']),
                            8) if order['Type'] == 'sell' else round(float(
                order['Amount']) / float(order['LimitPrice']), 8)
        } for order in response]
class Poloniex(Exchange):
    """Adapter for the Poloniex trading API (unit_NBT markets)."""

    def __init__(self):
        super(Poloniex, self).__init__(0.002)

    def __repr__(self):
        return "poloniex"

    def adjust(self, error):
        """Resynchronize the nonce shift from a 'Nonce must be greater than'
        error message; otherwise just bump the shift by 100."""
        if "Nonce must be greater than" in error:  # (TODO: regex)
            if ':' in error: error = error.split(':')[1].strip()
            error = error.replace('.', '').split()
            # words 5 and 8 of the message hold the two nonces (format assumed
            # from observed error strings -- verify if Poloniex rewords them)
            self._shift += 100.0 + (int(error[5]) - int(error[8])) / 1000.0
        else:
            self._shift = self._shift + 100.0

    def post(self, method, params, key, secret):
        """Signed POST to the trading API; returns the decoded JSON reply."""
        request = {'nonce': self.nonce(), 'command': method}
        request.update(params)
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        headers = {'Sign': sign, 'Key': key}
        return json.loads(urllib2.urlopen(urllib2.Request('https://poloniex.com/tradingApi', data, headers)).read())

    def cancel_orders(self, unit, side, key, secret):
        """Cancel open orders on *side* ('bid'/'ask'/'all') of unit_NBT;
        cancel failures are collected into a single 'error' string."""
        response = self.post('returnOpenOrders', {'currencyPair': "%s_NBT" % unit.upper()}, key, secret)
        if 'error' in response: return response
        for order in response:
            if side == 'all' or (side == 'bid' and order['type'] == 'buy') or (
                    side == 'ask' and order['type'] == 'sell'):
                ret = self.post('cancelOrder',
                                {'currencyPair': "%s_NBT" % unit.upper(), 'orderNumber': order['orderNumber']}, key,
                                secret)
                if 'error' in ret:
                    # on first failure, switch from the order list to an error dict
                    if isinstance(response, list): response = {'error': ""}
                    response['error'] += "," + ret['error']
        return response

    def place_order(self, unit, side, key, secret, amount, price):
        """Place a limit order; on success response['id'] is the order number."""
        params = {'currencyPair': "%s_NBT" % unit.upper(), "rate": price, "amount": amount}
        response = self.post('buy' if side == 'bid' else 'sell', params, key, secret)
        if not 'error' in response:
            response['id'] = int(response['orderNumber'])
        return response

    def get_balance(self, unit, key, secret):
        """Return the account balance for *unit* in response['balance']."""
        response = self.post('returnBalances', {}, key, secret)
        if not 'error' in response:
            response['balance'] = float(response[unit.upper()])
        return response

    def get_price(self, unit):
        """Return the best bid/ask of the unit_NBT order book.

        Bug fix: returnOrderBook answers with 'bids'/'asks' lists of
        [price, amount] pairs.  The previous code re-read the 'bid'/'ask'
        keys it had just set to None, so both prices always came back as
        None.  Now reads 'bids'/'asks' like the BTER adapter does.
        """
        response = json.loads(urllib2.urlopen('https://poloniex.com/public?' +
                                              urllib.urlencode({'command': 'returnOrderBook',
                                                                'currencyPair': "%s_NBT" % unit.upper(), 'depth': 1}),
                                              timeout=5).read())
        if not 'error' in response:
            response.update({'bid': None, 'ask': None})
            if response['bids']: response['bid'] = float(response['bids'][0][0])
            if response['asks']: response['ask'] = float(response['asks'][0][0])
        return response

    def create_request(self, unit, key=None, secret=None):
        """Build a signed returnOpenOrders request for validation."""
        if not secret: return None, None
        request = {'command': 'returnOpenOrders', 'nonce': self.nonce(), 'currencyPair': "%s_NBT" % unit.upper()}
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        return request, sign

    def validate_request(self, key, unit, data, sign):
        """Replay a signed request and normalize the open orders."""
        headers = {'Sign': sign, 'Key': key}
        ret = urllib2.urlopen(urllib2.Request('https://poloniex.com/tradingApi', urllib.urlencode(data), headers),
                              timeout=5)
        response = json.loads(ret.read())
        if 'error' in response: return response
        return [{
            'id': int(order['orderNumber']),
            'price': float(order['rate']),
            'type': 'ask' if order['type'] == 'sell' else 'bid',
            'amount': float(order['amount']),
        } for order in response]
class CCEDK(Exchange):
    """Adapter for the CCEDK v1 API; resolves pair and currency ids once at
    construction time (retrying until the server answers)."""
    def __init__(self):
        super(CCEDK, self).__init__(0.002)
        self.pair_id = {}      # unit -> pair_id for nbt/unit markets
        self.currency_id = {}  # currency iso code -> currency_id
        failed = False
        # Retry until both id tables are loaded; on an API-level error the
        # nonce shift is resynchronized via adjust().
        while not self.pair_id or not self.currency_id:
            try:
                response = None
                if not self.pair_id:
                    url = 'https://www.ccedk.com/api/v1/stats/marketdepthfull'
                    response = json.loads(urllib2.urlopen(urllib2.Request(url), timeout=15).read())
                    for unit in response['response']['entities']:
                        if unit['pair_name'][:4] == 'nbt/':
                            self.pair_id[unit['pair_name'][4:]] = unit['pair_id']
                if not self.currency_id:
                    url = 'https://www.ccedk.com/api/v1/currency/list'
                    response = json.loads(urllib2.urlopen(urllib2.Request(url), timeout=15).read())
                    for unit in response['response']['entities']:
                        self.currency_id[unit['iso'].lower()] = unit['currency_id']
            except Exception as e:
                if response and not response['response']:
                    self.adjust(",".join(response['errors'].values()))
                    if failed:
                        print >> sys.stderr, "could not retrieve ccedk ids, will adjust shift to", self._shift, \
                            "reason:", ",".join(response['errors'].values())
                else:
                    print >> sys.stderr, "could not retrieve ccedk ids, server is unreachable", e
                failed = True
                time.sleep(1)
    def __repr__(self):
        return "ccedk"
    def nonce(self, factor=1.0):
        """CCEDK nonces are whole seconds; enforce strict monotonicity."""
        n = int(time.time() + self._shift)
        if n == self._nonce:
            n = self._nonce + 1
        self._nonce = n
        return n
    def adjust(self, error):
        """Parse an 'incorrect range' nonce error and move the shift into the
        server's accepted window; fall back to a small random jitter."""
        if "incorrect range" in error:  # (TODO: regex)
            if ':' in error:
                error = error.split(':')[1].strip()
            # the message embeds `min`, `max` and the rejected nonce at fixed
            # word positions (format assumed from observed error strings)
            try:
                minimum = int(error.strip().split()[-3].replace('`', ''))
                maximum = int(error.strip().split()[-1].replace('`', ''))
                current = int(error.strip().split()[-7].split('`')[3])
            except:
                self._shift += random.randrange(-10, 10)
            else:
                if current < maximum:
                    # aim at the upper third of the accepted window
                    new_shift = (minimum + 2 * maximum) / 3 - current
                    if new_shift < 0:
                        new_shift = 10
                else:
                    # overshot: aim at the lower third instead
                    new_shift = (2 * minimum + maximum) / 3 - current
                if new_shift != 0:
                    self._shift += new_shift
                else:
                    self._shift += random.randrange(-10, 10)
        else:
            self._shift += random.randrange(-10, 10)
    def post(self, method, params, key, secret):
        """Signed POST; joins API 'errors' into a single 'error' string.

        NOTE(review): 'errors' is compared with `is True` but then used as a
        dict (.values()) -- looks inconsistent; verify the real API shape.
        """
        request = {'nonce': self.nonce()}  # TODO: check for unique nonce
        request.update(params)
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        headers = {"Content-type": "application/x-www-form-urlencoded", "Key": key, "Sign": sign}
        url = 'https://www.ccedk.com/api/v1/' + method
        response = json.loads(urllib2.urlopen(urllib2.Request(url, data, headers), timeout=15).read())
        if response['errors'] is True:
            response['error'] = ",".join(response['errors'].values())
        return response
    def cancel_orders(self, unit, side, key, secret):
        """Cancel open orders on *side* ('bid'/'ask'/'all') of the nbt/unit pair."""
        response = self.post('order/list', {}, key, secret)
        if not response['response'] or not response['response']['entities']:
            return response
        for order in response['response']['entities']:
            if side == 'all' \
                    or (side == 'bid' and order['type'] == 'buy') \
                    or (side == 'ask' and order['type'] == 'sell'):
                if order['pair_id'] == self.pair_id[unit.lower()]:
                    ret = self.post('order/cancel', {'order_id': order['order_id']}, key, secret)
                    if ret['errors'] is True:
                        if 'error' not in response:
                            response['error'] = ""
                        response['error'] += ",".join(ret['errors'].values())
        return response
    def place_order(self, unit, side, key, secret, amount, price):
        """Place a limit order; on success response['id'] holds the order id."""
        params = {"type": 'buy' if side == 'bid' else 'sell',
                  "price": price,
                  "pair_id": int(self.pair_id[unit.lower()]),
                  "amount": amount}
        response = self.post('order/new', params, key, secret)
        if response['errors'] is True:
            response['error'] = ",".join(response['errors'].values())
        else:
            response['id'] = int(response['response']['entity']['order_id'])
        return response
    def get_balance(self, unit, key, secret):
        """Return the balance for *unit* in response['balance']."""
        params = {"currency_id": self.currency_id[unit.lower()]}
        response = self.post('balance/info', params, key, secret)
        if response['errors'] is True:
            response['error'] = ",".join(response['errors'].values())
        else:
            response['balance'] = float(response['response']['entity']['balance'])
        return response
    def get_price(self, unit):
        """Best bid/ask from the public order book."""
        url = 'https://www.ccedk.com/api/v1/orderbook/info?' + urllib.urlencode({'pair_id': self.pair_id[unit.lower()]})
        response = json.loads(urllib2.urlopen(urllib2.Request(url), timeout=5).read())
        if response['errors'] is True:
            response['error'] = ",".join(response['errors'].values())
            return response
        response.update({'bid': None, 'ask': None})
        if response['response']['entities']['bids']:
            response['bid'] = float(response['response']['entities']['bids'][0]['price'])
        if response['response']['entities']['asks']:
            response['ask'] = float(response['response']['entities']['asks'][0]['price'])
        return response
    def create_request(self, unit, key=None, secret=None):
        """Build a signed (nonce-only) request for third-party validation."""
        if not secret:
            return None, None
        request = {'nonce': self.nonce()}
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        return request, sign
    def validate_request(self, key, unit, data, sign):
        """Replay a signed order/list request; normalize orders of this pair."""
        headers = {"Content-type": "application/x-www-form-urlencoded", "Key": key, "Sign": sign}
        url = 'https://www.ccedk.com/api/v1/order/list'
        response = json.loads(urllib2.urlopen(urllib2.Request(url, urllib.urlencode(data), headers), timeout=5).read())
        if response['errors'] is True:
            response['error'] = ",".join(response['errors'].values())
            return response
        if not response['response']['entities']:
            response['response']['entities'] = []
        validation = [{
            'id': int(order['order_id']),
            'price': float(order['price']),
            'type': 'ask' if order['type'] == 'sell' else 'bid',
            'amount': float(order['volume']),
        } for order in response['response']['entities'] if order['pair_id'] == self.pair_id[unit.lower()]]
        return validation
class BitcoinCoId(Exchange):
    """Adapter for the bitcoin.co.id (VIP) trading API (nbt_* markets)."""
    def __init__(self):
        super(BitcoinCoId, self).__init__(0.0)
        # seed the clock shift from the server time so first nonces are valid
        try:
            ping = time.time()
            response = json.loads(urllib2.urlopen(urllib2.Request('https://vip.bitcoin.co.id/api/summaries')).read())
            self._shift = float(response['tickers']['btc_idr']['server_time']) - ping
        except:
            pass
    def __repr__(self):
        return "bitcoincoid"
    def adjust(self, error):
        """Resynchronize the shift from a 'Nonce must be greater than' error."""
        if "Nonce must be greater than" in error:  # (TODO: regex)
            if ':' in error: error = error.split(':')[1].strip()
            error = error.replace('.', '').split()
            # words 5 and 8 of the message hold the two nonces (format assumed)
            self._shift += 100.0 + (int(error[5]) - int(error[8])) / 1000.0
        else:
            self._shift = self._shift + 100.0
    def nonce(self, factor=1000.0):
        """Millisecond nonce, forced to grow by at least 300 per call."""
        n = int((time.time() + self._shift) * float(factor))
        if n - self._nonce < 300:
            n = self._nonce + 300
        self._nonce = n
        return n
    def post(self, method, params, key, secret):
        """Signed POST to the trading API endpoint."""
        request = {'nonce': self.nonce(), 'method': method}
        request.update(params)
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        headers = {'Sign': sign, 'Key': key}
        response = json.loads(urllib2.urlopen(urllib2.Request('https://vip.bitcoin.co.id/tapi', data, headers)).read())
        return response
    def cancel_orders(self, unit, side, key, secret):
        """Cancel open orders on *side* ('bid'/'ask'/'all') of nbt_unit."""
        response = self.post('openOrders', {'pair': 'nbt_' + unit.lower()}, key, secret)
        if response['success'] == 0 or not response['return']['orders']: return response
        for order in response['return']['orders']:
            if side == 'all' or (side == 'bid' and order['type'] == 'buy') or (
                    side == 'ask' and order['type'] == 'sell'):
                params = {'pair': 'nbt_' + unit.lower(), 'order_id': order['order_id'], 'type': order['type']}
                ret = self.post('cancelOrder', params, key, secret)
                if 'error' in ret:
                    if not 'error' in response: response['error'] = ""
                    response['error'] += "," + ret['error']
        return response
    def place_order(self, unit, side, key, secret, amount, price):
        """Place a limit order; amounts are quoted in the spend currency."""
        params = {'pair': 'nbt_' + unit.lower(), 'type': 'buy' if side == 'bid' else 'sell', 'price': price}
        if side == 'bid':
            params[unit.lower()] = amount * price
        else:
            params['nbt'] = amount
            # NOTE(review): this extra key uses the raw (possibly upper-case)
            # unit and duplicates the spend amount on a sell -- verify intent.
            params[unit] = amount * price
        response = self.post('trade', params, key, secret)
        if response['success'] == 1:
            response['id'] = int(response['return']['order_id'])
        return response
    def get_balance(self, unit, key, secret):
        """Return the balance for *unit* in response['balance']."""
        response = self.post('getInfo', {}, key, secret)
        if response['success'] == 1:
            response['balance'] = float(response['return']['balance'][unit.lower()])
        return response
    def get_price(self, unit):
        """Best bid/ask from the public depth endpoint."""
        response = json.loads(
            urllib2.urlopen(urllib2.Request('https://vip.bitcoin.co.id/api/nbt_%s/depth' % unit.lower()),
                            timeout=5).read())
        if 'error' in response:
            return response
        response.update({'bid': None, 'ask': None})
        if response['buy']: response['bid'] = float(response['buy'][0][0])
        if response['sell']: response['ask'] = float(response['sell'][0][0])
        return response
    def create_request(self, unit, key=None, secret=None):
        """Build a signed openOrders request for third-party validation."""
        if not secret: return None, None
        request = {'nonce': self.nonce(), 'pair': 'nbt_' + unit.lower(), 'method': 'openOrders'}
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        return request, sign
    def validate_request(self, key, unit, data, sign):
        """Replay a signed request; normalize open orders, converting the
        remaining buy amount from spend currency back into NBT."""
        headers = {"Key": key, "Sign": sign}
        response = json.loads(
            urllib2.urlopen(urllib2.Request('https://vip.bitcoin.co.id/tapi', urllib.urlencode(data), headers),
                            timeout=5).read())
        if response['success'] == 0:
            return response
        if not response['return']['orders']:
            response['return']['orders'] = []
        return [{
            'id': int(order['order_id']),
            'price': float(order['price']),
            'type': 'ask' if order['type'] == 'sell' else 'bid',
            'amount': float(order['remain_' + (unit.lower() if order['type'] == 'buy' else 'nbt')]) / (
                float(order['price']) if order['type'] == 'buy' else 1.0),
        } for order in response['return']['orders']]
class BTER(Exchange):
    """Adapter for the BTER private/public API (nbt_* markets)."""
    def __init__(self):
        super(BTER, self).__init__(0.002)
    def __repr__(self):
        return "bter"
    def adjust(self, error):
        # BTER requests carry no nonce, so there is nothing to adjust
        pass
    def https_request(self, method, params, headers=None, timeout=None):
        """POST *params* to the private API and decode the JSON reply."""
        if not headers: headers = {}
        connection = httplib.HTTPSConnection('data.bter.com', timeout=timeout)
        connection.request('POST', '/api/1/private/' + method, params, headers)
        response = connection.getresponse().read()
        return json.loads(response)
    def post(self, method, params, key, secret):
        """Signed POST to the private API."""
        data = urllib.urlencode(params)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        headers = {'Sign': sign, 'Key': key, "Content-type": "application/x-www-form-urlencoded"}
        return self.https_request(method, data, headers)
    def cancel_orders(self, unit, side, key, secret):
        """Cancel open orders on *side* ('bid'/'ask'/'all') of nbt_unit.

        NOTE(review): the side filters compare sell_type/buy_type with '!='
        rather than '==', which looks inverted -- verify against live data.
        """
        response = self.post('orderlist', {}, key, secret)
        if not response['result']:
            response['error'] = response['msg']
            return response
        if not response['orders']: response['orders'] = []
        for order in response['orders']:
            if side == 'all' or (side == 'ask' and order['sell_type'] != unit) or (
                    side == 'bid' and order['buy_type'] != unit):
                if order['pair'] == 'nbt_' + unit.lower():
                    params = {'order_id': order['oid']}
                    ret = self.post('cancelorder', params, key, secret)
                    if not ret['result']:
                        if not 'error' in response: response['error'] = ""
                        response['error'] += "," + ret['msg']
        return response
    def place_order(self, unit, side, key, secret, amount, price):
        """Place a limit order; response['id'] holds the order id on success."""
        params = {'pair': 'nbt_' + unit.lower(), 'type': 'buy' if side == 'bid' else 'sell', 'rate': price,
                  'amount': amount}
        response = self.post('placeorder', params, key, secret)
        if response['result']:
            response['id'] = int(response['order_id'])
        else:
            response['error'] = response['msg']
        return response
    def get_balance(self, unit, key, secret):
        """Available funds for *unit* (0.0 when the currency is absent)."""
        response = self.post('getfunds', {}, key, secret)
        if response['result']:
            if unit.upper() in response['available_funds']:
                response['balance'] = float(response['available_funds'][unit.upper()])
            else:
                response['balance'] = 0.0
        else:
            response['error'] = response['msg']
        return response
    def get_price(self, unit):
        """Best bid/ask from the public depth endpoint (the ask is taken from
        the LAST element, presumably because asks arrive sorted descending --
        assumed from this code; confirm against the API)."""
        connection = httplib.HTTPSConnection('data.bter.com', timeout=5)
        connection.request('GET', '/api/1/depth/nbt_' + unit.lower())
        response = json.loads(connection.getresponse().read())
        if not 'result' in response or not response['result']:
            response['error'] = response['msg'] if 'msg' in response else 'invalid response: %s' % str(response)
            return response
        response.update({'bid': None, 'ask': None})
        if response['bids']: response['bid'] = float(response['bids'][0][0])
        if response['asks']: response['ask'] = float(response['asks'][-1][0])
        return response
    def create_request(self, unit, key=None, secret=None):
        """Build a signed (empty, nonce-less) orderlist request for validation."""
        if not secret: return None, None
        request = {}  # no nonce required
        data = urllib.urlencode(request)
        sign = hmac.new(secret, data, hashlib.sha512).hexdigest()
        return request, sign
    def validate_request(self, key, unit, data, sign):
        """Replay a signed orderlist request; normalize amounts into NBT."""
        headers = {'Sign': sign, 'Key': key, "Content-type": "application/x-www-form-urlencoded"}
        response = self.https_request('orderlist', urllib.urlencode(data), headers, timeout=15)
        if not 'result' in response or not response['result']:
            response['error'] = response['msg'] if 'msg' in response else 'invalid response: %s' % str(response)
            return response
        if not response['orders']:
            response['orders'] = []
        return [{
            'id': int(order['oid']),
            'price': float(order['rate']),
            'type': 'ask' if order['buy_type'].lower() == unit.lower() else 'bid',
            'amount': float(order['amount']) / (
                1.0 if order['buy_type'].lower() == unit.lower() else float(order['rate'])),
        } for order in response['orders'] if order['pair'] == 'nbt_' + unit.lower()]
class Peatio(Exchange):
    """Adapter for a Peatio-based exchange reached at a fixed IP address.

    Requests are authenticated with HMAC-SHA256 over the string
    ``"<VERB>|/api/v2/<method>|<canonical query>"`` as Peatio requires.
    """
    def __init__(self):
        # 0.002 is forwarded to the Exchange base constructor -- presumably
        # the trading-fee fraction (0.2%); Exchange is defined elsewhere. TODO confirm.
        super(Peatio, self).__init__(0.002)
    def __repr__(self):
        return "testing"
    def adjust(self, error):
        # Re-sync the local tonce shift from a server timestamp-error message.
        if "is invalid, current timestamp is" in error:
            try:
                # Expected message shape: our tonce is the 3rd token and the
                # server's time is the last token (dots stripped).
                tonce = int(error.split()[2])
                times = int(error.split()[-1].replace('.', ''))
                self._shift = int(float(times - tonce) / 1000.0)
            except:
                print error
                pass
        else:
            print error
    def urlencode(self, params): # from https://github.com/JohnnyZhao/peatio-client-python/blob/master/lib/auth.py#L11
        """Build Peatio's canonical query string: sorted keys, with the
        special 'orders' key expanded to repeated orders[][k]=v items."""
        keys = sorted(params.keys())
        query = ''
        for key in keys:
            value = params[key]
            if key != "orders":
                query = "%s&%s=%s" % (query, key, value) if len(query) else "%s=%s" % (key, value)
            else:
                # NOTE(review): 'd' is built but never used.
                d = {key: params[key]}
                for v in value:
                    ks = v.keys()
                    ks.sort()
                    for k in ks:
                        item = "orders[][%s]=%s" % (k, v[k])
                        query = "%s&%s" % (query, item) if len(query) else "%s" % item
        return query
    def query(self, qtype, method, params, key, secret):
        """Send a signed request; qtype is the HTTP verb ('GET' or 'POST')."""
        request = {'tonce': self.nonce(), 'access_key': key}
        request.update(params)
        data = self.urlencode(request)
        # The signature covers verb, API path and the canonical query string.
        msg = "%s|/api/v2/%s|%s" % (qtype, method, data)
        data += "&signature=" + hmac.new(secret, msg, hashlib.sha256).hexdigest()
        connection = httplib.HTTPSConnection('178.62.140.24', timeout=5)
        connection.request(qtype, '/api/v2/' + method + '?' + data)
        return json.loads(connection.getresponse().read())
    def post(self, method, params, key, secret):
        return self.query('POST', method, params, key, secret)
    def get(self, method, params, key, secret):
        return self.query('GET', method, params, key, secret)
    def cancel_orders(self, unit, side, key, secret):
        """Cancel open orders on the nbt<unit> market; side is 'bid', 'ask' or 'all'."""
        response = self.get('orders.json', {'market': "nbt%s" % unit.lower()}, key, secret)
        if 'error' in response:
            response['error'] = response['error']['message']
            return response
        for order in response:
            if side == 'all' or (side == 'bid' and order['side'] == 'buy') or (
                            side == 'ask' and order['side'] == 'sell'):
                ret = self.post('order/delete.json', {'id': order['id']}, key, secret)
                if 'error' in ret:
                    # A successful list response is a list; replace it with a
                    # dict so error messages can accumulate.
                    if isinstance(response, list): response = {'error': ""}
                    response['error'] += "," + ret['error']['message']
        return response
    def place_order(self, unit, side, key, secret, amount, price):
        """Submit a limit order; on success exposes the id as response['id']."""
        params = {'market': "nbt%s" % unit.lower(), "side": 'buy' if side == 'bid' else 'sell', "volume": amount,
                  "price": price}
        response = self.post('orders', params, key, secret)
        if 'error' in response:
            response['error'] = response['error']['message']
        else:
            response['id'] = int(response['id'])
        return response
    def get_balance(self, unit, key, secret):
        """Read the account balance for *unit* (0.0 when no account matches)."""
        response = self.get('members/me.json', {}, key, secret)
        if 'error' in response:
            response['error'] = response['error']['message']
        else:
            response['balance'] = 0.0
            for pair in response['accounts']:
                if pair['currency'] == unit.lower():
                    response['balance'] = float(pair['balance'])
        return response
    def get_price(self, unit):
        """Fetch best bid/ask from the public depth endpoint."""
        connection = httplib.HTTPSConnection('178.62.140.24', timeout=15)
        connection.request('GET',
                           '/api/v2/depth.json?' + self.urlencode({'market': "nbt%s" % unit.lower(), 'limit': 1}))
        response = json.loads(connection.getresponse().read())
        if 'error' in response:
            response['error'] = response['error']['message']
            return response
        response.update({'bid': None, 'ask': None})
        if response['bids']: response['bid'] = float(response['bids'][0][0])
        # NOTE(review): takes the *last* ask; assumes the feed sorts asks
        # best-last -- confirm against the API.
        if response['asks']: response['ask'] = float(response['asks'][-1][0])
        return response
    def create_request(self, unit, key=None, secret=None):
        """Build a pre-signed GET orders.json request for later validation."""
        if not secret: return None, None
        request = {'tonce': self.nonce(), 'access_key': key, 'market': "nbt%s" % unit.lower()}
        data = self.urlencode(request)
        msg = "GET|/api/v2/orders.json|%s" % data
        request['signature'] = hmac.new(secret, msg, hashlib.sha256).hexdigest()
        return request, ''
    def validate_request(self, key, unit, data, sign):
        """Replay a pre-signed request and normalize the user's open orders."""
        if not 'market' in data or data['market'] != "nbt%s" % unit.lower():
            return {'error': 'invalid market'}
        connection = httplib.HTTPSConnection('178.62.140.24', timeout=15)
        connection.request('GET', '/api/v2/orders.json?' + self.urlencode(data))
        response = json.loads(connection.getresponse().read())
        if 'error' in response:
            response['error'] = response['error']['message']
            return response
        return [{
            'id': int(order['id']),
            'price': float(order['price']),
            'type': 'ask' if order['side'] == 'sell' else 'bid',
            'amount': float(order['remaining_volume']),
        } for order in response]
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
    """PrivateEndpointConnectionsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    Generated by AutoRest; edits here will be lost on regeneration.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.media.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def list(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnectionListResult":
        """Get all private endpoint connections.

        Get all private endpoint connections.

        :param resource_group_name: The name of the resource group within the Azure subscription.
        :type resource_group_name: str
        :param account_name: The Media Services account name.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnectionListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.PrivateEndpointConnectionListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnectionListResult"]
        # Default mapping of auth/404/409 to typed errors; callers may extend
        # or override it via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Construct URL from the metadata template attached below.
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/privateEndpointConnections'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        name: str,
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        """Get private endpoint connection.

        Get private endpoint connection.

        :param resource_group_name: The name of the resource group within the Azure subscription.
        :type resource_group_name: str
        :param account_name: The Media Services account name.
        :type account_name: str
        :param name: Name of the private endpoint connection.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/privateEndpointConnections/{name}'}  # type: ignore

    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        name: str,
        parameters: "_models.PrivateEndpointConnection",
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        """Update private endpoint connection.

        Update private endpoint connection.

        :param resource_group_name: The name of the resource group within the Azure subscription.
        :type resource_group_name: str
        :param account_name: The Media Services account name.
        :type account_name: str
        :param name: Name of the private endpoint connection.
        :type name: str
        :param parameters: The request parameters.
        :type parameters: ~azure.mgmt.media.models.PrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the model into the PUT body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/privateEndpointConnections/{name}'}  # type: ignore

    async def delete(
        self,
        resource_group_name: str,
        account_name: str,
        name: str,
        **kwargs: Any
    ) -> None:
        """Delete private endpoint connection.

        Delete private endpoint connection.

        :param resource_group_name: The name of the resource group within the Azure subscription.
        :type resource_group_name: str
        :param account_name: The Media Services account name.
        :type account_name: str
        :param name: Name of the private endpoint connection.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # DELETE accepts both 200 (deleted) and 204 (no content).
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/privateEndpointConnections/{name}'}  # type: ignore
|
|
from __future__ import with_statement
import collections
import os
import signal
import sys
from six.moves import _thread
import time
import argparse
import six
import plop.platform
class Collector(object):
    """Samples the call stacks of all threads on a POSIX interval timer."""

    # mode -> (itimer constant, signal delivered when that timer fires)
    MODES = {
        'prof': (plop.platform.ITIMER_PROF, signal.SIGPROF),
        'virtual': (plop.platform.ITIMER_VIRTUAL, signal.SIGVTALRM),
        'real': (plop.platform.ITIMER_REAL, signal.SIGALRM),
    }

    def __init__(self, interval=0.01, mode='virtual'):
        # interval: seconds between samples; mode: one of MODES.
        self.interval = interval
        self.mode = mode
        assert mode in Collector.MODES
        timer, sig = Collector.MODES[self.mode]
        # Install the sampling handler; siginterrupt(False) restarts
        # interrupted system calls instead of failing them with EINTR.
        signal.signal(sig, self.handler)
        signal.siginterrupt(sig, False)
        self.reset()

    def reset(self):
        """Discard all collected samples and bookkeeping counters."""
        self.stacks = list()        # one frame list per sampled thread per tick
        self.samples_remaining = 0
        self.stopping = False       # request flag, set by stop()
        self.stopped = False        # acknowledged by the signal handler
        self.samples_taken = 0
        self.sample_time = 0        # total seconds spent inside the handler

    def start(self, duration=30.0):
        """Arm the interval timer for roughly *duration* seconds of sampling."""
        self.stopping = False
        self.stopped = False
        self.samples_remaining = int(duration / self.interval)
        timer, sig = Collector.MODES[self.mode]
        plop.platform.setitimer(timer, self.interval, self.interval)

    def stop(self):
        """Request the handler to disarm the timer, then wait until it has."""
        self.stopping = True
        self.wait()

    def wait(self):
        while not self.stopped:
            pass # need busy wait; ITIMER_PROF doesn't proceed while sleeping

    def handler(self, sig, current_frame):
        """Timer-signal handler: record one stack for every live thread."""
        start = time.time()
        self.samples_remaining -= 1
        if self.samples_remaining <= 0 or self.stopping:
            # Disarm the timer and acknowledge the stop request.
            plop.platform.setitimer(Collector.MODES[self.mode][0], 0, 0)
            self.stopped = True
            return
        current_tid = _thread.get_ident()
        for tid, frame in six.iteritems(sys._current_frames()):
            if tid == current_tid:
                # sys._current_frames() reports a stale frame for the thread
                # running this handler; use the frame the signal gave us.
                frame = current_frame
            frames = []
            while frame is not None:
                code = frame.f_code
                frames.append((code.co_filename, code.co_firstlineno, code.co_name))
                frame = frame.f_back
            self.stacks.append(frames)
        end = time.time()
        self.samples_taken += 1
        self.sample_time += (end - start)
class CollectorFormatter(object):
    """
    Abstract class for output formats
    """
    def format(self, collector):
        """Render *collector*'s samples as a string; subclasses override."""
        raise Exception("not implemented")

    def store(self, collector, filename):
        """Format the samples and write them to *filename*.

        The file is opened in binary mode, so text output is encoded as
        UTF-8 first (previously this raised TypeError on Python 3).
        """
        data = self.format(collector)
        if not isinstance(data, bytes):
            data = data.encode("utf-8")
        with open(filename, "wb") as f:
            f.write(data)


class PlopFormatter(CollectorFormatter):
    """
    Formats stack frames for plop.viewer
    """
    def __init__(self, max_stacks=50):
        # Bug fix: honor the caller's limit; this used to be hard-coded to 50.
        self.max_stacks = max_stacks

    def format(self, collector):
        """Return a repr'd {stack-tuple: count} dict of the most common stacks."""
        # defaultdict instead of counter for pre-2.7 compatibility
        stack_counts = collections.defaultdict(int)
        for frames in collector.stacks:
            stack_counts[tuple(frames)] += 1
        # Keep only the max_stacks most frequently sampled stacks.
        # (.items() works on both Python 2 and 3; six is not needed here.)
        stack_counts = dict(sorted(stack_counts.items(),
                                   key=lambda kv: -kv[1])[:self.max_stacks])
        return repr(stack_counts)


class FlamegraphFormatter(CollectorFormatter):
    """
    Creates Flamegraph files
    """
    def format(self, collector):
        """Collapse consecutive identical stacks into '<stack> <count>' lines."""
        lines = []
        previous = None
        previous_count = 1
        for stack in collector.stacks:
            current = self.format_flame(stack)
            if current == previous:
                previous_count += 1
                continue
            # Bug fix: skip the initial sentinel; the old code emitted a
            # bogus "None 1" first line (and "None 1" for empty input).
            if previous is not None:
                lines.append("%s %d\n" % (previous, previous_count))
            previous = current
            previous_count = 1
        if previous is not None:
            lines.append("%s %d\n" % (previous, previous_count))
        return "".join(lines)

    def format_flame(self, stack):
        """Render one stack, root first, as 'name (file:line);...'."""
        funcs = map("{0[2]} ({0[0]}:{0[1]})".format, reversed(stack))
        return ";".join(funcs)
def main():
    """Command-line entry point: profile a target script/module, save output."""
    # TODO: more options, refactor this into somewhere shared
    # between tornado.autoreload and auto2to3
    parser = argparse.ArgumentParser(description="Plop: Python Low-Overhead Profiler",
                                     prog="python -m plop.collector",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--format", "-f", help="Output format",
                        choices=["plop", "flamegraph"], default="plop")
    parser.add_argument("--module", "-m", help="Execute target as a module",
                        action="store_const", const=True, default=False)
    parser.add_argument("--mode", help="Interval timer mode to use, see `man 2 setitimer`",
                        choices=["prof", "real", "virtual"], default="prof")
    parser.add_argument("--interval", help="Timer interval in seconds", default=0.01, type=float)
    parser.add_argument("--duration", help="Profiling duration in seconds", default=3600,
                        type=int)
    parser.add_argument("--max-stacks", help=("Number of most frequent stacks to store."
                                              " Ignored for Flamegraph output."), type=int, default=50)
    parser.add_argument("target", help="Module or script to run")
    parser.add_argument("arguments", nargs=argparse.REMAINDER,
                        help="Pass-through arguments for the profiled application")
    args = parser.parse_args()
    # Make the profiled program see its own argv, not ours.
    sys.argv = [args.target] + args.arguments
    if args.format == "flamegraph":
        extension = "flame"
        formatter = FlamegraphFormatter()
    elif args.format == "plop":
        extension = "plop"
        formatter = PlopFormatter(max_stacks=args.max_stacks)
    else:
        sys.stderr.write("Unhandled output format: %s" % args.format)
        sys.stderr.flush()
        sys.exit(1)
    if not os.path.exists('profiles'):
        os.mkdir('profiles')
    filename = 'profiles/%s-%s.%s' % (args.target, time.strftime('%Y%m%d-%H%M-%S'),
                                      extension)
    collector = Collector(mode=args.mode, interval=args.interval)
    collector.start(duration=args.duration)
    exit_code = 0
    try:
        if args.module:
            import runpy
            runpy.run_module(args.target, run_name="__main__", alter_sys=True)
        else:
            with open(args.target) as f:
                # Execute the script in our namespace instead of creating
                # a new one so that something that tries to import __main__
                # (e.g. the unittest module) will see names defined in the
                # script instead of just those defined in this module.
                global __file__
                __file__ = args.target
                # If __package__ is defined, imports may be incorrectly
                # interpreted as relative to this module.
                global __package__
                del __package__
                six.exec_(f.read(), globals(), globals())
    except SystemExit as e:
        exit_code = e.code
    collector.stop()
    if collector.samples_taken:
        formatter.store(collector, filename)
        print("profile output saved to %s" % filename)
        overhead = float(collector.sample_time) / collector.samples_taken
        # NOTE(review): the "%" figure is the raw fraction of the interval,
        # not multiplied by 100 -- confirm that is intended.
        print("overhead was %s per sample (%s%%)" % (
            overhead, overhead / collector.interval))
    else:
        print("no samples collected; program was too fast")
    # Propagate the profiled program's exit status.
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
|
|
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """Per-process P2P listen port for node *n* (avoids clashes between runs)."""
    return 11000 + n + os.getpid() % 999
def rpc_port(n):
    """Per-process RPC listen port for node *n* (avoids clashes between runs)."""
    return 12000 + n + os.getpid() % 999
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = float(Decimal("20000000.00000003"))
    round_tripped = json.loads(json.dumps(amount))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
    """
    Wait until everybody has the same block count
    """
    while True:
        heights = set(rpc.getblockcount() for rpc in rpc_connections)
        if len(heights) <= 1:
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        if all(set(rpc.getrawmempool()) == reference
               for rpc in rpc_connections[1:]):
            break
        time.sleep(wait)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create dirname/node<n> with a regtest lemoncoin.conf; return its path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1",
        "rpcuser=rt",
        "rpcpassword=rt",
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
    ]
    with open(os.path.join(datadir, "lemoncoin.conf"), 'w') as f:
        f.write("\n".join(conf_lines) + "\n")
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and bitcoin-cli must be in search path.
    """
    # NOTE(review): the generation loop below produces 2*4*15 = 120 blocks,
    # not 200 as the docstring/comments say -- confirm which is intended.
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("LEMONCOIND", "lemoncoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # Later nodes connect to node 0 so all four form one network.
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: lemoncoind started, calling lemoncoin-cli -rpcwait getblockcount"
            # -rpcwait blocks until the daemon is ready to serve RPC.
            subprocess.check_call([ os.getenv("LEMONCOINCLI", "lemoncoin-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: lemoncoin-cli -rpcwait getblockcount completed"
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(15):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_index in range(num_nodes):
        initialize_datadir(test_dir, node_index)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bitcoind and return RPC connection to it

    i: node index (selects ports and datadir dirname/node<i>)
    extra_args: additional daemon command-line flags
    rpchost: optional "host[:port]" to reach RPC on, instead of localhost
    timewait: optional RPC timeout passed to the proxy
    binary: daemon executable; defaults to $LEMONCOIND or "lemoncoind"
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("LEMONCOIND", "lemoncoind")
    args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: lemoncoind started, calling lemoncoin-cli -rpcwait getblockcount"
    # -rpcwait blocks until the daemon is ready before we hand out a proxy.
    subprocess.check_call([ os.getenv("LEMONCOINCLI", "lemoncoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: calling lemoncoin-cli -rpcwait getblockcount returned"
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    if timewait is not None:
        proxy = AuthServiceProxy(url, timeout=timewait)
    else:
        proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple bitcoinds, return RPC connections to them
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost, binary=binary[i])
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path to *logname* inside node n_node's regtest datadir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """RPC-stop node *i*, reap its process and forget the handle."""
    node.stop()
    process = bitcoind_processes[i]
    process.wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """RPC-stop every node, then drop all connections from the list."""
    for connection in nodes:
        connection.stop()
    nodes[:] = []  # emptying the list closes the connections as a side effect
def set_node_times(nodes, t):
    """Set the same mock time *t* on every node."""
    for rpc in nodes:
        rpc.setmocktime(t)
def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit
    for process in bitcoind_processes.values():
        process.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """One-shot addnode to node_num, waiting for the version handshake."""
    target = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes *a* and *b* in both directions."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, output in enumerate(txdata["vout"]):
        if output["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >= 0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    total_in = Decimal("0.00000000")
    inputs = []
    while utxo and total_in < amount_needed:
        txout = utxo.pop()
        total_in += txout["amount"]
        inputs.append({"txid": txout["txid"], "vout": txout["vout"], "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        half = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        outputs[extra_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Step 1: pay amount+fee to ourselves from confirmed inputs. The output
    # of this transaction is unconfirmed, so spending it has zero priority.
    self_address = from_node.getnewaddress()
    total_in, inputs = gather_inputs(from_node, amount + fee * 2)
    outputs = make_change(from_node, total_in, amount + fee, fee)
    outputs[self_address] = float(amount + fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    vout = find_output(from_node, self_txid, amount + fee)
    # Step 2: immediately spend that fresh output in a 1-input, 1-output
    # transaction paying to_node.
    spend_inputs = [{"txid": self_txid, "vout": vout}]
    spend_outputs = {to_node.getnewaddress(): float(amount)}
    signresult = from_node.signrawtransaction(
        from_node.createrawtransaction(spend_inputs, spend_outputs))
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    recipient = random.choice(nodes)
    # Pick one of fee_variants+1 evenly spaced fee levels at random.
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    txid, txhex = send_zeropri_transaction(sender, recipient, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    recipient = random.choice(nodes)
    # Pick one of fee_variants+1 evenly spaced fee levels at random.
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    total_in, inputs = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[recipient.getnewaddress()] = float(amount)
    signresult = sender.signrawtransaction(
        sender.createrawtransaction(inputs, outputs))
    txid = sender.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError (with both values in the message) unless thing1 == thing2."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s" % (str(thing1), str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError (with both values in the message) unless thing1 > thing2."""
    if thing1 <= thing2:
        raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises an exception of type exc."""
    try:
        fun(*args, **kwds)
    except exc:
        return  # the expected exception type was raised
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    # fun returned normally: no exception at all is also a failure
    raise AssertionError("No exception raised")
|
|
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from lib.simplejson.scanner import make_scanner
try:
from lib.simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
# Public API of this module: only the decoder class itself.
__all__ = ['JSONDecoder']

# Regex flags shared by every scanner pattern below: VERBOSE for readable
# patterns, MULTILINE/DOTALL so matching behaves across embedded newlines.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
    """Return the (line, column) position of character offset pos in doc.

    Lines are numbered from 1.  The column is the offset from the start of
    the line: 0-based on the first line, 1-based on subsequent lines
    (matching the historical simplejson behaviour).
    """
    newlines = doc.count('\n', 0, pos)
    if newlines:
        return newlines + 1, pos - doc.rindex('\n', 0, pos)
    return 1, pos
def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message with line/column context for doc[pos]
    (and for doc[end] too, when an end offset is given).

    Note that this function is called from _speedups, so its signature
    must not change.  %-formatting (rather than str.format) is kept for
    compatibility with older Python versions.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
        msg, lineno, colno, endlineno, endcolno, pos, end)
# Literal tokens that are valid JSON extensions but have no Python literal
# form; mapped to the float constants computed above.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a (possibly empty) run of ordinary characters followed by the
# closing quote, a literal control character, or a backslash that starts
# an escape sequence.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character JSON escape sequences and their decoded values.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}

# Encoding assumed for byte strings when the caller does not specify one.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote.

    ``_b`` and ``_m`` are default-argument bindings of the escape table and
    the chunk-matcher, hoisted for speed (locals are faster than globals).
    """
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1  # index of the opening quote, for error messages
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            # A raw control character: an error in strict mode, kept as-is
            # otherwise.
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                raise ValueError(errmsg(msg, s, end))
            else:
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise ValueError(errmsg(msg, s, end))
            end += 1
        else:
            # Unicode escape sequence: \uXXXX (four hex digits)
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems: a high surrogate
            # must be followed by \uXXXX encoding the low surrogate, and
            # the pair is combined into a single code point.
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use the C speedup if available, otherwise the pure-Python implementation.
scanstring = c_scanstring or py_scanstring

# Insignificant whitespace allowed between JSON tokens.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object from s starting just after its opening '{'.

    Returns (dict, index-after-'}').  ``scan_once`` parses each value;
    ``object_hook`` (if given) post-processes the finished dict.  The
    Python-2 tuple-unpacking signature matches what make_scanner calls.
    """
    pairs = {}
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting : delimiter", s, end))
        end += 1
        # Skip whitespace after ':' (fast path: at most one space before
        # falling back to the regex).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs[key] = value
        # Next significant character decides: '}' ends the object, ','
        # starts another pair.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
        # After ',' the next significant character must open a property name.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from s starting just after its opening '['.

    Returns (list, index-after-']').  ``scan_once`` parses each element.
    The Python-2 tuple-unpacking signature matches what make_scanner calls.
    """
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append  # bound method hoisted for speed in the loop
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        # Next significant character decides: ']' ends the array, ','
        # precedes another element.
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))
        # Skip whitespace after ',' (fast path: at most one space before
        # falling back to the regex).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
                 parse_int=None, parse_constant=None, strict=True):
        """``encoding`` determines the encoding used to interpret any ``str``
        objects decoded by this instance (utf-8 by default). It has no
        effect when decoding ``unicode`` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as ``unicode``.

        ``object_hook``, if specified, will be called with the result
        of every JSON object decoded and its return value will be used in
        place of the given ``dict``. This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded. By default this is equivalent to
        float(num_str). This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).

        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded. By default this is equivalent to
        int(num_str). This can be used to use another datatype or parser
        for JSON integers (e.g. float).

        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        # These attribute names are read by make_scanner (including the C
        # speedup), so they must stay exactly as-is.
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        # Anything other than trailing whitespace after the document is an error.
        end = _w(s, end).end()
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
        with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return obj, end
|
|
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flexmock import flexmock, flexmock_teardown
from hamcrest import assert_that, equal_to, is_
from netaddr import IPNetwork
from netaddr.ip import IPAddress
from netman.core.objects.vrrp_group import VrrpGroup
from tests import ExactIpNetwork
from tests.api import matches_fixture
from tests.api.base_api_test import BaseApiTest
from netman.api.api_utils import RegexConverter
from netman.api.switch_api import SwitchApi
from netman.api.switch_session_api import SwitchSessionApi
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import IPNotAvailable, UnknownIP, UnknownVlan, UnknownAccessGroup, UnknownInterface, \
UnknownSwitch, OperationNotCompleted, UnknownSession, SessionAlreadyExists
from netman.core.objects.interface import Interface
from netman.core.objects.port_modes import ACCESS, TRUNK, DYNAMIC, BOND_MEMBER
from netman.core.objects.vlan import Vlan
from netman.core.objects.bond import Bond
class SwitchApiTest(BaseApiTest):
def setUp(self):
    """Wire SwitchApi and SwitchSessionApi onto the test app with mocked collaborators."""
    super(SwitchApiTest, self).setUp()
    self.app.url_map.converters['regex'] = RegexConverter
    self.switch_factory = flexmock()
    self.switch_mock = flexmock()
    self.session_manager = flexmock()
    # No session exists by default, so every request falls back to the switch factory.
    self.session_manager.should_receive("get_switch_for_session").and_raise(UnknownSession("patate"))
    SwitchApi(self.switch_factory, self.session_manager).hook_to(self.app)
    SwitchSessionApi(self.switch_factory, self.session_manager).hook_to(self.app)
def tearDown(self):
    """Verify all flexmock expectations and restore the mocked objects."""
    flexmock_teardown()
def test_vlans_serialization(self):
    """GET /switches/<host>/vlans serializes every vlan attribute to the JSON fixture."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_vlans').and_return([
        Vlan(2, "", [IPNetwork('3.3.3.3/24'), IPNetwork('2.2.2.2/24')],
             vrrp_groups=[
                 VrrpGroup(id=1, ips=[IPAddress('2.2.2.2')], priority=100),
                 VrrpGroup(id=2, ips=[IPAddress('3.3.3.1')], priority=100)
             ],
             dhcp_relay_servers=[IPAddress("10.10.10.1")],
             icmp_redirects=True),
        Vlan(1, "One", [IPNetwork('1.1.1.1/24')], vrf_forwarding="MY_VRF", access_group_in="Blah_blah",
             vrrp_groups=[
                 VrrpGroup(id=1, ips=[IPAddress('1.1.1.2')], priority=90, hello_interval=5, dead_interval=15,
                           track_id='101', track_decrement=50)
             ],
             icmp_redirects=False),
    ]).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/vlans")
    assert_that(code, equal_to(200))
    assert_that(result, matches_fixture("get_switch_hostname_vlans.json"))
def test_single_vlan_serialization(self):
    """GET /switches/<host>/vlans/<n> serializes a single vlan to the JSON fixture."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_vlan').with_args(1).and_return(
        Vlan(1, "One", [IPNetwork('1.1.1.1/24')], vrf_forwarding="MY_VRF", access_group_in="Blah_blah",
             vrrp_groups=[
                 VrrpGroup(id=1, ips=[IPAddress('1.1.1.2')], priority=90, hello_interval=5, dead_interval=15,
                           track_id='101', track_decrement=50)
             ],
             icmp_redirects=False),
    ).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/vlans/1")
    assert_that(code, equal_to(200))
    assert_that(result, matches_fixture("get_switch_hostname_vlans_vlan.json"))
def test_get_vlans_can_be_empty(self):
    """GET /switches/<host>/vlans returns an empty JSON list when there are no vlans."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_vlans').and_return([]).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/vlans")
    assert_that(code, equal_to(200))
    assert_that(result, equal_to([]))
def test_interfaces_serialization(self):
    """GET /switches/<host>/interfaces serializes every port mode variant to the fixture."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_interfaces').and_return([
        Interface(name="FastEthernet0/3", shutdown=True, port_mode=ACCESS, access_vlan=1999),
        Interface(name="GigabitEthernet0/6", shutdown=False, port_mode=DYNAMIC, access_vlan=1999, trunk_native_vlan=2999, trunk_vlans=[3001, 3000, 3002]),
        Interface(name="ethernet 1/4", shutdown=False, port_mode=TRUNK, trunk_native_vlan=2999, trunk_vlans=[3001, 3000, 3002]),
        Interface(name="GigabitEthernet0/8", shutdown=False, bond_master=12, port_mode=BOND_MEMBER, trunk_native_vlan=None, trunk_vlans=[]),
    ]).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/interfaces")
    assert_that(code, equal_to(200))
    assert_that(result, matches_fixture("get_switch_hostname_interfaces.json"))
def test_add_vlan(self):
    """POST /switches/<host>/vlans with number and name creates the vlan (201)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_vlan').with_args(2000, "two_thousands").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans", fixture="post_switch_hostname_vlans.json")
    assert_that(code, equal_to(201))
def test_add_vlan_name_is_optionnal(self):
    """POST without a name passes None as the vlan name."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_vlan').with_args(2000, None).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans", data={"number": 2000})
    assert_that(code, equal_to(201))
def test_add_vlan_name_is_optionnal_and_can_be_specified_empty(self):
    """An empty-string name is normalized to None before reaching the switch."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_vlan').with_args(2000, None).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans", data={"number": "2000", "name": ""})
    assert_that(code, equal_to(201))
def test_add_vlan_validates_the_name_if_present(self):
    """Invalid vlan name/number payloads are rejected with 400 before touching the switch."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').never()
    # Name with spaces is invalid.
    result, code = self.post("/switches/my.switch/vlans", data={"number": 2000, "name": "deux milles"})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Vlan name is invalid'}))
    # Vlan number above the 4096 range.
    result, code = self.post("/switches/my.switch/vlans", data={"number": 4097})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Vlan number is invalid'}))
    # Vlan number below the valid range.
    result, code = self.post("/switches/my.switch/vlans", data={"number": 0})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Vlan number is invalid'}))
    # Non-numeric vlan number.
    result, code = self.post("/switches/my.switch/vlans", data={"number": "patate"})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Vlan number is invalid'}))
    # Missing vlan number entirely.
    result, code = self.post("/switches/my.switch/vlans", data={})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Vlan number is invalid'}))
    # Body that is not JSON at all.
    result, code = self.post("/switches/my.switch/vlans", raw_data="not even json")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed content, should be a JSON object'}))
def test_add_vlan_nameless(self):
    """POST with only a number creates a vlan with no name."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_vlan').with_args(2000, None).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans", data={"number": 2000})
    assert_that(code, equal_to(201))
def test_remove_vlan(self):
    """DELETE /switches/<host>/vlans/<n> removes the vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_vlan').with_args(2000).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2000")
    assert_that(code, equal_to(204))
def test_configure_switch_port_access(self):
    """PUT port-mode with body "access" calls set_access_mode on the interface."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_access_mode').with_args("FastEthernet0/4").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/port-mode", raw_data="access")
    assert_that(code, equal_to(204))
def test_configure_switch_port_trunk(self):
    """PUT port-mode with body "trunk" calls set_trunk_mode on the interface."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_trunk_mode').with_args("FastEthernet0/4").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/port-mode", raw_data="trunk")
    assert_that(code, equal_to(204))
def test_configure_switch_port_unknown(self):
    """PUT port-mode with an unknown mode is rejected with 400 and no switch call."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_switchport_mode').never()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/port-mode", raw_data="sirpatate")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Unknown port mode detected sirpatate'}))
def test_configure_switch_bond_port_access(self):
    """PUT bond port-mode "access" calls set_bond_access_mode with the bond number."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_bond_access_mode').with_args(123).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/bonds/123/port-mode", raw_data="access")
    assert_that(code, equal_to(204))
def test_configure_switch_bond_port_trunk(self):
    """PUT bond port-mode "trunk" calls set_bond_trunk_mode with the bond number."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_bond_trunk_mode').with_args(123).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/bonds/123/port-mode", raw_data="trunk")
    assert_that(code, equal_to(204))
def test_edit_bond_spanning_tree(self):
    """PUT bond spanning-tree forwards the fixture's edge flag as a keyword argument."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('edit_bond_spanning_tree').with_args(5, edge=True).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/bonds/5/spanning-tree",
                            fixture="put_switch_hostname_interfaces_intname_spanningtree.json")
    assert_that(code, equal_to(204))
def test_edit_bond_spanning_tree_optional_params(self):
    """An empty JSON body calls edit_bond_spanning_tree with no keyword arguments."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('edit_bond_spanning_tree').with_args(5).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/bonds/5/spanning-tree", raw_data="{}")
    assert_that(code, equal_to(204))
def test_edit_bond_spanning_tree_with_wrong_params(self):
    """Non-JSON bodies and unknown keys are both rejected with 400."""
    result, code = self.put("/switches/my.switch/bonds/5/spanning-tree",
                            raw_data="whizzle")
    assert_that(code, equal_to(400))
    assert_that(result['error'], is_('Malformed JSON request'))
    result, code = self.put("/switches/my.switch/bonds/5/spanning-tree",
                            raw_data='{"unknown_key": "value"}')
    assert_that(code, equal_to(400))
    assert_that(result['error'], is_('Unknown key: unknown_key'))
def test_anonymous_switch(self):
    """Netman-Model/Username/Password headers route the request through get_anonymous_switch."""
    self.switch_factory.should_receive('get_anonymous_switch').with_args(
        hostname='my.switch',
        model='cisco',
        username='root',
        password='password',
        port=None,
        netman_server=None).once().ordered().and_return(self.switch_mock)
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_vlans').once().ordered().and_return([Vlan(1, "One"), Vlan(2, "Two")])
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/vlans", headers={'Netman-Model': 'cisco', 'Netman-Username': 'root',
                                                                  'Netman-Password': 'password'})
    assert_that(code, equal_to(200))
def test_anonymous_switch_all_headers_set(self):
    """Anonymous switch access requires all three headers; any partial set yields 400."""
    result, code = self.get("/switches/my.switch/vlans", headers={'Netman-Model': 'cisco'})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'For anonymous switch usage, please specify headers: Netman-Model, Netman-Username and Netman-Password.'}))
    result, code = self.get("/switches/my.switch/vlans", headers={'Netman-Username': 'root'})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'For anonymous switch usage, please specify headers: Netman-Model, Netman-Username and Netman-Password.'}))
    result, code = self.get("/switches/my.switch/vlans", headers={'Netman-Password': 'password'})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'For anonymous switch usage, please specify headers: Netman-Model, Netman-Username and Netman-Password.'}))
def test_anonymous_switch_can_have_a_port_specified(self):
    """The Netman-Port header is parsed to an int and passed to get_anonymous_switch."""
    self.switch_factory.should_receive('get_anonymous_switch').with_args(
        hostname='my.switch',
        model='cisco',
        username='root',
        password='password',
        port=830,
        netman_server=None).once().ordered().and_return(self.switch_mock)
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_vlans').and_return([Vlan(1, "One"), Vlan(2, "Two")]).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/vlans", headers={'Netman-Model': 'cisco', 'Netman-Username': 'root',
                                                                  'Netman-Password': 'password', 'Netman-Port': '830'})
    assert_that(code, equal_to(200))
def test_anonymous_switch_port_has_to_be_integer(self):
    """A non-numeric Netman-Port header is rejected with 400."""
    result, code = self.get("/switches/my.switch/vlans", headers={'Netman-Model': 'cisco', 'Netman-Username': 'root',
                                                                  'Netman-Password': 'password', 'Netman-Port': 'bleh'})
    assert_that(code, equal_to(400))
def test_anonymous_switch_can_be_netman_proxied(self):
    """A single Netman-Proxy-Server header is forwarded as netman_server."""
    self.switch_factory.should_receive('get_anonymous_switch').with_args(
        hostname='my.switch',
        model='cisco',
        username='root',
        password='password',
        port=None,
        netman_server='1.2.3.4').once().ordered().and_return(self.switch_mock)
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_vlans').and_return([Vlan(1, "One"), Vlan(2, "Two")]).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/vlans", headers={
        'Netman-Model': 'cisco',
        'Netman-Username': 'root',
        'Netman-Password': 'password',
        'Netman-Proxy-Server': '1.2.3.4'
    })
    assert_that(code, equal_to(200))
def test_anonymous_switch_can_be_multi_netman_proxied(self):
    """A comma-separated Netman-Proxy-Server header becomes a trimmed list of servers."""
    self.switch_factory.should_receive('get_anonymous_switch').with_args(
        hostname='my.switch',
        model='cisco',
        username='root',
        password='password',
        port=None,
        netman_server=['1.2.3.4', '5.6.7.8']).once().ordered().and_return(self.switch_mock)
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_vlans').and_return([Vlan(1, "One"), Vlan(2, "Two")]).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/vlans", headers={
        'Netman-Model': 'cisco',
        'Netman-Username': 'root',
        'Netman-Password': 'password',
        'Netman-Proxy-Server': ' 1.2.3.4 , 5.6.7.8 '
    })
    assert_that(code, equal_to(200))
def test_shutdown_interface(self):
    """PUT shutdown with body "true" calls shutdown_interface."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('shutdown_interface').with_args('FastEthernet0/4').once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/shutdown", raw_data='true')
    assert_that(code, equal_to(204))
def test_openup_interface(self):
    """PUT shutdown with body "false" calls openup_interface."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('openup_interface').with_args('FastEthernet0/4').once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/shutdown", raw_data='false')
    assert_that(code, equal_to(204))
def test_shutdown_interface_invalid_argument(self):
    """A shutdown body other than true/false is rejected with 400 and no switch call."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).never()
    self.switch_mock.should_receive('connect').never()
    self.switch_mock.should_receive('shutdown_interface').with_args('FastEthernet0/4').never()
    self.switch_mock.should_receive('disconnect').never()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/shutdown", raw_data='Patate')
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Unreadable content "patate". Should be either "true" or "false"'}))
def test_shutdown_interface_no_argument(self):
    """A missing shutdown body is rejected with 400 and no switch call."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).never()
    self.switch_mock.should_receive('connect').never()
    self.switch_mock.should_receive('shutdown_interface').with_args('FastEthernet0/4').never()
    self.switch_mock.should_receive('disconnect').never()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/shutdown")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Unreadable content "". Should be either "true" or "false"'}))
def test_enable_icmp_redirects(self):
    """PUT icmp-redirects "true" enables redirects on the vlan."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_vlan_icmp_redirects_state').with_args(2500, True).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/vlans/2500/icmp-redirects", raw_data='true')
    assert_that(code, equal_to(204))
def test_disable_icmp_redirects(self):
    """PUT 'false' to /vlans/<id>/icmp-redirects disables redirects on the vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_vlan_icmp_redirects_state').with_args(2500, False).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/vlans/2500/icmp-redirects", raw_data='false')
    assert_that(code, equal_to(204))
def test_icmp_redirects_invalid_argument(self):
    """PUT a non-boolean body to /icmp-redirects returns 400 without touching the switch."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).never()
    self.switch_mock.should_receive('connect').never()
    self.switch_mock.should_receive('set_vlan_icmp_redirects_state').never()
    self.switch_mock.should_receive('disconnect').never()
    result, code = self.put("/switches/my.switch/vlans/2500/icmp-redirects", raw_data='invalid')
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Unreadable content "invalid". Should be either "true" or "false"'}))
def test_set_access_vlan(self):
    """PUT to /interfaces/<name>/access-vlan sets the access vlan (fixture body carries vlan 1000)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_access_vlan').with_args('FastEthernet0/4', 1000).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/access-vlan",
                            fixture="put_switch_hostname_interfaces_intname_accessvlan.txt")
    assert_that(code, equal_to(204))
def test_remove_access_vlan(self):
    """DELETE on /interfaces/<name>/access-vlan removes the access vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_access_vlan').with_args('FastEthernet0/4').once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/interfaces/FastEthernet0/4/access-vlan")
    assert_that(code, equal_to(204))
def test_set_bond_access_vlan(self):
    """PUT to /bonds/<number>/access-vlan sets the bond's access vlan (shares the interface fixture)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_bond_access_vlan').with_args(4, 1000).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/bonds/4/access-vlan",
                            fixture="put_switch_hostname_interfaces_intname_accessvlan.txt")
    assert_that(code, equal_to(204))
def test_remove_bond_access_vlan(self):
    """DELETE on /bonds/<number>/access-vlan removes the bond's access vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_bond_access_vlan').with_args(4).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/bonds/4/access-vlan")
    assert_that(code, equal_to(204))
def test_invalid_set_access_vlan(self):
    """PUT a non-numeric vlan to /access-vlan returns 400 without touching the switch."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).never()
    self.switch_mock.should_receive('set_access_vlan').never()
    self.switch_mock.should_receive('connect').never()
    self.switch_mock.should_receive('disconnect').never()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/access-vlan", raw_data='patate')
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Vlan number is invalid'}))
def test_add_trunk_vlan(self):
    """POST to /interfaces/<name>/trunk-vlans adds a trunk vlan (fixture body carries vlan 1000)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_trunk_vlan').with_args('FastEthernet0/4', 1000).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/interfaces/FastEthernet0/4/trunk-vlans",
                             fixture="post_switch_hostname_interfaces_intname_trunkvlans.txt")
    assert_that(code, equal_to(204))
def test_remove_trunk_vlans(self):
    """DELETE on /trunk-vlans/<vlan> removes that trunk vlan from the interface (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_trunk_vlan').with_args('FastEthernet0/4', 2999).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/interfaces/FastEthernet0/4/trunk-vlans/2999")
    assert_that(code, equal_to(204))
def test_add_bond_trunk_vlan(self):
    """POST to /bonds/<number>/trunk-vlans adds a trunk vlan to the bond (shares the interface fixture)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_bond_trunk_vlan').with_args(123, 1000).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/bonds/123/trunk-vlans",
                             fixture="post_switch_hostname_interfaces_intname_trunkvlans.txt")
    assert_that(code, equal_to(204))
def test_remove_bond_trunk_vlans(self):
    """DELETE on /bonds/<number>/trunk-vlans/<vlan> removes that trunk vlan from the bond (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_bond_trunk_vlan').with_args(123, 2999).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/bonds/123/trunk-vlans/2999")
    assert_that(code, equal_to(204))
def test_invalid_add_trunk_vlan(self):
    """POST a non-numeric vlan to /trunk-vlans returns 400 without touching the switch."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).never()
    # Fixed: was `.once().never()`, a contradictory expectation chain; a negative
    # test must only assert `.never()` (matches the other invalid-input tests).
    self.switch_mock.should_receive('connect').never()
    self.switch_mock.should_receive('add_trunk_vlan').never()
    self.switch_mock.should_receive('disconnect').never()
    result, code = self.post("/switches/my.switch/interfaces/FastEthernet0/4/trunk-vlans", raw_data='patate')
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Vlan number is invalid'}))
def test_invalid_remove_trunk_vlan(self):
    """DELETE with a non-numeric vlan in the URL returns 400 before any switch operation."""
    # get_switch is stubbed without a count: the URL may still resolve the switch
    # before the vlan segment is validated — only connect/remove must never happen.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock)
    self.switch_mock.should_receive('connect').never()
    self.switch_mock.should_receive('remove_trunk_vlan').never()
    self.switch_mock.should_receive('disconnect').never()
    result, code = self.delete("/switches/my.switch/interfaces/FastEthernet0/4/trunk-vlans/patate")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Vlan number is invalid'}))
def test_configure_native_vlan_on_trunk(self):
    """PUT to /interfaces/<name>/trunk-native-vlan configures the native vlan (fixture carries 2999)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('configure_native_vlan').with_args('FastEthernet0/4', 2999).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/trunk-native-vlan",
                            fixture="put_switch_hostname_interfaces_intname_nativevlan.txt")
    assert_that(code, equal_to(204))
def test_configure_bond_native_vlan_on_trunk(self):
    """PUT to /bonds/<number>/trunk-native-vlan configures the bond's native vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('configure_bond_native_vlan').with_args(123, 2999).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/bonds/123/trunk-native-vlan",
                            fixture="put_switch_hostname_interfaces_intname_nativevlan.txt")
    assert_that(code, equal_to(204))
def test_remove_bond_native_vlan_on_trunk(self):
    """DELETE on /bonds/<number>/trunk-native-vlan removes the bond's native vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_bond_native_vlan').with_args(123).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/bonds/123/trunk-native-vlan")
    assert_that(code, equal_to(204))
def test_add_ip_json(self):
    """POST a JSON {address, mask} body to /vlans/<id>/ips adds the IP (201)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_ip_to_vlan').with_args(2500, ExactIpNetwork("1.2.3.4", 25)).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans/2500/ips", fixture="post_switch_hostname_vlans_vlanid_ips.json")
    assert_that(code, equal_to(201))
def test_add_ip_ipnetwork(self):
    """POST a plain 'x.x.x.x/xx' text body to /vlans/<id>/ips adds the same IP (201)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_ip_to_vlan').with_args(2500, ExactIpNetwork("1.2.3.4", 25)).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans/2500/ips", fixture="post_switch_hostname_vlans_vlanid_ips.txt")
    assert_that(code, equal_to(201))
def test_add_ip_malformed_request(self):
    """Every malformed /ips body (junk text, empty/partial/invalid JSON) returns 400 with the same error."""
    self.switch_factory.should_receive('get_switch').never()
    # Free-form text that is neither JSON nor an ip/mask string.
    result, code = self.post("/switches/my.switch/vlans/2500/ips", raw_data="not json and not ip network")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed content, should be : x.x.x.x/xx or {"address": "x.x.x.x", "mask": "xx"}'}))
    # Empty JSON object.
    result, code = self.post("/switches/my.switch/vlans/2500/ips", data={})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed content, should be : x.x.x.x/xx or {"address": "x.x.x.x", "mask": "xx"}'}))
    # Missing mask.
    result, code = self.post("/switches/my.switch/vlans/2500/ips", data={"address": "1.1.1.1"})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed content, should be : x.x.x.x/xx or {"address": "x.x.x.x", "mask": "xx"}'}))
    # Missing address.
    result, code = self.post("/switches/my.switch/vlans/2500/ips", data={"mask": "25"})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed content, should be : x.x.x.x/xx or {"address": "x.x.x.x", "mask": "xx"}'}))
    # Unparseable address.
    result, code = self.post("/switches/my.switch/vlans/2500/ips", data={"address": "not an ip", "mask": 25})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed content, should be : x.x.x.x/xx or {"address": "x.x.x.x", "mask": "xx"}'}))
    # Unparseable mask.
    result, code = self.post("/switches/my.switch/vlans/2500/ips", data={"address": "1.1.1.1", "mask": "not a mask"})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed content, should be : x.x.x.x/xx or {"address": "x.x.x.x", "mask": "xx"}'}))
def test_add_ip_not_available(self):
    """An IPNotAvailable raised by the switch maps to HTTP 409 with a descriptive error."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_ip_to_vlan').with_args(2500, ExactIpNetwork("1.2.3.4", 25)).once().ordered()\
        .and_raise(IPNotAvailable(IPNetwork("1.2.3.4/25")))
    # disconnect still expected: the API must release the session even on failure.
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans/2500/ips", data={"address": "1.2.3.4", "mask": 25})
    assert_that(code, equal_to(409))
    assert_that(result, equal_to({'error': 'IP 1.2.3.4/25 is not available in this vlan'}))
def test_add_ip_unknown_vlan(self):
    """An UnknownVlan raised by the switch maps to HTTP 404 with a 'Vlan not found' error."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_ip_to_vlan').with_args(2500, ExactIpNetwork("1.2.3.4", 25)).once().ordered()\
        .and_raise(UnknownVlan('2500'))
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans/2500/ips", data={"address": "1.2.3.4", "mask": 25})
    assert_that(code, equal_to(404))
    assert_that(result, equal_to({'error': 'Vlan 2500 not found'}))
def test_remove_ip(self):
    """DELETE on /vlans/<id>/ips/<ip>/<mask> removes the IP from the vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_ip_from_vlan').with_args(2500, ExactIpNetwork("1.2.3.4", 25)).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/ips/1.2.3.4/25")
    assert_that(code, equal_to(204))
def test_remove_ip_unknown_vlan(self):
    """Removing an IP on an unknown vlan maps UnknownVlan to HTTP 404."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_ip_from_vlan').with_args(2500, ExactIpNetwork("1.2.3.4", 25)).once().ordered()\
        .and_raise(UnknownVlan('2500'))
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/ips/1.2.3.4/25")
    assert_that(code, equal_to(404))
    assert_that(result, equal_to({'error': 'Vlan 2500 not found'}))
def test_remove_unknown_ip(self):
    """Removing an IP that is not on the vlan maps UnknownIP to HTTP 404."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_ip_from_vlan').with_args(2500, ExactIpNetwork("1.2.3.4", 25)).once().ordered()\
        .and_raise(UnknownIP(IPNetwork("1.2.3.4/25")))
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/ips/1.2.3.4/25")
    assert_that(code, equal_to(404))
    assert_that(result, equal_to({'error': 'IP 1.2.3.4/25 not found'}))
def test_remove_ip_malformed_url(self):
    """Malformed ip/mask URL segments are rejected with 400 before connecting to the switch."""
    # get_switch may be resolved while routing; only connect must never happen.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock)
    self.switch_mock.should_receive('connect').never()
    result, code = self.delete("/switches/my.switch/vlans/2500/ips/wat")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed IP, should be : x.x.x.x/xx'}))
    result, code = self.delete("/switches/my.switch/vlans/2500/ips/1.1.1.")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed IP, should be : x.x.x.x/xx'}))
    result, code = self.delete("/switches/my.switch/vlans/2500/ips/1.1.1/")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed IP, should be : x.x.x.x/xx'}))
def test_add_vrrp_group_json(self):
    """POST a full VRRP group definition to /vlans/<id>/vrrp-groups creates it (201)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    # All optional parameters are present in the fixture and forwarded as keywords.
    self.switch_mock.should_receive('add_vrrp_group').with_args(
        vlan_number=2500, group_id=2,
        ips=[IPAddress("10.10.0.1"), IPAddress("10.10.0.2"), IPAddress("10.10.0.3")],
        priority=100,
        hello_interval=5,
        dead_interval=15,
        track_decrement=50,
        track_id="101",
    ).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans/2500/vrrp-groups",
                             fixture="post_switch_hostname_vlans_vlanid_vrrp_groups.json")
    assert_that(code, equal_to(201))
def test_add_partial_vrrp_group_json(self):
    """A VRRP group body with only id/ips/priority forwards None for all omitted options."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_vrrp_group').with_args(
        vlan_number=2500, group_id=2,
        ips=[IPAddress("10.10.0.1"), IPAddress("10.10.0.2"), IPAddress("10.10.0.3")],
        priority=100,
        # Omitted optional fields must default to None, not be dropped.
        hello_interval=None,
        dead_interval=None,
        track_decrement=None,
        track_id=None,
    ).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans/2500/vrrp-groups",
                             data={
                                 "id": 2,
                                 "ips": ["10.10.0.1", "10.10.0.2", "10.10.0.3"],
                                 "priority": 100,
                             })
    assert_that(code, equal_to(201))
def test_add_vrrp_group_malformed_request(self):
    """Malformed VRRP bodies return 400 with a message matching the specific validation failure."""
    self.switch_factory.should_receive('get_switch').never()
    # Not JSON at all.
    result, code = self.post("/switches/my.switch/vlans/2500/vrrp-groups", raw_data="not json")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed content, should be a JSON object'}))
    # Missing mandatory group id.
    result, code = self.post("/switches/my.switch/vlans/2500/vrrp-groups", data={})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'VRRP group id is mandatory'}))
    # Garbage IP address.
    result, code = self.post("/switches/my.switch/vlans/2500/vrrp-groups",
                             data={"id": 2, "ips": ["dwwdqdw"], "priority": 100})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Incorrect IP Address: "dwwdqdw", should be x.x.x.x'}))
    # CIDR notation is rejected here: a bare address is required.
    result, code = self.post("/switches/my.switch/vlans/2500/vrrp-groups",
                             data={"id": 2, "ips": ["10.10.0.1/32"], "priority": 100})
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Incorrect IP Address: "10.10.0.1/32", should be x.x.x.x'}))
def test_remove_vrrp_group(self):
    """DELETE on /vlans/<id>/vrrp-groups/<group> removes the VRRP group (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_vrrp_group').with_args(2500, 4).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/vrrp-groups/4")
    assert_that(code, equal_to(204))
def test_add_dhcp_relay_server(self):
    """POST an IP to /vlans/<id>/dhcp-relay-server adds a relay server (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_dhcp_relay_server').with_args(
        vlan_number=2500,
        ip_address=IPAddress("10.10.10.1"),
    ).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/vlans/2500/dhcp-relay-server",
                             fixture="put_switch_hostname_vlans_vlanid_dhcp_relay_server.txt")
    assert_that(code, equal_to(204))
def test_remove_dhcp_relay_server(self):
    """DELETE on /dhcp-relay-server/<ip> removes that relay server from the vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_dhcp_relay_server').with_args(
        vlan_number=2500,
        ip_address=IPAddress('10.10.10.1')
    ).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/dhcp-relay-server/10.10.10.1")
    assert_that(code, equal_to(204))
def test_add_dhcp_relay_server_malformed_data(self):
    """POST a non-IP body to /dhcp-relay-server returns 400."""
    result, code = self.post("/switches/my.switch/vlans/2500/dhcp-relay-server", raw_data="NOT AN IP")
    assert_that(code, equal_to(400))
def test_remove_dhcp_relay_server_malformed_data(self):
    """DELETE with a non-IP URL segment on /dhcp-relay-server returns 400."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/dhcp-relay-server/NOT_AN_IP")
    assert_that(code, equal_to(400))
def test_enable_lldp(self):
    """PUT 'true' to /interfaces/<name>/lldp enables LLDP on the interface (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('enable_lldp').with_args("FastEthernet0/4", True).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/lldp", raw_data="true")
    assert_that(code, equal_to(204))
def test_disable_lldp(self):
    """PUT 'false' to /interfaces/<name>/lldp disables LLDP via the same enable_lldp call (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('enable_lldp').with_args("FastEthernet0/4", False).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/lldp", raw_data="false")
    assert_that(code, equal_to(204))
def test_put_access_groups_in(self):
    """PUT to /vlans/<id>/access-groups/in sets the inbound access group (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_vlan_access_group').with_args(2500, IN, "spaceless_string").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/vlans/2500/access-groups/in",
                            fixture="put_switch_hostname_vlans_vlanid_accessgroups_in.txt")
    assert_that(code, equal_to(204))
def test_put_access_groups_out(self):
    """PUT to /vlans/<id>/access-groups/out sets the outbound access group (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_vlan_access_group').with_args(2500, OUT, "spaceless_string").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    # Reuses the "in" fixture: the direction comes from the URL, not the body.
    result, code = self.put("/switches/my.switch/vlans/2500/access-groups/out",
                            fixture="put_switch_hostname_vlans_vlanid_accessgroups_in.txt")
    assert_that(code, equal_to(204))
def test_put_access_groups_malformed_body(self):
    """Bodies with spaces or empty bodies give 400; an unknown direction segment gives 404."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    result, code = self.put("/switches/my.switch/vlans/2500/access-groups/out", raw_data="Hey hey")
    assert_that(code, equal_to(400))
    result, code = self.put("/switches/my.switch/vlans/2500/access-groups/out", raw_data="")
    assert_that(code, equal_to(400))
    result, code = self.put("/switches/my.switch/vlans/2500/access-groups/notin", raw_data="good")
    assert_that(code, equal_to(404))
def test_put_access_groups_vlan_not_found(self):
    """Setting an access group on an unknown vlan maps UnknownVlan to HTTP 404."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_vlan_access_group').with_args(2500, OUT, "spaceless_string").once().ordered().and_raise(UnknownVlan('2500'))
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/vlans/2500/access-groups/out",
                            fixture="put_switch_hostname_vlans_vlanid_accessgroups_in.txt")
    assert_that(code, equal_to(404))
    assert_that(result, equal_to({'error': 'Vlan 2500 not found'}))
def test_put_access_groups_vlan_wrong_name(self):
    """A ValueError from the switch (invalid group name) maps to HTTP 400 with its message."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_vlan_access_group').with_args(2500, OUT, "blablabla").once().ordered()\
        .and_raise(ValueError('Access group name \"blablabla\" is invalid'))
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/vlans/2500/access-groups/out", raw_data="blablabla")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Access group name \"blablabla\" is invalid'}))
def test_delete_access_groups_in(self):
    """DELETE on /vlans/<id>/access-groups/in removes the inbound access group (204).

    Renamed from ``delete_put_access_groups_in``: without the ``test_``
    prefix the test runner never discovered or executed this test.
    """
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_vlan_access_group').with_args(2500, IN).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/access-groups/in")
    assert_that(code, equal_to(204))
def test_delete_access_groups_out(self):
    """DELETE on /vlans/<id>/access-groups/out removes the outbound access group (204).

    Renamed from ``delete_put_access_groups_out``: without the ``test_``
    prefix the test runner never discovered or executed this test.
    """
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_vlan_access_group').with_args(2500, OUT).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/access-groups/out")
    assert_that(code, equal_to(204))
def test_delete_access_groups_vlan_not_found(self):
    """Removing an access group on an unknown vlan maps UnknownVlan to HTTP 404."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_vlan_access_group').with_args(2500, OUT).once().ordered()\
        .and_raise(UnknownVlan('2500'))
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/access-groups/out")
    assert_that(code, equal_to(404))
    assert_that(result, equal_to({'error': 'Vlan 2500 not found'}))
def test_delete_access_groups_not_found(self):
    """Removing a missing access group maps UnknownAccessGroup to HTTP 404."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    # The raised exception carries IN while the request targets /out; the
    # resulting message ("Inbound ...") comes from the exception's direction.
    self.switch_mock.should_receive('remove_vlan_access_group').with_args(2500, OUT).once().ordered()\
        .and_raise(UnknownAccessGroup(IN))
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/access-groups/out")
    assert_that(code, equal_to(404))
    assert_that(result, equal_to({'error': 'Inbound IP access group not found'}))
def test_delete_access_groups_malformed_request(self):
    """DELETE on an unknown access-group direction segment returns 404."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/access-groups/notout")
    assert_that(code, equal_to(404))
def test_set_vlan_vrf(self):
    """PUT a vrf name to /vlans/<id>/vrf-forwarding assigns it to the vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_vlan_vrf').with_args(2500, "DEFAULT_LAN").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/vlans/2500/vrf-forwarding", raw_data="DEFAULT_LAN")
    assert_that(code, equal_to(204))
def test_remove_vlan_vrf(self):
    """DELETE on /vlans/<id>/vrf-forwarding removes the vrf from the vlan (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_vlan_vrf').with_args(2500).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/vlans/2500/vrf-forwarding")
    assert_that(code, equal_to(204))
def test_bonds_serialization_v1(self):
    """GET /bonds (default API version) serializes the bond list per the v1 fixture."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    # Three bonds exercising access, dynamic and trunk port modes.
    self.switch_mock.should_receive('get_bonds').once().ordered().and_return([
        Bond(
            number=3,
            link_speed='1g',
            shutdown=True,
            port_mode=ACCESS,
            access_vlan=1999),
        Bond(
            number=6,
            link_speed='10g',
            shutdown=False,
            port_mode=DYNAMIC,
            access_vlan=1999,
            trunk_native_vlan=2999,
            # Unsorted on purpose — serialization order is pinned by the fixture.
            trunk_vlans=[3001, 3000, 3002]),
        Bond(
            number=4,
            members=["ge-0/0/1", "ge-1/0/1"],
            shutdown=False,
            port_mode=TRUNK,
            trunk_native_vlan=2999,
            trunk_vlans=[3001, 3000, 3002]),
    ])
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/bonds")
    assert_that(code, equal_to(200))
    assert_that(result, matches_fixture("get_switch_hostname_bonds_v1.json"))
def test_get_bond_is_correctly_serialized_v1(self):
    """GET /bonds/<number> (default API version) serializes a single bond per the v1 fixture."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_bond').with_args(3).once().ordered().and_return(
        Bond(
            number=3,
            link_speed='1g',
            shutdown=True,
            port_mode=ACCESS,
            access_vlan=1999)
    )
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/bonds/3")
    assert_that(code, equal_to(200))
    assert_that(result, matches_fixture("get_switch_hostname_bond_v1.json"))
def test_bonds_serialization_v2(self):
    """GET /bonds with Netman-Max-Version: 2 serializes the same bonds per the v2 fixture."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    # Identical bond set to the v1 test — only the negotiated wire format differs.
    self.switch_mock.should_receive('get_bonds').once().ordered().and_return([
        Bond(
            number=3,
            link_speed='1g',
            shutdown=True,
            port_mode=ACCESS,
            access_vlan=1999),
        Bond(
            number=6,
            link_speed='10g',
            shutdown=False,
            port_mode=DYNAMIC,
            access_vlan=1999,
            trunk_native_vlan=2999,
            trunk_vlans=[3001, 3000, 3002]),
        Bond(
            number=4,
            members=["ge-0/0/1", "ge-1/0/1"],
            shutdown=False,
            port_mode=TRUNK,
            trunk_native_vlan=2999,
            trunk_vlans=[3001, 3000, 3002]),
    ])
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/bonds",
                            headers={"Netman-Max-Version": "2"})
    assert_that(code, equal_to(200))
    assert_that(result, matches_fixture("get_switch_hostname_bonds_v2.json"))
def test_get_bond_is_correctly_serialized_v2(self):
    """GET /bonds/<number> with Netman-Max-Version: 2 serializes a single bond per the v2 fixture."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('get_bond').with_args(3).once().ordered().and_return(
        Bond(
            number=3,
            link_speed='1g',
            shutdown=True,
            port_mode=ACCESS,
            access_vlan=1999)
    )
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.get("/switches/my.switch/bonds/3",
                            headers={"Netman-Max-Version": "2"})
    assert_that(code, equal_to(200))
    assert_that(result, matches_fixture("get_switch_hostname_bond_v2.json"))
def test_add_bond(self):
    """POST to /bonds creates a bond (fixture body carries bond number 55) and returns 201."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_bond').with_args(55).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.post("/switches/my.switch/bonds", fixture="post_switch_hostname_bonds.json")
    assert_that(code, equal_to(201))
def test_remove_bond(self):
    """DELETE on /bonds/<number> removes the bond (204)."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_bond').with_args(55).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.delete("/switches/my.switch/bonds/55")
    assert_that(code, equal_to(204))
def test_remove_bond_bad_number(self):
    """DELETE with a non-numeric bond number in the URL returns 400."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    result, code = self.delete("/switches/my.switch/bonds/allo")
    assert_that(code, equal_to(400))
def test_set_bond_link_speed(self):
    """PUT to /bonds/<number>/link-speed sets the speed (fixture body carries '1g')."""
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_bond_link_speed').with_args(4, '1g').once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()
    result, code = self.put("/switches/my.switch/bonds/4/link-speed", fixture="put_switch_hostname_bonds_link_speed.txt")
    assert_that(code, equal_to(204))
def test_set_bond_link_speed_bad_speed(self):
    """PUT an unparseable speed string to /link-speed returns 400."""
    result, code = self.put("/switches/my.switch/bonds/4/link-speed", raw_data="9001pb")
    assert_that(code, equal_to(400))
    assert_that(result, equal_to({'error': 'Malformed bond link speed'}))
def test_add_interface_to_bond(self):
    # PUT /switches/<hostname>/interfaces/<interface>/bond-master enrolls the interface
    # into bond 10 (bond id taken from the fixture) -> 204.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('add_interface_to_bond').with_args('FastEthernet0/4', 10).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/bond-master",
                            fixture="put_switch_hostname_interfaces_bond_master.txt")

    assert_that(code, equal_to(204))

def test_remove_interface_from_bond(self):
    # DELETE .../bond-master detaches the interface from its bond -> 204.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_interface_from_bond').with_args('FastEthernet0/4').once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.delete("/switches/my.switch/interfaces/FastEthernet0/4/bond-master")

    assert_that(code, equal_to(204))
def test_set_interface_description(self):
    # PUT .../interfaces/<interface>/description takes the raw request body as the description -> 204.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_interface_description').with_args("FastEthernet0/4", "Resistance is futile").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/description", raw_data="Resistance is futile")

    assert_that(code, equal_to(204))

def test_remove_interface_description(self):
    # DELETE .../interfaces/<interface>/description clears the description -> 204.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_interface_description').with_args("FastEthernet0/4").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.delete("/switches/my.switch/interfaces/FastEthernet0/4/description")

    assert_that(code, equal_to(204))

def test_set_bond_description(self):
    # Same as the interface variant, but targets a bond by number (123) -> 204.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_bond_description').with_args(123, "Resistance is futile").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.put("/switches/my.switch/bonds/123/description", raw_data="Resistance is futile")

    assert_that(code, equal_to(204))

def test_remove_bond_description(self):
    # DELETE .../bonds/<number>/description clears the bond description -> 204.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_bond_description').with_args(123).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.delete("/switches/my.switch/bonds/123/description")

    assert_that(code, equal_to(204))
def test_edit_interface_spanning_tree(self):
    # PUT .../spanning-tree with a JSON body; the fixture is expected to translate
    # into the keyword argument edge=True on the switch call -> 204.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('edit_interface_spanning_tree').with_args("FastEthernet0/4", edge=True).once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/spanning-tree",
                            fixture="put_switch_hostname_interfaces_intname_spanningtree.json")

    assert_that(code, equal_to(204))

def test_edit_interface_spanning_tree_optional_params(self):
    # An empty JSON object ("{}") means no optional kwargs are forwarded -> 204.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('edit_interface_spanning_tree').with_args("FastEthernet0/4").once().ordered()
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/spanning-tree", raw_data="{}")

    assert_that(code, equal_to(204))

def test_edit_interface_spanning_tree_with_wrong_params(self):
    # Two bad-input cases in one test, both rejected with 400 before touching the switch:
    # a body that is not JSON at all, and valid JSON carrying an unsupported key.
    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/spanning-tree",
                            raw_data="whizzle")

    assert_that(code, equal_to(400))
    assert_that(result['error'], is_('Malformed JSON request'))

    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/spanning-tree",
                            raw_data='{"unknown_key": "value"}')

    assert_that(code, equal_to(400))
    assert_that(result['error'], is_('Unknown key: unknown_key'))
def test_uncaught_exceptions_are_formatted_correctly(self):
    # Without the Netman-Verbose-Errors header, an exception raised by the switch
    # operation surfaces as 500 with just {"error": <message>}.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_access_vlan').with_args('FastEthernet0/4', 1000).once().ordered()\
        .and_raise(Exception("SHIZZLE"))
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/access-vlan",
                            fixture="put_switch_hostname_interfaces_intname_accessvlan.txt")

    assert_that(code, is_(500))
    assert_that(result, is_({"error": "SHIZZLE"}))

def test_raised_exceptions_are_marshalled_correctly(self):
    # With Netman-Verbose-Errors: a known domain exception (UnknownInterface) maps to
    # its HTTP status (404) and the payload carries error-module/error-class so the
    # client can rebuild the exact exception type.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_access_vlan').with_args('FastEthernet0/4', 1000).once().ordered()\
        .and_raise(UnknownInterface("SHIZZLE"))
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/access-vlan",
                            fixture="put_switch_hostname_interfaces_intname_accessvlan.txt",
                            headers={"Netman-Verbose-Errors": "yes"})

    assert_that(code, is_(404))
    assert_that(result, is_({
        "error": "Unknown interface SHIZZLE",
        "error-module": UnknownInterface.__module__,
        "error-class": UnknownInterface.__name__,
    }))

def test_raised_base_exceptions_are_marshalled_correctly(self):
    # A plain builtin Exception marshals with error-class only (no error-module) and 500.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_access_vlan').with_args('FastEthernet0/4', 1000).once().ordered() \
        .and_raise(Exception("ERMAHGERD"))
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/access-vlan",
                            fixture="put_switch_hostname_interfaces_intname_accessvlan.txt",
                            headers={"Netman-Verbose-Errors": "yes"})

    assert_that(code, is_(500))
    assert_that(result, is_({
        "error": "ERMAHGERD",
        "error-class": "Exception",
    }))

def test_raised_not_implemented_error_are_marshalled_correctly(self):
    # NotImplementedError maps to 501 Not Implemented with an empty message.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('set_access_vlan').with_args('FastEthernet0/4', 1000).once().ordered() \
        .and_raise(NotImplementedError())
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.put("/switches/my.switch/interfaces/FastEthernet0/4/access-vlan",
                            fixture="put_switch_hostname_interfaces_intname_accessvlan.txt",
                            headers={"Netman-Verbose-Errors": "yes"})

    assert_that(code, is_(501))
    assert_that(result, is_({
        "error": "",
        "error-class": "NotImplementedError",
    }))
def test_open_session(self):
    # POST /switches-sessions/<id> resolves the switch then delegates to the session
    # manager; success -> 201 with the session payload from the result fixture.
    session_id = 'patate'
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.session_manager.should_receive('open_session').with_args(self.switch_mock, session_id).and_return(session_id).once().ordered()

    result, code = self.post("/switches-sessions/patate", fixture="post_switch_session.json")

    assert_that(result, matches_fixture("post_switch_session_result.json"))
    assert_that(code, equal_to(201))

def test_duplicate_session(self):
    # Re-opening an existing session id -> 409 Conflict with an explanatory message.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.session_manager.should_receive('open_session').with_args(self.switch_mock, 'patate')\
        .and_raise(SessionAlreadyExists('patate'))

    result, code = self.post("/switches-sessions/patate", fixture="post_switch_session.json")

    assert_that(code, equal_to(409))
    assert_that(result['error'], is_('Session ID already exists: patate'))

def test_close_session(self):
    # DELETE /switches-sessions/<id> -> 204 once the session manager closes it.
    session_uuid = 'patate'
    self.session_manager.should_receive("get_switch_for_session").with_args(session_uuid).and_return(self.switch_mock)
    self.session_manager.should_receive('close_session').with_args(session_uuid).once().ordered()

    result, code = self.delete("/switches-sessions/" + session_uuid)

    assert_that(code, equal_to(204))

def test_session_commit(self):
    # POST .../actions with raw body "commit" commits the pending session -> 204.
    session_uuid = 'poisson'
    self.session_manager.should_receive("get_switch_for_session").with_args(session_uuid).and_return(self.switch_mock)
    self.session_manager.should_receive('commit_session').with_args(session_uuid).once().ordered()

    result, code = self.post("/switches-sessions/{}/actions".format(session_uuid), raw_data="commit")

    # str(result) is passed as the assertion message to aid debugging on failure.
    assert_that(code, equal_to(204), str(result))

def test_session_rollback(self):
    # POST .../actions with raw body "rollback" discards the pending session -> 204.
    session_uuid = 'poisson'
    self.session_manager.should_receive("get_switch_for_session").with_args(session_uuid).and_return(self.switch_mock)
    self.session_manager.should_receive('rollback_session').with_args(session_uuid).once().ordered()

    result, code = self.post("/switches-sessions/{}/actions".format(session_uuid), raw_data="rollback")

    assert_that(code, equal_to(204), str(result))

def test_unknown_session(self):
    # Operating on a session id that was never opened -> 404 with a "not found" message.
    session_uuid = 'patate'

    result, code = self.post("/switches-sessions/{}/vlans".format(session_uuid), data={"number": 2000})

    assert_that(code, equal_to(404))
    assert_that(result['error'], is_("Session \"%s\" not found." % session_uuid))

def test_open_session_with_malformed_post_data(self):
    # Opening a session without the expected body shape -> 400.
    result, code = self.post("/switches-sessions/session_me_timbers", data={"bad_data": 666})

    assert_that(code, is_(400))
    assert_that(result['error'], is_('Malformed switch session request'))

def test_open_session_unknown_switch(self):
    # Opening a session for an unconfigured switch -> 404 from UnknownSwitch.
    self.switch_factory.should_receive('get_switch').with_args('bad_hostname').and_raise(UnknownSwitch(name='bad_hostname'))

    result, code = self.post("/switches-sessions/session-me-timbers", data={"hostname": 'bad_hostname'})

    assert_that(code, is_(404))
    assert_that(result['error'], is_("Switch \"{0}\" is not configured".format('bad_hostname')))

def test_close_session_with_error(self):
    # If close_session raises OperationNotCompleted the API answers 500.
    session_uuid = 'patate'
    self.session_manager.should_receive("get_switch_for_session").with_args(session_uuid).and_return(self.switch_mock)
    self.session_manager.should_receive('close_session').with_args(session_uuid).once().ordered()\
        .and_raise(OperationNotCompleted())

    result, code = self.delete("/switches-sessions/" + session_uuid)

    assert_that(code, equal_to(500))
def test_an_error_inside_a_session_call_is_properly_relayed(self):
    # An exception raised by an operation executed THROUGH a session must surface with
    # the same status/message as a direct call. First open the session, then fail a call.
    session_uuid = 'patate'
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.session_manager.should_receive("open_session").with_args(self.switch_mock, 'patate').once()\
        .and_return(session_uuid)

    result, code = self.post("/switches-sessions/patate", fixture="post_switch_session.json")

    assert_that(code, equal_to(201))
    assert_that(result['session_id'], session_uuid)

    # Session calls also refresh the session's keep-alive timer.
    self.session_manager.should_receive("get_switch_for_session").with_args(session_uuid).and_return(self.switch_mock)
    self.session_manager.should_receive("keep_alive").with_args(session_uuid).once()
    self.switch_mock.should_receive('set_vlan_access_group').with_args(2500, OUT, "spaceless_string").once()\
        .and_raise(UnknownVlan('2500')).ordered()

    result, code = self.put("/switches-sessions/{}/vlans/2500/access-groups/out".format(session_uuid),
                            fixture="put_switch_hostname_vlans_vlanid_accessgroups_in.txt")

    assert_that(code, equal_to(404))
    assert_that(result, equal_to({'error': 'Vlan 2500 not found',}))

def test_an_error_inside_a_session_call_is_properly_relayed_with_exception_marshalling_when_requested(self):
    # Same scenario, but with Netman-Verbose-Errors set the relayed error also carries
    # error-module/error-class for client-side exception reconstruction.
    session_uuid = 'patate'
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.session_manager.should_receive("open_session").with_args(self.switch_mock, session_uuid).once()\
        .and_return(session_uuid)

    result, code = self.post("/switches-sessions/patate", fixture="post_switch_session.json")

    assert_that(code, equal_to(201))
    assert_that(result['session_id'], session_uuid)

    self.session_manager.should_receive("get_switch_for_session").with_args(session_uuid).and_return(self.switch_mock)
    self.session_manager.should_receive("keep_alive").with_args(session_uuid).once().ordered()
    self.switch_mock.should_receive('set_vlan_access_group').with_args(2500, OUT, "spaceless_string").once().ordered()\
        .and_raise(UnknownVlan('2500'))

    result, code = self.put("/switches-sessions/{}/vlans/2500/access-groups/out".format(session_uuid),
                            fixture="put_switch_hostname_vlans_vlanid_accessgroups_in.txt",
                            headers={"Netman-Verbose-Errors": "yes"})

    assert_that(code, equal_to(404))
    assert_that(result, equal_to({
        'error': 'Vlan 2500 not found',
        "error-module": UnknownVlan.__module__,
        "error-class": UnknownVlan.__name__,
    }))

def test_an_error_without_a_message_is_given_one_containing_the_error_name_and_module(self):
    # An exception with an empty message gets a synthesized
    # "Unexpected error: <module>.<class>" message so the client never sees a blank error.
    self.switch_factory.should_receive('get_switch').with_args('my.switch').and_return(self.switch_mock).once().ordered()
    self.switch_mock.should_receive('connect').once().ordered()
    self.switch_mock.should_receive('remove_vlan_access_group').with_args(2500, OUT).once().ordered()\
        .and_raise(EmptyException())
    self.switch_mock.should_receive('disconnect').once().ordered()

    result, code = self.delete("/switches/my.switch/vlans/2500/access-groups/out")

    assert_that(code, equal_to(500))
    assert_that(result, equal_to({'error': 'Unexpected error: tests.api.switch_api_test.EmptyException'}))
class EmptyException(Exception):
    # Exception deliberately raised with no message; used by the test above to verify
    # that the API synthesizes an "Unexpected error: <module>.<class>" message.
    pass
|
|
# support_oppose_deciding/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from ballot.controllers import figure_out_google_civic_election_id_voter_is_watching
from ballot.models import CANDIDATE, MEASURE, OFFICE, BallotItemListManager
from candidate.models import CandidateManager, CandidateListManager
from friend.models import FriendManager
from measure.models import ContestMeasureManager
from django.http import HttpResponse
from follow.models import FollowOrganizationList
import json
from position.models import ANY_STANCE, FRIENDS_ONLY, SUPPORT, OPPOSE, PositionManager, PositionListManager, PUBLIC_ONLY
from voter.models import fetch_voter_id_from_voter_device_link, VoterManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, is_voter_device_id_valid, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
def position_oppose_count_for_ballot_item_for_api(voter_device_id,
                                                  candidate_id, candidate_we_vote_id,
                                                  measure_id, measure_we_vote_id):
    """Return an HttpResponse counting OPPOSE positions on one ballot item
    (candidate or measure) that are relevant to the voter behind voter_device_id.
    Thin convenience wrapper around positions_count_for_api with the stance fixed.
    """
    return positions_count_for_api(voter_device_id,
                                   candidate_id, candidate_we_vote_id,
                                   measure_id, measure_we_vote_id, OPPOSE)
def positions_count_for_api(voter_device_id,
                            candidate_id, candidate_we_vote_id,
                            measure_id, measure_we_vote_id,
                            stance_we_are_looking_for):
    """Count positions matching stance_we_are_looking_for on one ballot item and
    return the result as a JSON HttpResponse.

    Dispatches to the candidate or measure counter depending on which ids were
    supplied; answers an error payload if the device id, voter id, or ballot item
    ids are missing/invalid.
    """
    # Resolve the voter behind this device so we can scope the count to who they follow.
    device_results = is_voter_device_id_valid(voter_device_id)
    if not device_results['success']:
        json_data = {
            'status': 'VALID_VOTER_DEVICE_ID_MISSING',
            'success': False,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        json_data = {
            # NOTE(review): trailing space in this status string kept byte-for-byte
            # for compatibility — confirm whether any client matches on it before fixing.
            'status': "VALID_VOTER_ID_MISSING ",
            'success': False,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    show_positions_this_voter_follows = True
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        json_data = positions_count_for_candidate(voter_id,
                                                  candidate_id, candidate_we_vote_id,
                                                  stance_we_are_looking_for,
                                                  show_positions_this_voter_follows)['json_data']
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        json_data = positions_count_for_contest_measure(voter_id,
                                                        measure_id, measure_we_vote_id,
                                                        stance_we_are_looking_for,
                                                        show_positions_this_voter_follows)['json_data']
    else:
        json_data = {
            'status': 'UNABLE_TO_RETRIEVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING',
            'success': False,
        }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def positions_count_for_candidate(voter_id, candidate_id, candidate_we_vote_id, stance_we_are_looking_for,
                                  show_positions_this_voter_follows=True):
    """
    We want to return a JSON file with the number of orgs, friends and public figures the voter follows who support
    this particular candidate's campaign

    Returns {'json_data': {...}} where json_data carries status, success, count,
    ballot_item_id, ballot_item_we_vote_id and kind_of_ballot_item (CANDIDATE).
    When show_positions_this_voter_follows is False, 'count' is instead the number
    of matching positions from organizations the voter does NOT follow.
    """
    # Since we can take in either candidate_id or candidate_we_vote_id, we need to retrieve the candidate object
    # so we make sure we have both of these values to return.
    # NOTE(review): if the lookup fails, the ids are echoed back unresolved (one may be empty).
    if positive_value_exists(candidate_id):
        candidate_manager = CandidateManager()
        results = candidate_manager.retrieve_candidate_from_id(candidate_id)
        if results['candidate_found']:
            candidate = results['candidate']
            candidate_we_vote_id = candidate.we_vote_id
    elif positive_value_exists(candidate_we_vote_id):
        candidate_manager = CandidateManager()
        results = candidate_manager.retrieve_candidate_from_we_vote_id(candidate_we_vote_id)
        if results['candidate_found']:
            candidate = results['candidate']
            candidate_id = candidate.id

    position_list_manager = PositionListManager()

    ############################
    # Retrieve public positions
    retrieve_public_positions_now = True  # The alternate is positions for friends-only
    most_recent_only = True
    public_positions_list_for_candidate = \
        position_list_manager.retrieve_all_positions_for_candidate(
            retrieve_public_positions_now, candidate_id, candidate_we_vote_id,
            stance_we_are_looking_for, most_recent_only
        )

    # Only fetch the voter's follow list if there are public positions to filter by it.
    organizations_followed_by_voter_by_id = []
    if len(public_positions_list_for_candidate):
        follow_organization_list_manager = FollowOrganizationList()
        organizations_followed_by_voter_by_id = \
            follow_organization_list_manager.retrieve_follow_organization_by_voter_id_simple_id_array(voter_id)

    if show_positions_this_voter_follows:
        # Keep only public positions from organizations this voter follows.
        position_objects = position_list_manager.calculate_positions_followed_by_voter(
            voter_id, public_positions_list_for_candidate, organizations_followed_by_voter_by_id)

        ##################################
        # Now retrieve friend's positions
        voter_manager = VoterManager()
        voter_results = voter_manager.retrieve_voter_by_id(voter_id)
        if voter_results['voter_found']:
            voter = voter_results['voter']
            voter_we_vote_id = voter.we_vote_id
        else:
            voter_we_vote_id = ""

        friends_we_vote_id_list = []
        if positive_value_exists(voter_we_vote_id):
            retrieve_public_positions_now = False  # Retrieve positions intended for friends-only
            most_recent_only = False
            friend_manager = FriendManager()
            friend_results = friend_manager.retrieve_friends_we_vote_id_list(voter_we_vote_id)
            if friend_results['friends_we_vote_id_list_found']:
                friends_we_vote_id_list = friend_results['friends_we_vote_id_list']
            # Add yourself as a friend so your opinions show up
            friends_we_vote_id_list.append(voter_we_vote_id)
            friends_positions_list_for_candidate = \
                position_list_manager.retrieve_all_positions_for_candidate(
                    retrieve_public_positions_now, candidate_id, candidate_we_vote_id,
                    stance_we_are_looking_for, most_recent_only,
                    friends_we_vote_id_list)
            if len(friends_positions_list_for_candidate):
                # Friends-only positions are counted together with followed public positions.
                position_objects = friends_positions_list_for_candidate + position_objects

        positions_followed_count = len(position_objects)
        json_data = {
            'status': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_FOLLOWED_COUNT_FOR_CANDIDATE',
            'success': True,
            'count': positions_followed_count,
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
        }
        results = {
            'json_data': json_data,
        }
        return results
    else:
        # Inverse view: count public positions from organizations the voter does NOT follow.
        positions_not_followed = position_list_manager.calculate_positions_not_followed_by_voter(
            public_positions_list_for_candidate, organizations_followed_by_voter_by_id)
        positions_not_followed_count = len(positions_not_followed)
        json_data = {
            'status': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_NOT_FOLLOWED_COUNT_FOR_CANDIDATE',
            'success': True,
            'count': positions_not_followed_count,
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
        }
        results = {
            'json_data': json_data,
        }
        return results
def positions_count_for_contest_measure(voter_id, measure_id, measure_we_vote_id, stance_we_are_looking_for,
                                        show_positions_this_voter_follows=True):
    """
    We want to return a JSON file with the number of orgs, friends and public figures the voter follows who support
    this particular measure

    Returns {'json_data': {...}} where json_data carries status, success, count,
    ballot_item_id, ballot_item_we_vote_id and kind_of_ballot_item (MEASURE).
    When show_positions_this_voter_follows is False, 'count' is instead the number
    of matching positions from organizations the voter does NOT follow.
    """
    # Since we can take in either measure_id or measure_we_vote_id, we need to retrieve the measure object
    # so we make sure we have both of these values to return.
    # NOTE(review): if the lookup fails, the ids are echoed back unresolved (one may be empty).
    if positive_value_exists(measure_id):
        contest_measure_manager = ContestMeasureManager()
        results = contest_measure_manager.retrieve_contest_measure_from_id(measure_id)
        if results['contest_measure_found']:
            contest_measure = results['contest_measure']
            measure_we_vote_id = contest_measure.we_vote_id
    elif positive_value_exists(measure_we_vote_id):
        contest_measure_manager = ContestMeasureManager()
        results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(measure_we_vote_id)
        if results['contest_measure_found']:
            contest_measure = results['contest_measure']
            measure_id = contest_measure.id

    position_list_manager = PositionListManager()

    ############################
    # Retrieve public positions
    retrieve_public_positions_now = True  # The alternate is positions for friends-only
    most_recent_only = True
    public_positions_list_for_contest_measure = \
        position_list_manager.retrieve_all_positions_for_contest_measure(
            retrieve_public_positions_now, measure_id, measure_we_vote_id,
            stance_we_are_looking_for, most_recent_only)

    # Only fetch the voter's follow list if there are public positions to filter by it.
    organizations_followed_by_voter_by_id = []
    if len(public_positions_list_for_contest_measure):
        follow_organization_list_manager = FollowOrganizationList()
        organizations_followed_by_voter_by_id = \
            follow_organization_list_manager.retrieve_follow_organization_by_voter_id_simple_id_array(voter_id)

    if show_positions_this_voter_follows:
        # Keep only public positions from organizations this voter follows.
        position_objects = position_list_manager.calculate_positions_followed_by_voter(
            voter_id, public_positions_list_for_contest_measure, organizations_followed_by_voter_by_id)

        ##################################
        # Now retrieve friend's positions
        voter_manager = VoterManager()
        voter_results = voter_manager.retrieve_voter_by_id(voter_id)
        if voter_results['voter_found']:
            voter = voter_results['voter']
            voter_we_vote_id = voter.we_vote_id
        else:
            voter_we_vote_id = ""

        friends_we_vote_id_list = []
        if positive_value_exists(voter_we_vote_id):
            retrieve_public_positions_now = False  # Retrieve positions intended for friends-only
            most_recent_only = False
            friend_manager = FriendManager()
            friend_results = friend_manager.retrieve_friends_we_vote_id_list(voter_we_vote_id)
            if friend_results['friends_we_vote_id_list_found']:
                friends_we_vote_id_list = friend_results['friends_we_vote_id_list']
            # Add yourself as a friend so your opinions show up
            friends_we_vote_id_list.append(voter_we_vote_id)
            friends_positions_list_for_contest_measure = \
                position_list_manager.retrieve_all_positions_for_contest_measure(
                    retrieve_public_positions_now, measure_id, measure_we_vote_id,
                    stance_we_are_looking_for, most_recent_only,
                    friends_we_vote_id_list)
            if len(friends_positions_list_for_contest_measure):
                # Friends-only positions are counted together with followed public positions.
                position_objects = friends_positions_list_for_contest_measure + position_objects

        positions_followed_count = len(position_objects)
        json_data = {
            'status': 'SUCCESSFUL_RETRIEVE_OF_POSITION_COUNT_FOR_CONTEST_MEASURE',
            'success': True,
            'count': positions_followed_count,
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
        }
        results = {
            'json_data': json_data,
        }
        return results
    else:
        # Inverse view: count public positions from organizations the voter does NOT follow.
        positions_not_followed = position_list_manager.calculate_positions_not_followed_by_voter(
            public_positions_list_for_contest_measure, organizations_followed_by_voter_by_id)
        positions_not_followed_count = len(positions_not_followed)
        json_data = {
            'status': 'SUCCESSFUL_RETRIEVE_OF_POSITIONS_NOT_FOLLOWED_COUNT_FOR_CONTEST_MEASURE',
            'success': True,
            'count': positions_not_followed_count,
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
        }
        results = {
            'json_data': json_data,
        }
        return results
def position_support_count_for_ballot_item_for_api(voter_device_id,
                                                   candidate_id, candidate_we_vote_id,
                                                   measure_id, measure_we_vote_id):
    """Return an HttpResponse counting SUPPORT positions on one ballot item
    (candidate or measure) that are relevant to the voter behind voter_device_id.
    Thin convenience wrapper around positions_count_for_api with the stance fixed.
    """
    return positions_count_for_api(voter_device_id,
                                   candidate_id, candidate_we_vote_id,
                                   measure_id, measure_we_vote_id, SUPPORT)
def position_public_oppose_count_for_ballot_item_for_api(candidate_id, candidate_we_vote_id,
                                                         measure_id, measure_we_vote_id):
    """Return an HttpResponse counting all PUBLIC positions opposing one ballot item.
    Unlike the voter-scoped counters, no voter_device_id is involved.
    """
    return positions_public_count_for_api(candidate_id, candidate_we_vote_id,
                                          measure_id, measure_we_vote_id, OPPOSE)
def position_public_support_count_for_ballot_item_for_api(candidate_id, candidate_we_vote_id,
                                                          measure_id, measure_we_vote_id):
    """Return an HttpResponse counting all PUBLIC positions supporting one ballot item.
    Unlike the voter-scoped counters, no voter_device_id is involved.
    """
    return positions_public_count_for_api(candidate_id, candidate_we_vote_id,
                                          measure_id, measure_we_vote_id, SUPPORT)
def finalize_support_and_oppose_positions_count(voter_id, show_positions_this_voter_follows,
                                                organizations_followed_by_voter_by_id, friends_we_vote_id_list,
                                                support_positions_list_for_one_ballot_item,
                                                oppose_positions_list_for_one_ballot_item):
    """Reduce the raw support/oppose position lists for one ballot item to counts.

    When show_positions_this_voter_follows is True, the counts (and returned lists)
    cover positions from organizations/friends the voter follows; otherwise the
    counts cover positions the voter does NOT follow, and the returned
    *_positions_followed lists stay empty.
    """
    position_list_manager = PositionListManager()
    support_positions_followed = []
    oppose_positions_followed = []

    if show_positions_this_voter_follows:
        support_positions_followed = position_list_manager.calculate_positions_followed_by_voter(
            voter_id, support_positions_list_for_one_ballot_item, organizations_followed_by_voter_by_id,
            friends_we_vote_id_list)
        oppose_positions_followed = position_list_manager.calculate_positions_followed_by_voter(
            voter_id, oppose_positions_list_for_one_ballot_item, organizations_followed_by_voter_by_id,
            friends_we_vote_id_list)
        support_positions_count = len(support_positions_followed)
        oppose_positions_count = len(oppose_positions_followed)
    else:
        support_positions_count = len(position_list_manager.calculate_positions_not_followed_by_voter(
            support_positions_list_for_one_ballot_item, organizations_followed_by_voter_by_id,
            friends_we_vote_id_list))
        oppose_positions_count = len(position_list_manager.calculate_positions_not_followed_by_voter(
            oppose_positions_list_for_one_ballot_item, organizations_followed_by_voter_by_id,
            friends_we_vote_id_list))

    return {
        'support_positions_count': support_positions_count,
        'support_positions_followed': support_positions_followed,
        'oppose_positions_count': oppose_positions_count,
        'oppose_positions_followed': oppose_positions_followed,
    }
def positions_public_count_for_api(candidate_id, candidate_we_vote_id, measure_id, measure_we_vote_id,
                                   stance_we_are_looking_for):
    """Count all PUBLIC positions matching stance_we_are_looking_for on one ballot item
    and return the result as a JSON HttpResponse.

    Dispatches to the candidate or measure counter depending on which ids were
    supplied; if neither ballot item is identified, answers an error payload.
    (Previous version had a dead `else: pass` branch and duplicated the
    json.dumps/HttpResponse return in every branch — collapsed to a single exit.)
    """
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        results = positions_public_count_for_candidate(candidate_id, candidate_we_vote_id,
                                                       stance_we_are_looking_for)
        json_data = results['json_data']
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        results = positions_public_count_for_contest_measure(measure_id, measure_we_vote_id,
                                                             stance_we_are_looking_for)
        json_data = results['json_data']
    else:
        json_data = {
            'status': 'UNABLE_TO_RETRIEVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING',
            'success': False,
        }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def positions_public_count_for_candidate(candidate_id, candidate_we_vote_id, stance_we_are_looking_for):
    """
    We want to return a JSON file with the number of orgs and public figures who support
    this particular candidate's campaign

    Built to make only two database calls; all other work happens in the application layer.
    Returns {'json_data': {...}} with status, success, count, both candidate ids and CANDIDATE.
    """
    # First DB call: count the matching public positions with whatever ids we were given.
    public_position_count = PositionListManager().fetch_public_positions_count_for_candidate(
        candidate_id,
        candidate_we_vote_id,
        stance_we_are_looking_for)

    # Second DB call (at most one): since we can take in either candidate_id or
    # candidate_we_vote_id, fill in whichever identifier is missing for the response.
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        candidate_manager = CandidateManager()
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)

    return {
        'json_data': {
            'status': 'SUCCESSFUL_RETRIEVE_OF_PUBLIC_POSITION_COUNT_RE_CANDIDATE',
            'success': True,
            'count': public_position_count,
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
        },
    }
def positions_public_count_for_contest_measure(measure_id, measure_we_vote_id, stance_we_are_looking_for):
    """
    We want to return a JSON file with the number of orgs and public figures who support
    this particular measure

    Built to make only two database calls; all other work happens in the application layer.
    Returns {'json_data': {...}} with status, success, count, both measure ids and MEASURE.
    """
    # First DB call: count the matching public positions with whatever ids we were given.
    public_position_count = PositionListManager().fetch_public_positions_count_for_contest_measure(
        measure_id, measure_we_vote_id, stance_we_are_looking_for)

    # Second DB call (at most one): since we can take in either measure_id or
    # measure_we_vote_id, fill in whichever identifier is missing for the response.
    if positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        contest_measure_manager = ContestMeasureManager()
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)

    return {
        'json_data': {
            'status': 'SUCCESSFUL_RETRIEVE_OF_PUBLIC_POSITION_COUNT_FOR_CONTEST_MEASURE',
            'success': True,
            'count': public_position_count,
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
        },
    }
def voter_opposing_save(voter_device_id, candidate_id, candidate_we_vote_id,  # voterOpposingSave
                        measure_id, measure_we_vote_id, user_agent_string, user_agent_object):
    """Record that this voter opposes the given candidate or measure."""
    def _json_response(payload):
        # Every exit from this endpoint uses the same JSON envelope.
        return HttpResponse(json.dumps(payload), content_type='application/json')

    def _failure(status_text):
        # Error payload: no ballot item resolved, no position saved.
        return _json_response({
            'status': status_text,
            'success': False,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'position_we_vote_id': '',
        })

    # Validate the device and resolve the voter who is opposing.
    device_results = is_voter_device_id_valid(voter_device_id)
    if not device_results['success']:
        return _failure('VALID_VOTER_DEVICE_ID_MISSING')

    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        return _failure("VALID_VOTER_ID_MISSING")

    position_manager = PositionManager()
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        # Callers may pass either identifier; look up whichever is missing.
        candidate_manager = CandidateManager()
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)
        toggle_results = position_manager.toggle_on_voter_oppose_for_candidate(
            voter_id, candidate_id, user_agent_string, user_agent_object)
        return _json_response({
            'status': "OPPOSING_CANDIDATE " + toggle_results['status'],
            'success': toggle_results['success'],
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
            'position_we_vote_id': toggle_results['position_we_vote_id'],
        })
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        # Same either-identifier handling for measures.
        contest_measure_manager = ContestMeasureManager()
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
        toggle_results = position_manager.toggle_on_voter_oppose_for_contest_measure(
            voter_id, measure_id, user_agent_string, user_agent_object)
        return _json_response({
            'status': "OPPOSING_MEASURE " + toggle_results['status'],
            'success': toggle_results['success'],
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
            'position_we_vote_id': toggle_results['position_we_vote_id'],
        })
    else:
        return _failure('UNABLE_TO_SAVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING')
def voter_stop_opposing_save(voter_device_id, candidate_id, candidate_we_vote_id,  # voterStopOpposingSave
                             measure_id, measure_we_vote_id, user_agent_string, user_agent_object):
    """Remove this voter's 'oppose' stance from the given candidate or measure.

    Either the id or the we_vote_id may be supplied for the ballot item; the
    missing one is looked up so both appear in the response. Returns an
    HttpResponse whose JSON body always carries the same keys.
    """
    # Get voter_id from the voter_device_id so we can know who is supporting/opposing
    results = is_voter_device_id_valid(voter_device_id)
    if not results['success']:
        json_data = {
            'status': 'VALID_VOTER_DEVICE_ID_MISSING',
            'success': False,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'position_we_vote_id': '',
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        json_data = {
            'status': "VALID_VOTER_ID_MISSING ",
            'success': False,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'position_we_vote_id': '',
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    position_manager = PositionManager()
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        candidate_manager = CandidateManager()
        # Since we can take in either candidate_id or candidate_we_vote_id, we need to retrieve the value we don't have
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)
        results = position_manager.toggle_off_voter_oppose_for_candidate(voter_id, candidate_id,
                                                                         user_agent_string, user_agent_object)
        status = "STOP_OPPOSING_CANDIDATE " + results['status']
        success = results['success']
        json_data = {
            'status': status,
            'success': success,
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
            'position_we_vote_id': results['position_we_vote_id'],
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        contest_measure_manager = ContestMeasureManager()
        # Since we can take in either measure_id or measure_we_vote_id, we need to retrieve the value we don't have
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
        results = position_manager.toggle_off_voter_oppose_for_contest_measure(voter_id, measure_id,
                                                                              user_agent_string, user_agent_object)
        # Trailing space added so the status prefix is separated from the
        # manager's status, matching every other status string in this module.
        status = "STOP_OPPOSING_MEASURE " + results['status']
        success = results['success']
        json_data = {
            'status': status,
            'success': success,
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
            'position_we_vote_id': results['position_we_vote_id'],
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
    else:
        status = 'UNABLE_TO_SAVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING'
        success = False
        json_data = {
            'status': status,
            'success': success,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'position_we_vote_id': '',
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')
def voter_stop_supporting_save(voter_device_id, candidate_id, candidate_we_vote_id,  # voterStopSupportingSave
                               measure_id, measure_we_vote_id, user_agent_string, user_agent_object):
    """Remove this voter's 'support' stance from the given candidate or measure."""
    def _json_response(payload):
        # Single place that serializes the JSON envelope for every exit path.
        return HttpResponse(json.dumps(payload), content_type='application/json')

    def _failure(status_text):
        return _json_response({
            'status': status_text,
            'success': False,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'position_we_vote_id': '',
        })

    # Resolve the acting voter from the device id.
    device_results = is_voter_device_id_valid(voter_device_id)
    if not device_results['success']:
        return _failure('VALID_VOTER_DEVICE_ID_MISSING')

    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        return _failure("VALID_VOTER_ID_MISSING ")

    position_manager = PositionManager()
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        # Either identifier may be supplied; fetch the one we are missing.
        candidate_manager = CandidateManager()
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)
        toggle_results = position_manager.toggle_off_voter_support_for_candidate(
            voter_id, candidate_id, user_agent_string, user_agent_object)
        return _json_response({
            'status': "STOP_SUPPORTING_CANDIDATE " + toggle_results['status'],
            'success': toggle_results['success'],
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
            'position_we_vote_id': toggle_results['position_we_vote_id'],
        })
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        contest_measure_manager = ContestMeasureManager()
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
        toggle_results = position_manager.toggle_off_voter_support_for_contest_measure(
            voter_id, measure_id, user_agent_string, user_agent_object)
        return _json_response({
            'status': "STOP_SUPPORTING_MEASURE " + toggle_results['status'],
            'success': toggle_results['success'],
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
            'position_we_vote_id': toggle_results['position_we_vote_id'],
        })
    else:
        return _failure('UNABLE_TO_SAVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING')
def voter_supporting_save_for_api(voter_device_id,  # voterSupportingSave
                                  candidate_id, candidate_we_vote_id,
                                  measure_id, measure_we_vote_id, user_agent_string, user_agent_object):
    """
    Record that this voter supports the given candidate or measure.
    Default to this being a private position.
    :param voter_device_id:
    :param candidate_id:
    :param candidate_we_vote_id:
    :param measure_id:
    :param measure_we_vote_id:
    :param user_agent_string:
    :param user_agent_object:
    :return:
    """
    def _json_response(payload):
        # Shared serializer for every exit path of this endpoint.
        return HttpResponse(json.dumps(payload), content_type='application/json')

    def _failure(status_text):
        return _json_response({
            'status': status_text,
            'success': False,
            'ballot_item_id': 0,
            'ballot_item_we_vote_id': '',
            'kind_of_ballot_item': '',
            'position_we_vote_id': '',
        })

    # Resolve the acting voter from the device id.
    device_results = is_voter_device_id_valid(voter_device_id)
    if not device_results['success']:
        return _failure('VALID_VOTER_DEVICE_ID_MISSING ')

    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_id):
        return _failure("VALID_VOTER_ID_MISSING ")

    position_manager = PositionManager()
    if positive_value_exists(candidate_id) or positive_value_exists(candidate_we_vote_id):
        # Either identifier may be supplied; fetch the one we are missing.
        candidate_manager = CandidateManager()
        if positive_value_exists(candidate_id):
            candidate_we_vote_id = candidate_manager.fetch_candidate_we_vote_id_from_id(candidate_id)
        elif positive_value_exists(candidate_we_vote_id):
            candidate_id = candidate_manager.fetch_candidate_id_from_we_vote_id(candidate_we_vote_id)
        toggle_results = position_manager.toggle_on_voter_support_for_candidate(
            voter_id, candidate_id, user_agent_string, user_agent_object)
        return _json_response({
            'status': "SUPPORTING_CANDIDATE " + toggle_results['status'] + " ",
            'success': toggle_results['success'],
            'ballot_item_id': convert_to_int(candidate_id),
            'ballot_item_we_vote_id': candidate_we_vote_id,
            'kind_of_ballot_item': CANDIDATE,
            'position_we_vote_id': toggle_results['position_we_vote_id'],
        })
    elif positive_value_exists(measure_id) or positive_value_exists(measure_we_vote_id):
        contest_measure_manager = ContestMeasureManager()
        if positive_value_exists(measure_id):
            measure_we_vote_id = contest_measure_manager.fetch_contest_measure_we_vote_id_from_id(measure_id)
        elif positive_value_exists(measure_we_vote_id):
            measure_id = contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
        toggle_results = position_manager.toggle_on_voter_support_for_contest_measure(
            voter_id, measure_id, user_agent_string, user_agent_object)
        return _json_response({
            'status': "SUPPORTING_MEASURE: " + toggle_results['status'] + " ",
            'success': toggle_results['success'],
            'ballot_item_id': convert_to_int(measure_id),
            'ballot_item_we_vote_id': measure_we_vote_id,
            'kind_of_ballot_item': MEASURE,
            'position_we_vote_id': toggle_results['position_we_vote_id'],
        })
    else:
        return _failure('UNABLE_TO_SAVE-CANDIDATE_ID_AND_MEASURE_ID_MISSING ')
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Flake8 cannot disable a warning for the file. Flake8 does not like beam code
# and reports many 'W503 line break before binary operator' errors. So turn off
# flake8 for this file.
# flake8: noqa
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import json
import logging
import os
import sys
import apache_beam as beam
import textwrap
def parse_arguments(argv):
    """Parse command line arguments.

    Args:
        argv: list of command line arguments including program name.

    Returns:
        The parsed arguments as returned by argparse.ArgumentParser.

    Raises:
        ValueError: if --cloud is given without --project-id, or --async is
            given without --cloud.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""\
          Runs preprocessing on raw data for TensorFlow training.
          This script applies some transformations to raw data to improve
          training performance. Some data transformations can be expensive
          such as the tf-idf text column transformation. During training, the
          same raw data row might be used multiply times to train a model. This
          means the same transformations are applied to the same data row
          multiple times. This can be very inefficient, so this script applies
          partial transformations to the raw data and writes an intermediate
          preprocessed datasource to disk for training.
          Running this transformation step is required for two usage paths:
          1) If the img_url_to_vec transform is used. This is because
          preprocessing as image is expensive and TensorFlow cannot easily
          read raw image files during training.
          2) If the raw data is in BigQuery. TensorFlow cannot read from a
          BigQuery source.
          Running this transformation step is recommended if a text transform is
          used (like tf-idf or bag-of-words), and the text value for each row
          is very long.
          Running this transformation step may not have an interesting training
          performance impact if the transforms are all simple like scaling
          numerical values."""))

    # Exactly one data source must be given: local/GCS csv files or a
    # BigQuery table.
    source_group = parser.add_mutually_exclusive_group(required=True)
    source_group.add_argument(
        '--csv',
        metavar='FILE',
        required=False,
        action='append',
        help='CSV data to transform.')
    source_group.add_argument(
        '--bigquery',
        metavar='PROJECT_ID.DATASET.TABLE_NAME',
        type=str,
        required=False,
        help=('Must be in the form `project.dataset.table_name`. BigQuery '
              'data to transform'))
    parser.add_argument(
        '--analysis',
        metavar='ANALYSIS_OUTPUT_DIR',
        required=True,
        help='The output folder of analyze')
    parser.add_argument(
        '--prefix',
        metavar='OUTPUT_FILENAME_PREFIX',
        required=True,
        type=str)
    parser.add_argument(
        '--output',
        metavar='DIR',
        default=None,
        required=True,
        help=('Google Cloud Storage or Local directory in which '
              'to place outputs.'))
    parser.add_argument(
        '--shuffle',
        action='store_true',
        default=False,
        help='If used, data source is shuffled. This is recommended for training data.')
    parser.add_argument(
        '--batch-size',
        metavar='N',
        type=int,
        default=100,
        help='Larger values increase performance and peak memory usage.')

    cloud_group = parser.add_argument_group(
        title='Cloud Parameters',
        description='These parameters are only used if --cloud is used.')
    cloud_group.add_argument(
        '--cloud',
        action='store_true',
        help='Run preprocessing on the cloud.')
    cloud_group.add_argument(
        '--job-name',
        type=str,
        help='Unique dataflow job name.')
    cloud_group.add_argument(
        '--project-id',
        help='The project to which the job will be submitted.')
    cloud_group.add_argument(
        '--num-workers',
        metavar='N',
        type=int,
        default=0,
        help='Set to 0 to use the default size determined by the Dataflow service.')
    cloud_group.add_argument(
        '--worker-machine-type',
        metavar='NAME',
        type=str,
        help='A machine name from https://cloud.google.com/compute/docs/machine-types. '
             ' If not given, the service uses the default machine type.')
    cloud_group.add_argument(
        '--async',
        action='store_true',
        help='If used, this script returns before the dataflow job is completed.')

    args = parser.parse_args(args=argv[1:])

    if args.cloud and not args.project_id:
        raise ValueError('--project-id is needed for --cloud')

    # 'async' is a reserved keyword in Python 3.7+, so the attribute argparse
    # creates for --async must be read via getattr rather than args.async.
    if getattr(args, 'async') and not args.cloud:
        raise ValueError('--async should only be used with --cloud')

    if not args.job_name:
        # Default to a timestamped name so repeated runs don't collide.
        args.job_name = ('dataflow-job-{}'.format(
            datetime.datetime.now().strftime('%Y%m%d%H%M%S')))

    return args
@beam.ptransform_fn
def shuffle(pcoll):  # pylint: disable=invalid-name
    """Randomly shuffles the elements of a PCollection.

    Pairs each element with a random key, groups by that key, then drops the
    key, which distributes elements across workers in random order.
    """
    import random
    return (pcoll
            | 'PairWithRandom' >> beam.Map(lambda x: (random.random(), x))
            | 'GroupByRandom' >> beam.GroupByKey()
            # Tuple-parameter lambdas (`lambda (k, vs)`) were removed in
            # Python 3 (PEP 3113); index the (key, values) pair instead.
            | 'DropRandom' >> beam.FlatMap(lambda kv: kv[1]))
def image_transform_columns(features):
    """Returns a list of columns that prepare_image_transforms() should run on.

    Because of beam + pickle, IMAGE_URL_TO_VEC_TRANSFORM cannot be used inside of
    a beam function, so we extract the columns prepare_image_transforms() should
    run on outside of beam.
    """
    import six
    from trainer import feature_transforms

    # Keep only the feature names whose configured transform is the image one.
    return [name
            for name, transform in six.iteritems(features)
            if transform['transform'] == feature_transforms.IMAGE_TRANSFORM]
def prepare_image_transforms(element, image_columns):
    """Replace an images url with its jpeg bytes.

    Args:
        element: one input row, as a dict
        image_columns: list of columns that are image paths

    Return:
        element, where each image file path has been replaced by a base64 image.
    """
    # Imports are local so this function is self-contained when pickled and
    # shipped to Beam workers. cStringIO is Python-2-only.
    import base64
    import cStringIO
    from PIL import Image
    from tensorflow.python.lib.io import file_io as tf_file_io
    from apache_beam.metrics import Metrics

    # Pipeline-level counters for monitoring bad/missing image files.
    img_error_count = Metrics.counter('main', 'ImgErrorCount')
    img_missing_count = Metrics.counter('main', 'ImgMissingCount')

    for name in image_columns:
        uri = element[name]
        if not uri:
            # Empty cell: count it and leave the column value untouched.
            img_missing_count.inc()
            continue
        try:
            with tf_file_io.FileIO(uri, 'r') as f:
                img = Image.open(f).convert('RGB')
        # A variety of different calling libraries throw different exceptions here.
        # They all correspond to an unreadable file so we treat them equivalently.
        # pylint: disable broad-except
        except Exception as e:
            logging.exception('Error processing image %s: %s', uri, str(e))
            img_error_count.inc()
            # NOTE(review): this bare `return` yields None, dropping the whole
            # row from the Beam Map output (downstream encode_csv would then
            # receive None). A `continue` would instead leave the raw URL in
            # the row. Intent unclear — confirm which failure mode is wanted.
            return
        # Convert to desired format and output.
        output = cStringIO.StringIO()
        img.save(output, 'jpeg')
        # urlsafe base64 so the bytes survive the CSV round-trip to the
        # serving graph.
        element[name] = base64.urlsafe_b64encode(output.getvalue())
    return element
class EmitAsBatchDoFn(beam.DoFn):
    """A DoFn that buffers the records and emits them batch by batch."""

    def __init__(self, batch_size):
        """Constructor of EmitAsBatchDoFn beam.DoFn class.

        Args:
            batch_size: the max size we want to buffer the records before emitting.
        """
        self._batch_size = batch_size
        # Records buffered so far; flushed as one list element when full.
        self._cached = []

    def process(self, element):
        # Accumulate until a full batch is available, then emit the batch as a
        # single (list) element and start a fresh buffer.
        self._cached.append(element)
        if len(self._cached) >= self._batch_size:
            emit = self._cached
            self._cached = []
            yield emit

    def finish_bundle(self, element=None):
        # Flush any partial batch at end of bundle. Outside process() a value
        # must be emitted pre-windowed, hence the explicit WindowedValue with
        # the GlobalWindow (the -1 is the timestamp argument — presumably a
        # sentinel; confirm against the Beam SDK version in use).
        from apache_beam.transforms import window
        from apache_beam.utils.windowed_value import WindowedValue
        if len(self._cached) > 0:  # pylint: disable=g-explicit-length-test
            yield WindowedValue(self._cached, -1, [window.GlobalWindow()])
class TransformFeaturesDoFn(beam.DoFn):
    """Converts raw data into transformed data."""

    def __init__(self, analysis_output_dir, features, schema, stats):
        # Configuration needed to rebuild the transformation graph on each
        # worker; the TF session itself is created lazily in start_bundle
        # because sessions cannot be pickled.
        self._analysis_output_dir = analysis_output_dir
        self._features = features
        self._schema = schema
        self._stats = stats
        self._session = None

    def start_bundle(self, element=None):
        """Build the transformation graph once."""
        import tensorflow as tf
        from trainer import feature_transforms

        g = tf.Graph()
        session = tf.Session(graph=g)

        # Build the transformation graph
        with g.as_default():
            transformed_features, _, placeholders = (
                feature_transforms.build_csv_serving_tensors_for_transform_step(
                    analysis_path=self._analysis_output_dir,
                    features=self._features,
                    schema=self._schema,
                    stats=self._stats,
                    keep_target=True))
            # Lookup tables (e.g. vocabularies) must be initialized before
            # the first session.run.
            session.run(tf.tables_initializer())

        self._session = session
        self._transformed_features = transformed_features
        # Single string placeholder fed with a batch of raw csv lines.
        self._input_placeholder_tensor = placeholders['csv_example']

    def finish_bundle(self, element=None):
        # Release the TF session when the bundle ends.
        self._session.close()

    def process(self, element):
        """Run the transformation graph on batched input data

        Args:
            element: list of csv strings, representing one batch input to the TF graph.

        Returns:
            dict containing the transformed data. Results are un-batched. Sparse
            tensors are converted to lists.
        """
        import apache_beam as beam
        import six
        import tensorflow as tf

        # This function is invoked by a separate sub-process so setting the logging level
        # does not affect Datalab's kernel process.
        tf.logging.set_verbosity(tf.logging.ERROR)
        try:
            # Strip trailing newlines left over from ReadFromText/batching.
            clean_element = []
            for line in element:
                clean_element.append(line.rstrip())

            # batch_result is list of numpy arrays with batch_size many rows.
            batch_result = self._session.run(
                fetches=self._transformed_features,
                feed_dict={self._input_placeholder_tensor: clean_element})

            # ex batch_result.
            # Dense tensor: {'col1': array([[batch_1], [batch_2]])}
            # Sparse tensor: {'col1': tf.SparseTensorValue(
            #   indices=array([[batch_1, 0], [batch_1, 1], ...,
            #                  [batch_2, 0], [batch_2, 1], ...]],
            #   values=array[value, value, value, ...])}

            # Unbatch the results.
            for i in range(len(clean_element)):
                transformed_features = {}
                for name, value in six.iteritems(batch_result):
                    if isinstance(value, tf.SparseTensorValue):
                        # Select the sparse values whose first index (the row
                        # index) belongs to row i of this batch.
                        batch_i_indices = value.indices[:, 0] == i
                        batch_i_values = value.values[batch_i_indices]
                        transformed_features[name] = batch_i_values.tolist()
                    else:
                        transformed_features[name] = value[i].tolist()
                yield transformed_features
        except Exception as e:  # pylint: disable=broad-except
            # Route failures (with the offending batch) to the 'errors' output
            # instead of failing the pipeline.
            yield beam.pvalue.TaggedOutput('errors', (str(e), element))
def decode_csv(csv_string, column_names):
    """Parse a csv line into a dict.

    Args:
        csv_string: a csv string. May contain missing values "a,,c"
        column_names: list of column names

    Returns:
        Dict of {column_name, value_from_csv}. If there are missing values,
        value_from_csv will be ''.

    Raises:
        ValueError: if the line does not have one field per column.
    """
    import csv

    fields = next(csv.reader([csv_string]))
    if len(fields) != len(column_names):
        raise ValueError('csv line %s does not have %d columns' % (csv_string, len(column_names)))
    return dict(zip(column_names, fields))
def encode_csv(data_dict, column_names):
    """Builds a csv string.

    Args:
        data_dict: dict of {column_name: 1 value}
        column_names: list of column names

    Returns:
        A csv string version of data_dict
    """
    import csv
    import six

    # six.StringIO picks the right buffer type for py2 (bytes) and py3 (text).
    buff = six.StringIO()
    row = [str(data_dict[name]) for name in column_names]
    # No line terminator: the caller supplies one csv line per element.
    csv.writer(buff, lineterminator='').writerow(row)
    return buff.getvalue()
def serialize_example(transformed_json_data, features, feature_indices, target_name):
    """Makes an instance of data in libsvm format.

    Args:
        transformed_json_data: dict of transformed data.
        features: features config.
        feature_indices: output of feature_transforms.get_transformed_feature_indices()
        target_name: name of the target feature; its (first) value becomes the
            libsvm label at the start of the line.

    Returns:
        The text line representation of an instance in libsvm format.
    """
    # Unused `import six` and `import tensorflow as tf` removed; only the
    # feature_transforms constants are needed here.
    from trainer import feature_transforms

    # libsvm line: "<label> <index>:<value> <index>:<value> ..."
    line = str(transformed_json_data[target_name][0])
    for name, info in feature_indices:
        if features[name]['transform'] in [feature_transforms.IDENTITY_TRANSFORM,
                                           feature_transforms.SCALE_TRANSFORM]:
            # Numeric feature: single slot at index_start.
            line += ' %d:%s' % (info['index_start'], str(transformed_json_data[name][0]))
        elif features[name]['transform'] in [feature_transforms.ONE_HOT_TRANSFORM,
                                             feature_transforms.MULTI_HOT_TRANSFORM]:
            # Categorical: transformed value is a list of active indices;
            # emit 1 for each active slot.
            for i in range(info['size']):
                if i in transformed_json_data[name]:
                    line += ' %d:1' % (info['index_start'] + i)
        elif features[name]['transform'] in [feature_transforms.IMAGE_TRANSFORM]:
            # Image embedding: dense vector of `size` floats.
            for i in range(info['size']):
                line += ' %d:%s' % (info['index_start'] + i, str(transformed_json_data[name][i]))
    return line
def preprocess(pipeline, args):
    """Transform csv data into transformed libsvm-format text files.

    Outline:
    1) read the input data (as csv or bigquery) into a dict format
    2) replace image paths with base64 encoded image files
    3) build a csv input string with images paths replaced with base64. This
       matches the serving csv that a trained model would expect.
    4) batch the csv strings
    5) run the transformations
    6) write the results (libsvm lines plus a feature map) and save any errors.

    Args:
        pipeline: the beam Pipeline to attach transforms to.
        args: parsed arguments from parse_arguments().
    """
    import six
    from tensorflow.python.lib.io import file_io
    from trainer import feature_transforms

    # Load the artifacts produced by the analyze step.
    schema = json.loads(file_io.read_file_to_string(
        os.path.join(args.analysis, feature_transforms.SCHEMA_FILE)).decode())
    features = json.loads(file_io.read_file_to_string(
        os.path.join(args.analysis, feature_transforms.FEATURES_FILE)).decode())
    stats = json.loads(file_io.read_file_to_string(
        os.path.join(args.analysis, feature_transforms.STATS_FILE)).decode())

    column_names = [col['name'] for col in schema]

    if args.csv:
        # One labeled read per file pattern, flattened into a single source.
        all_files = []
        for i, file_pattern in enumerate(args.csv):
            all_files.append(pipeline | ('ReadCSVFile%d' % i) >> beam.io.ReadFromText(file_pattern))
        raw_data = (
            all_files
            | 'MergeCSVFiles' >> beam.Flatten()
            | 'ParseCSVData' >> beam.Map(decode_csv, column_names))
    else:
        # BigQuery source: select exactly the schema's columns.
        columns = ', '.join(column_names)
        query = 'SELECT {columns} FROM `{table}`'.format(columns=columns,
                                                         table=args.bigquery)
        raw_data = (
            pipeline
            | 'ReadBiqQueryData'
            >> beam.io.Read(beam.io.BigQuerySource(query=query,
                                                   use_standard_sql=True)))

    # Note that prepare_image_transforms does not make embeddings, it justs reads
    # the image files and converts them to byte stings. TransformFeaturesDoFn()
    # will make the image embeddings.
    image_columns = image_transform_columns(features)

    clean_csv_data = (
        raw_data
        | 'PreprocessTransferredLearningTransformations'
        >> beam.Map(prepare_image_transforms, image_columns)
        | 'BuildCSVString'
        >> beam.Map(encode_csv, column_names))

    if args.shuffle:
        clean_csv_data = clean_csv_data | 'ShuffleData' >> shuffle()

    # Batch lines so the TF graph runs once per batch instead of per row;
    # failures go to the tagged 'errors' output.
    transform_dofn = TransformFeaturesDoFn(args.analysis, features, schema, stats)
    (transformed_data, errors) = (
        clean_csv_data
        | 'Batch Input'
        >> beam.ParDo(EmitAsBatchDoFn(args.batch_size))
        | 'Run TF Graph on Batches'
        >> beam.ParDo(transform_dofn).with_outputs('errors', main='main'))

    # The target column supplies the libsvm label; there is exactly one
    # TARGET_TRANSFORM feature in a valid config.
    target_name = next((name for name, transform in six.iteritems(features)
                        if transform['transform'] == feature_transforms.TARGET_TRANSFORM),
                       None)
    feature_indices = feature_transforms.get_transformed_feature_indices(features, stats)

    _ = (transformed_data
         | 'SerializeExamples' >> beam.Map(serialize_example, features,
                                           feature_indices, target_name)
         | 'WriteExamples' >> beam.io.WriteToText(
             os.path.join(args.output, args.prefix),
             file_name_suffix='.libsvm',
             num_shards=1))

    feature_map = feature_transforms.create_feature_map(features, feature_indices, args.analysis)
    # Create the whole file content as one string to avoid dataflow reordering the entries.
    feature_map_content = ['\n'.join(['%d,%s' % x for x in feature_map])]

    _ = (pipeline
         | beam.Create(feature_map_content)
         | 'WriteFeatureMap'
         >> beam.io.WriteToText(
             os.path.join(args.output, 'featuremap'),
             file_name_suffix='.txt',
             num_shards=1))
def main(argv=None):
    """Run Preprocessing as a Dataflow.

    Args:
        argv: command line arguments; defaults to sys.argv.
    """
    args = parse_arguments(sys.argv if argv is None else argv)
    temp_dir = os.path.join(args.output, 'tmp')

    if args.cloud:
        pipeline_name = 'DataflowRunner'
    else:
        pipeline_name = 'DirectRunner'

    # Suppress TF warnings.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    options = {
        'job_name': args.job_name,
        'temp_location': temp_dir,
        'project': args.project_id,
        'setup_file':
            os.path.abspath(os.path.join(
                os.path.dirname(__file__),
                'setup.py')),
    }
    if args.num_workers:
        options['num_workers'] = args.num_workers
    if args.worker_machine_type:
        options['worker_machine_type'] = args.worker_machine_type

    pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)

    p = beam.Pipeline(pipeline_name, options=pipeline_options)
    preprocess(pipeline=p, args=args)
    pipeline_result = p.run()

    # 'async' is a reserved keyword in Python 3.7+, so read the argparse
    # attribute via getattr instead of args.async.
    if not getattr(args, 'async'):
        pipeline_result.wait_until_finish()
    if getattr(args, 'async') and args.cloud:
        print('View job at https://console.developers.google.com/dataflow/job/%s?project=%s' %
              (pipeline_result.job_id(), args.project_id))
|
|
# -*- coding: utf-8 -*-
#
# django-autocomplete-light documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 3 02:03:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# NOTE(review): hard-coded virtualenv layout — assumes docs are built from a
# python2.7 virtualenv two levels up; confirm when upgrading environments.
sys.path.insert(0, os.path.abspath('../../../lib/python2.7/site-packages/'))
sys.path.insert(0, os.path.abspath('../src/'))
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), "../test_project")))
# Point Django at the test project settings so autodoc can import app modules.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
import django
# django.setup() must run before autodoc imports any models.
django.setup()
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. 'djangodocs' is the local extension loaded from the _ext directory.
extensions = [
    'djangodocs',
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django-autocomplete-light'
copyright = u'2012-2016, James Pic & contributors'
author = u'James Pic & contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'3.0'
# The full version, including alpha/beta/rc tags.
release = u'3.0.3'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-autocomplete-lightdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django-autocomplete-light.tex', u'django-autocomplete-light Documentation',
u'James Pic \\& contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django-autocomplete-light', u'django-autocomplete-light Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django-autocomplete-light', u'django-autocomplete-light Documentation',
author, 'django-autocomplete-light', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
# Intersphinx inventories for cross-referencing external documentation.
# Use HTTPS throughout: the http:// variants only redirect, which can break
# objects.inv fetching behind strict proxies.
intersphinx_mapping = {
    'python': ('https://docs.python.org/2.7', None),
    'django': ('https://docs.djangoproject.com/en/dev/',
               'https://docs.djangoproject.com/en/dev/_objects/'),
    'pypa': ('https://pypa.io/en/latest/', None),
    'pip': ('https://pip.pypa.io/en/latest/', None),
    'pypug': ('https://packaging.python.org/en/latest/', None),
}
|
|
import io
import unittest
from unittest.mock import Mock, patch, PropertyMock
from src.gene import Gene
# Some mock functions
def no_start_no_stop(self, item):
    """Mock ``__contains__``: the mRNA has neither start nor stop codon."""
    return False


def start_stop(self, item):
    """Mock ``__contains__``: the mRNA has both start and stop codons."""
    return True


def start_no_stop(self, item):
    """Mock ``__contains__``: only a start codon is present."""
    return item == 'start_codon'


def no_start_stop(self, item):
    """Mock ``__contains__``: only a stop codon is present."""
    return item == 'stop_codon'
###################
class TestGene(unittest.TestCase):
    """Unit tests for the Gene class (src.gene.Gene).

    Fixes applied relative to the original:
      * removed a stray trailing backslash in test_gene_to_tbl_dbxref that
        made the whole module a SyntaxError;
      * replaced the deprecated ``assertEquals`` alias with ``assertEqual``;
      * replaced ``assertCalledWith``/``assertCalled`` — which are NOT Mock
        methods and therefore silently asserted nothing — with the real
        ``assert_called_with`` API.
    """

    def setUp(self):
        # One Gene with a mocked mRNA that owns a mocked CDS and exon; all
        # features span 1..100 so the tbl/phase tests have a known layout.
        self.gene1 = Gene()
        self.mrna1 = Mock()
        self.cds1 = Mock()
        self.exon1 = Mock()
        self.gene1.get_mrna = Mock(return_value=self.mrna1)
        self.mrna1.get_cds = Mock(return_value=self.cds1)
        self.mrna1.get_exon = Mock(return_value=self.exon1)
        self.gene1.start = 1
        self.gene1.end = 100
        self.gene1.attributes = {'ID': 'foo_gene'}
        self.mrna1.start = 1
        self.mrna1.end = 100
        self.mrna1.attributes = {'ID': 'm.foo'}
        self.cds1.start = 1
        self.cds1.end = 100
        self.cds1.phase = 0
        self.exon1.start = 1
        self.exon1.end = 100

    def test_from_gff_feature_success(self):
        # A GFF feature of type 'gene' converts to a Gene.
        gff_gene = Mock()
        gff_gene.type = "gene"
        tran_gene = Gene.from_gff_feature(gff_gene)
        self.assertTrue(tran_gene)

    def test_from_gff_features_fails(self):
        # Features whose type is not 'gene' must be rejected.
        gff_gene = Mock()
        gff_gene.type = "asdf"
        tran_gene = Gene.from_gff_feature(gff_gene)
        self.assertFalse(tran_gene)

    def test_get_cds_length(self):
        # Gene.get_cds_length delegates to the CDS of its (first) mRNA child.
        gene = Gene()
        mrna = Mock()
        cds = Mock()
        cds.length = Mock(return_value=42)
        mrna.get_cds = Mock(return_value=cds)
        gene.children = {'mrna': [mrna]}
        self.assertEqual(gene.get_cds_length(), 42)

    def test_remove_contig_from_gene_id(self):
        # Everything up to and including the '|' separator is stripped.
        expected = 'g.123'
        self.gene1.attributes['ID'] = 'contig123|g.123'
        self.gene1.remove_contig_from_gene_id()
        self.assertEqual(expected, self.gene1.attributes['ID'])

    ### TO-TBL TESTS ###
    # '<'/'>' mark 5'/3' partial ends when the start/stop codon is absent.

    def test_gene_to_tbl_nostart_nostop(self):
        expected = \
            "<1\t>100\tgene\n"\
            "\t\t\tlocus_tag\tfoo_gene\n"\
            "<1\t>100\tCDS\n"\
            "\t\t\tprotein_id\tm.foo\n"\
            "\t\t\tproduct\thypothetical protein\n"
        self.mrna1.__contains__ = no_start_no_stop
        tbl = self.gene1.to_tbl()
        self.assertEqual(tbl, expected)

    def test_gene_to_tbl_start_nostop(self):
        expected = \
            "1\t>100\tgene\n"\
            "\t\t\tlocus_tag\tfoo_gene\n"\
            "1\t>100\tCDS\n"\
            "\t\t\tprotein_id\tm.foo\n"\
            "\t\t\tproduct\thypothetical protein\n"
        self.mrna1.__contains__ = start_no_stop
        tbl = self.gene1.to_tbl()
        self.assertEqual(tbl, expected)

    def test_gene_to_tbl_nostart_stop(self):
        expected = \
            "<1\t100\tgene\n"\
            "\t\t\tlocus_tag\tfoo_gene\n"\
            "<1\t100\tCDS\n"\
            "\t\t\tprotein_id\tm.foo\n"\
            "\t\t\tproduct\thypothetical protein\n"
        self.mrna1.__contains__ = no_start_stop
        tbl = self.gene1.to_tbl()
        self.assertEqual(tbl, expected)

    def test_gene_to_tbl_start_stop(self):
        expected = \
            "1\t100\tgene\n"\
            "\t\t\tlocus_tag\tfoo_gene\n"\
            "1\t100\tCDS\n"\
            "\t\t\tprotein_id\tm.foo\n"\
            "\t\t\tproduct\thypothetical protein\n"
        self.mrna1.__contains__ = start_stop
        tbl = self.gene1.to_tbl()
        self.assertEqual(tbl, expected)

    def test_gene_to_tbl_genename(self):
        # A 'Name' attribute adds a 'gene' qualifier line.
        expected = \
            "<1\t>100\tgene\n"\
            "\t\t\tgene\tf00x4\n"\
            "\t\t\tlocus_tag\tfoo_gene\n"\
            "<1\t>100\tCDS\n"\
            "\t\t\tprotein_id\tm.foo\n"\
            "\t\t\tproduct\thypothetical protein\n"
        self.gene1.attributes["Name"] = "f00x4"
        self.mrna1.__contains__ = no_start_no_stop
        tbl = self.gene1.to_tbl()
        self.assertEqual(tbl, expected)

    def test_gene_to_tbl_dbxref(self):
        # BUGFIX: the original had a stray trailing backslash after the last
        # string line, joining the next statement into the assignment and
        # making the module a SyntaxError.
        expected = \
            "<1\t>100\tgene\n"\
            "\t\t\tlocus_tag\tfoo_gene\n"\
            "<1\t>100\tCDS\n"\
            "\t\t\tprotein_id\tm.foo\n"\
            "\t\t\tdb_xref\tPfam:foo\n"\
            "\t\t\tdb_xref\tPfam:dog\n"\
            "\t\t\tdb_xref\tPfam:baz\n"\
            "\t\t\tproduct\thypothetical protein\n"
        self.gene1.get_mrna().attributes["Dbxref"] = "Pfam:foo,Pfam:dog,Pfam:baz"
        self.mrna1.__contains__ = no_start_no_stop
        tbl = self.gene1.to_tbl()
        self.assertEqual(tbl, expected)

    ### FIX PHASE TESTS ###

    def test_fix_phase(self):
        # 5'-partial CDS starting at base 2 gets phase 1.
        self.gene1.start = 2
        self.mrna1.start = 2
        self.mrna1.__contains__ = no_start_stop
        self.cds1.start = 2
        self.cds1.phase = 0
        self.assertEqual(self.cds1.phase, 0)
        self.gene1.fix_phase("ATGC")
        self.assertEqual(self.cds1.phase, 1)

    def test_fix_phase_to_two(self):
        # 5'-partial CDS starting at base 3 gets phase 2.
        self.gene1.start = 3
        self.mrna1.start = 3
        self.mrna1.__contains__ = no_start_stop
        self.cds1.start = 3
        self.cds1.phase = 0
        self.assertEqual(self.cds1.phase, 0)
        self.gene1.fix_phase("ATGC")
        self.assertEqual(self.cds1.phase, 2)

    def test_fix_phase_does_nothing_when_indices_too_large(self):
        self.gene1.start = 4
        self.mrna1.start = 4
        self.mrna1.__contains__ = start_stop
        self.cds1.start = 4
        self.cds1.phase = 0
        self.assertEqual(self.cds1.phase, 0)
        self.gene1.fix_phase("ATGC")
        self.assertEqual(self.cds1.phase, 0)

    def test_fix_phase_works_on_cds_only(self):
        # Only the CDS start is adjusted; gene/mRNA starts stay at 1.
        self.gene1.start = 1
        self.mrna1.start = 1
        self.mrna1.__contains__ = no_start_stop
        self.cds1.start = 3
        self.cds1.phase = 0
        self.assertEqual(self.cds1.phase, 0)
        self.assertEqual(self.cds1.start, 3)
        self.gene1.fix_phase("ATGC")
        self.assertEqual(self.cds1.phase, 2)
        self.assertEqual(self.cds1.start, 1)

    def test_fix_phase_does_nothing_when_not_partial(self):
        # With both start and stop codons present the gene is complete and
        # the phase must not change.
        self.gene1.start = 2
        self.mrna1.start = 2
        self.mrna1.__contains__ = start_stop
        self.cds1.start = 2
        self.cds1.phase = 0
        self.assertEqual(self.cds1.phase, 0)
        self.gene1.fix_phase("ATGC")
        self.assertEqual(self.cds1.phase, 0)

    def test_fix_phase_adjusts_end_on_3prime_partial(self):
        # A 3'-partial CDS is extended to the end of the sequence.
        self.gene1.start = 2
        self.mrna1.start = 2
        self.mrna1.__contains__ = start_no_stop
        self.cds1.start = 2
        self.gene1.end = 2
        self.mrna1.end = 2
        self.cds1.end = 2
        self.assertEqual(self.cds1.end, 2)
        self.gene1.fix_phase("ATGC")
        self.assertEqual(self.cds1.end, 4)

    #### MAKE POSITIVE TESTS ####

    def test_make_positive(self):
        # Flipping a '-' strand gene of [1, 7] on a length-8 sequence
        # mirrors the coordinates to [2, 8] and recurses into children.
        seq_len = 8
        gene = Gene(start=1, end=7, strand='-')
        mrna = Mock()
        mrna.type = 'mrna'
        mrna.make_positive = Mock()
        gene.add_child(mrna)
        gene.make_positive(seq_len)
        self.assertEqual(gene.start, 2)
        self.assertEqual(gene.end, 8)
        self.assertEqual(gene.strand, '+')
        # BUGFIX: 'assertCalledWith' is not a Mock method — it silently
        # created a child mock and asserted nothing.
        mrna.make_positive.assert_called_with(seq_len)

    #### MATCH CDS AND EXON END TESTS ####

    def test_match_cds_and_exon_end(self):
        gene = Gene()
        mrna = Mock()
        gene.children = {'mrna': [mrna]}
        gene.match_cds_and_exon_end()
        # BUGFIX: was the no-op 'assertCalled()'.
        mrna.match_cds_and_exon_end.assert_called_with()

    #### STARTS AND STOPS TESTS ####

    def test_create_starts_and_stops(self):
        gene = Gene()
        mrna = Mock()
        gene.children = {'mrna': [mrna]}
        gene.create_starts_and_stops('ATGC')
        # BUGFIX: was the no-op 'assertCalledWith(...)'.
        mrna.create_starts_and_stops.assert_called_with('ATGC')
###################
def suite():
    """Collect all TestGene cases into a single test suite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(TestGene))
    return tests


if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python3
"""
This is an utility script that allows to generate EOS S3 IOMUX configuration
either from data in JSON format or from the given EBLIF netlist plus PCF
constraints of the FPGA design.
"""
import argparse
import csv
import json
import re
import sys
from lib.parse_pcf import parse_simple_pcf
from eblif import parse_blif
# =============================================================================
# Known IOB types and VPR cells that can be placed at their sites.
# Maps an IOB tile type (as it appears in the pin-map CSV 'type' column)
# to the list of VPR pb_type names accepted at that site.
IOB_TYPES = {
    "CLOCK": ["PB-CLOCK", ],
    "BIDIR": ["PB-BIDIR", ],
    "SDIOMUX": ["PB-SDIOMUX", ],
}
# Default configuration of the IOMUX pad. Any field not present in a pad's
# config entry falls back to these values.
PAD_DEFAULT = {
    "func_sel": 0,
    "ctrl_sel": 0,
    "mode": "none",
    "pull": "none",
    "drive": 2,
    "slew": "slow",
    "schmitt": 0
}

# Base address of the FBIO_SEL registers
FBIOSEL_BASE = 0x40004D80

# Base address of the IOMUX registers
IOMUX_BASE = 0x40004C00

# =============================================================================

# Bit-field encodings of the IOMUX_PAD_O_CTRL register, keyed by the
# symbolic values accepted in the pad configuration.
_CTRL_SEL_CODES = {"A0": 0, "others": 1, "fabric": 2}
# mode -> (output enable, receive enable)
_MODE_CODES = {"none": (0, 0), "input": (0, 1), "output": (1, 0), "inout": (1, 1)}
_PULL_CODES = {"none": 0, "up": 1, "down": 2, "keeper": 3}
_DRIVE_CODES = {2: 0, 4: 1, 8: 2, 12: 3}
_SLEW_CODES = {"slow": 0, "fast": 1}


def generate_iomux_register_content(config):
    """
    Generates a content of IOMUX registers according to the given config.

    Returns a dict mapping register address -> register value, covering one
    IOMUX_PAD_O_CTRL register per configured pad plus the two FBIO_SEL
    registers.
    """
    iomux_regs = {}

    # Build the IOMUX_PAD_O_CTRL register for each configured pad.
    for pad, pad_cfg in config["pads"].items():
        pad = int(pad)

        # Patch default settings with settings read from the config file.
        pad_cfg = dict(PAD_DEFAULT, **pad_cfg)

        # Validate every field first (same AssertionError behavior as the
        # original if/elif ladders), then assemble the register word.
        func_sel = pad_cfg["func_sel"]
        assert func_sel in [0, 1], func_sel
        ctrl_sel = pad_cfg["ctrl_sel"]
        assert ctrl_sel in ["A0", "others", "fabric"], ctrl_sel
        mode = pad_cfg["mode"]
        assert mode in ["none", "input", "output", "inout"], mode
        pull = pad_cfg["pull"]
        assert pull in ["none", "up", "down", "keeper"], pull
        drive = pad_cfg["drive"]
        assert drive in [2, 4, 8, 12], drive
        slew = pad_cfg["slew"]
        assert slew in ["slow", "fast"], slew
        schmitt = pad_cfg["schmitt"]
        assert schmitt in [0, 1], schmitt

        oen, ren = _MODE_CODES[mode]
        reg = (
            func_sel
            | (_CTRL_SEL_CODES[ctrl_sel] << 3)
            | (oen << 5)
            | (_PULL_CODES[pull] << 6)
            | (_DRIVE_CODES[drive] << 8)
            | (_SLEW_CODES[slew] << 10)
            | (ren << 11)
            | (schmitt << 12)
        )

        # One 32-bit register per pad.
        iomux_regs[IOMUX_BASE + pad * 4] = reg

    # Set the fabric-select bit for every configured pad in FBIO_SEL_1/2.
    fbio_sel = {0: 0, 1: 0}
    for pad in config["pads"]:
        idx = int(pad)
        fbio_sel[idx // 32] |= 1 << (idx % 32)
    iomux_regs[FBIOSEL_BASE + 0x0] = fbio_sel[0]
    iomux_regs[FBIOSEL_BASE + 0x4] = fbio_sel[1]

    return iomux_regs
# =============================================================================
def main():
    """
    Generate EOS S3 IOMUX register writes for an FPGA design.

    The pad configuration comes either from a JSON file ('--json') or from an
    EBLIF netlist combined with PCF constraints ('--eblif' + '--pcf'). The
    resulting register contents are emitted in the requested output format
    (openocd, jlink or binary).
    """
    # Parse arguments
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--json",
        default=None,
        type=str,
        help="Read IOMUX configuration from the given JSON file"
    )
    parser.add_argument(
        "--eblif",
        default=None,
        type=str,
        help="EBLIF netlist file of a design"
    )
    parser.add_argument(
        "--pcf",
        default=None,
        type=str,
        help="PCF constraints file for a design"
    )
    parser.add_argument(
        "--map",
        "-m",
        "-M",
        type=argparse.FileType('r'),
        required=True,
        help='Pin map CSV file'
    )
    parser.add_argument(
        "--output-format",
        default=None,
        type=str,
        # FIX: 'binary' was supported below but undocumented here.
        help='Output format of IOMUX commands (openocd/jlink/binary)'
    )
    args = parser.parse_args()

    # Read the requested configuration from a JSON file
    if args.json is not None:
        if args.pcf is not None or args.eblif is not None:
            print("Use either '--json' or '--pcf' + '--eblif' options!")
            exit(-1)
        with open(args.json, "r") as fp:
            config = json.load(fp)

    # Generate the config according to the EBLIF netlist and PCF constraints.
    else:
        # FIX: the original also re-checked 'args.json is not None' here,
        # which is always False in this branch; only the EBLIF+PCF presence
        # check is meaningful.
        if args.eblif is None or args.pcf is None:
            print("Use either '--json' or '--pcf' + '--eblif' options!")
            exit(-1)

        # Build pad name <-> alias maps from the pin-map CSV, keeping only
        # rows whose tile type hosts an IOB.
        pad_map = {}
        pad_alias_map = {}
        for pin_map_entry in csv.DictReader(args.map):
            if pin_map_entry['type'] not in IOB_TYPES:
                continue
            name = pin_map_entry['name']
            alias = ""
            if 'alias' in pin_map_entry:
                alias = pin_map_entry['alias']
                pad_alias_map[alias] = name
                pad_map[name] = alias
            else:
                pad_map[name] = name

        # Read and parse PCF
        with open(args.pcf, "r") as fp:
            pcf = list(parse_simple_pcf(fp))

        # Read and parse BLIF/EBLIF
        with open(args.eblif, "r") as fp:
            eblif = parse_blif(fp)

        # Build the config
        config = {"pads": {}}
        eblif_inputs = eblif["inputs"]["args"]
        eblif_outputs = eblif["outputs"]["args"]

        for constraint in pcf:
            pad_name = constraint.pad
            if pad_name not in pad_map and pad_name not in pad_alias_map:
                print(
                    "PCF constraint '{}' from line {} constraints pad {} "
                    "which is not in available pad map:\n{}".format(
                        constraint.line_str, constraint.line_num, pad_name,
                        '\n'.join(sorted(pad_map.keys()))
                    ),
                    file=sys.stderr
                )
                sys.exit(1)

            # get pad alias to get IO pad count
            pad_alias = ""
            if pad_name in pad_map:
                pad_alias = pad_map[pad_name]
            # Alias is specified in pcf file so assign it to corresponding pad name
            if pad_name in pad_alias_map:
                pad_alias = pad_name

            # The IOMUX pad index comes from the 'IO_<n>' alias.
            pad = None
            match = re.match(r"^IO_([0-9]+)$", pad_alias)
            if match is not None:
                pad = int(match.group(1))

            # Pad not found or out of range (the device has 46 IO pads).
            if pad is None or pad < 0 or pad >= 46:
                continue

            # Detect inouts: VPR splits an inout net into '<net>_$inp' and
            # '<net>_$out' ports.
            is_inout_in = constraint.net + '_$inp' in eblif_inputs
            is_inout_out = constraint.net + '_$out' in eblif_outputs
            if is_inout_in and is_inout_out:
                pad_config = {
                    "ctrl_sel": "fabric",
                    "mode": "inout",
                }
            # Configure as input
            elif constraint.net in eblif_inputs:
                pad_config = {
                    "ctrl_sel": "fabric",
                    "mode": "input",
                }
            # Configure as output
            elif constraint.net in eblif_outputs:
                pad_config = {
                    "ctrl_sel": "fabric",
                    "mode": "output",
                }
            else:
                assert False, (constraint.net, constraint.pad)

            config["pads"][str(pad)] = pad_config

    # Convert the config to IOMUX register content
    iomux_regs = generate_iomux_register_content(config)

    if args.output_format == "openocd":
        # Output openOCD commands
        for adr in sorted(iomux_regs.keys()):
            print(" mww 0x{:08x} 0x{:08x}".format(adr, iomux_regs[adr]))
    elif args.output_format == "jlink":
        # Output JLink commands
        for adr in sorted(iomux_regs.keys()):
            print("w4 0x{:08x} 0x{:08x}".format(adr, iomux_regs[adr]))
    elif args.output_format == "binary":
        # Output binary file: <REGADDR 4B><REGVAL 4B>...
        for adr in sorted(iomux_regs.keys()):
            # first the address, raw little-endian bytes (bypass print())
            addr_bytes = int(adr).to_bytes(4, byteorder='little')
            sys.stdout.buffer.write(addr_bytes)
            # second the value, raw little-endian bytes (bypass print())
            val_bytes = int(iomux_regs[adr]).to_bytes(4, byteorder='little')
            sys.stdout.buffer.write(val_bytes)
    else:
        print("Use either 'openocd' or 'jlink' or 'binary' output format!")
        exit(-1)
# =============================================================================
# Script entry point.
if __name__ == "__main__":
    main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cond_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
class CondV2Test(test.TestCase):
  def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
    """Checks that cond_v2 matches control_flow_ops.cond.

    Builds both implementations over the same branch functions, then compares
    forward values and gradients w.r.t. train_vals for pred=True and
    pred=False.
    """
    if not feed_dict:
      feed_dict = {}
    with self.session(graph=ops.get_default_graph()) as sess:
      pred = array_ops.placeholder(dtypes.bool, name="pred")
      expected = control_flow_ops.cond(pred, true_fn, false_fn, name="expected")
      actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual")
      expected_grad = gradients_impl.gradients(expected, train_vals)
      actual_grad = gradients_impl.gradients(actual, train_vals)
      # True branch: values and gradients must agree.
      sess_run_args = {pred: True}
      sess_run_args.update(feed_dict)
      expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
          (expected, actual, expected_grad, actual_grad), sess_run_args)
      self.assertEqual(expected_val, actual_val)
      self.assertEqual(expected_grad_val, actual_grad_val)
      # False branch: same comparison.
      sess_run_args = {pred: False}
      sess_run_args.update(feed_dict)
      expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
          (expected, actual, expected_grad, actual_grad), sess_run_args)
      self.assertEqual(expected_val, actual_val)
      self.assertEqual(expected_grad_val, actual_grad_val)
  @test_util.run_deprecated_v1
  def testBasic(self):
    """cond_v2 matches cond for simple single-output branches."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")

    def true_fn():
      return x * 2.0

    def false_fn():
      return y * 3.0

    # Exercise gradients w.r.t. each subset of the inputs.
    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  def testExternalControlDependencies(self):
    """Control deps on ops outside the branch function must still run."""
    with ops.Graph().as_default(), self.test_session():
      v = variables.Variable(1.0)
      v.initializer.run()
      op = v.assign_add(1.0)

      def true_branch():
        # The dependency on 'op' (created outside the branch) must be
        # honored when the branch executes.
        with ops.control_dependencies([op]):
          return 1.0

      cond_v2.cond_v2(array_ops.placeholder_with_default(False, None),
                      true_branch,
                      lambda: 2.0).eval()
      # v was incremented exactly once via the control dependency.
      self.assertAllEqual(self.evaluate(v), 2.0)
  @test_util.run_deprecated_v1
  def testMultipleOutputs(self):
    """cond_v2 matches cond when the branches return multiple tensors."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(3.0, name="y")

    def true_fn():
      return x * y, y

    def false_fn():
      return x, y * 3.0

    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  @test_util.run_deprecated_v1
  def testBasic2(self):
    """cond_v2 matches cond when one branch is a constant expression."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")

    def true_fn():
      return x * y * 2.0

    def false_fn():
      return 2.0

    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  @test_util.run_deprecated_v1
  def testNoInputs(self):
    """cond_v2 works when the branches capture no external tensors."""
    with self.cached_session() as sess:
      pred = array_ops.placeholder(dtypes.bool, name="pred")

      def true_fn():
        return constant_op.constant(1.0)

      def false_fn():
        return constant_op.constant(2.0)

      out = cond_v2.cond_v2(pred, true_fn, false_fn)

      # NOTE: the result is a 1-tuple of outputs here.
      self.assertEqual(sess.run(out, {pred: True}), (1.0,))
      self.assertEqual(sess.run(out, {pred: False}), (2.0,))
  def _createCond(self, name):
    """Creates a cond_v2 call and returns the output tensor and the cond op."""
    pred = constant_op.constant(True, name="pred")
    x = constant_op.constant(1.0, name="x")

    def true_fn():
      return x

    def false_fn():
      return x + 1

    output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
    # The returned tensor wraps the underlying 'If' op's output; unwrap it.
    cond_op = output.op.inputs[0].op
    self.assertEqual(cond_op.type, "If")
    return output, cond_op
  def _createNestedCond(self, name):
    """Like _createCond but creates a nested cond_v2 call as well."""
    pred = constant_op.constant(True, name="pred")
    x = constant_op.constant(1.0, name="x")

    def true_fn():
      # Inner cond nested inside the true branch.
      return cond_v2.cond_v2(pred, lambda: x, lambda: x + 1)

    def false_fn():
      return x + 2

    output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
    # Unwrap the outer 'If' op from the returned tensor.
    cond_op = output.op.inputs[0].op
    self.assertEqual(cond_op.type, "If")
    return output, cond_op
  def testDefaultName(self):
    """Default op/branch-function names are derived from 'cond' + name scope.

    NOTE(review): assertRegexpMatches is the deprecated alias of assertRegex;
    presumably kept for Python 2 compatibility in this file.
    """
    with ops.Graph().as_default():
      _, cond_op = self._createCond(None)
      self.assertEqual(cond_op.name, "cond")
      self.assertRegexpMatches(
          cond_op.get_attr("then_branch").name, r"cond_true_\d*")
      self.assertRegexpMatches(
          cond_op.get_attr("else_branch").name, r"cond_false_\d*")

    with ops.Graph().as_default():
      with ops.name_scope("foo"):
        # An empty name behaves like None inside a name scope.
        _, cond1_op = self._createCond("")
        self.assertEqual(cond1_op.name, "foo/cond")
        self.assertRegexpMatches(
            cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*")
        self.assertRegexpMatches(
            cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*")

        # A second cond in the same scope gets a uniquified suffix.
        _, cond2_op = self._createCond(None)
        self.assertEqual(cond2_op.name, "foo/cond_1")
        self.assertRegexpMatches(
            cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*")
        self.assertRegexpMatches(
            cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
  @test_util.run_v1_only("b/120545219")
  def testDefunInCond(self):
    """A defun-wrapped computation inside a branch matches cond."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")

    def true_fn():

      @function.defun
      def fn():
        return x * y * 2.0

      return fn()

    def false_fn():
      return 2.0

    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  @test_util.run_deprecated_v1
  def testNestedDefunInCond(self):
    """A defun nested inside another defun inside a branch matches cond."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")

    def true_fn():
      return 2.0

    def false_fn():

      @function.defun
      def fn():

        @function.defun
        def nested_fn():
          return x * y * 2.0

        return nested_fn()

      return fn()

    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  @test_util.run_deprecated_v1
  def testDoubleNestedDefunInCond(self):
    """Three levels of defun nesting inside a branch still match cond."""
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")

    def true_fn():

      @function.defun
      def fn():

        @function.defun
        def nested_fn():

          @function.defun
          def nested_nested_fn():
            return x * y * 2.0

          return nested_nested_fn()

        return nested_fn()

      return fn()

    def false_fn():
      return 2.0

    self._testCond(true_fn, false_fn, [x])
    self._testCond(true_fn, false_fn, [x, y])
    self._testCond(true_fn, false_fn, [y])
  def testNestedCond(self):
    """A cond nested inside one branch matches the v1 behavior.

    '_cond' is a module-level helper defined elsewhere in this file —
    presumably a thin wrapper selecting the cond implementation; confirm
    against the full module.
    """

    def run_test(pred_value):

      def build_graph():
        pred = array_ops.placeholder(dtypes.bool, name="pred")
        x = constant_op.constant(1.0, name="x")
        y = constant_op.constant(2.0, name="y")

        def true_fn():
          return 2.0

        def false_fn():

          def false_true_fn():
            return x * y * 2.0

          def false_false_fn():
            return x * 5.0

          return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn")

        return x, y, pred, true_fn, false_fn

      with ops.Graph().as_default():
        x, y, pred, true_fn, false_fn = build_graph()
        self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
        self._testCond(true_fn, false_fn, [x], {pred: pred_value})
        self._testCond(true_fn, false_fn, [y], {pred: pred_value})

    run_test(True)
    run_test(False)
def testNestedCondBothBranches(self):
  """Both branches of the outer cond contain their own nested cond."""

  def check(outcome):
    with ops.Graph().as_default():
      pred = array_ops.placeholder(dtypes.bool, name="pred")
      x = constant_op.constant(1.0, name="x")
      y = constant_op.constant(2.0, name="y")

      def outer_true():
        return _cond(pred, lambda: x + y, lambda: x * x, name=None)

      def outer_false():
        return _cond(pred, lambda: x - y, lambda: y * y, name=None)

      # Check value and gradients for every subset of the inputs.
      for grad_vars in ([x, y], [x], [y]):
        self._testCond(outer_true, outer_false, grad_vars, {pred: outcome})

  check(True)
  check(False)
def testDoubleNestedCond(self):
  """A cond nested two levels deep inside the outer cond's false branch."""

  def run_test(pred1_value, pred2_value):

    def build_graph():
      pred1 = array_ops.placeholder(dtypes.bool, name="pred1")
      pred2 = array_ops.placeholder(dtypes.bool, name="pred2")
      x = constant_op.constant(1.0, name="x")
      y = constant_op.constant(2.0, name="y")

      def true_fn():
        return 2.0

      def false_fn():

        def false_true_fn():

          def false_true_true_fn():
            return x * y * 2.0

          def false_true_false_fn():
            return x * 10.0

          return _cond(
              pred1,
              false_true_true_fn,
              false_true_false_fn,
              name="inside_false_true_fn")

        def false_false_fn():
          return x * 5.0

        return _cond(
            pred2, false_true_fn, false_false_fn, name="inside_false_fn")

      return x, y, pred1, pred2, true_fn, false_fn

    with ops.Graph().as_default():
      # The ops are rebuilt before each _testCond call so the three checks
      # do not share cond structures within the graph.
      x, y, pred1, pred2, true_fn, false_fn = build_graph()
      self._testCond(true_fn, false_fn, [x, y], {
          pred1: pred1_value,
          pred2: pred2_value
      })
      x, y, pred1, pred2, true_fn, false_fn = build_graph()
      self._testCond(true_fn, false_fn, [x], {
          pred1: pred1_value,
          pred2: pred2_value
      })
      x, y, pred1, pred2, true_fn, false_fn = build_graph()
      self._testCond(true_fn, false_fn, [y], {
          pred1: pred1_value,
          pred2: pred2_value
      })

  # All four combinations of the two predicates.
  run_test(True, True)
  run_test(True, False)
  run_test(False, False)
  run_test(False, True)
def testGradientFromInsideDefun(self):
  """Gradients of an already-built nested cond are taken inside a defun."""

  def build_graph():
    pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
    pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")

    def true_fn():
      return 2.0

    def false_fn():

      def inner_true_fn():
        return x * y * 2.0

      def inner_false_fn():
        return x * 5.0

      return cond_v2.cond_v2(
          pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")

    cond_outer = cond_v2.cond_v2(
        pred_outer, true_fn, false_fn, name="outer_cond")

    # Compute grads inside a Defun.
    @function.defun
    def nesting_fn():
      return gradients_impl.gradients(cond_outer, [x, y])

    grads = nesting_fn()

    return grads, pred_outer, pred_inner

  with ops.Graph().as_default():
    grads, pred_outer, pred_inner = build_graph()

    with self.session(graph=ops.get_default_graph()) as sess:
      # Outer true branch is the constant 2.0: zero gradient w.r.t. x and y.
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: True,
              pred_inner: True
          }), [0., 0.])
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: True,
              pred_inner: False
          }), [0., 0.])
      # d(2xy)/dx = 2y = 4, d(2xy)/dy = 2x = 2.
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: False,
              pred_inner: True
          }), [4., 2.])
      # d(5x)/dx = 5; no dependence on y.
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: False,
              pred_inner: False
          }), [5., 0.])
def testGradientFromInsideNestedDefun(self):
  """Like testGradientFromInsideDefun, but grads are taken two defuns deep."""

  def build_graph():
    pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
    pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")

    def true_fn():
      return 2.0

    def false_fn():

      def inner_true_fn():
        return x * y * 2.0

      def inner_false_fn():
        return x * 5.0

      return cond_v2.cond_v2(
          pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")

    cond_outer = cond_v2.cond_v2(
        pred_outer, true_fn, false_fn, name="outer_cond")

    # Compute grads inside a Defun.
    @function.defun
    def nesting_fn():

      @function.defun
      def inner_nesting_fn():
        return gradients_impl.gradients(cond_outer, [x, y])

      return inner_nesting_fn()

    grads = nesting_fn()

    return grads, pred_outer, pred_inner

  with ops.Graph().as_default():
    grads, pred_outer, pred_inner = build_graph()

    with self.session(graph=ops.get_default_graph()) as sess:
      # Outer true branch is the constant 2.0: zero gradient w.r.t. x and y.
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: True,
              pred_inner: True
          }), [0., 0.])
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: True,
              pred_inner: False
          }), [0., 0.])
      # d(2xy)/dx = 2y = 4, d(2xy)/dy = 2x = 2.
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: False,
              pred_inner: True
          }), [4., 2.])
      # d(5x)/dx = 5; no dependence on y.
      self.assertSequenceEqual(
          sess.run(grads, {
              pred_outer: False,
              pred_inner: False
          }), [5., 0.])
def testBuildCondAndGradientInsideDefun(self):
  """Both the nested cond AND its gradients are constructed inside a defun."""

  def build_graph():
    pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
    pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(2.0, name="y")

    # Build cond and its gradient inside a Defun.
    @function.defun
    def fn():

      def true_fn():
        return 2.0

      def false_fn():

        def inner_true_fn():
          return x * y * 2.0

        def inner_false_fn():
          return x * 5.0

        return cond_v2.cond_v2(
            pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")

      cond_outer = cond_v2.cond_v2(
          pred_outer, true_fn, false_fn, name="outer_cond")
      return gradients_impl.gradients(cond_outer, [x, y])

    grads = fn()

    return grads, pred_outer, pred_inner

  with ops.Graph().as_default(), self.session(
      graph=ops.get_default_graph()) as sess:
    grads, pred_outer, pred_inner = build_graph()
    # Outer true branch is the constant 2.0: zero gradient w.r.t. x and y.
    self.assertSequenceEqual(
        sess.run(grads, {
            pred_outer: True,
            pred_inner: True
        }), [0., 0.])
    self.assertSequenceEqual(
        sess.run(grads, {
            pred_outer: True,
            pred_inner: False
        }), [0., 0.])
    # d(2xy)/dx = 2y = 4, d(2xy)/dy = 2x = 2.
    self.assertSequenceEqual(
        sess.run(grads, {
            pred_outer: False,
            pred_inner: True
        }), [4., 2.])
    # d(5x)/dx = 5; no dependence on y.
    self.assertSequenceEqual(
        sess.run(grads, {
            pred_outer: False,
            pred_inner: False
        }), [5., 0.])
@test_util.run_deprecated_v1
def testSecondDerivative(self):
  """First and second derivatives of a cond over x**3 vs x, at x = 3."""
  with self.cached_session() as sess:
    pred = array_ops.placeholder(dtypes.bool, name="pred")
    x = constant_op.constant(3.0, name="x")

    def true_fn():
      return math_ops.pow(x, 3)

    def false_fn():
      return x

    cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
    cond_grad = gradients_impl.gradients(cond, [x])
    cond_grad_grad = gradients_impl.gradients(cond_grad, [x])

    # d[x^3]/dx = 3x^2
    true_val = sess.run(cond_grad, {pred: True})
    self.assertEqual(true_val, [27.0])

    # d[x]/dx = 1
    false_val = sess.run(cond_grad, {pred: False})
    self.assertEqual(false_val, [1.0])

    true_val = sess.run(cond_grad_grad, {pred: True})
    # d2[x^3]/dx2 = 6x
    self.assertEqual(true_val, [18.0])

    false_val = sess.run(cond_grad_grad, {pred: False})
    # d2[x]/dx2 = 0
    self.assertEqual(false_val, [0.0])
def testGradientOfDeserializedCond(self):
  """Gradients still work on a cond round-tripped through a MetaGraphDef."""
  with ops.Graph().as_default():
    pred = array_ops.placeholder(dtypes.bool, name="pred")
    x = constant_op.constant(3.0, name="x")
    # Collections are how the tensors are recovered after import below.
    ops.add_to_collection("x", x)

    def true_fn():
      return math_ops.pow(x, 3)

    def false_fn():
      return x

    ops.add_to_collection("pred", pred)
    cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
    ops.add_to_collection("cond", cond)
    meta_graph = saver.export_meta_graph()

  with ops.Graph().as_default() as g:
    with self.session(graph=g) as sess:
      saver.import_meta_graph(meta_graph)
      x = ops.get_collection("x")[0]
      pred = ops.get_collection("pred")[0]
      # Note: fetched as the whole collection list, not a single element.
      cond = ops.get_collection("cond")
      cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad")
      cond_grad_grad = gradients_impl.gradients(
          cond_grad, [x], name="cond_grad_grad")

      # d[x^3]/dx = 3x^2
      true_val = sess.run(cond_grad, {pred: True})
      self.assertEqual(true_val, [27.0])

      # d[x]/dx = 1
      false_val = sess.run(cond_grad, {pred: False})
      self.assertEqual(false_val, [1.0])

      true_val = sess.run(cond_grad_grad, {pred: True})
      # d2[x^3]/dx2 = 6x
      self.assertEqual(true_val, [18.0])

      false_val = sess.run(cond_grad_grad, {pred: False})
      # d2[x]/dx2 = 0
      self.assertEqual(false_val, [0.0])
def testGradientTapeOfCondWithResourceVariableInFunction(self):
  """GradientTape differentiates through a cond over a resource variable."""
  with context.eager_mode():
    v = variables.Variable(2.)

    @def_function.function
    def fnWithCond():  # pylint: disable=invalid-name
      with backprop.GradientTape() as tape:
        pred = constant_op.constant(True, dtype=dtypes.bool)

        def cube():
          return math_ops.pow(v, 3)

        def identity():
          return v

        result = cond_v2.cond_v2(pred, cube, identity, name="cond")
      return tape.gradient(result, v)

    # True branch: d(v^3)/dv at v = 2 is 3 * 2^2 = 12.
    self.assertAllEqual(fnWithCond(), 12.0)
def testLowering(self):
  """By default cond_v2 is lowered: Switch ops run, no If op remains."""
  with ops.Graph().as_default() as g:
    with self.session(graph=g) as sess:
      cond_output, _ = self._createCond("cond")

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      run_metadata = config_pb2.RunMetadata()
      sess.run(cond_output, options=run_options, run_metadata=run_metadata)

      def op_present(op_type):
        # Scan every node of every partitioned graph that actually ran.
        return any(node.op == op_type
                   for graph in run_metadata.partition_graphs
                   for node in graph.node)

      self.assertTrue(op_present("Switch"),
                      "A `Switch` op should exist if the graph was lowered.")
      self.assertFalse(op_present("If"),
                       "An `If` op was found, but it should be lowered.")
@test_util.run_deprecated_v1
def testLoweringDisabledInXLA(self):
  """Inside an XLA context the If op must NOT be lowered to Switch/Merge."""
  with self.session(graph=ops.Graph()) as sess:
    # Build the cond_v2 in an XLA context
    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    cond_output, cond_op = self._createCond("cond")
    xla_context.Exit()

    # Check lowering attr is not set.
    with self.assertRaises(ValueError):
      cond_op.get_attr("_lower_using_switch_merge")

    # Check the actual graph that is run.
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    run_metadata = config_pb2.RunMetadata()
    sess.run(cond_output, options=run_options, run_metadata=run_metadata)

    # Lowering disabled in XLA, there should be no `Switch` node
    switch_found = any(
        any(node.op == "Switch" for node in graph.node)
        for graph in run_metadata.partition_graphs
    )

    self.assertFalse(
        switch_found,
        "A `Switch` op exists, but the graph should not be lowered.")

    # Lowering disabled in XLA, there should still be an `If` node
    if_found = any(
        any(node.op == "If" for node in graph.node)
        for graph in run_metadata.partition_graphs
    )

    self.assertTrue(
        if_found,
        "An `If` op was not found, but the graph should not be lowered.")
@test_util.run_deprecated_v1
def testNestedLoweringDisabledInXLA(self):
  """Neither the outer nor the nested If gets the lowering attr under XLA."""
  # Build the cond_v2 in an XLA context
  xla_context = control_flow_ops.XLAControlFlowContext()
  xla_context.Enter()
  _, cond_op = self._createNestedCond("cond")
  xla_context.Exit()

  # Check lowering attr is not set for either If node.
  with self.assertRaises(ValueError):
    cond_op.get_attr("_lower_using_switch_merge")

  # The nested If lives inside a branch function; scan every registered
  # function graph to find it (relies on the private _functions map).
  nested_if_ops = []
  for func in ops.get_default_graph()._functions.values():
    nested_if_ops.extend(op for op in func.graph.get_operations()
                         if op.type == "If")
  self.assertEqual(len(nested_if_ops), 1)
  with self.assertRaises(ValueError):
    nested_if_ops[0].get_attr("_lower_using_switch_merge")

  # TODO(skyewm): check the actual graphs that are run once we have a way to
  # programmatically access those graphs.
@test_util.run_deprecated_v1
def testLoweringDisabledWithSingleThreadedExecutorContext(self):
  """cond must stay a functional If op under the single-threaded executor."""
  with self.session(graph=ops.Graph()) as sess:

    @function.defun
    def _add_cond(x):
      return cond_v2.cond_v2(
          constant_op.constant(True, name="pred"),
          lambda: x,
          lambda: x + 1)

    x = array_ops.placeholder(shape=None, dtype=dtypes.float32)
    with context.function_executor_type("SINGLE_THREADED_EXECUTOR"):
      out_cond = _add_cond(x)

    # The fact that sess.run() succeeds means lowering is disabled, because
    # the single threaded executor does not support cond v1 ops.
    sess.run(out_cond, feed_dict={x: 1.0})
@test_util.enable_control_flow_v2
def testStructuredOutputs(self):
  """cond preserves a nested ((tensor,), tensor) output structure."""
  x = constant_op.constant(1.0, name="x")
  y = constant_op.constant(3.0, name="y")

  def when_true():
    return ((x * y,), y)

  def when_false():
    return ((x,), y * 3.0)

  output = control_flow_ops.cond(
      constant_op.constant(False), when_true, when_false)

  # Predicate is False, so the false branch's values come out: x and 3y.
  self.assertEqual(self.evaluate(output[0][0]), 1.)
  self.assertEqual(self.evaluate(output[1]), 9.)
@test_util.enable_control_flow_v2
@test_util.run_deprecated_v1
def testRaisesOutputStructuresMismatch(self):
  """Branches returning differently-nested structures raise a TypeError."""
  x = constant_op.constant(1.0, name="x")
  y = constant_op.constant(3.0, name="y")

  def true_fn():
    # A flat 2-tuple ...
    return x * y, y

  def false_fn():
    # ... versus a nested ((tensor,), tensor) structure.
    return ((x,), y * 3.0)

  with self.assertRaisesRegexp(
      TypeError, "true_fn and false_fn arguments to tf.cond must have the "
      "same number, type, and overall structure of return values."):
    control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
@test_util.enable_control_flow_v2
def testCondAndTensorArray(self):
  """A cond writing to a TensorArray inside a while_loop: square positives."""
  x = math_ops.range(-5, 5)
  output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])

  def loop_body(i, output):

    def if_true():
      return output.write(i, x[i]**2)

    def if_false():
      return output.write(i, x[i])

    # Each branch returns the updated TensorArray flow.
    output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
    return i + 1, output

  _, output = control_flow_ops.while_loop(
      lambda i, arr: i < x.shape[0],
      loop_body,
      loop_vars=(constant_op.constant(0), output))

  output_t = output.stack()
  # Negatives and zero pass through; positive entries are squared.
  self.assertAllEqual(
      self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.enable_control_flow_v2
def testCondAndTensorArrayInDefun(self):
  """Same cond + TensorArray + while_loop combination, built inside a defun."""

  @function.defun
  def f():
    x = math_ops.range(-5, 5)
    output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])

    def loop_body(i, output):

      def if_true():
        return output.write(i, x[i]**2)

      def if_false():
        return output.write(i, x[i])

      output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
      return i + 1, output

    _, output = control_flow_ops.while_loop(
        lambda i, arr: i < x.shape[0],
        loop_body,
        loop_vars=(constant_op.constant(0), output))

    return output.stack()

  output_t = f()
  # Negatives and zero pass through; positive entries are squared.
  self.assertAllEqual(
      self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
@test_util.run_deprecated_v1
def testForwardPassRewrite(self):
  """Taking gradients rewrites the forward If op to expose intermediates,
  and doing so a second time must not rewrite it again."""
  x = constant_op.constant(1.0, name="x")
  output = cond_v2.cond_v2(constant_op.constant(True),
                           lambda: x * 2.0,
                           lambda: x)
  # The cond's output tensor is fed by the underlying functional If op.
  if_op = output.op.inputs[0].op
  self.assertEqual(if_op.type, "If")
  # pylint: disable=g-deprecated-assert
  self.assertEqual(len(if_op.outputs), 1)

  gradients_impl.gradients(output, x)
  # if_op should have been rewritten to output 2.0 intermediate.
  self.assertEqual(len(if_op.outputs), 2)

  gradients_impl.gradients(output, x)
  # Computing the gradient again shouldn't rewrite if_op again.
  self.assertEqual(len(if_op.outputs), 2)
  # pylint: enable=g-deprecated-assert
class CondV2CollectionTest(test.TestCase):
  """Verifies graph-collection reads and writes from inside cond_v2 branches.

  Note: `assertEquals` (a deprecated unittest alias) was replaced by
  `assertEqual` throughout; behavior is unchanged.
  """

  def testCollectionIntValueAccessInCond(self):
    """Read values from graph collections inside of cond_v2."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        x = 2
        y = 5
        ops.add_to_collection("x", x)
        ops.add_to_collection("y", y)

        def fn():
          # Collections of the outer graph are visible while the branch
          # function is being built.
          x_const = constant_op.constant(ops.get_collection("x")[0])
          y_const = constant_op.constant(ops.get_collection("y")[0])
          return math_ops.add(x_const, y_const)

        cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
        self.assertEqual(cnd.eval(), 7)

  def testCollectionTensorValueAccessInCond(self):
    """Read tensors from collections inside of cond_v2 & use them."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        x = constant_op.constant(2)
        y = constant_op.constant(5)
        ops.add_to_collection("x", x)
        ops.add_to_collection("y", y)

        def fn():
          # Outer-graph tensors fetched from a collection become captured
          # inputs of the branch function.
          x_read = ops.get_collection("x")[0]
          y_read = ops.get_collection("y")[0]
          return math_ops.add(x_read, y_read)

        cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
        self.assertEqual(cnd.eval(), 7)

  def testCollectionIntValueWriteInCond(self):
    """Make sure Int writes to collections work inside of cond_v2."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        x = constant_op.constant(2)
        y = constant_op.constant(5)

        def true_fn():
          z = math_ops.add(x, y)
          ops.add_to_collection("z", 7)
          return math_ops.mul(x, z)

        def false_fn():
          z = math_ops.add(x, y)
          return math_ops.mul(x, z)

        cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
        self.assertEqual(cnd.eval(), 14)

        # The write performed while tracing true_fn is reflected in the
        # outer graph's collection.
        read_z_collection = ops.get_collection("z")
        self.assertEqual(read_z_collection, [7])
class CondV2ContainerTest(test.TestCase):
  """Checks that ops.container scopes propagate into cond_v2 branch graphs.

  Note: `assertEquals` (a deprecated unittest alias) was replaced by
  `assertEqual`; behavior is unchanged.
  """

  def testContainer(self):
    """Set containers outside & inside of cond_v2.

    Make sure the containers are set correctly for both variable creation
    (tested by variables.Variable) and for stateful ops (tested by FIFOQueue)
    """
    self.skipTest("b/113048653")
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        v0 = variables.Variable([0])
        q0 = data_flow_ops.FIFOQueue(1, dtypes.float32)

        def container(node):
          # The container name is recorded as an attribute on the op.
          return node.op.get_attr("container")

        self.assertEqual(compat.as_bytes(""), container(v0))
        self.assertEqual(compat.as_bytes(""), container(q0.queue_ref))

        def true_fn():
          # When this branch is created in cond below,
          # the container should begin with 'l1'
          v1 = variables.Variable([1])
          q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          with ops.container("l2t"):
            v2 = variables.Variable([2])
            q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          v3 = variables.Variable([1])
          q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          self.assertEqual(compat.as_bytes("l1"), container(v1))
          self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
          self.assertEqual(compat.as_bytes("l2t"), container(v2))
          self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref))
          self.assertEqual(compat.as_bytes("l1"), container(v3))
          self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))

          return constant_op.constant(2.0)

        def false_fn():
          # When this branch is created in cond below,
          # the container should begin with 'l1'
          v1 = variables.Variable([1])
          q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          with ops.container("l2f"):
            v2 = variables.Variable([2])
            q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          v3 = variables.Variable([1])
          q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          self.assertEqual(compat.as_bytes("l1"), container(v1))
          self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
          self.assertEqual(compat.as_bytes("l2f"), container(v2))
          self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref))
          self.assertEqual(compat.as_bytes("l1"), container(v3))
          self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))

          return constant_op.constant(6.0)

        with ops.container("l1"):
          cnd_true = cond_v2.cond_v2(
              constant_op.constant(True), true_fn, false_fn)
          self.assertEqual(cnd_true.eval(), 2)

          cnd_false = cond_v2.cond_v2(
              constant_op.constant(False), true_fn, false_fn)
          self.assertEqual(cnd_false.eval(), 6)

          v4 = variables.Variable([3])
          q4 = data_flow_ops.FIFOQueue(1, dtypes.float32)
        # Created after the "l1" scope closes: default (empty) container.
        v5 = variables.Variable([4])
        q5 = data_flow_ops.FIFOQueue(1, dtypes.float32)

        self.assertEqual(compat.as_bytes("l1"), container(v4))
        self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref))
        self.assertEqual(compat.as_bytes(""), container(v5))
        self.assertEqual(compat.as_bytes(""), container(q5.queue_ref))
class CondV2ColocationGroupAndDeviceTest(test.TestCase):
  """Checks colocation-group and device-scope propagation into cond_v2.

  Note: `assertEquals` (a deprecated unittest alias) was replaced by
  `assertEqual`; behavior is unchanged.
  """

  def testColocateWithBeforeCond(self):
    """Colocation scopes active at cond-build time apply inside branches."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        a = constant_op.constant([2.0], name="a")
        b = constant_op.constant([2.0], name="b")

        def fn():
          c = constant_op.constant(3.0)
          self.assertEqual([b"loc:@a"], c.op.colocation_groups())
          return c

        with ops.colocate_with(a.op):
          self.assertEqual(
              cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3)

        def fn2():
          c = constant_op.constant(3.0)
          # Nested colocate_with scopes accumulate.
          self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
          return c

        with ops.colocate_with(a.op):
          with ops.colocate_with(b.op):
            self.assertEqual(
                cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)

  def testColocateWithInAndOutOfCond(self):
    """A colocate_with opened inside a branch does not leak back out."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        a = constant_op.constant([2.0], name="a")
        b = constant_op.constant([2.0], name="b")

        def fn2():
          with ops.colocate_with(b.op):
            c = constant_op.constant(3.0)
            self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
            return c

        with ops.colocate_with(a.op):
          self.assertEqual(
              cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)

          # Outside the cond only the outer "a" group applies.
          d = constant_op.constant([2.0], name="d")
          self.assertEqual([b"loc:@a"], d.op.colocation_groups())

  def testColocateWithInCondGraphPartitioning(self):
    """Colocation constraints inside a cond affect graph partitioning."""
    with ops.Graph().as_default() as g:
      with self.session(
          graph=g,
          config=config_pb2.ConfigProto(device_count={"CPU": 2})
      ) as sess:

        with ops.device("/device:CPU:0"):
          a = constant_op.constant([2.0], name="a")
        with ops.device("/device:CPU:1"):
          b = constant_op.constant([2.0], name="b")

        def fn():
          with ops.colocate_with(b.op):
            c = math_ops.add(a, a, name="c")
          return c

        out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)

        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        run_metadata = config_pb2.RunMetadata()
        sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)

        # We expect there to be two partitions because of the
        # colocate_with. We are only running the cond, which has a data
        # dependency on `a` but not on `b`. So, without the colocate_with
        # we would expect execution on just one device.
        self.assertTrue(len(run_metadata.partition_graphs) >= 2)

  def testDeviceBeforeCond(self):
    """Device scopes active at cond-build time place the branch's ops."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        def fn():
          self.assertEqual("", constant_op.constant(3.0).op.device)
          return test_ops.device_placement_op()

        with ops.device("/device:CPU:0"):
          self.assertIn(
              compat.as_bytes("CPU:0"),
              self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
                                            fn, fn)))

        def fn2():
          self.assertEqual("", constant_op.constant(3.0).op.device)
          return test_ops.device_placement_op()

        if test_util.is_gpu_available():
          with ops.device("/device:GPU:0"):
            self.assertIn(
                compat.as_bytes("GPU:0"),
                self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
                                              fn2, fn2)))
        else:
          self.skipTest("Test requires a GPU to check GPU device placement.")

  def testDeviceInAndOutOfCond(self):
    """A device scope opened inside a branch does not leak back out."""
    with ops.Graph().as_default() as g:
      with self.session(
          graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})):

        def fn2():
          with ops.device("/device:CPU:1"):
            c = constant_op.constant(3.0)
            self.assertEqual("/device:CPU:1", c.op.device)
            return c

        with ops.device("/device:CPU:0"):
          self.assertEqual(
              cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)

          d = constant_op.constant(4.0)
          self.assertEqual("/device:CPU:0", d.op.device)

  def testDeviceInCondGraphPartitioning(self):
    """Device scopes inside a cond affect graph partitioning."""
    with ops.Graph().as_default() as g:
      with self.session(
          graph=g,
          config=config_pb2.ConfigProto(device_count={"CPU": 2})
      ) as sess:

        def fn():
          with ops.device("/device:CPU:1"):
            c = math_ops.add(a, a, name="c")
          return c

        with ops.device("/device:CPU:0"):
          a = constant_op.constant([2.0], name="a")
          out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)

        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        run_metadata = config_pb2.RunMetadata()
        sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)

        # The add placed on CPU:1 forces at least two partitions.
        self.assertTrue(len(run_metadata.partition_graphs) >= 2)
def _cond(pred, true_fn, false_fn, name):
  """Build a cond using whichever implementation matches the active context."""
  impl = control_flow_ops.cond if _is_old_cond() else cond_v2.cond_v2
  return impl(pred, true_fn, false_fn, name=name)
def _is_old_cond():
  """True when the current control flow context is a v1 CondContext."""
  ctxt = ops.get_default_graph()._get_control_flow_context()
  return isinstance(ctxt, control_flow_ops.CondContext)
# Allow running this test file directly.
if __name__ == "__main__":
  test.main()
|
|
#!/usr/bin/env python
import unittest
from pentai.base.board import *
import pentai.base.mock as mock_m
from pentai.ai.priority_filter_2 import *
from pentai.ai.utility_filter import *
class UtilityFilterTest(unittest.TestCase):
    """Exercises UtilityFilter's move ordering on top of PriorityFilter2.

    Fixes applied:
    - `test_iterate_over_capture` was defined twice; the second definition
      silently shadowed the first, so the first never ran. The second is
      renamed `test_iterate_over_single_capture` so both execute.
    - `assertEquals` (deprecated unittest alias) replaced by `assertEqual`.
    """

    def setUp(self):
        # A mocked game whose utility() always returns 1.0, and a mocked
        # search state that reports it is our turn.
        self.ab_game = mock_m.Mock()
        self.ab_game.mockAddReturnValues(utility=1.0)
        self.ab_state = mock_m.Mock()
        self.ab_state.mockAddReturnValues(create_state=self.ab_state)
        self.ab_state.mockAddReturnValues(is_our_turn=True)
        self.pf2 = PriorityFilter2()
        self.uf = UtilityFilter(self.pf2, 10)
        self.uf.set_game(self.ab_game)

    # --- thin helpers so the tests read compactly ---

    def get_iter(self, colour):
        return list(self.uf.get_iter(colour, self.ab_state))

    def arc(self, colour, length, candidate_list, inc=1):
        self.uf.add_or_remove_candidates(colour, length, candidate_list, inc)

    def set_captured_by(self, colour, captured):
        self.uf.captured[colour] = captured

    def ar_take(self, *args, **kwargs):
        self.uf.add_or_remove_take(*args, **kwargs)

    def ar_threat(self, *args, **kwargs):
        self.uf.add_or_remove_threat(*args, **kwargs)

    def test_dont_start_in_the_middle_13(self):
        l = self.get_iter(P1)
        self.assertEqual(len(l), 0)

    def test_add_and_remove(self):
        self.arc(P1, 4, ((3,4),))
        self.arc(P1, 4, ((3,4),), -1)
        self.arc(P1, 3, ((3,2),))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (3,2))

    def test_iterate_over_our_four(self):
        self.arc(P1, 4, ((3,4),))
        self.arc(P1, 3, ((3,2),))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (3,4))

    def test_iterate_over_one_of_their_fours(self):
        self.arc(P2, 4, ((3,4),))
        self.ar_take(P1, (3,2))
        self.set_captured_by(P1, 6)
        l = self.get_iter(P1)
        self.assertEqual(len(l), 2)
        self.assertEqual(l[0], (3,4))
        self.assertEqual(l[1], (3,2))

    def test_two_of_their_fours_try_the_take(self):
        self.arc(P2, 4, ((1,2),))
        self.arc(P2, 4, ((3,4),))
        self.ar_take(P1, (3,2))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (3,2))

    def test_two_of_their_fours_no_take(self):
        #st()
        self.arc(P2, 4, ((1,2),))
        self.arc(P2, 4, ((3,4),))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        # It doesn't matter which one we choose, we're lost
        # Evaluating this node should give the result
        # But we need to choose one or the other

    def test_finish_capture_win(self):
        self.set_captured_by(P1, 8)
        self.ar_take(P1, (1,2))
        self.arc(P2, 4, ((3,4),))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (1,2))

    def test_block_or_take_to_defend_capture_loss(self):
        self.set_captured_by(P2, 8)
        self.ar_take(P1, (1,2))
        self.ar_take(P2, (3,4))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 2)

    def test_iterate_over_own_black_first(self):
        self.arc(P2, 4, ((1,5),))
        self.arc(P1, 4, ((3,4),))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (3,4))

    def test_iterate_over_higher_priority_only(self):
        self.arc(P2, 3, ((1,5),))
        self.arc(P2, 4, ((3,4),))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (3,4))

    def test_iterate_over_capture(self):
        self.uf.add_or_remove_take(P1, (3,4))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (3,4))

    def test_iterate_over_own_capture_first(self):
        self.uf.add_or_remove_take(P1, (1,2))
        self.uf.add_or_remove_take(P2, (3,4))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 2)
        self.assertEqual(l[0], (3,4))
        self.assertEqual(l[1], (1,2))

    # Disabled by the "atest_" prefix; kept verbatim.
    def atest_iterate_over_other_players_four_before_our_capture(self):
        self.uf.add_or_remove_take(P2, (7,2))
        self.arc(P1, 4, ((3,4),))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 2)
        self.assertEqual(l[0], (3,4))
        self.assertEqual(l[1], (7,2))

    def test_iterate_over_other_players_capture_before_our_threes(self):
        self.arc(P1, 3, ((3,4),(1,5)))
        self.uf.add_or_remove_take(P2, (7,2))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 3)
        self.assertEqual(l[0], (7,2))
        our_threes = ((3,4),(1,5))
        self.assertIn(l[1], our_threes)
        self.assertIn(l[2], our_threes)

    def test_iterate_block_only(self):
        self.arc(P2, 3, ((1,5),(2,4)))
        self.uf.add_or_remove_take(P1, (1,5))
        self.arc(P1, 4, ((2,4),))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (2,4))

    def test_iterate_over_single_capture(self):
        # Renamed from test_iterate_over_capture: it duplicated the method
        # name defined above, which shadowed the earlier test entirely.
        self.uf.add_or_remove_take(P1, (1,5))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (1,5))

    # Disabled by the "atest_" prefix; kept verbatim.
    def atest_iterate_over_their_capture_before_our_two(self):
        self.arc(P1, 2, ((2,4),(4,6),(5,7)))
        self.uf.add_or_remove_take(P2, (1,5))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 4)
        self.assertEqual(l[0], (1,5))
        twos = (2,4),(4,6),(5,7)
        self.assertIn(l[1], twos)
        self.assertIn(l[2], twos)
        self.assertIn(l[3], twos)

    def test_iterate_over_their_three_before_our_threat(self):
        self.arc(P1, 3, ((2,4),(4,6),))
        self.uf.add_or_remove_threat(P2, (1,5))
        l = self.get_iter(P1)
        self.assertEqual(len(l), 3)
        threes = (2,4),(4,6)
        self.assertIn(l[0], threes)
        self.assertIn(l[1], threes)
        self.assertEqual(l[2], (1,5))

    def test_add_and_remove_length_candidate(self):
        self.arc(P1, 3, ((2,4),(4,6),), inc=1)
        self.uf.add_or_remove_threat(P1, (1,5))
        self.arc(P1, 3, ((2,4),(4,6),), inc=-1)
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (1,5))

    def test_add_and_remove_capture_candidate(self):
        self.uf.add_or_remove_take(P1, (1,5), inc=1)
        self.uf.add_or_remove_take(P1, (1,5), inc=-1)
        l = self.get_iter(P1)
        self.assertEqual(len(l), 0)

    def test_add_and_remove_threat_candidate(self):
        self.uf.add_or_remove_threat(P1, (1,5), inc=1)
        self.uf.add_or_remove_threat(P1, (1,5), inc=-1)
        l = self.get_iter(P1)
        self.assertEqual(len(l), 0)

    def test_add_and_remove_length_candidate_from_diff_directions(self):
        self.arc(P1, 3, ((2,4),(4,6),), inc=1)
        self.arc(P1, 3, ((2,4),(3,3),), inc=1)
        self.arc(P1, 3, ((2,4),(4,6),), inc=-1)
        l = self.get_iter(P1)
        self.assertEqual(len(l), 2)
        pair = ((2,4),(3,3),)
        self.assertIn(l[0], pair)
        self.assertIn(l[1], pair)

    # Disabled by the "atest_" prefix; kept verbatim.
    def atest_multiple_entries_searched_first(self):
        self.arc(P1, 3, ((2,4),(4,6),), inc=1)
        self.arc(P1, 3, ((2,4),(3,3),), inc=1)
        l = self.get_iter(P1)
        self.assertEqual(len(l), 3)
        self.assertEqual(l[0], (2,4))
        others = ((4,6), (3,3))
        self.assertIn(l[1], others)
        self.assertIn(l[2], others)

    def test_copy_is_deep(self):
        self.arc(P1, 3, ((2,4),(3,3),), inc=1)
        self.arc(P1, 4, ((3,3),), inc=1)
        bsc = self.uf.copy()
        bsc.add_or_remove_candidates(P1, 4, [(3,3)], inc=-1)
        # Modifying the descendant should not have affected the parent
        l = self.get_iter(P1)
        self.assertEqual(l[0], (3,3))

    # Disabled by the "atest_" prefix; kept verbatim.
    def atest_multiple_entries_searched_first2(self):
        self.arc(P1, 3, ((4,6),(5,6),), inc=1)
        self.arc(P1, 3, ((9,6),(10,6),), inc=1)
        self.arc(P1, 3, ((5,6),(9,6),), inc=1)
        self.arc(P2, 2, ((7,8),(8,8),(10,8)), inc=1)
        self.arc(P2, 2, ((8,8),(10,8),(12,8)), inc=1)
        self.arc(P2, 2, ((10,8),(12,8),(13,8)), inc=1)
        l = self.get_iter(P1)
        self.assertEqual(len(l), 4)
        first_pair = ((5,6), (9,6))
        self.assertIn(l[0], first_pair)
        self.assertIn(l[1], first_pair)

    def test_pointless_positions_ignored_gracefully(self):
        self.arc(P1, 4, ((4,6),), inc=1)
        self.arc(P1, 4, ((5,7),), inc=1)
        self.arc(P1, 4, ((4,6),), inc=-1)
        l = self.get_iter(P1)
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], (5,7))
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
|
"""
"Make fixture" command.
Highly useful for making test fixtures. Use it to pick only few items
from your data to serialize, restricted by primary keys. By default
command also serializes foreign keys and m2m relations. You can turn
off related items serialization with --skip-related option.
How to use:
python manage.py makefixture
will display what models are installed
python manage.py makefixture User[:3]
or
python manage.py makefixture auth.User[:3]
or
python manage.py makefixture django.contrib.auth.User[:3]
will serialize users with ids 1 and 2, with assigned groups, permissions
and content types.
python manage.py makefixture YourModel[3] YourModel[6:10]
will serialize YourModel with key 3 and keys 6 to 9 inclusively.
You can also serialize whole tables, mix several different models in one
command, and use the options of dumpdata:
python manage.py makefixture --format=xml --indent=4 YourModel[3] AnotherModel auth.User[:5] auth.Group
"""
# From http://www.djangosnippets.org/snippets/918/
#save into anyapp/management/commands/makefixture.py
#or back into django/core/management/commands/makefixture.py
#v0.1 -- current version
#known issues:
#no support for generic relations
#no support for one-to-one relations
from optparse import make_option
from django.core import serializers
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.core.management.base import LabelCommand
from django.db.models.fields.related import ForeignKey
from django.db.models.fields.related import ManyToManyField
from django.db.models.loading import get_models
DEBUG = False
def model_name(m):
    """Return the dotted import path of model class *m*, with the trailing
    module segment (conventionally ``models``) replaced by the class name."""
    path_parts = m.__module__.split('.')[:-1]  # drop the ".models" segment
    path_parts.append(m._meta.object_name)
    return ".".join(path_parts)
class Command(LabelCommand):
help = 'Output the contents of the database as a fixture of the given format.'
args = 'modelname[pk] or modelname[id1:id2] repeated one or more times'
option_list = BaseCommand.option_list + (
make_option('--skip-related', default=True, action='store_false', dest='propagate',
help='Specifies if we shall not add related objects.'),
make_option('--reverse', default=[], action='append', dest='reverse',
help="Reverse relations to follow (e.g. 'Job.task_set')."),
make_option('--format', default='json', dest='format',
help='Specifies the output serialization format for fixtures.'),
make_option('--indent', default=None, dest='indent', type='int',
help='Specifies the indent level to use when pretty-printing output'),
)
def handle_reverse(self, **options):
follow_reverse = options.get('reverse', [])
to_reverse = {}
for arg in follow_reverse:
try:
model_name, related_set_name = arg.rsplit(".", 1)
except:
raise CommandError("Bad fieldname on '--reverse %s'" % arg)
model = self.get_model_from_name(model_name)
try:
getattr(model, related_set_name)
except AttributeError:
raise CommandError("Field '%s' does not exist on model '%s'" % (
related_set_name, model_name))
to_reverse.setdefault(model, []).append(related_set_name)
return to_reverse
def handle_models(self, models, **options):
format = options.get('format','json')
indent = options.get('indent',None)
show_traceback = options.get('traceback', False)
propagate = options.get('propagate', True)
follow_reverse = self.handle_reverse(**options)
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
raise CommandError("Unknown serialization format: %s" % format)
try:
serializers.get_serializer(format)
except KeyError:
raise CommandError("Unknown serialization format: %s" % format)
objects = []
for model, slice in models:
if isinstance(slice, basestring) and slice:
objects.extend(model._default_manager.filter(pk__exact=slice))
elif not slice or type(slice) is list:
items = model._default_manager.all()
if slice and slice[0]:
items = items.filter(pk__gte=slice[0])
if slice and slice[1]:
items = items.filter(pk__lt=slice[1])
items = items.order_by(model._meta.pk.attname)
objects.extend(items)
else:
raise CommandError("Wrong slice: %s" % slice)
all = objects
if propagate:
collected = set([(x.__class__, x.pk) for x in all])
while objects:
related = []
for x in objects:
if DEBUG:
print "Adding %s[%s]" % (model_name(x), x.pk)
# follow forward relation fields
for f in x.__class__._meta.fields + x.__class__._meta.many_to_many:
if isinstance(f, ForeignKey):
new = getattr(x, f.name) # instantiate object
if new and not (new.__class__, new.pk) in collected:
collected.add((new.__class__, new.pk))
related.append(new)
if isinstance(f, ManyToManyField):
for new in getattr(x, f.name).all():
if new and not (new.__class__, new.pk) in collected:
collected.add((new.__class__, new.pk))
related.append(new)
# follow reverse relations as requested
for reverse_field in follow_reverse.get(x.__class__, []):
mgr = getattr(x, reverse_field)
for new in mgr.all():
if new and not (new.__class__, new.pk) in collected:
collected.add((new.__class__, new.pk))
related.append(new)
objects = related
all.extend(objects)
try:
return serializers.serialize(format, all, indent=indent)
except Exception, e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
def get_models(self):
return [(m, model_name(m)) for m in get_models()]
def get_model_from_name(self, search):
"""Given a name of a model, return the model object associated with it
The name can be either fully specified or uniquely matching the
end of the model name. e.g.
django.contrib.auth.User
or
auth.User
raises CommandError if model can't be found or uniquely determined
"""
models = [model for model, name in self.get_models()
if name.endswith('.'+name) or name == search]
if not models:
raise CommandError("Unknown model: %s" % search)
if len(models)>1:
raise CommandError("Ambiguous model name: %s" % search)
return models[0]
def handle_label(self, labels, **options):
parsed = []
for label in labels:
search, pks = label, ''
if '[' in label:
search, pks = label.split('[', 1)
slice = ''
if ':' in pks:
slice = pks.rstrip(']').split(':', 1)
elif pks:
slice = pks.rstrip(']')
model = self.get_model_from_name(search)
parsed.append((model, slice))
return self.handle_models(parsed, **options)
def list_models(self):
names = [name for _model, name in self.get_models()]
raise CommandError('Neither model name nor slice given. Installed model names: \n%s' % ",\n".join(names))
def handle(self, *labels, **options):
if not labels:
self.list_models()
output = []
label_output = self.handle_label(labels, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# pylint: disable=unused-import
from tensorflow.python.ops.gen_functional_ops import remote_call
# pylint: enable=unused-import
from tensorflow.python.ops.gen_functional_ops import symbolic_gradient
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(yuanbyu, mrry): Handle stride to support sliding windows.
@tf_export("foldl")
def foldl(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name=None):
"""foldl on the list of tensors unpacked from `elems` on dimension 0.
This foldl operator repeatedly applies the callable `fn` to a sequence
of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn. If `initializer` is None, `elems` must contain
at least one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from first
to last.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = tf.constant([1, 2, 3, 4, 5, 6])
sum = foldl(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
def create_ta(elem):
return tensor_array_ops.TensorArray(
dtype=elem.dtype, size=n, dynamic_size=False,
infer_shape=True).unstack(elem)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldl", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array. n may be known statically.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
]
n = (
tensor_shape.dimension_value(elems_flat[0].shape[0]) or
array_ops.shape(elems_flat[0])[0])
elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
a = nest.map_structure(lambda elem: elem.read(0), elems_ta)
i = constant_op.constant(1)
else:
a = initializer
i = constant_op.constant(0)
def compute(i, a):
elem_i = nest.map_structure(lambda elem: elem.read(i), elems_ta)
a = fn(a, elem_i)
return [i + 1, a]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i < n,
compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
@tf_export("foldr")
def foldr(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name=None):
"""foldr on the list of tensors unpacked from `elems` on dimension 0.
This foldr operator repeatedly applies the callable `fn` to a sequence
of elements from last to first. The elements are made of the tensors
unpacked from `elems`. The callable fn takes two tensors as arguments.
The first argument is the accumulated value computed from the preceding
invocation of fn. If `initializer` is None, `elems` must contain at least
one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `fn(initializer, values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Args:
fn: The callable to be performed.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
as the initial value for the accumulator.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors, resulting from applying
`fn` consecutively to the list of tensors unpacked from `elems`, from last
to first.
Raises:
TypeError: if `fn` is not callable.
Example:
```python
elems = [1, 2, 3, 4, 5, 6]
sum = foldr(lambda a, x: a + x, elems)
# sum == 21
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
def create_ta(elem):
return tensor_array_ops.TensorArray(
dtype=elem.dtype, size=n, dynamic_size=False,
infer_shape=True).unstack(elem)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "foldr", [elems]):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally and not
# issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array. n may be known statically.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
]
n = (
tensor_shape.dimension_value(elems_flat[0].shape[0]) or
array_ops.shape(elems_flat[0])[0])
elems_ta = nest.map_structure(create_ta, elems)
if initializer is None:
i = n - 1
a = nest.map_structure(lambda elem: elem.read(i), elems_ta)
else:
i = n
a = initializer
def compute(i, a):
i -= 1
elem = nest.map_structure(lambda elem: elem.read(i), elems_ta)
a_out = fn(a, elem)
return [i, a_out]
_, r_a = control_flow_ops.while_loop(
lambda i, a: i > 0,
compute, [i, a],
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return r_a
@tf_export("scan")
def scan(fn,
elems,
initializer=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
infer_shape=True,
reverse=False,
name=None):
"""scan on the list of tensors unpacked from `elems` on dimension 0.
The simplest version of `scan` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the tensors
unpacked from `elems` on dimension 0. The callable fn takes two tensors as
arguments. The first argument is the accumulated value computed from the
preceding invocation of fn. If `initializer` is None, `elems` must contain
at least one element, and its first element is used as the initializer.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.
If reverse=True, it's fn(initializer, values[-1]).shape.
This method also allows multi-arity `elems` and accumulator. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The second argument of
`fn` must match the structure of `elems`.
If no `initializer` is provided, the output structure and dtypes of `fn`
are assumed to be the same as its input; and in this case, the first
argument of `fn` must match the structure of `elems`.
If an `initializer` is provided, then the output of `fn` must have the same
structure as `initializer`; and the first argument of `fn` must match
this structure.
For example, if `elems` is `(t1, [t2, t3])` and `initializer` is
`[i1, i2]` then an appropriate signature for `fn` in `python2` is:
`fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list,
`[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the
one that works in `python3`, is:
`fn = lambda a, t:`, where `a` and `t` correspond to the input tuples.
Args:
fn: The callable to be performed. It accepts two arguments. The first will
have the same structure as `initializer` if one is provided, otherwise it
will have the same structure as `elems`. The second will have the same
(possibly nested) structure as `elems`. Its output must have the same
structure as `initializer` if one is provided, otherwise it must have the
same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be the first argument to `fn`.
initializer: (optional) A tensor or (possibly nested) sequence of tensors,
initial value for the accumulator, and the expected output type of `fn`.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
reverse: (optional) True scans the tensor last to first (instead of first to
last).
name: (optional) Name prefix for the returned tensors.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying `fn` to tensors unpacked from `elems` along the first
dimension, and the previous accumulator value(s), from first to last (or
last to first, if `reverse=True`).
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `initializer` do not match.
ValueError: if the lengths of the output of `fn` and `initializer`
do not match.
Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
sum = scan(lambda a, x: a + x, elems)
# sum == [1, 3, 6, 10, 15, 21]
sum = scan(lambda a, x: a + x, elems, reverse=True)
# sum == [21, 20, 18, 15, 11, 6]
```
```python
elems = np.array([1, 2, 3, 4, 5, 6])
initializer = np.array(0)
sum_one = scan(
lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer)
# sum_one == [1, 2, 3, 4, 5, 6]
```
```python
elems = np.array([1, 0, 0, 0, 0, 0])
initializer = (np.array(0), np.array(1))
fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer)
# fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13])
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
input_is_sequence = nest.is_sequence(elems)
input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
def input_pack(x):
return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
if initializer is None:
output_is_sequence = input_is_sequence
output_flatten = input_flatten
output_pack = input_pack
else:
output_is_sequence = nest.is_sequence(initializer)
output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]
def output_pack(x):
return (nest.pack_sequence_as(initializer, x)
if output_is_sequence else x[0])
elems_flat = input_flatten(elems)
in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "scan", elems_flat):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
# Convert elems to tensor array.
elems_flat = [
ops.convert_to_tensor(elem, name="elem") for elem in elems_flat
]
# Convert elems to tensor array. n may be known statically.
n = tensor_shape.dimension_value(elems_flat[0].shape[0])
if n is None:
n = array_ops.shape(elems_flat[0])[0]
# TensorArrays are always flat
elems_ta = [
tensor_array_ops.TensorArray(
dtype=elem.dtype,
size=n,
dynamic_size=False,
element_shape=elem.shape[1:],
infer_shape=True) for elem in elems_flat
]
# Unpack elements
elems_ta = [
elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)
]
if initializer is None:
a_flat = [elem.read(n - 1 if reverse else 0) for elem in elems_ta]
i = 1
else:
initializer_flat = output_flatten(initializer)
a_flat = [ops.convert_to_tensor(init) for init in initializer_flat]
i = 0
# Create a tensor array to store the intermediate values.
accs_ta = [
tensor_array_ops.TensorArray(
dtype=init.dtype,
size=n,
element_shape=init.shape if infer_shape else None,
dynamic_size=False,
infer_shape=infer_shape) for init in a_flat
]
if initializer is None:
accs_ta = [
acc_ta.write(n - 1 if reverse else 0, a)
for (acc_ta, a) in zip(accs_ta, a_flat)
]
def compute(i, a_flat, tas):
"""The loop body of scan.
Args:
i: the loop counter.
a_flat: the accumulator value(s), flattened.
tas: the output accumulator TensorArray(s), flattened.
Returns:
[i + 1, a_flat, tas]: the updated counter + new accumulator values +
updated TensorArrays
Raises:
TypeError: if initializer and fn() output structure do not match
ValueType: if initializer and fn() output lengths do not match
"""
packed_elems = input_pack([elem_ta.read(i) for elem_ta in elems_ta])
packed_a = output_pack(a_flat)
a_out = fn(packed_a, packed_elems)
nest.assert_same_structure(elems if initializer is None else initializer,
a_out)
flat_a_out = output_flatten(a_out)
tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_a_out)]
if reverse:
next_i = i - 1
else:
next_i = i + 1
return (next_i, flat_a_out, tas)
if reverse:
initial_i = n - 1 - i
condition = lambda i, _1, _2: i >= 0
else:
initial_i = i
condition = lambda i, _1, _2: i < n
_, _, r_a = control_flow_ops.while_loop(
condition,
compute, (initial_i, a_flat, accs_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
results_flat = [r.stack() for r in r_a]
n_static = tensor_shape.Dimension(
tensor_shape.dimension_value(
elems_flat[0].get_shape().with_rank_at_least(1)[0]))
for elem in elems_flat[1:]:
n_static.merge_with(
tensor_shape.Dimension(
tensor_shape.dimension_value(
elem.get_shape().with_rank_at_least(1)[0])))
for r in results_flat:
r.set_shape(
tensor_shape.TensorShape(n_static).concatenate(r.get_shape()[1:]))
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
return output_pack(results_flat)
# pylint: disable=invalid-name
def If(cond, inputs, then_branch, else_branch, name=None):
  r"""output = Cond(inputs) ? then_branch(inputs) : else_branch(inputs).

  Args:
    cond: A `Tensor`. A scalar. If the scalar is not a boolean, the scalar is
      converted to a boolean according to the following rule: if the scalar is a
      numerical value, non-zero means True and zero means False; if the scalar
      is a string, non-empty means True and empty means False.
    inputs: A list of input tensors.
    then_branch: A function takes 'inputs' and returns a list of tensors, whose
      types are the same as what else_branch returns.
    else_branch: A function takes 'inputs' and returns a list of tensors. whose
      types are the same as what then_branch returns.
    name: A name for the operation (optional).

  Returns:
    A list of tensors returned by either then_branch(inputs)
    or else_branch(inputs).
  """
  # The op's output dtypes are dictated by then_branch's signature;
  # else_branch is required to produce matching types.
  output_types = [arg.type for arg in then_branch.definition.signature.output_arg]
  # pylint: disable=protected-access
  return gen_functional_ops._if(
      cond, inputs, output_types, then_branch, else_branch, name=name)
def Gradient(inputs, f, name=None):
  r"""Computes the gradient function for function f via backpropagation.

  `f` must be a numerical function taking N inputs and producing M outputs.
  Its gradient function g takes the N original inputs plus the M output
  cotangents and yields the N input cotangents. I.e. if we have
  (y1, ..., yM) = f(x1, ..., xN), then
  (dL/dx1, ..., dL/dxN) = g(x1, ..., xN, dL/dy1, ..., dL/dyM),
  where L is a scalar-valued function of (x1, ..., xN) (e.g., a loss) and
  dL/dxi is the partial derivative of L with respect to xi.

  Args:
    inputs: A list of tensors of size N + M.
    f: The function we want to compute the gradient for.
    name: A name for the operation (optional).

  Returns:
    A list of tensors of size N.
  """
  # TODO(zhifengc): Pretty-print the above spec in latex.
  # TODO(zhfiengc): Needs some math expert to say the comment above better.
  input_types = [arg.type for arg in f.definition.signature.input_arg]
  return symbolic_gradient(input=inputs, Tout=input_types, f=f, name=name)
def _LoopBodyCaptureWrapper(func):
  """Returns a wrapper for `func` that handles loop-carried captured inputs."""

  @function.Defun(
      *func.declared_input_types, func_name="%s_Wrapper" % func.name)
  def Wrapper(*args):
    """A wrapper that handles loop-carried captured inputs."""
    outputs = func(*args)
    captured = tuple(function.get_extra_args())
    # Nullary functions return an Operation. Normal functions can't do this
    # because their return values are converted to Tensors.
    if isinstance(outputs, ops.Operation):
      return captured
    # Unary functions return a single Tensor value.
    if not isinstance(outputs, tuple):
      return (outputs,) + captured
    # N-ary functions return a tuple of Tensors.
    return outputs + captured

  return Wrapper
# pylint: disable=invalid-name,protected-access
def While(input_, cond, body, name=None, hostmem=None):
  r"""output = input; While (Cond(output)) { output = Body(output) }.

  Args:
    input_: A list of `Tensor` objects. A list of input tensors whose types are
      T.
    cond: . A function takes 'input' and returns a tensor. If the tensor is a
      scalar of non-boolean, the scalar is converted to a boolean
      according to the following rule: if the scalar is a numerical value,
      non-zero means True and zero means False; if the scalar is a string,
      non-empty means True and empty means False. If the tensor is not a
      scalar, non-emptiness means True and False otherwise.
    body: . A function takes a list of tensors and returns another list tensors.
      Both lists have the same types as specified by T.
    name: A name for the operation (optional).
    hostmem: A list of integer. If i is in the list, input[i] is a host memory
      tensor.

  Raises:
    ValueError: if `cond` has implicitly captured inputs or if `cond` and `body`
      have different signatures.

  Returns:
    A list of `Tensor` objects. Has the same type as `input`.
    A list of output tensors whose types are T.
  """
  if cond.captured_inputs:
    raise ValueError("While op 'cond' argument must be a function "
                     "without implicitly captured inputs.")

  if cond.declared_input_types != body.declared_input_types:
    raise ValueError(
        "While op 'cond' and 'body' signatures do not match. %r vs %r" %
        (cond.declared_input_types, body.declared_input_types))

  if body.captured_inputs:
    # `body`'s captures become extra loop-carried variables; since cond and
    # body must share a signature, wrap `cond` to accept (and ignore) them.
    cond_dtypes = list(
        body.declared_input_types) + [t.dtype for t in body.captured_inputs]

    @function.Defun(*cond_dtypes, func_name="%s_Wrapper" % cond.name)
    def CondWrapper(*args):
      """A wrapper that handles loop-carried captured inputs."""
      # Forward only the declared inputs; the trailing captures are ignored.
      return cond(*args[:len(body.declared_input_types)])

    ret = gen_functional_ops._while(
        input_ + body.captured_inputs,
        CondWrapper,
        _LoopBodyCaptureWrapper(body),
        name=name)
    # Slice off the loop-carried captured inputs.
    ret = ret[:-len(body.captured_inputs)]
  else:
    ret = gen_functional_ops._while(input_, cond, body, name=name)
  if hostmem:
    # Record host-memory placement via attributes on the While op itself
    # (all returned tensors share that one op).
    input_attr = attr_value_pb2.AttrValue()
    input_attr.list.i.extend(hostmem)
    ret[0].op._set_attr("_input_hostmem", input_attr)  # pylint: disable=protected-access
    output_attr = attr_value_pb2.AttrValue()
    output_attr.list.i.extend(hostmem)
    ret[0].op._set_attr("_output_hostmem", output_attr)  # pylint: disable=protected-access
  return ret
# b/36459430
#
# Ideally, we do not need this rewrite For loop into a While loop.
# However, today, if a While runs on GPU and the condition returns a
# boolean, the While kernel crashes. Even if we fix the crash, the
# bool needs to be copied between GPU and CPU. So, a for loop is much
# preferred when running on GPU.
#
# On the other hand, For op has no directly XLA kernel. So, when we run
# a for loop, we need to rewrite it using a While op.
#
# It should be possible and probably better to write a XLA C++ kernel
# implementing the logic in _ForUsingWhile.
def _ForUsingWhile(start,
                   limit,
                   delta,
                   inputs,
                   forbody,
                   name=None,
                   hostmem=None):
  """Helper to implement a For loop using a While.

  The While carries four extra int32 loop variables (i, n, start, delta)
  in front of the user's `inputs`; see the body signature below.
  """
  # To support negative delta (e.g., range(100, 0, -3)), we iterate
  # over the range(n) and use iter * delta + start as the real
  # iteration index. (e.g., for i in range(34): iter = i * (-3) +
  # 100).
  d = math_ops.abs(delta)
  # XLA on TPUs doesn't support integer division
  n = math_ops.cast(
      math_ops.cast((math_ops.abs(limit - start) + d - 1), dtypes.float32) /
      math_ops.cast(d, dtypes.float32), dtypes.int32)

  # Carried loop variables ("extra_args") are implicitly added to the input list
  # of the WhileBody function. WhileCond does not call forbody, and so does not
  # depend on any of forbody's extra_args. Since WhileCond and WhileBody
  # must have identical inputs, we have to augment the cond signature to take
  # the same types as the carried loop variables.
  body_sig = [dtypes.int32] * 4 + list(forbody.declared_input_types)[1:]

  cond_name = "%s_Cond" % forbody.name

  @function.Defun(*body_sig, func_name=cond_name)
  def WhileCond(i, n, *args):
    del args
    return i < n

  body_name = "%s_Body" % forbody.name

  @function.Defun(*body_sig, func_name=body_name)
  def WhileBody(i, n, start, delta, *args):
    """A While wrapper for forbody that handles loop-carried captured inputs."""
    for_result = forbody(start + i * delta, *args)
    # Nullary functions return an Operation. Normal functions can't do this
    # because their return values are converted to Tensors.
    if isinstance(for_result, ops.Operation):
      for_result = ()
    # Unary functions return a single Tensor value.
    elif isinstance(for_result, ops.Tensor):
      for_result = (for_result,)
    return (i + 1, n, start, delta) + tuple(for_result)

  if hostmem is not None:
    # Shift user-specified host-memory indices past the four synthetic
    # loop variables, which are always host-memory int32 scalars.
    hostmem = [0, 1, 2, 3] + [(4 + _) for _ in hostmem]
  else:
    hostmem = [0, 1, 2, 3]

  results = While(
      input_=[0, n, start, delta] + inputs,
      cond=WhileCond,
      body=WhileBody,
      name=name,
      hostmem=hostmem)
  # Slice off the loop-carried captured inputs.
  return list(results[4:len(results)])
def For(start,
        limit,
        delta,
        inputs,
        body,
        name=None,
        hostmem=None,
        rewrite_with_while=None):
  r"""out = input; for i in range(start, limit, delta) out = body(i, out).

  Args:
    start: A `Tensor` of type `int32`.
    limit: A `Tensor` of type `int32`.
    delta: A `Tensor` of type `int32`.
    inputs: A list of `Tensor` objects. A list of input tensors whose types are
      T.
    body: A function takes a list of tensors and returns another list of
      tensors. Both lists have the same types as (int32, T...).
    name: A name for the operation (optional).
    hostmem: A list of integer. If i is in the list, inputs[i] is a host memory
      tensor. In other words, (i+1)-th argument of the body function is
      expecting a host memory.
    rewrite_with_while: If True, using While op to implement the For.

  Returns:
    A list of `Tensor` objects. Has the same type as `input`.
    A list of output tensors whose types are T.
  """
  if rewrite_with_while:
    # See the b/36459430 comment above _ForUsingWhile for why this exists.
    return _ForUsingWhile(start, limit, delta, inputs, body, name, hostmem)
  if body.captured_inputs:
    # Captures become extra loop-carried variables, handled by the wrapper.
    ret = gen_functional_ops._for(
        start,
        limit,
        delta,
        inputs + body.captured_inputs,
        _LoopBodyCaptureWrapper(body),
        name=name)
    # Slice off the loop-carried captured inputs.
    ret = ret[:-len(body.captured_inputs)]
  else:
    ret = gen_functional_ops._for(start, limit, delta, inputs, body, name=name)
  if hostmem:
    # Input indices are offset by the three scalar loop parameters; output
    # indices are not, since start/limit/delta are not outputs.
    num_for_params = 3  # start/limit/delta
    input_attr = attr_value_pb2.AttrValue()
    input_attr.list.i.extend([num_for_params + i for i in hostmem])
    ret[0].op._set_attr("_input_hostmem", input_attr)  # pylint: disable=protected-access
    output_attr = attr_value_pb2.AttrValue()
    output_attr.list.i.extend(hostmem)
    ret[0].op._set_attr("_output_hostmem", output_attr)  # pylint: disable=protected-access
  return ret
# pylint: enable=invalid-name,protected-access
def partitioned_call(args,
                     f,
                     tout=None,
                     executing_eagerly=None,
                     config=None,
                     executor_type=None):
  """Executes a function while respecting device annotations.
  Currently, only those functions that execute within the same address space
  can be executed.
  Args:
    args: The arguments of the function, including captured inputs.
    f: The function to execute; an instance of `_DefinedFunction` or
      `_EagerDefinedFunction`.
    tout: a list containing the output dtypes enums; if `None`, inferred from
      the signature of `f`.
    executing_eagerly: (Optional) A boolean indicating whether the context is
      executing eagerly. If `None`, fetched from the global context.
    config: (Optional) A `tensorflow::ConfigProto` proto, serialized. If `None`,
      all optimizations are disabled. Currently only handled for eager defined
      functions.
    executor_type: (Optional) A string for the name of the executor to be used
      in the function call. If not set, or set to an empty string, the default
      tensorflow executor will be used.
  Returns:
    The list of `Tensor`s returned by invoking `f(args)`. If the function does
    not return anything, then returns `None` if eager execution is enabled, or
    the `Operation` if not.
  """
  # Fill in each optional argument from the function definition / global
  # context when the caller did not supply it.
  if tout is None:
    tout = tuple(x.type for x in f.definition.signature.output_arg)
  if executing_eagerly is None:
    executing_eagerly = context.executing_eagerly()
  if config is None:
    config = function_utils.get_disabled_rewriter_config()
  if executor_type is None:
    executor_type = ""
  # Fast path: the generated op bindings work whenever we are eager, or the
  # function has at least one output.
  if executing_eagerly or len(tout):
    if f.stateful_ops:
      outputs = gen_functional_ops.stateful_partitioned_call(
          args=args,
          Tout=tout,
          f=f,
          config_proto=config,
          executor_type=executor_type)
    else:
      outputs = gen_functional_ops.partitioned_call(
          args=args,
          Tout=tout,
          f=f,
          config_proto=config,
          executor_type=executor_type)
    return outputs if outputs else None
  # The generated binding returns an empty list for functions that don't
  # return any Tensors, hence the need to use `create_op` directly.
  args = [ops.internal_convert_to_tensor(x) for x in args]
  tin_attr = attr_value_pb2.AttrValue(
      list=attr_value_pb2.AttrValue.ListValue(
          type=[x.dtype.as_datatype_enum for x in args]))
  tout_attr = attr_value_pb2.AttrValue(
      list=attr_value_pb2.AttrValue.ListValue(type=tout))
  func_attr = attr_value_pb2.AttrValue(
      func=attr_value_pb2.NameAttrList(name=f.name))
  executor_type_attr = attr_value_pb2.AttrValue(
      s=compat.as_bytes(executor_type))
  # When running in graph mode, the graph and function graphs are optimized
  # (i.e. run through grappler) per the session options, so we can disable any
  # eager-specific rewriting.
  config_proto = attr_value_pb2.AttrValue(
      s=function_utils.get_disabled_rewriter_config())
  graph = ops.get_default_graph()
  # The function must be registered with the graph before the call op that
  # references it can be created.
  f.add_to_graph(graph)
  op_name = "StatefulPartitionedCall" if f.stateful_ops else "PartitionedCall"
  op = graph.create_op(
      op_name,
      args,
      tout,
      name="PartitionedFunctionCall",
      attrs={
          "Tin": tin_attr,
          "Tout": tout_attr,
          "f": func_attr,
          "config_proto": config_proto,
          "executor_type": executor_type_attr,
      })
  outputs = op.outputs
  # A function with no outputs yields its Operation so callers can still run
  # it for side effects.
  return outputs if outputs else op
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.insights.v1.conference.conference_participant import ConferenceParticipantList
class ConferenceList(ListResource):
    """List resource for Voice Insights Conference summary records.

    Wraps the ``/Conferences`` endpoint of the Insights v1 API, offering
    streaming, eager listing, manual paging and lookup-by-SID access.
    """
    def __init__(self, version):
        """
        Initialize the ConferenceList
        :param Version version: Version that contains the resource
        :returns: twilio.rest.insights.v1.conference.ConferenceList
        :rtype: twilio.rest.insights.v1.conference.ConferenceList
        """
        super(ConferenceList, self).__init__(version)
        # Path Solution
        self._solution = {}
        self._uri = '/Conferences'.format(**self._solution)
    def stream(self, conference_sid=values.unset, friendly_name=values.unset,
               status=values.unset, created_after=values.unset,
               created_before=values.unset, mixer_region=values.unset,
               tags=values.unset, subaccount=values.unset,
               detected_issues=values.unset, end_reason=values.unset, limit=None,
               page_size=None):
        """
        Streams ConferenceInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.
        :param unicode conference_sid: The SID of the conference.
        :param unicode friendly_name: Custom label for the conference.
        :param unicode status: Conference status.
        :param unicode created_after: Conferences created after timestamp.
        :param unicode created_before: Conferences created before timestamp.
        :param unicode mixer_region: Region where the conference was mixed.
        :param unicode tags: Tags applied by Twilio for common issues.
        :param unicode subaccount: Account SID for the subaccount.
        :param unicode detected_issues: Potential issues detected during the conference.
        :param unicode end_reason: Conference end reason.
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.insights.v1.conference.ConferenceInstance]
        """
        # read_limits reconciles limit and page_size into effective values.
        limits = self._version.read_limits(limit, page_size)
        page = self.page(
            conference_sid=conference_sid,
            friendly_name=friendly_name,
            status=status,
            created_after=created_after,
            created_before=created_before,
            mixer_region=mixer_region,
            tags=tags,
            subaccount=subaccount,
            detected_issues=detected_issues,
            end_reason=end_reason,
            page_size=limits['page_size'],
        )
        return self._version.stream(page, limits['limit'])
    def list(self, conference_sid=values.unset, friendly_name=values.unset,
             status=values.unset, created_after=values.unset,
             created_before=values.unset, mixer_region=values.unset,
             tags=values.unset, subaccount=values.unset,
             detected_issues=values.unset, end_reason=values.unset, limit=None,
             page_size=None):
        """
        Lists ConferenceInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.
        :param unicode conference_sid: The SID of the conference.
        :param unicode friendly_name: Custom label for the conference.
        :param unicode status: Conference status.
        :param unicode created_after: Conferences created after timestamp.
        :param unicode created_before: Conferences created before timestamp.
        :param unicode mixer_region: Region where the conference was mixed.
        :param unicode tags: Tags applied by Twilio for common issues.
        :param unicode subaccount: Account SID for the subaccount.
        :param unicode detected_issues: Potential issues detected during the conference.
        :param unicode end_reason: Conference end reason.
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.insights.v1.conference.ConferenceInstance]
        """
        # Eagerly drain the stream() generator into a list.
        return list(self.stream(
            conference_sid=conference_sid,
            friendly_name=friendly_name,
            status=status,
            created_after=created_after,
            created_before=created_before,
            mixer_region=mixer_region,
            tags=tags,
            subaccount=subaccount,
            detected_issues=detected_issues,
            end_reason=end_reason,
            limit=limit,
            page_size=page_size,
        ))
    def page(self, conference_sid=values.unset, friendly_name=values.unset,
             status=values.unset, created_after=values.unset,
             created_before=values.unset, mixer_region=values.unset,
             tags=values.unset, subaccount=values.unset,
             detected_issues=values.unset, end_reason=values.unset,
             page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of ConferenceInstance records from the API.
        Request is executed immediately
        :param unicode conference_sid: The SID of the conference.
        :param unicode friendly_name: Custom label for the conference.
        :param unicode status: Conference status.
        :param unicode created_after: Conferences created after timestamp.
        :param unicode created_before: Conferences created before timestamp.
        :param unicode mixer_region: Region where the conference was mixed.
        :param unicode tags: Tags applied by Twilio for common issues.
        :param unicode subaccount: Account SID for the subaccount.
        :param unicode detected_issues: Potential issues detected during the conference.
        :param unicode end_reason: Conference end reason.
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50
        :returns: Page of ConferenceInstance
        :rtype: twilio.rest.insights.v1.conference.ConferencePage
        """
        # values.of drops unset entries so only supplied filters are sent.
        data = values.of({
            'ConferenceSid': conference_sid,
            'FriendlyName': friendly_name,
            'Status': status,
            'CreatedAfter': created_after,
            'CreatedBefore': created_before,
            'MixerRegion': mixer_region,
            'Tags': tags,
            'Subaccount': subaccount,
            'DetectedIssues': detected_issues,
            'EndReason': end_reason,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })
        response = self._version.page(method='GET', uri=self._uri, params=data, )
        return ConferencePage(self._version, response, self._solution)
    def get_page(self, target_url):
        """
        Retrieve a specific page of ConferenceInstance records from the API.
        Request is executed immediately
        :param str target_url: API-generated URL for the requested results page
        :returns: Page of ConferenceInstance
        :rtype: twilio.rest.insights.v1.conference.ConferencePage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )
        return ConferencePage(self._version, response, self._solution)
    def get(self, conference_sid):
        """
        Constructs a ConferenceContext
        :param conference_sid: Conference SID.
        :returns: twilio.rest.insights.v1.conference.ConferenceContext
        :rtype: twilio.rest.insights.v1.conference.ConferenceContext
        """
        return ConferenceContext(self._version, conference_sid=conference_sid, )
    def __call__(self, conference_sid):
        """
        Constructs a ConferenceContext
        :param conference_sid: Conference SID.
        :returns: twilio.rest.insights.v1.conference.ConferenceContext
        :rtype: twilio.rest.insights.v1.conference.ConferenceContext
        """
        return ConferenceContext(self._version, conference_sid=conference_sid, )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Insights.V1.ConferenceList>'
class ConferencePage(Page):
    """One page of Conference summary results returned by the Insights API.

    Deserializes each record in the page payload into a
    :class:`ConferenceInstance` via :meth:`get_instance`.
    """
    def __init__(self, version, response, solution):
        """
        Initialize the ConferencePage
        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :returns: twilio.rest.insights.v1.conference.ConferencePage
        :rtype: twilio.rest.insights.v1.conference.ConferencePage
        """
        super(ConferencePage, self).__init__(version, response)
        # Path Solution
        self._solution = solution
    def get_instance(self, payload):
        """
        Build an instance of ConferenceInstance
        :param dict payload: Payload response from the API
        :returns: twilio.rest.insights.v1.conference.ConferenceInstance
        :rtype: twilio.rest.insights.v1.conference.ConferenceInstance
        """
        return ConferenceInstance(self._version, payload, )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Insights.V1.ConferencePage>'
class ConferenceContext(InstanceContext):
    """Context for a single Conference summary identified by its SID.

    Supports fetching the summary record and navigating to the nested
    conference-participant subresource.
    """
    def __init__(self, version, conference_sid):
        """
        Initialize the ConferenceContext
        :param Version version: Version that contains the resource
        :param conference_sid: Conference SID.
        :returns: twilio.rest.insights.v1.conference.ConferenceContext
        :rtype: twilio.rest.insights.v1.conference.ConferenceContext
        """
        super(ConferenceContext, self).__init__(version)
        # Path Solution
        self._solution = {'conference_sid': conference_sid, }
        self._uri = '/Conferences/{conference_sid}'.format(**self._solution)
        # Dependents (created lazily in the property below)
        self._conference_participants = None
    def fetch(self):
        """
        Fetch the ConferenceInstance
        :returns: The fetched ConferenceInstance
        :rtype: twilio.rest.insights.v1.conference.ConferenceInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )
        return ConferenceInstance(self._version, payload, conference_sid=self._solution['conference_sid'], )
    @property
    def conference_participants(self):
        """
        Access the conference_participants
        :returns: twilio.rest.insights.v1.conference.conference_participant.ConferenceParticipantList
        :rtype: twilio.rest.insights.v1.conference.conference_participant.ConferenceParticipantList
        """
        # Lazily construct and cache the nested participant list.
        if self._conference_participants is None:
            self._conference_participants = ConferenceParticipantList(
                self._version,
                conference_sid=self._solution['conference_sid'],
            )
        return self._conference_participants
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Insights.V1.ConferenceContext {}>'.format(context)
class ConferenceInstance(InstanceResource):
    """A single Conference summary record from the Insights API.

    Marshals the raw API payload into typed properties and proxies
    actions (fetch, participant access) to a lazily-built
    :class:`ConferenceContext`.
    """
    # Enum-like constant holders mirroring the API's string values.
    class ConferenceStatus(object):
        IN_PROGRESS = "in_progress"
        NOT_STARTED = "not_started"
        COMPLETED = "completed"
        SUMMARY_TIMEOUT = "summary_timeout"
    class ConferenceEndReason(object):
        LAST_PARTICIPANT_LEFT = "last_participant_left"
        CONFERENCE_ENDED_VIA_API = "conference_ended_via_api"
        PARTICIPANT_WITH_END_CONFERENCE_ON_EXIT_LEFT = "participant_with_end_conference_on_exit_left"
        LAST_PARTICIPANT_KICKED = "last_participant_kicked"
        PARTICIPANT_WITH_END_CONFERENCE_ON_EXIT_KICKED = "participant_with_end_conference_on_exit_kicked"
    class Region(object):
        US1 = "us1"
        AU1 = "au1"
        BR1 = "br1"
        IE1 = "ie1"
        JP1 = "jp1"
        SG1 = "sg1"
        DE1 = "de1"
    class Tag(object):
        INVALID_REQUESTED_REGION = "invalid_requested_region"
        DUPLICATE_IDENTITY = "duplicate_identity"
        START_FAILURE = "start_failure"
        REGION_CONFIGURATION_ISSUES = "region_configuration_issues"
        QUALITY_WARNINGS = "quality_warnings"
        PARTICIPANT_BEHAVIOR_ISSUES = "participant_behavior_issues"
        HIGH_PACKET_LOSS = "high_packet_loss"
        HIGH_JITTER = "high_jitter"
        HIGH_LATENCY = "high_latency"
        LOW_MOS = "low_mos"
        DETECTED_SILENCE = "detected_silence"
    class ProcessingState(object):
        COMPLETE = "complete"
        IN_PROGRESS = "in_progress"
        TIMEOUT = "timeout"
    def __init__(self, version, payload, conference_sid=None):
        """
        Initialize the ConferenceInstance
        :param Version version: Version that contains the resource
        :param dict payload: Raw API payload for this conference summary
        :param conference_sid: Conference SID; falls back to the payload's SID
        :returns: twilio.rest.insights.v1.conference.ConferenceInstance
        :rtype: twilio.rest.insights.v1.conference.ConferenceInstance
        """
        super(ConferenceInstance, self).__init__(version)
        # Marshaled Properties
        self._properties = {
            'conference_sid': payload.get('conference_sid'),
            'account_sid': payload.get('account_sid'),
            'friendly_name': payload.get('friendly_name'),
            'create_time': deserialize.iso8601_datetime(payload.get('create_time')),
            'start_time': deserialize.iso8601_datetime(payload.get('start_time')),
            'end_time': deserialize.iso8601_datetime(payload.get('end_time')),
            'duration_seconds': deserialize.integer(payload.get('duration_seconds')),
            'connect_duration_seconds': deserialize.integer(payload.get('connect_duration_seconds')),
            'status': payload.get('status'),
            'max_participants': deserialize.integer(payload.get('max_participants')),
            'max_concurrent_participants': deserialize.integer(payload.get('max_concurrent_participants')),
            'unique_participants': deserialize.integer(payload.get('unique_participants')),
            'end_reason': payload.get('end_reason'),
            'ended_by': payload.get('ended_by'),
            'mixer_region': payload.get('mixer_region'),
            'mixer_region_requested': payload.get('mixer_region_requested'),
            'recording_enabled': payload.get('recording_enabled'),
            'detected_issues': payload.get('detected_issues'),
            'tags': payload.get('tags'),
            'tag_info': payload.get('tag_info'),
            'processing_state': payload.get('processing_state'),
            'url': payload.get('url'),
            'links': payload.get('links'),
        }
        # Context
        self._context = None
        self._solution = {'conference_sid': conference_sid or self._properties['conference_sid'], }
    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context
        :returns: ConferenceContext for this ConferenceInstance
        :rtype: twilio.rest.insights.v1.conference.ConferenceContext
        """
        # Build the context on first use and cache it.
        if self._context is None:
            self._context = ConferenceContext(self._version, conference_sid=self._solution['conference_sid'], )
        return self._context
    @property
    def conference_sid(self):
        """
        :returns: Conference SID.
        :rtype: unicode
        """
        return self._properties['conference_sid']
    @property
    def account_sid(self):
        """
        :returns: Account SID.
        :rtype: unicode
        """
        return self._properties['account_sid']
    @property
    def friendly_name(self):
        """
        :returns: Custom label for the conference.
        :rtype: unicode
        """
        return self._properties['friendly_name']
    @property
    def create_time(self):
        """
        :returns: Conference creation date/time.
        :rtype: datetime
        """
        return self._properties['create_time']
    @property
    def start_time(self):
        """
        :returns: Timestamp in ISO 8601 format when the conference started.
        :rtype: datetime
        """
        return self._properties['start_time']
    @property
    def end_time(self):
        """
        :returns: Conference end date/time.
        :rtype: datetime
        """
        return self._properties['end_time']
    @property
    def duration_seconds(self):
        """
        :returns: Conference duration in seconds.
        :rtype: unicode
        """
        return self._properties['duration_seconds']
    @property
    def connect_duration_seconds(self):
        """
        :returns: Duration of the conference in seconds.
        :rtype: unicode
        """
        return self._properties['connect_duration_seconds']
    @property
    def status(self):
        """
        :returns: Status of conference
        :rtype: ConferenceInstance.ConferenceStatus
        """
        return self._properties['status']
    @property
    def max_participants(self):
        """
        :returns: Max participants specified in config.
        :rtype: unicode
        """
        return self._properties['max_participants']
    @property
    def max_concurrent_participants(self):
        """
        :returns: Actual maximum concurrent participants.
        :rtype: unicode
        """
        return self._properties['max_concurrent_participants']
    @property
    def unique_participants(self):
        """
        :returns: Unique conference participants.
        :rtype: unicode
        """
        return self._properties['unique_participants']
    @property
    def end_reason(self):
        """
        :returns: Conference end reason.
        :rtype: ConferenceInstance.ConferenceEndReason
        """
        return self._properties['end_reason']
    @property
    def ended_by(self):
        """
        :returns: Call SID that ended the conference.
        :rtype: unicode
        """
        return self._properties['ended_by']
    @property
    def mixer_region(self):
        """
        :returns: Region where the conference was mixed.
        :rtype: ConferenceInstance.Region
        """
        return self._properties['mixer_region']
    @property
    def mixer_region_requested(self):
        """
        :returns: Configuration-requested conference mixer region.
        :rtype: ConferenceInstance.Region
        """
        return self._properties['mixer_region_requested']
    @property
    def recording_enabled(self):
        """
        :returns: Boolean. Indicates whether recording was enabled.
        :rtype: bool
        """
        return self._properties['recording_enabled']
    @property
    def detected_issues(self):
        """
        :returns: Potential issues detected during the conference.
        :rtype: dict
        """
        return self._properties['detected_issues']
    @property
    def tags(self):
        """
        :returns: Tags for detected conference conditions and participant behaviors.
        :rtype: list[ConferenceInstance.Tag]
        """
        return self._properties['tags']
    @property
    def tag_info(self):
        """
        :returns: Object. Contains details about conference tags.
        :rtype: dict
        """
        return self._properties['tag_info']
    @property
    def processing_state(self):
        """
        :returns: Processing state for the Conference Summary resource.
        :rtype: ConferenceInstance.ProcessingState
        """
        return self._properties['processing_state']
    @property
    def url(self):
        """
        :returns: The URL of this resource.
        :rtype: unicode
        """
        return self._properties['url']
    @property
    def links(self):
        """
        :returns: Nested resource URLs.
        :rtype: unicode
        """
        return self._properties['links']
    def fetch(self):
        """
        Fetch the ConferenceInstance
        :returns: The fetched ConferenceInstance
        :rtype: twilio.rest.insights.v1.conference.ConferenceInstance
        """
        return self._proxy.fetch()
    @property
    def conference_participants(self):
        """
        Access the conference_participants
        :returns: twilio.rest.insights.v1.conference.conference_participant.ConferenceParticipantList
        :rtype: twilio.rest.insights.v1.conference.conference_participant.ConferenceParticipantList
        """
        return self._proxy.conference_participants
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Insights.V1.ConferenceInstance {}>'.format(context)
|
|
'''Devices
===========
Defines some of the devices that are used in the experiment.
'''
__all__ = (
'FTDIOdorsBase', 'FTDIOdorsSim', 'FTDIOdors', 'DAQInDeviceBase',
'DAQInDeviceSim', 'DAQInDevice', 'DAQOutDeviceBase', 'DAQOutDeviceSim',
'DAQOutDevice')
from weakref import ref
from moa.device.digital import ButtonChannel, ButtonPort
from moa.device.analog import NumericPropertyChannel
from ffpyplayer.player import MediaPlayer
from kivy.properties import (
ConfigParserProperty, BooleanProperty, ListProperty, ObjectProperty,
NumericProperty)
from kivy.app import App
from kivy import resources
from cplcom.moa.device.ftdi import FTDISerializerDevice
from cplcom.moa.device.mcdaq import MCDAQDevice
class FTDIOdorsBase(object):
    '''Base class for the FTDI odor device.

    Dynamically creates one boolean kivy property (``p0`` ... ``pN``) per
    valve before initializing the underlying port in output mode.
    '''
    def __init__(self, **kwargs):
        # Board count may be overridden via kwargs; otherwise use the
        # class-level property default.  NOTE(review): the kwarg is read here
        # but still forwarded to super() unmodified — presumably the kivy
        # property machinery consumes it; verify against the base classes.
        n_valve_boards = kwargs.get('n_valve_boards', self.n_valve_boards)
        # we don't know ahead of time how many valves, so we need to create
        # the bool prop for each valve dynamically
        for i in range(8 * n_valve_boards):
            self.create_property('p{}'.format(i), value=False, allownone=True)
        # Odor valves are outputs only.
        super(FTDIOdorsBase, self).__init__(direction='o', **kwargs)
    n_valve_boards = NumericProperty(2)
    '''The number of valve boards that are connected to the FTDI controller.
    Each board can typically control 8 valves.
    Defaults to 2.
    '''
class FTDIOdorsSim(FTDIOdorsBase, ButtonPort):
    '''Device used when simulating the odor device.

    Inherits the dynamically created valve properties from
    :class:`FTDIOdorsBase` and backs them with a simulated button port.
    '''
    pass
class FTDIOdors(FTDIOdorsBase, FTDISerializerDevice):
    '''Device used when using the barst ftdi odor device.
    '''
    def __init__(self, **kwargs):
        super(FTDIOdors, self).__init__(**kwargs)
        # Map each dynamically created valve property name (p0, p1, ...)
        # to its channel index on the FTDI serializer.
        self.dev_map = {'p{}'.format(i): i
                        for i in range(self.n_valve_boards * 8)}
class DAQInDeviceBase(object):
    '''Base class for the Switch & Sense 8/8 input ports.

    Declares the boolean photobeam states shared by the simulated and
    hardware input devices.
    '''
    nose_beam = BooleanProperty(False, allownone=True)
    '''Reads / controls the nose port photobeam.
    '''
    reward_beam_r = BooleanProperty(False, allownone=True)
    '''Reads / controls the right reward port photobeam.
    '''
    reward_beam_l = BooleanProperty(False, allownone=True)
    '''Reads / controls the left reward port photobeam.
    '''
class DAQInDeviceSim(DAQInDeviceBase, ButtonPort):
    '''Device used when simulating the Switch & Sense 8/8 input device.

    The photobeam properties are driven by a simulated button port.
    '''
    pass
class DAQInDevice(DAQInDeviceBase, MCDAQDevice):
    '''Device used when using the barst Switch & Sense 8/8 input device.
    '''
    # Attributes persisted/read from the app settings.
    __settings_attrs__ = (
        'nose_beam_pin', 'reward_beam_r_pin', 'reward_beam_l_pin')
    def __init__(self, **kwargs):
        super(DAQInDevice, self).__init__(direction='i', **kwargs)
        # Map each photobeam property to its configured hardware pin.
        self.dev_map = {
            'nose_beam': self.nose_beam_pin,
            'reward_beam_r': self.reward_beam_r_pin,
            'reward_beam_l': self.reward_beam_l_pin}
    nose_beam_pin = NumericProperty(1)
    '''The port in the Switch & Sense to which the nose port photobeam is
    connected to.
    Defaults to 1.
    '''
    reward_beam_r_pin = NumericProperty(3)
    '''The port in the Switch & Sense to which the right reward port photobeam
    is connected to.
    Defaults to 3.
    '''
    reward_beam_l_pin = NumericProperty(2)
    '''The port in the Switch & Sense to which the left reward port photobeam
    is connected to.
    Defaults to 2.
    '''
class DAQOutDeviceBase(object):
    '''Base class for the Switch & Sense 8/8 output ports.

    Declares the boolean output states shared by the simulated and
    hardware output devices.
    '''
    house_light = BooleanProperty(False, allownone=True)
    '''Controls the house light.
    '''
    ir_leds = BooleanProperty(False, allownone=True)
    '''Controls the IR light.
    '''
    fans = BooleanProperty(False, allownone=True)
    '''Controls the fans.
    '''
    feeder_r = BooleanProperty(False, allownone=True)
    '''Controls the right feeder.
    '''
    feeder_l = BooleanProperty(False, allownone=True)
    '''Controls the left feeder.
    '''
class DAQOutDeviceSim(DAQOutDeviceBase, ButtonPort):
    '''Device used when simulating the Switch & Sense 8/8 output device.

    The output properties are driven by a simulated button port.
    '''
    pass
class DAQOutDevice(DAQOutDeviceBase, MCDAQDevice):
    '''Device used when using the barst Switch & Sense 8/8 output device.
    '''
    # Attributes persisted/read from the app settings.
    __settings_attrs__ = (
        'house_light_pin', 'ir_leds_pin', 'fans_pin', 'feeder_r_pin',
        'feeder_l_pin')
    def __init__(self, **kwargs):
        super(DAQOutDevice, self).__init__(direction='o', **kwargs)
        # Map each output property to its configured hardware pin.
        self.dev_map = {'house_light': self.house_light_pin,
                        'ir_leds': self.ir_leds_pin,
                        'fans': self.fans_pin,
                        'feeder_r': self.feeder_r_pin,
                        'feeder_l': self.feeder_l_pin}
    house_light_pin = NumericProperty(4)
    '''The port in the Switch & Sense that controls the house light.
    Defaults to 4.
    '''
    ir_leds_pin = NumericProperty(6)
    '''The port in the Switch & Sense that controls the IR lights.
    Defaults to 6.
    '''
    fans_pin = NumericProperty(5)
    '''The port in the Switch & Sense that controls the fans.
    Defaults to 5.
    '''
    feeder_r_pin = NumericProperty(2)
    '''The port in the Switch & Sense that controls the right feeder.
    Defaults to 2.
    '''
    feeder_l_pin = NumericProperty(0)
    '''The port in the Switch & Sense that controls the left feeder.
    Defaults to 0.
    '''
|
|
from sympy.combinatorics.permutations import (Permutation, perm_af_parity,
perm_af_mul, perm_af_muln, cyclic)
from sympy.utilities.pytest import raises
def test_Permutation():
    """Exercise Permutation arithmetic, parity, ordering and distances."""
    p = Permutation([2, 5, 1, 6, 3, 0, 4])
    q = Permutation([[1], [0, 3, 5, 6, 2, 4]])
    assert Permutation(p.cyclic_form).array_form == p.array_form
    assert p.cardinality == 5040
    assert q.cardinality == 5040
    assert q.cycles == 2
    assert q*p == Permutation([4, 6, 1, 2, 5, 3, 0])
    assert p*q == Permutation([6, 5, 3, 0, 2, 4, 1])
    assert perm_af_mul([2, 5, 1, 6, 3, 0, 4], [3, 1, 4, 5, 0, 6, 2]) == \
        [6, 5, 3, 0, 2, 4, 1]
    assert cyclic([(2,3,5)], 5) == [[1, 2, 4], [0], [3]]
    assert (Permutation([[1,2,3],[0,4]])*Permutation([[1,2,4],[0],[3]])).cyclic_form == \
        [[1, 3], [0, 4, 2]]
    assert q.array_form == [3, 1, 4, 5, 0, 6, 2]
    assert p.cyclic_form == [[3, 6, 4], [0, 2, 1, 5]]
    assert p.transpositions() == [(3, 4), (3, 6), (0, 5), (0, 1), (0, 2)]
    assert p**13 == p
    assert q**2 == Permutation([5, 1, 0, 6, 3, 2, 4])
    assert p+q == Permutation([5, 6, 3, 1, 2, 4, 0])
    assert q+p == p+q
    assert p-q == Permutation([6, 3, 5, 1, 2, 4, 0])
    assert q-p == Permutation([1, 4, 2, 6, 5, 3, 0])
    a = p-q
    b = q-p
    assert (a+b).is_Identity
    assert p.conjugate(q) == Permutation([5, 3, 0, 4, 6, 2, 1])
    assert p.conjugate(q) == ~q*p*q == p**q
    assert q.conjugate(p) == Permutation([6, 3, 2, 0, 1, 4, 5])
    assert q.conjugate(p) == ~p*q*p == q**p
    assert p.commutator(q) == Permutation([1, 4, 5, 6, 3, 0, 2])
    assert q.commutator(p) == Permutation([5, 0, 6, 4, 1, 2, 3])
    assert p.commutator(q) == ~ q.commutator(p)
    assert len(p.atoms()) == 7
    assert q.atoms() == set([0, 1, 2, 3, 4, 5, 6])
    assert p.inversion_vector() == [2, 4, 1, 3, 1, 0]
    assert q.inversion_vector() == [3, 1, 2, 2, 0, 1]
    assert Permutation.from_inversion_vector(p.inversion_vector()) == p
    assert Permutation.from_inversion_vector(q.inversion_vector()).array_form\
        == q.array_form
    assert Permutation([i for i in range(500,-1,-1)]).inversions() == 125250
    assert Permutation([0, 4, 1, 3, 2]).parity() == 0
    assert Permutation([0, 1, 4, 3, 2]).parity() == 1
    assert perm_af_parity([0, 4, 1, 3, 2]) == 0
    assert perm_af_parity([0, 1, 4, 3, 2]) == 1
    s = Permutation([0])
    assert s.is_Singleton
    r = Permutation([3, 2, 1, 0])
    assert (r**2).is_Identity
    assert (p*(~p)).is_Identity
    assert (~p)**13 == Permutation([5, 2, 0, 4, 6, 1, 3])
    # BUG FIX: the original read ``assert ~(r**2).is_Identity``.  Because
    # ``~`` binds looser than attribute access, that evaluated
    # ``~True == -2`` which is always truthy, so the assert checked nothing.
    # The intended check (matching the ``(~p)`` usages above) is that the
    # inverse of the identity is itself the identity:
    assert (~(r**2)).is_Identity
    assert p.max() == 6
    assert p.min() == 0
    q = Permutation([[6], [5], [0, 1, 2, 3, 4]])
    assert q.max() == 4
    assert q.min() == 0
    p = Permutation([1, 5, 2, 0, 3, 6, 4])
    q = Permutation([[1, 2, 3, 5, 6], [0, 4]])
    assert p.ascents() == [0, 3, 4]
    assert q.ascents() == [1, 2, 4]
    assert r.ascents() == []
    assert p.descents() == [1, 2, 5]
    assert q.descents() == [0, 3, 5]
    assert Permutation(r.descents()).is_Identity
    assert p.inversions() == 7
    assert p.signature() == -1
    assert q.inversions() == 11
    assert q.signature() == -1
    assert (p*(~p)).inversions() == 0
    assert (p*(~p)).signature() == 1
    assert p.order() == 6
    assert q.order() == 10
    assert (p**(p.order())).is_Identity
    assert p.length() == 6
    assert q.length() == 7
    assert r.length() == 4
    assert p.runs() == [[1, 5], [2], [0, 3, 6], [4]]
    assert q.runs() == [[4], [2, 3, 5], [0, 6], [1]]
    assert r.runs() == [[3], [2], [1], [0]]
    assert p.index() == 8
    assert q.index() == 8
    assert r.index() == 3
    # The distance measures are symmetric.
    assert p.get_precedence_distance(q) == q.get_precedence_distance(p)
    assert p.get_adjacency_distance(q) == p.get_adjacency_distance(q)
    assert p.get_positional_distance(q) == p.get_positional_distance(q)
    p = Permutation([0, 1, 2, 3])
    q = Permutation([3, 2, 1, 0])
    assert p.get_precedence_distance(q) == 6
    assert p.get_adjacency_distance(q) == 3
    assert p.get_positional_distance(q) == 8
    a = [Permutation.unrank_nonlex(4, i) for i in range(5)]
    iden = Permutation([0, 1, 2, 3])
    for i in range(5):
        for j in range(i+1, 5):
            # Commuting pairs must have the identity commutator, and
            # commutes_with must agree with the product definition.
            assert a[i].commutes_with(a[j]) == (a[i]*a[j] == a[j]*a[i])
            if a[i].commutes_with(a[j]):
                assert a[i].commutator(a[j]) == iden
                assert a[j].commutator(a[i]) == iden
def test_josephus():
    # Josephus elimination with count 4 over 6 items (1 survivor padding).
    assert Permutation.josephus(4, 6, 1) == Permutation([3, 1, 0, 2, 5, 4])
    # Counting by 1 eliminates in order, yielding the identity.
    assert Permutation.josephus(1, 5, 1).is_Identity
def test_ranking():
    """Exercise lexicographic, Trotter-Johnson and non-lex rank/unrank."""
    assert Permutation.unrank_lex(5, 10).rank() == 10
    p = Permutation.unrank_lex(15, 225)
    assert p.rank() == 225
    p1 = p.next_lex()
    assert p1.rank() == 226
    assert Permutation.unrank_lex(15, 225).rank() == 225
    assert Permutation.unrank_lex(10, 0).is_Identity
    p = Permutation.unrank_lex(4, 23)
    assert p.rank() == 23
    assert p.array_form == [3, 2, 1, 0]
    # FIX: identity comparison instead of ``== None`` (PEP 8).
    assert p.next_lex() is None
    p = Permutation([1, 5, 2, 0, 3, 6, 4])
    q = Permutation([[1, 2, 3, 5, 6], [0, 4]])
    a = [Permutation.unrank_trotterjohnson(4, i).array_form for i in range(5)]
    assert a == [[0,1,2,3], [0,1,3,2], [0,3,1,2], [3,0,1,2], [3,0,2,1] ]
    # FIX: compare against list(range(...)) — on Python 3 a list never
    # equals a range object, so the original ``== range(5)`` would fail.
    assert [Permutation(pa).rank_trotterjohnson() for pa in a] == list(range(5))
    assert Permutation([0,1,2,3]).next_trotterjohnson() == \
        Permutation([0,1,3,2])
    assert q.rank_trotterjohnson() == 2283
    assert p.rank_trotterjohnson() == 3389
    p = Permutation([2, 5, 1, 6, 3, 0, 4])
    q = Permutation([[6], [5], [0, 1, 2, 3, 4]])
    assert p.rank() == 1964
    assert q.rank() == 870
    assert Permutation([]).rank_nonlex() == 0
    prank = p.rank_nonlex()
    assert prank == 1600
    assert Permutation.unrank_nonlex(7, 1600) == p
    qrank = q.rank_nonlex()
    assert qrank == 41
    assert Permutation.unrank_nonlex(7, 41) == Permutation(q.array_form)
    a = [Permutation.unrank_nonlex(4, i).array_form for i in range(24)]
    assert a == \
        [[1, 2, 3, 0], [3, 2, 0, 1], [1, 3, 0, 2], [1, 2, 0, 3], [2, 3, 1, 0], \
        [2, 0, 3, 1], [3, 0, 1, 2], [2, 0, 1, 3], [1, 3, 2, 0], [3, 0, 2, 1], \
        [1, 0, 3, 2], [1, 0, 2, 3], [2, 1, 3, 0], [2, 3, 0, 1], [3, 1, 0, 2], \
        [2, 1, 0, 3], [3, 2, 1, 0], [0, 2, 3, 1], [0, 3, 1, 2], [0, 2, 1, 3], \
        [3, 1, 2, 0], [0, 3, 2, 1], [0, 1, 3, 2], [0, 1, 2, 3]]
    assert Permutation([3, 2, 0, 1]).next_nonlex() == Permutation([1, 3, 0, 2])
    # FIX: list(range(24)) for the same Python-3 compatibility reason.
    assert [Permutation(pa).rank_nonlex() for pa in a] == list(range(24))
def test_muln():
    """Check that perm_af_muln agrees with iterated perm_af_mul on prefixes."""
    n = 6
    m = 8
    a = [Permutation.unrank_nonlex(n, i).array_form for i in range(m)]
    # Start from the identity permutation.  Use an explicit list: on
    # Python 3 `range(n)` is a lazy object, not the list array-form that
    # perm_af_mul expects (and that `h == h2` compares against).
    h = list(range(n))
    for i in range(m):
        h = perm_af_mul(h, a[i])
        h2 = perm_af_muln(*a[:i + 1])
        assert h == h2
def test_args():
    """Constructor argument handling: cyclic vs. array form, and validation.

    The ``_array_form`` / ``_cyclic_form`` caches start out unset for the
    representation that was not supplied; identity checks use ``is None``
    rather than ``== None``.
    """
    p = Permutation([(0, 3, 1, 2), (4, 5)])
    assert p.cyclic_form == [[0, 3, 1, 2], [4, 5]]
    # the array form is computed lazily, so it is not cached yet
    assert p._array_form is None
    p = Permutation((0, 3, 1, 2))
    assert p._cyclic_form is None
    assert p._array_form == [0, 3, 1, 2]
    assert Permutation([0]) == Permutation((0, ))
    assert Permutation([[0], [1]]) == Permutation(((0, ), (1, ))) == Permutation(((0, ), [1]))
    raises(ValueError, lambda: Permutation([[1, 2], [3]]))  # 0, 1, 2 should be present
    raises(ValueError, lambda: Permutation([1, 2, 3]))  # 0, 1, 2 should be present
    raises(ValueError, lambda: Permutation(0, 1, 2))  # enclosing brackets needed
    raises(ValueError, lambda: Permutation([1, 2], [0]))  # enclosing brackets needed
    raises(ValueError, lambda: Permutation([[1, 2], 0]))  # enclosing brackets needed on 0
|
|
# Copyright (C) 2003-2007 Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Packetizer.
"""
import errno
import select
import socket
import struct
import threading
import time
from paramiko.common import *
from paramiko import util
from paramiko.ssh_exception import SSHException
from paramiko.message import Message
got_r_hmac = False
try:
    # optional C-accelerated HMAC extension; fall back when unavailable
    import r_hmac
    got_r_hmac = True
except ImportError:
    pass


def compute_hmac(key, message, digest_class):
    """
    Compute the HMAC of ``message`` under ``key`` using ``digest_class``
    (a hash constructor such as ``hashlib.sha1``) and return the raw digest.

    Prefers the optional C extension ``r_hmac`` when it imported above;
    otherwise uses the standard-library ``hmac`` module, which implements
    the same RFC 2104 construction, so the previous hard dependency on
    PyCrypto's ``Crypto.Hash.HMAC`` is no longer needed.
    """
    if got_r_hmac:
        return r_hmac.HMAC(key, message, digest_class).digest()
    import hmac
    return hmac.new(key, message, digest_class).digest()
class NeedRekeyException (Exception):
    """
    Raised out of a packet read to signal that the transport should
    negotiate a new set of keys before continuing.
    """
    pass
class Packetizer (object):
    """
    Implementation of the base SSH packet protocol.

    Wraps a single socket and handles framing, encryption, MAC
    computation/verification, optional compression, per-direction sequence
    numbers, rekey accounting, and keepalives.  Ciphers/MACs/compressors
    are injected via the ``set_*`` methods after key negotiation.
    """
    # READ the secsh RFC's before raising these values. if anything,
    # they should probably be lower.
    REKEY_PACKETS = pow(2, 30)
    REKEY_BYTES = pow(2, 30)
    def __init__(self, socket):
        self.__socket = socket
        self.__logger = None
        self.__closed = False
        self.__dump_packets = False
        self.__need_rekey = False
        # bitmask: 1 = outbound cipher was reset, 2 = inbound cipher was
        # reset; the rekey flag clears only once both bits are set.
        self.__init_count = 0
        # bytes read past a newline by readline(); consumed first by read_all()
        self.__remainder = ''
        # used for noticing when to re-key:
        self.__sent_bytes = 0
        self.__sent_packets = 0
        self.__received_bytes = 0
        self.__received_packets = 0
        # packets received after we requested a rekey (see read_message)
        self.__received_packets_overflow = 0
        # current inbound/outbound ciphering:
        self.__block_size_out = 8
        self.__block_size_in = 8
        self.__mac_size_out = 0
        self.__mac_size_in = 0
        self.__block_engine_out = None
        self.__block_engine_in = None
        self.__mac_engine_out = None
        self.__mac_engine_in = None
        self.__mac_key_out = ''
        self.__mac_key_in = ''
        self.__compress_engine_out = None
        self.__compress_engine_in = None
        # per-direction SSH sequence numbers (mod 2**32); never sent on the
        # wire but mixed into every MAC
        self.__sequence_number_out = 0L
        self.__sequence_number_in = 0L
        # lock around outbound writes (packet computation)
        self.__write_lock = threading.RLock()
        # keepalives:
        self.__keepalive_interval = 0
        self.__keepalive_last = time.time()
        self.__keepalive_callback = None
    def set_log(self, log):
        """
        Set the python log object to use for logging.
        """
        self.__logger = log
    def set_outbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key):
        """
        Switch outbound data cipher.  Also resets the outbound byte/packet
        counters used for rekey accounting.
        """
        self.__block_engine_out = block_engine
        self.__block_size_out = block_size
        self.__mac_engine_out = mac_engine
        self.__mac_size_out = mac_size
        self.__mac_key_out = mac_key
        self.__sent_bytes = 0
        self.__sent_packets = 0
        # wait until the reset happens in both directions before clearing rekey flag
        self.__init_count |= 1
        if self.__init_count == 3:
            self.__init_count = 0
            self.__need_rekey = False
    def set_inbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key):
        """
        Switch inbound data cipher.  Also resets the inbound byte/packet
        counters used for rekey accounting.
        """
        self.__block_engine_in = block_engine
        self.__block_size_in = block_size
        self.__mac_engine_in = mac_engine
        self.__mac_size_in = mac_size
        self.__mac_key_in = mac_key
        self.__received_bytes = 0
        self.__received_packets = 0
        self.__received_packets_overflow = 0
        # wait until the reset happens in both directions before clearing rekey flag
        self.__init_count |= 2
        if self.__init_count == 3:
            self.__init_count = 0
            self.__need_rekey = False
    def set_outbound_compressor(self, compressor):
        # callable applied to the payload before encryption
        self.__compress_engine_out = compressor
    def set_inbound_compressor(self, compressor):
        # callable applied to the payload after decryption + MAC check
        self.__compress_engine_in = compressor
    def close(self):
        # set the flag first so blocked readers/writers notice and raise EOFError
        self.__closed = True
        self.__socket.close()
    def set_hexdump(self, hexdump):
        # when True, every packet read/written is hex-dumped to the log
        self.__dump_packets = hexdump
    def get_hexdump(self):
        return self.__dump_packets
    def get_mac_size_in(self):
        return self.__mac_size_in
    def get_mac_size_out(self):
        return self.__mac_size_out
    def need_rekey(self):
        """
        Returns C{True} if a new set of keys needs to be negotiated. This
        will be triggered during a packet read or write, so it should be
        checked after every read or write, or at least after every few.
        @return: C{True} if a new set of keys needs to be negotiated
        """
        return self.__need_rekey
    def set_keepalive(self, interval, callback):
        """
        Turn on/off the callback keepalive. If C{interval} seconds pass with
        no data read from or written to the socket, the callback will be
        executed and the timer will be reset.
        """
        self.__keepalive_interval = interval
        self.__keepalive_callback = callback
        self.__keepalive_last = time.time()
    def read_all(self, n, check_rekey=False):
        """
        Read as close to N bytes as possible, blocking as long as necessary.
        @param n: number of bytes to read
        @type n: int
        @return: the data read
        @rtype: str
        @raise EOFError: if the socket was closed before all the bytes could
            be read
        """
        out = ''
        # handle over-reading from reading the banner line
        if len(self.__remainder) > 0:
            out = self.__remainder[:n]
            self.__remainder = self.__remainder[n:]
            n -= len(out)
        # PY22 comes from paramiko.common -- presumably flags a Python 2.2
        # interpreter without socket timeouts, needing the select()-based path
        if PY22:
            return self._py22_read_all(n, out)
        while n > 0:
            got_timeout = False
            try:
                x = self.__socket.recv(n)
                if len(x) == 0:
                    raise EOFError()
                out += x
                n -= len(x)
            except socket.timeout:
                got_timeout = True
            except socket.error, e:
                # on Linux, sometimes instead of socket.timeout, we get
                # EAGAIN. this is a bug in recent (> 2.6.9) kernels but
                # we need to work around it.
                if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
                    got_timeout = True
                elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
                    # syscall interrupted; try again
                    pass
                elif self.__closed:
                    raise EOFError()
                else:
                    raise
            if got_timeout:
                if self.__closed:
                    raise EOFError()
                # surface a pending rekey to the caller instead of blocking,
                # but only before any bytes of this read have arrived
                if check_rekey and (len(out) == 0) and self.__need_rekey:
                    raise NeedRekeyException()
                self._check_keepalive()
        return out
    def write_all(self, out):
        """
        Write the entire string C{out} to the socket, retrying on timeouts
        and EAGAIN/EINTR.  Raises EOFError if the socket fails, is closed,
        or raises any other exception.
        """
        self.__keepalive_last = time.time()
        while len(out) > 0:
            got_timeout = False
            try:
                n = self.__socket.send(out)
            except socket.timeout:
                got_timeout = True
            except socket.error, e:
                if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
                    got_timeout = True
                elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
                    # syscall interrupted; try again
                    pass
                else:
                    # any other socket error is treated as a dead connection
                    n = -1
            except Exception:
                # could be: (32, 'Broken pipe')
                n = -1
            if got_timeout:
                n = 0
                if self.__closed:
                    n = -1
            if n < 0:
                raise EOFError()
            if n == len(out):
                break
            # partial send: drop what went through and loop on the rest
            out = out[n:]
        return
    def readline(self, timeout):
        """
        Read a line from the socket. We assume no data is pending after the
        line, so it's okay to attempt large reads.
        """
        buf = ''
        while not '\n' in buf:
            buf += self._read_timeout(timeout)
        n = buf.index('\n')
        # anything read past the newline is stashed for later read_all() calls
        self.__remainder += buf[n+1:]
        buf = buf[:n]
        # strip a trailing CR (lines may be CRLF-terminated)
        if (len(buf) > 0) and (buf[-1] == '\r'):
            buf = buf[:-1]
        return buf
    def send_message(self, data):
        """
        Write a block of data using the current cipher, as an SSH block.
        """
        # encrypt this sucka
        data = str(data)
        # first payload byte is the SSH message type
        cmd = ord(data[0])
        if cmd in MSG_NAMES:
            cmd_name = MSG_NAMES[cmd]
        else:
            cmd_name = '$%x' % cmd
        orig_len = len(data)
        self.__write_lock.acquire()
        try:
            # compress before framing/encryption
            if self.__compress_engine_out is not None:
                data = self.__compress_engine_out(data)
            packet = self._build_packet(data)
            if self.__dump_packets:
                self._log(DEBUG, 'Write packet <%s>, length %d' % (cmd_name, orig_len))
                self._log(DEBUG, util.format_binary(packet, 'OUT: '))
            if self.__block_engine_out != None:
                out = self.__block_engine_out.encrypt(packet)
            else:
                out = packet
            # + mac
            if self.__block_engine_out != None:
                # MAC covers the implicit sequence number plus the plaintext packet
                payload = struct.pack('>I', self.__sequence_number_out) + packet
                out += compute_hmac(self.__mac_key_out, payload, self.__mac_engine_out)[:self.__mac_size_out]
            self.__sequence_number_out = (self.__sequence_number_out + 1) & 0xffffffffL
            self.write_all(out)
            self.__sent_bytes += len(out)
            self.__sent_packets += 1
            if (self.__sent_packets % 100) == 0:
                # stirring the randpool takes 30ms on my ibook!!
                randpool.stir()
            if ((self.__sent_packets >= self.REKEY_PACKETS) or (self.__sent_bytes >= self.REKEY_BYTES)) \
                   and not self.__need_rekey:
                # only ask once for rekeying
                self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes sent)' %
                          (self.__sent_packets, self.__sent_bytes))
                # NOTE(review): this resets the *received* overflow counter in
                # the send path -- presumably intentional since a rekey covers
                # both directions, but worth confirming.
                self.__received_packets_overflow = 0
                self._trigger_rekey()
        finally:
            self.__write_lock.release()
    def read_message(self):
        """
        Only one thread should ever be in this function (no other locking is
        done).
        @raise SSHException: if the packet is mangled
        @raise NeedRekeyException: if the transport should rekey
        """
        # read (and decrypt) the first cipher block to learn the packet length
        header = self.read_all(self.__block_size_in, check_rekey=True)
        if self.__block_engine_in != None:
            header = self.__block_engine_in.decrypt(header)
        if self.__dump_packets:
            self._log(DEBUG, util.format_binary(header, 'IN: '));
        packet_size = struct.unpack('>I', header[:4])[0]
        # leftover contains decrypted bytes from the first block (after the length field)
        leftover = header[4:]
        # the remainder of the packet must be a whole number of cipher blocks
        if (packet_size - len(leftover)) % self.__block_size_in != 0:
            raise SSHException('Invalid packet blocking')
        buf = self.read_all(packet_size + self.__mac_size_in - len(leftover))
        packet = buf[:packet_size - len(leftover)]
        post_packet = buf[packet_size - len(leftover):]
        if self.__block_engine_in != None:
            packet = self.__block_engine_in.decrypt(packet)
        if self.__dump_packets:
            self._log(DEBUG, util.format_binary(packet, 'IN: '));
        packet = leftover + packet
        if self.__mac_size_in > 0:
            mac = post_packet[:self.__mac_size_in]
            # MAC is computed over seq-number + length field + packet body
            mac_payload = struct.pack('>II', self.__sequence_number_in, packet_size) + packet
            my_mac = compute_hmac(self.__mac_key_in, mac_payload, self.__mac_engine_in)[:self.__mac_size_in]
            if my_mac != mac:
                raise SSHException('Mismatched MAC')
        # first byte of the packet is the padding length
        padding = ord(packet[0])
        payload = packet[1:packet_size - padding]
        randpool.add_event()
        if self.__dump_packets:
            self._log(DEBUG, 'Got payload (%d bytes, %d padding)' % (packet_size, padding))
        if self.__compress_engine_in is not None:
            payload = self.__compress_engine_in(payload)
        # payload[0] is the message type; the Message gets the rest
        msg = Message(payload[1:])
        msg.seqno = self.__sequence_number_in
        self.__sequence_number_in = (self.__sequence_number_in + 1) & 0xffffffffL
        # check for rekey
        # +4 accounts for the length field, which isn't in packet_size
        self.__received_bytes += packet_size + self.__mac_size_in + 4
        self.__received_packets += 1
        if self.__need_rekey:
            # we've asked to rekey -- give them 20 packets to comply before
            # dropping the connection
            self.__received_packets_overflow += 1
            if self.__received_packets_overflow >= 20:
                raise SSHException('Remote transport is ignoring rekey requests')
        elif (self.__received_packets >= self.REKEY_PACKETS) or \
             (self.__received_bytes >= self.REKEY_BYTES):
            # only ask once for rekeying
            self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes received)' %
                      (self.__received_packets, self.__received_bytes))
            self.__received_packets_overflow = 0
            self._trigger_rekey()
        cmd = ord(payload[0])
        if cmd in MSG_NAMES:
            cmd_name = MSG_NAMES[cmd]
        else:
            cmd_name = '$%x' % cmd
        if self.__dump_packets:
            self._log(DEBUG, 'Read packet <%s>, length %d' % (cmd_name, len(payload)))
        return cmd, msg
    ##########  protected
    def _log(self, level, msg):
        # log a message (or list of messages) if a logger has been set
        if self.__logger is None:
            return
        if issubclass(type(msg), list):
            for m in msg:
                self.__logger.log(level, m)
        else:
            self.__logger.log(level, msg)
    def _check_keepalive(self):
        # fire the keepalive callback if the link has been idle long enough
        if (not self.__keepalive_interval) or (not self.__block_engine_out) or \
            self.__need_rekey:
            # wait till we're encrypting, and not in the middle of rekeying
            return
        now = time.time()
        if now > self.__keepalive_last + self.__keepalive_interval:
            self.__keepalive_callback()
            self.__keepalive_last = now
    def _py22_read_all(self, n, out):
        # read_all() fallback using select(): for pythons without socket timeouts
        while n > 0:
            r, w, e = select.select([self.__socket], [], [], 0.1)
            if self.__socket not in r:
                if self.__closed:
                    raise EOFError()
                self._check_keepalive()
            else:
                x = self.__socket.recv(n)
                if len(x) == 0:
                    raise EOFError()
                out += x
                n -= len(x)
        return out
    def _py22_read_timeout(self, timeout):
        # _read_timeout() fallback using select(); reads one byte at a time
        start = time.time()
        while True:
            r, w, e = select.select([self.__socket], [], [], 0.1)
            if self.__socket in r:
                x = self.__socket.recv(1)
                if len(x) == 0:
                    raise EOFError()
                break
            if self.__closed:
                raise EOFError()
            now = time.time()
            if now - start >= timeout:
                raise socket.timeout()
        return x
    def _read_timeout(self, timeout):
        """
        Read whatever data is available (up to 128 bytes), raising
        socket.timeout if nothing arrives within C{timeout} seconds and
        EOFError if the socket dies or is closed.
        """
        if PY22:
            return self._py22_read_timeout(timeout)
        start = time.time()
        while True:
            try:
                x = self.__socket.recv(128)
                if len(x) == 0:
                    raise EOFError()
                break
            except socket.timeout:
                pass
            if self.__closed:
                raise EOFError()
            now = time.time()
            if now - start >= timeout:
                raise socket.timeout()
        return x
    def _build_packet(self, payload):
        # pad up at least 4 bytes, to nearest block-size (usually 8)
        bsize = self.__block_size_out
        # total frame = 4-byte length + 1-byte pad count + payload + padding;
        # this formula makes the total a multiple of bsize with padding >= 4
        padding = 3 + bsize - ((len(payload) + 8) % bsize)
        packet = struct.pack('>IB', len(payload) + padding + 1, padding)
        packet += payload
        if self.__block_engine_out is not None:
            packet += randpool.get_bytes(padding)
        else:
            # cute trick i caught openssh doing: if we're not encrypting,
            # don't waste random bytes for the padding
            packet += (chr(0) * padding)
        return packet
    def _trigger_rekey(self):
        # outside code should check for this flag
        self.__need_rekey = True
|
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils function for configuring a federated or centralized task."""
import collections
from typing import Callable, Dict, Iterator, List, Optional, Tuple
import tensorflow as tf
import tensorflow_federated as tff
from generalization.tasks import training_specs
from generalization.utils import client_data_utils
from generalization.utils import eval_metric_distribution
SamplerType = Iterator[List[tf.data.Dataset]]
MetricsDictType = training_specs.MetricsDictType
FederatedEvalFnType = training_specs.FederatedEvalFnType
CentralizedEvalFnType = training_specs.CentralizedEvalFnType
FederatedModelFnType = training_specs.FederatedModelFnType
StatFnType = eval_metric_distribution.StatFnType
ClientData = tff.simulation.datasets.ClientData
ServerState = tff.learning.framework.ServerState
def _create_samplers(
    *,  # Caller passes below args by name.
    part_train_eval_cd: tff.simulation.datasets.ClientData,
    part_val_cd: tff.simulation.datasets.ClientData,
    unpart_cd: tff.simulation.datasets.ClientData,
    test_cd: Optional[tff.simulation.datasets.ClientData],
    part_clients_per_eval: Optional[int],
    unpart_clients_per_eval: Optional[int],
    test_clients_for_eval: Optional[int],
    resample_eval_clients: bool,
    eval_clients_random_seed: Optional[int],
) -> Tuple[SamplerType, SamplerType, SamplerType, Optional[SamplerType]]:
  """Create four samplers.

  Builds one `FederatedDatasetSampler` per evaluation split
  (participating-train-eval, participating-validation, unparticipating,
  test).  The test sampler is only built when `test_cd` is provided;
  otherwise `None` is returned in its position.
  """
  part_train_eval_sampler = client_data_utils.FederatedDatasetSampler(
      client_data=part_train_eval_cd,
      num_sample_clients=part_clients_per_eval,
      resample=resample_eval_clients,
      seed=eval_clients_random_seed)
  part_val_sampler = client_data_utils.FederatedDatasetSampler(
      client_data=part_val_cd,
      num_sample_clients=part_clients_per_eval,
      resample=resample_eval_clients,
      seed=eval_clients_random_seed)
  unpart_sampler = client_data_utils.FederatedDatasetSampler(
      client_data=unpart_cd,
      num_sample_clients=unpart_clients_per_eval,
      resample=resample_eval_clients,
      seed=eval_clients_random_seed)
  if test_cd is not None:
    # NOTE(review): unlike the three samplers above, this one does not pass
    # resample=resample_eval_clients, so the test sampler falls back to the
    # class default -- confirm this asymmetry is intentional.
    test_sampler = client_data_utils.FederatedDatasetSampler(
        client_data=test_cd,
        num_sample_clients=test_clients_for_eval,
        seed=eval_clients_random_seed)
  else:
    test_sampler = None
  return (part_train_eval_sampler, part_val_sampler, unpart_sampler,
          test_sampler)
def create_federated_eval_fns(
    *,  # Caller passes below args by name.
    tff_model_builder: FederatedModelFnType,
    metrics_builder: Callable[[], List[tf.keras.metrics.Metric]],
    part_train_eval_cd: tff.simulation.datasets.ClientData,
    part_val_cd: tff.simulation.datasets.ClientData,
    unpart_cd: tff.simulation.datasets.ClientData,
    test_cd: Optional[tff.simulation.datasets.ClientData],
    stat_fns: Dict[str, StatFnType],
    rounds_per_eval: int,
    part_clients_per_eval: Optional[int],
    unpart_clients_per_eval: Optional[int],
    test_clients_for_eval: Optional[int],
    resample_eval_clients: bool,
    eval_clients_random_seed: Optional[int],
) -> Tuple[FederatedEvalFnType, FederatedEvalFnType, FederatedEvalFnType,
           Optional[FederatedEvalFnType]]:
  """Create federated evaluation functions: train_eval, validation and test.

  Builds one client sampler per evaluation split and wraps a shared federated
  metric-distribution evaluation around each.  The first three returned
  functions take `(state, round_num)` and evaluate only when `round_num` is a
  multiple of `rounds_per_eval`, returning an empty `OrderedDict` on skipped
  rounds.  The test function (returned only when `test_cd` is given) ignores
  its round number and always evaluates.

  Args:
    tff_model_builder: A callable with no args returning a
      `tff.learning.Model`.
    metrics_builder: A callable with no args returning a list of keras
      metrics.
    part_train_eval_cd: Preprocessed training chunk of the training
      ClientData, used for evaluation.
    part_val_cd: Preprocessed validation chunk of the training ClientData.
    unpart_cd: Preprocessed validation ClientData.
    test_cd: Optional preprocessed test ClientData; when `None`, `None` is
      returned in the test function's position.
    stat_fns: A mapping from statistic name to a callable computing that
      statistic from per-client metric values and per-client example counts.
    rounds_per_eval: How often (in rounds) the periodic functions evaluate.
    part_clients_per_eval: Optional number of training clients sampled per
      evaluation; `None` means all.
    unpart_clients_per_eval: Optional number of validation clients sampled;
      `None` means all.
    test_clients_for_eval: Optional number of test clients sampled, used only
      when `test_cd` is given; `None` means all.
    resample_eval_clients: Whether evaluation clients are resampled every
      round.
    eval_clients_random_seed: Optional seed for client sampling.

  Returns:
    A tuple (part_train_eval_fn, part_val_fn, unpart_fn, test_fn); `test_fn`
    is `None` when `test_cd` is `None`.
  """
  (part_train_eval_sampler, part_val_sampler, unpart_sampler,
   test_sampler) = _create_samplers(
       part_train_eval_cd=part_train_eval_cd,
       part_val_cd=part_val_cd,
       unpart_cd=unpart_cd,
       test_cd=test_cd,
       part_clients_per_eval=part_clients_per_eval,
       unpart_clients_per_eval=unpart_clients_per_eval,
       test_clients_for_eval=test_clients_for_eval,
       resample_eval_clients=resample_eval_clients,
       eval_clients_random_seed=eval_clients_random_seed)
  evaluate_fn = eval_metric_distribution.create_federated_eval_distribution_fn(
      model_fn=tff_model_builder,
      metrics_builder=metrics_builder,
      stat_fns=stat_fns)

  def _make_periodic_eval_fn(sampler: SamplerType) -> FederatedEvalFnType:
    """Wrap `evaluate_fn` so it runs only every `rounds_per_eval` rounds."""

    def _eval_fn(state: ServerState, round_num: int) -> MetricsDictType:
      if round_num % rounds_per_eval != 0:
        return collections.OrderedDict()
      return evaluate_fn(state.model, next(sampler))

    return _eval_fn

  part_train_eval_fn = _make_periodic_eval_fn(part_train_eval_sampler)
  part_val_fn = _make_periodic_eval_fn(part_val_sampler)
  unpart_fn = _make_periodic_eval_fn(unpart_sampler)

  if test_sampler is None:
    test_fn = None
  else:

    def test_fn(state: ServerState, round_num: int = 0) -> MetricsDictType:
      # The round number is accepted (and discarded) only to keep the
      # interface consistent with the periodic evaluation functions.
      del round_num
      return evaluate_fn(state.model, next(test_sampler))

  return part_train_eval_fn, part_val_fn, unpart_fn, test_fn
def create_centralized_eval_fns(
    *,  # Caller passes below args by name.
    tff_model_builder: FederatedModelFnType,
    metrics_builder: Callable[[], List[tf.keras.metrics.Metric]],
    part_train_eval_cd: ClientData,
    part_val_cd: ClientData,
    unpart_cd: ClientData,
    test_cd: Optional[ClientData],
    stat_fns: Dict[str, StatFnType],
    part_clients_per_eval: Optional[int],
    unpart_clients_per_eval: Optional[int],
    test_clients_for_eval: Optional[int],
    resample_eval_clients: bool,
    eval_clients_random_seed: Optional[int],
) -> Tuple[CentralizedEvalFnType, CentralizedEvalFnType, CentralizedEvalFnType,
           CentralizedEvalFnType]:
  """Create centralized evaluation functions: train_eval, validation and test.

  Builds one client sampler per evaluation split and wraps a shared federated
  metric-distribution evaluation around each.  Each returned function takes a
  `tf.keras.Model`, extracts its weights, and evaluates them on the next
  client sample from the corresponding split.

  Args:
    tff_model_builder: A callable with no args returning a
      `tff.learning.Model`.
    metrics_builder: A callable with no args returning a list of keras
      metrics.
    part_train_eval_cd: Preprocessed training chunk of the training
      ClientData, used for evaluation.
    part_val_cd: Preprocessed validation chunk of the training ClientData.
    unpart_cd: Preprocessed validation ClientData.
    test_cd: Optional preprocessed test ClientData; when `None`, `None` is
      returned in the test function's position.
    stat_fns: A mapping from statistic name to a callable computing that
      statistic from per-client metric values and per-client example counts.
    part_clients_per_eval: Optional number of training clients sampled per
      evaluation; `None` means all.
    unpart_clients_per_eval: Optional number of validation clients sampled;
      `None` means all.
    test_clients_for_eval: Optional number of test clients sampled; `None`
      means all.
    resample_eval_clients: Whether evaluation clients are resampled every
      round.
    eval_clients_random_seed: Optional seed for client sampling.

  Returns:
    A tuple (part_train_eval_fn, part_val_fn, unpart_fn, test_fn) of
    CentralizedEvalFnType; `test_fn` is `None` when `test_cd` is `None`.
  """
  (part_train_eval_sampler, part_val_sampler, unpart_sampler,
   test_sampler) = _create_samplers(
       part_train_eval_cd=part_train_eval_cd,
       part_val_cd=part_val_cd,
       unpart_cd=unpart_cd,
       test_cd=test_cd,
       part_clients_per_eval=part_clients_per_eval,
       unpart_clients_per_eval=unpart_clients_per_eval,
       test_clients_for_eval=test_clients_for_eval,
       resample_eval_clients=resample_eval_clients,
       eval_clients_random_seed=eval_clients_random_seed)
  evaluate_fn = eval_metric_distribution.create_federated_eval_distribution_fn(
      model_fn=tff_model_builder,
      metrics_builder=metrics_builder,
      stat_fns=stat_fns)

  def _make_eval_fn(sampler: SamplerType) -> CentralizedEvalFnType:
    """Build an eval fn that scores a keras model's weights on `sampler`."""

    def _eval_fn(keras_model: tf.keras.Model) -> MetricsDictType:
      model_weights = tff.learning.ModelWeights.from_model(keras_model)
      return evaluate_fn(model_weights, next(sampler))

    return _eval_fn

  part_train_eval_fn = _make_eval_fn(part_train_eval_sampler)
  part_val_fn = _make_eval_fn(part_val_sampler)
  unpart_fn = _make_eval_fn(unpart_sampler)
  test_fn = _make_eval_fn(test_sampler) if test_sampler is not None else None
  return part_train_eval_fn, part_val_fn, unpart_fn, test_fn
|
|
#!/usr/bin/env python
# version
__id__ = "$Id: aboutdialog.py 1193 2007-05-03 17:29:59Z dmitriy $"
__revision__ = "$Revision: 1193 $"
import wx
import os
import sys
import logging
from wx.lib.scrolledpanel import ScrolledPanel
from sas.sasgui.guiframe.events import StatusEvent
from sas.sasgui.guiframe.panel_base import PanelBase
from inversion_state import InversionState
from pr_widgets import PrTextCtrl
from pr_widgets import DataFileTextCtrl
from pr_widgets import OutputTextCtrl
from sas.sasgui.guiframe.documentation_window import DocumentationWindow
logger = logging.getLogger(__name__)
if sys.platform.count("win32") > 0:
FONT_VARIANT = 0
else:
FONT_VARIANT = 1
class InversionControl(ScrolledPanel, PanelBase):
"""
"""
window_name = 'pr_control'
window_caption = "P(r) control panel"
CENTER_PANE = True
# Figure of merit parameters [default]
## Oscillation parameters (sin function = 1.1)
oscillation_max = 1.5
    def __init__(self, parent, id=-1, plots=None, **kwargs):
        """
        Build the P(r) inversion control panel.

        :param parent: parent window; its own ``parent`` attribute (the
            application frame) is kept in ``self.parent``
        :param plots: optional list of plottables available for inversion
        """
        ScrolledPanel.__init__(self, parent, id=id, **kwargs)
        PanelBase.__init__(self, parent)
        self.SetupScrolling()
        #Set window's font size
        self.SetWindowVariant(variant=FONT_VARIANT)
        self._set_analysis(False)
        self.plots = plots
        self.radio_buttons = {}
        self.parent = parent.parent
        # The wx controls below are created later in _do_layout(); until then
        # the names hooked by __setattr__/__getattr__ hold None.
        ## Data file TextCtrl
        self.data_file = None
        self.plot_data = None
        self.nfunc_ctl = None
        self.alpha_ctl = None
        self.dmax_ctl = None
        self.time_ctl = None
        self.chi2_ctl = None
        self.osc_ctl = None
        self.file_radio = None
        self.plot_radio = None
        self.label_sugg = None
        self.qmin_ctl = None
        self.qmax_ctl = None
        self.swidth_ctl = None
        self.sheight_ctl = None
        self.rg_ctl = None
        self.iq0_ctl = None
        self.bck_value = None
        self.bck_est_ctl = None
        self.bck_man_ctl = None
        self.est_bck = True
        self.bck_input = None
        self.bck_ctl = None
        # TextCtrl for fraction of positive P(r)
        self.pos_ctl = None
        # TextCtrl for fraction of 1 sigma positive P(r)
        self.pos_err_ctl = None
        ## Estimates
        self.alpha_estimate_ctl = None
        self.nterms_estimate_ctl = None
        ## D_max distance explorator
        self.distance_explorator_ctl = None
        ## Data manager
        self._manager = None
        ## Default file location for save
        self._default_save_location = os.getcwd()
        if self.parent is not None:
            self._default_save_location = \
                        self.parent._default_save_location
        # Default width
        self._default_width = 350
        self._do_layout()
    def __setattr__(self, name, value):
        """
        Allow direct hooks to text boxes.

        Assigning to one of the pseudo-attributes below routes the value into
        the matching wx control (formatted as text) instead of storing it on
        the instance; any other name is stored normally via
        wx.Panel.__setattr__.
        """
        if name == 'nfunc':
            self.nfunc_ctl.SetValue(str(int(value)))
        elif name == 'd_max':
            self.dmax_ctl.SetValue(str(value))
        elif name == 'alpha':
            self.alpha_ctl.SetValue(str(value))
        elif name == 'chi2':
            self.chi2_ctl.SetValue("%-5.2g" % value)
        elif name == 'bck':
            self.bck_ctl.SetValue("%-5.2g" % value)
        elif name == 'q_min':
            self.qmin_ctl.SetValue("%-5.2g" % value)
        elif name == 'q_max':
            self.qmax_ctl.SetValue("%-5.2g" % value)
        elif name == 'elapsed':
            self.time_ctl.SetValue("%-5.2g" % value)
        elif name == 'rg':
            self.rg_ctl.SetValue("%-5.2g" % value)
        elif name == 'iq0':
            self.iq0_ctl.SetValue("%-5.2g" % value)
        elif name == 'oscillation':
            self.osc_ctl.SetValue("%-5.2g" % value)
        elif name == 'slit_width':
            self.swidth_ctl.SetValue("%-5.2g" % value)
        elif name == 'slit_height':
            self.sheight_ctl.SetValue("%-5.2g" % value)
        elif name == 'positive':
            self.pos_ctl.SetValue("%-5.2g" % value)
        elif name == 'pos_err':
            self.pos_err_ctl.SetValue("%-5.2g" % value)
        elif name == 'alpha_estimate':
            # estimates are shown on clickable labels; enable them so the
            # user can accept the suggested value
            self.alpha_estimate_ctl.SetToolTipString("Click to accept value.")
            self.alpha_estimate_ctl.Enable(True)
            self.alpha_estimate_ctl.SetLabel("%-3.1g" % value)
            #self.alpha_estimate_ctl.Show()
            #self.label_sugg.Show()
        elif name == 'nterms_estimate':
            self.nterms_estimate_ctl.SetToolTipString("Click to accept value.")
            self.nterms_estimate_ctl.Enable(True)
            self.nterms_estimate_ctl.SetLabel("%-g" % value)
        elif name == 'plotname':
            # changing the data name also triggers parameter re-evaluation
            self.plot_data.SetValue(str(value))
            self._on_pars_changed(None)
        elif name == 'datafile':
            self.plot_data.SetValue(str(value))
            self._on_pars_changed(None)
        else:
            wx.Panel.__setattr__(self, name, value)
def __getattr__(self, name):
"""
Allow direct hooks to text boxes
"""
if name == 'nfunc':
try:
return int(self.nfunc_ctl.GetValue())
except:
return -1
elif name == 'd_max':
try:
return self.dmax_ctl.GetValue()
except:
return -1.0
elif name == 'alpha':
try:
return self.alpha_ctl.GetValue()
except:
return -1.0
elif name == 'chi2':
try:
return float(self.chi2_ctl.GetValue())
except:
return None
elif name == 'bck':
try:
return float(self.bck_ctl.GetValue())
except:
return None
elif name == 'q_min':
try:
return float(self.qmin_ctl.GetValue())
except:
return 0.0
elif name == 'q_max':
try:
return float(self.qmax_ctl.GetValue())
except:
return 0.0
elif name == 'elapsed':
try:
return float(self.time_ctl.GetValue())
except:
return None
elif name == 'rg':
try:
return float(self.rg_ctl.GetValue())
except:
return None
elif name == 'iq0':
try:
return float(self.iq0_ctl.GetValue())
except:
return None
elif name == 'oscillation':
try:
return float(self.osc_ctl.GetValue())
except:
return None
elif name == 'slit_width':
try:
return float(self.swidth_ctl.GetValue())
except:
return None
elif name == 'slit_height':
try:
return float(self.sheight_ctl.GetValue())
except:
return None
elif name == 'pos':
try:
return float(self.pos_ctl.GetValue())
except:
return None
elif name == 'pos_err':
try:
return float(self.pos_err_ctl.GetValue())
except:
return None
elif name == 'alpha_estimate':
try:
return float(self.alpha_estimate_ctl.GetLabel())
except:
return None
elif name == 'nterms_estimate':
try:
return int(self.nterms_estimate_ctl.GetLabel())
except:
return None
elif name == 'plotname':
return self.plot_data.GetValue()
elif name == 'datafile':
return self.plot_data.GetValue()
else:
return wx.Panel.__getattribute__(self, name)
def save_project(self, doc=None):
"""
return an xml node containing state of the panel
that guiframe can write to file
"""
data = self.get_data()
state = self.get_state()
if data is not None:
new_doc = self._manager.state_reader.write_toXML(data, state)
if new_doc is not None:
if doc is not None and hasattr(doc, "firstChild"):
child = new_doc.getElementsByTagName("SASentry")
for item in child:
doc.firstChild.appendChild(item)
else:
doc = new_doc
return doc
def on_save(self, evt=None):
"""
Method used to create a memento of the current state
:return: state object
"""
# Ask the user the location of the file to write to.
path = None
if self.parent is not None:
self._default_save_location = self.parent._default_save_location
dlg = wx.FileDialog(self, "Choose a file",
self._default_save_location,
self.window_caption, "*.prv", wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self._default_save_location = os.path.dirname(path)
if self.parent is not None:
self.parent._default_save_location = self._default_save_location
else:
return None
dlg.Destroy()
state = self.get_state()
# MAC always needs the extension for saving
extens = ".prv"
# Make sure the ext included in the file name
fName = os.path.splitext(path)[0] + extens
self._manager.save_data(filepath=fName, prstate=state)
return state
def get_data(self):
    """Return the data set currently held by the perspective manager."""
    manager = self._manager
    return manager.get_data()
def get_state(self):
"""
Snapshot the panel's current inputs and outputs into a state object.

:return: a populated InversionState object
"""
# Construct the state object
state = InversionState()
# Read the panel's parameters
flag, alpha, dmax, nfunc, qmin, \
qmax, height, width, bck = self._read_pars()
state.nfunc = nfunc
state.d_max = dmax
state.alpha = alpha
state.qmin = qmin
state.qmax = qmax
state.width = width
state.height = height
# Data file
state.file = self.plot_data.GetValue()
# Background evaluation checkbox
state.estimate_bck = self.est_bck
state.bck_value = bck
# Estimates (these attribute reads are served by the panel's custom
# __getattribute__, which pulls values from the estimate buttons)
state.nterms_estimate = self.nterms_estimate
state.alpha_estimate = self.alpha_estimate
# Read the output values (likewise routed through __getattribute__
# to the output text controls)
state.chi2 = self.chi2
state.elapsed = self.elapsed
state.osc = self.oscillation
state.pos = self.pos
state.pos_err = self.pos_err
state.rg = self.rg
state.iq0 = self.iq0
state.bck = self.bck
return state
def set_state(self, state):
"""
Set the state of the panel and inversion problem to
the state passed as a parameter.
Execute the inversion immediately after filling the
controls.

Fields that are None in ``state`` are left untouched.

:param state: InversionState object
"""
if state.nfunc is not None:
self.nfunc = state.nfunc
if state.d_max is not None:
self.d_max = state.d_max
if state.alpha is not None:
self.alpha = state.alpha
if state.qmin is not None:
self.q_min = state.qmin
if state.qmax is not None:
self.q_max = state.qmax
if state.width is not None:
self.slit_width = state.width
if state.height is not None:
self.slit_height = state.height
# Data file
self.plot_data.SetValue(str(state.file))
# Background value: keep the two radio buttons mutually consistent
self.bck_est_ctl.SetValue(state.estimate_bck)
self.bck_man_ctl.SetValue(not state.estimate_bck)
if not state.estimate_bck:
# Manual background: enable the input and restore its value
self.bck_input.Enable()
self.bck_input.SetValue(str(state.bck_value))
self.est_bck = state.estimate_bck
self.bck_value = state.bck_value
# Estimates
if state.nterms_estimate is not None:
self.nterms_estimate = state.nterms_estimate
if state.alpha_estimate is not None:
self.alpha_estimate = state.alpha_estimate
# Read the output values
if state.chi2 is not None:
self.chi2 = state.chi2
if state.elapsed is not None:
self.elapsed = state.elapsed
if state.osc is not None:
self.oscillation = state.osc
if state.pos is not None:
self.positive = state.pos
if state.pos_err is not None:
self.pos_err = state.pos_err
if state.rg is not None:
self.rg = state.rg
if state.iq0 is not None:
self.iq0 = state.iq0
if state.bck is not None:
self.bck = state.bck
# We have the data available for serialization
self._set_analysis(True)
# Perform inversion
self._on_invert(None)
def set_manager(self, manager):
    """Attach the perspective manager and reset the analysis flag."""
    self._manager = manager
    if manager is None:
        return
    self._set_analysis(False)
def _do_layout(self):
"""
Build the panel's widget hierarchy.

Stacks, top to bottom in a GridBagSizer: the I(q) data-source box,
slit parameters, Q range, inversion parameters (with estimate
buttons), the output values, and the Compute/HELP buttons.
Creates every control attribute (self.*_ctl etc.) used elsewhere
in the panel.
"""
vbox = wx.GridBagSizer(0, 0)
iy_vb = 0
# ----- I(q) data -----
databox = wx.StaticBox(self, -1, "I(q) data source")
boxsizer1 = wx.StaticBoxSizer(databox, wx.VERTICAL)
boxsizer1.SetMinSize((self._default_width, 50))
pars_sizer = wx.GridBagSizer(5, 5)
iy = 0
self.file_radio = wx.StaticText(self, -1, "Name:")
pars_sizer.Add(self.file_radio, (iy, 0), (1, 1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
self.plot_data = DataFileTextCtrl(self, -1, size=(260, 20))
pars_sizer.Add(self.plot_data, (iy, 1), (1, 1),
wx.EXPAND | wx.LEFT | wx.RIGHT | wx.ADJUST_MINSIZE, 15)
# Radio pair: estimated vs. manually-entered background level
radio_sizer = wx.GridBagSizer(5, 5)
self.bck_est_ctl = wx.RadioButton(self, -1, "Estimate background level",
name="estimate_bck", style=wx.RB_GROUP)
self.bck_man_ctl = wx.RadioButton(self, -1, "Input manual background level",
name="manual_bck")
self.bck_est_ctl.Bind(wx.EVT_RADIOBUTTON, self._on_bck_changed)
self.bck_man_ctl.Bind(wx.EVT_RADIOBUTTON, self._on_bck_changed)
radio_sizer.Add(self.bck_est_ctl, (0,0), (1,1), wx.LEFT | wx.EXPAND)
radio_sizer.Add(self.bck_man_ctl, (0,1), (1,1), wx.RIGHT | wx.EXPAND)
iy += 1
pars_sizer.Add(radio_sizer, (iy, 0), (1, 2),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
background_label = wx.StaticText(self, -1, "Background: ")
self.bck_input = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER,
size=(60, 20), value="0.0")
# Disabled until the "manual background" radio button is selected
self.bck_input.Disable()
self.bck_input.Bind(wx.EVT_TEXT, self._read_pars)
background_units = wx.StaticText(self, -1, "[A^(-1)]", size=(55, 20))
iy += 1
background_sizer = wx.GridBagSizer(5, 5)
background_sizer.Add(background_label, (0, 0), (1,1),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 23)
background_sizer.Add(self.bck_input, (0, 1), (1,1),
wx.LEFT | wx.ADJUST_MINSIZE, 5)
background_sizer.Add(background_units, (0, 2), (1,1),
wx.LEFT | wx.ADJUST_MINSIZE, 5)
pars_sizer.Add(background_sizer, (iy, 0), (1, 2),
wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
boxsizer1.Add(pars_sizer, 0, wx.EXPAND)
vbox.Add(boxsizer1, (iy_vb, 0), (1, 1),
wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ADJUST_MINSIZE | wx.TOP, 5)
# ----- Add slit parameters -----
if True:
sbox = wx.StaticBox(self, -1, "Slit parameters")
sboxsizer = wx.StaticBoxSizer(sbox, wx.VERTICAL)
sboxsizer.SetMinSize((self._default_width, 20))
sizer_slit = wx.GridBagSizer(5, 5)
label_sheight = wx.StaticText(self, -1, "Height", size=(40, 20))
label_swidth = wx.StaticText(self, -1, "Width", size=(40, 20))
label_sunits2 = wx.StaticText(self, -1, "[A^(-1)]", size=(55, 20))
self.sheight_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER, size=(60, 20))
self.swidth_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER, size=(60, 20))
hint_msg = "Enter slit height in units of Q or leave blank."
self.sheight_ctl.SetToolTipString(hint_msg)
hint_msg = "Enter slit width in units of Q or leave blank."
self.swidth_ctl.SetToolTipString(hint_msg)
iy = 0
sizer_slit.Add(label_sheight, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 5)
sizer_slit.Add(self.sheight_ctl, (iy, 1), (1, 1), wx.LEFT | wx.EXPAND, 5)
sizer_slit.Add(label_swidth, (iy, 2), (1, 1), wx.LEFT | wx.EXPAND, 5)
sizer_slit.Add(self.swidth_ctl, (iy, 3), (1, 1), wx.LEFT | wx.EXPAND, 5)
sizer_slit.Add(label_sunits2, (iy, 4), (1, 1), wx.LEFT | wx.EXPAND, 5)
sboxsizer.Add(sizer_slit, wx.TOP, 15)
iy_vb += 1
vbox.Add(sboxsizer, (iy_vb, 0), (1, 1),
wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ADJUST_MINSIZE, 5)
# ----- Q range -----
qbox = wx.StaticBox(self, -1, "Q range")
qboxsizer = wx.StaticBoxSizer(qbox, wx.VERTICAL)
qboxsizer.SetMinSize((self._default_width, 20))
sizer_q = wx.GridBagSizer(5, 5)
label_qmin = wx.StaticText(self, -1, "Q min", size=(40, 20))
label_qmax = wx.StaticText(self, -1, "Q max", size=(40, 20))
label_qunits2 = wx.StaticText(self, -1, "[A^(-1)]", size=(55, 20))
self.qmin_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER, size=(60, 20))
self.qmax_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER, size=(60, 20))
hint_msg = "Select a lower bound for Q or leave blank."
self.qmin_ctl.SetToolTipString(hint_msg)
hint_msg = "Select an upper bound for Q or leave blank."
self.qmax_ctl.SetToolTipString(hint_msg)
self.qmin_ctl.Bind(wx.EVT_TEXT, self._on_pars_changed)
self.qmax_ctl.Bind(wx.EVT_TEXT, self._on_pars_changed)
iy = 0
sizer_q.Add(label_qmin, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 5)
sizer_q.Add(self.qmin_ctl, (iy, 1), (1, 1), wx.LEFT | wx.EXPAND, 5)
sizer_q.Add(label_qmax, (iy, 2), (1, 1), wx.LEFT | wx.EXPAND, 5)
sizer_q.Add(self.qmax_ctl, (iy, 3), (1, 1), wx.LEFT | wx.EXPAND, 5)
sizer_q.Add(label_qunits2, (iy, 4), (1, 1), wx.LEFT | wx.EXPAND, 5)
qboxsizer.Add(sizer_q, wx.TOP, 15)
iy_vb += 1
vbox.Add(qboxsizer, (iy_vb, 0), (1, 1),
wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ADJUST_MINSIZE, 5)
# ----- Parameters -----
parsbox = wx.StaticBox(self, -1, "Parameters")
boxsizer2 = wx.StaticBoxSizer(parsbox, wx.VERTICAL)
boxsizer2.SetMinSize((self._default_width, 50))
explanation = "P(r) is found by fitting a set of base functions"
explanation += " to I(Q). The minimization involves"
explanation += " a regularization term to ensure a smooth P(r)."
explanation += " The regularization constant gives the size of that "
explanation += "term. The suggested value is the value above which the"
explanation += " output P(r) will have only one peak."
label_explain = wx.StaticText(self, -1, explanation, size=(280, 90))
boxsizer2.Add(label_explain, wx.LEFT | wx.BOTTOM, 5)
label_nfunc = wx.StaticText(self, -1, "Number of terms")
label_nfunc.SetMinSize((120, 20))
label_alpha = wx.StaticText(self, -1, "Regularization constant")
label_dmax = wx.StaticText(self, -1, "Max distance [A]")
self.label_sugg = wx.StaticText(self, -1, "Suggested value")
self.nfunc_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER, size=(60, 20))
self.nfunc_ctl.SetToolTipString("Number of terms in the expansion.")
self.alpha_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER, size=(60, 20))
hint_msg = "Control parameter for the size of the regularization term."
self.alpha_ctl.SetToolTipString(hint_msg)
self.dmax_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER, size=(60, 20))
hint_msg = "Maximum distance between any two points in the system."
self.dmax_ctl.SetToolTipString(hint_msg)
# "Suggested value" buttons: clicking accepts the estimate into the
# corresponding input control
wx_id = wx.NewId()
self.alpha_estimate_ctl = wx.Button(self, wx_id, "")
self.Bind(wx.EVT_BUTTON, self._on_accept_alpha, id=wx_id)
self.alpha_estimate_ctl.Enable(False)
self.alpha_estimate_ctl.SetToolTipString("Waiting for estimate...")
wx_id = wx.NewId()
self.nterms_estimate_ctl = wx.Button(self, wx_id, "")
#self.nterms_estimate_ctl.Hide()
self.Bind(wx.EVT_BUTTON, self._on_accept_nterms, id=wx_id)
self.nterms_estimate_ctl.Enable(False)
self.nterms_estimate_ctl.SetToolTipString("Waiting for estimate...")
self.nfunc_ctl.Bind(wx.EVT_TEXT, self._read_pars)
self.alpha_ctl.Bind(wx.EVT_TEXT, self._read_pars)
self.dmax_ctl.Bind(wx.EVT_TEXT, self._on_pars_changed)
# Distance explorator
wx_id = wx.NewId()
self.distance_explorator_ctl = wx.Button(self, wx_id, "Explore")
self.Bind(wx.EVT_BUTTON, self._on_explore, id=wx_id)
sizer_params = wx.GridBagSizer(5, 5)
iy = 0
sizer_params.Add(self.label_sugg, (iy, 2), (1, 1), wx.LEFT, 15)
iy += 1
sizer_params.Add(label_nfunc, (iy, 0), (1, 1), wx.LEFT, 15)
sizer_params.Add(self.nfunc_ctl, (iy, 1), (1, 1), wx.RIGHT, 0)
sizer_params.Add(self.nterms_estimate_ctl, (iy, 2), (1, 1), wx.LEFT, 15)
iy += 1
sizer_params.Add(label_alpha, (iy, 0), (1, 1), wx.LEFT, 15)
sizer_params.Add(self.alpha_ctl, (iy, 1), (1, 1), wx.RIGHT, 0)
sizer_params.Add(self.alpha_estimate_ctl, (iy, 2), (1, 1), wx.LEFT, 15)
iy += 1
sizer_params.Add(label_dmax, (iy, 0), (1, 1), wx.LEFT, 15)
sizer_params.Add(self.dmax_ctl, (iy, 1), (1, 1), wx.RIGHT, 0)
sizer_params.Add(self.distance_explorator_ctl, (iy, 2),
(1, 1), wx.LEFT, 15)
boxsizer2.Add(sizer_params, 0)
iy_vb += 1
vbox.Add(boxsizer2, (iy_vb, 0), (1, 1),
wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ADJUST_MINSIZE, 5)
# ----- Results -----
resbox = wx.StaticBox(self, -1, "Outputs")
ressizer = wx.StaticBoxSizer(resbox, wx.VERTICAL)
ressizer.SetMinSize((self._default_width, 50))
label_rg = wx.StaticText(self, -1, "Rg")
label_rg_unit = wx.StaticText(self, -1, "[A]")
label_iq0 = wx.StaticText(self, -1, "I(Q=0)")
label_iq0_unit = wx.StaticText(self, -1, "[A^(-1)]")
label_bck = wx.StaticText(self, -1, "Background")
label_bck_unit = wx.StaticText(self, -1, "[A^(-1)]")
self.rg_ctl = OutputTextCtrl(self, -1, size=(60, 20))
hint_msg = "Radius of gyration for the computed P(r)."
self.rg_ctl.SetToolTipString(hint_msg)
self.iq0_ctl = OutputTextCtrl(self, -1, size=(60, 20))
hint_msg = "Scattering intensity at Q=0 for the computed P(r)."
self.iq0_ctl.SetToolTipString(hint_msg)
self.bck_ctl = OutputTextCtrl(self, -1, size=(60, 20))
self.bck_ctl.SetToolTipString("Value of estimated constant background.")
label_time = wx.StaticText(self, -1, "Computation time")
label_time_unit = wx.StaticText(self, -1, "secs")
label_time.SetMinSize((120, 20))
label_chi2 = wx.StaticText(self, -1, "Chi2/dof")
label_osc = wx.StaticText(self, -1, "Oscillations")
label_pos = wx.StaticText(self, -1, "Positive fraction")
label_pos_err = wx.StaticText(self, -1, "1-sigma positive fraction")
self.time_ctl = OutputTextCtrl(self, -1, size=(60, 20))
hint_msg = "Computation time for the last inversion, in seconds."
self.time_ctl.SetToolTipString(hint_msg)
self.chi2_ctl = OutputTextCtrl(self, -1, size=(60, 20))
self.chi2_ctl.SetToolTipString("Chi^2 over degrees of freedom.")
# Oscillation parameter
self.osc_ctl = OutputTextCtrl(self, -1, size=(60, 20))
hint_msg = "Oscillation parameter. P(r) for a sphere has an "
hint_msg += " oscillation parameter of 1.1."
self.osc_ctl.SetToolTipString(hint_msg)
# Positive fraction figure of merit
self.pos_ctl = OutputTextCtrl(self, -1, size=(60, 20))
hint_msg = "Fraction of P(r) that is positive. "
hint_msg += "Theoretically, P(r) is defined positive."
self.pos_ctl.SetToolTipString(hint_msg)
# 1-simga positive fraction figure of merit
self.pos_err_ctl = OutputTextCtrl(self, -1, size=(60, 20))
message = "Fraction of P(r) that is at least 1 standard deviation"
message += " greater than zero.\n"
message += "This figure of merit tells you about the size of the "
message += "P(r) errors.\n"
message += "If it is close to 1 and the other figures of merit are bad,"
message += " consider changing the maximum distance."
self.pos_err_ctl.SetToolTipString(message)
sizer_res = wx.GridBagSizer(5, 5)
iy = 0
sizer_res.Add(label_rg, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 15)
sizer_res.Add(self.rg_ctl, (iy, 1), (1, 1), wx.RIGHT | wx.EXPAND, 15)
sizer_res.Add(label_rg_unit, (iy, 2), (1, 1), wx.RIGHT | wx.EXPAND, 15)
iy += 1
sizer_res.Add(label_iq0, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 15)
sizer_res.Add(self.iq0_ctl, (iy, 1), (1, 1), wx.RIGHT | wx.EXPAND, 15)
sizer_res.Add(label_iq0_unit, (iy, 2), (1, 1), wx.RIGHT | wx.EXPAND, 15)
iy += 1
sizer_res.Add(label_bck, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 15)
sizer_res.Add(self.bck_ctl, (iy, 1), (1, 1), wx.RIGHT | wx.EXPAND, 15)
sizer_res.Add(label_bck_unit, (iy, 2), (1, 1), wx.RIGHT | wx.EXPAND, 15)
iy += 1
sizer_res.Add(label_time, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 15)
sizer_res.Add(self.time_ctl, (iy, 1), (1, 1), wx.RIGHT | wx.EXPAND, 15)
sizer_res.Add(label_time_unit, (iy, 2), (1, 1), wx.RIGHT | wx.EXPAND, 15)
iy += 1
sizer_res.Add(label_chi2, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 15)
sizer_res.Add(self.chi2_ctl, (iy, 1), (1, 1), wx.RIGHT | wx.EXPAND, 15)
iy += 1
sizer_res.Add(label_osc, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 15)
sizer_res.Add(self.osc_ctl, (iy, 1), (1, 1), wx.RIGHT | wx.EXPAND, 15)
iy += 1
sizer_res.Add(label_pos, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 15)
sizer_res.Add(self.pos_ctl, (iy, 1), (1, 1), wx.RIGHT | wx.EXPAND, 15)
iy += 1
sizer_res.Add(label_pos_err, (iy, 0), (1, 1), wx.LEFT | wx.EXPAND, 15)
sizer_res.Add(self.pos_err_ctl, (iy, 1), (1, 1), wx.RIGHT | wx.EXPAND, 15)
ressizer.Add(sizer_res, 0)
iy_vb += 1
vbox.Add(ressizer, (iy_vb, 0), (1, 1),
wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ADJUST_MINSIZE, 5)
# ----- Buttons -----
wx_id = wx.NewId()
button_ok = wx.Button(self, wx_id, "Compute")
button_ok.SetToolTipString("Perform P(r) inversion.")
self.Bind(wx.EVT_BUTTON, self._on_invert, id=wx_id)
self.button_help = wx.Button(self, -1, "HELP")
self.button_help.SetToolTipString("Get help on P(r) inversion.")
self.button_help.Bind(wx.EVT_BUTTON, self.on_help)
self._set_reset_flag(True)
self._set_save_flag(True)
sizer_button = wx.BoxSizer(wx.HORIZONTAL)
sizer_button.Add((20, 20), 1, wx.EXPAND | wx.ADJUST_MINSIZE, 0)
sizer_button.Add(button_ok, 0, wx.LEFT | wx.ADJUST_MINSIZE, 10)
sizer_button.Add(self.button_help, 0, wx.LEFT | wx.ADJUST_MINSIZE, 10)
iy_vb += 1
vbox.Add(sizer_button, (iy_vb, 0), (1, 1),
wx.EXPAND | wx.BOTTOM | wx.TOP | wx.RIGHT, 10)
# Pressing Enter in any TE_PROCESS_ENTER field triggers an inversion
self.Bind(wx.EVT_TEXT_ENTER, self._on_invert)
self.SetSizer(vbox)
def _on_accept_alpha(self, evt):
"""
User has accepted the estimated alpha,
set it as part of the input parameters.

:param evt: button event (unused)
"""
try:
alpha = self.alpha_estimate_ctl.GetLabel()
# Check that we have a number
float(alpha)
self.alpha_ctl.SetValue(alpha)
except ValueError:
logger.error("InversionControl._on_accept_alpha got a value that was not a number: %s" % alpha )
except:
# No estimate or bad estimate, either do nothing
# NOTE(review): sys.exc_value is Python 2 only -- confirm this
# module is not expected to run under Python 3.
logger.error("InversionControl._on_accept_alpha: %s" % sys.exc_value)
def _on_accept_nterms(self, evt):
"""
User has accepted the estimated number of terms,
set it as part of the input parameters.

:param evt: button event (unused)
"""
try:
nterms = self.nterms_estimate_ctl.GetLabel()
# Check that we have a number
float(nterms)
self.nfunc_ctl.SetValue(nterms)
except ValueError:
logger.error("InversionControl._on_accept_nterms got a value that was not a number: %s" % nterms )
except:
# No estimate or bad estimate, either do nothing
# NOTE(review): sys.exc_value is Python 2 only.
logger.error("InversionControl._on_accept_nterms: %s" % sys.exc_value)
def clear_panel(self):
    """Blank the data-file field and restore default inversion inputs."""
    self.plot_data.SetValue("")
    self.on_reset()
def on_reset(self, event=None):
"""
Resets inversion parameters to the manager defaults and clears
every output and estimate control.

:param event: unused event object
"""
self.nfunc = self._manager.DEFAULT_NFUNC
self.d_max = self._manager.DEFAULT_DMAX
self.alpha = self._manager.DEFAULT_ALPHA
self.qmin_ctl.SetValue("")
self.qmax_ctl.SetValue("")
self.time_ctl.SetValue("")
self.rg_ctl.SetValue("")
self.iq0_ctl.SetValue("")
self.bck_ctl.SetValue("")
self.chi2_ctl.SetValue("")
self.osc_ctl.SetValue("")
self.pos_ctl.SetValue("")
self.pos_err_ctl.SetValue("")
# Estimate buttons go back to their "waiting" state
self.alpha_estimate_ctl.Enable(False)
self.alpha_estimate_ctl.SetLabel("")
self.nterms_estimate_ctl.Enable(False)
self.nterms_estimate_ctl.SetLabel("")
self._set_analysis(False)
# Re-validate and kick off a background estimate if data is loaded
self._on_pars_changed()
def _on_bck_changed(self, evt=None):
    """Sync the manual-background input with the radio-button choice."""
    estimate = self.bck_est_ctl.GetValue()
    self.est_bck = estimate
    # Manual entry is only meaningful when not estimating.
    if not estimate:
        self.bck_input.Enable()
    else:
        self.bck_input.Disable()
def _on_pars_changed(self, evt=None):
"""
Called when an input parameter has changed.
We will estimate the alpha parameter behind the
scenes.

:param evt: text event (unused)
"""
flag, alpha, dmax, nfunc, qmin, qmax, height, width, bck = self._read_pars()
# If the pars are valid, estimate alpha
if flag:
# Disable the accept buttons until fresh estimates arrive
self.nterms_estimate_ctl.Enable(False)
self.alpha_estimate_ctl.Enable(False)
dataset = self.plot_data.GetValue()
if dataset is not None and dataset.strip() != "":
self._manager.estimate_plot_inversion(alpha=alpha, nfunc=nfunc,
d_max=dmax,
q_min=qmin, q_max=qmax,
est_bck=self.est_bck,
bck_val=bck,
height=height,
width=width)
def _read_pars(self, evt=None):
"""
Read and validate every input control on the panel.

Invalid entries turn the offending control pink and clear the
validity flag; blank optional entries fall back to defaults
(0 for slit dimensions, None for the Q bounds).

:param evt: optional text event (unused)
:return: tuple ``(flag, alpha, dmax, nfunc, qmin, qmax, height,
    width, background)`` where ``flag`` is True when all inputs
    parsed successfully
"""
alpha = 0
nfunc = 5
dmax = 120
qmin = 0
qmax = 0
height = 0
width = 0
background = 0
flag = True
# Read slit height
try:
height_str = self.sheight_ctl.GetValue()
if len(height_str.lstrip().rstrip()) == 0:
height = 0
else:
height = float(height_str)
self.sheight_ctl.SetBackgroundColour(wx.WHITE)
self.sheight_ctl.Refresh()
except:
flag = False
self.sheight_ctl.SetBackgroundColour("pink")
self.sheight_ctl.Refresh()
# Read slit width
try:
width_str = self.swidth_ctl.GetValue()
if len(width_str.lstrip().rstrip()) == 0:
width = 0
else:
width = float(width_str)
self.swidth_ctl.SetBackgroundColour(wx.WHITE)
self.swidth_ctl.Refresh()
except:
flag = False
self.swidth_ctl.SetBackgroundColour("pink")
self.swidth_ctl.Refresh()
# Read alpha
try:
alpha = float(self.alpha_ctl.GetValue())
self.alpha_ctl.SetBackgroundColour(wx.WHITE)
self.alpha_ctl.Refresh()
except:
flag = False
self.alpha_ctl.SetBackgroundColour("pink")
self.alpha_ctl.Refresh()
# Read d_max
try:
dmax = float(self.dmax_ctl.GetValue())
self.dmax_ctl.SetBackgroundColour(wx.WHITE)
self.dmax_ctl.Refresh()
except:
flag = False
self.dmax_ctl.SetBackgroundColour("pink")
self.dmax_ctl.Refresh()
# Read nfunc: must not exceed the number of data points
try:
nfunc = int(self.nfunc_ctl.GetValue())
npts = self._manager.get_npts()
if npts > 0 and nfunc > npts:
message = "Number of function terms should be smaller "
message += "than the number of points"
wx.PostEvent(self._manager.parent, StatusEvent(status=message))
raise ValueError, message
self.nfunc_ctl.SetBackgroundColour(wx.WHITE)
self.nfunc_ctl.Refresh()
except:
flag = False
self.nfunc_ctl.SetBackgroundColour("pink")
self.nfunc_ctl.Refresh()
# Read qmin (blank means no lower bound)
try:
qmin_str = self.qmin_ctl.GetValue()
if len(qmin_str.lstrip().rstrip()) == 0:
qmin = None
else:
qmin = float(qmin_str)
self.qmin_ctl.SetBackgroundColour(wx.WHITE)
self.qmin_ctl.Refresh()
except:
flag = False
self.qmin_ctl.SetBackgroundColour("pink")
self.qmin_ctl.Refresh()
# Read qmax (blank means no upper bound)
try:
qmax_str = self.qmax_ctl.GetValue()
if len(qmax_str.lstrip().rstrip()) == 0:
qmax = None
else:
qmax = float(qmax_str)
self.qmax_ctl.SetBackgroundColour(wx.WHITE)
self.qmax_ctl.Refresh()
except:
flag = False
self.qmax_ctl.SetBackgroundColour("pink")
self.qmax_ctl.Refresh()
# Read background (only meaningful when not estimating it)
if not self.est_bck:
try:
bck_str = self.bck_input.GetValue()
if len(bck_str.strip()) == 0:
background = 0.0
else:
background = float(bck_str)
self.bck_input.SetBackgroundColour(wx.WHITE)
except ValueError:
# NOTE(review): a bad manual background marks the field pink
# but does not clear 'flag' -- confirm this is intentional.
background = 0.0
self.bck_input.SetBackgroundColour("pink")
self.bck_input.Refresh()
return flag, alpha, dmax, nfunc, qmin, qmax, height, width, background
def _on_explore(self, evt):
"""
Invoke the d_max exploration dialog.

Requires a previous inversion (``_last_pr``); otherwise posts a
status message asking the user to load data first.

:param evt: button event (unused)
"""
from explore_dialog import ExploreDialog
if self._manager._last_pr is not None:
pr = self._manager._create_plot_pr()
dialog = ExploreDialog(pr, 10, None, -1, "")
dialog.Show()
else:
message = "No data to analyze. Please load a data set to proceed."
wx.PostEvent(self._manager.parent, StatusEvent(status=message))
def _on_invert(self, evt):
"""
Perform inversion: validate the form, then hand the parameters to
the manager. Posts a status message instead when the form is
invalid or no data set is selected.

:param evt: triggering event (unused)
"""
# Get the data from the form
# Push it to the manager
flag, alpha, dmax, nfunc, qmin, qmax, height, width, bck = self._read_pars()
if flag:
dataset = self.plot_data.GetValue()
if dataset is None or len(dataset.strip()) == 0:
message = "No data to invert. Select a data set before"
message += " proceeding with P(r) inversion."
wx.PostEvent(self._manager.parent, StatusEvent(status=message))
else:
self._manager.setup_plot_inversion(alpha=alpha, nfunc=nfunc,
d_max=dmax,
q_min=qmin, q_max=qmax,
est_bck=self.est_bck,
bck_val = bck,
height=height,
width=width)
else:
message = "The P(r) form contains invalid values: "
message += "please submit it again."
wx.PostEvent(self.parent, StatusEvent(status=message))
def _change_file(self, evt=None, filepath=None, data=None):
"""
Choose a new input file for I(q): show the data, re-validate the
parameters, and run an inversion immediately.

:param evt: unused event object
:param filepath: unused (kept for interface compatibility)
:param data: the new data object; its ``name`` fills the file field
"""
if self._manager is not None:
self.plot_data.SetValue(str(data.name))
try:
self._manager.show_data(data=data, reset=True)
self._on_pars_changed(None)
self._on_invert(None)
self._set_analysis(True)
except:
# NOTE(review): sys.exc_value is Python 2 only.
msg = "InversionControl._change_file: %s" % sys.exc_value
logger.error(msg)
def on_help(self, event):
"""
Bring up the P(r) Documentation whenever
the HELP button is clicked.
Calls DocumentationWindow with the path of the location within the
documentation tree (after /doc/ ....". Note that when using old
versions of Wx (before 2.9) and thus not the release version of
installers, the help comes up at the top level of the file as
webbrowser does not pass anything past the # to the browser when it is
running "file:///...."

:param event: triggering button event (unused)
"""
_TreeLocation = "user/sasgui/perspectives/pr/pr_help.html"
_doc_viewer = DocumentationWindow(self, -1, _TreeLocation, "",
"P(r) Help")
class PrDistDialog(wx.Dialog):
"""
Property dialog to let the user change the number
of points on the P(r) plot.
"""
def __init__(self, parent, id):
"""
Build the dialog: a single integer entry plus OK/Cancel.

:param parent: parent window
:param id: wx window id
"""
from sas.sascalc.pr.invertor import help
wx.Dialog.__init__(self, parent, id, size=(250, 120))
self.SetTitle("P(r) distribution")
vbox = wx.BoxSizer(wx.VERTICAL)
label_npts = wx.StaticText(self, -1, "Number of points")
self.npts_ctl = PrTextCtrl(self, -1, size=(100, 20))
pars_sizer = wx.GridBagSizer(5, 5)
iy = 0
pars_sizer.Add(label_npts, (iy, 0), (1, 1), wx.LEFT, 15)
pars_sizer.Add(self.npts_ctl, (iy, 1), (1, 1), wx.RIGHT, 0)
vbox.Add(pars_sizer, 0, wx.ALL | wx.EXPAND, 15)
static_line = wx.StaticLine(self, -1)
vbox.Add(static_line, 0, wx.EXPAND, 0)
button_ok = wx.Button(self, wx.ID_OK, "OK")
# OK is intercepted to validate before the dialog closes
self.Bind(wx.EVT_BUTTON, self._checkValues, button_ok)
button_cancel = wx.Button(self, wx.ID_CANCEL, "Cancel")
sizer_button = wx.BoxSizer(wx.HORIZONTAL)
sizer_button.Add((20, 20), 1, wx.EXPAND | wx.ADJUST_MINSIZE, 0)
sizer_button.Add(button_ok, 0, wx.LEFT | wx.RIGHT | wx.ADJUST_MINSIZE, 10)
sizer_button.Add(button_cancel, 0,
wx.LEFT | wx.RIGHT | wx.ADJUST_MINSIZE, 10)
vbox.Add(sizer_button, 0, wx.EXPAND | wx.BOTTOM | wx.TOP, 10)
self.SetSizer(vbox)
self.SetAutoLayout(True)
self.Layout()
self.Centre()
def _checkValues(self, event):
"""
Check the dialog content: only let the OK event propagate (and so
close the dialog) when the entry parses as an integer.
"""
flag = True
try:
int(self.npts_ctl.GetValue())
self.npts_ctl.SetBackgroundColour(wx.WHITE)
self.npts_ctl.Refresh()
except:
flag = False
self.npts_ctl.SetBackgroundColour("pink")
self.npts_ctl.Refresh()
if flag:
event.Skip(True)
def get_content(self):
"""
Return the content of the dialog.
At this point the values have already been
checked.

:return: the number of points as an int
"""
value = int(self.npts_ctl.GetValue())
return value
def set_content(self, npts):
"""
Initialize the content of the dialog.

:param npts: initial number of points to display
"""
self.npts_ctl.SetValue("%i" % npts)
|
|
#!/usr/bin/env python3
"""A utility script for automating the beets release process.
"""
import click
import os
import re
import subprocess
from contextlib import contextmanager
import datetime
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst')
@contextmanager
def chdir(d):
    """A context manager that temporarily changes the working directory.

    :param d: directory to change into for the duration of the block

    The original directory is restored even when the with-body raises.
    """
    olddir = os.getcwd()
    os.chdir(d)
    try:
        yield
    finally:
        # BUGFIX: previously the old directory was not restored when the
        # body raised, leaving the process stranded in the new directory.
        os.chdir(olddir)
# Root click command group; the subcommands below register themselves
# via @release.command().
@click.group()
def release():
"""Automate the beets release process."""
pass
# Locations (filenames and patterns) of the version number.
# Each entry is (filename, [(regex, replacement_template), ...]);
# the templates may use {version}, {major} and {minor} placeholders.
VERSION_LOCS = [
(
os.path.join(BASE, 'beets', '__init__.py'),
[
(
r'__version__\s*=\s*u[\'"]([0-9\.]+)[\'"]',
"__version__ = '{version}'",
)
]
),
(
os.path.join(BASE, 'docs', 'conf.py'),
[
(
r'version\s*=\s*[\'"]([0-9\.]+)[\'"]',
"version = '{minor}'",
),
(
r'release\s*=\s*[\'"]([0-9\.]+)[\'"]',
"release = '{version}'",
),
]
),
(
os.path.join(BASE, 'setup.py'),
[
(
r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]',
" version='{version}',",
)
]
),
]
# GitHub coordinates used by the publish/ghrelease commands.
GITHUB_USER = 'beetbox'
GITHUB_REPO = 'beets'
def bump_version(version):
    """Update the version number in setup.py, docs config, changelog,
    and root module.

    :param version: new version string such as ``"1.4.9"``; must have
        three dot-separated numeric parts and be newer than the version
        currently recorded in each file (asserted per match).
    """
    version_parts = [int(p) for p in version.split('.')]
    assert len(version_parts) == 3, "invalid version number"
    minor = '{}.{}'.format(*version_parts)
    major = '{}'.format(*version_parts)

    # Replace the version each place where it lives.
    for filename, locations in VERSION_LOCS:
        # Read and transform the file.
        out_lines = []
        with open(filename) as f:
            found = False
            for line in f:
                for pattern, template in locations:
                    match = re.match(pattern, line)
                    if match:
                        # Check that this version is actually newer.
                        old_version = match.group(1)
                        old_parts = [int(p) for p in old_version.split('.')]
                        assert version_parts > old_parts, \
                            "version must be newer than {}".format(
                                old_version
                            )
                        # Insert the new version.
                        out_lines.append(template.format(
                            version=version,
                            major=major,
                            minor=minor,
                        ) + '\n')
                        found = True
                        break
                else:
                    # Normal line.
                    out_lines.append(line)
        if not found:
            # BUGFIX: the diagnostic previously printed the literal text
            # "(unknown)" instead of naming the file whose pattern failed.
            print(f"No pattern found in {filename}")
        # Write the file back.
        with open(filename, 'w') as f:
            f.write(''.join(out_lines))

    # Generate bits to insert into changelog.
    header_line = f'{version} (in development)'
    header = '\n\n' + header_line + '\n' + '-' * len(header_line) + '\n\n'
    header += 'Changelog goes here!\n'

    # Insert into the right place (just before the first blank line).
    with open(CHANGELOG) as f:
        contents = f.read()
    location = contents.find('\n\n')  # First blank line.
    contents = contents[:location] + header + contents[location:]

    # Write back.
    with open(CHANGELOG, 'w') as f:
        f.write(contents)
@release.command()
@click.argument('version')
def bump(version):
"""Bump the version number.
"""
bump_version(version)
def get_latest_changelog():
"""Extract the first section of the changelog.

Sections are delimited by all-dash underline lines; the text between
the first and second underline (minus the next section's header line)
is returned, stripped.

:return: the most recent changelog section as a string
"""
started = False
lines = []
with open(CHANGELOG) as f:
for line in f:
if re.match(r'^--+$', line.strip()):
# Section boundary. Start or end.
if started:
# Remove last line, which is the header of the next
# section.
del lines[-1]
break
else:
started = True
elif started:
lines.append(line)
return ''.join(lines).strip()
def rst2md(text):
    """Use Pandoc to convert text from ReST to Markdown.

    :param text: ReST source text
    :return: the converted Markdown, stripped of surrounding whitespace
    """
    pandoc = subprocess.Popen(
        ['pandoc', '--from=rst', '--to=markdown', '--wrap=none'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, _ = pandoc.communicate(text.encode('utf-8'))
    md = stdout.decode('utf-8').strip()
    # Fix up odd spacing in lists: Pandoc pads list markers with extra
    # spaces ("-   item"); collapse each run of spaces after a leading
    # dash down to a single space.
    # BUGFIX: the previous pattern replaced "- " with "- " -- a no-op.
    return re.sub(r'^- +', '- ', md, flags=re.M)
def changelog_as_markdown():
"""Get the latest changelog entry as hacked up Markdown.

Rewrites Sphinx-specific roles (:doc:, :ref:, :bug:, :user:) into
plain Markdown-friendly text before handing the result to Pandoc.
The substitution order matters: role rewrites must happen before the
generic backtick pass.

:return: the most recent changelog section as Markdown
"""
rst = get_latest_changelog()
# Replace plugin links with plugin names.
rst = re.sub(r':doc:`/plugins/(\w+)`', r'``\1``', rst)
# References with text.
rst = re.sub(r':ref:`([^<]+)(<[^>]+>)`', r'\1', rst)
# Other backslashes with verbatim ranges.
rst = re.sub(r'(\s)`([^`]+)`([^_])', r'\1``\2``\3', rst)
# Command links with command names.
rst = re.sub(r':ref:`(\w+)-cmd`', r'``\1``', rst)
# Bug numbers.
rst = re.sub(r':bug:`(\d+)`', r'#\1', rst)
# Users.
rst = re.sub(r':user:`(\w+)`', r'@\1', rst)
# Convert with Pandoc.
md = rst2md(rst)
# Restore escaped issue numbers.
md = re.sub(r'\\#(\d+)\b', r'#\1', md)
return md
@release.command()
def changelog():
"""Get the most recent version's changelog as Markdown.
"""
print(changelog_as_markdown())
def get_version(index=0):
    """Read a version number from the changelog.

    :param index: which version to return, newest first (0 = current)
    :return: the dotted version string, or None if fewer than
        ``index + 1`` version headers exist
    """
    pattern = re.compile(r'^\d+\.\d+\.\d+')
    with open(CHANGELOG) as f:
        hits = (pattern.search(line) for line in f)
        versions = (hit.group(0) for hit in hits if hit)
        for position, found in enumerate(versions):
            if position == index:
                return found
@release.command()
def version():
"""Display the current version.
"""
print(get_version())
@release.command()
def datestamp():
"""Enter today's date as the release date in the changelog.

Replaces the "(in development)" marker in the current version header
with a "(Month D, YYYY)" stamp, then rewrites the dashed underline on
the following line to match the new header length.
"""
dt = datetime.datetime.now()
stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
marker = '(in development)'
lines = []
underline_length = None
with open(CHANGELOG) as f:
for line in f:
if marker in line:
# The header line.
line = line.replace(marker, stamp)
lines.append(line)
underline_length = len(line.strip())
elif underline_length:
# This is the line after the header. Rewrite the dashes.
lines.append('-' * underline_length + '\n')
underline_length = None
else:
lines.append(line)
with open(CHANGELOG, 'w') as f:
for line in lines:
f.write(line)
@release.command()
def prep():
"""Run all steps to prepare a release.

- Tag the commit.
- Build the sdist package.
- Generate the Markdown changelog to ``changelog.md``.
- Bump the version number to the next (patch) version.
"""
cur_version = get_version()
# Tag.
subprocess.check_call(['git', 'tag', f'v{cur_version}'])
# Build.
with chdir(BASE):
subprocess.check_call(['python', 'setup.py', 'sdist'])
# Generate Markdown changelog.
cl = changelog_as_markdown()
with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
f.write(cl)
# Version number bump.
# FIXME It should be possible to specify this as an argument.
version_parts = [int(n) for n in cur_version.split('.')]
version_parts[-1] += 1
next_version = '.'.join(map(str, version_parts))
bump_version(next_version)
@release.command()
def publish():
"""Unleash a release unto the world.

- Push the tag to GitHub.
- Upload to PyPI.

Uses index 1 (the previous version in the changelog) because `prep`
has already bumped the in-development version past the release.
"""
version = get_version(1)
# Push to GitHub.
with chdir(BASE):
subprocess.check_call(['git', 'push'])
subprocess.check_call(['git', 'push', '--tags'])
# Upload to PyPI.
path = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['twine', 'upload', path])
@release.command()
def ghrelease():
"""Create a GitHub release using the `github-release` command-line
tool.

Reads the changelog to upload from `changelog.md`. Uploads the
tarball from the `dist` directory.
"""
# Like `publish`, index 1 is the just-released (not in-development)
# version.
version = get_version(1)
tag = 'v' + version
# Load the changelog.
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
# Create the release.
subprocess.check_call([
'github-release', 'release',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', f'{GITHUB_REPO} {version}',
'--description', cl_md,
])
# Attach the release tarball.
tarball = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call([
'github-release', 'upload',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', os.path.basename(tarball),
'--file', tarball,
])
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
release()
|
|
from __future__ import print_function

import getopt
import os
import shutil
import stat
import sys
from collections import namedtuple
from glob import glob
__author__ = 'antoine waugh: antoine@reltech.com'
# (name, is_async) record describing one @Test action found in a TestEvent.
TestAction = namedtuple('TestAction', 'name is_async')
# EPL source markers: '//' starts a plain comment, '//@' an aunit annotation.
SINGLE_LINE_COMMENT = '//'
ANNOTATION = '//@'
NEWLINE = '\n'
def usage():
    """Print command line help for aunit.py and terminate the process."""
    for help_line in (
            "Usage: aunit.py [-s ProjectName -f TestFile]\n",
            "Scans test source directory for aunit test files and creates associated pysys tests.\n",
            "$AUNIT_HOME environment variable must be set, $AUNIT_PROJECT_HOME optional, defaults",
            "to $AUNIT_HOME/workspace if not set.\n",
            " -s, --source Run only test event files within given project.",
            " -f, --file Run only test event file provided.",
            " -h, --help Display this help message and exit\n"):
        print(help_line)
    sys.exit()
def remove_single_line_comments(content):
    """Strip plain single line comments from a block of text.

    Lines whose first '//' begins an annotation ('//@') are left intact,
    since annotations drive test discovery.

    Args:
        content (string)
    Returns:
        string
    """
    stripped_lines = []
    for line in content.splitlines():
        comment_at = line.find(SINGLE_LINE_COMMENT)
        # Equal indices mean either no '//' at all (both -1) or the first
        # '//' starts a '//@' annotation, which must be preserved.
        if comment_at != line.find(ANNOTATION):
            line = line[:comment_at]
        stripped_lines.append(line)
    return NEWLINE.join(stripped_lines)
class TestEvent(object):
    """AUnit TestEvent File

    This class represents a single aunit TestEvent file parsed from its
    raw text contents (plain comments should already be removed).

    Args:
        content: file contents (string)
    """
    def __init__(self, content):
        # Annotation markers recognised while scanning the source text.
        self._annotations = {
            "TEST_ANNOTATION": "@Test",
            "SETUP_ANNOTATION": "@Setup",
            "TEARDOWN_ANNOTATION": "@Teardown",
            "DEPENDS_ANNOTATION": "@Depends",
            "INITIALISE_ANNOTATION": "@Initialise" }
        self._content = content.splitlines()
        self._test_actions = [] # [TestAction(name, is_async)]
        # Traverse file contents, populate member variables.  Attributes
        # such as _setup_action are only created when the matching
        # annotation is found; the has_*/is_valid helpers probe for them.
        for i, line in enumerate(self._content):
            # Look ahead to next_line requires traversing to stop at linenumber -1
            if i == len(self._content)-1:
                return
            next_line = self._content[i+1]
            if self._line_contains_annotation(line):
                # Initialise Action
                if (self._line_contains(line, self._annotations["INITIALISE_ANNOTATION"]) and
                    self._line_contains_action(next_line) ):
                    self._initialise_action = self._get_action_name(next_line)
                # Setup Action
                elif (self._line_contains(line, self._annotations["SETUP_ANNOTATION"]) and
                    self._line_contains_action(next_line) ):
                    self._setup_action = self._get_action_name(next_line)
                # Teardown Action
                elif (self._line_contains(line, self._annotations["TEARDOWN_ANNOTATION"]) and
                    self._line_contains_action(next_line) ):
                    self._teardown_action = self._get_action_name(next_line)
                # Depends Annotation
                elif (self._line_contains(line, self._annotations["DEPENDS_ANNOTATION"])):
                    self._project_depends = self._get_project_dependencies(line)
                    self._file_depends = self._get_file_dependencies(line)
                    # Test Event Definition
                    # NOTE(review): the event name is only captured when the
                    # line after @Depends declares the event, i.e. this
                    # assumes the event declaration immediately follows the
                    # @Depends annotation - confirm against sample tests.
                    if self._line_contains_event(next_line):
                        self._event_name = self._get_event_name(next_line)
                # Test Action
                elif (self._line_contains(line, self._annotations["TEST_ANNOTATION"]) and
                    self._line_contains_action(next_line) ):
                    action = TestAction(
                        name=self._get_action_name(next_line),
                        is_async=self._is_action_async(next_line)
                    )
                    self._test_actions.append(action)
            elif self._line_contains_package(line):
                # Whole line is kept, e.g. 'package com.foo;'.
                self._package = line
    def get_content(self):
        """ Returns raw contents of test event, lines joined without newlines (string) """
        return ''.join(self._content)
    def has_initialise_action(self):
        """ Returns True if an @Initialise action was found """
        return hasattr(self, '_initialise_action')
    def has_setup_action(self):
        """ Returns True if a @Setup action was found """
        return hasattr(self, '_setup_action')
    def has_teardown_action(self):
        """ Returns True if a @Teardown action was found """
        return hasattr(self, '_teardown_action')
    def is_valid(self):
        """ Returns true if all annotation requirements,
        action and event definitions are met
        """
        return (
            hasattr(self, '_file_depends') and
            hasattr(self, '_project_depends') and
            hasattr(self, '_package') and
            hasattr(self, '_event_name') and
            len(self._test_actions) > 0
        )
    def is_missing_annotations(self):
        """ Returns true if //@Test annotation identified
        but minimum annotation requirements not met
        """
        return len(self._test_actions) > 0 and not self.is_valid()
    def get_package(self):
        """ Returns package declaration (string) """
        return self._package
    def get_event_name(self):
        """ Returns test event name (string) """
        return self._event_name
    def get_setup_action(self):
        """ Returns setup action name (string) """
        return self._setup_action
    def get_teardown_action(self):
        """ Returns teardown action name (string) """
        return self._teardown_action
    def get_initialise_action(self):
        """ Returns initialise action name (string) """
        return self._initialise_action
    def get_test_actions(self):
        """
        Provides test action list in order of definition within TestEvent
        Returns:
            list(NamedTuple(string,boolean) )
            Where: string=action name, boolean: is_async
        """
        return self._test_actions
    def get_file_dependencies(self):
        """ Return monitor file dependency (list) """
        return self._file_depends
    def get_project_dependencies(self):
        """ Return project bundle dependency (list) """
        return self._project_depends
    def _line_contains_package(self, line):
        """ Returns True if package declaration found """
        return ( line.find('package ') > -1 and line.find(';') > -1 )
    def _line_contains(self, line, search):
        """ Returns True if search string found """
        return line.find(search) > -1
    def _line_contains_annotation(self, line):
        """ Returns if line has recognised annotation (boolean) """
        for k,v in self._annotations.items():
            if self._line_contains(line, v):
                return True
        return False
    def _line_contains_action(self, line):
        """ Returns True if line contains 'action ' """
        return self._line_contains(line, 'action ')
    def _line_contains_event(self, line):
        """ Returns True if line contains 'event ' """
        return self._line_contains(line, 'event ')
    def _get_action_name(self, line):
        """ Returns action name, delimited by { or ( """
        action_str = 'action '
        start, end = line.find(action_str), line.find('{')
        # Prefer the parameter list opener when present, e.g. 'action foo(...'.
        if line.find('(') > -1:
            end = line.find('(')
        return line[start+len(action_str):end].strip()
    def _is_action_async(self, line):
        """ Returns True if action contains callback parameter in signature """
        return self._line_contains(line, '(action<> ')
    def _get_event_name(self, l):
        """ Returns event name, delimited by { """
        event_str = 'event '
        # NOTE(review): the -1 drops the character before '{'; assumes a
        # space separates the name from the brace ('event Foo {').
        return l[l.find(event_str)+len(event_str): l.find('{')-1].strip()
    def _get_project_dependencies(self, line):
        """ Returns list bundle dependency names (always includes 'UnitTest') """
        annotation = self._annotations["DEPENDS_ANNOTATION"]
        project_dependencies = ['UnitTest']
        # Entries without a '.mon' suffix are treated as project bundles.
        for depend in line[line.find(annotation)+len(annotation):].split(","):
            if depend.find('.mon') == -1:
                if depend.strip() != "":
                    project_dependencies.append(depend.strip())
        return project_dependencies
    def _get_file_dependencies(self, line):
        """ Returns list of file dependency names """
        annotation = self._annotations["DEPENDS_ANNOTATION"]
        file_dependencies = []
        # Entries containing '.mon' are monitor file dependencies.
        for depend in line[line.find(annotation)+len(annotation):].split(","):
            if depend.find('.mon') > -1:
                file_dependencies.append(depend.strip())
        return file_dependencies
#####################################
# Helper Actions
#####################################
def write_file(source_path, dest_path, substitutions=None):
    """Copy source_path to dest_path, applying string substitutions.

    Args:
        source_path: template file to read (string)
        dest_path: file to write (string)
        substitutions: optional mapping of placeholder -> replacement;
            every occurrence of each key is replaced on every line.
    """
    # BUGFIX: avoid a mutable default argument; an empty mapping is the
    # backward-compatible default behavior.
    if substitutions is None:
        substitutions = {}
    # Open the source first so a missing template does not truncate an
    # existing destination file.
    with open(source_path, "rt") as fin, open(dest_path, "wt") as fout:
        for key, value in substitutions.items():
            pass  # validated lazily below; kept simple on purpose
        for line in fin:
            for key, value in substitutions.items():
                line = line.replace(str(key), str(value))
            fout.write(line)
def load_contents(filepath):
    """Read and return the full text of *filepath*.

    The file is decoded as UTF-8; undecodable bytes are silently dropped.
    """
    with open(filepath, encoding="utf8", errors="ignore") as source:
        contents = source.read()
    return contents
def list_files(path, filenameFilter='*', source_dir=None):
    """Recursively collect entries under *path* matching a glob pattern.

    Args:
        path: root directory to walk (string)
        filenameFilter: glob pattern applied within each directory (string)
        source_dir: optional sub-directory of *path* to restrict the walk to
    Returns:
        list of matching paths (note: glob may also match directories)
    """
    if source_dir:
        path = os.path.join(path, source_dir)
    matches = []
    for dirpath, _dirnames, _filenames in os.walk(path):
        matches.extend(glob(os.path.join(dirpath, filenameFilter)))
    return matches
def create_pysys_test(aunit_test, filename, aunit_template_dir, source_dir, output_dir):
    """ Creates a runnable pysys test.

    Using substitutions attainable from aunit_test,
    creates all required *.py, *.mon and folder structure
    for a pysys test.

    Uses *.template files located in $AUNIT_HOME/test-build/template for reference(s).

    Args:
        aunit_test: parsed TestEvent instance to generate a test for
        filename: path of the source TestEvent *.mon file (string)
        aunit_template_dir: directory containing the *.template files (string)
        source_dir: base directory file dependencies are relative to (string)
        output_dir: root directory the pysys test is generated into (string)
    """
    file_dependencies = []
    # Copy File References to resources directory
    for file in aunit_test.get_file_dependencies():
        dependency_filename = os.path.basename(file)
        source_filepath = os.path.join(source_dir, file)
        dest_path = os.path.join(output_dir, 'resources')
        dest_filepath = os.path.join(dest_path, dependency_filename)
        if not os.path.isdir(dest_path):
            os.mkdir(dest_path)
        if os.path.isfile(source_filepath):
            # print copy in verbose mode
            print('\nCopying {} to {}\n'.format(source_filepath, dest_filepath))
            shutil.copy(
                source_filepath,
                dest_filepath
            )
            file_dependencies.append(dependency_filename)
        else:
            # Abort this test entirely: it cannot run with a missing dependency.
            print('\nInvalid file dependency listed within {}: {}. Moving to next test. \n'.format(
                aunit_test.get_event_name(),
                source_filepath
            ))
            return
    testrunner_load_list=''
    # For each test action:
    # - write EPL load command to be substituted into TestRunner.mon template
    # - write pysys assert command to be substituted into run.py template
    for test_action in aunit_test.get_test_actions():
        # Load Asynchronous action into EPL test runner
        if test_action.is_async:
            testrunner_load_list += 't.loadAsynchronous("{}:{}",tests.{});\n'.format(
                aunit_test.get_event_name(),
                test_action.name,
                test_action.name)
        # Load Synchronous action into EPL test runner
        else:
            testrunner_load_list += 't.loadSynchronous("{}:{}",tests.{});\n'.format(
                aunit_test.get_event_name(),
                test_action.name,
                test_action.name)
    # Define File Dependencies (if any)
    resources_dir = '"{}"'.format(os.path.join(output_dir, 'resources'))
    # NOTE: file_dependencies is rebound from list to string here - it
    # becomes the injectMonitorscript source line for the run.py template.
    if len(file_dependencies) > 0:
        # Double the backslashes so Windows paths survive being embedded
        # in generated Python source.
        file_dependencies = 'correlator.injectMonitorscript(filenames={}, filedir={})'.format(
            file_dependencies,
            resources_dir.replace('\\', '\\\\')
        )
    else:
        file_dependencies = ''
    # Placeholder -> value pairs substituted into the *.template files.
    substitutions = {
        '{!depends_bundles}': aunit_test.get_project_dependencies(),
        '{!inject_depends_file_command}': file_dependencies,
        '{!packagename}': aunit_test.get_package(),
        '{!eventname}': aunit_test.get_event_name(),
        '{!load_list}': testrunner_load_list,
        '{!setupaction}': 'tests.{}'.format(aunit_test.get_setup_action()) if aunit_test.has_setup_action() else 'defaultSetup',
        '{!teardownaction}': 'tests.{}'.format(aunit_test.get_teardown_action()) if aunit_test.has_teardown_action() else 'defaultTeardown',
        '{!initialiseaction}': 'tests.{}'.format(aunit_test.get_initialise_action()) if aunit_test.has_initialise_action() else 'defaultInitialise',
        '{!timeout}': os.environ.get('AUNIT_TIMEOUT', 60)
    }
    # Ensure target directory does not yet exist
    # Prepare the pysys test directories
    test_path = os.path.join(output_dir, aunit_test.get_event_name())
    test_input_path = os.path.join(test_path, 'Input')
    if not os.path.isdir(test_path):
        os.mkdir(test_path)
    if not os.path.isdir(test_input_path):
        os.mkdir(test_input_path)
    # Write pysystest.xml
    write_file(
        os.path.join(aunit_template_dir, 'pysystest.template'),
        os.path.join(test_path, 'pysystest.xml'),
        {'{!eventname}':aunit_test.get_event_name()}
    )
    # Write run.py
    write_file(
        os.path.join(aunit_template_dir, 'run_fast.py.template'),
        os.path.join(test_path, 'run.py'),
        substitutions
    )
    # Write TestEvent.mon (verbatim copy of the source test file)
    write_file(
        filename,
        os.path.join(test_input_path, 'TestEvent.mon'),
        {}
    )
    # Write TestRunner.mon
    write_file(
        os.path.join(aunit_template_dir, 'TestRunner_fast.mon.template'),
        os.path.join(test_input_path, 'TestRunner.mon'),
        substitutions
    )
################################################################################
#
# Entry point
#
def main(argv):
    """Generate pysys tests from AUnit TestEvent files.

    Reads configuration from the environment ($AUNIT_HOME mandatory,
    $AUNIT_PROJECT_HOME / $AUNIT_TEST_HOME optional), rebuilds the test
    output directory from scratch and creates one pysys test per valid
    TestEvent found.

    Args:
        argv: command line arguments, excluding the program name (list)
    """
    source_project = None
    source_file = '*.mon'
    aunit_home = os.environ.get('AUNIT_HOME')
    # BUGFIX: fail with a clear message instead of a TypeError from
    # os.path.join(None, ...) when $AUNIT_HOME is not set.
    if not aunit_home:
        print("$AUNIT_HOME environment variable must be set.")
        sys.exit(2)
    aunit_project_home = os.environ.get('AUNIT_PROJECT_HOME',
                                        os.path.join(aunit_home, 'workspace'))
    test_output_dir = os.environ.get('AUNIT_TEST_HOME',
                                     os.path.join(aunit_home, '.__test'))
    try:
        opts, args = getopt.getopt(argv, "hs:f:", ["help", "source=", "file="])
    except getopt.GetoptError as err:
        # BUGFIX: 'except (getopt.GetoptError, err):' raised a NameError on
        # the error path instead of reporting the bad arguments.
        print(err)
        usage()
    for o, a in opts:
        if o in ["-h", "--help"]:
            usage()
        if o in ["-s", "--source"]:
            source_project = a
        if o in ["-f", "--file"]:
            source_file = a
    # Validate AUNIT_HOME and AUNIT_PROJECT_HOME exist
    if not os.path.isdir(aunit_home) or \
       not os.path.isdir(aunit_project_home):
        print("AUNIT_HOME/AUNIT_PROJECT_HOME provided were not valid directories.")
        sys.exit(2)
    # Recreate the output directory from scratch so stale tests never linger.
    if os.path.exists(test_output_dir):
        if not os.access(test_output_dir, os.W_OK):
            # BUGFIX: previously chmod'ed an undefined name 'path' with an
            # unimported 'stat' module.
            os.chmod(test_output_dir, stat.S_IWUSR)
        shutil.rmtree(test_output_dir, ignore_errors=True)
    if not os.path.exists(test_output_dir):
        os.mkdir(test_output_dir)
    # Copy Common Test Directories and Files
    aunit_test_build_dir = os.path.join(aunit_home, 'test-build')
    aunit_template_dir = os.path.join(aunit_test_build_dir, 'template')
    for runner_file in ('runtests.bat', 'runtests.sh', 'pysysproject.xml'):
        shutil.copy(
            os.path.join(aunit_template_dir, runner_file),
            os.path.join(test_output_dir, runner_file)
        )
    shutil.copytree(
        os.path.join(aunit_test_build_dir, 'lib'),
        os.path.join(test_output_dir, 'lib')
    )
    # For Each Valid TestEvent located in $AUNIT_PROJECT_HOME,
    # create pysys test
    for file in list_files(aunit_project_home, source_file, source_project):
        contents = load_contents(file)
        aunit_test = TestEvent(remove_single_line_comments(contents))
        # Determine if *.mon matches test event signature
        if aunit_test.is_valid():
            create_pysys_test(aunit_test=aunit_test,
                              filename=file,
                              aunit_template_dir=aunit_template_dir,
                              source_dir=aunit_project_home,
                              output_dir=test_output_dir)
        elif aunit_test.is_missing_annotations():
            print("WARNING: {} contains tests but TestEvent minimum annotation requirements not met, ignoring.".format(file))


if __name__ == "__main__":
    main(sys.argv[1:])
|
|
"""rbf - Radial basis functions for interpolation/smoothing scattered Nd data.
Written by John Travers <jtravs@gmail.com>, February 2007
Based closely on Matlab code by Alex Chirokov
Additional, large, improvements by Robert Hetland
Some additional alterations by Travis Oliphant
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu>
Copyright (c) 2007, John Travers <jtravs@gmail.com>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Robert Hetland nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from numpy import (sqrt, log, asarray, newaxis, all, dot, exp, eye,
float_)
from scipy import linalg
class Rbf(object):
    """
    Rbf(*args)

    A class for radial basis function approximation/interpolation of
    n-dimensional scattered data.

    Parameters
    ----------
    *args : arrays
        x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
        and d is the array of values at the nodes
    function : str or callable, optional
        The radial basis function, based on the radius, r, given by the norm
        (default is Euclidean distance); the default is 'multiquadric'::

            'multiquadric': sqrt((r/self.epsilon)**2 + 1)
            'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
            'gaussian': exp(-(r/self.epsilon)**2)
            'linear': r
            'cubic': r**3
            'quintic': r**5
            'thin_plate': r**2 * log(r)

        If callable, then it must take 2 arguments (self, r). The epsilon
        parameter will be available as self.epsilon. Other keyword
        arguments passed in will be available as well.
    epsilon : float, optional
        Adjustable constant for gaussian or multiquadrics functions
        - defaults to approximate average distance between nodes (which is
        a good start).
    smooth : float, optional
        Values greater than zero increase the smoothness of the
        approximation. 0 is for interpolation (default), the function will
        always go through the nodal points in this case.
    norm : callable, optional
        A function that returns the 'distance' between two points, with
        inputs as arrays of positions (x, y, z, ...), and an output as an
        array of distance. E.g, the default::

            def euclidean_norm(x1, x2):
                return sqrt( ((x1 - x2)**2).sum(axis=0) )

        which is called with x1=x1[ndims,newaxis,:] and
        x2=x2[ndims,:,newaxis] such that the result is a matrix of the
        distances from each point in x1 to each point in x2.

    Examples
    --------
    >>> rbfi = Rbf(x, y, z, d)  # radial basis function interpolator instance
    >>> di = rbfi(xi, yi, zi)   # interpolated values
    """

    def _euclidean_norm(self, x1, x2):
        """Default norm: Euclidean distance, summing over the leading axis."""
        return sqrt(((x1 - x2)**2).sum(axis=0))

    # --- Built-in radial basis kernels; r is an array of distances. ---
    def _h_multiquadric(self, r):
        return sqrt((1.0/self.epsilon*r)**2 + 1)

    def _h_inverse_multiquadric(self, r):
        return 1.0/sqrt((1.0/self.epsilon*r)**2 + 1)

    def _h_gaussian(self, r):
        return exp(-(1.0/self.epsilon*r)**2)

    def _h_linear(self, r):
        return r

    def _h_cubic(self, r):
        return r**3

    def _h_quintic(self, r):
        return r**5

    def _h_thin_plate(self, r):
        result = r**2 * log(r)
        result[r == 0] = 0  # the spline is zero at zero
        return result

    # Setup self._function and do smoke test on initial r
    def _init_function(self, r):
        """Resolve ``self.function`` into ``self._function`` and apply it to r.

        Accepts either a kernel name (see the class docstring) or a
        callable taking 1 argument (r) or 2 arguments (self, r).

        Raises:
            ValueError: for unknown kernel names, callables with an
                unsupported signature, or callables whose output shape
                differs from the input.
        """
        if isinstance(self.function, str):
            self.function = self.function.lower()
            _mapped = {'inverse': 'inverse_multiquadric',
                       'inverse multiquadric': 'inverse_multiquadric',
                       'thin-plate': 'thin_plate'}
            if self.function in _mapped:
                self.function = _mapped[self.function]
            func_name = "_h_" + self.function
            if hasattr(self, func_name):
                self._function = getattr(self, func_name)
            else:
                functionlist = [x[3:] for x in dir(self)
                                if x.startswith('_h_')]
                raise ValueError("function must be a callable or one of " +
                                 ", ".join(functionlist))
        elif callable(self.function):
            allow_one = False
            if hasattr(self.function, '__code__') or \
                    hasattr(self.function, 'func_code'):
                val = self.function
                allow_one = True
            elif hasattr(self.function, "im_func"):
                # Python 2 bound method.
                val = self.function.im_func
            elif hasattr(self.function, "__call__"):
                # Callable object: inspect the underlying __call__ function.
                val = getattr(self.function.__call__, '__func__',
                              self.function.__call__)
            else:
                raise ValueError("Cannot determine number of arguments to function")
            # BUGFIX: use __code__ (Python 3) with a func_code fallback
            # instead of the Python-2-only val.func_code; this also removes
            # the dead reference to the unimported 'sys'/'new' modules.
            code = getattr(val, '__code__', None)
            if code is None:
                code = val.func_code
            argcount = code.co_argcount
            if allow_one and argcount == 1:
                self._function = self.function
            elif argcount == 2:
                # BUGFIX: the original referenced an undefined name
                # 'function' here; bind self.function as an instance method
                # so the callable can reach self.epsilon and friends.
                self._function = self.function.__get__(self, Rbf)
            else:
                raise ValueError("Function argument must take 1 or 2 arguments.")

        # Smoke test: the kernel must map r to an array of the same shape.
        a0 = self._function(r)
        if a0.shape != r.shape:
            raise ValueError("Callable must take array and return array of the same shape")
        return a0

    def __init__(self, *args, **kwargs):
        """See the class docstring for the accepted arguments."""
        # Last positional argument is the data; the rest are coordinates.
        self.xi = asarray([asarray(a, dtype=float).flatten()
                           for a in args[:-1]])
        self.N = self.xi.shape[-1]
        self.di = asarray(args[-1]).flatten()
        # BUGFIX: the original 'assert [x.size == ... for x in ...]'
        # asserted a non-empty list (always true); validate for real.
        if not all([x.size == self.di.size for x in self.xi]):
            raise ValueError('All arrays must be equal length')
        self.norm = kwargs.pop('norm', self._euclidean_norm)
        r = self._call_norm(self.xi, self.xi)
        self.epsilon = kwargs.pop('epsilon', r.mean())
        self.smooth = kwargs.pop('smooth', 0.0)
        self.function = kwargs.pop('function', 'multiquadric')
        # attach anything left in kwargs to self
        # for use by any user-callable function or
        # to save on the object returned.
        for item, value in kwargs.items():
            setattr(self, item, value)
        # Interpolation matrix; subtracting smooth*I relaxes exact
        # interpolation into a smoothing approximation.
        self.A = self._init_function(r) - eye(self.N)*self.smooth
        self.nodes = linalg.solve(self.A, self.di)

    def _call_norm(self, x1, x2):
        """Return the matrix of norms between every point of x1 and x2."""
        if len(x1.shape) == 1:
            x1 = x1[newaxis, :]
        if len(x2.shape) == 1:
            x2 = x2[newaxis, :]
        x1 = x1[..., :, newaxis]
        x2 = x2[..., newaxis, :]
        return self.norm(x1, x2)

    def __call__(self, *args):
        """Evaluate the interpolant at the given coordinate arrays.

        All coordinate arrays must share one shape; the result has that
        same shape.
        """
        args = [asarray(x) for x in args]
        if not all([x.shape == y.shape for x in args for y in args]):
            raise ValueError('Array lengths must be equal')
        shp = args[0].shape
        self.xa = asarray([a.flatten() for a in args], dtype=float)
        r = self._call_norm(self.xa, self.xi)
        return dot(self._function(r), self.nodes).reshape(shp)
|
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Applies online refinement while running inference.
Instructions: Run static inference first before calling this script. Make sure
to point output_dir to the same folder where static inference results were
saved previously.
For example use, please refer to README.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import random
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import model
import nets
import reader
import util
# Shortcut for TF 1.x file I/O utilities.
gfile = tf.gfile
SAVE_EVERY = 1  # Defines the interval that predictions should be saved at.
SAVE_PREVIEWS = True  # If set, will save image previews of depth predictions.
FIXED_SEED = 8964  # Fixed seed for repeatability.
# --- Input/output locations ---
flags.DEFINE_string('output_dir', None, 'Directory to store predictions. '
                    'Assumes that regular inference has been executed before '
                    'and results were stored in this folder.')
flags.DEFINE_string('data_dir', None, 'Folder pointing to preprocessed '
                    'triplets to fine-tune on.')
flags.DEFINE_string('triplet_list_file', None, 'Text file containing paths to '
                    'image files to process. Paths should be relative with '
                    'respect to the list file location. Every line should be '
                    'of the form [input_folder_name] [input_frame_num] '
                    '[output_path], where [output_path] is optional to specify '
                    'a different path to store the prediction.')
flags.DEFINE_string('triplet_list_file_remains', None, 'Optional text file '
                    'containing relative paths to image files which should not '
                    'be fine-tuned, e.g. because of missing adjacent frames. '
                    'For all files listed, the static prediction will be '
                    'copied instead. File can be empty. If not, every line '
                    'should be of the form [input_folder_name] '
                    '[input_frame_num] [output_path], where [output_path] is '
                    'optional to specify a different path to take and store '
                    'the unrefined prediction from/to.')
flags.DEFINE_string('model_ckpt', None, 'Model checkpoint to optimize.')
flags.DEFINE_string('ft_name', '', 'Optional prefix for temporary files.')
flags.DEFINE_string('file_extension', 'png', 'Image data file extension.')
# --- Optimizer and loss weights ---
flags.DEFINE_float('learning_rate', 0.0001, 'Adam learning rate.')
flags.DEFINE_float('beta1', 0.9, 'Adam momentum.')
flags.DEFINE_float('reconstr_weight', 0.85, 'Frame reconstruction loss weight.')
flags.DEFINE_float('ssim_weight', 0.15, 'SSIM loss weight.')
flags.DEFINE_float('smooth_weight', 0.01, 'Smoothness loss weight.')
flags.DEFINE_float('icp_weight', 0.0, 'ICP loss weight.')
flags.DEFINE_float('size_constraint_weight', 0.0005, 'Weight of the object '
                   'size constraint loss. Use only with motion handling.')
# --- Input geometry ---
flags.DEFINE_integer('batch_size', 1, 'The size of a sample batch')
flags.DEFINE_integer('img_height', 128, 'Input frame height.')
flags.DEFINE_integer('img_width', 416, 'Input frame width.')
flags.DEFINE_integer('seq_length', 3, 'Number of frames in sequence.')
# --- Model architecture and training behavior ---
flags.DEFINE_enum('architecture', nets.RESNET, nets.ARCHITECTURES,
                  'Defines the architecture to use for the depth prediction '
                  'network. Defaults to ResNet-based encoder and accompanying '
                  'decoder.')
flags.DEFINE_boolean('imagenet_norm', True, 'Whether to normalize the input '
                     'images channel-wise so that they match the distribution '
                     'most ImageNet-models were trained on.')
flags.DEFINE_float('weight_reg', 0.05, 'The amount of weight regularization to '
                   'apply. This has no effect on the ResNet-based encoder '
                   'architecture.')
flags.DEFINE_boolean('exhaustive_mode', False, 'Whether to exhaustively warp '
                     'from any frame to any other instead of just considering '
                     'adjacent frames. Where necessary, multiple egomotion '
                     'estimates will be applied. Does not have an effect if '
                     'compute_minimum_loss is enabled.')
flags.DEFINE_boolean('random_scale_crop', False, 'Whether to apply random '
                     'image scaling and center cropping during training.')
flags.DEFINE_bool('depth_upsampling', True, 'Whether to apply depth '
                  'upsampling of lower-scale representations before warping to '
                  'compute reconstruction loss on full-resolution image.')
flags.DEFINE_bool('depth_normalization', True, 'Whether to apply depth '
                  'normalization, that is, normalizing inverse depth '
                  'prediction maps by their mean to avoid degeneration towards '
                  'small values.')
flags.DEFINE_bool('compute_minimum_loss', True, 'Whether to take the '
                  'element-wise minimum of the reconstruction/SSIM error in '
                  'order to avoid overly penalizing dis-occlusion effects.')
flags.DEFINE_bool('use_skip', True, 'Whether to use skip connections in the '
                  'encoder-decoder architecture.')
flags.DEFINE_bool('joint_encoder', False, 'Whether to share parameters '
                  'between the depth and egomotion networks by using a joint '
                  'encoder architecture. The egomotion network is then '
                  'operating only on the hidden representation provided by the '
                  'joint encoder.')
# --- Fine-tuning control ---
flags.DEFINE_float('egomotion_threshold', 0.01, 'Minimum egomotion magnitude '
                   'to apply finetuning. If lower, just forwards the ordinary '
                   'prediction.')
flags.DEFINE_integer('num_steps', 20, 'Number of optimization steps to run.')
flags.DEFINE_boolean('handle_motion', True, 'Whether the checkpoint was '
                     'trained with motion handling.')
flags.DEFINE_bool('flip', False, 'Whether images should be flipped as well as '
                  'resulting predictions (for test-time augmentation). This '
                  'currently applies to the depth network only.')
FLAGS = flags.FLAGS
# These flags have no sensible defaults and must always be supplied.
flags.mark_flag_as_required('output_dir')
flags.mark_flag_as_required('data_dir')
flags.mark_flag_as_required('model_ckpt')
flags.mark_flag_as_required('triplet_list_file')
def main(_):
  """Runs fine-tuning and inference.

  There are three categories of images.
  1) Images where we have previous and next frame, and that are not filtered
     out by the heuristic. For them, we will use the fine-tuned predictions.
  2) Images where we have previous and next frame, but that were filtered out
     by our heuristic. For them, we will use the ordinary prediction instead.
  3) Images where we have at least one missing adjacent frame. For them, we
     will use the ordinary prediction as indicated by
     triplet_list_file_remains (if provided). They will also not be part of
     the generated inference list in the first place.

  Raises:
    ValueError: Invalid parameters have been passed.
  """
  # Reject flag combinations the model implementation does not support.
  if FLAGS.handle_motion and FLAGS.joint_encoder:
    raise ValueError('Using a joint encoder is currently not supported when '
                     'modeling object motion.')
  if FLAGS.handle_motion and FLAGS.seq_length != 3:
    raise ValueError('The current motion model implementation only supports '
                     'using a sequence length of three.')
  if FLAGS.handle_motion and not FLAGS.compute_minimum_loss:
    raise ValueError('Computing the minimum photometric loss is required when '
                     'enabling object motion handling.')
  if FLAGS.size_constraint_weight > 0 and not FLAGS.handle_motion:
    raise ValueError('To enforce object size constraints, enable motion '
                     'handling.')
  if FLAGS.icp_weight > 0.0:
    raise ValueError('ICP is currently not supported.')
  if FLAGS.compute_minimum_loss and FLAGS.seq_length % 2 != 1:
    raise ValueError('Compute minimum loss requires using an odd number of '
                     'images in a sequence.')
  if FLAGS.compute_minimum_loss and FLAGS.exhaustive_mode:
    raise ValueError('Exhaustive mode has no effect when compute_minimum_loss '
                     'is enabled.')
  if FLAGS.img_width % (2 ** 5) != 0 or FLAGS.img_height % (2 ** 5) != 0:
    logging.warn('Image size is not divisible by 2^5. For the architecture '
                 'employed, this could cause artefacts caused by resizing in '
                 'lower dimensions.')
  # Normalize output_dir: downstream code appends suffixes like '_ft'.
  if FLAGS.output_dir.endswith('/'):
    FLAGS.output_dir = FLAGS.output_dir[:-1]
  # Create file lists to prepare fine-tuning, save it to unique_file.
  unique_file_name = (str(datetime.datetime.now().date()) + '_' +
                      str(datetime.datetime.now().time()).replace(':', '_'))
  unique_file = os.path.join(FLAGS.data_dir, unique_file_name + '.txt')
  with gfile.FastGFile(FLAGS.triplet_list_file, 'r') as f:
    files_to_process = f.readlines()
    files_to_process = [line.rstrip() for line in files_to_process]
    files_to_process = [line for line in files_to_process if len(line)]
  logging.info('Creating unique file list %s with %s entries.', unique_file,
               len(files_to_process))
  with gfile.FastGFile(unique_file, 'w') as f_out:
    # Each triplet is repeated once per sess.run fetch the fine-tuning loop
    # will perform: num_steps training fetches plus 3 tensor evaluations
    # (depth, image stack, egomotion) per save interval — presumably so the
    # input queue keeps yielding the same example; TODO(review): confirm
    # against the reader used by model.Model.
    fetches_network = FLAGS.num_steps * FLAGS.batch_size
    fetches_saves = FLAGS.batch_size * int(np.floor(FLAGS.num_steps/SAVE_EVERY))
    repetitions = fetches_network + 3 * fetches_saves
    for i in range(len(files_to_process)):
      for _ in range(repetitions):
        f_out.write(files_to_process[i] + '\n')
  # Read remaining files.
  remaining = []
  if gfile.Exists(FLAGS.triplet_list_file_remains):
    with gfile.FastGFile(FLAGS.triplet_list_file_remains, 'r') as f:
      remaining = f.readlines()
      remaining = [line.rstrip() for line in remaining]
      remaining = [line for line in remaining if len(line)]
  logging.info('Running fine-tuning on %s files, %s files are remaining.',
               len(files_to_process), len(remaining))
  # Run fine-tuning process and save predictions in id-folders.
  tf.set_random_seed(FIXED_SEED)
  np.random.seed(FIXED_SEED)
  random.seed(FIXED_SEED)
  flipping_mode = reader.FLIP_ALWAYS if FLAGS.flip else reader.FLIP_NONE
  # NOTE(review): input_file is the bare name (no directory or '.txt');
  # model.Model presumably joins data_dir and appends the extension —
  # confirm against its reader implementation.
  train_model = model.Model(data_dir=FLAGS.data_dir,
                            file_extension=FLAGS.file_extension,
                            is_training=True,
                            learning_rate=FLAGS.learning_rate,
                            beta1=FLAGS.beta1,
                            reconstr_weight=FLAGS.reconstr_weight,
                            smooth_weight=FLAGS.smooth_weight,
                            ssim_weight=FLAGS.ssim_weight,
                            icp_weight=FLAGS.icp_weight,
                            batch_size=FLAGS.batch_size,
                            img_height=FLAGS.img_height,
                            img_width=FLAGS.img_width,
                            seq_length=FLAGS.seq_length,
                            architecture=FLAGS.architecture,
                            imagenet_norm=FLAGS.imagenet_norm,
                            weight_reg=FLAGS.weight_reg,
                            exhaustive_mode=FLAGS.exhaustive_mode,
                            random_scale_crop=FLAGS.random_scale_crop,
                            flipping_mode=flipping_mode,
                            random_color=False,
                            depth_upsampling=FLAGS.depth_upsampling,
                            depth_normalization=FLAGS.depth_normalization,
                            compute_minimum_loss=FLAGS.compute_minimum_loss,
                            use_skip=FLAGS.use_skip,
                            joint_encoder=FLAGS.joint_encoder,
                            build_sum=False,
                            shuffle=False,
                            input_file=unique_file_name,
                            handle_motion=FLAGS.handle_motion,
                            size_constraint_weight=FLAGS.size_constraint_weight,
                            train_global_scale_var=False)
  failed_heuristic_ids = finetune_inference(train_model, FLAGS.model_ckpt,
                                            FLAGS.output_dir + '_ft')
  logging.info('Fine-tuning completed, %s files were filtered out by '
               'heuristic.', len(failed_heuristic_ids))
  # Images rejected by the egomotion heuristic fall back to the unrefined
  # (static) prediction, same as the 'remaining' list.
  for failed_id in failed_heuristic_ids:
    failed_entry = files_to_process[failed_id]
    remaining.append(failed_entry)
  logging.info('In total, %s images were fine-tuned, while %s were not.',
               len(files_to_process)-len(failed_heuristic_ids), len(remaining))
  # Copy all results to have the same structural output as running ordinary
  # inference.
  for i in range(len(files_to_process)):
    if files_to_process[i] not in remaining:  # Use fine-tuned result.
      elements = files_to_process[i].split(' ')
      source_file = os.path.join(FLAGS.output_dir + '_ft', FLAGS.ft_name +
                                 'id_' + str(i),
                                 str(FLAGS.num_steps).zfill(10) +
                                 ('_flip' if FLAGS.flip else ''))
      if len(elements) == 2:  # No differing mapping defined.
        target_dir = os.path.join(FLAGS.output_dir + '_ft', elements[0])
        target_file = os.path.join(
            target_dir, elements[1] + ('_flip' if FLAGS.flip else ''))
      else:  # Other mapping for file defined, copy to this location instead.
        target_dir = os.path.join(
            FLAGS.output_dir + '_ft', os.path.dirname(elements[2]))
        target_file = os.path.join(
            target_dir,
            os.path.basename(elements[2]) + ('_flip' if FLAGS.flip else ''))
      if not gfile.Exists(target_dir):
        gfile.MakeDirs(target_dir)
      logging.info('Copy refined result %s to %s.', source_file, target_file)
      gfile.Copy(source_file + '.npy', target_file + '.npy', overwrite=True)
      gfile.Copy(source_file + '.txt', target_file + '.txt', overwrite=True)
      gfile.Copy(source_file + '.%s' % FLAGS.file_extension,
                 target_file + '.%s' % FLAGS.file_extension, overwrite=True)
  for j in range(len(remaining)):
    elements = remaining[j].split(' ')
    if len(elements) == 2:  # No differing mapping defined.
      target_dir = os.path.join(FLAGS.output_dir + '_ft', elements[0])
      target_file = os.path.join(
          target_dir, elements[1] + ('_flip' if FLAGS.flip else ''))
    else:  # Other mapping for file defined, copy to this location instead.
      target_dir = os.path.join(
          FLAGS.output_dir + '_ft', os.path.dirname(elements[2]))
      target_file = os.path.join(
          target_dir,
          os.path.basename(elements[2]) + ('_flip' if FLAGS.flip else ''))
    if not gfile.Exists(target_dir):
      gfile.MakeDirs(target_dir)
    # The unrefined (static inference) result lives at the same relative
    # path without the '_ft' suffix.
    source_file = target_file.replace('_ft', '')
    logging.info('Copy unrefined result %s to %s.', source_file, target_file)
    gfile.Copy(source_file + '.npy', target_file + '.npy', overwrite=True)
    gfile.Copy(source_file + '.%s' % FLAGS.file_extension,
               target_file + '.%s' % FLAGS.file_extension, overwrite=True)
  logging.info('Done, predictions saved in %s.', FLAGS.output_dir + '_ft')
def finetune_inference(train_model, model_ckpt, output_dir):
  """Fine-tune the model online, one input image at a time, saving predictions.

  For each image index, restores the pretrained weights from model_ckpt,
  runs FLAGS.num_steps training steps, and every SAVE_EVERY steps writes the
  current depth prediction (.npy), averaged egomotion (.txt) and, optionally,
  a preview visualization to a per-image folder under output_dir.

  Args:
    train_model: model object exposing train_op, global_step,
      incr_global_step, depth, egomotion and image_stack (project type --
      exact tensor shapes not inspectable here).
    model_ckpt: path to a pretrained checkpoint, or None to skip restoring.
    output_dir: directory in which per-image prediction folders are created.

  Returns:
    List of image indices for which the egomotion-magnitude heuristic failed.
  """
  vars_to_restore = None
  if model_ckpt is not None:
    vars_to_restore = util.get_vars_to_save_and_restore(model_ckpt)
    ckpt_path = model_ckpt
  pretrain_restorer = tf.train.Saver(vars_to_restore)
  # No supervisor-managed checkpointing/summaries: this loop handles its own
  # output files.
  sv = tf.train.Supervisor(logdir=None, save_summaries_secs=0, saver=None,
                           summary_op=None)
  config = tf.ConfigProto()
  config.gpu_options.allow_growth = True
  img_nr = 0
  failed_heuristic = []
  with sv.managed_session(config=config) as sess:
    # TODO(casser): Caching the weights would be better to avoid I/O bottleneck.
    while True:  # Loop terminates when all examples have been processed.
      # Reset to the pretrained weights before fine-tuning on the next image.
      if model_ckpt is not None:
        logging.info('Restored weights from %s', ckpt_path)
        pretrain_restorer.restore(sess, ckpt_path)
      logging.info('Running fine-tuning, image %s...', img_nr)
      img_pred_folder = os.path.join(
          output_dir, FLAGS.ft_name + 'id_' + str(img_nr))
      if not gfile.Exists(img_pred_folder):
        gfile.MakeDirs(img_pred_folder)
      step = 1

      # Run fine-tuning.
      while step <= FLAGS.num_steps:
        logging.info('Running step %s of %s.', step, FLAGS.num_steps)
        fetches = {
            'train': train_model.train_op,
            'global_step': train_model.global_step,
            'incr_global_step': train_model.incr_global_step
        }
        _ = sess.run(fetches)
        if step % SAVE_EVERY == 0:
          # Get latest prediction for middle frame, highest scale.
          pred = train_model.depth[1][0].eval(session=sess)
          if FLAGS.flip:
            # Undo the horizontal flip applied to the input.
            pred = np.flip(pred, axis=2)
          # image_stack holds the 3-frame input concatenated along channels;
          # slice out previous/center/next RGB frames.
          input_img = train_model.image_stack.eval(session=sess)
          input_img_prev = input_img[0, :, :, 0:3]
          input_img_center = input_img[0, :, :, 3:6]
          input_img_next = input_img[0, :, :, 6:]
          img_pred_file = os.path.join(
              img_pred_folder,
              str(step).zfill(10) + ('_flip' if FLAGS.flip else '') + '.npy')
          motion = np.squeeze(train_model.egomotion.eval(session=sess))
          # motion of shape (seq_length - 1, 6).
          motion = np.mean(motion, axis=0)  # Average egomotion across frames.

          if SAVE_PREVIEWS or step == FLAGS.num_steps:
            # Also save preview of depth map.
            color_map = util.normalize_depth_for_display(
                np.squeeze(pred[0, :, :]))
            visualization = np.concatenate(
                (input_img_prev, input_img_center, input_img_next, color_map))
            motion_s = [str(m) for m in motion]
            s_rep = ','.join(motion_s)
            with gfile.Open(img_pred_file.replace('.npy', '.txt'), 'w') as f:
              f.write(s_rep)
            util.save_image(
                img_pred_file.replace('.npy', '.%s' % FLAGS.file_extension),
                visualization, FLAGS.file_extension)
          with gfile.Open(img_pred_file, 'wb') as f:
            np.save(f, pred)

          # Apply heuristic to not finetune if egomotion magnitude is too low.
          # NOTE(review): only the translation components (first 3) are used;
          # assumes the 6-vector is [tx, ty, tz, rx, ry, rz] -- TODO confirm.
          ego_magnitude = np.linalg.norm(motion[:3], ord=2)
          heuristic = ego_magnitude >= FLAGS.egomotion_threshold
          if not heuristic and step == FLAGS.num_steps:
            failed_heuristic.append(img_nr)

        step += 1
      img_nr += 1
  return failed_heuristic
# Script entry point: delegate flag parsing and execution to absl's app.run.
if __name__ == '__main__':
  app.run(main)
|
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from abc import abstractproperty
from collections import OrderedDict, defaultdict, deque
from textwrap import dedent
from twitter.common.collections import OrderedSet
from pants.engine.addressable import Exactly
from pants.engine.selectors import (Select, SelectDependencies, SelectLiteral, SelectProjection,
SelectVariant, type_or_constraint_repr)
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class Rule(AbstractClass):
  """Rules declare how to produce products for the product graph.

  A rule describes what dependencies must be provided to produce a particular product. They also act
  as factories for constructing the nodes within the graph.
  """

  @abstractproperty
  def input_selectors(self):
    """Collection of input selectors that must be satisfied before this rule can run."""

  @abstractproperty
  def func(self):
    """Rule function."""

  @abstractproperty
  def output_product_type(self):
    """The product type produced by this rule."""

  def as_triple(self):
    """Constructs an (output, input, func) triple for this rule."""
    return (self.output_product_type, self.input_selectors, self.func)
class TaskRule(datatype('TaskRule', ['input_selectors', 'func', 'product_type', 'constraint']),
               Rule):
  """A Rule whose task function runs once every one of its input selectors is satisfied."""

  @property
  def output_product_type(self):
    # The declared product type doubles as the rule's output type.
    return self.product_type

  def __str__(self):
    product_repr = type_or_constraint_repr(self.product_type)
    return '({}, {!r}, {})'.format(product_repr, self.input_selectors, self.func.__name__)
class RuleValidationResult(datatype('RuleValidationResult', ['rule', 'errors', 'warnings'])):
  """Container for errors and warnings found during rule validation."""

  def valid(self):
    # A result is valid only when nothing at all was reported.
    return not self.has_errors() and not self.has_warnings()

  def has_warnings(self):
    return bool(self.warnings)

  def has_errors(self):
    return bool(self.errors)
class RulesetValidator(object):
  """Validates that the set of rules used by the node builder has no missing tasks."""

  def __init__(self, node_builder, goal_to_product, root_subject_fns):
    # An empty root subject mapping would make the full graph vacuous, so fail fast.
    if not root_subject_fns:
      raise ValueError('root_subject_fns must not be empty')
    self._goal_to_product = goal_to_product
    # Build the full rule graph eagerly; validate() only inspects it afterwards.
    self._graph = GraphMaker(node_builder, root_subject_fns).full_graph()

  def validate(self):
    """ Validates that all tasks can be executed based on the declared product types and selectors.

    It checks
     - all products selected by tasks are produced by some task or intrinsic, or come from a root
       subject type
     - all goal products are also produced
    """
    # TODO cycles, because it should handle that.
    error_message = self._graph.error_message()
    if error_message:
      raise ValueError(error_message)
    task_and_intrinsic_product_types = tuple(r.output_product_type for r in self._graph.root_rules)
    self._validate_goal_products(task_and_intrinsic_product_types)

  def _validate_goal_products(self, task_and_intrinsic_product_types):
    # Every product a goal requests must be producible by some rule in the graph.
    for goal, goal_product in self._goal_to_product.items():
      if goal_product not in task_and_intrinsic_product_types:
        # NB: We could also check goals of the Goal type to see if the products they request are
        # also available.
        raise ValueError(
          'no task for product used by goal "{}": {}'.format(goal, goal_product.__name__))
class SingletonRule(datatype('SingletonRule', ['product_type', 'func']), Rule):
  """A default rule for a product, which is thus a singleton for that product."""

  @property
  def input_selectors(self):
    # Singletons are always satisfiable: they select nothing.
    return ()

  @property
  def output_product_type(self):
    return self.product_type

  def __repr__(self):
    cls_name = type(self).__name__
    return '{}({})'.format(cls_name, self.product_type.__name__)
class IntrinsicRule(datatype('IntrinsicRule', ['subject_type', 'product_type', 'func']), Rule):
  """A default rule for a pair of subject+product."""

  @property
  def input_selectors(self):
    # Intrinsics depend only on their subject, so they declare no selectors.
    return ()

  @property
  def output_product_type(self):
    return self.product_type

  def __repr__(self):
    cls_name = type(self).__name__
    return '{}({})'.format(cls_name, self.func.__name__)
class NodeBuilder(datatype('NodeBuilder', ['tasks', 'intrinsics', 'singletons'])):
  """Holds an index of tasks and intrinsics used to instantiate Nodes.

  `tasks` maps product type (or constraint) -> OrderedSet of rules producing it.
  `intrinsics` maps (subject_type, product_type) -> IntrinsicRule.
  `singletons` maps product_type -> SingletonRule.
  """

  @classmethod
  def create(cls, task_entries, intrinsic_entries=None, singleton_entries=None):
    """Creates a NodeBuilder with tasks indexed by their output type.

    Accepts task entries either as Rule instances or as
    (output_type, input_selectors, task_func) triples.
    """
    intrinsic_entries = intrinsic_entries or tuple()
    singleton_entries = singleton_entries or tuple()
    # NB make tasks ordered so that gen ordering is deterministic.
    serializable_tasks = OrderedDict()

    def add_task(product_type, rule):
      # Index a rule under one of the product types it can produce.
      if product_type not in serializable_tasks:
        serializable_tasks[product_type] = OrderedSet()
      serializable_tasks[product_type].add(rule)

    for entry in task_entries:
      if isinstance(entry, Rule):
        add_task(entry.output_product_type, entry)
      elif isinstance(entry, (tuple, list)) and len(entry) == 3:
        output_type, input_selectors, task = entry
        # Normalize the declared output into an Exactly constraint.
        if isinstance(output_type, Exactly):
          constraint = output_type
        elif isinstance(output_type, type):
          constraint = Exactly(output_type)
        else:
          raise TypeError("Unexpected product_type type {}, for rule {}".format(output_type, entry))

        factory = TaskRule(tuple(input_selectors), task, output_type, constraint)
        # TODO: The heterogenity here has some confusing implications here:
        #   see https://github.com/pantsbuild/pants/issues/4005
        # Index the rule under each member type AND under the constraint itself.
        for kind in constraint.types:
          add_task(kind, factory)
        add_task(constraint, factory)
      else:
        raise TypeError("Unexpected rule type: {}."
                        " Rules either extend Rule, or are 3 elem tuples.".format(type(entry)))

    intrinsics = dict()
    for output_type, input_type, func in intrinsic_entries:
      key = (input_type, output_type)
      # Duplicate (subject, product) intrinsics are a programming error.
      if key in intrinsics:
        raise ValueError('intrinsic provided by {} has already been provided by: {}'.format(
          func.__name__, intrinsics[key]))
      intrinsics[key] = IntrinsicRule(input_type, output_type, func)

    singletons = dict()
    for output_type, func in singleton_entries:
      # Duplicate singleton products are likewise rejected.
      if output_type in singletons:
        raise ValueError('singleton provided by {} has already been provided by: {}'.format(
          func.__name__, singletons[output_type]))
      singletons[output_type] = SingletonRule(output_type, func)
    return cls(serializable_tasks, intrinsics, singletons)

  def all_rules(self):
    """Returns a set containing all rules including instrinsics."""
    declared_rules = set(rule for rules_for_product in self.tasks.values()
                         for rule in rules_for_product)
    declared_intrinsics = set(rule for rule in self.intrinsics.values())
    declared_singletons = set(rule for rule in self.singletons.values())
    return declared_rules.union(declared_intrinsics).union(declared_singletons)

  def all_produced_product_types(self, subject_type):
    """Returns every product type producible for subject_type by any rule kind."""
    intrinsic_products = set(prod for subj, prod in self.intrinsics.keys()
                             if subj == subject_type)
    return intrinsic_products.union(set(self.tasks.keys())).union(set(self.singletons.keys()))

  def gen_rules(self, subject_type, product_type):
    """Yields rules that can produce product_type for subject_type.

    Precedence: singleton, then intrinsic, then declared tasks.
    """
    # Singeltons or intrinsics that provide the requested product for the current subject type.
    singleton_node_factory = self.singletons.get(product_type)
    intrinsic_node_factory = self.intrinsics.get((subject_type, product_type))
    if singleton_node_factory:
      yield singleton_node_factory
    elif intrinsic_node_factory:
      yield intrinsic_node_factory
    else:
      # Tasks that provide the requested product.
      for node_factory in self._lookup_tasks(product_type):
        yield node_factory

  def gen_nodes(self, subject, product_type, variants):
    """Yields graph nodes for each rule applicable to (subject, product_type)."""
    for rule in self.gen_rules(type(subject), product_type):
      yield rule.as_node(subject, variants)

  def _lookup_tasks(self, product_type):
    # Yields the task rules indexed under product_type, if any.
    for entry in self.tasks.get(product_type, tuple()):
      yield entry
class CanHaveDependencies(object):
  """Marker class for graph entries that can have dependencies on other graph entries."""
  # Subclasses are expected to shadow these with real values.
  input_selectors = None
  subject_type = None
# NB: an entry may subclass both marker classes (e.g. RuleGraphEntry).
class CanBeDependency(object):
  """Marker class for graph entries that are leaves, and can be depended upon."""
class RuleGraph(datatype('RuleGraph',
                         ['root_subject_types',
                          'root_rules',
                          'rule_dependencies',
                          'unfulfillable_rules'])):
  """A graph containing rules mapping rules to their dependencies taking into account subject types.

  This is a graph of rules. It models dependencies between rules, along with the subject types for
  those rules. This allows the resulting graph to include cases where a selector is fulfilled by the
  subject of the graph.

  `root_subject_types` the root subject types this graph was generated with.
  `root_rules` The rule entries that can produce the root products this graph was generated
               with.
  `rule_dependencies` A map from rule entries to the rule entries they depend on.
                      The collections of dependencies are contained by RuleEdges objects.
                      Keys must be subclasses of CanHaveDependencies
                      values must be subclasses of CanBeDependency
  `unfulfillable_rules` A map of rule entries to collections of Diagnostics
                        containing the reasons why they were eliminated from the graph.
  """
  # TODO constructing nodes from the resulting graph.
  # Possible approach:
  # - walk out from root nodes, constructing each node.
  # - when hit a node that can't be constructed yet, ie the subject type changes,
  #   skip and collect for later.
  # - inject the constructed nodes into the product graph.

  def error_message(self):
    """Returns a nice error message for errors in the rule graph."""
    # Collate: rule -> reason -> set of subject types that hit that reason.
    collated_errors = defaultdict(lambda : defaultdict(set))
    for rule_entry, diagnostics in self.unfulfillable_rules.items():
      # don't include the root rules in the error
      # message since they aren't real.
      if type(rule_entry) is RootRule:
        continue
      for diagnostic in diagnostics:
        collated_errors[rule_entry.rule][diagnostic.reason].add(diagnostic.subject_type)

    def subject_type_str(t):
      # Render a subject type (None, a type, a tuple of types, or other) for display.
      if t is None:
        return 'Any'
      elif type(t) is type:
        return t.__name__
      elif type(t) is tuple:
        return ', '.join(x.__name__ for x in t)
      else:
        return str(t)

    def format_messages(rule, subject_types_by_reasons):
      errors = '\n '.join(sorted('{} with subject types: {}'
        .format(reason, ', '.join(sorted(subject_type_str(t) for t in subject_types)))
        for reason, subject_types in subject_types_by_reasons.items()))
      return '{}:\n {}'.format(rule, errors)

    # Rules that still appear in the dependency table are at least partially
    # used, so don't report them as broken.
    used_rule_lookup = set(rule_entry.rule for rule_entry in self.rule_dependencies.keys())
    formatted_messages = sorted(format_messages(rule, subject_types_by_reasons)
                                for rule, subject_types_by_reasons in collated_errors.items()
                                if rule not in used_rule_lookup)
    if not formatted_messages:
      return None
    return 'Rules with errors: {}\n {}'.format(len(formatted_messages),
                                               '\n '.join(formatted_messages))

  def __str__(self):
    if not self.root_rules:
      return '{empty graph}'
    root_subject_types_str = ', '.join(x.__name__ for x in self.root_subject_types)
    root_rules_str = ', '.join(sorted(str(r) for r in self.root_rules))
    return dedent("""
              {{
                root_subject_types: ({},)
                root_rules: {}
                {}
              }}""".format(root_subject_types_str,
                           root_rules_str,
                           '\n '.join(self._dependency_strs())
                           )).strip()

  def _dependency_strs(self):
    # One "rule => (deps,)" line per entry, sorted for stable output.
    return sorted('{} => ({},)'.format(rule, ', '.join(str(d) for d in deps))
                  for rule, deps in self.rule_dependencies.items())
class RuleGraphSubjectIsProduct(datatype('RuleGraphSubjectIsProduct', ['value']), CanBeDependency):
  """Wrapper for when the dependency is the subject."""

  @property
  def output_product_type(self):
    # The subject type itself is what this entry "produces".
    return self.value

  def __repr__(self):
    cls_name = type(self).__name__
    return '{}({})'.format(cls_name, self.value.__name__)

  def __str__(self):
    return 'SubjectIsProduct({})'.format(self.value.__name__)
class RuleGraphLiteral(datatype('RuleGraphLiteral', ['value', 'product_type']), CanBeDependency):
  """The dependency is the literal value held by SelectLiteral."""

  @property
  def output_product_type(self):
    return self.product_type

  def __repr__(self):
    product_name = self.product_type.__name__
    return '{}({}, {})'.format(type(self).__name__, self.value, product_name)

  def __str__(self):
    product_name = self.product_type.__name__
    return 'Literal({}, {})'.format(self.value, product_name)
class RuleGraphEntry(datatype('RuleGraphEntry', ['subject_type', 'rule']),
                     CanBeDependency,
                     CanHaveDependencies):
  """A synthetic rule with a specified subject type"""

  @property
  def input_selectors(self):
    # Delegate straight to the wrapped rule.
    return self.rule.input_selectors

  @property
  def output_product_type(self):
    return self.rule.output_product_type

  def __repr__(self):
    subject_name = self.subject_type.__name__
    return '{}({}, {})'.format(type(self).__name__, subject_name, self.rule)

  def __str__(self):
    return '{} of {}'.format(self.rule, self.subject_type.__name__)
class RootRule(datatype('RootRule', ['subject_type', 'selector']), CanHaveDependencies):
  """A synthetic rule representing a root selector."""

  @property
  def input_selectors(self):
    # A root has exactly one selector: the one it was created for.
    return (self.selector,)

  @property
  def output_product_type(self):
    return self.selector.product

  @property
  def rule(self):
    # Roots are synthetic, so they act as their own backing rule.
    return self

  def __str__(self):
    return '{} for {}'.format(self.selector, self.subject_type.__name__)
# subject_type may be None when no particular subject type applies;
# reason is a human-readable explanation used by RuleGraph.error_message().
class Diagnostic(datatype('Diagnostic', ['subject_type', 'reason'])):
  """Holds on to error reasons for problems with the build graph."""
class UnreachableRule(object):
  """A rule entry that can't be reached."""

  def __init__(self, rule):
    # Keep a handle on the underlying rule purely for error reporting.
    self.rule = rule
class RuleEdges(object):
  """Represents the edges from a rule to its dependencies via selectors.

  Tracks both the flat collection of dependencies and, per selector, which
  dependencies were added to satisfy that selector.
  """
  # TODO add a highwater mark count to count how many branches are eliminated

  def __init__(self, dependencies=tuple(), selector_to_deps=None):
    self._dependencies = dependencies
    if selector_to_deps is None:
      self._selector_to_deps = defaultdict(tuple)
    else:
      self._selector_to_deps = selector_to_deps

  def add_edges_via(self, selector, new_dependencies):
    """Records that new_dependencies were added to satisfy selector.

    A None selector is only legal with an empty dependency collection (used to
    register a rule that has no selectors at all).

    :raises ValueError: if dependencies are supplied without a selector.
    """
    if selector is None and new_dependencies:
      raise ValueError("Cannot specify a None selector with non-empty dependencies!")
    tupled_other_rules = tuple(new_dependencies)
    self._selector_to_deps[selector] += tupled_other_rules
    self._dependencies += tupled_other_rules

  def has_edges_for(self, selector):
    """Returns True if any dependencies have been recorded for selector."""
    return selector in self._selector_to_deps

  def __contains__(self, rule):
    return rule in self._dependencies

  def __iter__(self):
    return iter(self._dependencies)

  def makes_unfulfillable(self, dep_to_eliminate):
    """Returns true if removing dep_to_eliminate makes this set of edges unfulfillable.

    That is the case when it is the sole dependency overall, or the sole way to
    satisfy some selector.
    """
    # NB: rewritten from a for/else construct whose `else` bound to the loop
    # (not the `if`) -- any() states the same check unambiguously.
    if len(self._dependencies) == 1 and self._dependencies[0] == dep_to_eliminate:
      return True
    return any(len(deps) == 1 and deps[0] == dep_to_eliminate
               for deps in self._selector_to_deps.values())

  def without_rule(self, dep_to_eliminate):
    """Returns a copy of these edges with every occurrence of dep_to_eliminate removed."""
    new_selector_to_deps = defaultdict(tuple)
    for selector, deps in self._selector_to_deps.items():
      new_selector_to_deps[selector] = tuple(d for d in deps if d != dep_to_eliminate)

    return RuleEdges(tuple(d for d in self._dependencies if d != dep_to_eliminate),
                     new_selector_to_deps)
class GraphMaker(object):
  """Builds RuleGraphs from a NodeBuilder's rule index and root-subject selector factories."""

  def __init__(self, nodebuilder, root_subject_fns):
    # root_subject_fns: map of root subject type -> fn(product) -> selector.
    self.root_subject_selector_fns = root_subject_fns
    self.nodebuilder = nodebuilder

  def generate_subgraph(self, root_subject, requested_product):
    """Builds the graph rooted at a single (subject, product) request."""
    root_subject_type = type(root_subject)
    root_selector = self.root_subject_selector_fns[root_subject_type](requested_product)
    root_rules, edges, unfulfillable = self._construct_graph(RootRule(root_subject_type, root_selector))
    root_rules, edges = self._remove_unfulfillable_rules_and_dependents(root_rules,
                                                                        edges, unfulfillable)
    return RuleGraph((root_subject_type,), root_rules, edges, unfulfillable)

  def full_graph(self):
    """Produces a full graph based on the root subjects and all of the products produced by rules."""
    full_root_rules = set()
    full_dependency_edges = {}
    full_unfulfillable_rules = {}
    # Accumulate one combined graph across every (root subject, product) pair.
    for root_subject_type, selector_fn in self.root_subject_selector_fns.items():
      for product in sorted(self.nodebuilder.all_produced_product_types(root_subject_type)):
        beginning_root = RootRule(root_subject_type, selector_fn(product))
        root_dependencies, rule_dependency_edges, unfulfillable_rules = self._construct_graph(
          beginning_root,
          root_rules=full_root_rules,
          rule_dependency_edges=full_dependency_edges,
          unfulfillable_rules=full_unfulfillable_rules
        )
        full_root_rules = set(root_dependencies)
        full_dependency_edges = rule_dependency_edges
        full_unfulfillable_rules = unfulfillable_rules

    # Any declared rule neither present in the graph nor otherwise accounted
    # for is flagged as unreachable.
    rules_in_graph = set(entry.rule for entry in full_dependency_edges.keys())
    rules_eliminated_during_construction = [entry.rule for entry in full_unfulfillable_rules.keys()]
    # NB: relies on Python 2 dict.values() returning lists (list concatenation).
    rules_used = set(rules_eliminated_during_construction + self.nodebuilder.intrinsics.values() + self.nodebuilder.singletons.values())
    declared_rules = self.nodebuilder.all_rules()
    unreachable_rules = declared_rules.difference(rules_in_graph, rules_used)
    for rule in sorted(unreachable_rules):
      full_unfulfillable_rules[UnreachableRule(rule)] = [Diagnostic(None, 'Unreachable')]

    full_root_rules, full_dependency_edges = self._remove_unfulfillable_rules_and_dependents(
      full_root_rules,
      full_dependency_edges,
      full_unfulfillable_rules)

    return RuleGraph(self.root_subject_selector_fns,
                     list(full_root_rules),
                     full_dependency_edges,
                     full_unfulfillable_rules)

  def _construct_graph(self,
                       beginning_rule,
                       root_rules=None,
                       rule_dependency_edges=None,
                       unfulfillable_rules=None):
    # Breadth-first expansion of rules starting at beginning_rule. The three
    # accumulators may be passed in so full_graph() can grow a single graph.
    root_rules = set() if root_rules is None else root_rules
    rule_dependency_edges = dict() if rule_dependency_edges is None else rule_dependency_edges
    unfulfillable_rules = dict() if unfulfillable_rules is None else unfulfillable_rules
    rules_to_traverse = deque([beginning_rule])

    def _find_rhs_for_select(subject_type, selector):
      # Resolve a Select against a subject type: the subject itself wins if it
      # satisfies the constraint, otherwise every rule producing the product.
      if selector.type_constraint.satisfied_by_type(subject_type):
        # NB a matching subject is always picked first
        return (RuleGraphSubjectIsProduct(subject_type),)
      else:
        return tuple(RuleGraphEntry(subject_type, rule)
                     for rule in self.nodebuilder.gen_rules(subject_type, selector.product))

    def mark_unfulfillable(rule, subject_type, reason):
      # Record a diagnostic explaining why `rule` cannot be satisfied.
      if rule not in unfulfillable_rules:
        unfulfillable_rules[rule] = []
      unfulfillable_rules[rule].append(Diagnostic(subject_type, reason))

    def add_rules_to_graph(rule, selector_path, dep_rules):
      # Queue newly discovered dependencies and record the edge set for `rule`.
      unseen_dep_rules = [g for g in dep_rules
                          if g not in rule_dependency_edges and g not in unfulfillable_rules]
      rules_to_traverse.extend(unseen_dep_rules)
      if type(rule) is RootRule:
        # Root rules have no edges of their own; their deps become graph roots.
        root_rules.update(dep_rules)
        return
      elif rule not in rule_dependency_edges:
        new_edges = RuleEdges()
        new_edges.add_edges_via(selector_path, dep_rules)
        rule_dependency_edges[rule] = new_edges
      else:
        existing_deps = rule_dependency_edges[rule]
        if existing_deps.has_edges_for(selector_path):
          raise ValueError("rule {} already has dependencies set for selector {}"
                           .format(rule, selector_path))
        existing_deps.add_edges_via(selector_path, dep_rules)

    while rules_to_traverse:
      entry = rules_to_traverse.popleft()
      # Pure leaves (literals, subject-is-product) need no expansion.
      if isinstance(entry, CanBeDependency) and not isinstance(entry, CanHaveDependencies):
        continue
      if not isinstance(entry, CanHaveDependencies):
        raise TypeError("Cannot determine dependencies of entry not of type CanHaveDependencies: {}"
                        .format(entry))
      if entry in unfulfillable_rules:
        continue
      if entry in rule_dependency_edges:
        continue
      was_unfulfillable = False

      # Resolve each selector of the entry against its subject type.
      for selector in entry.input_selectors:
        if type(selector) in (Select, SelectVariant):
          # TODO, handle the Addresses / Variants case
          rules_or_literals_for_selector = _find_rhs_for_select(entry.subject_type, selector)
          if not rules_or_literals_for_selector:
            mark_unfulfillable(entry, entry.subject_type, 'no matches for {}'.format(selector))
            was_unfulfillable = True
            continue
          add_rules_to_graph(entry, selector, rules_or_literals_for_selector)
        elif type(selector) is SelectLiteral:
          add_rules_to_graph(entry,
                             selector,
                             (RuleGraphLiteral(selector.subject, selector.product),))
        elif type(selector) is SelectDependencies:
          # First resolve the product carrying the dependency list...
          initial_selector = selector.dep_product_selector
          initial_rules_or_literals = _find_rhs_for_select(entry.subject_type, initial_selector)
          if not initial_rules_or_literals:
            mark_unfulfillable(entry,
                               entry.subject_type,
                               'no matches for {} when resolving {}'
                               .format(initial_selector, selector))
            was_unfulfillable = True
            continue

          # ...then resolve the projected product for each declared field type.
          rules_for_dependencies = []
          for field_type in selector.field_types:
            rules_for_field_subjects = _find_rhs_for_select(field_type,
                                                            selector.projected_product_selector)
            rules_for_dependencies.extend(rules_for_field_subjects)

          if not rules_for_dependencies:
            mark_unfulfillable(entry,
                               selector.field_types,
                               'no matches for {} when resolving {}'
                               .format(selector.projected_product_selector, selector))
            was_unfulfillable = True
            continue

          add_rules_to_graph(entry,
                             (selector, selector.dep_product_selector),
                             initial_rules_or_literals)
          add_rules_to_graph(entry,
                             (selector, selector.projected_product_selector),
                             tuple(rules_for_dependencies))
        elif type(selector) is SelectProjection:
          # TODO, could validate that input product has fields
          initial_rules_or_literals = _find_rhs_for_select(entry.subject_type,
                                                           selector.input_product_selector)
          if not initial_rules_or_literals:
            mark_unfulfillable(entry,
                               entry.subject_type,
                               'no matches for {} when resolving {}'
                               .format(selector.input_product_selector, selector))
            was_unfulfillable = True
            continue

          projected_rules = _find_rhs_for_select(selector.projected_subject,
                                                 selector.projected_product_selector)
          if not projected_rules:
            mark_unfulfillable(entry,
                               selector.projected_subject,
                               'no matches for {} when resolving {}'
                               .format(selector.projected_product_selector, selector))
            was_unfulfillable = True
            continue

          add_rules_to_graph(entry,
                             (selector, selector.input_product_selector),
                             initial_rules_or_literals)
          add_rules_to_graph(entry,
                             (selector, selector.projected_product_selector),
                             projected_rules)
        else:
          raise TypeError('Unexpected type of selector: {}'.format(selector))
      if not was_unfulfillable and entry not in rule_dependency_edges:
        # NB: In this case, there are no selectors.
        add_rules_to_graph(entry, None, tuple())

    return root_rules, rule_dependency_edges, unfulfillable_rules

  def _remove_unfulfillable_rules_and_dependents(self,
                                                 root_rules,
                                                 rule_dependency_edges,
                                                 unfulfillable_rules):
    """Removes all unfulfillable rules transitively from the roots and the dependency edges.

    Takes the current root rule set and dependency table and removes all rules that are not
    transitively fulfillable.

    Deforestation. Leaping from tree to tree."""
    # could experiment with doing this for each rule added and deduping the traversal list
    removal_traversal = deque(unfulfillable_rules.keys())
    while removal_traversal:
      unfulfillable_entry = removal_traversal.popleft()
      # Snapshot items() because entries are rewritten during iteration.
      for current_entry, dependency_edges in tuple(rule_dependency_edges.items()):
        if current_entry in unfulfillable_rules:
          # NB: these are removed at the end
          continue
        if dependency_edges.makes_unfulfillable(unfulfillable_entry):
          # Losing this dependency dooms current_entry too; propagate.
          unfulfillable_rules[current_entry] = [Diagnostic(current_entry.subject_type,
            'depends on unfulfillable {}'.format(unfulfillable_entry))]
          removal_traversal.append(current_entry)
        else:
          rule_dependency_edges[current_entry] = dependency_edges.without_rule(unfulfillable_entry)

    rule_dependency_edges = dict((k, v) for k, v in rule_dependency_edges.items()
                                 if k not in unfulfillable_rules)
    root_rules = tuple(r for r in root_rules if r not in unfulfillable_rules)

    return root_rules, rule_dependency_edges
|
|
#!/usr/bin/python2
# Copyright (c) 2013, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import errno
import getopt
import os
import sys
import roslib
roslib.load_manifest('tools')
import rospy
import std_msgs.msg
import dataflow
from baxter_msgs.msg import (
UpdateSources,
UpdateStatus,
)
class Updater(object):
    """
    Control the updater on the robot.

    Signals:
        status_changed:  Fired when the update status changes.  Passes
                         the current UpdateStatus message.
    """
    def __init__(self):
        self.status_changed = dataflow.Signal()
        self._status = UpdateStatus()
        self._avail_updates = UpdateSources()
        # Subscribe to the list of updates available on attached USB media.
        self._update_sources = rospy.Subscriber(
            '/usb/update_sources',
            UpdateSources,
            self._on_update_sources)
        # Subscribe to the robot-side updater's status stream.
        self._updater_status_sub = rospy.Subscriber(
            '/updater/status',
            UpdateStatus,
            self._on_update_status)
        self._updater_start = rospy.Publisher(
            '/updater/start',
            std_msgs.msg.String)
        self._updater_stop = rospy.Publisher(
            '/updater/stop',
            std_msgs.msg.Empty)
        # Block until the first update-source list arrives (uuid is set).
        dataflow.wait_for(
            lambda: self._avail_updates.uuid != '',
            timeout = 1.0,
            timeout_msg = "Failed to get list of available updates")

    def _on_update_sources(self, msg):
        # Only replace the cached list when the source set actually changed
        # (uuid acts as a version stamp for the list).
        if msg.uuid != self._avail_updates.uuid:
            self._avail_updates = msg

    def _on_update_status(self, msg):
        # Fire the status_changed signal only on an actual state transition.
        if self._status != msg:
            self._status = msg
            self.status_changed(msg)

    def list(self):
        """
        Return a list of tuples (version, uuid) of all available updates
        """
        return [(u.version, u.uuid) for u in self._avail_updates.sources]

    def command_update(self, uuid):
        """
        Command the robot to launch the update with the given uuid.

        @param uuid - uuid of the update to start.

        Raises OSError(EINVAL) if the uuid is not in the available list.
        """
        if not any([u.uuid == uuid for u in self._avail_updates.sources]):
            raise OSError(errno.EINVAL, "Invalid update uuid '%s'" % (uuid,))

        self._updater_start.publish(std_msgs.msg.String(uuid))

    def stop_update(self):
        """
        Stop the current update process, if any.
        """
        self._updater_stop.publish()
def run_update(updater, uuid):
    """
    Run and monitor the progress of an update.

    @param updater - Instance of Updater to use.
    @param uuid - update uuid.

    Returns a shell-style exit code: 0 on success, 1 on failure.
    """
    # Work around lack of a nonlocal keyword in python 2.x: mutate attributes
    # on a shared object instead of rebinding names in the enclosing scope.
    class NonLocal(object):
        pass
    # Fix: instantiate the holder. The previous code bound the class object
    # itself, so state was stored as class attributes shared by all callers.
    nl = NonLocal()
    nl.rc = 1
    nl.done = False

    def on_update_status(msg):
        # Translate updater status transitions into console output and
        # completion / exit-code state.
        if msg.status == UpdateStatus.STS_IDLE:
            nl.done = True
        elif msg.status == UpdateStatus.STS_INVALID:
            print ("Invalid update uuid, '%s'." % (uuid,))
            nl.done = True
        elif msg.status == UpdateStatus.STS_BUSY:
            print ("Update already in progress (may be shutting down).")
            nl.done = True
        elif msg.status == UpdateStatus.STS_CANCELLED:
            print ("Update cancelled.")
            nl.done = True
        elif msg.status == UpdateStatus.STS_ERR:
            print ("Update failed: %s." % (msg.long_description,))
            nl.done = True
            nl.rc = 1
        elif msg.status == UpdateStatus.STS_LOAD_KEXEC:
            # Success path: the robot reboots into the new image.
            print ("Robot will now reboot to finish updating...")
            nl.rc = 0
        else:
            print ("Updater: %s" % (msg.long_description))

    def on_shutdown():
        # Ensure an in-flight update is stopped if this node shuts down.
        updater.stop_update()
    rospy.on_shutdown(on_shutdown)

    updater.status_changed.connect(on_update_status)

    try:
        updater.command_update(uuid)
    except OSError as e:
        if e.errno == errno.EINVAL:
            print(str(e))
            return 1
        raise

    try:
        dataflow.wait_for(
            lambda: nl.done == True,
            timeout = 5 * 60,
            timeout_msg = "Timeout waiting for update to succeed")
    except Exception as e:
        # ESHUTDOWN is expected when rospy shuts down for the kexec reboot;
        # anything else is a real failure.
        if not (hasattr(e, 'errno') and e.errno == errno.ESHUTDOWN):
            print (str(e))
            nl.rc = 1
    return nl.rc
def usage():
    """Print command-line usage for this tool to stdout."""
    print """
%s [ARGUMENTS]

    -h, --help          This screen
    -l, --list          List available updates
    -u, --update [UUID] Launch the given update
    """ % (os.path.basename(sys.argv[0]),)
def main():
    """Parse command-line options and dispatch to the list/update commands."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hlu:',
                                   ['help', 'list', 'update=',])
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)

    cmd = None
    uuid = ''
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif o in ('-l', '--list'):
            cmd = 'list'
        elif o in ('-u', '--update'):
            cmd = 'update'
            uuid = a

    # Updater() needs a running node to subscribe/publish.
    rospy.init_node('update_robot', anonymous=True)
    updater = Updater()

    if cmd == 'list':
        updates = updater.list()
        if not len(updates):
            print ("No available updates")
        else:
            print ("%-30s%s" % ("Version", "UUID"))
            for update in updates:
                print("%-30s%s" % (update[0], update[1]))
        sys.exit(0)
    elif cmd == 'update':
        if uuid == '':
            print "Error: no update uuid specified"
            sys.exit(1)
        # Exit with run_update's shell-style return code.
        sys.exit(run_update(updater, uuid))
# Script entry point.
if __name__ == '__main__':
    main()
|
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitR07c_CompleteLHS(HimesisPreConditionPatternLHS):

    def __init__(self):
        """
        Build the himesis graph representing the AToM3 model
        HUnitR07c_CompleteLHS.
        """
        # Flag this instance as compiled before calling the base
        # constructor, exactly as the generated code does.
        self.is_compiled = True
        super(HUnitR07c_CompleteLHS, self).__init__(
            name='HUnitR07c_CompleteLHS', num_nodes=0, edges=[])
        self.add_edges([])

        # Graph-level attributes.
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """return True"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, 'HUnitR07c_CompleteLHS')
        self["equations"] = []

        # Match (7.2.m.*) and apply (7.2.a.*) class nodes in MT_label__
        # order; every class node carries the trivial 'return True' guard.
        class_nodes = [
            ('MT_pre__Transition',   '7.2.m.0Transition'),
            ('MT_pre__Vertex',       '7.2.m.1Vertex'),
            ('MT_pre__StateMachine', '7.2.m.2StateMachine'),
            ('MT_pre__State',        '7.2.m.3State'),
            ('MT_pre__Trigger',      '7.2.m.4Trigger'),
            ('MT_pre__Signal',       '7.2.m.5Signal'),
            ('MT_pre__State',        '7.2.m.6State'),
            ('MT_pre__ListenBranch', '7.2.a.0ListenBranch'),
            ('MT_pre__Seq',          '7.2.a.1Seq'),
            ('MT_pre__Trigger',      '7.2.a.2Trigger'),
            ('MT_pre__Listen',       '7.2.a.3Listen'),
            ('MT_pre__ListenBranch', '7.2.a.4ListenBranch'),
            ('MT_pre__Inst',         '7.2.a.5Inst'),
            ('MT_pre__Listen',       '7.2.a.6Listen'),
        ]
        # Association nodes: (link type, association attribute, GUID seed).
        # directLink_S links belong to the match side, directLink_T to the
        # apply side; the guard checks the association attribute name.
        assoc_nodes = [
            ('MT_pre__directLink_S', 'src',
             '7.2.m.0Transitionassoc147.2.m.1Vertex'),
            ('MT_pre__directLink_S', 'owningStateMachine',
             '7.2.m.1Vertexassoc157.2.m.2StateMachine'),
            ('MT_pre__directLink_S', 'states',
             '7.2.m.2StateMachineassoc167.2.m.3State'),
            ('MT_pre__directLink_S', 'triggers',
             '7.2.m.0Transitionassoc177.2.m.4Trigger'),
            ('MT_pre__directLink_S', 'signal',
             '7.2.m.4Triggerassoc187.2.m.5Signal'),
            ('MT_pre__directLink_S', 'transitions',
             '7.2.m.6Stateassoc197.2.m.0Transition'),
            ('MT_pre__directLink_T', 'p',
             '7.2.a.0ListenBranchassoc207.2.a.1Seq'),
            ('MT_pre__directLink_T', 'p',
             '7.2.a.1Seqassoc217.2.a.2Trigger'),
            ('MT_pre__directLink_T', 'p',
             '7.2.a.1Seqassoc227.2.a.3Listen'),
            ('MT_pre__directLink_T', 'branches',
             '7.2.a.3Listenassoc237.2.a.4ListenBranch'),
            ('MT_pre__directLink_T', 'p',
             '7.2.a.4ListenBranchassoc247.2.a.5Inst'),
            ('MT_pre__directLink_T', 'branches',
             '7.2.a.6Listenassoc257.2.a.0ListenBranch'),
        ]
        # Trace links carry no attribute guard.
        trace_nodes = [
            '7.2.a.5Instassoc267.2.m.0Transition',
            '7.2.a.6Listenassoc277.2.m.6State',
        ]

        index = 0
        for mm, seed in class_nodes:
            self.add_node()
            self.vs[index]["MT_pre__attr1"] = """return True"""
            self.vs[index]["MT_label__"] = str(index + 1)
            self.vs[index]["mm__"] = mm
            self.vs[index]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, seed)
            index += 1
        for mm, attr, seed in assoc_nodes:
            self.add_node()
            # Reproduces the generated guard verbatim, including the
            # trailing space before the closing quotes.
            self.vs[index]["MT_pre__attr1"] = \
                'return attr_value == "%s" ' % attr
            self.vs[index]["MT_label__"] = str(index + 1)
            self.vs[index]["mm__"] = mm
            self.vs[index]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, seed)
            index += 1
        for seed in trace_nodes:
            self.add_node()
            self.vs[index]["MT_label__"] = str(index + 1)
            self.vs[index]["mm__"] = """MT_pre__trace_link"""
            self.vs[index]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, seed)
            index += 1

        # Attribute equations: (node label, attribute) bound to another
        # node's attribute or to a constant.
        self['equations'].append(((7, 'channel'), (5, 'name')))
        self['equations'].append(((9, 'channel'), ('constant', 'exit_in')))
        self['equations'].append(((11, 'channel'), ('constant', 'exack_in')))

        # Directed edges class-node -> association-node -> class-node,
        # in the same order the generator emitted them.
        self.add_edges([
            (0, 14), (14, 1),    # Transition --src--> Vertex
            (1, 15), (15, 2),    # Vertex --owningStateMachine--> StateMachine
            (2, 16), (16, 3),    # StateMachine --states--> State
            (0, 17), (17, 4),    # Transition --triggers--> Trigger
            (4, 18), (18, 5),    # Trigger --signal--> Signal
            (6, 19), (19, 0),    # State --transitions--> Transition
            (7, 20), (20, 8),    # ListenBranch --p--> Seq
            (8, 21), (21, 9),    # Seq --p--> Trigger
            (8, 22), (22, 10),   # Seq --p--> Listen
            (10, 23), (23, 11),  # Listen --branches--> ListenBranch
            (11, 24), (24, 12),  # ListenBranch --p--> Inst
            (13, 25), (25, 7),   # Listen --branches--> ListenBranch
            (12, 26), (26, 0),   # Inst --trace--> Transition
            (13, 27), (27, 6),   # Listen --trace--> State
        ])

    # --- attribute evaluators for the match classes (labels 1-7) ---
    def eval_attr11(self, attr_value, this):
        return True

    def eval_attr12(self, attr_value, this):
        return True

    def eval_attr13(self, attr_value, this):
        return True

    def eval_attr14(self, attr_value, this):
        return True

    def eval_attr15(self, attr_value, this):
        return True

    def eval_attr16(self, attr_value, this):
        return True

    def eval_attr17(self, attr_value, this):
        return True

    # --- attribute evaluators for the apply classes (labels 8-14) ---
    def eval_attr18(self, attr_value, this):
        return True

    def eval_attr19(self, attr_value, this):
        return True

    def eval_attr110(self, attr_value, this):
        return True

    def eval_attr111(self, attr_value, this):
        return True

    def eval_attr112(self, attr_value, this):
        return True

    def eval_attr113(self, attr_value, this):
        return True

    def eval_attr114(self, attr_value, this):
        return True

    # --- attribute evaluators for the match associations (labels 15-20) ---
    def eval_attr115(self, attr_value, this):
        return attr_value == "src"

    def eval_attr116(self, attr_value, this):
        return attr_value == "owningStateMachine"

    def eval_attr117(self, attr_value, this):
        return attr_value == "states"

    def eval_attr118(self, attr_value, this):
        return attr_value == "triggers"

    def eval_attr119(self, attr_value, this):
        return attr_value == "signal"

    def eval_attr120(self, attr_value, this):
        return attr_value == "transitions"

    # --- attribute evaluators for the apply associations (labels 21-26) ---
    def eval_attr121(self, attr_value, this):
        return attr_value == "p"

    def eval_attr122(self, attr_value, this):
        return attr_value == "p"

    def eval_attr123(self, attr_value, this):
        return attr_value == "p"

    def eval_attr124(self, attr_value, this):
        return attr_value == "branches"

    def eval_attr125(self, attr_value, this):
        return attr_value == "p"

    def eval_attr126(self, attr_value, this):
        return attr_value == "branches"

    def constraint(self, PreNode, graph):
        """Global pattern constraint: always satisfied."""
        return True
|
|
"""
This file provides fallback data for info attributes
that are required for building OTFs. There are two main
functions that are important:
* :func:`~getAttrWithFallback`
* :func:`~preflightInfo`
There are a set of other functions that are used internally
for synthesizing values for specific attributes. These can be
used externally as well.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import logging
import math
from datetime import datetime
import calendar
import time
import unicodedata
import os
from fontTools.misc.py23 import tobytes, tostr, tounicode, unichr
from fontTools.misc.fixedTools import otRound
from fontTools.misc.textTools import binary2num
from fontTools import ufoLib
logger = logging.getLogger(__name__)
# -----------------
# Special Fallbacks
# -----------------
# generic
_styleMapStyleNames = ["regular", "bold", "italic", "bold italic"]
def ascenderFallback(info):
    """Fallback: 80% of the UPM, rounded."""
    return otRound(getAttrWithFallback(info, "unitsPerEm") * 0.8)


def descenderFallback(info):
    """Fallback: -20% of the UPM, rounded."""
    return -otRound(getAttrWithFallback(info, "unitsPerEm") * 0.2)


def capHeightFallback(info):
    """Fallback: 70% of the UPM, rounded."""
    return otRound(getAttrWithFallback(info, "unitsPerEm") * 0.7)


def xHeightFallback(info):
    """Fallback: 50% of the UPM, rounded."""
    return otRound(getAttrWithFallback(info, "unitsPerEm") * 0.5)
def styleMapFamilyNameFallback(info):
    """
    Fallback to *openTypeNamePreferredFamilyName* when the style name
    (*styleMapStyleName*, or *openTypeNamePreferredSubfamilyName* when
    that is unset) is one of *regular*, *bold*, *italic*, *bold italic*;
    otherwise fall back to
    *openTypeNamePreferredFamilyName openTypeNamePreferredSubfamilyName*.
    """
    familyName = getAttrWithFallback(info, "openTypeNamePreferredFamilyName")
    styleName = info.styleMapStyleName or getAttrWithFallback(
        info, "openTypeNamePreferredSubfamilyName")
    # Canonical style names are folded into the family name elsewhere,
    # so they contribute nothing here.
    if styleName is None or styleName.lower() in _styleMapStyleNames:
        styleName = ""
    return (familyName + " " + styleName).strip()
def styleMapStyleNameFallback(info):
    """
    Fallback to *openTypeNamePreferredSubfamilyName* when it is one of
    *regular*, *bold*, *italic*, *bold italic*; otherwise fall back to
    *regular*.
    """
    styleName = getAttrWithFallback(info, "openTypeNamePreferredSubfamilyName")
    if styleName is None:
        return "regular"
    styleName = styleName.strip().lower()
    return styleName if styleName in _styleMapStyleNames else "regular"
# head

# Timestamp format shared by the head-table date helpers below.
_date_format = "%Y/%m/%d %H:%M:%S"


def dateStringForNow():
    """Return the current UTC time formatted as ``_date_format``."""
    return time.strftime(_date_format, time.gmtime())
def openTypeHeadCreatedFallback(info):
    """
    Fallback to the environment variable SOURCE_DATE_EPOCH if set
    (for reproducible builds), otherwise to the current time.
    """
    epoch = os.environ.get("SOURCE_DATE_EPOCH")
    if epoch is not None:
        return datetime.utcfromtimestamp(int(epoch)).strftime(_date_format)
    return dateStringForNow()
# hhea

def openTypeHheaAscenderFallback(info):
    """Fallback to *ascender + typoLineGap*."""
    ascender = getAttrWithFallback(info, "ascender")
    lineGap = getAttrWithFallback(info, "openTypeOS2TypoLineGap")
    return ascender + lineGap
def openTypeHheaDescenderFallback(info):
    """Fallback to *descender*."""
    return getAttrWithFallback(info, "descender")
def openTypeHheaCaretSlopeRiseFallback(info):
    """
    Fallback for the hhea caret slope rise. For a zero italicAngle the
    rise is 1. For a non-zero angle, derive the rise from the
    complementary openTypeHheaCaretSlopeRun when that is defined;
    otherwise use an arbitrary fixed reference point (1000).
    """
    italicAngle = getAttrWithFallback(info, "italicAngle")
    if italicAngle == 0:
        return 1
    slopeRun = getattr(info, "openTypeHheaCaretSlopeRun", None)
    if slopeRun is None:
        return 1000  # just an arbitrary non-zero reference point
    return otRound(slopeRun / math.tan(math.radians(-italicAngle)))
def openTypeHheaCaretSlopeRunFallback(info):
    """
    Fallback for the hhea caret slope run. Zero for an upright font;
    otherwise computed from the complementary caret slope rise.
    """
    italicAngle = getAttrWithFallback(info, "italicAngle")
    if italicAngle == 0:
        return 0
    slopeRise = getAttrWithFallback(info, "openTypeHheaCaretSlopeRise")
    return otRound(math.tan(math.radians(-italicAngle)) * slopeRise)
# name

def openTypeNameVersionFallback(info):
    """Fallback to *versionMajor.versionMinor* in the form ``Version 0.000``."""
    major = getAttrWithFallback(info, "versionMajor")
    minor = getAttrWithFallback(info, "versionMinor")
    return "Version %d.%s" % (major, str(minor).zfill(3))
def openTypeNameUniqueIDFallback(info):
    """
    Fallback to *openTypeNameVersion;openTypeOS2VendorID;postscriptFontName*.
    """
    version = getAttrWithFallback(info, "openTypeNameVersion")
    return "%s;%s;%s" % (
        version.replace("Version ", ""),
        getAttrWithFallback(info, "openTypeOS2VendorID"),
        getAttrWithFallback(info, "postscriptFontName"),
    )
def openTypeNamePreferredFamilyNameFallback(info):
    """Fallback to *familyName*."""
    return getAttrWithFallback(info, "familyName")
def openTypeNamePreferredSubfamilyNameFallback(info):
    """Fallback to *styleName*."""
    return getAttrWithFallback(info, "styleName")
def openTypeNameCompatibleFullNameFallback(info):
    """
    Fallback to *styleMapFamilyName styleMapStyleName*, omitting the
    style name when it is *regular*.
    """
    familyName = getAttrWithFallback(info, "styleMapFamilyName")
    styleName = getAttrWithFallback(info, "styleMapStyleName")
    if styleName == "regular":
        return familyName
    return familyName + " " + styleName.title()
def openTypeNameWWSFamilyNameFallback(info):
    # WWS names are not yet synthesized.
    return None


def openTypeNameWWSSubfamilyNameFallback(info):
    # WWS names are not yet synthesized.
    return None
# OS/2

def openTypeOS2TypoAscenderFallback(info):
    """Fallback to *ascender*."""
    return getAttrWithFallback(info, "ascender")


def openTypeOS2TypoDescenderFallback(info):
    """Fallback to *descender*."""
    return getAttrWithFallback(info, "descender")
def openTypeOS2TypoLineGapFallback(info):
    """Fallback to *UPM * 1.2 - ascender + descender*, clamped at zero."""
    upm = getAttrWithFallback(info, "unitsPerEm")
    ascender = getAttrWithFallback(info, "ascender")
    descender = getAttrWithFallback(info, "descender")
    lineGap = int(upm * 1.2) - ascender + descender
    return lineGap if lineGap > 0 else 0
def openTypeOS2WinAscentFallback(info):
    """Fallback to *ascender + typoLineGap*."""
    return (getAttrWithFallback(info, "ascender")
            + getAttrWithFallback(info, "openTypeOS2TypoLineGap"))


def openTypeOS2WinDescentFallback(info):
    """Fallback to the absolute value of *descender*."""
    return abs(getAttrWithFallback(info, "descender"))
# postscript

# Printable-ASCII delimiters that are never valid in a PostScript name.
_postscriptFontNameExceptions = set("[](){}<>/%")
# Everything else in printable ASCII (33..126) is allowed.
_postscriptFontNameAllowed = set([unichr(i) for i in range(33, 127)])


def normalizeStringForPostscript(s, allowSpaces=True):
    """
    Normalize *s* so it contains only characters valid in a PostScript
    name. Non-ASCII characters are compatibility-decomposed (NFKD) to
    recover their ASCII parts and, failing that, byte-replaced.
    """
    out = []
    for char in tounicode(s):
        if char == " " and not allowSpaces:
            continue
        if char in _postscriptFontNameExceptions:
            continue
        if char not in _postscriptFontNameAllowed:
            # Use compatibility decomposed form, to keep parts in ascii
            char = unicodedata.normalize("NFKD", char)
            if not set(char) < _postscriptFontNameAllowed:
                char = tounicode(tobytes(char, errors="replace"))
        out.append(tostr(char))
    return "".join(out)
def normalizeNameForPostscript(name):
    """Like :func:`normalizeStringForPostscript`, with spaces removed."""
    return normalizeStringForPostscript(name, allowSpaces=False)
def postscriptFontNameFallback(info):
    """
    Fallback to ``<PreferredFamilyName>-<PreferredSubfamilyName>``,
    restricted to the characters valid in a PostScript name as defined
    in the specification.
    """
    family = getAttrWithFallback(info, "openTypeNamePreferredFamilyName")
    subfamily = getAttrWithFallback(info, "openTypeNamePreferredSubfamilyName")
    return normalizeNameForPostscript("%s-%s" % (family, subfamily))
def postscriptFullNameFallback(info):
    """
    Fallback to
    *openTypeNamePreferredFamilyName openTypeNamePreferredSubfamilyName*.
    """
    family = getAttrWithFallback(info, "openTypeNamePreferredFamilyName")
    subfamily = getAttrWithFallback(info, "openTypeNamePreferredSubfamilyName")
    return "%s %s" % (family, subfamily)
def postscriptSlantAngleFallback(info):
    """Fallback to *italicAngle*."""
    return getAttrWithFallback(info, "italicAngle")
def postscriptUnderlineThicknessFallback(info):
    """Fallback to UPM * 0.05 (50 for a 1000-UPM font), with a warning."""
    logger.warning("Underline thickness not set in UFO, defaulting to UPM * 0.05")
    return getAttrWithFallback(info, "unitsPerEm") * 0.05


def postscriptUnderlinePositionFallback(info):
    """Fallback to UPM * -0.075 (-75 for a 1000-UPM font), with a warning."""
    logger.warning("Underline position not set in UFO, defaulting to UPM * -0.075")
    return getAttrWithFallback(info, "unitsPerEm") * -0.075
def postscriptBlueScaleFallback(info):
    """
    Fallback to a calculated value: 3 / (4 * *maxZoneHeight*), where
    *maxZoneHeight* is the tallest zone found in *postscriptBlueValues*
    and *postscriptOtherBlues*. If no zones are set, return 0.039625.
    """
    maxZoneHeight = 0
    for zones in (
        getAttrWithFallback(info, "postscriptBlueValues"),
        getAttrWithFallback(info, "postscriptOtherBlues"),
    ):
        if not zones:
            continue
        # Zones come in (bottom, top) pairs.
        assert len(zones) % 2 == 0
        for bottom, top in zip(zones[::2], zones[1::2]):
            maxZoneHeight = max(maxZoneHeight, abs(top - bottom))
    if maxZoneHeight == 0:
        return 0.039625
    return 3 / (4 * maxZoneHeight)
# --------------
# Attribute Maps
# --------------
# Static defaults used when the UFO leaves an attribute unset and no
# synthesized fallback (see specialFallbacks below) applies.  Values of
# None mean the attribute is simply treated as absent.
staticFallbackData = dict(
    versionMajor=0,
    versionMinor=0,
    copyright=None,
    trademark=None,
    familyName="New Font",
    styleName="Regular",
    unitsPerEm=1000,
    italicAngle=0,
    # not needed
    year=None,
    note=None,
    openTypeHeadLowestRecPPEM=6,
    openTypeHeadFlags=[0, 1],
    openTypeHheaLineGap=0,
    openTypeHheaCaretOffset=0,
    openTypeNameDesigner=None,
    openTypeNameDesignerURL=None,
    openTypeNameManufacturer=None,
    openTypeNameManufacturerURL=None,
    openTypeNameLicense=None,
    openTypeNameLicenseURL=None,
    openTypeNameDescription=None,
    openTypeNameSampleText=None,
    openTypeNameRecords=[],
    openTypeOS2WidthClass=5,
    openTypeOS2WeightClass=400,
    openTypeOS2Selection=[],
    openTypeOS2VendorID="NONE",
    openTypeOS2Panose=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    openTypeOS2FamilyClass=[0, 0],
    openTypeOS2UnicodeRanges=None,
    openTypeOS2CodePageRanges=None,
    openTypeOS2Type=[2],
    openTypeOS2SubscriptXSize=None,
    openTypeOS2SubscriptYSize=None,
    openTypeOS2SubscriptXOffset=None,
    openTypeOS2SubscriptYOffset=None,
    openTypeOS2SuperscriptXSize=None,
    openTypeOS2SuperscriptYSize=None,
    openTypeOS2SuperscriptXOffset=None,
    openTypeOS2SuperscriptYOffset=None,
    openTypeOS2StrikeoutSize=None,
    openTypeOS2StrikeoutPosition=None,
    # fallback to None on these
    # as the user should be in
    # complete control
    openTypeVheaVertTypoAscender=None,
    openTypeVheaVertTypoDescender=None,
    openTypeVheaVertTypoLineGap=None,
    # fallback to horizontal caret:
    # a value of 0 for the rise
    # and a value of 1 for the run.
    openTypeVheaCaretSlopeRise=0,
    openTypeVheaCaretSlopeRun=1,
    openTypeVheaCaretOffset=0,
    postscriptUniqueID=None,
    postscriptWeightName=None,
    postscriptIsFixedPitch=False,
    postscriptBlueValues=[],
    postscriptOtherBlues=[],
    postscriptFamilyBlues=[],
    postscriptFamilyOtherBlues=[],
    postscriptStemSnapH=[],
    postscriptStemSnapV=[],
    postscriptBlueFuzz=0,
    postscriptBlueShift=7,
    postscriptForceBold=0,
    postscriptDefaultWidthX=200,
    postscriptNominalWidthX=0,
    # not used in OTF
    postscriptDefaultCharacter=None,
    postscriptWindowsCharacterSet=None,
    # not used in OTF
    macintoshFONDFamilyID=None,
    macintoshFONDName=None,
)
# Attributes whose fallback must be synthesized from other attributes;
# maps attribute name -> function taking the info object.
specialFallbacks = dict(
    ascender=ascenderFallback,
    descender=descenderFallback,
    capHeight=capHeightFallback,
    xHeight=xHeightFallback,
    styleMapFamilyName=styleMapFamilyNameFallback,
    styleMapStyleName=styleMapStyleNameFallback,
    openTypeHeadCreated=openTypeHeadCreatedFallback,
    openTypeHheaAscender=openTypeHheaAscenderFallback,
    openTypeHheaDescender=openTypeHheaDescenderFallback,
    openTypeHheaCaretSlopeRise=openTypeHheaCaretSlopeRiseFallback,
    openTypeHheaCaretSlopeRun=openTypeHheaCaretSlopeRunFallback,
    openTypeNameVersion=openTypeNameVersionFallback,
    openTypeNameUniqueID=openTypeNameUniqueIDFallback,
    openTypeNamePreferredFamilyName=openTypeNamePreferredFamilyNameFallback,
    openTypeNamePreferredSubfamilyName=openTypeNamePreferredSubfamilyNameFallback,
    openTypeNameCompatibleFullName=openTypeNameCompatibleFullNameFallback,
    openTypeNameWWSFamilyName=openTypeNameWWSFamilyNameFallback,
    openTypeNameWWSSubfamilyName=openTypeNameWWSSubfamilyNameFallback,
    openTypeOS2TypoAscender=openTypeOS2TypoAscenderFallback,
    openTypeOS2TypoDescender=openTypeOS2TypoDescenderFallback,
    openTypeOS2TypoLineGap=openTypeOS2TypoLineGapFallback,
    openTypeOS2WinAscent=openTypeOS2WinAscentFallback,
    openTypeOS2WinDescent=openTypeOS2WinDescentFallback,
    postscriptFontName=postscriptFontNameFallback,
    postscriptFullName=postscriptFullNameFallback,
    postscriptSlantAngle=postscriptSlantAngleFallback,
    postscriptUnderlineThickness=postscriptUnderlineThicknessFallback,
    postscriptUnderlinePosition=postscriptUnderlinePositionFallback,
    postscriptBlueScale=postscriptBlueScaleFallback,
)
# Attributes with no fallback of any kind: everything in the UFO2
# fontinfo spec that is neither in staticFallbackData nor synthesized
# by a specialFallbacks function.
requiredAttributes = set(ufoLib.fontInfoAttributesVersion2) - (
    set(staticFallbackData.keys()) | set(specialFallbacks.keys())
)
# Attributes that are optional but recommended for good OTF metadata.
# Set literal instead of set([...]): avoids building a throwaway list
# (flake8-comprehensions C403) and reads the same.
recommendedAttributes = {
    "styleMapFamilyName",
    "versionMajor",
    "versionMinor",
    "copyright",
    "trademark",
    "openTypeHeadCreated",
    "openTypeNameDesigner",
    "openTypeNameDesignerURL",
    "openTypeNameManufacturer",
    "openTypeNameManufacturerURL",
    "openTypeNameLicense",
    "openTypeNameLicenseURL",
    "openTypeNameDescription",
    "openTypeNameSampleText",
    "openTypeOS2WidthClass",
    "openTypeOS2WeightClass",
    "openTypeOS2VendorID",
    "openTypeOS2Panose",
    "openTypeOS2FamilyClass",
    "openTypeOS2UnicodeRanges",
    "openTypeOS2CodePageRanges",
    "openTypeOS2TypoLineGap",
    "openTypeOS2Type",
    "postscriptBlueValues",
    "postscriptOtherBlues",
    "postscriptFamilyBlues",
    "postscriptFamilyOtherBlues",
    "postscriptStemSnapH",
    "postscriptStemSnapV",
}
# ------------
# Main Methods
# ------------
def getAttrWithFallback(info, attr):
    """
    Get the value for *attr* from the *info* object.

    If the object does not have the attribute, or the value for the
    attribute is None, the value is taken from the static fallback
    table or synthesized by the matching special fallback function.
    """
    value = getattr(info, attr, None)
    if value is not None:
        return value
    fallback = specialFallbacks.get(attr)
    if fallback is not None:
        return fallback(info)
    return staticFallbackData[attr]
def preflightInfo(info):
    """
    Inspect *info* and report which attributes are absent.

    Returns a dict containing two items; each value is a set of
    attribute names.

    ================== ===
    missingRequired    Required data that is missing.
    missingRecommended Recommended data that is missing.
    ================== ===
    """
    def _absent(attrNames):
        # An attribute counts as missing when it is unset or None.
        return {a for a in attrNames if getattr(info, a, None) is None}

    return dict(
        missingRequired=_absent(requiredAttributes),
        missingRecommended=_absent(recommendedAttributes),
    )
# -----------------
# Low Level Support
# -----------------
# these should not be used outside of this package
def intListToNum(intList, start, length):
    """
    Pack the bit indices in *intList* (restricted to the window
    [start, start + length)) into an integer, grouping bits into bytes
    with the least-significant byte last in the joined string.
    """
    bits = set(intList)
    byteStrings = []
    current = ""
    for i in range(start, start + length):
        # Prepend so the lowest bit of each byte ends up rightmost.
        current = ("1" if i in bits else "0") + current
        if (i + 1) % 8 == 0:
            byteStrings.append(current)
            current = ""
    if current:
        byteStrings.append(current)
    byteStrings.reverse()
    return binary2num(" ".join(byteStrings))
def dateStringToTimeValue(date):
    """
    Parse a ``YYYY/MM/DD HH:MM:SS`` string into a POSIX timestamp
    (seconds since the epoch, interpreted as UTC). Unparseable input
    yields 0.
    """
    try:
        parsed = time.strptime(date, "%Y/%m/%d %H:%M:%S")
    except ValueError:
        return 0
    return calendar.timegm(parsed)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMISCSIDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import time
from oslo.config import cfg
from cinder import compute
from cinder import context
from cinder import exception
from cinder.image import glance
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import periodic_task
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
from cinder import quota
from cinder import utils
from cinder.volume.configuration import Configuration
from cinder.volume.flows import create_volume
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
from cinder.taskflow import states
from eventlet.greenpool import GreenPool
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
# Configuration options owned by the volume manager; registered on the
# global CONF below and re-registered per backend via Configuration in
# VolumeManager.__init__.
volume_manager_opts = [
    cfg.StrOpt('volume_driver',
               default='cinder.volume.drivers.lvm.LVMISCSIDriver',
               help='Driver to use for volume creation'),
    cfg.IntOpt('migration_create_volume_timeout_secs',
               default=300,
               help='Timeout for creating the volume to migrate to '
                    'when performing volume migration (seconds)'),
    cfg.BoolOpt('volume_service_inithost_offload',
                default=False,
                help='Offload pending volume delete during '
                     'volume service startup'),
]

CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
# Deprecated driver path -> current driver path.  Configurations using a
# key on the left are warned and transparently redirected in
# VolumeManager.__init__.
MAPPING = {
    'cinder.volume.driver.RBDDriver': 'cinder.volume.drivers.rbd.RBDDriver',
    'cinder.volume.driver.SheepdogDriver':
    'cinder.volume.drivers.sheepdog.SheepdogDriver',
    'cinder.volume.nexenta.volume.NexentaDriver':
    'cinder.volume.drivers.nexenta.iscsi.NexentaISCSIDriver',
    'cinder.volume.drivers.nexenta.volume.NexentaDriver':
    'cinder.volume.drivers.nexenta.iscsi.NexentaISCSIDriver',
    'cinder.volume.san.SanISCSIDriver':
    'cinder.volume.drivers.san.san.SanISCSIDriver',
    'cinder.volume.san.SolarisISCSIDriver':
    'cinder.volume.drivers.san.solaris.SolarisISCSIDriver',
    'cinder.volume.san.HpSanISCSIDriver':
    'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver',
    'cinder.volume.nfs.NfsDriver':
    'cinder.volume.drivers.nfs.NfsDriver',
    'cinder.volume.solidfire.SolidFire':
    'cinder.volume.drivers.solidfire.SolidFireDriver',
    'cinder.volume.drivers.solidfire.SolidFire':
    'cinder.volume.drivers.solidfire.SolidFireDriver',
    'cinder.volume.storwize_svc.StorwizeSVCDriver':
    'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver',
    'cinder.volume.windows.WindowsDriver':
    'cinder.volume.drivers.windows.windows.WindowsDriver',
    'cinder.volume.drivers.windows.WindowsDriver':
    'cinder.volume.drivers.windows.windows.WindowsDriver',
    'cinder.volume.xiv.XIVDriver':
    'cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver',
    'cinder.volume.drivers.xiv.XIVDriver':
    'cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver',
    'cinder.volume.zadara.ZadaraVPSAISCSIDriver':
    'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver',
    'cinder.volume.driver.ISCSIDriver':
    'cinder.volume.drivers.lvm.LVMISCSIDriver',
    # The NetApp drivers below were consolidated; the common.Deprecated
    # target raises a deprecation error pointing at the replacement.
    'cinder.volume.netapp.NetAppISCSIDriver':
    'cinder.volume.drivers.netapp.common.Deprecated',
    'cinder.volume.drivers.netapp.iscsi.NetAppISCSIDriver':
    'cinder.volume.drivers.netapp.common.Deprecated',
    'cinder.volume.netapp.NetAppCmodeISCSIDriver':
    'cinder.volume.drivers.netapp.common.Deprecated',
    'cinder.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver':
    'cinder.volume.drivers.netapp.common.Deprecated',
    'cinder.volume.netapp_nfs.NetAppNFSDriver':
    'cinder.volume.drivers.netapp.common.Deprecated',
    'cinder.volume.drivers.netapp.nfs.NetAppNFSDriver':
    'cinder.volume.drivers.netapp.common.Deprecated',
    'cinder.volume.drivers.netapp.nfs.NetAppCmodeNfsDriver':
    'cinder.volume.drivers.netapp.common.Deprecated',
    'cinder.volume.drivers.huawei.HuaweiISCSIDriver':
    'cinder.volume.drivers.huawei.HuaweiVolumeDriver'}
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.11'
def __init__(self, volume_driver=None, service_name=None,
             *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    # update_service_capabilities needs service_name to be volume
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    # service_name selects the per-backend config group (multi-backend).
    self.configuration = Configuration(volume_manager_opts,
                                       config_group=service_name)
    # Green thread pool used to offload long-running work
    # (see _add_to_threadpool / init_host).
    self._tp = GreenPool()
    if not volume_driver:
        # Get from configuration, which will get the default
        # if its not using the multi backend
        volume_driver = self.configuration.volume_driver
    if volume_driver in MAPPING:
        # Translate deprecated driver import paths to their new location.
        LOG.warn(_("Driver path %s is deprecated, update your "
                   "configuration to the new path."), volume_driver)
        volume_driver = MAPPING[volume_driver]
    if volume_driver == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver':
        # Deprecated in Havana
        # Not handled in MAPPING because it requires setting a conf option
        LOG.warn(_("ThinLVMVolumeDriver is deprecated, please configure "
                   "LVMISCSIDriver and lvm_type=thin. Continuing with "
                   "those settings."))
        volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver'
        self.configuration.lvm_type = 'thin'
    # Instantiate the (possibly remapped) driver class by dotted path.
    self.driver = importutils.import_object(
        volume_driver,
        configuration=self.configuration,
        db=self.db)
def _add_to_threadpool(self, func, *args, **kwargs):
    # Schedule func(*args, **kwargs) on the green thread pool without
    # waiting for the result (fire-and-forget).
    self._tp.spawn_n(func, *args, **kwargs)
def init_host(self):
    """Do any initialization that needs to be run if this is a
    standalone service.

    Sets up the driver, re-exports existing volumes, cleans up volumes
    stuck downloading, and resumes pending deletes (optionally on the
    thread pool).
    """
    ctxt = context.get_admin_context()
    LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
             {'driver_name': self.driver.__class__.__name__,
              'version': self.driver.get_version()})
    try:
        self.driver.do_setup(ctxt)
        self.driver.check_for_setup_error()
    except Exception as ex:
        LOG.error(_("Error encountered during "
                    "initialization of driver: %(name)s") %
                  {'name': self.driver.__class__.__name__})
        LOG.exception(ex)
        # we don't want to continue since we failed
        # to initialize the driver correctly.
        return
    # at this point the driver is considered initialized.
    # next re-initialize exports and clean up volumes that
    # should be deleted.
    self.driver.set_initialized()
    volumes = self.db.volume_get_all_by_host(ctxt, self.host)
    LOG.debug(_("Re-exporting %s volumes"), len(volumes))
    for volume in volumes:
        if volume['status'] in ['available', 'in-use']:
            self.driver.ensure_export(ctxt, volume)
        elif volume['status'] == 'downloading':
            LOG.info(_("volume %s stuck in a downloading state"),
                     volume['id'])
            self.driver.clear_download(ctxt, volume)
            self.db.volume_update(ctxt, volume['id'], {'status': 'error'})
        else:
            LOG.info(_("volume %s: skipping export"), volume['id'])
    LOG.debug(_('Resuming any in progress delete operations'))
    for volume in volumes:
        if volume['status'] == 'deleting':
            LOG.info(_('Resuming delete on volume: %s') % volume['id'])
            if CONF.volume_service_inithost_offload:
                # Offload all the pending volume delete operations to the
                # threadpool to prevent the main volume service thread
                # from being blocked.
                # BUG FIX: pass the callable and its arguments separately.
                # The previous code invoked delete_volume() synchronously
                # and handed its *return value* to the pool, defeating the
                # offload entirely.
                self._add_to_threadpool(self.delete_volume, ctxt,
                                        volume['id'])
            else:
                # By default, delete volumes sequentially
                self.delete_volume(ctxt, volume['id'])
    # collect and publish service capabilities
    self.publish_service_capabilities(ctxt)
@utils.require_driver_initialized
def create_volume(self, context, volume_id, request_spec=None,
                  filter_properties=None, allow_reschedule=True,
                  snapshot_id=None, image_id=None, source_volid=None):
    """Creates and exports the volume.

    Builds the taskflow-based creation flow, runs it under an elevated
    context, and raises CinderException unless the flow ends in SUCCESS.
    Returns the volume_id on success.
    """
    # NOTE: 'create_volume' here resolves to the flow module imported at
    # file level, not to this method (method names are class attributes).
    flow = create_volume.get_manager_flow(
        self.db,
        self.driver,
        self.scheduler_rpcapi,
        self.host,
        volume_id,
        request_spec=request_spec,
        filter_properties=filter_properties,
        allow_reschedule=allow_reschedule,
        snapshot_id=snapshot_id,
        image_id=image_id,
        source_volid=source_volid,
        # a copy of the caller's context is kept for reschedules
        reschedule_context=context.deepcopy())
    assert flow, _('Manager volume flow not retrieved')
    flow.run(context.elevated())
    if flow.state != states.SUCCESS:
        raise exception.CinderException(_("Failed to successfully complete"
                                          " manager volume workflow"))
    self._reset_stats()
    return volume_id
@utils.require_driver_initialized
def delete_volume(self, context, volume_id):
    """Deletes and unexports volume.

    Refuses attached or non-local volumes. On driver 'busy' errors the
    volume is re-exported and returned to 'available'; on other errors
    the status becomes 'error_deleting' and the exception is re-raised.
    Quota commits and DB cleanup are skipped for migration sources.
    """
    context = context.elevated()
    volume_ref = self.db.volume_get(context, volume_id)
    # Account the quota against the volume's owning project, which may
    # differ from the caller's (e.g. admin-initiated delete).
    if context.project_id != volume_ref['project_id']:
        project_id = volume_ref['project_id']
    else:
        project_id = context.project_id
    LOG.info(_("volume %s: deleting"), volume_ref['id'])
    if volume_ref['attach_status'] == "attached":
        # Volume is still attached, need to detach first
        raise exception.VolumeAttached(volume_id=volume_id)
    if volume_ref['host'] != self.host:
        raise exception.InvalidVolume(
            reason=_("volume is not local to this node"))
    self._notify_about_volume_usage(context, volume_ref, "delete.start")
    self._reset_stats()
    try:
        LOG.debug(_("volume %s: removing export"), volume_ref['id'])
        self.driver.remove_export(context, volume_ref)
        LOG.debug(_("volume %s: deleting"), volume_ref['id'])
        self.driver.delete_volume(volume_ref)
    except exception.VolumeIsBusy:
        # Driver could not delete right now: restore export and status.
        LOG.error(_("Cannot delete volume %s: volume is busy"),
                  volume_ref['id'])
        self.driver.ensure_export(context, volume_ref)
        self.db.volume_update(context, volume_ref['id'],
                              {'status': 'available'})
        return True
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context,
                                  volume_ref['id'],
                                  {'status': 'error_deleting'})
    # If deleting the source volume in a migration, we want to skip quotas
    # and other database updates.
    if volume_ref['migration_status']:
        return True
    # Get reservations
    try:
        reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
        QUOTAS.add_volume_type_opts(context,
                                    reserve_opts,
                                    volume_ref.get('volume_type_id'))
        reservations = QUOTAS.reserve(context,
                                      project_id=project_id,
                                      **reserve_opts)
    except Exception:
        # Best effort: a quota failure must not block the delete itself.
        reservations = None
        LOG.exception(_("Failed to update usages deleting volume"))
    # Delete glance metadata if it exists
    try:
        self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
        LOG.debug(_("volume %s: glance metadata deleted"),
                  volume_ref['id'])
    except exception.GlanceMetadataNotFound:
        LOG.debug(_("no glance metadata found for volume %s"),
                  volume_ref['id'])
    self.db.volume_destroy(context, volume_id)
    LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
    self._notify_about_volume_usage(context, volume_ref, "delete.end")
    # Commit the reservations
    if reservations:
        QUOTAS.commit(context, reservations, project_id=project_id)
    self.publish_service_capabilities(context)
    return True
@utils.require_driver_initialized
def create_snapshot(self, context, volume_id, snapshot_id):
    """Creates and exports the snapshot.

    On driver failure the snapshot status becomes 'error' and the
    exception is re-raised; on success it becomes 'available' and, for
    bootable volumes, glance metadata is copied to the snapshot.
    """
    caller_context = context
    context = context.elevated()
    snapshot_ref = self.db.snapshot_get(context, snapshot_id)
    LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
    self._notify_about_snapshot_usage(
        context, snapshot_ref, "create.start")
    try:
        LOG.debug(_("snapshot %(snap_id)s: creating"),
                  {'snap_id': snapshot_ref['id']})
        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        snapshot_ref['context'] = caller_context
        model_update = self.driver.create_snapshot(snapshot_ref)
        if model_update:
            self.db.snapshot_update(context, snapshot_ref['id'],
                                    model_update)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.snapshot_update(context,
                                    snapshot_ref['id'],
                                    {'status': 'error'})
    self.db.snapshot_update(context,
                            snapshot_ref['id'], {'status': 'available',
                                                 'progress': '100%'})
    vol_ref = self.db.volume_get(context, volume_id)
    if vol_ref.bootable:
        # Bootable volumes carry glance metadata that must follow the
        # snapshot so it can later be re-imaged.
        try:
            self.db.volume_glance_metadata_copy_to_snapshot(
                context, snapshot_ref['id'], volume_id)
        except exception.CinderException as ex:
            LOG.exception(_("Failed updating %(snapshot_id)s"
                            " metadata using the provided volumes"
                            " %(volume_id)s metadata") %
                          {'volume_id': volume_id,
                           'snapshot_id': snapshot_id})
            raise exception.MetadataCopyFailure(reason=ex)
    LOG.info(_("snapshot %s: created successfully"), snapshot_ref['id'])
    self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
    return snapshot_id
@utils.require_driver_initialized
def delete_snapshot(self, context, snapshot_id):
    """Deletes and unexports snapshot.

    Busy snapshots are returned to 'available'; other driver errors set
    'error_deleting' and re-raise. Quota reservations are best-effort.
    """
    caller_context = context
    context = context.elevated()
    snapshot_ref = self.db.snapshot_get(context, snapshot_id)
    project_id = snapshot_ref['project_id']
    LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
    self._notify_about_snapshot_usage(
        context, snapshot_ref, "delete.start")
    try:
        LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])
        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        snapshot_ref['context'] = caller_context
        self.driver.delete_snapshot(snapshot_ref)
    except exception.SnapshotIsBusy:
        LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
                  snapshot_ref['id'])
        self.db.snapshot_update(context,
                                snapshot_ref['id'],
                                {'status': 'available'})
        return True
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.snapshot_update(context,
                                    snapshot_ref['id'],
                                    {'status': 'error_deleting'})
    # Get reservations
    try:
        # Gigabyte accounting for snapshots is configurable.
        if CONF.no_snapshot_gb_quota:
            reserve_opts = {'snapshots': -1}
        else:
            reserve_opts = {
                'snapshots': -1,
                'gigabytes': -snapshot_ref['volume_size'],
            }
        volume_ref = self.db.volume_get(context, snapshot_ref['volume_id'])
        QUOTAS.add_volume_type_opts(context,
                                    reserve_opts,
                                    volume_ref.get('volume_type_id'))
        reservations = QUOTAS.reserve(context,
                                      project_id=project_id,
                                      **reserve_opts)
    except Exception:
        reservations = None
        LOG.exception(_("Failed to update usages deleting snapshot"))
    self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
    self.db.snapshot_destroy(context, snapshot_id)
    LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id'])
    self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")
    # Commit the reservations
    if reservations:
        QUOTAS.commit(context, reservations, project_id=project_id)
    return True
@utils.require_driver_initialized
def attach_volume(self, context, volume_id, instance_uuid, host_name,
                  mountpoint, mode):
    """Updates db to show volume is attached.

    The whole operation runs inside a per-volume external lock so that
    concurrent attach requests for the same volume are serialized.
    """
    @utils.synchronized(volume_id, external=True)
    def do_attach():
        # check the volume status before attaching
        volume = self.db.volume_get(context, volume_id)
        volume_metadata = self.db.volume_admin_metadata_get(
            context.elevated(), volume_id)
        if volume['status'] == 'attaching':
            # An attach is already in flight: only allow a retry by the
            # same instance/host with the same mode.
            if (volume['instance_uuid'] and volume['instance_uuid'] !=
                    instance_uuid):
                msg = _("being attached by another instance")
                raise exception.InvalidVolume(reason=msg)
            if (volume['attached_host'] and volume['attached_host'] !=
                    host_name):
                msg = _("being attached by another host")
                raise exception.InvalidVolume(reason=msg)
            if (volume_metadata.get('attached_mode') and
                    volume_metadata.get('attached_mode') != mode):
                msg = _("being attached by different mode")
                raise exception.InvalidVolume(reason=msg)
        elif volume['status'] != "available":
            msg = _("status must be available")
            raise exception.InvalidVolume(reason=msg)
        # TODO(jdg): attach_time column is currently varchar
        # we should update this to a date-time object
        # also consider adding detach_time?
        self.db.volume_update(context, volume_id,
                              {"instance_uuid": instance_uuid,
                               "attached_host": host_name,
                               "status": "attaching",
                               "attach_time": timeutils.strtime()})
        self.db.volume_admin_metadata_update(context.elevated(),
                                             volume_id,
                                             {"attached_mode": mode},
                                             False)
        if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})
            raise exception.InvalidUUID(uuid=instance_uuid)
        host_name_sanitized = utils.sanitize_hostname(
            host_name) if host_name else None
        volume = self.db.volume_get(context, volume_id)
        # Read-only volumes may only be attached in 'ro' mode.
        if volume_metadata.get('readonly') == 'True' and mode != 'ro':
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})
            raise exception.InvalidVolumeAttachMode(mode=mode,
                                                    volume_id=volume_id)
        try:
            self.driver.attach_volume(context,
                                      volume,
                                      instance_uuid,
                                      host_name_sanitized,
                                      mountpoint)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
        self.db.volume_attached(context.elevated(),
                                volume_id,
                                instance_uuid,
                                host_name_sanitized,
                                mountpoint)
    return do_attach()
@utils.require_driver_initialized
def detach_volume(self, context, volume_id):
    """Updates db to show volume is detached.

    Driver failures set 'error_detaching' and re-raise; on success the
    attachment record and 'attached_mode' metadata are removed.
    """
    # TODO(vish): refactor this into a more general "unreserve"
    # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
    volume = self.db.volume_get(context, volume_id)
    try:
        self.driver.detach_volume(context, volume)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context,
                                  volume_id,
                                  {'status': 'error_detaching'})
    self.db.volume_detached(context.elevated(), volume_id)
    self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
                                         'attached_mode')
    # Check for https://bugs.launchpad.net/cinder/+bug/1065702
    volume = self.db.volume_get(context, volume_id)
    if (volume['provider_location'] and
            volume['name'] not in volume['provider_location']):
        self.driver.ensure_export(context, volume)
@utils.require_driver_initialized
def copy_volume_to_image(self, context, volume_id, image_meta):
    """Uploads the specified volume to Glance.

    image_meta is a dictionary containing the following keys:
    'id', 'container_format', 'disk_format'

    The volume status is restored ('available' or 'in-use' depending on
    attachment) whether or not the upload succeeds.
    """
    payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
    # BUG FIX: pre-initialize so the finally block cannot raise a
    # NameError when the very first DB lookup fails.
    volume = None
    try:
        volume = self.db.volume_get(context, volume_id)
        self.driver.ensure_export(context.elevated(), volume)
        image_service, image_id = \
            glance.get_remote_image_service(context, image_meta['id'])
        self.driver.copy_volume_to_image(context, volume, image_service,
                                         image_meta)
        LOG.debug(_("Uploaded volume %(volume_id)s to "
                    "image (%(image_id)s) successfully"),
                  {'volume_id': volume_id, 'image_id': image_id})
    except Exception as error:
        with excutils.save_and_reraise_exception():
            payload['message'] = unicode(error)
    finally:
        # Restore volume status; skip entirely if the volume was never
        # fetched (behavior otherwise unchanged).
        if volume is not None:
            if (volume['instance_uuid'] is None and
                    volume['attached_host'] is None):
                self.db.volume_update(context, volume_id,
                                      {'status': 'available'})
            else:
                self.db.volume_update(context, volume_id,
                                      {'status': 'in-use'})
@utils.require_driver_initialized
def initialize_connection(self, context, volume_id, connector):
    """Prepare volume for connection from host represented by connector.
    This method calls the driver initialize_connection and returns
    it to the caller. The connector parameter is a dictionary with
    information about the host that will connect to the volume in the
    following format::
    {
    'ip': ip,
    'initiator': initiator,
    }
    ip: the ip address of the connecting machine
    initiator: the iscsi initiator name of the connecting machine.
    This can be None if the connecting machine does not support iscsi
    connections.
    driver is responsible for doing any necessary security setup and
    returning a connection_info dictionary in the following format::
    {
    'driver_volume_type': driver_volume_type,
    'data': data,
    }
    driver_volume_type: a string to identify the type of volume. This
    can be used by the calling code to determine the
    strategy for connecting to the volume. This could
    be 'iscsi', 'rbd', 'sheepdog', etc.
    data: this is the data that the calling code will use to connect
    to the volume. Keep in mind that this will be serialized to
    json in various places, so it should not contain any non-json
    data types.
    """
    volume = self.db.volume_get(context, volume_id)
    self.driver.validate_connector(connector)
    conn_info = self.driver.initialize_connection(volume, connector)
    # Add qos_specs to connection info
    typeid = volume['volume_type_id']
    specs = {}
    if typeid:
        res = volume_types.get_volume_type_qos_specs(typeid)
        specs = res['qos_specs']
    # Don't pass qos_spec as empty dict
    qos_spec = dict(qos_spec=specs if specs else None)
    conn_info['data'].update(qos_spec)
    # Add access_mode to connection info
    volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
                                                        volume_id)
    if conn_info['data'].get('access_mode') is None:
        # Fall back to the recorded attach mode, then to the volume's
        # readonly flag, when the driver did not set an access mode.
        access_mode = volume_metadata.get('attached_mode')
        if access_mode is None:
            # NOTE(zhiyan): client didn't call 'os-attach' before
            access_mode = ('ro'
                           if volume_metadata.get('readonly') == 'True'
                           else 'rw')
        conn_info['data']['access_mode'] = access_mode
    return conn_info
@utils.require_driver_initialized
def terminate_connection(self, context, volume_id, connector, force=False):
    """Cleanup connection from host represented by connector.
    The format of connector is the same as for initialize_connection.
    """
    # Fetch the volume record and hand cleanup over to the driver.
    volume = self.db.volume_get(context, volume_id)
    self.driver.terminate_connection(volume, connector, force=force)
@utils.require_driver_initialized
def accept_transfer(self, context, volume_id, new_user, new_project):
    # NOTE(jdg): need elevated context as we haven't "given" the vol
    # yet
    elevated = context.elevated()
    volume_ref = self.db.volume_get(elevated, volume_id)
    self.driver.accept_transfer(context, volume_ref, new_user, new_project)
def _migrate_volume_generic(self, ctxt, volume, host):
    """Host-assisted migration: create a copy on the destination host
    and copy the data over (directly for available volumes, via Nova
    for attached ones).
    """
    rpcapi = volume_rpcapi.VolumeAPI()
    # Create new volume on remote host
    new_vol_values = {}
    for k, v in volume.iteritems():
        new_vol_values[k] = v
    # id/_name_id must be freshly assigned by the DB layer.
    del new_vol_values['id']
    del new_vol_values['_name_id']
    # We don't copy volume_type because the db sets that according to
    # volume_type_id, which we do copy
    del new_vol_values['volume_type']
    new_vol_values['host'] = host['host']
    new_vol_values['status'] = 'creating'
    # Tag the new volume so both sides of the migration can find each
    # other ('target:<source id>').
    new_vol_values['migration_status'] = 'target:%s' % volume['id']
    new_vol_values['attach_status'] = 'detached'
    new_volume = self.db.volume_create(ctxt, new_vol_values)
    rpcapi.create_volume(ctxt, new_volume, host['host'],
                         None, None, allow_reschedule=False)
    # Wait for new_volume to become ready
    starttime = time.time()
    deadline = starttime + CONF.migration_create_volume_timeout_secs
    new_volume = self.db.volume_get(ctxt, new_volume['id'])
    tries = 0
    while new_volume['status'] != 'available':
        tries = tries + 1
        now = time.time()
        if new_volume['status'] == 'error':
            msg = _("failed to create new_volume on destination host")
            raise exception.VolumeMigrationFailed(reason=msg)
        elif now > deadline:
            msg = _("timeout creating new_volume on destination host")
            raise exception.VolumeMigrationFailed(reason=msg)
        else:
            # Quadratic backoff between DB polls.
            time.sleep(tries ** 2)
        new_volume = self.db.volume_get(ctxt, new_volume['id'])
    # Copy the source volume to the destination volume
    try:
        if volume['status'] == 'available':
            self.driver.copy_volume_data(ctxt, volume, new_volume,
                                         remote='dest')
            # The above call is synchronous so we complete the migration
            self.migrate_volume_completion(ctxt, volume['id'],
                                           new_volume['id'], error=False)
        else:
            nova_api = compute.API()
            # This is an async call to Nova, which will call the completion
            # when it's done
            nova_api.update_server_volume(ctxt, volume['instance_uuid'],
                                          volume['id'], new_volume['id'])
    except Exception:
        with excutils.save_and_reraise_exception():
            msg = _("Failed to copy volume %(vol1)s to %(vol2)s")
            LOG.error(msg % {'vol1': volume['id'],
                             'vol2': new_volume['id']})
            volume = self.db.volume_get(ctxt, volume['id'])
            # If we're in the completing phase don't delete the target
            # because we may have already deleted the source!
            if volume['migration_status'] == 'migrating':
                rpcapi.delete_volume(ctxt, new_volume)
                new_volume['migration_status'] = None
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
                              error=False):
    """Finish (or roll back, when error=True) a host-assisted migration
    by swapping the DB records and destroying the leftover volume.
    """
    volume = self.db.volume_get(ctxt, volume_id)
    new_volume = self.db.volume_get(ctxt, new_volume_id)
    rpcapi = volume_rpcapi.VolumeAPI()
    if error:
        # Roll back: drop the target volume and clear migration state.
        new_volume['migration_status'] = None
        rpcapi.delete_volume(ctxt, new_volume)
        self.db.volume_update(ctxt, volume_id, {'migration_status': None})
        return volume_id
    self.db.volume_update(ctxt, volume_id,
                          {'migration_status': 'completing'})
    # Delete the source volume (if it fails, don't fail the migration)
    try:
        self.delete_volume(ctxt, volume_id)
    except Exception as ex:
        msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
        LOG.error(msg % {'vol': volume_id, 'err': ex})
    # Move the new volume's data under the original volume id, then
    # remove the now-redundant target record.
    self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
    self.db.volume_destroy(ctxt, new_volume_id)
    self.db.volume_update(ctxt, volume_id, {'migration_status': None})
    return volume['id']
@utils.require_driver_initialized
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):
    """Migrate the volume to the specified host (called on source host).

    Tries a driver-optimized migration first (unless force_host_copy),
    falling back to the generic host-assisted copy. On failure the
    migration status is cleared and the export re-created.
    """
    volume_ref = self.db.volume_get(ctxt, volume_id)
    model_update = None
    moved = False
    self.db.volume_update(ctxt, volume_ref['id'],
                          {'migration_status': 'migrating'})
    if not force_host_copy:
        try:
            LOG.debug(_("volume %s: calling driver migrate_volume"),
                      volume_ref['id'])
            moved, model_update = self.driver.migrate_volume(ctxt,
                                                             volume_ref,
                                                             host)
            if moved:
                updates = {'host': host['host'],
                           'migration_status': None}
                if model_update:
                    updates.update(model_update)
                volume_ref = self.db.volume_update(ctxt,
                                                   volume_ref['id'],
                                                   updates)
        except Exception:
            with excutils.save_and_reraise_exception():
                updates = {'migration_status': None}
                model_update = self.driver.create_export(ctxt, volume_ref)
                if model_update:
                    updates.update(model_update)
                self.db.volume_update(ctxt, volume_ref['id'], updates)
    if not moved:
        # Driver could not (or was told not to) migrate: fall back to
        # the generic create-copy-swap migration.
        try:
            self._migrate_volume_generic(ctxt, volume_ref, host)
        except Exception:
            with excutils.save_and_reraise_exception():
                updates = {'migration_status': None}
                model_update = self.driver.create_export(ctxt, volume_ref)
                if model_update:
                    updates.update(model_update)
                self.db.volume_update(ctxt, volume_ref['id'], updates)
@periodic_task.periodic_task
def _report_driver_status(self, context):
    """Periodically refresh driver stats and queue them for the schedulers."""
    LOG.info(_("Updating volume status"))
    # Guard clause: nothing to report until the driver is up.
    if not self.driver.initialized:
        LOG.warning(_('Unable to update stats, driver is '
                      'uninitialized'))
        return
    volume_stats = self.driver.get_volume_stats(refresh=True)
    if volume_stats:
        # This will grab info about the host and queue it
        # to be sent to the Schedulers.
        self.update_service_capabilities(volume_stats)
def publish_service_capabilities(self, context):
    """Collect driver status and then publish."""
    # Refresh stats first so the published capabilities are current.
    self._report_driver_status(context)
    self._publish_service_capabilities(context)
def _reset_stats(self):
    # Drop the cached per-volume stats so they are recomputed next cycle.
    LOG.info(_("Clear capabilities"))
    self._last_volume_stats = []
def notification(self, context, event):
    """Log the received event and reset cached stats."""
    LOG.info(_("Notification {%s} received"), event)
    self._reset_stats()
def _notify_about_volume_usage(self,
                               context,
                               volume,
                               event_suffix,
                               extra_usage_info=None):
    # Thin wrapper adding this manager's host to the usage notification.
    volume_utils.notify_about_volume_usage(
        context, volume, event_suffix,
        extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
                                 context,
                                 snapshot,
                                 event_suffix,
                                 extra_usage_info=None):
    # Thin wrapper adding this manager's host to the usage notification.
    volume_utils.notify_about_snapshot_usage(
        context, snapshot, event_suffix,
        extra_usage_info=extra_usage_info, host=self.host)
@utils.require_driver_initialized
def extend_volume(self, context, volume_id, new_size):
    """Grow a volume to new_size (GB).

    Reserves quota for the size delta first; on quota failure or driver
    failure the volume is marked 'error_extending' and the reservation
    rolled back.
    """
    volume = self.db.volume_get(context, volume_id)
    size_increase = (int(new_size)) - volume['size']
    try:
        reservations = QUOTAS.reserve(context, gigabytes=+size_increase)
    except exception.OverQuota as exc:
        self.db.volume_update(context, volume['id'],
                              {'status': 'error_extending'})
        overs = exc.kwargs['overs']
        usages = exc.kwargs['usages']
        quotas = exc.kwargs['quotas']

        def _consumed(name):
            # Quota already spoken for = reserved + in use.
            return (usages[name]['reserved'] + usages[name]['in_use'])

        if 'gigabytes' in overs:
            msg = _("Quota exceeded for %(s_pid)s, "
                    "tried to extend volume by "
                    "%(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
                    "already consumed)")
            LOG.error(msg % {'s_pid': context.project_id,
                             's_size': size_increase,
                             'd_consumed': _consumed('gigabytes'),
                             'd_quota': quotas['gigabytes']})
        return
    self._notify_about_volume_usage(context, volume, "resize.start")
    try:
        LOG.info(_("volume %s: extending"), volume['id'])
        self.driver.extend_volume(volume, new_size)
        LOG.info(_("volume %s: extended successfully"), volume['id'])
    except Exception:
        LOG.exception(_("volume %s: Error trying to extend volume"),
                      volume_id)
        try:
            self.db.volume_update(context, volume['id'],
                                  {'status': 'error_extending'})
        finally:
            # Never leak the reservation on failure.
            QUOTAS.rollback(context, reservations)
        return
    QUOTAS.commit(context, reservations)
    self.db.volume_update(context, volume['id'], {'size': int(new_size),
                                                  'status': 'available'})
    self._notify_about_volume_usage(
        context, volume, "resize.end",
        extra_usage_info={'size': int(new_size)})
|
|
#!/usr/bin/env python3
import os
import pickle
import prody
from time import perf_counter
# own modules
import consensx.graph as graph
import consensx.calc as calc
from consensx.misc.natural_sort import natural_sort
from .models import CSX_upload
def get_pdb_models(path):
    """Return the model_*.pdb filenames in *path*, naturally sorted."""
    matches = [
        name for name in os.listdir(path)
        if name.startswith("model_") and name.endswith(".pdb")
    ]
    return natural_sort(matches)
class ChemshiftModelData:
    """Class for per model chemical shift data."""

    def __init__(self, type_dict):
        # Sequence of per-model dicts mapping shift type -> data.
        self.type_dict = type_dict

    def get_type_data(self, my_type):
        """Collect the *my_type* entry of every model, preserving order."""
        return [model[my_type] for model in self.type_dict]
class DumpedData:
    """Lazy container for the pickled calculation dumps of one job.

    Each ``load_*`` method reads the corresponding pickle file(s) found
    under *path* and flips the matching ``*_is_loaded`` flag. Files are
    now opened via a context manager, fixing the leaked file handles of
    the previous ``pickle.load(open(...))`` pattern.
    """

    def __init__(self):
        self.RDC_is_loaded = False
        self.RDC_lists = None
        self.RDC_model_data = None
        self.S2_is_loaded = False
        self.S2_dict = None
        self.S2_fit = False
        self.S2_fit_range = None
        self.PDB_is_loaded = False
        self.pdb_model_data = None
        self.Jcoup_is_loaded = False
        self.Jcoup_dict = None
        self.Jcoup_model_data = None
        self.ChemShift_is_loaded = False
        self.ChemShift_lists = None
        self.ChemShift_model_data = None

    @staticmethod
    def _load_pickle(file_path):
        # Ensure the file handle is closed even if unpickling raises.
        with open(file_path, 'rb') as dump_file:
            return pickle.load(dump_file)

    def load_rdc_dump(self, path):
        self.RDC_lists = self._load_pickle(path + "/RDC_lists.pickle")
        self.RDC_model_data = self._load_pickle(
            path + "/RDC_model_data.pickle")
        self.RDC_is_loaded = True

    def load_s2_dump(self, path):
        # Dump layout: [S2_dict, fit flag, fit range].
        s2_dump = self._load_pickle(path + "/S2_dict.pickle")
        self.S2_dict = s2_dump[0]
        self.S2_fit = s2_dump[1]
        self.S2_fit_range = s2_dump[2]
        self.S2_is_loaded = True

    def load_pdb_data(self, path):
        self.pdb_model_data = self._load_pickle(path + "/PDB_model.pickle")
        self.PDB_is_loaded = True

    def load_jcoup_data(self, path):
        self.Jcoup_dict = self._load_pickle(path + "/Jcoup_dict.pickle")
        self.Jcoup_model_data = self._load_pickle(
            path + "/Jcoup_model.pickle")
        self.Jcoup_is_loaded = True

    def load_chemshift_data(self, path):
        self.ChemShift_lists = self._load_pickle(
            path + "/ChemShift_lists.pickle")
        model_data = self._load_pickle(
            path + "/ChemShift_model_data.pickle")
        self.ChemShift_model_data = ChemshiftModelData(model_data)
        self.ChemShift_is_loaded = True
def averageRDCs_on(models, my_data):
    """Returns a dictonary with the average RDCs for the given RDC type:
    averageRDC[residue] = value"""
    totals = {}
    for model_num, model in enumerate(my_data):
        if model_num not in models:
            continue
        for resnum, value in model.items():
            totals[resnum] = totals.get(resnum, 0) + value
    # Average over the number of *requested* models, as before.
    return {resnum: total / len(models) for resnum, total in totals.items()}
def averageJCoup_on(models, my_data):
    """Returns a dictonary with the average J-Couplings for the given type:
    averageJCoup[residue] = value"""
    selected = [model for idx, model in enumerate(my_data) if idx in models]
    sums = {}
    for model in selected:
        for resnum, value in model.items():
            if resnum in sums:
                sums[resnum] += value
            else:
                sums[resnum] = value
    # Divide by the requested model count, matching the original contract.
    return {resnum: total / len(models) for resnum, total in sums.items()}
def averageChemShift_on(models, my_data):
    """Returns a dictonary with the average chemical shifts for the given type:
    averageChemShift[residue] = value"""
    wanted = set(models)
    accumulated = {}
    for model_num, model in enumerate(my_data):
        if model_num not in wanted:
            continue
        for resnum, value in model.items():
            try:
                accumulated[resnum] += value
            except KeyError:
                accumulated[resnum] = value
    n_models = len(models)
    return {res: total / n_models for res, total in accumulated.items()}
def averageS2_on(models, model_data, S2_dict, S2_type, fit, fit_range):
    """Returns a dictonary with the average S2 values for the given S2 type:
    averageS2[residue] = value"""
    # NOTE(review): my_models is built but never used below. The
    # setACSIndex() calls do mutate model_data.atomgroup's active
    # coordinate set as a side effect -- confirm whether calc.s2_values
    # depends on that before removing this loop.
    my_models = []
    for model_num in models:
        model_data.atomgroup.setACSIndex(model_num)
        my_models.append(model_data.atomgroup[:])
    # csx_obj.PDB_model.is_fitted = False
    return calc.s2_values(
        model_data, models, S2_dict[S2_type], S2_type, fit, fit_range
    )
def get_two_random_from(models):
    """Return two randomly chosen distinct elements of *models*.

    Uses random.sample so the caller's list is no longer shuffled in
    place (the previous implementation mutated its argument). If fewer
    than two models are given, all of them are returned, matching the
    old slice behavior.
    """
    import random
    if len(models) < 2:
        return list(models)
    return random.sample(models, 2)
class Selection:
def __init__(self, my_path, original_values, user_selection_json):
self.max_size = None
self.min_size = None
self.overdrive = None
self.measure = None
self.RDC_lists = None
self.RDC_model_data = None
self.S2_dict = None
self.pdb_data = None
self.Jcoup_dict = None
self.JCoup_modell_data = None
self.ChemShifts = None
self.ChemShift_model_data = None
self.my_path = my_path
self.original_values = original_values
self.dumped_data = DumpedData()
self.user_sel = []
for key, value in user_selection_json.items():
# set MEASURE for selection
if key == "MEASURE":
self.measure = value
print("MEASURE is set to:", self.measure)
if key == "MIN_SIZE":
self.min_size = int(value)
if key == "MAX_SIZE":
self.max_size = int(value)
if key == "OVERDRIVE":
self.overdrive = int(value)
# read RDC selections
if key.split('_')[0] == "RDC":
my_list = int(key.split('_')[1])
my_type = key.split('_')[2]
# TODO - we could avoid this
if my_type == "0CAC":
my_type = "0_CA_C"
elif my_type == "0HACA":
my_type = "0_HA_CA"
elif my_type == "0HN":
my_type = "0_H_N"
elif my_type == "0NH":
my_type = "0_N_H"
elif my_type == "1NC":
my_type = "1_N_C"
elif my_type == "0CCA":
my_type = "0_C_CA"
elif my_type == "0CAHA":
my_type = "0_CA_HA"
elif my_type == "0CACB":
my_type = "0_CA_CB"
elif my_type == "0CH":
my_type = "0_C_H"
elif my_type == "0CN":
my_type = "0_C_N"
elif my_type == "0HAC":
my_type = "0_HA_C"
elif my_type == "0HAN":
my_type = "0_HA_N"
elif my_type == "1HC":
my_type = "1_H_C"
my_weight = float(value)
self.user_sel.append(["RDC", my_list, my_type, my_weight])
# read S2 selections
elif key.split('_')[0] == "S2":
my_type = key.split('_')[1]
my_weight = float(value)
self.user_sel.append(["S2", my_type, my_weight])
# read J-coupling selections
elif key.split('_')[0] == "JCoup":
my_type = key.split('_')[1]
my_weight = float(value)
self.user_sel.append(["JCoup", my_type, my_weight])
# read chemical shifts selections
elif key.split('_')[0] == "CS":
my_type = '_'.join(key.split('_')[1:])
my_weight = float(value)
self.user_sel.append(["ChemShift", my_type, my_weight])
def run_selection(self):
pdb_output_name = self.my_path + "/raw.pdb"
if os.path.isfile(pdb_output_name):
os.remove(pdb_output_name)
if os.path.isfile(self.my_path + "/selected.pdb"):
os.remove(self.my_path + "/selected.pdb")
in_selection, iter_data = self.selection_on()
for key, val in iter_data.items():
print("CALCED ", key, val)
self.dumped_data.load_pdb_data(self.my_path)
sel_ensemble = self.dumped_data.pdb_model_data.atomgroup.copy()
for model_num in reversed(range(sel_ensemble.numCoordsets())):
if model_num not in in_selection:
sel_ensemble.delCoordset(model_num)
num_coordsets = sel_ensemble.numCoordsets()
print("NUM_COORDSETS: ", num_coordsets)
prody.alignCoordsets(sel_ensemble.calpha)
prody.writePDB(pdb_output_name, sel_ensemble)
in_selection = [str(x+1) for x in sorted(in_selection)]
dummy_pdb = open(pdb_output_name, 'r')
output_pdb = open(self.my_path + "/selected.pdb", "w")
for line in dummy_pdb:
output_pdb.write(line)
if 'REMARK' in line:
model_line = "REMARK ORIGINAL MODELS: "
for model_num in in_selection:
if len(model_line) < 76:
model_line += model_num + " "
else:
output_pdb.write(model_line + "\n")
model_line = "REMARK ORIGINAL MODELS: "
output_pdb.write(model_line + "\n")
calc_id = self.my_path.split('/')[-1]
print('calcID', calc_id)
db_entry = CSX_upload.objects.get(id_code=calc_id)
print(db_entry.PDB_file)
print("db_entry", db_entry)
pca_image_names = graph.pca.create_pca_comparison(
self.my_path, db_entry.PDB_file, in_selection
)
return num_coordsets, iter_data, pca_image_names
    def selection_on(self):
        """Greedy forward selection of PDB models against experimental data.

        Repeatedly tries adding each not-yet-selected model to the current
        ensemble, scores the candidate ensemble against every user-selected
        data type (RDC, S2, J-coupling, chemical shifts) with the configured
        measure ("correlation" maximised; "q-value"/"rmsd" minimised), and
        keeps the best addition. Stops when the score no longer improves
        (optionally tolerating ``self.overdrive`` worse steps) or when
        ``self.max_size`` is reached; ``self.min_size`` forces growth early on.

        Returns:
            tuple: (list of selected model indices, dict of per-datatype
            scores for the final selection).
        """
        pdb_models = get_pdb_models(self.my_path)
        print("user_sel: ", self.user_sel)
        # Lazily load only the data types the user actually selected.
        for sel in self.user_sel:
            print(sel)
            if "RDC" in sel and not self.dumped_data.RDC_is_loaded:
                self.dumped_data.load_rdc_dump(self.my_path)
                print("RDC_lists assigned")
                # TODO do we need double names?
                self.RDC_lists = self.dumped_data.RDC_lists
                self.RDC_model_data = self.dumped_data.RDC_model_data
            if "S2" in sel and not self.dumped_data.S2_is_loaded:
                self.dumped_data.load_s2_dump(self.my_path)
                self.S2_dict = self.dumped_data.S2_dict
                # NOTE(review): `fit` and `fit_range` are assigned but never
                # used below — the S2 branch reads them from dumped_data.
                fit = self.dumped_data.S2_fit
                fit_range = self.dumped_data.S2_fit_range
                self.dumped_data.load_pdb_data(self.my_path)
                self.pdb_data = self.dumped_data.pdb_model_data
            if "JCoup" in sel and not self.dumped_data.Jcoup_is_loaded:
                self.dumped_data.load_jcoup_data(self.my_path)
                self.Jcoup_dict = self.dumped_data.Jcoup_dict
                self.JCoup_modell_data = self.dumped_data.Jcoup_model_data
            if "ChemShift" in sel and not self.dumped_data.ChemShift_is_loaded:
                self.dumped_data.load_chemshift_data(self.my_path)
                self.ChemShifts = self.dumped_data.ChemShift_lists
                self.ChemShift_model_data = self.dumped_data.ChemShift_model_data
        in_selection = []
        print("STARTING WITH MODEL(S):", in_selection)
        first_run = True
        first_try = True
        above_best = 0
        iter_scores = {}
        divide_by = 0.0
        num_models = len(pdb_models)
        # Seed prev_best with a sentinel worse than any real score:
        # correlation is in [-1, 1]; q-value/rmsd are minimised.
        if self.measure == "correlation":
            prev_best = -2
        else:
            prev_best = 1000
        t1_start = perf_counter()
        while True:
            model_scores = {}
            # iterate on all PDB models
            for num in range(num_models):
                # skip models already included in selection
                if num in in_selection:
                    continue
                divide_by = 0.0 # variable for storing weight sum
                pdb_sel = [num] + in_selection # creating test ensemble
                pdb_sel_key = tuple(sorted(pdb_sel))
                iter_scores[pdb_sel_key] = {}
                # Score the candidate ensemble on every selected data type;
                # per-type scores are weighted into model_scores[num].
                for sel_data in self.user_sel:
                    if sel_data[0] == "RDC":
                        RDC_num = sel_data[1]
                        RDC_type = sel_data[2]
                        RDC_weight = sel_data[3]
                        my_data = self.RDC_model_data.rdc_data[RDC_num][RDC_type]
                        averageRDC = averageRDCs_on(pdb_sel, my_data)
                        my_RDC = self.RDC_lists[RDC_num - 1][RDC_type]
                        calced = None
                        if self.measure == "correlation":
                            calced = calc.correlation(averageRDC, my_RDC)
                        elif self.measure == "q-value":
                            calced = calc.q_value(averageRDC, my_RDC)
                        elif self.measure == "rmsd":
                            calced = calc.rmsd(averageRDC, my_RDC)
                        if num in model_scores.keys():
                            model_scores[num] += calced * RDC_weight
                        else:
                            model_scores[num] = calced * RDC_weight
                        divide_by += RDC_weight
                        my_type = "".join(sel_data[2].split('_'))
                        my_key = (
                            sel_data[0] + '_' + str(sel_data[1]) + '_' + my_type
                        )
                        iter_scores[pdb_sel_key][my_key] = calced
                    elif sel_data[0] == "S2":
                        S2_type = sel_data[1]
                        S2_weight = sel_data[2]
                        averageS2 = averageS2_on(
                            pdb_sel, self.pdb_data,
                            self.S2_dict, S2_type,
                            self.dumped_data.S2_fit, self.dumped_data.S2_fit_range
                        )
                        experimental = self.S2_dict[S2_type]
                        calced = None
                        if self.measure == "correlation":
                            calced = calc.correlation(averageS2, experimental)
                        elif self.measure == "q-value":
                            calced = calc.q_value(averageS2, experimental)
                        elif self.measure == "rmsd":
                            calced = calc.rmsd(averageS2, experimental)
                        if num in model_scores.keys():
                            model_scores[num] += calced * S2_weight
                        else:
                            model_scores[num] = calced * S2_weight
                        divide_by += S2_weight
                        iter_scores[pdb_sel_key][sel_data[0] + '_' + str(sel_data[1])] = calced
                    elif sel_data[0] == "JCoup":
                        JCoup_type = sel_data[1]
                        JCoup_weight = sel_data[2]
                        my_type = self.JCoup_modell_data[JCoup_type]
                        averageJCoup = averageJCoup_on(pdb_sel, my_type)
                        my_JCoup = self.Jcoup_dict[JCoup_type]
                        calced = None
                        if self.measure == "correlation":
                            calced = calc.correlation(averageJCoup, my_JCoup)
                        elif self.measure == "q-value":
                            calced = calc.q_value(averageJCoup, my_JCoup)
                        elif self.measure == "rmsd":
                            calced = calc.rmsd(averageJCoup, my_JCoup)
                        if num in model_scores.keys():
                            model_scores[num] += calced * JCoup_weight
                        else:
                            model_scores[num] = calced * JCoup_weight
                        divide_by += JCoup_weight
                        iter_scores[pdb_sel_key][sel_data[0] + '_' + str(sel_data[1])] = calced
                    elif sel_data[0] == "ChemShift":
                        ChemShift_type = sel_data[1]
                        ChemShift_weight = sel_data[2]
                        my_ChemShifts = self.ChemShifts[0][ChemShift_type]
                        my_type = self.ChemShift_model_data.get_type_data(
                            ChemShift_type
                        )
                        averageChemShift = averageChemShift_on(pdb_sel, my_type)
                        calced = None
                        if self.measure == "correlation":
                            calced = calc.correlation(
                                averageChemShift, my_ChemShifts
                            )
                        elif self.measure == "q-value":
                            calced = calc.q_value(
                                averageChemShift, my_ChemShifts
                            )
                        elif self.measure == "rmsd":
                            calced = calc.rmsd(
                                averageChemShift, my_ChemShifts
                            )
                        if num in model_scores.keys():
                            model_scores[num] += calced * ChemShift_weight
                        else:
                            model_scores[num] = calced * ChemShift_weight
                        divide_by += ChemShift_weight
                        iter_scores[pdb_sel_key]["CS_" + sel_data[1]] = calced
            # Pick the candidate model with the best weighted average score.
            best_num = -1
            if self.measure == "correlation":
                best_val = -2
            else:
                best_val = 1000
            for num in model_scores.keys():
                model_score = model_scores[num] / divide_by
                if self.measure == "correlation" and model_score > best_val:
                    best_val = model_score
                    best_num = num
                elif self.measure in ["q-value", "rmsd"] and model_score < best_val:
                    best_val = model_score
                    best_num = num
            # NOTE(review): on the very first pass best_val is reset to the
            # sentinel, which forces the first model to be accepted below.
            if first_run:
                first_run = False
                if self.measure == "correlation":
                    best_val = -2
                else:
                    best_val = 1000
            print("###### ITERATION ######")
            print("prev best: " + str(prev_best))
            print("current best: " + str(best_val))
            # Hard stop when the ensemble already has max_size members.
            if self.max_size and len(in_selection) == self.max_size:
                print("size limit reached!")
                if self.overdrive:
                    if (
                        (self.measure == "correlation" and best_val > prev_best) or
                        (self.measure in ["q-value", "rmsd"] and best_val < prev_best)
                    ):
                        above_best = 0
                    print("overdrive is:", above_best)
                    # remove overdrive models
                    for _ in range(above_best):
                        print("POP", in_selection[-1])
                        del in_selection[-1]
                    print("CURRENT SEL:", in_selection)
                    in_selection.sort()
                    if above_best == 0:
                        print(
                            "EXIT -> selection reached max desired size \
                            NOT in overdrive"
                        )
                        t1_stop = perf_counter()
                        print("[selection] Selection in seconds:", t1_stop - t1_start)
                        return in_selection, iter_scores[tuple(sorted(in_selection))]
                    else:
                        print(
                            "EXIT -> selection reached max desired size \
                            in overdrive"
                        )
                        t1_stop = perf_counter()
                        print("[selection] Selection in seconds:", t1_stop - t1_start)
                        return in_selection, iter_scores[tuple(sorted(in_selection))]
            # if new selection results a higher score
            if (
                (self.measure == "correlation" and best_val > prev_best) or
                (self.measure in ["q-value", "rmsd"] and best_val < prev_best)
            ):
                # reset above the best threshold
                above_best = 0
                prev_best = best_val
                overdrive_best = -1
                print("CURRENT SEL:", in_selection)
                print("APPEND:", best_num)
                in_selection.append(best_num)
                # check if selection reached the desired maximal size (if any)
                if self.max_size and len(in_selection) - 1 == self.max_size:
                    print("size limit reached!")
                    # in_selection = [x+1 for x in in_selection]
                    in_selection.sort()
                    # print("numbered as in PDB file:\n", in_selection)
                    print("EXIT -> selection reached max desired size")
                    t1_stop = perf_counter()
                    print("[selection] Selection in seconds:", t1_stop - t1_start)
                    return in_selection, iter_scores[tuple(sorted(in_selection))]
            # if new selection results a lower score
            else:
                # check if overdrive is enabled
                if self.overdrive and self.overdrive > above_best:
                    # don't overdrive until minimum ensemble size reached
                    if self.min_size and len(in_selection) <= self.min_size:
                        prev_best = best_val
                        in_selection.append(best_num)
                        continue
                    # stop iteration if size of the original ensemble reached
                    if len(in_selection) == len(pdb_models):
                        for _ in range(above_best + 1):
                            # print(in_selection)
                            print("POP", in_selection[-1])
                            del in_selection[-1]
                        print("CURRENT SEL:", in_selection)
                        print(
                            "EXIT -> selection reached original size in overdrive"
                        )
                        del in_selection[-1]
                        t1_stop = perf_counter()
                        print("[selection] Selection in seconds:", t1_stop - t1_start)
                        return in_selection, iter_scores[tuple(sorted(in_selection))]
                    above_best += 1
                    print("\x1b[31mwe are in overdrive with \x1b[0m" +
                          str(above_best))
                    overdrive_best = best_val
                    print("overdrive_best: " + str(overdrive_best))
                    print("prev_best: " + str(prev_best))
                    print("CURRENT SEL:", in_selection)
                    print("APPEND:", best_num)
                    in_selection.append(best_num)
                    # A score better than prev_best ends the overdrive streak.
                    if self.measure == "correlation" and overdrive_best > prev_best:
                        prev_best = overdrive_best
                        above_best = 0
                    elif (self.measure in ["q-value", "rmsd"] and
                          overdrive_best < prev_best):
                        prev_best = overdrive_best
                        above_best = 0
                    # Overdrive budget exhausted without improvement:
                    # roll back the speculative additions and exit.
                    if self.overdrive == above_best:
                        if self.measure == "correlation" and overdrive_best < prev_best:
                            for _ in range(above_best + 1):
                                # print(in_selection)
                                print("POP", in_selection[-1])
                                del in_selection[-1]
                            print("CURRENT SEL:", in_selection)
                            print("EXIT -> selection reached max override value")
                            t1_stop = perf_counter()
                            print("[selection] Selection in seconds:", t1_stop - t1_start)
                            return (
                                in_selection, iter_scores[tuple(sorted(in_selection))]
                            )
                        if (self.measure in ["q-value", "rmsd"] and
                                overdrive_best > prev_best):
                            for _ in range(above_best + 1):
                                print("POP", in_selection[-1])
                                del in_selection[-1]
                            print("CURRENT SEL:", in_selection)
                            print("EXIT -> selection reached max override value")
                            t1_stop = perf_counter()
                            print("[selection] Selection in seconds:", t1_stop - t1_start)
                            return (
                                in_selection, iter_scores[tuple(sorted(in_selection))]
                            )
                    continue
                # check if selection reached the desired minimal size (if any)
                if self.min_size and len(in_selection) <= self.min_size:
                    print("we are over the peak!")
                    prev_best = best_val
                    in_selection.append(best_num)
                    continue
                if first_try:
                    first_try = False
                    continue
                # No overdrive: drop the last (worsening) addition and stop.
                if len(in_selection) > 1:
                    del in_selection[-1]
                print("EXIT -> selection got a worse score, no override")
                t1_stop = perf_counter()
                print("[selection] Selection in seconds:", t1_stop - t1_start)
                return in_selection, iter_scores[tuple(sorted(in_selection))]
|
|
#!/usr/bin/python -u
import sys
import os
import subprocess
import time
from datetime import datetime
import shutil
import tempfile
import hashlib
import re
import logging
import argparse
################
#### Telegraf Variables
################
# Packaging variables
PACKAGE_NAME = "telegraf"
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/telegraf"
SCRIPT_DIR = "/usr/lib/telegraf/scripts"
CONFIG_DIR = "/etc/telegraf"
CONFIG_DIR_D = "/etc/telegraf/telegraf.d"
LOGROTATE_DIR = "/etc/logrotate.d"
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/telegraf.service"
LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
DEFAULT_CONFIG = "etc/telegraf.conf"
DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf"
POSTINST_SCRIPT = "scripts/post-install.sh"
PREINST_SCRIPT = "scripts/pre-install.sh"
POSTREMOVE_SCRIPT = "scripts/post-remove.sh"
PREREMOVE_SCRIPT = "scripts/pre-remove.sh"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "dl.influxdata.com/telegraf/artifacts"
CONFIGURATION_FILES = [
CONFIG_DIR + '/telegraf.conf',
LOGROTATE_DIR + '/telegraf',
]
# META-PACKAGE VARIABLES
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/telegraf"
MAINTAINER = "support@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."
# SCRIPT START
prereqs = [ 'git', 'go' ]
go_vet_command = "go tool vet -composites=true ./"
optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--license {} \
--maintainer {} \
--config-files {} \
--config-files {} \
--after-install {} \
--before-install {} \
--after-remove {} \
--before-remove {} \
--description \"{}\"".format(
VENDOR,
PACKAGE_URL,
PACKAGE_LICENSE,
MAINTAINER,
CONFIG_DIR + '/telegraf.conf',
LOGROTATE_DIR + '/telegraf',
POSTINST_SCRIPT,
PREINST_SCRIPT,
POSTREMOVE_SCRIPT,
PREREMOVE_SCRIPT,
DESCRIPTION)
targets = {
'telegraf' : './cmd/telegraf',
}
supported_builds = {
"windows": [ "amd64", "i386" ],
"linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x"],
"freebsd": [ "amd64", "i386" ]
}
supported_packages = {
"linux": [ "deb", "rpm", "tar" ],
"windows": [ "zip" ],
"freebsd": [ "tar" ]
}
next_version = '1.9.0'
################
#### Telegraf Functions
################
def print_banner():
    """Log the Telegraf ASCII-art banner at INFO level."""
    logging.info("""
 _____     _                       __
/__   \\___| | ___  __ _ _ __ __ _ / _|
  / /\\/ _ \\ |/ _ \\/ _` | '__/ _` | |_
 / /  | __/ |  __/ (_| | | | (_| |  _|
 \\/   \\___|_|\\___|\\__, |_|  \\__,_|_|
                  |___/
                        Build Script
""")
def create_package_fs(build_root):
    """Create a filesystem structure to mimic the package filesystem.

    Each package directory is re-rooted under `build_root` and created
    with mode 0755.
    """
    logging.debug("Creating a filesystem hierarchy from directory: {}".format(build_root))
    # Using [1:] for the path names due to them being absolute
    # (will overwrite previous paths, per 'os.path.join' documentation)
    package_dirs = (
        INSTALL_ROOT_DIR[1:],
        LOG_DIR[1:],
        SCRIPT_DIR[1:],
        CONFIG_DIR[1:],
        LOGROTATE_DIR[1:],
        CONFIG_DIR_D[1:],
    )
    for rel_dir in package_dirs:
        target = os.path.join(build_root, rel_dir)
        os.makedirs(target)
        os.chmod(target, 0o755)
def package_scripts(build_root, config_only=False, windows=False):
    """Copy the necessary scripts and configuration files to the package
    filesystem.

    With `config_only` or `windows`, only a telegraf.conf is placed at the
    package root; otherwise the init/systemd/logrotate scripts and the
    default config are copied into their package directories (mode 0644).
    """
    if config_only or windows:
        logging.info("Copying configuration to build directory")
        conf_dest = os.path.join(build_root, "telegraf.conf")
        conf_src = DEFAULT_WINDOWS_CONFIG if windows else DEFAULT_CONFIG
        shutil.copyfile(conf_src, conf_dest)
        os.chmod(conf_dest, 0o644)
        return
    logging.info("Copying scripts and configuration to build directory")
    init_dest = os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])
    shutil.copyfile(INIT_SCRIPT, init_dest)
    os.chmod(init_dest, 0o644)
    systemd_dest = os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])
    shutil.copyfile(SYSTEMD_SCRIPT, systemd_dest)
    os.chmod(systemd_dest, 0o644)
    logrotate_dest = os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")
    shutil.copyfile(LOGROTATE_SCRIPT, logrotate_dest)
    os.chmod(logrotate_dest, 0o644)
    conf_dest = os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")
    shutil.copyfile(DEFAULT_CONFIG, conf_dest)
    os.chmod(conf_dest, 0o644)
def run_generate():
    """`go generate` step — intentionally a no-op for Telegraf."""
    return True
def go_get(branch, update=False, no_uncommitted=False):
    """Restore pinned build dependencies via `dep ensure`.

    Returns False (after logging an error) when `no_uncommitted` is set
    and the working tree is dirty; True otherwise. `branch` and `update`
    are accepted for interface compatibility.
    """
    dirty = local_changes()
    if dirty and no_uncommitted:
        logging.error("There are uncommitted changes in the current directory.")
        return False
    logging.info("Retrieving dependencies with `dep`...")
    gopath = os.environ.get("GOPATH", os.path.expanduser("~/go"))
    run("{}/bin/dep ensure -v -vendor-only".format(gopath))
    return True
def run_tests(race, parallel, timeout, no_vet):
    """Test step — currently a no-op for Telegraf; always succeeds."""
    return True
################
#### All Telegraf-specific content above this line
################
def run(command, allow_failure=False, shell=False):
    """Run a shell command (convenience wrapper around subprocess).

    Parameters:
        command (str): command line to execute. Split on whitespace
            unless `shell` is True, in which case it is passed to the
            system shell verbatim.
        allow_failure (bool): when True, failures are logged as warnings
            and None (or the partial output) is returned instead of
            terminating the process.
        shell (bool): run the command through the shell.

    Returns:
        str or None: combined stdout/stderr of the command, stripped,
        or None when the command failed and `allow_failure` is True.
    """
    out = None
    logging.debug("{}".format(command))
    try:
        if shell:
            out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
        else:
            out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
        out = out.decode('utf-8').strip()
    except subprocess.CalledProcessError as e:
        # logging.warn is a deprecated alias; use logging.warning.
        if allow_failure:
            logging.warning("Command '{}' failed with error: {}".format(command, e.output))
            return None
        else:
            logging.error("Command '{}' failed with error: {}".format(command, e.output))
            sys.exit(1)
    except OSError as e:
        # e.g. the binary does not exist (FileNotFoundError).
        if allow_failure:
            logging.warning("Command '{}' failed with error: {}".format(command, e))
            return out
        else:
            logging.error("Command '{}' failed with error: {}".format(command, e))
            sys.exit(1)
    else:
        return out
def create_temp_dir(prefix = None):
    """ Create temporary directory with optional prefix.

    Falls back to '<PACKAGE_NAME>-build.' when no prefix is given.
    """
    chosen = "{}-build.".format(PACKAGE_NAME) if prefix is None else prefix
    return tempfile.mkdtemp(prefix=chosen)
def increment_minor_version(version):
    """Return the version with the minor version incremented and patch
    version set to zero (e.g. "1.2.3" -> "1.3.0").

    Versions that are not exactly three dot-separated components are
    returned unchanged, with a warning.
    """
    ver_list = version.split('.')
    if len(ver_list) != 3:
        # logging.warn is a deprecated alias; use logging.warning.
        logging.warning("Could not determine how to increment version '{}', will just use provided version.".format(version))
        return version
    ver_list[1] = str(int(ver_list[1]) + 1)
    ver_list[2] = str(0)
    inc_version = '.'.join(ver_list)
    logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version))
    return inc_version
def get_current_version_tag():
    """Return the exact git tag for HEAD, or None when HEAD is untagged."""
    return run("git describe --exact-match --tags 2>/dev/null",
               allow_failure=True, shell=True)
def get_current_version():
    """Parse version information from git tag output.

    Strips a single leading 'v' and replaces '-'/'_' separators with '~'.
    Returns None when HEAD carries no exact tag.
    """
    tag = get_current_version_tag()
    if not tag:
        return None
    # Remove leading 'v'
    if tag[0] == 'v':
        tag = tag[1:]
    # Replace any '-'/'_' with '~'
    for separator in ('-', '_'):
        if separator in tag:
            tag = tag.replace(separator, "~")
    return tag
def get_current_commit(short=False):
    """Return the current git commit hash (abbreviated when `short`)."""
    if short:
        cmd = "git log --pretty=format:'%h' -n 1"
    else:
        cmd = "git rev-parse HEAD"
    # The quotes around %h survive the non-shell split; strip them along
    # with surrounding whitespace.
    return run(cmd).strip('\'\n\r ')
def get_current_branch():
    """Return the name of the currently checked-out git branch."""
    return run("git rev-parse --abbrev-ref HEAD").strip()
def local_changes():
    """Return True if there are local un-committed changes."""
    diff = run("git diff-files --ignore-submodules --").strip()
    return len(diff) > 0
def get_system_arch():
    """Return the current system architecture, normalized to the names
    used by the packaging/build matrix ("amd64", "i386", "arm64", "arm",
    or the raw uname value)."""
    machine = os.uname()[4]
    if machine == "x86_64":
        return "amd64"
    if machine == "386":
        return "i386"
    if "arm64" in machine:
        return "arm64"
    if 'arm' in machine:
        # Prevent uname from reporting full ARM arch (eg 'armv7l')
        return "arm"
    return machine
def get_system_platform():
    """Return the current platform name: 'linux' for any Linux flavour,
    otherwise sys.platform verbatim."""
    platform_name = sys.platform
    return "linux" if platform_name.startswith("linux") else platform_name
def get_go_version():
    """Return the installed Go toolchain version (e.g. '1.11.5'), or
    None when the `go version` output cannot be parsed."""
    out = run("go version")
    # Raw string: '\S' in a non-raw literal is an invalid escape sequence
    # (SyntaxWarning/DeprecationWarning on modern Python).
    matches = re.search(r'go version go(\S+)', out)
    if matches is not None:
        return matches.groups()[0].strip()
    return None
def check_path_for(b):
    """Check the user's PATH for the provided binary.

    Returns the first absolute path at which `b` exists and is
    executable, or None when it cannot be found.
    """
    def is_exe(fpath):
        # True when fpath is a regular file with the execute bit set.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    for path in os.environ["PATH"].split(os.pathsep):
        path = path.strip('"')
        full_path = os.path.join(path, b)
        # Use the helper (previously defined but unused — the loop
        # duplicated its body inline).
        if is_exe(full_path):
            return full_path
    return None
def check_environ(build_dir = None):
    """Check environment for common Go variables.

    Logs GOPATH/GOBIN/GOROOT at debug level and warns when the current
    directory is outside GOPATH (only when no explicit `build_dir` is
    given). Always returns True.
    """
    logging.info("Checking environment...")
    for v in [ "GOPATH", "GOBIN", "GOROOT" ]:
        logging.debug("Using '{}' for {}".format(os.environ.get(v), v))
    cwd = os.getcwd()
    if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd:
        # logging.warn is a deprecated alias; use logging.warning.
        logging.warning("Your current directory is not under your GOPATH. This may lead to build failures.")
    return True
def check_prereqs():
    """Verify that every required external tool is available on PATH.

    Logs and returns False on the first missing dependency; True when
    all are present.
    """
    logging.info("Checking for dependencies...")
    for req in prereqs:
        if check_path_for(req):
            continue
        logging.error("Could not find dependency: {}".format(req))
        return False
    return True
def upload_packages(packages, bucket_name=None, overwrite=False):
    """Upload provided package output to AWS S3.

    Parameters:
        packages (list): local file paths to upload.
        bucket_name (str): optional "bucket[/prefix]" destination;
            defaults to DEFAULT_BUCKET.
        overwrite (bool): replace keys that already exist in the bucket.

    Returns:
        bool: False when the `boto` library is unavailable, True otherwise.
    """
    logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
    try:
        # boto is an optional dependency; imported lazily so the rest of
        # the build works without it.
        import boto
        from boto.s3.key import Key
        from boto.s3.connection import OrdinaryCallingFormat
        logging.getLogger("boto").setLevel(logging.WARNING)
    except ImportError:
        logging.warn("Cannot upload packages without 'boto' Python library!")
        return False
    logging.info("Connecting to AWS S3...")
    # Up the number of attempts to 10 from default of 1
    boto.config.add_section("Boto")
    boto.config.set("Boto", "metadata_service_num_attempts", "10")
    c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
    if bucket_name is None:
        bucket_name = DEFAULT_BUCKET
    bucket = c.get_bucket(bucket_name.split('/')[0])
    for p in packages:
        if '/' in bucket_name:
            # Allow for nested paths within the bucket name (ex:
            # bucket/folder). Assuming forward-slashes as path
            # delimiter.
            name = os.path.join('/'.join(bucket_name.split('/')[1:]),
                                os.path.basename(p))
        else:
            name = os.path.basename(p)
        logging.debug("Using key: {}".format(name))
        # Only upload when the key is absent, unless overwriting.
        if bucket.get_key(name) is None or overwrite:
            logging.info("Uploading file {}".format(name))
            k = Key(bucket)
            k.key = name
            if overwrite:
                n = k.set_contents_from_filename(p, replace=True)
            else:
                n = k.set_contents_from_filename(p, replace=False)
            k.make_public()
        else:
            logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
    return True
def go_list(vendor=False, relative=False):
    """
    Return a list of packages
    If vendor is False vendor package are not included
    If relative is True the package prefix defined by PACKAGE_URL is
    stripped (and the root package itself is dropped)
    """
    proc = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    # Popen.communicate() returns bytes on Python 3; decode before
    # splitting, otherwise `out.split('\n')` raises TypeError.
    packages = out.decode('utf-8').split('\n')
    if packages[-1] == '':
        packages = packages[:-1]
    if not vendor:
        # Keep the loop variable distinct from the Popen handle (the
        # original shadowed it with `p`).
        packages = [pkg for pkg in packages if '/vendor/' not in pkg]
    if relative:
        relative_pkgs = []
        for pkg in packages:
            r = pkg.replace(PACKAGE_URL, '.')
            if r != '.':
                relative_pkgs.append(r)
        packages = relative_pkgs
    return packages
def build(version=None,
          platform=None,
          arch=None,
          nightly=False,
          race=False,
          clean=False,
          outdir=".",
          tags=[],
          static=False):
    """Build each target for the specified architecture and platform.

    Assembles and runs a `go build` command per entry in `targets`,
    with GOOS/GOARCH/GOARM and CGO settings derived from `platform` /
    `arch`, embedding branch/commit/version via -ldflags.

    Returns True on success, False for an invalid ARM architecture.

    NOTE(review): `tags=[]` is a mutable default argument (never mutated
    here, but fragile); `static` and `arch` are rebound inside the loop,
    so a "static_" arch affects all subsequent targets.
    """
    logging.info("Starting build for {}/{}...".format(platform, arch))
    logging.info("Using Go version: {}".format(get_go_version()))
    logging.info("Using git branch: {}".format(get_current_branch()))
    logging.info("Using git commit: {}".format(get_current_commit()))
    if static:
        logging.info("Using statically-compiled output.")
    if race:
        logging.info("Race is enabled.")
    if len(tags) > 0:
        logging.info("Using build tags: {}".format(','.join(tags)))
    logging.info("Sending build output to: {}".format(outdir))
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif clean and outdir != '/' and outdir != ".":
        # Refuse to clean '/' or the current directory.
        logging.info("Cleaning build directory '{}' before building.".format(outdir))
        shutil.rmtree(outdir)
        os.makedirs(outdir)
    logging.info("Using version '{}' for build.".format(version))
    # NOTE(review): tmp_build_dir is created but never used or removed here.
    tmp_build_dir = create_temp_dir()
    for target, path in targets.items():
        logging.info("Building target: {}".format(target))
        build_command = ""
        # Handle static binary output
        if static is True or "static_" in arch:
            if "static_" in arch:
                static = True
                arch = arch.replace("static_", "")
            build_command += "CGO_ENABLED=0 "
        # Handle variations in architecture output
        goarch = arch
        if arch == "i386" or arch == "i686":
            goarch = "386"
        elif "arm64" in arch:
            goarch = "arm64"
        elif "arm" in arch:
            goarch = "arm"
        build_command += "GOOS={} GOARCH={} ".format(platform, goarch)
        if "arm" in arch:
            if arch == "armel":
                build_command += "GOARM=5 "
            elif arch == "armhf" or arch == "arm":
                build_command += "GOARM=6 "
            elif arch == "arm64":
                # TODO(rossmcdonald) - Verify this is the correct setting for arm64
                build_command += "GOARM=7 "
            else:
                logging.error("Invalid ARM architecture specified: {}".format(arch))
                logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
                return False
        if platform == 'windows':
            target = target + '.exe'
        build_command += "go build -o {} ".format(os.path.join(outdir, target))
        if race:
            build_command += "-race "
        if len(tags) > 0:
            build_command += "-tags {} ".format(','.join(tags))
        # Strip symbols (-w -s) and embed build metadata via linker flags.
        ldflags = [
            '-w', '-s',
            '-X', 'main.branch={}'.format(get_current_branch()),
            '-X', 'main.commit={}'.format(get_current_commit(short=True))]
        if version:
            ldflags.append('-X')
            ldflags.append('main.version={}'.format(version))
        build_command += ' -ldflags="{}" '.format(' '.join(ldflags))
        if static:
            build_command += " -a -installsuffix cgo "
        build_command += path
        start_time = datetime.utcnow()
        run(build_command, shell=True)
        end_time = datetime.utcnow()
        logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
    return True
def generate_sha256_from_file(path):
    """Return the hex SHA256 digest of the file contents at `path`."""
    hasher = hashlib.sha256()
    with open(path, 'rb') as f:
        # Read in fixed-size chunks to keep memory flat on large files.
        for chunk in iter(lambda: f.read(65536), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
def generate_sig_from_file(path):
    """Generate a detached, armored GPG signature for the file at `path`.

    Returns False (and skips signing) when no `gpg` binary is found on
    PATH, True otherwise. Honors the GNUPG_HOME environment variable.
    """
    logging.debug("Generating GPG signature for file: {}".format(path))
    gpg_path = check_path_for('gpg')
    if gpg_path is None:
        # logging.warn is a deprecated alias; use logging.warning.
        logging.warning("gpg binary not found on path! Skipping signature creation.")
        return False
    # Read the environment once instead of twice.
    gnupg_home = os.environ.get("GNUPG_HOME")
    if gnupg_home is not None:
        run('gpg --homedir {} --armor --yes --detach-sign {}'.format(gnupg_home, path))
    else:
        run('gpg --armor --detach-sign --yes {}'.format(path))
    return True
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
    """Package the output of the build process.

    Parameters:
        build_output (dict): platform -> arch -> directory of built binaries.
        pkg_name (str): base package name.
        version (str): package version; falsy values fall back to
            "<next_version>~<short commit>".
        nightly (bool): use nightly naming and strip versions from names.
        iteration (int/str): package iteration (fpm --iteration).
        static (bool): treat all builds as static.
        release (bool): rpm prerelease handling for '~' versions.

    Returns:
        list: paths of all produced package files.
    """
    outfiles = []
    tmp_build_dir = create_temp_dir()
    logging.debug("Packaging for build output: {}".format(build_output))
    logging.info("Using temporary directory: {}".format(tmp_build_dir))
    try:
        for platform in build_output:
            # Create top-level folder displaying which platform (linux, etc)
            os.makedirs(os.path.join(tmp_build_dir, platform))
            for arch in build_output[platform]:
                logging.info("Creating packages for {}/{}".format(platform, arch))
                # Create second-level directory displaying the architecture (amd64, etc)
                current_location = build_output[platform][arch]
                # Create directory tree to mimic file system of package
                build_root = os.path.join(tmp_build_dir,
                                          platform,
                                          arch,
                                          PACKAGE_NAME)
                os.makedirs(build_root)
                # Copy packaging scripts to build directory
                if platform == "windows":
                    # For windows and static builds, just copy
                    # binaries to root of package (no other scripts or
                    # directories)
                    package_scripts(build_root, config_only=True, windows=True)
                elif static or "static_" in arch:
                    package_scripts(build_root, config_only=True)
                else:
                    create_package_fs(build_root)
                    package_scripts(build_root)
                for binary in targets:
                    # Copy newly-built binaries to packaging directory
                    if platform == 'windows':
                        binary = binary + '.exe'
                    if platform == 'windows' or static or "static_" in arch:
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, binary)
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                    else:
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
                    shutil.copy(fr, to)
                for package_type in supported_packages[platform]:
                    # Package the directory structure for each package type for the platform
                    logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
                    name = pkg_name
                    # Reset version, iteration, and current location on each run
                    # since they may be modified below.
                    package_version = version
                    package_iteration = iteration
                    if "static_" in arch:
                        # Remove the "static_" from the displayed arch on the package
                        package_arch = arch.replace("static_", "")
                    elif package_type == "rpm" and arch == 'armhf':
                        package_arch = 'armv6hl'
                    else:
                        package_arch = arch
                    if not version:
                        # No explicit version: derive a nightly-style one.
                        package_version = "{}~{}".format(next_version, get_current_commit(short=True))
                        package_iteration = "0"
                    package_build_root = build_root
                    current_location = build_output[platform][arch]
                    if package_type in ['zip', 'tar']:
                        # For tars and zips, start the packaging one folder above
                        # the build root (to include the package name)
                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
                        if nightly:
                            if static or "static_" in arch:
                                name = '{}-static-nightly_{}_{}'.format(name,
                                                                        platform,
                                                                        package_arch)
                            else:
                                name = '{}-nightly_{}_{}'.format(name,
                                                                 platform,
                                                                 package_arch)
                        else:
                            if static or "static_" in arch:
                                name = '{}-{}-static_{}_{}'.format(name,
                                                                   package_version,
                                                                   platform,
                                                                   package_arch)
                            else:
                                name = '{}-{}_{}_{}'.format(name,
                                                            package_version,
                                                            platform,
                                                            package_arch)
                        current_location = os.path.join(os.getcwd(), current_location)
                        if package_type == 'tar':
                            tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name)
                            run(tar_command, shell=True)
                            run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".tar.gz")
                            outfiles.append(outfile)
                        elif package_type == 'zip':
                            zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
                            run(zip_command, shell=True)
                            run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".zip")
                            outfiles.append(outfile)
                    # NOTE(review): `and` binds tighter than `or`, so this
                    # reads as (not-zip/tar and static) or "static_" in arch
                    # — verify the intended grouping.
                    elif package_type not in ['zip', 'tar'] and static or "static_" in arch:
                        logging.info("Skipping package type '{}' for static builds.".format(package_type))
                    else:
                        if package_type == 'rpm' and release and '~' in package_version:
                            package_version, suffix = package_version.split('~', 1)
                            # The ~ indicates that this is a prerelease so we give it a leading 0.
                            package_iteration = "0.%s" % suffix
                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
                            fpm_common_args,
                            name,
                            package_arch,
                            package_type,
                            package_version,
                            package_iteration,
                            package_build_root,
                            current_location)
                        if package_type == "rpm":
                            fpm_command += "--depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT)
                        out = run(fpm_command, shell=True)
                        # fpm reports the produced file as :path=>"..."
                        matches = re.search(':path=>"(.*)"', out)
                        outfile = None
                        if matches is not None:
                            outfile = matches.groups()[0]
                        if outfile is None:
                            logging.warn("Could not determine output from packaging output!")
                        else:
                            if nightly:
                                # Strip nightly version from package name
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            else:
                                if package_type == 'rpm':
                                    # rpm's convert any dashes to underscores
                                    package_version = package_version.replace("-", "_")
                            outfiles.append(os.path.join(os.getcwd(), outfile))
        logging.debug("Produced package files: {}".format(outfiles))
        return outfiles
    finally:
        # Cleanup
        shutil.rmtree(tmp_build_dir)
def main(args):
    """Entry point for the build script.

    Runs pre-build checks, optionally checks out a different git branch or
    commit, builds binaries for the requested platform/arch matrix, then
    optionally packages, signs, and uploads the results.

    Args:
        args: parsed argparse.Namespace (see the parser at the bottom of
            this file).

    Returns:
        0 on success, 1 on any failure (suitable for sys.exit()).
    """
    global PACKAGE_NAME
    if args.release and args.nightly:
        logging.error("Cannot be both a nightly and a release.")
        return 1
    if args.nightly:
        # Nightly packages always use iteration 0 so they sort consistently.
        args.iteration = 0
    # Pre-build checks
    check_environ()
    if not check_prereqs():
        return 1
    if args.build_tags is None:
        args.build_tags = []
    else:
        args.build_tags = args.build_tags.split(',')
    # Remember where we started so we can restore the checkout afterwards.
    orig_commit = get_current_commit(short=True)
    orig_branch = get_current_branch()
    if args.platform not in supported_builds and args.platform != 'all':
        logging.error("Invalid build platform: {}".format(args.platform))
        return 1
    build_output = {}
    if args.branch != orig_branch and args.commit != orig_commit:
        logging.error("Can only specify one branch or commit to build from.")
        return 1
    elif args.branch != orig_branch:
        logging.info("Moving to git branch: {}".format(args.branch))
        run("git checkout {}".format(args.branch))
    elif args.commit != orig_commit:
        logging.info("Moving to git commit: {}".format(args.commit))
        run("git checkout {}".format(args.commit))
    if not args.no_get:
        if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
            return 1
    if args.generate:
        if not run_generate():
            return 1
    if args.test:
        if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
            return 1
    platforms = []
    single_build = True
    if args.platform == 'all':
        platforms = supported_builds.keys()
        single_build = False
    else:
        platforms = [args.platform]
    for platform in platforms:
        build_output.update( { platform : {} } )
        archs = []
        if args.arch == "all":
            single_build = False
            archs = supported_builds.get(platform)
        else:
            archs = [args.arch]
        for arch in archs:
            # For matrix builds, nest output under <outdir>/<platform>/<arch>.
            od = args.outdir
            if not single_build:
                od = os.path.join(args.outdir, platform, arch)
            if not build(version=args.version,
                         platform=platform,
                         arch=arch,
                         nightly=args.nightly,
                         race=args.race,
                         clean=args.clean,
                         outdir=od,
                         tags=args.build_tags,
                         static=args.static):
                return 1
            build_output.get(platform).update( { arch : od } )
    # Build packages
    if args.package:
        if not check_path_for("fpm"):
            logging.error("FPM ruby gem required for packaging. Stopping.")
            return 1
        packages = package(build_output,
                           args.name,
                           args.version,
                           nightly=args.nightly,
                           iteration=args.iteration,
                           static=args.static,
                           release=args.release)
        if args.sign:
            logging.debug("Generating GPG signatures for packages: {}".format(packages))
            sigs = [] # retain signatures so they can be uploaded with packages
            for p in packages:
                if generate_sig_from_file(p):
                    sigs.append(p + '.asc')
                else:
                    logging.error("Creation of signature for package [{}] failed!".format(p))
                    return 1
            packages += sigs
        if args.upload:
            logging.debug("Files staged for upload: {}".format(packages))
            if args.nightly:
                # Nightlies always replace the previous nightly artifacts.
                args.upload_overwrite = True
            if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
                return 1
        logging.info("Packages created:")
        for filename in packages:
            logging.info("%s (SHA256=%s)",
                         os.path.basename(filename),
                         generate_sha256_from_file(filename))
    if orig_branch != get_current_branch():
        # BUG FIX: previously logged args.branch (the branch we built from)
        # instead of orig_branch (the branch we are returning to).
        logging.info("Moving back to original git branch: {}".format(orig_branch))
        run("git checkout {}".format(orig_branch))
    return 0
if __name__ == '__main__':
    # Configure logging before anything else; --debug raises verbosity.
    LOG_LEVEL = logging.INFO
    if '--debug' in sys.argv[1:]:
        LOG_LEVEL = logging.DEBUG
    log_format = '[%(levelname)s] %(funcName)s: %(message)s'
    logging.basicConfig(level=LOG_LEVEL,
                        format=log_format)
    parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
    parser.add_argument('--verbose','-v','--debug',
                        action='store_true',
                        help='Use debug output')
    parser.add_argument('--outdir', '-o',
                        metavar='<output directory>',
                        default='./build/',
                        type=os.path.abspath,
                        help='Output directory')
    parser.add_argument('--name', '-n',
                        metavar='<name>',
                        default=PACKAGE_NAME,
                        type=str,
                        help='Name to use for package name (when package is specified)')
    parser.add_argument('--arch',
                        metavar='<amd64|i386|armhf|arm64|armel|all>',
                        type=str,
                        default=get_system_arch(),
                        help='Target architecture for build output')
    parser.add_argument('--platform',
                        metavar='<linux|darwin|windows|all>',
                        type=str,
                        default=get_system_platform(),
                        help='Target platform for build output')
    parser.add_argument('--branch',
                        metavar='<branch>',
                        type=str,
                        default=get_current_branch(),
                        help='Build from a specific branch')
    parser.add_argument('--commit',
                        metavar='<commit>',
                        type=str,
                        default=get_current_commit(short=True),
                        help='Build from a specific commit')
    parser.add_argument('--version',
                        metavar='<version>',
                        type=str,
                        default=get_current_version(),
                        help='Version information to apply to build output (ex: 0.12.0)')
    parser.add_argument('--iteration',
                        metavar='<package iteration>',
                        type=str,
                        default="1",
                        help='Package iteration to apply to build output (defaults to 1)')
    parser.add_argument('--stats',
                        action='store_true',
                        help='Emit build metrics (requires InfluxDB Python client)')
    parser.add_argument('--stats-server',
                        metavar='<hostname:port>',
                        type=str,
                        help='Send build stats to InfluxDB using provided hostname and port')
    parser.add_argument('--stats-db',
                        metavar='<database name>',
                        type=str,
                        help='Send build stats to InfluxDB using provided database name')
    parser.add_argument('--nightly',
                        action='store_true',
                        # BUG FIX: corrected "incremement" typo in help text.
                        help='Mark build output as nightly build (will increment the minor version)')
    parser.add_argument('--update',
                        action='store_true',
                        help='Update build dependencies prior to building')
    parser.add_argument('--package',
                        action='store_true',
                        help='Package binary output')
    parser.add_argument('--release',
                        action='store_true',
                        help='Mark build output as release')
    parser.add_argument('--clean',
                        action='store_true',
                        help='Clean output directory before building')
    parser.add_argument('--no-get',
                        action='store_true',
                        help='Do not retrieve pinned dependencies when building')
    parser.add_argument('--no-uncommitted',
                        action='store_true',
                        help='Fail if uncommitted changes exist in the working directory')
    parser.add_argument('--upload',
                        action='store_true',
                        help='Upload output packages to AWS S3')
    parser.add_argument('--upload-overwrite','-w',
                        action='store_true',
                        # BUG FIX: help text was a copy/paste duplicate of --upload;
                        # this flag actually controls overwriting existing objects.
                        help='Overwrite existing packages in AWS S3 when uploading')
    parser.add_argument('--bucket',
                        metavar='<S3 bucket name>',
                        type=str,
                        default=DEFAULT_BUCKET,
                        help='Destination bucket for uploads')
    parser.add_argument('--generate',
                        action='store_true',
                        help='Run "go generate" before building')
    parser.add_argument('--build-tags',
                        metavar='<tags>',
                        help='Optional build tags to use for compilation')
    parser.add_argument('--static',
                        action='store_true',
                        help='Create statically-compiled binary output')
    parser.add_argument('--sign',
                        action='store_true',
                        help='Create GPG detached signatures for packages (when package is specified)')
    parser.add_argument('--test',
                        action='store_true',
                        help='Run tests (does not produce build output)')
    parser.add_argument('--no-vet',
                        action='store_true',
                        help='Do not run "go vet" when running tests')
    parser.add_argument('--race',
                        action='store_true',
                        help='Enable race flag for build output')
    parser.add_argument('--parallel',
                        metavar='<num threads>',
                        type=int,
                        help='Number of tests to run simultaneously')
    parser.add_argument('--timeout',
                        metavar='<timeout>',
                        type=str,
                        help='Timeout for tests before failing')
    args = parser.parse_args()
    print_banner()
    sys.exit(main(args))
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
class Convolution1DTest(test.TestCase):
  """Tests for the `keras.layers.Conv1D` layer."""

  def _run_test(self, kwargs, arg, values):
    """Run `testing_utils.layer_test` once per candidate value of `arg`."""
    batch, channels, steps = 2, 3, 7
    layer_kwargs = copy.copy(kwargs)
    for candidate in values:
      layer_kwargs[arg] = candidate
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.Conv1D,
            kwargs=layer_kwargs,
            input_shape=(batch, steps, channels))

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_conv1d(self):
    """Smoke-tests common constructor options of Conv1D."""
    base_kwargs = {'filters': 2, 'kernel_size': 3}
    for arg, candidates in (('padding', ['valid', 'same']),
                            ('strides', [2]),
                            ('dilation_rate', [2])):
      self._run_test(base_kwargs, arg, candidates)
    same_kwargs = {'filters': 2, 'kernel_size': 3, 'padding': 'same'}
    self._run_test(same_kwargs, 'dilation_rate', [2])
    self._run_test(same_kwargs, 'dilation_rate', [3])

  def test_conv1d_regularizers(self):
    """l2 regularizers yield two losses at build time and a third on call."""
    reg_kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv1D(**reg_kwargs)
      layer.build((None, 5, 2))
      # Kernel and bias regularization losses are created at build time.
      self.assertEqual(len(layer.losses), 2)
      # The activity regularization loss appears after a forward pass.
      layer(keras.backend.variable(np.ones((1, 5, 2))))
      self.assertEqual(len(layer.losses), 3)

  def test_conv1d_constraints(self):
    """Constraint callables end up attached to the created variables."""
    kernel_fn = lambda x: x
    bias_fn = lambda x: x
    constraint_kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_constraint': kernel_fn,
        'bias_constraint': bias_fn,
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv1D(**constraint_kwargs)
      layer.build((None, 5, 2))
      self.assertEqual(layer.kernel.constraint, kernel_fn)
      self.assertEqual(layer.bias.constraint, bias_fn)
class Conv2DTest(test.TestCase):
  """Tests for the `keras.layers.Conv2D` layer."""

  def _run_test(self, kwargs, arg, values):
    """Run `testing_utils.layer_test` on Conv2D once per value of `arg`."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    test_kwargs = copy.copy(kwargs)
    for value in values:
      test_kwargs[arg] = value
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            # BUG FIX: this class tests Conv2D, but the helper previously
            # instantiated SeparableConv2D (copy/paste from
            # SeparableConv2DTest), so Conv2D itself was never exercised.
            keras.layers.Conv2D,
            kwargs=test_kwargs,
            input_shape=(num_samples, num_row, num_col, stack_size))

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_conv2d(self):
    """Smoke-tests common constructor options of Conv2D."""
    kwargs = {
        'filters': 2,
        'kernel_size': (3, 3),
    }
    self._run_test(kwargs, 'padding', ['valid', 'same'])
    self._run_test(kwargs, 'strides', [(2, 2)])
    if test.is_gpu_available(cuda_only=True):
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      self._run_test(kwargs, 'data_format', ['channels_first'])
    self._run_test(kwargs, 'dilation_rate', [(2, 2)])
    kwargs = {
        'filters': 2,
        'kernel_size': 3,
        'padding': 'same',
    }
    self._run_test(kwargs, 'dilation_rate', [2])

  def test_conv2d_regularizers(self):
    """l2 regularizers yield two losses at build time and a third on call."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv2D(**kwargs)
      layer.build((None, 5, 5, 2))
      # Kernel and bias regularization losses are created at build time.
      self.assertEqual(len(layer.losses), 2)
      # The activity regularization loss appears after a forward pass.
      layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
      self.assertEqual(len(layer.losses), 3)

  def test_conv2d_constraints(self):
    """Constraint callables end up attached to the created variables."""
    k_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv2D(**kwargs)
      layer.build((None, 5, 5, 2))
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
class Conv2DTransposeTest(test.TestCase):
  """Tests for the `keras.layers.Conv2DTranspose` layer."""

  def _run_test(self, kwargs, arg, values):
    """Run `testing_utils.layer_test` once per candidate value of `arg`."""
    batch, channels = 2, 3
    rows, cols = 7, 6
    layer_kwargs = copy.copy(kwargs)
    for candidate in values:
      layer_kwargs[arg] = candidate
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.Conv2DTranspose,
            kwargs=layer_kwargs,
            input_shape=(batch, rows, cols, channels))

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_conv2dtranspose(self):
    """Smoke-tests common constructor options of Conv2DTranspose."""
    base_kwargs = {'filters': 2, 'kernel_size': (3, 3)}
    self._run_test(base_kwargs, 'padding', ['valid', 'same'])
    self._run_test(base_kwargs, 'strides', [(2, 2)])
    if test.is_gpu_available(cuda_only=True):
      # channels_first requires a CUDA-capable GPU.
      self._run_test(base_kwargs, 'data_format', ['channels_first'])

  def test_conv2dtranspose_regularizers(self):
    """l2 regularizers yield two losses at build time and a third on call."""
    reg_kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv2DTranspose(**reg_kwargs)
      layer.build((None, 5, 5, 2))
      # Kernel and bias regularization losses are created at build time.
      self.assertEqual(len(layer.losses), 2)
      # The activity regularization loss appears after a forward pass.
      layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
      self.assertEqual(len(layer.losses), 3)

  def test_conv2dtranspose_constraints(self):
    """Constraint callables end up attached to the created variables."""
    kernel_fn = lambda x: x
    bias_fn = lambda x: x
    constraint_kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_constraint': kernel_fn,
        'bias_constraint': bias_fn,
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv2DTranspose(**constraint_kwargs)
      layer.build((None, 5, 5, 2))
      self.assertEqual(layer.kernel.constraint, kernel_fn)
      self.assertEqual(layer.bias.constraint, bias_fn)
class Conv3DTransposeTest(test.TestCase):
  """Tests for the `keras.layers.Conv3DTranspose` layer."""

  def _run_test(self, kwargs, arg, values):
    """Run `testing_utils.layer_test` once per candidate value of `arg`.

    Args:
      kwargs: base constructor kwargs for `Conv3DTranspose`.
      arg: name of the constructor argument being varied.
      values: list of values to try for `arg`.
    """
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    depth = 5
    test_kwargs = copy.copy(kwargs)
    for value in values:
      test_kwargs[arg] = value
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.Conv3DTranspose,
            kwargs=test_kwargs,
            input_shape=(num_samples, depth, num_row, num_col, stack_size))

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_conv3dtranspose(self):
    """Smoke-tests common constructor options of Conv3DTranspose."""
    kwargs = {
        'filters': 2,
        'kernel_size': (3, 3, 3),
    }
    self._run_test(kwargs, 'padding', ['valid', 'same'])
    self._run_test(kwargs, 'strides', [(2, 2, 2)])
    if test.is_gpu_available(cuda_only=True):
      # channels_first requires a CUDA-capable GPU.
      self._run_test(kwargs, 'data_format', ['channels_first'])

  def test_conv3dtranspose_regularizers(self):
    """l2 regularizers yield two losses at build time and a third on call."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv3DTranspose(**kwargs)
      layer.build((None, 5, 5, 5, 2))
      # Kernel and bias regularization losses are created at build time.
      self.assertEqual(len(layer.losses), 2)
      # The activity regularization loss appears after a forward pass.
      layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
      self.assertEqual(len(layer.losses), 3)

  def test_conv3dtranspose_constraints(self):
    """Constraint callables end up attached to the created variables."""
    k_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv3DTranspose(**kwargs)
      layer.build((None, 5, 5, 5, 2))
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
class SeparableConv1DTest(test.TestCase):
  """Tests for the `keras.layers.SeparableConv1D` layer."""

  def _run_test(self, kwargs, arg, values):
    """Run `testing_utils.layer_test` once per candidate value of `arg`."""
    num_samples = 2
    stack_size = 3
    length = 7
    test_kwargs = copy.copy(kwargs)
    for value in values:
      test_kwargs[arg] = value
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.SeparableConv1D,
            kwargs=test_kwargs,
            input_shape=(num_samples, length, stack_size))

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_separable_conv1d(self):
    """Smoke-tests common constructor options of SeparableConv1D."""
    kwargs = {
        'filters': 2,
        'kernel_size': 3,
    }
    self._run_test(kwargs, 'padding', ['valid', 'same'])
    self._run_test(kwargs, 'strides', [2])
    self._run_test(kwargs, 'dilation_rate', [2])
    self._run_test(kwargs, 'depth_multiplier', [2])
    kwargs = {
        'filters': 2,
        'kernel_size': 3,
        'padding': 'same',
    }
    self._run_test(kwargs, 'dilation_rate', [2])

  def test_separable_conv1d_regularizers(self):
    """l2 regularizers yield three losses at build time and a fourth on call."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'depthwise_regularizer': 'l2',
        'pointwise_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.SeparableConv1D(**kwargs)
      layer.build((None, 5, 2))
      # Depthwise, pointwise, and bias regularization losses exist at build.
      self.assertEqual(len(layer.losses), 3)
      # The activity regularization loss appears after a forward pass.
      layer(keras.backend.variable(np.ones((1, 5, 2))))
      self.assertEqual(len(layer.losses), 4)

  def test_separable_conv1d_constraints(self):
    """Constraint callables end up attached to the created variables."""
    d_constraint = lambda x: x
    p_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'pointwise_constraint': p_constraint,
        'depthwise_constraint': d_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.SeparableConv1D(**kwargs)
      layer.build((None, 5, 2))
      self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
      self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
class SeparableConv2DTest(test.TestCase):
  """Tests for the `keras.layers.SeparableConv2D` layer."""

  def _run_test(self, kwargs, arg, values):
    """Run `testing_utils.layer_test` once per candidate value of `arg`."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    test_kwargs = copy.copy(kwargs)
    for value in values:
      test_kwargs[arg] = value
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.SeparableConv2D,
            kwargs=test_kwargs,
            input_shape=(num_samples, num_row, num_col, stack_size))

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_separable_conv2d(self):
    """Smoke-tests common constructor options of SeparableConv2D."""
    kwargs = {
        'filters': 2,
        'kernel_size': 3,
    }
    self._run_test(kwargs, 'padding', ['valid', 'same'])
    self._run_test(kwargs, 'strides', [2])
    if test.is_gpu_available(cuda_only=True):
      # channels_first requires a CUDA-capable GPU.
      self._run_test(kwargs, 'data_format', ['channels_first'])
    self._run_test(kwargs, 'dilation_rate', [2])
    self._run_test(kwargs, 'depth_multiplier', [2])
    kwargs = {
        'filters': 2,
        'kernel_size': 3,
        'padding': 'same',
    }
    self._run_test(kwargs, 'dilation_rate', [2])

  def test_separable_conv2d_regularizers(self):
    """l2 regularizers yield three losses at build time and a fourth on call."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'depthwise_regularizer': 'l2',
        'pointwise_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.SeparableConv2D(**kwargs)
      layer.build((None, 5, 5, 2))
      # Depthwise, pointwise, and bias regularization losses exist at build.
      self.assertEqual(len(layer.losses), 3)
      # The activity regularization loss appears after a forward pass.
      layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
      self.assertEqual(len(layer.losses), 4)

  def test_separable_conv2d_constraints(self):
    """Constraint callables end up attached to the created variables."""
    d_constraint = lambda x: x
    p_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'pointwise_constraint': p_constraint,
        'depthwise_constraint': d_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.SeparableConv2D(**kwargs)
      layer.build((None, 5, 5, 2))
      self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
      self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
class Conv3DTest(test.TestCase):
  """Tests for the `keras.layers.Conv3D` layer."""

  def _run_test(self, kwargs, arg, values):
    """Run `testing_utils.layer_test` once per candidate value of `arg`."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    depth = 5
    test_kwargs = copy.copy(kwargs)
    for value in values:
      test_kwargs[arg] = value
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.Conv3D,
            kwargs=test_kwargs,
            input_shape=(num_samples, depth, num_row, num_col, stack_size))

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_conv3d(self):
    """Smoke-tests common constructor options of Conv3D."""
    kwargs = {
        'filters': 2,
        'kernel_size': (3, 3, 3),
    }
    self._run_test(kwargs, 'padding', ['valid', 'same'])
    self._run_test(kwargs, 'strides', [(2, 2, 2)])
    self._run_test(kwargs, 'dilation_rate', [(2, 2, 2)])
    if test.is_gpu_available(cuda_only=True):
      # channels_first requires a CUDA-capable GPU.
      self._run_test(kwargs, 'data_format', ['channels_first'])

  def test_conv3d_regularizers(self):
    """l2 regularizers yield two losses at build time and a third on call."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv3D(**kwargs)
      layer.build((None, 5, 5, 5, 2))
      # Kernel and bias regularization losses are created at build time.
      # (A duplicated copy of this assertion was removed.)
      self.assertEqual(len(layer.losses), 2)
      # The activity regularization loss appears after a forward pass.
      layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
      self.assertEqual(len(layer.losses), 3)

  def test_conv3d_constraints(self):
    """Constraint callables end up attached to the created variables."""
    k_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.test_session(use_gpu=True):
      layer = keras.layers.Conv3D(**kwargs)
      layer.build((None, 5, 5, 5, 2))
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
class ZeroPaddingTest(test.TestCase):
  """Tests for the ZeroPadding1D/2D/3D layers."""

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_zero_padding_1d(self):
    """Shape test, value correctness test, and invalid-argument checks."""
    num_samples = 2
    input_dim = 2
    num_steps = 5
    shape = (num_samples, num_steps, input_dim)
    inputs = np.ones(shape)
    # basic test
    with self.test_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.ZeroPadding1D,
          kwargs={'padding': 2},
          input_shape=inputs.shape)
      testing_utils.layer_test(
          keras.layers.ZeroPadding1D,
          kwargs={'padding': (1, 2)},
          input_shape=inputs.shape)
    # correctness test
    with self.test_session(use_gpu=True):
      layer = keras.layers.ZeroPadding1D(padding=2)
      layer.build(shape)
      output = layer(keras.backend.variable(inputs))
      if context.executing_eagerly():
        np_output = output.numpy()
      else:
        np_output = keras.backend.eval(output)
      # Symmetric padding of 2: first/last two steps are zero, middle is one.
      for offset in [0, 1, -1, -2]:
        np.testing.assert_allclose(np_output[:, offset, :], 0.)
      np.testing.assert_allclose(np_output[:, 2:-2, :], 1.)
      # Asymmetric padding (1, 2).
      layer = keras.layers.ZeroPadding1D(padding=(1, 2))
      layer.build(shape)
      output = layer(keras.backend.variable(inputs))
      if context.executing_eagerly():
        np_output = output.numpy()
      else:
        np_output = keras.backend.eval(output)
      for left_offset in [0]:
        np.testing.assert_allclose(np_output[:, left_offset, :], 0.)
      for right_offset in [-1, -2]:
        np.testing.assert_allclose(np_output[:, right_offset, :], 0.)
      np.testing.assert_allclose(np_output[:, 1:-2, :], 1.)
      layer.get_config()
    # test incorrect use
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding1D(padding=(1, 1, 1))
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding1D(padding=None)

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_zero_padding_2d(self):
    """Shape and correctness tests for both data formats."""
    num_samples = 2
    stack_size = 2
    input_num_row = 4
    input_num_col = 5
    for data_format in ['channels_first', 'channels_last']:
      # BUG FIX: the channels_last inputs were unconditionally overwritten
      # with the channels_first layout; build the array in the layout that
      # actually matches data_format.
      if data_format == 'channels_first':
        inputs = np.ones((num_samples, stack_size, input_num_row,
                          input_num_col))
      else:
        inputs = np.ones((num_samples, input_num_row, input_num_col,
                          stack_size))
      # basic test
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.ZeroPadding2D,
            kwargs={'padding': (2, 2),
                    'data_format': data_format},
            input_shape=inputs.shape)
        testing_utils.layer_test(
            keras.layers.ZeroPadding2D,
            kwargs={'padding': ((1, 2), (3, 4)),
                    'data_format': data_format},
            input_shape=inputs.shape)
      # correctness test
      with self.test_session(use_gpu=True):
        layer = keras.layers.ZeroPadding2D(
            padding=(2, 2), data_format=data_format)
        layer.build(inputs.shape)
        output = layer(keras.backend.variable(inputs))
        if context.executing_eagerly():
          np_output = output.numpy()
        else:
          np_output = keras.backend.eval(output)
        if data_format == 'channels_last':
          for offset in [0, 1, -1, -2]:
            np.testing.assert_allclose(np_output[:, offset, :, :], 0.)
            np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
          np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
        elif data_format == 'channels_first':
          for offset in [0, 1, -1, -2]:
            np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
            np.testing.assert_allclose(np_output[:, :, :, offset], 0.)
          # BUG FIX: for channels_first the padded axes are 2 and 3, so the
          # interior check must index those axes (was [:, 2:-2, 2:-2, :]).
          np.testing.assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)
        layer = keras.layers.ZeroPadding2D(
            padding=((1, 2), (3, 4)), data_format=data_format)
        layer.build(inputs.shape)
        output = layer(keras.backend.variable(inputs))
        if context.executing_eagerly():
          np_output = output.numpy()
        else:
          np_output = keras.backend.eval(output)
        if data_format == 'channels_last':
          for top_offset in [0]:
            np.testing.assert_allclose(np_output[:, top_offset, :, :], 0.)
          for bottom_offset in [-1, -2]:
            np.testing.assert_allclose(np_output[:, bottom_offset, :, :], 0.)
          for left_offset in [0, 1, 2]:
            np.testing.assert_allclose(np_output[:, :, left_offset, :], 0.)
          for right_offset in [-1, -2, -3, -4]:
            np.testing.assert_allclose(np_output[:, :, right_offset, :], 0.)
          np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
        elif data_format == 'channels_first':
          for top_offset in [0]:
            np.testing.assert_allclose(np_output[:, :, top_offset, :], 0.)
          for bottom_offset in [-1, -2]:
            np.testing.assert_allclose(np_output[:, :, bottom_offset, :], 0.)
          for left_offset in [0, 1, 2]:
            np.testing.assert_allclose(np_output[:, :, :, left_offset], 0.)
          for right_offset in [-1, -2, -3, -4]:
            np.testing.assert_allclose(np_output[:, :, :, right_offset], 0.)
          np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
    # test incorrect use
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding2D(padding=(1, 1, 1))
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding2D(padding=None)

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_zero_padding_3d(self):
    """Shape test, value correctness test, and invalid-argument checks."""
    num_samples = 2
    stack_size = 2
    input_len_dim1 = 4
    input_len_dim2 = 5
    input_len_dim3 = 3
    inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
                      input_len_dim3, stack_size))
    # basic test
    with self.test_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.ZeroPadding3D,
          kwargs={'padding': (2, 2, 2)},
          input_shape=inputs.shape)
    # correctness test
    with self.test_session(use_gpu=True):
      layer = keras.layers.ZeroPadding3D(padding=(2, 2, 2))
      layer.build(inputs.shape)
      output = layer(keras.backend.variable(inputs))
      if context.executing_eagerly():
        np_output = output.numpy()
      else:
        np_output = keras.backend.eval(output)
      for offset in [0, 1, -1, -2]:
        np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
        np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
        np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
      np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
    # test incorrect use
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding3D(padding=(1, 1))
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding3D(padding=None)
class UpSamplingTest(test.TestCase):
  """Tests for the UpSampling1D/2D/3D layers."""

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_upsampling_1d(self):
    """Shape smoke test for UpSampling1D."""
    with self.test_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_upsampling_2d(self):
    """Checks output shapes and values against `np.repeat` for both formats."""
    num_samples = 2
    stack_size = 2
    input_num_row = 11
    input_num_col = 12
    for data_format in ['channels_first', 'channels_last']:
      if data_format == 'channels_first':
        inputs = np.random.rand(num_samples, stack_size, input_num_row,
                                input_num_col)
      else:
        inputs = np.random.rand(num_samples, input_num_row, input_num_col,
                                stack_size)
      # basic test
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.UpSampling2D,
            kwargs={'size': (2, 2),
                    'data_format': data_format},
            input_shape=inputs.shape)
        for length_row in [2]:
          for length_col in [2, 3]:
            layer = keras.layers.UpSampling2D(
                size=(length_row, length_col), data_format=data_format)
            layer.build(inputs.shape)
            output = layer(keras.backend.variable(inputs))
            if context.executing_eagerly():
              np_output = output.numpy()
            else:
              np_output = keras.backend.eval(output)
            # Spatial dims must be multiplied by the upsampling factors.
            if data_format == 'channels_first':
              assert np_output.shape[2] == length_row * input_num_row
              assert np_output.shape[3] == length_col * input_num_col
            else:  # tf
              assert np_output.shape[1] == length_row * input_num_row
              assert np_output.shape[2] == length_col * input_num_col
            # compare with numpy
            if data_format == 'channels_first':
              expected_out = np.repeat(inputs, length_row, axis=2)
              expected_out = np.repeat(expected_out, length_col, axis=3)
            else:  # tf
              expected_out = np.repeat(inputs, length_row, axis=1)
              expected_out = np.repeat(expected_out, length_col, axis=2)
            np.testing.assert_allclose(np_output, expected_out)

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_upsampling_3d(self):
    """Checks output shapes and values against `np.repeat` for both formats."""
    num_samples = 2
    stack_size = 2
    input_len_dim1 = 10
    input_len_dim2 = 11
    input_len_dim3 = 12
    for data_format in ['channels_first', 'channels_last']:
      if data_format == 'channels_first':
        inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
                                input_len_dim2, input_len_dim3)
      else:
        inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
                                input_len_dim3, stack_size)
      # basic test
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.UpSampling3D,
            kwargs={'size': (2, 2, 2),
                    'data_format': data_format},
            input_shape=inputs.shape)
        for length_dim1 in [2, 3]:
          for length_dim2 in [2]:
            for length_dim3 in [3]:
              layer = keras.layers.UpSampling3D(
                  size=(length_dim1, length_dim2, length_dim3),
                  data_format=data_format)
              layer.build(inputs.shape)
              output = layer(keras.backend.variable(inputs))
              if context.executing_eagerly():
                np_output = output.numpy()
              else:
                np_output = keras.backend.eval(output)
              # Spatial dims must be multiplied by the upsampling factors.
              if data_format == 'channels_first':
                assert np_output.shape[2] == length_dim1 * input_len_dim1
                assert np_output.shape[3] == length_dim2 * input_len_dim2
                assert np_output.shape[4] == length_dim3 * input_len_dim3
              else:  # tf
                assert np_output.shape[1] == length_dim1 * input_len_dim1
                assert np_output.shape[2] == length_dim2 * input_len_dim2
                assert np_output.shape[3] == length_dim3 * input_len_dim3
              # compare with numpy
              if data_format == 'channels_first':
                expected_out = np.repeat(inputs, length_dim1, axis=2)
                expected_out = np.repeat(expected_out, length_dim2, axis=3)
                expected_out = np.repeat(expected_out, length_dim3, axis=4)
              else:  # tf
                expected_out = np.repeat(inputs, length_dim1, axis=1)
                expected_out = np.repeat(expected_out, length_dim2, axis=2)
                expected_out = np.repeat(expected_out, length_dim3, axis=3)
              np.testing.assert_allclose(np_output, expected_out)
class CroppingTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
def test_cropping_1d(self):
num_samples = 2
time_length = 4
input_len_dim1 = 2
inputs = np.random.rand(num_samples, time_length, input_len_dim1)
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping1D,
kwargs={'cropping': (2, 2)},
input_shape=inputs.shape)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping1D(cropping=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping1D(cropping=None)
@tf_test_util.run_in_graph_and_eager_modes()
def test_cropping_2d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 9
input_len_dim2 = 9
cropping = ((2, 2), (3, 3))
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
# basic test
with self.test_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping2D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
with self.test_session(use_gpu=True):
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :, cropping[0][0]:-cropping[0][1], cropping[
1][0]:-cropping[1][1]]
else:
expected_out = inputs[:, cropping[0][0]:-cropping[0][1], cropping[1][
0]:-cropping[1][1], :]
np.testing.assert_allclose(np_output, expected_out)
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
# another correctness test (no cropping)
with self.test_session(use_gpu=True):
cropping = ((0, 0), (0, 0))
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with input
np.testing.assert_allclose(np_output, inputs)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=None)
@tf_test_util.run_in_graph_and_eager_modes()
def test_cropping_3d(self):
  """Tests Cropping3D with several cropping specs.

  Runs the generic layer_test for each (cropping, data_format) combination
  and, for the fully explicit ((a, b), (c, d), (e, f)) spec, also compares
  the layer output against direct numpy slicing. Finally checks that
  malformed `cropping` arguments raise ValueError.
  """
  num_samples = 2
  stack_size = 2
  input_len_dim1 = 8
  input_len_dim2 = 8
  input_len_dim3 = 8
  croppings = [((2, 2), (1, 1), (2, 3)), 3, (0, 1, 1)]
  for cropping in croppings:
    for data_format in ['channels_last', 'channels_first']:
      if data_format == 'channels_first':
        inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
                                input_len_dim2, input_len_dim3)
      else:
        inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
                                input_len_dim3, stack_size)
      # basic test
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.Cropping3D,
            kwargs={'cropping': cropping,
                    'data_format': data_format},
            input_shape=inputs.shape)
      # Bug fix: the guard used to inspect `croppings` (the list of all
      # specs, whose first element has length 3), so it was always False
      # and the correctness check below never executed. Inspect the
      # *current* spec instead: only the explicit per-dimension tuple
      # form maps directly onto numpy slices, and the isinstance checks
      # avoid calling len() on the int spec.
      if isinstance(cropping, tuple) and all(
          isinstance(dim_crop, tuple) and len(dim_crop) == 2
          for dim_crop in cropping):
        # correctness test
        with self.test_session(use_gpu=True):
          layer = keras.layers.Cropping3D(
              cropping=cropping, data_format=data_format)
          layer.build(inputs.shape)
          output = layer(keras.backend.variable(inputs))
          if context.executing_eagerly():
            np_output = output.numpy()
          else:
            np_output = keras.backend.eval(output)
          # compare with numpy
          if data_format == 'channels_first':
            expected_out = inputs[:, :,
                                  cropping[0][0]:-cropping[0][1],
                                  cropping[1][0]:-cropping[1][1],
                                  cropping[2][0]:-cropping[2][1]]
          else:
            expected_out = inputs[:,
                                  cropping[0][0]:-cropping[0][1],
                                  cropping[1][0]:-cropping[1][1],
                                  cropping[2][0]:-cropping[2][1], :]
          np.testing.assert_allclose(np_output, expected_out)
  # test incorrect use
  with self.assertRaises(ValueError):
    keras.layers.Cropping3D(cropping=(1, 1))
  with self.assertRaises(ValueError):
    keras.layers.Cropping3D(cropping=None)
class DepthwiseConv2DTest(test.TestCase):
  """Smoke tests for keras.layers.DepthwiseConv2D over common argument values."""

  def _run_test(self, kwargs, arg, values):
    """Run layer_test once per candidate value of `arg` merged into `kwargs`."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    input_shape = (num_samples, num_row, num_col, stack_size)
    for value in values:
      # Build a fresh kwargs dict per value so the base kwargs stay untouched.
      merged_kwargs = dict(kwargs, **{arg: value})
      with self.test_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.DepthwiseConv2D,
            kwargs=merged_kwargs,
            input_shape=input_shape)

  def test_depthwise_conv2d(self):
    """Sweep padding, strides, data_format and depth_multiplier options."""
    base_kwargs = {'kernel_size': (3, 3)}
    self._run_test(base_kwargs, 'padding', ['valid', 'same'])
    self._run_test(base_kwargs, 'strides', [(2, 2)])
    if test.is_gpu_available(cuda_only=True):
      # channels_first is only supported on GPU for this layer.
      self._run_test(base_kwargs, 'data_format', ['channels_first'])
    self._run_test(base_kwargs, 'depth_multiplier', [1, 2])
    # A maximal configuration combining regularizers and constraints.
    full_kwargs = {
        'kernel_size': 3,
        'padding': 'valid',
        'data_format': 'channels_first',
        'activation': None,
        'depthwise_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'depthwise_constraint': 'unit_norm',
        'strides': (2, 2),
    }
    self._run_test(full_kwargs, 'depth_multiplier', [1])
# Run every TensorFlow test case defined in this module when executed directly.
if __name__ == '__main__':
  test.main()
|
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MultiRun module
This module contains the Step that is aimed to be employed when
either Sampling or Optimization analyses are requested
results of a RAVEN (or not) analysis.
Created on May 6, 2021
@author: alfoa
supercedes Steps.py from alfoa (2/16/2013)
"""
#External Modules------------------------------------------------------------------------------------
import time
import copy
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .SingleRun import SingleRun
import Models
from utils import utils
from OutStreams import OutStreamEntity
#Internal Modules End--------------------------------------------------------------------------------
class MultiRun(SingleRun):
  """
    This class implements one step of the simulation pattern where several runs
    are needed (i.e. Sampling or Optimization analyses).
  """
  def __init__(self):
    """
      Constructor
      @ In, None
      @ Out, None
    """
    super().__init__()
    self._samplerInitDict = {} # dictionary that gets sent as keyword arguments to the initialization of the sampler
    self.counter = 0           # just a handy counter of the runs already performed
    self.printTag = 'STEP MULTIRUN'

  def _localInputAndCheckParam(self,paramInput):
    """
      Place here specialized reading, input consistency check and
      initialization of what will not change during the whole life of the object
      @ In, paramInput, ParameterInput, node that represents the portion of the input that belongs to this Step class
      @ Out, None
    """
    SingleRun._localInputAndCheckParam(self,paramInput)
    # a MultiRun is meaningless without a sampler/optimizer among its roles
    if self.samplerType not in [item[0] for item in self.parList]:
      self.raiseAnError(IOError,'It is not possible a multi-run without a sampler or optimizer!')

  def _initializeSampler(self,inDictionary):
    """
      Method to initialize the sampler
      @ In, inDictionary, dict, contains the list of instances (see Simulation)
      @ Out, None
    """
    if 'SolutionExport' in inDictionary.keys():
      self._samplerInitDict['solutionExport']=inDictionary['SolutionExport']
    inDictionary[self.samplerType].initialize(**self._samplerInitDict)
    self.raiseADebug('for the role of sampler the item of class '+inDictionary[self.samplerType].type+' and name '+inDictionary[self.samplerType].name+' has been initialized')
    self.raiseADebug('Sampler initialization dictionary: '+str(self._samplerInitDict))

  def _localInitializeStep(self,inDictionary):
    """
      This is the API for the local initialization of the children classes of step
      The inDictionary contains the instances for each possible role supported in the step (dictionary keywords) the instances of the objects in list if more than one is allowed
      The role of _localInitializeStep is to call the initialize method instance if needed
      Remember after each initialization to put:
      self.raiseADebug('for the role "+key+" the item of class '+inDictionary['key'].type+' and name '+inDictionary['key'].name+' has been initialized')
      @ In, inDictionary, dict, the initialization dictionary
      @ Out, None
    """
    SingleRun._localInitializeStep(self,inDictionary)
    # check that no input data objects are also used as outputs
    for out in inDictionary['Output']:
      if out.type not in ['PointSet','HistorySet','DataSet']:
        continue
      for inp in inDictionary['Input']:
        if inp.type not in ['PointSet','HistorySet','DataSet']:
          continue
        if inp == out:
          self.raiseAnError(IOError,'The same data object should not be used as both <Input> and <Output> in the same MultiRun step! ' \
              + 'Step: "{}", DataObject: "{}"'.format(self.name,out.name))
    self.counter = 0
    self._samplerInitDict['externalSeeding'] = self.initSeed
    self._initializeSampler(inDictionary)
    # generate lambda function list to collect the output without checking the type
    self._outputCollectionLambda = []
    self._outputDictCollectionLambda = []
    # set up output collection lambdas
    for outIndex, output in enumerate(inDictionary['Output']):
      if not isinstance(output, OutStreamEntity):
        if 'SolutionExport' in inDictionary.keys() and output.name == inDictionary['SolutionExport'].name:
          # the solution export is filled by the sampler itself, not collected here
          self._outputCollectionLambda.append((lambda x:None, outIndex))
          self._outputDictCollectionLambda.append((lambda x:None, outIndex))
        else:
          self._outputCollectionLambda.append( (lambda x: inDictionary['Model'].collectOutput(x[0],x[1]), outIndex) )
          self._outputDictCollectionLambda.append( (lambda x: inDictionary['Model'].collectOutputFromDict(x[0],x[1]), outIndex) )
      else:
        self._outputCollectionLambda.append((lambda x: x[1].addOutput(), outIndex))
        self._outputDictCollectionLambda.append((lambda x: x[1].addOutput(), outIndex))
    self._registerMetadata(inDictionary)
    self.raiseADebug('Generating input batch of size '+str(inDictionary['jobHandler'].runInfoDict['batchSize']))
    # set up and run the first batch of samples
    # FIXME this duplicates a lot of code from _localTakeAstepRun, which should be consolidated
    # first, check and make sure the model is ready
    model = inDictionary['Model']
    if isinstance(model,Models.ROM):
      if not model.amITrained:
        model.raiseAnError(RuntimeError,'ROM model "%s" has not been trained yet, so it cannot be sampled!' %model.name+\
                           ' Use a RomTrainer step to train it.')
    for inputIndex in range(inDictionary['jobHandler'].runInfoDict['batchSize']):
      if inDictionary[self.samplerType].amIreadyToProvideAnInput():
        try:
          newInput = self._findANewInputToRun(inDictionary[self.samplerType], inDictionary['Model'], inDictionary['Input'], inDictionary['Output'], inDictionary['jobHandler'])
          if newInput is not None:
            inDictionary["Model"].submit(newInput, inDictionary[self.samplerType].type, inDictionary['jobHandler'], **copy.deepcopy(inDictionary[self.samplerType].inputInfo))
            self.raiseADebug('Submitted input '+str(inputIndex+1))
        except utils.NoMoreSamplesNeeded:
          self.raiseAMessage('Sampler returned "NoMoreSamplesNeeded". Continuing...')

  # NOTE: removed a stray "@profile" decorator here. It is injected as a
  # builtin only when running under kernprof/line_profiler; in any normal
  # run it raised NameError at import time.
  def _localTakeAstepRun(self,inDictionary):
    """
      This is the API for the local run of a step for the children classes
      @ In, inDictionary, dict, contains the list of instances (see Simulation)
      @ Out, None
    """
    jobHandler = inDictionary['jobHandler']
    model      = inDictionary['Model'     ]
    inputs     = inDictionary['Input'     ]
    outputs    = inDictionary['Output'    ]
    sampler    = inDictionary[self.samplerType]
    # check to make sure model can be run
    ## first, if it's a ROM, check that it's trained
    if isinstance(model,Models.ROM):
      if not model.amITrained:
        model.raiseAnError(RuntimeError,'ROM model "%s" has not been trained yet, so it cannot be sampled!' %model.name+\
                           ' Use a RomTrainer step to train it.')
    # Every reportDeltaTime seconds, write some debug information for this step.
    reportDeltaTime = 60.0
    nextReportTime = time.time() + reportDeltaTime
    # run step loop
    while True:
      # collect finished jobs
      finishedJobs = jobHandler.getFinished()
      ## FIXME: THE BATCH STRATEGY IS TOO INTRUSIVE. A MORE ELEGANT WAY NEEDS TO BE FOUND (E.G. REALIZATION OBJECT)
      for finishedJobObjs in finishedJobs:
        # NOTE: HERE WE RETRIEVE THE JOBS. IF BATCHING, THE ELEMENT IN finishedJobs is a LIST
        # WE DO THIS in this way because:
        #    in case of BATCHING, the finalizeActualSampling method MUST BE called ONCE/BATCH
        #    otherwise, the finalizeActualSampling method MUST BE called ONCE/job
        # FIXME: This method needs to be improved since it is very intrusive
        # robustness fix: use isinstance instead of the fragile
        # `type(x).__name__ in 'list'` substring test
        if isinstance(finishedJobObjs, list):
          finishedJobList = finishedJobObjs
          self.raiseADebug('BATCHING: Collecting JOB batch named "{}".'.format(finishedJobList[0].groupId))
        else:
          finishedJobList = [finishedJobObjs]
        currentFailures = []
        for finishedJob in finishedJobList:
          finishedJob.trackTime('step_collected')
          # update number of collected runs
          self.counter +=1
          # collect run if it succeeded
          if finishedJob.getReturnCode() == 0:
            for myLambda, outIndex in self._outputCollectionLambda:
              myLambda([finishedJob,outputs[outIndex]])
              self.raiseADebug('Just collected job {j:^8} and sent to output "{o}"'
                               .format(j=finishedJob.identifier,
                                       o=inDictionary['Output'][outIndex].name))
          # pool it if it failed, before we loop back to "while True" we'll check for these again
          else:
            self.raiseADebug('the job "{}" has failed.'.format(finishedJob.identifier))
            if self.failureHandling['fail']:
              # is this sampler/optimizer able to handle failed runs? If not, add the failed run in the pool
              if not sampler.ableToHandelFailedRuns:
                # add run to a pool that can be sent to the sampler later
                self.failedRuns.append(copy.copy(finishedJob))
            else:
              if finishedJob.identifier not in self.failureHandling['jobRepetitionPerformed']:
                self.failureHandling['jobRepetitionPerformed'][finishedJob.identifier] = 1
              if self.failureHandling['jobRepetitionPerformed'][finishedJob.identifier] <= self.failureHandling['repetitions']:
                # we re-add the failed job
                jobHandler.reAddJob(finishedJob)
                self.raiseAWarning('As prescribed in the input, trying to re-submit the job "'+finishedJob.identifier+'". Trial '+
                                   str(self.failureHandling['jobRepetitionPerformed'][finishedJob.identifier]) +'/'+str(self.failureHandling['repetitions']))
                self.failureHandling['jobRepetitionPerformed'][finishedJob.identifier] += 1
              else:
                # is this sampler/optimizer able to handle failed runs? If not, add the failed run in the pool
                if not sampler.ableToHandelFailedRuns:
                  self.failedRuns.append(copy.copy(finishedJob))
                self.raiseAWarning('The job "'+finishedJob.identifier+'" has been submitted '+ str(self.failureHandling['repetitions'])+' times, failing all the times!!!')
            if sampler.ableToHandelFailedRuns:
              self.raiseAWarning('The sampler/optimizer "'+sampler.type+'" is able to handle failed runs!')
            # collect the failed job index from the list
            currentFailures.append(finishedJobList.index(finishedJob))
        if currentFailures:
          # In the previous approach, the job was removed directly in the list of jobs on which we were iterating,
          # messing up the loop. Since now we collect only the indices,
          # I need to reverse them so I can remove the jobs starting from the last and going back.
          # For example, if currentFailures=[0,2,4] and we do not sort it (i.e. currentFailures = [4,2,0])
          # when we start removing the jobs from the list we would mess up the indices => If I remove 0 first,
          # then the index 2 should become 1 and index 4 should become 3 (and so on)
          currentFailures.sort(reverse=True)
          for idx in currentFailures:
            finishedJobList.pop(idx)

        if isinstance(finishedJobObjs, list): # TODO: should be consistent; if no batching should batch size be 1 or 0 ?
          # if sampler claims it's batching, then only collect once, since it will collect the batch
          # together, not one-at-a-time
          # FIXME: IN HERE WE SEND IN THE INSTANCE OF THE FIRST JOB OF A BATCH
          # FIXME: THIS IS DONE BECAUSE CURRENTLY SAMPLERS/OPTIMIZERS RETRIEVE SOME INFO from the Runner instance but it can be
          # FIXME: dangerous if the sampler/optimizer requires info from each job. THIS MUST BE FIXED.
          if finishedJobList:
            sampler.finalizeActualSampling(finishedJobList[0],model,inputs)
        else:
          # sampler isn't intending to batch, so we send them in one-at-a-time as per normal
          for finishedJob in finishedJobList:
            # finalize actual sampler
            sampler.finalizeActualSampling(finishedJob,model,inputs)

        for finishedJob in finishedJobList:
          finishedJob.trackTime('step_finished')
        # terminate jobs as requested by the sampler, in case they're not needed anymore
        ## TODO is this a safe place to put this?
        ## If it's placed after adding new jobs and IDs are re-used i.e. for failed tests,
        ## -> then the new jobs will be killed if this is placed after new job submission!
        jobHandler.terminateJobs(sampler.getJobsToEnd(clear=True))
        # add new jobs, for DET-type samplers
        # put back this loop (do not take it away again. it is NEEDED for NOT-POINT samplers (aka DET)). Andrea
        # NOTE for non-DET samplers, this check also happens outside this collection loop
        if sampler.onlySampleAfterCollecting:
          self._addNewRuns(sampler, model, inputs, outputs, jobHandler, inDictionary)
      # END for each collected finished run ...
      ## If all of the jobs given to the job handler have finished, and the sampler
      ## has nothing else to provide, then we are done with this step.
      if jobHandler.isFinished() and not sampler.amIreadyToProvideAnInput():
        self.raiseADebug('Sampling finished with %d runs submitted, %d jobs running, and %d completed jobs waiting to be processed.' % (jobHandler.numSubmitted(),jobHandler.numRunning(),len(jobHandler.getFinishedNoPop())) )
        break
      currentTime = time.time()
      if currentTime > nextReportTime:
        nextReportTime = currentTime + reportDeltaTime
        self.raiseADebug("Continuing to run. isFinished: %r running: %d unclaimed runs: %d" % (jobHandler.isFinished(), jobHandler.numRunning(), len(jobHandler.getFinishedNoPop())))
        # Note: calling amIreadyToProvideAnInput can change results,
        # but might be helpful for debugging sometimes
        # "sampler ready with input: %r" sampler.amIreadyToProvideAnInput()
      if not sampler.onlySampleAfterCollecting:
        # NOTE for some reason submission outside collection breaks the DET
        # however, it is necessary i.e. batch sampling
        self._addNewRuns(sampler, model, inputs, outputs, jobHandler, inDictionary, verbose=False)
      time.sleep(self.sleepTime)
    # END while loop that runs the step iterations (collection and submission-for-DET)
    # if any collected runs failed, let the sampler treat them appropriately, and any other closing-out actions
    sampler.finalizeSampler(self.failedRuns)

  def _addNewRuns(self, sampler, model, inputs, outputs, jobHandler, inDictionary, verbose=True):
    """
      Checks for open spaces and adds new runs to jobHandler queue (via model.submit currently)
      @ In, sampler, Sampler, the sampler in charge of generating the sample
      @ In, model, Model, the model in charge of evaluating the sample
      @ In, inputs, object, the raven object used as the input in this step
        (i.e., a DataObject, File, or Database, I guess? Maybe these should all
        inherit from some base "Data" so that we can ensure a consistent
        interface for these?)
      @ In, outputs, object, the raven object used as the output in this step
        (i.e., a DataObject, File, or Database, I guess? Maybe these should all
        inherit from some base "Data" so that we can ensure a consistent
        interface for these?)
      @ In, jobHandler, object, the raven object used to handle jobs
      @ In, inDictionary, dict, additional step objects map
      @ In, verbose, bool, optional, if True print DEBUG statements
      @ Out, None
    """
    isEnsemble = isinstance(model, Models.EnsembleModel)
    ## In order to ensure that the queue does not grow too large, we will
    ## employ a threshold on the number of jobs the jobHandler can take,
    ## in addition, we cannot provide more jobs than the sampler can provide.
    ## So, we take the minimum of these two values.
    if verbose:
      self.raiseADebug('Testing if the sampler is ready to generate a new input')
    for _ in range(min(jobHandler.availability(isEnsemble), sampler.endJobRunnable())):
      if sampler.amIreadyToProvideAnInput():
        try:
          newInput = self._findANewInputToRun(sampler, model, inputs, outputs, jobHandler)
          if newInput is not None:
            model.submit(newInput, inDictionary[self.samplerType].type, jobHandler, **copy.deepcopy(sampler.inputInfo))
        except utils.NoMoreSamplesNeeded:
          self.raiseAMessage(' ... Sampler returned "NoMoreSamplesNeeded". Continuing...')
          break
      else:
        if verbose:
          self.raiseADebug(' ... sampler has no new inputs currently.')
        break
    else:
      if verbose:
        self.raiseADebug(' ... no available JobHandler spots currently (or the Sampler is done.)')

  def _findANewInputToRun(self, sampler, model, inputs, outputs, jobHandler):
    """
      Repeatedly calls Sampler until a new run is found or "NoMoreSamplesNeeded" is raised.
      @ In, sampler, Sampler, the sampler in charge of generating the sample
      @ In, model, Model, the model in charge of evaluating the sample
      @ In, inputs, object, the raven object used as the input in this step
        (i.e., a DataObject, File, or Database, I guess? Maybe these should all
        inherit from some base "Data" so that we can ensure a consistent
        interface for these?)
      @ In, outputs, object, the raven object used as the output in this step
        (i.e., a DataObject, File, or Database, I guess? Maybe these should all
        inherit from some base "Data" so that we can ensure a consistent
        interface for these?)
      @ In, jobHandler, object, the raven object used to handle jobs
      @ Out, newInp, list, list containing the new inputs (or None if a restart)
    """
    # The value of "found" determines what the Sampler is ready to provide.
    #   case 0: a new sample has been discovered and can be run, and newInp is a new input list.
    #   case 1: found the input in restart, and newInp is a realization dictionary of data to use
    found, newInp = sampler.generateInput(model,inputs)
    if found == 1:
      kwargs = copy.deepcopy(sampler.inputInfo)
      # "submit" the finished run
      jobHandler.addFinishedJob(newInp, metadata=kwargs)
      return None
      # NOTE: we return None here only because the Sampler's "counter" is not correctly passed
      # through if we add several samples at once through the restart. If we actually returned
      # a Realization object from the Sampler, this would not be a problem. - talbpaul
    return newInp
|
|
"""
To run this test, type this in command line <kolibri manage test -- kolibri.core.content>
"""
import datetime
import unittest
import uuid
import mock
import requests
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils import timezone
from le_utils.constants import content_kinds
from rest_framework import status
from rest_framework.test import APITestCase
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.test.helpers import provision_device
from kolibri.core.content import models as content
from kolibri.core.content.test.test_channel_upgrade import ChannelBuilder
from kolibri.core.device.models import DevicePermissions
from kolibri.core.device.models import DeviceSettings
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
DUMMY_PASSWORD = "password"
class ContentNodeTestBase(object):
    """
    Basecase for content metadata methods.

    Mixin of assertions over content models; subclasses supply the actual
    TestCase machinery and fixtures. NOTE(review): the exact titles ("root",
    "c1", "c2"), tag names, language codes and LocalFile ids asserted below
    presumably come from the content_test.json fixture — confirm against it.
    """

    def test_get_prerequisites_for(self):
        """
        test the directional characteristic of prerequisite relationship
        """
        c1 = content.ContentNode.objects.get(title="c1")
        root = content.ContentNode.objects.get(title="root")
        # if root is the prerequisite of c1
        expected_output = content.ContentNode.objects.filter(title__in=["root"])
        actual_output = content.ContentNode.objects.filter(prerequisite_for=c1)
        self.assertEqual(set(expected_output), set(actual_output))
        # then c1 should not be the prerequisite of root
        unexpected_output = content.ContentNode.objects.filter(title__in=["c1"])
        actual_output = content.ContentNode.objects.filter(prerequisite_for=root)
        self.assertNotEqual(set(actual_output), set(unexpected_output))

    def test_get_has_prerequisites(self):
        """
        test the directional characteristic of prerequisite relationship
        """
        c1 = content.ContentNode.objects.get(title="c1")
        root = content.ContentNode.objects.get(title="root")
        # if root is the prerequisite of c1
        expected_output = content.ContentNode.objects.filter(title__in=["c1"])
        actual_output = content.ContentNode.objects.filter(has_prerequisite=root)
        self.assertEqual(set(expected_output), set(actual_output))
        # then c1 should not be the prerequisite of root
        unexpected_output = content.ContentNode.objects.filter(title__in=["root"])
        actual_output = content.ContentNode.objects.filter(has_prerequisite=c1)
        self.assertNotEqual(set(actual_output), set(unexpected_output))

    def test_get_all_related(self):
        """
        test the nondirectional characteristic of related relationship
        """
        c1 = content.ContentNode.objects.get(title="c1")
        c2 = content.ContentNode.objects.get(title="c2")
        # if c1 is related to c2
        expected_output = content.ContentNode.objects.filter(title__in=["c2"])
        actual_output = content.ContentNode.objects.filter(related=c1)
        self.assertEqual(set(expected_output), set(actual_output))
        # then c2 should be related to c1 (symmetry of the relation)
        expected_output = content.ContentNode.objects.filter(title__in=["c1"])
        actual_output = content.ContentNode.objects.filter(related=c2)
        self.assertEqual(set(expected_output), set(actual_output))

    def test_descendants_of_kind(self):
        """Only c1 is a VIDEO descendant of root (per fixture)."""
        p = content.ContentNode.objects.get(title="root")
        expected_output = content.ContentNode.objects.filter(title__in=["c1"])
        actual_output = p.get_descendants(include_self=False).filter(
            kind=content_kinds.VIDEO
        )
        self.assertEqual(set(expected_output), set(actual_output))

    def test_get_top_level_topics(self):
        """Direct TOPIC children of root equal the filtered get_children() set."""
        p = content.ContentNode.objects.get(title="root")
        expected_output = content.ContentNode.objects.filter(
            parent=p, kind=content_kinds.TOPIC
        )
        actual_output = (
            content.ContentNode.objects.get(title="root")
            .get_children()
            .filter(kind=content_kinds.TOPIC)
        )
        self.assertEqual(set(expected_output), set(actual_output))

    def test_tag_str(self):
        # test for ContentTag __str__
        p = content.ContentTag.objects.get(tag_name="tag_2")
        self.assertEqual(str(p), "tag_2")

    def test_lang_str(self):
        # test for Language __str__
        p = content.Language.objects.get(lang_code="en")
        self.assertEqual(str(p), "English-Test")

    def test_channelmetadata_str(self):
        # test for ChannelMetadata __str__
        p = content.ChannelMetadata.objects.get(name="testing")
        self.assertEqual(str(p), "testing")

    def test_tags(self):
        # per-node tag counts as defined by the fixture
        root_tag_count = content.ContentNode.objects.get(title="root").tags.count()
        self.assertEqual(root_tag_count, 3)
        c1_tag_count = content.ContentNode.objects.get(title="c1").tags.count()
        self.assertEqual(c1_tag_count, 1)
        c2_tag_count = content.ContentNode.objects.get(title="c2").tags.count()
        self.assertEqual(c2_tag_count, 1)
        c2c1_tag_count = content.ContentNode.objects.get(title="c2c1").tags.count()
        self.assertEqual(c2c1_tag_count, 0)

    def test_local_files(self):
        # the fixture's LocalFile rows should all exist, keyed by checksum id
        self.assertTrue(
            content.LocalFile.objects.filter(
                id="9f9438fe6b0d42dd8e913d7d04cfb2b2"
            ).exists()
        )
        self.assertTrue(
            content.LocalFile.objects.filter(
                id="725257a0570044acbd59f8cf6a68b2be"
            ).exists()
        )
        self.assertTrue(
            content.LocalFile.objects.filter(
                id="e00699f859624e0f875ac6fe1e13d648"
            ).exists()
        )
        self.assertTrue(
            content.LocalFile.objects.filter(
                id="4c30dc7619f74f97ae2ccd4fffd09bf2"
            ).exists()
        )
        self.assertTrue(
            content.LocalFile.objects.filter(
                id="8ad3fffedf144cba9492e16daec1e39a"
            ).exists()
        )

    def test_delete_tree(self):
        # deleting the channel's content tree removes its nodes and all File rows
        channel = content.ChannelMetadata.objects.first()
        channel_id = channel.id
        channel.delete_content_tree_and_files()
        self.assertFalse(
            content.ContentNode.objects.filter(channel_id=channel_id).exists()
        )
        self.assertFalse(content.File.objects.all().exists())
class ContentNodeQuerysetTestCase(TestCase):
    """Tests for the custom ContentNode queryset helpers."""

    fixtures = ["content_test.json"]
    the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"

    @classmethod
    def setUpTestData(cls):
        """Provision the device and create a facility with one admin user."""
        provision_device()
        cls.facility = Facility.objects.create(name="facility")
        cls.admin = FacilityUser.objects.create(username="admin", facility=cls.facility)
        cls.admin.set_password(DUMMY_PASSWORD)
        cls.admin.save()
        cls.facility.add_admin(cls.admin)

    def test_filter_uuid(self):
        """filter_by_uuids keeps every node when all ids are well-formed."""
        node_ids = list(content.ContentNode.objects.values_list("id", flat=True))
        matched = content.ContentNode.objects.filter_by_uuids(node_ids)
        self.assertEqual(matched.count(), len(node_ids))

    def test_filter_uuid_bad_uuid(self):
        """A malformed (injection-style) id makes filter_by_uuids match nothing."""
        node_ids = list(content.ContentNode.objects.values_list("id", flat=True))
        node_ids[0] = '7d1bOR"1"="1"d08e29c36115f1af3da99'
        matched = content.ContentNode.objects.filter_by_uuids(node_ids)
        self.assertEqual(matched.count(), 0)
# Mapping from a content kind constant to its learning-activity label.
kind_activity_map = {
    content_kinds.EXERCISE: "practice",
    content_kinds.VIDEO: "watch",
    content_kinds.AUDIO: "listen",
    content_kinds.DOCUMENT: "read",
    content_kinds.HTML5: "explore",
}


def infer_learning_activity(kind):
    """Return the learning activity for `kind` as a one-element list, or []."""
    activity = kind_activity_map.get(kind)
    return [activity] if activity else []
class ContentNodeAPITestCase(APITestCase):
"""
Testcase for content API methods
"""
fixtures = ["content_test.json"]
the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"
@classmethod
def setUpTestData(cls):
    """Provision the device and create a facility admin shared by all tests."""
    provision_device()
    cls.facility = Facility.objects.create(name="facility")
    cls.admin = FacilityUser.objects.create(username="admin", facility=cls.facility)
    cls.admin.set_password(DUMMY_PASSWORD)
    cls.admin.save()
    cls.facility.add_admin(cls.admin)
def test_prerequisite_for_filter(self):
    """Filtering by prerequisite_for=c1 should return root first."""
    target = content.ContentNode.objects.get(title="c1")
    response = self.client.get(
        reverse("kolibri:core:contentnode-list"),
        data={"prerequisite_for": target.id},
    )
    self.assertEqual(response.data[0]["title"], "root")
def test_has_prerequisite_filter(self):
    """Filtering by has_prerequisite=root should return c1 first."""
    prerequisite = content.ContentNode.objects.get(title="root")
    response = self.client.get(
        reverse("kolibri:core:contentnode-list"),
        data={"has_prerequisite": prerequisite.id},
    )
    self.assertEqual(response.data[0]["title"], "c1")
def test_related_filter(self):
    """Filtering by related=c1 should return c2 first."""
    related_to = content.ContentNode.objects.get(title="c1")
    response = self.client.get(
        reverse("kolibri:core:contentnode-list"),
        data={"related": related_to.id},
    )
    self.assertEqual(response.data[0]["title"], "c2")
def map_language(self, lang):
    """Serialize a Language object into a dict of its public fields.

    Returns None when `lang` is falsy (matching the serializer's output for
    nodes without a language).
    """
    if not lang:
        return None
    fields = ("id", "lang_code", "lang_subcode", "lang_name", "lang_direction")
    return {field: getattr(lang, field) for field in fields}
def _assert_node(self, actual, expected):
    """Assert a serialized node dict matches its ContentNode instance.

    Builds the expected serialized representation field-by-field from
    `expected` (files, thumbnail, tags, assessment metadata and all scalar
    fields) and asserts deep equality with the API payload `actual`.
    """
    assessmentmetadata = (
        expected.assessmentmetadata.all()
        .values(
            "assessment_item_ids",
            "number_of_assessments",
            "mastery_model",
            "randomize",
            "is_manipulable",
            "contentnode",
        )
        .first()
    )
    thumbnail = None
    files = []
    # Fix: removed five leftover no-op string-tuple statements
    # ("local_file__id", ..., "lang_id") that sat at the top of this loop —
    # dead remnants of an old .values(...) call. Also renamed the local
    # `file` to `file_data` to stop shadowing the builtin.
    for f in expected.files.all():
        file_data = {}
        for field in [
            "id",
            "priority",
            "preset",
            "supplementary",
            "thumbnail",
        ]:
            file_data[field] = getattr(f, field)
        # the serializer exposes the local file id as "checksum"
        file_data["checksum"] = f.local_file_id
        for field in [
            "available",
            "file_size",
            "extension",
        ]:
            file_data[field] = getattr(f.local_file, field)
        file_data["lang"] = self.map_language(f.lang)
        file_data["storage_url"] = f.get_storage_url()
        files.append(file_data)
        if f.thumbnail:
            thumbnail = f.get_storage_url()
    self.assertEqual(
        actual,
        {
            "id": expected.id,
            "available": expected.available,
            "author": expected.author,
            "channel_id": expected.channel_id,
            "coach_content": expected.coach_content,
            "content_id": expected.content_id,
            "description": expected.description,
            "duration": expected.duration,
            "learning_activities": expected.learning_activities.split(",")
            if expected.learning_activities
            else [],
            "grade_levels": expected.grade_levels.split(",")
            if expected.grade_levels
            else [],
            "resource_types": expected.resource_types.split(",")
            if expected.resource_types
            else [],
            "accessibility_labels": expected.accessibility_labels.split(",")
            if expected.accessibility_labels
            else [],
            "categories": expected.categories.split(",")
            if expected.categories
            else [],
            "kind": expected.kind,
            "lang": self.map_language(expected.lang),
            "license_description": expected.license_description,
            "license_name": expected.license_name,
            "license_owner": expected.license_owner,
            "num_coach_contents": expected.num_coach_contents,
            "options": expected.options,
            "parent": expected.parent_id,
            "sort_order": expected.sort_order,
            "title": expected.title,
            "lft": expected.lft,
            "rght": expected.rght,
            "tree_id": expected.tree_id,
            "ancestors": [],
            "tags": list(
                expected.tags.all()
                .order_by("tag_name")
                .values_list("tag_name", flat=True)
            ),
            "thumbnail": thumbnail,
            "assessmentmetadata": assessmentmetadata,
            "is_leaf": expected.kind != "topic",
            "files": files,
        },
    )
def _assert_nodes(self, data, nodes):
    """Pair serialized dicts with model instances by id order and check each."""
    actual_by_id = sorted(data, key=lambda item: item["id"])
    expected_by_id = sorted(nodes, key=lambda node: node.id)
    for actual, expected in zip(actual_by_id, expected_by_id):
        self._assert_node(actual, expected)
def test_contentnode_list(self):
    """The list endpoint returns exactly the available nodes of the tree."""
    root = content.ContentNode.objects.get(title="root")
    available_nodes = root.get_descendants(include_self=True).filter(available=True)
    expected_count = len(available_nodes)
    response = self.client.get(reverse("kolibri:core:contentnode-list"))
    self.assertEqual(len(response.data), expected_count)
    self._assert_nodes(response.data, available_nodes)
@unittest.skipIf(
    getattr(settings, "DATABASES")["default"]["ENGINE"]
    == "django.db.backends.postgresql",
    "Skipping postgres as not as vulnerable to large queries and large insertions are less performant",
)
def test_contentnode_list_long(self):
    """Exercise ancestor batching with a channel of more than 1000 nodes."""
    ChannelBuilder(num_children=10).insert_into_default_db()
    content.ContentNode.objects.update(available=True)
    available_nodes = content.ContentNode.objects.filter(available=True)
    expected_count = len(available_nodes)
    self.assertGreater(expected_count, 1000)
    response = self.client.get(reverse("kolibri:core:contentnode-list"))
    self.assertEqual(len(response.data), expected_count)
    self._assert_nodes(response.data, available_nodes)
    def _recurse_and_assert(self, data, nodes, recursion_depth=0):
        """Recursively compare serialized tree ``data`` against ``nodes``.

        For each serialized node, checks the node itself, then its "children"
        payload: when no pagination occurred ("more" is None) all available
        children must be present; otherwise the "more" cursor must point at
        the 12th child's ``rght`` value and carry the remaining depth
        (2 - recursion_depth).
        """
        for actual, expected in zip(data, nodes):
            children = actual.pop("children", None)
            self._assert_node(actual, expected)
            if children:
                child_nodes = content.ContentNode.objects.filter(
                    available=True, parent=expected
                )
                if children["more"] is None:
                    # Unpaginated: every available child is in the payload.
                    self.assertEqual(len(child_nodes), len(children["results"]))
                else:
                    # Paginated: fewer results than children, and the cursor
                    # identifies where the next page starts.
                    self.assertGreater(len(child_nodes), len(children["results"]))
                    self.assertEqual(children["more"]["id"], expected.id)
                    self.assertEqual(
                        children["more"]["params"]["next__gt"], child_nodes[11].rght
                    )
                    self.assertEqual(
                        children["more"]["params"]["depth"], 2 - recursion_depth
                    )
                self._recurse_and_assert(
                    children["results"],
                    child_nodes,
                    recursion_depth=recursion_depth + 1,
                )
def test_contentnode_tree(self):
root = content.ContentNode.objects.get(title="root")
response = self.client.get(
reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id})
)
self._recurse_and_assert([response.data], [root])
    @unittest.skipIf(
        getattr(settings, "DATABASES")["default"]["ENGINE"]
        == "django.db.backends.postgresql",
        "Skipping postgres as not as vulnerable to large queries and large insertions are less performant",
    )
    def test_contentnode_tree_long(self):
        """Tree endpoint handles a wide, generated channel (2 levels x 30
        children) and still matches the stored tree."""
        builder = ChannelBuilder(levels=2, num_children=30)
        builder.insert_into_default_db()
        content.ContentNode.objects.all().update(available=True)
        root = content.ContentNode.objects.get(id=builder.root_node["id"])
        response = self.client.get(
            reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id})
        )
        self._recurse_and_assert([response.data], [root])
def test_contentnode_tree_depth_1(self):
root = content.ContentNode.objects.get(title="root")
response = self.client.get(
reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id}),
data={"depth": 1},
)
self._recurse_and_assert([response.data], [root])
    @unittest.skipIf(
        getattr(settings, "DATABASES")["default"]["ENGINE"]
        == "django.db.backends.postgresql",
        "Skipping postgres as not as vulnerable to large queries and large insertions are less performant",
    )
    def test_contentnode_tree_next__gt(self):
        """Passing a ``next__gt`` cursor returns the remaining page of
        children (17 children, page size 12 -> 5 left) with no further
        pagination."""
        builder = ChannelBuilder(levels=2, num_children=17)
        builder.insert_into_default_db()
        content.ContentNode.objects.all().update(available=True)
        root = content.ContentNode.objects.get(id=builder.root_node["id"])
        # Cursor value is the rght of the last child on the first page.
        next__gt = content.ContentNode.objects.filter(parent=root)[11].rght
        response = self.client.get(
            reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id}),
            data={"next__gt": next__gt},
        )
        self.assertEqual(len(response.data["children"]["results"]), 5)
        self.assertIsNone(response.data["children"]["more"])
        first_node = content.ContentNode.objects.filter(parent=root)[12]
        self._recurse_and_assert(
            [response.data["children"]["results"][0]], [first_node], recursion_depth=1
        )
    @unittest.skipIf(
        getattr(settings, "DATABASES")["default"]["ENGINE"]
        == "django.db.backends.postgresql",
        "Skipping postgres as not as vulnerable to large queries and large insertions are less performant",
    )
    def test_contentnode_tree_more(self):
        """Following a child's "more" cursor fetches the remaining page for
        that child (17 children, page size 12 -> 5 left)."""
        builder = ChannelBuilder(levels=2, num_children=17)
        builder.insert_into_default_db()
        content.ContentNode.objects.all().update(available=True)
        root = content.ContentNode.objects.get(id=builder.root_node["id"])
        response = self.client.get(
            reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id})
        )
        first_child = response.data["children"]["results"][0]
        self.assertEqual(first_child["children"]["more"]["params"]["depth"], 1)
        # Replay the cursor parameters exactly as served.
        nested_page_response = self.client.get(
            reverse(
                "kolibri:core:contentnode_tree-detail",
                kwargs={"pk": first_child["children"]["more"]["id"]},
            ),
            data=first_child["children"]["more"]["params"],
        )
        self.assertEqual(len(nested_page_response.data["children"]["results"]), 5)
        self.assertIsNone(nested_page_response.data["children"]["more"])
    @mock.patch("kolibri.core.content.api.get_channel_stats_from_studio")
    def test_contentnode_granular_network_import(self, stats_mock):
        """Granular endpoint merges Studio channel stats into the node tree
        when importing over the network (no drive/peer params)."""
        c1 = content.ContentNode.objects.get(title="root")
        c1_id = c1.id
        c2_id = content.ContentNode.objects.get(title="c1").id
        c3_id = content.ContentNode.objects.get(title="c2").id
        # Nothing is available locally; totals must come from the stats mock.
        content.ContentNode.objects.all().update(available=False)
        stats = {
            c1_id: {
                "total_resources": 2,
                "coach_content": False,
                "num_coach_contents": 0,
            },
            c2_id: {
                "total_resources": 1,
                "coach_content": False,
                "num_coach_contents": 0,
            },
            c3_id: {
                "total_resources": 1,
                "coach_content": False,
                "num_coach_contents": 0,
            },
        }
        stats_mock.return_value = stats
        response = self.client.get(
            reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id})
        )
        self.assertEqual(
            response.data,
            {
                "id": c1_id,
                "title": "root",
                "kind": "topic",
                "is_leaf": False,
                "available": False,
                "total_resources": 2,
                "on_device_resources": 0,
                "coach_content": False,
                "importable": True,
                "num_coach_contents": 0,
                "new_resource": False,
                "num_new_resources": 0,
                "updated_resource": False,
                "ancestors": list(c1.get_ancestors().values("id", "title")),
                "children": [
                    {
                        "id": c2_id,
                        "title": "c1",
                        "kind": "video",
                        "is_leaf": True,
                        "available": False,
                        "total_resources": 1,
                        "on_device_resources": 0,
                        "importable": True,
                        "coach_content": False,
                        "num_coach_contents": 0,
                        "new_resource": False,
                        "num_new_resources": 0,
                        "updated_resource": False,
                    },
                    {
                        "id": c3_id,
                        "title": "c2",
                        "kind": "topic",
                        "is_leaf": False,
                        "available": False,
                        "total_resources": 1,
                        "on_device_resources": 0,
                        "importable": True,
                        "coach_content": False,
                        "num_coach_contents": 0,
                        "new_resource": False,
                        "num_new_resources": 0,
                        "updated_resource": False,
                    },
                ],
            },
        )
    @mock.patch("kolibri.core.content.api.get_channel_stats_from_disk")
    def test_contentnode_granular_local_import(self, stats_mock):
        """Granular endpoint uses disk stats when importing_from_drive_id is
        given; nodes absent from the stats (c1/video) report zero resources
        and are not importable."""
        content.LocalFile.objects.update(available=False)
        content.ContentNode.objects.update(available=False)
        c1 = content.ContentNode.objects.get(title="root")
        c1_id = c1.id
        c2_id = content.ContentNode.objects.get(title="c1").id
        c3_id = content.ContentNode.objects.get(title="c2").id
        # Note: c2_id is deliberately missing from the drive's stats.
        stats = {
            c1_id: {
                "total_resources": 1,
                "coach_content": False,
                "num_coach_contents": 0,
            },
            c3_id: {
                "total_resources": 1,
                "coach_content": False,
                "num_coach_contents": 0,
            },
        }
        stats_mock.return_value = stats
        response = self.client.get(
            reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id}),
            {"importing_from_drive_id": "123"},
        )
        self.assertEqual(
            response.data,
            {
                "id": c1_id,
                "title": "root",
                "kind": "topic",
                "is_leaf": False,
                "available": False,
                "total_resources": 1,
                "on_device_resources": 0,
                "importable": True,
                "coach_content": False,
                "num_coach_contents": 0,
                "new_resource": False,
                "num_new_resources": 0,
                "updated_resource": False,
                "ancestors": list(c1.get_ancestors().values("id", "title")),
                "children": [
                    {
                        "id": c2_id,
                        "title": "c1",
                        "kind": "video",
                        "is_leaf": True,
                        "available": False,
                        "total_resources": 0,
                        "on_device_resources": 0,
                        "importable": False,
                        "coach_content": False,
                        "num_coach_contents": 0,
                        "new_resource": False,
                        "num_new_resources": 0,
                        "updated_resource": False,
                    },
                    {
                        "id": c3_id,
                        "title": "c2",
                        "kind": "topic",
                        "is_leaf": False,
                        "available": False,
                        "total_resources": 1,
                        "on_device_resources": 0,
                        "importable": True,
                        "coach_content": False,
                        "num_coach_contents": 0,
                        "new_resource": False,
                        "num_new_resources": 0,
                        "updated_resource": False,
                    },
                ],
            },
        )
    @mock.patch("kolibri.core.content.api.get_channel_stats_from_peer")
    def test_contentnode_granular_remote_import(self, stats_mock):
        """Granular endpoint uses peer stats when importing_from_peer_id is
        given; nodes absent from the stats (c1/video) report zero resources
        and are not importable."""
        content.LocalFile.objects.update(available=False)
        content.ContentNode.objects.update(available=False)
        c1 = content.ContentNode.objects.get(title="root")
        c1_id = c1.id
        c2_id = content.ContentNode.objects.get(title="c1").id
        c3_id = content.ContentNode.objects.get(title="c2").id
        # Note: c2_id is deliberately missing from the peer's stats.
        stats = {
            c1_id: {
                "total_resources": 1,
                "coach_content": False,
                "num_coach_contents": 0,
            },
            c3_id: {
                "total_resources": 1,
                "coach_content": False,
                "num_coach_contents": 0,
            },
        }
        stats_mock.return_value = stats
        response = self.client.get(
            reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id}),
            {"importing_from_peer_id": "test"},
        )
        self.assertEqual(
            response.data,
            {
                "id": c1_id,
                "title": "root",
                "kind": "topic",
                "is_leaf": False,
                "available": False,
                "total_resources": 1,
                "on_device_resources": 0,
                "importable": True,
                "coach_content": False,
                "num_coach_contents": 0,
                "new_resource": False,
                "num_new_resources": 0,
                "updated_resource": False,
                "ancestors": list(c1.get_ancestors().values("id", "title")),
                "children": [
                    {
                        "id": c2_id,
                        "title": "c1",
                        "kind": "video",
                        "is_leaf": True,
                        "available": False,
                        "total_resources": 0,
                        "on_device_resources": 0,
                        "importable": False,
                        "coach_content": False,
                        "num_coach_contents": 0,
                        "new_resource": False,
                        "num_new_resources": 0,
                        "updated_resource": False,
                    },
                    {
                        "id": c3_id,
                        "title": "c2",
                        "kind": "topic",
                        "is_leaf": False,
                        "available": False,
                        "total_resources": 1,
                        "on_device_resources": 0,
                        "importable": True,
                        "coach_content": False,
                        "num_coach_contents": 0,
                        "new_resource": False,
                        "num_new_resources": 0,
                        "updated_resource": False,
                    },
                ],
            },
        )
    def test_contentnode_granular_export_available(self):
        """In export mode (for_export=True) an available node reports its
        on-device resources; import-only fields come back as None."""
        c1 = content.ContentNode.objects.get(title="c1")
        c1_id = c1.id
        content.ContentNode.objects.filter(title="c1").update(on_device_resources=1)
        response = self.client.get(
            reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id}),
            data={"for_export": True},
        )
        self.assertEqual(
            response.data,
            {
                "id": c1_id,
                "title": "c1",
                "kind": "video",
                "is_leaf": True,
                "available": True,
                "total_resources": 1,
                "on_device_resources": 1,
                "importable": None,
                "children": [],
                "coach_content": False,
                "num_coach_contents": 0,
                "new_resource": None,
                "num_new_resources": None,
                "updated_resource": None,
                "ancestors": list(c1.get_ancestors().values("id", "title")),
            },
        )
    def test_contentnode_granular_export_unavailable(self):
        """In export mode an unavailable node reports zero resources and
        import-only fields as None."""
        c1 = content.ContentNode.objects.get(title="c1")
        c1_id = c1.id
        content.ContentNode.objects.filter(title="c1").update(available=False)
        response = self.client.get(
            reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id}),
            data={"for_export": True},
        )
        self.assertEqual(
            response.data,
            {
                "id": c1_id,
                "title": "c1",
                "kind": "video",
                "is_leaf": True,
                "available": False,
                "total_resources": 0,
                "on_device_resources": 0,
                "importable": None,
                "children": [],
                "coach_content": False,
                "num_coach_contents": 0,
                "new_resource": None,
                "num_new_resources": None,
                "updated_resource": None,
                "ancestors": list(c1.get_ancestors().values("id", "title")),
            },
        )
def test_contentnode_retrieve(self):
c1_id = content.ContentNode.objects.get(title="c1").id
response = self.client.get(
reverse("kolibri:core:contentnode-detail", kwargs={"pk": c1_id})
)
self.assertEqual(response.data["id"], c1_id.__str__())
def test_contentnode_descendants_assessments_exercise_node(self):
c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
c1_id = c1.id
response = self.client.get(
reverse("kolibri:core:contentnode-descendants-assessments"),
data={"ids": c1_id},
)
self.assertEqual(
next(
item["num_assessments"] for item in response.data if item["id"] == c1_id
),
c1.assessmentmetadata.first().number_of_assessments,
)
def test_contentnode_descendants_assessments_exercise_parent(self):
c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
parent = c1.parent
parent_id = parent.id
response = self.client.get(
reverse("kolibri:core:contentnode-descendants-assessments"),
data={"ids": parent_id},
)
self.assertEqual(
next(
item["num_assessments"]
for item in response.data
if item["id"] == parent_id
),
c1.assessmentmetadata.first().number_of_assessments,
)
def test_contentnode_descendants_assessments_exercise_root(self):
c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
root = content.ContentNode.objects.get(parent__isnull=True)
root_id = root.id
response = self.client.get(
reverse("kolibri:core:contentnode-descendants-assessments"),
data={"ids": root_id},
)
self.assertEqual(
next(
item["num_assessments"]
for item in response.data
if item["id"] == root_id
),
c1.assessmentmetadata.first().number_of_assessments,
)
    def test_contentnode_descendants_assessments_exercise_parent_sum_siblings(self):
        """A topic's num_assessments is the sum over all of its available
        exercise children (original child + newly created sibling)."""
        c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
        parent = c1.parent
        parent_id = parent.id
        # Add a second exercise under the same parent with 5 assessments.
        sibling = content.ContentNode.objects.create(
            pk="6a406ac66b224106aa2e93f73a94333d",
            channel_id=c1.channel_id,
            content_id="ded4a083e75f4689b386fd2b706e792a",
            kind=content_kinds.EXERCISE,
            parent=parent,
            title="sibling exercise",
            available=True,
        )
        sibling_assessment_metadata = content.AssessmentMetaData.objects.create(
            id="6a406ac66b224106aa2e93f73a94333d",
            contentnode=sibling,
            number_of_assessments=5,
        )
        response = self.client.get(
            reverse("kolibri:core:contentnode-descendants-assessments"),
            data={"ids": parent_id},
        )
        self.assertEqual(
            next(
                item["num_assessments"]
                for item in response.data
                if item["id"] == parent_id
            ),
            c1.assessmentmetadata.first().number_of_assessments
            + sibling_assessment_metadata.number_of_assessments,
        )
    def test_contentnode_descendants_assessments_exercise_parent_sum_siblings_one_unavailable(
        self,
    ):
        """Unavailable exercises are excluded from the parent's
        num_assessments sum: only the available sibling counts."""
        c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
        # Make the original exercise unavailable so it must not be counted.
        c1.available = False
        c1.save()
        parent = c1.parent
        parent_id = parent.id
        sibling = content.ContentNode.objects.create(
            pk="6a406ac66b224106aa2e93f73a94333d",
            channel_id=c1.channel_id,
            content_id="ded4a083e75f4689b386fd2b706e792a",
            kind=content_kinds.EXERCISE,
            parent=parent,
            title="sibling exercise",
            available=True,
        )
        sibling_assessment_metadata = content.AssessmentMetaData.objects.create(
            id="6a406ac66b224106aa2e93f73a94333d",
            contentnode=sibling,
            number_of_assessments=5,
        )
        response = self.client.get(
            reverse("kolibri:core:contentnode-descendants-assessments"),
            data={"ids": parent_id},
        )
        self.assertEqual(
            next(
                item["num_assessments"]
                for item in response.data
                if item["id"] == parent_id
            ),
            sibling_assessment_metadata.number_of_assessments,
        )
def test_contentnode_descendants_topic_siblings_ancestor_ids(self):
root = content.ContentNode.objects.get(parent__isnull=True)
topics = content.ContentNode.objects.filter(
parent=root, kind=content_kinds.TOPIC
)
topic_ids = topics.values_list("id", flat=True)
response = self.client.get(
reverse("kolibri:core:contentnode-descendants"),
data={"ids": ",".join(topic_ids)},
)
for datum in response.data:
topic = topics.get(id=datum["ancestor_id"])
self.assertTrue(topic.get_descendants().filter(id=datum["id"]).exists())
def test_contentnode_descendants_topic_siblings_kind_filter(self):
root = content.ContentNode.objects.get(parent__isnull=True)
topics = content.ContentNode.objects.filter(
parent=root, kind=content_kinds.TOPIC
)
topic_ids = topics.values_list("id", flat=True)
response = self.client.get(
reverse("kolibri:core:contentnode-descendants"),
data={
"ids": ",".join(topic_ids),
"descendant_kind": content_kinds.EXERCISE,
},
)
for datum in response.data:
topic = topics.get(id=datum["ancestor_id"])
self.assertTrue(
topic.get_descendants()
.filter(id=datum["id"], kind=content_kinds.EXERCISE)
.exists()
)
def test_contentnode_descendants_topic_parent_child_ancestor_ids(self):
root = content.ContentNode.objects.get(parent__isnull=True)
topic = content.ContentNode.objects.filter(
parent=root, kind=content_kinds.TOPIC, children__isnull=False
).first()
response = self.client.get(
reverse("kolibri:core:contentnode-descendants"),
data={"ids": ",".join((root.id, topic.id))},
)
topic_items = [
datum for datum in response.data if datum["ancestor_id"] == topic.id
]
for node in topic.get_descendants(include_self=False).filter(available=True):
self.assertTrue(next(item for item in topic_items if item["id"] == node.id))
root_items = [
datum for datum in response.data if datum["ancestor_id"] == root.id
]
for node in root.get_descendants(include_self=False).filter(available=True):
self.assertTrue(next(item for item in root_items if item["id"] == node.id))
def test_contentnode_descendants_availability(self):
content.ContentNode.objects.all().update(available=False)
root = content.ContentNode.objects.get(parent__isnull=True)
topics = content.ContentNode.objects.filter(
parent=root, kind=content_kinds.TOPIC
)
topic_ids = topics.values_list("id", flat=True)
response = self.client.get(
reverse("kolibri:core:contentnode-descendants"),
data={"ids": ",".join(topic_ids)},
)
self.assertEqual(len(response.data), 0)
def test_contentnode_node_assessments_available(self):
content.ContentNode.objects.all().update(available=True)
root = content.ContentNode.objects.get(parent__isnull=True)
exercise_ids = (
root.get_descendants()
.filter(kind=content_kinds.EXERCISE)
.values_list("id", flat=True)
)
response = self.client.get(
reverse("kolibri:core:contentnode-node-assessments"),
data={"ids": ",".join(exercise_ids)},
)
self.assertEqual(response.data, 1)
def test_contentnode_node_assessments_not_available(self):
content.ContentNode.objects.all().update(available=False)
root = content.ContentNode.objects.get(parent__isnull=True)
exercise_ids = (
root.get_descendants()
.filter(kind=content_kinds.EXERCISE)
.values_list("id", flat=True)
)
response = self.client.get(
reverse("kolibri:core:contentnode-node-assessments"),
data={"ids": ",".join(exercise_ids)},
)
self.assertEqual(response.data, 0)
def test_contentnode_recommendations(self):
node_id = content.ContentNode.objects.get(title="c2c2").id
response = self.client.get(
reverse(
"kolibri:core:contentnode-recommendations-for", kwargs={"pk": node_id}
)
)
self.assertEqual(len(response.data), 2)
def test_contentnode_recommendations_does_error_for_unavailable_node(self):
node = content.ContentNode.objects.get(title="c2c2")
node.available = False
node.save()
node_id = node.id
response = self.client.get(
reverse(
"kolibri:core:contentnode-recommendations-for", kwargs={"pk": node_id}
)
)
self.assertEqual(response.status_code, 404)
def test_contentnode_ids(self):
titles = ["c2c2", "c2c3"]
nodes = [content.ContentNode.objects.get(title=title) for title in titles]
response = self.client.get(
reverse("kolibri:core:contentnode-list"),
data={"ids": ",".join([n.id for n in nodes])},
)
self.assertEqual(len(response.data), 2)
for i in range(len(titles)):
self.assertEqual(response.data[i]["title"], titles[i])
def test_contentnode_parent(self):
parent = content.ContentNode.objects.get(title="c2")
children = parent.get_children()
response = self.client.get(
reverse("kolibri:core:contentnode-list"),
data={"parent": parent.id, "include_coach_content": False},
)
self.assertEqual(len(response.data), children.count())
for i in range(len(children)):
self.assertEqual(response.data[i]["title"], children[i].title)
def test_contentnode_tags(self):
expected = {
"root": ["tag_1", "tag_2", "tag_3"],
"c1": ["tag_1"],
"c2": ["tag_2"],
}
for title, tags in expected.items():
node = content.ContentNode.objects.get(title=title)
response = self.client.get(
reverse("kolibri:core:contentnode-detail", kwargs={"pk": node.id})
)
self.assertEqual(set(response.data["tags"]), set(tags))
def test_channelmetadata_list(self):
response = self.client.get(reverse("kolibri:core:channel-list", kwargs={}))
self.assertEqual(response.data[0]["name"], "testing")
def test_channelmetadata_retrieve(self):
data = content.ChannelMetadata.objects.values()[0]
response = self.client.get(
reverse("kolibri:core:channel-detail", kwargs={"pk": data["id"]})
)
self.assertEqual(response.data["name"], "testing")
def test_channelmetadata_langfield(self):
data = content.ChannelMetadata.objects.first()
root_lang = content.Language.objects.get(pk=1)
data.root.lang = root_lang
data.root.save()
response = self.client.get(
reverse("kolibri:core:channel-detail", kwargs={"pk": data.id})
)
self.assertEqual(response.data["lang_code"], root_lang.lang_code)
self.assertEqual(response.data["lang_name"], root_lang.lang_name)
def test_channelmetadata_langfield_none(self):
data = content.ChannelMetadata.objects.first()
response = self.client.get(
reverse("kolibri:core:channel-detail", kwargs={"pk": data.id})
)
self.assertEqual(response.data["lang_code"], None)
self.assertEqual(response.data["lang_name"], None)
def test_channelmetadata_content_available_param_filter_lowercase_true(self):
response = self.client.get(
reverse("kolibri:core:channel-list"), {"available": "true"}
)
self.assertEqual(response.data[0]["id"], "6199dde695db4ee4ab392222d5af1e5c")
def test_channelmetadata_content_available_param_filter_uppercase_true(self):
response = self.client.get(
reverse("kolibri:core:channel-list"), {"available": True}
)
self.assertEqual(response.data[0]["id"], "6199dde695db4ee4ab392222d5af1e5c")
def test_channelmetadata_content_unavailable_param_filter_false(self):
content.ContentNode.objects.filter(title="root").update(available=False)
response = self.client.get(
reverse("kolibri:core:channel-list"), {"available": False}
)
self.assertEqual(response.data[0]["id"], "6199dde695db4ee4ab392222d5af1e5c")
def test_channelmetadata_content_available_field_true(self):
response = self.client.get(reverse("kolibri:core:channel-list"))
self.assertEqual(response.data[0]["available"], True)
def test_channelmetadata_content_available_field_false(self):
content.ContentNode.objects.filter(title="root").update(available=False)
response = self.client.get(reverse("kolibri:core:channel-list"))
self.assertEqual(response.data[0]["available"], False)
    def test_channelmetadata_has_exercises_filter(self):
        """has_exercise=True excludes channels containing no exercises."""
        # Has nothing else for that matter...
        no_exercise_channel = content.ContentNode.objects.create(
            pk="6a406ac66b224106aa2e93f73a94333d",
            channel_id="f8ec4a5d14cd4716890999da596032d2",
            content_id="ded4a083e75f4689b386fd2b706e792a",
            kind="topic",
            title="no exercise channel",
        )
        content.ChannelMetadata.objects.create(
            id="63acff41781543828861ade41dbdd7ff",
            name="no exercise channel metadata",
            root=no_exercise_channel,
        )
        # Both channels without the filter, only "testing" with it.
        no_filter_response = self.client.get(reverse("kolibri:core:channel-list"))
        self.assertEqual(len(no_filter_response.data), 2)
        with_filter_response = self.client.get(
            reverse("kolibri:core:channel-list"), {"has_exercise": True}
        )
        self.assertEqual(len(with_filter_response.data), 1)
        self.assertEqual(with_filter_response.data[0]["name"], "testing")
def test_file_list(self):
response = self.client.get(reverse("kolibri:core:file-list"))
self.assertEqual(len(response.data), 5)
def test_file_retrieve(self):
response = self.client.get(
reverse(
"kolibri:core:file-detail",
kwargs={"pk": "6bdfea4a01830fdd4a585181c0b8068c"},
)
)
self.assertEqual(response.data["preset"], "high_res_video")
    def _setup_contentnode_progress(self):
        """Create a facility, a learner, and summary logs giving c2c1 and
        c2c3 partial progress; return the created objects for assertions."""
        # set up data for testing progress_fraction field on content node endpoint
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="learner", facility=facility)
        user.set_password("pass")
        user.save()
        root = content.ContentNode.objects.get(title="root")
        c1 = content.ContentNode.objects.get(title="c1")
        c2 = content.ContentNode.objects.get(title="c2")
        c2c1 = content.ContentNode.objects.get(title="c2c1")
        c2c3 = content.ContentNode.objects.get(title="c2c3")
        for node, progress in [(c2c1, 0.7), (c2c3, 0.5)]:
            ContentSummaryLog.objects.create(
                user=user,
                content_id=node.content_id,
                progress=progress,
                channel_id=self.the_channel_id,
                start_timestamp=datetime.datetime.now(),
            )
        return facility, root, c1, c2, c2c1, c2c3
    def test_contentnode_progress_list_endpoint(self):
        """Progress is empty for anonymous users and reflects the learner's
        summary logs once logged in."""
        facility, root, c1, c2, c2c1, c2c3 = self._setup_contentnode_progress()
        response = self.client.get(reverse("kolibri:core:contentnodeprogress-list"))

        def get_progress_fraction(node):
            # Closure over the most recent response; looks up one node's
            # reported progress by content_id.
            return list(
                filter(lambda x: x["content_id"] == node.content_id, response.data)
            )[0]["progress"]

        # check that there is no progress when not logged in
        self.assertEqual(len(response.data), 0)
        # check that progress is calculated appropriately when user is logged in
        self.client.login(username="learner", password="pass", facility=facility)
        response = self.client.get(reverse("kolibri:core:contentnodeprogress-list"))
        self.assertEqual(get_progress_fraction(c2c1), 0.7)
def test_filtering_coach_content_anon(self):
response = self.client.get(
reverse("kolibri:core:contentnode-list"),
data={"include_coach_content": False},
)
# TODO make content_test.json fixture more organized. Here just, hardcoding the correct count
self.assertEqual(len(response.data), 7)
def test_filtering_coach_content_admin(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD)
response = self.client.get(
reverse("kolibri:core:contentnode-list"),
data={"include_coach_content": True},
)
expected_output = content.ContentNode.objects.exclude(
available=False
).count() # coach_content node should be returned
self.assertEqual(len(response.data), expected_output)
def test_copies(self):
# the pk is actually a content id
response = self.client.get(
reverse(
"kolibri:core:contentnode-copies",
kwargs={"pk": "c6f49ea527824f398f4d5d26faf19396"},
)
)
expected_titles = set(["root", "c1", "copy"])
response_titles = set()
for node in response.data[0]:
response_titles.add(node["title"])
self.assertSetEqual(expected_titles, response_titles)
def test_available_copies(self):
# the pk is actually a content id
response = self.client.get(
reverse(
"kolibri:core:contentnode-copies",
kwargs={"pk": "f2332710c2fd483386cdeb5dcbdda81a"},
)
)
# no results should be returned for unavailable content node
self.assertEqual(len(response.data), 0)
def test_copies_count(self):
response = self.client.get(
reverse("kolibri:core:contentnode-copies-count"),
data={
"content_ids": "f2332710c2fd483386cdeb5dcbdda81f,c6f49ea527824f398f4d5d26faf15555,f2332710c2fd483386cdeb5dcbdda81a"
},
)
# assert non existent content id does not show up in results
# no results should be returned for unavailable content node
self.assertEqual(len(response.data), 1)
self.assertEqual(
response.data[0]["count"],
content.ContentNode.objects.filter(
content_id="f2332710c2fd483386cdeb5dcbdda81f"
).count(),
)
def test_search_total_results(self):
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "root"}
)
self.assertEqual(response.data["total_results"], 1)
def test_search_kinds(self):
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "root"}
)
self.assertEqual(list(response.data["content_kinds"]), [content_kinds.TOPIC])
def test_search_repeated_kinds(self):
# Ensure that each kind is only returned once.
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "c"}
)
kinds = response.data["content_kinds"][:]
self.assertEqual(len(kinds), len(set(kinds)))
def test_search_channels(self):
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "root"}
)
self.assertEqual(response.data["channel_ids"][:], [self.the_channel_id])
def test_search_repeated_channels(self):
# Ensure that each channel_id is only returned once.
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "c"}
)
channel_ids = response.data["channel_ids"][:]
self.assertEqual(len(channel_ids), len(set(channel_ids)))
def test_search(self):
# ensure search works when there are no words not defined
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "!?,"}
)
self.assertEqual(len(response.data["results"]), 0)
# ensure search words when there is only stopwords
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "or"}
)
self.assertEqual(len(response.data["results"]), 0)
# regular search
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "root"}
)
self.assertEqual(len(response.data["results"]), 1)
def _create_session_logs(self):
content_ids = (
"f2332710c2fd483386cdeb5ecbdda81f",
"ce603df7c46b424b934348995e1b05fb",
"481e1bda1faa445d801ceb2afbd2f42f",
)
channel_id = "6199dde695db4ee4ab392222d5af1e5c"
[
ContentSessionLog.objects.create(
channel_id=channel_id,
content_id=content_ids[0],
start_timestamp=timezone.now(),
kind="audio",
)
for _ in range(50)
]
[
ContentSessionLog.objects.create(
channel_id=channel_id,
content_id=content_ids[1],
start_timestamp=timezone.now(),
kind="exercise",
)
for _ in range(25)
]
[
ContentSessionLog.objects.create(
channel_id=channel_id,
content_id=content_ids[2],
start_timestamp=timezone.now(),
kind="document",
)
for _ in range(1)
]
# create log for non existent content id
# should not show up in api response
ContentSessionLog.objects.create(
channel_id=uuid.uuid4().hex,
content_id=uuid.uuid4().hex,
start_timestamp=timezone.now(),
kind="content",
)
return content_ids
def test_popular(self):
expected_content_ids = self._create_session_logs()
response = self.client.get(reverse("kolibri:core:contentnode-popular"))
response_content_ids = {node["content_id"] for node in response.json()}
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_popular_no_coach_content(self):
expected_content_ids = self._create_session_logs()
node = content.ContentNode.objects.get(content_id=expected_content_ids[0])
node.coach_content = True
node.save()
expected_content_ids = expected_content_ids[1:]
response = self.client.get(
reverse("kolibri:core:contentnode-popular"),
data={"include_coach_content": False},
)
response_content_ids = {node["content_id"] for node in response.json()}
self.assertSetEqual(set(expected_content_ids), response_content_ids)
    def test_popular_coach_has_coach_content(self):
        """A logged-in coach requesting coach content sees all popular nodes,
        including the one flagged as coach content."""
        coach = FacilityUser.objects.create(username="coach", facility=self.facility)
        coach.set_password(DUMMY_PASSWORD)
        coach.save()
        self.facility.add_coach(coach)
        expected_content_ids = self._create_session_logs()
        node = content.ContentNode.objects.get(content_id=expected_content_ids[0])
        node.coach_content = True
        node.save()
        self.client.login(username="coach", password=DUMMY_PASSWORD)
        response = self.client.get(
            reverse("kolibri:core:contentnode-popular"),
            data={"include_coach_content": True},
        )
        response_content_ids = {node["content_id"] for node in response.json()}
        self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_popular_ten_minute_cache(self):
self._create_session_logs()
response = self.client.get(reverse("kolibri:core:contentnode-popular"))
self.assertEqual(response["Cache-Control"], "max-age=600")
    def _create_summary_logs(self):
        """Create a user with one in-progress summary log, one completed log,
        and one log for a nonexistent node.

        Returns:
            tuple: (user, content_ids) where content_ids holds only the id
            the resume endpoint should surface (the in-progress one).
        """
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="user", facility=facility)
        content_ids = ("f2332710c2fd483386cdeb5ecbdda81f",)
        channel_id = "6199dde695db4ee4ab392222d5af1e5c"
        # In-progress log: should be surfaced by the resume endpoint.
        ContentSummaryLog.objects.create(
            channel_id=channel_id,
            content_id=content_ids[0],
            user_id=user.id,
            start_timestamp=timezone.now(),
            kind="audio",
            progress=0.5,
        )
        # create log with progress of 1
        # should not show up in api response
        ContentSummaryLog.objects.create(
            channel_id=channel_id,
            content_id="ce603df7c46b424b934348995e1b05fb",
            user_id=user.id,
            progress=1,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        # create log for non existent content id
        # should not show up in api response
        ContentSummaryLog.objects.create(
            channel_id=uuid.uuid4().hex,
            content_id=uuid.uuid4().hex,
            user_id=user.id,
            start_timestamp=timezone.now(),
            kind="content",
            progress=0.5,
        )
        user.set_password(DUMMY_PASSWORD)
        user.save()
        return user, content_ids
def test_resume(self):
user, expected_content_ids = self._create_summary_logs()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.get(reverse("kolibri:core:contentnode-resume"))
response_content_ids = {node["content_id"] for node in response.json()}
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_resume_zero_cache(self):
user, expected_content_ids = self._create_summary_logs()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.get(reverse("kolibri:core:contentnode-resume"))
self.assertEqual(response["Cache-Control"], "max-age=0")
    def test_next_steps_prereq(self):
        """Completing a node surfaces the node it is a prerequisite for."""
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="user", facility=facility)
        root = content.ContentNode.objects.get(title="root")
        # progress=1 marks the prerequisite node as completed.
        ContentSummaryLog.objects.create(
            channel_id=root.channel_id,
            content_id=root.content_id,
            user_id=user.id,
            progress=1,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        user.set_password(DUMMY_PASSWORD)
        user.save()
        self.client.login(username=user.username, password=DUMMY_PASSWORD)
        post_req = root.prerequisite_for.first()
        expected_content_ids = (post_req.content_id,)
        response = self.client.get(reverse("kolibri:core:contentnode-next-steps"))
        response_content_ids = {node["content_id"] for node in response.json()}
        self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_next_steps_prereq_zero_cache(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
root = content.ContentNode.objects.get(title="root")
ContentSummaryLog.objects.create(
channel_id=root.channel_id,
content_id=root.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.get(reverse("kolibri:core:contentnode-next-steps"))
self.assertEqual(response["Cache-Control"], "max-age=0")
    def test_next_steps_prereq_in_progress(self):
        """A post-requisite that is already in progress is not suggested."""
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="user", facility=facility)
        root = content.ContentNode.objects.get(title="root")
        # Complete the prerequisite node.
        ContentSummaryLog.objects.create(
            channel_id=root.channel_id,
            content_id=root.content_id,
            user_id=user.id,
            progress=1,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        user.set_password(DUMMY_PASSWORD)
        user.save()
        self.client.login(username=user.username, password=DUMMY_PASSWORD)
        post_req = root.prerequisite_for.first()
        # The post-requisite already has partial progress, so it belongs to
        # "resume", not "next steps".
        ContentSummaryLog.objects.create(
            channel_id=post_req.channel_id,
            content_id=post_req.content_id,
            user_id=user.id,
            progress=0.5,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        expected_content_ids = []
        response = self.client.get(reverse("kolibri:core:contentnode-next-steps"))
        response_content_ids = {node["content_id"] for node in response.json()}
        self.assertSetEqual(set(expected_content_ids), response_content_ids)
    def test_next_steps_prereq_coach_content_not_coach(self):
        """Coach-only post-requisites are hidden from non-coach users."""
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="user", facility=facility)
        root = content.ContentNode.objects.get(title="root")
        ContentSummaryLog.objects.create(
            channel_id=root.channel_id,
            content_id=root.content_id,
            user_id=user.id,
            progress=1,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        user.set_password(DUMMY_PASSWORD)
        user.save()
        self.client.login(username=user.username, password=DUMMY_PASSWORD)
        post_req = root.prerequisite_for.first()
        # Flag the post-requisite as coach content; the plain learner above
        # should then receive an empty next-steps list.
        post_req.coach_content = True
        post_req.save()
        response = self.client.get(reverse("kolibri:core:contentnode-next-steps"))
        response_content_ids = {node["content_id"] for node in response.json()}
        self.assertSetEqual(set(), response_content_ids)
    def test_next_steps_prereq_coach_content_coach(self):
        """Coach-only post-requisites are visible to coach users."""
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="user", facility=facility)
        # Unlike the non-coach variant of this test, grant the coach role.
        facility.add_coach(user)
        root = content.ContentNode.objects.get(title="root")
        ContentSummaryLog.objects.create(
            channel_id=root.channel_id,
            content_id=root.content_id,
            user_id=user.id,
            progress=1,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        user.set_password(DUMMY_PASSWORD)
        user.save()
        self.client.login(username=user.username, password=DUMMY_PASSWORD)
        post_req = root.prerequisite_for.first()
        post_req.coach_content = True
        post_req.save()
        expected_content_ids = (post_req.content_id,)
        response = self.client.get(reverse("kolibri:core:contentnode-next-steps"))
        response_content_ids = {node["content_id"] for node in response.json()}
        self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_next_steps_sibling(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
node = content.ContentNode.objects.get(
content_id="ce603df7c46b424b934348995e1b05fb"
)
ContentSummaryLog.objects.create(
channel_id=node.channel_id,
content_id=node.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
sibling = node.get_next_sibling()
expected_content_ids = (sibling.content_id,)
response = self.client.get(reverse("kolibri:core:contentnode-next-steps"))
response_content_ids = {node["content_id"] for node in response.json()}
self.assertSetEqual(set(expected_content_ids), response_content_ids)
    def test_next_steps_sibling_in_progress(self):
        """A next sibling that is already in progress is not suggested."""
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="user", facility=facility)
        node = content.ContentNode.objects.get(
            content_id="ce603df7c46b424b934348995e1b05fb"
        )
        # Complete the first node.
        ContentSummaryLog.objects.create(
            channel_id=node.channel_id,
            content_id=node.content_id,
            user_id=user.id,
            progress=1,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        user.set_password(DUMMY_PASSWORD)
        user.save()
        self.client.login(username=user.username, password=DUMMY_PASSWORD)
        sibling = node.get_next_sibling()
        # The sibling already has partial progress, so it belongs to
        # "resume", not "next steps".
        ContentSummaryLog.objects.create(
            channel_id=sibling.channel_id,
            content_id=sibling.content_id,
            user_id=user.id,
            progress=0.5,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        expected_content_ids = []
        response = self.client.get(reverse("kolibri:core:contentnode-next-steps"))
        response_content_ids = {node["content_id"] for node in response.json()}
        self.assertSetEqual(set(expected_content_ids), response_content_ids)
    def test_next_steps_sibling_coach_content_not_coach(self):
        """Coach-only siblings are hidden from non-coach users."""
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="user", facility=facility)
        node = content.ContentNode.objects.get(
            content_id="ce603df7c46b424b934348995e1b05fb"
        )
        ContentSummaryLog.objects.create(
            channel_id=node.channel_id,
            content_id=node.content_id,
            user_id=user.id,
            progress=1,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        user.set_password(DUMMY_PASSWORD)
        user.save()
        self.client.login(username=user.username, password=DUMMY_PASSWORD)
        sibling = node.get_next_sibling()
        # Flag the sibling as coach content; the plain learner above should
        # then receive an empty next-steps list.
        sibling.coach_content = True
        sibling.save()
        response = self.client.get(reverse("kolibri:core:contentnode-next-steps"))
        response_content_ids = {node["content_id"] for node in response.json()}
        self.assertSetEqual(set(), response_content_ids)
    def test_next_steps_sibling_coach_content_coach(self):
        """Coach-only siblings are visible to coach users."""
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="user", facility=facility)
        # Unlike the non-coach variant of this test, grant the coach role.
        facility.add_coach(user)
        node = content.ContentNode.objects.get(
            content_id="ce603df7c46b424b934348995e1b05fb"
        )
        ContentSummaryLog.objects.create(
            channel_id=node.channel_id,
            content_id=node.content_id,
            user_id=user.id,
            progress=1,
            start_timestamp=timezone.now(),
            kind="audio",
        )
        user.set_password(DUMMY_PASSWORD)
        user.save()
        self.client.login(username=user.username, password=DUMMY_PASSWORD)
        sibling = node.get_next_sibling()
        sibling.coach_content = True
        sibling.save()
        expected_content_ids = (sibling.content_id,)
        response = self.client.get(reverse("kolibri:core:contentnode-next-steps"))
        response_content_ids = {node["content_id"] for node in response.json()}
        self.assertSetEqual(set(expected_content_ids), response_content_ids)
    def tearDown(self):
        """
        clean up files/folders created during the test
        """
        # Clear cached API responses so they cannot leak between tests.
        cache.clear()
        super(ContentNodeAPITestCase, self).tearDown()
def mock_patch_decorator(func):
    """Patch ``requests.get`` to return a canned Studio channel payload.

    Wraps *func* so that any outbound HTTP GET performed while it runs
    receives a mock whose ``.json()`` yields a single fake channel record.
    """
    import functools

    # functools.wraps preserves the test's __name__/__doc__, which test
    # runners and failure reports rely on.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        mock_object = mock.Mock()
        mock_object.json.return_value = [{"id": 1, "name": "studio"}]
        with mock.patch.object(requests, "get", return_value=mock_object):
            return func(*args, **kwargs)

    return wrapper
class KolibriStudioAPITestCase(APITestCase):
    """Tests for the remote (Kolibri Studio) channel endpoints.

    Outbound HTTP is stubbed throughout: ``mock_patch_decorator`` replaces
    ``requests.get`` with a mock returning one fake channel record.
    """
    @classmethod
    def setUpTestData(cls):
        # Remote channel endpoints require a provisioned device and a
        # superuser with device permissions.
        DeviceSettings.objects.create(is_provisioned=True)
        cls.facility = Facility.objects.create(name="facility")
        superuser = FacilityUser.objects.create(
            username="superuser", facility=cls.facility
        )
        superuser.set_password(DUMMY_PASSWORD)
        superuser.save()
        cls.superuser = superuser
        DevicePermissions.objects.create(user=superuser, is_superuser=True)
    def setUp(self):
        self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
    @mock_patch_decorator
    def test_channel_list(self):
        """The mocked Studio channel list is passed through to the client."""
        response = self.client.get(
            reverse("kolibri:core:remotechannel-list"), format="json"
        )
        self.assertEqual(response.data[0]["id"], 1)
    @mock_patch_decorator
    def test_channel_retrieve_list(self):
        """Retrieve-list for a single channel returns the mocked payload."""
        response = self.client.get(
            reverse("kolibri:core:remotechannel-retrieve-list", kwargs={"pk": 1}),
            format="json",
        )
        self.assertEqual(response.data[0]["id"], 1)
    @mock_patch_decorator
    def test_no_permission_non_superuser_channel_list(self):
        """Non-superusers are denied access to the remote channel list."""
        user = FacilityUser.objects.create(username="user", facility=self.facility)
        user.set_password(DUMMY_PASSWORD)
        user.save()
        self.client.logout()
        self.client.login(username=user.username, password=DUMMY_PASSWORD)
        response = self.client.get(
            reverse("kolibri:core:remotechannel-list"), format="json"
        )
        self.assertEqual(response.status_code, 403)
    @mock_patch_decorator
    def test_channel_retrieve(self):
        """Channel detail returns the mocked channel's name."""
        response = self.client.get(
            reverse("kolibri:core:remotechannel-detail", kwargs={"pk": "abc"}),
            format="json",
        )
        self.assertEqual(response.data["name"], "studio")
    @mock_patch_decorator
    def test_channel_info_404(self):
        """An upstream 404 from Studio is propagated to the client."""
        mock_object = mock.Mock()
        mock_object.status_code = 404
        # Override the decorator's default mock for this call only.
        requests.get.return_value = mock_object
        response = self.client.get(
            reverse("kolibri:core:remotechannel-detail", kwargs={"pk": "abc"}),
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    @mock.patch.object(requests, "get", side_effect=requests.exceptions.ConnectionError)
    def test_channel_info_offline(self, mock_get):
        """A connection error maps to 503 with an "offline" status body."""
        response = self.client.get(
            reverse("kolibri:core:remotechannel-detail", kwargs={"pk": "abc"}),
            format="json",
        )
        self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)
        self.assertEqual(response.json()["status"], "offline")
    @mock.patch.object(requests, "get", side_effect=requests.exceptions.ConnectionError)
    def test_channel_list_offline(self, mock_get):
        """A connection error on the list endpoint also maps to 503."""
        response = self.client.get(
            reverse("kolibri:core:remotechannel-list"), format="json"
        )
        self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)
        self.assertEqual(response.json()["status"], "offline")
    def tearDown(self):
        # Clear cached API responses so they cannot leak between tests.
        cache.clear()
|
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import datetime
import mock
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from testtools import matchers
from cinder import context
from cinder import exception
from cinder import objects
from cinder.objects import base
from cinder.objects import fields
from cinder import test
from cinder.tests import fake_notifier
class MyOwnedObject(base.CinderPersistentObject, base.CinderObject):
    """Minimal owned object used as a nested field of MyObj in tests."""
    VERSION = '1.0'
    fields = {'baz': fields.Field(fields.Integer())}
class MyObj(base.CinderPersistentObject, base.CinderObject,
            base.CinderObjectDictCompat):
    """Primary test fixture object.

    Declares one field of each interesting flavour (defaulted, plain,
    lazy-loaded, read-only, nested object, list of objects) plus remotable
    methods exercised by the test cases below.
    """
    VERSION = '1.6'
    fields = {'foo': fields.Field(fields.Integer(), default=1),
              'bar': fields.Field(fields.String()),
              'missing': fields.Field(fields.String()),
              'readonly': fields.Field(fields.Integer(), read_only=True),
              'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
              'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
                                                       nullable=True),
              }
    @staticmethod
    def _from_db_object(context, obj, db_obj):
        # Note: ignores the passed-in ``obj`` and builds a fresh instance.
        self = MyObj()
        self.foo = db_obj['foo']
        self.bar = db_obj['bar']
        self.missing = db_obj['missing']
        self.readonly = 1
        return self
    def obj_load_attr(self, attrname):
        # Lazy-loading hook: any unset attribute resolves to 'loaded!'.
        setattr(self, attrname, 'loaded!')
    @base.remotable_classmethod
    def query(cls, context):
        obj = cls(context=context, foo=1, bar='bar')
        obj.obj_reset_changes()
        return obj
    @base.remotable
    def marco(self, context):
        return 'polo'
    @base.remotable
    def _update_test(self, context):
        # Lets tests observe which context the remotable call ran under.
        if context.project_id == 'alternate':
            self.bar = 'alternate-context'
        else:
            self.bar = 'updated'
    @base.remotable
    def save(self, context):
        self.obj_reset_changes()
    @base.remotable
    def refresh(self, context):
        self.foo = 321
        self.bar = 'refreshed'
        self.obj_reset_changes()
    @base.remotable
    def modify_save_modify(self, context):
        # Mutate, persist (clearing the change set), then mutate again so
        # tests can verify only post-save changes remain dirty.
        self.bar = 'meow'
        self.save()
        self.foo = 42
        self.rel_object = MyOwnedObject(baz=42)
    def obj_make_compatible(self, primitive, target_version):
        super(MyObj, self).obj_make_compatible(primitive, target_version)
        # NOTE(danms): Simulate an older version that had a different
        # format for the 'bar' attribute
        if target_version == '1.1' and 'bar' in primitive:
            primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
    """Same object registered under an older version, for registry tests."""
    VERSION = '1.5'
    @classmethod
    def obj_name(cls):
        # Register under the same name as MyObj despite the class name.
        return 'MyObj'
class MyObj2(object):
    """Non-CinderObject impostor sharing MyObj's registered name."""
    @classmethod
    def obj_name(cls):
        return 'MyObj'
    @base.remotable_classmethod
    def query(cls, *args, **kwargs):
        pass
class RandomMixInWithNoFields(object):
    """Used to test object inheritance using a mixin that has no fields."""
    pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
    """MyObj subclass (via a field-less mixin) adding one extra field."""
    fields = {'new_field': fields.Field(fields.String())}
class TestMetaclass(test.TestCase):
    """Tests for CinderObjectMetaclass registration behaviour."""
    def test_obj_tracking(self):
        """Classes register by obj_name(); newest version wins on duplicates."""
        @six.add_metaclass(base.CinderObjectMetaclass)
        class NewBaseClass(object):
            VERSION = '1.0'
            fields = {}
            @classmethod
            def obj_name(cls):
                return cls.__name__
        class Fake1TestObj1(NewBaseClass):
            @classmethod
            def obj_name(cls):
                return 'fake1'
        class Fake1TestObj2(Fake1TestObj1):
            pass
        class Fake1TestObj3(Fake1TestObj1):
            VERSION = '1.1'
        class Fake2TestObj1(NewBaseClass):
            @classmethod
            def obj_name(cls):
                return 'fake2'
        class Fake1TestObj4(Fake1TestObj3):
            VERSION = '1.2'
        class Fake2TestObj2(Fake2TestObj1):
            VERSION = '1.1'
        class Fake1TestObj5(Fake1TestObj1):
            VERSION = '1.1'
        # Newest versions first in the list. Duplicate versions take the
        # newest object.
        expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
                    'fake2': [Fake2TestObj2, Fake2TestObj1]}
        self.assertEqual(expected, NewBaseClass._obj_classes)
        # The following should work, also.
        self.assertEqual(expected, Fake1TestObj1._obj_classes)
        self.assertEqual(expected, Fake1TestObj2._obj_classes)
        self.assertEqual(expected, Fake1TestObj3._obj_classes)
        self.assertEqual(expected, Fake1TestObj4._obj_classes)
        self.assertEqual(expected, Fake1TestObj5._obj_classes)
        self.assertEqual(expected, Fake2TestObj1._obj_classes)
        self.assertEqual(expected, Fake2TestObj2._obj_classes)
    def test_field_checking(self):
        """Field declarations must be field *instances*, not classes/types."""
        def create_class(field):
            class TestField(base.CinderObject):
                VERSION = '1.5'
                fields = {'foo': field()}
            return TestField
        create_class(fields.BooleanField)
        self.assertRaises(exception.ObjectFieldInvalid,
                          create_class, fields.Boolean)
        self.assertRaises(exception.ObjectFieldInvalid,
                          create_class, int)
class TestObjToPrimitive(test.TestCase):
    """Tests for base.obj_to_primitive serialization helper."""
    def test_obj_to_primitive_list(self):
        """A list-of-objects field serializes each element."""
        class MyObjElement(base.CinderObject):
            fields = {'foo': fields.IntegerField()}
            def __init__(self, foo):
                super(MyObjElement, self).__init__()
                self.foo = foo
        class MyList(base.ObjectListBase, base.CinderObject):
            fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
        mylist = MyList()
        mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
        self.assertEqual([1, 2, 3],
                         [x['foo'] for x in base.obj_to_primitive(mylist)])
    def test_obj_to_primitive_dict(self):
        """A plain object serializes to a dict of its set fields."""
        myobj = MyObj(foo=1, bar='foo')
        self.assertEqual({'foo': 1, 'bar': 'foo'},
                         base.obj_to_primitive(myobj))
    def test_obj_to_primitive_recursive(self):
        """Nested objects inside a list are serialized recursively."""
        class MyList(base.ObjectListBase, base.CinderObject):
            fields = {'objects': fields.ListOfObjectsField('MyObj')}
        mylist = MyList(objects=[MyObj(), MyObj()])
        for i, value in enumerate(mylist):
            value.foo = i
        self.assertEqual([{'foo': 0}, {'foo': 1}],
                         base.obj_to_primitive(mylist))
class TestObjMakeList(test.TestCase):
    """Tests for base.obj_make_list hydration of DB rows into a list object."""
    def test_obj_make_list(self):
        class MyList(base.ObjectListBase, base.CinderObject):
            pass
        db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
                   {'foo': 2, 'bar': 'bat', 'missing': 'apple'},
                   ]
        mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
        self.assertEqual(2, len(mylist))
        # The context passed in is stored on the resulting list object.
        self.assertEqual('ctxt', mylist._context)
        for index, item in enumerate(mylist):
            self.assertEqual(db_objs[index]['foo'], item.foo)
            self.assertEqual(db_objs[index]['bar'], item.bar)
            self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
                comparators=None):
    """Compare a CinderObject and a dict-like database object.

    This automatically converts TZ-aware datetimes and iterates over
    the fields of the object.

    :param:test: The TestCase doing the comparison
    :param:obj: The CinderObject to examine
    :param:db_obj: The dict-like database object to use as reference
    :param:subs: A dict of objkey=dbkey field substitutions
    :param:allow_missing: A list of fields that may not be in db_obj
    :param:comparators: Map of comparator functions to use for certain fields
    """
    subs = {} if subs is None else subs
    allow_missing = [] if allow_missing is None else allow_missing
    comparators = {} if comparators is None else comparators
    for field_name in obj.fields:
        # Fields listed in allow_missing are skipped when unset on the object.
        if field_name in allow_missing and not obj.obj_attr_is_set(field_name):
            continue
        obj_val = getattr(obj, field_name)
        db_val = db_obj[subs.get(field_name, field_name)]
        if isinstance(obj_val, datetime.datetime):
            # Drop tzinfo so aware object datetimes compare against the
            # naive values stored in the database.
            obj_val = obj_val.replace(tzinfo=None)
        if field_name in comparators:
            comparators[field_name](db_val, obj_val)
        else:
            test.assertEqual(db_val, obj_val)
class _BaseTestCase(test.TestCase):
    """Shared setup and comparison helpers for the object test suites."""
    def setUp(self):
        super(_BaseTestCase, self).setUp()
        # Subclasses record any remote (RPC) object calls here so tests can
        # assert whether calls stayed local.
        self.remote_object_calls = list()
        self.user_id = 'fake-user'
        self.project_id = 'fake-project'
        self.context = context.RequestContext(self.user_id, self.project_id)
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)
    def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
                    comparators=None):
        # Thin wrapper over the module-level compare_obj with self as the test.
        compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
                    comparators=comparators)
    def json_comparator(self, expected, obj_val):
        # json-ify an object field for comparison with its db str
        # equivalent
        self.assertEqual(expected, jsonutils.dumps(obj_val))
    def str_comparator(self, expected, obj_val):
        """Compare an object field to a string in the db by performing
        a simple coercion on the object field value.
        """
        self.assertEqual(expected, str(obj_val))
    def assertNotIsInstance(self, obj, cls, msg=None):
        """Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
        try:
            f = super(_BaseTestCase, self).assertNotIsInstance
        except AttributeError:
            # Older unittest lacks the assertion; emulate it via testtools.
            self.assertThat(obj,
                            matchers.Not(matchers.IsInstance(cls)),
                            message=msg or '')
        else:
            f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
    """Runs the shared object tests with remote indirection disabled."""
    def setUp(self):
        super(_LocalTest, self).setUp()
        # Just in case
        base.CinderObject.indirection_api = None
    def assertRemotes(self):
        # Local runs must never have recorded any remote object calls.
        self.assertEqual(self.remote_object_calls, [])
@contextlib.contextmanager
def things_temporarily_local():
    """Temporarily disable remote object indirection.

    Saves the current ``indirection_api``, forces local (direct) object
    calls for the duration of the ``with`` block, and restores the saved
    API afterwards — even if the block raises.
    """
    _api = base.CinderObject.indirection_api
    base.CinderObject.indirection_api = None
    try:
        yield
    finally:
        # Without try/finally an exception in the with-body would leave
        # indirection permanently disabled for every subsequent test.
        base.CinderObject.indirection_api = _api
class _TestObject(object):
    def test_object_attrs_in_init(self):
        """The registry resolves MyObj to the newest registered version."""
        # MyObjDiffVers also registers as 'MyObj' at 1.5; 1.6 must win.
        self.assertEqual('1.6', objects.MyObj.VERSION)
    def test_hydration_type_error(self):
        """Hydrating with data of the wrong field type raises ValueError."""
        primitive = {'cinder_object.name': 'MyObj',
                     'cinder_object.namespace': 'cinder',
                     'cinder_object.version': '1.5',
                     'cinder_object.data': {'foo': 'a'}}
        self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'cinder_object.name': 'MyObj',
'cinder_object.namespace': 'cinder',
'cinder_object.version': '1.5',
'cinder_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'cinder_object.name': 'MyObj',
'cinder_object.namespace': 'cinder',
'cinder_object.version': '1.2',
'cinder_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'cinder_object.name': 'MyObj',
'cinder_object.namespace': 'foo',
'cinder_object.version': '1.5',
'cinder_object.data': {'foo': 1}}
self.assertRaises(exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_hydration_additional_unexpected_stuff(self):
primitive = {'cinder_object.name': 'MyObj',
'cinder_object.namespace': 'cinder',
'cinder_object.version': '1.5.1',
'cinder_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): If we call obj_from_primitive() directly
# with a version containing .z, we'll get that version
# in the resulting object. In reality, when using the
# serializer, we'll get that snipped off (tested
# elsewhere)
self.assertEqual('1.5.1', obj.VERSION)
def test_dehydration(self):
expected = {'cinder_object.name': 'MyObj',
'cinder_object.namespace': 'cinder',
'cinder_object.version': '1.6',
'cinder_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
    def test_object_dict_syntax(self):
        """Dict-style access works via CinderObjectDictCompat."""
        obj = MyObj(foo=123, bar='bar')
        self.assertEqual(obj['foo'], 123)
        self.assertEqual(sorted(obj.items(), key=lambda x: x[0]),
                         [('bar', 'bar'), ('foo', 123)])
        # NOTE(review): iteritems() is Python 2 only — confirm a six-based
        # iteration path before any Python 3 port.
        self.assertEqual(sorted(list(obj.iteritems()), key=lambda x: x[0]),
                         [('bar', 'bar'), ('foo', 123)])
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
class Foo(base.CinderObject):
fields = {'foobar': fields.Field(fields.Integer())}
obj = Foo()
with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"):
obj.foobar
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'cinder_object.name': 'MyObj',
'cinder_object.namespace': 'cinder',
'cinder_object.version': '1.6',
'cinder_object.changes': ['bar'],
'cinder_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('cinder_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_obj_class_from_name(self):
obj = base.CinderObject.obj_class_from_name('MyObj', '1.5')
self.assertEqual('1.5', obj.VERSION)
def test_obj_class_from_name_latest_compatible(self):
obj = base.CinderObject.obj_class_from_name('MyObj', '1.1')
self.assertEqual('1.6', obj.VERSION)
def test_unknown_objtype(self):
self.assertRaises(exception.UnsupportedObjectError,
base.CinderObject.obj_class_from_name, 'foo', '1.0')
def test_obj_class_from_name_supported_version(self):
error = None
try:
base.CinderObject.obj_class_from_name('MyObj', '1.25')
except exception.IncompatibleObjectVersion as error:
pass
self.assertIsNotNone(error)
self.assertEqual('1.6', error.kwargs['supported'])
def test_with_alternate_context(self):
ctxt1 = context.RequestContext('foo', 'foo')
ctxt2 = context.RequestContext('bar', 'alternate')
obj = MyObj.query(ctxt1)
obj._update_test(ctxt2)
self.assertEqual(obj.bar, 'alternate-context')
self.assertRemotes()
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(exception.OrphanedObjectError,
obj._update_test)
self.assertRemotes()
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test(self.context)
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
self.assertRemotes()
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify(self.context)
self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertIsInstance(obj.rel_object, MyOwnedObject)
self.assertRemotes()
def test_changed_with_sub_object(self):
class ParentObject(base.CinderObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
self.assertRemotes()
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
self.assertRemotes()
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
deleted=False)
expected = {'cinder_object.name': 'MyObj',
'cinder_object.namespace': 'cinder',
'cinder_object.version': '1.6',
'cinder_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'cinder_object.data':
{'created_at': timeutils.isotime(dt),
'updated_at': timeutils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
self.assertEqual(obj.obj_to_primitive(), expected)
    def test_contains(self):
        """``in`` reflects whether a field has been set on the object."""
        obj = MyObj()
        self.assertNotIn('foo', obj)
        obj.foo = 1
        self.assertIn('foo', obj)
        # Names that are not fields at all are simply absent.
        self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_get(self):
obj = MyObj(foo=1)
# Foo has value, should not get the default
self.assertEqual(1, obj.get('foo', 2))
# Foo has value, should return the value without error
self.assertEqual(1, obj.get('foo'))
# Bar is not loaded, so we should get the default
self.assertEqual('not-loaded', obj.get('bar', 'not-loaded'))
# Bar without a default should lazy-load
self.assertEqual('loaded!', obj.get('bar'))
# Bar now has a default, but loaded value should be returned
self.assertEqual('loaded!', obj.get('bar', 'not-loaded'))
# Invalid attribute should return None
self.assertEqual(None, obj.get('nothing'))
def test_object_inheritance(self):
base_fields = base.CinderPersistentObject.fields.keys()
myobj_fields = (['foo', 'bar', 'missing',
'readonly', 'rel_object', 'rel_objects'] +
base_fields)
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(TestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(TestSubclassedObject.fields.keys()))
def test_obj_as_admin(self):
obj = MyObj(context=self.context)
def fake(*args, **kwargs):
self.assertTrue(obj._context.is_admin)
with mock.patch.object(obj, 'obj_reset_changes') as mock_fn:
mock_fn.side_effect = fake
with obj.obj_as_admin():
obj.save()
self.assertTrue(mock_fn.called)
self.assertFalse(obj._context.is_admin)
def test_obj_as_admin_orphaned(self):
def testme():
obj = MyObj()
with obj.obj_as_admin():
pass
self.assertRaises(exception.OrphanedObjectError, testme)
    def test_get_changes(self):
        """obj_get_changes tracks modified fields until changes are reset."""
        obj = MyObj()
        self.assertEqual({}, obj.obj_get_changes())
        obj.foo = 123
        self.assertEqual({'foo': 123}, obj.obj_get_changes())
        obj.bar = 'test'
        self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
        obj.obj_reset_changes()
        self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
class TestObj(base.CinderObject):
fields = {'foo': fields.Field(fields.Integer())}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_obj_read_only(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.readonly = 1
self.assertRaises(exception.ReadOnlyFieldError, setattr,
obj, 'readonly', 2)
def test_obj_repr(self):
obj = MyObj(foo=123)
self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
'deleted_at=<?>,foo=123,missing=<?>,readonly=<?>,'
'rel_object=<?>,rel_objects=<?>,updated_at=<?>)',
repr(obj))
def test_obj_make_obj_compatible(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_object=subobj)
obj.obj_relationships = {
'rel_object': [('1.5', '1.1'), ('1.7', '1.2')],
}
primitive = obj.obj_to_primitive()['cinder_object.data']
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
obj._obj_make_obj_compatible(copy.copy(primitive), '1.8',
'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
obj._obj_make_obj_compatible(copy.copy(primitive),
'1.7', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['cinder_object.data'], '1.2')
self.assertEqual('1.2',
primitive['rel_object']['cinder_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
obj._obj_make_obj_compatible(copy.copy(primitive),
'1.6', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['cinder_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['cinder_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
obj._obj_make_obj_compatible(copy.copy(primitive), '1.5',
'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['cinder_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['cinder_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
_prim = copy.copy(primitive)
obj._obj_make_obj_compatible(_prim, '1.4', 'rel_object')
self.assertFalse(mock_compat.called)
self.assertNotIn('rel_object', _prim)
def test_obj_make_compatible_hits_sub_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10',
'rel_object')
def test_obj_make_compatible_skips_unset_sub_objects(self):
obj = MyObj(foo=123)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
self.assertFalse(mock_compat.called)
def test_obj_make_compatible_complains_about_missing_rules(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {}
self.assertRaises(exception.ObjectActionError,
obj.obj_make_compatible, {}, '1.0')
def test_obj_make_compatible_handles_list_of_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_objects=[subobj])
obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]}
def fake_make_compat(primitive, version):
self.assertEqual('1.123', version)
self.assertIn('baz', primitive)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc:
mock_mc.side_effect = fake_make_compat
obj.obj_to_primitive('1.0')
self.assertTrue(mock_mc.called)
class TestObject(_LocalTest, _TestObject):
    """Runs the shared object tests in-process, plus default-value checks."""
    def test_set_defaults(self):
        # obj_set_defaults('foo') must populate that field's declared default.
        obj = MyObj()
        obj.obj_set_defaults('foo')
        self.assertTrue(obj.obj_attr_is_set('foo'))
        self.assertEqual(1, obj.foo)
    def test_set_defaults_no_default(self):
        # Requesting a default for a field that declares none is an error.
        obj = MyObj()
        self.assertRaises(exception.ObjectActionError,
                          obj.obj_set_defaults, 'bar')
    def test_set_all_defaults(self):
        # With no argument, every field that has a default gets populated.
        obj = MyObj()
        obj.obj_set_defaults()
        self.assertEqual(set(['deleted', 'foo']), obj.obj_what_changed())
        self.assertEqual(1, obj.foo)
class TestObjectListBase(test.TestCase):
    """Tests for ObjectListBase: list protocol, serialization, dirty-tracking."""
    def test_list_like_operations(self):
        # A list-object must support the standard Python list protocol.
        class MyElement(base.CinderObject):
            fields = {'foo': fields.IntegerField()}
            def __init__(self, foo):
                super(MyElement, self).__init__()
                self.foo = foo
        class Foo(base.ObjectListBase, base.CinderObject):
            fields = {'objects': fields.ListOfObjectsField('MyElement')}
        objlist = Foo(context='foo',
                      objects=[MyElement(1), MyElement(2), MyElement(3)])
        self.assertEqual(list(objlist), objlist.objects)
        self.assertEqual(len(objlist), 3)
        self.assertIn(objlist.objects[0], objlist)
        # Slicing yields a new list-object that keeps the parent's context.
        self.assertEqual(list(objlist[:1]), [objlist.objects[0]])
        self.assertEqual(objlist[:1]._context, 'foo')
        self.assertEqual(objlist[2], objlist.objects[2])
        self.assertEqual(objlist.count(objlist.objects[0]), 1)
        self.assertEqual(objlist.index(objlist.objects[1]), 1)
        objlist.sort(key=lambda x: x.foo, reverse=True)
        self.assertEqual([3, 2, 1],
                         [x.foo for x in objlist])
    def test_serialization(self):
        # Round-trip through obj_to_primitive()/obj_from_primitive().
        class Foo(base.ObjectListBase, base.CinderObject):
            fields = {'objects': fields.ListOfObjectsField('Bar')}
        class Bar(base.CinderObject):
            fields = {'foo': fields.Field(fields.String())}
        obj = Foo(objects=[])
        for i in 'abc':
            bar = Bar(foo=i)
            obj.objects.append(bar)
        obj2 = base.CinderObject.obj_from_primitive(obj.obj_to_primitive())
        self.assertFalse(obj is obj2)
        self.assertEqual([x.foo for x in obj],
                         [y.foo for y in obj2])
    def test_list_changes(self):
        # The list reports itself dirty while it or any child is dirty.
        class Foo(base.ObjectListBase, base.CinderObject):
            fields = {'objects': fields.ListOfObjectsField('Bar')}
        class Bar(base.CinderObject):
            fields = {'foo': fields.StringField()}
        obj = Foo(objects=[])
        self.assertEqual(set(['objects']), obj.obj_what_changed())
        obj.objects.append(Bar(foo='test'))
        self.assertEqual(set(['objects']), obj.obj_what_changed())
        obj.obj_reset_changes()
        # This should still look dirty because the child is dirty
        self.assertEqual(set(['objects']), obj.obj_what_changed())
        obj.objects[0].obj_reset_changes()
        # This should now look clean because the child is clean
        self.assertEqual(set(), obj.obj_what_changed())
    def test_initialize_objects(self):
        # A freshly-created list-object starts empty with no pending changes.
        class Foo(base.ObjectListBase, base.CinderObject):
            fields = {'objects': fields.ListOfObjectsField('Bar')}
        class Bar(base.CinderObject):
            fields = {'foo': fields.StringField()}
        obj = Foo()
        self.assertEqual([], obj.objects)
        self.assertEqual(set(), obj.obj_what_changed())
    def test_obj_repr(self):
        # repr() renders the contained objects compactly.
        class Foo(base.ObjectListBase, base.CinderObject):
            fields = {'objects': fields.ListOfObjectsField('Bar')}
        class Bar(base.CinderObject):
            fields = {'uuid': fields.StringField()}
        obj = Foo(objects=[Bar(uuid='fake-uuid')])
        self.assertEqual('Foo(objects=[Bar(fake-uuid)])', repr(obj))
class TestObjectSerializer(_BaseTestCase):
    """Tests for CinderObjectSerializer round-tripping and version backports."""
    def test_serialize_entity_primitive(self):
        # Plain primitives pass through serialization untouched.
        ser = base.CinderObjectSerializer()
        for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
            self.assertEqual(thing, ser.serialize_entity(None, thing))
    def test_deserialize_entity_primitive(self):
        # Plain primitives pass through deserialization untouched.
        ser = base.CinderObjectSerializer()
        for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
            self.assertEqual(thing, ser.deserialize_entity(None, thing))
    def _test_deserialize_entity_newer(self, obj_version, backported_to,
                                       my_version='1.6'):
        # Helper: deserialize a primitive claiming *obj_version* while we only
        # know *my_version*; verify whether a conductor backport is requested.
        ser = base.CinderObjectSerializer()
        ser._conductor = mock.Mock()
        ser._conductor.object_backport.return_value = 'backported'
        class MyTestObj(MyObj):
            VERSION = my_version
        obj = MyTestObj()
        obj.VERSION = obj_version
        primitive = obj.obj_to_primitive()
        ser.deserialize_entity(self.context, primitive)
        if backported_to is None:
            self.assertFalse(ser._conductor.object_backport.called)
    def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
        self._test_deserialize_entity_newer('1.6.0', None)
    def test_deserialize_entity_newer_revision_does_not_backport(self):
        self._test_deserialize_entity_newer('1.6.1', None)
    def test_deserialize_dot_z_with_extra_stuff(self):
        # Unknown fields carried by a newer .z revision are silently dropped.
        primitive = {'cinder_object.name': 'MyObj',
                     'cinder_object.namespace': 'cinder',
                     'cinder_object.version': '1.6.1',
                     'cinder_object.data': {
                         'foo': 1,
                         'unexpected_thing': 'foobar'}}
        ser = base.CinderObjectSerializer()
        obj = ser.deserialize_entity(self.context, primitive)
        self.assertEqual(1, obj.foo)
        self.assertFalse(hasattr(obj, 'unexpected_thing'))
        # NOTE(danms): The serializer is where the logic lives that
        # avoids backports for cases where only a .z difference in
        # the received object version is detected. As a result, we
        # end up with a version of what we expected, effectively the
        # .0 of the object.
        self.assertEqual('1.6', obj.VERSION)
    def test_object_serialization(self):
        # An object round-trips and regains the caller's context.
        ser = base.CinderObjectSerializer()
        obj = MyObj()
        primitive = ser.serialize_entity(self.context, obj)
        self.assertIn('cinder_object.name', primitive)
        obj2 = ser.deserialize_entity(self.context, primitive)
        self.assertIsInstance(obj2, MyObj)
        self.assertEqual(self.context, obj2._context)
    def test_object_serialization_iterables(self):
        # Objects nested in lists/tuples/sets/dicts are (de)serialized too.
        # NOTE(review): itervalues() is Python 2 only — this file targets py2.
        ser = base.CinderObjectSerializer()
        obj = MyObj()
        for iterable in (list, tuple, set):
            thing = iterable([obj])
            primitive = ser.serialize_entity(self.context, thing)
            self.assertEqual(1, len(primitive))
            for item in primitive:
                self.assertNotIsInstance(item, base.CinderObject)
            thing2 = ser.deserialize_entity(self.context, primitive)
            self.assertEqual(1, len(thing2))
            for item in thing2:
                self.assertIsInstance(item, MyObj)
        # dict case
        thing = {'key': obj}
        primitive = ser.serialize_entity(self.context, thing)
        self.assertEqual(1, len(primitive))
        for item in primitive.itervalues():
            self.assertNotIsInstance(item, base.CinderObject)
        thing2 = ser.deserialize_entity(self.context, primitive)
        self.assertEqual(1, len(thing2))
        for item in thing2.itervalues():
            self.assertIsInstance(item, MyObj)
        # object-action updates dict case
        thing = {'foo': obj.obj_to_primitive()}
        primitive = ser.serialize_entity(self.context, thing)
        self.assertEqual(thing, primitive)
        thing2 = ser.deserialize_entity(self.context, thing)
        self.assertIsInstance(thing2['foo'], base.CinderObject)
|
|
import asyncio
import gzip
import logging
import os
from concurrent.futures import CancelledError
from json.decoder import JSONDecodeError
from ..specs.specs import resolve_executor
from ..status import status_mgr
from ..utilities.stat_utils import STATS_DPP_KEY, STATS_OUT_DP_URL_KEY
from ..utilities.extended_json import json
from .runners import runner_config
# Path to the sink script that is appended as the final step of every
# pipeline; it terminates the process chain and emits stats on stdout.
SINK = os.path.join(os.path.dirname(__file__),
                    '..', 'lib', 'internal', 'sink.py')
async def enqueue_errors(step, process, queue, debug):
    """Stream one step's stderr into *queue*, collecting a bounded error trace.

    Reads ``process.stderr`` line by line until EOF.  Every non-empty line is
    prefixed with the step (or flow) name and pushed onto *queue*; once a line
    looks like a failure (starts with 'ERROR' or 'Traceback'), the raw lines
    are also accumulated into the returned list.

    :param step: pipeline step descriptor dict (reads 'run' and '__flow')
    :param process: asyncio subprocess whose stderr is consumed
    :param queue: asyncio.Queue receiving the formatted log lines
    :param debug: when truthy, mirror each formatted line to logging.info
    :return: list of raw error lines whose first element is the step's 'run'
             id, or an empty list when no error signature was seen
    """
    out = process.stderr
    errors = []
    while True:
        try:
            line = await out.readline()
        except ValueError:
            # StreamReader raises ValueError when a line exceeds its limit.
            logging.error('Received a too long log line (>64KB), discarded')
            continue
        if line == b'':
            break  # EOF: the child closed its stderr
        line = line.decode('utf8').rstrip()
        if len(line) != 0:
            if len(errors) == 0:
                # Start capturing only once the first error signature appears.
                if line.startswith('ERROR') or line.startswith('Traceback'):
                    errors.append(step['run'])
            if len(errors) > 0:
                errors.append(line)
                if len(errors) > 1000:
                    # Keep the step id at index 0 and drop the oldest line.
                    errors.pop(1)
            if '__flow' in step:
                line = "(F) {}: {}".format(step['__flow'], line)
            else:
                line = "{}: {}".format(step['run'], line)
            if debug:
                logging.info(line)
            await queue.put(line)
    return errors
async def dequeue_errors(queue, out):
    """Drain log lines from *queue* into *out* until a ``None`` sentinel.

    Only the most recent 1000 lines are kept; older ones are discarded.
    """
    while True:
        entry = await queue.get()
        if entry is None:
            return
        out.append(entry)
        # Cap the buffer at the most recent 1000 entries.
        while len(out) > 1000:
            del out[0]
async def collect_stats(infile):
    """Consume the sink step's stdout and return the final stats object.

    The sink emits one JSON document per line; the first line must parse as
    JSON (otherwise {} is returned), and the *last* line received is parsed
    as the stats dict.

    :param infile: readable file object wrapping the read end of the pipe
    :return: parsed stats dict, or {} when nothing valid came through
    """
    reader = asyncio.StreamReader()
    reader_protocol = asyncio.StreamReaderProtocol(reader)
    transport, _ = await asyncio.get_event_loop() \
        .connect_read_pipe(lambda: reader_protocol, infile)
    count = 0
    dp = None
    stats = None
    while True:
        try:
            line = await reader.readline()
        except ValueError:
            # The line exceeded the StreamReader limit.
            logging.exception('Too large stats object!')
            break
        if line == b'':
            break  # EOF
        stats = line  # remember the latest line; only the last one matters
        if dp is None:
            # Validate that the first line is a JSON document at all.
            try:
                dp = json.loads(line.decode('ascii'))
            except JSONDecodeError:
                break
        count += 1
    transport.close()
    if dp is None or count == 0:
        return {}
    try:
        stats = json.loads(stats.decode('ascii'))
    except JSONDecodeError:
        stats = {}
    return stats
def create_process(args, cwd, wfd, rfd):
    """Build (but do not await) the subprocess coroutine for one step.

    :param args: argv for the step's executor
    :param cwd: working directory for the child
    :param wfd: fd the child writes its output to (None -> DEVNULL)
    :param rfd: fd the child reads its input from (None -> PIPE stdin)
    :return: coroutine from asyncio.create_subprocess_exec
    """
    # Only real fds are inherited by the child; Nones are placeholders.
    inherited = {fd for fd in (rfd, wfd) if fd is not None}
    stdin = asyncio.subprocess.PIPE if rfd is None else rfd
    stdout = asyncio.subprocess.DEVNULL if wfd is None else wfd
    return asyncio.create_subprocess_exec(*args,
                                          stdin=stdin,
                                          stdout=stdout,
                                          stderr=asyncio.subprocess.PIPE,
                                          pass_fds=inherited,
                                          cwd=cwd)
async def process_death_waiter(process):
    """Await *process* termination and pair it with its return code."""
    exit_code = await process.wait()
    return process, exit_code
def find_caches(pipeline_steps, pipeline_cwd):
    """Trim *pipeline_steps* to start after the latest valid cached step.

    Scans from the last step backwards for a readable gzip cache file under
    ``<pipeline_cwd>/.cache/<hash>``; when found, drops everything up to and
    including that step and prepends a ``cache_loader`` step that replays the
    cached rows.  Returns the steps unchanged when no step requests caching
    or no valid cache exists.
    """
    if not any(s.get('cache') for s in pipeline_steps):
        # No step requires caching — nothing to look for.
        return pipeline_steps
    for index, candidate in reversed(list(enumerate(pipeline_steps))):
        cache_path = os.path.join(pipeline_cwd,
                                  '.cache',
                                  candidate['_cache_hash'])
        if not os.path.exists(cache_path):
            continue
        # Sanity-check that the gzip file is actually readable.
        try:
            with gzip.open(cache_path, "rt") as canary:
                canary.seek(1)
        except Exception:  # noqa
            continue
        logging.info('Found cache for step %d: %s', index, candidate['run'])
        pipeline_steps = pipeline_steps[index + 1:]
        loader = {
            'run': 'cache_loader',
            'parameters': {
                'load-from': os.path.join('.cache', candidate['_cache_hash'])
            }
        }
        loader['executor'] = resolve_executor(loader, '.', [])
        pipeline_steps.insert(0, loader)
        break
    return pipeline_steps
async def construct_process_pipeline(pipeline_steps, pipeline_cwd, errors, debug=False):
    """Spawn one subprocess per step, chained stdout->stdin via os.pipe().

    Appends the internal sink step, wires each step's stdout to the next
    step's stdin, and starts per-step stderr collectors feeding a shared
    error queue plus a stats collector on the sink's output.

    :param pipeline_steps: list of step descriptor dicts (mutated: the sink
                           step is appended)
    :param pipeline_cwd: working directory for all child processes
    :param errors: list that receives formatted log lines as they arrive
    :param debug: verbose logging of each step being started
    :return: (processes, wait_for_finish) where wait_for_finish(failed_index)
             awaits all collectors and returns (stats, error_lines)
    """
    error_collectors = []
    processes = []
    error_queue = asyncio.Queue()
    rfd = None
    error_aggregator = \
        asyncio.ensure_future(dequeue_errors(error_queue, errors))
    # The sink terminates the chain and emits row stats on its stdout.
    pipeline_steps.append({
        'run': '(sink)',
        'executor': SINK,
        '_cache_hash': pipeline_steps[-1]['_cache_hash']
    })
    for i, step in enumerate(pipeline_steps):
        new_rfd, wfd = os.pipe()
        if debug:
            logging.info("- %s", step['run'])
        runner = runner_config.get_runner(step.get('runner'))
        args = runner.get_execution_args(step, pipeline_cwd, i)
        process = await create_process(args, pipeline_cwd, wfd, rfd)
        process.args = args[1]
        # Close the parent's copies of the fds now inherited by the child.
        if wfd is not None:
            os.close(wfd)
        if rfd is not None:
            os.close(rfd)
        processes.append(process)
        rfd = new_rfd
        error_collectors.append(
            asyncio.ensure_future(enqueue_errors(step, process, error_queue, debug))
        )
    # The last read end carries the sink's output: collect stats from it.
    error_collectors.append(
        asyncio.ensure_future(collect_stats(os.fdopen(rfd)))
    )
    def wait_for_finish(_error_collectors,
                        _error_queue,
                        _error_aggregator):
        # Closure returning the finalizer coroutine; gather() yields the
        # per-step error lists followed by the stats dict (last collector).
        async def _func(failed_index=None):
            *errors, count = await asyncio.gather(*_error_collectors)
            if failed_index is not None:
                errors = errors[failed_index]
            else:
                errors = None
            await _error_queue.put(None)
            await _error_aggregator
            return count, errors
        return _func
    return processes, \
        wait_for_finish(error_collectors,
                        error_queue,
                        error_aggregator)
async def async_execute_pipeline(pipeline_id,
                                 pipeline_steps,
                                 pipeline_cwd,
                                 trigger,
                                 execution_id,
                                 use_cache,
                                 dependencies,
                                 debug):
    """Run one pipeline execution end to end and record its status.

    Registers the execution with the status manager, optionally resumes from
    caches, builds the process chain, feeds the dependency datapackage into
    the first step's stdin, then reaps the child processes, killing the
    whole chain on the first failure or cancellation.

    :return: (success, stats, error_log) — stats is None on failure
    """
    if debug:
        logging.info("%s Async task starting", execution_id[:8])
    ps = status_mgr().get(pipeline_id)
    if not ps.start_execution(execution_id):
        # Another execution already holds this pipeline.
        logging.info("%s START EXECUTION FAILED %s, BAILING OUT", execution_id[:8], pipeline_id)
        return False, {}, []
    ps.update_execution(execution_id, [])
    if use_cache:
        if debug:
            logging.info("%s Searching for existing caches", execution_id[:8])
        pipeline_steps = find_caches(pipeline_steps, pipeline_cwd)
    execution_log = []
    if debug:
        logging.info("%s Building process chain:", execution_id[:8])
    processes, stop_error_collecting = \
        await construct_process_pipeline(pipeline_steps, pipeline_cwd, execution_log, debug)
    # Seed the first step: dependency results plus an empty datapackage.
    processes[0].stdin.write(json.dumps(dependencies).encode('utf8') + b'\n')
    processes[0].stdin.write(b'{"name": "_", "resources": []}\n')
    processes[0].stdin.close()
    def kill_all_processes():
        for to_kill in processes:
            try:
                to_kill.kill()
            except ProcessLookupError:
                pass  # already dead
    success = True
    pending = [asyncio.ensure_future(process_death_waiter(process))
               for process in processes]
    index_for_pid = dict(
        (p.pid, i)
        for i, p in enumerate(processes)
    )
    failed_index = None
    # Reap children as they exit; any non-zero exit kills the whole chain.
    while len(pending) > 0:
        done = []
        try:
            done, pending = \
                await asyncio.wait(pending,
                                   return_when=asyncio.FIRST_COMPLETED,
                                   timeout=10)
        except CancelledError:
            success = False
            kill_all_processes()
        for waiter in done:
            process, return_code = waiter.result()
            if return_code == 0:
                if debug:
                    logging.info("%s DONE %s", execution_id[:8], process.args)
                processes = [p for p in processes if p.pid != process.pid]
            else:
                # Remember the first step that failed with a positive code
                # (negative codes mean killed-by-signal, e.g. by us).
                if return_code > 0 and failed_index is None:
                    failed_index = index_for_pid[process.pid]
                if debug:
                    logging.error("%s FAILED %s: %s", execution_id[:8], process.args, return_code)
                success = False
                kill_all_processes()
    if success and not ps.update_execution(execution_id, execution_log):
        logging.error("%s FAILED to update %s", execution_id[:8], pipeline_id)
        success = False
        kill_all_processes()
    stats, error_log = await stop_error_collecting(failed_index)
    if success is False:
        stats = None
    ps.update_execution(execution_id, execution_log, hooks=True)
    ps.finish_execution(execution_id, success, stats, error_log)
    logging.info("%s DONE %s %s %r", execution_id[:8], 'V' if success else 'X', pipeline_id, stats)
    return success, stats, error_log
def execute_pipeline(spec,
                     execution_id,
                     trigger='manual',
                     use_cache=True):
    """Synchronous entry point: resolve dependencies and run the pipeline.

    Collects the output-datapackage URLs of successfully executed dependency
    pipelines, then drives :func:`async_execute_pipeline` on the event loop.

    :param spec: pipeline spec (pipeline_id, pipeline_details, path)
    :param execution_id: unique id for this execution
    :param trigger: what started the run; 'manual' enables debug logging
    :param use_cache: allow resuming from step caches
    :return: (success, stats, error_log) from the async task
    :raises KeyboardInterrupt: re-raised after cancelling the running task
    """
    debug = trigger == 'manual' or os.environ.get('DPP_DEBUG')
    logging.info("%s RUNNING %s", execution_id[:8], spec.pipeline_id)
    loop = asyncio.get_event_loop()
    if debug:
        logging.info("%s Collecting dependencies", execution_id[:8])
    dependencies = {}
    for dep in spec.pipeline_details.get('dependencies', []):
        if 'pipeline' in dep:
            dep_pipeline_id = dep['pipeline']
            # Only successful dependency runs contribute their output URL.
            pipeline_execution = status_mgr().get(dep_pipeline_id).get_last_successful_execution()
            if pipeline_execution is not None:
                result_dp = pipeline_execution.stats.get(STATS_DPP_KEY, {}).get(STATS_OUT_DP_URL_KEY)
                if result_dp is not None:
                    dependencies[dep_pipeline_id] = result_dp
    if debug:
        logging.info("%s Running async task", execution_id[:8])
    pipeline_task = \
        asyncio.ensure_future(async_execute_pipeline(spec.pipeline_id,
                                                     spec.pipeline_details.get('pipeline', []),
                                                     spec.path,
                                                     trigger,
                                                     execution_id,
                                                     use_cache,
                                                     dependencies,
                                                     debug))
    try:
        if debug:
            logging.info("%s Waiting for completion", execution_id[:8])
        return loop.run_until_complete(pipeline_task)
    except KeyboardInterrupt:
        logging.info("Caught keyboard interrupt. Cancelling tasks...")
        # Let the task observe the cancellation and clean up its children.
        pipeline_task.cancel()
        loop.run_forever()
        logging.info("Caught keyboard interrupt. DONE!")
        raise KeyboardInterrupt()
def finalize():
    """Close the process-wide asyncio event loop (call once, at shutdown)."""
    asyncio.get_event_loop().close()
|
|
# -*- coding: utf-8 -*-
"""
S3 SVG codec
@copyright: 2013-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3SVG",)
import os
from io import StringIO
from gluon import *
from gluon.contenttype import contenttype
from gluon.storage import Storage
from gluon.streamer import DEFAULT_CHUNK_SIZE
from ..s3codec import S3Codec
# =============================================================================
class S3SVG(S3Codec):
    """
        Simple SVG format codec
    """
    # -------------------------------------------------------------------------
    def __init__(self):
        """
            Constructor
        """
        pass
    # -------------------------------------------------------------------------
    def extractResource(self, resource, list_fields):
        """
            Extract the items from the resource
            @param resource: the resource
            @param list_fields: fields to include in list views
            @return: tuple (title, types, colnames, heading, items)
        """
        title = self.crud_string(resource.tablename, "title_list")
        get_vars = Storage(current.request.get_vars)
        get_vars["iColumns"] = len(list_fields)
        query, orderby, left = resource.datatable_filter(list_fields, get_vars)
        resource.add_filter(query)
        data = resource.select(list_fields,
                               left=left,
                               limit=None,
                               orderby=orderby,
                               represent=True,
                               show_links=False)
        rfields = data["rfields"]
        types = []
        colnames = []
        heading = {}
        for rfield in rfields:
            if rfield.show:
                colnames.append(rfield.colname)
                heading[rfield.colname] = rfield.label
                if rfield.virtual:
                    # Virtual fields have no DB type
                    types.append("string")
                else:
                    types.append(rfield.ftype)
        items = data["rows"]
        return (title, types, colnames, heading, items)
    # -------------------------------------------------------------------------
    def encode(self, resource, **attr):
        """
            Export data as a Scalable Vector Graphic
            @param resource: the source of the data that is to be encoded
                             as an SVG. This may be:
                             resource: the resource
                             item: a list of pre-fetched values
                                   the headings are in the first row
                                   the data types are in the second row
            @param attr: dictionary of parameters:
                 * title: The export filename
                 * list_fields: Fields to include in list views
        """
        # Get the attributes
        #list_fields = attr.get("list_fields")
        #if not list_fields:
        #    list_fields = resource.list_fields()
        # @ToDo: PostGIS can extract SVG from DB (like GeoJSON)
        # http://postgis.refractions.net/documentation/manual-1.4/ST_AsSVG.html
        if resource.prefix == "gis" and resource.name == "location":
            #list_fields.append("wkt")
            list_fields = ["wkt"]
        #elif "location_id$wkt" not in list_fields:
        else:
            #list_fields.append("location_id$wkt")
            list_fields = ["location_id$wkt"]
        # Clear the WKT represent
        current.s3db.gis_location.wkt.represent = None
        # Extract the data from the resource
        (_title, types, lfields, headers, items) = self.extractResource(resource,
                                                                        list_fields)
        # @ToDo: Support multiple records
        wkt = items[0]["gis_location.wkt"]
        if not wkt:
            # NOTE(review): execution continues with wkt=None here, which
            # will fail inside write_file — confirm whether an early return
            # is intended.
            current.log.error("No Geometry!")
        # Convert to SVG
        title = attr.get("title", resource._ids[0])
        filename = "%s.svg" % title
        # NOTE(review): write_file returns None for unsupported geometries,
        # which would make open() below fail — confirm handling upstream.
        filepath = self.write_file(filename, wkt, **attr)
        # Response headers
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".svg")
        response.headers["Content-disposition"] = disposition
        stream = open(filepath)
        return response.stream(stream, chunk_size=DEFAULT_CHUNK_SIZE,
                               request=current.request)
    # -------------------------------------------------------------------------
    @staticmethod
    def write_file(filename, wkt, **attr):
        """
            Render a WKT (Multi)Polygon as an SVG icon file in the cache dir
            @param filename: name of the SVG file to create
            @param wkt: Well-Known-Text geometry to render
            @return: full path of the written file, or None if the geometry
                     type is unsupported
        """
        from xml.etree import ElementTree as et
        # Create an SVG XML element
        # @ToDo: Allow customisation of height/width
        iheight = 74
        height = str(iheight)
        iwidth = 74
        width = str(iwidth)
        doc = et.Element("svg", width=width, height=height, version="1.1", xmlns="http://www.w3.org/2000/svg")
        # Convert WKT
        from shapely.wkt import loads as wkt_loads
        try:
            # Enable C-based speedups available from 1.2.10+
            from shapely import speedups
            speedups.enable()
        except:
            current.log.info("S3GIS",
                             "Upgrade Shapely for Performance enhancements")
        shape = wkt_loads(wkt)
        geom_type = shape.geom_type
        if geom_type not in ("MultiPolygon", "Polygon"):
            current.log.error("Unsupported Geometry", geom_type)
            return
        # Scale Points & invert Y axis
        from shapely import affinity
        bounds = shape.bounds # (minx, miny, maxx, maxy)
        # NOTE(review): a degenerate polygon with zero width/height would
        # raise ZeroDivisionError here — confirm inputs are always 2D areas.
        swidth = abs(bounds[2] - bounds[0])
        sheight = abs(bounds[3] - bounds[1])
        width_multiplier = iwidth / swidth
        height_multiplier = iheight / sheight
        multiplier = min(width_multiplier, height_multiplier) * 0.9 # Padding
        # Negative yfact flips the shape since SVG y grows downwards
        shape = affinity.scale(shape, xfact=multiplier, yfact=-multiplier, origin="centroid")
        # Center Shape
        centroid = shape.centroid
        xoff = (iwidth / 2) - centroid.x
        yoff = (iheight / 2) - centroid.y
        shape = affinity.translate(shape, xoff=xoff, yoff=yoff)
        if geom_type == "MultiPolygon":
            polygons = shape.geoms
        elif geom_type == "Polygon":
            polygons = [shape]
        # @ToDo:
        #elif geom_type == "LineString":
        #    _points = shape
        #elif geom_type == "Point":
        #    _points = [shape]
        points = []
        pappend = points.append
        for polygon in polygons:
            _points = polygon.exterior.coords
            for point in _points:
                pappend("%s,%s" % (point[0], point[1]))
        points = " ".join(points)
        # Wrap in Square for Icon
        # @ToDo: Anti-Aliased Rounded Corners
        # @ToDo: Make optional
        fill = "rgb(167, 192, 210)"
        stroke = "rgb(114, 129, 145)"
        et.SubElement(doc, "rect", width=width, height=height, fill=fill, stroke=stroke)
        # @ToDo: Allow customisation of options
        fill = "rgb(225, 225, 225)"
        stroke = "rgb(165, 165, 165)"
        et.SubElement(doc, "polygon", points=points, fill=fill, stroke=stroke)
        # @ToDo: Add Attributes from list_fields
        # Write out File
        path = os.path.join(current.request.folder, "static", "cache", "svg")
        if not os.path.exists(path):
            os.makedirs(path)
        filepath = os.path.join(path, filename)
        with open(filepath, "w") as f:
            # ElementTree doesn't write the SVG file header, so do that manually
            f.write("<?xml version=\"1.0\" standalone=\"no\"?>\n")
            f.write("<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"\n")
            f.write("\"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n")
            # BUGFIX: tostring() returns bytes by default on Python 3, which
            # cannot be written to a text-mode file; request a str instead.
            f.write(et.tostring(doc, encoding="unicode"))
        return filepath
    # -------------------------------------------------------------------------
    def decode(self, resource, source, **attr):
        """
            Import data from a Scalable Vector Graphic
            @param resource: the S3Resource
            @param source: the source
            @return: an S3XML ElementTree
            @ToDo: Handle encodings within SVG other than UTF-8
        """
        # @ToDo: Complete this!
        raise NotImplementedError
        #return root
# End =========================================================================
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import datetime
import os
import sys
import getpass
import bit.config as config
import bit.git as git
import bit._owncloud as owncloud
def list_upload(base_destination,list_of_files):
    """Map local files to their ownCloud destinations and collect folders.

    :param base_destination: remote base folder, e.g. 'up/group/proj/date'
    :param list_of_files: local files and/or directories to upload
    :return: (upload_dic, subfolders) — upload_dic maps each local file path
             to its remote path; subfolders is the sorted, de-duplicated list
             of remote folders that must exist before uploading
    """
    upload_dic={}
    subfolders=[base_destination]
    check=base_destination.split("/")
    for i in range(len(check)):
        # Collect every ancestor prefix of base_destination
        # (check[:i-len(check)] == check[:i]; i==0 yields '' and is
        # filtered out by the len(xx) > 0 filter below).
        c="/".join(check[:i-len(check)])
        subfolders.append(c)
    for f in list_of_files:
        full=os.path.abspath(f)
        if os.path.isdir(full):
            # Mirror the directory tree below base_destination.
            subfol=base_destination+"/"+os.path.basename(full)
            subfolders.append(subfol)
            for root, directories, filenames in os.walk(full):
                bad_dirs=[]
                for directory in directories:
                    if os.path.basename(directory)[0] != ".":
                        subdir=os.path.join(root, directory).split(full)[-1]
                        subdir=subfol+subdir
                        subfolders.append(subdir)
                    else:
                        # Hidden directories are excluded from the upload.
                        bad_dirs.append(os.path.basename(directory))
                for filename in filenames:
                    # NOTE(review): substring match — a filename that merely
                    # contains a hidden dir's name is skipped too; confirm
                    # this is intended.
                    if not any(x in filename for x in bad_dirs):
                        subfile=os.path.join(root,filename)
                        if os.path.isfile(subfile):
                            upload_dic[subfile]=subfol+subfile.split(full)[-1]
        elif os.path.isfile(full):
            upload_dic[full]=base_destination+"/"+os.path.basename(full)
    # De-duplicate, drop empty entries and sort parent-before-child.
    subfolders=list(set(subfolders))
    subfolders=[ xx for xx in subfolders if len(xx) > 0 ]
    subfolders.sort()
    return upload_dic, subfolders
def get_ownCloud_links(link_info,http):
    """Print the private and public URLs of a share and return the private one.

    :param link_info: share-info object whose str() contains 'path=' and
                      'url=' key/value pairs
    :param http: base address of the ownCloud server
    :return: the private (web UI) link to the shared folder
    """
    info = str(link_info)
    path = info.split("path=")[1].split(",")[0]
    # URL-encode the folder separators for the files-app 'dir' parameter.
    encoded = "%2F".join(path.split("/"))
    public_link = info.split("url=")[1].split(",")[0]
    private_link = http + "/index.php/apps/files?dir=" + encoded
    print("\nYour link:\n%s" % private_link)
    print("Public link:\n%s\n" % public_link)
    return private_link
def get_owncloud_base_folder(configdic,project_name,getfolder=None,pick_a_date=None,create_folder=None,subfolder=None):
    """Build the remote ownCloud folder path for a project and date.

    :param configdic: bit config dict with the owncloud_*_folder keys
    :param project_name: '<group>/<project>' path component
    :param getfolder: download mode — requires pick_a_date (exits otherwise)
    :param pick_a_date: explicit date folder; defaults to today's date
    :param create_folder: use the download base folder for a new share
    :param subfolder: optional extra folder below the date folder
    :return: '<base>/<project_name>/<date>[/<subfolder>]'
    """
    if getfolder:
        if not pick_a_date:
            print("--getfolder implies --pick_a_date.\nPlease use -d in \
combination with -g.\nThank you!")
            sys.exit()
        base_folder = configdic["owncloud_download_folder"]
    elif create_folder:
        base_folder = configdic["owncloud_download_folder"]
    else:
        base_folder = configdic["owncloud_upload_folder"]
    # Default to today's date when no explicit date folder was requested.
    date_part = str(datetime.date.today()) if pick_a_date is None \
        else str(pick_a_date)
    if subfolder:
        date_part = date_part + "/" + str(subfolder)
    return base_folder + "/" + project_name + "/" + date_part
def ownCloud_upload(input_files=None,message=None,gitssh=None,days_to_share=None,scripts=None,issue=None, subfolder=None, pick_a_date=None):
    """Upload project files to ownCloud, share them, and log the upload.

    All *input_files* must belong to the same project below the configured
    local path.  They are uploaded into a dated project folder on ownCloud,
    a share link valid for *days_to_share* days is created, and the upload
    is recorded in the project's wiki/scripts git repository.  Optionally
    also syncs the user's scripts folder and comments on a GitHub issue.

    :param input_files: local files/directories to upload
    :param message: commit/log message (string, or list of words)
    :param gitssh: use SSH for git operations when set
    :param days_to_share: validity period of the share link, in days
    :param scripts: when truthy, also git-sync the user's scripts folder
    :param issue: GitHub issue number to comment on, if any
    :param subfolder: optional extra subfolder below the dated folder
    :param pick_a_date: override the default (today) date folder
    """
    # Normalize the message to a single string.
    if type(message) == list:
        message=[ str(xx) for xx in message ]
        message=" ".join(message)
    else:
        message=str(message)
    configdic=config.read_bitconfig()
    # Make sure all required config values are present, prompting if needed.
    for r in config.requirements:
        if not gitssh:
            if r not in ["user_group" ]:
                while configdic[r] == None:
                    configdic=config.check_reqs([r],configdic,config_file=None, \
                    gitssh=None)
        else:
            if r not in [ "github_user", "github_pass","user_group" ]:
                while configdic[r] == None:
                    configdic=config.check_reqs([r],configdic,config_file=None, \
                    gitssh=gitssh)
    local_path=os.path.abspath(configdic["local_path"])
    # check if files all come from the same project folder
    size_local=len(local_path.split("/"))
    parent_folder=[]
    check_project=[]
    for i in input_files:
        f=os.path.abspath(i)
        parent_folder.append(f.split("/")[size_local])
        check_project.append(f.split("/")[size_local+1])
    check_project=list(set(check_project))
    if len(check_project) > 1:
        # NOTE(review): exits with status 0 despite being an error path.
        print("Found more than one project:\n")
        for p in check_project:
            print(p)
        sys.stdout.flush()
        sys.exit(0)
    else:
        project_name=check_project[0]
        parent_folder=parent_folder[0]
    target_project=parent_folder+"/"+project_name
    base_destination=get_owncloud_base_folder(configdic,target_project, subfolder=subfolder, pick_a_date=pick_a_date)
    upload_dic, subfolders=list_upload(base_destination,input_files)
    # login to owncloud
    try:
        oc=owncloud.Client(configdic["owncloud_address"])
        oc.login(configdic["owncloud_user"],configdic["owncloud_pass"])
    except:
        print("Could not login to ownCloud.\nPlease make sure you are giving \
the right address to your owncloud and using the right login credentials.")
        sys.exit(0)
    # create required subfolders in ownCloud
    for fold in subfolders:
        try:
            oc.file_info(fold)
        except:
            # file_info raises when the folder does not exist yet.
            oc.mkdir(fold)
    # Upload files
    if len(upload_dic)>1:
        print("Uploading %s files.." %str(len(upload_dic)))
        sys.stdout.flush()
    else:
        print("Uploading %s file.." %str(len(upload_dic)))
        sys.stdout.flush()
    skipped_files=[]
    for f in upload_dic:
        # Determine the file size without leaking the handle (the previous
        # implementation never closed it); ownCloud re-opens the file itself.
        with open(f, 'r', 8192) as file_handle:
            file_handle.seek(0, os.SEEK_END)
            size = file_handle.tell()
        if size == 0:
            skipped_files.append(os.path.basename(f))
            print("\t%s is empty. Skipping .. " %str(f))
            sys.stdout.flush()
            continue
        # Files above ~1.75GB are uploaded in chunks.
        if size > 1879048192:
            print("\t%s\t(chunked)" %str(upload_dic[f]))
            sys.stdout.flush()
            oc.put_file(upload_dic[f],f)
        else:
            print("\t%s" %str(upload_dic[f]))
            sys.stdout.flush()
            oc.put_file(upload_dic[f],f,chunked=False)
    print("Finished uploading.")
    # Time stamp for expiration date
    tshare = datetime.date.today()
    tshare = tshare + datetime.timedelta(days=int(days_to_share))
    tshare = time.mktime(tshare.timetuple())
    link_info = oc.share_file_with_link(base_destination,expiration=tshare)
    private_link=get_ownCloud_links(link_info,configdic["owncloud_address"])
    oc.logout()
    # Go to wiki folder and make a git sync
    print("Logging changes..")
    sys.stdout.flush()
    user_name=getpass.getuser()
    wikidir=local_path+"/"+target_project+"/wiki."+user_name
    scriptsdir=local_path+"/"+target_project+"/scripts."+user_name
    if os.path.isdir(wikidir):
        logdir=wikidir
        log_project=project_name+".wiki"
    elif os.path.isdir(scriptsdir):
        logdir=scriptsdir
        log_project=project_name
    else:
        print("Could not find wiki."+user_name+" nor scripts."+user_name)
        sys.exit(1)
    os.chdir(logdir)
    files_to_add=os.listdir(logdir)
    git.git_sync(files_to_add,"bit sync",configdic["github_address"],\
        configdic["github_organization"],log_project,\
        github_user=configdic["github_user"],github_pass=configdic["github_pass"],\
        gitssh=gitssh)
    # Write log file
    if len(skipped_files) > 0:
        skipped_files=", ".join(skipped_files)
        skipped_files="\n\n(skipped: %s)" %skipped_files
    else:
        skipped_files=""
    logfile="uploads.md"
    # NOTE(review): split("/")[3] assumes the remote base folder contributes
    # exactly three leading path components — confirm for all configs.
    # BUGFIX: `message` is already a single string here; the previous
    # `" ".join(message)` re-joined it character by character, inserting a
    # space between every letter of the log message.
    logtext="\n\n##### ["+base_destination.split("/")[3]+"\t::\t"+user_name+"]("+private_link+") : "\
    +message+"\n"+\
    str(datetime.datetime.now()).split(".")[0]+", "+str(", ".join(input_files))\
    +skipped_files
    log=open(logfile,"a")
    log.write(logtext)
    log.close()
    # push the log
    git.git_add(["uploads.md"])
    git.git_commit(message)
    git.git_push(configdic["github_address"],configdic["github_organization"],\
        log_project,github_user=configdic["github_user"],\
        github_pass=configdic["github_pass"],gitssh=gitssh)
    if scripts:
        print("Syncronizing your code..")
        sys.stdout.flush()
        os.chdir(local_path+"/"+target_project+"/scripts."+user_name)
        #files_to_add=os.listdir(local_path+"/"+target_project+"/scripts."+user_name)
        #git.git_sync(files_to_add,message,configdic["github_address"],\
        git.git_sync(["-A"],message,configdic["github_address"],\
            configdic["github_organization"],project_name,\
            github_user=configdic["github_user"],\
            github_pass=configdic["github_pass"],gitssh=gitssh)
    if issue:
        for r in [ "github_user", "github_pass"]:
            while configdic[r] == None:
                configdic=config.check_reqs([r],configdic,config_file=None, \
                gitssh=None)
        publink=str(link_info).split("url=")[1].split(",")[0]
        issueMSG="Public link: %s; Private link: %s; Commit message: %s" \
        %(publink, private_link,message)
        git.git_write_comment(issueMSG,config.get_github_api(configdic["github_address"]),\
            configdic["github_organization"],project_name,str(issue),\
            github_user=configdic["github_user"],github_pass=configdic["github_pass"])
# Config keys that must be present before any download / folder-creation
# operation against ownCloud can proceed.
downloadreqs=["owncloud_address","owncloud_upload_folder",\
        "owncloud_download_folder","owncloud_user",\
        "owncloud_pass","local_path"]
def ownCloud_download(gitssh=None,pick_a_date=None):
    """Download a dated project folder from ownCloud as a zip archive.

    :param gitssh: forwarded to the config checker (SSH-based git setup)
    :param pick_a_date: local path whose last components identify the
                        project and whose basename names the date folder;
                        also used as the output zip's base name
    """
    configdic=config.read_bitconfig()
    # Prompt for any missing required config values.
    for r in downloadreqs:
        while configdic[r] == None:
            configdic=config.check_reqs([r],configdic,config_file=None, \
            gitssh=gitssh)
    local_path=os.path.abspath(configdic["local_path"])
    # Derive '<group>/<project>' from the path components below local_path.
    size_local=len(local_path.split("/"))
    f=os.path.abspath(str(pick_a_date))
    parent_folder=f.split("/")[size_local]
    project_name=f.split("/")[size_local+1]
    target_project=parent_folder+"/"+project_name
    base_destination=get_owncloud_base_folder(configdic,target_project,getfolder=True, pick_a_date=pick_a_date)
    # login to owncloud
    try:
        oc=owncloud.Client(configdic["owncloud_address"] )
        oc.login(configdic["owncloud_user"],configdic["owncloud_pass"])
    except:
        print("Could not login to ownCloud.\nPlease make sure you are giving \
the right address to your owncloud and using the right login credentials.")
        sys.exit(0)
    oc.get_directory_as_zip(base_destination, pick_a_date+".zip")
    oc.logout()
    print("Downloaded %s.zip" %pick_a_date)
    sys.stdout.flush()
def ownCloud_create_folder(gitssh=None, pick_a_date=None, days_to_share=None):
    """Create the dated project folder on ownCloud and share it by link.

    Args:
        gitssh: optional ssh handle forwarded to ``config.check_reqs``
            when prompting for missing configuration values.
        pick_a_date: local path of the dated project folder; its two
            path components below ``local_path`` identify the parent
            folder and project name on the server.
        days_to_share: number of days until the public link expires.

    Returns:
        tuple: ``(link_info, private_link)`` — the raw share object
        returned by ownCloud and the derived private link.  (Previously
        these were computed but discarded.)

    Exits the process with status 1 when the ownCloud login fails.
    """
    configdic = config.read_bitconfig()
    # Prompt until every required configuration entry is filled in.
    for r in downloadreqs:
        while configdic[r] is None:
            configdic = config.check_reqs([r], configdic, config_file=None,
                                          gitssh=gitssh)
    local_path = os.path.abspath(configdic["local_path"])
    size_local = len(local_path.split("/"))
    f = os.path.abspath(str(pick_a_date))
    parent_folder = f.split("/")[size_local]
    project_name = f.split("/")[size_local + 1]
    target_project = parent_folder + "/" + project_name
    base_destination = get_owncloud_base_folder(
        configdic, target_project, create_folder=True, pick_a_date=pick_a_date)
    # login to owncloud
    try:
        oc = owncloud.Client(configdic["owncloud_address"])
        oc.login(configdic["owncloud_user"], configdic["owncloud_pass"])
    except Exception:
        # Was a bare ``except:`` that exited with status 0; narrow the
        # handler and report failure via a non-zero exit code.
        print("Could not login to ownCloud.\nPlease make sure you are giving \
the right address to your owncloud and using the right login credentials.")
        sys.exit(1)
    # Create every missing intermediate folder.  Start at 1 so we never
    # probe the empty path produced by joining zero components (the old
    # range(len(check)+1) did exactly that on its first iteration).
    # Leftover debug print() calls were removed.
    check = base_destination.split("/")
    for i in range(1, len(check) + 1):
        c = "/".join(check[:i])
        try:
            oc.file_info(c)
        except Exception:
            # file_info raises when the path does not exist yet —
            # presumably owncloud.HTTPResponseError; TODO confirm.
            oc.mkdir(c)
    # Time stamp for expiration date
    tshare = datetime.date.today() + datetime.timedelta(days=int(days_to_share))
    tshare = time.mktime(tshare.timetuple())
    link_info = oc.share_file_with_link(base_destination, expiration=tshare,
                                        public_upload=True)
    private_link = get_ownCloud_links(link_info, configdic["owncloud_address"])
    oc.logout()
    return link_info, private_link
|
|
from __future__ import unicode_literals
from collections import defaultdict
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connection
from django.db import models, router, transaction, DEFAULT_DB_ALIAS
from django.db.models import signals, DO_NOTHING
from django.db.models.base import ModelBase
from django.db.models.fields.related import ForeignObject, ForeignObjectRel
from django.db.models.query_utils import PathInfo
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_text, python_2_unicode_compatible
@python_2_unicode_compatible
class GenericForeignKey(object):
    """
    Provides a generic relation to any object through content-type/object-id
    fields.
    """

    # Field flags
    auto_created = False
    concrete = False
    editable = False
    hidden = False

    is_relation = True
    many_to_many = False
    many_to_one = False
    one_to_many = True
    one_to_one = False
    related_model = None

    def __init__(self, ct_field="content_type", fk_field="object_id", for_concrete_model=True):
        # Names of the two concrete model fields this virtual field is
        # built on: a FK to ContentType and the related object's pk value.
        self.ct_field = ct_field
        self.fk_field = fk_field
        self.for_concrete_model = for_concrete_model
        self.editable = False
        self.rel = None
        self.column = None

    def contribute_to_class(self, cls, name, **kwargs):
        """Register this virtual field on model class *cls* under *name*."""
        self.name = name
        self.model = cls
        # Instance attribute used to cache the resolved related object.
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_field(self, virtual=True)

        # Only run pre-initialization field assignment on non-abstract models
        if not cls._meta.abstract:
            signals.pre_init.connect(self.instance_pre_init, sender=cls)

        # Install this object as the descriptor for the field.
        setattr(cls, name, self)

    def __str__(self):
        model = self.model
        app = model._meta.app_label
        return '%s.%s.%s' % (app, model._meta.object_name, self.name)

    def check(self, **kwargs):
        """Run Django system checks for this field; returns a list of Errors."""
        errors = []
        errors.extend(self._check_field_name())
        errors.extend(self._check_object_id_field())
        errors.extend(self._check_content_type_field())
        return errors

    def _check_field_name(self):
        # fields.E001: trailing underscores clash with query-lookup syntax.
        if self.name.endswith("_"):
            return [
                checks.Error(
                    'Field names must not end with an underscore.',
                    hint=None,
                    obj=self,
                    id='fields.E001',
                )
            ]
        else:
            return []

    def _check_object_id_field(self):
        # contenttypes.E001: the object-id field must exist on the model.
        try:
            self.model._meta.get_field(self.fk_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
                    hint=None,
                    obj=self,
                    id='contenttypes.E001',
                )
            ]
        else:
            return []

    def _check_content_type_field(self):
        """ Check if field named `field_name` in model `model` exists and is
        valid content_type field (is a ForeignKey to ContentType). """
        try:
            field = self.model._meta.get_field(self.ct_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey content type references the non-existent field '%s.%s'." % (
                        self.model._meta.object_name, self.ct_field
                    ),
                    hint=None,
                    obj=self,
                    id='contenttypes.E002',
                )
            ]
        else:
            if not isinstance(field, models.ForeignKey):
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E003',
                    )
                ]
            elif field.rel.to != ContentType:
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
            else:
                return []

    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handles initializing an object with the generic FK instead of
        content-type/object-id fields.
        """
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            if value is not None:
                kwargs[self.ct_field] = self.get_content_type(obj=value)
                kwargs[self.fk_field] = value._get_pk_val()
            else:
                # Explicit None clears both underlying columns.
                kwargs[self.ct_field] = None
                kwargs[self.fk_field] = None

    def get_content_type(self, obj=None, id=None, using=None):
        """Return the ContentType for *obj* (on obj's db) or for pk *id* on *using*."""
        if obj is not None:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(
                obj, for_concrete_model=self.for_concrete_model)
        elif id is not None:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")

    def get_prefetch_queryset(self, instances, queryset=None):
        """Batch-fetch the related objects for *instances*, one query per ContentType."""
        if queryset is not None:
            raise ValueError("Custom queryset can't be used for this lookup.")

        # For efficiency, group the instances by content type and then do one
        # query per model
        fk_dict = defaultdict(set)
        # We need one instance for each group in order to get the right db:
        instance_dict = {}
        ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
        for instance in instances:
            # We avoid looking for values if either ct_id or fkey value is None
            ct_id = getattr(instance, ct_attname)
            if ct_id is not None:
                fk_val = getattr(instance, self.fk_field)
                if fk_val is not None:
                    fk_dict[ct_id].add(fk_val)
                    instance_dict[ct_id] = instance

        ret_val = []
        for ct_id, fkeys in fk_dict.items():
            instance = instance_dict[ct_id]
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))

        # For doing the join in Python, we have to match both the FK val and the
        # content type, so we use a callable that returns a (fk, class) pair.
        def gfk_key(obj):
            ct_id = getattr(obj, ct_attname)
            if ct_id is None:
                return None
            else:
                model = self.get_content_type(id=ct_id,
                                              using=obj._state.db).model_class()
                return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
                        model)

        return (ret_val,
                lambda obj: (obj._get_pk_val(), obj.__class__),
                gfk_key,
                True,
                self.cache_attr)

    def is_cached(self, instance):
        """Return True when the related object is already cached on *instance*."""
        return hasattr(instance, self.cache_attr)

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self

        try:
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None

            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id is not None:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    # Dangling reference: cache None instead of raising.
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj

    def __set__(self, instance, value):
        # Assigning an object sets both underlying columns plus the cache.
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()
            if fk is None:
                raise ValueError(
                    'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                    (value, value._meta.object_name)
                )

        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRelation(ForeignObject):
    """Provides an accessor to generic related objects (e.g. comments)"""

    # Field flags
    auto_created = False

    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False

    def __init__(self, to, **kwargs):
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = GenericRel(
            self, to,
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
        )
        # Override content-type/object-id field names on the related class
        self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
        self.content_type_field_name = kwargs.pop("content_type_field", "content_type")

        self.for_concrete_model = kwargs.pop("for_concrete_model", True)

        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False

        # This construct is somewhat of an abuse of ForeignObject. This field
        # represents a relation from pk to object_id field. But, this relation
        # isn't direct, the join is generated reverse along foreign key. So,
        # the from_field is object_id field, to_field is pk because of the
        # reverse join.
        super(GenericRelation, self).__init__(
            to, to_fields=[],
            from_fields=[self.object_id_field_name], **kwargs)

    def check(self, **kwargs):
        """Extend ForeignObject checks with a GFK-existence check on the target."""
        errors = super(GenericRelation, self).check(**kwargs)
        errors.extend(self._check_generic_foreign_key_existence())
        return errors

    def _check_generic_foreign_key_existence(self):
        # contenttypes.E004: the target model must declare a matching
        # GenericForeignKey (same ct/object-id field names).
        target = self.rel.to
        if isinstance(target, ModelBase):
            # Using `vars` is very ugly approach, but there is no better one,
            # because GenericForeignKeys are not considered as fields and,
            # therefore, are not included in `target._meta.local_fields`.
            fields = target._meta.virtual_fields
            if any(isinstance(field, GenericForeignKey) and
                    field.ct_field == self.content_type_field_name and
                    field.fk_field == self.object_id_field_name
                    for field in fields):
                return []
            else:
                return [
                    checks.Error(
                        ("The GenericRelation defines a relation with the model "
                         "'%s.%s', but that model does not have a GenericForeignKey.") % (
                            target._meta.app_label, target._meta.object_name
                        ),
                        hint=None,
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
        else:
            # Target is still a string reference (lazy relation): nothing to check.
            return []

    def resolve_related_fields(self):
        # Join from the target model's object-id field to this model's pk.
        self.to_fields = [self.model._meta.pk.name]
        return [(self.rel.to._meta.get_field(self.object_id_field_name), self.model._meta.pk)]

    def get_path_info(self):
        """Path info for traversing the relation forward (this model -> target)."""
        opts = self.rel.to._meta
        target = opts.pk
        return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)]

    def get_reverse_path_info(self):
        """Path info for traversing the relation backward (target -> this model)."""
        opts = self.model._meta
        from_opts = self.rel.to._meta
        return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]

    def get_choices_default(self):
        return super(GenericRelation, self).get_choices(include_blank=False)

    def value_to_string(self, obj):
        # Serialize as the list of related objects' primary keys.
        qs = getattr(obj, self.name).all()
        return smart_text([instance._get_pk_val() for instance in qs])

    def contribute_to_class(self, cls, name, **kwargs):
        kwargs['virtual_only'] = True
        super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)

        # Save a reference to which model this class is on for future use
        self.model = cls
        # Add the descriptor for the relation
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self, self.for_concrete_model))

    def set_attributes_from_rel(self):
        # Intentionally a no-op: this virtual field takes no attributes
        # from the relation the way a concrete FK would.
        pass

    def get_internal_type(self):
        return "ManyToManyField"

    def get_content_type(self):
        """
        Returns the content type associated with this field's model.
        """
        return ContentType.objects.get_for_model(self.model,
                                                 for_concrete_model=self.for_concrete_model)

    def get_extra_restriction(self, where_class, alias, remote_alias):
        # Restrict joins to rows whose content-type column matches this model.
        field = self.rel.to._meta.get_field(self.content_type_field_name)
        contenttype_pk = self.get_content_type().pk
        cond = where_class()
        lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
        cond.add(lookup, 'AND')
        return cond

    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``GenericRelation``.
        """
        return self.rel.to._base_manager.db_manager(using).filter(**{
            "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
                self.model, for_concrete_model=self.for_concrete_model).pk,
            "%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
        })
class ReverseGenericRelatedObjectsDescriptor(object):
    """
    This class provides the functionality that makes the related-object
    managers available as attributes on a model class, for fields that have
    multiple "remote" values and have a GenericRelation defined in their model
    (rather than having another model pointed *at* them). In the example
    "article.publications", the publications attribute is a
    ReverseGenericRelatedObjectsDescriptor instance.
    """

    def __init__(self, field, for_concrete_model=True):
        self.field = field
        self.for_concrete_model = for_concrete_model

    def __get__(self, instance, instance_type=None):
        # Accessed on the class (not an instance): return the descriptor itself.
        if instance is None:
            return self

        # Dynamically create a class that subclasses the related model's
        # default manager.
        rel_model = self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_generic_related_manager(superclass)

        qn = connection.ops.quote_name
        content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
            instance, for_concrete_model=self.for_concrete_model)

        join_cols = self.field.get_joining_columns(reverse_join=True)[0]
        manager = RelatedManager(
            model=rel_model,
            instance=instance,
            source_col_name=qn(join_cols[0]),
            target_col_name=qn(join_cols[1]),
            content_type=content_type,
            content_type_field_name=self.field.content_type_field_name,
            object_id_field_name=self.field.object_id_field_name,
            prefetch_cache_name=self.field.attname,
        )

        return manager

    def __set__(self, instance, value):
        # Assignment replaces the full related set: clear then re-add,
        # atomically on the write database.
        # Force evaluation of `value` in case it's a queryset whose
        # value could be affected by `manager.clear()`. Refs #19816.
        value = tuple(value)

        manager = self.__get__(instance)
        db = router.db_for_write(manager.model, instance=manager.instance)
        with transaction.atomic(using=db, savepoint=False):
            manager.clear()
            for obj in value:
                manager.add(obj)
def create_generic_related_manager(superclass):
    """
    Factory function for a manager that subclasses 'superclass' (which is a
    Manager) and adds behavior for generic related objects.
    """

    class GenericRelatedObjectManager(superclass):
        def __init__(self, model=None, instance=None, symmetrical=None,
                     source_col_name=None, target_col_name=None, content_type=None,
                     content_type_field_name=None, object_id_field_name=None,
                     prefetch_cache_name=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.model = model
            self.content_type = content_type
            self.symmetrical = symmetrical
            self.instance = instance
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            self.content_type_field_name = content_type_field_name
            self.object_id_field_name = object_id_field_name
            self.prefetch_cache_name = prefetch_cache_name
            self.pk_val = self.instance._get_pk_val()
            # Filters restricting every queryset to rows pointing at
            # this instance via (content type, object id).
            self.core_filters = {
                '%s__pk' % content_type_field_name: content_type.id,
                '%s' % object_id_field_name: instance._get_pk_val(),
            }

        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_generic_related_manager(manager.__class__)
            return manager_class(
                model=self.model,
                instance=self.instance,
                symmetrical=self.symmetrical,
                source_col_name=self.source_col_name,
                target_col_name=self.target_col_name,
                content_type=self.content_type,
                content_type_field_name=self.content_type_field_name,
                object_id_field_name=self.object_id_field_name,
                prefetch_cache_name=self.prefetch_cache_name,
            )
        do_not_call_in_templates = True

        def __str__(self):
            return repr(self)

        def get_queryset(self):
            # Prefer prefetched results when available; otherwise filter
            # the superclass queryset down to this instance's rows.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)

        def get_prefetch_queryset(self, instances, queryset=None):
            if queryset is None:
                queryset = super(GenericRelatedObjectManager, self).get_queryset()

            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)

            query = {
                '%s__pk' % self.content_type_field_name: self.content_type.id,
                '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
            }

            # We (possibly) need to convert object IDs to the type of the
            # instances' PK in order to match up instances:
            object_id_converter = instances[0]._meta.pk.to_python
            return (queryset.filter(**query),
                    lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
                    lambda obj: obj._get_pk_val(),
                    False,
                    self.prefetch_cache_name)

        def add(self, *objs):
            # Point each object at this instance and save it, atomically.
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                    setattr(obj, self.content_type_field_name, self.content_type)
                    setattr(obj, self.object_id_field_name, self.pk_val)
                    obj.save()
        add.alters_data = True

        def remove(self, *objs, **kwargs):
            if not objs:
                return
            bulk = kwargs.pop('bulk', True)
            self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
        remove.alters_data = True

        def clear(self, **kwargs):
            bulk = kwargs.pop('bulk', True)
            self._clear(self, bulk)
        clear.alters_data = True

        def _clear(self, queryset, bulk):
            # Delete either in one bulk query or object-by-object (the
            # latter fires per-object delete() logic) inside a transaction.
            db = router.db_for_write(self.model, instance=self.instance)
            queryset = queryset.using(db)
            if bulk:
                # `QuerySet.delete()` creates its own atomic block which
                # contains the `pre_delete` and `post_delete` signal handlers.
                queryset.delete()
            else:
                with transaction.atomic(using=db, savepoint=False):
                    for obj in queryset:
                        obj.delete()
        _clear.alters_data = True

        def create(self, **kwargs):
            # Force the new object to point at this instance.
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True

        def get_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
        get_or_create.alters_data = True

        def update_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
        update_or_create.alters_data = True

    return GenericRelatedObjectManager
class GenericRel(ForeignObjectRel):
    """Relation object used by GenericRelation fields.

    The reverse accessor name defaults to '+' unless a
    ``related_query_name`` is given, and ``on_delete`` is always
    ``DO_NOTHING``.
    """

    def __init__(self, field, to, related_name=None, limit_choices_to=None,
                 related_query_name=None):
        reverse_name = related_query_name or '+'
        super(GenericRel, self).__init__(
            field=field,
            to=to,
            related_name=reverse_name,
            limit_choices_to=limit_choices_to,
            on_delete=DO_NOTHING,
            related_query_name=related_query_name,
        )
|
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ui_field_outer_class.proto
import sys
# _b: identity on Python 3, latin-1 encode on Python 2 — used to embed the
# serialized descriptor bytes below (generated code).
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# NOTE(review): protoc-generated module — do not hand-edit; regenerate from
# ui_field_outer_class.proto instead.
_sym_db = _symbol_database.Default()




DESCRIPTOR = _descriptor.FileDescriptor(
  name='ui_field_outer_class.proto',
  package='UiFieldOuterClass',
  syntax='proto2',
  serialized_pb=_b('\n\x1aui_field_outer_class.proto\x12\x11UiFieldOuterClass\"1\n\x0cUiFieldValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bstringValue\x18\x02 \x01(\t\"\xe0\x02\n\x07UiField\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nisOptional\x18\x03 \x01(\x08\x12\r\n\x05label\x18\x04 \x01(\t\x12\x37\n\ttextField\x18\x06 \x01(\x0b\x32$.UiFieldOuterClass.UiField.TextField\x12\x12\n\nisDisabled\x18\x0b \x01(\x08\x1a\xd6\x01\n\tTextField\x12\x11\n\tmaxLength\x18\x02 \x01(\x05\x12\x16\n\x0ekeyboardLayout\x18\x04 \x01(\x05\x12\x43\n\nvalidation\x18\x05 \x03(\x0b\x32/.UiFieldOuterClass.UiField.TextField.Validation\x12\x14\n\x0cinitialValue\x18\x06 \x01(\t\x12\x10\n\x08isMasked\x18\x08 \x01(\x08\x1a\x31\n\nValidation\x12\r\n\x05regex\x18\x01 \x01(\t\x12\x14\n\x0c\x65rrorMessage\x18\x02 \x01(\tBW\nBcom.google.commerce.payments.orchestration.proto.ui.common.genericB\x11UiFieldOuterClass')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Descriptor for the UiFieldValue message: (name, stringValue) string pair.
_UIFIELDVALUE = _descriptor.Descriptor(
  name='UiFieldValue',
  full_name='UiFieldOuterClass.UiFieldValue',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='UiFieldOuterClass.UiFieldValue.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='stringValue', full_name='UiFieldOuterClass.UiFieldValue.stringValue', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=49,
  serialized_end=98,
)
# Descriptors for the nested UiField.TextField message and its nested
# TextField.Validation (regex + errorMessage) message.
_UIFIELD_TEXTFIELD_VALIDATION = _descriptor.Descriptor(
  name='Validation',
  full_name='UiFieldOuterClass.UiField.TextField.Validation',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='regex', full_name='UiFieldOuterClass.UiField.TextField.Validation.regex', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='errorMessage', full_name='UiFieldOuterClass.UiField.TextField.Validation.errorMessage', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=404,
  serialized_end=453,
)

_UIFIELD_TEXTFIELD = _descriptor.Descriptor(
  name='TextField',
  full_name='UiFieldOuterClass.UiField.TextField',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='maxLength', full_name='UiFieldOuterClass.UiField.TextField.maxLength', index=0,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='keyboardLayout', full_name='UiFieldOuterClass.UiField.TextField.keyboardLayout', index=1,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='validation', full_name='UiFieldOuterClass.UiField.TextField.validation', index=2,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='initialValue', full_name='UiFieldOuterClass.UiField.TextField.initialValue', index=3,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isMasked', full_name='UiFieldOuterClass.UiField.TextField.isMasked', index=4,
      number=8, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_UIFIELD_TEXTFIELD_VALIDATION, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=239,
  serialized_end=453,
)
# Descriptor for the top-level UiField message (name, isOptional, label,
# nested textField, isDisabled).
_UIFIELD = _descriptor.Descriptor(
  name='UiField',
  full_name='UiFieldOuterClass.UiField',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='UiFieldOuterClass.UiField.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isOptional', full_name='UiFieldOuterClass.UiField.isOptional', index=1,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='label', full_name='UiFieldOuterClass.UiField.label', index=2,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='textField', full_name='UiFieldOuterClass.UiField.textField', index=3,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='isDisabled', full_name='UiFieldOuterClass.UiField.isDisabled', index=4,
      number=11, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_UIFIELD_TEXTFIELD, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=101,
  serialized_end=453,
)
# Wire up nested-message references, register the descriptors, and build the
# concrete message classes via the reflection machinery (generated code).
_UIFIELD_TEXTFIELD_VALIDATION.containing_type = _UIFIELD_TEXTFIELD
_UIFIELD_TEXTFIELD.fields_by_name['validation'].message_type = _UIFIELD_TEXTFIELD_VALIDATION
_UIFIELD_TEXTFIELD.containing_type = _UIFIELD
_UIFIELD.fields_by_name['textField'].message_type = _UIFIELD_TEXTFIELD
DESCRIPTOR.message_types_by_name['UiFieldValue'] = _UIFIELDVALUE
DESCRIPTOR.message_types_by_name['UiField'] = _UIFIELD

UiFieldValue = _reflection.GeneratedProtocolMessageType('UiFieldValue', (_message.Message,), dict(
  DESCRIPTOR = _UIFIELDVALUE,
  __module__ = 'ui_field_outer_class_pb2'
  # @@protoc_insertion_point(class_scope:UiFieldOuterClass.UiFieldValue)
  ))
_sym_db.RegisterMessage(UiFieldValue)

UiField = _reflection.GeneratedProtocolMessageType('UiField', (_message.Message,), dict(

  TextField = _reflection.GeneratedProtocolMessageType('TextField', (_message.Message,), dict(

    Validation = _reflection.GeneratedProtocolMessageType('Validation', (_message.Message,), dict(
      DESCRIPTOR = _UIFIELD_TEXTFIELD_VALIDATION,
      __module__ = 'ui_field_outer_class_pb2'
      # @@protoc_insertion_point(class_scope:UiFieldOuterClass.UiField.TextField.Validation)
      ))
    ,
    DESCRIPTOR = _UIFIELD_TEXTFIELD,
    __module__ = 'ui_field_outer_class_pb2'
    # @@protoc_insertion_point(class_scope:UiFieldOuterClass.UiField.TextField)
    ))
  ,
  DESCRIPTOR = _UIFIELD,
  __module__ = 'ui_field_outer_class_pb2'
  # @@protoc_insertion_point(class_scope:UiFieldOuterClass.UiField)
  ))
_sym_db.RegisterMessage(UiField)
_sym_db.RegisterMessage(UiField.TextField)
_sym_db.RegisterMessage(UiField.TextField.Validation)

# File-level options: java package / outer classname for the generated Java code.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\nBcom.google.commerce.payments.orchestration.proto.ui.common.genericB\021UiFieldOuterClass'))
# @@protoc_insertion_point(module_scope)
|
|
# -*- coding: utf-8 -*-
"""This file contains a plugin for parsing Google Analytics cookies."""
import urllib
from plaso.containers import time_events
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.cookie_plugins import interface
from plaso.parsers.cookie_plugins import manager
# TODO: determine if __utmc always 0?
class GoogleAnalyticsEvent(time_events.PosixTimeEvent):
  """A simple placeholder for a Google Analytics event."""

  DATA_TYPE = u'cookie:google:analytics'

  def __init__(
      self, timestamp, timestamp_description, cookie_identifier, url,
      domain_hash=None, extra_attributes=None, number_of_pages_viewed=None,
      number_of_sessions=None, number_of_sources=None, visitor_identifier=None):
    """Initialize a Google Analytics event.

    Args:
      timestamp (int): POSIX time value, which contains the number of seconds
          since January 1, 1970 00:00:00 UTC.
      timestamp_description (str): description of the usage of the timestamp
          value.
      cookie_identifier (str): unique identifier of the cookie, without the
          leading "__" (e.g. "utma"); used to derive data_type and
          cookie_name.
      url (str): URL or path where the cookie got set.
      domain_hash (Optional[str]): domain hash.
      extra_attributes (Optional[dict[str,str]]): extra attributes, set
          directly on the event as attributes.
      number_of_pages_viewed (Optional[int]): number of pages viewed.
      number_of_sessions (Optional[int]): number of sessions.
      number_of_sources (Optional[int]): number of sources.
      visitor_identifier (Optional[str]): visitor identifier.
    """
    # Data type is specialized per cookie, e.g. cookie:google:analytics:utma.
    data_type = u'{0:s}:{1:s}'.format(self.DATA_TYPE, cookie_identifier)
    super(GoogleAnalyticsEvent, self).__init__(
        timestamp, timestamp_description, data_type=data_type)

    self.cookie_name = u'__{0:s}'.format(cookie_identifier)
    self.domain_hash = domain_hash
    self.pages_viewed = number_of_pages_viewed
    self.sessions = number_of_sessions
    self.sources = number_of_sources
    self.url = url
    self.visitor_id = visitor_identifier

    if not extra_attributes:
      return

    # TODO: refactor, this approach makes it very hard to tell
    # which values are actually set.
    for key, value in iter(extra_attributes.items()):
      setattr(self, key, value)
class GoogleAnalyticsUtmaPlugin(interface.BaseCookiePlugin):
  """A browser cookie plugin for __utma Google Analytics cookies.

  The structure of the cookie data:
  <domain hash>.<visitor ID>.<first visit>.<previous visit>.<last visit>.
  <number of sessions>

  For example:
  137167072.1215918423.1383170166.1383170166.1383170166.1

  Or:
  <last visit>

  For example:
  13113225820000000
  """

  NAME = u'google_analytics_utma'
  DESCRIPTION = u'Google Analytics utma cookie parser'

  # Name of the cookie this plugin handles.
  COOKIE_NAME = u'__utma'

  URLS = [(
      u'http://www.dfinews.com/articles/2012/02/'
      u'google-analytics-cookies-and-forensic-implications')]

  def GetEntries(
      self, parser_mediator, cookie_data=None, url=None, **kwargs):
    """Extracts event objects from the cookie.

    Produces up to three events per cookie (first visit, previous visit
    and last visit), depending on which timestamps can be parsed from the
    cookie data.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      cookie_data (bytes): cookie data.
      url (str): URL or path where the cookie got set.
    """
    # The cookie value is either the 6 dot-separated fields documented in
    # the class docstring or a single timestamp value.
    fields = cookie_data.split(u'.')
    number_of_fields = len(fields)

    if number_of_fields not in (1, 6):
      parser_mediator.ProduceExtractionError(
          u'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
              number_of_fields, self.COOKIE_NAME))
      return

    if number_of_fields == 1:
      domain_hash = None
      visitor_identifier = None
      first_visit_posix_time = None
      previous_visit_posix_time = None

      try:
        # TODO: fix that we're losing precision here use dfdatetime.
        # The single-field variant appears to store the timestamp at a
        # sub-second resolution, hence the division to get POSIX seconds.
        last_visit_posix_time = int(fields[0], 10) / 10000000
      except ValueError:
        last_visit_posix_time = None

      number_of_sessions = None

    elif number_of_fields == 6:
      domain_hash = fields[0]
      visitor_identifier = fields[1]

      # TODO: Double check this time is stored in UTC and not local time.
      try:
        first_visit_posix_time = int(fields[2], 10)
      except ValueError:
        first_visit_posix_time = None

      try:
        previous_visit_posix_time = int(fields[3], 10)
      except ValueError:
        previous_visit_posix_time = None

      try:
        last_visit_posix_time = int(fields[4], 10)
      except ValueError:
        last_visit_posix_time = None

      try:
        number_of_sessions = int(fields[5], 10)
      except ValueError:
        number_of_sessions = None

    if first_visit_posix_time is not None:
      event_object = GoogleAnalyticsEvent(
          first_visit_posix_time, u'Analytics Creation Time', u'utma', url,
          domain_hash=domain_hash, number_of_sessions=number_of_sessions,
          visitor_identifier=visitor_identifier)
      parser_mediator.ProduceEvent(event_object)

    if previous_visit_posix_time is not None:
      event_object = GoogleAnalyticsEvent(
          previous_visit_posix_time, u'Analytics Previous Time', u'utma', url,
          domain_hash=domain_hash, number_of_sessions=number_of_sessions,
          visitor_identifier=visitor_identifier)
      parser_mediator.ProduceEvent(event_object)

    if last_visit_posix_time is not None:
      timestamp_description = eventdata.EventTimestamp.LAST_VISITED_TIME
    elif first_visit_posix_time is None and previous_visit_posix_time is None:
      # If both creation_time and written_time are None produce an event
      # object without a timestamp.
      last_visit_posix_time = timelib.Timestamp.NONE_TIMESTAMP
      timestamp_description = eventdata.EventTimestamp.NOT_A_TIME

    # NOTE: when the last visit time could not be parsed but another
    # timestamp was, no last-visit event is produced (the condition below
    # is False and timestamp_description is never read).
    if last_visit_posix_time is not None:
      event_object = GoogleAnalyticsEvent(
          last_visit_posix_time, timestamp_description, u'utma', url,
          domain_hash=domain_hash, number_of_sessions=number_of_sessions,
          visitor_identifier=visitor_identifier)
      parser_mediator.ProduceEvent(event_object)
class GoogleAnalyticsUtmbPlugin(interface.BaseCookiePlugin):
  """A browser cookie plugin for __utmb Google Analytics cookies.

  The cookie data consists of either 4 dot-separated fields:

  <domain hash>.<pages viewed>.<unknown>.<last time>

  For example:
  137167072.1.10.1383170166
  173272373.6.8.1440489514899
  173272373.4.9.1373300660574

  or a single timestamp value, for example:
  13113225820000000
  """

  NAME = u'google_analytics_utmb'
  DESCRIPTION = u'Google Analytics utmb cookie parser'

  COOKIE_NAME = u'__utmb'

  URLS = [(
      u'http://www.dfinews.com/articles/2012/02/'
      u'google-analytics-cookies-and-forensic-implications')]

  def GetEntries(
      self, parser_mediator, cookie_data=None, url=None, **kwargs):
    """Extracts event objects from the cookie.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      cookie_data (bytes): cookie data.
      url (str): URL or path where the cookie got set.
    """
    cookie_fields = cookie_data.split(u'.')
    field_count = len(cookie_fields)

    if field_count not in (1, 4):
      parser_mediator.ProduceExtractionError(
          u'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
              field_count, self.COOKIE_NAME))
      return

    domain_hash = None
    pages_viewed = None
    posix_time = None

    if field_count == 1:
      try:
        # TODO: fix that we're losing precision here use dfdatetime.
        posix_time = int(cookie_fields[0], 10) / 10000000
      except ValueError:
        posix_time = None
    else:
      domain_hash = cookie_fields[0]

      try:
        pages_viewed = int(cookie_fields[1], 10)
      except ValueError:
        pages_viewed = None

      try:
        # The third field appears to indicate the resolution of the last
        # field: values of 8 and 9 coincide with millisecond timestamps
        # (see the class docstring examples).
        if cookie_fields[2] in (u'8', u'9'):
          # TODO: fix that we're losing precision here use dfdatetime.
          posix_time = int(cookie_fields[3], 10) / 1000
        else:
          posix_time = int(cookie_fields[3], 10)
      except ValueError:
        posix_time = None

    if posix_time is None:
      posix_time = timelib.Timestamp.NONE_TIMESTAMP
      timestamp_description = eventdata.EventTimestamp.NOT_A_TIME
    else:
      timestamp_description = eventdata.EventTimestamp.LAST_VISITED_TIME

    event_object = GoogleAnalyticsEvent(
        posix_time, timestamp_description, u'utmb', url,
        domain_hash=domain_hash, number_of_pages_viewed=pages_viewed)
    parser_mediator.ProduceEvent(event_object)
class GoogleAnalyticsUtmtPlugin(interface.BaseCookiePlugin):
  """A browser cookie plugin for __utmt Google Analytics cookies.

  The cookie data consists of a single timestamp value:

  <last time>

  For example:
  13113215173000000
  """

  NAME = u'google_analytics_utmt'
  DESCRIPTION = u'Google Analytics utmt cookie parser'

  COOKIE_NAME = u'__utmt'

  def GetEntries(
      self, parser_mediator, cookie_data=None, url=None, **kwargs):
    """Extracts event objects from the cookie.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      cookie_data (bytes): cookie data.
      url (str): URL or path where the cookie got set.
    """
    cookie_fields = cookie_data.split(u'.')
    field_count = len(cookie_fields)

    if field_count != 1:
      parser_mediator.ProduceExtractionError(
          u'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
              field_count, self.COOKIE_NAME))
      return

    try:
      # TODO: fix that we're losing precision here use dfdatetime.
      posix_time = int(cookie_fields[0], 10) / 10000000
    except ValueError:
      posix_time = None

    if posix_time is None:
      posix_time = timelib.Timestamp.NONE_TIMESTAMP
      timestamp_description = eventdata.EventTimestamp.NOT_A_TIME
    else:
      timestamp_description = eventdata.EventTimestamp.LAST_VISITED_TIME

    event_object = GoogleAnalyticsEvent(
        posix_time, timestamp_description, u'utmt', url)
    parser_mediator.ProduceEvent(event_object)
class GoogleAnalyticsUtmzPlugin(interface.BaseCookiePlugin):
  """A browser cookie plugin for __utmz Google Analytics cookies.

  The structure of the cookie data:
  <domain hash>.<last time>.<sessions>.<sources>.<variables>

  For example:
  207318870.1383170190.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|
  utmctr=(not%20provided)

  Or:
  <last time>

  For example:
  13128990382000000
  """

  NAME = u'google_analytics_utmz'
  DESCRIPTION = u'Google Analytics utmz cookie parser'

  # Name of the cookie this plugin handles.
  COOKIE_NAME = u'__utmz'

  URLS = [(
      u'http://www.dfinews.com/articles/2012/02/'
      u'google-analytics-cookies-and-forensic-implications')]

  def GetEntries(
      self, parser_mediator, cookie_data=None, url=None, **kwargs):
    """Extracts event objects from the cookie.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      cookie_data (bytes): cookie data.
      url (str): URL or path where the cookie got set.
    """
    fields = cookie_data.split(u'.')
    number_of_fields = len(fields)

    # The variables field can itself contain dots, so re-join any excess
    # fields back into a single fifth field.
    if number_of_fields > 5:
      variables = u'.'.join(fields[4:])
      fields = fields[0:4]
      fields.append(variables)
      number_of_fields = len(fields)

    if number_of_fields not in (1, 5):
      parser_mediator.ProduceExtractionError(
          u'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
              number_of_fields, self.COOKIE_NAME))
      return

    if number_of_fields == 1:
      domain_hash = None

      try:
        # TODO: fix that we're losing precision here use dfdatetime.
        last_visit_posix_time = int(fields[0], 10) / 10000000
      except ValueError:
        last_visit_posix_time = None

      number_of_sessions = None
      number_of_sources = None
      extra_attributes = {}

    elif number_of_fields == 5:
      domain_hash = fields[0]

      try:
        last_visit_posix_time = int(fields[1], 10)
      except ValueError:
        last_visit_posix_time = None

      try:
        number_of_sessions = int(fields[2], 10)
      except ValueError:
        number_of_sessions = None

      try:
        number_of_sources = int(fields[3], 10)
      except ValueError:
        number_of_sources = None

      # The variables field is a |-separated list of key=value pairs that
      # are exposed as extra event attributes.
      extra_variables = fields[4].split(u'|')

      extra_attributes = {}
      for variable in extra_variables:
        key, _, value = variable.partition(u'=')

        # Cookies can have a variety of different encodings, usually ASCII or
        # UTF-8, and values may additionally be URL encoded. urllib only
        # correctly url-decodes ASCII strings, so we'll convert our string
        # to ASCII first.
        try:
          ascii_value = value.encode(u'ascii')
        except UnicodeEncodeError:
          ascii_value = value.encode(u'ascii', errors=u'replace')
          parser_mediator.ProduceExtractionError(
              u'Cookie contains non 7-bit ASCII characters, which have been '
              u'replaced with a "?".')

        # NOTE: urllib.unquote is the Python 2 location of this function;
        # this module targets Python 2 (u'' literals throughout).
        utf_stream = urllib.unquote(ascii_value)

        try:
          value_line = utf_stream.decode(u'utf-8')
        except UnicodeDecodeError:
          value_line = utf_stream.decode(u'utf-8', errors=u'replace')
          parser_mediator.ProduceExtractionError(
              u'Cookie value did not decode to Unicode string. Non UTF-8 '
              u'characters have been replaced.')

        extra_attributes[key] = value_line

    if last_visit_posix_time is not None:
      timestamp_description = eventdata.EventTimestamp.LAST_VISITED_TIME
    else:
      last_visit_posix_time = timelib.Timestamp.NONE_TIMESTAMP
      timestamp_description = eventdata.EventTimestamp.NOT_A_TIME

    event_object = GoogleAnalyticsEvent(
        last_visit_posix_time, timestamp_description, u'utmz', url,
        domain_hash=domain_hash, extra_attributes=extra_attributes,
        number_of_sessions=number_of_sessions,
        number_of_sources=number_of_sources)
    parser_mediator.ProduceEvent(event_object)
# Register all Google Analytics cookie plugins with the cookie plugins
# manager so they are dispatched on their COOKIE_NAME.
manager.CookiePluginsManager.RegisterPlugins([
    GoogleAnalyticsUtmaPlugin, GoogleAnalyticsUtmbPlugin,
    GoogleAnalyticsUtmtPlugin, GoogleAnalyticsUtmzPlugin])
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based transformer scaffold layer."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import dense_einsum
@tf.keras.utils.register_keras_serializable(package="Text")
class TransformerScaffold(tf.keras.layers.Layer):
  """Transformer scaffold layer.

  This layer implements the Transformer from "Attention Is All You Need".
  (https://arxiv.org/abs/1706.03762), with a customizable attention layer
  option. Users can pass a class to `attention_cls` and associated config to
  `attention_cfg`, in which case the scaffold will instantiate the class with
  the config, or pass a class instance to `attention_cls`.

  Attributes:
    num_attention_heads: Number of attention heads.
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    attention_cls: A class to instantiate, or a layer instance.
    attention_cfg: The config with which to instantiate `attention_cls`.
      Ignored if attention_cls is a layer instance.
    dropout_rate: Dropout probability for the post-attention and output
      dropout.
    attention_dropout_rate: Dropout probability for within the attention
      layer.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer kernels.
  """

  def __init__(self,
               num_attention_heads,
               intermediate_size,
               intermediate_activation,
               attention_cls=attention.Attention,
               attention_cfg=None,
               dropout_rate=0.0,
               attention_dropout_rate=0.0,
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(TransformerScaffold, self).__init__(**kwargs)

    self._attention_cfg = attention_cfg
    self._attention_cls = attention_cls
    self._num_heads = num_attention_heads
    self._intermediate_size = intermediate_size
    self._intermediate_activation = intermediate_activation
    self._attention_dropout_rate = attention_dropout_rate
    self._dropout_rate = dropout_rate
    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    # Bug fix: `activity_regularizer` was previously accepted but never
    # stored nor forwarded to the base class, even though `build()` and
    # `get_config()` read `self._activity_regularizer`; the user-provided
    # value was silently replaced by the base layer's default (None).
    self._activity_regularizer = tf.keras.regularizers.get(
        activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)

  def build(self, input_shape):
    """Creates the attention and feedforward sublayers.

    Args:
      input_shape: Shape of the input tensor, or a list/tuple of
        [input_shape, mask_shape] when an attention mask is also passed.

    Raises:
      ValueError: if the input is not rank 3, if the mask shape is not
        compatible with [batch, sequence, sequence], or if the hidden size
        is not divisible by the number of attention heads.
    """
    input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
    input_tensor_shape = tf.TensorShape(input_tensor)
    if len(input_tensor_shape) != 3:
      raise ValueError(
          "TransformerScaffold expects a three-dimensional input of "
          "shape [batch, sequence, width].")
    batch_size, sequence_length, hidden_size = input_tensor_shape

    if len(input_shape) == 2:
      mask_tensor_shape = tf.TensorShape(input_shape[1])
      expected_mask_tensor_shape = tf.TensorShape(
          [batch_size, sequence_length, sequence_length])
      if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
        raise ValueError("When passing a mask tensor to TransformerLayer, the "
                         "mask tensor must be of shape [batch, "
                         "sequence_length, sequence_length] (here %s). Got a "
                         "mask tensor of shape %s." %
                         (expected_mask_tensor_shape, mask_tensor_shape))
    if hidden_size % self._num_heads != 0:
      raise ValueError(
          "The input size (%d) is not a multiple of the number of attention "
          "heads (%d)" % (hidden_size, self._num_heads))
    self._attention_head_size = int(hidden_size // self._num_heads)

    if isinstance(self._attention_cls, tf.keras.layers.Layer):
      # A pre-built layer instance is used as-is; `attention_cfg` is ignored.
      self._attention_layer = self._attention_cls
    else:
      if self._attention_cfg is None:
        attention_cfg = {
            "num_heads": self._num_heads,
            "head_size": self._attention_head_size,
            "dropout_rate": self._attention_dropout_rate,
            "kernel_initializer": self._kernel_initializer,
            "bias_initializer": self._bias_initializer,
            "kernel_regularizer": self._kernel_regularizer,
            "bias_regularizer": self._bias_regularizer,
            "activity_regularizer": self._activity_regularizer,
            "kernel_constraint": self._kernel_constraint,
            "bias_constraint": self._bias_constraint,
            "name": "self_attention"
        }
      else:
        attention_cfg = self._attention_cfg
      self._attention_layer = self._attention_cls(**attention_cfg)

    # Projects the (possibly multi-headed) attention output back to the
    # input hidden size; the last two dimensions are summed over.
    self._attention_output_dense = dense_einsum.DenseEinsum(
        output_shape=hidden_size,
        num_summed_dimensions=2,
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint,
        name="self_attention_output")
    self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    self._attention_layer_norm = (
        tf.keras.layers.LayerNormalization(
            name="self_attention_layer_norm", axis=-1, epsilon=1e-12,
            dtype=tf.float32))
    self._intermediate_dense = dense_einsum.DenseEinsum(
        output_shape=self._intermediate_size,
        activation=self._intermediate_activation,
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint,
        dtype=tf.float32,  # This layer is always float32 for numeric stability.
        name="intermediate")
    self._output_dense = dense_einsum.DenseEinsum(
        output_shape=hidden_size,
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint,
        name="output")
    self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
    self._output_layer_norm = tf.keras.layers.LayerNormalization(
        name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)

    super(TransformerScaffold, self).build(input_shape)

  def get_config(self):
    """Returns the layer configuration for serialization."""
    # NOTE(review): `attention_cls` is serialized as the built layer
    # instance (`self._attention_layer`), which only exists after
    # `build()` has been called.
    config = {
        "attention_cls":
            self._attention_layer,
        "num_attention_heads":
            self._num_heads,
        "intermediate_size":
            self._intermediate_size,
        "intermediate_activation":
            self._intermediate_activation,
        "dropout_rate":
            self._dropout_rate,
        "attention_dropout_rate":
            self._attention_dropout_rate,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint)
    }
    base_config = super(TransformerScaffold, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    """Applies the transformer block.

    Args:
      inputs: A rank-3 input tensor, or a list/tuple of
        [input tensor, attention mask].

    Returns:
      A tensor with the same shape as the input tensor.
    """
    if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
      input_tensor, attention_mask = inputs
    else:
      input_tensor, attention_mask = (inputs, None)

    # Self-attention: queries and keys/values both come from the input.
    attention_inputs = [input_tensor, input_tensor]

    if attention_mask is not None:
      attention_inputs.append(attention_mask)

    attention_output = self._attention_layer(attention_inputs)
    attention_output = self._attention_output_dense(attention_output)
    attention_output = self._attention_dropout(attention_output)
    # Use float32 in keras layer norm and the gelu activation in the
    # intermediate dense layer for numeric stability
    if self.dtype == tf.float16:
      input_tensor = tf.cast(input_tensor, tf.float32)
      attention_output = tf.cast(attention_output, tf.float32)
    # First residual connection + layer norm.
    attention_output = self._attention_layer_norm(input_tensor +
                                                  attention_output)
    intermediate_output = self._intermediate_dense(attention_output)
    if self.dtype == tf.float16:
      intermediate_output = tf.cast(intermediate_output, tf.float16)
    layer_output = self._output_dense(intermediate_output)
    layer_output = self._output_dropout(layer_output)
    # Use float32 in keras layer norm for numeric stability
    if self.dtype == tf.float16:
      layer_output = tf.cast(layer_output, tf.float32)
    # Second residual connection + layer norm.
    layer_output = self._output_layer_norm(layer_output + attention_output)
    if self.dtype == tf.float16:
      layer_output = tf.cast(layer_output, tf.float16)

    return layer_output
|
|
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import os
import ddt
import mock
from oslo_utils import fileutils
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume.drivers import remotefs
from cinder.volume.drivers import smbfs
def requires_allocation_data_update(expected_size):
    """Decorator for test methods that must trigger an allocation update.

    Patches the test's driver so that after the wrapped test runs, the
    driver's `update_disk_allocation_data` is asserted to have been called
    exactly once with the test's volume and `expected_size`.
    """
    def decorator(test_func):
        @functools.wraps(test_func)
        def wrapped(self, *args, **kwargs):
            patcher = mock.patch.object(
                self._smbfs_driver, 'update_disk_allocation_data')
            with patcher as mocked_update:
                test_func(self, *args, **kwargs)
                mocked_update.assert_called_once_with(
                    self.volume, expected_size)
        return wrapped
    return decorator
@ddt.ddt
class SmbFsTestCase(test.TestCase):
    """Unit tests for the SMBFS Cinder volume driver."""

    # Fake SMB share location and the hash derived from it, used to build
    # the expected mount point and volume paths below.
    _FAKE_SHARE = '//1.2.3.4/share1'
    _FAKE_SHARE_HASH = 'db0bf952c1734092b83e8990bd321131'
    _FAKE_MNT_BASE = '/mnt'
    _FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc'
    # NOTE(review): total/available are kept as strings while allocated is
    # an int — presumably mirroring the driver's capacity output; confirm.
    _FAKE_TOTAL_SIZE = '2048'
    _FAKE_TOTAL_AVAILABLE = '1024'
    _FAKE_TOTAL_ALLOCATED = 1024
    _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, _FAKE_SHARE_HASH)
    _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME)
    _FAKE_VOLUME_SIZE = 1
    _FAKE_SNAPSHOT_ID = '50811859-4928-4cb7-801a-a50c37ceacba'
    _FAKE_SNAPSHOT_PATH = (
        _FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID)
    # Sample mount options as raw flags and as the expected parsed dict.
    _FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
    _FAKE_OPTIONS_DICT = {'username': 'Administrator',
                          'password': '12345'}
    _FAKE_ALLOCATION_DATA_PATH = os.path.join('fake_dir',
                                              'fake_allocation_data')
    def setUp(self):
        """Creates an SMBFS driver instance with mocked collaborators."""
        super(SmbFsTestCase, self).setUp()

        # Fake driver configuration consumed by the _test_setup* and
        # _test_create_volume helpers.
        self._FAKE_SMBFS_CONFIG = mock.MagicMock(
            smbfs_oversub_ratio=2,
            smbfs_used_ratio=0.5,
            smbfs_shares_config='/fake/config/path',
            smbfs_default_volume_format='raw',
            smbfs_sparsed_volumes=False)

        self._smbfs_driver = smbfs.SmbfsDriver(configuration=mock.Mock())
        self._smbfs_driver._remotefsclient = mock.Mock()
        self._smbfs_driver._local_volume_dir = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        self._smbfs_driver._execute = mock.Mock()
        self._smbfs_driver.base = self._FAKE_MNT_BASE
        self._smbfs_driver._alloc_info_file_path = (
            self._FAKE_ALLOCATION_DATA_PATH)

        self.context = context.get_admin_context()

        self.volume = fake_volume.fake_volume_obj(
            self.context,
            id='4f711859-4928-4cb7-801a-a50c37ceaccc',
            size=self._FAKE_VOLUME_SIZE,
            provider_location=self._FAKE_SHARE,
            display_name=self._FAKE_VOLUME_NAME,
            status='available')

        self.snapshot = fake_snapshot.fake_snapshot_obj(
            self.context,
            id=self._FAKE_SNAPSHOT_ID,
            status='available',
            volume_size=1)
        # Link the snapshot to its volume, as the driver navigates
        # snapshot.volume in several code paths.
        self.snapshot.volume = self.volume
def _get_fake_allocation_data(self):
return {self._FAKE_SHARE_HASH: {
'total_allocated': self._FAKE_TOTAL_ALLOCATED}}
    @mock.patch.object(smbfs, 'open', create=True)
    @mock.patch('os.path.exists')
    @mock.patch.object(fileutils, 'ensure_tree')
    @mock.patch('json.load')
    def _test_setup_allocation_data(self, mock_json_load, mock_ensure_tree,
                                    mock_exists, mock_open,
                                    allocation_data_exists=False):
        """Verifies _setup_allocation_data for an existing or missing file.

        Note: mock.patch decorators apply bottom-up, so the mock arguments
        arrive innermost first (json.load, ensure_tree, exists, open).
        """
        mock_exists.return_value = allocation_data_exists

        self._smbfs_driver._update_allocation_data_file = mock.Mock()
        self._smbfs_driver._setup_allocation_data()
        if allocation_data_exists:
            # Existing file: its JSON content becomes the in-memory
            # allocation data.
            fd = mock_open.return_value.__enter__.return_value
            mock_json_load.assert_called_once_with(fd)
            self.assertEqual(mock_json_load.return_value,
                             self._smbfs_driver._allocation_data)
        else:
            # Missing file: the parent directory is created and a fresh
            # allocation data file is written out.
            mock_ensure_tree.assert_called_once_with(
                os.path.dirname(self._FAKE_ALLOCATION_DATA_PATH))
            update_func = self._smbfs_driver._update_allocation_data_file
            update_func.assert_called_once_with()
    def test_setup_allocation_data_file_unexisting(self):
        # No allocation data file on disk yet.
        self._test_setup_allocation_data()

    def test_setup_allocation_data_file_existing(self):
        # Allocation data file already present on disk.
        self._test_setup_allocation_data(allocation_data_exists=True)
    def _test_update_allocation_data(self, virtual_size_gb=None,
                                     volume_exists=True):
        """Checks the bookkeeping done by update_disk_allocation_data.

        A `virtual_size_gb` of None emulates a volume deletion; otherwise
        the volume's entry is set (creation/extension) to that size.
        """
        self._smbfs_driver._update_allocation_data_file = mock.Mock()
        update_func = self._smbfs_driver._update_allocation_data_file

        fake_alloc_data = self._get_fake_allocation_data()
        if volume_exists:
            fake_alloc_data[self._FAKE_SHARE_HASH][
                self._FAKE_VOLUME_NAME] = self.volume.size

        self._smbfs_driver._allocation_data = fake_alloc_data

        # The driver mutates fake_alloc_data in place.
        self._smbfs_driver.update_disk_allocation_data(self.volume,
                                                       virtual_size_gb)

        vol_allocated_size = fake_alloc_data[self._FAKE_SHARE_HASH].get(
            self._FAKE_VOLUME_NAME, None)
        if not virtual_size_gb:
            # Deletion: the volume entry is removed and its size is
            # subtracted from the share total.
            expected_total_allocated = (self._FAKE_TOTAL_ALLOCATED -
                                        self.volume.size)

            self.assertIsNone(vol_allocated_size)
        else:
            # Creation/extension: the total grows by the size delta.
            expected_total_allocated = (self._FAKE_TOTAL_ALLOCATED +
                                        virtual_size_gb -
                                        self.volume.size)
            self.assertEqual(virtual_size_gb, vol_allocated_size)

        update_func.assert_called_once_with()

        self.assertEqual(
            expected_total_allocated,
            fake_alloc_data[self._FAKE_SHARE_HASH]['total_allocated'])
    def test_update_allocation_data_volume_deleted(self):
        # virtual_size_gb=None is treated as a deletion.
        self._test_update_allocation_data()

    def test_update_allocation_data_volume_extended(self):
        self._test_update_allocation_data(
            virtual_size_gb=self.volume.size + 1)

    def test_update_allocation_data_volume_created(self):
        self._test_update_allocation_data(
            virtual_size_gb=self.volume.size)
    @requires_allocation_data_update(expected_size=None)
    def test_delete_volume(self):
        """delete_volume removes both the image file and its .info file."""
        drv = self._smbfs_driver
        fake_vol_info = self._FAKE_VOLUME_PATH + '.info'

        drv._ensure_share_mounted = mock.MagicMock()
        fake_ensure_mounted = drv._ensure_share_mounted

        drv._local_volume_dir = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        drv.get_active_image_from_info = mock.Mock(
            return_value=self._FAKE_VOLUME_NAME)
        drv._delete = mock.Mock()
        drv._local_path_volume_info = mock.Mock(
            return_value=fake_vol_info)

        # Make every existence check pass so both deletions happen.
        with mock.patch('os.path.exists', lambda x: True):
            drv.delete_volume(self.volume)

            fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
            drv._delete.assert_any_call(
                self._FAKE_VOLUME_PATH)
            drv._delete.assert_any_call(fake_vol_info)
    @mock.patch('os.path.exists')
    @mock.patch.object(image_utils, 'check_qemu_img_version')
    def _test_setup(self, mock_check_qemu_img_version,
                    mock_exists, config, share_config_exists=True):
        """Runs do_setup, expecting failure on invalid configurations.

        Invalid cases: missing shares config option/file, non-positive
        oversubscription ratio, or a used ratio outside [0, 1].
        """
        mock_exists.return_value = share_config_exists
        fake_ensure_mounted = mock.MagicMock()
        self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
        self._smbfs_driver.configuration = config

        if not (config.smbfs_shares_config and share_config_exists and
                config.smbfs_oversub_ratio > 0 and
                0 <= config.smbfs_used_ratio <= 1):
            self.assertRaises(exception.SmbfsException,
                              self._smbfs_driver.do_setup,
                              None)
        else:
            self._smbfs_driver.do_setup(mock.sentinel.context)

            mock_check_qemu_img_version.assert_called_once_with()
            self.assertEqual({}, self._smbfs_driver.shares)
            fake_ensure_mounted.assert_called_once_with()
    def test_setup_missing_shares_config_option(self):
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_shares_config = None
        self._test_setup(config=fake_config,
                         share_config_exists=False)

    def test_setup_missing_shares_config_file(self):
        self._test_setup(config=self._FAKE_SMBFS_CONFIG,
                         share_config_exists=False)

    # NOTE(review): method name contains a typo ('invlid'); kept as-is to
    # avoid renaming a public test method.
    def test_setup_invlid_oversub_ratio(self):
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_oversub_ratio = -1
        self._test_setup(config=fake_config)

    def test_setup_invalid_used_ratio(self):
        # Used ratio below the valid [0, 1] range.
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_used_ratio = -1
        self._test_setup(config=fake_config)

    def test_setup_invalid_used_ratio2(self):
        # Used ratio above the valid [0, 1] range.
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_used_ratio = 1.1
        self._test_setup(config=fake_config)
    @mock.patch('os.path.exists')
    @mock.patch.multiple(smbfs.SmbfsDriver,
                         _create_windows_image=mock.DEFAULT,
                         _create_regular_file=mock.DEFAULT,
                         _create_qcow2_file=mock.DEFAULT,
                         _create_sparsed_file=mock.DEFAULT,
                         get_volume_format=mock.DEFAULT,
                         local_path=mock.DEFAULT,
                         _set_rw_permissions_for_all=mock.DEFAULT)
    def _test_create_volume(self, mock_exists, volume_exists=False,
                            volume_format=None, use_sparsed_file=False,
                            **mocks):
        """Checks that _do_create_volume picks the right creation method.

        The expected method depends on the volume format (vhd/vhdx,
        qcow2) and whether sparse files are enabled for other formats.
        """
        self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
        self._smbfs_driver.configuration.smbfs_sparsed_volumes = (
            use_sparsed_file)
        self._smbfs_driver.get_volume_format.return_value = volume_format
        self._smbfs_driver.local_path.return_value = mock.sentinel.vol_path

        mock_exists.return_value = volume_exists
        if volume_exists:
            # Creating over an already-existing image must fail.
            self.assertRaises(exception.InvalidVolume,
                              self._smbfs_driver._do_create_volume,
                              self.volume)
            return

        self._smbfs_driver._do_create_volume(self.volume)
        expected_create_args = [mock.sentinel.vol_path,
                                self.volume.size]
        if volume_format in [self._smbfs_driver._DISK_FORMAT_VHDX,
                             self._smbfs_driver._DISK_FORMAT_VHD]:
            # Windows image formats additionally receive the format.
            expected_create_args.append(volume_format)
            exp_create_method = self._smbfs_driver._create_windows_image
        else:
            if volume_format == self._smbfs_driver._DISK_FORMAT_QCOW2:
                exp_create_method = self._smbfs_driver._create_qcow2_file
            elif use_sparsed_file:
                exp_create_method = self._smbfs_driver._create_sparsed_file
            else:
                exp_create_method = self._smbfs_driver._create_regular_file
        exp_create_method.assert_called_once_with(*expected_create_args)

        mock_set_permissions = self._smbfs_driver._set_rw_permissions_for_all
        mock_set_permissions.assert_called_once_with(mock.sentinel.vol_path)
    # One test per _do_create_volume code path.
    def test_create_existing_volume(self):
        self._test_create_volume(volume_exists=True)

    def test_create_vhdx(self):
        self._test_create_volume(volume_format='vhdx')

    def test_create_qcow2(self):
        self._test_create_volume(volume_format='qcow2')

    def test_create_sparsed(self):
        self._test_create_volume(volume_format='raw',
                                 use_sparsed_file=True)

    def test_create_regular(self):
        self._test_create_volume()
    def _test_find_share(self, existing_mounted_shares=True,
                         eligible_shares=True):
        """Checks _find_share share selection and its error paths."""
        if existing_mounted_shares:
            mounted_shares = ('fake_share1', 'fake_share2', 'fake_share3')
        else:
            mounted_shares = None

        self._smbfs_driver._mounted_shares = mounted_shares
        self._smbfs_driver._is_share_eligible = mock.Mock(
            return_value=eligible_shares)
        # Allocated sizes returned per share in iteration order, so
        # fake_share3 ends up with the smallest allocation (1).
        self._smbfs_driver._get_total_allocated = mock.Mock(
            side_effect=[3, 2, 1])

        if not mounted_shares:
            self.assertRaises(exception.SmbfsNoSharesMounted,
                              self._smbfs_driver._find_share,
                              self.volume.size)
        elif not eligible_shares:
            self.assertRaises(exception.SmbfsNoSuitableShareFound,
                              self._smbfs_driver._find_share,
                              self.volume.size)
        else:
            ret_value = self._smbfs_driver._find_share(
                self.volume.size)
            # The eligible share with the minimum allocated space
            # will be selected
            self.assertEqual('fake_share3', ret_value)
    def test_find_share(self):
        self._test_find_share()

    def test_find_share_missing_mounted_shares(self):
        self._test_find_share(existing_mounted_shares=False)

    def test_find_share_missing_eligible_shares(self):
        self._test_find_share(eligible_shares=False)
def _test_is_share_eligible(self, capacity_info, volume_size):
self._smbfs_driver._get_capacity_info = mock.Mock(
return_value=[float(x << 30) for x in capacity_info])
self._smbfs_driver.configuration = self._FAKE_SMBFS_CONFIG
return self._smbfs_driver._is_share_eligible(self._FAKE_SHARE,
volume_size)
    # capacity_info tuples appear to be (total, available, allocated) in
    # GB, converted to bytes by _test_is_share_eligible.
    def test_share_volume_above_used_ratio(self):
        fake_capacity_info = (4, 1, 1)
        fake_volume_size = 2
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertFalse(ret_value)

    def test_eligible_share(self):
        fake_capacity_info = (4, 4, 0)
        fake_volume_size = 1
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertTrue(ret_value)

    def test_share_volume_above_oversub_ratio(self):
        fake_capacity_info = (4, 4, 7)
        fake_volume_size = 2
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertFalse(ret_value)

    def test_share_reserved_above_oversub_ratio(self):
        fake_capacity_info = (4, 4, 10)
        fake_volume_size = 1
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertFalse(ret_value)
def test_parse_options(self):
(opt_list,
opt_dict) = self._smbfs_driver.parse_options(
self._FAKE_SHARE_OPTS)
expected_ret = ([], self._FAKE_OPTIONS_DICT)
self.assertEqual(expected_ret, (opt_list, opt_dict))
def test_parse_credentials(self):
fake_smb_options = r'-o user=MyDomain\Administrator,noperm'
expected_flags = '-o username=Administrator,noperm'
flags = self._smbfs_driver.parse_credentials(fake_smb_options)
self.assertEqual(expected_flags, flags)
    @mock.patch.object(smbfs.SmbfsDriver, '_get_local_volume_path_template')
    @mock.patch.object(smbfs.SmbfsDriver, '_lookup_local_volume_path')
    @mock.patch.object(smbfs.SmbfsDriver, 'get_volume_format')
    def _test_get_volume_path(self, mock_get_volume_format, mock_lookup_volume,
                              mock_get_path_template, volume_exists=True):
        """Checks local_path for existing vs. not-yet-created volumes.

        For an existing volume the path found on disk is returned as-is;
        otherwise the extension is derived from the volume format.
        """
        drv = self._smbfs_driver
        mock_get_path_template.return_value = self._FAKE_VOLUME_PATH
        volume_format = 'raw'
        expected_vol_path = self._FAKE_VOLUME_PATH + '.' + volume_format

        mock_lookup_volume.return_value = (
            expected_vol_path if volume_exists else None)
        mock_get_volume_format.return_value = volume_format

        ret_val = drv.local_path(self.volume)

        if volume_exists:
            # The on-disk lookup answered, so the format is not queried.
            self.assertFalse(mock_get_volume_format.called)
        else:
            mock_get_volume_format.assert_called_once_with(self.volume)
        self.assertEqual(expected_vol_path, ret_val)
    def test_get_existing_volume_path(self):
        """Existing volume: path resolved without a format lookup."""
        self._test_get_volume_path()
    def test_get_new_volume_path(self):
        """New volume: path built from the configured volume format."""
        self._test_get_volume_path(volume_exists=False)
    @mock.patch.object(smbfs.SmbfsDriver, '_local_volume_dir')
    def test_get_local_volume_path_template(self, mock_get_local_dir):
        """Path template is the mount point joined with the volume name."""
        mock_get_local_dir.return_value = self._FAKE_MNT_POINT
        ret_val = self._smbfs_driver._get_local_volume_path_template(
            self.volume)
        self.assertEqual(self._FAKE_VOLUME_PATH, ret_val)
    @mock.patch('os.path.exists')
    def test_lookup_local_volume_path(self, mock_exists):
        """Lookup probes the bare path plus every supported image extension
        and returns the first path that exists."""
        expected_path = self._FAKE_VOLUME_PATH + '.vhdx'
        # Only the '.vhdx' variant "exists" on disk.
        mock_exists.side_effect = lambda x: x == expected_path
        ret_val = self._smbfs_driver._lookup_local_volume_path(
            self._FAKE_VOLUME_PATH)
        extensions = [''] + [
            ".%s" % ext
            for ext in self._smbfs_driver._SUPPORTED_IMAGE_FORMATS]
        possible_paths = [self._FAKE_VOLUME_PATH + ext
                          for ext in extensions]
        mock_exists.assert_has_calls(
            [mock.call(path) for path in possible_paths])
        self.assertEqual(expected_path, ret_val)
    @mock.patch.object(smbfs.SmbfsDriver, '_get_local_volume_path_template')
    @mock.patch.object(smbfs.SmbfsDriver, '_lookup_local_volume_path')
    @mock.patch.object(smbfs.SmbfsDriver, '_qemu_img_info')
    @mock.patch.object(smbfs.SmbfsDriver, '_get_volume_format_spec')
    def _mock_get_volume_format(self, mock_get_format_spec, mock_qemu_img_info,
                                mock_lookup_volume, mock_get_path_template,
                                qemu_format=False, volume_format='raw',
                                volume_exists=True):
        """Helper: ``get_volume_format`` inspects the image with qemu-img
        for existing volumes, and falls back to the format spec otherwise."""
        mock_get_path_template.return_value = self._FAKE_VOLUME_PATH
        mock_lookup_volume.return_value = (
            self._FAKE_VOLUME_PATH if volume_exists else None)
        mock_qemu_img_info.return_value.file_format = volume_format
        mock_get_format_spec.return_value = volume_format
        ret_val = self._smbfs_driver.get_volume_format(self.volume,
                                                       qemu_format)
        if volume_exists:
            mock_qemu_img_info.assert_called_once_with(self._FAKE_VOLUME_PATH,
                                                       self._FAKE_VOLUME_NAME)
            self.assertFalse(mock_get_format_spec.called)
        else:
            mock_get_format_spec.assert_called_once_with(self.volume)
            self.assertFalse(mock_qemu_img_info.called)
        return ret_val
    def test_get_existing_raw_volume_format(self):
        """Existing raw image reports format 'raw'."""
        fmt = self._mock_get_volume_format()
        self.assertEqual('raw', fmt)
    def test_get_new_vhd_volume_format(self):
        """New volume keeps the requested 'vhd' format name."""
        expected_fmt = 'vhd'
        fmt = self._mock_get_volume_format(volume_format=expected_fmt,
                                           volume_exists=False)
        self.assertEqual(expected_fmt, fmt)
    def test_get_new_vhd_legacy_volume_format(self):
        """With qemu naming requested, 'vhd' maps to qemu's legacy 'vpc'."""
        img_fmt = 'vhd'
        expected_fmt = 'vpc'
        ret_val = self._mock_get_volume_format(volume_format=img_fmt,
                                               volume_exists=False,
                                               qemu_format=True)
        self.assertEqual(expected_fmt, ret_val)
    def test_initialize_connection(self):
        """Connection info carries export, format, name and mount options."""
        self._smbfs_driver.get_active_image_from_info = mock.Mock(
            return_value=self._FAKE_VOLUME_NAME)
        self._smbfs_driver._get_mount_point_base = mock.Mock(
            return_value=self._FAKE_MNT_BASE)
        self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
        self._smbfs_driver.get_volume_format = mock.Mock(
            return_value=mock.sentinel.format)
        fake_data = {'export': self._FAKE_SHARE,
                     'format': mock.sentinel.format,
                     'name': self._FAKE_VOLUME_NAME,
                     'options': self._FAKE_SHARE_OPTS}
        expected = {
            'driver_volume_type': 'smbfs',
            'data': fake_data,
            'mount_point_base': self._FAKE_MNT_BASE}
        ret_val = self._smbfs_driver.initialize_connection(
            self.volume, None)
        self.assertEqual(expected, ret_val)
    def _test_extend_volume(self, extend_failed=False, image_format='raw'):
        """Helper: extend resizes raw images in place; VHD/VHDX images are
        first converted to a raw temp file, resized, then converted back.
        A post-resize size mismatch raises ``ExtendVolumeError``."""
        drv = self._smbfs_driver
        drv.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH)
        drv._check_extend_volume_support = mock.Mock(
            return_value=True)
        drv._is_file_size_equal = mock.Mock(
            return_value=not extend_failed)
        drv._qemu_img_info = mock.Mock(
            return_value=mock.Mock(file_format=image_format))
        drv._delete = mock.Mock()
        with mock.patch.object(image_utils, 'resize_image') as fake_resize, \
                mock.patch.object(image_utils, 'convert_image') as \
                fake_convert:
            if extend_failed:
                self.assertRaises(exception.ExtendVolumeError,
                                  drv.extend_volume,
                                  self.volume, mock.sentinel.new_size)
            else:
                drv.extend_volume(self.volume, mock.sentinel.new_size)
                if image_format in (drv._DISK_FORMAT_VHDX,
                                    drv._DISK_FORMAT_VHD_LEGACY):
                    fake_tmp_path = self._FAKE_VOLUME_PATH + '.tmp'
                    fake_convert.assert_any_call(self._FAKE_VOLUME_PATH,
                                                 fake_tmp_path, 'raw')
                    fake_resize.assert_called_once_with(
                        fake_tmp_path, mock.sentinel.new_size)
                    fake_convert.assert_any_call(fake_tmp_path,
                                                 self._FAKE_VOLUME_PATH,
                                                 image_format)
                else:
                    fake_resize.assert_called_once_with(
                        self._FAKE_VOLUME_PATH, mock.sentinel.new_size)
    @requires_allocation_data_update(expected_size=mock.sentinel.new_size)
    def test_extend_volume(self):
        """Successful raw-image extend updates allocation data."""
        self._test_extend_volume()
    def test_extend_volume_failed(self):
        """Extend raises ``ExtendVolumeError`` when the resize didn't stick."""
        self._test_extend_volume(extend_failed=True)
    @requires_allocation_data_update(expected_size=mock.sentinel.new_size)
    def test_extend_vhd_volume(self):
        """VHD ('vpc') images go through the convert/resize/convert path."""
        self._test_extend_volume(image_format='vpc')
    def _test_check_extend_support(self, has_snapshots=False,
                                   is_eligible=True):
        """Helper: extend is refused for snapshotted volumes
        (``InvalidVolume``) and for ineligible shares (``ExtendVolumeError``)."""
        self._smbfs_driver.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH)
        if has_snapshots:
            active_file_path = self._FAKE_SNAPSHOT_PATH
        else:
            active_file_path = self._FAKE_VOLUME_PATH
        self._smbfs_driver.get_active_image_from_info = mock.Mock(
            return_value=active_file_path)
        self._smbfs_driver._is_share_eligible = mock.Mock(
            return_value=is_eligible)
        if has_snapshots:
            self.assertRaises(exception.InvalidVolume,
                              self._smbfs_driver._check_extend_volume_support,
                              self.volume, 2)
        elif not is_eligible:
            self.assertRaises(exception.ExtendVolumeError,
                              self._smbfs_driver._check_extend_volume_support,
                              self.volume, 2)
        else:
            self._smbfs_driver._check_extend_volume_support(
                self.volume, 2)
            # Only the size delta (new size - current size) is checked.
            self._smbfs_driver._is_share_eligible.assert_called_once_with(
                self._FAKE_SHARE, 1)
    def test_check_extend_support(self):
        """Extend is supported for a plain volume on an eligible share."""
        self._test_check_extend_support()
    def test_check_extend_volume_with_snapshots(self):
        """Extend is rejected for a volume that has snapshots."""
        self._test_check_extend_support(has_snapshots=True)
    def test_check_extend_volume_uneligible_share(self):
        """Extend is rejected when the backing share is not eligible."""
        self._test_check_extend_support(is_eligible=False)
    @requires_allocation_data_update(expected_size=_FAKE_VOLUME_SIZE)
    @mock.patch.object(remotefs.RemoteFSSnapDriver, 'create_volume')
    def test_create_volume_base(self, mock_create_volume):
        """create_volume delegates to the RemoteFS base class."""
        self._smbfs_driver.create_volume(self.volume)
        mock_create_volume.assert_called_once_with(self.volume)
    @requires_allocation_data_update(expected_size=_FAKE_VOLUME_SIZE)
    @mock.patch.object(smbfs.SmbfsDriver,
                       '_create_volume_from_snapshot')
    def test_create_volume_from_snapshot(self, mock_create_volume):
        """create_volume_from_snapshot delegates to the private helper."""
        self._smbfs_driver.create_volume_from_snapshot(self.volume,
                                                       self.snapshot)
        mock_create_volume.assert_called_once_with(self.volume,
                                                   self.snapshot)
    @requires_allocation_data_update(expected_size=_FAKE_VOLUME_SIZE)
    @mock.patch.object(smbfs.SmbfsDriver, '_create_cloned_volume')
    def test_create_cloned_volume(self, mock_create_volume):
        """create_cloned_volume delegates to the private helper."""
        self._smbfs_driver.create_cloned_volume(self.volume,
                                                mock.sentinel.src_vol)
        mock_create_volume.assert_called_once_with(self.volume,
                                                   mock.sentinel.src_vol)
    def test_create_volume_from_unavailable_snapshot(self):
        """Cloning from a snapshot in 'error' state raises InvalidSnapshot."""
        self.snapshot.status = 'error'
        self.assertRaises(
            exception.InvalidSnapshot,
            self._smbfs_driver.create_volume_from_snapshot,
            self.volume, self.snapshot)
    def test_copy_volume_from_snapshot(self):
        """Copying from a snapshot converts the backing file to the new
        volume path and then extends the result to the requested size."""
        drv = self._smbfs_driver
        fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'}
        fake_img_info = mock.MagicMock()
        fake_img_info.backing_file = self._FAKE_VOLUME_NAME
        drv.get_volume_format = mock.Mock(
            return_value='raw')
        drv._local_path_volume_info = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH + '.info')
        drv._local_volume_dir = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        drv._read_info_file = mock.Mock(
            return_value=fake_volume_info)
        drv._qemu_img_info = mock.Mock(
            return_value=fake_img_info)
        # Destination path is deliberately different from the source path
        # (source path minus its last character) so the convert call's
        # src/dest arguments can be told apart.
        drv.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH[:-1])
        drv._extend_volume = mock.Mock()
        drv._set_rw_permissions_for_all = mock.Mock()
        with mock.patch.object(image_utils, 'convert_image') as (
                fake_convert_image):
            drv._copy_volume_from_snapshot(
                self.snapshot, self.volume,
                self.volume.size)
            drv._extend_volume.assert_called_once_with(
                self.volume, self.volume.size)
            fake_convert_image.assert_called_once_with(
                self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw')
    def test_ensure_mounted(self):
        """Mount helper is invoked with the share and its split options."""
        self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
        self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE)
        self._smbfs_driver._remotefsclient.mount.assert_called_once_with(
            self._FAKE_SHARE, self._FAKE_SHARE_OPTS.split())
    def _test_copy_image_to_volume(self, wrong_size_after_fetch=False):
        """Helper: copy_image_to_volume fetches the image in the volume's
        format and then extends it; a virtual size that does not match the
        volume size raises ``ImageUnacceptable``."""
        drv = self._smbfs_driver
        # Volume size in GB -> bytes.
        vol_size_bytes = self.volume.size << 30
        fake_img_info = mock.MagicMock()
        if wrong_size_after_fetch:
            fake_img_info.virtual_size = 2 * vol_size_bytes
        else:
            fake_img_info.virtual_size = vol_size_bytes
        drv.get_volume_format = mock.Mock(
            return_value=drv._DISK_FORMAT_VHDX)
        drv.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH)
        drv._do_extend_volume = mock.Mock()
        drv.configuration = mock.MagicMock()
        drv.configuration.volume_dd_blocksize = (
            mock.sentinel.block_size)
        with mock.patch.object(image_utils, 'fetch_to_volume_format') as \
                fake_fetch, mock.patch.object(image_utils, 'qemu_img_info') as \
                fake_qemu_img_info:
            fake_qemu_img_info.return_value = fake_img_info
            if wrong_size_after_fetch:
                self.assertRaises(
                    exception.ImageUnacceptable,
                    drv.copy_image_to_volume,
                    mock.sentinel.context, self.volume,
                    mock.sentinel.image_service,
                    mock.sentinel.image_id)
            else:
                drv.copy_image_to_volume(
                    mock.sentinel.context, self.volume,
                    mock.sentinel.image_service,
                    mock.sentinel.image_id)
                fake_fetch.assert_called_once_with(
                    mock.sentinel.context, mock.sentinel.image_service,
                    mock.sentinel.image_id, self._FAKE_VOLUME_PATH,
                    drv._DISK_FORMAT_VHDX,
                    mock.sentinel.block_size)
                drv._do_extend_volume.assert_called_once_with(
                    self._FAKE_VOLUME_PATH,
                    self.volume.size,
                    self.volume.name)
    def test_copy_image_to_volume(self):
        """Image copy succeeds when fetched size matches the volume size."""
        self._test_copy_image_to_volume()
    def test_copy_image_to_volume_wrong_size_after_fetch(self):
        """Image copy raises when the fetched image size is wrong."""
        self._test_copy_image_to_volume(wrong_size_after_fetch=True)
    def test_get_capacity_info(self):
        """Capacity is computed from mocked ``df`` output: (total bytes,
        available bytes, total allocated)."""
        fake_block_size = 4096.0
        fake_total_blocks = 1024
        fake_avail_blocks = 512
        fake_df = ('%s %s %s' % (fake_block_size, fake_total_blocks,
                                 fake_avail_blocks), None)
        self._smbfs_driver._get_mount_point_for_share = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        self._smbfs_driver._get_total_allocated = mock.Mock(
            return_value=self._FAKE_TOTAL_ALLOCATED)
        self._smbfs_driver._execute.return_value = fake_df
        ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE)
        expected = (fake_block_size * fake_total_blocks,
                    fake_block_size * fake_avail_blocks,
                    self._FAKE_TOTAL_ALLOCATED)
        self.assertEqual(expected, ret_val)
    @ddt.data([False, False],
              [True, True],
              [False, True])
    @ddt.unpack
    def test_get_volume_format_spec(self,
                                    volume_meta_contains_fmt,
                                    volume_type_contains_fmt):
        """Format spec precedence: volume metadata, then volume-type extra
        specs, then the configured default."""
        self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_vol_meta_fmt = 'vhd'
        fake_vol_type_fmt = 'vhdx'
        volume_metadata = {}
        volume_type_extra_specs = {}
        if volume_meta_contains_fmt:
            volume_metadata['volume_format'] = fake_vol_meta_fmt
        elif volume_type_contains_fmt:
            volume_type_extra_specs['volume_format'] = fake_vol_type_fmt
        volume_type = fake_volume.fake_volume_type_obj(self.context)
        volume = fake_volume.fake_volume_obj(self.context)
        # Optional arguments are not set in _from_db_object,
        # so have to set explicitly here
        volume.volume_type = volume_type
        volume.metadata = volume_metadata
        # Same for extra_specs and VolumeType
        volume_type.extra_specs = volume_type_extra_specs
        resulted_fmt = self._smbfs_driver._get_volume_format_spec(volume)
        if volume_meta_contains_fmt:
            expected_fmt = fake_vol_meta_fmt
        elif volume_type_contains_fmt:
            expected_fmt = fake_vol_type_fmt
        else:
            expected_fmt = self._FAKE_SMBFS_CONFIG.smbfs_default_volume_format
        self.assertEqual(expected_fmt, resulted_fmt)
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the nullable ``question_details``
    rich-text field to ``pleiapp.Faq``.

    NOTE: the ``models`` dict below is the auto-generated frozen ORM
    snapshot; it must not be edited by hand.
    """

    def forwards(self, orm):
        """Apply: add the nullable ``question_details`` column to Faq."""
        # Adding field 'Faq.question_details'
        db.add_column(u'pleiapp_faq', 'question_details',
                      self.gf('mezzanine.core.fields.RichTextField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the ``question_details`` column from Faq."""
        # Deleting field 'Faq.question_details'
        db.delete_column(u'pleiapp_faq', 'question_details')

    # Frozen ORM definitions (auto-generated by South) describing every
    # model this migration may reference.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'generic.assignedkeyword': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
            'object_pk': ('django.db.models.fields.IntegerField', [], {})
        },
        u'generic.keyword': {
            'Meta': {'object_name': 'Keyword'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        u'pleiapp.category': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'})
        },
        u'pleiapp.dictionary': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Dictionary'},
            '_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_dictionary_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Dictionary']"}),
            'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Faq']", 'symmetrical': 'False', 'blank': 'True'}),
            'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Resource']", 'symmetrical': 'False', 'blank': 'True'}),
            'searchable_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dictionarys'", 'to': u"orm['auth.User']"})
        },
        u'pleiapp.faq': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Faq'},
            '_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'detect_automatically': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'question_details': ('mezzanine.core.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
            'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Dictionary']", 'symmetrical': 'False', 'blank': 'True'}),
            'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_faqs_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Faq']"}),
            'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Resource']", 'symmetrical': 'False', 'blank': 'True'}),
            'searchable_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
            'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'faqs'", 'to': u"orm['auth.User']"})
        },
        u'pleiapp.frontpageitem': {
            'Meta': {'object_name': 'FrontPageItem'},
            'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'})
        },
        u'pleiapp.resource': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Resource'},
            '_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'attached_document': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'audio_file': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'detect_automatically': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'link_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Dictionary']", 'symmetrical': 'False', 'blank': 'True'}),
            'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Faq']", 'symmetrical': 'False', 'blank': 'True'}),
            'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_resources_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Resource']"}),
            'searchable_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'toc': ('mezzanine.core.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
            'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
            'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': u"orm['auth.User']"}),
            'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'})
        },
        u'pleiapp.tagline': {
            'Meta': {'object_name': 'Tagline'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        u'pleiapp.topic': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Topic'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'})
        },
        u'pleiapp.type': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Type'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'})
        },
        u'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['pleiapp']
|
|
# -*- coding: utf-8 -*-
"""
@file
@brief Defines a :epkg:`sphinx` extension which if all parameters are documented.
"""
import inspect
from docutils import nodes
import sphinx
from sphinx.util import logging
from sphinx.util.docfields import DocFieldTransformer, _is_single_paragraph
from .import_object_helper import import_any_object
def check_typed_make_field(self, types, domain, items, env=None, parameters=None,
                           function_name=None, docname=None, kind=None):
    """
    Overwrites function
    `make_field <https://github.com/sphinx-doc/sphinx/blob/master/sphinx/util/docfields.py#L197>`_.
    Processes one argument of a function and warns about mismatches between
    the declared signature and the documented parameters.

    @param self from original function
    @param types from original function
    @param domain from original function
    @param items from original function
    @param env from original function
    @param parameters list of known arguments for the function or method
    @param function_name function name these arguments belong to
    @param docname document which contains the object
    @param kind tells which kind of object *function_name* is (function, method or class)

    Example of warnings it raises:

    ::

        [docassert] 'onefunction' has no parameter 'a' (in '...project_name\\subproject\\myexampleb.py').
        [docassert] 'onefunction' has undocumented parameters 'a, b' (...project_name\\subproject\\myexampleb.py').
    """
    if parameters is None:
        # No signature is known: every documented argument is treated as
        # unknown (and only reported when *function_name* is given).
        # NOTE: the original code also did the no-op ``parameters = None``
        # here; that dead assignment is removed.
        check_params = {}
    else:
        parameters = list(parameters)
        if kind == "method":
            # Drop the implicit first argument (usually 'self').
            parameters = parameters[1:]

        def kg(p):
            "Return the parameter name whether *p* is a string or an object."
            return p if isinstance(p, str) else p.name

        # Map each declared parameter name to the number of times it has
        # been documented (0 means "not documented yet").
        check_params = {kg(p): 0 for p in parameters}
    logger = logging.getLogger("docassert")

    def check_item(fieldarg, content, logger):
        "Check one documented argument against the declared parameters."
        if fieldarg not in check_params:
            if function_name is not None:
                logger.warning("[docassert] '{0}' has no parameter '{1}' (in '{2}').".format(
                    function_name, fieldarg, docname))
        else:
            check_params[fieldarg] += 1
            if check_params[fieldarg] > 1:
                # Same argument documented more than once.
                logger.warning("[docassert] '{1}' of '{0}' is duplicated (in '{2}').".format(
                    function_name, fieldarg, docname))

    if isinstance(items, list):
        for fieldarg, content in items:
            check_item(fieldarg, content, logger)
        mini = None if len(check_params) == 0 else min(check_params.values())
        if mini == 0:
            # At least one declared parameter was never documented.
            # (Iterating items() directly replaces the original's
            # unnecessary ``list(check_params.items())`` materialization.)
            nodoc = list(sorted(k for k, v in check_params.items() if v == 0))
            if len(nodoc) > 0:
                if len(nodoc) == 1 and nodoc[0] == 'self':
                    # Behavior should be improved.
                    pass
                else:
                    logger.warning("[docassert] '{0}' has undocumented parameters '{1}' (in '{2}').".format(
                        function_name, ", ".join(nodoc), docname))
    else:
        # Documentation related to the return.
        pass
class OverrideDocFieldTransformer:
    """
    Overrides one function with assigning it to a method.

    Holds the original ``DocFieldTransformer.transform`` so that
    :meth:`override_transform` can run extra parameter checks and then
    delegate to the replaced implementation.
    """

    def __init__(self, replaced):
        """
        Constructor

        @param replaced should be *DocFieldTransformer.transform*
        """
        # Original transform, called at the end of override_transform.
        self.replaced = replaced

    def override_transform(self, other_self, node):
        """
        Transform a single field list *node*.
        Overwrite function `transform
        <https://github.com/sphinx-doc/sphinx/blob/
        master/sphinx/util/docfields.py#L271>`_.
        It only adds extra verification and returns results from
        the replaced function.

        @param other_self the builder
        @param node node the replaced function changes or replace

        The function parses the original function and checks that the list
        of arguments declared by the function is the same the list of
        documented arguments.
        """
        typemap = other_self.typemap
        entries = []
        # Maps a grouped-field type name to its index in `entries`.
        groupindices = {}
        # Maps field type name -> {argument name: type content nodes}.
        types = {}
        # step 1: traverse all fields and collect field types and content
        for field in node:
            fieldname, fieldbody = field
            try:
                # split into field type and argument
                fieldtype, fieldarg = fieldname.astext().split(None, 1)
            except ValueError:
                # maybe an argument-less field type?
                fieldtype, fieldarg = fieldname.astext(), ''
            if fieldtype == "Parameters":
                # numpydoc style
                keyfieldtype = 'parameter'
            elif fieldtype == "param":
                keyfieldtype = 'param'
            else:
                # Only parameter-like fields are checked; skip the rest.
                continue
            typedesc, is_typefield = typemap.get(keyfieldtype, (None, None))
            # sort out unknown fields
            extracted = []
            if keyfieldtype == 'parameter':
                # numpydoc: parameters live in a nested definition list.
                for child in fieldbody.children:
                    if isinstance(child, nodes.definition_list):
                        for child2 in child.children:
                            extracted.append(child2)
            elif typedesc is None or typedesc.has_arg != bool(fieldarg):
                # either the field name is unknown, or the argument doesn't
                # match the spec; capitalize field name and be done with it
                new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
                if fieldarg:
                    new_fieldname += ' ' + fieldarg
                fieldname[0] = nodes.Text(new_fieldname)
                entries.append(field)
                continue
            typename = typedesc.name
            # collect the content, trying not to keep unnecessary paragraphs
            if extracted:
                content = extracted
            elif _is_single_paragraph(fieldbody):
                content = fieldbody.children[0].children
            else:
                content = fieldbody.children
            # if the field specifies a type, put it in the types collection
            if is_typefield:
                # filter out only inline nodes; others will result in invalid
                # markup being written out
                content = [n for n in content if isinstance(
                    n, (nodes.Inline, nodes.Text))]
                if content:
                    types.setdefault(typename, {})[fieldarg] = content
                continue
            # also support syntax like ``:param type name:``
            if typedesc.is_typed:
                try:
                    argtype, argname = fieldarg.split(None, 1)
                except ValueError:
                    # No embedded type; keep fieldarg unchanged.
                    pass
                else:
                    types.setdefault(typename, {})[argname] = [
                        nodes.Text(argtype)]
                    fieldarg = argname
            translatable_content = nodes.inline(
                fieldbody.rawsource, translatable=True)
            translatable_content.document = fieldbody.parent.document
            translatable_content.source = fieldbody.parent.source
            translatable_content.line = fieldbody.parent.line
            translatable_content += content
            # Import object, get the list of parameters
            # NOTE(review): the dotted name is recovered from the docutils
            # source string (".. :docstring of <name>") — assumes Sphinx's
            # autodoc source naming convention; confirm on Sphinx upgrades.
            docs = fieldbody.parent.source.split(":docstring of")[-1].strip()
            myfunc = None
            funckind = None
            function_name = None
            excs = []
            try:
                myfunc, function_name, funckind = import_any_object(docs)
            except ImportError as e:
                excs.append(e)
            if myfunc is None:
                if len(excs) > 0:
                    reasons = "\n".join(" {0}".format(e) for e in excs)
                else:
                    reasons = "unknown"
                logger = logging.getLogger("docassert")
                logger.warning(
                    "[docassert] unable to import object '{0}', reasons:\n{1}".format(docs, reasons))
                myfunc = None
            if myfunc is None:
                signature = None
                parameters = None
            else:
                try:
                    signature = inspect.signature(myfunc)
                    parameters = signature.parameters
                except (TypeError, ValueError):
                    # Builtins and some C-implemented callables have no
                    # introspectable signature.
                    logger = logging.getLogger("docassert")
                    logger.warning(
                        "[docassert] unable to get signature of '{0}'.".format(docs))
                    signature = None
                    parameters = None
            # grouped entries need to be collected in one entry, while others
            # get one entry per field
            if extracted:
                # numpydoc: first whitespace-delimited token is the name.
                group_entries = []
                for ext in extracted:
                    name = ext.astext().split('\n')[0].split()[0]
                    group_entries.append((name, ext))
                entries.append([typedesc, group_entries])
            elif typedesc.is_grouped:
                if typename in groupindices:
                    group = entries[groupindices[typename]]
                else:
                    groupindices[typename] = len(entries)
                    group = [typedesc, []]
                    entries.append(group)
                entry = typedesc.make_entry(fieldarg, [translatable_content])
                group[1].append(entry)
            else:
                entry = typedesc.make_entry(fieldarg, [translatable_content])
                entries.append([typedesc, entry])
        # step 2: all entries are collected, check the parameters list.
        # NOTE(review): `fieldbody`, `parameters`, `function_name` and
        # `funckind` leak out of the loop above — step 2 checks against the
        # *last* processed field's function.
        try:
            env = other_self.directive.state.document.settings.env
        except AttributeError as e:
            logger = logging.getLogger("docassert")
            logger.warning("[docassert] {0}".format(e))
            env = None
        docname = fieldbody.parent.source.split(':docstring')[0]
        for entry in entries:
            if isinstance(entry, nodes.field):
                logger = logging.getLogger("docassert")
                logger.warning(
                    "[docassert] unable to check [nodes.field] {0}".format(entry))
            else:
                fieldtype, content = entry
                fieldtypes = types.get(fieldtype.name, {})
                check_typed_make_field(other_self, fieldtypes, other_self.directive.domain,
                                       content, env=env, parameters=parameters,
                                       function_name=function_name, docname=docname,
                                       kind=funckind)
        # Delegate the actual node transformation to the original function.
        return self.replaced(other_self, node)
def setup_docassert(app):
    """
    Setup for ``docassert`` extension (sphinx).
    This changes ``DocFieldTransformer.transform`` and replaces
    it by a function which calls the current function and does
    extra checking on the list of parameters.

    .. warning:: This class does not handle methods if the parameter name
        for the class is different from *self*. Classes included in other
        classes are not properly handled.
    """
    # Capture the current transform so the override can delegate to it.
    override = OverrideDocFieldTransformer(DocFieldTransformer.transform)

    def patched_transform(transformer, node):
        "Forward to the overriding instance."
        return override.override_transform(transformer, node)

    DocFieldTransformer.transform = patched_transform
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
def setup(app):
    "setup for docassert: Sphinx extension entry point, delegates to setup_docassert"
    return setup_docassert(app)
|
|
# Copyright (c) 2009-2021 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from time import sleep
from chip import ChipDeviceCtrl
from common.utils import *
from common.pigweed_client import PigweedClient
from device_service import device_service_pb2
from button_service import button_service_pb2
from locking_service import locking_service_pb2
from pw_status import Status
import logging
log = logging.getLogger(__name__)
BLE_DEVICE_NAME = "MBED-lock"
DEVICE_NODE_ID = 1234
RPC_PROTOS = [device_service_pb2, button_service_pb2, locking_service_pb2]
@pytest.mark.smoketest
def test_smoke_test(device):
    """Reset the device and check the lock-app banner messages appear."""
    device.reset(duration=1)
    ret = device.wait_for_output("Mbed lock-app example application start")
    # Identity comparison with None is the correct idiom (`!= None` relies
    # on the object's __ne__, which an arbitrary type may override).
    assert ret is not None and len(ret) > 0
    ret = device.wait_for_output("Mbed lock-app example application run")
    assert ret is not None and len(ret) > 0
def test_wifi_provisioning(device, network):
    """Commission the device over BLE, join Wi-Fi and verify address resolution.

    :param device: board fixture exposing serial console helpers.
    :param network: (ssid, password) tuple for the Wi-Fi network.
    """
    network_ssid, network_pass = network
    devCtrl = ChipDeviceCtrl.ChipDeviceController()
    device_details = get_device_details(device)
    assert device_details is not None and len(device_details) != 0
    assert check_chip_ble_devices_advertising(
        devCtrl, BLE_DEVICE_NAME, device_details)
    ret = connect_device_over_ble(devCtrl,
                                  int(device_details["Discriminator"]),
                                  int(device_details["SetUpPINCode"]),
                                  DEVICE_NODE_ID)
    # `ret == DEVICE_NODE_ID` already implies ret is not None.
    assert ret == DEVICE_NODE_ID
    ret = device.wait_for_output("Device completed Rendezvous process")
    assert ret is not None and len(ret) > 0
    ret = commissioning_wifi(devCtrl, network_ssid,
                             network_pass, DEVICE_NODE_ID)
    assert ret == 0
    ret = device.wait_for_output("StationConnected")
    assert ret is not None and len(ret) > 0
    ret = device.wait_for_output("address set")
    assert ret is not None and len(ret) > 0
    # The device logs "... address set:<ip>"; take the last matching line.
    device_ip_address = ret[-1].partition("address set:")[2].strip()
    ret = resolve_device(devCtrl, DEVICE_NODE_ID)
    assert ret is not None and len(ret) == 2
    ip_address, port = ret
    # Resolved address must match what the device reported on its console.
    assert device_ip_address == ip_address
    assert close_connection(devCtrl, DEVICE_NODE_ID)
    assert close_ble(devCtrl)
def test_lock_ctrl(device, network):
    """Commission the device, then drive the lock via OnOff ZCL commands.

    :param device: board fixture exposing serial console helpers.
    :param network: (ssid, password) tuple for the Wi-Fi network.
    """
    network_ssid, network_pass = network
    devCtrl = ChipDeviceCtrl.ChipDeviceController()
    device_details = get_device_details(device)
    assert device_details is not None and len(device_details) != 0
    assert check_chip_ble_devices_advertising(
        devCtrl, BLE_DEVICE_NAME, device_details)
    ret = connect_device_over_ble(devCtrl,
                                  int(device_details["Discriminator"]),
                                  int(device_details["SetUpPINCode"]),
                                  DEVICE_NODE_ID)
    # `ret == DEVICE_NODE_ID` already implies ret is not None.
    assert ret == DEVICE_NODE_ID
    ret = device.wait_for_output("Device completed Rendezvous process")
    assert ret is not None and len(ret) > 0
    ret = commissioning_wifi(devCtrl, network_ssid,
                             network_pass, DEVICE_NODE_ID)
    assert ret == 0
    ret = resolve_device(devCtrl, DEVICE_NODE_ID)
    assert ret is not None and len(ret) == 2
    # OnOff Off -> unlock, On -> lock, Toggle -> flip back to unlocked.
    err, res = send_zcl_command(
        devCtrl, "OnOff Off {} 1 0".format(DEVICE_NODE_ID))
    assert err == 0
    ret = device.wait_for_output("Unlock Action has been completed", 20)
    assert ret is not None and len(ret) > 0
    err, res = send_zcl_command(
        devCtrl, "OnOff On {} 1 0".format(DEVICE_NODE_ID))
    assert err == 0
    ret = device.wait_for_output("Lock Action has been completed", 20)
    assert ret is not None and len(ret) > 0
    err, res = send_zcl_command(
        devCtrl, "OnOff Toggle {} 1 0".format(DEVICE_NODE_ID))
    assert err == 0
    ret = device.wait_for_output("Unlock Action has been completed", 20)
    assert ret is not None and len(ret) > 0
    assert close_connection(devCtrl, DEVICE_NODE_ID)
    assert close_ble(devCtrl)
def test_device_info_rpc(device):
    """Check GetDeviceInfo RPC fields against the device's advertised details."""
    pw_client = PigweedClient(device, RPC_PROTOS)
    status, payload = pw_client.rpcs.chip.rpc.Device.GetDeviceInfo()
    # `assert status.ok()` instead of `status.ok() == True` (truthiness idiom).
    assert status.ok()
    assert payload.vendor_id is not None
    assert payload.product_id is not None
    assert payload.serial_number is not None
    device_details = get_device_details(device)
    assert device_details is not None and len(device_details) != 0
    assert int(device_details["VendorID"]) == payload.vendor_id
    assert int(device_details["ProductID"]) == payload.product_id
    assert int(device_details["Discriminator"]) == payload.pairing_info.discriminator
    assert int(device_details["SetUpPINCode"]) == payload.pairing_info.code
def test_device_factory_reset_rpc(device):
    """FactoryReset RPC should be accepted by the device."""
    pw_client = PigweedClient(device, RPC_PROTOS)
    # Payload is unused; bind it to `_` to make that explicit.
    status, _ = pw_client.rpcs.chip.rpc.Device.FactoryReset()
    assert status.ok()
def test_device_reboot_rpc(device):
    """Reboot RPC is not implemented on this target."""
    pw_client = PigweedClient(device, RPC_PROTOS)
    # Payload is unused; bind it to `_` to make that explicit.
    status, _ = pw_client.rpcs.chip.rpc.Device.Reboot()
    assert status == Status.UNIMPLEMENTED
def test_device_ota_rpc(device):
    """TriggerOta RPC is not implemented on this target."""
    pw_client = PigweedClient(device, RPC_PROTOS)
    # Payload is unused; bind it to `_` to make that explicit.
    status, _ = pw_client.rpcs.chip.rpc.Device.TriggerOta()
    assert status == Status.UNIMPLEMENTED
def test_lock_ctrl_rpc(device):
    """Drive the Locking service over RPC and read back the state."""
    pw_client = PigweedClient(device, RPC_PROTOS)
    # Lock, then confirm the reported state.
    status, _ = pw_client.rpcs.chip.rpc.Locking.Set(locked=True)
    assert status.ok()
    status, payload = pw_client.rpcs.chip.rpc.Locking.Get()
    assert status.ok()
    # Truthiness assertions instead of `== True` / `== False`.
    assert payload.locked
    # Unlock, then confirm.
    status, _ = pw_client.rpcs.chip.rpc.Locking.Set(locked=False)
    assert status.ok()
    status, payload = pw_client.rpcs.chip.rpc.Locking.Get()
    assert status.ok()
    assert not payload.locked
def test_button_ctrl_rpc(device):
    """Pressing button 0 twice toggles the lock state away and back."""
    pw_client = PigweedClient(device, RPC_PROTOS)
    # Read the current lock state via button 0 (locking) service.
    status, payload = pw_client.rpcs.chip.rpc.Locking.Get()
    assert status.ok()
    initial_state = bool(payload.locked)
    # First press flips the state, second press restores the original.
    for expected in (not initial_state, initial_state):
        status, _ = pw_client.rpcs.chip.rpc.Button.Event(idx=0, pushed=True)
        assert status.ok()
        sleep(2)  # give the firmware time to process the button event
        status, payload = pw_client.rpcs.chip.rpc.Locking.Get()
        assert status.ok()
        assert payload.locked == expected
|
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class UpdateDirectorRequest(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, name=None, description_short=None, description=None,
                 meta_title=None, meta_description=None, meta_keywords=None,
                 link_rewrite=None, active=None):
        """
        UpdateDirectorRequest - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Declared attribute types, used by generic (de)serialization code.
        self.swagger_types = {
            'name': 'str',
            'description_short': 'list[I18nFieldInput]',
            'description': 'list[I18nFieldInput]',
            'meta_title': 'list[I18nFieldInput]',
            'meta_description': 'list[I18nFieldInput]',
            'meta_keywords': 'list[I18nFieldInput]',
            'link_rewrite': 'list[I18nFieldInput]',
            'active': 'bool'
        }
        # Mapping from python attribute name to the JSON key.
        self.attribute_map = {
            'name': 'name',
            'description_short': 'description_short',
            'description': 'description',
            'meta_title': 'meta_title',
            'meta_description': 'meta_description',
            'meta_keywords': 'meta_keywords',
            'link_rewrite': 'link_rewrite',
            'active': 'active'
        }
        self._name = name
        self._description_short = description_short
        self._description = description
        self._meta_title = meta_title
        self._meta_description = meta_description
        self._meta_keywords = meta_keywords
        self._link_rewrite = link_rewrite
        self._active = active

    @property
    def name(self):
        """
        Gets the name of this UpdateDirectorRequest.

        :return: The name of this UpdateDirectorRequest.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this UpdateDirectorRequest.

        :param name: The name of this UpdateDirectorRequest.
        :type: str
        """
        self._name = name

    @property
    def description_short(self):
        """
        Gets the description_short of this UpdateDirectorRequest.

        :return: The description_short of this UpdateDirectorRequest.
        :rtype: list[I18nFieldInput]
        """
        return self._description_short

    @description_short.setter
    def description_short(self, description_short):
        """
        Sets the description_short of this UpdateDirectorRequest.

        :param description_short: The description_short of this UpdateDirectorRequest.
        :type: list[I18nFieldInput]
        """
        self._description_short = description_short

    @property
    def description(self):
        """
        Gets the description of this UpdateDirectorRequest.

        :return: The description of this UpdateDirectorRequest.
        :rtype: list[I18nFieldInput]
        """
        return self._description

    @description.setter
    def description(self, description):
        """
        Sets the description of this UpdateDirectorRequest.

        :param description: The description of this UpdateDirectorRequest.
        :type: list[I18nFieldInput]
        """
        self._description = description

    @property
    def meta_title(self):
        """
        Gets the meta_title of this UpdateDirectorRequest.

        :return: The meta_title of this UpdateDirectorRequest.
        :rtype: list[I18nFieldInput]
        """
        return self._meta_title

    @meta_title.setter
    def meta_title(self, meta_title):
        """
        Sets the meta_title of this UpdateDirectorRequest.

        :param meta_title: The meta_title of this UpdateDirectorRequest.
        :type: list[I18nFieldInput]
        """
        self._meta_title = meta_title

    @property
    def meta_description(self):
        """
        Gets the meta_description of this UpdateDirectorRequest.

        :return: The meta_description of this UpdateDirectorRequest.
        :rtype: list[I18nFieldInput]
        """
        return self._meta_description

    @meta_description.setter
    def meta_description(self, meta_description):
        """
        Sets the meta_description of this UpdateDirectorRequest.

        :param meta_description: The meta_description of this UpdateDirectorRequest.
        :type: list[I18nFieldInput]
        """
        self._meta_description = meta_description

    @property
    def meta_keywords(self):
        """
        Gets the meta_keywords of this UpdateDirectorRequest.

        :return: The meta_keywords of this UpdateDirectorRequest.
        :rtype: list[I18nFieldInput]
        """
        return self._meta_keywords

    @meta_keywords.setter
    def meta_keywords(self, meta_keywords):
        """
        Sets the meta_keywords of this UpdateDirectorRequest.

        :param meta_keywords: The meta_keywords of this UpdateDirectorRequest.
        :type: list[I18nFieldInput]
        """
        self._meta_keywords = meta_keywords

    @property
    def link_rewrite(self):
        """
        Gets the link_rewrite of this UpdateDirectorRequest.

        :return: The link_rewrite of this UpdateDirectorRequest.
        :rtype: list[I18nFieldInput]
        """
        return self._link_rewrite

    @link_rewrite.setter
    def link_rewrite(self, link_rewrite):
        """
        Sets the link_rewrite of this UpdateDirectorRequest.

        :param link_rewrite: The link_rewrite of this UpdateDirectorRequest.
        :type: list[I18nFieldInput]
        """
        self._link_rewrite = link_rewrite

    @property
    def active(self):
        """
        Gets the active of this UpdateDirectorRequest.

        :return: The active of this UpdateDirectorRequest.
        :rtype: bool
        """
        return self._active

    @active.setter
    def active(self, active):
        """
        Sets the active of this UpdateDirectorRequest.

        :param active: The active of this UpdateDirectorRequest.
        :type: bool
        """
        self._active = active

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() works on both Python 2 and 3; the six.iteritems
        # indirection was unnecessary here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Fixed: comparing against a non-model object (e.g. an int) used to
        raise AttributeError on `other.__dict__`; now it returns False.
        """
        if not isinstance(other, UpdateDirectorRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
|
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MS COCO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import json
import os
import random
import zipfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import image_utils
from tensor2tensor.data_generators import imagenet
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import translate_ende
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
# URLs and filenames for MSCOCO data.
_MSCOCO_ROOT_URL = "http://msvocds.blob.core.windows.net/"
_MSCOCO_URLS = [
"coco2014/train2014.zip", "coco2014/val2014.zip", "coco2014/test2014.zip",
"annotations-1-0-3/captions_train-val2014.zip"
]
_MSCOCO_TRAIN_PREFIX = "train2014"
_MSCOCO_EVAL_PREFIX = "val2014"
_MSCOCO_TRAIN_CAPTION_FILE = "annotations/captions_train2014.json"
_MSCOCO_EVAL_CAPTION_FILE = "annotations/captions_val2014.json"
def _get_mscoco(directory):
    """Download and extract MSCOCO datasets to directory unless it is there.

    Args:
      directory: target directory for downloads and extracted archives.
    """
    for url in _MSCOCO_URLS:
        filename = os.path.basename(url)
        download_url = os.path.join(_MSCOCO_ROOT_URL, url)
        path = generator_utils.maybe_download(directory, filename, download_url)
        # BUG FIX: the original used filename.strip(".zip"), which strips any
        # of the characters '.', 'z', 'i', 'p' from both ends rather than
        # removing the ".zip" suffix. splitext drops exactly the extension.
        unzip_dir = os.path.join(directory, os.path.splitext(filename)[0])
        if not tf.gfile.Exists(unzip_dir):
            zipfile.ZipFile(path, "r").extractall(directory)
def mscoco_generator(data_dir,
                     tmp_dir,
                     training,
                     how_many,
                     start_from=0,
                     eos_list=None,
                     vocab_filename=None):
    """Image generator for MSCOCO captioning problem with token-wise captions.

    Args:
      data_dir: path to the data directory.
      tmp_dir: path to temporary storage directory.
      training: a Boolean; if true, we use the train set, otherwise the test set.
      how_many: how many images and labels to generate.
      start_from: from which image to start.
      eos_list: optional list of end of sentence tokens, otherwise use default
        value `1`.
      vocab_filename: file within `tmp_dir` to read vocabulary from.

    Yields:
      A dictionary representing the images with the following fields:
      * image/encoded: the string encoding the image as JPEG,
      * image/format: the string "jpeg" representing image format,
      * image/class/label: a list of integers representing the caption,
      * image/height: an integer representing the height,
      * image/width: an integer representing the width.
      Every field is actually a list of the corresponding type.
    """
    eos_list = [1] if eos_list is None else eos_list

    def get_vocab():
        """Get vocab for caption text encoder."""
        # NOTE(review): the vocab file is looked up under `data_dir`, although
        # the docstring above says "file within `tmp_dir`" — confirm intent.
        if data_dir is not None and vocab_filename is not None:
            vocab_filepath = os.path.join(data_dir, vocab_filename)
            if tf.gfile.Exists(vocab_filepath):
                tf.logging.info("Found vocab file: %s", vocab_filepath)
                vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath)
                return vocab_symbolizer
            else:
                raise ValueError("Vocab file does not exist: %s" % vocab_filepath)
        return None

    vocab_symbolizer = get_vocab()
    # Ensure the MSCOCO archives are downloaded and extracted into tmp_dir.
    _get_mscoco(tmp_dir)
    caption_filepath = (
        _MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE)
    caption_filepath = os.path.join(tmp_dir, caption_filepath)
    prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX
    caption_file = io.open(caption_filepath)
    caption_json = json.load(caption_file)
    # Dictionary from image_id to ((filename, height, width), captions).
    image_dict = {}
    for image in caption_json["images"]:
        image_dict[image["id"]] = [(image["file_name"], image["height"],
                                    image["width"]), []]
    annotations = caption_json["annotations"]
    annotation_count = len(annotations)
    image_count = len(image_dict)
    tf.logging.info("Processing %d images and %d labels\n" % (image_count,
                                                              annotation_count))
    # Attach every caption to its image (an image can have many captions).
    for annotation in annotations:
        image_id = annotation["image_id"]
        image_dict[image_id][1].append(annotation["caption"])
    # Take the requested slice first, then shuffle the order within it.
    data = list(image_dict.values())[start_from:start_from + how_many]
    random.shuffle(data)
    for image_info, labels in data:
        image_filename = image_info[0]
        image_filepath = os.path.join(tmp_dir, prefix, image_filename)
        with tf.gfile.Open(image_filepath, "rb") as f:
            encoded_image_data = f.read()
        height, width = image_info[1], image_info[2]
        # One yielded example per caption, each reusing the same image bytes.
        for label in labels:
            if vocab_filename is None or vocab_symbolizer is None:
                # Character-level fallback: unicode code points plus EOS.
                label = [ord(c) for c in label] + eos_list
            else:
                label = vocab_symbolizer.encode(label) + eos_list
            yield {
                "image/encoded": [encoded_image_data],
                "image/format": ["jpeg"],
                "image/class/label": label,
                "image/height": [height],
                "image/width": [width]
            }
@registry.register_problem
class ImageMsCocoCharacters(image_utils.Image2TextProblem):
    """MSCOCO, character level."""

    @property
    def is_character_level(self):
        return True

    @property
    def target_space_id(self):
        return problem.SpaceID.EN_CHR

    @property
    def train_shards(self):
        return 100

    @property
    def dev_shards(self):
        return 10

    def preprocess_example(self, example, mode, _):
        return imagenet.imagenet_preprocess_example(example, mode)

    def generator(self, data_dir, tmp_dir, is_training):
        """Yield examples: 80k for training, 40k for eval (MSCOCO 2014 splits)."""
        # The original ended with an unreachable `raise NotImplementedError()`
        # after both branches had already returned; it has been removed.
        if is_training:
            return mscoco_generator(data_dir, tmp_dir, True, 80000)
        return mscoco_generator(data_dir, tmp_dir, False, 40000)
@registry.register_problem
class ImageMsCocoTokens32k(ImageMsCocoCharacters):
    """MSCOCO, 32k tokens vocab."""
    # NOTE: the docstring previously said "8k tokens vocab", contradicting the
    # class name and the 32k vocab problem returned by `vocab_problem` below.

    @property
    def is_character_level(self):
        return False

    @property
    def vocab_problem(self):
        # Captions reuse the WMT En-De 32k subword vocabulary.
        return translate_ende.TranslateEndeWmt32k()

    @property
    def target_space_id(self):
        return problem.SpaceID.EN_TOK

    @property
    def train_shards(self):
        return 100

    @property
    def dev_shards(self):
        return 10

    def generator(self, data_dir, tmp_dir, is_training):
        # We use the translate vocab file as the vocabulary for captions.
        # This requires having the vocab file present in the data_dir for the
        # generation pipeline to succeed.
        vocab_filename = self.vocab_problem.vocab_filename
        if is_training:
            return mscoco_generator(
                data_dir,
                tmp_dir,
                True,
                80000,
                vocab_filename=vocab_filename)
        else:
            return mscoco_generator(
                data_dir,
                tmp_dir,
                False,
                40000,
                vocab_filename=vocab_filename)
@registry.register_problem
class ImageTextMsCocoMultiResolution(ImageMsCocoTokens32k):
    """MSCoCo at multiple resolutions."""

    def dataset_filename(self):
        # Reuse the dataset generated by ImageMsCocoTokens32k; only
        # preprocessing differs.
        return "image_ms_coco_tokens32k"

    def preprocess_example(self, example, mode, hparams):
        image = example["inputs"]
        # Get resize method. Include a default if not specified, or if it's not in
        # TensorFlow's collection of pre-implemented resize methods.
        resize_method = getattr(hparams, "resize_method", "BICUBIC")
        resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method)
        highest_res = hparams.resolutions[-1]
        if resize_method == "DILATED":
            # Resize image so that dilated subsampling is properly divisible.
            scaled_image = image_utils.resize_by_area(image, highest_res)
            scaled_images = image_utils.make_multiscale_dilated(
                scaled_image, hparams.resolutions, num_channels=self.num_channels)
        else:
            scaled_images = image_utils.make_multiscale(
                image, hparams.resolutions,
                resize_method=resize_method, num_channels=self.num_channels)
        # Pack tuple of scaled images into one tensor. We do this by enforcing the
        # columns to match for every resolution.
        # NOTE(review): each res x res image is reshaped so its second dimension
        # equals `highest_res`; this assumes every res**2 is divisible by
        # highest_res — confirm hparams.resolutions is chosen accordingly.
        example["inputs"] = tf.concat([
            tf.reshape(scaled_image,
                       [res**2 // highest_res, highest_res, self.num_channels])
            for scaled_image, res in zip(scaled_images, hparams.resolutions)],
            axis=0)
        return example
@registry.register_problem
class ImageTextMsCoco(ImageMsCocoTokens32k):
    """Problem for using MsCoco for generating images from text."""

    # Target side length (pixels) for area-based resizing below.
    _MSCOCO_IMAGE_SIZE = 32

    def dataset_filename(self):
        # Reuse the dataset generated by ImageMsCocoTokens32k.
        return "image_ms_coco_tokens32k"

    def preprocess_example(self, example, mode, unused_hparams):
        example["inputs"] = image_utils.resize_by_area(
            example["inputs"], self._MSCOCO_IMAGE_SIZE)
        return example
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections import defaultdict
import unittest
from mock import patch
from swift.proxy.controllers.base import headers_to_container_info, \
headers_to_account_info, headers_to_object_info, get_container_info, \
get_container_memcache_key, get_account_info, get_account_memcache_key, \
get_object_env_key, get_info, get_object_info, \
Controller, GetOrHeadHandler, _set_info_cache, _set_object_info_cache, \
bytes_to_skip
from swift.common.swob import Request, HTTPException, HeaderKeyDict, \
RESPONSE_REASONS
from swift.common import exceptions
from swift.common.utils import split_path
from swift.common.http import is_success
from swift.common.storage_policy import StoragePolicy
from test.unit import fake_http_connect, FakeRing, FakeMemcache
from swift.proxy import server as proxy_server
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies
class FakeResponse(object):
    """Minimal stand-in for a backend response used by the fake WSGI app."""

    # Headers subclasses merge into successful responses (see `headers`).
    base_headers = {}

    def __init__(self, status_int=200, headers=None, body=''):
        self.status_int = status_int
        self._headers = headers or {}
        self.body = body

    @property
    def headers(self):
        # Only successful statuses advertise the class-level stat headers.
        if is_success(self.status_int):
            self._headers.update(self.base_headers)
        return self._headers
class AccountResponse(FakeResponse):
    # Stat headers merged into successful account responses.
    base_headers = {
        'x-account-container-count': 333,
        'x-account-object-count': 1000,
        'x-account-bytes-used': 6666,
    }
class ContainerResponse(FakeResponse):
    # Stat headers merged into successful container responses.
    base_headers = {
        'x-container-object-count': 1000,
        'x-container-bytes-used': 6666,
    }
class ObjectResponse(FakeResponse):
    # Headers merged into successful object responses.
    base_headers = {
        'content-length': 5555,
        'content-type': 'text/plain'
    }
class DynamicResponseFactory(object):
    """Builds Fake*Response objects, consuming one status code per request."""

    # Which response class to instantiate for each path depth.
    response_type = {
        'obj': ObjectResponse,
        'container': ContainerResponse,
        'account': AccountResponse,
    }

    def __init__(self, *statuses):
        # With no explicit statuses, every response is a 200.
        self.statuses = iter(statuses) if statuses else itertools.repeat(200)
        # Per-type counter of how many responses were produced.
        self.stats = defaultdict(int)

    def _get_response(self, type_):
        self.stats[type_] += 1
        return self.response_type[type_](next(self.statuses))

    def get_response(self, environ):
        version, account, container, obj = split_path(
            environ['PATH_INFO'], 2, 4, True)
        if obj:
            type_ = 'obj'
        elif container:
            type_ = 'container'
        else:
            type_ = 'account'
        resp = self._get_response(type_)
        # Annotate the response with its path components for cache seeding.
        resp.account = account
        resp.container = container
        resp.obj = obj
        return resp
class FakeApp(object):
    # TTLs read by the proxy's get_info() caching logic.
    recheck_container_existence = 30
    recheck_account_existence = 30

    def __init__(self, response_factory=None, statuses=None):
        # Either an explicit factory, or one built from the given statuses.
        self.responses = response_factory or \
            DynamicResponseFactory(*statuses or [])
        # Records the 'swift.source' of every request, in order.
        self.sources = []

    def __call__(self, environ, start_response):
        self.sources.append(environ.get('swift.source'))
        response = self.responses.get_response(environ)
        reason = RESPONSE_REASONS[response.status_int][0]
        start_response('%d %s' % (response.status_int, reason),
                       [(k, v) for k, v in response.headers.items()])
        # It's a bit strange, but the get_info cache stuff relies on the
        # app setting some keys in the environment as it makes requests
        # (in particular GETorHEAD_base) - so our fake does the same
        _set_info_cache(self, environ, response.account,
                        response.container, response)
        if response.obj:
            _set_object_info_cache(self, environ, response.account,
                                   response.container, response.obj,
                                   response)
        return iter(response.body)
class FakeCache(FakeMemcache):
    """FakeMemcache that can be pre-seeded and can pin every get() to a stub."""

    def __init__(self, stub=None, **pre_cached):
        super(FakeCache, self).__init__()
        if pre_cached:
            self.store.update(pre_cached)
        # When truthy, every get() returns this value regardless of key.
        self.stub = stub

    def get(self, key):
        if self.stub:
            return self.stub
        return self.store.get(key)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestFuncs(unittest.TestCase):
    def setUp(self):
        # Build a proxy app with fully faked rings/memcache; no real backends.
        self.app = proxy_server.Application(None, FakeMemcache(),
                                            account_ring=FakeRing(),
                                            container_ring=FakeRing())
    def test_GETorHEAD_base(self):
        """GETorHEAD_base should record status info in the request environ
        under a 'swift.<type>/<path>' key for object/container/account."""
        base = Controller(self.app)
        req = Request.blank('/v1/a/c/o/with/slashes')
        ring = FakeRing()
        nodes = list(ring.get_part_nodes(0)) + list(ring.get_more_nodes(0))
        # Object path containing slashes: env key uses the full object path.
        with patch('swift.proxy.controllers.base.'
                   'http_connect', fake_http_connect(200)):
            resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part',
                                       '/a/c/o/with/slashes')
        self.assertTrue('swift.object/a/c/o/with/slashes' in resp.environ)
        self.assertEqual(
            resp.environ['swift.object/a/c/o/with/slashes']['status'], 200)
        # Plain object path.
        req = Request.blank('/v1/a/c/o')
        with patch('swift.proxy.controllers.base.'
                   'http_connect', fake_http_connect(200)):
            resp = base.GETorHEAD_base(req, 'object', iter(nodes), 'part',
                                       '/a/c/o')
        self.assertTrue('swift.object/a/c/o' in resp.environ)
        self.assertEqual(resp.environ['swift.object/a/c/o']['status'], 200)
        # Container path.
        req = Request.blank('/v1/a/c')
        with patch('swift.proxy.controllers.base.'
                   'http_connect', fake_http_connect(200)):
            resp = base.GETorHEAD_base(req, 'container', iter(nodes), 'part',
                                       '/a/c')
        self.assertTrue('swift.container/a/c' in resp.environ)
        self.assertEqual(resp.environ['swift.container/a/c']['status'], 200)
        # Account path.
        req = Request.blank('/v1/a')
        with patch('swift.proxy.controllers.base.'
                   'http_connect', fake_http_connect(200)):
            resp = base.GETorHEAD_base(req, 'account', iter(nodes), 'part',
                                       '/a')
        self.assertTrue('swift.account/a' in resp.environ)
        self.assertEqual(resp.environ['swift.account/a']['status'], 200)
def test_get_info(self):
app = FakeApp()
# Do a non cached call to account
env = {}
info_a = get_info(app, env, 'a')
# Check that you got proper info
self.assertEqual(info_a['status'], 200)
self.assertEqual(info_a['bytes'], 6666)
self.assertEqual(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEqual(env.get('swift.account/a'), info_a)
# Make sure the app was called
self.assertEqual(app.responses.stats['account'], 1)
# Do an env cached call to account
info_a = get_info(app, env, 'a')
# Check that you got proper info
self.assertEqual(info_a['status'], 200)
self.assertEqual(info_a['bytes'], 6666)
self.assertEqual(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEqual(env.get('swift.account/a'), info_a)
# Make sure the app was NOT called AGAIN
self.assertEqual(app.responses.stats['account'], 1)
# This time do env cached call to account and non cached to container
info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
self.assertEqual(info_c['status'], 200)
self.assertEqual(info_c['bytes'], 6666)
self.assertEqual(info_c['object_count'], 1000)
# Make sure the env cache is set
self.assertEqual(env.get('swift.account/a'), info_a)
self.assertEqual(env.get('swift.container/a/c'), info_c)
# Make sure the app was called for container
self.assertEqual(app.responses.stats['container'], 1)
# This time do a non cached call to account than non cached to
# container
app = FakeApp()
env = {} # abandon previous call to env
info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
self.assertEqual(info_c['status'], 200)
self.assertEqual(info_c['bytes'], 6666)
self.assertEqual(info_c['object_count'], 1000)
# Make sure the env cache is set
self.assertEqual(env.get('swift.account/a'), info_a)
self.assertEqual(env.get('swift.container/a/c'), info_c)
# check app calls both account and container
self.assertEqual(app.responses.stats['account'], 1)
self.assertEqual(app.responses.stats['container'], 1)
# This time do an env cached call to container while account is not
# cached
del(env['swift.account/a'])
info_c = get_info(app, env, 'a', 'c')
# Check that you got proper info
self.assertEqual(info_a['status'], 200)
self.assertEqual(info_c['bytes'], 6666)
self.assertEqual(info_c['object_count'], 1000)
# Make sure the env cache is set and account still not cached
self.assertEqual(env.get('swift.container/a/c'), info_c)
# no additional calls were made
self.assertEqual(app.responses.stats['account'], 1)
self.assertEqual(app.responses.stats['container'], 1)
# Do a non cached call to account not found with ret_not_found
app = FakeApp(statuses=(404,))
env = {}
info_a = get_info(app, env, 'a', ret_not_found=True)
# Check that you got proper info
self.assertEqual(info_a['status'], 404)
self.assertEqual(info_a['bytes'], None)
self.assertEqual(info_a['total_object_count'], None)
# Make sure the env cache is set
self.assertEqual(env.get('swift.account/a'), info_a)
# and account was called
self.assertEqual(app.responses.stats['account'], 1)
# Do a cached call to account not found with ret_not_found
info_a = get_info(app, env, 'a', ret_not_found=True)
# Check that you got proper info
self.assertEqual(info_a['status'], 404)
self.assertEqual(info_a['bytes'], None)
self.assertEqual(info_a['total_object_count'], None)
# Make sure the env cache is set
self.assertEqual(env.get('swift.account/a'), info_a)
# add account was NOT called AGAIN
self.assertEqual(app.responses.stats['account'], 1)
# Do a non cached call to account not found without ret_not_found
app = FakeApp(statuses=(404,))
env = {}
info_a = get_info(app, env, 'a')
# Check that you got proper info
self.assertEqual(info_a, None)
self.assertEqual(env['swift.account/a']['status'], 404)
# and account was called
self.assertEqual(app.responses.stats['account'], 1)
# Do a cached call to account not found without ret_not_found
info_a = get_info(None, env, 'a')
# Check that you got proper info
self.assertEqual(info_a, None)
self.assertEqual(env['swift.account/a']['status'], 404)
# add account was NOT called AGAIN
self.assertEqual(app.responses.stats['account'], 1)
def test_get_container_info_swift_source(self):
app = FakeApp()
req = Request.blank("/v1/a/c", environ={'swift.cache': FakeCache()})
get_container_info(req.environ, app, swift_source='MC')
self.assertEqual(app.sources, ['GET_INFO', 'MC'])
def test_get_object_info_swift_source(self):
app = FakeApp()
req = Request.blank("/v1/a/c/o",
environ={'swift.cache': FakeCache()})
get_object_info(req.environ, app, swift_source='LU')
self.assertEqual(app.sources, ['LU'])
def test_get_container_info_no_cache(self):
req = Request.blank("/v1/AUTH_account/cont",
environ={'swift.cache': FakeCache({})})
resp = get_container_info(req.environ, FakeApp())
self.assertEqual(resp['storage_policy'], '0')
self.assertEqual(resp['bytes'], 6666)
self.assertEqual(resp['object_count'], 1000)
def test_get_container_info_no_account(self):
responses = DynamicResponseFactory(404, 200)
app = FakeApp(responses)
req = Request.blank("/v1/AUTH_does_not_exist/cont")
info = get_container_info(req.environ, app)
self.assertEqual(info['status'], 0)
def test_get_container_info_no_auto_account(self):
responses = DynamicResponseFactory(404, 200)
app = FakeApp(responses)
req = Request.blank("/v1/.system_account/cont")
info = get_container_info(req.environ, app)
self.assertEqual(info['status'], 200)
self.assertEqual(info['bytes'], 6666)
self.assertEqual(info['object_count'], 1000)
def test_get_container_info_cache(self):
cache_stub = {
'status': 404, 'bytes': 3333, 'object_count': 10,
# simplejson sometimes hands back strings, sometimes unicodes
'versions': u"\u1F4A9"}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cache_stub)})
resp = get_container_info(req.environ, FakeApp())
self.assertEqual(resp['storage_policy'], '0')
self.assertEqual(resp['bytes'], 3333)
self.assertEqual(resp['object_count'], 10)
self.assertEqual(resp['status'], 404)
self.assertEqual(resp['versions'], "\xe1\xbd\x8a\x39")
def test_get_container_info_env(self):
cache_key = get_container_memcache_key("account", "cont")
env_key = 'swift.%s' % cache_key
req = Request.blank("/v1/account/cont",
environ={env_key: {'bytes': 3867},
'swift.cache': FakeCache({})})
resp = get_container_info(req.environ, 'xxx')
self.assertEqual(resp['bytes'], 3867)
def test_get_account_info_swift_source(self):
app = FakeApp()
req = Request.blank("/v1/a", environ={'swift.cache': FakeCache()})
get_account_info(req.environ, app, swift_source='MC')
self.assertEqual(app.sources, ['MC'])
def test_get_account_info_no_cache(self):
app = FakeApp()
req = Request.blank("/v1/AUTH_account",
environ={'swift.cache': FakeCache({})})
resp = get_account_info(req.environ, app)
self.assertEqual(resp['bytes'], 6666)
self.assertEqual(resp['total_object_count'], 1000)
def test_get_account_info_cache(self):
# The original test that we prefer to preserve
cached = {'status': 404,
'bytes': 3333,
'total_object_count': 10}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
resp = get_account_info(req.environ, FakeApp())
self.assertEqual(resp['bytes'], 3333)
self.assertEqual(resp['total_object_count'], 10)
self.assertEqual(resp['status'], 404)
# Here is a more realistic test
cached = {'status': 404,
'bytes': '3333',
'container_count': '234',
'total_object_count': '10',
'meta': {}}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
resp = get_account_info(req.environ, FakeApp())
self.assertEqual(resp['status'], 404)
self.assertEqual(resp['bytes'], '3333')
self.assertEqual(resp['container_count'], 234)
self.assertEqual(resp['meta'], {})
self.assertEqual(resp['total_object_count'], '10')
def test_get_account_info_env(self):
cache_key = get_account_memcache_key("account")
env_key = 'swift.%s' % cache_key
req = Request.blank("/v1/account",
environ={env_key: {'bytes': 3867},
'swift.cache': FakeCache({})})
resp = get_account_info(req.environ, 'xxx')
self.assertEqual(resp['bytes'], 3867)
def test_get_object_info_env(self):
cached = {'status': 200,
'length': 3333,
'type': 'application/json',
'meta': {}}
env_key = get_object_env_key("account", "cont", "obj")
req = Request.blank("/v1/account/cont/obj",
environ={env_key: cached,
'swift.cache': FakeCache({})})
resp = get_object_info(req.environ, 'xxx')
self.assertEqual(resp['length'], 3333)
self.assertEqual(resp['type'], 'application/json')
def test_get_object_info_no_env(self):
app = FakeApp()
req = Request.blank("/v1/account/cont/obj",
environ={'swift.cache': FakeCache({})})
resp = get_object_info(req.environ, app)
self.assertEqual(app.responses.stats['account'], 0)
self.assertEqual(app.responses.stats['container'], 0)
self.assertEqual(app.responses.stats['obj'], 1)
self.assertEqual(resp['length'], 5555)
self.assertEqual(resp['type'], 'text/plain')
def test_options(self):
base = Controller(self.app)
base.account_name = 'a'
base.container_name = 'c'
origin = 'http://m.com'
self.app.cors_allow_origin = [origin]
req = Request.blank('/v1/a/c/o',
environ={'swift.cache': FakeCache()},
headers={'Origin': origin,
'Access-Control-Request-Method': 'GET'})
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.OPTIONS(req)
self.assertEqual(resp.status_int, 200)
def test_options_with_null_allow_origin(self):
base = Controller(self.app)
base.account_name = 'a'
base.container_name = 'c'
def my_container_info(*args):
return {
'cors': {
'allow_origin': '*',
}
}
base.container_info = my_container_info
req = Request.blank('/v1/a/c/o',
environ={'swift.cache': FakeCache()},
headers={'Origin': '*',
'Access-Control-Request-Method': 'GET'})
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.OPTIONS(req)
self.assertEqual(resp.status_int, 200)
def test_options_unauthorized(self):
base = Controller(self.app)
base.account_name = 'a'
base.container_name = 'c'
self.app.cors_allow_origin = ['http://NOT_IT']
req = Request.blank('/v1/a/c/o',
environ={'swift.cache': FakeCache()},
headers={'Origin': 'http://m.com',
'Access-Control-Request-Method': 'GET'})
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.OPTIONS(req)
self.assertEqual(resp.status_int, 401)
def test_headers_to_container_info_missing(self):
resp = headers_to_container_info({}, 404)
self.assertEqual(resp['status'], 404)
self.assertEqual(resp['read_acl'], None)
self.assertEqual(resp['write_acl'], None)
def test_headers_to_container_info_meta(self):
headers = {'X-Container-Meta-Whatevs': 14,
'x-container-meta-somethingelse': 0}
resp = headers_to_container_info(headers.items(), 200)
self.assertEqual(len(resp['meta']), 2)
self.assertEqual(resp['meta']['whatevs'], 14)
self.assertEqual(resp['meta']['somethingelse'], 0)
def test_headers_to_container_info_sys_meta(self):
prefix = get_sys_meta_prefix('container')
headers = {'%sWhatevs' % prefix: 14,
'%ssomethingelse' % prefix: 0}
resp = headers_to_container_info(headers.items(), 200)
self.assertEqual(len(resp['sysmeta']), 2)
self.assertEqual(resp['sysmeta']['whatevs'], 14)
self.assertEqual(resp['sysmeta']['somethingelse'], 0)
def test_headers_to_container_info_values(self):
headers = {
'x-container-read': 'readvalue',
'x-container-write': 'writevalue',
'x-container-sync-key': 'keyvalue',
'x-container-meta-access-control-allow-origin': 'here',
}
resp = headers_to_container_info(headers.items(), 200)
self.assertEqual(resp['read_acl'], 'readvalue')
self.assertEqual(resp['write_acl'], 'writevalue')
self.assertEqual(resp['cors']['allow_origin'], 'here')
headers['x-unused-header'] = 'blahblahblah'
self.assertEqual(
resp,
headers_to_container_info(headers.items(), 200))
def test_container_info_without_req(self):
base = Controller(self.app)
base.account_name = 'a'
base.container_name = 'c'
container_info = \
base.container_info(base.account_name,
base.container_name)
self.assertEqual(container_info['status'], 0)
def test_headers_to_account_info_missing(self):
resp = headers_to_account_info({}, 404)
self.assertEqual(resp['status'], 404)
self.assertEqual(resp['bytes'], None)
self.assertEqual(resp['container_count'], None)
def test_headers_to_account_info_meta(self):
headers = {'X-Account-Meta-Whatevs': 14,
'x-account-meta-somethingelse': 0}
resp = headers_to_account_info(headers.items(), 200)
self.assertEqual(len(resp['meta']), 2)
self.assertEqual(resp['meta']['whatevs'], 14)
self.assertEqual(resp['meta']['somethingelse'], 0)
def test_headers_to_account_info_sys_meta(self):
prefix = get_sys_meta_prefix('account')
headers = {'%sWhatevs' % prefix: 14,
'%ssomethingelse' % prefix: 0}
resp = headers_to_account_info(headers.items(), 200)
self.assertEqual(len(resp['sysmeta']), 2)
self.assertEqual(resp['sysmeta']['whatevs'], 14)
self.assertEqual(resp['sysmeta']['somethingelse'], 0)
def test_headers_to_account_info_values(self):
headers = {
'x-account-object-count': '10',
'x-account-container-count': '20',
}
resp = headers_to_account_info(headers.items(), 200)
self.assertEqual(resp['total_object_count'], '10')
self.assertEqual(resp['container_count'], '20')
headers['x-unused-header'] = 'blahblahblah'
self.assertEqual(
resp,
headers_to_account_info(headers.items(), 200))
def test_headers_to_object_info_missing(self):
resp = headers_to_object_info({}, 404)
self.assertEqual(resp['status'], 404)
self.assertEqual(resp['length'], None)
self.assertEqual(resp['etag'], None)
def test_headers_to_object_info_meta(self):
headers = {'X-Object-Meta-Whatevs': 14,
'x-object-meta-somethingelse': 0}
resp = headers_to_object_info(headers.items(), 200)
self.assertEqual(len(resp['meta']), 2)
self.assertEqual(resp['meta']['whatevs'], 14)
self.assertEqual(resp['meta']['somethingelse'], 0)
def test_headers_to_object_info_sys_meta(self):
prefix = get_sys_meta_prefix('object')
headers = {'%sWhatevs' % prefix: 14,
'%ssomethingelse' % prefix: 0}
resp = headers_to_object_info(headers.items(), 200)
self.assertEqual(len(resp['sysmeta']), 2)
self.assertEqual(resp['sysmeta']['whatevs'], 14)
self.assertEqual(resp['sysmeta']['somethingelse'], 0)
def test_headers_to_object_info_values(self):
headers = {
'content-length': '1024',
'content-type': 'application/json',
}
resp = headers_to_object_info(headers.items(), 200)
self.assertEqual(resp['length'], '1024')
self.assertEqual(resp['type'], 'application/json')
headers['x-unused-header'] = 'blahblahblah'
self.assertEqual(
resp,
headers_to_object_info(headers.items(), 200))
    def test_base_have_quorum(self):
        # have_quorum(statuses, node_count): True once a majority of
        # node_count responses agree on success or on failure.
        base = Controller(self.app)
        # just throw a bunch of test cases at it
        self.assertEqual(base.have_quorum([201, 404], 3), False)
        self.assertEqual(base.have_quorum([201, 201], 4), False)
        self.assertEqual(base.have_quorum([201, 201, 404, 404], 4), False)
        self.assertEqual(base.have_quorum([201, 503, 503, 201], 4), False)
        self.assertEqual(base.have_quorum([201, 201], 3), True)
        self.assertEqual(base.have_quorum([404, 404], 3), True)
        self.assertEqual(base.have_quorum([201, 201], 2), True)
        self.assertEqual(base.have_quorum([404, 404], 2), True)
        self.assertEqual(base.have_quorum([201, 404, 201, 201], 4), True)
def test_best_response_overrides(self):
base = Controller(self.app)
responses = [
(302, 'Found', '', 'The resource has moved temporarily.'),
(100, 'Continue', '', ''),
(404, 'Not Found', '', 'Custom body'),
]
server_type = "Base DELETE"
req = Request.blank('/v1/a/c/o', method='DELETE')
statuses, reasons, headers, bodies = zip(*responses)
# First test that you can't make a quorum with only overridden
# responses
overrides = {302: 204, 100: 204}
resp = base.best_response(req, statuses, reasons, bodies, server_type,
headers=headers, overrides=overrides)
self.assertEqual(resp.status, '503 Service Unavailable')
# next make a 404 quorum and make sure the last delete (real) 404
# status is the one returned.
overrides = {100: 404}
resp = base.best_response(req, statuses, reasons, bodies, server_type,
headers=headers, overrides=overrides)
self.assertEqual(resp.status, '404 Not Found')
self.assertEqual(resp.body, 'Custom body')
    def test_range_fast_forward(self):
        # fast_forward(n) advances the backend Range header by n bytes so a
        # resumed GET does not re-fetch data already sent to the client.
        req = Request.blank('/')
        handler = GetOrHeadHandler(None, req, None, None, None, None, {})
        handler.fast_forward(50)
        self.assertEqual(handler.backend_headers['Range'], 'bytes=50-')

        handler = GetOrHeadHandler(None, req, None, None, None, None,
                                   {'Range': 'bytes=23-50'})
        handler.fast_forward(20)
        self.assertEqual(handler.backend_headers['Range'], 'bytes=43-50')
        # advancing past the end of a bounded range is an error
        self.assertRaises(HTTPException,
                          handler.fast_forward, 80)

        handler = GetOrHeadHandler(None, req, None, None, None, None,
                                   {'Range': 'bytes=23-'})
        handler.fast_forward(20)
        self.assertEqual(handler.backend_headers['Range'], 'bytes=43-')

        # suffix ranges shrink from the back
        handler = GetOrHeadHandler(None, req, None, None, None, None,
                                   {'Range': 'bytes=-100'})
        handler.fast_forward(20)
        self.assertEqual(handler.backend_headers['Range'], 'bytes=-80')
def test_transfer_headers_with_sysmeta(self):
base = Controller(self.app)
good_hdrs = {'x-base-sysmeta-foo': 'ok',
'X-Base-sysmeta-Bar': 'also ok'}
bad_hdrs = {'x-base-sysmeta-': 'too short'}
hdrs = dict(good_hdrs)
hdrs.update(bad_hdrs)
dst_hdrs = HeaderKeyDict()
base.transfer_headers(hdrs, dst_hdrs)
self.assertEqual(HeaderKeyDict(good_hdrs), dst_hdrs)
def test_generate_request_headers(self):
base = Controller(self.app)
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(req, transfer=True)
expected_headers = {'x-base-meta-owner': '',
'x-base-meta-size': '151M',
'connection': 'close'}
for k, v in expected_headers.items():
self.assertTrue(k in dst_headers)
self.assertEqual(v, dst_headers[k])
self.assertFalse('new-owner' in dst_headers)
def test_generate_request_headers_with_sysmeta(self):
base = Controller(self.app)
good_hdrs = {'x-base-sysmeta-foo': 'ok',
'X-Base-sysmeta-Bar': 'also ok'}
bad_hdrs = {'x-base-sysmeta-': 'too short'}
hdrs = dict(good_hdrs)
hdrs.update(bad_hdrs)
req = Request.blank('/v1/a/c/o', headers=hdrs)
dst_headers = base.generate_request_headers(req, transfer=True)
for k, v in good_hdrs.items():
self.assertTrue(k.lower() in dst_headers)
self.assertEqual(v, dst_headers[k.lower()])
for k, v in bad_hdrs.items():
self.assertFalse(k.lower() in dst_headers)
def test_generate_request_headers_with_no_orig_req(self):
base = Controller(self.app)
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
dst_headers = base.generate_request_headers(None,
additional=src_headers)
expected_headers = {'x-base-meta-size': '151M',
'connection': 'close'}
for k, v in expected_headers.items():
self.assertDictContainsSubset(expected_headers, dst_headers)
self.assertEqual('', dst_headers['Referer'])
def test_client_chunk_size(self):
class TestSource(object):
def __init__(self, chunks):
self.chunks = list(chunks)
self.status = 200
def read(self, _read_size):
if self.chunks:
return self.chunks.pop(0)
else:
return ''
def getheader(self, header):
if header.lower() == "content-length":
return str(sum(len(c) for c in self.chunks))
def getheaders(self):
return [('content-length', self.getheader('content-length'))]
source = TestSource((
'abcd', '1234', 'abc', 'd1', '234abcd1234abcd1', '2'))
req = Request.blank('/v1/a/c/o')
node = {}
handler = GetOrHeadHandler(self.app, req, None, None, None, None, {},
client_chunk_size=8)
app_iter = handler._make_app_iter(req, node, source)
client_chunks = list(app_iter)
self.assertEqual(client_chunks, [
'abcd1234', 'abcd1234', 'abcd1234', 'abcd12'])
def test_client_chunk_size_resuming(self):
class TestSource(object):
def __init__(self, chunks):
self.chunks = list(chunks)
self.status = 200
def read(self, _read_size):
if self.chunks:
chunk = self.chunks.pop(0)
if chunk is None:
raise exceptions.ChunkReadTimeout()
else:
return chunk
else:
return ''
def getheader(self, header):
if header.lower() == "content-length":
return str(sum(len(c) for c in self.chunks
if c is not None))
def getheaders(self):
return [('content-length', self.getheader('content-length'))]
node = {'ip': '1.2.3.4', 'port': 6000, 'device': 'sda'}
source1 = TestSource(['abcd', '1234', 'abc', None])
source2 = TestSource(['efgh5678'])
req = Request.blank('/v1/a/c/o')
handler = GetOrHeadHandler(
self.app, req, 'Object', None, None, None, {},
client_chunk_size=8)
app_iter = handler._make_app_iter(req, node, source1)
with patch.object(handler, '_get_source_and_node',
lambda: (source2, node)):
client_chunks = list(app_iter)
self.assertEqual(client_chunks, ['abcd1234', 'efgh5678'])
    def test_bytes_to_skip(self):
        """bytes_to_skip(record_size, offset): bytes needed to reach the
        next record boundary starting from *offset*."""
        # if you start at the beginning, skip nothing
        self.assertEqual(bytes_to_skip(1024, 0), 0)
        # missed the first 10 bytes, so we've got 1014 bytes of partial
        # record
        self.assertEqual(bytes_to_skip(1024, 10), 1014)
        # skipped some whole records first
        self.assertEqual(bytes_to_skip(1024, 4106), 1014)
        # landed on a record boundary
        self.assertEqual(bytes_to_skip(1024, 1024), 0)
        self.assertEqual(bytes_to_skip(1024, 2048), 0)
        # big numbers
        self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32), 0)
        self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 1), 2 ** 20 - 1)
        self.assertEqual(bytes_to_skip(2 ** 20, 2 ** 32 + 2 ** 19), 2 ** 19)
        # odd numbers
        self.assertEqual(bytes_to_skip(123, 0), 0)
        self.assertEqual(bytes_to_skip(123, 23), 100)
        self.assertEqual(bytes_to_skip(123, 247), 122)
        # prime numbers
        self.assertEqual(bytes_to_skip(11, 7), 4)
        self.assertEqual(bytes_to_skip(97, 7873823), 55)
|
|
# -*- coding: utf-8 -*-
"""The parsers and plugins manager."""
from __future__ import unicode_literals
import pysigscan
from plaso.filters import parser_filter
from plaso.lib import specification
class ParsersManager(object):
"""The parsers and plugins manager."""
_parser_classes = {}
@classmethod
def CreateSignatureScanner(cls, specification_store):
"""Creates a signature scanner for format specifications with signatures.
Args:
specification_store (FormatSpecificationStore): format specifications
with signatures.
Returns:
pysigscan.scanner: signature scanner.
"""
scanner_object = pysigscan.scanner()
for format_specification in specification_store.specifications:
for signature in format_specification.signatures:
pattern_offset = signature.offset
if pattern_offset is None:
signature_flags = pysigscan.signature_flags.NO_OFFSET
elif pattern_offset < 0:
pattern_offset *= -1
signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END
else:
signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START
scanner_object.add_signature(
signature.identifier, pattern_offset, signature.pattern,
signature_flags)
return scanner_object
@classmethod
def CheckFilterExpression(cls, parser_filter_expression):
"""Checks parser and plugin names in a parser filter expression.
Args:
parser_filter_expression (str): parser filter expression,
where None represents all parsers and plugins.
A parser filter expression is a comma separated value string that
denotes which parsers and plugins should be used. See
filters/parser_filter.py for details of the expression syntax.
This function does not support presets, and requires a parser filter
expression where presets have been expanded.
Returns:
tuple: containing:
* set(str): parser filter expression elements that contain known parser
and/or plugin names.
* set(str): parser filter expression elements that contain unknown parser
and/or plugin names.
"""
if not parser_filter_expression:
return set(cls._parser_classes.keys()), set()
known_parser_elements = set()
unknown_parser_elements = set()
for element in parser_filter_expression.split(','):
parser_expression = element
if element.startswith('!'):
parser_expression = element[1:]
parser_name, _, plugin_name = parser_expression.partition('/')
parser_class = cls._parser_classes.get(parser_name, None)
if not parser_class:
unknown_parser_elements.add(element)
continue
if not plugin_name:
known_parser_elements.add(element)
continue
if parser_class.SupportsPlugins():
plugins = dict(parser_class.GetPlugins())
if plugin_name in plugins:
known_parser_elements.add(element)
else:
unknown_parser_elements.add(element)
return known_parser_elements, unknown_parser_elements
@classmethod
def DeregisterParser(cls, parser_class):
"""Deregisters a parser class.
The parser classes are identified based on their lower case name.
Args:
parser_class (type): parser class (subclass of BaseParser).
Raises:
KeyError: if parser class is not set for the corresponding name.
"""
parser_name = parser_class.NAME.lower()
if parser_name not in cls._parser_classes:
raise KeyError('Parser class not set for name: {0:s}.'.format(
parser_class.NAME))
del cls._parser_classes[parser_name]
  @classmethod
  def GetFormatsWithSignatures(cls, parser_filter_expression=None):
    """Retrieves the format specifications that have signatures.

    This method will create a specification store for parsers that define
    a format specification with signatures and a list of parser names for
    those that do not.

    Args:
      parser_filter_expression (Optional[str]): parser filter expression,
          where None represents all parsers and plugins.

          A parser filter expression is a comma separated value string that
          denotes which parsers and plugins should be used. See
          filters/parser_filter.py for details of the expression syntax.

          This function does not support presets, and requires a parser filter
          expression where presets have been expanded.

    Returns:
      tuple: containing:

        * FormatSpecificationStore: format specifications with signatures.
        * list[str]: names of parsers that do not have format specifications
            with signatures, or have signatures but also need to be applied
            'brute force'.
    """
    specification_store = specification.FormatSpecificationStore()
    remainder_list = []
    for parser_name, parser_class in cls._GetParsers(
        parser_filter_expression=parser_filter_expression):
      format_specification = parser_class.GetFormatSpecification()

      if format_specification and format_specification.signatures:
        specification_store.AddSpecification(format_specification)
        # The plist parser is a special case, where it both defines a signature
        # and also needs to be applied 'brute-force' to non-matching files,
        # as the signature matches binary plists, but not XML or JSON plists.
        if parser_name == 'plist':
          remainder_list.append(parser_name)

      else:
        remainder_list.append(parser_name)

    return specification_store, remainder_list
@classmethod
def GetNamesOfParsersWithPlugins(cls):
"""Retrieves the names of all parsers with plugins.
Returns:
list[str]: names of all parsers with plugins.
"""
parser_names = []
for parser_name, parser_class in cls._GetParsers():
if parser_class.SupportsPlugins():
parser_names.append(parser_name)
return sorted(parser_names)
@classmethod
def GetParserAndPluginNames(cls, parser_filter_expression=None):
"""Retrieves the parser and parser plugin names.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
A parser filter expression is a comma separated value string that
denotes which parsers and plugins should be used. See
filters/parser_filter.py for details of the expression syntax.
This function does not support presets, and requires a parser
filter expression where presets have been expanded.
Returns:
list[str]: parser and parser plugin names.
"""
parser_and_plugin_names = []
for parser_name, parser_class in cls._GetParsers(
parser_filter_expression=parser_filter_expression):
parser_and_plugin_names.append(parser_name)
if parser_class.SupportsPlugins():
for plugin_name, _ in parser_class.GetPlugins():
parser_and_plugin_names.append(
'{0:s}/{1:s}'.format(parser_name, plugin_name))
return parser_and_plugin_names
@classmethod
def GetParserPluginsInformation(cls, parser_filter_expression=None):
"""Retrieves the parser plugins information.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
A parser filter expression is a comma separated value string that
denotes which parsers and plugins should be used. See
filters/parser_filter.py for details of the expression syntax.
This function does not support presets, and requires a parser
filter expression where presets have been expanded.
Returns:
list[tuple[str, str]]: pairs of parser plugin names and descriptions.
"""
parser_plugins_information = []
for _, parser_class in cls._GetParsers(
parser_filter_expression=parser_filter_expression):
if parser_class.SupportsPlugins():
for plugin_name, plugin_class in parser_class.GetPlugins():
description = ''
data_format = getattr(plugin_class, 'DATA_FORMAT', '')
if data_format:
if data_format.endswith(' file'):
description = 'Parser for {0:s}s.'.format(data_format)
else:
description = 'Parser for {0:s}.'.format(data_format)
parser_plugins_information.append((plugin_name, description))
return parser_plugins_information
# Note this method is used by l2tpreg.
@classmethod
def GetParserObjectByName(cls, parser_name):
"""Retrieves a specific parser object by its name.
Args:
parser_name (str): name of the parser.
Returns:
BaseParser: parser object or None.
"""
parser_class = cls._parser_classes.get(parser_name, None)
if parser_class:
return parser_class()
return None
  @classmethod
  def GetParserObjects(cls, parser_filter_expression=None):
    """Retrieves the parser objects.

    Args:
      parser_filter_expression (Optional[str]): parser filter expression,
          where None represents all parsers and plugins.

          A parser filter expression is a comma separated value string that
          denotes which parsers and plugins should be used. See
          filters/parser_filter.py for details of the expression syntax.

          This function does not support presets, and requires a parser
          filter expression where presets have been expanded.

    Returns:
      dict[str, BaseParser]: parsers per name.
    """
    parser_filter_helper = parser_filter.ParserFilterExpressionHelper()
    excludes, includes = parser_filter_helper.SplitExpression(
        parser_filter_expression)

    parser_objects = {}
    for parser_name, parser_class in cls._parser_classes.items():
      # If there are no includes all parsers are included by default.
      if not includes and parser_name in excludes:
        continue

      if includes and parser_name not in includes:
        continue

      parser_object = parser_class()
      if parser_class.SupportsPlugins():
        # includes maps a parser name to the plugin names that were
        # explicitly selected; None enables all plugins of the parser.
        plugin_includes = None
        if parser_name in includes:
          plugin_includes = includes[parser_name]

        parser_object.EnablePlugins(plugin_includes)

      parser_objects[parser_name] = parser_object

    return parser_objects
@classmethod
def _GetParsers(cls, parser_filter_expression=None):
"""Retrieves the registered parsers and plugins.
Args:
parser_filter_expression (Optional[str]): parser filter expression,
where None represents all parsers and plugins.
A parser filter expression is a comma separated value string that
denotes which parsers and plugins should be used. See
filters/parser_filter.py for details of the expression syntax.
This function does not support presets, and requires a parser
filter expression where presets have been expanded.
Yields:
tuple: containing:
* str: name of the parser:
* type: parser class (subclass of BaseParser).
"""
parser_filter_helper = parser_filter.ParserFilterExpressionHelper()
excludes, includes = parser_filter_helper.SplitExpression(
parser_filter_expression)
for parser_name, parser_class in cls._parser_classes.items():
# If there are no includes all parsers are included by default.
if not includes and parser_name in excludes:
continue
if includes and parser_name not in includes:
continue
yield parser_name, parser_class
@classmethod
def GetParsersInformation(cls):
"""Retrieves the parsers information.
Returns:
list[tuple[str, str]]: parser names and descriptions.
"""
parsers_information = []
for _, parser_class in cls._GetParsers():
description = ''
data_format = getattr(parser_class, 'DATA_FORMAT', '')
if data_format:
if data_format.endswith(' file'):
description = 'Parser for {0:s}s.'.format(data_format)
else:
description = 'Parser for {0:s}.'.format(data_format)
parsers_information.append((parser_class.NAME, description))
return parsers_information
@classmethod
def RegisterParser(cls, parser_class):
    """Registers a parser class.

    The parser classes are identified based on their lower case name.

    Args:
      parser_class (type): parser class (subclass of BaseParser).

    Raises:
      KeyError: if parser class is already set for the corresponding name.
    """
    name_key = parser_class.NAME.lower()
    if name_key in cls._parser_classes:
      raise KeyError('Parser class already set for name: {0:s}.'.format(
          parser_class.NAME))

    cls._parser_classes[name_key] = parser_class
@classmethod
def RegisterParsers(cls, parser_classes):
    """Registers parser classes.

    The parser classes are identified based on their lower case name.

    Args:
      parser_classes (list[type]): parsers classes (subclasses of BaseParser).

    Raises:
      KeyError: if parser class is already set for the corresponding name.
    """
    for registered_class in parser_classes:
      cls.RegisterParser(registered_class)
|
|
#!/usr/bin/env python2
# Import libraries
try:
import pygame_sdl2
pygame_sdl2.import_as_pygame()
import pygame, math, random, sys, menu, settings
from pygame.locals import *
except:
import pygame, math, random, sys, menu, settings
from pygame.locals import *
def play():
    """Run one full session of the Galaxy Wars game.

    Sets up the pygame window, joystick, assets and sounds, then runs the
    main frame loop (drawing, input handling, enemy/bullet updates and the
    three power-ups: freeze, bullet and danger) until health reaches zero.
    Afterwards it shows the game-over/win screen and eventually returns to
    the menu.

    NOTE(review): this is Python 2 code (print statements, integer division);
    it blocks until the player quits or the post-game timeout fires.
    """
    cnt = 0
    dec = False
    eclipse = 0
    dececc = False  # NOTE(review): never referenced again in this function
    # Define varibale for Level
    isLevelOne = True
    # Define variables for freeze power
    timeForFreezePower = False
    freezePowerSent = False
    freezepowercord = []
    freezePower = False
    freezeTimer = 10
    # Define variables for bullet power
    timeForBulletPower = False
    bulletPowerSent = False
    bulletpowercord = []
    bulletPower = False
    bulletTimer = 10
    # Define variables for danger power
    timeForDangerPower = False
    dangerPowerSent = False
    dangerpowercord = []
    dangerPower = False
    dangerTimer = 10
    # Set the width and height of the window
    width, height = int(pygame.display.Info().current_w), int(pygame.display.Info().current_h)
    # Create the window
    screen = pygame.display.set_mode((width, height), pygame.HWSURFACE | pygame.DOUBLEBUF)
    # Initialize the joystick module
    pygame.joystick.init()
    # Check if there are any joysticks
    joystick_count = pygame.joystick.get_count()
    # If there are any joysticks, initialize the first one, else quit the joystick module
    if joystick_count:
        joystick = pygame.joystick.Joystick(0)
        joystick.init()
        JS = True
        # Set joystick position
        if JS:
            stick0 = "right"
    else:
        pygame.joystick.quit()
        JS = False
    # Set pressed keys
    keys = [False, False, False, False]
    JSkeys = [False, False, False, False]
    # Set player position
    playerpos=[100,100]
    # Make an list for the accuracy: acc[0] = hits, acc[1] = shots fired
    acc=[0,0]
    # Make an list for where the arrows are
    arrows=[]
    # Set the timer for spawning badgers
    badtimer=100
    badtimer1=0
    # Make an list for where the badgers are
    badguys=[[800,100]]
    # Set your health value
    healthvalue=194
    # Set the wait times
    waitforexit=0
    waitforarrows=0
    waitforballoons=0
    waitforballoons2=2
    # Set displaying balloon on/off
    balloon1display = False
    balloon2display = False
    # Set start ticks
    startticks = pygame.time.get_ticks()
    # Initialize the mixer (for sound)
    pygame.mixer.init()
    # Set title
    pygame.display.set_caption("Galaxy Wars")
    # Load images
    # Load the players image
    player = pygame.image.load("resources/images/dude.png")
    # Load the power freeze image
    power_freeze = pygame.image.load("resources/images/power_freeze.png")
    # Load thunder image
    thunder = pygame.image.load("resources/images/thunder.png")
    # Load backgrounds
    bgmorning = pygame.image.load("resources/images/bgmorning.jpg")
    # Load the power bullet image
    power_bullet = pygame.image.load("resources/images/power_bullet.png")
    # Load the power danger image
    power_danger = pygame.image.load("resources/images/power_danger.png")
    # Green Bullet
    bullet_green = pygame.image.load("resources/images/bullet_green.png")
    # Load the background image
    bgmain = pygame.image.load("resources/images/starfield.png")
    # Red Sun level 1
    sunred = pygame.image.load("resources/images/SunRed.png")
    # Blue Sun Danger
    sunblue = pygame.image.load("resources/images/SunBlue.png")
    # Load the image of the castles
    castle = pygame.image.load("resources/images/castle.png")
    # Load the image for the arrows
    arrow = pygame.image.load("resources/images/bullet.png")
    # Load the image for the badgers
    badguyimg1 = pygame.image.load("resources/images/badguy.png")
    badguyimg = badguyimg1
    # Load the overlays
    greenoverlay = pygame.image.load("resources/images/greenoverlay.png")
    redoverlay = pygame.image.load("resources/images/redoverlay.png")
    # Load the healthbar images
    healthbar = pygame.image.load("resources/images/healthbar.png")
    health = pygame.image.load("resources/images/health.png")
    # Load the text balloons
    balloon1 = pygame.image.load("resources/images/balloon1.png")
    balloon2 = pygame.image.load("resources/images/balloon2.png")
    # Load audio
    hit = pygame.mixer.Sound("resources/audio/explode.wav")
    enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
    shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
    # Set the audio volume
    hit.set_volume(0.15)
    enemy.set_volume(0.15)
    shoot.set_volume(0.15)
    # Set the background music (loops forever until stopped)
    pygame.mixer.music.load('resources/audio/background.mp3')
    pygame.mixer.music.play(-1, 0.0)
    pygame.mixer.music.set_volume(0.30)
    # Set positions of the four castles along the left edge
    castle1 = (0,height/16)
    castle2 = (0,height/3.5)
    castle3 = (0,height/2)
    castle4 = (0,height/1.4)
    # Set display mode
    prevFS = settings.getFullscreen()
    if settings.getFullscreen() == True:
        screen = pygame.display.set_mode((width, height), pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.FULLSCREEN)
    elif settings.getFullscreen() == False:
        screen = pygame.display.set_mode((width, height), pygame.HWSURFACE | pygame.DOUBLEBUF)
    # Keep looping through
    running = 1
    exitcode = 0
    while running:
        # Set display mode if changed (re-create the window on toggle)
        if prevFS != settings.getFullscreen():
            if settings.getFullscreen() == True:
                screen = pygame.display.set_mode((width, height), pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.FULLSCREEN)
                prevFS = settings.getFullscreen()
            elif settings.getFullscreen() == False:
                screen = pygame.display.set_mode((width, height), pygame.HWSURFACE | pygame.DOUBLEBUF)
                prevFS = settings.getFullscreen()
        # Set joystick buttons, if there are any
        if JS:
            buttonUP = joystick.get_button(4)
            buttonRIGHT = joystick.get_button(5)
            buttonDOWN = joystick.get_button(6)
            buttonLEFT = joystick.get_button(7)
            buttonX = joystick.get_button(14)
            stick0a = joystick.get_axis(0)
            stick0b = joystick.get_axis(1)
        # Check for stick0's position (8-way direction from the two axes)
        if JS:
            if stick0a <= -0.5 and stick0b <= -0.5:
                stick0 = "leftup"
            elif stick0a <= -0.5 and stick0b >= 0.5:
                stick0 = "leftdown"
            elif stick0a >= 0.5 and stick0b <= -0.5:
                stick0 = "rightup"
            elif stick0a >= 0.5 and stick0b >= 0.5:
                stick0 = "rightdown"
            elif stick0a <= -0.5:
                stick0 = "left"
            elif stick0a >= 0.5:
                stick0 = "right"
            elif stick0b <= -0.5:
                stick0 = "up"
            elif stick0b >= 0.5:
                stick0 = "down"
        badtimer-=1
        # Clear the screen before drawing it again
        screen.fill(0)
        # Draw the background
        if isLevelOne:
            screen.blit(bgmorning, (0, 0))
        else:
            screen.blit(bgmain, (0,0))
        # 25 hits promotes the player to level 2
        if acc[0] > 25:
            isLevelOne = False
        # Danger power on level 2 animates an eclipse with thunder flashes
        if dangerPower and isLevelOne == False:
            eclipse += 10
            if eclipse > 100:
                screen.blit(sunblue, (width // 2, 0))
            else:
                screen.blit(sunred, (width // 2, 0))
            if eclipse % 20 == 0:
                screen.blit(thunder, (width // 2, 0))
            if eclipse > 200:
                eclipse = 0
        else:
            screen.blit(sunred, (width // 2, 0))
        # Draw the castles
        screen.blit(castle, castle1)
        screen.blit(castle, castle2)
        screen.blit(castle, castle3)
        screen.blit(castle, castle4)
        # Set player position and rotation (mouse aim without joystick,
        # fixed 8-way rotation with joystick)
        if JS == False:
            position = pygame.mouse.get_pos()
            angle = math.atan2(position[1]-(playerpos[1]+32),position[0]-(playerpos[0]+26))
            # 57.29 ~= degrees per radian
            playerrot = pygame.transform.rotate(player, 360-angle*57.29)
            playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
            screen.blit(playerrot, playerpos1)
        elif JS == True:
            if stick0 == "left":
                playerrot = pygame.transform.rotate(player, 180)
                playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
                screen.blit(playerrot, playerpos1)
            elif stick0 == "right":
                playerrot = pygame.transform.rotate(player, 0)
                playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
                screen.blit(playerrot, playerpos1)
            elif stick0 == "up":
                playerrot = pygame.transform.rotate(player, 90)
                playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
                screen.blit(playerrot, playerpos1)
            elif stick0 == "down":
                playerrot = pygame.transform.rotate(player, 270)
                playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
                screen.blit(playerrot, playerpos1)
            elif stick0 == "leftup":
                playerrot = pygame.transform.rotate(player, 135)
                playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
                screen.blit(playerrot, playerpos1)
            elif stick0 == "leftdown":
                playerrot = pygame.transform.rotate(player, 225)
                playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
                screen.blit(playerrot, playerpos1)
            elif stick0 == "rightup":
                playerrot = pygame.transform.rotate(player, 45)
                playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
                screen.blit(playerrot, playerpos1)
            elif stick0 == "rightdown":
                playerrot = pygame.transform.rotate(player, 315)
                playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
                screen.blit(playerrot, playerpos1)
        # Draw arrows (bullet speed depends on the active power)
        for bullet in arrows:
            # NOTE(review): index is reset inside the loop, so off-screen
            # removal always pops element 0 — looks like a latent bug
            # inherited from the tutorial this is based on; left as-is.
            index=0
            velx = 0
            vely = 0
            if bulletPower and dangerPower is False:
                velx=math.cos(bullet[0])*30
                vely=math.sin(bullet[0])*30
            elif dangerPower is False:
                velx=math.cos(bullet[0])*20
                vely=math.sin(bullet[0])*20
            elif dangerPower:
                velx=math.cos(bullet[0])*10
                vely=math.sin(bullet[0])*10
            bullet[1]+=velx
            bullet[2]+=vely
            if bullet[1]<-64 or bullet[1]>width or bullet[2]<-64 or bullet[2]>height:
                arrows.pop(index)
            index+=1
        for projectile in arrows:
            arrow1 = pygame.transform.rotate(arrow, 360-projectile[0]*57.29)
            bullet_green1 = pygame.transform.rotate(bullet_green, 360-projectile[0]*57.29)
            if bulletPower:
                screen.blit(bullet_green1, (projectile[1], projectile[2]))
            else:
                screen.blit(arrow1, (projectile[1], projectile[2]))
        # Draw badguys (spawn a new one each time badtimer runs out;
        # spawn rate ramps up via badtimer1, capped at 35)
        if badtimer==0:
            badheight1 = height/9.6
            badheight2 = height/1.1
            badheight = random.randint(int(badheight1), int(badheight2))
            badguys.append([width, badheight])
            badtimer=100-(badtimer1*2)
            if badtimer1>=35:
                badtimer1=35
            else:
                badtimer1+=5
        index=0
        for badguy in badguys:
            # NOTE(review): badguys is mutated (pop) while being iterated —
            # inherited from the tutorial; left as-is.
            if badguy[0]<-64:
                badguys.pop(index)
            if freezePower is False and dangerPower is False:
                # Normal movement: drift left with a 30-frame bounce (cnt/dec)
                badguy[0]-=7
                if cnt < 30 and dec is False:
                    badguy[1] += 7
                    cnt += 1
                else:
                    dec = True
                    cnt -= 1
                    badguy[1] -= 7
                    # NOTE(review): `is 0` relies on CPython small-int
                    # interning; `== 0` would be correct style.
                    if cnt is 0:
                        dec = False
            elif dangerPower:
                # Danger power: enemies rush left faster with random jitter
                badguy[0] -= 20
                xx = random.randint(0, 1)
                if cnt < 30 and dec is False:
                    if xx == 1:
                        badguy[1] += 7
                    cnt += 1
                else:
                    dec = True
                    cnt -= 1
                    if xx == 1:
                        badguy[1] -= 7
                    if cnt is 0:
                        dec = False
            # Attack castle
            badrect=pygame.Rect(badguyimg.get_rect())
            badrect.top=badguy[1]
            badrect.left=badguy[0]
            if badrect.left<64:
                hit.play()
                healthvalue -= random.randint(5,20)
                badguys.pop(index)
            # Check for collisions
            index1=0
            for bullet in arrows:
                bullrect=pygame.Rect(arrow.get_rect())
                bullrect.left=bullet[1]
                bullrect.top=bullet[2]
                if badrect.colliderect(bullrect):
                    enemy.play()
                    acc[0]+=1
                    # Every 3rd hit (level 2) / 5th / 7th hit queues a power-up
                    if acc[0] % 3 == 0 and isLevelOne == False:
                        timeForDangerPower = True
                    else:
                        timeForDangerPower = False
                    if acc[0] % 5 == 0:
                        timeForFreezePower = True
                    else:
                        timeForFreezePower = False
                    if acc[0] % 7 == 0:
                        timeForBulletPower = True
                    else:
                        timeForBulletPower = False
                    badguys.pop(index)
                    # Bullet power lets the shot pass through enemies
                    if bulletPower == False:
                        arrows.pop(index1)
                index1+=1
            # Next bad guy
            index+=1
        # Draw badgers
        for badguy in badguys:
            screen.blit(badguyimg, badguy)
        # Draw power if power sent is true (power-ups drift left; they
        # despawn when they leave the screen)
        if dangerPowerSent:
            dangerpowercord[0] -= 7
            screen.blit(power_danger, dangerpowercord)
            if dangerpowercord[0] < 0:
                dangerPowerSent = False
        if bulletPowerSent:
            bulletpowercord[0] -= 7
            screen.blit(power_bullet, bulletpowercord)
            if bulletpowercord[0] < 0:
                bulletPowerSent = False
        if freezePowerSent:
            freezepowercord[0] -= 7
            screen.blit(power_freeze, freezepowercord)
            if freezepowercord[0] < 0:
                freezePowerSent = False
        if timeForDangerPower and dangerPowerSent == False:
            powerheight1 = height/9.6
            powerheight2 = height/1.1
            # NOTE(review): powerheight is computed but unused here — the
            # danger power spawns at the player's y instead; confirm intent.
            powerheight = random.randint(int(powerheight1), int(powerheight2))
            dangerpowercord = [width, playerpos[1]]
            print "dangerPowerSent"
            dangerPowerSent = True
            timeForDangerPower = False
            screen.blit(power_danger, dangerpowercord)
        if timeForBulletPower and bulletPowerSent == False:
            powerheight1 = height/9.6
            powerheight2 = height/1.1
            powerheight = random.randint(int(powerheight1), int(powerheight2))
            bulletpowercord = [width, powerheight]
            print "bulletPowerSent"
            bulletPowerSent = True
            timeForBulletPower = False
            screen.blit(power_bullet, bulletpowercord)
        if timeForFreezePower and freezePowerSent == False:
            powerheight1 = height/9.6
            powerheight2 = height/1.1
            powerheight = random.randint(int(powerheight1), int(powerheight2))
            freezepowercord = [width, powerheight]
            print "freezePowerSent"
            freezePowerSent = True
            timeForFreezePower = False
            screen.blit(power_freeze, freezepowercord)
        # Tick down the active power-ups (frame based timers)
        if dangerPower:
            dangerTimer += 10
            if dangerTimer > 2000:
                dangerPower = False
                dangerTimer = 0
        if bulletPower:
            bulletTimer += 10
            if bulletTimer > 2000:
                bulletPower = False
                bulletTimer = 0
        if freezePower:
            freezeTimer += 10
            if freezeTimer > 1000:
                freezePower = False
                freezeTimer = 0
        # Check if power is Taken (player rect vs power-up rect)
        if dangerPowerSent:
            dangerpowerRect = pygame.Rect(power_danger.get_rect())
            playerRect = pygame.Rect(player.get_rect())
            dangerpowerRect.top = dangerpowercord[1]
            dangerpowerRect.left = dangerpowercord[0]
            playerRect.top = playerpos[1]
            playerRect.left = playerpos[0]
            if dangerpowerRect.colliderect(playerRect):
                dangerPower = True
                dangerPowerSent = False
        if bulletPowerSent:
            bullpowerRect = pygame.Rect(power_bullet.get_rect())
            playerRect = pygame.Rect(player.get_rect())
            bullpowerRect.top = bulletpowercord[1]
            bullpowerRect.left = bulletpowercord[0]
            playerRect.top = playerpos[1]
            playerRect.left = playerpos[0]
            if bullpowerRect.colliderect(playerRect):
                bulletPower = True
                bulletPowerSent = False
        if freezePowerSent:
            powerRect = pygame.Rect(power_freeze.get_rect())
            playerRect = pygame.Rect(player.get_rect())
            powerRect.top = freezepowercord[1]
            powerRect.left = freezepowercord[0]
            playerRect.top = playerpos[1]
            playerRect.left = playerpos[0]
            if powerRect.colliderect(playerRect):
                freezePower = True
                print "Collision Detected"
                freezePowerSent = False
        # Draw clock (score + level indicator)
        font = pygame.font.Font("freesansbold.ttf", 24)
        survivedtext = font.render(str("Score: " + str(acc[0])) , True, (255,0,0))
        leveltext = ""
        if isLevelOne:
            leveltext= font.render(str("Level: 1") , True, (0,0,255))
        else:
            leveltext= font.render(str("Level: 2") , True, (0,0,255))
        levelrect = leveltext.get_rect()
        levelrect.topleft = [250, 5]
        screen.blit(leveltext, levelrect)
        textRect = survivedtext.get_rect()
        textRect.topright=[width-5, 5]
        screen.blit(survivedtext, textRect)
        # Draw health bar (one 1px column per health point)
        screen.blit(healthbar, (5,5))
        for health1 in range(healthvalue):
            screen.blit(health, (health1+8,8))
        # Loop through the events
        for event in pygame.event.get():
            # Check if the event is the X button
            if event.type==pygame.QUIT:
                # If it is stop the music and go back to the main menu
                pygame.mixer.music.stop()
                menu.launch()
            if event.type == pygame.KEYDOWN:
                # Move up
                if event.key==K_w:
                    keys[0]=True
                elif event.key==K_UP:
                    keys[0]=True
                # Move left
                elif event.key==K_a:
                    keys[1]=True
                elif event.key==K_LEFT:
                    keys[1]=True
                # Move down
                elif event.key==K_s:
                    keys[2]=True
                elif event.key==K_DOWN:
                    keys[2]=True
                # Move right
                elif event.key==K_d:
                    keys[3]=True
                elif event.key==K_RIGHT:
                    keys[3]=True
                # Quit by pressing escape
                elif event.key==K_ESCAPE:
                    pygame.mixer.music.stop()
                    menu.launch()
                # Fullscreen by pressing F4
                elif event.key==K_F4:
                    settings.changeFullscreen()
                elif event.key==pygame.K_SPACE:
                    # Fire towards the mouse cursor, rate-limited by
                    # waitforarrows
                    if waitforarrows == 0:
                        shoot.play()
                        position=pygame.mouse.get_pos()
                        acc[1]+=1
                        arrows.append([math.atan2(position[1]-(playerpos1[1]+32),position[0]-(playerpos1[0]+26)),playerpos1[0]+32,playerpos1[1]+32])
                        # Set wait time for arrows in frames
                        waitforarrows=15
                        if waitforballoons2:
                            waitforballoons2-=1
                        else:
                            # Choose balloon
                            balloonnr = random.randint(1, 2)
                            if balloonnr == 1:
                                balloon1display = True
                            elif balloonnr == 2:
                                balloon2display = True
                            waitforballoons2=2
            if event.type == pygame.KEYUP:
                # Move up
                if event.key==pygame.K_w:
                    keys[0]=False
                elif event.key==pygame.K_UP:
                    keys[0]=False
                # Move left
                elif event.key==pygame.K_a:
                    keys[1]=False
                elif event.key==pygame.K_LEFT:
                    keys[1]=False
                # Move down
                elif event.key==pygame.K_s:
                    keys[2]=False
                elif event.key==pygame.K_DOWN:
                    keys[2]=False
                # Move right
                elif event.key==pygame.K_d:
                    keys[3]=False
                elif event.key==pygame.K_RIGHT:
                    keys[3]=False
        if waitforarrows:
            waitforarrows-=1
            waitforballoons = waitforarrows
        # Display balloon
        if waitforballoons:
            waitforballoons-=1
            if balloon1display:
                screen.blit(balloon1, (playerpos[0]+10, playerpos[1]-60))
            elif balloon2display:
                screen.blit(balloon2, (playerpos[0]+10, playerpos[1]-60))
        else:
            balloon1display = False
            balloon2display = False
        # Check if there are any joysticks
        if JS:
            # Check if UP is pressed
            if buttonUP:
                JSkeys[0]=True
            # Check if RIGHT is pressed
            elif buttonRIGHT:
                JSkeys[3]=True
            # Check if DOWN is pressed
            elif buttonDOWN:
                JSkeys[2]=True
            # Check if LEFT is pressed
            elif buttonLEFT:
                JSkeys[1]=True
            else:
                JSkeys[0]=False
                JSkeys[1]=False
                JSkeys[2]=False
                JSkeys[3]=False
            # Check of X is pressed (fire in the stick direction; angles are
            # radians: 0 right, 1.5 ~ down, -1.5 ~ up, 3 ~ left)
            if buttonX:
                if waitforarrows == 0:
                    shoot.play()
                    acc[1]+=1
                    if stick0 == "left":
                        arrows.append([3,playerpos1[0]+32,playerpos1[1]+32])
                    elif stick0 == "right":
                        arrows.append([0,playerpos1[0]+32,playerpos1[1]+32])
                    elif stick0 == "up":
                        arrows.append([-1.5,playerpos1[0]+32,playerpos1[1]+32])
                    elif stick0 == "down":
                        arrows.append([1.5,playerpos1[0]+32,playerpos1[1]+32])
                    elif stick0 == "leftup":
                        arrows.append([-2.25,playerpos1[0]+32,playerpos1[1]+32])
                    elif stick0 == "leftdown":
                        arrows.append([2.25,playerpos1[0]+32,playerpos1[1]+32])
                    elif stick0 == "rightup":
                        arrows.append([-0.75,playerpos1[0]+32,playerpos1[1]+32])
                    elif stick0 == "rightdown":
                        arrows.append([0.75,playerpos1[0]+32,playerpos1[1]+32])
                    # Set wait time for arrows in frames
                    waitforarrows=15
                    if waitforballoons2:
                        waitforballoons2-=1
                    else:
                        # Choose balloon
                        balloonnr = random.randint(1, 2)
                        if balloonnr == 1:
                            balloon1display = True
                        elif balloonnr == 2:
                            balloon2display = True
                        waitforballoons2=2
        # Move player
        # Up
        if keys[0]:
            playerpos[1]-=5
        # Down
        elif keys[2]:
            playerpos[1]+=5
        # Left
        if keys[1]:
            playerpos[0]-=5
        # Right
        elif keys[3]:
            playerpos[0]+=5
        # Move player with JoyStick
        # Up
        if JSkeys[0]:
            playerpos[1]-=5
        # Down
        elif JSkeys[2]:
            playerpos[1]+=5
        # Left
        if JSkeys[1]:
            playerpos[0]-=5
        # Right
        elif JSkeys[3]:
            playerpos[0]+=5
        # Win/Lose check
        # Win
        # Lose
        if healthvalue<=0:
            running=0
            exitcode=0
        # Accuracy as a percentage; guard against division by zero shots
        if acc[1]!=0:
            accuracy=acc[0]*1.0/acc[1]*100
        else:
            accuracy=0
        # Flip the display
        pygame.display.flip()
    # Stop the music
    pygame.mixer.music.stop()
    # Win/lose display
    # Lose
    if exitcode==0:
        # Initialize the font
        pygame.font.init()
        # Set font
        font = pygame.font.Font("freesansbold.ttf", 24)
        bigfont = pygame.font.Font("freesansbold.ttf", 48)
        # Render text
        gameover = bigfont.render("Game over!", True, (255,0,0))
        gameoverRect = gameover.get_rect()
        gameoverRect.centerx = screen.get_rect().centerx
        gameoverRect.centery = screen.get_rect().centery-24
        text = font.render("Accuracy: "+str(accuracy)+"%", True, (255,0,0))
        textRect = text.get_rect()
        textRect.centerx = screen.get_rect().centerx
        textRect.centery = screen.get_rect().centery+24
        # Draw red overlay (tile the 100x100 overlay over the whole screen)
        for x in range(width/redoverlay.get_width()+1):
            for y in range(height/redoverlay.get_height()+1):
                screen.blit(redoverlay,(x*100,y*100))
        # Draw text
        screen.blit(gameover, gameoverRect)
        screen.blit(text, textRect)
    # Win
    else:
        # Initialize the font
        pygame.font.init()
        # Set font
        font = pygame.font.Font("freesansbold.ttf", 24)
        bigfont = pygame.font.Font("freesansbold.ttf", 48)
        # Render text
        youwin = bigfont.render("You win!", True, (0,255,0))
        youwinRect = youwin.get_rect()
        youwinRect.centerx = screen.get_rect().centerx
        youwinRect.centery = screen.get_rect().centery-24
        text = font.render("Accuracy: "+str(accuracy)+"%", True, (0,255,0))
        textRect = text.get_rect()
        textRect.centerx = screen.get_rect().centerx
        textRect.centery = screen.get_rect().centery+24
        # Draw green overlay
        for x in range(width/greenoverlay.get_width()+1):
            for y in range(height/greenoverlay.get_height()+1):
                screen.blit(greenoverlay,(x*100,y*100))
        # Draw text
        screen.blit(youwin, youwinRect)
        screen.blit(text, textRect)
    # Exit automatic when the game is stopped (after ~1000 frames go back
    # to the menu; a window-close event quits immediately)
    while 1:
        waitforexit+=1
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
        if waitforexit == 1000:
            menu.launch()
        # Update the screen
        pygame.display.flip()
|
|
#!/usr/bin/env python
from click.testing import CliRunner
from enasearch.cli import cli
def test_get_results():
    """Test get_results command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, ['get_results'])
    assert outcome.exit_code == 0
    assert 'tsa_set' in outcome.output
def test_get_taxonomy_results():
    """Test get_taxonomy_results command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, ['get_taxonomy_results'])
    assert outcome.exit_code == 0
    assert 'sequence_update' in outcome.output
def test_get_filter_fields():
    """Test get_filter_fields command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, ['get_filter_fields', '--result', 'assembly'])
    assert outcome.exit_code == 0
    assert 'assembly_name' in outcome.output
def test_get_returnable_fields():
    """Test get_returnable_fields command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(
        cli, ['get_returnable_fields', '--result', 'read_study'])
    assert outcome.exit_code == 0
    assert 'study_alias' in outcome.output
def test_get_run_fields():
    """Test get_run_fields command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, ['get_run_fields'])
    assert outcome.exit_code == 0
    assert 'library_selection' in outcome.output
def test_get_analysis_fields():
    """Test get_analysis_fields command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, ['get_analysis_fields'])
    assert outcome.exit_code == 0
    assert 'submitted_md5' in outcome.output
def test_get_sortable_fields():
    """Test get_sortable_fields command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(
        cli, ['get_sortable_fields', '--result', 'sequence_update'])
    assert outcome.exit_code == 0
    assert 'tissue_lib' in outcome.output
def test_get_filter_types():
    """Test get_filter_types command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, ['get_filter_types'])
    assert outcome.exit_code == 0
    assert 'geo_south' in outcome.output
def test_get_display_options():
    """Test get_display_options command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, ['get_display_options'])
    assert outcome.exit_code == 0
    assert 'report' in outcome.output
def test_get_download_options():
    """Test get_download_options command of enasearch"""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(cli, ['get_download_options'])
    assert outcome.exit_code == 0
    assert 'gzip' in outcome.output
def test_search_data():
    """Test search_data command of enasearch"""
    cli_runner = CliRunner()
    # Each case: (CLI arguments, substring expected in the output).
    cases = [
        (['search_data',
          '--query', 'tissue_lib="lambda gt11" AND dataclass=STD',
          '--result', 'coding_release',
          '--display', 'xml'],
         'glutathione S-transferase subunit gYc'),
        (['search_data',
          '--free_text_search',
          '--query', 'SMP1+homo',
          '--result', 'sequence_release',
          '--display', 'fasta'],
         'ENA|AF081282|AF081282.1'),
        (['search_data',
          '--free_text_search',
          '--query', 'kinase+homo+sapiens',
          '--result', 'wgs_set',
          '--display', 'report',
          '--fields', 'accession,environmental_sample'],
         'query=kinase homo sapiens'),
    ]
    for cli_args, expected in cases:
        outcome = cli_runner.invoke(cli, cli_args)
        assert outcome.exit_code == 0
        assert expected in outcome.output
def test_retrieve_data():
    """Test retrieve_data command of enasearch"""
    cli_runner = CliRunner()
    # Each case: (CLI arguments, substring expected in the output).
    cases = [
        (['retrieve_data',
          '--ids', 'ERA000010-ERA000020',
          '--display', 'xml'],
         'g1k-bgi-20080814-2'),
        (['retrieve_data',
          '--ids', 'PRJEB2772,AL513382',
          '--display', 'html'],
         'BN000065'),
        (['retrieve_data',
          '--ids', 'A00145',
          '--display', 'fasta',
          '--subseq_range', '3-63'],
         'ENA|A00145|A00145.1'),
        (['retrieve_data',
          '--ids', 'AL513382',
          '--display', 'text',
          '--offset', '0',
          '--length', '100',
          '--expanded'],
         'taatttttaa'),
        (['retrieve_data',
          '--ids', 'AL513382',
          '--display', 'text',
          '--header'],
         'UniProtKB/Swiss-Prot; Q08456; FIMI_SALTI'),
        (['retrieve_data',
          '--ids', 'PRJEB2772',
          '--display', 'xml'],
         'ERP001030'),
    ]
    for cli_args, expected in cases:
        outcome = cli_runner.invoke(cli, cli_args)
        assert outcome.exit_code == 0
        assert expected in outcome.output
def test_retrieve_taxons():
    """Test retrieve_taxons command of enasearch"""
    cli_runner = CliRunner()
    # Each case: (CLI arguments, substring expected in the output).
    cases = [
        (['retrieve_taxons',
          '--ids', '6543',
          '--display', 'fasta',
          '--result', 'sequence_release'],
         'ENA|KX834745|KX834745.1'),
        (['retrieve_taxons',
          '--ids', 'Human,Cat,Mouse,Zebrafish',
          '--display', 'xml'],
         'Danio frankei'),
    ]
    for cli_args, expected in cases:
        outcome = cli_runner.invoke(cli, cli_args)
        assert outcome.exit_code == 0
        assert expected in outcome.output
def test_retrieve_run_report():
    """Test retrieve_run_report command of enasearch"""
    cli_runner = CliRunner()
    # Each case: (CLI arguments, substring expected in the output).
    # The 2nd and 3rd cases exercise the two ways of passing fields:
    # a single comma separated value and repeated --fields options.
    cases = [
        (['retrieve_run_report',
          '--accession', 'SRX017289'],
         'SAMN00009557'),
        (['retrieve_run_report',
          '--accession', 'SRX017289',
          '--fields', 'study_accession,study_title,sra_aspera'],
         'PRJNA123835'),
        (['retrieve_run_report',
          '--accession', 'SRX017289',
          '--fields', 'study_accession',
          '--fields', 'study_title',
          '--fields', 'sra_aspera'],
         'PRJNA123835'),
    ]
    for cli_args, expected in cases:
        outcome = cli_runner.invoke(cli, cli_args)
        assert outcome.exit_code == 0
        assert expected in outcome.output
def test_retrieve_analysis_report():
    """Test retrieve_analysis_report command of enasearch"""
    cli_runner = CliRunner()
    # 1st test: default fields include the study accession
    outcome = cli_runner.invoke(cli, [
        'retrieve_analysis_report',
        '--accession', 'ERZ009929'])
    assert outcome.exit_code == 0
    assert 'PRJEB1970' in outcome.output
    # 2nd test: restricting fields drops the study accession
    outcome = cli_runner.invoke(cli, [
        'retrieve_analysis_report',
        '--accession', 'ERZ009929',
        '--fields', 'analysis_accession'])
    assert outcome.exit_code == 0
    assert 'PRJEB1970' not in outcome.output
    # 3rd test: several fields, still without the study accession
    outcome = cli_runner.invoke(cli, [
        'retrieve_analysis_report',
        '--accession', 'ERZ009929',
        '--fields', 'analysis_accession,sample_accession,scientific_name'])
    assert outcome.exit_code == 0
    assert 'PRJEB1970' not in outcome.output
    assert 'SAMEA2072680' in outcome.output
|
|
"""
Openflow tests on an l2 table
"""
import sys
import os
import time
import logging
from oftest import config
import oftest.base_tests as base_tests
import ofp
from oftest.testutils import *
from oftest.parse import parse_mac
import openflow_base_tests
from utils import *
from p4_pd_rpc.ttypes import *
from res_pd_rpc.ttypes import *
sys.path.append(os.path.join(sys.path[0], '..', '..', '..', '..',
'targets', 'switch', 'openflow_mapping'))
from l2 import *
### TODO: generate expected packets
#######################
# SOME OPENFLOW UTILS #
#######################
# common shorthands for the OpenFlow message constructors used below
flow_add = ofp.message.flow_add
flow_delete = ofp.message.flow_delete
group_add = ofp.message.group_add
group_mod = ofp.message.group_mod
group_delete = ofp.message.group_delete
table_stats_req = ofp.message.table_stats_request
table_stats_reply = ofp.message.table_stats_reply
packet_in = ofp.message.packet_in
packet_out = ofp.message.packet_out
# sentinel buffer id meaning "packet data carried in the message"
buf = ofp.OFP_NO_BUFFER
# dmac table fields (P4 metadata field names used as match-field keys)
eth_dst_addr = "l2_metadata_lkp_mac_da"
ingress_vlan = "ingress_metadata_bd"
# fixed destination MAC / VLAN used by the test cases below
TEST_ETH_DST = "00:01:02:03:04:05"
TEST_VLAN = 3
def get_oxm(field_obj):
    """
    Returns an oxm and an arg-dict for updating an arg-list to
    simple_tcp_packet
    """
    if field_obj.field == "OFPXMT_OFB_VLAN_VID":
        packet_args = {"vlan_vid": field_obj.testval, "dl_vlan_enable": True}
        return (ofp.oxm.vlan_vid(field_obj.testval), packet_args)
    if field_obj.field == "OFPXMT_OFB_ETH_DST":
        packet_args = {"eth_dst": field_obj.testval}
        return (ofp.oxm.eth_dst(parse_mac(field_obj.testval)), packet_args)
def get_match(match_fields):
    """
    Returns a packet and an OXM list that the packet matches,
    according to match_fields.
    """
    match = ofp.match()
    packet_args = {}
    for field_obj in match_fields.values():
        oxm, pkt_arg = get_oxm(field_obj)
        match.oxm_list.append(oxm)
        packet_args.update(pkt_arg)
    return (str(simple_tcp_packet(**packet_args)), match)
def get_action(action, arg):
    """Translate an (action name, argument) pair into an ofp action object.

    Unknown action names are logged and abort the test process.
    """
    if action == "OUTPUT":
        return ofp.action.output(arg, ofp.OFPCML_NO_BUFFER)
    if action == "PUSH_MPLS":
        return ofp.action.push_mpls()
    if action == "SET_MPLS_TTL":
        return ofp.action.set_mpls_ttl(arg)
    if action == "DEC_MPLS_TTL":
        return ofp.action.dec_mpls_ttl()
    if action == "POP_MPLS":
        return ofp.action.pop_mpls()
    if action == "SET_FIELD":
        # arg is a field object; reuse get_oxm to build the set-field oxm.
        oxm, _ = get_oxm(arg)
        return ofp.action.set_field(oxm)
    if action == "PUSH_VLAN":
        return ofp.action.push_vlan()
    if action == "GROUP":
        return ofp.action.group(arg)
    if action == "SET_NW_TTL":
        return ofp.action.set_nw_ttl(arg)
    logging.info("No get_action for %s", action)
    exit(1)
def get_apply_actions(actions):
    """
    Returns a 1 element list of APPLY_ACTIONS instructions,
    with actions specified in actions.
    """
    instruction = ofp.instruction.apply_actions()
    instruction.actions.extend(
        get_action(name, arg) for name, arg in actions.items())
    return [instruction]
def get_group_all(gid, action_sets):
    """Build an OFPGT_ALL group_add message with one bucket per action set."""
    buckets = [
        ofp.bucket(actions=[get_action(name, arg)
                            for name, arg in action_set.items()])
        for action_set in action_sets]
    return group_add(group_type=ofp.OFPGT_ALL, group_id=gid, buckets=buckets)
def get_group_mod(gid, action_sets):
    """Build an OFPGT_ALL group_mod message with one bucket per action set."""
    buckets = [
        ofp.bucket(actions=[get_action(name, arg)
                            for name, arg in action_set.items()])
        for action_set in action_sets]
    return group_mod(group_type=ofp.OFPGT_ALL, group_id=gid, buckets=buckets)
##############################
# TABLE/TEST SETUP FUNCTIONS #
##############################
def setup_default_table_configurations(client, sess_hdl, dev_tgt):
    # Program the switch so traffic arriving on ifindex 1 with a single
    # VLAN tag of TEST_VLAN is mapped to bridge domain TEST_VLAN:
    # first add a set_bd action-profile member, then point the
    # port_vlan_mapping entry at it.
    # NOTE(review): argument semantics come from the generated thrift API
    # (p4_pd_rpc); the call order (member before entry) appears required —
    # confirm against the switch API documentation.
    ifindex = 1
    action_spec = dc_set_bd_action_spec_t(
        action_bd=TEST_VLAN,
        action_vrf=0,
        action_rmac_group=0,
        action_ipv4_unicast_enabled=True,
        action_ipv6_unicast_enabled=False,
        action_bd_label=0,
        action_igmp_snooping_enabled=0,
        action_mld_snooping_enabled=0,
        action_ipv4_urpf_mode=0,
        action_ipv6_urpf_mode=0,
        action_stp_group=0,
        action_stats_idx=0,
        action_learning_enabled=0)
    mbr_hdl = client.bd_action_profile_add_member_with_set_bd(
        sess_hdl, dev_tgt,
        action_spec)
    # Match: ifindex 1, outer VLAN tag valid with vid TEST_VLAN, no inner tag.
    match_spec = dc_port_vlan_mapping_match_spec_t(
        ingress_metadata_ifindex=ifindex,
        vlan_tag__0__valid=True,
        vlan_tag__0__vid=TEST_VLAN,
        vlan_tag__1__valid=0,
        vlan_tag__1__vid=0)
    client.port_vlan_mapping_add_entry(
        sess_hdl, dev_tgt,
        match_spec, mbr_hdl)
def setup_pre(mc, sess_hdl, dev_tgt):
    """Create multicast group 1 flooding to ports 1-3 and attach a node."""
    mgrp_hdl = mc.mc_mgrp_create(sess_hdl, dev_tgt.dev_id, 1)
    port_map = [0] * 32
    lag_map = [0] * 32
    # Ports 1, 2 and 3 all live in the first byte of the port bitmap.
    flood_bits = 0
    for port in (1, 2, 3):
        flood_bits |= 1 << port
    port_map[0] = flood_bits
    node_hdl = mc.mc_node_create(sess_hdl, dev_tgt.dev_id, 0,
                                 bytes_to_string(port_map),
                                 bytes_to_string(lag_map))
    mc.mc_associate_node(sess_hdl, dev_tgt.dev_id, mgrp_hdl, node_hdl)
def setup(self):
    """
    Open a thrift session against device 0 (all pipes) and install the
    default table configuration plus the multicast (PRE) setup.
    """
    session = self.conn_mgr.client_init(16)
    target = DevTarget_t(0, hex_to_i16(0xFFFF))
    setup_default_table_configurations(self.client, session, target)
    setup_pre(self.mc, session, target)
##############
# TEST CASES #
##############
class Output(openflow_base_tests.OFTestInterface):
    """
    Forwards matching packet.

    Installs a dmac flow matching TEST_ETH_DST/TEST_VLAN with a single
    OUTPUT action, sends a matching packet in on the first port,
    verifies it egresses the expected port, then deletes the flow.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")

    def runTest(self):
        setup(self)
        ports = sorted(config["port_map"].keys())
        table, out_port = openflow_tables["dmac"], ports[0]
        # Match on destination MAC plus ingress VLAN.
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        output = {
            "OUTPUT": out_port
        }
        instr = get_apply_actions(output)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=1, cookie=41)
        exp_pkt = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=TEST_VLAN,
                                    eth_dst=TEST_ETH_DST)
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(ports[0], pkt)
        verify_packet(self, exp_pkt, out_port)
        # Clean up: remove the flow by its cookie.
        req = flow_delete(cookie=41, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
class NWTTL(openflow_base_tests.OFTestInterface):
    """
    Sets ttl of matching packet.

    Installs a dmac flow whose action list both rewrites the IP TTL
    (SET_NW_TTL) and outputs the packet, then verifies the rewritten
    TTL on egress.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")

    def runTest(self):
        setup(self)
        ttl, ports = 0x37, sorted(config["port_map"].keys())
        table, out_port = openflow_tables["dmac"], ports[2]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        nw = {
            "OUTPUT": out_port,
            "SET_NW_TTL": ttl
        }
        instr = get_apply_actions(nw)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=2, cookie=42)
        # Expected packet carries the rewritten TTL.
        exp_pkt = str(simple_tcp_packet(ip_ttl=ttl, dl_vlan_enable=True,
                                        vlan_vid=TEST_VLAN, eth_dst=TEST_ETH_DST))
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(ports[0], pkt)
        verify_packets(self, exp_pkt, [out_port])
        req = flow_delete(cookie=42, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
class GroupAdd(openflow_base_tests.OFTestInterface):
    """
    Create a group that pushes a vlan, sets vlan id
    and forwards out a port.

    The ALL group has two buckets, so one ingress packet is replicated
    to two ports, each copy tagged with a different VLAN id.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")

    def runTest(self):
        setup(self)
        # Group id encodes the group type in the top byte.
        group_id, ports = (1 << 24) + 4, sorted(config["port_map"].keys())
        outport1, outport2 = ports[0], ports[1]
        bucket1 = {
            "PUSH_VLAN": None,
            "SET_FIELD": OFMatchField("OFPXMT_OFB_VLAN_VID", val=10),
            "OUTPUT": outport1
        }
        bucket2 = {
            "PUSH_VLAN": None,
            "SET_FIELD": OFMatchField("OFPXMT_OFB_VLAN_VID", val=19),
            "OUTPUT": outport2
        }
        req = get_group_all(group_id, [bucket1, bucket2])
        self.controller.message_send(req)
        do_barrier(self.controller)
        table = openflow_tables["dmac"]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        groupall = {
            "GROUP": group_id
        }
        instr = get_apply_actions(groupall)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=3, cookie=43)
        exp_pkt1 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=10,
                                     eth_dst=TEST_ETH_DST)
        exp_pkt2 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=19,
                                     eth_dst=TEST_ETH_DST)
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(ports[0], pkt)
        # One replicated copy per bucket.
        verify_packet(self, exp_pkt1, outport1)
        verify_packet(self, exp_pkt2, outport2)
        req = flow_delete(cookie=43, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
        req = group_delete(group_type=ofp.OFPGT_ALL, group_id=(1 << 24) + 4)
        self.controller.message_send(req)
        do_barrier(self.controller)
class GroupMod(openflow_base_tests.OFTestInterface):
    """
    Creates an ALL group, verifies replication through its initial
    VLAN-tagging buckets, then replaces the buckets via group_mod and
    verifies the new (TTL-rewriting, three-port) behavior with the
    same flow entry.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")

    def runTest(self):
        setup(self)
        group_id, ports = (1 << 24) + 9, sorted(config["port_map"].keys())
        outport1, outport2 = ports[0], ports[1]
        bucket1 = {
            "PUSH_VLAN": None,
            "SET_FIELD": OFMatchField("OFPXMT_OFB_VLAN_VID", val=6),
            "OUTPUT": outport1
        }
        bucket2 = {
            "PUSH_VLAN": None,
            "SET_FIELD": OFMatchField("OFPXMT_OFB_VLAN_VID", val=4),
            "OUTPUT": outport2
        }
        req = get_group_all(group_id, [bucket1, bucket2])
        self.controller.message_send(req)
        do_barrier(self.controller)
        table = openflow_tables["dmac"]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        groupall = {
            "GROUP": group_id
        }
        instr = get_apply_actions(groupall)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=3, cookie=44)
        exp_pkt1 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=6,
                                     eth_dst=TEST_ETH_DST)
        exp_pkt2 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=4,
                                     eth_dst=TEST_ETH_DST)
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(ports[0], pkt)
        verify_packet(self, exp_pkt1, outport1)
        verify_packet(self, exp_pkt2, outport2)
        # Replace the group's buckets: three TTL-rewriting outputs.
        outport1, outport2, outport3 = ports[1], ports[2], ports[3]
        bucket1 = {
            "SET_NW_TTL": 7,
            "OUTPUT": outport1
        }
        bucket2 = {
            "SET_NW_TTL": 17,
            "OUTPUT": outport2
        }
        bucket3 = {
            "SET_NW_TTL": 27,
            "OUTPUT": outport3
        }
        req = get_group_mod(group_id, [bucket1, bucket2, bucket3])
        self.controller.message_send(req)
        do_barrier(self.controller)
        exp_pkt1 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=TEST_VLAN,
                                     ip_ttl=7, eth_dst=TEST_ETH_DST)
        exp_pkt2 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=TEST_VLAN,
                                     ip_ttl=17, eth_dst=TEST_ETH_DST)
        exp_pkt3 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=TEST_VLAN,
                                     ip_ttl=27, eth_dst=TEST_ETH_DST)
        self.dataplane.send(ports[0], pkt)
        verify_packet(self, exp_pkt1, outport1)
        verify_packet(self, exp_pkt2, outport2)
        verify_packet(self, exp_pkt3, outport3)
        req = flow_delete(cookie=44, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
        req = group_delete(group_type=ofp.OFPGT_ALL, group_id=(1 << 24) + 9)
        self.controller.message_send(req)
        do_barrier(self.controller)
class TableStatsGet(openflow_base_tests.OFTestInterface):
    """
    Verifies per-table lookup_count / matched_count statistics.

    Sends a known number of matching and non-matching packets through
    table 0 and checks that both counters advanced by the expected
    amounts relative to their initial values.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")

    def runTest(self):
        setup(self)
        # Snapshot the counters so the assertions are delta-based.
        req = table_stats_req()
        (reply, pkt) = self.controller.transact(req)
        initial_matched_count = reply.entries[0].matched_count
        initial_lookup_count = reply.entries[0].lookup_count
        ports = sorted(config["port_map"].keys())
        out_port = ports[0]
        table = openflow_tables["dmac"]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        hit_pkt, match = get_match(table.match_fields)
        output = {
            "OUTPUT": out_port
        }
        instr = get_apply_actions(output)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=1, cookie=45)
        self.controller.message_send(req)
        do_barrier(self.controller)
        num_hit_packets = 10
        for _ in xrange(num_hit_packets):
            self.dataplane.send(ports[0], hit_pkt)
        # A different dmac/vlan misses the flow but still counts as a
        # table lookup.
        miss_pkt = str(simple_tcp_packet(eth_dst="00:77:22:55:99:11",
                                         dl_vlan_enable=True, vlan_vid=3))
        num_miss_packets = 7
        for _ in xrange(num_miss_packets):
            self.dataplane.send(ports[0], miss_pkt)
        # Allow time for the switch to sync counters before reading.
        time.sleep(3)
        req = table_stats_req()
        (reply, pkt) = self.controller.transact(req)
        req = flow_delete(cookie=45, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
        assert reply.entries[0].lookup_count == num_miss_packets + num_hit_packets + initial_lookup_count
        assert reply.entries[0].matched_count == num_hit_packets + initial_matched_count
class PacketIn(openflow_base_tests.OFTestInterface):
    """
    Verifies OFPT_PACKET_IN delivery: a flow whose action outputs to
    OFPP_CONTROLLER causes the matching packet to be punted to the
    controller with reason OFPR_ACTION.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")

    def runTest(self):
        setup(self)
        ports = sorted(config["port_map"].keys())
        in_port = ports[0]
        table = openflow_tables["dmac"]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        # Punt matching packets to the controller instead of a port.
        output = {
            "OUTPUT": ofp.const.OFPP_CONTROLLER
        }
        instr = get_apply_actions(output)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=1, cookie=46)
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(in_port, pkt)
        verify_packet_in(self, str(pkt), in_port, ofp.const.OFPR_ACTION,
                         controller=self.controller)
        req = flow_delete(cookie=46, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generates a fake TestExpectations file consisting of flaky tests from the bot
corresponding to the given port."""
import json
import logging
import os.path
import urllib
import urllib2
from webkitpy.layout_tests.models.test_expectations import TestExpectations, PASS
from webkitpy.layout_tests.models.test_expectations import TestExpectationLine
_log = logging.getLogger(__name__)
class ResultsJSON(object):
    """Contains the contents of a results.json file.
    results.json v4 format:
    {
        'version': 4,
        'builder name' : {
            'blinkRevision': [],
            'tests': {
                'directory' { # Each path component is a dictionary.
                    'testname.html': {
                        'expected' : 'FAIL', # Expectation name.
                        'results': [], # Run-length encoded result.
                        'times': [],
                        'bugs': [], # Bug URLs.
                    }
                }
            }
        }
        'buildNumbers': [],
        'secondsSinceEpoch': [],
        'chromeRevision': [],
        'failure_map': {} # Map from letter code to expectation name.
    }
    """
    TESTS_KEY = 'tests'
    FAILURE_MAP_KEY = 'failure_map'
    RESULTS_KEY = 'results'
    EXPECTATIONS_KEY = 'expected'
    BUGS_KEY = 'bugs'
    RLE_LENGTH = 0
    RLE_VALUE = 1

    # results.json was originally designed to support
    # multiple builders in one json file, so the builder_name
    # is needed to figure out which builder this json file
    # refers to (and thus where the results are stored)
    def __init__(self, builder_name, json_dict):
        self.builder_name = builder_name
        self._json = json_dict

    def _walk_trie(self, trie, parent_path):
        """Recursively yield (test_path, leaf_dict) pairs from the trie.

        A node is a leaf (an individual test) once it contains a
        RESULTS_KEY entry; otherwise it is a directory to descend into.
        """
        for name, value in trie.items():
            full_path = os.path.join(parent_path, name)
            # FIXME: If we ever have a test directory self.RESULTS_KEY
            # ("results"), this logic will break!
            if self.RESULTS_KEY not in value:
                for path, results in self._walk_trie(value, full_path):
                    yield path, results
            else:
                yield full_path, value

    def walk_results(self, full_path=''):
        """Yield (test_path, results_dict) for every test in the trie.

        Bug fix: *full_path* used to be ignored (the walk always started
        with parent_path=''); it is now honored as the path prefix. The
        default '' preserves the previous behavior exactly.
        """
        tests_trie = self._json[self.builder_name][self.TESTS_KEY]
        return self._walk_trie(tests_trie, parent_path=full_path)

    def expectation_for_type(self, type_char):
        """Map a one-letter result code to its expectation name."""
        return self._json[self.builder_name][self.FAILURE_MAP_KEY][type_char]

    # Knowing how to parse the run-length-encoded values in results.json
    # is a detail of this class.
    def occurances_and_type_from_result_item(self, item):
        """Return (run_length, result_type_codes) for one RLE item."""
        return item[self.RLE_LENGTH], item[self.RLE_VALUE]
class BotTestExpectationsFactory(object):
    """Builds BotTestExpectations objects from bot flakiness JSON."""

    RESULTS_URL_PREFIX = 'http://test-results.appspot.com/testfile?master=ChromiumWebkit&testtype=webkit_tests&name=results-small.json&builder='

    def __init__(self, builders):
        self.builders = builders

    def _results_json_for_port(self, port_name, builder_category):
        # Resolve the port to its builder; no builder means no data.
        builder = self.builders.builder_name_for_port_name(port_name)
        if builder:
            return self._results_json_for_builder(builder)
        return None

    def _results_json_for_builder(self, builder):
        # Returns None (implicitly) when the fetch fails.
        url = self.RESULTS_URL_PREFIX + urllib.quote(builder)
        try:
            _log.debug('Fetching flakiness data from appengine.')
            return ResultsJSON(builder, json.load(urllib2.urlopen(url)))
        except urllib2.URLError as error:
            _log.warning('Could not retrieve flakiness data from the bot. url: %s', url)
            _log.warning(error)

    def expectations_for_port(self, port_name, builder_category='layout'):
        # FIXME: This only grabs release builder's flakiness data. If we're running debug,
        # when we should grab the debug builder's data.
        # FIXME: What should this do if there is no debug builder for a port, e.g. we have
        # no debug XP builder? Should it use the release bot or another Windows debug bot?
        # At the very least, it should log an error.
        results_json = self._results_json_for_port(port_name, builder_category)
        return BotTestExpectations(results_json, self.builders) if results_json else None

    def expectations_for_builder(self, builder):
        results_json = self._results_json_for_builder(builder)
        return BotTestExpectations(results_json, self.builders) if results_json else None
class BotTestExpectations(object):
    """Derives flakiness information for one builder from its results.json."""

    # FIXME: Get this from the json instead of hard-coding it.
    RESULT_TYPES_TO_IGNORE = ['N', 'X', 'Y']  # NO_DATA, SKIP, NOTRUN

    # TODO(ojan): Remove this once crbug.com/514378 is fixed.
    # The JSON can contain results for expectations, not just actual result types.
    NON_RESULT_TYPES = ['S', 'X']  # SLOW, SKIP

    # specifiers arg is used in unittests to avoid the static dependency on builders.
    def __init__(self, results_json, builders, specifiers=None):
        self.results_json = results_json
        self.specifiers = specifiers or set(builders.specifiers_for_builder(results_json.builder_name))

    def _line_from_test_and_flaky_types(self, test_path, flaky_types):
        """Build a TestExpectationLine recording *flaky_types* for *test_path*."""
        line = TestExpectationLine()
        line.original_string = test_path
        line.name = test_path
        line.filename = test_path
        line.path = test_path  # FIXME: Should this be normpath?
        line.matching_tests = [test_path]
        line.bugs = ["crbug.com/FILE_A_BUG_BEFORE_COMMITTING_THIS"]
        line.expectations = sorted(flaky_types)
        line.specifiers = self.specifiers
        return line

    def flakes_by_path(self, only_ignore_very_flaky):
        """Sets test expectations to bot results if there are at least two distinct results."""
        flakes_by_path = {}
        for test_path, entry in self.results_json.walk_results():
            flaky_types = self._flaky_types_in_results(entry, only_ignore_very_flaky)
            # A single result type means the test is not flaky.
            if len(flaky_types) <= 1:
                continue
            flakes_by_path[test_path] = sorted(flaky_types)
        return flakes_by_path

    def unexpected_results_by_path(self):
        """For tests with unexpected results, returns original expectations + results."""
        def exp_to_string(exp):
            return TestExpectations.EXPECTATIONS_TO_STRING.get(exp, None).upper()

        def string_to_exp(string):
            # Needs a bit more logic than the method above,
            # since a PASS is 0 and evaluates to False.
            result = TestExpectations.EXPECTATIONS.get(string.lower(), None)
            if not result is None:
                return result
            raise ValueError(string)

        unexpected_results_by_path = {}
        for test_path, entry in self.results_json.walk_results():
            # Expectations for this test. No expectation defaults to PASS.
            exp_string = entry.get(self.results_json.EXPECTATIONS_KEY, u'PASS')
            # All run-length-encoded results for this test.
            results_dict = entry.get(self.results_json.RESULTS_KEY, {})
            # Set of expectations for this test.
            expectations = set(map(string_to_exp, exp_string.split(' ')))
            # Set of distinct results for this test.
            result_types = self._all_types_in_results(results_dict)
            # Distinct results as non-encoded strings.
            result_strings = map(self.results_json.expectation_for_type, result_types)
            # Distinct resulting expectations.
            result_exp = map(string_to_exp, result_strings)
            expected = lambda e: TestExpectations.result_was_expected(e, expectations, False)
            additional_expectations = set(e for e in result_exp if not expected(e))
            # Test did not have unexpected results.
            if not additional_expectations:
                continue
            expectations.update(additional_expectations)
            unexpected_results_by_path[test_path] = sorted(map(exp_to_string, expectations))
        return unexpected_results_by_path

    def all_results_by_path(self):
        """Returns all seen result types for each test.

        Returns a dictionary from each test path that has a result to a list of distinct, sorted result
        strings. For example, if the test results are as follows:
            a.html IMAGE IMAGE PASS PASS PASS TIMEOUT PASS TEXT
            b.html PASS PASS PASS PASS PASS PASS PASS PASS
            c.html
        This method will return:
            {
                'a.html': ['IMAGE', 'TEXT', 'TIMEOUT', 'PASS'],
                'b.html': ['PASS'],
            }
        """
        results_by_path = {}
        for test_path, entry in self.results_json.walk_results():
            results_dict = entry.get(self.results_json.RESULTS_KEY, {})
            result_types = self._all_types_in_results(results_dict)
            if not result_types:
                continue
            # Distinct results as non-encoded strings.
            result_strings = map(self.results_json.expectation_for_type, result_types)
            results_by_path[test_path] = sorted(result_strings)
        return results_by_path

    def expectation_lines(self, only_ignore_very_flaky):
        """Return a TestExpectationLine for every test observed to be flaky."""
        lines = []
        for test_path, entry in self.results_json.walk_results():
            flaky_types = self._flaky_types_in_results(entry, only_ignore_very_flaky)
            if len(flaky_types) > 1:
                line = self._line_from_test_and_flaky_types(test_path, flaky_types)
                lines.append(line)
        return lines

    def _all_types_in_results(self, run_length_encoded_results):
        """Return the set of result-type codes seen, minus ignored codes."""
        results = set()
        for result_item in run_length_encoded_results:
            _, result_types = self.results_json.occurances_and_type_from_result_item(result_item)
            for result_type in result_types:
                if result_type not in self.RESULT_TYPES_TO_IGNORE:
                    results.add(result_type)
        return results

    def _result_to_enum(self, result):
        """Map a result-name string (e.g. 'PASS') to its enum value."""
        return TestExpectations.EXPECTATIONS[result.lower()]

    def _flaky_types_in_results(self, results_entry, only_ignore_very_flaky):
        """Return result-type strings that occurred in flaky (retried) runs."""
        flaky_results = set()
        # Always include pass as an expected result. Passes will never turn the bot red.
        # This fixes cases where the expectations have an implicit Pass, e.g. [ Slow ].
        latest_expectations = [PASS]
        if self.results_json.EXPECTATIONS_KEY in results_entry:
            expectations_list = results_entry[self.results_json.EXPECTATIONS_KEY].split(' ')
            latest_expectations += [self._result_to_enum(expectation) for expectation in expectations_list]
        for result_item in results_entry[self.results_json.RESULTS_KEY]:
            _, result_types_str = self.results_json.occurances_and_type_from_result_item(result_item)
            result_types = []
            for result_type in result_types_str:
                # TODO(ojan): Remove this if-statement once crbug.com/514378 is fixed.
                if result_type not in self.NON_RESULT_TYPES:
                    result_types.append(self.results_json.expectation_for_type(result_type))
            # It didn't flake if it didn't retry.
            if len(result_types) <= 1:
                continue
            # If the test ran as expected after only one retry, it's not very flaky.
            # It's only very flaky if it failed the first run and the first retry
            # and then ran as expected in one of the subsequent retries.
            # If there are only two entries, then that means it failed on the first
            # try and ran as expected on the second because otherwise we'd have
            # a third entry from the next try.
            if only_ignore_very_flaky and len(result_types) == 2:
                continue
            has_unexpected_results = False
            for result_type in result_types:
                result_enum = self._result_to_enum(result_type)
                # TODO(ojan): We really should be grabbing the expected results from the time
                # of the run instead of looking at the latest expected results. That's a lot
                # more complicated though. So far we've been looking at the aggregated
                # results_small.json off test_results.appspot, which has all the information
                # for the last 100 runs. In order to do this, we'd need to look at the
                # individual runs' full_results.json, which would be slow and more complicated.
                # The only thing we lose by not fixing this is that a test that was flaky
                # and got fixed will still get printed out until 100 runs have passed.
                if not TestExpectations.result_was_expected(result_enum, latest_expectations, test_needs_rebaselining=False):
                    has_unexpected_results = True
                    break
            if has_unexpected_results:
                flaky_results = flaky_results.union(set(result_types))
        return flaky_results
|
|
# -*- coding: utf-8 -*-
# Django settings for olympia project.
import logging
import os
import socket
import dj_database_url
from django.utils.functional import lazy
from heka.config import client_from_dict_config
# Hosts Django is allowed to serve; restricted to Mozilla-owned domains.
ALLOWED_HOSTS = [
    '.allizom.org',
    '.mozilla.org',
    '.mozilla.com',
    '.mozilla.net',
]

LOG_TABLE_SUFFIX = ''
EVENT_TABLE_SUFFIX = ''

# jingo-minify settings
CACHEBUST_IMGS = True
try:
    # If we have build ids available, we'll grab them here and add them to our
    # CACHE_PREFIX. This will let us not have to flush memcache during updates
    # and it will let us preload data into it before a production push.
    from build import BUILD_ID_CSS, BUILD_ID_JS
    build_id = "%s%s" % (BUILD_ID_CSS[:2], BUILD_ID_JS[:2])
except ImportError:
    build_id = ""

# jingo-minify: Style sheet media attribute default
CSS_MEDIA_DEFAULT = 'all'

# Make filepaths relative to the root of olympia.
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path = lambda *a: os.path.join(ROOT, *a)

# We need to track this because hudson can't just call its checkout "olympia".
# It puts it in a dir called "workspace". Way to be, hudson.
ROOT_PACKAGE = os.path.basename(ROOT)

DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = True

# need to view JS errors on a remote device? (requires node)
# > npm install now
# > node media/js/debug/remote_debug_server.node.js
# REMOTE_JS_DEBUG = 'localhost:37767'
# then connect to http://localhost:37767/ to view
REMOTE_JS_DEBUG = False

# LESS CSS OPTIONS (Debug only).
LESS_PREPROCESS = True  # Compile LESS with Node, rather than client-side JS?
LESS_LIVE_REFRESH = False  # Refresh the CSS on save?
LESS_BIN = 'lessc'

# Path to stylus (to compile .styl files).
STYLUS_BIN = 'stylus'

# Path to cleancss (our CSS minifier).
CLEANCSS_BIN = 'cleancss'

# Path to uglifyjs (our JS minifier).
UGLIFY_BIN = 'uglifyjs'  # Set as None to use YUI instead (at your risk).

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS

# Staff / automation mailbox addresses.
FLIGTAR = 'amo-admins+fligtar-rip@mozilla.org'
EDITORS_EMAIL = 'amo-editors@mozilla.org'
SENIOR_EDITORS_EMAIL = 'amo-editors+somethingbad@mozilla.org'
THEMES_EMAIL = 'theme-reviews@mozilla.org'
ABUSE_EMAIL = 'amo-admins+ivebeenabused@mozilla.org'
NOBODY_EMAIL = 'nobody@mozilla.org'

# Database connection comes from the environment, 12-factor style.
DATABASE_URL = os.environ.get('DATABASE_URL',
                              'mysql://root:@localhost/olympia')
DATABASES = {'default': dj_database_url.parse(DATABASE_URL)}
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['default']['TEST_CHARSET'] = 'utf8'
DATABASES['default']['TEST_COLLATION'] = 'utf8_general_ci'

# A database to be used by the services scripts, which does not use Django.
# The settings can be copied from DATABASES, but since its not a full Django
# database connection, only some values are supported.
SERVICES_DATABASE = {
    'NAME': DATABASES['default']['NAME'],
    'USER': DATABASES['default']['USER'],
    'PASSWORD': DATABASES['default']['PASSWORD'],
    'HOST': DATABASES['default']['HOST'],
    'PORT': DATABASES['default']['PORT'],
}

DATABASE_ROUTERS = ('multidb.PinningMasterSlaveRouter',)

# For use django-mysql-pool backend.
DATABASE_POOL_ARGS = {
    'max_overflow': 10,
    'pool_size': 5,
    'recycle': 300
}

# Put the aliases for your slave databases in this list.
SLAVE_DATABASES = []

PASSWORD_HASHERS = (
    'users.models.SHA512PasswordHasher',
)

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'

# Accepted locales
# Note: If you update this list, don't forget to also update the locale
# permissions in the database.
AMO_LANGUAGES = (
    'af', 'ar', 'bg', 'bn-BD', 'ca', 'cs', 'da', 'de', 'el', 'en-US', 'es',
    'eu', 'fa', 'fi', 'fr', 'ga-IE', 'he', 'hu', 'id', 'it', 'ja', 'ko', 'mk',
    'mn', 'nl', 'pl', 'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sl', 'sq', 'sv-SE',
    'uk', 'vi', 'zh-CN', 'zh-TW',
)

# Explicit conversion of a shorter language code into a more specific one.
SHORTER_LANGUAGES = {
    'en': 'en-US', 'ga': 'ga-IE', 'pt': 'pt-PT', 'sv': 'sv-SE', 'zh': 'zh-CN'
}

# Not shown on the site, but .po files exist and these are available on the
# L10n dashboard. Generally languages start here and move into AMO_LANGUAGES.
HIDDEN_LANGUAGES = ('cy', 'hr', 'sr', 'sr-Latn', 'tr')
def lazy_langs(languages):
    """
    Return {lowercased code: native name} for the given language codes,
    using product-details data; empty dict if the data isn't available.
    """
    from product_details import product_details
    native_names = product_details.languages
    if not native_names:
        return {}
    return dict((code.lower(), native_names[code]['native'])
                for code in languages)
# Where product details are stored see django-mozilla-product-details
PROD_DETAILS_DIR = path('lib/product_json')

# Override Django's built-in with our native names
LANGUAGES = lazy(lazy_langs, dict)(AMO_LANGUAGES)
RTL_LANGUAGES = ('ar', 'fa', 'fa-IR', 'he')
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])

# Tower / L10n
STANDALONE_DOMAINS = ['messages', 'javascript']
TOWER_KEYWORDS = {
    '_lazy': None,
}
TOWER_ADD_HEADERS = True

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# The host currently running the site. Only use this in code for good reason;
# the site is designed to run on a cluster and should continue to support that
HOSTNAME = socket.gethostname()

# The front end domain of the site. If you're not running on a cluster this
# might be the same as HOSTNAME but don't depend on that. Use this when you
# need the real domain.
DOMAIN = HOSTNAME

# Full base URL for your main site including protocol. No trailing slash.
# Example: https://addons.mozilla.org
SITE_URL = 'http://%s' % DOMAIN

# Domain of the services site. This is where your API, and in-product pages
# live.
SERVICES_DOMAIN = 'services.%s' % DOMAIN

# Full URL to your API service. No trailing slash.
# Example: https://services.addons.mozilla.org
SERVICES_URL = 'http://%s' % SERVICES_DOMAIN

# When True, the addon API should include performance data.
API_SHOW_PERF_DATA = True

# The domain of the mobile site.
MOBILE_DOMAIN = 'm.%s' % DOMAIN

# The full url of the mobile site.
MOBILE_SITE_URL = 'http://%s' % MOBILE_DOMAIN

OAUTH_CALLBACK_VIEW = 'api.views.request_token_ready'

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = path('user-media')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/user-media/'

# Absolute path to a temporary storage area
TMP_PATH = path('tmp')

# Tarballs in DUMPED_APPS_PATH deleted 30 days after they have been written.
DUMPED_APPS_DAYS_DELETE = 3600 * 24 * 30

# Tarballs in DUMPED_USERS_PATH deleted 30 days after they have been written.
DUMPED_USERS_DAYS_DELETE = 3600 * 24 * 30

# paths that don't require an app prefix
SUPPORTED_NONAPPS = (
    'about', 'admin', 'apps', 'blocklist', 'credits', 'developer_agreement',
    'developer_faq', 'developers', 'editors', 'faq', 'jsi18n', 'localizers',
    'review_guide', 'google1f3e37b7351799a5.html', 'robots.txt', 'statistics',
    'services', 'sunbird', 'static', 'user-media',
)
DEFAULT_APP = 'firefox'

# paths that don't require a locale prefix
SUPPORTED_NONLOCALES = (
    'google1f3e37b7351799a5.html', 'robots.txt', 'services', 'downloads',
    'blocklist', 'static', 'user-media',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'r#%9w^o_80)7f%!_ir5zx$tu3mupw9u%&s!)-_q%gy7i+fhx#)'

# Templates

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'lib.template_loader.Loader',
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

# We don't want jingo's template loaded to pick up templates for third party
# apps that don't use Jinja2. The Following is a list of prefixes for jingo to
# ignore.
JINGO_EXCLUDE_APPS = (
    'djcelery',
    'django_extensions',
    'admin',
    'browserid',
    'toolbar_statsd',
    'registration',
    'debug_toolbar',
    'waffle',
)

JINGO_EXCLUDE_PATHS = (
    'users/email',
    'reviews/emails',
    'editors/emails',
    'amo/emails',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
    'session_csrf.context_processor',
    'django.contrib.messages.context_processors.messages',
    'amo.context_processors.app',
    'amo.context_processors.i18n',
    'amo.context_processors.global_settings',
    'amo.context_processors.static_url',
    'jingo_minify.helpers.build_ids',
)

TEMPLATE_DIRS = (
    path('media/docs'),
    path('templates'),
)
def JINJA_CONFIG():
    """Return the Jinja2 environment configuration used by jingo."""
    import jinja2
    from django.conf import settings
    from django.core.cache import cache
    extensions = ['tower.template.i18n', 'amo.ext.cache', 'jinja2.ext.do',
                  'jinja2.ext.with_', 'jinja2.ext.loopcontrols']
    config = {
        'extensions': extensions,
        'finalize': lambda x: x if x is not None else '',
    }
    # The memcached bytecode cache is deliberately disabled via the
    # leading "False and"; the branch is kept for easy re-enabling.
    if False and not settings.DEBUG:
        # We're passing the _cache object directly to jinja because
        # Django can't store binary directly; it enforces unicode on it.
        # Details: http://jinja.pocoo.org/2/documentation/api#bytecode-cache
        # and in the errors you get when you try it the other way.
        bc = jinja2.MemcachedBytecodeCache(cache._cache,
                                           "%sj2:" % settings.CACHE_PREFIX)
        config['cache_size'] = -1  # Never clear the cache
        config['bytecode_cache'] = bc
    return config
# Request/response middleware. Order is significant; see inline notes.
MIDDLEWARE_CLASSES = (
    # AMO URL middleware comes first so everyone else sees nice URLs.
    'django_statsd.middleware.GraphiteRequestTimingMiddleware',
    'django_statsd.middleware.GraphiteMiddleware',
    'amo.middleware.LocaleAndAppURLMiddleware',
    # Mobile detection should happen in Zeus.
    'mobility.middleware.DetectMobileMiddleware',
    'mobility.middleware.XMobileMiddleware',
    'amo.middleware.RemoveSlashMiddleware',
    # Munging REMOTE_ADDR must come before ThreadRequest.
    'commonware.middleware.SetRemoteAddrFromForwardedFor',
    'commonware.middleware.FrameOptionsHeader',
    'commonware.middleware.StrictTransportMiddleware',
    'multidb.middleware.PinningRouterMiddleware',
    'waffle.middleware.WaffleMiddleware',
    'csp.middleware.CSPMiddleware',
    'amo.middleware.CommonMiddleware',
    'amo.middleware.NoVarySessionMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'commonware.log.ThreadRequestMiddleware',
    'apps.search.middleware.ElasticsearchExceptionMiddleware',
    'session_csrf.CsrfMiddleware',
    'api.middleware.RestOAuthMiddleware',
    # This should come after authentication middleware
    'access.middleware.ACLMiddleware',
    'commonware.middleware.ScrubRequestOnException',
)
# Auth
AUTHENTICATION_BACKENDS = (
    'users.backends.AmoUserBackend',
    'django_browserid.auth.BrowserIDBackend'
)
AUTH_USER_MODEL = 'users.UserProfile'
# Override this in the site settings.
ROOT_URLCONF = 'lib.urls_base'
INSTALLED_APPS = (
    # Import ordering issues ahoy.
    'djcelery',
    'amo',  # amo comes first so it always takes precedence.
    'abuse',
    'access',
    'addons',
    'api',
    'applications',
    'bandwagon',
    'blocklist',
    'browse',
    'compat',
    'cronjobs',
    'csp',
    'devhub',
    'discovery',
    'editors',
    'files',
    'jingo_minify',
    'localizers',
    'lib.es',
    'moz_header',
    'pages',
    'perf',
    'product_details',
    'reviews',
    'search',
    'sharing',
    'stats',
    'tags',
    'tower',  # for ./manage.py extract
    'translations',
    'users',
    'versions',
    'zadmin',
    # Third party apps
    'django_extensions',
    'gunicorn',
    'raven.contrib.django',
    'piston',
    'waffle',
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    # Has to load after auth
    'django_browserid',
    'django_statsd',
)
# These apps are only needed in a testing environment. They are added to
# INSTALLED_APPS by the amo.runner.TestRunner test runner.
TEST_INSTALLED_APPS = (
    'translations.tests.testapp',
)
# Tells the extract script what files to look for l10n in and what function
# handles the extraction. The Tower library expects this.
DOMAIN_METHODS = {
    'messages': [
        ('apps/**.py',
         'tower.management.commands.extract.extract_tower_python'),
        ('apps/**/templates/**.html',
         'tower.management.commands.extract.extract_tower_template'),
        ('templates/**.html',
         'tower.management.commands.extract.extract_tower_template'),
        ('**/templates/**.lhtml',
         'tower.management.commands.extract.extract_tower_template'),
    ],
    'javascript': [
        # We can't say **.js because that would dive into mochikit and timeplot
        # and all the other baggage we're carrying. Timeplot, in particular,
        # crashes the extractor with bad unicode data.
        ('static/js/*.js', 'javascript'),
        ('static/js/amo2009/**.js', 'javascript'),
        ('static/js/common/**.js', 'javascript'),
        ('static/js/impala/**.js', 'javascript'),
        ('static/js/zamboni/**.js', 'javascript'),
    ],
}
# Bundles is a dictionary of two dictionaries, css and js, which list css files
# and js files that can be bundled together by the minify app.
# NOTE(review): bundle names appear to be consumed by jingo-minify's css()/js()
# template helpers (jingo_minify is in INSTALLED_APPS) -- confirm before
# renaming any key. File order within a bundle is the concatenation order.
MINIFY_BUNDLES = {
    'css': {
        # CSS files common to the entire site.
        'zamboni/css': (
            'css/legacy/main.css',
            'css/legacy/main-mozilla.css',
            'css/legacy/jquery-lightbox.css',
            'css/legacy/autocomplete.css',
            'css/zamboni/zamboni.css',
            'moz_header/header.css',
            'moz_header/footer.css',
            'css/zamboni/tags.css',
            'css/zamboni/tabs.css',
            'css/impala/formset.less',
            'css/impala/suggestions.less',
            'css/impala/header.less',
            'css/impala/moz-tab.css',
            'css/impala/footer.less',
            'css/impala/faux-zamboni.less',
            'css/impala/collection-stats.less',
            'css/zamboni/themes.less',
        ),
        'zamboni/impala': (
            'css/impala/base.css',
            'css/legacy/jquery-lightbox.css',
            'css/impala/site.less',
            'css/impala/typography.less',
            'moz_header/header.css',
            'moz_header/footer.css',
            'css/impala/forms.less',
            'css/common/invisible-upload.less',
            'css/impala/header.less',
            'css/impala/footer.less',
            'css/impala/moz-tab.css',
            'css/impala/hovercards.less',
            'css/impala/toplist.less',
            'css/impala/carousel.less',
            'css/impala/reviews.less',
            'css/impala/buttons.less',
            'css/impala/promos.less',
            'css/impala/addon_details.less',
            'css/impala/policy.less',
            'css/impala/expando.less',
            'css/impala/popups.less',
            'css/impala/l10n.less',
            'css/impala/contributions.less',
            'css/impala/lightbox.less',
            'css/impala/prose.less',
            'css/impala/sharing.less',
            'css/impala/abuse.less',
            'css/impala/paginator.less',
            'css/impala/listing.less',
            'css/impala/versions.less',
            'css/impala/users.less',
            'css/impala/collections.less',
            'css/impala/tooltips.less',
            'css/impala/search.less',
            'css/impala/suggestions.less',
            'css/impala/colorpicker.less',
            'css/impala/personas.less',
            'css/impala/login.less',
            'css/impala/dictionaries.less',
            'css/impala/apps.less',
            'css/impala/formset.less',
            'css/impala/tables.less',
            'css/impala/compat.less',
            'css/impala/localizers.less',
        ),
        'zamboni/stats': (
            'css/impala/stats.less',
        ),
        'zamboni/discovery-pane': (
            'css/zamboni/discovery-pane.css',
            'css/impala/promos.less',
            'css/legacy/jquery-lightbox.css',
        ),
        'zamboni/devhub': (
            'css/impala/tooltips.less',
            'css/zamboni/developers.css',
            'css/zamboni/docs.less',
            'css/impala/developers.less',
            'css/impala/personas.less',
            'css/devhub/listing.less',
            'css/devhub/popups.less',
            'css/devhub/compat.less',
            'css/impala/formset.less',
            'css/devhub/forms.less',
            'css/common/invisible-upload.less',
            'css/devhub/submission.less',
            'css/devhub/refunds.less',
            'css/devhub/buttons.less',
            'css/devhub/in-app-config.less',
        ),
        'zamboni/devhub_impala': (
            'css/impala/developers.less',
            'css/devhub/listing.less',
            'css/devhub/popups.less',
            'css/devhub/compat.less',
            'css/devhub/dashboard.less',
            'css/devhub/forms.less',
            'css/common/invisible-upload.less',
            'css/devhub/submission.less',
            'css/devhub/search.less',
            'css/devhub/refunds.less',
        ),
        'zamboni/editors': (
            'css/zamboni/editors.styl',
        ),
        'zamboni/themes_review': (
            'css/zamboni/developers.css',
            'css/zamboni/editors.styl',
            'css/zamboni/themes_review.styl',
        ),
        'zamboni/files': (
            'css/lib/syntaxhighlighter/shCoreDefault.css',
            'css/zamboni/files.css',
        ),
        'zamboni/mobile': (
            'css/zamboni/mobile.css',
            'css/mobile/typography.less',
            'css/mobile/forms.less',
            'css/mobile/header.less',
            'css/mobile/search.less',
            'css/mobile/listing.less',
            'css/mobile/footer.less',
        ),
        'zamboni/admin': (
            'css/zamboni/admin-django.css',
            'css/zamboni/admin-mozilla.css',
            'css/zamboni/admin_features.css',
            # Datepicker styles and jQuery UI core.
            'css/zamboni/jquery-ui/custom-1.7.2.css',
        ),
    },
    'js': {
        # JS files common to the entire site (pre-impala).
        'common': (
            'js/lib/jquery-1.6.4.js',
            'js/lib/underscore.js',
            'js/zamboni/browser.js',
            'js/amo2009/addons.js',
            'js/zamboni/init.js',
            'js/impala/capabilities.js',
            'js/lib/format.js',
            'js/lib/jquery.cookie.js',
            'js/zamboni/storage.js',
            'js/zamboni/apps.js',
            'js/zamboni/buttons.js',
            'js/zamboni/tabs.js',
            'js/common/keys.js',
            # jQuery UI
            'js/lib/jquery-ui/jquery.ui.core.js',
            'js/lib/jquery-ui/jquery.ui.position.js',
            'js/lib/jquery-ui/jquery.ui.widget.js',
            'js/lib/jquery-ui/jquery.ui.mouse.js',
            'js/lib/jquery-ui/jquery.ui.autocomplete.js',
            'js/lib/jquery-ui/jquery.ui.datepicker.js',
            'js/lib/jquery-ui/jquery.ui.sortable.js',
            'js/zamboni/helpers.js',
            'js/zamboni/global.js',
            'js/amo2009/global.js',
            'js/common/ratingwidget.js',
            'js/lib/jquery-ui/jqModal.js',
            'js/zamboni/l10n.js',
            'js/zamboni/debouncer.js',
            # Homepage
            'js/impala/promos.js',
            'js/zamboni/homepage.js',
            # Add-ons details page
            'js/lib/jquery-ui/ui.lightbox.js',
            'js/zamboni/contributions.js',
            'js/zamboni/addon_details.js',
            'js/impala/abuse.js',
            'js/zamboni/reviews.js',
            # Personas
            'js/lib/jquery.hoverIntent.js',
            'js/zamboni/personas_core.js',
            'js/zamboni/personas.js',
            # Collections
            'js/zamboni/collections.js',
            # Performance
            'js/zamboni/perf.js',
            # Users
            'js/zamboni/users.js',
            # Fix-up outgoing links
            'js/zamboni/outgoing_links.js',
            # Hover delay for global header
            'moz_header/menu.js',
            # Password length and strength
            'js/zamboni/password-strength.js',
            # Search suggestions
            'js/impala/forms.js',
            'js/impala/ajaxcache.js',
            'js/impala/suggestions.js',
            'js/impala/site_suggestions.js',
        ),
        # Impala and Legacy: Things to be loaded at the top of the page
        'preload': (
            'js/lib/jquery-1.6.4.js',
            'js/impala/preloaded.js',
            'js/zamboni/analytics.js',
        ),
        # Impala: Things to be loaded at the bottom
        'impala': (
            'js/lib/underscore.js',
            'js/impala/carousel.js',
            'js/zamboni/browser.js',
            'js/amo2009/addons.js',
            'js/zamboni/init.js',
            'js/impala/capabilities.js',
            'js/lib/format.js',
            'js/lib/jquery.cookie.js',
            'js/zamboni/storage.js',
            'js/zamboni/apps.js',
            'js/zamboni/buttons.js',
            'js/lib/jquery.pjax.js',
            'js/impala/footer.js',
            'js/common/keys.js',
            # BrowserID
            'js/zamboni/browserid_support.js',
            # jQuery UI
            'js/lib/jquery-ui/jquery.ui.core.js',
            'js/lib/jquery-ui/jquery.ui.position.js',
            'js/lib/jquery-ui/jquery.ui.widget.js',
            'js/lib/jquery-ui/jquery.ui.mouse.js',
            'js/lib/jquery-ui/jquery.ui.autocomplete.js',
            'js/lib/jquery-ui/jquery.ui.datepicker.js',
            'js/lib/jquery-ui/jquery.ui.sortable.js',
            'js/lib/truncate.js',
            'js/zamboni/truncation.js',
            'js/impala/ajaxcache.js',
            'js/zamboni/helpers.js',
            'js/zamboni/global.js',
            'js/lib/stick.js',
            'js/impala/global.js',
            'js/common/ratingwidget.js',
            'js/lib/jquery-ui/jqModal.js',
            'js/zamboni/l10n.js',
            'js/impala/forms.js',
            # Homepage
            'js/impala/promos.js',
            'js/impala/homepage.js',
            # Add-ons details page
            'js/lib/jquery-ui/ui.lightbox.js',
            'js/zamboni/contributions.js',
            'js/impala/addon_details.js',
            'js/impala/abuse.js',
            'js/impala/reviews.js',
            # Browse listing pages
            'js/impala/listing.js',
            # Personas
            'js/lib/jquery.hoverIntent.js',
            'js/zamboni/personas_core.js',
            'js/zamboni/personas.js',
            # Persona creation
            'js/common/upload-image.js',
            'js/lib/jquery.minicolors.js',
            'js/impala/persona_creation.js',
            # Collections
            'js/zamboni/collections.js',
            'js/impala/collections.js',
            # Performance
            'js/zamboni/perf.js',
            # Users
            'js/zamboni/users.js',
            'js/impala/users.js',
            # Search
            'js/impala/serializers.js',
            'js/impala/search.js',
            'js/impala/suggestions.js',
            'js/impala/site_suggestions.js',
            # Login
            'js/impala/login.js',
            # Fix-up outgoing links
            'js/zamboni/outgoing_links.js',
        ),
        'zamboni/discovery': (
            'js/lib/jquery-1.6.4.js',
            'js/lib/underscore.js',
            'js/zamboni/browser.js',
            'js/zamboni/init.js',
            'js/impala/capabilities.js',
            'js/lib/format.js',
            'js/impala/carousel.js',
            # Add-ons details
            'js/lib/jquery.cookie.js',
            'js/zamboni/storage.js',
            'js/zamboni/buttons.js',
            'js/lib/jquery-ui/ui.lightbox.js',
            # Personas
            'js/lib/jquery.hoverIntent.js',
            'js/zamboni/personas_core.js',
            'js/zamboni/personas.js',
            'js/zamboni/debouncer.js',
            'js/lib/truncate.js',
            'js/zamboni/truncation.js',
            'js/impala/promos.js',
            'js/zamboni/discovery_addons.js',
            'js/zamboni/discovery_pane.js',
        ),
        'zamboni/discovery-video': (
            'js/lib/popcorn-1.0.js',
            'js/zamboni/discovery_video.js',
        ),
        'zamboni/devhub': (
            'js/lib/truncate.js',
            'js/zamboni/truncation.js',
            'js/common/upload-base.js',
            'js/common/upload-addon.js',
            'js/common/upload-image.js',
            'js/impala/formset.js',
            'js/zamboni/devhub.js',
            'js/zamboni/validator.js',
        ),
        'zamboni/editors': (
            'js/lib/highcharts.src.js',
            'js/zamboni/editors.js',
            'js/lib/jquery.hoverIntent.js',  # Used by jquery.zoomBox.
            'js/lib/jquery.zoomBox.js',  # Used by themes_review.
            'js/zamboni/themes_review.js',
        ),
        'zamboni/files': (
            'js/lib/diff_match_patch_uncompressed.js',
            'js/lib/syntaxhighlighter/xregexp-min.js',
            'js/lib/syntaxhighlighter/shCore.js',
            'js/lib/syntaxhighlighter/shLegacy.js',
            'js/lib/syntaxhighlighter/shBrushAppleScript.js',
            'js/lib/syntaxhighlighter/shBrushAS3.js',
            'js/lib/syntaxhighlighter/shBrushBash.js',
            'js/lib/syntaxhighlighter/shBrushCpp.js',
            'js/lib/syntaxhighlighter/shBrushCSharp.js',
            'js/lib/syntaxhighlighter/shBrushCss.js',
            'js/lib/syntaxhighlighter/shBrushDiff.js',
            'js/lib/syntaxhighlighter/shBrushJava.js',
            'js/lib/syntaxhighlighter/shBrushJScript.js',
            'js/lib/syntaxhighlighter/shBrushPhp.js',
            'js/lib/syntaxhighlighter/shBrushPlain.js',
            'js/lib/syntaxhighlighter/shBrushPython.js',
            'js/lib/syntaxhighlighter/shBrushSass.js',
            'js/lib/syntaxhighlighter/shBrushSql.js',
            'js/lib/syntaxhighlighter/shBrushVb.js',
            'js/lib/syntaxhighlighter/shBrushXml.js',
            'js/zamboni/storage.js',
            'js/zamboni/files.js',
        ),
        'zamboni/localizers': (
            'js/zamboni/localizers.js',
        ),
        'zamboni/mobile': (
            'js/lib/jquery-1.6.4.js',
            'js/lib/underscore.js',
            'js/lib/jqmobile.js',
            'js/lib/jquery.cookie.js',
            'js/zamboni/apps.js',
            'js/zamboni/browser.js',
            'js/zamboni/init.js',
            'js/impala/capabilities.js',
            'js/lib/format.js',
            'js/zamboni/mobile/buttons.js',
            'js/lib/truncate.js',
            'js/zamboni/truncation.js',
            'js/impala/footer.js',
            'js/zamboni/personas_core.js',
            'js/zamboni/mobile/personas.js',
            'js/zamboni/helpers.js',
            'js/zamboni/mobile/general.js',
            'js/common/ratingwidget.js',
            'js/zamboni/browserid_support.js',
        ),
        'zamboni/stats': (
            'js/lib/jquery-datepicker.js',
            'js/lib/highcharts.src.js',
            'js/impala/stats/csv_keys.js',
            'js/impala/stats/helpers.js',
            'js/impala/stats/dateutils.js',
            'js/impala/stats/manager.js',
            'js/impala/stats/controls.js',
            'js/impala/stats/overview.js',
            'js/impala/stats/topchart.js',
            'js/impala/stats/chart.js',
            'js/impala/stats/table.js',
            'js/impala/stats/stats.js',
        ),
        'zamboni/admin': (
            'js/zamboni/admin.js',
            'js/zamboni/admin_features.js',
            'js/zamboni/admin_validation.js',
        ),
        # This is included when DEBUG is True. Bundle in <head>.
        'debug': (
            'js/debug/less_setup.js',
            'js/lib/less.js',
            'js/debug/less_live.js',
        ),
    }
}
# Caching
# Prefix for cache keys (will prevent collisions when running parallel copies)
CACHE_PREFIX = 'amo:%s:' % build_id
KEY_PREFIX = CACHE_PREFIX
FETCH_BY_ID = True
# Number of seconds a count() query should be cached. Keep it short because
# it's not possible to invalidate these queries.
CACHE_COUNT_TIMEOUT = 60
# To enable pylibmc compression (in bytes)
PYLIBMC_MIN_COMPRESS_LEN = 0  # disabled
# External tools.
JAVA_BIN = '/usr/bin/java'
# Add-on download settings.
PRIVATE_MIRROR_URL = '/_privatefiles'
# File paths
ADDON_ICONS_DEFAULT_PATH = os.path.join(ROOT, 'static', 'img', 'addon-icons')
CA_CERT_BUNDLE_PATH = os.path.join(ROOT, 'apps/amo/certificates/roots.pem')
# URL paths
# paths for images, e.g. mozcdn.com/amo or '/static'
VAMO_URL = 'https://versioncheck.addons.mozilla.org'
NEW_PERSONAS_UPDATE_URL = VAMO_URL + '/%(locale)s/themes/update-check/%(id)d'
# Outgoing URL bouncer
REDIRECT_URL = 'http://outgoing.mozilla.org/v1/'
REDIRECT_SECRET_KEY = ''
PFS_URL = 'https://pfs.mozilla.org/plugins/PluginFinderService.php'
# Allow URLs from these servers. Use full domain names.
REDIRECT_URL_WHITELIST = ['addons.mozilla.org']
# Default to short expiration; check "remember me" to override
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_AGE = 1209600  # 2 weeks, in seconds
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN  # bug 608797
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
# These should have app+locale at the start to avoid redirects
LOGIN_URL = "/users/login"
LOGOUT_URL = "/users/logout"
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
# When logging in with browser ID, a username is created automatically.
# In the case of duplicates, the process is recursive up to this number
# of times.
MAX_GEN_USERNAME_TRIES = 50
# PayPal Settings
PAYPAL_API_VERSION = '78'
PAYPAL_APP_ID = ''
# URLs for various calls.
PAYPAL_API_URL = 'https://api-3t.paypal.com/nvp'
PAYPAL_CGI_URL = 'https://www.paypal.com/cgi-bin/webscr'
PAYPAL_PAY_URL = 'https://svcs.paypal.com/AdaptivePayments/'
PAYPAL_FLOW_URL = 'https://paypal.com/webapps/adaptivepayment/flow/pay'
PAYPAL_PERMISSIONS_URL = 'https://svcs.paypal.com/Permissions/'
PAYPAL_JS_URL = 'https://www.paypalobjects.com/js/external/dg.js'
# Permissions for the live or sandbox servers
PAYPAL_EMBEDDED_AUTH = {'USER': '', 'PASSWORD': '', 'SIGNATURE': ''}
# The PayPal cert that we'll use for checking.
# When None, the Mozilla CA bundle is used to look it up.
PAYPAL_CERT = None
# Contribution limit, one time and monthly
MAX_CONTRIBUTION = 1000
# Email settings
ADDONS_EMAIL = "Mozilla Add-ons <nobody@mozilla.org>"
DEFAULT_FROM_EMAIL = ADDONS_EMAIL
# Email goes to the console by default. s/console/smtp/ for regular delivery
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Please use all lowercase for the blacklist.
EMAIL_BLACKLIST = (
    'nobody@mozilla.org',
)
# Please use all lowercase for the QA whitelist.
EMAIL_QA_WHITELIST = ()
# URL for Add-on Validation FAQ.
VALIDATION_FAQ_URL = ('https://wiki.mozilla.org/AMO:Editors/EditorGuide/'
                      'AddonReviews#Step_2:_Automatic_validation')
# Celery
BROKER_URL = 'amqp://olympia:olympia@localhost:5672/olympia'
BROKER_CONNECTION_TIMEOUT = 0.1
CELERY_RESULT_BACKEND = 'amqp'
CELERY_IGNORE_RESULT = True
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERYD_HIJACK_ROOT_LOGGER = False
CELERY_IMPORTS = ('lib.video.tasks', 'lib.es.management.commands.reindex')
# We have separate celeryds for processing devhub & images as fast as possible
# Some notes:
# - always add routes here instead of @task(queue=<name>)
# - when adding a queue, be sure to update deploy.py so that it gets restarted
CELERY_ROUTES = {
    # Priority.
    # If your tasks need to be run as soon as possible, add them here so they
    # are routed to the priority queue.
    'addons.tasks.index_addons': {'queue': 'priority'},
    'addons.tasks.unindex_addons': {'queue': 'priority'},
    'addons.tasks.save_theme': {'queue': 'priority'},
    'addons.tasks.save_theme_reupload': {'queue': 'priority'},
    'bandwagon.tasks.index_collections': {'queue': 'priority'},
    'bandwagon.tasks.unindex_collections': {'queue': 'priority'},
    'users.tasks.index_users': {'queue': 'priority'},
    'users.tasks.unindex_users': {'queue': 'priority'},
    # Other queues we prioritize below.
    # AMO Devhub.
    'devhub.tasks.validator': {'queue': 'devhub'},
    'devhub.tasks.compatibility_check': {'queue': 'devhub'},
    'devhub.tasks.file_validator': {'queue': 'devhub'},
    # Videos.
    'lib.video.tasks.resize_video': {'queue': 'devhub'},
    # Images.
    'bandwagon.tasks.resize_icon': {'queue': 'images'},
    'users.tasks.resize_photo': {'queue': 'images'},
    'users.tasks.delete_photo': {'queue': 'images'},
    'devhub.tasks.resize_icon': {'queue': 'images'},
    'devhub.tasks.resize_preview': {'queue': 'images'},
    # AMO validator.
    'zadmin.tasks.bulk_validate_file': {'queue': 'limited'},
}
# This is just a place to store these values, you apply them in your
# task decorator, for example:
# @task(time_limit=CELERY_TIME_LIMITS['lib...']['hard'])
# Otherwise your task will use the default settings.
CELERY_TIME_LIMITS = {
    'lib.video.tasks.resize_video': {'soft': 360, 'hard': 600},
    # The reindex management command can take up to 3 hours to run.
    'lib.es.management.commands.reindex': {'soft': 10800, 'hard': 14400},
}
# When testing, we always want tasks to raise exceptions. Good for sanity.
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# Time in seconds before celery.exceptions.SoftTimeLimitExceeded is raised.
# The task can catch that and recover but should exit ASAP. Note that there is
# a separate, shorter timeout for validation tasks.
CELERYD_TASK_SOFT_TIME_LIMIT = 60 * 30
# Fixture Magic
CUSTOM_DUMPS = {
    'addon': {  # ./manage.py custom_dump addon id
        'primary': 'addons.addon',  # This is our reference model.
        'dependents': [  # These are items we wish to dump.
            # Magic turns this into current_version.files.all()[0].
            'current_version.files.all.0',
            'current_version.apps.all.0',
            'addonuser_set.all.0',
        ],
        'order': ('translations.translation',
                  'files.platform', 'addons.addon',
                  'versions.license', 'versions.version', 'files.file'),
        'excludes': {
            'addons.addon': ('_current_version',),
        }
    }
}
# Hera (http://github.com/clouserw/hera)
HERA = [{'USERNAME': '',
         'PASSWORD': '',
         'LOCATION': ''}]
# Logging
LOG_LEVEL = logging.DEBUG
HAS_SYSLOG = True  # syslog is used if HAS_SYSLOG and NOT DEBUG.
SYSLOG_TAG = "http_app_addons"
SYSLOG_TAG2 = "http_app_addons2"
# See PEP 391 and log_settings.py for formatting help. Each section of
# LOGGING will get merged into the corresponding section of
# log_settings.py. Handlers and log levels are set up automatically based
# on LOG_LEVEL and DEBUG unless you set them here. Messages will not
# propagate through a logger unless propagate: True is set.
LOGGING_CONFIG = None
LOGGING = {
    'loggers': {
        'amqplib': {'handlers': ['null']},
        'caching.invalidation': {'handlers': ['null']},
        'caching': {'level': logging.WARNING},
        'elasticsearch': {'handlers': ['null']},
        'rdflib': {'handlers': ['null']},
        'suds': {'handlers': ['null']},
        'z.task': {'level': logging.INFO},
        'z.es': {'level': logging.INFO},
        'z.heka': {'level': logging.INFO},
        's.client': {'level': logging.INFO},
    },
}
# Heka message-routing client configuration (CEF + Sentry/raven plugins).
HEKA_CONF = {
    'logger': 'olympia',
    'plugins': {
        'cef': ('heka_cef.cef_plugin:config_plugin', {
            'syslog_facility': 'LOCAL4',
            'syslog_ident': 'http_app_addons_marketplace',
            'syslog_priority': 'ALERT'}),
        # Sentry accepts messages over UDP, you'll need to
        # configure this URL so that logstash can relay the message
        # properly
        'raven': ('heka_raven.raven_plugin:config_plugin',
                  {'dsn': 'udp://username:password@127.0.0.1:9000/2'})},
    'stream': {
        'class': 'heka.streams.UdpStream',
        'host': '127.0.0.1',
        'port': 5565}}
HEKA = client_from_dict_config(HEKA_CONF)
USE_HEKA_FOR_CEF = False
USE_HEKA_FOR_TASTYPIE = False
CEF_PRODUCT = "amo"
# CSP Settings
CSP_REPORT_URI = '/services/csp/report'
CSP_REPORT_ONLY = True
CSP_DEFAULT_SRC = ("*", "data:")
CSP_SCRIPT_SRC = ("'self'",
                  "https://www.google.com",  # Recaptcha
                  "https://mozorg.cdn.mozilla.net",  # Tabzilla.
                  "https://www.paypalobjects.com",
                  "https://ssl.google-analytics.com",
                  )
CSP_STYLE_SRC = ("*", "'unsafe-inline'")
CSP_OBJECT_SRC = ("'none'",)
CSP_FRAME_SRC = ("https://ssl.google-analytics.com",)
# Should robots.txt deny everything or disallow a calculated list of URLs we
# don't want to be crawled? Default is false, disallow everything.
# Also see http://www.google.com/support/webmasters/bin/answer.py?answer=93710
ENGAGE_ROBOTS = False
# Read-only mode setup.
READ_ONLY = False
# Turn on read-only mode in local_settings.py by putting this line
# at the VERY BOTTOM: read_only_mode(globals())
def read_only_mode(env):
    """Mutate a settings namespace *env* (e.g. ``globals()``) into
    read-only operation.

    Flips READ_ONLY, points the default database at the first slave,
    disables authentication, and inserts the read-only middleware just
    before the CSRF middleware.

    Raises Exception when no SLAVE_DATABASES are configured.
    """
    env['READ_ONLY'] = True

    # Replace the default (master) db with a slave connection.
    slaves = env.get('SLAVE_DATABASES')
    if not slaves:
        raise Exception("We need at least one slave database.")
    env['DATABASES']['default'] = env['DATABASES'][slaves[0]]

    # No sessions without the database, so disable auth.
    env['AUTHENTICATION_BACKENDS'] = ('users.backends.NoAuthForYou',)

    # Add in the read-only middleware before csrf middleware.
    middleware = list(env['MIDDLEWARE_CLASSES'])
    csrf_index = middleware.index('session_csrf.CsrfMiddleware')
    middleware.insert(csrf_index, 'amo.middleware.ReadOnlyMiddleware')
    env['MIDDLEWARE_CLASSES'] = tuple(middleware)
# Uploaded file limits
MAX_ICON_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_IMAGE_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_VIDEO_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_PHOTO_UPLOAD_SIZE = MAX_ICON_UPLOAD_SIZE
MAX_PERSONA_UPLOAD_SIZE = 300 * 1024
MAX_REVIEW_ATTACHMENT_UPLOAD_SIZE = 5 * 1024 * 1024
# RECAPTCHA: overload all three statements to local_settings.py with your keys.
RECAPTCHA_PUBLIC_KEY = ''
RECAPTCHA_PRIVATE_KEY = ''
RECAPTCHA_URL = ('https://www.google.com/recaptcha/api/challenge?k=%s' %
                 RECAPTCHA_PUBLIC_KEY)
RECAPTCHA_AJAX_URL = (
    'https://www.google.com/recaptcha/api/js/recaptcha_ajax.js')
# Send Django signals asynchronously on a background thread.
ASYNC_SIGNALS = True
# Performance notes on add-ons
PERFORMANCE_NOTES = False
# Used to flag slow addons.
# If slowness of addon is THRESHOLD percent slower, show a warning.
PERF_THRESHOLD = 25
# Performance for persona pagination, we hardcode the number of
# available pages when the filter is up-and-coming.
PERSONA_DEFAULT_PAGES = 10
REDIS_LOCATION = os.environ.get('REDIS_LOCATION', 'localhost:6379')
REDIS_BACKENDS = {
    'master': 'redis://{location}?socket_timeout=0.5'.format(
        location=REDIS_LOCATION)}
# Full path or executable path (relative to $PATH) of the spidermonkey js
# binary. It must be a version compatible with amo-validator
SPIDERMONKEY = None
VALIDATE_ADDONS = True
# Number of seconds before celery tasks will abort addon validation:
VALIDATOR_TIMEOUT = 110
# When True include full tracebacks in JSON. This is useful for QA on preview.
EXPOSE_VALIDATOR_TRACEBACKS = False
# Max number of warnings/errors to show from validator. Set to None for no
# limit.
VALIDATOR_MESSAGE_LIMIT = 500
# Feature flags
UNLINK_SITE_STATS = True
# Set to True if we're allowed to use X-SENDFILE.
XSENDFILE = True
XSENDFILE_HEADER = 'X-SENDFILE'
MOBILE_COOKIE = 'mamo'
# If the users's Firefox has a version number greater than this we consider it
# a beta.
MIN_BETA_VERSION = '3.7'
DEFAULT_SUGGESTED_CONTRIBUTION = 5
# Path to `ps`.
PS_BIN = '/bin/ps'
BLOCKLIST_COOKIE = 'BLOCKLIST_v1'
# The maximum file size that is shown inside the file viewer.
FILE_VIEWER_SIZE_LIMIT = 1048576  # 1 MB
# The maximum file size that you can have inside a zip file.
FILE_UNZIP_SIZE_LIMIT = 104857600  # 100 MB
# How long to delay tasks relying on file system to cope with NFS lag.
NFS_LAG_DELAY = 3
# A whitelist of domains that the authentication script will redirect to upon
# successfully logging in or out.
VALID_LOGIN_REDIRECTS = {
    'builder': 'https://builder.addons.mozilla.org',
    'builderstage': 'https://builder-addons.allizom.org',
    'buildertrunk': 'https://builder-addons-dev.allizom.org',
}
# Secret key we send to builder so we can trust responses from the builder.
BUILDER_SECRET_KEY = 'love will tear us apart'
# The builder URL we hit to upgrade jetpacks.
BUILDER_UPGRADE_URL = 'https://addons.mozilla.org/services/builder'
BUILDER_VERSIONS_URL = ('https://builder.addons.mozilla.org/repackage/' +
                        'sdk-versions/')
## elasticsearch
ES_HOSTS = [os.environ.get('ELASTICSEARCH_LOCATION', '127.0.0.1:9200')]
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = {
    'default': 'addons',
    'stats': 'addons_stats',
}
ES_TIMEOUT = 30
ES_DEFAULT_NUM_REPLICAS = 2
ES_DEFAULT_NUM_SHARDS = 5
ES_USE_PLUGINS = False
# Default AMO user id to use for tasks.
TASK_USER_ID = 4757633
# If this is False, tasks and other jobs that send non-critical emails should
# use a fake email backend.
SEND_REAL_EMAIL = False
STATSD_HOST = 'localhost'
STATSD_PORT = 8125
STATSD_PREFIX = 'amo'
# The django statsd client to use, see django-statsd for more.
STATSD_CLIENT = 'django_statsd.clients.normal'
GRAPHITE_HOST = 'localhost'
GRAPHITE_PORT = 2003
GRAPHITE_PREFIX = 'amo'
GRAPHITE_TIMEOUT = 1
# URL to the service that triggers addon performance tests. See devhub.perf.
PERF_TEST_URL = 'http://areweperftestingyet.com/trigger.cgi'
PERF_TEST_TIMEOUT = 5  # seconds
# IP addresses of servers we use as proxies.
KNOWN_PROXIES = []
# Blog URL
DEVELOPER_BLOG_URL = 'http://blog.mozilla.com/addons/feed/'
LOGIN_RATELIMIT_USER = 5
LOGIN_RATELIMIT_ALL_USERS = '15/m'
CSRF_FAILURE_VIEW = 'amo.views.csrf_failure'
# Testing responsiveness without rate limits.
CELERY_DISABLE_RATE_LIMITS = True
# Super temporary. Or Not.
MARKETPLACE = False
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'amo.utils.LocalFileStorage'
# Defined in the site, this is to allow settings patch to work for tests.
NO_ADDONS_MODULES = ()
# Where to find ffmpeg and totem if it's not in the PATH.
FFMPEG_BINARY = 'ffmpeg'
TOTEM_BINARIES = {'thumbnailer': 'totem-video-thumbnailer',
                  'indexer': 'totem-video-indexer'}
VIDEO_LIBRARIES = ['lib.video.totem', 'lib.video.ffmpeg']
# This is the signing server for signing fully reviewed files.
SIGNING_SERVER = ''
# This is the signing server for signing preliminary reviewed files.
PRELIMINARY_SIGNING_SERVER = ''
# And how long we'll give the server to respond.
SIGNING_SERVER_TIMEOUT = 10
# True when the Django app is running from the test suite.
IN_TEST_SUITE = False
# The configuration for the client that speaks to solitude.
# A tuple of the solitude hosts.
SOLITUDE_HOSTS = ('',)
# The oAuth key and secret that solitude needs.
SOLITUDE_KEY = ''
SOLITUDE_SECRET = ''
# The timeout we'll give solitude.
SOLITUDE_TIMEOUT = 10
# The OAuth keys to connect to the solitude host specified above.
SOLITUDE_OAUTH = {'key': '', 'secret': ''}
# Temporary flag to work with navigator.mozPay() on devices that don't
# support it natively.
SIMULATE_NAV_PAY = False
# When the dev. agreement gets updated and you need users to re-accept it
# change this date. You won't want to do this for minor format changes.
# The tuple is passed through to datetime.date, so please use a valid date
# tuple. If the value is None, then it will just not be used at all.
DEV_AGREEMENT_LAST_UPDATED = None
# If you want to allow self-reviews for add-ons/apps, then enable this.
# In production we do not want to allow this.
ALLOW_SELF_REVIEWS = False
# Modify the user-agents we check for in django-mobility
# (Android has since changed its user agent).
MOBILE_USER_AGENTS = ('mozilla.+mobile|android|fennec|iemobile|'
                      'iphone|opera (?:mini|mobi)')
# Credentials for accessing Google Analytics stats.
GOOGLE_ANALYTICS_CREDENTIALS = {}
# Which domain to access GA stats for. If not set, defaults to DOMAIN.
GOOGLE_ANALYTICS_DOMAIN = None
# Used for general web API access.
GOOGLE_API_CREDENTIALS = ''
# Google translate settings.
GOOGLE_TRANSLATE_API_URL = 'https://www.googleapis.com/language/translate/v2'
GOOGLE_TRANSLATE_REDIRECT_URL = (
    'https://translate.google.com/#auto/{lang}/{text}')
# Domain to allow cross-frame requests from for privacy policy and TOS.
BROWSERID_DOMAIN = 'login.persona.org'
# Adjust these settings if you need to use a custom verifier.
BROWSERID_VERIFICATION_URL = 'https://verifier.login.persona.org/verify'
BROWSERID_JS_URL = 'https://login.persona.org/include.js'
# The issuer for unverified Persona email addresses.
# We only trust one issuer to grant us unverified emails.
# If UNVERIFIED_ISSUER is set to None, forceIssuer will not
# be sent to the client or the verifier.
NATIVE_BROWSERID_DOMAIN = 'firefoxos.persona.org'
UNVERIFIED_ISSUER = 'firefoxos.persona.org'
# This is a B2G (or other native) verifier. Adjust accordingly.
NATIVE_BROWSERID_VERIFICATION_URL = ('https://%s/verify'
                                     % NATIVE_BROWSERID_DOMAIN)
NATIVE_BROWSERID_JS_URL = ('https://%s/include.js'
                           % NATIVE_BROWSERID_DOMAIN)
# These domains get `x-frame-options: allow-from` for Privacy Policy / TOS.
LEGAL_XFRAME_ALLOW_FROM = [
    BROWSERID_DOMAIN,
    UNVERIFIED_ISSUER,
    'fxos.login.persona.org',
]
# Language pack fetcher settings
LANGPACK_OWNER_EMAIL = 'addons-team@mozilla.com'
LANGPACK_DOWNLOAD_BASE = 'https://ftp.mozilla.org/pub/mozilla.org/'
LANGPACK_PATH_DEFAULT = '%s/releases/%s/win32/xpi/'
# E.g. https://ftp.mozilla.org/pub/mozilla.org/firefox/releases/23.0/SHA512SUMS
LANGPACK_MANIFEST_PATH = '../../SHA512SUMS'
LANGPACK_MAX_SIZE = 5 * 1024 * 1024  # 5MB should be more than enough
# Basket subscription url for newsletter signups
BASKET_URL = 'https://basket.mozilla.com'
# This saves us when we upgrade jingo-minify (jsocol/jingo-minify@916b054c).
JINGO_MINIFY_USE_STATIC = True
# Monolith settings.
MONOLITH_SERVER = None
MONOLITH_INDEX = 'time_*'
MONOLITH_MAX_DATE_RANGE = 365
# Whitelist IP addresses of the allowed clients that can post email
# through the API.
WHITELISTED_CLIENTS_EMAIL_API = []
# Allow URL style format override. eg. "?format=json"
URL_FORMAT_OVERRIDE = 'format'
# Add on used to collect stats (!technical debt around!)
ADDON_COLLECTOR_ID = 11950
# Connection to the hive server.
HIVE_CONNECTION = {
    'host': 'peach-gw.peach.metrics.scl3.mozilla.com',
    'port': 10000,
    'user': 'amo_prod',
    'password': '',
    'auth_mechanism': 'PLAIN',
}
# Static
STATIC_ROOT = path('site-static')
STATIC_URL = '/static/'
JINGO_MINIFY_ROOT = path('static')
STATICFILES_DIRS = (
    path('static'),
    JINGO_MINIFY_ROOT
)
NETAPP_STORAGE = TMP_PATH
|
|
"""Perform streaming post-alignment preparation -- de-duplication and sorting.
Centralizes a pipelined approach to generating sorted, de-duplicated BAM output
from sequencer results.
samblaster: http://arxiv.org/pdf/1403.7486v1.pdf
biobambam bammarkduplicates: http://arxiv.org/abs/1306.0836
"""
import contextlib
import math
import os
import toolz as tz
from bcbio import bam, broad, utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import coverage
import six
pysam = utils.LazyImport("pysam")
@contextlib.contextmanager
def tobam_cl(data, out_file, is_paired=False):
    """Prepare command line for producing de-duplicated sorted output.

    Yields a (command_line, transactional_output_file) pair; the caller is
    expected to pipe aligner SAM output into the command line while inside
    this context, with file_transaction moving results into place on exit.

    - If no deduplication, sort and prepare a BAM file.
    - If UMIs are configured, mark duplicates and group by UMI.
    - If paired and split/discordant reads are needed, use samblaster and
      prepare discordant outputs.
    - Otherwise, use biobambam's bammarkduplicates.
    """
    do_dedup = _check_dedup(data)
    # NOTE: this local shadows the module-level umi_consensus() function.
    umi_consensus = dd.get_umi_consensus(data)
    with file_transaction(data, out_file) as tx_out_file:
        if not do_dedup:
            yield (sam_to_sortbam_cl(data, tx_out_file), tx_out_file)
        elif umi_consensus:
            yield (_sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file), tx_out_file)
        # samblaster only when lumpy needs split/discordant reads and the
        # reference does not exceed samblaster's contig limit.
        elif is_paired and _need_sr_disc_reads(data) and not _too_many_contigs(dd.get_ref_file(data)):
            sr_file = "%s-sr.bam" % os.path.splitext(out_file)[0]
            disc_file = "%s-disc.bam" % os.path.splitext(out_file)[0]
            with file_transaction(data, sr_file) as tx_sr_file:
                with file_transaction(data, disc_file) as tx_disc_file:
                    yield (samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file),
                           tx_out_file)
        else:
            yield (_biobambam_dedup_sort(data, tx_out_file), tx_out_file)
def _too_many_contigs(ref_file):
    """Flag references whose contig count exceeds samblaster's dedup limit.
    """
    samblaster_limit = 32768
    n_contigs = sum(1 for _ in ref.file_contigs(ref_file))
    return n_contigs >= samblaster_limit
def _need_sr_disc_reads(data):
    """Report whether downstream SV calling needs split/discordant reads.

    samblaster extraction is only worthwhile when lumpy is configured as a
    structural variant caller; otherwise a less resource intensive approach
    that skips extracting these reads is used.
    """
    from bcbio import structural
    configured_callers = structural.get_svcallers(data)
    return "lumpy" in configured_callers
def _get_cores_memory(data, downscale=2):
    """Look up core count and per-core memory, baselined on samtools resources.

    Memory is scaled down by `downscale` because sorting shares machine
    resources with alignment and de-duplication running in the same pipe.
    """
    samtools_resources = config_utils.get_resources("samtools", data["config"])
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    baseline_mem = samtools_resources.get("memory", "2G")
    max_mem = config_utils.adjust_memory(baseline_mem, downscale, "decrease").upper()
    return num_cores, max_mem
def sam_to_sortbam_cl(data, tx_out_file, name_sort=False):
    """Build a samtools command line that sorts streamed SAM into BAM.

    name_sort -- when True, sort by read name (queryname) instead of the
    default coordinate sort.
    """
    samtools = config_utils.get_program("samtools", data["config"])
    cores, mem = _get_cores_memory(data, downscale=2)
    tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
    if name_sort:
        sort_flag = "-n"
    else:
        sort_flag = ""
    # Local names above must match the template placeholders below.
    template = ("{samtools} sort -@ {cores} -m {mem} {sort_flag} "
                "-T {tmp_file} -o {tx_out_file} /dev/stdin")
    return template.format(**locals())
def samblaster_dedup_sort(data, tx_out_file, tx_sr_file, tx_disc_file):
    """Deduplicate and sort with samblaster, produces split read and discordant pair files.

    Builds one shell pipeline: samblaster marks duplicates and tees split-read
    and discordant alignments into process substitutions, each sorted into its
    own BAM by samtools.

    NOTE: command templates are filled via ``format(**locals())``, so the local
    variable names here must match the ``{placeholders}`` in the templates.
    """
    samblaster = config_utils.get_program("samblaster", data["config"])
    samtools = config_utils.get_program("samtools", data["config"])
    tmp_prefix = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
    # Shared template reused for the full, split and discordant outputs; the
    # trailing "-" reads SAM/BAM from stdin.
    tobam_cmd = ("{samtools} sort {sort_opt} -@ {cores} -m {mem} -T {tmp_prefix}-{dext} {out_file} -")
    # full BAM -- associate more memory and cores
    cores, mem = _get_cores_memory(data, downscale=2)
    # Potentially downsample to maximum coverage here if not splitting and whole genome sample
    ds_cmd = None if data.get("align_split") else bam.get_maxcov_downsample_cl(data, "samtools")
    sort_opt = "-n" if data.get("align_split") and dd.get_mark_duplicates(data) else ""
    if ds_cmd:
        # Downsampling consumes the sort output, so redirect into the final file.
        dedup_cmd = "%s %s > %s" % (tobam_cmd.format(out_file="", dext="full", **locals()), ds_cmd, tx_out_file)
    else:
        dedup_cmd = tobam_cmd.format(out_file="-o %s" % tx_out_file, dext="full", **locals())
    # split and discordant BAMs -- give less memory/cores since smaller files
    sort_opt = ""
    cores, mem = _get_cores_memory(data, downscale=4)
    splitter_cmd = tobam_cmd.format(out_file="-o %s" % tx_sr_file, dext="spl", **locals())
    discordant_cmd = tobam_cmd.format(out_file="-o %s" % tx_disc_file, dext="disc", **locals())
    # samblaster 0.1.22 and better require the -M flag for compatibility with bwa-mem
    cmd = ("{samblaster} --addMateTags -M --splitterFile >({splitter_cmd}) --discordantFile >({discordant_cmd}) "
           "| {dedup_cmd}")
    return cmd.format(**locals())
def _biobambam_dedup_sort(data, tx_out_file):
    """Perform streaming deduplication and sorting with biobambam's bamsormadup.

    For split alignments, only sorts (name sort when duplicates will be marked
    later); otherwise runs bamsormadup for combined markdup plus coordinate
    sort. Returns a shell command line reading SAM from stdin.
    """
    samtools = config_utils.get_program("samtools", data["config"])
    cores, mem = _get_cores_memory(data, downscale=2)
    tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
    if data.get("align_split"):
        # Name sort so duplicates can be marked after merging split parts.
        sort_opt = "-n" if data.get("align_split") and _check_dedup(data) else ""
        cmd = "{samtools} sort %s -@ {cores} -m {mem} -O bam -T {tmp_file}-namesort -o {tx_out_file} -" % sort_opt
    else:
        # scale core usage to avoid memory issues with larger WGS samples
        cores = max(1, int(math.ceil(cores * 0.75)))
        ds_cmd = bam.get_maxcov_downsample_cl(data, "bamsormadup")
        bamsormadup = config_utils.get_program("bamsormadup", data)
        cmd = ("{bamsormadup} inputformat=sam threads={cores} tmpfile={tmp_file}-markdup "
               "SO=coordinate %s > {tx_out_file}" % ds_cmd)
    # Templates are filled from local names; keep them in sync.
    return cmd.format(**locals())
def _sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file):
    """Mark duplicates on aligner output and convert to grouped UMIs by position.

    Works with either a separate umi_file or UMI embedded in the read names.
    `umi_consensus` is a file path in the first case; any other value selects
    the embedded-read-name branch.
    """
    tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0]
    jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tmp_file), 1)
    # cores feeds the template below; mem is not referenced here.
    cores, mem = _get_cores_memory(data)
    bamsormadup = config_utils.get_program("bamsormadup", data)
    cmd = ("{bamsormadup} tmpfile={tmp_file}-markdup inputformat=sam threads={cores} outputformat=bam "
           "level=0 SO=coordinate | ")
    # UMIs in a separate file
    if os.path.exists(umi_consensus) and os.path.isfile(umi_consensus):
        cmd += "fgbio {jvm_opts} AnnotateBamWithUmis -i /dev/stdin -f {umi_consensus} -o {tx_out_file}"
    # UMIs embedded in read name
    else:
        cmd += ("%s %s bamtag - | samtools view -b > {tx_out_file}" %
                (utils.get_program_python("umis"),
                 config_utils.get_program("umis", data["config"])))
    return cmd.format(**locals())
def _get_fgbio_jvm_opts(data, tmpdir, scale_factor=None):
    """Assemble the fgbio JVM option string, scaling memory with core count.

    When more cores than `scale_factor` are available, heap settings are
    increased proportionally through the standard memory_adjust machinery.
    Always appends a JVM temporary directory under `tmpdir`.
    """
    cores, mem = _get_cores_memory(data)
    fgbio_resources = config_utils.get_resources("fgbio", data["config"])
    opts = fgbio_resources.get("jvm_opts", ["-Xms750m", "-Xmx4g"])
    if scale_factor and cores > scale_factor:
        adjustment = {"algorithm": {"memory_adjust":
                                    {"direction": "increase",
                                     "magnitude": cores // scale_factor}}}
        opts = config_utils.adjust_opts(opts, adjustment)
    opts = opts + broad.get_default_jvm_opts()
    return " ".join(opts) + " --tmp-dir %s" % tmpdir
def _estimate_fgbio_defaults(avg_coverage):
"""Provide fgbio defaults based on input sequence depth and coverage.
For higher depth/duplication we want to use `--min-reads` to allow
consensus calling in the duplicates:
https://fulcrumgenomics.github.io/fgbio/tools/latest/CallMolecularConsensusReads.html
If duplicated adjusted depth leaves a coverage of 800x or higher
(giving us ~4 reads at 0.5% detection frequency),
then we use `--min-reads 2`, otherwise `--min-reads 1`
"""
out = {}
if avg_coverage >= 800:
out["--min-reads"] = 2
else:
out["--min-reads"] = 1
return out
def correct_umis(data):
    """Correct umis against the whitelist in correct_umi_file.

    http://fulcrumgenomics.github.io/fgbio/tools/latest/CorrectUmis.html
    Runs fgbio CorrectUmis on the sample work BAM, writing and indexing a
    `-umis_corrected` BAM next to the alignment outputs; skips the run when
    the output already exists. Returns the corrected BAM path.
    """
    input_bam = dd.get_work_bam(data)
    output_bam = os.path.join(utils.safe_makedir(os.path.join(os.getcwd(),
                                                              "align", dd.get_sample_name(data))),
                              "%s-umis_corrected%s" % utils.splitext_plus(os.path.basename(input_bam)))
    jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(output_bam), 2)
    # Improve speeds by avoiding compression read/write bottlenecks
    io_opts = "--async-io=true --compression=0"
    umis_whitelist = tz.get_in(["config", "algorithm", "correct_umis"], data)
    fgbio = config_utils.get_program("fgbio", data["config"])
    samtools = config_utils.get_program("samtools", data["config"])
    if not utils.file_exists(output_bam):
        umi_method, umi_tag = _check_umi_type(input_bam)
        # Template filled from locals(); names above must match placeholders.
        cmd = ("unset JAVA_HOME && "
               "{fgbio} {jvm_opts} {io_opts} CorrectUmis "
               "-t {umi_tag} -m 3 -d 1 -x "
               "-U {umis_whitelist} "
               "-i {input_bam} -o /dev/stdout | {samtools} view -bh > {output_bam}")
        do.run(cmd.format(**locals()), "Correcting UMIs")
    bam.index(output_bam, data["config"])
    return output_bam
def umi_consensus(data):
    """Convert UMI grouped reads into fastq pair for re-alignment.

    Pipeline: fgbio GroupReadsByUmi -> consensus calling (duplex or molecular
    depending on UMI type) -> FilterConsensusReads -> bamtofastq. Skipped when
    the first fastq is already up to date relative to the alignment BAM.

    Returns (fastq1, fastq2, average_raw_coverage).
    """
    align_bam = dd.get_work_bam(data)
    if dd.get_umi_type(data) == "dragen":
        # DRAGEN outputs carry UMIs in the RX tag; no BAM inspection needed.
        umi_method = "adjacency"
        umi_tag = "RX"
    else:
        umi_method, umi_tag = _check_umi_type(align_bam)
    base_name = utils.splitext_plus(align_bam)[0]
    f1_out = f"{base_name}-cumi-1.fq.gz"
    f2_out = f"{base_name}-cumi-2.fq.gz"
    f_family_size_histogram = f"{base_name}.family_size_histogram.tsv"
    avg_coverage = coverage.get_average_coverage("rawumi", dd.get_variant_regions(data), data)
    fgbio = config_utils.get_program("fgbio", data["config"])
    bamtofastq = config_utils.get_program("bamtofastq", data["config"])
    if not utils.file_uptodate(f1_out, align_bam):
        with file_transaction(data, f1_out, f2_out, f_family_size_histogram) as (tx_f1_out, tx_f2_out, tx_fhist_out):
            jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tx_f1_out), 2)
            # Improve speeds by avoiding compression read/write bottlenecks
            io_opts = "--async-io=true --compression=0"
            est_options = _estimate_fgbio_defaults(avg_coverage)
            group_opts, cons_opts, filter_opts = _get_fgbio_options(data, est_options, umi_method)
            # Duplex consensus for paired UMIs, molecular consensus otherwise.
            cons_method = "CallDuplexConsensusReads" if umi_method == "paired" else "CallMolecularConsensusReads"
            tempfile = "%s-bamtofastq-tmp" % utils.splitext_plus(f1_out)[0]
            ref_file = dd.get_ref_file(data)
            # Template filled from locals(); names above must match placeholders.
            cmd = ("unset JAVA_HOME && "
                   "{fgbio} {jvm_opts} {io_opts} GroupReadsByUmi {group_opts} -t {umi_tag} -s {umi_method} "
                   "-i {align_bam} -f {tx_fhist_out} | "
                   "{fgbio} {jvm_opts} {io_opts} {cons_method} {cons_opts} --sort-order=:none: "
                   "-i /dev/stdin -o /dev/stdout | "
                   "{fgbio} {jvm_opts} {io_opts} FilterConsensusReads {filter_opts} -r {ref_file} "
                   "-i /dev/stdin -o /dev/stdout | "
                   "{bamtofastq} collate=1 T={tempfile} F={tx_f1_out} F2={tx_f2_out} tags=cD,cM,cE gz=1")
            do.run(cmd.format(**locals()), "UMI consensus fastq generation")
    return f1_out, f2_out, avg_coverage
def _check_umi_type(bam_file):
    """Determine the type of UMI from BAM tags: standard or paired.

    Scans reads until one carries an RX or XC tag; a UMI of the form
    "AAA-TTT" (single dash) indicates paired/duplex UMIs, anything else is
    treated as a standard UMI grouped with the adjacency method.

    Returns (umi_method, umi_tag).

    NOTE(review): if no read has either tag this falls off the end of the
    loop and implicitly returns None, which callers unpacking a 2-tuple
    would turn into a TypeError -- confirm inputs always carry UMI tags.
    """
    with pysam.Samfile(bam_file, "rb") as in_bam:
        for read in in_bam:
            cur_umi = None
            for tag in ["RX", "XC"]:
                try:
                    cur_umi = read.get_tag(tag)
                    break
                except KeyError:
                    # Tag absent on this read; try the next candidate tag.
                    pass
            if cur_umi:
                # `tag` is the loop variable left at the tag that succeeded.
                if "-" in cur_umi and len(cur_umi.split("-")) == 2:
                    return "paired", tag
                else:
                    return "adjacency", tag
def _get_fgbio_options(data, estimated_defaults, umi_method):
    """Get adjustable, through resources, or default options for fgbio.

    Merges three layers of settings, lowest to highest priority: built-in
    defaults, coverage-based estimates (`estimated_defaults`) and
    user-supplied `resources: fgbio: options` flag/value pairs. Returns
    formatted option strings for (GroupReadsByUmi, consensus calling,
    FilterConsensusReads) respectively.
    """
    group_opts = ["--edits", "--min-map-q"]
    cons_opts = ["--min-input-base-quality"]
    # Duplex (paired) consensus calling does not take min/max read counts.
    if umi_method != "paired":
        cons_opts += ["--min-reads", "--max-reads"]
    filter_opts = ["--min-reads", "--min-base-quality", "--max-base-error-rate"]
    defaults = {"--min-reads": "1",
                "--max-reads": "100000",
                "--min-map-q": "1",
                "--min-base-quality": "13",
                "--max-base-error-rate": "0.1",
                "--min-input-base-quality": "2",
                "--edits": "1"}
    defaults.update(estimated_defaults)
    ropts = config_utils.get_resources("fgbio", data["config"]).get("options", [])
    # Bug fix: the original message had no format placeholder, so a failing
    # assertion raised TypeError instead of showing the offending options.
    assert len(ropts) % 2 == 0, "Expect even number of options for fgbio: %s" % ropts
    ropts = dict(tz.partition(2, ropts))
    # Back compatibility for older base quality settings
    if "--min-consensus-base-quality" in ropts:
        ropts["--min-base-quality"] = ropts.pop("--min-consensus-base-quality")
    defaults.update(ropts)
    group_out = " ".join(["%s=%s" % (x, defaults[x]) for x in group_opts])
    cons_out = " ".join(["%s=%s" % (x, defaults[x]) for x in cons_opts])
    filter_out = " ".join(["%s=%s" % (x, defaults[x]) for x in filter_opts])
    if umi_method != "paired":
        # Keep consensus BAMs small when not doing duplex calling.
        cons_out += " --output-per-base-tags=false"
    return group_out, cons_out, filter_out
def _check_dedup(data):
    """Determine whether duplicate marking should run for this sample.

    RNA-seq, small RNA and no-alignment inputs default to skipping
    de-duplication (the back compatible behavior); everything else defaults
    to marking duplicates. An explicit `mark_duplicates` setting in the
    algorithm configuration overrides either default; a legacy string value
    (naming an algorithm) is coerced to True with a warning.
    """
    skip_by_default = (dd.get_analysis(data).lower() in ["rna-seq", "smallrna-seq"]
                       or not dd.get_aligner(data))
    dup_param = utils.get_in(data, ("config", "algorithm", "mark_duplicates"),
                             not skip_by_default)
    if dup_param and isinstance(dup_param, six.string_types):
        logger.info("Warning: bcbio no longer support explicit setting of mark_duplicate algorithm. "
                    "Using best-practice choice based on input data.")
        dup_param = True
    return dup_param
def dedup_bam(in_bam, data):
    """Perform non-stream based duplicate marking of BAM input files using biobambam.

    Writes an indexed `-dedup` BAM under the align/<sample> directory,
    reusing an existing output when present. Returns the input BAM unchanged
    when configuration disables duplicate marking.
    """
    if _check_dedup(data):
        out_file = os.path.join(utils.safe_makedir(os.path.join(os.getcwd(), "align", dd.get_sample_name(data))),
                                "%s-dedup%s" % utils.splitext_plus(os.path.basename(in_bam)))
        if not utils.file_exists(out_file):
            with tx_tmpdir(data) as tmpdir:
                with file_transaction(data, out_file) as tx_out_file:
                    bammarkduplicates = config_utils.get_program("bammarkduplicates", data["config"])
                    base_tmp = os.path.join(tmpdir, os.path.splitext(os.path.basename(tx_out_file))[0])
                    # mem is unused here; biobambam manages its own memory.
                    cores, mem = _get_cores_memory(data, downscale=2)
                    cmd = ("{bammarkduplicates} tmpfile={base_tmp}-markdup "
                           "markthreads={cores} I={in_bam} O={tx_out_file}")
                    do.run(cmd.format(**locals()), f"Mark duplication of {in_bam} with biobambam.")
        bam.index(out_file, data["config"])
        return out_file
    else:
        return in_bam
|
|
""" Provider that returns vector representation of features in a data source.
This is a provider that does not return an image, but rather queries
a data source for raw features and replies with a vector representation
such as GeoJSON. For example, it's possible to retrieve data for
locations of OpenStreetMap points of interest or street centerlines
contained within a tile's boundary.
Many Polymaps (http://polymaps.org) examples use GeoJSON vector data tiles,
which can be effectively created using this provider.
Vector functionality is provided by OGR (http://www.gdal.org/ogr/).
Thank you, Frank Warmerdam.
Currently two serializations and three encodings are supported for a total
of six possible kinds of output with these tile name extensions:
GeoJSON (.geojson):
See http://geojson.org/geojson-spec.html
Arc GeoServices JSON (.arcjson):
See http://www.esri.com/library/whitepapers/pdfs/geoservices-rest-spec.pdf
GeoBSON (.geobson) and Arc GeoServices BSON (.arcbson):
BSON-encoded GeoJSON and Arc JSON, see http://bsonspec.org/#/specification
GeoAMF (.geoamf) and Arc GeoServices AMF (.arcamf):
AMF0-encoded GeoJSON and Arc JSON, see:
http://opensource.adobe.com/wiki/download/attachments/1114283/amf0_spec_121207.pdf
Possible future supported formats might include KML and others. Get in touch
via Github to suggest other formats: http://github.com/migurski/TileStache.
Common parameters:
driver:
String used to identify an OGR driver. Currently, "ESRI Shapefile",
"PostgreSQL", "MySQL", Oracle, Spatialite and "GeoJSON" are supported as
data source drivers, with "postgis" and "shapefile" accepted as synonyms.
Not case-sensitive.
OGR's complete list of potential formats can be found here:
http://www.gdal.org/ogr/ogr_formats.html. Feel free to get in touch via
Github to suggest new formats: http://github.com/migurski/TileStache.
parameters:
Dictionary of parameters for each driver.
PostgreSQL:
"dbname" parameter is required, with name of database.
"host", "user", and "password" are optional connection parameters.
One of "table" or "query" is required, with a table name in the first
case and a complete SQL query in the second.
Shapefile and GeoJSON:
"file" parameter is required, with filesystem path to data file.
properties:
Optional list or dictionary of case-sensitive output property names.
If omitted, all fields from the data source will be included in response.
If a list, treated as a whitelist of field names to include in response.
If a dictionary, treated as a whitelist and re-mapping of field names.
clipped:
Default is true.
Boolean flag for optionally clipping the output geometries to the
bounds of the enclosing tile, or the string value "padded" for clipping
to the bounds of the tile plus 5%. This results in incomplete geometries,
dramatically smaller file sizes, and improves performance and
compatibility with Polymaps (http://polymaps.org).
projected:
Default is false.
Boolean flag for optionally returning geometries in projected rather than
geographic coordinates. Typically this means EPSG:900913 a.k.a. spherical
mercator projection. Stylistically a poor fit for GeoJSON, but useful
when returning Arc GeoServices responses.
precision:
Default is 6.
Optional number of decimal places to use for floating point values.
spacing:
Optional number of tile pixels for spacing geometries in responses. Used
to cut down on the number of returned features by ensuring that only those
features at least this many pixels apart are returned. Order of features
in the data source matters: early features beat out later features.
verbose:
Default is false.
Boolean flag for optionally expanding output with additional whitespace
for readability. Results in larger but more readable GeoJSON responses.
id_property:
Default is None.
Sets the id of the geojson feature to the specified field of the data source.
This can be used, for example, to identify a unique key field for the feature.
Example TileStache provider configuration:
"vector-postgis-points":
{
"provider": {"name": "vector", "driver": "PostgreSQL",
"parameters": {"dbname": "geodata", "user": "geodata",
"table": "planet_osm_point"}}
}
"vector-postgis-lines":
{
"provider": {"name": "vector", "driver": "postgis",
"parameters": {"dbname": "geodata", "user": "geodata",
"table": "planet_osm_line"}}
}
"vector-shapefile-points":
{
"provider": {"name": "vector", "driver": "ESRI Shapefile",
"parameters": {"file": "oakland-uptown-point.shp"},
"properties": ["NAME", "HIGHWAY"]}
}
"vector-shapefile-lines":
{
"provider": {"name": "vector", "driver": "shapefile",
"parameters": {"file": "oakland-uptown-line.shp"},
"properties": {"NAME": "name", "HIGHWAY": "highway"}}
}
"vector-postgis-query":
{
"provider": {"name": "vector", "driver": "PostgreSQL",
"parameters": {"dbname": "geodata", "user": "geodata",
"query": "SELECT osm_id, name, highway, way FROM planet_osm_line WHERE SUBSTR(name, 1, 1) = '1'"}}
}
"vector-sf-streets":
{
"provider": {"name": "vector", "driver": "GeoJSON",
"parameters": {"file": "stclines.json"},
"properties": ["STREETNAME"]}
}
Caveats:
Your data source must have a valid defined projection, or OGR will not know
how to correctly filter and reproject it. Although response tiles are typically
in web (spherical) mercator projection, the actual vector content of responses
is unprojected back to plain WGS84 latitude and longitude.
If you are using PostGIS and spherical mercator a.k.a. SRID 900913,
you can save yourself a world of trouble by using this definition:
http://github.com/straup/postgis-tools/raw/master/spatial_ref_900913-8.3.sql
"""
from re import compile
from urlparse import urlparse, urljoin
try:
from json import JSONEncoder, loads as json_loads
except ImportError:
from simplejson import JSONEncoder, loads as json_loads
try:
from osgeo import ogr, osr
except ImportError:
# At least we'll be able to build the documentation.
pass
from TileStache.Core import KnownUnknown
from TileStache.Geography import getProjectionByName
from Arc import reserialize_to_arc, pyamf_classes
class VectorResponse:
    """ Wrapper class for Vector response that makes it behave like a PIL.Image object.

        TileStache.getTile() expects to be able to save one of these to a buffer.

        Constructor arguments:
        - content: Vector data to be serialized, typically a dictionary.
        - verbose: Boolean flag to expand response for better legibility.
        - precision: number of decimal places kept for floats in JSON output.
    """
    def __init__(self, content, verbose, precision=6):
        self.content = content
        self.verbose = verbose
        self.precision = precision
    def save(self, out, format):
        """ Serialize the wrapped content to the file-like object `out`.

            Supported formats: WKT, GeoJSON, ArcJSON, GeoBSON, ArcBSON,
            GeoAMF, ArcAMF. Raises KnownUnknown for anything else.
        """
        #
        # Serialize
        #
        if format == 'WKT':
            # Write only the coordinate reference system, as well-known text.
            if 'wkt' in self.content['crs']:
                out.write(self.content['crs']['wkt'])
            else:
                out.write(_sref_4326().ExportToWkt())
            return
        if format in ('GeoJSON', 'GeoBSON', 'GeoAMF'):
            content = self.content
            if 'wkt' in content['crs']:
                # Point at the companion .wkt tile rather than inlining WKT.
                content['crs'] = {'type': 'link', 'properties': {'href': '0.wkt', 'type': 'ogcwkt'}}
            else:
                del content['crs']
        elif format in ('ArcJSON', 'ArcBSON', 'ArcAMF'):
            content = reserialize_to_arc(self.content, format == 'ArcAMF')
        else:
            raise KnownUnknown('Vector response only saves .geojson, .arcjson, .geobson, .arcbson, .geoamf, .arcamf and .wkt tiles, not "%s"' % format)
        #
        # Encode
        #
        if format in ('GeoJSON', 'ArcJSON'):
            indent = self.verbose and 2 or None
            encoded = JSONEncoder(indent=indent).iterencode(content)
            float_pat = compile(r'^-?\d+\.\d+$')
            # Stream the JSON atoms, rewriting bare floats to the configured
            # precision; all other atoms pass through unchanged.
            for atom in encoded:
                if float_pat.match(atom):
                    out.write(('%%.%if' % self.precision) % float(atom))
                else:
                    out.write(atom)
        elif format in ('GeoBSON', 'ArcBSON'):
            # Imported lazily so bson is only required when actually used.
            import bson
            encoded = bson.dumps(content)
            out.write(encoded)
        elif format in ('GeoAMF', 'ArcAMF'):
            import pyamf
            for class_name in pyamf_classes.items():
                pyamf.register_class(*class_name)
            encoded = pyamf.encode(content, 0).read()
            out.write(encoded)
def _sref_4326():
    """ Build an OGR spatial reference for plain WGS84 latitude/longitude.
    """
    wgs84 = getProjectionByName('WGS84')
    sref = osr.SpatialReference()
    sref.ImportFromProj4(wgs84.srs)
    return sref
def _tile_perimeter(coord, projection, padded):
""" Get a tile's outer edge for a coordinate and a projection.
Returns a list of 17 (x, y) coordinates corresponding to a clockwise
circumambulation of a tile boundary in a given projection. Projection
is like those found in TileStache.Geography, used for tile output.
If padded argument is True, pad bbox by 5% on all sides.
"""
if padded:
ul = projection.coordinateProj(coord.left(0.05).up(0.05))
lr = projection.coordinateProj(coord.down(1.05).right(1.05))
else:
ul = projection.coordinateProj(coord)
lr = projection.coordinateProj(coord.right().down())
xmin, ymin, xmax, ymax = ul.x, ul.y, lr.x, lr.y
xspan, yspan = xmax - xmin, ymax - ymin
perimeter = [
(xmin, ymin),
(xmin + 1 * xspan/4, ymin),
(xmin + 2 * xspan/4, ymin),
(xmin + 3 * xspan/4, ymin),
(xmax, ymin),
(xmax, ymin + 1 * yspan/4),
(xmax, ymin + 2 * yspan/4),
(xmax, ymin + 3 * yspan/4),
(xmax, ymax),
(xmax - 1 * xspan/4, ymax),
(xmax - 2 * xspan/4, ymax),
(xmax - 3 * xspan/4, ymax),
(xmin, ymax),
(xmin, ymax - 1 * yspan/4),
(xmin, ymax - 2 * yspan/4),
(xmin, ymax - 3 * yspan/4),
(xmin, ymin)
]
return perimeter
def _tile_perimeter_width(coord, projection):
    """ Get the width in projected coordinates of the coordinate tile polygon.

        Uses _tile_perimeter(); the width is the x distance from the
        upper-left corner (index 0) to the lower-right corner (index 8).
    """
    corners = _tile_perimeter(coord, projection, False)
    upper_left, lower_right = corners[0], corners[8]
    return lower_right[0] - upper_left[0]
def _tile_perimeter_geom(coord, projection, padded):
    """ Get an OGR Geometry object for a coordinate tile polygon.

        Uses _tile_perimeter().
    """
    perimeter = _tile_perimeter(coord, projection, padded)
    # The ring is already closed: _tile_perimeter repeats the first point last.
    wkt = 'POLYGON((%s))' % ', '.join(['%.3f %.3f' % xy for xy in perimeter])
    geom = ogr.CreateGeometryFromWkt(wkt)
    # Tag the polygon with the projection's spatial reference so later
    # TransformTo() calls know the source coordinate system.
    ref = osr.SpatialReference()
    ref.ImportFromProj4(projection.srs)
    geom.AssignSpatialReference(ref)
    return geom
def _feature_properties(feature, layer_definition, whitelist=None):
    """ Returns a dictionary of feature properties for a feature in a layer.

        Third argument is an optional list or dictionary of properties to
        whitelist by case-sensitive name - leave it None to include everything.
        A dictionary will cause property names to be re-mapped.

        OGR property types:
        OFTInteger (0), OFTIntegerList (1), OFTReal (2), OFTRealList (3),
        OFTString (4), OFTStringList (5), OFTWideString (6), OFTWideStringList (7),
        OFTBinary (8), OFTDate (9), OFTTime (10), OFTDateTime (11).
    """
    properties = {}
    okay_types = ogr.OFTInteger, ogr.OFTReal, ogr.OFTString, ogr.OFTWideString
    for index in range(layer_definition.GetFieldCount()):
        field_definition = layer_definition.GetFieldDefn(index)
        field_type = field_definition.GetType()
        if field_type not in okay_types:
            try:
                name = [oft for oft in dir(ogr) if oft.startswith('OFT') and getattr(ogr, oft) == field_type][0]
            except IndexError:
                raise KnownUnknown("Found an OGR field type I've never even seen: %d" % field_type)
            else:
                raise KnownUnknown("Found an OGR field type I don't know what to do with: ogr.%s" % name)
        name = field_definition.GetNameRef()
        if type(whitelist) in (list, dict) and name not in whitelist:
            continue
        # Bug fix: the old `whitelist[name] or name` idiom fell back to the
        # original field name whenever the re-mapped value was falsy (e.g.
        # an empty string). Use an explicit branch; also avoid shadowing the
        # `property` builtin.
        if type(whitelist) is dict:
            out_name = whitelist[name]
        else:
            out_name = name
        properties[out_name] = feature.GetField(name)
    return properties
def _append_with_delim(s, delim, data, key):
if key in data:
return s + delim + str(data[key])
else:
return s
def _open_layer(driver_name, parameters, dirpath):
    """ Open a layer, return it and its datasource.

        Dirpath comes from configuration, and is used to locate files.
        Raises KnownUnknown for unrecognized drivers, missing required
        parameters, or datasources that cannot be opened.
    """
    #
    # Set up the driver
    #
    # Synonyms map onto canonical OGR driver names; matching is
    # case-insensitive.
    okay_drivers = {'postgis': 'PostgreSQL', 'esri shapefile': 'ESRI Shapefile',
                    'postgresql': 'PostgreSQL', 'shapefile': 'ESRI Shapefile',
                    'geojson': 'GeoJSON', 'spatialite': 'SQLite', 'oracle': 'OCI', 'mysql': 'MySQL'}
    if driver_name.lower() not in okay_drivers:
        raise KnownUnknown('Got a driver type Vector doesn\'t understand: "%s". Need one of %s.' % (driver_name, ', '.join(okay_drivers.keys())))
    driver_name = okay_drivers[driver_name.lower()]
    driver = ogr.GetDriverByName(str(driver_name))
    #
    # Set up the datasource
    #
    if driver_name == 'PostgreSQL':
        if 'dbname' not in parameters:
            raise KnownUnknown('Need at least a "dbname" parameter for postgis')
        # Build a "PG:key='value' ..." connection string from optional parts.
        conn_parts = []
        for part in ('dbname', 'user', 'host', 'password', 'port'):
            if part in parameters:
                conn_parts.append("%s='%s'" % (part, parameters[part]))
        source_name = 'PG:' + ' '.join(conn_parts)
    elif driver_name == 'MySQL':
        if 'dbname' not in parameters:
            raise KnownUnknown('Need a "dbname" parameter for MySQL')
        if 'table' not in parameters:
            raise KnownUnknown('Need a "table" parameter for MySQL')
        conn_parts = []
        for part in ('host', 'port', 'user', 'password'):
            if part in parameters:
                conn_parts.append("%s=%s" % (part, parameters[part]))
        source_name = 'MySql:' + parameters["dbname"] + "," + ','.join(conn_parts) + ",tables=" + parameters['table']
    elif driver_name == 'OCI':
        if 'host' not in parameters:
            raise KnownUnknown('Need a "host" parameter for oracle')
        if 'table' not in parameters:
            raise KnownUnknown('Need a "table" parameter for oracle')
        # Assemble OCI:user/password@host:port/dbname:table incrementally,
        # skipping any optional pieces that are absent.
        source_name = 'OCI:'
        source_name = _append_with_delim(source_name, '', parameters, 'user')
        source_name = _append_with_delim(source_name, '/', parameters, 'password')
        if 'user' in parameters:
            source_name = source_name + '@'
        source_name = source_name + parameters['host']
        source_name = _append_with_delim(source_name, ':', parameters, 'port')
        source_name = _append_with_delim(source_name, '/', parameters, 'dbname')
        source_name = source_name + ":" + parameters['table']
    elif driver_name in ('ESRI Shapefile', 'GeoJSON', 'SQLite'):
        if 'file' not in parameters:
            raise KnownUnknown('Need at least a "file" parameter for a shapefile')
        # Resolve the file relative to the configuration directory.
        file_href = urljoin(dirpath, parameters['file'])
        scheme, h, file_path, q, p, f = urlparse(file_href)
        if scheme not in ('file', ''):
            raise KnownUnknown('Shapefiles need to be local, not %s' % file_href)
        source_name = file_path
    datasource = driver.Open(str(source_name))
    if datasource is None:
        raise KnownUnknown('Couldn\'t open datasource %s' % source_name)
    #
    # Set up the layer
    #
    if driver_name == 'PostgreSQL' or driver_name == 'OCI' or driver_name == 'MySQL':
        if 'query' in parameters:
            layer = datasource.ExecuteSQL(str(parameters['query']))
        elif 'table' in parameters:
            layer = datasource.GetLayerByName(str(parameters['table']))
        else:
            raise KnownUnknown('Need at least a "query" or "table" parameter for postgis or oracle')
    elif driver_name == 'SQLite':
        layer = datasource.GetLayerByName(str(parameters['layer']))
    else:
        layer = datasource.GetLayer(0)
    if layer.GetSpatialRef() is None and driver_name != 'SQLite':
        raise KnownUnknown('Couldn\'t get a layer from data source %s' % source_name)
    #
    # Return the layer and the datasource.
    #
    # Technically, the datasource is no longer needed
    # but layer segfaults when it falls out of scope.
    #
    return layer, datasource
def _get_features(coord, properties, projection, layer, clipped, projected, spacing, id_property):
    """ Return a list of features in an OGR layer with properties in GeoJSON form.

        Optionally clip features to coordinate bounding box, and optionally
        limit returned features to only those separated by number of pixels
        given as spacing. Feature order in the source matters for spacing:
        earlier features claim space and mask out later ones.
    """
    #
    # Prepare output spatial reference - always WGS84.
    #
    if projected:
        output_sref = osr.SpatialReference()
        output_sref.ImportFromProj4(projection.srs)
    else:
        output_sref = _sref_4326()
    #
    # Load layer information
    #
    definition = layer.GetLayerDefn()
    layer_sref = layer.GetSpatialRef()
    if layer_sref == None:
        layer_sref = _sref_4326()
    #
    # Spatially filter the layer
    #
    # clipped == 'padded' expands the tile bbox 5% on all sides.
    bbox = _tile_perimeter_geom(coord, projection, clipped == 'padded')
    bbox.TransformTo(layer_sref)
    layer.SetSpatialFilter(bbox)
    features = []
    mask = None
    if spacing is not None:
        # Convert pixel spacing to projected units, assuming 256-pixel tiles.
        buffer = spacing * _tile_perimeter_width(coord, projection) / 256.
    for feature in layer:
        geometry = feature.geometry().Clone()
        if not geometry.Intersect(bbox):
            continue
        # Drop features that fall within the spacing mask of earlier ones.
        if mask and geometry.Intersect(mask):
            continue
        if clipped:
            geometry = geometry.Intersection(bbox)
            if geometry is None:
                # may indicate a TopologyException
                continue
        # mask out subsequent features if spacing is defined
        if mask and buffer:
            mask = geometry.Buffer(buffer, 2).Union(mask)
        elif spacing is not None:
            mask = geometry.Buffer(buffer, 2)
        geometry.AssignSpatialReference(layer_sref)
        geometry.TransformTo(output_sref)
        geom = json_loads(geometry.ExportToJson())
        prop = _feature_properties(feature, definition, properties)
        geojson_feature = {'type': 'Feature', 'properties': prop, 'geometry': geom}
        # Promote the configured property to the GeoJSON feature id.
        if id_property != None and id_property in prop:
            geojson_feature['id'] = prop[id_property]
        features.append(geojson_feature)
    return features
class Provider:
    """ Vector Provider for OGR datasources.

        See module documentation for explanation of constructor arguments.
    """
    def __init__(self, layer, driver, parameters, clipped, verbose, projected, spacing, properties, precision, id_property):
        self.layer = layer
        self.driver = driver
        self.clipped = clipped
        self.verbose = verbose
        self.projected = projected
        self.spacing = spacing
        self.parameters = parameters
        self.properties = properties
        self.precision = precision
        self.id_property = id_property
    def renderTile(self, width, height, srs, coord):
        """ Render a single tile, return a VectorResponse instance.

            width/height are unused here; vector output is not rasterized.
        """
        layer, ds = _open_layer(self.driver, self.parameters, self.layer.config.dirpath)
        features = _get_features(coord, self.properties, self.layer.projection, layer, self.clipped, self.projected, self.spacing, self.id_property)
        response = {'type': 'FeatureCollection', 'features': features}
        if self.projected:
            # Projected output carries its CRS as WKT for later .wkt tiles.
            sref = osr.SpatialReference()
            sref.ImportFromProj4(self.layer.projection.srs)
            response['crs'] = {'wkt': sref.ExportToWkt()}
            # 102113 is the ESRI well-known id for spherical mercator.
            if srs == getProjectionByName('spherical mercator').srs:
                response['crs']['wkid'] = 102113
        else:
            response['crs'] = {'srid': 4326, 'wkid': 4326}
        return VectorResponse(response, self.verbose, self.precision)
    def getTypeByExtension(self, extension):
        """ Get mime-type and format by file extension.

            Accepts geojson, arcjson, geobson, arcbson, geoamf, arcamf
            and wkt; raises KnownUnknown for anything else.
        """
        if extension.lower() == 'geojson':
            return 'text/json', 'GeoJSON'
        elif extension.lower() == 'arcjson':
            return 'text/json', 'ArcJSON'
        elif extension.lower() == 'geobson':
            return 'application/x-bson', 'GeoBSON'
        elif extension.lower() == 'arcbson':
            return 'application/x-bson', 'ArcBSON'
        elif extension.lower() == 'geoamf':
            return 'application/x-amf', 'GeoAMF'
        elif extension.lower() == 'arcamf':
            return 'application/x-amf', 'ArcAMF'
        elif extension.lower() == 'wkt':
            return 'text/x-wkt', 'WKT'
        raise KnownUnknown('Vector Provider only makes .geojson, .arcjson, .geobson, .arcbson, .geoamf, .arcamf and .wkt tiles, not "%s"' % extension)
|
|
import numpy as np
from scipy.linalg import lstsq
from scipy.optimize import leastsq
from coordinateSystems import GeographicSystem
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
def travel_time(X, X_ctr, c, t0=0.0, get_r=False):
    """ Straight-line signal travel times from sources to receivers.

    Units are meters, seconds.

    Parameters
    ----------
    X : (N, 3) array of source locations.
    X_ctr : (M, 3) array of receiver locations.
    c : propagation speed (m/s).
    t0 : fixed additional time added to every travel time.
    get_r : if truthy, return ``(time, ranges)``; otherwise just ``time``.

    Returns
    -------
    time : (M, N) array of travel times; with *get_r* also the (M, N)
        array of ranges.
    """
    # Pairwise Euclidean distances via broadcasting; transpose puts
    # receivers on the leading axis.
    ranges = (np.sum((X[:, np.newaxis] - X_ctr)**2, axis=2)**0.5).T
    time = t0 + ranges / c
    # Truthiness test instead of the original "== True"/"== False" pair,
    # which silently fell through (returning None) for truthy non-bool flags.
    if get_r:
        return time, ranges
    return time
def received_power(power_emit, r, wavelength=4.762, recv_gain=1.0):
    """ Power received over range *r* after free-space path loss.

    Defaults correspond to unity receive gain and a 63 MHz wave.
    Units: watts, meters.
    """
    denom = 4.0 * np.pi * r
    loss = (wavelength / denom) * (wavelength / denom)
    return power_emit * loss * recv_gain
def precalc_station_terms(stations_ECEF):
    """ Precompute per-master-station geometry terms.

    Given an (N_stn, 3) array of station locations, return ``(dxvec, drsq)``
    with shapes (N, N, 3) and (N, N): row m of each holds the terms for
    master station m (station positions minus station m's position, and
    differences of squared ranges from the origin).
    """
    offsets = stations_ECEF - stations_ECEF[:, np.newaxis]
    sq_ranges = np.sum(stations_ECEF ** 2, axis=1)
    return offsets, sq_ranges - sq_ranges[:, np.newaxis]
def linear_first_guess(t_i, dxvec, drsq, c=3.0e8):
""" Given a vector of (N_stn) arrival times,
calcluate a first-guess solution for the source location.
Return f=(x, y, z, t), the source retrieval locations.
"""
g = 0.5*(drsq - (c**2)*(t_i**2))
K = np.vstack((dxvec.T, -c*t_i)).T
f, residuals, rank, singular = lstsq(K, g)
return f
def predict(p, stations_ECEF2, c=3.0e8):
    """ Predicted relative arrival times (seconds) for a source fit.

    p = (x, y, z, r0) with r0 the range (meters) from the reference
    station; returns (r0 + range_i) / c for every station.
    """
    delta = p[:3] - stations_ECEF2
    station_ranges = (np.sum(delta * delta, axis=1)) ** 0.5
    return (p[3] + station_ranges) / c
def residuals(p, t_i, stations_ECEF2):
    """ Observed-minus-predicted arrival times for least-squares fitting. """
    predicted = predict(p, stations_ECEF2)
    return t_i - predicted
def dfunc(p, t_i, stations_ECEF2, c=3.0e8):
    """ Jacobian of ``residuals`` w.r.t. p, column-major (col_deriv=1).

    Row k holds d(residual_i)/d(p_k). t_i is unused but kept for the
    scipy.optimize.leastsq callback signature.
    """
    delta = p[:3] - stations_ECEF2
    station_ranges = (np.sum(delta * delta, axis=1)) ** 0.5
    d_spatial = delta.T / (c * station_ranges)
    n_stations = np.shape(stations_ECEF2)[0]
    d_time = np.full(n_stations, 1. / c)
    return -np.vstack((d_spatial, d_time))
def gen_retrieval_math(i, selection, t_all, t_mins, dxvec, drsq, center_ECEF,
                       stations_ECEF, dt_rms, min_stations=5,
                       max_z_guess=25.0e3):
    """ Solve one source retrieval by nonlinear least squares.

    t_all is a N_stations x N_points masked array of arrival times at
    each station.
    t_mins is an N-point array of the index of the first unmasked station
    to receive a signal.
    selection is the boolean mask of stations participating in this solve.
    center_ECEF, min_stations and max_z_guess are accepted for interface
    compatibility but are unused here (the altitude clamp below
    hard-codes 25 km rather than using max_z_guess).
    Returns plsq = (x, y, z, r0, reduced_chi2) where x/y/z are ECEF
    meters and r0 appears to be the range from the reference station
    (callers divide it by c) -- confirm against gen_retrieval_full.
    """
    # Reference station: first unmasked arrival for this source.
    m = t_mins[i]
    stations_ECEF2=stations_ECEF[selection]
    # Make a linear first guess
    p0 = linear_first_guess(np.array(t_all[:,i][selection]-t_all[m,i]),
                            dxvec[m][selection],
                            drsq[m][selection])
    # Arrival times relative to the reference station.
    t_i =t_all[:,i][selection]-t_all[m,i]
    # Checking altitude in lat/lon/alt from local coordinates: first
    # guesses below ground or above 25 km are reset to a nominal 7 km
    # altitude before the nonlinear solve.
    latlon = np.array(GeographicSystem().fromECEF(p0[0], p0[1],p0[2]))
    if (latlon[2]<0) | (latlon[2]>25000):
        latlon[2] = 7000
        new = GeographicSystem().toECEF(latlon[0], latlon[1], latlon[2])
        p0[:3]=np.array(new)
    plsq = np.array([np.nan]*5)
    # Levenberg-Marquardt refinement with the analytic Jacobian
    # (dfunc, column-major per col_deriv=1).
    plsq[:4], cov, infodict, mesg,ier = leastsq(residuals, p0,
                            args=(t_i, stations_ECEF2),
                            Dfun=dfunc,col_deriv=1,full_output=True)
    # Reduced chi-square: residual sum of squares over timing variance
    # times the degrees of freedom (N_stations - 4 fit parameters).
    plsq[4] = np.sum(infodict['fvec']*infodict['fvec'])/(
                     dt_rms*dt_rms*(float(np.shape(stations_ECEF2)[0]-4)))
    return plsq
def gen_retrieval(t_all, t_mins, dxvec, drsq, center_ECEF, stations_ECEF,
                  dt_rms, min_stations=5, max_z_guess=25.0e3):
    """ t_all is a N_stations x N_points masked array of arrival times at
    each station.
    t_min is an N-point array of the index of the first unmasked station
    to receive a signal
    center_ECEF for the altitude check
    This is a generator function, which emits a stream of nonlinear
    least-squares solutions: one 5-element array (x, y, z, r0,
    reduced_chi2) per source. Sources received by fewer than
    min_stations stations yield an all-NaN array instead.
    """
    for i in range(t_all.shape[1]):
        # True for stations that actually received this source.
        selection=~np.ma.getmask(t_all[:,i])
        if np.all(selection == True):
            # No station masked: getmask may have returned the scalar
            # nomask, so expand to a full boolean vector before indexing.
            # NOTE(review): this branch solves even when the total station
            # count is below min_stations -- confirm that is intended.
            selection = np.array([True]*len(t_all[:,i]))
            yield gen_retrieval_math(i, selection, t_all, t_mins, dxvec, drsq,
                                     center_ECEF, stations_ECEF, dt_rms,
                                     min_stations, max_z_guess=25.0e3)
        elif np.sum(selection)>=min_stations:
            yield gen_retrieval_math(i, selection, t_all, t_mins, dxvec, drsq,
                                     center_ECEF, stations_ECEF, dt_rms,
                                     min_stations, max_z_guess=25.0e3)
        else:
            # Too few receiving stations for the 4-parameter solve.
            yield np.array([np.nan]*5)
def gen_retrieval_full(t_all, t_mins, dxvec, drsq, center_ECEF, stations_ECEF,
                       dt_rms, c0, min_stations=5, max_z_guess=25.0e3):
    """ t_all is a N_stations x N_points masked array of arrival times at
    each station.
    t_min is an N-point array of the index of the first unmasked station
    to receive a signal
    center_ECEF for the altitude check
    This is a generator function, which emits a stream of nonlinear
    least-squares solutions.
    Timing comes out of least-squares function as t*c from the initial
    station.
    Each yielded array has 7 entries: (x, y, z, r0, reduced_chi2,
    absolute source time, number of contributing stations); entries
    that cannot be computed are left NaN.
    """
    for i in range(t_all.shape[1]):
        selection=~np.ma.getmask(t_all[:,i])
        plsq = np.array([np.nan]*7)
        if np.all(selection == True):
            # No station masked: getmask may have returned the scalar
            # nomask, so expand to a full boolean vector before indexing.
            selection = np.array([True]*len(t_all[:,i]))
            plsq[:5] = gen_retrieval_math(i, selection, t_all, t_mins, dxvec,
                            drsq, center_ECEF, stations_ECEF, dt_rms,
                            min_stations, max_z_guess=25.0e3)
            # Absolute emission time: flight time to the reference station
            # (r0/c0) plus that station's recorded arrival time.
            plsq[5] = plsq[3]/c0 + t_all[t_mins[i],i]
            plsq[6] = np.shape(stations_ECEF[selection])[0]
            yield plsq
        elif np.sum(selection)>=min_stations:
            plsq[:5] = gen_retrieval_math(i, selection, t_all, t_mins, dxvec,
                            drsq, center_ECEF, stations_ECEF, dt_rms,
                            min_stations, max_z_guess=25.0e3)
            plsq[5] = plsq[3]/c0 + t_all[t_mins[i],i]
            plsq[6] = np.shape(stations_ECEF[selection])[0]
            yield plsq
        else:
            # Too few stations to solve: record only the station count.
            plsq[6] = np.shape(stations_ECEF[selection])[0]
            yield plsq
def eigsorted(cov):
    """ Eigen-decomposition of a symmetric matrix, sorted descending.

    Returns (eigenvalues, eigenvectors); eigenvector columns match the
    eigenvalue order.
    """
    eigenvalues, eigenvectors = np.linalg.eigh(cov)
    descending = np.argsort(eigenvalues)[::-1]
    return eigenvalues[descending], eigenvectors[:, descending]
def array_from_generator2(generator, rows):
    """ Collect up to *rows* values from *generator* into a numpy array.

    Stops early (without error) if the generator is exhausted before
    *rows* values have been produced.
    """
    # islice replaces the manual loop with its try/except StopIteration:
    # it enforces the row limit and stops cleanly at exhaustion.
    from itertools import islice
    return np.array(list(islice(generator, rows)))
def black_boxtesting(x,y,z,n,
        stations_local,ordered_threshs,stations_ecef,center_ecef,
        tanps,
        c0,dt_rms,tanp,projl,chi2_filter,min_stations=5,ntsd=3):
    """ This function incorporates most of the Monte Carlo functions and calls
    into one big block of code.
    x,y,z are the source location in the local tangent plane (m)
    n is the number of iterations
    stations_local is the the (N-stations, 3) array of station locations
    in the local tangent plane
    ordered_threshs is the N-station array of thresholds in the same order
    as the station arrays (in dBm).
    stations_ecef is (N,3) array in ECEF coordinates
    center_ecef is just the center location of the network, just easier
    to pass into the function separately to save some calculation
    c0 is the speed of light
    dt_rms is the standard deviation of the timing error (Gaussian, in s)
    tanp is the tangent plane object
    projl is the map projection object
    chi2_filter is the maximum allowed reduced chi2 filter for the
    calculation (use at most 5)
    min_stations is the minimum number of stations required to receive
    a source. This must be at least 5, can be higher to filter out more
    poor solutions
    Returned are the w,h,theta values of the covariance ellipses in one
    array and the standard deviation of the altitude solutions separately.
    Covariance ellipses are by default set at 3 standard deviations (ntsd).
    """
    # n identical copies of the true source position.
    points = np.array([np.zeros(n)+x, np.zeros(n)+y, np.zeros(n)+z]).T
    powers = np.empty(n)
    # For the theoretical distribution: draw each source power as the
    # max of 2000 inverse-uniform samples (heavy-tailed distribution).
    for i in range(len(powers)):
        powers[i] = np.max(1./np.random.uniform(0,1000,2000))
    # Calculate distance and power retrieved at each station and mask
    # the stations which have higher thresholds than the retrieved power
    points_f_ecef = (tanp.fromLocal(points.T)).T
    dt, ran = travel_time(points, stations_local, c0, get_r=True)
    pwr = received_power(powers, ran)
    # Threshold test in dBm (power is in watts, hence /1e-3).
    masking = 10.*np.log10(pwr/1e-3) < ordered_threshs[:,np.newaxis]
    masking2 = np.empty_like(masking)
    # Also mask stations for which the source is below their local horizon.
    for i in range(len(stations_ecef[:,0])):
        masking2[i] = tanps[i].toLocal(points_f_ecef.T)[2]<0
    masking = masking | masking2
    pwr = np.ma.masked_where(masking, pwr)
    dt = np.ma.masked_where(masking, dt)
    ran = np.ma.masked_where(masking, ran)
    # Add error to the retreived times
    dt_e = dt + np.random.normal(scale=dt_rms, size=np.shape(dt))
    dt_mins = np.argmin(dt_e, axis=0)
    # Precalculate some terms in ecef (fastest calculation)
    points_f_ecef = (tanp.fromLocal(points.T)).T
    full_dxvec, full_drsq = precalc_station_terms(stations_ecef)
    # Run the retrieved locations calculation
    # gen_retrieval returns a tuple of four positions, x,y,z,t.
    # NOTE(review): dtype is unused since the np.fromiter call below was
    # commented out in favor of array_from_generator2.
    dtype=[('x', float), ('y', float), ('z', float), ('t', float),
           ('chi2', float)]
    # Prime the generator function - pauses at the first yield statement.
    point_gen = gen_retrieval(dt_e, dt_mins, full_dxvec, full_drsq,
                              center_ecef, stations_ecef, dt_rms,
                              min_stations)
    # Suck up the values produced by the generator, produce named array.
    retrieved_locations = array_from_generator2(point_gen,rows=n)
    # retrieved_locations = np.fromiter(point_gen, dtype=dtype)
    # Keep (x, y, z, chi2); the 4th solution element (d, the range/time
    # term) is discarded here.
    retrieved_locations = np.array([(a,b,c,e) for (a,b,c,d,e) in
                                    retrieved_locations])
    chi2 = retrieved_locations[:,3]
    retrieved_locations = retrieved_locations[:,:3]
    retrieved_locations = np.ma.masked_invalid(retrieved_locations)
    #Convert back to local tangent plane
    # NOTE(review): soluts is computed but never used afterwards.
    soluts = tanp.toLocal(retrieved_locations.T)
    proj_soluts = projl.fromECEF(retrieved_locations[:,0],
                                 retrieved_locations[:,1],
                                 retrieved_locations[:,2])
    # Keep only solutions above ground in the projected frame.
    good = proj_soluts[2] > 0
    proj_soluts = (proj_soluts[0][good],proj_soluts[1][good],
                   proj_soluts[2][good])
    proj_soluts = np.ma.masked_invalid(proj_soluts)
    # Horizontal scatter of chi2-filtered solutions -> covariance ellipse.
    cov = np.cov(proj_soluts[0][chi2[good]<chi2_filter], proj_soluts[1][chi2[good]<chi2_filter])
    vals, vecs = eigsorted(cov)
    # Ellipse orientation from the principal eigenvector; axes scaled to
    # ntsd standard deviations.
    theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
    w, h = 2 * ntsd * np.sqrt(vals)
    return np.array([w,h,theta]),np.std(proj_soluts[2][chi2[good]<chi2_filter]
                                        )
|
|
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Referenced https://github.com/bamos/densenet.pytorch/blob/master/densenet.py
Original author bamos
Referenced https://github.com/andreasveit/densenet-pytorch/blob/master/densenet.py
Original author andreasveit
Referenced https://github.com/Nicatio/Densenet/blob/master/mxnet/symbol_densenet.py
Original author Nicatio
Implemented the following paper: DenseNet-BC
Gao Huang, Zhuang Liu, Kilian Q. Weinberger, Laurens van der Maaten. "Densely Connected Convolutional Networks"
Coded by Lin Xiong Mar-1, 2017
"""
import mxnet as mx
import math
import logging
# Configure root logging at import time so get_symbol's info line is shown.
logging.basicConfig(level=logging.DEBUG)
# Global quantization bit-widths for weights/activations; the -1
# placeholders are overwritten by get_symbol() before any symbol is built.
BITW = -1 # set in get_symbol
BITA = -1 # set in get_symbol
def BasicBlock(data, growth_rate, stride, name, bottle_neck=True, drop_out=0.0, bn_mom=0.9, workspace=512):
    """Return a BasicBlock unit symbol used to build a DenseBlock.

    Parameters
    ----------
    data : Symbol
        Input symbol.
    growth_rate : int
        Number of output channels.
    stride : tuple
        Stride used in the final convolution.
    name : str
        Base name of the operators.
    bottle_neck : bool
        If True, insert a 1x1 bottleneck convolution before the 3x3 one.
    drop_out : float
        Probability of an element to be zeroed (applied after each
        convolution when > 0).
    bn_mom : float
        BatchNorm momentum.
    workspace : int
        Workspace used in convolution operator.
    """
    def _bn_qact_qconv(x, suffix, num_filter, kernel, conv_stride, pad):
        # BN -> quantized activation -> quantized convolution: the
        # pre-activation ordering used throughout this network, with an
        # optional dropout after the convolution.
        x = mx.sym.BatchNorm(data=x, fix_gamma=False, eps=2e-5, momentum=bn_mom,
                             name=name + '_bn' + suffix)
        x = mx.sym.QActivation(data=x, backward_only=True,
                               name=name + '_relu' + suffix, act_bit=BITA)
        x = mx.sym.QConvolution(data=x, num_filter=num_filter, kernel=kernel,
                                stride=conv_stride, pad=pad, no_bias=1,
                                workspace=workspace, name=name + '_conv' + suffix,
                                weight_bit=BITW, act_bit=BITA)
        if drop_out > 0:
            x = mx.symbol.Dropout(data=x, p=drop_out, name=name + '_dp' + suffix)
        return x

    if bottle_neck:
        # 1x1 bottleneck producing 4*growth_rate channels, then the 3x3 conv
        # (same ordering as https://github.com/facebook/fb.resnet.torch#notes).
        out = _bn_qact_qconv(data, '1', int(growth_rate * 4), (1, 1), (1, 1), (0, 0))
        return _bn_qact_qconv(out, '2', int(growth_rate), (3, 3), stride, (1, 1))
    return _bn_qact_qconv(data, '1', int(growth_rate), (3, 3), (1, 1), (1, 1))
def DenseBlock(units_num, data, growth_rate, name, bottle_neck=True, drop_out=0.0, bn_mom=0.9, workspace=512):
    """Return a DenseBlock symbol for building DenseNet: *units_num*
    BasicBlocks, each one's output densely concatenated onto the
    running feature map.

    Parameters
    ----------
    units_num : int
        The number of BasicBlock units in this DenseBlock.
    data : Symbol
        Input symbol.
    growth_rate : int
        Channels added by each unit.
    name : str
        Base name of the operators.
    drop_out : float
        Probability of an element to be zeroed, forwarded to each unit.
    workspace : int
        Workspace used in convolution operator.
    """
    for unit_idx in range(1, units_num + 1):
        new_features = BasicBlock(data, growth_rate=growth_rate, stride=(1, 1),
                                  name='%s_unit%d' % (name, unit_idx),
                                  bottle_neck=bottle_neck, drop_out=drop_out,
                                  bn_mom=bn_mom, workspace=workspace)
        # Dense connectivity: concatenate the new features onto all
        # previously produced ones.
        data = mx.symbol.Concat(data, new_features, name='%s_concat%d' % (name, unit_idx))
    return data
def TransitionBlock(num_stage, data, num_filter, stride, name, drop_out=0.0, bn_mom=0.9, workspace=512):
    """Return a TransitionBlock symbol for building DenseNet:
    BN -> quantized activation -> 1x1 quantized convolution ->
    2x2 average pooling.

    Parameters
    ----------
    num_stage : int
        Stage index (used only in the pooling operator's name).
    data : Symbol
        Input symbol.
    num_filter : int
        Number of output channels of the 1x1 convolution.
    stride : tuple
        Stride used in the 1x1 convolution.
    name : str
        Base name of the operators.
    drop_out : float
        Probability of an element to be zeroed (applied when > 0).
    workspace : int
        Workspace used in convolution operator.
    """
    normed = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5,
                              momentum=bn_mom, name=name + '_bn1')
    activated = mx.sym.QActivation(data=normed, backward_only=True,
                                   name=name + '_relu1', act_bit=BITA)
    reduced = mx.sym.QConvolution(data=activated, num_filter=num_filter,
                                  kernel=(1, 1), stride=stride, pad=(0, 0),
                                  no_bias=1, workspace=workspace,
                                  name=name + '_conv1', weight_bit=BITW,
                                  act_bit=BITA)
    if drop_out > 0:
        reduced = mx.symbol.Dropout(data=reduced, p=drop_out, name=name + '_dp1')
    # Halve the spatial resolution between dense stages.
    return mx.symbol.Pooling(reduced, global_pool=False, kernel=(2, 2),
                             stride=(2, 2), pool_type='avg',
                             name=name + '_pool%d' % (num_stage + 1))
def DenseNet(units, num_stage, growth_rate, num_class, data_type, reduction=0.5, drop_out=0., bottle_neck=True,
             bn_mom=0.9, workspace=512):
    """Return a DenseNet-BC symbol.

    Parameters
    ----------
    units : list
        Number of BasicBlock units in each dense stage (len == num_stage).
    num_stage : int
        Number of dense stages.
    growth_rate : int
        Number of channels added by each unit.
    num_class : int
        Output size of the final classifier.
    data_type : str
        Dataset name; 'imagenet', 'vggface' and 'msface' are supported
        and all three currently share the same 7x7/stride-2 stem.
    reduction : float
        Channel compression ratio at each transition block. Default = 0.5
    drop_out : float
        Probability of an element to be zeroed, forwarded to the blocks.
    workspace : int
        Workspace used in convolution operator.

    Raises
    ------
    ValueError
        If data_type is not one of the supported datasets.
    """
    num_unit = len(units)
    assert (num_unit == num_stage)
    init_channels = 2 * growth_rate
    n_channels = init_channels
    data = mx.sym.Variable(name='data')
    data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
    # The three supported datasets previously had three byte-identical
    # stem branches; collapsed here into a single membership test.
    if data_type in ('imagenet', 'vggface', 'msface'):
        body = mx.sym.Convolution(data=data, num_filter=growth_rate * 2, kernel=(7, 7), stride=(2, 2), pad=(3, 3),
                                  no_bias=1, name="conv0", workspace=workspace)
        body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
        body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
        body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max')
    else:
        raise ValueError("do not support {} yet".format(data_type))
    for i in range(num_stage - 1):
        body = DenseBlock(units[i], body, growth_rate=growth_rate, name='DBstage%d' % (i + 1), bottle_neck=bottle_neck,
                          drop_out=drop_out, bn_mom=bn_mom, workspace=workspace)
        # Track the running channel count and compress it at the transition.
        n_channels += units[i] * growth_rate
        n_channels = int(math.floor(n_channels * reduction))
        body = TransitionBlock(i, body, n_channels, stride=(1, 1), name='TBstage%d' % (i + 1), drop_out=drop_out,
                               bn_mom=bn_mom, workspace=workspace)
    # The final dense stage has no transition block after it.
    body = DenseBlock(units[num_stage - 1], body, growth_rate=growth_rate, name='DBstage%d' % (num_stage),
                      bottle_neck=bottle_neck, drop_out=drop_out, bn_mom=bn_mom, workspace=workspace)
    # Classifier head: BN -> ReLU -> global average pool -> FC -> softmax.
    bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
    relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
    pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
    flat = mx.symbol.Flatten(data=pool1)
    fc1 = mx.symbol.FullyConnected(data=flat, num_hidden=num_class, name='fc1')
    return mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, bn_mom=0.9, drop_out=0.0, reduction=0.5,
data_type="imagenet", growth_rate=32, bits_w=1, bits_a=1, use_bottle_neck=False, **kwargs):
global BITW, BITA
BITW = bits_w
BITA = bits_a
logging.info("Created binary densenet with bit_w={} and bit_a={} and bottleneck={}.".format(BITW, BITA, use_bottle_neck))
if data_type == "imagenet":
if num_layers == 21:
units = [2, 2, 2, 2]
elif num_layers == 45:
units = [2, 4, 8, 6]
elif num_layers == 85:
units = [4, 8, 16, 12]
elif num_layers == 121:
units = [6, 12, 24, 16]
elif num_layers == 169:
units = [6, 12, 32, 32]
elif num_layers == 201:
units = [6, 12, 48, 32]
elif num_layers == 161:
units = [6, 12, 36, 24]
else:
raise ValueError("no experiments done on detph {}, you can do it youself".format(num_layers))
return DenseNet(units=units, num_stage=4, growth_rate=64 if num_layers == 161 else growth_rate, num_class=num_classes,
data_type="imagenet", reduction=reduction, drop_out=drop_out, bottle_neck=use_bottle_neck,
bn_mom=bn_mom, workspace=conv_workspace)
elif data_type == "vggface":
if num_layers == 121:
units = [6, 12, 24, 16]
elif num_layers == 169:
units = [6, 12, 32, 32]
elif num_layers == 201:
units = [6, 12, 48, 32]
elif num_layers == 161:
units = [6, 12, 36, 24]
else:
raise ValueError("no experiments done on detph {}, you can do it youself".format(num_layers))
return DenseNet(units=units, num_stage=4, growth_rate=64 if num_layers == 161 else growth_rate, num_class=num_classes,
data_type="vggface", reduction=reduction, drop_out=drop_out, bottle_neck=use_bottle_neck,
bn_mom=bn_mom, workspace=conv_workspace)
elif data_type == "msface":
if num_layers == 121:
units = [6, 12, 24, 16]
elif num_layers == 169:
units = [6, 12, 32, 32]
elif num_layers == 201:
units = [6, 12, 48, 32]
elif num_layers == 161:
units = [6, 12, 36, 24]
else:
raise ValueError("no experiments done on detph {}, you can do it youself".format(num_layers))
return DenseNet(units=units, num_stage=4, growth_rate=64 if num_layers == 161 else growth_rate, num_class=num_classes,
data_type="msface", reduction=reduction, drop_out=drop_out, bottle_neck=use_bottle_neck,
bn_mom=bn_mom, workspace=conv_workspace)
else:
raise ValueError("do not support {} yet".format(data_type))
|
|
import datetime
import pickle
import unittest
import uuid
from copy import deepcopy
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, models
from django.db.models import CharField, Q, TimeField, UUIDField
from django.db.models.aggregates import (
Avg, Count, Max, Min, StdDev, Sum, Variance,
)
from django.db.models.expressions import (
Case, Col, Combinable, Exists, Expression, ExpressionList,
ExpressionWrapper, F, Func, OrderBy, OuterRef, Random, RawSQL, Ref,
Subquery, Value, When,
)
from django.db.models.functions import (
Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.db.models.sql import constants
from django.db.models.sql.datastructures import Join
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import Approximate
from .models import (
UUID, UUIDPK, Company, Employee, Experiment, Number, RemoteEmployee,
Result, SimulationRun, Time,
)
class BasicExpressionsTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: three companies with distinct sizes and chair
        # counts, each with its own CEO; cls.max and cls.gmbh are also
        # referenced directly by several tests below.
        cls.example_inc = Company.objects.create(
            name="Example Inc.", num_employees=2300, num_chairs=5,
            ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
        )
        cls.foobar_ltd = Company.objects.create(
            name="Foobar Ltd.", num_employees=3, num_chairs=4,
            ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
        )
        cls.max = Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30)
        cls.gmbh = Company.objects.create(name='Test GmbH', num_employees=32, num_chairs=1, ceo=cls.max)
    def setUp(self):
        # Deterministically ordered values() queryset reused by the
        # filter/update tests below.
        self.company_query = Company.objects.values(
            "name", "num_employees", "num_chairs"
        ).order_by(
            "name", "num_employees", "num_chairs"
        )
    def test_annotate_values_aggregate(self):
        # An annotation carried through values() can feed an aggregate:
        # 2395 == (2300 + 10) + (3 + 20) + (32 + 30) over the fixtures.
        companies = Company.objects.annotate(
            salaries=F('ceo__salary'),
        ).values('num_employees', 'salaries').aggregate(
            result=Sum(
                F('salaries') + F('num_employees'),
                output_field=models.IntegerField()
            ),
        )
        self.assertEqual(companies['result'], 2395)
    def test_annotate_values_filter(self):
        # A RawSQL annotation can be referenced by name in a filter.
        companies = Company.objects.annotate(
            foo=RawSQL('%s', ['value']),
        ).filter(foo='value').order_by('name')
        self.assertQuerysetEqual(
            companies,
            ['<Company: Example Inc.>', '<Company: Foobar Ltd.>', '<Company: Test GmbH>'],
        )
    def test_annotate_values_count(self):
        # A RawSQL annotation must not break .count() query generation.
        companies = Company.objects.annotate(foo=RawSQL('%s', ['value']))
        self.assertEqual(companies.count(), 3)
    @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support using boolean type in SELECT")
    def test_filtering_on_annotate_that_uses_q(self):
        # A Q object wrapped as a boolean annotation is filterable; two of
        # the three fixture companies have more than 3 employees.
        self.assertEqual(
            Company.objects.annotate(
                num_employees_check=ExpressionWrapper(Q(num_employees__gt=3), output_field=models.BooleanField())
            ).filter(num_employees_check=True).count(),
            2,
        )
    def test_filter_inter_attribute(self):
        # We can filter on attribute relationships on same model obj, e.g.
        # find companies where the number of employees is greater
        # than the number of chairs. (Foobar Ltd., with 3 employees and
        # 4 chairs, is excluded.)
        self.assertSequenceEqual(
            self.company_query.filter(num_employees__gt=F("num_chairs")), [
                {
                    "num_chairs": 5,
                    "name": "Example Inc.",
                    "num_employees": 2300,
                },
                {
                    "num_chairs": 1,
                    "name": "Test GmbH",
                    "num_employees": 32
                },
            ],
        )
    def test_update(self):
        # We can set one field to have the value of another field
        # Make sure we have enough chairs. (update() also works when
        # called on a values() queryset.)
        self.company_query.update(num_chairs=F("num_employees"))
        self.assertSequenceEqual(
            self.company_query, [
                {
                    "num_chairs": 2300,
                    "name": "Example Inc.",
                    "num_employees": 2300
                },
                {
                    "num_chairs": 3,
                    "name": "Foobar Ltd.",
                    "num_employees": 3
                },
                {
                    "num_chairs": 32,
                    "name": "Test GmbH",
                    "num_employees": 32
                }
            ],
        )
    def test_arithmetic(self):
        # We can perform arithmetic operations in expressions
        # Make sure we have 2 spare chairs: every row becomes
        # num_chairs = num_employees + 2.
        self.company_query.update(num_chairs=F("num_employees") + 2)
        self.assertSequenceEqual(
            self.company_query, [
                {
                    'num_chairs': 2302,
                    'name': 'Example Inc.',
                    'num_employees': 2300
                },
                {
                    'num_chairs': 5,
                    'name': 'Foobar Ltd.',
                    'num_employees': 3
                },
                {
                    'num_chairs': 34,
                    'name': 'Test GmbH',
                    'num_employees': 32
                }
            ],
        )
    def test_order_of_operations(self):
        # Law of order of operations is followed: x + 2*x == 3*x, so the
        # chair counts become 6900, 9, and 96.
        self.company_query.update(num_chairs=F('num_employees') + 2 * F('num_employees'))
        self.assertSequenceEqual(
            self.company_query, [
                {
                    'num_chairs': 6900,
                    'name': 'Example Inc.',
                    'num_employees': 2300
                },
                {
                    'num_chairs': 9,
                    'name': 'Foobar Ltd.',
                    'num_employees': 3
                },
                {
                    'num_chairs': 96,
                    'name': 'Test GmbH',
                    'num_employees': 32
                }
            ],
        )
    def test_parenthesis_priority(self):
        # Law of order of operations can be overridden by parentheses:
        # (x + 2) * x yields 5294600, 15, and 1088.
        self.company_query.update(num_chairs=(F('num_employees') + 2) * F('num_employees'))
        self.assertSequenceEqual(
            self.company_query, [
                {
                    'num_chairs': 5294600,
                    'name': 'Example Inc.',
                    'num_employees': 2300
                },
                {
                    'num_chairs': 15,
                    'name': 'Foobar Ltd.',
                    'num_employees': 3
                },
                {
                    'num_chairs': 1088,
                    'name': 'Test GmbH',
                    'num_employees': 32
                }
            ],
        )
    def test_update_with_fk(self):
        # ForeignKey can become updated with the value of another ForeignKey.
        # update() returns the number of rows matched (all 3 companies).
        self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3)
        self.assertQuerysetEqual(
            Company.objects.all(),
            ['Joe Smith', 'Frank Meyer', 'Max Mustermann'],
            lambda c: str(c.point_of_contact),
            ordered=False
        )
    def test_update_with_none(self):
        # Value(None) in update() writes SQL NULL to a nullable column.
        Number.objects.create(integer=1, float=1.0)
        Number.objects.create(integer=2)
        Number.objects.filter(float__isnull=False).update(float=Value(None))
        self.assertQuerysetEqual(
            Number.objects.all(),
            [None, None],
            lambda n: n.float,
            ordered=False
        )
    def test_filter_with_join(self):
        # F Expressions can also span joins
        # Set every point_of_contact to the CEO, then break the match for
        # the first company only.
        Company.objects.update(point_of_contact=F('ceo'))
        c = Company.objects.first()
        c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
        c.save()
        self.assertQuerysetEqual(
            Company.objects.filter(ceo__firstname=F('point_of_contact__firstname')),
            ['Foobar Ltd.', 'Test GmbH'],
            lambda c: c.name,
            ordered=False
        )
        # Joined F() references are allowed in exclude()-based filters...
        Company.objects.exclude(
            ceo__firstname=F("point_of_contact__firstname")
        ).update(name="foo")
        self.assertEqual(
            Company.objects.exclude(
                ceo__firstname=F('point_of_contact__firstname')
            ).get().name,
            "foo",
        )
        # ...but not as the *value* being written by update().
        msg = "Joined field references are not permitted in this query"
        with self.assertRaisesMessage(FieldError, msg):
            Company.objects.exclude(
                ceo__firstname=F('point_of_contact__firstname')
            ).update(name=F('point_of_contact__lastname'))
    def test_object_update(self):
        # F expressions can be used to update attributes on single objects.
        # The refresh is needed: after save() the attribute still holds the
        # expression, not the resolved database value.
        self.gmbh.num_employees = F('num_employees') + 4
        self.gmbh.save()
        self.gmbh.refresh_from_db()
        self.assertEqual(self.gmbh.num_employees, 36)
    def test_new_object_save(self):
        # We should be able to use Funcs when inserting new data: the
        # Lower() expression is evaluated by the database on INSERT.
        test_co = Company(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max)
        test_co.save()
        test_co.refresh_from_db()
        self.assertEqual(test_co.name, "upper")
    def test_new_object_create(self):
        # Same as test_new_object_save, but via Manager.create().
        test_co = Company.objects.create(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max)
        test_co.refresh_from_db()
        self.assertEqual(test_co.name, "upper")
    def test_object_create_with_aggregate(self):
        # Aggregates are not allowed when inserting new data; the error
        # message names the offending field and expression.
        msg = 'Aggregate functions are not allowed in this query (num_employees=Max(Value(1))).'
        with self.assertRaisesMessage(FieldError, msg):
            Company.objects.create(
                name='Company', num_employees=Max(Value(1)), num_chairs=1,
                ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30),
            )
    def test_object_update_fk(self):
        # F expressions cannot be used to update attributes which are foreign
        # keys, or attributes which involve joins.
        test_gmbh = Company.objects.get(pk=self.gmbh.pk)
        # Assigning an F() to a ForeignKey attribute fails immediately.
        msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.'
        with self.assertRaisesMessage(ValueError, msg):
            test_gmbh.point_of_contact = F('ceo')
        test_gmbh.point_of_contact = self.gmbh.ceo
        test_gmbh.save()
        # A joined F() reference is only rejected later, at save() time.
        test_gmbh.name = F('ceo__last_name')
        msg = 'Joined field references are not permitted in this query'
        with self.assertRaisesMessage(FieldError, msg):
            test_gmbh.save()
    def test_update_inherited_field_value(self):
        # F() references to parent-model fields in an update() on a child
        # model are treated as joined references and rejected.
        msg = 'Joined field references are not permitted in this query'
        with self.assertRaisesMessage(FieldError, msg):
            RemoteEmployee.objects.update(adjusted_salary=F('salary') * 5)
    def test_object_update_unsaved_objects(self):
        # F expressions cannot be used to update attributes on objects which do
        # not yet exist in the database: save() on an unsaved instance is an
        # INSERT, and the error message names the offending expression.
        acme = Company(name='The Acme Widget Co.', num_employees=12, num_chairs=5, ceo=self.max)
        acme.num_employees = F("num_employees") + 16
        msg = (
            'Failed to insert expression "Col(expressions_company, '
            'expressions.Company.num_employees) + Value(16)" on '
            'expressions.Company.num_employees. F() expressions can only be '
            'used to update, not to insert.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            acme.save()
        # The same applies to database functions wrapping an F().
        acme.num_employees = 12
        acme.name = Lower(F('name'))
        msg = (
            'Failed to insert expression "Lower(Col(expressions_company, '
            'expressions.Company.name))" on expressions.Company.name. F() '
            'expressions can only be used to update, not to insert.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            acme.save()
    def test_ticket_11722_iexact_lookup(self):
        # iexact against an F() is case-insensitive: only 'Test'/'test'
        # matches, 'John'/'Doe' does not.
        Employee.objects.create(firstname="John", lastname="Doe")
        Employee.objects.create(firstname="Test", lastname="test")
        queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
        self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])
    def test_ticket_16731_startswith_lookup(self):
        # startswith against an F(): case-sensitive match yields e2 only,
        # unless the backend's LIKE is case-insensitive; istartswith always
        # matches both Jacksons.
        Employee.objects.create(firstname="John", lastname="Doe")
        e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
        e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
        self.assertSequenceEqual(
            Employee.objects.filter(lastname__startswith=F('firstname')),
            [e2, e3] if connection.features.has_case_insensitive_like else [e2]
        )
        qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk')
        self.assertSequenceEqual(qs, [e2, e3])
    def test_ticket_18375_join_reuse(self):
        # Reverse multijoin F() references and the lookup target the same join.
        # Pre #18375 the F() join was generated first and the lookup couldn't
        # reuse that join. A single JOIN in the compiled SQL proves reuse.
        qs = Employee.objects.filter(company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)
    def test_ticket_18375_kwarg_ordering(self):
        # The next query was dict-randomization dependent - if the "gte=1"
        # was seen first, then the F() will reuse the join generated by the
        # gte lookup, if F() was seen first, then it generated a join the
        # other lookups could not reuse. Either way there must be one JOIN.
        qs = Employee.objects.filter(
            company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
            company_ceo_set__num_chairs__gte=1,
        )
        self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
# Another similar case for F() than above. Now we have the same join
# in two filter kwargs, one in the lhs lookup, one in F. Here pre
# #18375 the amount of joins generated was random if dict
# randomization was enabled, that is the generated query dependent
# on which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
# F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
    def test_order_by_exists(self):
        """An Exists() subquery can be used as an order_by() expression."""
        mary = Employee.objects.create(firstname='Mary', lastname='Mustermann', salary=20)
        mustermanns_by_seniority = Employee.objects.filter(lastname='Mustermann').order_by(
            # Order by whether the employee is the CEO of a company
            Exists(Company.objects.filter(ceo=OuterRef('pk'))).desc()
        )
        self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary])

    def test_order_by_multiline_sql(self):
        """Multiline RawSQL expressions are usable in order_by(), with and
        without DISTINCT."""
        raw_order_by = (
            RawSQL('''
                CASE WHEN num_employees > 1000
                     THEN num_chairs
                     ELSE 0 END
            ''', []).desc(),
            RawSQL('''
                CASE WHEN num_chairs > 1
                     THEN 1
                     ELSE 0 END
            ''', []).asc()
        )
        for qs in (
            Company.objects.all(),
            Company.objects.distinct(),
        ):
            with self.subTest(qs=qs):
                self.assertSequenceEqual(
                    qs.order_by(*raw_order_by),
                    [self.example_inc, self.gmbh, self.foobar_ltd],
                )

    def test_outerref(self):
        """A queryset containing OuterRef() can only run inside a subquery."""
        inner = Company.objects.filter(point_of_contact=OuterRef('pk'))
        msg = (
            'This queryset contains a reference to an outer query and may only '
            'be used in a subquery.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            inner.exists()
        outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
        self.assertIs(outer.exists(), True)

    def test_exist_single_field_output_field(self):
        """Exists() always resolves to a BooleanField output."""
        queryset = Company.objects.values('pk')
        self.assertIsInstance(Exists(queryset).output_field, models.BooleanField)
    def test_subquery(self):
        """Exists()/Subquery() annotations (including negated and
        double-negated forms) produce the expected values per employee."""
        Company.objects.filter(name='Example Inc.').update(
            point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'),
            ceo=self.max,
        )
        Employee.objects.create(firstname='Bob', lastname='Brown', salary=40)
        qs = Employee.objects.annotate(
            is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))),
            is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))),
            is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))),
            is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))),
            largest_company=Subquery(Company.objects.order_by('-num_employees').filter(
                models.Q(ceo=OuterRef('pk')) | models.Q(point_of_contact=OuterRef('pk'))
            ).values('name')[:1], output_field=models.CharField())
        ).values(
            'firstname',
            'is_point_of_contact',
            'is_not_point_of_contact',
            'is_ceo_of_small_company',
            'is_ceo_small_2',
            'largest_company',
        ).order_by('firstname')
        results = list(qs)
        # Could use Coalesce(subq, Value('')) instead except for the bug in
        # cx_Oracle mentioned in #23843.
        bob = results[0]
        if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls:
            bob['largest_company'] = None
        self.assertEqual(results, [
            {
                'firstname': 'Bob',
                'is_point_of_contact': False,
                'is_not_point_of_contact': True,
                'is_ceo_of_small_company': False,
                'is_ceo_small_2': False,
                'largest_company': None,
            },
            {
                'firstname': 'Frank',
                'is_point_of_contact': False,
                'is_not_point_of_contact': True,
                'is_ceo_of_small_company': True,
                'is_ceo_small_2': True,
                'largest_company': 'Foobar Ltd.',
            },
            {
                'firstname': 'Joe',
                'is_point_of_contact': True,
                'is_not_point_of_contact': False,
                'is_ceo_of_small_company': False,
                'is_ceo_small_2': False,
                'largest_company': 'Example Inc.',
            },
            {
                'firstname': 'Max',
                'is_point_of_contact': False,
                'is_not_point_of_contact': True,
                'is_ceo_of_small_company': True,
                'is_ceo_small_2': True,
                'largest_company': 'Example Inc.'
            }
        ])
        # A less elegant way to write the same query: this uses a LEFT OUTER
        # JOIN and an IS NULL, inside a WHERE NOT IN which is probably less
        # efficient than EXISTS.
        self.assertCountEqual(
            qs.filter(is_point_of_contact=True).values('pk'),
            Employee.objects.exclude(company_point_of_contact_set=None).values('pk')
        )

    def test_in_subquery(self):
        # This is a contrived test (and you really wouldn't write this query),
        # but it is a succinct way to test the __in=Subquery() construct.
        small_companies = Company.objects.filter(num_employees__lt=200).values('pk')
        subquery_test = Company.objects.filter(pk__in=Subquery(small_companies))
        self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh])
        subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3)))
        self.assertCountEqual(subquery_test2, [self.foobar_ltd])

    def test_uuid_pk_subquery(self):
        """__in=Subquery() works when the subquery targets a UUID primary key."""
        u = UUIDPK.objects.create()
        UUID.objects.create(uuid_fk=u)
        qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id')))
        self.assertCountEqual(qs, [u])
    def test_nested_subquery(self):
        """A Subquery may wrap a queryset that itself annotates with Exists()."""
        inner = Company.objects.filter(point_of_contact=OuterRef('pk'))
        outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
        contrived = Employee.objects.annotate(
            is_point_of_contact=Subquery(
                outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'),
                output_field=models.BooleanField(),
            ),
        )
        self.assertCountEqual(contrived.values_list(), outer.values_list())

    def test_nested_subquery_outer_ref_2(self):
        first = Time.objects.create(time='09:00')
        second = Time.objects.create(time='17:00')
        third = Time.objects.create(time='21:00')
        SimulationRun.objects.bulk_create([
            SimulationRun(start=first, end=second, midpoint='12:00'),
            SimulationRun(start=first, end=third, midpoint='15:00'),
            SimulationRun(start=second, end=first, midpoint='00:00'),
        ])
        # OuterRef(OuterRef(...)) reaches two query levels up.
        inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time')
        middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1]
        outer = Time.objects.annotate(other=Subquery(middle, output_field=models.TimeField()))
        # This is a contrived example. It exercises the double OuterRef form.
        self.assertCountEqual(outer, [first, second, third])

    def test_nested_subquery_outer_ref_with_autofield(self):
        first = Time.objects.create(time='09:00')
        second = Time.objects.create(time='17:00')
        SimulationRun.objects.create(start=first, end=second, midpoint='12:00')
        inner = SimulationRun.objects.filter(start=OuterRef(OuterRef('pk'))).values('start')
        middle = Time.objects.annotate(other=Subquery(inner)).values('other')[:1]
        outer = Time.objects.annotate(other=Subquery(middle, output_field=models.IntegerField()))
        # This exercises the double OuterRef form with AutoField as pk.
        self.assertCountEqual(outer, [first, second])

    def test_annotations_within_subquery(self):
        """Aggregate annotations computed inside a Subquery can be both
        annotated on and filtered against in the outer query."""
        Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank'))
        inner = Company.objects.filter(
            ceo=OuterRef('pk')
        ).values('ceo').annotate(total_employees=models.Sum('num_employees')).values('total_employees')
        outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner))
        self.assertSequenceEqual(
            outer.order_by('-total_employees').values('salary', 'total_employees'),
            [{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}],
        )

    def test_subquery_references_joined_table_twice(self):
        """A subquery may reference two different joined tables via OuterRef."""
        inner = Company.objects.filter(
            num_chairs__gte=OuterRef('ceo__salary'),
            num_employees__gte=OuterRef('point_of_contact__salary'),
        )
        # Another contrived example (there is no need to have a subquery here)
        outer = Company.objects.filter(pk__in=Subquery(inner.values('pk')))
        self.assertFalse(outer.exists())

    def test_subquery_filter_by_aggregate(self):
        """A Subquery can filter on an aggregate annotation (cnt__gt=0)."""
        Number.objects.create(integer=1000, float=1.2)
        Employee.objects.create(salary=1000)
        qs = Number.objects.annotate(
            min_valuable_count=Subquery(
                Employee.objects.filter(
                    salary=OuterRef('integer'),
                ).annotate(cnt=Count('salary')).filter(cnt__gt=0).values('cnt')[:1]
            ),
        )
        self.assertEqual(qs.get().float, 1.2)

    def test_aggregate_subquery_annotation(self):
        """Aggregating over a Subquery annotation runs one query and doesn't
        duplicate the subquery or add a spurious GROUP BY."""
        with self.assertNumQueries(1) as ctx:
            aggregate = Company.objects.annotate(
                ceo_salary=Subquery(
                    Employee.objects.filter(
                        id=OuterRef('ceo_id'),
                    ).values('salary')
                ),
            ).aggregate(
                ceo_salary_gt_20=Count('pk', filter=Q(ceo_salary__gt=20)),
            )
        self.assertEqual(aggregate, {'ceo_salary_gt_20': 1})
        # Aggregation over a subquery annotation doesn't annotate the subquery
        # twice in the inner query.
        sql = ctx.captured_queries[0]['sql']
        self.assertLessEqual(sql.count('SELECT'), 3)
        # GROUP BY isn't required to aggregate over a query that doesn't
        # contain nested aggregates.
        self.assertNotIn('GROUP BY', sql)
    def test_explicit_output_field(self):
        """A Func with no own output_field inherits it from its source
        expression."""
        class FuncA(Func):
            output_field = models.CharField()

        class FuncB(Func):
            pass

        expr = FuncB(FuncA())
        self.assertEqual(expr.output_field, FuncA.output_field)

    def test_outerref_mixed_case_table_name(self):
        """OuterRef resolves against a model whose db_table has mixed case."""
        inner = Result.objects.filter(result_time__gte=OuterRef('experiment__assigned'))
        outer = Result.objects.filter(pk__in=Subquery(inner.values('pk')))
        self.assertFalse(outer.exists())

    def test_outerref_with_operator(self):
        """OuterRef can appear inside an arithmetic expression."""
        inner = Company.objects.filter(num_employees=OuterRef('ceo__salary') + 2)
        outer = Company.objects.filter(pk__in=Subquery(inner.values('pk')))
        self.assertEqual(outer.get().name, 'Test GmbH')

    def test_annotation_with_outerref(self):
        """An annotation inside a Subquery can combine OuterRef with F()."""
        gmbh_salary = Company.objects.annotate(
            max_ceo_salary_raise=Subquery(
                Company.objects.annotate(
                    salary_raise=OuterRef('num_employees') + F('num_employees'),
                ).order_by('-salary_raise').values('salary_raise')[:1],
                output_field=models.IntegerField(),
            ),
        ).get(pk=self.gmbh.pk)
        self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332)

    def test_pickle_expression(self):
        """An expression survives a pickle round-trip, even after its
        cached properties have been populated."""
        expr = Value(1, output_field=models.IntegerField())
        expr.convert_value  # populate cached property
        self.assertEqual(pickle.loads(pickle.dumps(expr)), expr)

    def test_incorrect_field_in_F_expression(self):
        """F() referencing an unknown field raises FieldError at evaluation."""
        with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
            list(Employee.objects.filter(firstname=F('nope')))

    def test_incorrect_joined_field_in_F_expression(self):
        """F() referencing an unknown field across a join raises FieldError."""
        with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
            list(Company.objects.filter(ceo__pk=F('point_of_contact__nope')))
class IterableLookupInnerExpressionsTests(TestCase):
    """Expressions (F() arithmetic, combined expressions) used *inside*
    iterable lookup values such as __in and __range."""

    @classmethod
    def setUpTestData(cls):
        ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30)
        # MySQL requires that the values calculated for expressions don't pass
        # outside of the field's range, so it's inconvenient to use the values
        # in the more general tests.
        Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo)
        Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo)
        Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo)
        Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo)
        Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo)

    def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):
        # __in lookups can use F() expressions for integers.
        queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10]))
        self.assertQuerysetEqual(queryset, ['<Company: 5060 Ltd>'], ordered=False)
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])),
            ['<Company: 5040 Ltd>', '<Company: 5060 Ltd>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Company.objects.filter(
                num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10])
            ),
            ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'],
            ordered=False
        )

    def test_expressions_in_lookups_join_choice(self):
        """Join promotion: filter() on a range of joined F() values uses INNER
        joins; exclude() promotes them to LEFT OUTER joins."""
        midpoint = datetime.time(13, 0)
        t1 = Time.objects.create(time=datetime.time(12, 0))
        t2 = Time.objects.create(time=datetime.time(14, 0))
        SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint)
        SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint)
        SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint)
        SimulationRun.objects.create(start=None, end=None, midpoint=midpoint)
        queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')])
        self.assertQuerysetEqual(
            queryset,
            ['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'],
            ordered=False
        )
        for alias in queryset.query.alias_map.values():
            if isinstance(alias, Join):
                self.assertEqual(alias.join_type, constants.INNER)
        queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')])
        self.assertQuerysetEqual(queryset, [], ordered=False)
        for alias in queryset.query.alias_map.values():
            if isinstance(alias, Join):
                self.assertEqual(alias.join_type, constants.LOUTER)

    def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):
        # Range lookups can use F() expressions for integers.
        # Smoke test: building the lookup below must not raise.
        Company.objects.filter(num_employees__exact=F("num_chairs"))
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__range=(F('num_chairs'), 100)),
            ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)),
            ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)),
            ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Company.objects.filter(num_employees__range=(1, 100)),
            [
                '<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>',
                '<Company: 5060 Ltd>', '<Company: 99300 Ltd>',
            ],
            ordered=False
        )

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This defensive test only works on databases that don't validate parameter types")
    def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self):
        """
        This tests that SQL injection isn't possible using compilation of
        expressions in iterable filters, as their compilation happens before
        the main query compilation. It's limited to SQLite, as PostgreSQL,
        Oracle and other vendors have defense in depth against this by type
        checking. Testing against SQLite (the most permissive of the built-in
        databases) demonstrates that the problem doesn't exist while keeping
        the test simple.
        """
        queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1'])
        self.assertQuerysetEqual(queryset, [], ordered=False)

    def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self):
        """__range accepts joined F() expressions for datetime fields."""
        start = datetime.datetime(2016, 2, 3, 15, 0, 0)
        end = datetime.datetime(2016, 2, 5, 15, 0, 0)
        experiment_1 = Experiment.objects.create(
            name='Integrity testing',
            assigned=start.date(),
            start=start,
            end=end,
            completed=end.date(),
            estimated_time=end - start,
        )
        experiment_2 = Experiment.objects.create(
            name='Taste testing',
            assigned=start.date(),
            start=start,
            end=end,
            completed=end.date(),
            estimated_time=end - start,
        )
        Result.objects.create(
            experiment=experiment_1,
            result_time=datetime.datetime(2016, 2, 4, 15, 0, 0),
        )
        Result.objects.create(
            experiment=experiment_1,
            result_time=datetime.datetime(2016, 3, 10, 2, 0, 0),
        )
        Result.objects.create(
            experiment=experiment_2,
            result_time=datetime.datetime(2016, 1, 8, 5, 0, 0),
        )
        # Only the result falling inside its own experiment's window matches.
        # (The original test repeated this identical assertion block twice;
        # the verbatim duplicate was removed.)
        within_experiment_time = [F('experiment__start'), F('experiment__end')]
        queryset = Result.objects.filter(result_time__range=within_experiment_time)
        self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"])
class FTests(SimpleTestCase):
    """Unit tests for the F() expression object itself (no database access)."""

    def test_deepcopy(self):
        """A deep copy of an F() keeps the referenced field name."""
        original = F("foo")
        duplicate = deepcopy(original)
        self.assertEqual(original.name, duplicate.name)

    def test_deconstruct(self):
        """F() deconstructs to its import path plus the field name as the
        sole positional argument."""
        expr = F('name')
        path, args, kwargs = expr.deconstruct()
        self.assertEqual(path, 'django.db.models.expressions.F')
        self.assertEqual(args, (expr.name,))
        self.assertEqual(kwargs, {})

    def test_equal(self):
        """Two F() objects compare equal iff they name the same field."""
        first = F('name')
        second = F('name')
        different = F('username')
        self.assertEqual(first, second)
        self.assertNotEqual(first, different)

    def test_hash(self):
        """Equal F() objects hash alike, so they work as dict keys."""
        lookup = {F('name'): 'Bob'}
        self.assertIn(F('name'), lookup)
        self.assertEqual(lookup[F('name')], 'Bob')

    def test_not_equal_Value(self):
        """An F() never equals a Value(), in either comparison order."""
        field_ref = F('name')
        literal = Value('name')
        self.assertNotEqual(field_ref, literal)
        self.assertNotEqual(literal, field_ref)
class ExpressionsTests(TestCase):
    """Database-backed tests for F() reuse and pattern-lookup escaping."""

    def test_F_reuse(self):
        """A single F() instance can be reused across querysets on
        different models without leaking resolved state."""
        f = F('id')
        n = Number.objects.create(integer=-1)
        c = Company.objects.create(
            name="Example Inc.", num_employees=2300, num_chairs=5,
            ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
        )
        c_qs = Company.objects.filter(id=f)
        self.assertEqual(c_qs.get(), c)
        # Reuse the same F-object for another queryset
        n_qs = Number.objects.filter(id=f)
        self.assertEqual(n_qs.get(), n)
        # The original query still works correctly
        self.assertEqual(c_qs.get(), c)

    def test_patterns_escape(self):
        r"""
        Special characters (e.g. %, _ and \) stored in database are
        properly escaped when using a pattern lookup with an expression
        refs #16731
        """
        Employee.objects.bulk_create([
            Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
            Employee(firstname="Johnny", lastname="%John"),
            Employee(firstname="Jean-Claude", lastname="Claud_"),
            Employee(firstname="Jean-Claude", lastname="Claude"),
            Employee(firstname="Jean-Claude", lastname="Claude%"),
            Employee(firstname="Johnny", lastname="Joh\\n"),
            Employee(firstname="Johnny", lastname="John"),
            Employee(firstname="Johnny", lastname="_ohn"),
        ])
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__contains=F('lastname')),
            ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__startswith=F('lastname')),
            ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__endswith=F('lastname')),
            ["<Employee: Jean-Claude Claude>"],
            ordered=False,
        )

    def test_insensitive_patterns_escape(self):
        r"""
        Special characters (e.g. %, _ and \) stored in database are
        properly escaped when using a case insensitive pattern lookup with an
        expression -- refs #16731
        """
        Employee.objects.bulk_create([
            Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
            Employee(firstname="Johnny", lastname="%john"),
            Employee(firstname="Jean-Claude", lastname="claud_"),
            Employee(firstname="Jean-Claude", lastname="claude"),
            Employee(firstname="Jean-Claude", lastname="claude%"),
            Employee(firstname="Johnny", lastname="joh\\n"),
            Employee(firstname="Johnny", lastname="john"),
            Employee(firstname="Johnny", lastname="_ohn"),
        ])
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__icontains=F('lastname')),
            ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__istartswith=F('lastname')),
            ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
            ordered=False,
        )
        self.assertQuerysetEqual(
            Employee.objects.filter(firstname__iendswith=F('lastname')),
            ["<Employee: Jean-Claude claude>"],
            ordered=False,
        )
class SimpleExpressionTests(SimpleTestCase):
    """Equality and hashing semantics of the base Expression class."""

    def test_equal(self):
        """Expressions compare equal when their output fields agree; the
        output_field may be given positionally or by keyword."""
        int_expr = Expression(models.IntegerField())
        self.assertEqual(Expression(), Expression())
        self.assertEqual(int_expr, Expression(output_field=models.IntegerField()))
        self.assertNotEqual(int_expr, Expression(models.CharField()))

    def test_hash(self):
        """Hashing mirrors equality: same output field, same hash."""
        int_hash = hash(Expression(models.IntegerField()))
        self.assertEqual(hash(Expression()), hash(Expression()))
        self.assertEqual(int_hash, hash(Expression(output_field=models.IntegerField())))
        self.assertNotEqual(int_hash, hash(Expression(models.CharField())))
class ExpressionsNumericTests(TestCase):
    """F()-based arithmetic in update() and filter() on numeric fields."""

    @classmethod
    def setUpTestData(cls):
        Number(integer=-1).save()
        Number(integer=42).save()
        Number(integer=1337).save()
        # Mirror each integer into the float column via an F() update.
        Number.objects.update(float=F('integer'))

    def test_fill_with_value_from_same_object(self):
        """
        We can fill a value in all objects with an other value of the
        same object.
        """
        self.assertQuerysetEqual(
            Number.objects.all(),
            ['<Number: -1, -1.000>', '<Number: 42, 42.000>', '<Number: 1337, 1337.000>'],
            ordered=False
        )

    def test_increment_value(self):
        """
        We can increment a value of all objects in a query set.
        """
        self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2)
        self.assertQuerysetEqual(
            Number.objects.all(),
            ['<Number: -1, -1.000>', '<Number: 43, 42.000>', '<Number: 1338, 1337.000>'],
            ordered=False
        )

    def test_filter_not_equals_other_field(self):
        """
        We can filter for objects, where a value is not equals the value
        of an other field.
        """
        self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2)
        self.assertQuerysetEqual(
            Number.objects.exclude(float=F('integer')),
            ['<Number: 43, 42.000>', '<Number: 1338, 1337.000>'],
            ordered=False
        )

    def test_complex_expressions(self):
        """
        Complex expressions of different connection types are possible.
        """
        n = Number.objects.create(integer=10, float=123.45)
        self.assertEqual(Number.objects.filter(pk=n.pk).update(
            float=F('integer') + F('float') * 2), 1)
        self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
        # 10 + 123.45 * 2 = 256.90
        self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
class ExpressionOperatorTests(TestCase):
    """Arithmetic and bitwise operators combining F() with constants, with
    F() on both the left- and right-hand side."""

    @classmethod
    def setUpTestData(cls):
        cls.n = Number.objects.create(integer=42, float=15.5)
        cls.n1 = Number.objects.create(integer=-42, float=-15.5)

    def test_lefthand_addition(self):
        # LH Addition of floats and integers
        Number.objects.filter(pk=self.n.pk).update(
            integer=F('integer') + 15,
            float=F('float') + 42.7
        )
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))

    def test_lefthand_subtraction(self):
        # LH Subtraction of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))

    def test_lefthand_multiplication(self):
        # Multiplication of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))

    def test_lefthand_division(self):
        # LH Division of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))

    def test_lefthand_modulo(self):
        # LH Modulo arithmetic on integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))

    def test_lefthand_bitwise_and(self):
        # LH Bitwise ands on integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
        Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
        self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))

    def test_lefthand_bitwise_left_shift_operator(self):
        Number.objects.update(integer=F('integer').bitleftshift(2))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168)
        self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168)

    def test_lefthand_bitwise_right_shift_operator(self):
        Number.objects.update(integer=F('integer').bitrightshift(2))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10)
        # Arithmetic right shift of -42 floors toward negative infinity.
        self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11)

    def test_lefthand_bitwise_or(self):
        # LH Bitwise or on integers
        Number.objects.update(integer=F('integer').bitor(48))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
        self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))

    def test_lefthand_power(self):
        # LH Power arithmetic operation on floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5)
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))

    def test_right_hand_addition(self):
        # Right hand operators
        Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float'))
        # RH Addition of floats and integers
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))

    def test_right_hand_subtraction(self):
        Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float'))
        # RH Subtraction of floats and integers
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))

    def test_right_hand_multiplication(self):
        # RH Multiplication of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))

    def test_right_hand_division(self):
        # RH Division of floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))

    def test_right_hand_modulo(self):
        # RH Modulo arithmetic on integers
        Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))

    def test_righthand_power(self):
        # RH Power arithmetic operation on floats and integers
        Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float'))
        self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
        self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
class FTimeDeltaTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        """Create six experiments (e0..e5) whose durations (deltas), start
        delays, and day spans are strictly increasing, so slicing
        ``expnames[:i]`` in the tests below selects exactly the experiments
        below each threshold."""
        cls.sday = sday = datetime.date(2010, 6, 25)
        cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
        midnight = datetime.time(0)

        delta0 = datetime.timedelta(0)
        delta1 = datetime.timedelta(microseconds=253000)
        delta2 = datetime.timedelta(seconds=44)
        delta3 = datetime.timedelta(hours=21, minutes=8)
        delta4 = datetime.timedelta(days=10)
        delta5 = datetime.timedelta(days=90)

        # Test data is set so that deltas and delays will be
        # strictly increasing.
        cls.deltas = []      # experiment durations (end - start)
        cls.delays = []      # start delay relative to assigned-day midnight
        cls.days_long = []   # completed - assigned, in days

        # e0: started same day as assigned, zero duration
        end = stime + delta0
        e0 = Experiment.objects.create(
            name='e0', assigned=sday, start=stime, end=end,
            completed=end.date(), estimated_time=delta0,
        )
        cls.deltas.append(delta0)
        cls.delays.append(e0.start - datetime.datetime.combine(e0.assigned, midnight))
        cls.days_long.append(e0.completed - e0.assigned)

        # e1: started one day after assigned, tiny duration, data
        # set so that end time has no fractional seconds, which
        # tests an edge case on sqlite.
        delay = datetime.timedelta(1)
        end = stime + delay + delta1
        e1 = Experiment.objects.create(
            name='e1', assigned=sday, start=stime + delay, end=end,
            completed=end.date(), estimated_time=delta1,
        )
        cls.deltas.append(delta1)
        cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight))
        cls.days_long.append(e1.completed - e1.assigned)

        # e2: started three days after assigned, small duration
        end = stime + delta2
        e2 = Experiment.objects.create(
            name='e2', assigned=sday - datetime.timedelta(3), start=stime,
            end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1),
        )
        cls.deltas.append(delta2)
        cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight))
        cls.days_long.append(e2.completed - e2.assigned)

        # e3: started four days after assigned, medium duration
        delay = datetime.timedelta(4)
        end = stime + delay + delta3
        e3 = Experiment.objects.create(
            name='e3', assigned=sday, start=stime + delay, end=end,
            completed=end.date(), estimated_time=delta3,
        )
        cls.deltas.append(delta3)
        cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight))
        cls.days_long.append(e3.completed - e3.assigned)

        # e4: started 10 days after assignment, long duration
        end = stime + delta4
        e4 = Experiment.objects.create(
            name='e4', assigned=sday - datetime.timedelta(10), start=stime,
            end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1),
        )
        cls.deltas.append(delta4)
        cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight))
        cls.days_long.append(e4.completed - e4.assigned)

        # e5: started a month after assignment, very long duration
        delay = datetime.timedelta(30)
        end = stime + delay + delta5
        e5 = Experiment.objects.create(
            name='e5', assigned=sday, start=stime + delay, end=end,
            completed=end.date(), estimated_time=delta5,
        )
        cls.deltas.append(delta5)
        cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight))
        cls.days_long.append(e5.completed - e5.assigned)

        cls.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
for i, delta in enumerate(self.deltas):
test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_subtract(self):
for i, delta in enumerate(self.deltas):
test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_exclude(self):
for i, delta in enumerate(self.deltas):
test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i + 1:])
def test_date_comparison(self):
for i, days in enumerate(self.days_long):
test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i + 1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
    # A DateField compared against a DateTimeField expression.
    for i, delay in enumerate(self.delays):
        names = [obj.name for obj in Experiment.objects.filter(assigned__gt=F('start') - delay)]
        self.assertEqual(names, self.expnames[:i])
        names = [obj.name for obj in Experiment.objects.filter(assigned__gte=F('start') - delay)]
        self.assertEqual(names, self.expnames[:i + 1])
def test_mixed_comparisons2(self):
    # A DateTimeField compared against a DateField expression; only
    # whole-day deltas are meaningful here.
    for i, delay in enumerate(self.delays):
        whole_days = datetime.timedelta(delay.days)
        names = [obj.name for obj in Experiment.objects.filter(start__lt=F('assigned') + whole_days)]
        self.assertEqual(names, self.expnames[:i])
        names = [
            obj.name
            for obj in Experiment.objects.filter(start__lte=F('assigned') + whole_days + datetime.timedelta(1))
        ]
        self.assertEqual(names, self.expnames[:i + 1])
def test_delta_update(self):
    # Shifting start and end by the same delta must preserve durations.
    for delta in self.deltas:
        before = Experiment.objects.all()
        want_durations = [e.duration() for e in before]
        want_starts = [e.start + delta for e in before]
        want_ends = [e.end + delta for e in before]
        Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
        after = Experiment.objects.all()
        self.assertEqual(want_starts, [e.start for e in after])
        self.assertEqual(want_ends, [e.end for e in after])
        self.assertEqual(want_durations, [e.duration() for e in after])
def test_invalid_operator(self):
    # Multiplying a datetime expression by a timedelta has no SQL meaning.
    zero = datetime.timedelta(0)
    with self.assertRaises(DatabaseError):
        list(Experiment.objects.filter(start=F('start') * zero))
def test_durationfield_add(self):
    # DateTimeField + DurationField arithmetic in filters and annotations.
    zero_length = [obj.name for obj in Experiment.objects.filter(start=F('start') + F('estimated_time'))]
    self.assertEqual(zero_length, ['e0'])
    overran = [obj.name for obj in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))]
    self.assertEqual(overran, ['e2'])
    margin = datetime.timedelta(hours=1)
    finished_with_margin = [
        obj.name for obj in
        Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + margin)
    ]
    self.assertEqual(finished_with_margin, ['e4'])
    # Adding a NULL duration yields a NULL datetime.
    annotated = Experiment.objects.annotate(shifted=ExpressionWrapper(
        F('start') + Value(None, output_field=models.DurationField()),
        output_field=models.DateTimeField(),
    ))
    self.assertIsNone(annotated.first().shifted)
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_date_subtraction(self):
    # Date - date yields a DurationField usable in further lookups.
    durations = Experiment.objects.annotate(
        completion_duration=ExpressionWrapper(
            F('completed') - F('assigned'), output_field=models.DurationField()
        )
    )
    at_least_5 = {obj.name for obj in durations.filter(completion_duration__gte=datetime.timedelta(days=5))}
    self.assertEqual(at_least_5, {'e3', 'e4', 'e5'})
    at_least_120 = {obj.name for obj in durations.filter(completion_duration__gte=datetime.timedelta(days=120))}
    self.assertEqual(at_least_120, {'e5'})
    under_5 = {obj.name for obj in durations.filter(completion_duration__lt=datetime.timedelta(days=5))}
    self.assertEqual(under_5, {'e0', 'e1', 'e2'})
    # NULL operands propagate through the subtraction in both directions.
    nulled = Experiment.objects.annotate(difference=ExpressionWrapper(
        F('completed') - Value(None, output_field=models.DateField()),
        output_field=models.DurationField(),
    ))
    self.assertIsNone(nulled.first().difference)
    nulled = Experiment.objects.annotate(shifted=ExpressionWrapper(
        F('completed') - Value(None, output_field=models.DurationField()),
        output_field=models.DateField(),
    ))
    self.assertIsNone(nulled.first().shifted)
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_time_subtraction(self):
    # Time - time yields a DurationField result.
    Time.objects.create(time=datetime.time(12, 30, 15, 2345))
    diffed = Time.objects.annotate(
        difference=ExpressionWrapper(
            F('time') - Value(datetime.time(11, 15, 0), output_field=models.TimeField()),
            output_field=models.DurationField(),
        )
    )
    expected = datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345)
    self.assertEqual(diffed.get().difference, expected)
    # NULL operands propagate through the subtraction.
    nulled = Time.objects.annotate(difference=ExpressionWrapper(
        F('time') - Value(None, output_field=models.TimeField()),
        output_field=models.DurationField(),
    ))
    self.assertIsNone(nulled.first().difference)
    nulled = Time.objects.annotate(shifted=ExpressionWrapper(
        F('time') - Value(None, output_field=models.DurationField()),
        output_field=models.TimeField(),
    ))
    self.assertIsNone(nulled.first().shifted)
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_datetime_subtraction(self):
    # Datetime - datetime compared against a stored DurationField.
    under = [obj.name for obj in Experiment.objects.filter(estimated_time__gt=F('end') - F('start'))]
    self.assertEqual(under, ['e2'])
    over = [obj.name for obj in Experiment.objects.filter(estimated_time__lt=F('end') - F('start'))]
    self.assertEqual(over, ['e4'])
    # NULL operands propagate through the subtraction.
    nulled = Experiment.objects.annotate(difference=ExpressionWrapper(
        F('start') - Value(None, output_field=models.DateTimeField()),
        output_field=models.DurationField(),
    ))
    self.assertIsNone(nulled.first().difference)
    nulled = Experiment.objects.annotate(shifted=ExpressionWrapper(
        F('start') - Value(None, output_field=models.DurationField()),
        output_field=models.DateTimeField(),
    ))
    self.assertIsNone(nulled.first().shifted)
@skipUnlessDBFeature('supports_temporal_subtraction')
def test_datetime_subtraction_microseconds(self):
    # A huge microsecond delta must round-trip through update + subtraction.
    huge = datetime.timedelta(microseconds=8999999999999999)
    Experiment.objects.update(end=F('start') + huge)
    annotated = Experiment.objects.annotate(
        delta=ExpressionWrapper(F('end') - F('start'), output_field=models.DurationField())
    )
    for experiment in annotated:
        self.assertEqual(experiment.delta, huge)
def test_duration_with_datetime(self):
    # Exclude e1 which has very high precision so we can test this on all
    # backends regardless of whether or not it supports
    # microsecond_precision.
    overran = Experiment.objects.exclude(name='e1').filter(
        completed__gt=self.stime + F('estimated_time'),
    ).order_by('name')
    self.assertQuerysetEqual(overran, ['e3', 'e4', 'e5'], lambda e: e.name)
def test_duration_with_datetime_microseconds(self):
    # Adding a huge microsecond delta in SQL must match Python arithmetic.
    huge = datetime.timedelta(microseconds=8999999999999999)
    annotated = Experiment.objects.annotate(dt=ExpressionWrapper(
        F('start') + huge,
        output_field=models.DateTimeField(),
    ))
    for experiment in annotated:
        self.assertEqual(experiment.dt, experiment.start + huge)
def test_date_minus_duration(self):
    # DateField minus a DurationField Value used inside a filter.
    four_days = Value(datetime.timedelta(days=4), output_field=models.DurationField())
    more_than_4_days = Experiment.objects.filter(assigned__lt=F('completed') - four_days)
    self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name)
def test_negative_timedelta_update(self):
    """Chained annotations with negative timedeltas shift 'start' backwards."""
    # subtract 30 seconds, 30 minutes, 2 hours and 2 days
    experiments = Experiment.objects.filter(name='e0').annotate(
        start_sub_seconds=F('start') + datetime.timedelta(seconds=-30),
    ).annotate(
        start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30),
    ).annotate(
        start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2),
    ).annotate(
        new_start=F('start_sub_hours') + datetime.timedelta(days=-2),
    )
    expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0)
    # subtract 30 microseconds
    experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30))
    # NOTE(review): presumably e0's fixture start carries 747000us, so 30us
    # less leaves 746970us -- confirm against setUpTestData (not visible here).
    expected_start += datetime.timedelta(microseconds=+746970)
    experiments.update(start=F('new_start'))
    e0 = Experiment.objects.get(name='e0')
    self.assertEqual(e0.start, expected_start)
class ValueTests(TestCase):
    """Behaviour of Value() expressions: updates, deconstruction, equality."""

    def test_update_TimeField_using_Value(self):
        one_oclock = datetime.time(1)
        Time.objects.create()
        Time.objects.update(time=Value(one_oclock, output_field=TimeField()))
        self.assertEqual(Time.objects.get().time, one_oclock)

    def test_update_UUIDField_using_Value(self):
        identifier = uuid.UUID('12345678901234567890123456789012')
        UUID.objects.create()
        UUID.objects.update(uuid=Value(identifier, output_field=UUIDField()))
        self.assertEqual(UUID.objects.get().uuid, identifier)

    def test_deconstruct(self):
        # Without an output_field, deconstruction carries only the raw value.
        value = Value('name')
        path, args, kwargs = value.deconstruct()
        self.assertEqual(path, 'django.db.models.expressions.Value')
        self.assertEqual(args, (value.value,))
        self.assertEqual(kwargs, {})

    def test_deconstruct_output_field(self):
        # An explicit output_field must survive deconstruction.
        value = Value('name', output_field=CharField())
        path, args, kwargs = value.deconstruct()
        self.assertEqual(path, 'django.db.models.expressions.Value')
        self.assertEqual(args, (value.value,))
        self.assertEqual(len(kwargs), 1)
        self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct())

    def test_equal(self):
        value = Value('name')
        self.assertEqual(value, Value('name'))
        self.assertNotEqual(value, Value('username'))

    def test_hash(self):
        mapping = {Value('name'): 'Bob'}
        self.assertIn(Value('name'), mapping)
        self.assertEqual(mapping[Value('name')], 'Bob')

    def test_equal_output_field(self):
        # Equality takes the output_field into account.
        value = Value('name', output_field=CharField())
        same_value = Value('name', output_field=CharField())
        other_value = Value('name', output_field=TimeField())
        no_output_field = Value('name')
        self.assertEqual(value, same_value)
        self.assertNotEqual(value, other_value)
        self.assertNotEqual(value, no_output_field)

    def test_raise_empty_expressionlist(self):
        msg = 'ExpressionList requires at least one expression'
        with self.assertRaisesMessage(ValueError, msg):
            ExpressionList()
class FieldTransformTests(TestCase):
    """Field transforms (e.g. __month) used inside aggregates and values()."""

    @classmethod
    def setUpTestData(cls):
        cls.sday = datetime.date(2010, 6, 25)
        cls.stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
        two_days = datetime.timedelta(2)
        cls.ex1 = Experiment.objects.create(
            name='Experiment 1',
            assigned=cls.sday,
            completed=cls.sday + two_days,
            estimated_time=two_days,
            start=cls.stime,
            end=cls.stime + two_days,
        )

    def test_month_aggregation(self):
        self.assertEqual(
            Experiment.objects.aggregate(month_count=Count('assigned__month')),
            {'month_count': 1}
        )

    def test_transform_in_values(self):
        self.assertQuerysetEqual(
            Experiment.objects.values('assigned__month'),
            ["{'assigned__month': 6}"]
        )

    def test_multiple_transforms_in_values(self):
        self.assertQuerysetEqual(
            Experiment.objects.values('end__date__month'),
            ["{'end__date__month': 6}"]
        )
class ReprTests(SimpleTestCase):
    """Pin the exact repr() output of expressions, functions, and aggregates."""

    def test_expressions(self):
        self.assertEqual(
            repr(Case(When(a=1))),
            "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>"
        )
        self.assertEqual(
            repr(When(Q(age__gte=18), then=Value('legal'))),
            "<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value(legal)>"
        )
        self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)")
        self.assertEqual(repr(F('published')), "F(published)")
        self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>")
        self.assertEqual(
            repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())),
            "ExpressionWrapper(F(cost) + F(tax))"
        )
        self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)")
        self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)')
        self.assertEqual(repr(Random()), "Random()")
        self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])")
        self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))")
        self.assertEqual(repr(Value(1)), "Value(1)")
        self.assertEqual(
            repr(ExpressionList(F('col'), F('anothercol'))),
            'ExpressionList(F(col), F(anothercol))'
        )
        self.assertEqual(
            repr(ExpressionList(OrderBy(F('col'), descending=False))),
            'ExpressionList(OrderBy(F(col), descending=False))'
        )

    def test_functions(self):
        self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
        self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
        self.assertEqual(repr(Length('a')), "Length(F(a))")
        self.assertEqual(repr(Lower('a')), "Lower(F(a))")
        self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
        self.assertEqual(repr(Upper('a')), "Upper(F(a))")

    def test_aggregates(self):
        self.assertEqual(repr(Avg('a')), "Avg(F(a))")
        self.assertEqual(repr(Count('a')), "Count(F(a))")
        self.assertEqual(repr(Count('*')), "Count('*')")
        self.assertEqual(repr(Max('a')), "Max(F(a))")
        self.assertEqual(repr(Min('a')), "Min(F(a))")
        self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)")
        self.assertEqual(repr(Sum('a')), "Sum(F(a))")
        self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)")

    def test_distinct_aggregates(self):
        self.assertEqual(repr(Count('a', distinct=True)), "Count(F(a), distinct=True)")
        self.assertEqual(repr(Count('*', distinct=True)), "Count('*', distinct=True)")

    def test_filtered_aggregates(self):
        # 'filter' intentionally shadows the builtin to exercise the kwarg name.
        filter = Q(a=1)
        self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))")
        self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))")
        self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))")
        self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))")
        self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)")
        self.assertEqual(repr(Sum('a', filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))")
        self.assertEqual(
            repr(Variance('a', sample=True, filter=filter)),
            "Variance(F(a), filter=(AND: ('a', 1)), sample=True)"
        )
        self.assertEqual(
            repr(Count('a', filter=filter, distinct=True)), "Count(F(a), distinct=True, filter=(AND: ('a', 1)))"
        )
class CombinableTests(SimpleTestCase):
    """Combinable rejects &/| in favour of .bitand()/.bitor()."""
    bitwise_msg = 'Use .bitand() and .bitor() for bitwise logical operations.'

    def test_negation(self):
        combinable = Combinable()
        # Unary minus is implemented as multiplication by -1.
        self.assertEqual(-combinable, combinable * -1)

    def test_and(self):
        with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
            Combinable() & Combinable()

    def test_or(self):
        with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
            Combinable() | Combinable()

    def test_reversed_and(self):
        with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
            object() & Combinable()

    def test_reversed_or(self):
        with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
            object() | Combinable()
|
|
"""Structure container types.
A pstruct.type is used to create a data structure that is keyed by field names.
There are a few basic methods that are provided for a user to derive information
from an instantiated type. A pstruct.type's interface inherits from
ptype.container and will always have a .value that's a list. In most cases, a
pstruct.type can be treated as a python dict.
The pstruct interface provides the following methods on top of the methods
required to provide a mapping-type interface.
class interface(pstruct.type):
# the fields describing the format of the structure
_fields_ = [
(sub-type, 'name'),
...
]
def alias(self, name, target)
'''Alias the key ``name`` to ``target``.'''
def unalias(self, name):
'''Remove the alias ``name``.'''
def append(self, object):
'''Append ``object`` to structure keyed by /object.shortname()/'''
Example usage:
# define a type
from ptypes import pstruct
class type(pstruct.type):
_fields_ = [(subtype1, 'name1'), (subtype2, 'name2')]
# instantiate and load a type
instance = type()
instance.load()
# fetch a particular sub-element
print(instance['name1'])
# assign a sub-element
instance['name2'] = new-instance
# create an alias
instance.alias('alternative-name', 'name1')
# remove an alias
instance.unalias('alternative-name')
"""
import functools, operator, itertools
from . import ptype, utils, pbinary, error
__all__ = ['type', 'make']
from . import config
# Module-wide configuration and the logger used for structure diagnostics.
Config = config.defaults
Log = Config.log.getChild('pstruct')
# Setup some version-agnostic types and utilities that we can perform checks with
__izip_longest__ = utils.izip_longest
string_types = utils.string_types
class __structure_interface__(ptype.container):
    """Mapping-style interface over ptype.container: elements are addressable
    by (case-insensitive) field name via an internal name -> index cache."""

    def __init__(self, *args, **kwds):
        super(__structure_interface__, self).__init__(*args, **kwds)
        # Case-insensitive field/alias name -> index into self.value.
        self.__fastindex__ = {}

    def alias(self, target, *aliases):
        '''Add any of the specified aliases to point to the target field.'''
        res = self.__getindex__(target)
        for item in aliases:
            self.__fastindex__[item.lower()] = res
        # Return the index that the aliases now resolve to.
        return res

    def unalias(self, *aliases):
        '''Remove the specified aliases from the structure.'''
        # Refuse to remove names that are actual fields rather than aliases.
        lowerfields = {name.lower() for _, name in self._fields_ or []}
        loweritems = {item.lower() for item in aliases}
        if lowerfields & loweritems:
            message = "Unable to remove the specified fields ({:s}) from the available aliases.".format(', '.join(item for item in lowerfields & loweritems))
            raise error.UserError(self, '__structure_interface__.unalias', message)
        indices = [self.__fastindex__.pop(item) for item in loweritems if item in self.__fastindex__]
        # Number of aliases that were actually present and removed.
        return len(indices)

    def append(self, object):
        '''L.append(object) -- append an element to a pstruct.type and return its offset.'''
        return self.__append__(object)

    def __append__(self, object):
        # Keep the fast name index in sync with the newly appended element.
        current, name = len(self.value), object.shortname()
        offset = super(__structure_interface__, self).__append__(object)
        self.value[current].setoffset(offset, recurse=True)
        self.__fastindex__[name.lower()] = current
        return offset

    def __getindex__(self, name):
        '''x.__getitem__(y) <==> x[y]'''
        if not isinstance(name, string_types):
            raise error.UserError(self, '__structure_interface__.__getindex__', message='Element names must be of a str type.')
        # Fast path: consult the cache, but only trust indices that are
        # actually within the currently loaded elements.
        try:
            index = self.__fastindex__[name.lower()]
            if 0 <= index < len(self.value):
                return index
        except KeyError:
            pass
        # Slow path: scan the declared fields and memoize the result.
        for index, (_, fld) in enumerate(self._fields_ or []):
            if fld.lower() == name.lower():
                return self.__fastindex__.setdefault(name.lower(), index)
            continue
        raise KeyError(name)

    ## informational methods
    def __properties__(self):
        # Flag structures whose loaded element count disagrees with _fields_.
        result = super(__structure_interface__, self).__properties__()
        if self.initializedQ():
            if len(self.value) < len(self._fields_ or []):
                result['abated'] = True
            elif len(self.value) > len(self._fields_ or []):
                result['inflated'] = True
            return result
        return result

    ## list methods
    def keys(self):
        '''D.keys() -> list of all of the names of D's fields'''
        return [ name for name in self.__keys__() ]

    def values(self):
        '''D.keys() -> list of all of the values of D's fields'''
        return [res for res in self.__values__()]

    def items(self):
        '''D.items() -> list of D's (name, value) fields, as 2-tuples'''
        return [(name, item) for name, item in self.__items__()]

    ## iterator methods
    def iterkeys(self):
        '''D.iterkeys() -> an iterator over the names of D's fields'''
        for name in self.__keys__():
            yield name
        return

    def itervalues(self):
        '''D.itervalues() -> an iterator over the values of D's fields'''
        for res in self.__values__():
            yield res
        return

    def iteritems(self):
        '''D.iteritems() -> an iterator over the (name, value) fields of D'''
        for name, item in self.__items__():
            yield name, item
        return

    ## internal dict methods
    def __keys__(self):
        # Names come from the declaration, not from the loaded elements.
        for _, name in self._fields_ or []:
            yield name
        return

    def __values__(self):
        for item in self.value:
            yield item
        return

    def __items__(self):
        # zip() truncates to the shorter of declaration and loaded elements.
        for (_, name), item in zip(self._fields_ or [], self.value):
            yield name, item
        return

    ## method overloads
    def __contains__(self, name):
        '''D.__contains__(k) -> True if D has a field named k, else False'''
        if not isinstance(name, string_types):
            raise error.UserError(self, '__structure_interface__.__contains__', message='Element names must be of a str type.')
        return name.lower() in self.__fastindex__

    def __iter__(self):
        '''D.__iter__() <==> iter(D)'''
        if self.value is None:
            raise error.InitializationError(self, '__structure_interface__.__iter__')
        for name in self.iterkeys():
            yield name
        return

    def __getitem__(self, name):
        '''x.__getitem__(y) <==> x[y]'''
        if not isinstance(name, string_types):
            raise error.UserError(self, '__structure_interface__.__contains__', message='Element names must be of a str type.')
        return super(__structure_interface__, self).__getitem__(name)

    def __setitem__(self, name, value):
        '''x.__setitem__(i, y) <==> x[i]=y'''
        index = self.__getindex__(name)
        result = super(__structure_interface__, self).__setitem__(index, value)
        # Preserve the field name on the newly assigned element.
        result.__name__ = name
        return result

    def __getstate__(self):
        # Serialize the parent state along with the name index cache.
        return super(__structure_interface__, self).__getstate__(), self.__fastindex__,

    def __setstate__(self, state):
        state, self.__fastindex__, = state
        super(__structure_interface__, self).__setstate__(state)
class type(__structure_interface__):
    '''
    A container for managing structured/named data

    Settable properties:
        _fields_:array( tuple( ptype, name ), ... )<w>
            This contains which elements the structure is composed of
    '''
    _fields_ = None     # list of (type, name) tuples
    ignored = ptype.container.__slots__['ignored'] | {'_fields_'}

    def initializedQ(self):
        # If blocksize() is not overridden, defer entirely to the parent.
        if utils.callable_eq(self.blocksize, ptype.container.blocksize):
            return super(type, self).initializedQ()
        # A custom blocksize means we're initialized once at least that many
        # bytes have been consumed.
        res = self.value is not None
        try:
            res = res and self.size() >= self.blocksize()
        except Exception as E:
            path = str().join(map("<{:s}>".format, self.backtrace()))
            Log.warning("type.initializedQ : {:s} : .blocksize() raised an exception when attempting to determine the initialization state of the instance : {!s} : {:s}".format(self.instance(), E, path), exc_info=True)
        finally:
            return res

    def copy(self, **attrs):
        # Duplicate the instance together with a shallow copy of _fields_.
        result = super(type, self).copy(**attrs)
        result._fields_ = self._fields_[:]
        return result

    def alloc(self, **fields):
        """Allocate the current instance. Attach any elements defined in **fields to container."""
        result = super(type, self).alloc()
        if fields:
            # we need to iterate through all of the fields first
            # in order to consolidate any aliases that were specified.
            # this is a hack, and really we should first be sorting our
            # fields that were provided by the fields in the structure.
            names = [name for _, name in self._fields_ or []]
            fields = {names[self.__getindex__(name)] : item for name, item in fields.items()}
            # now we can iterate through our structure fields to allocate
            # them using the fields given to us by the caller.
            offset = result.getoffset()
            for idx, (t, name) in enumerate(self._fields_ or []):
                if name not in fields:
                    # Field not supplied by the caller; re-instantiate it when
                    # the type is resolveable (e.g. a closure/dynamic type).
                    if ptype.isresolveable(t):
                        result.value[idx] = self.new(t, __name__=name, offset=offset).a
                    offset += result.value[idx].blocksize()
                    continue
                item = fields[name]
                if ptype.isresolveable(item) or ptype.istype(item):
                    # A type: instantiate and allocate it in place.
                    result.value[idx] = self.new(item, __name__=name, offset=offset).a
                elif isinstance(item, ptype.generic):
                    # An instance: adopt it directly.
                    result.value[idx] = self.new(item, __name__=name, offset=offset)
                elif isinstance(item, dict):
                    # A dict: recursively allocate the sub-container's fields.
                    result.value[idx].alloc(**item)
                else:
                    # Anything else: treat it as a value to assign.
                    result.value[idx].set(item)
                offset += result.value[idx].blocksize()
            # NOTE(review): recurses on self rather than result -- they appear
            # to refer to the same instance here; confirm against the parent's
            # alloc() contract.
            self.setoffset(self.getoffset(), recurse=True)
        return result

    def __append_type(self, offset, cons, name, **attrs):
        # Generate a replacement name when the field collides with an element
        # that has already been appended.
        lowername = name.lower()
        if lowername in self.__fastindex__ and self.__fastindex__[lowername] < len(self.value):
            _, name = name, u"{:s}_{:x}".format(name, (offset - self.getoffset()) if Config.pstruct.use_offset_on_duplicate else len(self.value))
            path = str().join(map("<{:s}>".format, self.backtrace()))
            Log.warning("type.load : {:s} : Duplicate element name {!r}. Using generated name {!r} : {:s}".format(self.instance(), _, name, path))
        res = self.new(cons, __name__=name, offset=offset, **attrs)
        current = len(self.value)
        self.value.append(res)
        # NOTE(review): the index is cached under the original lower-cased
        # name even when a generated name was substituted -- confirm intended.
        self.__fastindex__[lowername] = current
        # Containers and resolveable types are loaded immediately.
        if ptype.iscontainer(cons) or ptype.isresolveable(cons):
            return res.load()
        return res

    def load(self, **attrs):
        with utils.assign(self, **attrs):
            self.value = []

            # check if the user implement a custom blocksize so we can keep track
            # of how far to populate our structure or if we don't even need to do
            # anything

            # XXX: it might be safer to call .blocksize() and check for InitializationError
            current = None if utils.callable_eq(self.blocksize, type.blocksize) else 0
            if current is not None and self.blocksize() <= 0:
                offset = self.getoffset()

                # Populate the structure with undefined fields so that things are still
                # somewhat initialized...
                for i, (t, name) in enumerate(self._fields_ or []):
                    self.__append_type(offset, ptype.undefined, name)
                return super(type, self).load()

            try:
                offset = self.getoffset()
                for i, (t, name) in enumerate(self._fields_ or []):
                    # create each element
                    item = self.__append_type(offset, t, name)

                    # check if we've hit our blocksize
                    bs = item.blocksize()
                    if current is not None:
                        try:
                            res = self.blocksize()
                        except Exception as E:
                            path = str().join(map("<{:s}>".format, self.backtrace()))
                            Log.debug("type.load : {:s} : Custom blocksize raised an exception at offset {:#x}, field {!r} : {:s}".format(self.instance(), current, item.instance(), path), exc_info=True)
                        else:
                            # Stop once the next element would exceed the
                            # user-provided blocksize.
                            if current + bs > res:
                                path = str().join(map("<{:s}>".format, self.backtrace()))
                                Log.info("type.load : {:s} : Custom blocksize caused structure to terminate at offset {:#x}, field {!r} : {:s}".format(self.instance(), current, item.instance(), path))
                                break
                        current += bs
                    offset += bs

            except error.LoadError as E:
                raise error.LoadError(self, exception=E)

            # add any missing elements with a 0 blocksize
            count = len(self._fields_ or []) - len(self.value)
            if count > 0:
                for i, (t, name) in enumerate(self._fields_[-count:]):
                    item = self.__append_type(offset, t, name, blocksize=lambda: 0)
                    offset += item.blocksize()

            # complete the second pass
            result = super(type, self).load()
        return result

    def repr(self, **options):
        # One line per field, terminated with a newline.
        return self.details(**options) + '\n'

    def details(self, **options):
        # Render "[offset] type name value" for each field.
        gettypename = lambda t: t.typename() if ptype.istype(t) else t.__name__
        if self.value is None:
            # Uninitialized: show the declared layout with unknown values.
            f = functools.partial(u"[{:x}] {:s} {:s} ???".format, self.getoffset())
            res = (f(utils.repr_class(gettypename(t)), name) for t, name in self._fields_ or [])
            return '\n'.join(res)
        result, offset = [], self.getoffset()
        fmt = functools.partial(u"[{:x}] {:s} {:s} {:s}".format, offset)
        for fld, item in __izip_longest__(self._fields_ or [], self.value):
            t, name = fld or (item.__class__, item.name())
            if item is None:
                # Declared but never loaded: substitute an undefined element.
                i = utils.repr_class(gettypename(t))
                item = ptype.undefined().a
                result.append(fmt(i, name, item.summary(**options)))
                continue
            offset = self.getoffset(getattr(item, '__name__', None) or name)
            instance = utils.repr_instance(item.classname(), item.name() or name)
            value = u'???' if item.value is None else item.summary(**options)
            properties = ','.join(u"{:s}={!r}".format(k, v) for k, v in item.properties().items())
            result.append(u"[{:x}] {:s}{:s} {:s}".format(offset, instance, u" {{{:s}}}".format(properties) if properties else u"", value))
            offset += item.size()
        if len(result) > 0:
            return '\n'.join(result)
        return u"[{:x}] Empty[]".format(self.getoffset())

    def __setvalue__(self, *values, **fields):
        result = self
        if result.initializedQ():
            value, = values or ((),)
            if isinstance(value, dict):
                # NOTE(review): dict.update() returns None, so a dict argument
                # is merged into **fields and the iterable path below is
                # skipped -- confirm this is the intended behaviour.
                value = fields.update(value)
            if value:
                # An iterable assigns every field positionally; lengths must match.
                if len(result._fields_) != len(value):
                    raise error.UserError(result, 'type.set', message='Refusing to assign iterable to instance due to differing lengths')
                result = super(type, result).__setvalue__(*value)
            for name, item in fields.items():
                idx = self.__getindex__(name)
                if ptype.isresolveable(item) or ptype.istype(item):
                    result.value[idx] = self.new(item, __name__=name).a
                elif isinstance(item, ptype.generic):
                    result.value[idx] = self.new(item, __name__=name)
                elif isinstance(item, dict):
                    result.value[idx].set(**item)
                else:
                    result.value[idx].set(item)
                continue
            result.setoffset(result.getoffset(), recurse=True)
            return result
        # Uninitialized: allocate first, then retry the assignment.
        return result.a.__setvalue__(*values, **fields)

    def __getstate__(self):
        # Serialize the parent state together with the field declaration.
        return super(type, self).__getstate__(), self._fields_,

    def __setstate__(self, state):
        state, self._fields_, = state
        super(type, self).__setstate__(state)
def make(fields, **attrs):
    """Given a set of initialized ptype objects, return a pstruct object describing it.

    This will automatically create padding in the structure for any holes that were found.
    """
    fields = [item for item in fields]

    # Sort the instances by offset and group those sharing the same offset.
    items = sorted(fields, key=operator.methodcaller('getoffset'))
    grouped = [(offset, [item for item in items]) for offset, items in itertools.groupby(items, key=operator.methodcaller('getoffset'))]
    baseoffset = next(position for position, _ in grouped)

    # FIXME: we need to build a segment tree of all of our items that are grouped
    #        so that we can figure out what elements are overlapped and how we
    #        should group them into a union.
    if attrs.get('offset', baseoffset) > baseoffset:
        raise ValueError("{:s}.make : Unable to specify a base offset ({:#x}) after the offset of any existing fields ({:#x} > {:#x}).".format(__name__, attrs.get('offset', baseoffset), attrs.get('offset', baseoffset), grouped[0][0]))

    # define a closure that will take the provided fields and make them into
    # a dynamic.union type that we can add into a structure.
    from . import dynamic
    def make_union(name, items):
        definition = []
        for index, instance in enumerate(items):
            definition.append((instance.__class__, "anonymous_{:d}".format(1 + index) if getattr(instance, '__name__', None) is None else instance.shortname()))
        # create the union that we plan on returning
        class union_t(dynamic.union):
            pass
        union_t.__name__ = name
        union_t._fields_ = definition
        return union_t

    # iterate through all of the fields that we've grouped, and pad them
    # into a structure type that we can return.
    result, offset = [], attrs.setdefault('offset', baseoffset)
    for expected, items in grouped:
        if len(items) > 1:
            # Multiple instances at the same offset become a union.
            object_t = make_union("__union_{:x}".format(expected), items)
            location, name, size = expected, "field_{:x}".format(expected), object_t().a.blocksize()
        elif len(items) > 0:
            object = items[0]
            location, size = object.getoffset(), object.blocksize()
            object_t, name = object.__class__, "field_{:x}".format(expected) if getattr(object, '__name__', None) is None else object.shortname()
        else:
            Log.warning("{:s}.make : An unexpected number of items ({:d}) were found for the specified offset ({:+#x}).".format(__name__, len(items), expected))
            continue
        # Insert an explicit padding block for any hole before this field.
        delta = location - offset
        if delta > 0:
            result.append((ptype.clone(ptype.block, length=delta), u'__padding_{:x}'.format(offset)))
            offset += delta
        if size > 0:
            result.append((object_t, name))
            offset += size
        continue

    # we need to hack up the offset and place it into the position attribute
    # so that the user's choice will work appear in the constructed type.
    attrs['__position__'] = attrs.pop('offset'),
    return ptype.clone(type, _fields_=result, **attrs)
if __name__ == '__main__':
    # Minimal self-test harness: a test passes by raising Success, and fails
    # by raising Failure, any other exception, or by not raising at all.
    class Result(Exception): pass
    class Success(Result): pass
    class Failure(Result): pass

    TestCaseList = []
    def TestCase(fn):
        def harness(**kwds):
            name = fn.__name__
            try:
                res = fn(**kwds)
                # Returning without raising Success counts as a failure.
                raise Failure
            except Success as E:
                print('%s: %r'% (name, E))
                return True
            except Failure as E:
                print('%s: %r'% (name, E))
            except Exception as E:
                print('%s: %r : %r'% (name, Failure(), E))
            return False
        TestCaseList.append(harness)
        # Return the original function so it stays importable/callable.
        return fn

if __name__ == '__main__':
    import ptypes
    from ptypes import ptype, pstruct, provider, pint
    # Fixed-width opaque types used by the structure tests below.
    class uint8(ptype.type):
        length = 1
    class uint16(ptype.type):
        length = 2
    class uint32(ptype.type):
        length = 4
@TestCase
def test_structure_serialize():
class st(pstruct.type):
_fields_ = [
(uint8, 'a'),
(uint8, 'b'),
(uint8, 'c'),
]
source = provider.bytes(b'ABCDEFG')
x = st(source=source)
x = x.l
if x.serialize() == b'ABC':
raise Success
@TestCase
def test_structure_fetch():
class st(pstruct.type):
_fields_ = [
(uint8, 'a'),
(uint16, 'b'),
(uint8, 'c'),
]
source = provider.bytes(b'ABCDEFG')
x = st(source=source)
x = x.l
if x['b'].serialize() == b'BC':
raise Success
@TestCase
def test_structure_assign_same():
class st(pstruct.type):
_fields_ = [
(uint8, 'a'),
(uint32, 'b'),
(uint8, 'c'),
]
source = provider.bytes(b'ABCDEFG')
v = uint32().set(b'XXXX')
x = st(source=source)
x = x.l
x['b'] = v
if x.serialize() == b'AXXXXF':
raise Success
@TestCase
def test_structure_assign_diff():
class st(pstruct.type):
_fields_ = [
(uint8, 'a'),
(uint32, 'b'),
(uint8, 'c'),
]
source = provider.bytes(b'ABCDEFG')
v = uint16().set(b'XX')
x = st(source=source)
x = x.l
x['b'] = v
x.setoffset(x.getoffset(),recurse=True)
if x.serialize() == b'AXXF' and x['c'].getoffset() == 3:
raise Success
@TestCase
def test_structure_assign_partial():
class st(pstruct.type):
_fields_ = [
(uint32, 'a'),
(uint32, 'b'),
(uint32, 'c'),
]
source = provider.bytes(b'AAAABBBBCCC')
x = st(source=source)
try:
x = x.l
raise Failure
except ptypes.error.LoadError:
pass
if x.v is not None and not x.initializedQ() and x['b'].serialize() == b'BBBB' and x['c'].size() == 3:
raise Success
    # .set with keyword arguments populates an unloaded structure's fields
    # (values serialize little-endian per pint defaults).
    @TestCase
    def test_structure_set_uninitialized_flat():
        class st(pstruct.type):
            _fields_ = [
                (pint.uint32_t, 'a'),
                (pint.uint32_t, 'b'),
                (pint.uint32_t, 'c'),
            ]
        a = st(source=provider.empty())
        a.set(a=5, b=10, c=20)
        if a.serialize() == b'\x05\x00\x00\x00\x0a\x00\x00\x00\x14\x00\x00\x00':
            raise Success
    # .set with a nested tuple recurses into sub-structures positionally.
    @TestCase
    def test_structure_set_uninitialized_complex():
        class sa(pstruct.type):
            _fields_ = [(pint.uint16_t, 'b')]
        class st(pstruct.type):
            _fields_ = [(pint.uint32_t, 'a'), (sa, 'b')]
        a = st(source=provider.empty())
        a.set((5, (10,)))
        if a['b']['b'].int() == 10:
            raise Success
    # .alloc builds an instance without a source, taking plain field values...
    @TestCase
    def test_structure_alloc_value():
        class st(pstruct.type):
            _fields_ = [(pint.uint16_t,'a'),(pint.uint32_t,'b')]
        a = st().alloc(a=0xdead,b=0x0d0e0a0d)
        if a['a'].int() == 0xdead and a['b'].int() == 0x0d0e0a0d:
            raise Success
    # ...or whole instances, which replace the declared field type outright
    # (here a uint32 instance overrides the declared uint16 for 'a').
    @TestCase
    def test_structure_alloc_instance():
        class st(pstruct.type):
            _fields_ = [(pint.uint16_t,'a'),(pint.uint32_t,'b')]
        a = st().alloc(a=pint.uint32_t().set(0x0d0e0a0d),b=0x0d0e0a0d)
        if a['a'].int() == 0x0d0e0a0d and a['b'].int() == 0x0d0e0a0d:
            raise Success
    # A callable field may size itself from an earlier field's value;
    # .li lazy-loads that field so it can be read during construction.
    @TestCase
    def test_structure_alloc_dynamic_value():
        class st(pstruct.type):
            def __b(self):
                return ptype.clone(pint.int_t, length=self['a'].li.int())
            _fields_ = [
                (pint.int8_t, 'a'),
                (__b, 'b'),
            ]
        a = st().alloc(a=3)
        if a['b'].size() == a['a'].int():
            raise Success
    # Same, but the driving field is supplied as a pre-set instance.
    @TestCase
    def test_structure_alloc_dynamic_instance():
        class st(pstruct.type):
            def __b(self):
                return ptype.clone(pint.int_t, length=self['a'].li.int())
            _fields_ = [
                (pint.int_t, 'a'),
                (__b, 'b'),
            ]
        a = st().alloc(a=pint.int32_t().set(4))
        if a['b'].size() == a['a'].int():
            raise Success
    # A dynamically-chosen sub-structure can itself be allocated and passed in.
    @TestCase
    def test_structure_alloc_container_dynamic_instance():
        class st1(pstruct.type): _fields_=[(pint.int8_t,'a'),(lambda s: ptype.clone(pint.int_t,length=s['a'].li.int()), 'b')]
        class st2(pstruct.type):
            def __b(self):
                if self['a'].li.int() == 2:
                    return st1
                return ptype.undefined
            _fields_ = [
                (pint.int8_t, 'a'),
                (__b, 'b'),
            ]
        a = st2().alloc(b=st1().alloc(a=2))
        if a['b']['a'].int() == a['b']['b'].size():
            raise Success
    # .a presumably allocates a zeroed instance; .set on an already-allocated
    # structure can then take a plain value...
    @TestCase
    def test_structure_set_initialized_value():
        class st(pstruct.type):
            _fields_ = [
                (pint.int32_t, 'a'),
            ]
        a = st().a.set(a=20)
        if a['a'].int() == 20:
            raise Success
    # ...a type (replacing the declared field type)...
    @TestCase
    def test_structure_set_initialized_type():
        class st(pstruct.type):
            _fields_ = [
                (pint.int_t, 'a'),
            ]
        a = st().a.set(a=pint.uint32_t)
        if a['a'].size() == 4:
            raise Success
    # ...or a fully-built instance (carrying both type and value).
    @TestCase
    def test_structure_set_initialized_instance():
        class st(pstruct.type):
            _fields_ = [
                (pint.int_t, 'a'),
            ]
        a = st().a.set(a=pint.uint32_t().set(20))
        if a['a'].size() == 4 and a['a'].int() == 20:
            raise Success
    # Setting a container type onto an undefined field instantiates it.
    @TestCase
    def test_structure_set_initialized_container():
        class st1(pstruct.type): _fields_=[(pint.int8_t,'a'),(pint.uint32_t,'b')]
        class st2(pstruct.type):
            _fields_ = [
                (pint.int32_t, 'a'),
                (ptype.undefined, 'b'),
            ]
        a = st2().a.set(b=st1)
        if isinstance(a['b'],st1):
            raise Success
    # .set also works without a prior allocation.
    @TestCase
    def test_structure_set_uninitialized_value():
        class st2(pstruct.type):
            _fields_ = [
                (pint.int32_t, 'a'),
                (ptype.undefined, 'b'),
            ]
        a = st2().set(a=5)
        if a['a'].int() == 5:
            raise Success
    # A field whose blocksize depends on its own offset must see the correct
    # offset during .alloc (here: size 0 at offset 0, else 1).
    @TestCase
    def test_structure_alloc_field_blocksize():
        class t(ptype.block):
            def blocksize(self):
                res = self.getoffset()
                return 0 if res == 0 else 1
        class st(pstruct.type):
            _fields_ = [
                (pint.int8_t, 'a'),
                (t, 'b'),
            ]
        a = st().alloc(a=3)
        if a.size() == 2:
            raise Success
    # Same guarantee when the field type is produced by a callable.
    @TestCase
    def test_structure_alloc_dynamic_field_blocksize():
        class t(ptype.block):
            def blocksize(self):
                res = self.getoffset()
                return 0 if res == 0 else 1
        class st(pstruct.type):
            _fields_ = [
                (pint.int8_t, 'a'),
                (lambda _: t, 'b'),
            ]
        a = st().alloc(a=3)
        if a.size() == 2:
            raise Success
    # pstruct.make builds a structure type from positioned instances,
    # inserting padding fields to fill the gaps between their offsets.
    @TestCase
    def test_make_structure_padding():
        items = []
        items.append(pint.uint16_t(offset=0x10))
        items.append(pint.uint8_t(offset=0))
        t = pstruct.make(items)
        instance = t().a
        # expect: uint8 @ 0, 0xf bytes of padding, uint16 @ 0x10
        if len(instance.value) == 3 and instance.value[1].size() == 0xf:
            raise Success
    # An explicit base offset below the first item adds leading padding.
    @TestCase
    def test_make_structure_offset_0():
        items = []
        items.append(pint.uint16_t(offset=0x1e))
        items.append(pint.uint8_t(offset=0x10))
        t = pstruct.make(items, offset=0)
        instance = t().a
        if len(instance.value) == 4 and instance.getoffset() == 0 and instance.value[0].size() == 0x10:
            raise Success
    @TestCase
    def test_make_structure_offset_1():
        items = []
        items.append(pint.uint8_t(offset=0x10))
        items.append(pint.uint16_t(offset=0x1e))
        t = pstruct.make(items, offset=0x8)
        instance = t().a
        if len(instance.value) == 4 and instance.getoffset() == 8 and instance.value[0].size() == 0x8:
            raise Success
    # Base offset equal to the first item's offset needs no leading pad field.
    @TestCase
    def test_make_structure_offset_2():
        items = []
        items.append(pint.uint8_t(offset=0x10))
        items.append(pint.uint16_t(offset=0x1e))
        t = pstruct.make(items, offset=0x10)
        instance = t().a
        if len(instance.value) == 3 and instance.getoffset() == 0x1 and instance.value[0].size() == 0x1:
            raise Success
    # Overlapping offsets are folded into a union-like field.
    @TestCase
    def test_make_structure_union():
        items = []
        items.append(pint.uint16_t(offset=0x10))
        items.append(pint.uint32_t(offset=4))
        items.append(pint.uint16_t(offset=4))
        items.append(pint.uint8_t(offset=0))
        t = pstruct.make(items)
        instance = t().a
        union, expected = instance.field(4), [4, 2]
        if len(instance.value) == 5 and union.getoffset() == 4 and union.blocksize() == union.size() == 4 and all(union[fld].size() == size for fld, size in zip(union.keys(), expected)):
            raise Success
    # .alias makes a second name resolve to an existing field for reads...
    @TestCase
    def test_structure_alias_0():
        class t(pstruct.type):
            _fields_ = [
                (pint.uint32_t, 'a'),
                (pint.uint32_t, 'b'),
                (pint.uint32_t, 'c'),
            ]
        x = t().alloc(a=1, b=2, c=3)
        x.alias('a', 'myfield')
        if x['myfield'].int() == 1:
            raise Success
    # ...and for writes via .set.
    @TestCase
    def test_structure_alias_1():
        class t(pstruct.type):
            _fields_ = [
                (pint.uint32_t, 'a'),
                (pint.uint32_t, 'b'),
                (pint.uint32_t, 'c'),
            ]
        x = t().alloc(a=1, b=2, c=3)
        x.alias('a', 'myfield')
        x.set(myfield=5)
        if x['myfield'].int() == 5:
            raise Success
    # Aliases declared in __init__ are usable as .alloc keywords.
    @TestCase
    def test_structure_alias_2():
        class t(pstruct.type):
            _fields_ = [
                (pint.uint32_t, 'a'),
                (pint.uint32_t, 'b'),
                (pint.uint32_t, 'c'),
            ]
            def __init__(self, **attrs):
                super(t, self).__init__(**attrs)
                self.alias('b', 'myfield')
        x = t().alloc(a=1, c=3, myfield=20)
        if x['myfield'].int() == 20:
            raise Success
    # Unaliasing a real field name is an error...
    @TestCase
    def test_structure_unalias_0():
        class t(pstruct.type):
            _fields_ = [
                (pint.uint32_t, 'a'),
                (pint.uint32_t, 'b'),
                (pint.uint32_t, 'c'),
            ]
        x = t().alloc(a=1, b=2, c=3)
        try:
            x.unalias('a')
        except Exception:
            raise Success
    # ...unaliasing an unknown name removes nothing (falsy result)...
    @TestCase
    def test_structure_unalias_1():
        class t(pstruct.type):
            _fields_ = [
                (pint.uint32_t, 'a'),
                (pint.uint32_t, 'b'),
                (pint.uint32_t, 'c'),
            ]
        x = t().alloc(a=1, b=2, c=3)
        if not x.unalias('item'):
            raise Success
    # ...and unalias returns the count of aliases it removed.
    @TestCase
    def test_structure_unalias_2():
        class t(pstruct.type):
            _fields_ = [
                (pint.uint32_t, 'a'),
                (pint.uint32_t, 'b'),
                (pint.uint32_t, 'c'),
            ]
        x = t().alloc(a=1, b=2, c=3)
        x.alias('a', 'fuck1', 'fuck2')
        if x.unalias('fuck1', 'fuck2') == 2:
            raise Success
if __name__ == '__main__':
    # Run every registered test case with debug logging enabled.
    import logging
    ptypes.config.defaults.log.setLevel(logging.DEBUG)
    results = []
    for t in TestCaseList:
        results.append( t() )
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module handles the reloading of service configuration files when they are changed.
In :mod:`config_files`, when a configuration file is changed, its base name is looked up
in this module's :data:`FILES_THAT_REQUIRE_RELOADS` map, and - if found - the function to
which it is mapped will be added to the :data:`NEEDED_RELOADS` set. Thus, to implement this
for a new service, all that need be done is to write a function to perform configuration
reloads, then add the names of configuration files to the map, pointing at the new function.
"""
import logging
import os
import subprocess
import typing
from functools import partial
import psutil
from .configuration import Configuration
from .utils import getYesNoResponse as getYN
#: Holds the set of reload/restart functions needed due to configuration file changes
NEEDED_RELOADS = set()

#: True if the host system has systemd - actual value set at runtime
HAS_SYSTEMD = False
try:
    # A clean 'systemctl' exit is taken as proof that systemd manages this host.
    subprocess.check_output(["systemctl", "--no-pager"], stderr=subprocess.STDOUT)
except (OSError, subprocess.CalledProcessError):
    # OSError (e.g. FileNotFoundError) means the systemctl binary doesn't even
    # exist, which the original 'except CalledProcessError' failed to catch and
    # would have crashed the probe on non-systemd hosts; CalledProcessError
    # means the binary exists but failed. Either way: no usable systemd.
    logging.debug("Host system does NOT have systemd - stack trace:", exc_info=True,stack_info=True)
else:
    HAS_SYSTEMD = True
def reloadATSConfigs(conf:Configuration) -> bool:
    """
    Reloads configuration files for the Apache Trafficserver caching HTTP
    proxy by calling ``traffic_ctl config reload``.

    :param conf: An object representing the configuration of :program:`traffic_ops_ort`
    :returns: whether or not the reload succeeded (as indicated by the exit code of
        ``traffic_ctl``)
    :raises OSError: when something goes wrong executing the child process
    """
    # First of all, ATS must be running for this to work
    if not setATSStatus(True, conf):
        logging.error("Cannot reload configs, ATS not running!")
        return False

    cmd = [os.path.join(conf.tsroot, "bin", "traffic_ctl"), "config", "reload"]
    cmdStr = ' '.join(cmd)

    # In interactive mode a user refusal skips the reload without failing the run.
    if conf.mode is Configuration.Modes.INTERACTIVE:
        if not getYN("Run command '%s' to reload configuration?" % cmdStr, default='Y'):
            logging.warning("Configuration will not be reloaded for Apache Trafficserver!")
            logging.warning("Changes will NOT be applied!")
            return True

    logging.info("Apache Trafficserver configuration reload will be done via: %s", cmdStr)
    # REPORT mode never touches the system.
    if conf.mode is Configuration.Modes.REPORT:
        return True

    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode:
        # Dump the child's output to aid debugging the failed reload.
        logging.debug("========== PROCESS STDOUT ==========")
        logging.debug("%s", stdout.decode())
        logging.debug("========== PROCESS STDERR ==========")
        logging.debug("%s", stderr.decode())
        logging.debug("====================================")
        return False
    return True
def restartATS(conf:Configuration) -> bool:
    """
    A convenience wrapper around :func:`setATSStatus` for restarts.

    :param conf: An object representing the configuration of :program:`traffic_ops_ort`
    :returns: whether or not the restart was successful (or unnecessary)
    """
    # BADASS and REPORT modes always restart; INTERACTIVE mode asks the user first.
    if conf.mode is Configuration.Modes.BADASS or conf.mode is Configuration.Modes.REPORT:
        shouldRestart = True
    else:
        shouldRestart = (conf.mode is Configuration.Modes.INTERACTIVE and
                         getYN("Restart ATS?", default='Y'))
    return setATSStatus(True, conf, restart=shouldRestart)
def restartService(service:str, conf:Configuration) -> bool:
    """
    Restarts a generic systemd service

    :param service: The name of the service to be restarted
    :param conf: An object representing the configuration of :program:`traffic_ops_ort`
    :returns: Whether or not the restart was successful
    """
    global HAS_SYSTEMD

    # Without systemd there is nothing we can do - treated as a non-failure.
    if not HAS_SYSTEMD:
        logging.warning("This system doesn't have systemd, services cannot be restarted")
        return True

    # REPORT mode never touches the system; INTERACTIVE mode needs user consent.
    if conf.mode is Configuration.Modes.REPORT:
        return True
    if conf.mode is Configuration.Modes.INTERACTIVE and not getYN("Restart %s?" % service, 'Y'):
        return True

    logging.info("Restarting %s", service)
    try:
        proc = subprocess.Popen(["systemctl", "restart", service],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        logging.debug("stdout: %s\nstderr: %s", stdout, stderr)
    except (OSError, subprocess.CalledProcessError) as e:
        logging.error("An error occurred when restarting %s: %s", service, e)
        logging.debug("%r", e, exc_info=True, stack_info=True)
        return False
    return True
#: A big ol' map of filenames to the services which require reloads when said files change
FILES_THAT_REQUIRE_RELOADS = {
    fname: reloadATSConfigs for fname in (
        "records.config",
        "remap.config",
        "parent.config",
        "cache.config",
        "hosting.config",
        "astats.config",
        "logs_xml.config",
        "ssl_multicert.config",
        "regex_revalidate.config",
    )
}
# These require a full ATS restart rather than a mere config reload
FILES_THAT_REQUIRE_RELOADS["plugin.config"] = restartATS
FILES_THAT_REQUIRE_RELOADS["ntpd.conf"] = partial(restartService, "ntpd")
FILES_THAT_REQUIRE_RELOADS["50-ats.rules"] = restartATS
def doReloads(conf:Configuration) -> bool:
    """
    Performs all necessary service restarts/configuration reloads

    :param conf: An object representing the configuration of :program:`traffic_ops_ort`
    :returns: whether or not the reloads/restarts went successfully
    """
    global NEEDED_RELOADS

    # If ATS is being restarted, configuration reloads will be implicit
    if restartATS in NEEDED_RELOADS and reloadATSConfigs in NEEDED_RELOADS:
        NEEDED_RELOADS.discard(reloadATSConfigs)

    # 'reloadFunc' avoids shadowing the (Python 2) builtin name 'reload'.
    for reloadFunc in NEEDED_RELOADS:
        try:
            succeeded = reloadFunc(conf)
        except OSError as e:
            logging.error("An error occurred when reloading service configuration files: %s",e)
            logging.debug("%s", e, exc_info=True, stack_info=True)
            return False
        if not succeeded:
            return False
    return True
def getProcessesIfRunning(name:str) -> typing.Optional[psutil.Process]:
    """
    Retrieves a process by name, if it exists.

    .. warning:: Process names don't have to be unique, this will return the process with the
        lowest PID that matches ``name``. This can also only return processes visible to the
        user running the Python interpreter.

    :param name: the name for which to search
    :returns: a process if one is found that matches ``name``, else :const:`None`
    :raises OSError: if the process table cannot be iterated
    """
    logging.debug("Iterating process list - looking for %s", name)

    # Scan lazily, stopping at the first process whose name matches.
    found = next((proc for proc in psutil.process_iter() if proc.name() == name), None)
    if found is not None:
        logging.debug("Running process found (pid: %d)", found.pid)
        return found

    logging.debug("No process named '%s' was found", name)
    return None
def setATSStatus(status:bool, conf:Configuration, restart:bool = False) -> bool:
    """
    Sets the status of the system's ATS process.

    :param status: Specifies whether ATS should be running (:const:`True`) or not (:const:`False`)
    :param conf: An object representing the configuration of :program:`traffic_ops_ort`
    :param restart: If this is :const:`True`, then ATS will be restarted if it is already running

    .. note:: ``restart`` has no effect if ``status`` is :const:`False`

    :returns: whether or not the status setting was successful (or unnecessary)
    :raises OSError: when there is a problem executing the subprocess
    """
    # "[TS_MAIN]" is the process name the ATS main process runs under.
    proc = getProcessesIfRunning("[TS_MAIN]")

    # Decide which 'trafficserver' sub-command (if any) is needed.
    if proc is None:
        if not status:
            logging.info("ATS already stopped - nothing to do")
            return True
        logging.info("ATS not running, will be started")
        arg = "start"
    elif not status:
        # ATS is running and should be stopped
        logging.info("ATS is running, will be stopped")
        arg = "stop"
    elif proc.status() not in {psutil.STATUS_RUNNING, psutil.STATUS_SLEEPING}:
        # ATS is running, but has a bad status
        logging.warning("ATS already running, but status is %s - restarting",
                        proc.status())
        arg = "restart"
    elif restart:
        logging.info("ATS process found - restarting")
        arg = "restart"
    else:
        logging.info("ATS already running - nothing to do")
        return True

    tsexe = os.path.join(conf.tsroot, "bin", "trafficserver")

    # Interactive users may veto the state change without failing the run.
    declined = (conf.mode is Configuration.Modes.INTERACTIVE and
                not getYN("Run command '%s %s'?" % (tsexe, arg)))
    if declined:
        logging.warning("ATS status will not be set - Traffic Ops may not expect this!")
        return True

    logging.info("ATS status will be set using: %s %s", tsexe, arg)
    if conf.mode is Configuration.Modes.REPORT:
        return True

    child = subprocess.Popen([tsexe, arg], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = child.communicate()
    if child.returncode:
        logging.error("Failed to start/stop/restart trafficserver!")
        logging.warning("Is the 'trafficserver' script located at %s?", tsexe)
        logging.debug(stdout.decode())
        logging.debug(stderr.decode())
        return False
    return True
def setServiceStatus(chkconfig:dict, mode:Configuration.Modes) -> bool:
    """
    Sets the status of a service based on its 'chkconfig'.

    A 'chkconfig' consists of a list of run-levels with either 'on' or 'off' as values.
    This allowed specifying what run-levels needed a service. It's now totally deprecated,
    but the Traffic Ops back-end doesn't know that yet...

    :param chkconfig: A single chkconfig; expected to carry 'name' and 'value' keys
    :param mode: The current run-mode
    :returns: whether or not the service's status was set successfully
    """
    # NOTE(review): declared but never consulted - presumably this should guard
    # the systemctl call like restartService() does; confirm before changing.
    global HAS_SYSTEMD
    try:
        # Any run-level being 'on' means the service should be enabled
        status = "enable" if "on" in chkconfig["value"] else "disable"
        service = chkconfig['name']
    except KeyError as e:
        logging.error("'%r' could not be parsed as a chkconfig object!", chkconfig)
        logging.debug("%s", e, exc_info=True, stack_info=True)
        return False

    if (mode is Configuration.Modes.INTERACTIVE and
            not getYN("%s %s?" % (service, status), default='Y')):
        logging.warning("%s will not be %sd - some things may break!", service, status)
        return True

    logging.info("%s will be %sd", service, status)

    if mode is not Configuration.Modes.REPORT:
        try:
            sub = subprocess.Popen(["systemctl", status, service],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            out, err = sub.communicate()
            # Fix: log the child's actual output - previously this logged the
            # literal string "output", discarding stdout/stderr entirely
            # (cf. restartService, which logs both).
            logging.debug("stdout: %s\nstderr: %s", out, err)
        except (OSError, subprocess.CalledProcessError) as e:
            # status[:-1] turns "enable"/"disable" into "enabling"/"disabling"
            logging.error("An error occurred when %sing %s: %s", status[:-1], service, e)
            logging.debug("%r", e, exc_info=True, stack_info=True)
            return False
    return True
|
|
from .operations import Operations
from .migration import MigrationContext
from . import util
class EnvironmentContext(object):
"""Represent the state made available to an ``env.py`` script.
:class:`.EnvironmentContext` is normally instantiated
by the commands present in the :mod:`alembic.command`
module. From within an ``env.py`` script, the current
:class:`.EnvironmentContext` is available via the
``alembic.context`` datamember.
:class:`.EnvironmentContext` is also a Python context
manager, that is, is intended to be used using the
``with:`` statement. A typical use of :class:`.EnvironmentContext`::
from alembic.config import Config
from alembic.script import ScriptDirectory
config = Config()
config.set_main_option("script_location", "myapp:migrations")
script = ScriptDirectory.from_config(config)
def my_function(rev, context):
'''do something with revision "rev", which
will be the current database revision,
and "context", which is the MigrationContext
that the env.py will create'''
with EnvironmentContext(
config,
script,
fn = my_function,
as_sql = False,
starting_rev = 'base',
destination_rev = 'head',
tag = "sometag"
):
script.run_env()
The above script will invoke the ``env.py`` script
within the migration environment. If and when ``env.py``
calls :meth:`.MigrationContext.run_migrations`, the
``my_function()`` function above will be called
by the :class:`.MigrationContext`, given the context
itself as well as the current revision in the database.
.. note::
For most API usages other than full blown
invocation of migration scripts, the :class:`.MigrationContext`
and :class:`.ScriptDirectory` objects can be created and
used directly. The :class:`.EnvironmentContext` object
is *only* needed when you need to actually invoke the
``env.py`` module present in the migration environment.
"""
_migration_context = None
config = None
"""An instance of :class:`.Config` representing the
configuration file contents as well as other variables
set programmatically within it."""
script = None
"""An instance of :class:`.ScriptDirectory` which provides
programmatic access to version files within the ``versions/``
directory.
"""
def __init__(self, config, script, **kw):
"""Construct a new :class:`.EnvironmentContext`.
:param config: a :class:`.Config` instance.
:param script: a :class:`.ScriptDirectory` instance.
:param \**kw: keyword options that will be ultimately
passed along to the :class:`.MigrationContext` when
:meth:`.EnvironmentContext.configure` is called.
"""
self.config = config
self.script = script
self.context_opts = kw
    def __enter__(self):
        """Establish a context which provides a
        :class:`.EnvironmentContext` object to
        env.py scripts.

        The :class:`.EnvironmentContext` will
        be made available as ``from alembic import context``.

        """
        # Install this instance as the module-level proxy so env.py scripts
        # can reach it via ``from alembic import context``.
        from .context import _install_proxy
        _install_proxy(self)
        return self
    def __exit__(self, *arg, **kw):
        """Tear down the module-level proxies installed by :meth:`.__enter__`,
        detaching this environment from ``alembic.context`` and ``alembic.op``."""
        from . import context, op
        context._remove_proxy()
        op._remove_proxy()
def is_offline_mode(self):
"""Return True if the current migrations environment
is running in "offline mode".
This is ``True`` or ``False`` depending
on the the ``--sql`` flag passed.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.context_opts.get('as_sql', False)
    def is_transactional_ddl(self):
        """Return True if the context is configured to expect a
        transactional DDL capable backend.

        This defaults to the type of database in use, and
        can be overridden by the ``transactional_ddl`` argument
        to :meth:`.configure`

        This function requires that a :class:`.MigrationContext`
        has first been made available via :meth:`.configure`.

        """
        # Delegates to the dialect implementation held by the configured context.
        return self.get_context().impl.transactional_ddl
def requires_connection(self):
return not self.is_offline_mode()
def get_head_revision(self):
"""Return the hex identifier of the 'head' script revision.
If the script directory has multiple heads, this
method raises a :class:`.CommandError`;
:meth:`.EnvironmentContext.get_head_revisions` should be preferred.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso:: :meth:`.EnvironmentContext.get_head_revisions`
"""
return self.script.as_revision_number("head")
def get_head_revisions(self):
"""Return the hex identifier of the 'heads' script revision(s).
This returns a tuple containing the version number of all
heads in the script directory.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. versionadded:: 0.7.0
"""
return self.script.as_revision_number("heads")
def get_starting_revision_argument(self):
"""Return the 'starting revision' argument,
if the revision was passed using ``start:end``.
This is only meaningful in "offline" mode.
Returns ``None`` if no value is available
or was configured.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
if self._migration_context is not None:
return self.script.as_revision_number(
self.get_context()._start_from_rev)
elif 'starting_rev' in self.context_opts:
return self.script.as_revision_number(
self.context_opts['starting_rev'])
else:
# this should raise only in the case that a command
# is being run where the "starting rev" is never applicable;
# this is to catch scripts which rely upon this in
# non-sql mode or similar
raise util.CommandError(
"No starting revision argument is available.")
def get_revision_argument(self):
"""Get the 'destination' revision argument.
This is typically the argument passed to the
``upgrade`` or ``downgrade`` command.
If it was specified as ``head``, the actual
version number is returned; if specified
as ``base``, ``None`` is returned.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.script.as_revision_number(
self.context_opts['destination_rev'])
def get_tag_argument(self):
"""Return the value passed for the ``--tag`` argument, if any.
The ``--tag`` argument is not used directly by Alembic,
but is available for custom ``env.py`` configurations that
wish to use it; particularly for offline generation scripts
that wish to generate tagged filenames.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso::
:meth:`.EnvironmentContext.get_x_argument` - a newer and more
open ended system of extending ``env.py`` scripts via the command
line.
"""
return self.context_opts.get('tag', None)
def get_x_argument(self, as_dictionary=False):
"""Return the value(s) passed for the ``-x`` argument, if any.
The ``-x`` argument is an open ended flag that allows any user-defined
value or values to be passed on the command line, then available
here for consumption by a custom ``env.py`` script.
The return value is a list, returned directly from the ``argparse``
structure. If ``as_dictionary=True`` is passed, the ``x`` arguments
are parsed using ``key=value`` format into a dictionary that is
then returned.
For example, to support passing a database URL on the command line,
the standard ``env.py`` script can be modified like this::
cmd_line_url = context.get_x_argument(
as_dictionary=True).get('dbname')
if cmd_line_url:
engine = create_engine(cmd_line_url)
else:
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
This then takes effect by running the ``alembic`` script as::
alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
This function does not require that the :class:`.MigrationContext`
has been configured.
.. versionadded:: 0.6.0
.. seealso::
:meth:`.EnvironmentContext.get_tag_argument`
:attr:`.Config.cmd_opts`
"""
if self.config.cmd_opts is not None:
value = self.config.cmd_opts.x or []
else:
value = []
if as_dictionary:
value = dict(
arg.split('=', 1) for arg in value
)
return value
def configure(self,
connection=None,
url=None,
dialect_name=None,
transactional_ddl=None,
transaction_per_migration=False,
output_buffer=None,
starting_rev=None,
tag=None,
template_args=None,
render_as_batch=False,
target_metadata=None,
include_symbol=None,
include_object=None,
include_schemas=False,
compare_type=False,
compare_server_default=False,
render_item=None,
literal_binds=False,
upgrade_token="upgrades",
downgrade_token="downgrades",
alembic_module_prefix="op.",
sqlalchemy_module_prefix="sa.",
user_module_prefix=None,
**kw
):
"""Configure a :class:`.MigrationContext` within this
:class:`.EnvironmentContext` which will provide database
connectivity and other configuration to a series of
migration scripts.
Many methods on :class:`.EnvironmentContext` require that
this method has been called in order to function, as they
ultimately need to have database access or at least access
to the dialect in use. Those which do are documented as such.
The important thing needed by :meth:`.configure` is a
means to determine what kind of database dialect is in use.
An actual connection to that database is needed only if
the :class:`.MigrationContext` is to be used in
"online" mode.
If the :meth:`.is_offline_mode` function returns ``True``,
then no connection is needed here. Otherwise, the
``connection`` parameter should be present as an
instance of :class:`sqlalchemy.engine.Connection`.
This function is typically called from the ``env.py``
script within a migration environment. It can be called
multiple times for an invocation. The most recent
:class:`~sqlalchemy.engine.Connection`
for which it was called is the one that will be operated upon
by the next call to :meth:`.run_migrations`.
General parameters:
:param connection: a :class:`~sqlalchemy.engine.Connection`
to use
for SQL execution in "online" mode. When present, is also
used to determine the type of dialect in use.
:param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
The type of dialect to be used will be derived from this if
``connection`` is not passed.
:param dialect_name: string name of a dialect, such as
"postgresql", "mssql", etc.
The type of dialect to be used will be derived from this if
``connection`` and ``url`` are not passed.
:param transactional_ddl: Force the usage of "transactional"
DDL on or off;
this otherwise defaults to whether or not the dialect in
use supports it.
:param transaction_per_migration: if True, nest each migration script
in a transaction rather than the full series of migrations to
run.
.. versionadded:: 0.6.5
:param output_buffer: a file-like object that will be used
for textual output
when the ``--sql`` option is used to generate SQL scripts.
Defaults to
``sys.stdout`` if not passed here and also not present on
the :class:`.Config`
object. The value here overrides that of the :class:`.Config`
object.
:param output_encoding: when using ``--sql`` to generate SQL
scripts, apply this encoding to the string output.
:param literal_binds: when using ``--sql`` to generate SQL
scripts, pass through the ``literal_binds`` flag to the compiler
so that any literal values that would ordinarily be bound
parameters are converted to plain strings.
.. warning:: Dialects can typically only handle simple datatypes
like strings and numbers for auto-literal generation. Datatypes
like dates, intervals, and others may still require manual
formatting, typically using :meth:`.Operations.inline_literal`.
.. note:: the ``literal_binds`` flag is ignored on SQLAlchemy
versions prior to 0.8 where this feature is not supported.
.. versionadded:: 0.7.6
.. seealso::
:meth:`.Operations.inline_literal`
:param starting_rev: Override the "starting revision" argument
when using ``--sql`` mode.
:param tag: a string tag for usage by custom ``env.py`` scripts.
Set via the ``--tag`` option, can be overridden here.
:param template_args: dictionary of template arguments which
will be added to the template argument environment when
running the "revision" command. Note that the script environment
is only run within the "revision" command if the --autogenerate
option is used, or if the option "revision_environment=true"
is present in the alembic.ini file.
:param version_table: The name of the Alembic version table.
The default is ``'alembic_version'``.
:param version_table_schema: Optional schema to place version
table within.
Parameters specific to the autogenerate feature, when
``alembic revision`` is run with the ``--autogenerate`` feature:
:param target_metadata: a :class:`sqlalchemy.schema.MetaData`
object that
will be consulted during autogeneration. The tables present
will be compared against
what is locally available on the target
:class:`~sqlalchemy.engine.Connection`
to produce candidate upgrade/downgrade operations.
:param compare_type: Indicates type comparison behavior during
an autogenerate
operation. Defaults to ``False`` which disables type
comparison. Set to
``True`` to turn on default type comparison, which has varied
accuracy depending on backend. See :ref:`compare_types`
for an example as well as information on other type
comparison options.
.. seealso::
:ref:`compare_types`
:paramref:`.EnvironmentContext.configure.compare_server_default`
:param compare_server_default: Indicates server default comparison
behavior during
an autogenerate operation. Defaults to ``False`` which disables
server default
comparison. Set to ``True`` to turn on server default comparison,
which has
varied accuracy depending on backend.
To customize server default comparison behavior, a callable may
be specified
which can filter server default comparisons during an
autogenerate operation.
defaults during an autogenerate operation. The format of this
callable is::
def my_compare_server_default(context, inspected_column,
metadata_column, inspected_default, metadata_default,
rendered_metadata_default):
# return True if the defaults are different,
# False if not, or None to allow the default implementation
# to compare these defaults
return None
context.configure(
# ...
compare_server_default = my_compare_server_default
)
``inspected_column`` is a dictionary structure as returned by
:meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
the local model environment.
A return value of ``None`` indicates to allow default server default
comparison
to proceed. Note that some backends such as Postgresql actually
execute
the two defaults on the database side to compare for equivalence.
.. seealso::
:paramref:`.EnvironmentContext.configure.compare_type`
:param include_object: A callable function which is given
the chance to return ``True`` or ``False`` for any object,
indicating if the given object should be considered in the
autogenerate sweep.
The function accepts the following positional arguments:
* ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
as a :class:`~sqlalchemy.schema.Table`,
:class:`~sqlalchemy.schema.Column`,
:class:`~sqlalchemy.schema.Index`
:class:`~sqlalchemy.schema.UniqueConstraint`,
or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
* ``name``: the name of the object. This is typically available
via ``object.name``.
* ``type``: a string describing the type of object; currently
``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
or ``"foreign_key_constraint"``
.. versionadded:: 0.7.0 Support for indexes and unique constraints
within the
:paramref:`~.EnvironmentContext.configure.include_object` hook.
.. versionadded:: 0.7.1 Support for foreign keys within the
:paramref:`~.EnvironmentContext.configure.include_object` hook.
* ``reflected``: ``True`` if the given object was produced based on
table reflection, ``False`` if it's from a local :class:`.MetaData`
object.
* ``compare_to``: the object being compared against, if available,
else ``None``.
E.g.::
def include_object(object, name, type_, reflected, compare_to):
if (type_ == "column" and
not reflected and
object.info.get("skip_autogenerate", False)):
return False
else:
return True
context.configure(
# ...
include_object = include_object
)
:paramref:`.EnvironmentContext.configure.include_object` can also
be used to filter on specific schemas to include or omit, when
the :paramref:`.EnvironmentContext.configure.include_schemas`
flag is set to ``True``. The :attr:`.Table.schema` attribute
on each :class:`.Table` object reflected will indicate the name of the
schema from which the :class:`.Table` originates.
.. versionadded:: 0.6.0
.. seealso::
:paramref:`.EnvironmentContext.configure.include_schemas`
:param include_symbol: A callable function which, given a table name
and schema name (may be ``None``), returns ``True`` or ``False``,
indicating if the given table should be considered in the
autogenerate sweep.
.. deprecated:: 0.6.0
:paramref:`.EnvironmentContext.configure.include_symbol`
is superseded by the more generic
:paramref:`.EnvironmentContext.configure.include_object`
parameter.
E.g.::
def include_symbol(tablename, schema):
return tablename not in ("skip_table_one", "skip_table_two")
context.configure(
# ...
include_symbol = include_symbol
)
.. seealso::
:paramref:`.EnvironmentContext.configure.include_schemas`
:paramref:`.EnvironmentContext.configure.include_object`
:param render_as_batch: if True, commands which alter elements
within a table will be placed under a ``with batch_alter_table():``
directive, so that batch migrations will take place.
.. versionadded:: 0.7.0
.. seealso::
:ref:`batch_migrations`
:param include_schemas: If True, autogenerate will scan across
all schemas located by the SQLAlchemy
:meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
method, and include all differences in tables found across all
those schemas. When using this option, you may want to also
use the :paramref:`.EnvironmentContext.configure.include_object`
option to specify a callable which
can filter the tables/schemas that get included.
.. seealso::
:paramref:`.EnvironmentContext.configure.include_object`
:param render_item: Callable that can be used to override how
any schema item, i.e. column, constraint, type,
etc., is rendered for autogenerate. The callable receives a
string describing the type of object, the object, and
the autogen context. If it returns False, the
default rendering method will be used. If it returns None,
the item will not be rendered in the context of a Table
construct, that is, can be used to skip columns or constraints
within op.create_table()::
def my_render_column(type_, col, autogen_context):
if type_ == "column" and isinstance(col, MySpecialCol):
return repr(col)
else:
return False
context.configure(
# ...
render_item = my_render_column
)
Available values for the type string include: ``"column"``,
``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
``"type"``, ``"server_default"``.
.. seealso::
:ref:`autogen_render_types`
:param upgrade_token: When autogenerate completes, the text of the
candidate upgrade operations will be present in this template
variable when ``script.py.mako`` is rendered. Defaults to
``upgrades``.
:param downgrade_token: When autogenerate completes, the text of the
candidate downgrade operations will be present in this
template variable when ``script.py.mako`` is rendered. Defaults to
``downgrades``.
:param alembic_module_prefix: When autogenerate refers to Alembic
:mod:`alembic.operations` constructs, this prefix will be used
(i.e. ``op.create_table``) Defaults to "``op.``".
Can be ``None`` to indicate no prefix.
:param sqlalchemy_module_prefix: When autogenerate refers to
SQLAlchemy
:class:`~sqlalchemy.schema.Column` or type classes, this prefix
will be used
(i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``".
Can be ``None`` to indicate no prefix.
Note that when dialect-specific types are rendered, autogenerate
will render them using the dialect module name, i.e. ``mssql.BIT()``,
``postgresql.UUID()``.
:param user_module_prefix: When autogenerate refers to a SQLAlchemy
type (e.g. :class:`.TypeEngine`) where the module name is not
under the ``sqlalchemy`` namespace, this prefix will be used
within autogenerate. If left at its default of
``None``, the ``__module__`` attribute of the type is used to
render the import module. It's a good practice to set this
and to have all custom types be available from a fixed module space,
in order to future-proof migration files against reorganizations
in modules.
.. versionchanged:: 0.7.0
:paramref:`.EnvironmentContext.configure.user_module_prefix`
no longer defaults to the value of
:paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix`
when left at ``None``; the ``__module__`` attribute is now used.
.. versionadded:: 0.6.3 added
:paramref:`.EnvironmentContext.configure.user_module_prefix`
.. seealso::
:ref:`autogen_module_prefix`
Parameters specific to individual backends:
:param mssql_batch_separator: The "batch separator" which will
be placed between each statement when generating offline SQL Server
migrations. Defaults to ``GO``. Note this is in addition to the
customary semicolon ``;`` at the end of each statement; SQL Server
considers the "batch separator" to denote the end of an
individual statement execution, and cannot group certain
dependent operations in one step.
:param oracle_batch_separator: The "batch separator" which will
be placed between each statement when generating offline
Oracle migrations. Defaults to ``/``. Oracle doesn't add a
semicolon between statements like most other backends.
"""
opts = self.context_opts
if transactional_ddl is not None:
opts["transactional_ddl"] = transactional_ddl
if output_buffer is not None:
opts["output_buffer"] = output_buffer
elif self.config.output_buffer is not None:
opts["output_buffer"] = self.config.output_buffer
if starting_rev:
opts['starting_rev'] = starting_rev
if tag:
opts['tag'] = tag
if template_args and 'template_args' in opts:
opts['template_args'].update(template_args)
opts["transaction_per_migration"] = transaction_per_migration
opts['target_metadata'] = target_metadata
opts['include_symbol'] = include_symbol
opts['include_object'] = include_object
opts['include_schemas'] = include_schemas
opts['render_as_batch'] = render_as_batch
opts['upgrade_token'] = upgrade_token
opts['downgrade_token'] = downgrade_token
opts['sqlalchemy_module_prefix'] = sqlalchemy_module_prefix
opts['alembic_module_prefix'] = alembic_module_prefix
opts['user_module_prefix'] = user_module_prefix
opts['literal_binds'] = literal_binds
if render_item is not None:
opts['render_item'] = render_item
if compare_type is not None:
opts['compare_type'] = compare_type
if compare_server_default is not None:
opts['compare_server_default'] = compare_server_default
opts['script'] = self.script
opts.update(kw)
self._migration_context = MigrationContext.configure(
connection=connection,
url=url,
dialect_name=dialect_name,
environment_context=self,
opts=opts
)
def run_migrations(self, **kw):
    """Run the migration scripts selected by the current configuration.

    Which revisions run is determined by the current command line
    configuration together with the versioning information present (or
    absent) in the current database connection, when one is present.

    Any keyword arguments are forwarded verbatim to the ``upgrade()`` and
    ``downgrade()`` functions within each target revision file.  By
    modifying ``script.py.mako`` so those functions accept arguments, a
    custom ``env.py`` can pass contextual information here — typically
    something identifying the particular database in use — down to the
    migration functions.

    Requires that a :class:`.MigrationContext` has first been made
    available via :meth:`.configure`.
    """
    migration_context = self._migration_context
    with Operations.context(migration_context):
        self.get_context().run_migrations(**kw)
def execute(self, sql, execution_options=None):
    """Execute the given SQL using the current change context.

    Behaves exactly like :meth:`.Operations.execute`; see that
    function's documentation for full detail, including caveats and
    limitations.

    Requires that a :class:`.MigrationContext` has first been made
    available via :meth:`.configure`.
    """
    context = self.get_context()
    context.execute(sql, execution_options=execution_options)
def static_output(self, text):
    """Emit ``text`` directly onto the "offline" SQL output stream.

    Intended for comment lines (e.g. those starting with ``--``).  The
    text is not treated as a SQL execution: no terminating ``;`` or
    batch separator is appended.
    """
    impl = self.get_context().impl
    impl.static_output(text)
def begin_transaction(self):
    """Return a context manager enclosing an operation in a "transaction".

    The behavior follows the environment's offline and transactional
    DDL settings, e.g.::

        with context.begin_transaction():
            context.run_migrations()

    The returned manager "does the right thing" regardless of calling
    context:

    * If :meth:`.is_transactional_ddl` is ``False``, a "do nothing"
      context manager is returned which produces no transactional state
      or directives.
    * If :meth:`.is_offline_mode` is ``True``, the context manager
      invokes :meth:`.DefaultImpl.emit_begin` and
      :meth:`.DefaultImpl.emit_commit`, producing ``BEGIN`` / ``COMMIT``
      directives on the output stream as rendered by the target backend
      (e.g. SQL Server emits ``BEGIN TRANSACTION``).
    * Otherwise :meth:`sqlalchemy.engine.Connection.begin` is called on
      the current online connection, returning a
      :class:`sqlalchemy.engine.Transaction`.  That object demarcates a
      real transaction, is itself a context manager, and rolls back if
      an exception is raised.

    A custom ``env.py`` script with more specific transactional needs
    can of course manipulate the
    :class:`~sqlalchemy.engine.Connection` directly to produce
    transactional state in "online" mode.
    """
    context = self.get_context()
    return context.begin_transaction()
def get_context(self):
    """Return the current :class:`.MigrationContext` object.

    Raises an exception if :meth:`.EnvironmentContext.configure` has
    not been called yet.
    """
    context = self._migration_context
    if context is None:
        raise Exception("No context has been configured yet.")
    return context
def get_bind(self):
    """Return the current "bind".

    In "online" mode, this is the
    :class:`sqlalchemy.engine.Connection` currently being used to emit
    SQL to the database.

    Requires that a :class:`.MigrationContext` has first been made
    available via :meth:`.configure`.
    """
    context = self.get_context()
    return context.bind
def get_impl(self):
    """Return the ``impl`` of the current migration context."""
    context = self.get_context()
    return context.impl
|
|
#!/usr/bin/python
# Copyright 2014 Brno University of Technology (author: Karel Vesely)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# Generated Nnet prototype, to be initialized by 'nnet-initialize'.
import math, random, sys
###
### Parse options
###
from optparse import OptionParser
usage = "%prog [options] <feat-dim> <num-leaves> <num-hid-layers> <num-hid-neurons> >nnet-proto-file"
parser = OptionParser(usage)

parser.add_option('--no-proto-head', dest='with_proto_head',
                  help='Do not put <NnetProto> head-tag in the prototype [default: %default]',
                  default=True, action='store_false')
parser.add_option('--no-softmax', dest='with_softmax',
                  help='Do not put <SoftMax> in the prototype [default: %default]',
                  default=True, action='store_false')
parser.add_option('--block-softmax-dims', dest='block_softmax_dims',
                  help='Generate <BlockSoftmax> with dims D1:D2:D3 [default: %default]',
                  default="", type='string')
parser.add_option('--activation-type', dest='activation_type',
                  help='Select type of activation function : (<Sigmoid>|<Tanh>) [default: %default]',
                  default='<Sigmoid>', type='string')
parser.add_option('--hid-bias-mean', dest='hid_bias_mean',
                  help='Set bias for hidden activations [default: %default]',
                  default=-2.0, type='float')
parser.add_option('--hid-bias-range', dest='hid_bias_range',
                  help='Set bias range for hidden activations (+/- 1/2 range around mean) [default: %default]',
                  default=4.0, type='float')
# Typo fixes in help text: 'distriburtion' -> 'distribution', 'initalizing' -> 'initializing'.
parser.add_option('--param-stddev-factor', dest='param_stddev_factor',
                  help='Factor to rescale Normal distribution for initializing weight matrices [default: %default]',
                  default=0.1, type='float')
parser.add_option('--bottleneck-dim', dest='bottleneck_dim',
                  help='Make bottleneck network with desired bn-dim (0 = no bottleneck) [default: %default]',
                  default=0, type='int')
parser.add_option('--no-glorot-scaled-stddev', dest='with_glorot',
                  help='Generate normalized weights according to X.Glorot paper, but mapping U->N with same variance (factor sqrt(x/(dim_in+dim_out)))',
                  action='store_false', default=True)
# Typo fix in help text: 'stddef' -> 'stddev'.
parser.add_option('--no-smaller-input-weights', dest='smaller_input_weights',
                  help='Disable 1/12 reduction of stddev in input layer [default: %default]',
                  action='store_false', default=True)
parser.add_option('--no-bottleneck-trick', dest='bottleneck_trick',
                  help='Disable smaller initial weights and learning rate around bottleneck',
                  action='store_false', default=True)
# Typo fix in help text: 'shrinked' -> 'shrunk'.
parser.add_option('--max-norm', dest='max_norm',
                  help='Max radius of neuron-weights in L2 space (if longer, weights get shrunk, not applied to last layer, 0.0 = disable) [default: %default]',
                  default=0.0, type='float')

(o, args) = parser.parse_args()
if len(args) != 4:
    parser.print_help()
    sys.exit(1)
# Positional arguments: <feat-dim> <num-leaves> <num-hid-layers> <num-hid-neurons>
(feat_dim, num_leaves, num_hid_layers, num_hid_neurons) = map(int, args)
### End parse options

# Sanity-check the positional arguments.
assert(feat_dim > 0)
assert(num_leaves > 0)
assert(num_hid_layers >= 0)
assert(num_hid_neurons > 0)
# When <BlockSoftmax> is requested, the per-block dims must sum to the output dim.
if o.block_softmax_dims:
    assert(sum(map(int, o.block_softmax_dims.split(':'))) == num_leaves)
# Optionally scale the initial weight std-dev,
def Glorot(dim1, dim2):
    """Return the Glorot-style std-dev scaling factor for a dim1 x dim2 weight matrix.

    Returns 1.0 (no scaling) when --no-glorot-scaled-stddev was given.
    """
    if not o.with_glorot:
        return 1.0
    # 35.0 = magic number, gives ~1.0 in inner layers for hid-dim 1024dim,
    return 35.0 * math.sqrt(2.0 / (dim1 + dim2))
###
### Print prototype of the network
###

# NO HIDDEN LAYER, ADDING BOTTLENECK!
# No hidden layer while adding bottleneck means:
# - add bottleneck layer + hidden layer + output layer
if num_hid_layers == 0 and o.bottleneck_dim != 0:
    assert(o.bottleneck_dim > 0)
    assert(num_hid_layers == 0)
    if o.with_proto_head : print "<NnetProto>"
    if o.bottleneck_trick:
        # 25% smaller stddev -> small bottleneck range, 10x smaller learning rate
        print "<LinearTransform> <InputDim> %d <OutputDim> %d <ParamStddev> %f <LearnRateCoef> %f" % \
            (feat_dim, o.bottleneck_dim, \
            (o.param_stddev_factor * Glorot(feat_dim, o.bottleneck_dim) * 0.75 ), 0.1)
        # 25% smaller stddev -> smaller gradient in prev. layer, 10x smaller learning rate for weights & biases
        print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <LearnRateCoef> %f <BiasLearnRateCoef> %f <MaxNorm> %f" % \
            (o.bottleneck_dim, num_hid_neurons, o.hid_bias_mean, o.hid_bias_range, \
            (o.param_stddev_factor * Glorot(o.bottleneck_dim, num_hid_neurons) * 0.75 ), 0.1, 0.1, o.max_norm)
    else:
        # No bottleneck trick: plain stddev formula, default learning rates.
        print "<LinearTransform> <InputDim> %d <OutputDim> %d <ParamStddev> %f" % \
            (feat_dim, o.bottleneck_dim, \
            (o.param_stddev_factor * Glorot(feat_dim, o.bottleneck_dim)))
        print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <MaxNorm> %f" % \
            (o.bottleneck_dim, num_hid_neurons, o.hid_bias_mean, o.hid_bias_range, \
            (o.param_stddev_factor * Glorot(o.bottleneck_dim, num_hid_neurons)), o.max_norm)
    print "%s <InputDim> %d <OutputDim> %d" % (o.activation_type, num_hid_neurons, num_hid_neurons) # Non-linearity
    # Last AffineTransform (10x smaller learning rate on bias)
    print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <LearnRateCoef> %f <BiasLearnRateCoef> %f" % \
        (num_hid_neurons, num_leaves, 0.0, 0.0, \
        (o.param_stddev_factor * Glorot(num_hid_neurons, num_leaves)), 1.0, 0.1)
    # Optionally append softmax (single <Softmax> or multi-task <BlockSoftmax>)
    if o.with_softmax:
        if o.block_softmax_dims == "":
            print "<Softmax> <InputDim> %d <OutputDim> %d" % (num_leaves, num_leaves)
        else:
            print "<BlockSoftmax> <InputDim> %d <OutputDim> %d <BlockDims> %s" % (num_leaves, num_leaves, o.block_softmax_dims)
    print "</NnetProto>"
    # We are done!
    sys.exit(0)
# NO HIDDEN LAYERS!
# Add only last layer (logistic regression)
if num_hid_layers == 0:
    if o.with_proto_head : print "<NnetProto>"
    # Single affine layer mapping features directly to leaves (zero bias).
    print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f" % \
        (feat_dim, num_leaves, 0.0, 0.0, (o.param_stddev_factor * Glorot(feat_dim, num_leaves)))
    # Optionally append softmax (single <Softmax> or multi-task <BlockSoftmax>)
    if o.with_softmax:
        if o.block_softmax_dims == "":
            print "<Softmax> <InputDim> %d <OutputDim> %d" % (num_leaves, num_leaves)
        else:
            print "<BlockSoftmax> <InputDim> %d <OutputDim> %d <BlockDims> %s" % (num_leaves, num_leaves, o.block_softmax_dims)
    print "</NnetProto>"
    # We are done!
    sys.exit(0)
# THE USUAL DNN PROTOTYPE STARTS HERE!
# Assuming we have >0 hidden layers,
assert(num_hid_layers > 0)

# Begin the prototype,
if o.with_proto_head : print "<NnetProto>"

# First AffineTranform,
print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <MaxNorm> %f" % \
    (feat_dim, num_hid_neurons, o.hid_bias_mean, o.hid_bias_range, \
    (o.param_stddev_factor * Glorot(feat_dim, num_hid_neurons) * \
    (math.sqrt(1.0/12.0) if o.smaller_input_weights else 1.0)), o.max_norm)
# Note.: compensating dynamic range mismatch between input features and Sigmoid-hidden layers,
# i.e. mapping the std-dev of N(0,1) (input features) to std-dev of U[0,1] (sigmoid-outputs).
# This is done by multiplying with stddev(U[0,1]) = sqrt(1/12).
# The stddev of weights is consequently reduced by 0.29x.
print "%s <InputDim> %d <OutputDim> %d" % (o.activation_type, num_hid_neurons, num_hid_neurons)

# Internal AffineTransforms (one affine + one non-linearity per extra hidden layer),
for i in range(num_hid_layers-1):
    print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <MaxNorm> %f" % \
        (num_hid_neurons, num_hid_neurons, o.hid_bias_mean, o.hid_bias_range, \
        (o.param_stddev_factor * Glorot(num_hid_neurons, num_hid_neurons)), o.max_norm)
    print "%s <InputDim> %d <OutputDim> %d" % (o.activation_type, num_hid_neurons, num_hid_neurons)

# Optionally add bottleneck (linear projection + affine + non-linearity),
if o.bottleneck_dim != 0:
    assert(o.bottleneck_dim > 0)
    if o.bottleneck_trick:
        # 25% smaller stddev -> small bottleneck range, 10x smaller learning rate
        print "<LinearTransform> <InputDim> %d <OutputDim> %d <ParamStddev> %f <LearnRateCoef> %f" % \
            (num_hid_neurons, o.bottleneck_dim, \
            (o.param_stddev_factor * Glorot(num_hid_neurons, o.bottleneck_dim) * 0.75 ), 0.1)
        # 25% smaller stddev -> smaller gradient in prev. layer, 10x smaller learning rate for weights & biases
        print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <LearnRateCoef> %f <BiasLearnRateCoef> %f <MaxNorm> %f" % \
            (o.bottleneck_dim, num_hid_neurons, o.hid_bias_mean, o.hid_bias_range, \
            (o.param_stddev_factor * Glorot(o.bottleneck_dim, num_hid_neurons) * 0.75 ), 0.1, 0.1, o.max_norm)
    else:
        # Same learning-rate and stddev-formula everywhere,
        print "<LinearTransform> <InputDim> %d <OutputDim> %d <ParamStddev> %f" % \
            (num_hid_neurons, o.bottleneck_dim, \
            (o.param_stddev_factor * Glorot(num_hid_neurons, o.bottleneck_dim)))
        print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <MaxNorm> %f" % \
            (o.bottleneck_dim, num_hid_neurons, o.hid_bias_mean, o.hid_bias_range, \
            (o.param_stddev_factor * Glorot(o.bottleneck_dim, num_hid_neurons)), o.max_norm)
    print "%s <InputDim> %d <OutputDim> %d" % (o.activation_type, num_hid_neurons, num_hid_neurons)

# Last AffineTransform (10x smaller learning rate on bias)
print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <LearnRateCoef> %f <BiasLearnRateCoef> %f" % \
    (num_hid_neurons, num_leaves, 0.0, 0.0, \
    (o.param_stddev_factor * Glorot(num_hid_neurons, num_leaves)), 1.0, 0.1)

# Optionally append softmax (single <Softmax> or multi-task <BlockSoftmax>)
if o.with_softmax:
    if o.block_softmax_dims == "":
        print "<Softmax> <InputDim> %d <OutputDim> %d" % (num_leaves, num_leaves)
    else:
        print "<BlockSoftmax> <InputDim> %d <OutputDim> %d <BlockDims> %s" % (num_leaves, num_leaves, o.block_softmax_dims)

# End the prototype
print "</NnetProto>"

# We are done!
sys.exit(0)
|
|
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function

import collections
import collections.abc
import logging
import time
import types
from functools import reduce

import inspyred
import numpy
from inspyred.ec.generators import diversify as diversify_function
from pandas import DataFrame

from cobra.flux_analysis import find_essential_genes, find_essential_reactions

from cameo import config
from cameo.core.result import Result
from cameo.flux_analysis.simulation import pfba, lmoma, moma, room, logger as simulation_logger
from cameo.flux_analysis.structural import (find_blocked_reactions_nullspace, find_coupled_reactions_nullspace,
                                            nullspace,
                                            create_stoichiometric_array)
from cameo.strain_design.heuristic.evolutionary import archives
from cameo.strain_design.heuristic.evolutionary import decoders
from cameo.strain_design.heuristic.evolutionary import evaluators
from cameo.strain_design.heuristic.evolutionary import generators
from cameo.strain_design.heuristic.evolutionary import observers
from cameo.strain_design.heuristic.evolutionary import plotters
from cameo.strain_design.heuristic.evolutionary import stats
from cameo.strain_design.heuristic.evolutionary import variators
from cameo.strain_design.heuristic.evolutionary.archives import Individual
from cameo.strain_design.heuristic.evolutionary.objective_functions import MultiObjectiveFunction, ObjectiveFunction
from cameo.util import RandomGenerator as Random, reduce_reaction_set
from cameo.util import in_ipnb
from cameo.util import partition
# Public API of this module.
__all__ = ['ReactionKnockoutOptimization', 'GeneKnockoutOptimization', 'CofactorSwapOptimization']

# Identifiers used to tag the kind of target an optimization manipulates.
REACTION_KNOCKOUT_TYPE = "reaction"
SWAP_TYPE = "cofactor-swap"
GENE_KNOCKOUT_TYPE = "gene"

# Default cofactor pair for cofactor-swap optimization: NAD(H) vs NADP(H)
# (metabolite ids appear to be BiGG-style cytosolic ids — verify against the model in use).
NADH_NADPH = (['nad_c', 'nadh_c'], ['nadp_c', 'nadph_c'])

# Column labels used when assembling result tables.
SIZE = 'Size'
BIOMASS = 'Biomass'
KNOCKOUTS = 'Knockouts'
REACTIONS = 'Reactions'

logger = logging.getLogger(__name__)
# Default operator configuration per supported inspyred heuristic, as
# [variators, selector, replacer, archiver].  Looked up by the
# TargetOptimization.heuristic_method setter and applied via _setup().
PRE_CONFIGURED = {
    # Genetic algorithm: tournament selection, generational replacement.
    inspyred.ec.GA: [
        [
            variators.set_mutation,
            variators.set_indel,
            variators.set_n_point_crossover
        ],
        inspyred.ec.selectors.tournament_selection,
        inspyred.ec.replacers.generational_replacement,
        archives.BestSolutionArchive(),
    ],
    # Simulated annealing: no crossover variator.
    inspyred.ec.SA: [
        [
            variators.set_mutation,
            variators.set_indel
        ],
        inspyred.ec.selectors.default_selection,
        inspyred.ec.replacers.simulated_annealing_replacement,
        archives.BestSolutionArchive()
    ],
    # Multi-objective: NSGA-II with population archiving.
    inspyred.ec.emo.NSGA2: [
        [
            variators.set_mutation,
            variators.set_indel,
            variators.set_n_point_crossover
        ],
        inspyred.ec.selectors.tournament_selection,
        inspyred.ec.replacers.nsga_replacement,
        inspyred.ec.archivers.population_archiver
    ],
    # Multi-objective: PAES with adaptive grid archiving.
    inspyred.ec.emo.PAES: [
        [
            variators.set_mutation,
            variators.set_indel,
            variators.set_n_point_crossover
        ],
        inspyred.ec.selectors.default_selection,
        inspyred.ec.replacers.paes_replacement,
        inspyred.ec.archivers.adaptive_grid_archiver
    ]
}
def set_distance_function(candidate1, candidate2):
    """Distance between two set-like candidates: size of their symmetric difference."""
    return len(set(candidate1) ^ set(candidate2))
class HeuristicOptimization(object):
    """
    Blueprint for any model optimization based on heuristic methods.

    Attributes
    ----------
    model : cobra.Model
        A constraint-based model.
    heuristic_method : inspyred.ec.EvolutionaryComputation
        An evolutionary algorithm.
    objective_function : objective function or list(objective function)
        The objectives for the algorithm to maximize.
    seed : int
        A seed for random. It is auto-generated if None is given.
    termination : inspyred.ec.terminators
        A termination criteria for the algorithm. The default is
        inspyred.ec.terminators.evaluation_termination.

    Methods
    -------
    run(view=config.default_view, maximize=True, **kwargs)

    See Also
    --------
    *inspyred.ec
    *cameo.config.default_view
    """

    def __init__(self, model=None, heuristic_method=inspyred.ec.GA, objective_function=None, seed=None,
                 termination=inspyred.ec.terminators.evaluation_termination, plot=True, progress=True,
                 *args, **kwargs):
        super(HeuristicOptimization, self).__init__(*args, **kwargs)
        logger.debug("Seed: %s" % seed)
        self.plot = plot
        self.progress = progress
        self.observers = []
        self.model = model
        self._random = None
        self.termination = termination
        # _objective_function must be assigned before heuristic_method: the
        # heuristic_method setter checks single- vs multi-objective
        # compatibility against the current objective function.
        self._objective_function = objective_function
        self._heuristic_method = None
        self.heuristic_method = heuristic_method
        self.heuristic_method.terminator = termination

    @property
    def archiver(self):
        """The archiver of the underlying heuristic method."""
        return self._heuristic_method.archiver

    @archiver.setter
    def archiver(self, archiver):
        self._heuristic_method.archiver = archiver

    @property
    def objective_function(self):
        """The :class:`ObjectiveFunction` (possibly multi-objective) being maximized."""
        return self._objective_function

    @objective_function.setter
    def objective_function(self, objective_function):
        # Reject non-ObjectiveFunction values, and reject multi-objective
        # functions when the configured heuristic is single-objective
        # (i.e. comes from the plain inspyred.ec.ec module).
        if not isinstance(objective_function, ObjectiveFunction):
            raise TypeError("objective function is not instance of ObjectiveFunction")
        elif self._heuristic_method.__module__ == inspyred.ec.ec.__name__ and isinstance(objective_function,
                                                                                         MultiObjectiveFunction):
            raise TypeError("single objective heuristic do not support multiple objective functions")
        else:
            self._objective_function = objective_function

    @property
    def random(self):
        """Lazily-created random number generator shared with the heuristic method."""
        if self._random is None:
            self._random = Random()
        return self._random

    @property
    def heuristic_method(self):
        """The instantiated inspyred evolutionary-computation object."""
        return self._heuristic_method

    @heuristic_method.setter
    def heuristic_method(self, heuristic_method):
        # `heuristic_method` is a class; it is instantiated with our RNG below.
        if heuristic_method.__module__ == inspyred.ec.ec.__name__ and isinstance(self.objective_function,
                                                                                 MultiObjectiveFunction):
            raise TypeError("single objective heuristics do not support multiple objective functions")
        self._heuristic_method = heuristic_method(self.random)

    def run(self, evaluator=None, generator=None, view=config.default_view, maximize=True, max_time=None, **kwargs):
        """
        Runs the evolutionary algorithm.

        Parameters
        ----------
        evaluator : function
            A function that evaluates candidates.
        generator : function
            A function that yields candidates.
        view : cameo.parallel.SequentialView, cameo.parallel.MultiprocessingView
            A view for single or multiprocessing.
        maximize : bool
            The sense of the optimization algorithm.
        max_time : tuple
            A tuple with (minutes, seconds) or (hours, minutes, seconds)
        kwargs : dict
            See inspyred documentation for more information.

        Returns
        -------
        list
            A list of individuals from the last iteration.
        """
        # Clear any solutions archived by a previous run.
        if isinstance(self.heuristic_method.archiver, archives.BestSolutionArchive):
            self.heuristic_method.archiver.reset()
        if kwargs.get('seed', None) is None:
            kwargs['seed'] = int(time.time())
        self._heuristic_method._random.seed(kwargs['seed'])
        for observer in self.observers:
            observer.reset()
        t = time.time()
        if max_time is not None:
            # Chain a time-based terminator onto whatever terminator(s) are set.
            terminator = self.heuristic_method.terminator
            # BUG FIX: `collections.Iterable` was deprecated in Python 3.3 and
            # removed in 3.10; the ABC lives in `collections.abc`.
            if isinstance(terminator, collections.abc.Iterable):
                terminator = list(terminator)
                terminator.append(inspyred.ec.terminators.time_termination)
            else:
                terminator = [terminator, inspyred.ec.terminators.time_termination]
            self.heuristic_method.terminator = terminator
            # time_termination reads these from the evolve() kwargs.
            kwargs['start_time'] = t
            kwargs['max_time'] = max_time
        print(time.strftime("Starting optimization at %a, %d %b %Y %H:%M:%S", time.localtime(t)))
        res = self.heuristic_method.evolve(generator=generator,
                                           maximize=maximize,
                                           evaluator=evaluator,
                                           **kwargs)
        for observer in self.observers:
            observer.end()
        runtime = time.time() - t
        print(time.strftime("Finished after %H:%M:%S", time.gmtime(runtime)))
        return res
class EvaluatorWrapper(object):
    """Distribute candidate evaluation across a parallel view.

    Splits the candidate population into one chunk per worker, evaluates the
    chunks with ``view.map``, and concatenates the per-chunk fitness lists.
    Also usable as a context manager: on exit the wrapped evaluator's
    ``reset()`` is called.
    """

    def __init__(self, view, evaluator):
        """
        Parameters
        ----------
        view : object
            A parallel view object; must provide a ``map`` function.
        evaluator : callable
            Evaluates a chunk of candidates, returning a list of fitnesses.
        """
        if not hasattr(view, 'map'):
            # BUG FIX: the '%s' placeholder was never substituted.
            raise ValueError("View %s does not contain the required map function" % view)
        if not (hasattr(evaluator, '__call__') or isinstance(evaluator, types.FunctionType)):
            # BUG FIX: the '%s' placeholder was never substituted.
            raise ValueError("evaluator %s must be a function or callable" % evaluator)
        self.view = view
        self.evaluator = evaluator
        # BUG FIX: `EvaluatorWrapper.__class__.__name__` is the metaclass name
        # ('type'); use the instance's class name instead.
        self.__name__ = "Wrapped %s" % type(self).__name__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Let the evaluator clear any per-run state/caches.
        self.evaluator.reset()

    def __call__(self, candidates, args):
        # One chunk per worker in the view.
        population_chunks = (chunk for chunk in partition(candidates, len(self.view)))
        try:
            chunked_results = self.view.map(self.evaluator, population_chunks)
        except KeyboardInterrupt:
            self.view.shutdown()
            raise
        # Flatten the per-chunk fitness lists back into a single list.
        fitness = reduce(list.__add__, chunked_results)
        return fitness
class TargetOptimization(HeuristicOptimization):
"""
Abstract class for target optimization.
"""
def __init__(self, simulation_method=pfba, wt_reference=None, *args, **kwargs):
    """
    Generic optimization algorithm for knockout (or similar) strain-design methods.

    Attributes
    ----------
    simulation_method : see flux_analysis.simulation
        The method used to simulate the model when evaluating candidates.
    wt_reference : dict, cameo.flux_analysis.simulation.FluxDistributionResult
        A dict (dict-like) object with flux values from a reference state.
    evaluator : TargetEvaluator
        The object used to evaluate results (populated by subclasses).
    """
    super(TargetOptimization, self).__init__(*args, **kwargs)
    # The reference distribution travels with the other simulation kwargs.
    self._simulation_kwargs = {'reference': wt_reference}
    # Assign via the property so a reference is computed when required.
    self._simulation_method = None
    self.simulation_method = simulation_method
    # Filled in by concrete subclasses.
    self.representation = None
    self._evaluator = None
    self._target_type = None
    self._decoder = None
    self._metadata = {}
@property
def metadata(self):
    """Return the metadata dict associated with this optimization (read-only view)."""
    return self._metadata
@property
def simulation_method(self):
    """The configured flux-simulation method (see the setter for side effects)."""
    return self._simulation_method
@simulation_method.setter
def simulation_method(self, simulation_method):
    """Set the simulation method; reference-based methods (lmoma/moma/room)
    get a pfba wild-type reference computed on demand when none is stored."""
    needs_reference = simulation_method in [lmoma, moma, room]
    missing_reference = self._simulation_kwargs.get("reference", None) is None
    if needs_reference and missing_reference:
        logger.warning("No WT reference found, generating using pfba.")
        self._simulation_kwargs['reference'] = pfba(self.model).fluxes
        logger.warning("Reference successfully computed.")
    self._simulation_method = simulation_method
@property
def simulation_kwargs(self):
    """dict of keyword arguments passed to the simulation method (includes 'reference')."""
    return self._simulation_kwargs
@simulation_kwargs.setter
def simulation_kwargs(self, simulation_kwargs):
    """Replace the simulation kwargs; computes a pfba wild-type reference when a
    reference-based simulation method is active and none is supplied."""
    needs_reference = self.simulation_method in [lmoma, moma, room]
    missing_reference = simulation_kwargs.get("reference", None) is None
    if needs_reference and missing_reference:
        logger.warning("No WT reference found, generating using pfba.")
        simulation_kwargs['reference'] = pfba(self.model).fluxes
        logger.warning("Reference successfully computed.")
    self._simulation_kwargs = simulation_kwargs
@HeuristicOptimization.heuristic_method.setter
def heuristic_method(self, heuristic_method):
    # Extends the base-class setter: refresh observers, then install the
    # pre-configured operator set when the heuristic is a known one.
    HeuristicOptimization.heuristic_method.fset(self, heuristic_method)
    self._set_observer()
    try:
        # NOTE: a KeyError raised inside _setup() would also land here and be
        # silently treated as "not pre-configured" — verify if that matters.
        configuration = PRE_CONFIGURED[heuristic_method]
        self._setup(*configuration)
    except KeyError:
        # Heuristic not in PRE_CONFIGURED; the caller must supply compatible
        # operators themselves.
        logger.warning("Please verify the variator is compatible with set representation")
def _setup(self, variator, selector, replacer, archiver):
    """Install the given evolutionary operators on the current heuristic method."""
    logger.debug("Setting up algorithm: %s" % self.heuristic_method)
    method = self.heuristic_method
    method.variator = variator
    method.selector = selector
    method.replacer = replacer
    method.archiver = archiver
    method.terminator = self.termination
    @HeuristicOptimization.objective_function.setter
    def objective_function(self, objective_function):
        # Delegate to the parent setter, then rebuild observers (the plotter
        # choice depends on how many objectives are being optimized).
        HeuristicOptimization.objective_function.fset(self, objective_function)
        self._set_observer()
def _set_observer(self):
self.observers = []
if in_ipnb() and self.plot:
if config.use_bokeh:
if len(self.objective_function) > 1:
self.observers.append(plotters.IPythonBokehParetoPlotter(self.objective_function))
else:
self.observers.append(plotters.IPythonBokehFitnessPlotter())
else:
if config.use_bokeh:
pass
else:
pass
if self.progress:
self.observers.append(observers.ProgressObserver())
    def run(self, max_size=10, variable_size=True, diversify=False, view=config.default_view, **kwargs):
        """
        Runs the evolutionary algorithm.
        Parameters
        ----------
        max_size : int
            Maximum size of a solution, e.g., the maximum number of reactions or genes to knock-out or swap.
        variable_size : boolean
            If true, the solution size can change meaning that the combination of knockouts can have different sizes up
            to max_size. Otherwise it only produces knockout solutions with a fixed number of knockouts.
        diversify : bool
            It true, the generator will not be allowed to generate repeated candidates in the initial population.
        view : cameo.parallel.SequentialView, cameo.parallel.MultiprocessingView
            A view for single or multiprocessing.
        Returns
        -------
        TargetOptimizationResult
            The result of the optimization.
        """
        # NOTE(review): ``variable_size`` is accepted but not forwarded to the
        # parent ``run`` below — confirm whether it should be passed through.
        # Seed with wall-clock time when the caller did not fix one, so the
        # value can still be reported in the result for reproducibility.
        if kwargs.get('seed', None) is None:
            kwargs['seed'] = int(time.time())
        self.heuristic_method.observer = self.observers
        # Silence per-candidate simulation logging for the duration of the run,
        # restoring the previous level afterwards.
        log_level = simulation_logger.level
        simulation_logger.setLevel(logging.CRITICAL)
        if diversify:
            generator = diversify_function(generators.set_generator)
        else:
            generator = generators.set_generator
        with EvaluatorWrapper(view, self._evaluator) as evaluator:
            super(TargetOptimization, self).run(distance_function=set_distance_function,
                                               representation=self.representation,
                                               evaluator=evaluator,
                                               generator=generator,
                                               max_size=max_size,
                                               **kwargs)
        simulation_logger.setLevel(log_level)
        # Package the archive plus full run context for later inspection.
        return TargetOptimizationResult(model=self.model,
                                        heuristic_method=self.heuristic_method,
                                        simulation_method=self.simulation_method,
                                        simulation_kwargs=self._simulation_kwargs,
                                        solutions=self.heuristic_method.archive,
                                        objective_function=self.objective_function,
                                        target_type=self._target_type,
                                        decoder=self._decoder,
                                        evaluator=self._evaluator,
                                        seed=kwargs['seed'],
                                        metadata=self.metadata,
                                        view=view)
class KnockoutOptimization(TargetOptimization):
    """
    Abstract knockout optimization class.

    Thin base class for reaction- and gene-knockout optimizations: it only
    fixes the default simulation method (pfba) and forwards everything else
    to TargetOptimization.
    """
    def __init__(self, simulation_method=pfba, wt_reference=None, *args, **kwargs):
        super(KnockoutOptimization, self).__init__(simulation_method=simulation_method,
                                                   wt_reference=wt_reference,
                                                   *args, **kwargs)
class SolutionSimplification(object):
    """
    Solution Simplification Method

    Greedily removes targets from a candidate solution one at a time,
    keeping a removal only when the fitness does not degrade.
    """
    def __init__(self, evaluator):
        if not isinstance(evaluator, evaluators.Evaluator):
            raise ValueError("Evaluator must be instance of "
                             "'cameo.strain_design.heuristic.evolutionary.evaluators.Evaluator'")
        self._evaluator = evaluator
    def __call__(self, population):
        # Simplify every individual; this makes the instance usable as a
        # mapper over chunks of solutions.
        return [self.simplify(individual) for individual in population]
    def simplify(self, individual):
        # Assumes Individual copies the candidate collection — the removals
        # below must not mutate the archived individual. TODO confirm.
        new_individual = Individual(individual.candidate, individual.fitness, individual.maximize,
                                    birthdate=individual.birthdate)
        for target in individual.candidate:
            new_individual.candidate.remove(target)
            # NOTE(review): evaluate_individual receives tuple(new_individual),
            # i.e. the Individual iterated into a tuple — presumably Individual
            # iterates over its candidate targets; verify.
            new_fitness = self._evaluator.evaluate_individual(tuple(new_individual))
            if isinstance(new_fitness, inspyred.ec.emo.Pareto):
                if new_fitness < individual.fitness:
                    new_individual.candidate.add(target)
            else:
                # Re-add the target when removing it hurt fitness or produced NaN.
                if new_fitness < individual.fitness or numpy.isnan(new_fitness):
                    new_individual.candidate.add(target)
        return new_individual
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release evaluator state when used as a context manager.
        self._evaluator.reset()
class TargetOptimizationResult(Result):
    """Container for the outcome of a TargetOptimization run.

    Holds the (optionally simplified and) decoded solutions as a pandas
    DataFrame with ``targets`` and ``fitness`` columns, plus enough context
    (model, heuristic, simulation settings, seed) to inspect and merge runs.
    """
    def __init__(self, model=None, heuristic_method=None, simulation_method=None, simulation_kwargs=None,
                 solutions=None, objective_function=None, target_type=None, decoder=None, evaluator=None,
                 seed=None, metadata=None, view=None, simplify=True, *args, **kwargs):
        super(TargetOptimizationResult, self).__init__(*args, **kwargs)
        self.seed = seed
        self.model = model
        self.heuristic_method = heuristic_method
        self.simulation_method = simulation_method
        self.simulation_kwargs = simulation_kwargs or {}
        self.objective_function = objective_function
        self.target_type = target_type
        self._decoder = decoder
        self._evaluator = evaluator
        self._metadata = metadata
        self._view = view
        if simplify:
            solutions = self._simplify_solutions(solutions)
        self._solutions = self._decode_solutions(solutions)
    def __len__(self):
        return len(self._solutions)
    def __getstate__(self):
        # Loggers hold unpicklable handles; strip them before pickling.
        # NOTE(review): ``dict(self.__dict__)`` is a shallow copy, so setting
        # the loggers to None also mutates the live heuristic_method object.
        d = dict(self.__dict__)
        d['heuristic_method'].logger = None
        d['heuristic_method']._kwargs['_ec'].logger = None
        return d
    def __setstate__(self, d):
        self.__dict__.update(d)
    def _repr_html_(self):
        template = """
        <h4>Result:</h4>
        <ul>
            <li>model: %s</li>
            <li>heuristic: %s</li>
            <li>objective function: %s</li>
            <li>simulation method: %s</li>
            <li>target type: %s</li>
        <ul>
        """
        model_id = self.model.id
        heuristic = self.heuristic_method.__class__.__name__
        of_string = self.objective_function._repr_latex_()
        simulation = self.simulation_method.__name__
        solutions = self.data_frame._repr_html_()
        results = template % (model_id, heuristic, of_string, simulation, self.target_type)
        return results + solutions
    def __iter__(self):
        # Yield (targets, fitness) pairs in decoded order.
        for _, row in self._solutions.iterrows():
            yield [row['targets'], row['fitness']]
    def __iadd__(self, other):
        if not isinstance(other, self.__class__):
            raise AssertionError("Cannot merge result with %s" % type(other))
        if self.model.id != other.model.id:
            raise AssertionError("Cannot merge results from different models")
        if self.target_type != other.target_type:
            raise AssertionError("Cannot merge results with resulting from different strategies")
        if self.heuristic_method.__class__.__name__ != other.heuristic_method.__class__.__name__:
            raise AssertionError("Cannot merge results from different heuristic methods")
        self._solutions = self._solutions.append(other._solutions, ignore_index=True)
        # BUG FIX: the solutions frame has a "targets" column (see
        # _decode_solutions), not "knockouts", and pandas removed the
        # ``take_last`` keyword in favor of ``keep``.
        self._solutions.drop_duplicates(subset="targets", keep="last", inplace=True)
        return self
    # TODO: find out how to plot an histogram (?) in bokeh
    def stats(self):
        """Display run statistics using Bokeh in a notebook, CLI output otherwise."""
        stats_data = None
        if in_ipnb():
            if config.use_bokeh:
                stats_data = stats.BokehStatsData(self)
        else:
            stats_data = stats.CLIStatsData(self)
        stats_data.display()
    @property
    def data_frame(self):
        """The decoded solutions as a pandas DataFrame."""
        return DataFrame(self._solutions)
    def _decode_solutions(self, solutions):
        # Decode each raw candidate into concrete target combinations and
        # collect the unique ones with their fitness.
        decoded_solutions = DataFrame(columns=["targets", "fitness"])
        index = 0
        for solution in solutions:
            combinations = self._decoder(solution.candidate, flat=True, decompose=True)
            for targets in combinations:
                if len(targets) > 0:
                    decoded_solutions.loc[index] = [tuple(targets), solution.fitness]
                    index += 1
        decoded_solutions.drop_duplicates(inplace=True, subset="targets")
        decoded_solutions.reset_index(inplace=True)
        return decoded_solutions
    def _simplify_solutions(self, solutions):
        # Run the greedy simplification in parallel over chunks of solutions.
        simplification = SolutionSimplification(self._evaluator)
        chunks = (chunk for chunk in partition(solutions, len(self._view)))
        try:
            chunked_results = self._view.map(simplification, chunks)
        except KeyboardInterrupt as e:
            # BUG FIX: the attribute is ``_view``; ``self.view`` does not exist.
            self._view.shutdown()
            raise e
        solutions = reduce(list.__add__, chunked_results)
        return solutions
class ReactionKnockoutOptimization(KnockoutOptimization):
    """
    Knockout optimization using reactions.
    Attributes
    ----------
    model : cobra.Model
        A constraint-based model.
    heuristic_method : inspyred.ec.EvolutionaryComputation
        An evolutionary algorithm.
    objective_function : objective function or list(objective function)
        The objectives for the algorithm to maximize.
    seed : int
        A seed for random. It is auto-generated if None is given.
    termination : inspyred.ec.terminators
        A termination criteria for the algorithm. The default is inspyred.ec.terminators.evaluation_termination.
    simulation_method: flux_analysis.simulation
        The method used to simulate the model.
    wt_reference: dict
        A reference initial state for the optimization. It is required for flux_analysis.simulation.lmoma and
        flux_analysis.simulation.room. If not given, it will be computed using flux_analysis.simulation.pfba
    reactions: list
        A list of valid reactions to knockout. If None, then all reactions in the model will be knockout candidates
        except the ones defined in essential_reactions
    essential_reactions: list
        A list of reactions that cannot be knocked out. If None, then all essential reactions will be removed from
        the valid reactions set.
    use_nullspace_simplification: Boolean (default True)
        Use a basis for the nullspace to find groups of reactions whose fluxes are multiples of each other and dead
        end reactions. From each of these groups only 1 reaction will be included as a possible knockout.
    Methods
    -------
    run(view=config.default_view, maximize=True, **kwargs)
    See Also
    --------
    *inspyred.ec
    *cameo.config.default_view
    Examples
    --------
    >>> from cameo import models
    >>> model = models.bigg.iJO1366
    >>> from cameo.strain_design.heuristic.evolutionary.objective_functions import biomass_product_coupled_yield
    >>> bpcy = biomass_product_coupled_yield(model.reactions.Ec_biomass_iJO1366_core_53p95,
    ...                                      model.reactions.EX_succ_e,
    ...                                      model.reactions.EX_glc__D_e)
    >>> knockout_optimization = ReactionKnockoutOptimization(model=model, objective_function=bpcy,
    ...                                                      essential_reactions=["ATPM"])
    >>> knockout_optimization.run(max_evaluations=50000)
    """
    def __init__(self, reactions=None, essential_reactions=None, use_nullspace_simplification=True, *args, **kwargs):
        super(ReactionKnockoutOptimization, self).__init__(*args, **kwargs)
        if reactions is None:
            self.reactions = set([r.id for r in self.model.reactions])
        else:
            self.reactions = reactions
        logger.debug("Computing essential reactions...")
        # Essential reactions are always excluded from the knockout search;
        # user-supplied ones are added on top of the computed set.
        if essential_reactions is None:
            self.essential_reactions = set(r.id for r in find_essential_reactions(self.model, processes=1))
        else:
            self.essential_reactions = set([r.id for r in find_essential_reactions(self.model, processes=1)])
            self.essential_reactions.update(essential_reactions)
        if use_nullspace_simplification:
            # Shrink the search space: drop exchanges, blocked (dead-end)
            # reactions and all-but-one member of each coupled-flux group.
            ns = nullspace(create_stoichiometric_array(self.model))
            dead_ends = set(find_blocked_reactions_nullspace(self.model, ns=ns))
            exchanges = set(self.model.boundary)
            reactions = [
                r for r in self.model.reactions
                if (r not in exchanges) and (
                    r not in dead_ends) and (
                    r.id not in self.essential_reactions)
            ]
            groups = find_coupled_reactions_nullspace(self.model, ns=ns)
            # NOTE(review): ``r.id in reactions`` tests a string id against a
            # list of Reaction objects — confirm Reaction.__eq__ makes this
            # membership test behave as intended.
            groups_keys = [set(group) for group in groups if any(r.id in reactions for r in group)]
            reduced_set = reduce_reaction_set(reactions, groups_keys)
            to_keep = [r.id for r in reduced_set]
        else:
            groups = None
            to_keep = set(r.id for r in self.model.reactions)
            to_keep.difference_update(r.id for r in self.model.boundary)
            to_keep.difference_update(self.essential_reactions)
            to_keep = list(to_keep)
        self.representation = to_keep
        self._target_type = REACTION_KNOCKOUT_TYPE
        self._decoder = decoders.ReactionSetDecoder(self.representation, self.model, groups=groups)
        self._evaluator = evaluators.KnockoutEvaluator(model=self.model,
                                                       decoder=self._decoder,
                                                       objective_function=self.objective_function,
                                                       simulation_method=self._simulation_method,
                                                       simulation_kwargs=self._simulation_kwargs)
class GeneKnockoutOptimization(KnockoutOptimization):
    """
    Knockout optimization using genes.
    Attributes
    ----------
    model : cobra.Model
        A constraint-based model.
    heuristic_method : inspyred.ec.EvolutionaryComputation
        An evolutionary algorithm.
    objective_function : objective function or list(objective function)
        The objectives for the algorithm to maximize.
    seed : int
        A seed for random. It is auto-generated if None is given.
    termination : inspyred.ec.terminators
        A termination criteria for the algorithm. The default is inspyred.ec.terminators.evaluation_termination.
    simulation_method: flux_analysis.simulation
        The method used to simulate the model.
    wt_reference: dict
        A reference initial state for the optimization. It is required for flux_analysis.simulation.lmoma and
        flux_analysis.simulation.room. If not given, it will be computed using flux_analysis.simulation.pfba
    genes: list
        A list of valid genes to knockout. If None, then all genes in the model will be knockout candidates except the
        ones defined in essential_genes
    essential_genes: list
        A list of genes that cannot be knocked out. If None, then all essential genes will be removed from the valid
        genes set.
    use_nullspace_simplification: Boolean (default True)
        Use a basis for the nullspace dead end reactions. Gene present only in dead end reactions will be ignored.
    Methods
    -------
    run(view=config.default_view, maximize=True, **kwargs)
    See Also
    --------
    *inspyred.ec
    *cameo.config.default_view
    Examples
    --------
    >>> from cameo import models
    >>> model = models.bigg.iJO1366
    >>> from cameo.strain_design.heuristic.evolutionary.objective_functions import biomass_product_coupled_yield
    >>> bpcy = biomass_product_coupled_yield(model.reactions.Ec_biomass_iJO1366_core_53p95,
    ...                                      model.reactions.EX_succ_e,
    ...                                      model.reactions.EX_glc__D_e)
    >>> knockout_optimization = GeneKnockoutOptimization(model=model, objective_function=bpcy)
    >>> knockout_optimization.run(max_evaluations=50000)
    """
    def __init__(self, genes=None, essential_genes=None, use_nullspace_simplification=True, *args, **kwargs):
        super(GeneKnockoutOptimization, self).__init__(*args, **kwargs)
        if genes is None:
            self.genes = set([g.id for g in self.model.genes])
        else:
            self.genes = genes
        # Essential genes (computed plus user-supplied) are never candidates.
        if essential_genes is None:
            self.essential_genes = {g.id for g in find_essential_genes(self.model, processes=1)}
        else:
            self.essential_genes = set([g.id for g in find_essential_genes(self.model, processes=1)] + essential_genes)
        # TODO: use genes from groups
        if use_nullspace_simplification:
            # Drop genes whose every reaction is blocked (dead ends): knocking
            # them out cannot change any feasible flux distribution.
            ns = nullspace(create_stoichiometric_array(self.model))
            dead_end_reactions = find_blocked_reactions_nullspace(self.model, ns=ns)
            dead_end_genes = {g.id for g in self.model.genes if all(r in dead_end_reactions for r in g.reactions)}
            exclude_genes = self.essential_genes.union(dead_end_genes)
            genes = [g for g in self.model.genes if g.id not in exclude_genes]
            self.representation = [g.id for g in genes]
        else:
            self.representation = list(self.genes.difference(self.essential_genes))
        self._target_type = GENE_KNOCKOUT_TYPE
        self._decoder = decoders.GeneSetDecoder(self.representation, self.model)
        self._evaluator = evaluators.KnockoutEvaluator(model=self.model,
                                                       decoder=self._decoder,
                                                       objective_function=self.objective_function,
                                                       simulation_method=self._simulation_method,
                                                       simulation_kwargs=self._simulation_kwargs)
class CofactorSwapOptimization(TargetOptimization):
    """
    Optimize co-factor swapping
    As suggested in [1]_, flux through a given reaction can sometimes be optimized by swapping complementary
    co-factor. This class implements a search for reactions when swapped improve the given objective. Briefly,
    the approach is to
    - find reactions that have all the targeted co-factor pairs e.g. (nad_c -> nadp_c, nadh_c -> nadph_c)
    - add reactions that have the co-factors swapped and then by a search algorithm switching one off in favor of the
      other
    The implementation here differs from that in [1]_ in that we use a general purpose search algorithm rather than
    formulating the search as a mixed integer linear programming problem.
    References
    ----------
    .. [1] King, Zachary A., and Adam M. Feist. "Optimizing Cofactor Specificity of Oxidoreductase Enzymes for the
      Generation of Microbial Production Strains - OptSwap." Industrial Biotechnology 9, no. 4 (August 1,
      2013): 236-46. - doi:10.1089/ind.2013.0005.
    Parameters
    ----------
    model : cobra.Model
       the model to operator on
    cofactor_id_swaps : tuple
       a tuple of length 2 that defines two lists of metabolite identifiers that should be interchanged during the
       swap optimization see e.g. `NADH_NADPH` which is also the default.
    candidate_reactions : list
       reactions to consider for co-factor swap - if not given then search for all reactions that include the given
       cofactors
    skip_reactions : list
       reactions to not consider for co-factor swap, defaults to the objective function if not provided
    args, kwargs : keyword arguments
       passed on to super-classes, see in particular `objective_function`, `heuristic_method`, `termination`,
       `simulation_method`, `wt_reference`, of `HeuristicOptimization` and `max_size` of `HeuristicOptimization.run`
    Examples
    --------
    >>> from cameo import models
    >>> from cameo.strain_design.heuristic.evolutionary.objective_functions import product_yield
    >>> model = models.bigg.iJO1366
    >>> model.objective = model.reactions.EX_thr__L_e
    >>> model.reactions.BIOMASS_Ec_iJO1366_core_53p95M.lower_bound = 0.1
    >>> py = product_yield(model.reactions.EX_thr__L_e, model.reactions.EX_glc__D_e)
    >>> swap_optimization = CofactorSwapOptimization(model=model, objective_function=py)
    >>> swap_optimization.run(max_evaluations=2000, max_size=2)
    """
    def __init__(self, cofactor_id_swaps=NADH_NADPH, candidate_reactions=None, skip_reactions=None, *args, **kwargs):
        super(CofactorSwapOptimization, self).__init__(*args, **kwargs)
        self._target_type = SWAP_TYPE
        # Resolve metabolite ids into Metabolite objects once, up front.
        swap_pairs = ([self.model.metabolites.get_by_id(m) for m in cofactor_id_swaps[0]],
                      [self.model.metabolites.get_by_id(m) for m in cofactor_id_swaps[1]])
        self.metadata['swap_pairs'] = swap_pairs
        self.representation = candidate_reactions or self.find_swappable_reactions(self.model, swap_pairs)
        if skip_reactions:
            # BUG FIX: ``representation`` is a list (see find_swappable_reactions),
            # which does not support ``-=``; filter instead, preserving order.
            skip = set(skip_reactions)
            self.representation = [r for r in self.representation if r not in skip]
        self._decoder = decoders.ReactionSetDecoder(self.representation, self.model)
        self._evaluator = evaluators.SwapEvaluator(model=self.model,
                                                   decoder=self._decoder,
                                                   objective_function=self.objective_function,
                                                   simulation_method=self._simulation_method,
                                                   simulation_kwargs=self._simulation_kwargs,
                                                   swap_pair=swap_pairs)
    @staticmethod
    def find_swappable_reactions(model, swaps):
        """
        Get all reactions that can undergo co-factor swapping
        Find reactions that have exactly one of the targeted cofactor sets
        (and are therefore swappable).
        Parameters
        ----------
        model: cobra.Model
            A model with reactions to search on.
        swaps: tuple
            Pair of cofactors to swap.
        Returns
        -------
        list
            Identifiers of the swappable reactions.
        """
        def swap_search(mets):
            # A reaction qualifies when it carries one complete cofactor set
            # but not both (both would mean there is nothing to swap).
            has_pairs = all(mets.get(m, False) for m in swaps[0]) or all(mets.get(m, False) for m in swaps[1])
            contains_all = all(mets.get(m, False) for m in swaps[0]) and all(mets.get(m, False) for m in swaps[1])
            return has_pairs and not contains_all
        candidates = model.reactions.query(search_function=swap_search, attribute='metabolites')
        return [r.id for r in candidates]
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=unnecessary-pass
"""Data iterators for common data formats."""
from __future__ import absolute_import
from collections import namedtuple
import sys
import ctypes
import logging
import threading
import numpy as np
from ..base import _LIB
from ..base import c_str_array, mx_uint, py_str
from ..base import DataIterHandle, NDArrayHandle
from ..base import mx_real_t
from ..base import check_call, build_param_doc as _build_param_doc
from ..ndarray import NDArray
from ..ndarray.sparse import CSRNDArray
from ..ndarray import _ndarray_cls
from ..ndarray import array
from ..ndarray import concat
from .utils import _init_data, _has_instance, _getdata_by_idx
class DataDesc(namedtuple('DataDesc', ['name', 'shape'])):
    """DataDesc is used to store name, shape, type and layout
    information of the data or the label.
    The `layout` describes how the axes in `shape` should be interpreted,
    for example for image data setting `layout=NCHW` indicates
    that the first axis is number of examples in the batch(N),
    C is number of channels, H is the height and W is the width of the image.
    For sequential data, by default `layout` is set to ``NTC``, where
    N is number of examples in the batch, T the temporal axis representing time
    and C is the number of channels.
    Parameters
    ----------
    cls : DataDesc
        The class.
    name : str
        Data name.
    shape : tuple of int
        Data shape.
    dtype : np.dtype, optional
        Data type.
    layout : str, optional
        Data layout.
    """
    def __new__(cls, name, shape, dtype=mx_real_t, layout='NCHW'): # pylint: disable=super-on-old-class
        # NOTE(review): argument order is super(cls, DataDesc) rather than the
        # usual super(DataDesc, cls) — this only works while cls is DataDesc
        # itself; confirm no subclass relies on this constructor.
        ret = super(cls, DataDesc).__new__(cls, name, shape)
        # dtype/layout ride along as plain attributes on the namedtuple.
        ret.dtype = dtype
        ret.layout = layout
        return ret
    def __repr__(self):
        return "DataDesc[%s,%s,%s,%s]" % (self.name, self.shape, self.dtype,
                                          self.layout)
    @staticmethod
    def get_batch_axis(layout):
        """Get the dimension that corresponds to the batch size.
        When data parallelism is used, the data will be automatically split and
        concatenated along the batch-size dimension. Axis can be -1, which means
        the whole array will be copied for each data-parallelism device.
        Parameters
        ----------
        layout : str
            layout string. For example, "NCHW".
        Returns
        -------
        int
            An axis indicating the batch_size dimension.
        """
        # No layout means the batch axis defaults to 0; otherwise it is the
        # position of 'N' (find returns -1 when absent, i.e. copy-per-device).
        if layout is None:
            return 0
        return layout.find('N')
    @staticmethod
    def get_list(shapes, types):
        """Get DataDesc list from attribute lists.
        Parameters
        ----------
        shapes : a tuple of (name_, shape_)
        types : a tuple of (name_, np.dtype)
        """
        if types is not None:
            type_dict = dict(types)
            return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
        else:
            return [DataDesc(x[0], x[1]) for x in shapes]
class DataBatch(object):
    """A single batch of data as returned by an MXNet data iterator.

    Each `next` call on a data iterator yields one of these, holding
    `batch_size` examples. For image data the array layout is governed by
    the `layout` attribute of the `DataDesc` entries in `provide_data`
    ('NCHW': ``(batch_size, num_channel, height, width)``; 'NHWC':
    ``(batch_size, height, width, num_channel)``, channels typically RGB).

    Parameters
    ----------
    data : list of `NDArray`, each array containing `batch_size` examples.
        A list of input data.
    label : list of `NDArray`, each array often containing a 1-dimensional array. optional
        A list of input labels.
    pad : int, optional
        Number of padding examples appended when the total example count is
        not divisible by `batch_size`; these are ignored in prediction.
    index : numpy.array, optional
        The example indices in this batch.
    bucket_key : int, optional
        The bucket key, used for bucketing module.
    provide_data : list of `DataDesc`, optional
        Name/shape/type/layout descriptors, one per entry of ``data``.
    provide_label : list of `DataDesc`, optional
        Name/shape/type/layout descriptors, one per entry of ``label``.
    """
    def __init__(self, data, label=None, pad=None, index=None,
                 bucket_key=None, provide_data=None, provide_label=None):
        # Only validate container types; contents are trusted downstream.
        if data is not None:
            assert isinstance(data, (list, tuple)), "Data must be list of NDArrays"
        if label is not None:
            assert isinstance(label, (list, tuple)), "Label must be list of NDArrays"
        self.data = data
        self.label = label
        self.pad = pad
        self.index = index
        self.bucket_key = bucket_key
        self.provide_data = provide_data
        self.provide_label = provide_label
    def __str__(self):
        # Summarize by shapes only; the arrays themselves may be large.
        data_shapes = [arr.shape for arr in self.data]
        label_shapes = [arr.shape for arr in self.label] if self.label else None
        return "{}: data shapes: {} label shapes: {}".format(
            self.__class__.__name__,
            data_shapes,
            label_shapes)
class DataIter(object):
    """The base class for an MXNet data iterator.

    All I/O in MXNet is handled by specializations of this class. MXNet data
    iterators follow the standard Python iterator protocol: each `next` call
    produces a `DataBatch`, and `StopIteration` is raised once the data is
    exhausted.

    Parameters
    ----------
    batch_size : int, optional
        The batch size, namely the number of items in the batch.
    See Also
    --------
    NDArrayIter : Data-iterator for MXNet NDArray or numpy-ndarray objects.
    CSVIter : Data-iterator for csv data.
    LibSVMIter : Data-iterator for libsvm data.
    ImageIter : Data-iterator for images.
    """
    def __init__(self, batch_size=0):
        self.batch_size = batch_size
    def __iter__(self):
        return self
    def __next__(self):
        # Python 3 protocol hook; delegates to the Python 2 style `next`.
        return self.next()
    def next(self):
        """Get next data batch from iterator.
        Returns
        -------
        DataBatch
            The data of next batch.
        Raises
        ------
        StopIteration
            If the end of the data is reached.
        """
        if not self.iter_next():
            raise StopIteration
        return DataBatch(data=self.getdata(), label=self.getlabel(),
                         pad=self.getpad(), index=self.getindex())
    def reset(self):
        """Reset the iterator to the begin of the data."""
        pass
    def iter_next(self):
        """Move to the next batch.
        Returns
        -------
        boolean
            Whether the move is successful.
        """
        pass
    def getdata(self):
        """Get data of current batch.
        Returns
        -------
        list of NDArray
            The data of the current batch.
        """
        pass
    def getlabel(self):
        """Get label of the current batch.
        Returns
        -------
        list of NDArray
            The label of the current batch.
        """
        pass
    def getindex(self):
        """Get index of the current batch.
        Returns
        -------
        index : numpy.array
            The indices of examples in the current batch.
        """
        return None
    def getpad(self):
        """Get the number of padding examples in the current batch.
        Returns
        -------
        int
            Number of padding examples in the current batch.
        """
        pass
class ResizeIter(DataIter):
    """Resize a data iterator to a given number of batches per epoch.

    Wraps another iterator and declares an epoch to be exactly ``size``
    batches long, restarting the underlying iterator transparently whenever
    it runs out before the epoch ends.

    Parameters
    ----------
    data_iter : DataIter
        The data iterator to be resized.
    size : int
        The number of batches per epoch to resize to.
    reset_internal : bool
        Whether to reset internal iterator on ResizeIter.reset.
    Examples
    --------
    >>> nd_iter = mx.io.NDArrayIter(mx.nd.ones((100,10)), batch_size=25)
    >>> resize_iter = mx.io.ResizeIter(nd_iter, 2)
    >>> for batch in resize_iter:
    ...     print(batch.data)
    [<NDArray 25x10 @cpu(0)>]
    [<NDArray 25x10 @cpu(0)>]
    """
    def __init__(self, data_iter, size, reset_internal=True):
        super(ResizeIter, self).__init__()
        self.data_iter = data_iter
        self.size = size
        self.reset_internal = reset_internal
        self.cur = 0
        self.current_batch = None
        # Mirror the wrapped iterator's public descriptors.
        self.provide_data = data_iter.provide_data
        self.provide_label = data_iter.provide_label
        self.batch_size = data_iter.batch_size
        if hasattr(data_iter, 'default_bucket_key'):
            self.default_bucket_key = data_iter.default_bucket_key
    def reset(self):
        self.cur = 0
        if self.reset_internal:
            self.data_iter.reset()
    def iter_next(self):
        if self.cur == self.size:
            return False
        try:
            self.current_batch = self.data_iter.next()
        except StopIteration:
            # Underlying iterator exhausted mid-epoch: wrap around.
            self.data_iter.reset()
            self.current_batch = self.data_iter.next()
        self.cur += 1
        return True
    def getdata(self):
        return self.current_batch.data
    def getlabel(self):
        return self.current_batch.label
    def getindex(self):
        return self.current_batch.index
    def getpad(self):
        return self.current_batch.pad
class PrefetchingIter(DataIter):
    """Performs pre-fetch for other data iterators.
    This iterator will create another thread to perform ``iter_next`` and then
    store the data in memory. It potentially accelerates the data read, at the
    cost of more memory usage.
    Parameters
    ----------
    iters : DataIter or list of DataIter
        The data iterators to be pre-fetched.
    rename_data : None or list of dict
        The *i*-th element is a renaming map for the *i*-th iter, in the form of
        {'original_name' : 'new_name'}. Should have one entry for each entry
        in iter[i].provide_data.
    rename_label : None or list of dict
        Similar to ``rename_data``.
    Examples
    --------
    >>> iter1 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
    >>> iter2 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
    >>> piter = mx.io.PrefetchingIter([iter1, iter2],
    ...                               rename_data=[{'data': 'data_1'}, {'data': 'data_2'}])
    >>> print(piter.provide_data)
    [DataDesc[data_1,(25, 10L),<type 'numpy.float32'>,NCHW],
     DataDesc[data_2,(25, 10L),<type 'numpy.float32'>,NCHW]]
    """
    def __init__(self, iters, rename_data=None, rename_label=None):
        super(PrefetchingIter, self).__init__()
        if not isinstance(iters, list):
            iters = [iters]
        self.n_iter = len(iters)
        assert self.n_iter > 0
        self.iters = iters
        self.rename_data = rename_data
        self.rename_label = rename_label
        # Batch size is taken from the first data descriptor's shape (axis 0).
        self.batch_size = self.provide_data[0][1][0]
        # Per-iterator handshake events:
        #   data_ready[i]  — set by worker i when next_batch[i] is filled.
        #   data_taken[i]  — set by the consumer when it has taken the batch,
        #                    allowing worker i to fetch the next one.
        self.data_ready = [threading.Event() for i in range(self.n_iter)]
        self.data_taken = [threading.Event() for i in range(self.n_iter)]
        for i in self.data_taken:
            i.set()
        self.started = True
        self.current_batch = [None for i in range(self.n_iter)]
        self.next_batch = [None for i in range(self.n_iter)]
        def prefetch_func(self, i):
            """Thread entry"""
            # Loop until shutdown (started=False); None marks end-of-data.
            while True:
                self.data_taken[i].wait()
                if not self.started:
                    break
                try:
                    self.next_batch[i] = self.iters[i].next()
                except StopIteration:
                    self.next_batch[i] = None
                self.data_taken[i].clear()
                self.data_ready[i].set()
        # One daemon worker thread per wrapped iterator.
        self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) \
                                 for i in range(self.n_iter)]
        for thread in self.prefetch_threads:
            thread.setDaemon(True)
            thread.start()
    def __del__(self):
        # Signal shutdown, wake all workers, and wait for them to exit.
        self.started = False
        for i in self.data_taken:
            i.set()
        for thread in self.prefetch_threads:
            thread.join()
    @property
    def provide_data(self):
        # Concatenate (and optionally rename) the descriptors of all iterators.
        if self.rename_data is None:
            return sum([i.provide_data for i in self.iters], [])
        else:
            return sum([[
                DataDesc(r[x.name], x.shape, x.dtype)
                if isinstance(x, DataDesc) else DataDesc(*x)
                for x in i.provide_data
            ] for r, i in zip(self.rename_data, self.iters)], [])
    @property
    def provide_label(self):
        if self.rename_label is None:
            return sum([i.provide_label for i in self.iters], [])
        else:
            return sum([[
                DataDesc(r[x.name], x.shape, x.dtype)
                if isinstance(x, DataDesc) else DataDesc(*x)
                for x in i.provide_label
            ] for r, i in zip(self.rename_label, self.iters)], [])
    def reset(self):
        # Drain in-flight fetches, reset the wrapped iterators, then restart
        # the workers by flipping the handshake events.
        for i in self.data_ready:
            i.wait()
        for i in self.iters:
            i.reset()
        for i in self.data_ready:
            i.clear()
        for i in self.data_taken:
            i.set()
    def iter_next(self):
        # Wait for every worker; all iterators must end on the same batch.
        for i in self.data_ready:
            i.wait()
        if self.next_batch[0] is None:
            for i in self.next_batch:
                assert i is None, "Number of entry mismatches between iterators"
            return False
        else:
            for batch in self.next_batch:
                assert batch.pad == self.next_batch[0].pad, \
                    "Number of entry mismatches between iterators"
            # Merge the per-iterator batches into a single DataBatch.
            self.current_batch = DataBatch(sum([batch.data for batch in self.next_batch], []),
                                           sum([batch.label for batch in self.next_batch], []),
                                           self.next_batch[0].pad,
                                           self.next_batch[0].index,
                                           provide_data=self.provide_data,
                                           provide_label=self.provide_label)
            for i in self.data_ready:
                i.clear()
            for i in self.data_taken:
                i.set()
            return True
    def next(self):
        if self.iter_next():
            return self.current_batch
        else:
            raise StopIteration
    def getdata(self):
        return self.current_batch.data
    def getlabel(self):
        return self.current_batch.label
    def getindex(self):
        return self.current_batch.index
    def getpad(self):
        return self.current_batch.pad
class NDArrayIter(DataIter):
    """Returns an iterator for ``mx.nd.NDArray``, ``numpy.ndarray``, ``h5py.Dataset``
    ``mx.nd.sparse.CSRNDArray`` or ``scipy.sparse.csr_matrix``.
    Examples
    --------
    >>> data = np.arange(40).reshape((10,2,2))
    >>> labels = np.ones([10, 1])
    >>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='discard')
    >>> for batch in dataiter:
    ...     print batch.data[0].asnumpy()
    ...     batch.data[0].shape
    ...
    [[[ 36.  37.]
      [ 38.  39.]]
     [[ 16.  17.]
      [ 18.  19.]]
     [[ 12.  13.]
      [ 14.  15.]]]
    (3L, 2L, 2L)
    [[[ 32.  33.]
      [ 34.  35.]]
     [[  4.   5.]
      [  6.   7.]]
     [[ 24.  25.]
      [ 26.  27.]]]
    (3L, 2L, 2L)
    [[[  8.   9.]
      [ 10.  11.]]
     [[ 20.  21.]
      [ 22.  23.]]
     [[ 28.  29.]
      [ 30.  31.]]]
    (3L, 2L, 2L)
    >>> dataiter.provide_data # Returns a list of `DataDesc`
    [DataDesc[data,(3, 2L, 2L),<type 'numpy.float32'>,NCHW]]
    >>> dataiter.provide_label # Returns a list of `DataDesc`
    [DataDesc[softmax_label,(3, 1L),<type 'numpy.float32'>,NCHW]]
    In the above example, data is shuffled as `shuffle` parameter is set to `True`
    and remaining examples are discarded as `last_batch_handle` parameter is set to `discard`.
    Usage of `last_batch_handle` parameter:
    >>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='pad')
    >>> batchidx = 0
    >>> for batch in dataiter:
    ...     batchidx += 1
    ...
    >>> batchidx  # Padding added after the examples read are over. So, 10/3+1 batches are created.
    4
    >>> dataiter = mx.io.NDArrayIter(data, labels, 3, True, last_batch_handle='discard')
    >>> batchidx = 0
    >>> for batch in dataiter:
    ...     batchidx += 1
    ...
    >>> batchidx  # Remaining examples are discarded. So, 10/3 batches are created.
    3
    >>> dataiter = mx.io.NDArrayIter(data, labels, 3, False, last_batch_handle='roll_over')
    >>> batchidx = 0
    >>> for batch in dataiter:
    ...     batchidx += 1
    ...
    >>> batchidx  # Remaining examples are rolled over to the next iteration.
    3
    >>> dataiter.reset()
    >>> dataiter.next().data[0].asnumpy()
    [[[ 36.  37.]
      [ 38.  39.]]
     [[ 0.  1.]
      [ 2.  3.]]
     [[ 4.  5.]
      [ 6.  7.]]]
    (3L, 2L, 2L)
    `NDArrayIter` also supports multiple input and labels.
    >>> data = {'data1':np.zeros(shape=(10,2,2)), 'data2':np.zeros(shape=(20,2,2))}
    >>> label = {'label1':np.zeros(shape=(10,1)), 'label2':np.zeros(shape=(20,1))}
    >>> dataiter = mx.io.NDArrayIter(data, label, 3, True, last_batch_handle='discard')
    `NDArrayIter` also supports ``mx.nd.sparse.CSRNDArray``
    with `last_batch_handle` set to `discard`.
    >>> csr_data = mx.nd.array(np.arange(40).reshape((10,4))).tostype('csr')
    >>> labels = np.ones([10, 1])
    >>> dataiter = mx.io.NDArrayIter(csr_data, labels, 3, last_batch_handle='discard')
    >>> [batch.data[0] for batch in dataiter]
    [
    <CSRNDArray 3x4 @cpu(0)>,
    <CSRNDArray 3x4 @cpu(0)>,
    <CSRNDArray 3x4 @cpu(0)>]
    Parameters
    ----------
    data: array or list of array or dict of string to array
        The input data.
    label: array or list of array or dict of string to array, optional
        The input label.
    batch_size: int
        Batch size of data.
    shuffle: bool, optional
        Whether to shuffle the data.
        Only supported if no h5py.Dataset inputs are used.
    last_batch_handle : str, optional
        How to handle the last batch. This parameter can be 'pad', 'discard' or
        'roll_over'.
        If 'pad', the last batch will be padded with data starting from the beginning
        If 'discard', the last batch will be discarded
        If 'roll_over', the remaining elements will be rolled over to the next iteration and
        note that it is intended for training and can cause problems if used for prediction.
    data_name : str, optional
        The data name.
    label_name : str, optional
        The label name.
    """
    def __init__(self, data, label=None, batch_size=1, shuffle=False,
                 last_batch_handle='pad', data_name='data',
                 label_name='softmax_label'):
        super(NDArrayIter, self).__init__(batch_size)
        self.data = _init_data(data, allow_empty=False, default_name=data_name)
        self.label = _init_data(label, allow_empty=True, default_name=label_name)
        # Sparse arrays cannot be padded/concatenated like dense ones, so only
        # whole batches ('discard') are supported for CSR inputs.
        if ((_has_instance(self.data, CSRNDArray) or
             _has_instance(self.label, CSRNDArray)) and
                (last_batch_handle != 'discard')):
            raise NotImplementedError("`NDArrayIter` only supports ``CSRNDArray``" \
                                      " with `last_batch_handle` set to `discard`.")
        self.idx = np.arange(self.data[0][1].shape[0])
        self.shuffle = shuffle
        self.last_batch_handle = last_batch_handle
        self.batch_size = batch_size
        self.cursor = -self.batch_size
        self.num_data = self.idx.shape[0]
        # shuffle (if requested) and position the cursor before the first batch
        self.reset()
        self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label]
        self.num_source = len(self.data_list)
        # used for 'roll_over': holds the incomplete tail batch of an epoch
        self._cache_data = None
        self._cache_label = None
    @property
    def provide_data(self):
        """The name and shape of data provided by this iterator."""
        return [
            DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
            for k, v in self.data
        ]
    @property
    def provide_label(self):
        """The name and shape of label provided by this iterator."""
        return [
            DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
            for k, v in self.label
        ]
    def hard_reset(self):
        """Ignore roll over data and set to start."""
        if self.shuffle:
            self._shuffle_data()
        self.cursor = -self.batch_size
        self._cache_data = None
        self._cache_label = None
    def reset(self):
        """Resets the iterator to the beginning of the data."""
        if self.shuffle:
            self._shuffle_data()
        # the range below indicates the last (incomplete) batch of the epoch
        if self.last_batch_handle == 'roll_over' and \
            self.num_data - self.batch_size < self.cursor < self.num_data:
            # (self.cursor - self.num_data) represents the data we have for the last batch;
            # start negative so the cached tail is prepended to the first batch
            self.cursor = self.cursor - self.num_data - self.batch_size
        else:
            self.cursor = -self.batch_size
    def iter_next(self):
        """Increments the cursor by batch_size for the next batch and checks
        whether the cursor exceeds the number of data points."""
        self.cursor += self.batch_size
        return self.cursor < self.num_data
    def next(self):
        """Returns the next batch of data."""
        if not self.iter_next():
            raise StopIteration
        data = self.getdata()
        label = self.getlabel()
        # iter should stop when last batch is not complete
        if data[0].shape[0] != self.batch_size:
            # in this case, cache it for next epoch
            self._cache_data = data
            self._cache_label = label
            raise StopIteration
        return DataBatch(data=data, label=label, \
            pad=self.getpad(), index=None)
    def _getdata(self, data_source, start=None, end=None):
        """Load data from underlying arrays."""
        assert start is not None or end is not None, 'should at least specify start or end'
        start = start if start is not None else 0
        if end is None:
            end = data_source[0][1].shape[0] if data_source else 0
        s = slice(start, end)
        return [
            x[1][s]
            if isinstance(x[1], (np.ndarray, NDArray)) else
            # h5py (only supports indices in increasing order)
            array(x[1][sorted(self.idx[s])][[
                list(self.idx[s]).index(i)
                for i in sorted(self.idx[s])
            ]]) for x in data_source
        ]
    def _concat(self, first_data, second_data):
        """Helper function to concat two NDArrays."""
        assert len(first_data) == len(
            second_data), 'data source should contain the same size'
        if first_data and second_data:
            return [
                concat(
                    first_data[x],
                    second_data[x],
                    dim=0
                ) for x in range(len(first_data))
            ]
        elif (not first_data) and (not second_data):
            return []
        else:
            # Fall back to whichever side is non-empty. The previous code
            # always returned element 0 and iterated range(len(first_data)),
            # which yielded nothing when first_data was the empty side.
            return [
                first_data[x] if first_data else second_data[x]
                for x in range(max(len(first_data), len(second_data)))
            ]
    def _batchify(self, data_source):
        """Load data from underlying arrays, internal use only."""
        assert self.cursor < self.num_data, 'DataIter needs reset.'
        # first batch of next epoch with 'roll_over'
        if self.last_batch_handle == 'roll_over' and \
            -self.batch_size < self.cursor < 0:
            assert self._cache_data is not None or self._cache_label is not None, \
                'next epoch should have cached data'
            cache_data = self._cache_data if self._cache_data is not None else self._cache_label
            second_data = self._getdata(
                data_source, end=self.cursor + self.batch_size)
            if self._cache_data is not None:
                self._cache_data = None
            else:
                self._cache_label = None
            return self._concat(cache_data, second_data)
        # last batch with 'pad'
        elif self.last_batch_handle == 'pad' and \
            self.cursor + self.batch_size > self.num_data:
            pad = self.batch_size - self.num_data + self.cursor
            first_data = self._getdata(data_source, start=self.cursor)
            second_data = self._getdata(data_source, end=pad)
            return self._concat(first_data, second_data)
        # normal case
        else:
            if self.cursor + self.batch_size < self.num_data:
                end_idx = self.cursor + self.batch_size
            # get incomplete last batch
            else:
                end_idx = self.num_data
            return self._getdata(data_source, self.cursor, end_idx)
    def getdata(self):
        """Get data."""
        return self._batchify(self.data)
    def getlabel(self):
        """Get label."""
        return self._batchify(self.label)
    def getpad(self):
        """Get pad value of DataBatch."""
        if self.last_batch_handle == 'pad' and \
            self.cursor + self.batch_size > self.num_data:
            return self.cursor + self.batch_size - self.num_data
        # check the first batch
        elif self.last_batch_handle == 'roll_over' and \
            -self.batch_size < self.cursor < 0:
            return -self.cursor
        else:
            return 0
    def _shuffle_data(self):
        """Shuffle the data."""
        # shuffle index
        np.random.shuffle(self.idx)
        # get the data by corresponding index
        self.data = _getdata_by_idx(self.data, self.idx)
        self.label = _getdata_by_idx(self.label, self.idx)
class MXDataIter(DataIter):
    """A python wrapper a C++ data iterator.
    This iterator is the Python wrapper to all native C++ data iterators, such
    as `CSVIter`, `ImageRecordIter`, `MNISTIter`, etc. When initializing
    `CSVIter` for example, you will get an `MXDataIter` instance to use in your
    Python code. Calls to `next`, `reset`, etc will be delegated to the
    underlying C++ data iterators.
    Usually you don't need to interact with `MXDataIter` directly unless you are
    implementing your own data iterators in C++. To do that, please refer to
    examples under the `src/io` folder.
    Parameters
    ----------
    handle : DataIterHandle, required
        The handle to the underlying C++ Data Iterator.
    data_name : str, optional
        Data name. Default to "data".
    label_name : str, optional
        Label name. Default to "softmax_label".
    See Also
    --------
    src/io : The underlying C++ data iterator implementation, e.g., `CSVIter`.
    """
    def __init__(self, handle, data_name='data', label_name='softmax_label', **_):
        super(MXDataIter, self).__init__()
        self.handle = handle
        # debug option, used to test the speed with io effect eliminated
        self._debug_skip_load = False
        # Fix: `next()` reads `_debug_at_begin`, but it used to be created only
        # by `reset()`; calling `debug_skip_load()` and then `next()` on a
        # fresh iterator therefore raised AttributeError.
        self._debug_at_begin = True
        # load the first batch to get shape information
        self.first_batch = None
        self.first_batch = self.next()
        data = self.first_batch.data[0]
        label = self.first_batch.label[0]
        # properties
        self.provide_data = [DataDesc(data_name, data.shape, data.dtype)]
        self.provide_label = [DataDesc(label_name, label.shape, label.dtype)]
        self.batch_size = data.shape[0]
    def __del__(self):
        # release the C++-side iterator
        check_call(_LIB.MXDataIterFree(self.handle))
    def debug_skip_load(self):
        """Set the iterator to simply return always the first batch.

        This can be used to test the speed of a network without taking the
        loading delay into account."""
        self._debug_skip_load = True
        logging.info('Set debug_skip_load to be true, will simply return first batch')
    def reset(self):
        """Rewind the underlying C++ iterator to the first batch."""
        self._debug_at_begin = True
        self.first_batch = None
        check_call(_LIB.MXDataIterBeforeFirst(self.handle))
    def next(self):
        """Return the next `DataBatch`, raising StopIteration at the end."""
        if self._debug_skip_load and not self._debug_at_begin:
            return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
                            index=self.getindex())
        # serve the batch cached by __init__ (used for shape discovery) first
        if self.first_batch is not None:
            batch = self.first_batch
            self.first_batch = None
            return batch
        self._debug_at_begin = False
        next_res = ctypes.c_int(0)
        check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
        if next_res.value:
            return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
                            index=self.getindex())
        else:
            raise StopIteration
    def iter_next(self):
        """Advance the C++ iterator; True while more batches remain."""
        if self.first_batch is not None:
            return True
        next_res = ctypes.c_int(0)
        check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
        return next_res.value
    def getdata(self):
        """Get the data ndarray of the current batch from the C++ iterator."""
        hdl = NDArrayHandle()
        check_call(_LIB.MXDataIterGetData(self.handle, ctypes.byref(hdl)))
        return _ndarray_cls(hdl, False)
    def getlabel(self):
        """Get the label ndarray of the current batch from the C++ iterator."""
        hdl = NDArrayHandle()
        check_call(_LIB.MXDataIterGetLabel(self.handle, ctypes.byref(hdl)))
        return _ndarray_cls(hdl, False)
    def getindex(self):
        """Get the example indices of the current batch, or None."""
        index_size = ctypes.c_uint64(0)
        index_data = ctypes.POINTER(ctypes.c_uint64)()
        check_call(_LIB.MXDataIterGetIndex(self.handle,
                                           ctypes.byref(index_data),
                                           ctypes.byref(index_size)))
        if index_size.value:
            address = ctypes.addressof(index_data.contents)
            dbuffer = (ctypes.c_uint64 * index_size.value).from_address(address)
            np_index = np.frombuffer(dbuffer, dtype=np.uint64)
            # copy: the underlying buffer is owned by the C++ iterator
            return np_index.copy()
        else:
            return None
    def getpad(self):
        """Get the number of padding examples in the current batch."""
        pad = ctypes.c_int(0)
        check_call(_LIB.MXDataIterGetPadNum(self.handle, ctypes.byref(pad)))
        return pad.value
def _make_io_iterator(handle):
    """Create an io iterator creator function by handle."""
    name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXDataIterGetIterInfo( \
        handle, ctypes.byref(name), ctypes.byref(desc), \
        ctypes.byref(num_args), \
        ctypes.byref(arg_names), \
        ctypes.byref(arg_types), \
        ctypes.byref(arg_descs)))
    iter_name = py_str(name.value)
    narg = int(num_args.value)
    param_str = _build_param_doc(
        [py_str(arg_names[i]) for i in range(narg)],
        [py_str(arg_types[i]) for i in range(narg)],
        [py_str(arg_descs[i]) for i in range(narg)])
    doc_str = ('%s\n\n' +
               '%s\n' +
               'Returns\n' +
               '-------\n' +
               'MXDataIter\n'+
               '    The result iterator.')
    # Decode the C string: interpolating the raw `desc.value` would embed a
    # bytes repr (b'...') in the generated docstring under Python 3.
    doc_str = doc_str % (py_str(desc.value), param_str)
    def creator(*args, **kwargs):
        """Create an iterator.
        The parameters listed below can be passed in as keyword arguments.
        Parameters
        ----------
        name : string, required.
            Name of the resulting data iterator.
        Returns
        -------
        dataiter: Dataiter
            The resulting data iterator.
        """
        # Fail fast before any C-side handle is allocated; previously this
        # check ran after MXDataIterCreateIter and leaked the new handle.
        if len(args):
            raise TypeError('%s can only accept keyword arguments' % iter_name)
        param_keys = []
        param_vals = []
        for k, val in kwargs.items():
            param_keys.append(k)
            param_vals.append(str(val))
        # create atomic symbol
        param_keys = c_str_array(param_keys)
        param_vals = c_str_array(param_vals)
        iter_handle = DataIterHandle()
        check_call(_LIB.MXDataIterCreateIter(
            handle,
            mx_uint(len(param_keys)),
            param_keys, param_vals,
            ctypes.byref(iter_handle)))
        return MXDataIter(iter_handle, **kwargs)
    creator.__name__ = iter_name
    creator.__doc__ = doc_str
    return creator
def _init_io_module():
    """Discover every native data iterator and attach a creator function
    for each one to this module."""
    iter_handles = ctypes.POINTER(ctypes.c_void_p)()
    num_iters = ctypes.c_uint()
    check_call(_LIB.MXListDataIters(ctypes.byref(num_iters), ctypes.byref(iter_handles)))
    this_module = sys.modules[__name__]
    for idx in range(num_iters.value):
        creator = _make_io_iterator(ctypes.c_void_p(iter_handles[idx]))
        setattr(this_module, creator.__name__, creator)
_init_io_module()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import inspect
import os
import tempfile
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.export.export import build_all_signature_defs
from tensorflow.python.estimator.export.export import get_timestamped_export_dir
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
from tensorflow.python.util import compat
# The only argument names a user-supplied model_fn is allowed to declare.
_VALID_MODEL_FN_ARGS = set(
    ['features', 'labels', 'mode', 'params', 'config'])
class Estimator(object):
"""Estimator class to train and evaluate TensorFlow models.
The `Estimator` object wraps a model which is specified by a `model_fn`,
which, given inputs and a number of other parameters, returns the ops
necessary to perform training, evaluation, or predictions.
All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a
subdirectory thereof. If `model_dir` is not set, a temporary directory is
used.
The `config` argument can be passed `RunConfig` object containing information
about the execution environment. It is passed on to the `model_fn`, if the
`model_fn` has a parameter named "config" (and input functions in the same
manner). If the `config` parameter is not passed, it is instantiated by the
`Estimator`. Not passing config means that defaults useful for local execution
are used. `Estimator` makes config available to the model (for instance, to
allow specialization based on the number of workers available), and also uses
some of its fields to control internals, especially regarding checkpointing.
The `params` argument contains hyperparameters. It is passed to the
`model_fn`, if the `model_fn` has a parameter named "params", and to the input
functions in the same manner. `Estimator` only passes params along, it does
not inspect it. The structure of `params` is therefore entirely up to the
developer.
None of `Estimator`'s methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use `model_fn` to configure
the base class, and may add methods implementing specialized functionality.
"""
  def __init__(self, model_fn, model_dir=None, config=None, params=None):
    """Constructs an `Estimator` instance.
    Args:
      model_fn: Model function. Follows the signature:
        * Args:
          * `features`: single `Tensor` or `dict` of `Tensor`s
                 (depending on data passed to `train`),
          * `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
                 models). If mode is `ModeKeys.PREDICT`, `labels=None` will be
                 passed. If the `model_fn`'s signature does not accept
                 `mode`, the `model_fn` must still be able to handle
                 `labels=None`.
          * `mode`: Optional. Specifies if this training, evaluation or
                 prediction. See `ModeKeys`.
          * `params`: Optional `dict` of hyperparameters.  Will receive what
                 is passed to Estimator in `params` parameter. This allows
                 to configure Estimators from hyper parameter tuning.
          * `config`: Optional configuration object. Will receive what is passed
                 to Estimator in `config` parameter, or the default `config`.
                 Allows updating things in your model_fn based on configuration
                 such as `num_ps_replicas`, or `model_dir`.
        * Returns:
          `EstimatorSpec`
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      config: Configuration object.
      params: `dict` of hyper parameters that will be passed into `model_fn`.
              Keys are names of parameters, values are basic python types.
    Raises:
      ValueError: parameters of `model_fn` don't match `params`.
      ValueError: if this is called via a subclass and if that class overrides
        a member of `Estimator`.
    """
    # Enforce the "no overriding Estimator members" contract on subclasses.
    Estimator._assert_members_are_not_overridden(self)
    # Model directory.
    self._model_dir = model_dir
    if self._model_dir is None:
      self._model_dir = tempfile.mkdtemp()
      logging.warning('Using temporary folder as model directory: %s',
                      self._model_dir)
    if config is None:
      self._config = run_config.RunConfig()
      logging.info('Using default config.')
    else:
      if not isinstance(config, run_config.RunConfig):
        raise ValueError(
            'config must be an instance of RunConfig, but provided %s.' %
            config)
      self._config = config
    logging.info('Using config: %s', str(vars(self._config)))
    # Fall back to soft device placement when the config carries no explicit
    # session configuration.
    if self._config.session_config is None:
      self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
    else:
      self._session_config = self._config.session_config
    self._device_fn = _get_replica_device_setter(self._config)
    if model_fn is None:
      raise ValueError('model_fn must be provided to Estimator.')
    # Reject model_fns whose signature conflicts with `params`.
    _verify_model_fn_args(model_fn, params)
    self._model_fn = model_fn
    self._params = params or {}
  @property
  def model_dir(self):
    """Directory where checkpoints, graphs and event files are written."""
    return self._model_dir
  @property
  def config(self):
    """A deep copy of the `RunConfig`; mutating it does not affect this
    Estimator."""
    return copy.deepcopy(self._config)
  @property
  def params(self):
    """A deep copy of the hyperparameter dict; mutating it does not affect
    this Estimator."""
    return copy.deepcopy(self._params)
  def train(self, input_fn, hooks=None, steps=None, max_steps=None):
    """Trains a model given training data input_fn.
    Args:
      input_fn: Input function returning a tuple of:
          features - `Tensor` or dictionary of string feature name to `Tensor`.
          labels - `Tensor` or dictionary of `Tensor` with labels.
      hooks: List of `SessionRunHook` subclass instances. Used for callbacks
        inside the training loop.
      steps: Number of steps for which to train model. If `None`, train forever
        or train until input_fn generates the `OutOfRange` or `StopIteration`
        error. 'steps' works incrementally. If you call two times
        train(steps=10) then training occurs in total 20 steps. If `OutOfRange`
        or `StopIteration` error occurs in the middle, training stops before 20
        steps. If you don't want to have incremental behaviour please set
        `max_steps` instead. If set, `max_steps` must be `None`.
      max_steps: Number of total steps for which to train model. If `None`,
        train forever or train until input_fn generates the `OutOfRange` or
        `StopIteration` error. If set, `steps` must be `None`. If `OutOfRange`
        or `StopIteration` error occurs in the middle, training stops before
        `max_steps` steps.
        Two calls to `train(steps=100)` means 200 training
        iterations. On the other hand, two calls to `train(max_steps=100)` means
        that the second call will not do any iteration since first call did
        all 100 steps.
    Returns:
      `self`, for chaining.
    Raises:
      ValueError: If both `steps` and `max_steps` are not `None`.
      ValueError: If either `steps` or `max_steps` is <= 0.
    """
    if (steps is not None) and (max_steps is not None):
      raise ValueError('Can not provide both steps and max_steps.')
    if steps is not None and steps <= 0:
      raise ValueError('Must specify steps > 0, given: {}'.format(steps))
    if max_steps is not None and max_steps <= 0:
      raise ValueError(
          'Must specify max_steps > 0, given: {}'.format(max_steps))
    if max_steps is not None:
      # `max_steps` is absolute: skip training entirely if a checkpoint has
      # already reached it.
      start_step = _load_global_step_from_checkpoint_dir(self._model_dir)
      if max_steps <= start_step:
        logging.info('Skipping training since max_steps has already saved.')
        return self
    hooks = _check_hooks_type(hooks)
    if steps is not None or max_steps is not None:
      hooks.append(training.StopAtStepHook(steps, max_steps))
    loss = self._train_model(input_fn=input_fn, hooks=hooks)
    logging.info('Loss for final step: %s.', loss)
    return self
  def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
               name=None):
    """Evaluates the model given evaluation data input_fn.
    For each step, calls `input_fn`, which returns one batch of data.
    Evaluates until:
    - `steps` batches are processed, or
    - `input_fn` raises an end-of-input exception (`OutOfRangeError` or
    `StopIteration`).
    Args:
      input_fn: Input function returning a tuple of:
          features - Dictionary of string feature name to `Tensor` or
            `SparseTensor`.
          labels - `Tensor` or dictionary of `Tensor` with labels.
      steps: Number of steps for which to evaluate model. If `None`, evaluates
        until `input_fn` raises an end-of-input exception.
      hooks: List of `SessionRunHook` subclass instances. Used for callbacks
        inside the evaluation call.
      checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
        latest checkpoint in `model_dir` is used.
      name: Name of the evaluation if user needs to run multiple evaluations on
        different data sets, such as on training data vs test data. Metrics for
        different evaluations are saved in separate folders, and appear
        separately in tensorboard.
    Returns:
      A dict containing the evaluation metrics specified in `model_fn` keyed by
      name, as well as an entry `global_step` which contains the value of the
      global step for which this evaluation was performed.
    Raises:
      ValueError: If `steps <= 0`.
      ValueError: If no model has been trained, namely `model_dir`, or the
        given `checkpoint_path` is empty.
    """
    hooks = _check_hooks_type(hooks)
    if steps is not None:
      if steps <= 0:
        raise ValueError('Must specify steps > 0, given: {}'.format(steps))
      # Cap the evaluation at exactly `steps` batches.
      hooks.append(evaluation._StopAfterNEvalsHook(  # pylint: disable=protected-access
          num_evals=steps))
    return self._evaluate_model(
        input_fn=input_fn,
        hooks=hooks,
        checkpoint_path=checkpoint_path,
        name=name)
  def predict(self,
              input_fn,
              predict_keys=None,
              hooks=None,
              checkpoint_path=None):
    """Returns predictions for given features.
    Args:
      input_fn: Input function returning features which is a dictionary of
        string feature name to `Tensor` or `SparseTensor`. If it returns a
        tuple, first item is extracted as features. Prediction continues until
        `input_fn` raises an end-of-input exception (`OutOfRangeError` or
        `StopIteration`).
      predict_keys: list of `str`, name of the keys to predict. It is used if
        the `EstimatorSpec.predictions` is a `dict`. If `predict_keys` is used
        then rest of the predictions will be filtered from the dictionary. If
        `None`, returns all.
      hooks: List of `SessionRunHook` subclass instances. Used for callbacks
        inside the prediction call.
      checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
        latest checkpoint in `model_dir` is used.
    Yields:
      Evaluated values of `predictions` tensors.
    Raises:
      ValueError: Could not find a trained model in model_dir.
      ValueError: if batch length of predictions are not same.
      ValueError: If there is a conflict between `predict_keys` and
        `predictions`. For example if `predict_keys` is not `None` but
        `EstimatorSpec.predictions` is not a `dict`.
    """
    hooks = _check_hooks_type(hooks)
    # Check that model has been trained.
    if not checkpoint_path:
      checkpoint_path = saver.latest_checkpoint(self._model_dir)
    if not checkpoint_path:
      raise ValueError('Could not find trained model in model_dir: {}.'.format(
          self._model_dir))
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      training.create_global_step(g)
      features = self._get_features_from_input_fn(input_fn)
      # Build the inference graph in PREDICT mode (labels=None).
      estimator_spec = self._call_model_fn(features, None,
                                           model_fn_lib.ModeKeys.PREDICT)
      predictions = self._extract_keys(estimator_spec.predictions, predict_keys)
      with training.MonitoredSession(
          session_creator=training.ChiefSessionCreator(
              checkpoint_filename_with_path=checkpoint_path,
              scaffold=estimator_spec.scaffold,
              config=self._session_config),
          hooks=hooks) as mon_sess:
        while not mon_sess.should_stop():
          preds_evaluated = mon_sess.run(predictions)
          if not isinstance(predictions, dict):
            for pred in preds_evaluated:
              yield pred
          else:
            # Un-batch dict predictions: yield one dict per example.
            for i in range(self._extract_batch_length(preds_evaluated)):
              yield {
                  key: value[i]
                  for key, value in six.iteritems(preds_evaluated)
              }
def _assert_members_are_not_overridden(self):
estimator_members = set([m for m in Estimator.__dict__.keys()
if not m.startswith('__')])
subclass_members = set(self.__class__.__dict__.keys())
common_members = estimator_members & subclass_members
overriden_members = [m for m in common_members
if Estimator.__dict__[m] != self.__class__.__dict__[m]]
if overriden_members:
raise ValueError(
'Subclasses of Estimator cannot override members of Estimator. '
'{} does override {}'.format(self.__class__, overriden_members))
  def export_savedmodel(
      self, export_dir_base, serving_input_receiver_fn,
      assets_extra=None,
      as_text=False,
      checkpoint_path=None):
    """Exports inference graph as a SavedModel into given dir.
    This method builds a new graph by first calling the
    serving_input_receiver_fn to obtain feature `Tensor`s, and then calling
    this `Estimator`'s model_fn to generate the model graph based on those
    features. It restores the given checkpoint (or, lacking that, the most
    recent checkpoint) into this graph in a fresh session.  Finally it creates
    a timestamped export directory below the given export_dir_base, and writes
    a `SavedModel` into it containing a single `MetaGraphDef` saved from this
    session.
    The exported `MetaGraphDef` will provide one `SignatureDef` for each
    element of the export_outputs dict returned from the model_fn, named using
    the same keys.  One of these keys is always
    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, indicating which
    signature will be served when a serving request does not specify one.
    For each signature, the outputs are provided by the corresponding
    `ExportOutput`s, and the inputs are always the input receivers provided by
    the serving_input_receiver_fn.
    Extra assets may be written into the SavedModel via the extra_assets
    argument.  This should be a dict, where each key gives a destination path
    (including the filename) relative to the assets.extra directory.  The
    corresponding value gives the full path of the source file to be copied.
    For example, the simple case of copying a single file without renaming it
    is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
    Args:
      export_dir_base: A string containing a directory in which to create
        timestamped subdirectories containing exported SavedModels.
      serving_input_receiver_fn: A function that takes no argument and
        returns a `ServingInputReceiver`.
      assets_extra: A dict specifying how to populate the assets.extra directory
        within the exported SavedModel, or `None` if no extra assets are needed.
      as_text: whether to write the SavedModel proto in text format.
      checkpoint_path: The checkpoint path to export.  If `None` (the default),
        the most recent checkpoint found within the model directory is chosen.
    Returns:
      The string path to the exported directory.
    Raises:
      ValueError: if no serving_input_receiver_fn is provided, no export_outputs
          are provided, or no checkpoint can be found.
    """
    if serving_input_receiver_fn is None:
      raise ValueError('serving_input_receiver_fn must be defined.')
    with ops.Graph().as_default() as g:
      training.create_global_step(g)
      random_seed.set_random_seed(self._config.tf_random_seed)
      serving_input_receiver = serving_input_receiver_fn()
      # Call the model_fn and collect the export_outputs.
      estimator_spec = self._call_model_fn(
          features=serving_input_receiver.features,
          labels=None,
          mode=model_fn_lib.ModeKeys.PREDICT)
      # Build the SignatureDefs from receivers and all outputs
      signature_def_map = build_all_signature_defs(
          serving_input_receiver.receiver_tensors,
          estimator_spec.export_outputs)
      if not checkpoint_path:
        # Locate the latest checkpoint
        checkpoint_path = saver.latest_checkpoint(self._model_dir)
      if not checkpoint_path:
        raise ValueError("Couldn't find trained model at %s." % self._model_dir)
      export_dir = get_timestamped_export_dir(export_dir_base)
      # TODO(soergel): Consider whether MonitoredSession makes sense here
      with tf_session.Session() as session:
        # Restore variables from the chosen checkpoint into the fresh graph.
        saver_for_restore = estimator_spec.scaffold.saver or saver.Saver(
            sharded=True)
        saver_for_restore.restore(session, checkpoint_path)
        # TODO(b/36111876): replace legacy_init_op with main_op mechanism
        # pylint: disable=protected-access
        local_init_op = (
            estimator_spec.scaffold.local_init_op or
            monitored_session.Scaffold._default_local_init_op())
        # pylint: enable=protected-access
        # Perform the export
        builder = saved_model_builder.SavedModelBuilder(export_dir)
        builder.add_meta_graph_and_variables(
            session, [tag_constants.SERVING],
            signature_def_map=signature_def_map,
            assets_collection=ops.get_collection(
                ops.GraphKeys.ASSET_FILEPATHS),
            legacy_init_op=local_init_op)
        builder.save(as_text)
      # Add the extra assets
      if assets_extra:
        assets_extra_path = os.path.join(compat.as_bytes(export_dir),
                                         compat.as_bytes('assets.extra'))
        for dest_relative, source in assets_extra.items():
          dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
                                       compat.as_bytes(dest_relative))
          dest_path = os.path.dirname(dest_absolute)
          gfile.MakeDirs(dest_path)
          gfile.Copy(source, dest_absolute)
      return export_dir
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if not ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
logging.warning('Input graph does not contain a QueueRunner. '
'That means predict yields forever. '
'This is probably a mistake.')
if isinstance(result, (list, tuple)):
return result[0]
return result
def _extract_batch_length(self, preds_evaluated):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in six.iteritems(preds_evaluated):
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
raise ValueError('Batch length of predictions should be same. %s has '
'different batch length then others.' % key)
return batch_length
def _extract_keys(self, predictions, predict_keys):
"""Extracts `predict_keys` from `predictions`."""
if not predict_keys:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'predict_keys argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in predict_keys
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, predict_keys))
return predictions
def _call_model_fn(self, features, labels, mode):
"""Calls model function.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
An `EstimatorSpec` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
model_fn_args = _get_arguments(self._model_fn).args
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
model_fn_results = self._model_fn(
features=features, labels=labels, **kwargs)
if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
raise ValueError('model_fn should return an EstimatorSpec.')
return model_fn_results
  def _train_model(self, input_fn, hooks):
    """Builds the training graph and runs it under a MonitoredTrainingSession.

    Args:
      input_fn: callable returning a `(features, labels)` pair; invoked on
        /cpu:0.
      hooks: list of `SessionRunHook`s attached in addition to the default
        hooks and those supplied by the `EstimatorSpec`.

    Returns:
      The last loss value observed, or None if no training step ran.
    """
    all_hooks = []
    with ops.Graph().as_default() as g, g.device(self._device_fn):
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step_tensor = training.create_global_step(g)
      # Input pipelines are pinned to the CPU.
      with ops.device('/cpu:0'):
        features, labels = input_fn()
      estimator_spec = self._call_model_fn(features, labels,
                                           model_fn_lib.ModeKeys.TRAIN)
      ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
      # Default hooks: abort on NaN loss; log loss/step every 100 iterations.
      all_hooks.extend([
          training.NanTensorHook(estimator_spec.loss),
          training.LoggingTensorHook(
              {
                  'loss': estimator_spec.loss,
                  'step': global_step_tensor
              },
              every_n_iter=100)
      ])
      all_hooks.extend(hooks)
      all_hooks.extend(estimator_spec.training_hooks)
      # Register a default Saver unless the scaffold or the graph already
      # provides one, so the CheckpointSaverHook below can write checkpoints.
      if not (estimator_spec.scaffold.saver or
              ops.get_collection(ops.GraphKeys.SAVERS)):
        ops.add_to_collection(ops.GraphKeys.SAVERS,
                              training.Saver(
                                  sharded=True,
                                  max_to_keep=self._config.keep_checkpoint_max,
                                  defer_build=True,
                                  save_relative_paths=True))
      chief_hooks = []
      if (self._config.save_checkpoints_secs or
          self._config.save_checkpoints_steps):
        saver_hook_exists = any([
            isinstance(h, training.CheckpointSaverHook)
            for h in (all_hooks + chief_hooks +
                      estimator_spec.training_chief_hooks)
        ])
        # Only add our CheckpointSaverHook when the user supplied none.
        if not saver_hook_exists:
          chief_hooks = [
              training.CheckpointSaverHook(
                  self._model_dir,
                  save_secs=self._config.save_checkpoints_secs,
                  save_steps=self._config.save_checkpoints_steps,
                  scaffold=estimator_spec.scaffold)
          ]
      with training.MonitoredTrainingSession(
          master=self._config.master,
          is_chief=self._config.is_chief,
          checkpoint_dir=self._model_dir,
          scaffold=estimator_spec.scaffold,
          hooks=all_hooks,
          chief_only_hooks=chief_hooks + estimator_spec.training_chief_hooks,
          save_checkpoint_secs=0,  # Saving is handled by a hook.
          save_summaries_steps=self._config.save_summary_steps,
          config=self._session_config) as mon_sess:
        loss = None
        # Run until a hook requests a stop (e.g. end of input or NaN loss).
        while not mon_sess.should_stop():
          _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
      return loss
  def _evaluate_model(self,
                      input_fn,
                      hooks=None,
                      checkpoint_path=None,
                      name=''):
    """Evaluates the model using the training.evaluation library.

    Args:
      input_fn: callable returning a `(features, labels)` pair for the eval
        graph.
      hooks: optional list of `SessionRunHook`s for the evaluation loop.
      checkpoint_path: checkpoint to evaluate; defaults to the latest one in
        `model_dir`.
      name: suffix for the eval output directory ('eval' or 'eval_<name>').

    Returns:
      dict of metric results, always including 'loss' and 'global_step'.

    Raises:
      ValueError: if no trained model is found, or if user-supplied metrics
        collide with the reserved names 'loss' / 'global_step'.
    """
    # Check that model has been trained (if nothing has been set explicitly).
    if not checkpoint_path:
      latest_path = saver.latest_checkpoint(self._model_dir)
      if not latest_path:
        raise ValueError('Could not find trained model in model_dir: {}.'.
                         format(self._model_dir))
      checkpoint_path = latest_path
    # Setup output directory.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step_tensor = training.create_global_step(g)
      features, labels = input_fn()
      estimator_spec = self._call_model_fn(
          features, labels, model_fn_lib.ModeKeys.EVAL)
      # The 'loss' metric name is reserved for the mean loss computed here.
      if model_fn_lib.MetricKeys.LOSS in estimator_spec.eval_metric_ops:
        raise ValueError(
            'Metric with name "%s" is not allowed, because Estimator ' % (
                model_fn_lib.MetricKeys.LOSS) +
            'already defines a default metric with the same name.')
      estimator_spec.eval_metric_ops[
          model_fn_lib.MetricKeys.LOSS] = metrics_lib.mean(estimator_spec.loss)
      update_op, eval_dict = _extract_metric_update_ops(
          estimator_spec.eval_metric_ops)
      # 'global_step' is likewise reserved; it is reported with every eval.
      if ops.GraphKeys.GLOBAL_STEP in eval_dict:
        raise ValueError(
            'Metric with name `global_step` is not allowed, because Estimator '
            'already defines a default metric with the same name.')
      eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor
      eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
          checkpoint_path=checkpoint_path,
          master=self._config.evaluation_master,
          scaffold=estimator_spec.scaffold,
          eval_ops=update_op,
          final_ops=eval_dict,
          hooks=hooks,
          config=self._session_config)
      # Persist the results so TensorBoard can display them.
      _write_dict_to_summary(
          output_dir=eval_dir,
          dictionary=eval_results,
          current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])
    return eval_results
def _check_hooks_type(hooks):
"""Returns hooks if all are SessionRunHook, raises TypeError otherwise."""
hooks = list(hooks or [])
for h in hooks:
if not isinstance(h, training.SessionRunHook):
raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h))
return hooks
def _get_replica_device_setter(config):
"""Creates a replica device setter if required as a default device_fn.
`Estimator` uses ReplicaDeviceSetter as a default device placer. It sets the
distributed related arguments such as number of ps_replicas based on given
config.
Args:
config: A `RunConfig` instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return training.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
def _get_arguments(func):
"""Returns a spec of given func."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func)
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _verify_model_fn_args(model_fn, params):
  """Verifies model fn arguments."""
  fn_args = _get_arguments(model_fn).args
  if 'features' not in fn_args:
    raise ValueError('model_fn (%s) must include features argument.' % model_fn)
  if 'labels' not in fn_args:
    raise ValueError('model_fn (%s) must include labels argument.' % model_fn)
  takes_params = 'params' in fn_args
  if params is not None and not takes_params:
    raise ValueError('model_fn (%s) does not include params argument, '
                     'but params (%s) is passed to Estimator.' % (model_fn,
                                                                  params))
  if params is None and takes_params:
    logging.warning('Estimator\'s model_fn (%s) includes params '
                    'argument, but params are not passed to Estimator.',
                    model_fn)
  unexpected_args = list(set(fn_args) - _VALID_MODEL_FN_ARGS)
  if unexpected_args:
    raise ValueError('model_fn (%s) has following not expected args: %s' %
                     (model_fn, unexpected_args))
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = training.NewCheckpointReader(
training.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def _extract_metric_update_ops(eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name, metric_ops in sorted(six.iteritems(eval_dict)):
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
if update_ops:
update_op = control_flow_ops.group(*update_ops)
else:
update_op = None
return update_op, value_ops
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(six.iteritems(dictionary)))
def _write_dict_to_summary(output_dir,
                           dictionary,
                           current_global_step):
  """Writes a `dict` into summary file in given output directory.

  Only `float` and `np.float32` values are written; `None` values are
  skipped silently, and any other type is skipped with a warning.

  Args:
    output_dir: `str`, directory to write the summary file in.
    dictionary: the `dict` to be written to summary file.
    current_global_step: `int`, the current global step.
  """
  logging.info('Saving dict for global step %d: %s', current_global_step,
               _dict_to_str(dictionary))
  # FileWriterCache reuses a single writer per directory across calls.
  summary_writer = writer_cache.FileWriterCache.get(output_dir)
  summary_proto = summary_pb2.Summary()
  for key in dictionary:
    if dictionary[key] is None:
      continue
    # NOTE(review): the tag is added before the type check, so a skipped
    # (non-float) entry still leaves a Value with a tag but no simple_value.
    value = summary_proto.value.add()
    value.tag = key
    if (isinstance(dictionary[key], np.float32) or
        isinstance(dictionary[key], float)):
      value.simple_value = float(dictionary[key])
    else:
      logging.warn('Skipping summary for %s, must be a float or np.float32.',
                   key)
  summary_writer.add_summary(summary_proto, current_global_step)
  summary_writer.flush()
|
|
#!/usr/local/bin/python
#-
# Copyright (c) 2010-2011 Varnish Software AS
# All rights reserved.
#
# Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Read the vmod.spec file and produce the vmod.h and vmod.c files.
#
# vmod.h contains the prototypes for the published functions, the module
# C-code should include this file to ensure type-consistency.
#
# vmod.c contains the symbols which VCC and varnishd will use to access
# the module: A structure of properly typed function pointers, the
# size of this structure in bytes, and the definition of the structure
# as a string, suitable for inclusion in the C-source of the compile VCL
# program.
import sys
import re
# The spec file may be given as the sole command-line argument; otherwise
# fall back to "vmod.vcc" in the current directory.
if len(sys.argv) == 2:
	specfile = sys.argv[1]
else:
	specfile = "vmod.vcc"
# Map from VCC argument/return types to the corresponding C types used in
# the generated prototypes.
ctypes = {
	'IP': "struct sockaddr_storage *",
	'STRING': "const char *",
	'STRING_LIST': "const char *, ...",
	'BOOL': "unsigned",
	'BACKEND': "struct director *",
	'ENUM': "const char *",
	'TIME': "double",
	'REAL': "double",
	'DURATION': "double",
	'INT': "int",
	'HEADER': "enum gethdr_e, const char *",
	'PRIV_VCL': "struct vmod_priv *",
	'PRIV_CALL': "struct vmod_priv *",
	'VOID': "void",
}
#######################################################################
# Accumulators filled in by do_func() while parsing the spec:
initname = ""	# name of the module's Init function, if declared
modname = "???"	# module name from the 'Module' line
pstruct = ""	# members of the Vmod_Func_<module> struct
pinit = ""	# initializers for that struct
tdl = ""	# typedefs for the interface function pointers
plist = ""	# prototypes for vcc_if.h
slist = ""	# entries for the Vmod_Spec string array
def do_func(fname, rval, args, vargs):
	"""Record one vmod function in all generated artifacts.

	Appends to the module-level accumulators: the C prototype (plist),
	the interface typedef (tdl), the function-pointer struct member
	(pstruct), the struct initializer (pinit) and the NUL-separated
	spec string (slist).
	"""
	global pstruct
	global pinit
	global plist
	global slist
	global tdl
	# C argument list: every vmod function receives the session first.
	cargs = "(" + ", ".join(["struct sess *"] + list(args)) + ")"
	# Prototypes for the vmod implementation and the interface typedef.
	proto = ctypes[rval] + " vmod_" + fname + cargs
	sproto = ctypes[rval] + " td_" + modname + "_" + fname + cargs
	plist += proto + ";\n"
	tdl += "typedef " + sproto + ";\n"
	# Struct member and the matching initializer entry.
	pstruct += "\ttd_" + modname + "_" + fname + "\t*" + fname + ";\n"
	pinit += "\tvmod_" + fname + ",\n"
	# Compose the vmod spec-string.
	spec = modname + '.' + fname + "\\0"
	spec += "Vmod_Func_" + modname + "." + fname + "\\0"
	spec += rval + '\\0'
	for varg in vargs:
		spec += varg + '\\0'
	slist += '\t"' + spec + '",\n'
#######################################################################
def partition(string, separator):
	"""str.partition() fallback for Pythons lacking the method (pre-2.5)."""
	if hasattr(string, "partition"):
		return string.partition(separator)
	idx = string.find(separator)
	if idx < 0:
		return (string, '', '')
	return (string[:idx], separator, string[idx + len(separator):])
#######################################################################
def is_c_name(s):
	"""True when s is a valid lower-case C identifier."""
	return re.match("^[a-z][a-z0-9_]*$", s) is not None
#######################################################################
def parse_enum(tq):
	"""Turn an ENUM qualifier "{a,b,...}" into its spec-string encoding.

	Raises Exception for illegal or duplicate enum values.
	"""
	assert tq[0] == '{'
	assert tq[-1] == '}'
	spec = "ENUM\\0"
	seen = dict()
	for raw in tq[1:-1].split(','):
		name = raw.strip()
		if not is_c_name(name):
			raise Exception("Enum value '%s' is illegal" % name)
		if name in seen:
			raise Exception("Duplicate Enum value '%s'" % name)
		seen[name] = True
		spec = spec + name + '\\0'
	return spec
#######################################################################
# Input spec file; consumed lazily line-by-line by nextline() below.
f = open(specfile, "r")
def nextline():
	"""Return the next interesting line from the global spec file `f`.

	Comments (#...) are stripped, whitespace runs are collapsed to single
	spaces, and blank results are skipped.  Returns "" at end of file.
	"""
	while True:
		line = f.readline()
		if line == "":
			return line
		line = re.sub("#.*$", "", line)
		line = re.sub("\s\s*", " ", line.strip())
		if line != "":
			return line
# Main parse loop: read the spec one logical line at a time and dispatch
# on the leading keyword (Module / Init / Function).
while True:
	l0 = nextline()
	if l0 == "":
		break;
	l = partition(l0, " ")
	if l[0] == "Module":
		modname = l[2].strip();
		if not is_c_name(modname):
			raise Exception("Module name '%s' is illegal" % modname)
		continue
	if l[0] == "Init":
		initname = l[2].strip();
		if not is_c_name(initname):
			raise Exception("Init name '%s' is illegal" % initname)
		continue
	if l[0] != "Function":
		raise Exception("Expected 'Function' line, got '%s'" % l[0])
	# Find the return type of the function
	l = partition(l[2].strip(), " ")
	rt_type = l[0]
	if rt_type not in ctypes:
		raise Exception("Return type '%s' not a valid type" % rt_type)
	# Find the function name
	l = partition(l[2].strip(), "(")
	fname = l[0].strip()
	if not is_c_name(fname):
		raise Exception("Function name '%s' is illegal" % fname)
	if l[1] != '(':
		raise Exception("Missing '('")
	l = l[2]
	# The argument list may span several input lines; gather until ')'.
	while -1 == l.find(")"):
		l1 = nextline()
		if l1 == "":
			raise Exception("End Of Input looking for ')'")
		l = l + l1
	if -1 != l.find("("):
		raise Exception("Nesting trouble with '(...)' ")
	if l[-1:] != ')':
		raise Exception("Junk after ')'")
	l = l[:-1]
	# Parse argument types; ENUMs carry a {...} qualifier listing values.
	args = list()
	vargs = list()
	for i in re.finditer("([A-Z_]+)\s*({[^}]+})?(,|$)", l):
		at = i.group(1)
		tq = i.group(2)
		if at not in ctypes:
			raise Exception(
			    "Argument type '%s' not a valid type" % at)
		args.append(ctypes[at])
		if at == "ENUM":
			if tq == None:
				raise Exception(
				    "Argument type '%s' needs qualifier {...}"
				    % at)
			at=parse_enum(tq)
		elif tq != None:
			raise Exception(
			    "Argument type '%s' cannot be qualified with {...}"
			    % at)
		vargs.append(at)
	do_func(fname, rt_type, args, vargs)
#######################################################################
def dumps(s):
	"""Write string s into vcc_if.c as one C string literal per line."""
	while True:
		head, sep, tail = partition(s, "\n")
		if len(head) == 0:
			break
		fc.write('\t"' + head + '\\n"\n')
		s = tail
#######################################################################
if initname != "":
plist += "int " + initname
plist += "(struct vmod_priv *, const struct VCL_conf *);\n"
pstruct += "\tvmod_init_f\t*_init;\n"
pinit += "\t" + initname + ",\n"
slist += '\t"INIT\\0Vmod_Func_' + modname + '._init",\n'
#######################################################################
def file_header(fo):
	"""Write the machine-generated / do-not-edit banner to file object fo."""
	fo.write("""/*
 * NB: This file is machine generated, DO NOT EDIT!
 *
 * Edit vmod.vcc and run vmod.py instead
 */
""")
#######################################################################
fc = open("vcc_if.c", "w")
fh = open("vcc_if.h", "w")
file_header(fc)
file_header(fh)
fh.write('struct sess;\n')
fh.write('struct VCL_conf;\n')
fh.write('struct vmod_priv;\n')
fh.write("\n");
fh.write(plist)
fc.write('#include "config.h"\n')
fc.write('\n')
fc.write('#include "vrt.h"\n')
fc.write('#include "vcc_if.h"\n')
fc.write('#include "vmod_abi.h"\n')
fc.write("\n");
fc.write("\n");
fc.write(tdl);
fc.write("\n");
fc.write('const char Vmod_Name[] = "' + modname + '";\n')
fc.write("const struct Vmod_Func_" + modname + " {\n")
fc.write(pstruct + "} Vmod_Func = {\n" + pinit + "};\n")
fc.write("\n");
fc.write("const int Vmod_Len = sizeof(Vmod_Func);\n")
fc.write("\n");
fc.write('const char Vmod_Proto[] =\n')
dumps(tdl);
fc.write('\t"\\n"\n')
dumps("struct Vmod_Func_" + modname + " {\n")
dumps(pstruct + "} Vmod_Func_" + modname + ";\n")
fc.write('\t;\n')
fc.write("\n");
fc.write('const char * const Vmod_Spec[] = {\n' + slist + '\t0\n};\n')
fc.write('const char Vmod_Varnish_ABI[] = VMOD_ABI_Version;\n')
fh.write('extern const void * const Vmod_Id;\n')
fc.write('const void * const Vmod_Id = &Vmod_Id;\n')
fc.write("\n")
|
|
# Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = 'localhost'
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = True
# The notebook manager class to use.
# c.NotebookApp.notebook_manager_class = 'IPython.html.services.notebooks.filenbmanager.FileNotebookManager'
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = u'/home/zubieta/Documents/Notebooks'
#
# c.NotebookApp.file_to_run = ''
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# paths for Javascript extensions. By default, this is just
# IPYTHONDIR/nbextensions
# c.NotebookApp.nbextensions_path = []
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u''
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = u''
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
c.NotebookApp.browser = u'luakit %s'
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
c.IPKernelApp.pylab = u'inline'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
c.IPKernelApp.extensions = ['base16_mplrc']
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = u''
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'zubieta'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The figure format to enable (deprecated use `figure_formats` instead)
# c.InlineBackend.figure_format = u''
# A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
# c.InlineBackend.figure_formats = set(['png'])
# Extra kwargs to be passed to fig.canvas.print_figure.
#
# Logical examples include: bbox_inches, quality (for jpeg figures), etc.
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': (1, 1, 1, 0), 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': (1, 1, 1, 0)}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
#
# c.MappingKernelManager.root_dir = u'/home/zubieta/.ipython'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# Glob patterns to hide in file and directory listings.
# c.NotebookManager.hide_globs = [u'__pycache__']
#------------------------------------------------------------------------------
# FileNotebookManager configuration
#------------------------------------------------------------------------------
# FileNotebookManager will inherit config from: NotebookManager
# The directory name in which to keep notebook checkpoints
#
# This is a path relative to the notebook's own directory.
#
# By default, it is .ipynb_checkpoints
# c.FileNotebookManager.checkpoint_dir = '.ipynb_checkpoints'
# Glob patterns to hide in file and directory listings.
# c.FileNotebookManager.hide_globs = [u'__pycache__']
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.FileNotebookManager.save_script = False
#
c.FileNotebookManager.notebook_dir = u'/home/zubieta/Documents/Notebooks'
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
|
|
"""Constants for the Garmin Connect integration."""
from homeassistant.const import DEVICE_CLASS_TIMESTAMP
DOMAIN = "garmin_connect"
ATTRIBUTION = "Data provided by garmin.com"
GARMIN_ENTITY_LIST = {
"totalSteps": ["Total Steps", "steps", "mdi:walk", None, True],
"dailyStepGoal": ["Daily Step Goal", "steps", "mdi:walk", None, True],
"totalKilocalories": ["Total KiloCalories", "kcal", "mdi:food", None, True],
"activeKilocalories": ["Active KiloCalories", "kcal", "mdi:food", None, True],
"bmrKilocalories": ["BMR KiloCalories", "kcal", "mdi:food", None, True],
"consumedKilocalories": ["Consumed KiloCalories", "kcal", "mdi:food", None, False],
"burnedKilocalories": ["Burned KiloCalories", "kcal", "mdi:food", None, True],
"remainingKilocalories": [
"Remaining KiloCalories",
"kcal",
"mdi:food",
None,
False,
],
"netRemainingKilocalories": [
"Net Remaining KiloCalories",
"kcal",
"mdi:food",
None,
False,
],
"netCalorieGoal": ["Net Calorie Goal", "cal", "mdi:food", None, False],
"totalDistanceMeters": ["Total Distance Mtr", "m", "mdi:walk", None, True],
"wellnessStartTimeLocal": [
"Wellness Start Time",
"",
"mdi:clock",
DEVICE_CLASS_TIMESTAMP,
False,
],
"wellnessEndTimeLocal": [
"Wellness End Time",
"",
"mdi:clock",
DEVICE_CLASS_TIMESTAMP,
False,
],
"wellnessDescription": ["Wellness Description", "", "mdi:clock", None, False],
"wellnessDistanceMeters": ["Wellness Distance Mtr", "m", "mdi:walk", None, False],
"wellnessActiveKilocalories": [
"Wellness Active KiloCalories",
"kcal",
"mdi:food",
None,
False,
],
"wellnessKilocalories": ["Wellness KiloCalories", "kcal", "mdi:food", None, False],
"highlyActiveSeconds": ["Highly Active Time", "min", "mdi:fire", None, False],
"activeSeconds": ["Active Time", "min", "mdi:fire", None, True],
"sedentarySeconds": ["Sedentary Time", "min", "mdi:seat", None, True],
"sleepingSeconds": ["Sleeping Time", "min", "mdi:sleep", None, True],
"measurableAwakeDuration": ["Awake Duration", "min", "mdi:sleep", None, True],
"measurableAsleepDuration": ["Sleep Duration", "min", "mdi:sleep", None, True],
"floorsAscendedInMeters": ["Floors Ascended Mtr", "m", "mdi:stairs", None, False],
"floorsDescendedInMeters": [
"Floors Descended Mtr",
"m",
"mdi:stairs",
None,
False,
],
"floorsAscended": ["Floors Ascended", "floors", "mdi:stairs", None, True],
"floorsDescended": ["Floors Descended", "floors", "mdi:stairs", None, True],
"userFloorsAscendedGoal": [
"Floors Ascended Goal",
"floors",
"mdi:stairs",
None,
True,
],
"minHeartRate": ["Min Heart Rate", "bpm", "mdi:heart-pulse", None, True],
"maxHeartRate": ["Max Heart Rate", "bpm", "mdi:heart-pulse", None, True],
"restingHeartRate": ["Resting Heart Rate", "bpm", "mdi:heart-pulse", None, True],
"minAvgHeartRate": ["Min Avg Heart Rate", "bpm", "mdi:heart-pulse", None, False],
"maxAvgHeartRate": ["Max Avg Heart Rate", "bpm", "mdi:heart-pulse", None, False],
"abnormalHeartRateAlertsCount": [
"Abnormal HR Counts",
"",
"mdi:heart-pulse",
None,
False,
],
"lastSevenDaysAvgRestingHeartRate": [
"Last 7 Days Avg Heart Rate",
"bpm",
"mdi:heart-pulse",
None,
False,
],
"averageStressLevel": ["Avg Stress Level", "", "mdi:flash-alert", None, True],
"maxStressLevel": ["Max Stress Level", "", "mdi:flash-alert", None, True],
"stressQualifier": ["Stress Qualifier", "", "mdi:flash-alert", None, False],
"stressDuration": ["Stress Duration", "min", "mdi:flash-alert", None, False],
"restStressDuration": [
"Rest Stress Duration",
"min",
"mdi:flash-alert",
None,
True,
],
"activityStressDuration": [
"Activity Stress Duration",
"min",
"mdi:flash-alert",
None,
True,
],
"uncategorizedStressDuration": [
"Uncat. Stress Duration",
"min",
"mdi:flash-alert",
None,
True,
],
"totalStressDuration": [
"Total Stress Duration",
"min",
"mdi:flash-alert",
None,
True,
],
"lowStressDuration": ["Low Stress Duration", "min", "mdi:flash-alert", None, True],
"mediumStressDuration": [
"Medium Stress Duration",
"min",
"mdi:flash-alert",
None,
True,
],
"highStressDuration": [
"High Stress Duration",
"min",
"mdi:flash-alert",
None,
True,
],
"stressPercentage": ["Stress Percentage", "%", "mdi:flash-alert", None, False],
"restStressPercentage": [
"Rest Stress Percentage",
"%",
"mdi:flash-alert",
None,
False,
],
"activityStressPercentage": [
"Activity Stress Percentage",
"%",
"mdi:flash-alert",
None,
False,
],
"uncategorizedStressPercentage": [
"Uncat. Stress Percentage",
"%",
"mdi:flash-alert",
None,
False,
],
"lowStressPercentage": [
"Low Stress Percentage",
"%",
"mdi:flash-alert",
None,
False,
],
"mediumStressPercentage": [
"Medium Stress Percentage",
"%",
"mdi:flash-alert",
None,
False,
],
"highStressPercentage": [
"High Stress Percentage",
"%",
"mdi:flash-alert",
None,
False,
],
"moderateIntensityMinutes": [
"Moderate Intensity",
"min",
"mdi:flash-alert",
None,
False,
],
"vigorousIntensityMinutes": [
"Vigorous Intensity",
"min",
"mdi:run-fast",
None,
False,
],
"intensityMinutesGoal": ["Intensity Goal", "min", "mdi:run-fast", None, False],
"bodyBatteryChargedValue": [
"Body Battery Charged",
"%",
"mdi:battery-charging-100",
None,
True,
],
"bodyBatteryDrainedValue": [
"Body Battery Drained",
"%",
"mdi:battery-alert-variant-outline",
None,
True,
],
"bodyBatteryHighestValue": [
"Body Battery Highest",
"%",
"mdi:battery-heart",
None,
True,
],
"bodyBatteryLowestValue": [
"Body Battery Lowest",
"%",
"mdi:battery-heart-outline",
None,
True,
],
"bodyBatteryMostRecentValue": [
"Body Battery Most Recent",
"%",
"mdi:battery-positive",
None,
True,
],
"averageSpo2": ["Average SPO2", "%", "mdi:diabetes", None, True],
"lowestSpo2": ["Lowest SPO2", "%", "mdi:diabetes", None, True],
"latestSpo2": ["Latest SPO2", "%", "mdi:diabetes", None, True],
"latestSpo2ReadingTimeLocal": [
"Latest SPO2 Time",
"",
"mdi:diabetes",
DEVICE_CLASS_TIMESTAMP,
False,
],
"averageMonitoringEnvironmentAltitude": [
"Average Altitude",
"%",
"mdi:image-filter-hdr",
None,
False,
],
"highestRespirationValue": [
"Highest Respiration",
"brpm",
"mdi:progress-clock",
None,
False,
],
"lowestRespirationValue": [
"Lowest Respiration",
"brpm",
"mdi:progress-clock",
None,
False,
],
"latestRespirationValue": [
"Latest Respiration",
"brpm",
"mdi:progress-clock",
None,
False,
],
"latestRespirationTimeGMT": [
"Latest Respiration Update",
"",
"mdi:progress-clock",
DEVICE_CLASS_TIMESTAMP,
False,
],
}
|
|
"""
Make sure that a service object is aliasing container and host_config
correctly
"""
from copy import deepcopy
import os
from os.path import join
import tempfile
import unittest
from control.exceptions import InvalidControlfile
from control.service import Startable, Buildable, BSService
from control.service.service import ImageService, Service
class TestCreatingServices(unittest.TestCase):
"""Test the Service class constructor"""
def test_missing_image(self):
"""
Make sure that a Startable missing an image is classified as
incorrect.
"""
serv = {}
cntrlfile = "./Controlfile"
with self.assertRaises(InvalidControlfile):
Startable(serv, cntrlfile)
serv = {
"container": {
"name": "test",
}
}
cntrlfile = "./Controlfile"
with self.assertRaises(InvalidControlfile):
Startable(serv, cntrlfile)
def test_build_only(self):
"""
Test to make sure that controlfiles that only define the name
of the image tha control should build is handled corrcetly.
"""
# TODO: Move this into the controlfile test file
serv = {"image": "test:latest", "service": "server"}
cntrlfile = "./Controlfile"
result = Buildable(deepcopy(serv), cntrlfile)
self.assertEqual(
result.image,
"test:latest")
self.assertEqual(
result.service,
"server")
def test_basic_runnable(self):
"""
This container is the bare minimum that you need to run a container
"""
serv = {
"image": "busybox",
"container": {
"name": "test"
}
}
cntrlfile = "./Controlfile"
result = Startable(deepcopy(serv), cntrlfile)
self.assertEqual(result.image, "busybox")
self.assertEqual(result.service, "test")
self.assertEqual(
result.container,
{
"name": "test",
"hostname": "test"
})
self.assertEqual(result.host_config, {})
serv['service'] = "server"
result = Startable(deepcopy(serv), cntrlfile)
self.assertEqual(
result.image,
"busybox")
self.assertEqual(
result.service,
"server")
self.assertEqual(
result.container,
{
"name": "test",
"hostname": "test"
})
self.assertEqual(result.host_config, {})
def test_guessing_container_name(self):
"""
Make sure that if the container name is specified the container is
given the service name.
"""
serv = {
"service": "test",
"image": "busybox",
"container": {
"env": [
"VERSION=0.1"
]
}
}
cntrlfile = "./Controlfile"
result = Startable(serv, cntrlfile)
self.assertEqual(result.image, "busybox")
self.assertEqual(result.service, "test")
self.assertEqual(
result.container,
{
"name": "test",
"hostname": "test",
"environment": ["VERSION=0.1"]
})
self.assertEqual(result.host_config, {})
def test_startable_split_dockerfile(self):
"""
This container is the bare minimum that you need to run a container
"""
serv = {
"image": "busybox",
"dockerfile": {
"prod": "MadeProd",
"dev": "MadeDev",
},
"container": {
"name": "test"
}
}
cntrlfile = "./Controlfile"
result = BSService(deepcopy(serv), cntrlfile)
self.assertEqual(result.image, "busybox")
self.assertEqual(result.service, "test")
self.assertEqual(
result.dockerfile['dev'],
join(os.getcwd(), serv['dockerfile']['dev']))
self.assertEqual(
result.dockerfile['prod'],
join(os.getcwd(), serv['dockerfile']['prod']))
self.assertEqual(
result.container,
{
"name": "test",
"hostname": "test"
})
self.assertEqual(result.host_config, {})
def test_fromline_recognition(self):
"""
Test that specifying a fromline substitution is picked up when the
Controlfile is parsed.
This test does not verify that the fromline is substituted at build
time!
"""
serv = {
"image": "fromline-test",
}
cntrlfile = "./Controlfile"
result = Buildable(serv, cntrlfile)
self.assertEqual(result.fromline['dev'], "")
self.assertEqual(result.fromline['prod'], "")
serv = {
"image": "fromline-test",
"fromline": "alpine:latest",
}
cntrlfile = "./Controlfile"
result = Buildable(serv, cntrlfile)
self.assertEqual(result.fromline['dev'], "alpine:latest")
self.assertEqual(result.fromline['prod'], "alpine:latest")
serv = {
"image": "fromline-test",
"fromline": {
"dev": "alpine:latest",
"prod": "alpine:stable"
}
}
cntrlfile = "./Controlfile"
result = Buildable(serv, cntrlfile)
self.assertEqual(result.fromline['dev'], "alpine:latest")
self.assertEqual(result.fromline['prod'], "alpine:stable")
serv = {
"image": "fromline-test",
"fromline": {
"prod": "alpine:stable"
}
}
cntrlfile = "./Controlfile"
result = Buildable(serv, cntrlfile)
self.assertEqual(result.fromline['dev'], "")
self.assertEqual(result.fromline['prod'], "alpine:stable")
def test_empty_dockerfiles(self):
"""
An empty dockerfile string signals to Control that this service should
never be built.
"""
serv = {
"image": "busybox",
"dockerfile": ""
}
cntrlfile = "./Controlfile"
result = Buildable(serv, cntrlfile)
self.assertEqual(result.dockerfile['dev'], '')
self.assertEqual(result.dockerfile['prod'], '')
def test_weird_dockerfiles(self):
"""
Make sure that if someone specifies weird dockerfile environments a
warning is logged
"""
serv = {
"image": "busybox",
"dockerfile": {
"base": "Dockerfile.base"
}
}
cntrlfile = "./Controlfile"
result = Buildable(serv, cntrlfile)
self.assertEqual(result.dockerfile['dev'], '')
self.assertEqual(result.dockerfile['prod'], '')
def test_setting_controlfile(self):
"""Test that the controlfile is set correctly"""
serv = {
"image": "busybox",
"container": {
"name": "test"
}
}
cntrlfile = "./Controlfile"
result = Service(deepcopy(serv), cntrlfile)
self.assertEqual(result.controlfile, "./Controlfile")
result = ImageService(deepcopy(serv), cntrlfile)
self.assertEqual(result.controlfile, "./Controlfile")
result = Startable(deepcopy(serv), cntrlfile)
self.assertEqual(result.controlfile, "./Controlfile")
result = Buildable(deepcopy(serv), cntrlfile)
self.assertEqual(result.controlfile, "./Controlfile")
result = BSService(deepcopy(serv), cntrlfile)
self.assertEqual(result.controlfile, "./Controlfile")
class TestContainerOptions(unittest.TestCase):
"""Test the options come in correctly, and are put into Docker properly"""
def setUp(self):
self.serv = {
"dockerfile": "Dockerfile.example",
"expected_timeout": 3,
"image": "busybox",
"required": False,
"service": "server",
"container": {
"cmd": "/usr/cat",
"cpu_group": 10,
"cpu_period": 10,
"cpu_shares": 1,
"detach": True,
"devices": "/dev/mdadm",
"dns": ["8.8.8.8"],
"dns_search": ["example", "example.com"],
"entrypoint": "/bin/bash",
"env": ["FOO=bar", "DOMAIN=example.com"],
"group_add": ["cdrom"],
"hostname": "testme",
"ipc_mode": "shared",
"labels": {"label": "me"},
"links": [("networklink", "networklink")],
"mem_limit": "100m",
"memswap_limit": 100,
"name": "test",
"network_disabled": True,
"network_mode": 'bridge',
"port_bindings": {8080: ('0.0.0.0', 8080), '8888/udp': 8888},
"ports": [8080, (8888, 'udp'), 8443],
"privileged": True,
"read_only": True,
"shm_size": '100M',
"stdin_open": True,
"tty": True,
"user": "foo",
"volumes": ["/etc", "named:/var/lib", "/mnt/docker:/var/tmp"],
"volumes_from": ["datacontainer"],
"working_dir": "/etc",
}
}
self.cntrlfile = "./Controlfile"
def test_full(self):
"""
Throw the beans at the thing and make sure that everything makes its
way into the right spot
"""
result = BSService(deepcopy(self.serv), self.cntrlfile)
self.assertEqual(result.service, "server")
self.assertEqual(result.image, "busybox")
self.assertEqual(result.expected_timeout, 3)
self.assertEqual(result.required, False)
self.assertEqual(
result.dockerfile,
{
"prod": join(os.getcwd(), 'Dockerfile.example'),
"dev": join(os.getcwd(), 'Dockerfile.example'),
})
# import pytest; pytest.set_trace()
self.assertEqual(result.volumes_for(prod=False), self.serv['container']['volumes'])
self.assertEqual(
result.container,
{
"command": "/usr/cat",
"cpu_shares": 1,
"detach": True,
"entrypoint": "/bin/bash",
"environment": ["FOO=bar", "DOMAIN=example.com"],
"hostname": "testme",
"labels": {"label": "me"},
"name": "test",
"network_disabled": True,
"ports": [8080, (8888, 'udp'), 8443],
"stdin_open": True,
"tty": True,
"user": "foo",
"working_dir": "/etc"
})
self.assertEqual(
result.host_config,
{
"cpu_period": 10,
"devices": "/dev/mdadm",
"dns": ["8.8.8.8"],
"dns_search": ["example", "example.com"],
"group_add": ["cdrom"],
"ipc_mode": "shared",
"links": [("networklink", "networklink")],
"mem_limit": "100m",
"memswap_limit": 100,
"network_mode": 'bridge',
"port_bindings": {8080: ('0.0.0.0', 8080), '8888/udp': 8888},
"privileged": True,
"read_only": True,
"shm_size": '100M',
"volumes_from": ["datacontainer"],
})
self.assertNotIn(
'cpu_group',
result.host_config,
msg='Docker API version changed. Control supports 1.21-1.23')
def test_generate_container(self):
"""make sure that the create_container config is as expected"""
result = BSService(deepcopy(self.serv), self.cntrlfile)
js = result.prepare_container_options(prod=False)
self.assertEqual(js['volumes'],
["/etc", "/var/lib", "/var/tmp"])
self.assertIn('Binds', js['host_config'])
self.assertEqual(
js['host_config']['Binds'],
["named:/var/lib", "/mnt/docker:/var/tmp"])
self.assertEqual(js['environment'], self.serv['container']['env'])
self.assertEqual(js['user'], self.serv['container']['user'])
class TestEnvFile(unittest.TestCase):
"""Test the cases for environment variable files being included correctly"""
def test_all_envs_from_file(self):
"""
Ensure that if there were no env vars specified in the container
declaration, that we still get env vars
"""
temp_dir = tempfile.TemporaryDirectory()
with open(join(temp_dir.name, 'envfile'), 'w') as f:
f.write('FOO=bar')
serv = {
"image": "busybox",
"container": {
"env_file": join(temp_dir.name, 'envfile'),
"name": "test"
}
}
cntrlfile = "./Controlfile"
result = Startable(serv, cntrlfile)
js = result.prepare_container_options(prod=False)
self.assertEqual(js['environment'], {"FOO": "bar"})
temp_dir.cleanup()
def test_mixed_env_vars(self):
"""
Make sure that Control gracefully merges env var lists. The
declaration that is more specific to the container should be the
preferred source (prefer Servicefile env vars over envfile vars
in case of collision)
"""
temp_dir = tempfile.TemporaryDirectory()
with open(join(temp_dir.name, 'envfile'), 'w') as f:
f.write('FOO=bar\n')
f.write('FOOBAR=baz')
serv = {
"image": "busybox",
"container": {
"name": "test",
"env_file": join(temp_dir.name, 'envfile'),
"environment": [
"FOOBAR=control",
"BAZ=foobar"
]
}
}
cntrlfile = "./Controlfile"
result = Startable(serv, cntrlfile)
js = result.prepare_container_options(prod=False)
self.assertEqual(
js['environment'],
{
"FOO": "bar",
"FOOBAR": "control",
"BAZ": "foobar"
})
temp_dir.cleanup()
def test_missing_envfile(self):
"""
Make sure that the program does not crash, and prints a warning
if the envfile cannot be found
"""
temp_dir = tempfile.TemporaryDirectory()
serv = {
"image": "busybox",
"container": {
"name": "test",
"env_file": join(temp_dir.name, 'envfile'),
"env": {
"FOOBAR": "control",
"BAZ": "foobar"
}
}
}
cntrlfile = "./Controlfile"
result = Startable(serv, cntrlfile)
with self.assertLogs(result.logger, level='WARNING') as cm:
result.prepare_container_options(prod=False)
self.assertEqual(
cm.output,
[
'WARNING:control.service.Startable:'
'Env file is missing: {}'.format(join(temp_dir.name, 'envfile'))
]
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.