gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from sympy import (
Abs, adjoint, arg, atan2, conjugate, cos, DiracDelta, E, exp, expand,
Expr, Function, Heaviside, I, im, log, nan, oo, pi, Rational, re, S,
sign, sin, sqrt, Symbol, symbols, transpose, zoo, exp_polar, Piecewise
)
from sympy.utilities.pytest import XFAIL
from sympy.utilities.randtest import comp
def N_equals(a, b):
    """Check whether two complex numbers are numerically close"""
    # comp() compares the evalf'd values within a tolerance of 1e-6.
    return comp(a.n(), b.n(), 1.e-6)
def test_re():
    """re(): extraction of the real part, driven by the real/imaginary
    assumptions carried by the symbols involved."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    # Numeric and special values pass through unchanged.
    assert re(nan) == nan
    assert re(oo) == oo
    assert re(-oo) == -oo
    assert re(0) == 0
    assert re(1) == 1
    assert re(-1) == -1
    assert re(E) == E
    assert re(-E) == -E
    # Symbols: re() stays unevaluated unless assumptions decide it.
    assert re(x) == re(x)
    assert re(x*I) == -im(x)
    assert re(r*I) == 0
    assert re(r) == r
    assert re(i*I) == I * i
    assert re(i) == 0
    # Additivity and idempotence.
    assert re(x + y) == re(x + y)
    assert re(x + r) == re(x) + r
    assert re(re(x)) == re(x)
    assert re(2 + I) == 2
    assert re(x + I) == re(x)
    assert re(x + y*I) == re(x) - im(y)
    assert re(x + r*I) == re(x)
    assert re(log(2*I)) == log(2)
    assert re((2 + I)**2).expand(complex=True) == 3
    # re() is real-valued, hence fixed by conjugation.
    assert re(conjugate(x)) == re(x)
    assert conjugate(re(x)) == re(x)
    assert re(x).as_real_imag() == (re(x), 0)
    # Differentiation w.r.t. real and imaginary symbols.
    assert re(i*r*x).diff(r) == re(i*x)
    assert re(i*r*x).diff(i) == I*r*im(x)
    # Real part of a square root via the polar (atan2) form.
    assert re(
        sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)
    assert re(a * (2 + b*I)) == 2*a
    assert re((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2)
def test_im():
    """im(): extraction of the imaginary part, mirroring test_re()."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    assert im(nan) == nan
    assert im(oo*I) == oo
    assert im(-oo*I) == -oo
    assert im(0) == 0
    assert im(1) == 0
    assert im(-1) == 0
    assert im(E*I) == E
    assert im(-E*I) == -E
    # Symbols: im() stays unevaluated unless assumptions decide it.
    assert im(x) == im(x)
    assert im(x*I) == re(x)
    assert im(r*I) == r
    assert im(r) == 0
    assert im(i*I) == 0
    assert im(i) == -I * i
    assert im(x + y) == im(x + y)
    assert im(x + r) == im(x)
    assert im(x + r*I) == im(x) + r
    assert im(im(x)*I) == im(x)
    assert im(2 + I) == 1
    assert im(x + I) == im(x) + 1
    assert im(x + y*I) == im(x) + re(y)
    assert im(x + r*I) == im(x) + r
    assert im(log(2*I)) == pi/2
    assert im((2 + I)**2).expand(complex=True) == 4
    # im() is real-valued, hence fixed by conjugation.
    assert im(conjugate(x)) == -im(x)
    assert conjugate(im(x)) == im(x)
    assert im(x).as_real_imag() == (im(x), 0)
    # Differentiation w.r.t. real and imaginary symbols.
    assert im(i*r*x).diff(r) == im(i*x)
    assert im(i*r*x).diff(i) == -I * re(r*x)
    # Imaginary part of a square root via the polar (atan2) form.
    assert im(
        sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)
    assert im(a * (2 + b*I)) == a*b
    assert im((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2
def test_sign():
    """sign(): evaluation, assumption propagation, rewriting and
    differentiation for numeric, symbolic and assumption-tagged inputs."""
    assert sign(1.2) == 1
    assert sign(-1.2) == -1
    assert sign(3*I) == I
    assert sign(-3*I) == -I
    assert sign(0) == 0
    assert sign(nan) == nan
    assert sign(2 + 2*I).doit() == sqrt(2)*(2 + 2*I)/4
    assert sign(2 + 3*I).simplify() == sign(2 + 3*I)
    assert sign(2 + 2*I).simplify() == sign(1 + I)
    # Generic (unassumed) symbol: most properties are unknown (None).
    x = Symbol('x')
    assert sign(x).is_bounded is True
    assert sign(x).is_complex is True
    assert sign(x).is_imaginary is None
    assert sign(x).is_integer is None
    assert sign(x).is_real is None
    assert sign(x).is_zero is None
    assert sign(x).doit() == sign(x)
    # Positive numeric factors are stripped; factors of I factor out.
    assert sign(1.2*x) == sign(x)
    assert sign(2*x) == sign(x)
    assert sign(I*x) == I*sign(x)
    assert sign(-2*I*x) == -I*sign(x)
    assert sign(conjugate(x)) == conjugate(sign(x))
    p = Symbol('p', positive=True)
    n = Symbol('n', negative=True)
    m = Symbol('m', negative=True)
    assert sign(2*p*x) == sign(x)
    assert sign(n*x) == -sign(x)
    assert sign(n*m*x) == sign(x)
    # Purely imaginary symbol.
    x = Symbol('x', imaginary=True)
    assert sign(x).is_imaginary is True
    assert sign(x).is_integer is False
    assert sign(x).is_real is False
    assert sign(x).is_zero is False
    assert sign(x).diff(x) == 2*DiracDelta(-I*x)
    assert sign(x).doit() == x / Abs(x)
    assert conjugate(sign(x)) == -sign(x)
    # Real symbol.
    x = Symbol('x', real=True)
    assert sign(x).is_imaginary is False
    assert sign(x).is_integer is True
    assert sign(x).is_real is True
    assert sign(x).is_zero is None
    assert sign(x).diff(x) == 2*DiracDelta(x)
    assert sign(x).doit() == sign(x)
    assert conjugate(sign(x)) == sign(x)
    # Nonzero (but otherwise unknown) symbol.
    x = Symbol('x', nonzero=True)
    assert sign(x).is_imaginary is None
    assert sign(x).is_integer is None
    assert sign(x).is_real is None
    assert sign(x).is_zero is False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    # Positive symbol.
    x = Symbol('x', positive=True)
    assert sign(x).is_imaginary is False
    assert sign(x).is_integer is True
    assert sign(x).is_real is True
    assert sign(x).is_zero is False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    # Literal zero.
    x = 0
    assert sign(x).is_imaginary is False
    assert sign(x).is_integer is True
    assert sign(x).is_real is True
    assert sign(x).is_zero is True
    assert sign(x).doit() == 0
    assert sign(Abs(x)) == 0
    assert Abs(sign(x)) == 0
    nz = Symbol('nz', nonzero=True, integer=True)
    assert sign(nz).is_imaginary is False
    assert sign(nz).is_integer is True
    assert sign(nz).is_real is True
    assert sign(nz).is_zero is False
    # Even powers of a nonzero sign collapse to 1; odd powers stay as-is.
    assert sign(nz)**2 == 1
    assert (sign(nz)**3).args == (sign(nz), 3)
    x, y = Symbol('x', real=True), Symbol('y')
    assert sign(x).rewrite(Piecewise) == \
        Piecewise((1, x > 0), (-1, x < 0), (0, True))
    assert sign(y).rewrite(Piecewise) == sign(y)
    # evaluate what can be evaluated
    assert sign(exp_polar(I*pi)*pi) is S.NegativeOne
    eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
    # if there is a fast way to know when and when you cannot prove an
    # expression like this is zero then the equality to zero is ok
    assert sign(eq).func is sign or sign(eq) == 0
    # but sometimes it's hard to do this so it's better not to load
    # abs down with tests that will be very slow
    q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
    p = expand(q**3)**Rational(1, 3)
    d = p - q
    assert sign(d).func is sign or sign(d) == 0
def test_as_real_imag():
    """Expr.as_real_imag(): splitting expressions into (re, im) pairs."""
    n = pi**1000
    # the special code for working out the real
    # and complex parts of a power with Integer exponent
    # should not run if there is no imaginary part, hence
    # this should not hang
    assert n.as_real_imag() == (n, 0)
    # issue 6261
    x = Symbol('x')
    assert sqrt(x).as_real_imag() == \
        ((re(x)**2 + im(x)**2)**(S(1)/4)*cos(atan2(im(x), re(x))/2),
         (re(x)**2 + im(x)**2)**(S(1)/4)*sin(atan2(im(x), re(x))/2))
    # issue 3853
    a, b = symbols('a,b', real=True)
    assert ((1 + sqrt(a + b*I))/2).as_real_imag() == \
        (
            (a**2 + b**2)**Rational(
                1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2),
            (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2)
    # sqrt of a real square stays real; of an imaginary square stays imaginary.
    assert sqrt(a**2).as_real_imag() == (sqrt(a**2), 0)
    i = symbols('i', imaginary=True)
    assert sqrt(i**2).as_real_imag() == (0, sqrt(-i**2))
@XFAIL
def test_sign_issue_3068():
    """Known failure (issue 3068): round()/sign() of pi**1000 - int(pi**1000).

    Fix: `x` was never defined, so this XFAIL test died with a NameError
    instead of reaching the assertions it is meant to exercise.
    """
    n = pi**1000
    i = int(n)
    x = Symbol('x')  # was undefined: NameError masked the intended failure
    assert (n - i).round() == 1  # doesn't hang
    assert sign(n - i) == 1
    # perhaps it's not possible to get the sign right when
    # only 1 digit is being requested for this situation;
    # 2 digits works
    assert (n - x).n(1, subs={x: i}) > 0
    assert (n - x).n(2, subs={x: i}) > 0
def test_Abs():
    """Abs(): evaluation, assumption handling, powers and differentiation."""
    x, y = symbols('x,y')
    assert sign(sign(x)) == sign(x)
    assert sign(x*y).func is sign
    assert Abs(0) == 0
    assert Abs(1) == 1
    assert Abs(-1) == 1
    assert Abs(I) == 1
    assert Abs(-I) == 1
    assert Abs(nan) == nan
    assert Abs(I * pi) == pi
    assert Abs(-I * pi) == pi
    assert Abs(I * x) == Abs(x)
    assert Abs(-I * x) == Abs(x)
    # Numeric and positive factors are pulled out of Abs.
    assert Abs(-2*x) == 2*Abs(x)
    assert Abs(-2.0*x) == 2.0*Abs(x)
    assert Abs(2*pi*x*y) == 2*pi*Abs(x*y)
    assert Abs(conjugate(x)) == Abs(x)
    assert conjugate(Abs(x)) == Abs(x)
    a = Symbol('a', positive=True)
    assert Abs(2*pi*x*a) == 2*pi*a*Abs(x)
    assert Abs(2*pi*I*x*a) == 2*pi*a*Abs(x)
    # Real symbols: even powers drop the Abs.
    x = Symbol('x', real=True)
    n = Symbol('n', integer=True)
    assert Abs((-1)**n) == 1
    assert x**(2*n) == Abs(x)**(2*n)
    assert Abs(x).diff(x) == sign(x)
    assert abs(x) == Abs(x)  # Python built-in
    assert Abs(x)**3 == x**2*Abs(x)
    assert Abs(x)**4 == x**4
    assert (
        Abs(x)**(3*n)).args == (Abs(x), 3*n)  # leave symbolic odd unchanged
    assert (1/Abs(x)).args == (Abs(x), -1)
    assert 1/Abs(x)**3 == 1/(x**2*Abs(x))
    # Purely imaginary symbol: d|x|/dx = -sign(x).
    x = Symbol('x', imaginary=True)
    assert Abs(x).diff(x) == -sign(x)
    eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
    # if there is a fast way to know when and when you cannot prove an
    # expression like this is zero then the equality to zero is ok
    assert abs(eq).func is Abs or abs(eq) == 0
    # but sometimes it's hard to do this so it's better not to load
    # abs down with tests that will be very slow
    q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
    p = expand(q**3)**Rational(1, 3)
    d = p - q
    assert abs(d).func is Abs or abs(d) == 0
def test_Abs_rewrite():
    """Abs.rewrite(): Heaviside and Piecewise forms need a real argument."""
    x = Symbol('x', real=True)
    a = Abs(x).rewrite(Heaviside).expand()
    assert a == x*Heaviside(x) - x*Heaviside(-x)
    # The rewritten form agrees with abs() at sample integer points.
    for i in [-2, -1, 0, 1, 2]:
        assert a.subs(x, i) == abs(i)
    y = Symbol('y')
    # Without a realness assumption the rewrite is a no-op.
    assert Abs(y).rewrite(Heaviside) == Abs(y)
    x, y = Symbol('x', real=True), Symbol('y')
    assert Abs(x).rewrite(Piecewise) == Piecewise((x, x >= 0), (-x, True))
    assert Abs(y).rewrite(Piecewise) == Abs(y)
def test_Abs_real():
    """Simplifications of Abs that are valid only for real arguments."""
    # test some properties of abs that only apply
    # to real numbers
    x = Symbol('x', complex=True)
    assert sqrt(x**2) != Abs(x)
    assert Abs(x**2) != x**2
    x = Symbol('x', real=True)
    assert sqrt(x**2) == Abs(x)
    assert Abs(x**2) == x**2
    # if the symbol is zero, the following will still apply
    nn = Symbol('nn', nonnegative=True, real=True)
    np = Symbol('np', nonpositive=True, real=True)
    assert Abs(nn) == nn
    assert Abs(np) == -np
def test_Abs_properties():
    """Assumption propagation through Abs (real/positive/zero flags)."""
    x = Symbol('x')
    assert Abs(x).is_real is True
    assert Abs(x).is_positive is None
    assert Abs(x).is_nonnegative is True
    # A known-nonzero complex symbol has a strictly positive Abs.
    w = Symbol('w', complex=True, zero=False)
    assert Abs(w).is_real is True
    assert Abs(w).is_positive is True
    assert Abs(w).is_zero is False
    q = Symbol('q', positive=True)
    assert Abs(q).is_real is True
    assert Abs(q).is_positive is True
    assert Abs(q).is_zero is False
def test_abs():
    """The Python built-in abs() dispatches to sympy's Abs."""
    # this tests that abs calls Abs; don't rename to
    # test_Abs since that test is already above
    a = Symbol('a', positive=True)
    assert abs(I*(1 + a)**2) == (1 + a)**2
def test_arg():
    """arg(): complex argument of constants and assumption-tagged symbols."""
    assert arg(0) == nan
    assert arg(1) == 0
    assert arg(-1) == pi
    assert arg(I) == pi/2
    assert arg(-I) == -pi/2
    assert arg(1 + I) == pi/4
    assert arg(-1 + I) == 3*pi/4
    assert arg(1 - I) == -pi/4
    p = Symbol('p', positive=True)
    assert arg(p) == 0
    n = Symbol('n', negative=True)
    assert arg(n) == pi
    x = Symbol('x')
    # arg() is real-valued, so conjugation is a no-op.
    assert conjugate(arg(x)) == arg(x)
def test_arg_rewrite():
    """arg() evaluates via atan2 and can be rewritten in terms of it."""
    assert arg(1 + I) == atan2(1, 1)
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert arg(x + I*y).rewrite(atan2) == atan2(y, x)
def test_adjoint():
    """adjoint(): hermitian/antihermitian symbols and (non)commutative algebra."""
    a = Symbol('a', antihermitian=True)
    b = Symbol('b', hermitian=True)
    assert adjoint(a) == -a
    assert adjoint(I*a) == I*a
    assert adjoint(b) == b
    assert adjoint(I*b) == -I*b
    assert adjoint(a*b) == -b*a
    assert adjoint(I*a*b) == I*b*a
    # Commutative symbols: adjoint distributes over arithmetic.
    x, y = symbols('x y')
    assert adjoint(adjoint(x)) == x
    assert adjoint(x + y) == adjoint(x) + adjoint(y)
    assert adjoint(x - y) == adjoint(x) - adjoint(y)
    assert adjoint(x * y) == adjoint(x) * adjoint(y)
    assert adjoint(x / y) == adjoint(x) / adjoint(y)
    assert adjoint(-x) == -adjoint(x)
    # Noncommutative symbols: products and quotients reverse order.
    x, y = symbols('x y', commutative=False)
    assert adjoint(adjoint(x)) == x
    assert adjoint(x + y) == adjoint(x) + adjoint(y)
    assert adjoint(x - y) == adjoint(x) - adjoint(y)
    assert adjoint(x * y) == adjoint(y) * adjoint(x)
    assert adjoint(x / y) == 1 / adjoint(y) * adjoint(x)
    assert adjoint(-x) == -adjoint(x)
def test_conjugate():
    """conjugate(): real/imaginary assumptions and distribution over arithmetic."""
    a = Symbol('a', real=True)
    b = Symbol('b', imaginary=True)
    assert conjugate(a) == a
    assert conjugate(I*a) == -I*a
    assert conjugate(b) == -b
    assert conjugate(I*b) == I*b
    assert conjugate(a*b) == -a*b
    assert conjugate(I*a*b) == I*a*b
    # Generic symbols: conjugation is an involution and distributes.
    x, y = symbols('x y')
    assert conjugate(conjugate(x)) == x
    assert conjugate(x + y) == conjugate(x) + conjugate(y)
    assert conjugate(x - y) == conjugate(x) - conjugate(y)
    assert conjugate(x * y) == conjugate(x) * conjugate(y)
    assert conjugate(x / y) == conjugate(x) / conjugate(y)
    assert conjugate(-x) == -conjugate(x)
def test_conjugate_transpose():
    """Interplay of conjugate/transpose/adjoint: any two compose to the third."""
    x = Symbol('x')
    assert conjugate(transpose(x)) == adjoint(x)
    assert transpose(conjugate(x)) == adjoint(x)
    assert adjoint(transpose(x)) == conjugate(x)
    assert transpose(adjoint(x)) == conjugate(x)
    assert adjoint(conjugate(x)) == transpose(x)
    assert conjugate(adjoint(x)) == transpose(x)

    class Symmetric(Expr):
        # A "symmetric" object: transpose is the identity, while adjoint
        # and conjugate return None (no simplification of their own), so
        # they must fall back on each other via transpose.
        def _eval_adjoint(self):
            return None

        def _eval_conjugate(self):
            return None

        def _eval_transpose(self):
            return self
    x = Symmetric()
    assert conjugate(x) == adjoint(x)
    assert transpose(x) == x
def test_transpose():
    """transpose(): scalars are fixed points; noncommutative products reverse."""
    a = Symbol('a', complex=True)
    assert transpose(a) == a
    assert transpose(I*a) == I*a
    x, y = symbols('x y')
    assert transpose(transpose(x)) == x
    assert transpose(x + y) == transpose(x) + transpose(y)
    assert transpose(x - y) == transpose(x) - transpose(y)
    assert transpose(x * y) == transpose(x) * transpose(y)
    assert transpose(x / y) == transpose(x) / transpose(y)
    assert transpose(-x) == -transpose(x)
    # Noncommutative symbols: products and quotients reverse order.
    x, y = symbols('x y', commutative=False)
    assert transpose(transpose(x)) == x
    assert transpose(x + y) == transpose(x) + transpose(y)
    assert transpose(x - y) == transpose(x) - transpose(y)
    assert transpose(x * y) == transpose(y) * transpose(x)
    assert transpose(x / y) == 1 / transpose(y) * transpose(x)
    assert transpose(-x) == -transpose(x)
def test_issue_4035():
    """expand(trig=True) must leave Abs/sign/arg untouched (issue 4035)."""
    x = Symbol('x')
    assert Abs(x).expand(trig=True) == Abs(x)
    assert sign(x).expand(trig=True) == sign(x)
    assert arg(x).expand(trig=True) == arg(x)
def test_issue_3206():
    """Abs is idempotent (issue 3206)."""
    x = Symbol('x')
    assert Abs(Abs(x)) == Abs(x)
def test_issue_4754_derivative_conjugate():
    """d/dx commutes with conjugation for real x and anticommutes for
    purely imaginary y (issue 4754)."""
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    assert (f(x).conjugate()).diff(x) == (f(x).diff(x)).conjugate()
    assert (f(y).conjugate()).diff(y) == -(f(y).diff(y)).conjugate()
def test_derivatives_issue1658():
    """Derivatives of re/im/Abs/arg applied to unknown functions (issue 1658)."""
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    # Along the real axis re/im commute with d/dx.
    assert re(f(x)).diff(x) == re(f(x).diff(x))
    assert im(f(x)).diff(x) == im(f(x).diff(x))
    # Along the imaginary axis they swap with a factor of -I.
    assert re(f(y)).diff(y) == -I*im(f(y).diff(y))
    assert im(f(y)).diff(y) == -I*re(f(y).diff(y))
    assert Abs(f(x)).diff(x).subs(f(x), 1 + I*x).doit() == x/sqrt(1 + x**2)
    assert arg(f(x)).diff(x).subs(f(x), 1 + I*x**2).doit() == 2*x/(1 + x**4)
    assert Abs(f(y)).diff(y).subs(f(y), 1 + y).doit() == -y/sqrt(1 - y**2)
    assert arg(f(y)).diff(y).subs(f(y), I + y**2).doit() == 2*y/(1 + y**4)
def test_periodic_argument():
    """periodic_argument/unbranched_argument on polar and ordinary numbers."""
    from sympy import (periodic_argument, unbranched_argument, oo,
                       principal_branch, polar_lift, pi)
    x = Symbol('x')
    p = Symbol('p', positive=True)
    # unbranched_argument is periodic_argument with an infinite period.
    assert unbranched_argument(2 + I) == periodic_argument(2 + I, oo)
    assert unbranched_argument(1 + x) == periodic_argument(1 + x, oo)
    assert N_equals(unbranched_argument((1 + I)**2), pi/2)
    assert N_equals(unbranched_argument((1 - I)**2), -pi/2)
    assert N_equals(periodic_argument((1 + I)**2, 3*pi), pi/2)
    assert N_equals(periodic_argument((1 - I)**2, 3*pi), -pi/2)
    assert unbranched_argument(principal_branch(x, pi)) == \
        periodic_argument(x, pi)
    # polar_lift is transparent to the argument for periods > pi ...
    assert unbranched_argument(polar_lift(2 + I)) == unbranched_argument(2 + I)
    assert periodic_argument(polar_lift(2 + I), 2*pi) == \
        periodic_argument(2 + I, 2*pi)
    assert periodic_argument(polar_lift(2 + I), 3*pi) == \
        periodic_argument(2 + I, 3*pi)
    # ... but at period pi the expression stays unevaluated.
    assert periodic_argument(polar_lift(2 + I), pi) == \
        periodic_argument(polar_lift(2 + I), pi)
    assert unbranched_argument(polar_lift(1 + I)) == pi/4
    # Positive prefactors do not change the periodic argument.
    assert periodic_argument(2*p, p) == periodic_argument(p, p)
    assert periodic_argument(pi*p, p) == periodic_argument(p, p)
    assert Abs(polar_lift(1 + I)) == Abs(1 + I)
@XFAIL
def test_principal_branch_fail():
    """Known failure: principal_branch evalf does not fall back to the
    global evalf machinery.

    Fix: `principal_branch` was only imported inside test_principal_branch,
    so this XFAIL test failed with a NameError rather than demonstrating
    the evalf issue it documents.
    """
    # TODO XXX why does abs(x)._eval_evalf() not fall back to global evalf?
    from sympy import principal_branch  # was missing: NameError masked the real failure
    assert N_equals(principal_branch((1 + I)**2, pi/2), 0)
def test_principal_branch():
    """principal_branch(): polar-aware reduction into the principal branch."""
    from sympy import principal_branch, polar_lift, exp_polar
    p = Symbol('p', positive=True)
    x = Symbol('x')
    neg = Symbol('x', negative=True)
    # polar_lift is absorbed; positive numeric factors are pulled out.
    assert principal_branch(polar_lift(x), p) == principal_branch(x, p)
    assert principal_branch(polar_lift(2 + I), p) == principal_branch(2 + I, p)
    assert principal_branch(2*x, p) == 2*principal_branch(x, p)
    assert principal_branch(1, pi) == exp_polar(0)
    assert principal_branch(-1, 2*pi) == exp_polar(I*pi)
    assert principal_branch(-1, pi) == exp_polar(0)
    # exp_polar phases are reduced modulo the period.
    assert principal_branch(exp_polar(3*pi*I)*x, 2*pi) == \
        principal_branch(exp_polar(I*pi)*x, 2*pi)
    assert principal_branch(neg*exp_polar(pi*I), 2*pi) == neg*exp_polar(-I*pi)
    assert N_equals(principal_branch((1 + I)**2, 2*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 3*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 1*pi), 2*I)
    # test argument sanitization
    assert principal_branch(x, I).func is principal_branch
    assert principal_branch(x, -4).func is principal_branch
    assert principal_branch(x, -oo).func is principal_branch
    assert principal_branch(x, zoo).func is principal_branch
@XFAIL
def test_issue_6167_6151():
    """Known failure: sign() of tiny positive differences built from pi**1000.

    Fix: `x` was never defined and `simplify` is not imported at module
    level, so this XFAIL test failed with a NameError instead of the
    sign() shortcomings it is meant to track.
    """
    from sympy import simplify  # not in the module-level import list
    n = pi**1000
    i = int(n)
    assert sign(n - i) == 1
    assert abs(n - i) == n - i
    x = Symbol('x')  # was undefined: NameError masked the intended failure
    eps = pi**-1500
    big = pi**1000
    one = cos(x)**2 + sin(x)**2
    e = big*one - big + eps
    assert sign(simplify(e)) == 1
    for xi in (111, 11, 1, S(1)/10):
        assert sign(e.subs(x, xi)) == 1
| |
#!/usr/bin/env python
''' Runs checks for mesos style. '''
import os
import re
import string
import subprocess
import sys
class LinterBase(object):
'''
This is an abstract class that provides the base functionality for
linting files in the mesos project. Its 'main()' function
walks through the set of files passed to it and runs some
standard linting over them. This includes checking for license headers
and checking for non-supported characters. From there it calls a
'run_lint()' function that can be overridden to provide
customizable style checks for a specific class of files (e.g. C++,
Python, etc.).
Any class that extends from 'LinterBase' should override the
following class variables / functions:
linter_type
source_dirs
exclude_files
source_files
comment_prefix
run_lint()
Please see the comments below for details on how to override each
variable.
'''
# The name of the linter to help with printing which linter files
# are currently being processed by.
linter_type = ''
# Root source paths (will be traversed recursively).
source_dirs = []
# Add file paths and patterns which should not be checked
# This should include 3rdparty libraries, includes and machine generated
# source.
exclude_files = ''
# A regex of possible matches for your source files.
source_files = ''
# A prefix at the beginning of the line to demark comments (e.g. '//')
comment_prefix = ''
def find_candidates(self, root_dir):
'''
Search through the all files rooted at 'root_dir' and compare
them against 'self.exclude_files' and 'self.source_files' to
come up with a set of candidate files to lint.
'''
exclude_file_regex = re.compile(self.exclude_files)
source_criteria_regex = re.compile(self.source_files)
for root, dirs, files in os.walk(root_dir):
for name in files:
path = os.path.join(root, name)
if exclude_file_regex.search(path) is not None:
continue
if source_criteria_regex.search(name) is not None:
yield path
def run_lint(self, source_paths):
'''
A custom function to provide linting for 'linter_type'.
It takes a list of source files to lint and returns the number
of errors found during the linting process.
It should print any errors as it encounters them to provide
feedback to the caller.
'''
pass
def check_license_header(self, source_paths):
''' Checks the license headers of the given files. '''
error_count = 0
for path in source_paths:
with open(path) as source_file:
head = source_file.readline()
# Check that opening comment has correct style.
# TODO(bbannier) We allow `Copyright` for currently deviating files.
# This should be removed one we have a uniform license format.
regex = r'^{comment_prefix} [Licensed|Copyright]'.\
format(comment_prefix=self.comment_prefix)
if not re.match(regex, head):
sys.stderr.write(
"{path}:1: A license header should appear on the file's "
"first line starting with '{comment_prefix} Licensed'.: {head}".\
format(path=path, head=head, comment_prefix=self.comment_prefix))
error_count += 1
return error_count
def check_encoding(self, source_paths):
'''
Checks for encoding errors in the given files. Source
code files must contain only printable ascii characters.
This excludes the extended ascii characters 128-255.
http://www.asciitable.com/
'''
error_count = 0
for path in source_paths:
with open(path) as source_file:
for line_number, line in enumerate(source_file):
# If we find an error, add 1 to both the character and
# the line offset to give them 1-based indexing
# instead of 0 (as is common in most editors).
char_errors = [offset for offset, char in enumerate(line)
if char not in string.printable]
if char_errors:
sys.stderr.write(
"{path}:{line_number}: Non-printable characters"
" found at [{offsets}]: \"{line}\"\n".format(
path=path,
line_number=line_number + 1,
offsets=', '.join([str(offset + 1) for offset
in char_errors]),
line=line.rstrip()))
error_count += 1
return error_count
def main(self, file_list):
'''
This function takes a list of files and lints them for the
class of files defined by 'linter_type'.
'''
# Verify that source roots are accessible from current working directory.
# A common error could be to call the style checker from other
# (possibly nested) paths.
for source_dir in self.source_dirs:
if not os.path.exists(source_dir):
print "Could not find '{dir}'".format(dir=source_dir)
print 'Please run from the root of the mesos source directory'
exit(1)
# Add all source file candidates to candidates list.
candidates = []
for source_dir in self.source_dirs:
for candidate in self.find_candidates(source_dir):
candidates.append(candidate)
# If file paths are specified, check all file paths that are
# candidates; else check all candidates.
file_paths = file_list if len(file_list) > 0 else candidates
# Compute the set intersect of the input file paths and candidates.
# This represents the reduced set of candidates to run lint on.
candidates_set = set(candidates)
clean_file_paths_set = set(map(lambda x: x.rstrip(), file_paths))
filtered_candidates_set = clean_file_paths_set.intersection(
candidates_set)
if filtered_candidates_set:
plural = '' if len(filtered_candidates_set) == 1 else 's'
print 'Checking {num_files} {linter} file{plural}'.\
format(num_files=len(filtered_candidates_set),
linter=self.linter_type,
plural=plural)
license_errors = self.check_license_header(filtered_candidates_set)
encoding_errors = self.check_encoding(list(filtered_candidates_set))
lint_errors = self.run_lint(list(filtered_candidates_set))
total_errors = license_errors + encoding_errors + lint_errors
sys.stderr.write('Total errors found: {num_errors}\n'.\
format(num_errors=total_errors))
return total_errors
else:
print "No {linter} files to lint".format(linter=self.linter_type)
return 0
class CppLinter(LinterBase):
    ''' Lints the C++ sources (src, include, libprocess, stout) with cpplint. '''
    linter_type = 'C++'

    source_dirs = ['src',
                   'include',
                   os.path.join('3rdparty', 'libprocess'),
                   os.path.join('3rdparty', 'stout')]

    # Bundled third-party code and generated protobuf sources are skipped.
    exclude_files = '(protobuf\-2\.4\.1|gmock\-1\.6\.0|glog\-0\.3\.3|boost\-1\.53\.0|libev\-4\.15|java/jni|\.pb\.cc|\.pb\.h|\.md|\.virtualenv)'

    source_files = '\.(cpp|hpp|cc|h)$'

    comment_prefix = '\/\/'

    def run_lint(self, source_paths):
        '''
        Runs cpplint over given files.
        http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
        '''
        # See cpplint.py for full list of rules.
        active_rules = [
            'build/class',
            'build/deprecated',
            'build/endif_comment',
            'build/nullptr',
            'readability/todo',
            'readability/namespace',
            'runtime/vlog',
            'whitespace/blank_line',
            'whitespace/comma',
            'whitespace/end_of_line',
            'whitespace/ending_newline',
            'whitespace/forcolon',
            'whitespace/indent',
            'whitespace/line_length',
            'whitespace/operators',
            'whitespace/semicolon',
            'whitespace/tab',
            'whitespace/comments',
            'whitespace/todo']

        # '-' first disables every rule, then each active rule is re-enabled.
        rules_filter = '--filter=-,+' + ',+'.join(active_rules)
        p = subprocess.Popen(
            ['python', 'support/cpplint.py', rules_filter] + source_paths,
            stderr=subprocess.PIPE,
            close_fds=True)

        # Lines are stored and filtered, only showing found errors instead
        # of e.g., 'Done processing XXX.' which tends to be dominant output.
        for line in p.stderr:
            if re.match('^(Done processing |Total errors found: )', line):
                continue
            sys.stderr.write(line)

        p.wait()
        # cpplint's exit status is used as the error count here -- NOTE(review):
        # confirm against the cpplint version in support/ that the return code
        # actually reflects the number of errors rather than a 0/1 flag.
        return p.returncode
class PyLinter(LinterBase):
    ''' Lints the new Python CLI sources with pylint inside a virtualenv. '''
    linter_type = 'Python'

    source_dirs = ['src/cli_new']

    exclude_files = '(protobuf\-2\.4\.1|gmock\-1\.6\.0|glog\-0\.3\.3|boost\-1\.53\.0|libev\-4\.15|java/jni|\.virtualenv)'

    source_files = '\.(py)$'

    comment_prefix = '#'

    def run_lint(self, source_paths):
        '''
        Runs pylint over given files.
        https://google.github.io/styleguide/pyguide.html
        '''
        cli_dir = os.path.abspath(self.source_dirs[0])
        source_files = ' '.join(source_paths)
        # NOTE(review): shell=True with interpolated paths; acceptable only
        # because all inputs are repo-local paths. The shell is needed to
        # source the virtualenv 'activate' script in the same process.
        p = subprocess.Popen(
            ['. {virtualenv_dir}/bin/activate; \
            PYTHONPATH={lib_dir}:{bin_dir} pylint --rcfile={config} --ignore={ignore} {files}'.\
            format(virtualenv_dir=os.path.join(cli_dir, '.virtualenv'),
                   lib_dir=os.path.join(cli_dir, 'lib'),
                   bin_dir=os.path.join(cli_dir, 'bin'),
                   config=os.path.join(cli_dir, 'pylint.config'),
                   ignore=os.path.join(cli_dir, 'bin', 'mesos'),
                   files=source_files)],
            shell=True, stdout=subprocess.PIPE)

        # Every pylint output line is one complaint, except the per-module
        # '*'-prefixed headers, which are not counted or echoed.
        num_errors = 0
        for line in p.stdout:
            if not line.startswith('*'):
                num_errors += 1
                sys.stderr.write(line)
        return num_errors

    def __should_build_virtualenv(self, file_list):
        ''' Return True when the pylint virtualenv is missing or stale. '''
        cli_dir = os.path.abspath(self.source_dirs[0])
        if not os.path.isdir(os.path.join(cli_dir, '.virtualenv')):
            print 'Virtualenv for python linter not detected ... building'
            return True

        basenames = []
        if file_list:
            basenames = [os.path.basename(file) for file in file_list]

        # Changes to the requirements or bash-completion files invalidate
        # the existing virtualenv.
        if 'pip-requirements.txt' in basenames:
            print 'The "pip-requirements.txt" file has changed.'
            print 'Rebuilding virtualenv ...'
            return True

        if 'mesos.bash_completion' in basenames:
            print 'The "mesos.bash_completion" file has changed.'
            print 'Rebuilding virtualenv ...'
            return True

        return False

    def __build_virtualenv(self):
        '''
        Rebuild the virtualenv.
        '''
        cli_dir = os.path.abspath(self.source_dirs[0])
        p = subprocess.Popen(
            [os.path.join(cli_dir, 'bootstrap')],
            stdout=subprocess.PIPE)

        # Buffer the bootstrap output and surface it only on failure.
        output = ''
        for line in p.stdout:
            output += line
        p.wait()

        if p.returncode != 0:
            sys.stderr.write(output)
            sys.exit(1);

    def main(self, file_list):
        '''
        Override main to rebuild our virtualenv if necessary.
        '''
        if self.__should_build_virtualenv(file_list):
            self.__build_virtualenv()
        return super(PyLinter, self).main(file_list)
if __name__ == '__main__':
    # Run every linter over the given file arguments (in order: C++,
    # then Python) and exit with the combined error count; 0 means clean.
    total_errors = 0
    for linter in (CppLinter(), PyLinter()):
        total_errors += linter.main(sys.argv[1:])
    sys.exit(total_errors)
| |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
# util __init__.py
from __future__ import unicode_literals
import webnotes
import easywebdav
import python_webdav.client as pywebdav_client
# Lazily-populated per-process globals caching user/session preferences.
user_time_zone = None
user_format = None
current_date = None
# DocField fieldtypes that carry no user-entered value (layout/display only).
no_value_fields = ['Section Break', 'Column Break', 'HTML', 'Table', 'FlexTable',
    'Button', 'Image', 'Graph']
# Standard fields present on every DocType record.
default_fields = ['doctype', 'name', 'owner', 'creation', 'modified', 'modified_by',
    'parent', 'parentfield', 'parenttype', 'idx', 'docstatus']
# used in import_docs.py
# TODO: deprecate it
def getCSVelement(v):
    """
    Returns the CSV value of `v`, For example:

    * apple becomes "apple"
    * hi"there becomes "hi""there"
    """
    v = cstr(v)
    if not v:
        return ''
    # Quote only when the value contains a delimiter, newline or quote;
    # embedded quotes are doubled per the CSV convention.
    if (',' in v) or ('\n' in v) or ('"' in v):
        return '"' + v.replace('"', '""') + '"'
    return v
def get_fullname(profile):
    """get the full name (first name + last name) of the user from Profile"""
    p = webnotes.conn.sql("""select first_name, last_name from `tabProfile`
		where name=%s""", profile, as_dict=1)
    if p:
        # Join the non-empty name parts; fall back to the profile id
        # itself when both names are missing.
        profile = " ".join(filter(None,
            [p[0].get('first_name'), p[0].get('last_name')])) or profile
    return profile
def auth():
    """Open a WebDAV client against the DMS server configured in the
    'LDAP Settings' singleton.

    Returns ["Done", client] on success, or ["Error", message] when no
    server is configured.
    """
    a = webnotes.conn.sql("select value from `tabSingles` where doctype='LDAP Settings' and field='dms_server'", as_list=1)
    if a:
        client_object = pywebdav_client.Client(a[0][0])
        # NOTE(review): hard-coded credentials -- these should come from
        # configuration, not source code.
        client_object.set_connection(username='swapnil', password='swapnil')
        return ["Done", client_object]
    else:
        return ["Error", "Server is not defined"]
def document_attach(source, target, auth_id, operation):
    """Transfer a file over the WebDAV client `auth_id`.

    operation == "upload" sends local `source` to remote `target`;
    any other value downloads remote `source` into local path `target`.
    """
    if operation == "upload":
        f = auth_id.upload_file(source, target)
    else:
        auth_id.download_file(source, dest_path=target)
def get_formatted_email(user):
    """get email id of user formatted as: John Doe <johndoe@example.com>"""
    # The Administrator account has no Profile record to look up.
    if user == "Administrator":
        return user
    from email.utils import formataddr
    return formataddr((get_fullname(user), user))
def extract_email_id(email):
    """fetch only the email part of the email id"""
    from email.utils import parseaddr
    fullname, email_id = parseaddr(email)
    # Python 2: coerce byte strings to unicode, dropping undecodable bytes.
    if isinstance(email_id, basestring) and not isinstance(email_id, unicode):
        email_id = email_id.decode("utf-8", "ignore")
    return email_id
def validate_email_add(email_str):
    """Validates the email string"""
    import re
    # Address part extracted first so display names ("John <j@x.com>")
    # do not break the match; returns the match object (truthy) or None.
    address = extract_email_id(email_str).lower()
    pattern = "[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
    return re.match(pattern, address)
def get_request_site_address(full_address=False):
    """get app url from request"""
    import os, conf
    if hasattr(conf, "host_name"):
        # An explicit host_name in site config wins over the environment.
        host_name = conf.host_name
    else:
        try:
            # Derive scheme + host from the CGI/WSGI environment.
            protocol = 'HTTPS' in os.environ.get('SERVER_PROTOCOL') and 'https://' or 'http://'
            host_name = protocol + os.environ.get('HTTP_HOST')
        except TypeError:
            # Not running inside a request (e.g. console script): fall back.
            return 'http://localhost'
    if full_address:
        return host_name + os.environ.get("REQUEST_URI", "")
    else:
        return host_name
def random_string(length):
    """Generate a random alphanumeric string of `length` characters.

    Uses `string.ascii_letters` instead of the locale-dependent (and
    Python 3-removed) `string.letters`, so output is always drawn from
    [a-zA-Z0-9].

    NOTE: `random.choice` is not cryptographically secure; do not use
    this for tokens or secrets.
    """
    import string
    from random import choice
    pool = string.ascii_letters + string.digits
    return ''.join([choice(pool) for i in range(length)])
def load_json(arg):
    """Parse `arg` as JSON unless it is already a parsed object.

    NOTE(review): the `encoding` keyword of json.loads is Python 2 only;
    it was deprecated and removed in Python 3.9.
    """
    # already a dictionary?
    if not isinstance(arg, basestring):
        return arg
    import json
    return json.loads(arg, encoding='utf-8')
# Get Traceback
# ==============================================================================
def getTraceback():
    """
    Returns the traceback of the Exception
    """
    import sys, traceback
    exc_type, value, tb = sys.exc_info()
    trace_list = traceback.format_tb(tb, None) + \
        traceback.format_exception_only(exc_type, value)
    # Python 2: decode the byte-string frames to unicode before joining.
    body = "Traceback (innermost last):\n" + "%-20s %s" % \
        (unicode((b"").join(trace_list[:-1]), 'utf-8'), unicode(trace_list[-1], 'utf-8'))
    if webnotes.logger:
        # Prefix the current database name so multi-site logs are traceable.
        webnotes.logger.error('Db:' + (webnotes.conn and webnotes.conn.cur_db_name or '') \
            + ' - ' + body)
    return body
def log(event, details):
    # NOTE(review): `event` is accepted but ignored -- only `details`
    # reaches the logger; confirm whether callers expect event tagging.
    webnotes.logger.info(details)
# datetime functions
def getdate(string_date):
    """
    Converts string date (yyyy-mm-dd) to datetime.date object.

    Accepts a datetime.datetime (truncated to its date), a
    datetime.date (returned as-is) or a string; a time component after
    a space ("yyyy-mm-dd hh:mm:ss") is discarded before parsing.

    Fixes: datetime.datetime must be checked BEFORE datetime.date
    (datetime is a subclass of date), and the old branch called
    `datetime.date()` with no arguments, which raises TypeError.
    """
    import datetime
    if isinstance(string_date, datetime.datetime):
        # Truncate to the calendar date.
        return string_date.date()
    elif isinstance(string_date, datetime.date):
        return string_date
    if " " in string_date:
        # Drop a trailing time component, e.g. "2013-01-01 10:00:00".
        string_date = string_date.split(" ")[0]
    return datetime.datetime.strptime(string_date, "%Y-%m-%d").date()
def add_to_date(date, years=0, months=0, days=0):
    """Adds `days` to the given date"""
    # Remember whether the caller passed a string so the result is
    # returned in the same form (string in, string out).
    format = isinstance(date, basestring)
    if date:
        date = getdate(date)
    else:
        raise Exception, "Start date required"
    # relativedelta handles month/year arithmetic with correct rollover.
    from dateutil.relativedelta import relativedelta
    date += relativedelta(years=years, months=months, days=days)
    if format:
        return date.strftime("%Y-%m-%d")
    else:
        return date
def add_days(date, days):
    # Convenience wrapper over add_to_date; preserves the input type.
    return add_to_date(date, days=days)
def add_months(date, months):
    # Convenience wrapper over add_to_date; preserves the input type.
    return add_to_date(date, months=months)
def add_years(date, years):
    # Convenience wrapper over add_to_date; preserves the input type.
    return add_to_date(date, years=years)
def date_diff(string_ed_date, string_st_date):
    """Return the number of whole days from start date to end date."""
    return (getdate(string_ed_date) - getdate(string_st_date)).days
def time_diff(string_ed_date, string_st_date):
    """Return end - start as a datetime.timedelta."""
    return get_datetime(string_ed_date) - get_datetime(string_st_date)
def time_diff_in_seconds(string_ed_date, string_st_date):
    """Return the difference between the two datetimes in seconds.

    Fixed to use total_seconds(): timedelta.seconds only holds the
    sub-day remainder (0..86399), so differences spanning a day or
    more -- and negative differences -- were silently wrong.
    """
    return time_diff(string_ed_date, string_st_date).total_seconds()
def time_diff_in_hours(string_ed_date, string_st_date):
    """Return the difference between the two datetimes in hours (6 dp).

    Fixed to use total_seconds(): the previous timedelta.seconds
    attribute ignores whole days, so differences of 24h or more were
    silently wrong.
    """
    return round(time_diff(string_ed_date, string_st_date).total_seconds() / 3600.0, 6)
def now_datetime():
    """Current time as an aware datetime in the user's time zone."""
    from datetime import datetime
    return convert_utc_to_user_timezone(datetime.utcnow())
def get_user_time_zone():
    """
    Resolve the user's time zone, cheapest source first: module global,
    webnotes cache, Control Panel setting, then the 'Asia/Calcutta' default.

    The resolved value is memoized in the `user_time_zone` module global and
    written back to the cache.
    """
    global user_time_zone
    if not user_time_zone:
        user_time_zone = webnotes.cache().get_value("time_zone")
        if not user_time_zone:
            user_time_zone = webnotes.conn.get_value('Control Panel', None, 'time_zone') \
                or 'Asia/Calcutta'
            webnotes.cache().set_value("time_zone", user_time_zone)
    return user_time_zone
def convert_utc_to_user_timezone(utc_timestamp):
    """Attach UTC tzinfo to a naive UTC timestamp, then convert to the user's zone."""
    from pytz import timezone
    utcnow = timezone('UTC').localize(utc_timestamp)
    return utcnow.astimezone(timezone(get_user_time_zone()))
def now():
    """Current datetime as 'yyyy-mm-dd hh:mm:ss'.

    When the `current_date` module global is set, its date is combined with
    the real current time (lets tests/scripts pin the date).
    """
    if current_date:
        return getdate(current_date).strftime("%Y-%m-%d") + " " + now_datetime().strftime('%H:%M:%S')
    else:
        return now_datetime().strftime('%Y-%m-%d %H:%M:%S')
def nowdate():
    """Return the current date (user time zone) as yyyy-mm-dd."""
    return now_datetime().strftime('%Y-%m-%d')
def today():
    """Alias for nowdate()."""
    return nowdate()
def nowtime():
    """Return the current time (user time zone) as hh:mm."""
    return now_datetime().strftime('%H:%M')
def get_first_day(dt, d_years=0, d_months=0):
    """
    Returns the first day of the month for the date specified by date object
    Also adds `d_years` and `d_months` if specified
    """
    import datetime
    dt = getdate(dt)
    # d_years, d_months are "deltas" to apply to dt
    # divmod carries month overflow into years (e.g. month 14 -> +1y, Feb)
    overflow_years, month = divmod(dt.month + d_months - 1, 12)
    year = dt.year + d_years + overflow_years
    return datetime.date(year, month + 1, 1)
def get_last_day(dt):
    """
    Returns last day of the month using:
    `get_first_day(dt, 0, 1) + datetime.timedelta(-1)`
    (first day of next month, minus one day)
    """
    import datetime
    return get_first_day(dt, 0, 1) + datetime.timedelta(-1)
def get_datetime(datetime_str):
    """Parse a 'yyyy-mm-dd hh:mm:ss' string into a naive datetime.

    datetime instances pass through, normalized: microseconds zeroed and
    tzinfo stripped.
    """
    from datetime import datetime
    if not isinstance(datetime_str, datetime):
        return datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
    # already a datetime: normalize instead of parsing
    return datetime_str.replace(microsecond=0, tzinfo=None)
def get_datetime_str(datetime_obj):
    """Format a datetime (or parseable datetime string) as 'yyyy-mm-dd hh:mm:ss'."""
    if isinstance(datetime_obj, basestring):
        datetime_obj = get_datetime(datetime_obj)
    return datetime_obj.strftime('%Y-%m-%d %H:%M:%S')
def formatdate(string_date=None):
    """
    Converts the given string date (default: today) to :data:`user_format`
    User format specified in :term:`Control Panel`
    Examples:
    * dd-mm-yyyy
    * mm-dd-yyyy
    * dd/mm/yyyy
    """
    if string_date:
        string_date = getdate(string_date)
    else:
        string_date = now_datetime().date()
    global user_format
    if not user_format:
        # lazily cache the configured format in a module global
        user_format = webnotes.conn.get_default("date_format")
    out = user_format
    # substitute each placeholder token with the formatted component
    return out.replace("dd", string_date.strftime("%d"))\
        .replace("mm", string_date.strftime("%m"))\
        .replace("yyyy", string_date.strftime("%Y"))
def global_date_format(date):
    """Format a date as '1 January 2012' (leading zero on the day stripped)."""
    formatted = getdate(date).strftime("%d %B %Y")
    if formatted.startswith("0"):
        return formatted[1:]
    return formatted
def dict_to_str(args, sep='&'):
    """
    Serialize a dict as a URL query string (e.g. {'a': 1} -> 'a=1').

    Values are URL-quoted; falsy values become the empty string.
    """
    import urllib
    t = []
    for k in args.keys():
        t.append(str(k)+'='+urllib.quote(str(args[k] or '')))
    return sep.join(t)
def timestamps_equal(t1, t2):
    """Return 1 when two string timestamps have identical components, else None.

    Components are obtained by splitting on '-', ':' and whitespace, so
    '2020-01-01 10:00:00' compares field by field.
    """
    def tokens(ts):
        return ts.replace(':', ' ').replace('-', ' ').split()
    parts1, parts2 = tokens(t1), tokens(t2)
    if len(parts1) != len(parts2):
        return
    for a, b in zip(parts1, parts2):
        if a != b:
            return
    return 1
def has_common(l1, l2):
    """Truthy (the intersection set) when l1 and l2 share any element."""
    common = set(l1).intersection(l2)
    return common
def flt(s, precision=None):
    """Convert to float, ignoring commas in strings; 0 on failure.

    With `precision`, the result is rounded via `_round`
    (round-half-to-even).
    """
    if isinstance(s, basestring):
        s = s.replace(',','')
    try:
        num = float(s)
        if precision is not None:
            num = _round(num, precision)
    except Exception:
        # best-effort conversion: anything unparseable collapses to 0
        num = 0
    return num
def cint(s):
    """Convert to int, truncating decimals; 0 on anything unparseable.

    Goes through float first so numeric strings like "5.7" work.
    """
    try:
        num = int(float(s))
    except Exception:
        # best-effort: bad/None input collapses to 0 -- but no longer a bare
        # `except:`, which also swallowed KeyboardInterrupt/SystemExit
        num = 0
    return num
def cstr(s):
    """Coerce any value to unicode: None -> '', byte strings decoded as utf-8."""
    if isinstance(s, unicode):
        return s
    elif s==None:
        return ''
    elif isinstance(s, basestring):
        return unicode(s, 'utf-8')
    else:
        return unicode(s)
def _round(num, precision=0):
    """round method for round halfs to nearest even algorithm (banker's rounding)"""
    precision = cint(precision)
    multiplier = 10 ** precision
    # shift the target digit to the units place; re-round to 8 places to
    # avoid float representation noise (e.g. 2.675 * 100 -> 267.49999...)
    num = round(num * multiplier if precision else num, 8)
    import math
    floor = math.floor(num)
    decimal_part = num - floor
    if decimal_part == 0.5:
        # exact half: round toward the nearest even integer
        num = floor if (floor % 2 == 0) else floor + 1
    else:
        num = round(num)
    return (num / multiplier) if precision else num
def encode(obj, encoding="utf-8"):
    """Encode a unicode string (or each unicode item of a list) to bytes.

    Non-unicode values pass through unchanged.
    """
    if isinstance(obj, list):
        out = []
        for o in obj:
            if isinstance(o, unicode):
                out.append(o.encode(encoding))
            else:
                out.append(o)
        return out
    elif isinstance(obj, unicode):
        return obj.encode(encoding)
    else:
        return obj
def parse_val(v):
    """Converts to simple datatypes from SQL query results"""
    import datetime
    if isinstance(v, (datetime.date, datetime.datetime)):
        v = unicode(v)
    elif isinstance(v, datetime.timedelta):
        # keep only hh:mm of the 'h:mm:ss' string representation
        v = ":".join(unicode(v).split(":")[:2])
    elif isinstance(v, long):
        v = int(v)
    return v
def fmt_money(amount, precision=None, currency=None):
    """
    Convert to string with commas for thousands, millions etc

    Separators and precision come from the default "number_format"
    (NOTE(review): the `precision` parameter is immediately shadowed by the
    format's own precision -- confirm whether callers rely on it).
    A currency symbol is prefixed when `currency` has one configured.
    """
    number_format = webnotes.conn.get_default("number_format") or "#,###.##"
    decimal_str, comma_str, precision = get_number_format_info(number_format)
    amount = '%.*f' % (precision, flt(amount))
    if amount.find('.') == -1:
        decimals = ''
    else:
        decimals = amount.split('.')[1]
    parts = []
    minus = ''
    if flt(amount) < 0:
        minus = '-'
    # work on the absolute integer part only
    amount = cstr(abs(flt(amount))).split('.')[0]
    if len(amount) > 3:
        parts.append(amount[-3:])
        amount = amount[:-3]
        # Indian-style format groups by 2 after the first group of 3
        val = number_format=="#,##,###.##" and 2 or 3
        while len(amount) > val:
            parts.append(amount[-val:])
            amount = amount[:-val]
    parts.append(amount)
    parts.reverse()
    amount = comma_str.join(parts) + (precision and (decimal_str + decimals) or "")
    amount = minus + amount
    if currency:
        symbol = webnotes.conn.get_value("Currency", currency, "symbol")
        if symbol:
            amount = symbol + " " + amount
    return amount
# Map: number-format pattern -> (decimal separator, group separator, precision).
number_format_info = {
    "#.###": ("", ".", 0),
    "#,###": ("", ",", 0),
    "#,###.##": (".", ",", 2),
    "#,##,###.##": (".", ",", 2),
    "#.###,##": (",", ".", 2),
    "# ###.##": (".", " ", 2),
    "#,###.###": (".", ",", 3),
}
def get_number_format_info(format):
    """(decimal_str, comma_str, precision) for a pattern; '#,###.##' style by default."""
    info = number_format_info.get(format)
    if not info:
        info = (".", ",", 2)
    return info
#
# convet currency to words
#
def money_in_words(number, main_currency = None, fraction_currency=None):
    """
    Returns string in words with currency and fraction currency,
    e.g. "INR One Hundred and Fifty Paise only."
    """
    d = get_defaults()
    if not main_currency:
        main_currency = d.get('currency', 'INR')
    if not fraction_currency:
        fraction_currency = webnotes.conn.get_value("Currency", main_currency, "fraction") or "Cent"
    n = "%.2f" % flt(number)
    main, fraction = n.split('.')
    if len(fraction)==1: fraction += '0'
    # the currency's number format decides million/billion vs lakh/crore wording
    number_format = webnotes.conn.get_value("Currency", main_currency, "number_format") or \
        webnotes.conn.get_default("number_format") or "#,###.##"
    in_million = True
    if number_format == "#,##,###.##": in_million = False
    out = main_currency + ' ' + in_words(main, in_million).title()
    if cint(fraction):
        out = out + ' and ' + in_words(fraction, in_million).title() + ' ' + fraction_currency
    return out + ' only.'
#
# convert number to words
#
def in_words(integer, in_million=True):
    """
    Returns string in words for the given integer.

    in_million=True uses thousand/million/billion grouping, otherwise the
    Indian thousand/lakh/crore grouping.
    """
    n=int(integer)
    known = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten',
        11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen',
        19: 'nineteen', 20: 'twenty', 30: 'thirty', 40: 'forty', 50: 'fifty', 60: 'sixty', 70: 'seventy', 80: 'eighty', 90: 'ninety'}

    def psn(n, known, xpsn):
        # recursive worker; xpsn is psn itself, threaded through explicitly
        import sys;
        if n in known: return known[n]
        bestguess, remainder = str(n), 0
        if n<=20:
            # should be unreachable: every n <= 20 is in `known`
            webnotes.errprint(sys.stderr)
            webnotes.errprint(n)
            webnotes.errprint("How did this happen?")
            assert 0
        elif n < 100:
            # tens plus units, e.g. "forty-two"
            bestguess= xpsn((n//10)*10, known, xpsn) + '-' + xpsn(n%10, known, xpsn)
            return bestguess
        elif n < 1000:
            bestguess= xpsn(n//100, known, xpsn) + ' ' + 'hundred'
            remainder = n%100
        else:
            if in_million:
                if n < 1000000:
                    bestguess= xpsn(n//1000, known, xpsn) + ' ' + 'thousand'
                    remainder = n%1000
                elif n < 1000000000:
                    bestguess= xpsn(n//1000000, known, xpsn) + ' ' + 'million'
                    remainder = n%1000000
                else:
                    bestguess= xpsn(n//1000000000, known, xpsn) + ' ' + 'billion'
                    remainder = n%1000000000
            else:
                if n < 100000:
                    bestguess= xpsn(n//1000, known, xpsn) + ' ' + 'thousand'
                    remainder = n%1000
                elif n < 10000000:
                    bestguess= xpsn(n//100000, known, xpsn) + ' ' + 'lakh'
                    remainder = n%100000
                else:
                    bestguess= xpsn(n//10000000, known, xpsn) + ' ' + 'crore'
                    remainder = n%10000000
        if remainder:
            # comma only before a remainder that itself has a hundreds part
            if remainder >= 100:
                comma = ','
            else:
                comma = ''
            return bestguess + comma + ' ' + xpsn(remainder, known, xpsn)
        else:
            return bestguess

    return psn(n, known, psn)
# Get Defaults
# ==============================================================================
def get_defaults(key=None):
    """
    Get dictionary of default values from the :term:`Control Panel`,
    or a single value if `key` is passed
    """
    return webnotes.conn.get_defaults(key)
def set_default(key, val):
    """
    Set / add a default value to :term:`Control Panel`
    """
    return webnotes.conn.set_default(key, val)
def remove_blanks(d):
    """Strip keys with '' or None values from `d` in place; returns `d`.

    Keys are collected first because deleting while iterating a dict raises
    a runtime error.
    """
    blank_keys = [k for k in d if d[k] == '' or d[k] == None]
    for k in blank_keys:
        del d[k]
    return d
def pprint_dict(d, level=1, no_blanks=True):
    """
    Pretty print a dictionary with tab indents, one "key: value" per line,
    sorted by key. A '##comment' key is emitted as a leading '#' line.
    no_blanks=True strips empty values from `d` in place first.
    """
    if no_blanks:
        remove_blanks(d)
    # make indent
    indent, ret = '', ''
    for i in range(0,level): indent += '\t'
    # add lines
    comment, lines = '', []
    kl = d.keys()
    kl.sort()
    # make lines
    for key in kl:
        if key != '##comment':
            # str({k: v})[1:-1] renders "k: v" without the surrounding braces
            tmp = {key: d[key]}
            lines.append(indent + str(tmp)[1:-1] )
    # add comment string
    if '##comment' in kl:
        ret = ('\n' + indent) + '# ' + d['##comment'] + '\n'
    # open
    ret += indent + '{\n'
    # lines
    ret += indent + ',\n\t'.join(lines)
    # close
    ret += '\n' + indent + '}'
    return ret
def get_common(d1,d2):
    """
    returns the list of keys held by both dicts with equal values
    """
    common = []
    for key in d1:
        if key in d2 and d1[key] == d2[key]:
            common.append(key)
    return common
def get_common_dict(d1, d2):
    """
    return the dict of key/value pairs shared (equal) between d1 and d2
    """
    return dict((k, d1[k]) for k in d1 if k in d2 and d1[k] == d2[k])
def get_diff_dict(d1, d2):
    """
    Return the key/value pairs present in d2 but missing from d1.

    Note: only keys are compared -- pairs whose key exists in d1 with a
    different value are NOT included. (The previous docstring wrongly
    described this as the "common" dictionary.)
    """
    diff_keys = set(d2.keys()).difference(set(d1.keys()))
    ret = {}
    for d in diff_keys: ret[d] = d2[d]
    return ret
def get_file_timestamp(fn):
    """
    Returns the file's mtime as an integer string, or None when it is missing.
    """
    import os
    from webnotes.utils import cint
    try:
        return str(cint(os.stat(fn).st_mtime))
    except OSError, e:
        # errno 2 (ENOENT) means the file does not exist -> None;
        # any other OS error is re-raised
        if e.args[0]!=2:
            raise e
        else:
            return None
# to be deprecated
def make_esc(esc_chars):
    """
    Function generator: returns a function that backslash-escapes every
    character of `esc_chars` found in its argument.
    """
    def escaper(s):
        return ''.join(('\\' + ch) if ch in esc_chars else ch for ch in s)
    return escaper
# esc / unescape characters -- used for command line
def esc(s, esc_chars):
    """
    Backslash-escape each of `esc_chars` in `s`; falsy input yields "".
    """
    if not s:
        return ""
    out = s
    for ch in esc_chars:
        out = out.replace(ch, '\\' + ch)
    return out
def unesc(s, esc_chars):
    """
    Reverse of esc(): strip the backslash before each of `esc_chars`.
    """
    out = s
    for ch in esc_chars:
        out = out.replace('\\' + ch, ch)
    return out
def is_html(text):
    """Crude HTML sniff: True when text contains a common tag opener."""
    return any(marker in text for marker in ("<br>", "<p", "<img", "<div"))
def strip_html(text):
    """
    removes anything enclosed in and including <> (non-greedy per tag)
    """
    import re
    return re.sub(r'<.*?>', '', text)
def escape_html(text):
    """Escape HTML-special characters so `text` is safe to embed in markup.

    The previous table mapped every character to itself (the entity names
    had been lost), making this function a no-op -- restore the entities.
    """
    html_escape_table = {
        "&": "&amp;",
        '"': "&quot;",
        "'": "&apos;",
        ">": "&gt;",
        "<": "&lt;",
    }
    return "".join(html_escape_table.get(c,c) for c in text)
def get_doctype_label(dt=None):
    """
    Gets label of a doctype; with no argument, returns a {name: label}
    map for all doctypes.
    """
    if dt:
        res = webnotes.conn.sql("""\
            SELECT name, dt_label FROM `tabDocType Label`
            WHERE name=%s""", dt)
        # NOTE(review): res[0][0] is the `name` column, not dt_label --
        # the label would be res[0][1]; confirm intent against callers
        return res and res[0][0] or dt
    else:
        res = webnotes.conn.sql("SELECT name, dt_label FROM `tabDocType Label`")
        dt_label_dict = {}
        for r in res:
            dt_label_dict[r[0]] = r[1]
        return dt_label_dict
def get_label_doctype(label):
    """
    Gets doctype from its label; falls back to the label itself when unmapped.
    """
    res = webnotes.conn.sql("""\
        SELECT name FROM `tabDocType Label`
        WHERE dt_label=%s""", label)
    return res and res[0][0] or label
def pretty_date(iso_datetime):
    """
    Takes an ISO time and returns a string representing how
    long ago the date represents ("just now", "3 hours ago", ...).
    Ported from PrettyDate by John Resig
    """
    if not iso_datetime: return ''
    from datetime import datetime
    import math
    if isinstance(iso_datetime, basestring):
        iso_datetime = datetime.strptime(iso_datetime, '%Y-%m-%d %H:%M:%S')
    # "now" in the user's time zone, reparsed so both operands are naive
    now_dt = datetime.strptime(now(), '%Y-%m-%d %H:%M:%S')
    dt_diff = now_dt - iso_datetime
    # available only in python 2.7+
    # dt_diff_seconds = dt_diff.total_seconds()
    dt_diff_seconds = dt_diff.days * 86400.0 + dt_diff.seconds
    dt_diff_days = math.floor(dt_diff_seconds / 86400.0)
    # different cases, smallest granularity first
    if dt_diff_seconds < 60.0:
        return 'just now'
    elif dt_diff_seconds < 120.0:
        return '1 minute ago'
    elif dt_diff_seconds < 3600.0:
        return '%s minutes ago' % cint(math.floor(dt_diff_seconds / 60.0))
    elif dt_diff_seconds < 7200.0:
        return '1 hour ago'
    elif dt_diff_seconds < 86400.0:
        return '%s hours ago' % cint(math.floor(dt_diff_seconds / 3600.0))
    elif dt_diff_days == 1.0:
        return 'Yesterday'
    elif dt_diff_days < 7.0:
        return '%s days ago' % cint(dt_diff_days)
    elif dt_diff_days < 31.0:
        return '%s week(s) ago' % cint(math.ceil(dt_diff_days / 7.0))
    elif dt_diff_days < 365.0:
        return '%s months ago' % cint(math.ceil(dt_diff_days / 30.0))
    else:
        return 'more than %s year(s) ago' % cint(math.floor(dt_diff_days / 365.0))
def execute_in_shell(cmd, verbose=0):
    """Run `cmd` through the shell; returns (stderr_output, stdout_output).

    SECURITY: shell=True interpolates `cmd` via the shell -- never pass
    untrusted input here.
    """
    # using Popen instead of os.system - as recommended by python docs
    from subprocess import Popen
    import tempfile
    # capture both streams in temp files to avoid pipe-buffer deadlocks
    with tempfile.TemporaryFile() as stdout:
        with tempfile.TemporaryFile() as stderr:
            p = Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
            p.wait()
            stdout.seek(0)
            out = stdout.read()
            stderr.seek(0)
            err = stderr.read()
    if verbose:
        if err: print err
        if out: print out
    return err, out
def comma_or(some_list):
    """comma_sep with ' or ' before the final item."""
    return comma_sep(some_list, " or ")
def comma_and(some_list):
    """comma_sep with ' and ' before the final item."""
    return comma_sep(some_list, " and ")
def comma_sep(some_list, sep):
    """Join a list as "'a', 'b'<sep>'c'"; single items stay unquoted;
    non-list values pass through unchanged."""
    if isinstance(some_list, (list, tuple)):
        # list(some_list) is done to preserve the existing list
        some_list = [unicode(s) for s in list(some_list)]
        if not some_list:
            return ""
        elif len(some_list) == 1:
            return some_list[0]
        else:
            some_list = ["'%s'" % s for s in some_list]
            return ", ".join(some_list[:-1]) + sep + some_list[-1]
    else:
        return some_list
def filter_strip_join(some_list, sep):
    """given a list, filter None values, strip spaces and join"""
    # NB: filter(None, ...) drops ALL falsy values ('' and 0 too), not just None
    return (cstr(sep)).join((cstr(a).strip() for a in filter(None, some_list)))
def get_path(*path):
    """Join `path` components onto the application base path."""
    import os
    return os.path.join(get_base_path(), *path)
def get_base_path():
    """Absolute directory containing the site's `conf` module."""
    import conf
    import os
    return os.path.dirname(os.path.abspath(conf.__file__))
def get_url(uri=None):
    """Site base URL, optionally joined with `uri`.

    When the request address is missing or localhost, falls back to the
    subdomain configured in Website Settings.
    """
    url = get_request_site_address()
    if not url or "localhost" in url:
        subdomain = webnotes.conn.get_value("Website Settings", "Website Settings",
            "subdomain")
        if subdomain:
            if "http" not in subdomain:
                url = "http://" + subdomain
    if uri:
        import urllib
        url = urllib.basejoin(url, uri)
    return url
def get_url_to_form(doctype, name, base_url=None, label=None):
    """HTML anchor linking to the Form view of the given document."""
    if not base_url:
        base_url = get_url()
    if not label: label = name
    return """<a href="%(base_url)s/app.html#!Form/%(doctype)s/%(name)s">%(label)s</a>""" % locals()
def encode_dict(d, encoding="utf-8"):
    """In place: encode every unicode string value of `d` to bytes; returns `d`."""
    for key in d:
        if isinstance(d[key], basestring) and isinstance(d[key], unicode):
            d[key] = d[key].encode(encoding)
    return d
def decode_dict(d, encoding="utf-8"):
    """In place: decode every byte-string value of `d` to unicode; returns `d`."""
    for key in d:
        if isinstance(d[key], basestring) and not isinstance(d[key], unicode):
            d[key] = d[key].decode(encoding, "ignore")
    return d
import operator
# Maps a filter-condition token to a predicate over an (a, b) pair.
# NB: these are Python 2 tuple-parameter lambdas -- each is called with a
# single (a, b) tuple argument, as compare() below does.
operator_map = {
    # startswith
    "^": lambda (a, b): (a or "").startswith(b),
    # in or not in a list
    "in": lambda (a, b): operator.contains(b, a),
    "not in": lambda (a, b): not operator.contains(b, a),
    # comparison operators
    "=": lambda (a, b): operator.eq(a, b),
    "!=": lambda (a, b): operator.ne(a, b),
    ">": lambda (a, b): operator.gt(a, b),
    "<": lambda (a, b): operator.lt(a, b),
    ">=": lambda (a, b): operator.ge(a, b),
    "<=": lambda (a, b): operator.le(a, b),
    # unary checks: b is ignored
    "not None": lambda (a, b): a and True or False,
    "None": lambda (a, b): (not a) and True or False
}
def compare(val1, condition, val2):
    """Apply a condition token from operator_map; unknown conditions are False."""
    if condition in operator_map:
        return operator_map[condition]((val1, val2))
    return False
| |
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import fnmatch
import os
import sys
# Toolchains a .dsc project may list under TOOLS.
VALID_TOOLCHAINS = [
  'clang-newlib',
  'newlib',
  'glibc',
  'pnacl',
  'win',
  'linux',
  'mac',
]
# Schema for .dsc project description files, enforced by ValidateFormat().
# 'KEY' : ( <TYPE>, [Accepted Values], <Required?>)
DSC_FORMAT = {
    'DISABLE': (bool, [True, False], False),
    'SEL_LDR': (bool, [True, False], False),
    # Disable this project from being included in the NaCl packaged app.
    'DISABLE_PACKAGE': (bool, [True, False], False),
    # Don't generate the additional files to allow this project to run as a
    # packaged app (i.e. manifest.json, background.js, etc.).
    'NO_PACKAGE_FILES': (bool, [True, False], False),
    'TOOLS' : (list, VALID_TOOLCHAINS, False),
    'CONFIGS' : (list, ['Debug', 'Release'], False),
    'PREREQ' : (list, '', False),
    'TARGETS' : (list, {
        'NAME': (str, '', True),
        # main = nexe target
        # lib = library target
        # so = shared object target, automatically added to NMF
        # so-standalone = shared object target, not put into NMF
        'TYPE': (str,
            ['main', 'lib', 'static-lib', 'so', 'so-standalone',
             'linker-script'],
            True),
        'SOURCES': (list, '', True),
        'EXTRA_SOURCES': (list, '', False),
        'CFLAGS': (list, '', False),
        'CFLAGS_GCC': (list, '', False),
        'CXXFLAGS': (list, '', False),
        'DEFINES': (list, '', False),
        'LDFLAGS': (list, '', False),
        'INCLUDES': (list, '', False),
        'LIBS' : (dict, VALID_TOOLCHAINS, False),
        'DEPS' : (list, '', False)
    }, False),
    'HEADERS': (list, {
        'FILES': (list, '', True),
        'DEST': (str, '', True),
    }, False),
    'SEARCH': (list, '', False),
    'POST': (str, '', False),
    'PRE': (str, '', False),
    'DEST': (str, ['getting_started', 'examples/api',
                   'examples/demo', 'examples/tutorial',
                   'src', 'tests'], True),
    'NAME': (str, '', False),
    'DATA': (list, '', False),
    'TITLE': (str, '', False),
    'GROUP': (str, '', False),
    'EXPERIMENTAL': (bool, [True, False], False),
    'PERMISSIONS': (list, '', False),
    'SOCKET_PERMISSIONS': (list, '', False),
    'FILESYSTEM_PERMISSIONS': (list, '', False),
    'MULTI_PLATFORM': (bool, [True, False], False),
    'MIN_CHROME_VERSION': (str, '', False),
}
class ValidationError(Exception):
  """Raised when a .dsc project description fails schema validation."""
  pass
def ValidateFormat(src, dsc_format):
  """Validate dict `src` against schema `dsc_format` (see DSC_FORMAT).

  Raises ValidationError on the first missing/unknown key, wrong type, or
  disallowed value. Recurses into list-of-dict entries.
  """
  # Verify all required keys are there
  for key in dsc_format:
    exp_type, exp_value, required = dsc_format[key]
    if required and key not in src:
      raise ValidationError('Missing required key %s.' % key)

  # For each provided key, verify it's valid
  for key in src:
    # Verify the key is known
    if key not in dsc_format:
      raise ValidationError('Unexpected key %s.' % key)

    exp_type, exp_value, required = dsc_format[key]
    value = src[key]

    # Verify the value is non-empty if required
    if required and not value:
      raise ValidationError('Expected non-empty value for %s.' % key)

    # If the expected type is a dict, but the provided type is a list
    # then the list applies to all keys of the dictionary, so we reset
    # the expected type and value.
    if exp_type is dict:
      if type(value) is list:
        exp_type = list
        exp_value = ''

    # Verify the key is of the expected type
    if exp_type != type(value):
      raise ValidationError('Key %s expects %s not %s.' % (
          key, exp_type.__name__.upper(), type(value).__name__.upper()))

    # If it's a bool, the expected values are always True or False.
    if exp_type is bool:
      continue

    # If it's a string and there are expected values, make sure it matches
    if exp_type is str:
      if type(exp_value) is list and exp_value:
        if value not in exp_value:
          raise ValidationError("Value '%s' not expected for %s." %
                                (value, key))
      continue

    # if it's a list, then we need to validate the values
    if exp_type is list:
      # If we expect a dictionary, then call this recursively
      if type(exp_value) is dict:
        for val in value:
          ValidateFormat(val, exp_value)
        continue
      # If we expect a list of strings
      if type(exp_value) is str:
        for val in value:
          if type(val) is not str:
            raise ValidationError('Value %s in %s is not a string.' %
                                  (val, key))
        continue
      # if we expect a particular string
      if type(exp_value) is list:
        for val in value:
          if val not in exp_value:
            raise ValidationError('Value %s not expected in %s.' %
                                  (val, key))
        continue

    # if we are expecting a dict, verify the keys are allowed
    if exp_type is dict:
      print "Expecting dict\n"
      for sub in value:
        if sub not in exp_value:
          raise ValidationError('Sub key %s not expected in %s.' %
                                (sub, key))
      continue

    # If we got this far, it's an unexpected type
    raise ValidationError('Unexpected type %s for key %s.' %
                          (str(type(src[key])), key))
def LoadProject(filename):
  """Load and validate one .dsc project description.

  Returns the project dict (with FILEPATH added and TOOLS defaulted to all
  valid toolchains), or None when the project sets DISABLE. Raises
  ValidationError on parse or schema errors.
  """
  with open(filename, 'r') as descfile:
    try:
      # SECURITY: the .dsc contents are eval'd -- only load trusted,
      # in-tree project descriptions.
      desc = eval(descfile.read(), {}, {})
    except Exception as e:
      raise ValidationError(e)
  if desc.get('DISABLE', False):
    return None
  ValidateFormat(desc, DSC_FORMAT)
  desc['FILEPATH'] = os.path.abspath(filename)
  desc.setdefault('TOOLS', VALID_TOOLCHAINS)
  return desc
def LoadProjectTreeUnfiltered(srcpath):
  """Walk `srcpath`, loading every *.dsc project, grouped by DEST directory."""
  # Build the tree
  out = collections.defaultdict(list)
  for root, _, files in os.walk(srcpath):
    for filename in files:
      if fnmatch.fnmatch(filename, '*.dsc'):
        filepath = os.path.join(root, filename)
        try:
          desc = LoadProject(filepath)
        except ValidationError as e:
          # re-raise with the offending file path prepended
          raise ValidationError("Failed to validate: %s: %s" % (filepath, e))
        if desc:
          key = desc['DEST']
          out[key].append(desc)
  return out
def LoadProjectTree(srcpath, include, exclude=None):
  """Load all projects under `srcpath`, keeping those matching the filters."""
  out = LoadProjectTreeUnfiltered(srcpath)
  return FilterTree(out, MakeDefaultFilterFn(include, exclude))
def GenerateProjects(tree):
  """Yield (branch, desc) for every project desc in the tree, branch by branch."""
  for branch, descs in tree.items():
    for desc in descs:
      yield branch, desc
def FilterTree(tree, filter_fn):
  """New branch->descs tree keeping only descs accepted by filter_fn.

  Branches where no desc survives are omitted entirely.
  """
  filtered = collections.defaultdict(list)
  for branch in tree:
    kept = [desc for desc in tree[branch] if filter_fn(desc)]
    if kept:
      filtered[branch].extend(kept)
  return filtered
def MakeDefaultFilterFn(include, exclude):
  """Build a desc predicate: match `include` (if any) and not `exclude`."""
  def DefaultFilterFn(desc):
    matches_include = not include or DescMatchesFilter(desc, include)
    matches_exclude = exclude and DescMatchesFilter(desc, exclude)
    # Exclude list overrides include list.
    if matches_exclude:
      return False
    return matches_include
  return DefaultFilterFn
def DescMatchesFilter(desc, filters):
  """True when `desc` matches every filter key (set intersection per key)."""
  for key, expected in filters.iteritems():
    # For any filtered key which is unspecified, assumed False
    value = desc.get(key, False)

    # If we provide an expected list, match at least one
    if type(expected) not in (list, tuple):
      expected = set([expected])
    if type(value) != list:
      value = set([value])
    if not set(expected) & set(value):
      return False

  # If we fall through, then we matched the filters
  return True
def PrintProjectTree(tree):
  """Print each DEST branch followed by its tab-indented project names."""
  for key in tree:
    print key + ':'
    for val in tree[key]:
      print '\t' + val['NAME']
def main(args):
  """CLI entry: print the filtered project tree; returns a process exit code."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('-e', '--experimental',
      help='build experimental examples and libraries', action='store_true')
  parser.add_argument('-t', '--toolchain',
      help='Build using toolchain. Can be passed more than once.',
      action='append')
  parser.add_argument('project_root', default='.')
  options = parser.parse_args(args)

  filters = {}
  if options.toolchain:
    filters['TOOLS'] = options.toolchain
  if not options.experimental:
    # experimental projects are excluded unless -e is given
    filters['EXPERIMENTAL'] = False

  try:
    tree = LoadProjectTree(options.project_root, include=filters)
  except ValidationError as e:
    sys.stderr.write(str(e) + '\n')
    return 1

  PrintProjectTree(tree)
  return 0
# Script entry point.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| |
import asyncio
import asyncpg
import jinja2
import os
import ujson
from random import randint
from operator import itemgetter
from urllib.parse import parse_qs
async def setup():
    """Create the global asyncpg connection pool (run once at import, below)."""
    global pool
    pool = await asyncpg.create_pool(
        user=os.getenv('PGUSER', 'benchmarkdbuser'),
        password=os.getenv('PGPASS', 'benchmarkdbpass'),
        database='hello_world',
        host='tfb-database',
        port=5432
    )
# SQL used by the database test handlers.
READ_ROW_SQL = 'SELECT "randomnumber" FROM "world" WHERE id = $1'
WRITE_ROW_SQL = 'UPDATE "world" SET "randomnumber"=$1 WHERE id=$2'
# Fortune row (id 0) appended at request time by the /fortunes handler.
ADDITIONAL_ROW = [0, 'Additional fortune added at request time.']

# Reusable ASGI 'http.response.start' messages, one per content type.
JSON_RESPONSE = {
    'type': 'http.response.start',
    'status': 200,
    'headers': [
        [b'content-type', b'application/json'],
    ]
}
HTML_RESPONSE = {
    'type': 'http.response.start',
    'status': 200,
    'headers': [
        [b'content-type', b'text/html; charset=utf-8'],
    ]
}
PLAINTEXT_RESPONSE = {
    'type': 'http.response.start',
    'status': 200,
    'headers': [
        [b'content-type', b'text/plain; charset=utf-8'],
    ]
}
# Connection pool; filled in by setup() via run_until_complete below.
pool = None
# Sort key for fortunes: the message text (second column).
key = itemgetter(1)
json_dumps = ujson.dumps

# Load the fortunes template once at import time.
template = None
path = os.path.join('templates', 'fortune.html')
with open(path, 'r') as template_file:
    template_text = template_file.read()
    template = jinja2.Template(template_text)

# Create the database pool before the server starts handling requests.
loop = asyncio.get_event_loop()
loop.run_until_complete(setup())
def get_num_queries(scope):
    """Row count requested via ?queries=N, clamped to [1, 500].

    Defaults to 1 when the parameter is missing or not an integer.
    """
    try:
        raw = parse_qs(scope['query_string'])[b'queries'][0]
        count = int(raw)
    except (KeyError, IndexError, ValueError):
        return 1
    return max(1, min(count, 500))
async def json_serialization(scope, receive, send):
    """
    Test type 1: JSON Serialization
    """
    payload = json_dumps({'message': 'Hello, world!'}).encode('utf-8')
    await send(JSON_RESPONSE)
    await send({'type': 'http.response.body', 'body': payload, 'more_body': False})
async def single_database_query(scope, receive, send):
    """
    Test type 2: Single database object
    """
    row_id = randint(1, 10000)
    connection = await pool.acquire()
    try:
        number = await connection.fetchval(READ_ROW_SQL, row_id)
        world = {'id': row_id, 'randomNumber': number}
    finally:
        # always return the connection, even when the query raised
        await pool.release(connection)
    content = json_dumps(world).encode('utf-8')
    await send(JSON_RESPONSE)
    await send({
        'type': 'http.response.body',
        'body': content,
        'more_body': False
    })
async def multiple_database_queries(scope, receive, send):
    """
    Test type 3: Multiple database queries
    """
    num_queries = get_num_queries(scope)
    # pick all random ids up front so the connection is held briefly
    row_ids = [randint(1, 10000) for _ in range(num_queries)]
    worlds = []
    connection = await pool.acquire()
    try:
        # prepare once, then fetch per id
        statement = await connection.prepare(READ_ROW_SQL)
        for row_id in row_ids:
            number = await statement.fetchval(row_id)
            worlds.append({'id': row_id, 'randomNumber': number})
    finally:
        await pool.release(connection)
    content = json_dumps(worlds).encode('utf-8')
    await send(JSON_RESPONSE)
    await send({
        'type': 'http.response.body',
        'body': content,
        'more_body': False
    })
async def fortunes(scope, receive, send):
    """
    Test type 4: Fortunes
    """
    connection = await pool.acquire()
    try:
        fortunes = await connection.fetch('SELECT * FROM Fortune')
    finally:
        await pool.release(connection)
    # extra row is appended after the query; rows sorted by message text
    fortunes.append(ADDITIONAL_ROW)
    fortunes.sort(key=key)
    content = template.render(fortunes=fortunes).encode('utf-8')
    await send(HTML_RESPONSE)
    await send({
        'type': 'http.response.body',
        'body': content,
        'more_body': False
    })
async def database_updates(scope, receive, send):
    """
    Test type 5: Database updates
    """
    num_queries = get_num_queries(scope)
    # (row_id, new_random_number) pairs
    updates = [(randint(1, 10000), randint(1, 10000)) for _ in range(num_queries)]
    worlds = [{'id': row_id, 'randomNumber': number} for row_id, number in updates]
    connection = await pool.acquire()
    try:
        statement = await connection.prepare(READ_ROW_SQL)
        for row_id, _ in updates:
            # read each row before updating it; the fetched value is unused --
            # NOTE(review): presumably required by the benchmark rules, confirm
            await statement.fetchval(row_id)
        await connection.executemany(WRITE_ROW_SQL, updates)
    finally:
        await pool.release(connection)
    content = json_dumps(worlds).encode('utf-8')
    await send(JSON_RESPONSE)
    await send({
        'type': 'http.response.body',
        'body': content,
        'more_body': False
    })
async def plaintext(scope, receive, send):
    """
    Test type 6: Plaintext
    """
    await send(PLAINTEXT_RESPONSE)
    await send({'type': 'http.response.body', 'body': b'Hello, world!', 'more_body': False})
async def handle_404(scope, receive, send):
    """Fallback handler: plain-text 'Not found' body.

    NB: reuses PLAINTEXT_RESPONSE, so the status code stays 200.
    """
    await send(PLAINTEXT_RESPONSE)
    await send({'type': 'http.response.body', 'body': b'Not found', 'more_body': False})
# URL path -> ASGI handler dispatch table; unmatched paths get handle_404.
routes = {
    '/json': json_serialization,
    '/db': single_database_query,
    '/queries': multiple_database_queries,
    '/fortunes': fortunes,
    '/updates': database_updates,
    '/plaintext': plaintext,
}
async def main(scope, receive, send):
    """ASGI application entry point: route by path, 404 handler as fallback."""
    await routes.get(scope['path'], handle_404)(scope, receive, send)
| |
import json
import pytz
try:
import mock
except ImportError:
from unittest import mock
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from ..models import ReportTaskStatus
class ViewTest(TestCase):
    """Base test case: admin, normal and unauthenticated API clients."""

    def setUp(self):
        self.adminclient = APIClient()
        self.normalclient = APIClient()
        # no credentials attached -- used to exercise unauthenticated access
        self.otherclient = APIClient()
        # Admin User setup
        self.adminusername = 'testadminuser'
        self.adminpassword = 'testadminpass'
        self.adminuser = User.objects.create_superuser(
            self.adminusername,
            'testadminuser@example.com',
            self.adminpassword)
        admintoken = Token.objects.create(user=self.adminuser)
        self.admintoken = admintoken.key
        self.adminclient.credentials(
            HTTP_AUTHORIZATION='Token ' + self.admintoken)
        # Normal User setup
        self.normalusername = 'testnormaluser'
        self.normalpassword = 'testnormalpass'
        self.normaluser = User.objects.create_user(
            self.normalusername,
            'testnormaluser@example.com',
            self.normalpassword)
        normaltoken = Token.objects.create(user=self.normaluser)
        self.normaltoken = normaltoken.key
        self.normalclient.credentials(
            HTTP_AUTHORIZATION='Token ' + self.normaltoken)
class ReportsViewTest(ViewTest):
    """Tests for the /api/v1/reports/ and /api/v1/reporttasks/ endpoints."""

    def midnight(self, timestamp):
        # normalize a datetime to 00:00:00 in the project time zone
        return timestamp.replace(hour=0, minute=0, second=0, microsecond=0,
                                 tzinfo=pytz.timezone(settings.TIME_ZONE))

    def test_get_returns_list_of_reports(self):
        response = self.normalclient.get('/api/v1/reports/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(isinstance(response.data, dict))
        self.assertTrue(isinstance(response.data['reports'], dict))

    @mock.patch("reports.tasks.detailed_report.generate_report.apply_async")
    def test_auth_required(self, mock_generation):
        data = {}
        # Without authentication
        request = self.otherclient.post('/api/v1/reports/', json.dumps(data),
                                        content_type='application/json')
        self.assertEqual(request.status_code, 401,
                         "Authentication should be required.")
        # With authenticated
        request = self.normalclient.post('/api/v1/reports/', json.dumps(data),
                                         content_type='application/json')
        self.assertEqual(request.status_code, 202)

    @mock.patch("reports.tasks.detailed_report.generate_report.apply_async")
    def test_post_successful(self, mock_generation):
        data = {
            'start_date': '2016-01-01',
            'end_date': '2016-02-01',
            'email_to': ['foo@example.com'],
            'email_subject': 'The Email Subject'
        }
        request = self.adminclient.post('/api/v1/reports/',
                                        json.dumps(data),
                                        content_type='application/json')
        self.assertEqual(request.status_code, 202)
        self.assertEqual(request.data, {"report_generation_requested": True})
        task_status = ReportTaskStatus.objects.last()
        # the celery task receives the request parameters plus the id of the
        # freshly created ReportTaskStatus row
        mock_generation.assert_called_once_with(kwargs={
            "start_date": '2016-01-01',
            "end_date": '2016-02-01',
            "email_recipients": ['foo@example.com'],
            "email_sender": settings.DEFAULT_FROM_EMAIL,
            "email_subject": 'The Email Subject',
            "task_status_id": task_status.id})
        self.assertEqual(task_status.status, ReportTaskStatus.PENDING)

    def test_response_on_incorrect_date_format(self):
        data = {
            'start_date': '2016:01:01',
            'end_date': '2016:02:01',
            'email_to': ['foo@example.com'],
            'email_subject': 'The Email Subject'
        }
        request = self.adminclient.post('/api/v1/reports/',
                                        json.dumps(data),
                                        content_type='application/json')
        self.assertEqual(request.status_code, 400)
        self.assertEqual(request.data, {
            'start_date':
                ["time data '2016:01:01' does not match format '%Y-%m-%d'"],
            'end_date':
                ["time data '2016:02:01' does not match format '%Y-%m-%d'"]
        })

    def test_report_task_view(self):
        """
        This view should only return the last 10 items.
        """
        for i in range(15):
            ReportTaskStatus.objects.create(**{
                "start_date": self.midnight(datetime.strptime('2016-01-01',
                                                              '%Y-%m-%d')),
                "end_date": self.midnight(datetime.strptime('2016-02-01',
                                                            '%Y-%m-%d')),
                "email_subject": 'The Email Subject',
                "file_size": 12343,
                "status": ReportTaskStatus.PENDING
            })
        request = self.normalclient.get('/api/v1/reporttasks/')
        results = json.loads(request.content.decode('utf8'))['results']
        self.assertEqual(len(results), 10)
        self.assertEqual(results[0]['status'], 'Pending')
        self.assertEqual(results[0]['email_subject'], 'The Email Subject')
        self.assertEqual(results[0]['file_size'], 12343)
        self.assertEqual(results[0]['start_date'], '2016-01-01 00:00:00+00:00')
        self.assertEqual(results[0]['end_date'], '2016-02-01 00:00:00+00:00')
        self.assertEqual(request.status_code, 200)
class MSISDNMessagesReportViewTest(ViewTest):
    """Tests for the msisdn-messages report generation endpoint."""

    celery_method = ('reports.tasks.msisdn_message_report.'
                     'generate_msisdn_message_report.apply_async')

    @mock.patch(celery_method)
    def test_creates_task_status(self, celery_method_patch):
        """A report request records a single pending ReportTaskStatus row."""
        response = self.normalclient.post('/api/v1/reports/msisdn-messages/',
                                          json.dumps({
                                              'start_date': '2017-09-01',
                                              'end_date': '2018-09-01'}),
                                          content_type='application/json')

        report_task_statuses = ReportTaskStatus.objects.all()

        self.assertEqual(response.status_code, 202)
        self.assertEqual(len(report_task_statuses), 1)
        # NOTE(review): these compare model fields directly against the
        # posted strings -- assumes the view stores them unconverted;
        # confirm against the serializer.
        self.assertEqual(report_task_statuses.first().start_date, '2017-09-01')
        self.assertEqual(report_task_statuses.first().end_date, '2018-09-01')
        self.assertEqual(report_task_statuses.first().status, 'P')

    @mock.patch(celery_method)
    def test_creates_background_task(self, celery_method_patch):
        """The celery task is queued with default email settings."""
        self.normalclient.post('/api/v1/reports/msisdn-messages/',
                               json.dumps({'start_date': '2017-09-01',
                                           'end_date': '2018-09-01',
                                           'msisdn_list': ['+2345565942365']}),
                               content_type='application/json')

        report_task_status = ReportTaskStatus.objects.first()
        celery_method_patch.assert_called_once_with(kwargs={
            "start_date": '2017-09-01',
            "end_date": '2018-09-01',
            'msisdns': ['+2345565942365'],
            'task_status_id': report_task_status.id,
            'email_recipients': [],
            'email_sender': settings.DEFAULT_FROM_EMAIL,
            'email_subject': 'HelloMama Generated Report'
        })

    @mock.patch(celery_method)
    def test_forwards_email_details_to_task(self, celery_method_patch):
        """Caller-supplied email details override the defaults."""
        self.normalclient.post('/api/v1/reports/msisdn-messages/',
                               json.dumps({'start_date': '2017-09-01',
                                           'end_date': '2018-09-01',
                                           'msisdn_list': ['+2344263256918'],
                                           'email_to': ['foo@example.com'],
                                           'email_from': 'bar@example.com',
                                           'email_subject': 'Cohort report'}),
                               content_type='application/json')

        report_task_status = ReportTaskStatus.objects.first()
        celery_method_patch.assert_called_once_with(kwargs={
            "start_date": '2017-09-01',
            "end_date": '2018-09-01',
            'msisdns': ['+2344263256918'],
            'task_status_id': report_task_status.id,
            'email_recipients': ['foo@example.com'],
            'email_sender': 'bar@example.com',
            'email_subject': 'Cohort report'
        })

    def test_raises_400_for_invalid_msisdns(self):
        """Each malformed msisdn variant is rejected with a 400.

        The original test repeated the identical request/assert sequence
        three times; the variants are now looped over instead.
        """
        expected = {
            'msisdn_list': ["Invalid value for: msisdn_list. Msisdns must "
                            "only contain digits, be 14 characters long and "
                            "contain the prefix '+234'"]}
        # Variants: too short, wrong country prefix, non-digit characters.
        for msisdn in ('+2345565', '+1234265556585', '+234sdk83dfs61'):
            response = self.normalclient.post(
                '/api/v1/reports/msisdn-messages/',
                json.dumps({'start_date': '2017-09-01',
                            'end_date': '2018-09-01',
                            'msisdn_list': [msisdn]}),
                content_type='application/json')
            self.assertEqual(response.status_code, 400)
            self.assertEqual(response.data, expected)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class BaseInheritsV3Test(base.BaseIdentityV3AdminTest):
    """Shared fixtures for the OS-INHERIT extension tests.

    Creates a throwaway domain containing one project, one group and one
    user, and tears them down again after the test class finishes.
    """

    @classmethod
    def skip_checks(cls):
        super(BaseInheritsV3Test, cls).skip_checks()
        if not test.is_extension_enabled('OS-INHERIT', 'identity'):
            raise cls.skipException("Inherits aren't enabled")

    @classmethod
    def resource_setup(cls):
        super(BaseInheritsV3Test, cls).resource_setup()
        user_name = data_utils.rand_name('user-')
        user_description = '%s description' % user_name
        user_email = '%s@testmail.tm' % user_name
        user_password = data_utils.rand_name('pass-')
        cls.domain = cls.domains_client.create_domain(
            data_utils.rand_name('domain-'),
            description=data_utils.rand_name('domain-desc-'))['domain']
        cls.project = cls.projects_client.create_project(
            data_utils.rand_name('project-'),
            description=data_utils.rand_name('project-desc-'),
            domain_id=cls.domain['id'])['project']
        cls.group = cls.groups_client.create_group(
            name=data_utils.rand_name('group-'), project_id=cls.project['id'],
            domain_id=cls.domain['id'])['group']
        cls.user = cls.users_client.create_user(
            name=user_name, description=user_description,
            password=user_password, email=user_email,
            project_id=cls.project['id'],
            domain_id=cls.domain['id'])['user']

    @classmethod
    def resource_cleanup(cls):
        # The domain must be disabled before it can be deleted.
        cls.groups_client.delete_group(cls.group['id'])
        cls.users_client.delete_user(cls.user['id'])
        cls.projects_client.delete_project(cls.project['id'])
        cls.domains_client.update_domain(cls.domain['id'], enabled=False)
        cls.domains_client.delete_domain(cls.domain['id'])
        super(BaseInheritsV3Test, cls).resource_cleanup()

    def _list_assertions(self, body, fetched_role_ids, role_id):
        """Assert a role listing contains exactly the expected role."""
        self.assertEqual(len(body), 1)
        self.assertIn(role_id, fetched_role_ids)
class InheritsV3TestJSON(BaseInheritsV3Test):
    """Exercises inherited (OS-INHERIT) role assignment operations."""

    @test.idempotent_id('4e6f0366-97c8-423c-b2be-41eae6ac91c8')
    def test_inherit_assign_list_check_revoke_roles_on_domains_user(self):
        """Assign, list, check and revoke an inherited role on a domain user."""
        # Create role
        src_role = self.roles_client.create_role(
            name=data_utils.rand_name('Role'))['role']
        self.addCleanup(self.roles_client.delete_role, src_role['id'])
        # Assign role on domains user
        self.inherited_roles_client.create_inherited_role_on_domains_user(
            self.domain['id'], self.user['id'], src_role['id'])
        # list role on domains user
        roles = self.inherited_roles_client.\
            list_inherited_project_role_for_user_on_domain(
                self.domain['id'], self.user['id'])['roles']
        fetched_role_ids = [i['id'] for i in roles]
        self._list_assertions(roles, fetched_role_ids,
                              src_role['id'])
        # Check role on domains user
        (self.inherited_roles_client.
         check_user_inherited_project_role_on_domain(
             self.domain['id'], self.user['id'], src_role['id']))
        # Revoke role from domains user.
        self.inherited_roles_client.delete_inherited_role_from_user_on_domain(
            self.domain['id'], self.user['id'], src_role['id'])

    @test.idempotent_id('c7a8dda2-be50-4fb4-9a9c-e830771078b1')
    def test_inherit_assign_list_check_revoke_roles_on_domains_group(self):
        """Assign, list, check and revoke an inherited role on a domain group."""
        # Create role
        src_role = self.roles_client.create_role(
            name=data_utils.rand_name('Role'))['role']
        self.addCleanup(self.roles_client.delete_role, src_role['id'])
        # Assign role on domains group
        self.inherited_roles_client.create_inherited_role_on_domains_group(
            self.domain['id'], self.group['id'], src_role['id'])
        # List role on domains group
        roles = self.inherited_roles_client.\
            list_inherited_project_role_for_group_on_domain(
                self.domain['id'], self.group['id'])['roles']
        fetched_role_ids = [i['id'] for i in roles]
        self._list_assertions(roles, fetched_role_ids,
                              src_role['id'])
        # Check role on domains group
        (self.inherited_roles_client.
         check_group_inherited_project_role_on_domain(
             self.domain['id'], self.group['id'], src_role['id']))
        # Revoke role from domains group
        self.inherited_roles_client.delete_inherited_role_from_group_on_domain(
            self.domain['id'], self.group['id'], src_role['id'])

    @test.idempotent_id('18b70e45-7687-4b72-8277-b8f1a47d7591')
    def test_inherit_assign_check_revoke_roles_on_projects_user(self):
        """Assign, check and revoke an inherited role on a project user."""
        # Create role
        src_role = self.roles_client.create_role(
            name=data_utils.rand_name('Role'))['role']
        self.addCleanup(self.roles_client.delete_role, src_role['id'])
        # Assign role on projects user
        self.inherited_roles_client.create_inherited_role_on_projects_user(
            self.project['id'], self.user['id'], src_role['id'])
        # Check role on projects user
        (self.inherited_roles_client.
         check_user_has_flag_on_inherited_to_project(
             self.project['id'], self.user['id'], src_role['id']))
        # Revoke role from projects user
        self.inherited_roles_client.delete_inherited_role_from_user_on_project(
            self.project['id'], self.user['id'], src_role['id'])

    @test.idempotent_id('26021436-d5a4-4256-943c-ded01e0d4b45')
    def test_inherit_assign_check_revoke_roles_on_projects_group(self):
        """Assign, check and revoke an inherited role on a project group."""
        # Create role
        src_role = self.roles_client.create_role(
            name=data_utils.rand_name('Role'))['role']
        self.addCleanup(self.roles_client.delete_role, src_role['id'])
        # Assign role on projects group
        self.inherited_roles_client.create_inherited_role_on_projects_group(
            self.project['id'], self.group['id'], src_role['id'])
        # Check role on projects group
        (self.inherited_roles_client.
         check_group_has_flag_on_inherited_to_project(
             self.project['id'], self.group['id'], src_role['id']))
        # Revoke role from projects group
        (self.inherited_roles_client.
         delete_inherited_role_from_group_on_project(
             self.project['id'], self.group['id'], src_role['id']))

    @test.idempotent_id('3acf666e-5354-42ac-8e17-8b68893bcd36')
    def test_inherit_assign_list_revoke_user_roles_on_domain(self):
        """A role inherited from a domain is effective on all its projects."""
        # Create role
        src_role = self.roles_client.create_role(
            name=data_utils.rand_name('Role'))['role']
        self.addCleanup(self.roles_client.delete_role, src_role['id'])

        # Create a project hierarchy
        leaf_project_name = data_utils.rand_name('project')
        leaf_project = self.projects_client.create_project(
            leaf_project_name, domain_id=self.domain['id'],
            parent_id=self.project['id'])['project']
        self.addCleanup(
            self.projects_client.delete_project, leaf_project['id'])

        # Assign role on domain
        self.inherited_roles_client.create_inherited_role_on_domains_user(
            self.domain['id'], self.user['id'], src_role['id'])

        # List "effective" role assignments from user on the parent project
        assignments = (
            self.role_assignments.list_user_project_effective_assignments(
                self.project['id'], self.user['id']))['role_assignments']
        self.assertNotEmpty(assignments)

        # List "effective" role assignments from user on the leaf project
        assignments = (
            self.role_assignments.list_user_project_effective_assignments(
                leaf_project['id'], self.user['id']))['role_assignments']
        self.assertNotEmpty(assignments)

        # Revoke role from domain
        self.inherited_roles_client.delete_inherited_role_from_user_on_domain(
            self.domain['id'], self.user['id'], src_role['id'])

        # List "effective" role assignments from user on the parent project
        # should return an empty list
        assignments = (
            self.role_assignments.list_user_project_effective_assignments(
                self.project['id'], self.user['id']))['role_assignments']
        self.assertEmpty(assignments)

        # List "effective" role assignments from user on the leaf project
        # should return an empty list
        assignments = (
            self.role_assignments.list_user_project_effective_assignments(
                leaf_project['id'], self.user['id']))['role_assignments']
        self.assertEmpty(assignments)

    @test.idempotent_id('9f02ccd9-9b57-46b4-8f77-dd5a736f3a06')
    def test_inherit_assign_list_revoke_user_roles_on_project_tree(self):
        """A role inherited from a parent project is effective on its leaf."""
        # Create role
        src_role = self.roles_client.create_role(
            name=data_utils.rand_name('Role'))['role']
        self.addCleanup(self.roles_client.delete_role, src_role['id'])

        # Create a project hierarchy
        leaf_project_name = data_utils.rand_name('project')
        leaf_project = self.projects_client.create_project(
            leaf_project_name, domain_id=self.domain['id'],
            parent_id=self.project['id'])['project']
        self.addCleanup(
            self.projects_client.delete_project, leaf_project['id'])

        # Assign role on parent project
        self.inherited_roles_client.create_inherited_role_on_projects_user(
            self.project['id'], self.user['id'], src_role['id'])

        # List "effective" role assignments from user on the leaf project
        assignments = (
            self.role_assignments.list_user_project_effective_assignments(
                leaf_project['id'], self.user['id']))['role_assignments']
        self.assertNotEmpty(assignments)

        # Revoke role from parent project
        self.inherited_roles_client.delete_inherited_role_from_user_on_project(
            self.project['id'], self.user['id'], src_role['id'])

        # List "effective" role assignments from user on the leaf project
        # should return an empty list
        assignments = (
            self.role_assignments.list_user_project_effective_assignments(
                leaf_project['id'], self.user['id']))['role_assignments']
        self.assertEmpty(assignments)
| |
# -*- coding: utf-8 -*-
"""Parsers for MacOS fseventsd files."""
from __future__ import unicode_literals
from dfdatetime import semantic_time as dfdatetime_semantic_time
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.lib import specification
from plaso.parsers import dtfabric_parser
from plaso.parsers import manager
class FseventsdEventData(events.EventData):
  """MacOS file system event (fseventsd) event data.

  Attributes:
    event_identifier (int): the record event identifier.
    flags (int): flags stored in the record.
    node_identifier (int): file system node identifier related to the file
        system event.
    path (str): path recorded in the fseventsd record.
  """

  DATA_TYPE = 'macos:fseventsd:record'

  def __init__(self):
    """Initializes fseventsd event data."""
    super(FseventsdEventData, self).__init__(data_type=self.DATA_TYPE)
    self.event_identifier = None
    self.flags = None
    self.node_identifier = None
    self.path = None
class FseventsdParser(dtfabric_parser.DtFabricBaseParser):
  """Parser for fseventsd files.

  This parser supports both version 1 and version 2 fseventsd files.
  Refer to http://nicoleibrahim.com/apple-fsevents-forensics/ for details.
  """

  NAME = 'fseventsd'
  DATA_FORMAT = 'MacOS File System Events Disk Log Stream (fseventsd) file'

  # The version 1 format was used in Mac OS X 10.5 (Leopard) through macOS 10.12
  # (Sierra).
  _DLS_V1_SIGNATURE = b'1SLD'

  # The version 2 format was introduced in MacOS High Sierra (10.13).
  _DLS_V2_SIGNATURE = b'2SLD'

  _DEFINITION_FILE = 'fseventsd.yaml'

  @classmethod
  def GetFormatSpecification(cls):
    """Retrieves the format specification.

    Returns:
      FormatSpecification: format specification.
    """
    format_specification = specification.FormatSpecification(cls.NAME)
    format_specification.AddNewSignature(cls._DLS_V1_SIGNATURE, offset=0)
    format_specification.AddNewSignature(cls._DLS_V2_SIGNATURE, offset=0)
    return format_specification

  def _ParseDLSPageHeader(self, file_object, page_offset):
    """Parses a DLS page header from a file-like object.

    Args:
      file_object (file): file-like object to read the header from.
      page_offset (int): offset of the start of the page header, relative
          to the start of the file.

    Returns:
      tuple: containing:
        dls_page_header: parsed page header structure.
        int: number of bytes read, that is the size of the page header.

    Raises:
      ParseError: when the header cannot be parsed.
    """
    page_header_map = self._GetDataTypeMap('dls_page_header')

    try:
      page_header, page_size = self._ReadStructureFromFileObject(
          file_object, page_offset, page_header_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse page header at offset: 0x{0:08x} '
          'with error: {1!s}'.format(page_offset, exception))

    return page_header, page_size

  def _BuildEventData(self, record):
    """Builds an FseventsdData object from a parsed structure.

    Args:
      record (dls_record_v1|dls_record_v2): parsed record structure.

    Returns:
      FseventsdEventData: event data attribute container.
    """
    event_data = FseventsdEventData()
    event_data.path = record.path
    event_data.flags = record.event_flags
    event_data.event_identifier = record.event_identifier
    # Node identifier is only set in DLS V2 records.
    event_data.node_identifier = getattr(record, 'node_identifier', None)

    return event_data

  def _GetParentModificationTime(self, gzip_file_entry):
    """Retrieves the modification time of the file entry's parent file.

    Note that this retrieves the time from the file entry of the parent of the
    gzip file entry's path spec, which is different from trying to retrieve it
    from the gzip file entry's parent file entry.

    It would be preferable to retrieve the modification time from the metadata
    in the gzip file itself, but it appears to not be set when the file is
    written by fseventsd.

    Args:
      gzip_file_entry (dfvfs.FileEntry): file entry of the gzip file containing
          the fseventsd data.

    Returns:
      dfdatetime.DateTimeValues: parent modification time, or None if not
          available.
    """
    parent_file_entry = path_spec_resolver.Resolver.OpenFileEntry(
        gzip_file_entry.path_spec.parent)

    if not parent_file_entry:
      return None

    return parent_file_entry.modification_time

  def ParseFileObject(self, parser_mediator, file_object):
    """Parses an fseventsd file.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      file_object (dfvfs.FileIO): a file-like object.

    Raises:
      UnableToParseFile: when the header cannot be parsed.
    """
    page_header_map = self._GetDataTypeMap('dls_page_header')

    try:
      page_header, file_offset = self._ReadStructureFromFileObject(
          file_object, 0, page_header_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.UnableToParseFile(
          'Unable to parse page header with error: {0!s}'.format(
              exception))

    # page_size covers the whole page including its header, so the end of
    # the current page is its size measured from the start of the file.
    current_page_end = page_header.page_size

    file_entry = parser_mediator.GetFileEntry()
    date_time = self._GetParentModificationTime(file_entry)

    # TODO: Change this to use a more representative time definition (time span)
    # when https://github.com/log2timeline/dfdatetime/issues/65 is resolved.
    if date_time:
      timestamp_description = definitions.TIME_DESCRIPTION_RECORDED
    else:
      date_time = dfdatetime_semantic_time.NotSet()
      timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
    # NOTE: a single event object is reused for every record; each record
    # only contributes distinct event data.
    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)

    file_size = file_object.get_size()
    while file_offset < file_size:
      if file_offset >= current_page_end:
        # Crossed into the next page: read its header before its records.
        try:
          page_header, header_size = self._ParseDLSPageHeader(
              file_object, file_offset)
        except errors.ParseError as exception:
          parser_mediator.ProduceExtractionWarning(
              'Unable to parse page header with error: {0!s}'.format(
                  exception))
          break

        current_page_end += page_header.page_size
        file_offset += header_size
        continue

      # The record layout depends on the page's signature version.
      if page_header.signature == self._DLS_V1_SIGNATURE:
        record_map = self._GetDataTypeMap('dls_record_v1')
      else:
        record_map = self._GetDataTypeMap('dls_record_v2')

      try:
        record, record_length = self._ReadStructureFromFileObject(
            file_object, file_offset, record_map)
        file_offset += record_length
      except (ValueError, errors.ParseError) as exception:
        parser_mediator.ProduceExtractionWarning(
            'Unable to parse page record with error: {0!s}'.format(
                exception))
        break

      event_data = self._BuildEventData(record)
      parser_mediator.ProduceEventWithEventData(event, event_data)
# Register the parser with the plaso parser manager so it is discoverable.
manager.ParsersManager.RegisterParser(FseventsdParser)
| |
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
class Kinesis(BaseTest):
    """Tests for Kinesis stream, Firehose and Analytics policies.

    All tests run against recorded AWS responses (replay flight data);
    resource names and statuses must match the recordings.
    """

    def test_stream_query(self):
        """Filter streams by shard count and verify tags/status."""
        factory = self.replay_flight_data("test_kinesis_stream_query")
        p = self.load_policy(
            {
                "name": "kstream",
                "resource": "kinesis",
                "filters": [
                    {"type": "value", "value_type": "size", "value": 3, "key": "Shards"}
                ],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["Tags"], [{"Key": "Origin", "Value": "home"}])
        self.assertEqual(resources[0]["StreamStatus"], "ACTIVE")

    def test_stream_delete(self):
        """The delete action transitions a stream to DELETING."""
        factory = self.replay_flight_data("test_kinesis_stream_delete")
        p = self.load_policy(
            {
                "name": "kstream",
                "resource": "kinesis",
                "filters": [{"StreamName": "sock-drawer"}],
                "actions": ["delete"],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        stream = factory().client("kinesis").describe_stream(StreamName="sock-drawer")[
            "StreamDescription"
        ]
        self.assertEqual(stream["StreamStatus"], "DELETING")

    def test_stream_encrypt(self):
        """The encrypt action enables KMS encryption on a stream."""
        factory = self.replay_flight_data("test_kinesis_encrypt")
        p = self.load_policy(
            {
                "name": "kstream",
                "resource": "kinesis",
                "filters": [{"StreamName": "sock-drawer"}],
                "actions": [{"type": "encrypt", "key": "aws/kinesis"}],
            },
            session_factory=factory,
        )
        p.run()
        stream = factory().client("kinesis").describe_stream(StreamName="sock-drawer")[
            "StreamDescription"
        ]
        self.assertEqual(stream["EncryptionType"], "KMS")

    def test_hose_query(self):
        """Firehose delivery streams can be filtered by name."""
        factory = self.replay_flight_data("test_kinesis_hose_query")
        p = self.load_policy(
            {
                "name": "khole",
                "resource": "firehose",
                "filters": [{"DeliveryStreamName": "sock-index-hose"}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["DeliveryStreamStatus"], "ACTIVE")

    def test_firehose_delete(self):
        """The delete action transitions a delivery stream to DELETING."""
        factory = self.replay_flight_data("test_kinesis_hose_delete")
        p = self.load_policy(
            {
                "name": "khole",
                "resource": "firehose",
                "filters": [{"DeliveryStreamName": "sock-index-hose"}],
                "actions": ["delete"]
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            factory().client("firehose").describe_delivery_stream(
                DeliveryStreamName="sock-index-hose"
            )[
                "DeliveryStreamDescription"
            ][
                "DeliveryStreamStatus"
            ],
            "DELETING",
        )

    def test_firehose_extended_s3_encrypt_s3_destination(self):
        """encrypt-s3-destination adds KMS config to an extended S3 destination."""
        factory = self.replay_flight_data("test_firehose_ext_s3_encrypt_s3_destination")
        p = self.load_policy(
            {
                "name": "khole",
                "resource": "firehose",
                "filters": [{"type": "value",
                             "key": "Destinations[0].S3DestinationDescription.EncryptionConfiguration.NoEncryptionConfig",  # noqa: E501
                             "value": "present"}],
                "actions": [{"type": "encrypt-s3-destination",
                             "key_arn": "arn:aws:kms:us-east-1:123456789:alias/aws/s3"}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        s = factory().client("firehose").describe_delivery_stream(
            DeliveryStreamName="firehose-s3"
        )['DeliveryStreamDescription']['Destinations'][0]
        assert 'KMSEncryptionConfig' in s['S3DestinationDescription']['EncryptionConfiguration'].keys()  # noqa: E501

    def test_firehose_splunk_encrypt_s3_destination(self):
        """encrypt-s3-destination works for a Splunk destination's S3 backup."""
        factory = self.replay_flight_data("test_firehose_splunk_encrypt_s3_destination")
        p = self.load_policy(
            {
                "name": "khole",
                "resource": "firehose",
                "filters": [{"type": "value",
                             "key": "Destinations[0].SplunkDestinationDescription.S3DestinationDescription.EncryptionConfiguration.NoEncryptionConfig",  # noqa: E501
                             "value": "present"}],
                "actions": [{"type": "encrypt-s3-destination",
                             "key_arn": "arn:aws:kms:us-east-1:123456789:alias/aws/s3"}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        s = factory().client("firehose").describe_delivery_stream(
            DeliveryStreamName="firehose-splunk"
        )['DeliveryStreamDescription']['Destinations'][0]['SplunkDestinationDescription']
        assert 'KMSEncryptionConfig' in \
            s['S3DestinationDescription']['EncryptionConfiguration'].keys()

    def test_firehose_elasticsearch_encrypt_s3_destination(self):
        """encrypt-s3-destination works for an Elasticsearch destination.

        NOTE(review): this test describes DeliveryStreamName
        "firehose-splunk" while asserting on the Elasticsearch destination
        -- looks like a copy-paste from the splunk test; it matches the
        recorded flight data, so confirm against the recording before
        renaming.
        """
        factory = self.replay_flight_data("test_firehose_elasticsearch_encrypt_s3_destination")
        p = self.load_policy(
            {
                "name": "khole",
                "resource": "firehose",
                "filters": [{"type": "value",
                             "key": "Destinations[0].ElasticsearchDestinationDescription.S3DestinationDescription.EncryptionConfiguration.NoEncryptionConfig",  # noqa: E501
                             "value": "present"}],
                "actions": [{"type": "encrypt-s3-destination",
                             "key_arn": "arn:aws:kms:us-east-1:123456789:alias/aws/s3"}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        s = factory().client("firehose").describe_delivery_stream(
            DeliveryStreamName="firehose-splunk"
        )['DeliveryStreamDescription']['Destinations'][0]['ElasticsearchDestinationDescription']
        assert 'KMSEncryptionConfig' in \
            s['S3DestinationDescription']['EncryptionConfiguration'].keys()

    def test_firehose_redshift_encrypt_s3_destination(self):
        """encrypt-s3-destination works for a Redshift destination's S3 backup."""
        factory = self.replay_flight_data("test_firehose_redshift_encrypt_s3_destination")
        p = self.load_policy(
            {
                "name": "khole",
                "resource": "firehose",
                "filters": [{"type": "value",
                             "key": "Destinations[0].RedshiftDestinationDescription.S3DestinationDescription.EncryptionConfiguration.NoEncryptionConfig",  # noqa: E501
                             "value": "present"}],
                "actions": [{"type": "encrypt-s3-destination",
                             "key_arn": "arn:aws:kms:us-east-1:123456789:alias/aws/s3"}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        s = factory().client("firehose").describe_delivery_stream(
            DeliveryStreamName="firehose-redshift"
        )['DeliveryStreamDescription']['Destinations'][0]['RedshiftDestinationDescription']
        assert 'KMSEncryptionConfig' in \
            s['S3DestinationDescription']['EncryptionConfiguration'].keys()

    def test_app_query(self):
        """Kinesis Analytics applications can be filtered by status."""
        factory = self.replay_flight_data("test_kinesis_analytics_query")
        p = self.load_policy(
            {
                "name": "kapp",
                "resource": "kinesis-analytics",
                "filters": [{"ApplicationStatus": "RUNNING"}],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["ApplicationName"], "sock-app")

    def test_app_delete(self):
        """The delete action transitions an application to DELETING."""
        factory = self.replay_flight_data("test_kinesis_analytics_delete")
        p = self.load_policy(
            {
                "name": "kapp",
                "resource": "kinesis-analytics",
                "filters": [{"ApplicationName": "sock-app"}],
                "actions": ["delete"],
            },
            session_factory=factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            factory().client("kinesisanalytics").describe_application(
                ApplicationName="sock-app"
            )[
                "ApplicationDetail"
            ][
                "ApplicationStatus"
            ],
            "DELETING",
        )
| |
from flask import Flask, request, abort, jsonify
from flask.ext.cors import CORS
from werkzeug.contrib.fixers import ProxyFix
from Models import *
import peewee
from uuid import uuid4
import hashlib
from nocache import nocache
from GeneralApiException import GeneralApiException
import UserApi
from AuthenticationApi import *
import SetUp
from SearchApi import youtubeSearch
from RegexApi import *
from RequestsApi import *
from MopidyApi import *
from time import sleep
from KarmaApi import *
from SpamApi import *
from ServiceApi import *
from BackupApi import *
from FavoritesApi import *
##################
## Server SetUp ##
##################

app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
CORS(app, headers=['Content-Type, Authorization'])
# Flipped to True on the first /playback/add request, so Mopidy's
# "consume" mode is enabled exactly once per server start.
app.hasSetConsume = False
###################
## Error Handler ##
###################

@app.errorhandler(GeneralApiException)
def handle_invalid_usage(error):
    """Serialize a GeneralApiException into a JSON response.

    NOTE(review): errors are returned with HTTP 200 and the failure
    encoded in the body -- confirm clients depend on this before
    changing it to a 4xx/5xx status.
    """
    response = jsonify(error.to_dict())
    response.status_code = 200
    return response
###########
## Users ##
###########

@app.route('/user/<string:key>')
@nocache
def getUser(key):
    """Return the user identified by ``key``."""
    return UserApi.getUser(key)
@app.route('/users/<int:page>')
@nocache
@requireAdmin
def getUsers(page):
    """Return page ``page`` of users. Requires admin authentication."""
    return UserApi.getUsers(str(page))
@app.route('/user', methods=["POST"])
@nocache
@requireAdmin
def CreateUser():
    """Create a new user. Requires admin authentication.

    Example request body:
    {
        "username": "jdoe",
        "password": "boi",
        "firstName": "john",
        "lastName": "doe",
        "email": "jdoe@jdoe.com"
    }
    """
    return UserApi.createUser(request.json)
@app.route('/user/<string:key>/edit', methods=["POST"])
@requireAuth
@nocache
def EditUser(key):
    """Edit the user identified by ``key``. Requires authentication."""
    return UserApi.editUser(key, request.json, request.headers)
@app.route('/users/count', methods=["GET"])
@nocache
@requireAdmin
def CountUsers():
    """Return the number of users. Requires admin authentication."""
    return UserApi.countUsers()
@app.route('/karma/<string:key>', methods=["GET"])
@nocache
@requireAuth
def GetUserKarma(key):
    """Return the karma of user ``key``. Requires authentication."""
    return UserApi.getUserKarma(key)
@app.route('/karma/<string:key>', methods=["POST"])
@nocache
@requireAdmin
def SetUserKarma(key):
    """Set the karma of user ``key``.

    Requires admin authentication (the original docstring said plain
    authentication, but the decorator is requireAdmin).
    """
    return UserApi.setUserKarma(key, request.json)
@app.route('/karma/<string:key>/up', methods=["POST"])
@nocache
@requireAuth
def UpVoteUser(key):
    """Upvote user ``key``. Requires authentication."""
    return UserApi.upVote(key)
@app.route('/karma/<string:key>/down', methods=["POST"])
@nocache
@requireAuth
def DownVoteUser(key):
    """Downvote user ``key``. Requires authentication."""
    return UserApi.downVote(key)
@app.route('/user/<string:key>/ban', methods=["GET"])
@nocache
@requireAdmin
def BanUser(key):
    """Ban user ``key``. Requires admin authentication.

    NOTE(review): a state-changing endpoint exposed via GET -- confirm
    clients before switching to POST.
    """
    return UserApi.banUser(key)
@app.route('/user/<string:key>/unban', methods=["GET"])
@nocache
@requireAdmin
def UnbanUser(key):
    """Unban user ``key``. Requires admin authentication.

    NOTE(review): a state-changing endpoint exposed via GET -- confirm
    clients before switching to POST.
    """
    return UserApi.unbanUser(key)
####################
## Authentication ##
####################

@app.route('/authenticate', methods=["POST"])
@nocache
def Authentication():
    """Authenticate a user with username and password.

    Example request body:
    {
        "username": "jdoe",
        "password": "boi"
    }
    """
    return authentication(request.json)
@app.route('/authenticate/verify', methods=["POST"])
@nocache
def VerifyToken():
    """Verify that a user token is valid.

    Example request body:
    {
        "token": "83f72e63-2e9a-4bba-9b1f-f386f0c633c7"
    }
    """
    return jsonify({'result': validateAuthToken(request.json["token"], False)})
@app.route('/authenticate/verify/admin', methods=["POST"])
@nocache
def VerifyAdminToken():
    """Verify that a user token is a valid admin token.

    Example request body:
    {
        "token": "83f72e63-2e9a-4bba-9b1f-f386f0c633c7"
    }
    """
    return jsonify({'result': validateAuthToken(request.json["token"], True)})
############
## Search ##
############

@app.route('/search/<string:query>', methods=["GET"])
@nocache
@requireAuth
def Search(query):
    """Search YouTube for ``query``. Requires authentication."""
    return youtubeSearch(query)
#############
## Regexes ##
#############

@app.route('/regex/<string:page>', methods=["GET"])
@nocache
@requireAdmin
def GetRegex(page):
    """Return page ``page`` of stored regexes.

    Requires admin authentication (decorator is requireAdmin, although
    the original docstring said plain authentication).
    """
    return getRegex(page)
@app.route('/regex', methods=["POST"])
@nocache
@requireAdmin
def AddRegex():
    """Store a new regex pattern. Requires admin authentication.

    Example request body:
    {
        "pattern": "a*b*c*"
    }
    """
    regex = addRegex(request.json["pattern"])
    return regex
@app.route('/regex/<int:key>', methods=["DELETE"])
@nocache
@requireAdmin
def RemoveRegex(key):
    """Delete the regex identified by the ``key`` URL segment.

    Requires admin authentication. (The original docstring showed a JSON
    body with "key"; the key actually comes from the URL.)
    """
    return removeRegex(key)
@app.route('/regex/count', methods=["GET"])
@nocache
@requireAdmin
def CountRegexes():
    """Return the number of stored regexes. Requires admin authentication."""
    return countRegexes()
################
## Mopidy API ##
################

@app.route('/playback/add', methods=["POST"])
@nocache
@requireAuth #<- temp permissions should requireAdmin
def AddSong():
    """Add a song to the playback queue. Requires authentication."""
    song = request.json['song']
    # Enable Mopidy "consume" mode once, on the first add after startup.
    # NOTE(review): not thread-safe -- two concurrent first requests can
    # both call setConsume(); confirm the server runs single-threaded.
    if not app.hasSetConsume:
        setConsume()
        app.hasSetConsume = True
    return addSong(song)
@app.route('/playback/play', methods=["GET"])
@nocache
#@requireAdmin <- permissions for final version
@requireAuth #<- temp permissions
def PlaySong():
    """Start playback. Currently requires plain authentication
    (admin-only is intended for the final version)."""
    return playSong()
@app.route('/playback/pause', methods=["GET"])
@nocache
#@requireAdmin
def PauseSong():
    """Pause playback.

    NOTE(review): the admin decorator is commented out, so no
    authentication is enforced despite the original docstring.
    """
    return pauseSong()
@app.route('/playback/stop', methods=["GET"])
@nocache
@requireAdmin
def StopSong():
    """Stop playback. Requires admin authentication."""
    return stopSong()
@app.route('/playback/next', methods=["GET"])
@nocache
@requireAdmin
def NextSong():
    """Skip to the next song (admin only)."""
    result = nextSong()
    return result
@app.route('/playback/clear', methods=["GET"])
@nocache
@requireAdmin
def ClearSongs():
    """Clear the playback queue (admin only)."""
    result = clearSongs()
    return result
@app.route('/playback/list', methods=["GET"])
@nocache
@requireAdmin
def GetTracks():
    """List the tracks currently queued for playback (admin only)."""
    tracks = getTracks()
    return tracks
@app.route('/playback/state', methods=["GET"])
@nocache
# @requireAdmin
def GetState():
    """Return the current playback state.

    NOTE(review): docstring claimed admin auth but @requireAdmin is
    commented out -- currently unauthenticated.
    """
    return getState()
@app.route('/playback/consume', methods=["GET"])
@nocache
@requireAdmin
def SetConsume():
    """Stubbed consume-mode endpoint (admin only).

    NOTE(review): despite its name, this handler does not call
    setConsume(); it only returns {"result": "null"}.  Consume mode is
    actually enabled lazily by the /playback/add handler.
    """
    return jsonify({"result": "null"})
@app.route('/volume/up', methods=["GET"])
@nocache
# @requireAdmin
def IncreaseVolume():
    """Raise the playback volume one step (auth currently disabled)."""
    return increaseVolume()
@app.route('/volume/down', methods=["GET"])
@nocache
# @requireAdmin
def DecreaseVolume():
    """Lower the playback volume one step (auth currently disabled)."""
    return decreaseVolume()
@app.route('/volume', methods=["GET"])
@nocache
# @requireAdmin
def GetVolume():
    """Return the current volume as JSON (auth currently disabled)."""
    volume = getVolume()
    return jsonify(volume)
@app.route('/volume', methods=["POST"])
@nocache
# @requireAdmin
def SetVolume():
    """Set the volume from the posted JSON {"key": <level>} (auth disabled)."""
    level = request.json["key"]
    return jsonify(setVolume(level))
##################
## Requests Api ##
##################
@app.route('/queue/current', methods=["GET"])
@nocache
def GetCurrentRequest():
    """Return the request currently being played (no auth required)."""
    current = getCurrentRequest()
    return current
@app.route('/queue/current', methods=["POST"])
@nocache
@requireAdmin
def SetCurrentRequest():
    """Set the currently playing song (admin only).

    Expected JSON body::

        {"key": "<request key>"}
    """
    payload = request.json
    return setCurrentRequest(payload)
@app.route("/queue/current", methods=["DELETE"])
@nocache
@requireAdmin
def DeleteCurrentRequest():
"""
Deletes the currently playing song
"""
return deleteCurrentRequest()
@app.route('/queue/<int:page>', methods=["GET"])
@nocache
def GetRequests(page):
    """Return one page of the request queue (no auth required)."""
    requests_page = getRequests(page)
    return requests_page
@app.route('/queue/<int:key>', methods=["DELETE"])
@nocache
@requireAdmin
def RemoveRequest(key):
    """Remove the queued request identified by *key* (admin only)."""
    result = removeRequest(key)
    return result
@app.route('/queue', methods=["POST"])
@nocache
@requireAuth
def AddRequest():
    """Add a song request from the posted JSON body (auth required).

    The request headers are forwarded so the backend can identify the user.
    """
    return requestSong(request.json, request.headers)
@app.route('/queue/count', methods=["GET"])
@nocache
def CountRequests():
    """Return the number of queued requests (no auth required)."""
    count = countRequests()
    return count
###############
## Karma Api ##
###############
@app.route('/karma/reset', methods=["GET"])
@nocache
@requireAdmin
def ResetCurrentSongKarmaTrackers():
    """Reset the karma trackers for the current song (admin only)."""
    result = resetCurrentSongKarmaTrackers()
    return result
#################
## Service Api ##
#################
@app.route('/service/skip', methods=["GET"])
@nocache
@requireAdmin
def ServiceSkipSong():
    """Issue a service-level skip of the current song (admin only)."""
    result = serviceSkipSong()
    return result
@app.route('/service', methods=["GET"])
@nocache
@requireAdmin
def GetServiceCommands():
    """List pending service commands (admin only)."""
    commands = getServiceCommands()
    return commands
@app.route('/service/<int:key>', methods=["DELETE"])
@nocache
@requireAdmin
def RemoveServiceCommand(key):
    """Delete the service command identified by *key* (admin only)."""
    result = removeServiceCommand(key)
    return result
##############
## Spam Api ##
##############
@app.route('/spam/reset', methods=["GET"])
@nocache
@requireAdmin
def ResetCurrentSongSpamTrackers():
    """Reset the spam trackers for the current song (admin only)."""
    result = resetCurrentSongSpamTrackers()
    return result
###################
## Favorites Api ##
###################
@app.route('/favorites/<int:page>', methods=["GET"])
@nocache
@requireAuth
def GetFavorites(page):
    """Return one page of the caller's favorites (auth required).

    Headers are forwarded so the backend can identify the user.
    """
    return getFavorites(page, request.headers)
@app.route('/favorites', methods=["POST"])
@nocache
@requireAuth
def AddFavorite():
    """Add a favorite from the posted JSON body (auth required)."""
    return addFavorite(request.json, request.headers)
@app.route('/favorites/<int:key>', methods=["DELETE"])
@nocache
@requireAuth
def RemoveFavorite(key):
    """Delete the favorite identified by *key* (auth required)."""
    result = removeFavorite(key)
    return result
@app.route('/favorites/count', methods=["GET"])
@nocache
@requireAuth
def CountFavorites():
    """Return the number of the caller's favorites (auth required)."""
    count = countFavorites(request.headers)
    return count
####################
## Build Database ##
####################
@app.route('/buildDb')
@nocache
# @requireAdmin
def BuildDb():
    """Rebuild the database from scratch via SetUp.main().

    NOTE(review): the original docstring claimed admin authentication,
    but @requireAdmin is commented out -- this destructive endpoint is
    currently unauthenticated.
    """
    SetUp.main()
    return "Database rebuilt"
###################
## Backup Models ##
###################
@app.route('/exportDb')
@nocache
# @requireAdmin
def BackupDb():
    """Export (back up) the database.

    NOTE(review): docstring claimed admin auth but @requireAdmin is
    commented out -- currently unauthenticated.
    """
    backupDb()
    return "Exported database"
###################
## Import Models ##
###################
@app.route('/importDb')
@nocache
# @requireAdmin
def ImportDb():
    """Import a previously exported database.

    NOTE(review): docstring claimed admin auth but @requireAdmin is
    commented out -- currently unauthenticated.
    """
    return importDb()
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
| |
"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the strings methods
return list_public_methods(self) + \
['string.' + method for method in list_public_methods(self.string)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
import xmlrpclib
from xmlrpclib import Fault
import SocketServer
import BaseHTTPServer
import sys
import os
import traceback
import re
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Walk *attr* as a dotted path starting from *obj* and return the
    final attribute.  Raises AttributeError if any path segment starts
    with '_', so private names can never be reached.  When
    allow_dotted_names is false, dots are not split and this behaves
    like getattr(obj, attr).
    """
    names = attr.split('.') if allow_dotted_names else [attr]
    for name in names:
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
            )
        obj = getattr(obj, name)
    return obj
def list_public_methods(obj):
    """Return the names of *obj*'s callable attributes that do not
    start with an underscore (i.e. its public methods)."""
    public = []
    for name in dir(obj):
        if name.startswith('_'):
            continue
        if callable(getattr(obj, name)):
            public.append(name)
    return public
def remove_duplicates(lst):
    """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]

    Return a list copy of *lst* without duplicates.  Every list item
    must be hashable; the order of the items in the resulting list is
    not defined.
    """
    # Use a real set instead of a hand-rolled dict-of-ones; wrapping in
    # list() also guarantees a list is returned on Python 3, where
    # dict.keys() would be a view object.
    return list(set(lst))
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.

    This class is used to register XML-RPC method handlers
    and then to dispatch them. This class doesn't need to be
    instanced directly when used by SimpleXMLRPCServer but it
    can be instanced when used by the MultiPathXMLRPCServer.
    """

    def __init__(self, allow_none=False, encoding=None):
        # Maps exported method name -> callable.
        self.funcs = {}
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding

    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.

        Only one instance can be installed at a time.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.

        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.

        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.

        *** SECURITY WARNING: ***
        Enabling the allow_dotted_names options allows intruders
        to access your module's global variables and may allow
        intruders to execute arbitrary code on your machine. Only
        use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names

    def register_function(self, function, name=None):
        """Registers a function to respond to XML-RPC requests.

        The optional name argument can be used to set a Unicode name
        for the function.
        """
        if name is None:
            name = function.__name__
        self.funcs[name] = function

    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.

        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods': self.system_listMethods,
                           'system.methodSignature': self.system_methodSignature,
                           'system.methodHelp': self.system_methodHelp})

    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.

        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall': self.system_multicall})

    def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
        """Dispatches an XML-RPC method from marshalled (XML) data.

        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.
        """
        try:
            params, method = xmlrpclib.loads(data)

            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = xmlrpclib.dumps(response, methodresponse=1,
                                       allow_none=self.allow_none,
                                       encoding=self.encoding)
        # "except Fault as fault" replaces the Python-2-only
        # "except Fault, fault" comma syntax (works on 2.6+ and 3.x).
        except Fault as fault:
            response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
                                       encoding=self.encoding)
        except:
            # Report the exception back to the client as a Fault.  The
            # bare except is kept so that even non-Exception errors are
            # marshalled instead of killing the connection.
            exc_type, exc_value, exc_tb = sys.exc_info()
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
            )

        return response

    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']

        Returns a list of the methods supported by the server."""

        methods = self.funcs.keys()
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods = remove_duplicates(
                    methods + self.instance._listMethods()
                )
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods = remove_duplicates(
                    methods + list_public_methods(self.instance)
                )
        methods.sort()
        return methods

    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]

        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.

        This server does NOT support system.methodSignature."""

        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'

    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"

        Returns a string containing documentation for the specified method."""

        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                        self.instance,
                        method_name,
                        self.allow_dotted_names
                    )
                except AttributeError:
                    pass

        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            import pydoc
            return pydoc.getdoc(method)

    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]

        Allows the caller to package multiple XML-RPC calls into a single
        request.

        See http://www.xmlrpc.com/discuss/msgReader$1208
        """

        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']

            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                results.append([self._dispatch(method_name, params)])
            except Fault as fault:  # py2.6+/py3-compatible except syntax
                results.append(
                    {'faultCode': fault.faultCode,
                     'faultString': fault.faultString}
                )
            except:
                # Any other error becomes a generic fault entry for this call.
                exc_type, exc_value, exc_tb = sys.exc_info()
                results.append(
                    {'faultCode': 1,
                     'faultString': "%s:%s" % (exc_type, exc_value)}
                )
        return results

    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.

        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called.
        """

        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                        )
                    except AttributeError:
                        pass

        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """

    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')

    # if not None, encode responses larger than this, if possible
    encode_threshold = 1400  # a common MTU

    # Override from StreamRequestHandler: full buffering of output
    # and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True

    # a re to match a gzip Accept-Encoding
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)

    def accept_encodings(self):
        """Parse the Accept-Encoding header into {coding: q-value}."""
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r

    def is_rpc_path_valid(self):
        """Return True if self.path is an allowed RPC endpoint."""
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True

    def do_POST(self):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """

        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10 * 1024 * 1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = ''.join(L)

            data = self.decode_request_content(data)
            if data is None:
                return  # response has been sent

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                data, getattr(self, '_dispatch', None), self.path
            )
        # "except Exception as e" replaces the Python-2-only comma syntax.
        except Exception as e:  # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)

            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                self.send_header("X-traceback", traceback.format_exc())

            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = xmlrpclib.gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

    def decode_request_content(self, data):
        """Decode the request body per its content-encoding, or send an
        error response and return None."""
        # support gzip encoding of request
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return xmlrpclib.gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        self.send_header("Content-length", "0")
        self.end_headers()

    def report_404(self):
        """Send a plain-text 404 response."""
        self.send_response(404)
        response = 'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""
        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Combines a TCP server with the dispatcher mix-in: registered
    functions and/or a single registered instance handle incoming
    XML-RPC calls.  Override the _dispatch method inherited from
    SimpleXMLRPCDispatcher to change dispatch behavior.
    """

    allow_reuse_address = True

    # Warning: debugging only!  When True, exception details and stack
    # traces are sent back to clients from
    # SimpleXMLRPCRequestHandler.do_POST -- never enable in production.
    _send_traceback_header = False

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        self.logRequests = logRequests

        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        SocketServer.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)

        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            old_flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD,
                        old_flags | fcntl.FD_CLOEXEC)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server

    This specialization of SimpleXMLRPCServer lets the user create
    several Dispatcher instances and map each one to a different HTTP
    request path, so two or more 'virtual XML-RPC servers' can share a
    single port.  Make sure the requestHandler accepts the paths in
    question.
    """

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate)
        # Maps request path -> dispatcher instance.
        self.dispatchers = {}
        self.allow_none = allow_none
        self.encoding = encoding

    def add_dispatcher(self, path, dispatcher):
        """Register *dispatcher* for *path* and return it."""
        self.dispatchers[path] = dispatcher
        return dispatcher

    def get_dispatcher(self, path):
        """Return the dispatcher registered for *path*."""
        return self.dispatchers[path]

    def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
        """Route the marshalled request to the dispatcher for *path*."""
        try:
            response = self.dispatchers[path]._marshaled_dispatch(
                data, dispatch_method, path)
        except:
            # report low level exception back to server
            # (each dispatcher should have handled their own
            # exceptions)
            exc_type, exc_value = sys.exc_info()[:2]
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none)
        return response
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""

    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request: dispatch it and print the
        response with HTTP headers on stdout."""
        response = self._marshaled_dispatch(request_text)
        # sys.stdout.write replaces the Python-2-only print statements;
        # the emitted bytes are identical.
        sys.stdout.write('Content-Type: text/xml\n')
        sys.stdout.write('Content-Length: %d\n' % len(response))
        sys.stdout.write('\n')
        sys.stdout.write(response)

    def handle_get(self):
        """Handle a single HTTP GET request.

        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """
        code = 400
        message, explain = \
            BaseHTTPServer.BaseHTTPRequestHandler.responses[code]

        response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
            {
                'code': code,
                'message': message,
                'explain': explain
            }
        sys.stdout.write('Status: %d %s\n' % (code, message))
        sys.stdout.write('Content-Type: %s\n' % BaseHTTPServer.DEFAULT_ERROR_CONTENT_TYPE)
        sys.stdout.write('Content-Length: %d\n' % len(response))
        sys.stdout.write('\n')
        sys.stdout.write(response)

    def handle_request(self, request_text=None):
        """Handle a single XML-RPC request passed through a CGI post method.

        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """
        if request_text is None and \
                os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                # int(None) raises TypeError when CONTENT_LENGTH is unset,
                # which falls through to the unbounded read below.
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (TypeError, ValueError):
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)

            self.handle_xmlrpc(request_text)
if __name__ == '__main__':
    # Demo server: exposes pow() and an anonymous 'add' function on
    # port 8000.  print(...) with a single argument behaves identically
    # under Python 2 and 3, unlike the old print statement.
    print('Running XML-RPC server on port 8000')
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x, y: x + y, 'add')
    server.serve_forever()
| |
import logging
import random
from bearlibterminal import terminal
from clubsandwich.ui import LayoutOptions
from clubsandwich.ui import UIScene, ScrollingTextView, WindowView
import settings
from areas.level import Level
from characters import actions
from combat import AttackContext
from combat.attacks.ranged.base import RangedAttack
from data.python_templates.characters import character_templates
from data.python_templates.items import item_templates
from generators import dungeon_generator, forest_generator
from managers.action_manager import ActionManager
from managers.echo import EchoService
from scenes.game.windows import GameWindow, ItemQueryWindow, InventoryWindow, HudWindow
from util.cursor import Cursor
logger = logging.getLogger(__name__)
# NOTE(review): attaching a handler and forcing DEBUG at import time is
# heavy-handed for a module; logging configuration is normally left to
# the application entry point.
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
class GameScene(UIScene):
    """
    This handles everything relating to the UI in the game window.

    Keyboard input arrives through terminal_read and is translated into
    player actions; after any turn-consuming action, every monster on
    the level acts via _monsters_take_turn.
    """
    ID = "Game"

    def __init__(self, game_context):
        """Build the scene's windows, wire up services and start a new game."""
        self.console = ScrollingTextView(
            12, 110, layout_options=LayoutOptions(top=None, height=12, bottom=0, left=1, right=None, width=0.98))
        # EchoService registers itself as a singleton on construction.
        EchoService(self.console, game_context)
        game_context.action_manager = ActionManager(game_context)
        self.game_view = GameWindow(
            game_context,
            layout_options=LayoutOptions(top=10, height=30, bottom=None, left=0, right=None, width=1))
        self.game_context = game_context
        self.hud_view = HudWindow(
            game_context,
            layout_options=LayoutOptions(top=0, height=10, bottom=None, left=0, right=None, width=1))
        super().__init__(WindowView("", subviews=[self.hud_view, self.game_view, self.console]))
        self.loaded_levels = []
        logger.info("Initialized GameScene")
        logger.info("Starting new game.")
        self.new_game()
        self.movement_keys = settings.KEY_MAPPINGS
        self.cursor = None

    def terminal_read(self, val):
        """Translate a single terminal keypress into a game action."""
        # TODO It would be nice to only set moved=True if the action succeeded.
        player = self.game_context.player
        moved = False
        # While a targeting cursor is active it captures input first.
        if self.cursor:
            if val in self.movement_keys:
                key_x, key_y = self.movement_keys[val]
                actions.move(self.cursor, key_x, key_y)
                return
            # == instead of "is": identity comparison of ints only works
            # by accident of small-int caching.
            if val == terminal.TK_ENTER:
                self.cursor.on_enter()
                self.cursor = None
                self.game_view.camera.character_focus = player
            if val == terminal.TK_ESCAPE:
                self.cursor = None
                self.game_view.camera.character_focus = player
        if player.is_dead():
            return
        # Waiting in place (numpad 5 / '.') still consumes a turn.
        if val == terminal.TK_KP_5 or val == terminal.TK_PERIOD:
            moved = True
        if val in self.movement_keys:
            key_x, key_y = self.movement_keys[val]
            self.game_context.action_manager.move_or_attack(player, key_x, key_y)
            moved = True
        if val == terminal.TK_I:
            self.director.push_scene(InventoryWindow(*self._get_all_player_items()))
            return
        if val == terminal.TK_D:
            self.director.push_scene(ItemQueryWindow(self._drop_item_callback, *self._get_all_player_items()))
            return
        if val == terminal.TK_E:
            self.director.push_scene(ItemQueryWindow(self._consume_item_callback, *self._get_all_player_items()))
            return
        if val == terminal.TK_G:
            # Pick up every item on the player's tile.
            for item in self.game_context.player.location.level.spawned_items:
                if item.location.get_local_coords() == self.game_context.player.location.get_local_coords():
                    actions.get(self.game_context.player, item)
                    moved = True
        if val == terminal.TK_R:
            # Only equipped (wielded/worn) items can be removed.
            wielded_items, worn_items, _ = self._get_all_player_items()
            self.director.push_scene(ItemQueryWindow(self._remove_item_callback, wielded_items, worn_items, []))
            return
        if val == terminal.TK_W:
            self.director.push_scene(ItemQueryWindow(self._wear_wield_item_callback, *self._get_all_player_items()))
            return
        if val == terminal.TK_F:
            closest_monster = self.get_closest_monster(player)
            ranged_weapon = RangedAttack.get_ranged_weapon(player)
            if not ranged_weapon:
                EchoService.singleton.echo("You have nothing to fire with.")
                return
            else:
                EchoService.singleton.echo("You are aiming with " + ranged_weapon.name)

            def attack_wrapper(_monster):
                # Invoked by the cursor when the player confirms a target.
                attack_context = AttackContext(
                    attacker=player,
                    defender=_monster,
                    attacker_weapon=ranged_weapon,
                    ranged=True
                )
                if _monster.location.get_local_coords() in player.fov:
                    actions.attack(player, _monster, attack_context)
                self.update_turn(player)

            # Start aiming at the closest visible monster when there is one.
            if closest_monster:
                self.cursor = Cursor(closest_monster.location.copy(), attack_wrapper)
            else:
                self.cursor = Cursor(player.location.copy(), attack_wrapper)
            self.game_view.camera.character_focus = self.cursor
        if val == terminal.TK_X:
            # Free-look cursor: examine the map without acting.
            def clear_cursor(monster):
                self.cursor = None
                self.game_view.camera.character_focus = player
            self.cursor = Cursor(player.location.copy(), clear_cursor)
            self.game_view.camera.character_focus = self.cursor
            return
        if moved:
            self.update_turn(player)

    def update_turn(self, player):
        """Advance the world one turn: the player updates, then monsters act."""
        player.update()
        self._monsters_take_turn(player)

    def _monsters_take_turn(self, player):
        """Let every spawned monster on the player's level update and act."""
        for monster in player.location.level.spawned_monsters:
            monster.update()
            self.game_context.action_manager.monster_take_turn(monster, player)

    def _get_all_player_items(self):
        """Return (wielded, worn, inventory) item lists for the player."""
        player = self.game_context.player
        wielded_items = []
        worn_items = []
        inventory_items = []
        if player.equipment:
            wielded_items = player.equipment.get_wielded_items()
            worn_items = player.equipment.get_worn_items()
        if player.inventory:
            inventory_items = player.inventory.get_all_items()
        return wielded_items, worn_items, inventory_items

    def _consume_item_callback(self, chosen_item):
        """Consume the chosen item; monsters then take their turn."""
        player = self.game_context.player
        actions.consume(player, chosen_item)
        self._monsters_take_turn(player)

    def _drop_item_callback(self, chosen_item):
        """Drop the chosen item; monsters then take their turn."""
        player = self.game_context.player
        actions.drop(player, chosen_item)
        self._monsters_take_turn(player)

    def _wear_wield_item_callback(self, chosen_item):
        """Wear/wield the chosen item; monsters then take their turn."""
        player = self.game_context.player
        actions.wear_wield(player, chosen_item)
        self._monsters_take_turn(player)

    def _remove_item_callback(self, chosen_item):
        """Unequip the chosen item; monsters then take their turn."""
        player = self.game_context.player
        actions.remove_item(player, chosen_item)
        self._monsters_take_turn(player)

    def new_game(self):
        """Create the initial level and populate it."""
        # TODO This should prepare the first level
        level = Level()
        level.name = "DEFAULT"
        level.min_room_size = 1
        level.max_room_size = 10
        level.max_rooms = 10
        level.width = 80
        level.height = 45
        self.init_dungeon(level)

    def init_dungeon(self, level):
        """Pick a random generator, generate *level* and place its contents."""
        generator_cls = random.choice(
            (
                forest_generator.ForestGenerator,
                dungeon_generator.DungeonGenerator
            )
        )
        generator = generator_cls(self.game_context.factory_service)
        player = self.game_context.player
        player.is_player = True
        generator.generate(level)
        self.place_dungeon_objects(level, player, generator)

    def place_dungeon_objects(self, level, player, generator):
        """Build spawn lists from the templates and run the forerunner."""
        character_factory = self.game_context.character_factory
        item_factory = self.game_context.item_factory
        # Iterate the template dicts' keys directly; the values were unused.
        level.monster_spawn_list = [character_factory.build(uid) for uid in character_templates]
        level.item_spawn_list = [item_factory.build(uid) for uid in item_templates]
        forerunner = generator.forerunner(level, player)
        forerunner.run()

    def get_closest_monster(self, player):
        """Return the closest living monster in the player's FOV, or None.

        Distance is Manhattan distance in local coordinates.
        """
        closest_delta = None
        closest_monster = None
        p_x, p_y = player.location.get_local_coords()
        for monster in player.location.level.spawned_monsters:
            if monster.is_dead():
                continue
            monster_x, monster_y = monster.location.get_local_coords()
            delta = abs(p_x - monster_x) + abs(p_y - monster_y)
            if closest_delta is None or delta < closest_delta:
                closest_delta = delta
                closest_monster = monster
        # Guard: previously this dereferenced closest_monster even when no
        # living monster existed, raising AttributeError.
        if closest_monster is None:
            return None
        if closest_monster.location.get_local_coords() in player.fov:
            return closest_monster
        return None
| |
## A Collection of NURBS curve utility functions
## J Eisenmann
## ACCAD, The Ohio State University
## 2012-13
from math import *
from Vector import *
import maya.cmds as mc
import maya.mel as mm
# Ensure the closestPointOnCurve plugin is available before any helper below
# creates such a node (quiet=True suppresses the "already loaded" warning).
mc.loadPlugin("closestPointOnCurve", quiet=True)
def drawLine(pt1, pt2):
    """Draw a straight degree-3 curve segment between two points.

    Accepts either Vector instances (anything with an asTuple() method) or
    plain 3-tuples/lists.  Each endpoint is doubled so the cubic curve runs
    straight between them.

    FIX: the bare ``except`` hid real errors from mc.curve; only the missing
    asTuple() attribute should select the tuple/list path.
    """
    try:  # if pt1 and pt2 are Vectors
        mc.curve( p=[pt1.asTuple(), pt1.asTuple(), pt2.asTuple(), pt2.asTuple()] )
    except AttributeError:  # if pt1 and pt2 are tuples or lists
        mc.curve( p=[pt1, pt1, pt2, pt2] )
def connectedNodeOfType( curve, type ):
    """Return the name of the first node of the given node type connected
    downstream of curve.worldSpace, or None when no match is found.

    NOTE(review): ``type`` shadows the builtin; renaming would change the
    keyword interface, so it is left as-is.  Also confirm mc.connectionInfo
    always returns an iterable here (it can return a false-y value when
    there are no destination connections).
    """
    for node in mc.connectionInfo( curve+".worldSpace", destinationFromSource=True):
        if( type == mc.nodeType(node) ):
            # connectionInfo yields plugs ("node.attr"); strip to the node name.
            return node.split('.')[0].split('>')[-1]
    return None
def getCurveInfoNode( curve ):
infoNode = connectedNodeOfType( curve, "curveInfo" )
if not infoNode:
print "adding an info node to curve: "+curve
infoNode = mc.createNode("curveInfo")
mc.connectAttr( curve+".worldSpace", infoNode+".inputCurve")
return infoNode
def getCurveArcLenDimNode( curve ):
arcLenDimNode = connectedNodeOfType( curve, "arcLengthDimension" )
if not arcLenDimNode:
max = mc.getAttr( curve+".maxValue" )
print "adding an arcLengthDimension node to curve: "+curve
arcLenDimNode = mc.arcLengthDimension( curve+".u[%f]"%max )
return arcLenDimNode
def getClosestPointNode( curve ):
cpNode = connectedNodeOfType( curve, "closestPointOnCurve" )
if not cpNode:
print "adding a closestPointOnCurve node to curve: "+curve
cpNode = mc.closestPointOnCurve(curve);
return cpNode
def findParamAtPoint( curve, point ):
    """Return the U parameter of the location on curve closest to point."""
    node = getClosestPointNode( curve )
    px, py, pz = point[0], point[1], point[2]
    mc.setAttr(node+".inPosition", px, py, pz )
    return mc.getAttr(node+".paramU")
def findArcLenAtParam( curve, param ):
    """Return the arc length of curve from its start up to parameter param."""
    dim = getCurveArcLenDimNode( curve )
    mc.setAttr( dim+".uParamValue", param )
    return mc.getAttr( dim+".arcLength" )
def curveArcLen( curve ):
    """Return the total arc length of curve (length at its max U value)."""
    end_u = mc.getAttr( curve+".maxValue" )
    return findArcLenAtParam( curve, end_u )
def findParamAtArcLen( curve, distance, epsilon=0.0001 ):
    """Return the U parameter value at a specified length along a curve.

    Bisects parameter space until the measured arc length is within
    ``epsilon`` of ``distance``.
    (Adapted from: http://ewertb.soundlinker.com/mel/mel.108.php)

    curve    -- name of the NURBS curve
    distance -- target arc length, clamped to [0, total length]
    epsilon  -- acceptable arc-length error

    BUG FIX: the convergence test compared against an undefined name
    ``tol``; the tolerance parameter is ``epsilon``.  Locals that shadowed
    the builtins ``min``/``max`` were also renamed.
    """
    u = 0.0
    lo = mc.getAttr( curve+".minValue" )
    hi = mc.getAttr( curve+".maxValue" )
    arcLength = findArcLenAtParam( curve, hi )
    # Don't bother doing any work for the start or end of the curve.
    if distance <= 0.0:
        return 0.0
    if distance >= arcLength:
        return hi
    # This is merely a diagnostic to measure the number of passes required to
    # find any particular point. You may be surprised that the number of
    # passes is typically quite low.
    passes = 1
    while True:
        u = ( lo + hi ) / 2.0
        arcLength = findArcLenAtParam( curve, u )
        if abs(arcLength - distance) < epsilon:
            break
        if arcLength > distance:
            hi = u
        else:
            lo = u
        passes += 1
    return u
def findParamAtArcPercent( curve, percent, epsilon=0.0001 ):
    """Return the U parameter at percent (0..1) of curve's total arc length."""
    end_u = mc.getAttr( curve+".maxValue" )
    total = findArcLenAtParam( curve, end_u )
    return findParamAtArcLen( curve, percent*total, epsilon )
def findCVsInRange( curve, start, end ):
    """ Returns a list of (index, u, lengthPercent) tuples for the CVs of
    "curve" whose u parameter values fall between "start" and "end"
    (given as percentages of arc length).  Returns [] for invalid ranges. """
    indices = []
    if( end >= start and start >= 0.0 and end <= 1.0):
        # Convert the percentage range into a U-parameter range.
        a = findParamAtArcPercent( curve, start )
        b = findParamAtArcPercent( curve, end )
        # get CV positions in local (object) space
        CVs = mc.getAttr( curve+".cv[*]" )
        # translate them into global (world) space
        # NOTE(review): only the world translation is applied here; rotation
        # and scale on the curve's transform are ignored -- confirm curves
        # are never rotated/scaled when this is used.
        CVs = [(Vector(cv)+Vector(mc.xform(curve, q=True, ws=True, translation=True))).asTuple() for cv in CVs]
        for I,cv in enumerate(CVs):
            U = findParamAtPoint(curve, cv)
            L = findArcLenAtParam(curve, U)/curveArcLen(curve) # arc length as a percentage
            if( a <= U and U <= b ):
                indices.append((I,U,L))
    return indices
def arcCurve( curve, t1, t2 ):
""" Perturb the tangents on the initial curve """
cv1 = list(mc.getAttr( curve+".cv[1]" )[0])
cv2 = list(mc.getAttr( curve+".cv[2]" )[0])
print cv1, cv2
for i in range(3):
cv1[i] += t1[i]
cv2[i] += t2[i]
mc.setAttr( curve+".cv[1]", cv1[0], cv1[1], cv1[2] )
mc.setAttr( curve+".cv[2]", cv2[0], cv2[1], cv2[2] )
return curve
def evenlyDivideCurve( curve, numDiv ):
    """ Divides a curve into numDiv segments by inserting CVs at even
    arc-length percentages.
    Assumes there are two CVs at the start and end of the curve (4 total);
    returns the rebuilt curve, or None on a CV-count error. """
    # first, move the curve to the origin so object-space CV positions
    # coincide with world space while we sample the curve
    translation = mc.xform(curve, q=True, ws=True, translation=True)
    rotation = mc.xform(curve, q=True, ws=True, rotation=True)
    mc.move(0, 0, 0, curve)
    mc.rotate(0, 0, 0, curve)
    # get the curve info node
    infoNode = getCurveInfoNode(curve)
    Knots = list( mc.getAttr( infoNode+".knots" )[0] )
    CVs = mc.getAttr( curve+".cv[*]" )
    numOrigCVs = len(CVs)
    # numOrigKnots is currently unused; kept for parity with CV count.
    numOrigKnots = len(Knots)
    if( not numOrigCVs == 4 ):
        print("ERROR: original curve must have exactly 4 CVs")
        return
    else:
        # With numOrigCVs == 4 this loop runs for p in [0, numDiv].
        # The first/last two CVs are repositioned in place; interior CVs
        # (and their knots) are inserted at even arc-length percentages.
        for p in range(0,(numDiv-numOrigCVs+4+1)):
            # NOTE(review): p == 0 yields a negative percent, relying on
            # findParamAtArcLen clamping to 0.0 -- confirm intentional.
            percent = (p-1)/float(numDiv-2)
            u = findParamAtArcPercent( curve, percent )
            if p < 2 or p >= (numDiv-numOrigCVs+3):
                CVs[p] = tuple(mc.pointOnCurve(curve, parameter=u))
            else:
                CVs.insert(p, tuple(mc.pointOnCurve(curve, parameter=u)) )
                Knots.insert(p+1, u)
        # rebuild the curve in place (r=True replaces) with new CVs/knots
        curve = mc.curve( curve,r=True, p=CVs, k=Knots)
    # restore the original transform
    mc.move(translation[0], translation[1], translation[2], curve)
    mc.rotate(rotation[0], rotation[1], rotation[2], curve)
    return curve
def bias(b, t):
    """Schlick/Perlin-style bias: remap t in [0,1]; bias(0.5, t) == t."""
    exponent = log(b) / log(0.5)
    return t ** exponent
def gain(g, t):
    """Perlin-style gain: symmetric easing about t = 0.5; gain(0.5, t) == t."""
    if t < 0.5:
        return 0.5 * bias(1 - g, 2 * t)
    return 1 - bias(1 - g, 2 - 2 * t) / 2.0
def smoothstep(a, fuzz, t):
    """Ease from 0 to 1 as t rises across the window [a - fuzz, a]."""
    if t < a - fuzz:
        return 0.0
    if t > a:
        return 1.0
    return gain(0.9, (t - (a - fuzz)) / fuzz)
def pulse(a, b, fuzz, t):
    """Soft box pulse: ~1 for t between a and b, easing over width fuzz."""
    rising = smoothstep(a, fuzz, t)
    falling = smoothstep(b, fuzz, t)
    return rising - falling
def oscillateCurve( curve, start=0.0, end=1.0, freq=1.0, ease=0.5, strength=1.0 ):
    """ Oscillates a given curve by moving each vertex in an alternating
        direction based on the normal.  This process takes place over the
        range defined by "start" and "end" as percentages of arc length.
        Oscillation eases to full strength as determined by the "ease" and
        "strength" arguments.  Returns the curve name. """
    # clamp: ease must be between 0 and half the affected range
    if(ease > (end-start)*0.5):
        ease = (end-start)*0.5
    if(start < end):
        CVs = mc.getAttr( curve+".cv[*]" )
        newCVs = findCVsInRange(curve, start, end)
        for (I,U,L) in newCVs:
            # interp: 0..1 position of this CV within the affected range
            interp = (L-start)/(end-start)
            osc = sin(freq*interp)
            scale = pulse(start+ease, end, ease, L) # ease must be between 0 and 0.5
            ## Don't use Maya's normalized normal -- it flip flops with curvature so it's not good for oscillating offset
            # normal = Vector(mc.pointOnCurve(curve, parameter=cv[1], normalizedNormal=True))
            # if(normal.mag() == 0.0):
            #   print "Getting normal from up x tangent"
            # stable offset direction: world-up crossed with the tangent
            normal = Vector(0,1,0)**Vector(mc.pointOnCurve(curve, parameter=U, tangent=True))
            normal = normal.norm()
            pos = Vector(CVs[I])
            pos = pos+normal*scale*strength*osc
            CVs[I] = pos.asTuple()
        # write all (possibly modified) CV positions back to the curve
        for i,cv in enumerate(CVs):
            mc.setAttr(curve+".cv[%d]"%i, cv[0], cv[1], cv[2])
    return curve
def noise(x=0, y=None, z=None):
    """ Returns a Perlin noise value based on 1D or 3D input.

    Accepts a Vector, a 3-element sequence, three scalars (x, y, z), or a
    single scalar for 1D noise.
    NOTE(review): a sequence whose length is not 3 falls through the try
    block and returns None, and the bare except routes *any* failure in the
    first two branches to the scalar path -- confirm this is intended. """
    try:
        if( isinstance(x, Vector) ): # if x is a Vector
            return mm.eval("noise <<%f, %f, %f>>"%x.asTuple())
        elif( len(x) == 3 ): # if x is a sequence
            return mm.eval("noise <<%f, %f, %f>>"%x)
    except:
        if(not y == None and not z == None): # if y and z have values
            return mm.eval("noise <<%f, %f, %f>>"%(x,y,z))
        else: # otherwise just use 1D data
            return mm.eval("noise %f"%x)
def noiseCurve( curve, start=0.0, end=1.0, freq=1.0, ease=0.5, strength=1.0 ):
    """ Adds noise to a given curve by moving each vertex with Perlin
        noise based on the normal.  This process takes place over the
        range defined by "start" and "end" as percentages of arc length.
        Noise eases to full strength as determined by the "ease" and
        "strength" arguments.  Returns the curve name.

        FIXES: the per-CV debug print formatted the attribute index with
        cv[0] (the x coordinate) instead of the index i; the curve is now
        returned, for consistency with oscillateCurve. """
    if(ease > (end-start)*0.5): # ease must be between 0 and 0.5
        ease = (end-start)*0.5
    if(start < end):
        CVs = mc.getAttr( curve+".cv[*]" )
        newCVs = findCVsInRange(curve, start, end)
        for (I,U,L) in newCVs:
            # 0..1 position of this CV within the affected range
            interp = (L-start)/(end-start)
            noiz = noise(freq*interp)
            scale = pulse(start+ease, end, ease, L)
            # stable offset direction: world-up crossed with the tangent
            # (Maya's normalizedNormal flips with curvature)
            normal = Vector(0,1,0)**Vector(mc.pointOnCurve(curve, parameter=U, tangent=True))
            normal = normal.norm()
            pos = Vector(CVs[I])
            pos = pos+normal*scale*strength*noiz
            CVs[I] = pos.asTuple()
        for i,cv in enumerate(CVs):
            print(curve+".cv[%d]"%i, cv[0], cv[1], cv[2])
            mc.setAttr(curve+".cv[%d]"%i, cv[0], cv[1], cv[2])
    return curve
def twistCurve( curve, start=0.0, end=1.0, freq=1.0, ease=0.5, strength=1.0 ):
""" Twist the curve over the range defined by "start" and "end" as percentages of arc length.
The twist operation happens in world space. Twist eases to full strength as determined by
the "ease" and "strength" arguments. """
if(ease > (end-start)*0.5): # ease must be between 0 and 0.5
ease = (end-start)*0.5
if(start < end):
CVs = mc.getAttr( curve+".cv[*]" )
newCVs = findCVsInRange(curve, start, end)
for (I,U,L) in newCVs:
interp = (L-start)/(end-start)
bounds = mc.exactWorldBoundingBox(curve)
boundsXmin = bounds[0]
boundsWidth = bounds[3] - bounds[0]
boundsZcenter = (bounds[2]+bounds[5])*0.5
scale = pulse(start+ease, end, ease, L)
twistT = (((CVs[I][0] - boundsXmin)/boundsWidth))*2*pi*freq
print "(((%f - %f)/%f)) = %f --> %f"%(CVs[I][0],boundsXmin,boundsWidth,(((CVs[I][0] - boundsXmin)/boundsWidth)), twistT)
CVs[I] = (CVs[I][0],
0,
scale*strength*((CVs[I][2]-boundsZcenter)*sin(twistT) + CVs[I][1]*cos(twistT)) + boundsZcenter)
for i,cv in enumerate(CVs):
mc.setAttr(curve+".cv[%d]"%i, cv[0], cv[1], cv[2])
def printCurveDetails( curve ):
infoNode = getCurveInfoNode(curve)
Knots = list( mc.getAttr( infoNode+".knots" )[0] )
CVs = mc.getAttr( curve+".cv[*]" )
print "Curve Details for: "+curve
for k in Knots:
print k
for cv in CVs:
print cv
| |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.
import contextlib
import time
import yaml
import sys
import tempfile
import shutil
import os
import threading
import Queue
import testtools
import nose.tools
import cloudify.logs
from testtools.matchers import ContainsAll
from cloudify.decorators import workflow, operation
from cloudify.exceptions import NonRecoverableError
from cloudify.workflows import local
from cloudify.workflows import workflow_context
from cloudify.workflows.workflow_context import task_config
# Plugin identity baked into the generated blueprints; tests assert these
# values via ctx.plugin.package_name / package_version.
PLUGIN_PACKAGE_NAME = 'test-package'
PLUGIN_PACKAGE_VERSION = '1.1.1'
@nose.tools.nottest
class BaseWorkflowTest(testtools.TestCase):
    """Harness for running workflows through ``cloudify.workflows.local``.

    Test-supplied workflow/operation functions are injected into a temp
    module named after the running test, a blueprint mapping to them is
    generated on disk, and a local environment is initialized (or loaded)
    to execute it.

    NOTE(review): ``storage_cls`` is referenced but never assigned here --
    it is assumed to be provided by subclasses; confirm against them.

    FIXES relative to the original:
    - ``_mock_stdout_event_and_log`` restored ``stdout_event_out`` twice
      and never restored ``stdout_log_out``.
    - ``_setup_env`` indexed ``workflow_methods[0]`` even when the default
      ``None`` was passed, raising TypeError.
    """

    def setUp(self):
        super(BaseWorkflowTest, self).setUp()
        # Scratch tree: generated blueprint and local storage live under it.
        self.work_dir = tempfile.mkdtemp(prefix='cloudify-workflows-')
        self.blueprint_dir = os.path.join(self.work_dir, 'blueprint')
        self.storage_dir = os.path.join(self.work_dir, 'storage')
        self.storage_kwargs = {}
        self.env = None
        os.mkdir(self.storage_dir)
        self.addCleanup(self.cleanup)

    def cleanup(self):
        """Remove scratch files and the per-test module from sys.modules."""
        shutil.rmtree(self.work_dir)
        self._remove_temp_module()

    def _init_env(self, blueprint_path,
                  inputs=None,
                  name=None,
                  ignored_modules=None,
                  provider_context=None):
        """Initialize a fresh local env for the blueprint at blueprint_path."""
        if name is None:
            name = self._testMethodName
        storage = self.storage_cls(**self.storage_kwargs)
        # File storage persists across runs; wipe it so init starts clean
        # (unless the storage dir is shared with the blueprint dir).
        if isinstance(storage, local.FileStorage) \
                and (self.storage_dir != self.blueprint_dir):
            shutil.rmtree(self.storage_kwargs['storage_dir'])
        return local.init_env(blueprint_path,
                              name=name,
                              inputs=inputs,
                              storage=storage,
                              ignored_modules=ignored_modules,
                              provider_context=provider_context)

    def _load_env(self, name):
        """Load a previously initialized env by name (default: test name)."""
        if name is None:
            name = self._testMethodName
        storage = self.storage_cls(**self.storage_kwargs)
        return local.load_env(name=name,
                              storage=storage)

    def _setup_env(self,
                   workflow_methods=None,
                   operation_methods=None,
                   use_existing_env=True,
                   name=None,
                   inputs=None,
                   create_blueprint_func=None,
                   workflow_parameters_schema=None,
                   load_env=False,
                   ignored_modules=None,
                   operation_retries=None,
                   operation_retry_interval=None,
                   provider_context=None):
        """Generate a blueprint around the given methods and init/load env."""
        if create_blueprint_func is None:
            create_blueprint_func = self._blueprint_1

        def stub_op(ctx, **_):
            pass
        if operation_methods is None:
            operation_methods = [stub_op]
        # ROBUSTNESS FIX: tolerate the default None before indexing [0].
        if workflow_methods is None:
            workflow_methods = [None]
        if workflow_methods[0] is None:
            def workflow_method(ctx, **_):
                instance = _instance(ctx, 'node')
                instance.set_state('state').get()
                instance.execute_operation('test.op0')
            workflow_methods = [workflow_method]

        # same as @workflow above the method
        workflow_methods = [workflow(m) for m in workflow_methods]
        # same as @operation above each op method
        operation_methods = [operation(m) for m in operation_methods]

        # The blueprint maps tasks to 'p.<test_name>.<func_name>', so the
        # decorated functions must be importable from a module named after
        # the test.
        temp_module = self._create_temp_module()
        for workflow_method in workflow_methods:
            setattr(temp_module,
                    workflow_method.__name__,
                    workflow_method)
        for operation_method in operation_methods:
            setattr(temp_module,
                    operation_method.__name__,
                    operation_method)

        blueprint = create_blueprint_func(workflow_methods,
                                          operation_methods,
                                          workflow_parameters_schema,
                                          ignored_modules,
                                          operation_retries,
                                          operation_retry_interval)

        # Write the blueprint plus the files it references (an import and a
        # downloadable resource).
        inner_dir = os.path.join(self.blueprint_dir, 'inner')
        if not os.path.isdir(self.blueprint_dir):
            os.mkdir(self.blueprint_dir)
        if not os.path.isdir(inner_dir):
            os.mkdir(inner_dir)
        with open(os.path.join(inner_dir, 'imported.yaml'), 'w') as f:
            f.write('node_types: { imported_type: {} }')
        with open(os.path.join(self.blueprint_dir, 'resource'), 'w') as f:
            f.write('content')
        blueprint_path = os.path.join(self.blueprint_dir, 'blueprint.yaml')
        with open(blueprint_path, 'w') as f:
            f.write(yaml.safe_dump(blueprint))
        if not self.env or not use_existing_env:
            if load_env:
                self.env = self._load_env(name)
            else:
                self.env = self._init_env(blueprint_path,
                                          inputs=inputs,
                                          name=name,
                                          ignored_modules=ignored_modules,
                                          provider_context=provider_context)

    def _execute_workflow(self,
                          workflow_method=None,
                          operation_methods=None,
                          use_existing_env=True,
                          execute_kwargs=None,
                          name=None,
                          inputs=None,
                          create_blueprint_func=None,
                          workflow_parameters_schema=None,
                          workflow_name='workflow0',
                          load_env=False,
                          setup_env=True,
                          ignored_modules=None,
                          operation_retries=None,
                          operation_retry_interval=None,
                          provider_context=None):
        """Set up (or load) an env and execute workflow_name against it."""
        if setup_env:
            self._setup_env(
                workflow_methods=[workflow_method],
                operation_methods=operation_methods,
                use_existing_env=use_existing_env,
                name=name,
                inputs=inputs,
                create_blueprint_func=create_blueprint_func,
                workflow_parameters_schema=workflow_parameters_schema,
                load_env=load_env,
                ignored_modules=ignored_modules,
                operation_retries=operation_retries,
                operation_retry_interval=operation_retry_interval,
                provider_context=provider_context)
        elif load_env:
            self.env = self._load_env(name)

        execute_kwargs = execute_kwargs or {}
        final_execute_kwargs = {
            'task_retries': 0,
            'task_retry_interval': 1
        }
        final_execute_kwargs.update(execute_kwargs)

        return self.env.execute(workflow_name, **final_execute_kwargs)

    def _blueprint_1(self, workflow_methods, operation_methods,
                     workflow_parameters_schema, ignored_modules,
                     operation_retries, operation_retry_interval):
        """Build the default blueprint dict: five nodes, per-index
        'test.opN' operations and 'workflowN' workflows mapped into the
        per-test temp module."""
        interfaces = {
            'test': dict(
                ('op{0}'.format(index),
                 {'implementation': 'p.{0}.{1}'.format(self._testMethodName,
                                                       op_method.__name__),
                  'max_retries': operation_retries,
                  'retry_interval': operation_retry_interval})
                for index, op_method in
                enumerate(operation_methods)
            )
        }
        if ignored_modules:
            interfaces['test'].update({'ignored_op': 'p.{0}.ignored'
                                       .format(ignored_modules[0])})
        workflows = dict((
            ('workflow{0}'.format(index), {
                'mapping': 'p.{0}.{1}'.format(self._testMethodName,
                                              w_method.__name__),
                'parameters': workflow_parameters_schema or {}
            }) for index, w_method in enumerate(workflow_methods)
        ))
        blueprint = {
            'tosca_definitions_version': 'cloudify_dsl_1_3',
            'imports': ['inner/imported.yaml'],
            'inputs': {
                'from_input': {
                    'default': 'from_input_default_value'
                }
            },
            'outputs': {
                'some_output': {
                    'value': {'get_attribute': ['node', 'some_output']},
                },
                'static': {
                    'value': {'get_attribute': ['node', 'property']}
                }
            },
            'plugins': {
                'p': {
                    'executor': 'central_deployment_agent',
                    'install': False,
                    'package_name': PLUGIN_PACKAGE_NAME,
                    'package_version': PLUGIN_PACKAGE_VERSION
                }
            },
            'node_types': {
                'type': {
                    'properties': {
                        'property': {
                            'default': 'default'
                        },
                        'from_input': {
                            'default': 'from_input_default_value'
                        }
                    }
                },
                'cloudify.nodes.Compute': {
                    'derived_from': 'type',
                    'properties': {
                        'ip': {
                            'default': ''
                        }
                    }
                }
            },
            'relationships': {
                'cloudify.relationships.contained_in': {}
            },
            'node_templates': {
                'node4': {
                    'type': 'type',
                    'interfaces': interfaces,
                    'relationships': [{
                        'target': 'node3',
                        'type': 'cloudify.relationships.contained_in',
                    }]
                },
                'node3': {
                    'type': 'cloudify.nodes.Compute',
                    'interfaces': interfaces,
                    'properties': {
                        'ip': '1.1.1.1'
                    }
                },
                'node2': {
                    'type': 'cloudify.nodes.Compute',
                    'interfaces': interfaces,
                },
                'node': {
                    'type': 'type',
                    'interfaces': interfaces,
                    'properties': {
                        'property': 'value',
                        'from_input': {'get_input': 'from_input'}
                    },
                    'relationships': [{
                        'target': 'node2',
                        'type': 'cloudify.relationships.contained_in',
                        'source_interfaces': interfaces,
                        'target_interfaces': interfaces
                    }]
                },
                'node5': {
                    'type': 'imported_type'
                }
            },
            'workflows': workflows,
            'groups': {
                'group1': {
                    'members': ['node']
                }
            },
            'policies': {
                'policy1': {
                    'type': 'cloudify.policies.scaling',
                    'targets': ['group1']
                }
            }
        }
        return blueprint

    def _create_temp_module(self):
        """Create and register an empty module named after the test."""
        import imp
        temp_module = imp.new_module(self._testMethodName)
        sys.modules[self._testMethodName] = temp_module
        return temp_module

    def _remove_temp_module(self):
        """Unregister the per-test module, if it was created."""
        if self._testMethodName in sys.modules:
            del sys.modules[self._testMethodName]

    @contextlib.contextmanager
    def _mock_stdout_event_and_log(self):
        """Capture cloudify stdout events/logs into lists for assertions."""
        events = []
        logs = []

        # Provide same interface as other log/event functions
        def mock_stdout_event(event):
            events.append(event)

        # Provide same interface as other log/event functions
        def mock_stdout_log(log):
            logs.append(log)

        o_stdout_event = cloudify.logs.stdout_event_out
        o_stdout_log = cloudify.logs.stdout_log_out
        cloudify.logs.stdout_event_out = mock_stdout_event
        cloudify.logs.stdout_log_out = mock_stdout_log
        try:
            yield events, logs
        finally:
            # BUG FIX: the original assigned stdout_event_out twice and never
            # restored stdout_log_out, leaking the mock after the test.
            cloudify.logs.stdout_log_out = o_stdout_log
            cloudify.logs.stdout_event_out = o_stdout_event

    def _test_retry_configuration_impl(self,
                                       global_retries,
                                       global_retry_interval,
                                       operation_retries,
                                       operation_retry_interval):
        """Shared retry-test body: operation-level retry settings override
        the global ones; op0 fails until the retries are exhausted and op1
        verifies the total invocation count."""
        expected_retries = global_retries
        if operation_retries is not None:
            expected_retries = operation_retries
        expected_retry_interval = global_retry_interval
        if operation_retry_interval is not None:
            expected_retry_interval = operation_retry_interval

        def flow(ctx, **_):
            instance = _instance(ctx, 'node')
            instance.execute_operation('test.op0', kwargs={
                'props': {'key': 'initial_value'}
            }).get()
            instance.execute_operation('test.op1').get()

        def op0(ctx, props, **_):
            self.assertIsNotNone(ctx.instance.id)
            current_retry = ctx.instance.runtime_properties.get('retry', 0)
            last_timestamp = ctx.instance.runtime_properties.get('timestamp')
            current_timestamp = time.time()
            ctx.instance.runtime_properties['retry'] = current_retry + 1
            ctx.instance.runtime_properties['timestamp'] = current_timestamp
            # kwargs must be copied per invocation: this mutation may not
            # leak into the next retry.
            self.assertEqual('initial_value', props['key'])
            props['key'] = 'new_value'
            if current_retry > 0:
                duration = current_timestamp - last_timestamp
                self.assertTrue(expected_retry_interval <= duration <=
                                expected_retry_interval + 0.5)
            if current_retry < expected_retries:
                self.fail()

        def op1(ctx, **_):
            self.assertEqual(
                expected_retries + 1, ctx.instance.runtime_properties['retry'])
        self._execute_workflow(
            flow,
            operation_methods=[op0, op1],
            operation_retries=operation_retries,
            operation_retry_interval=operation_retry_interval,
            execute_kwargs={
                'task_retry_interval': global_retry_interval,
                'task_retries': global_retries})
@nose.tools.nottest
class LocalWorkflowTest(BaseWorkflowTest):
    def test_workflow_and_operation_logging_and_events(self):
        """Verify the exact ordered event/log stream produced by a workflow
        that logs, sends events and runs one operation plus one local task."""
        def assert_task_events(indexes, events):
            # Each task contributes sending_task/task_started/task_succeeded
            # at the given positions in the event stream.
            self.assertEqual('sending_task',
                             events[indexes[0]]['event_type'])
            self.assertEqual('task_started',
                             events[indexes[1]]['event_type'])
            self.assertEqual('task_succeeded',
                             events[indexes[2]]['event_type'])
        def the_workflow(ctx, **_):
            def local_task():
                pass
            instance = _instance(ctx, 'node')
            ctx.logger.info('workflow_logging')
            ctx.send_event('workflow_event').get()
            instance.logger.info('node_instance_logging')
            instance.send_event('node_instance_event').get()
            instance.execute_operation('test.op0').get()
            ctx.local_task(local_task).get()
        def the_operation(ctx, **_):
            ctx.logger.info('op_logging')
            ctx.send_event('op_event')
        with self._mock_stdout_event_and_log() as (events, logs):
            self._execute_workflow(the_workflow, operation_methods=[
                the_operation])
            # 11 events: workflow_started, two explicit events, the
            # operation's task triple (with op_event emitted mid-task), the
            # local task's triple, and workflow_succeeded.
            self.assertEqual(11, len(events))
            self.assertEqual(3, len(logs))
            self.assertEqual('workflow_started',
                             events[0]['event_type'])
            self.assertEqual('workflow_event',
                             events[1]['message']['text'])
            self.assertEqual('node_instance_event',
                             events[2]['message']['text'])
            assert_task_events([3, 4, 6], events)
            self.assertEqual('op_event',
                             events[5]['message']['text'])
            assert_task_events([7, 8, 9], events)
            self.assertEqual('workflow_succeeded',
                             events[10]['event_type'])
            self.assertEqual('workflow_logging',
                             logs[0]['message']['text'])
            self.assertEqual('node_instance_logging',
                             logs[1]['message']['text'])
            self.assertEqual('op_logging',
                             logs[2]['message']['text'])
    def test_task_event_filtering(self):
        """send_task_events (per-call or via @task_config) suppresses the
        sending/started/succeeded task events; call-site settings win."""
        # Baseline: a local task emits its 3 task events (+workflow pair).
        def flow1(ctx, **_):
            def task():
                pass
            ctx.local_task(task)
        with self._mock_stdout_event_and_log() as (events, _):
            self._execute_workflow(flow1, use_existing_env=False)
            self.assertEqual(5, len(events))
        # Per-call send_task_events=False: only the workflow pair remains.
        def flow2(ctx, **_):
            def task():
                pass
            ctx.local_task(task, send_task_events=False)
        with self._mock_stdout_event_and_log() as (events, _):
            self._execute_workflow(flow2,
                                   use_existing_env=False)
            self.assertEqual(2, len(events))
        # Same suppression configured via the @task_config decorator.
        def flow3(ctx, **_):
            @task_config(send_task_events=False)
            def task():
                pass
            ctx.local_task(task)
        with self._mock_stdout_event_and_log() as (events, _):
            self._execute_workflow(flow3, use_existing_env=False)
            self.assertEqual(2, len(events))
        # Explicit send_task_events=True via decorator restores the events.
        def flow4(ctx, **_):
            @task_config(send_task_events=True)
            def task():
                pass
            ctx.local_task(task)
        with self._mock_stdout_event_and_log() as (events, _):
            self._execute_workflow(flow4, use_existing_env=False)
            self.assertEqual(5, len(events))
        # Failure path: task events suppressed, but task_failed and
        # workflow_failed are still emitted.
        def flow5(ctx, **_):
            def task():
                self.fail()
            ctx.local_task(task, send_task_events=False)
        with self._mock_stdout_event_and_log() as (events, _):
            self.assertRaises(AssertionError,
                              self._execute_workflow,
                              flow5, use_existing_env=False)
            self.assertEqual(3, len(events))
            self.assertEqual('task_failed', events[1]['event_type'])
            self.assertEqual('workflow_failed', events[2]['event_type'])
    def test_task_config_decorator(self):
        """@task_config(kwargs=...) supplies default kwargs; call-site kwargs
        only win when override_task_config=True is passed."""
        def flow(ctx, **_):
            task_config_kwargs = {'key': 'task_config'}
            invocation_kwargs = {'key': 'invocation'}
            # Decorator kwargs used when the call site passes none.
            @task_config(kwargs=task_config_kwargs)
            def task1(**kwargs):
                self.assertEqual(kwargs, task_config_kwargs)
            ctx.local_task(task1).get()
            # Decorator kwargs still win over call-site kwargs by default.
            @task_config(kwargs=task_config_kwargs)
            def task2(**kwargs):
                self.assertEqual(kwargs, task_config_kwargs)
            ctx.local_task(task2, kwargs=invocation_kwargs).get()
            # override_task_config=True makes the call-site kwargs win.
            @task_config(kwargs=task_config_kwargs)
            def task3(**kwargs):
                self.assertEqual(kwargs, invocation_kwargs)
            ctx.local_task(task3,
                           kwargs=invocation_kwargs,
                           override_task_config=True).get()
        self._execute_workflow(flow)
    def test_workflow_bootstrap_context(self):
        """With no provider context configured, the internal bootstrap
        context exposed to workflow code is an empty dict."""
        def bootstrap_context(ctx, **_):
            bootstrap_context = ctx.internal._get_bootstrap_context()
            self.assertEqual(bootstrap_context, {})
        self._execute_workflow(bootstrap_context)
    def test_update_execution_status(self):
        """update_execution_status is not supported in local workflow
        environments and must raise NotImplementedError."""
        def update_execution_status(ctx, **_):
            ctx.update_execution_status('status')
        self.assertRaises(NotImplementedError,
                          self._execute_workflow,
                          update_execution_status)
def test_workflow_set_get_node_instance_state(self):
def get_set_node_instance_state(ctx, **_):
instance = _instance(ctx, 'node')
self.assertIsNone(instance.get_state().get())
instance.set_state('state').get()
self.assertEquals('state', instance.get_state().get())
self._execute_workflow(get_set_node_instance_state)
    def test_workflow_ctx_properties(self):
        """Workflow ctx exposes blueprint/deployment ids (both the test
        name), scaling group membership, the workflow id and execution id."""
        def attributes(ctx, **_):
            self.assertEqual(self._testMethodName, ctx.blueprint.id)
            self.assertEqual(self._testMethodName, ctx.deployment.id)
            self.assertEqual(
                ['node'], ctx.deployment.scaling_groups['group1']['members'])
            node_instance = next(ctx.get_node('node').instances)
            scaling_groups = node_instance.scaling_groups
            self.assertEqual(1, len(scaling_groups))
            self.assertEqual('group1', scaling_groups[0]['name'])
            self.assertEqual('workflow0', ctx.workflow_id)
            self.assertIsNotNone(ctx.execution_id)
        self._execute_workflow(attributes)
def test_workflow_blueprint_model(self):
def blueprint_model(ctx, **_):
nodes = list(ctx.nodes)
node1 = ctx.get_node('node')
node2 = ctx.get_node('node2')
node1_instances = list(node1.instances)
node2_instances = list(node2.instances)
instance1 = node1_instances[0]
instance2 = node2_instances[0]
node1_relationships = list(node1.relationships)
node2_relationships = list(node2.relationships)
instance1_relationships = list(instance1.relationships)
instance2_relationships = list(instance2.relationships)
relationship = node1_relationships[0]
relationship_instance = instance1_relationships[0]
self.assertEqual(5, len(nodes))
self.assertEqual(1, len(node1_instances))
self.assertEqual(1, len(node2_instances))
self.assertEqual(1, len(node1_relationships))
self.assertEqual(0, len(node2_relationships))
self.assertEqual(1, len(instance1_relationships))
self.assertEqual(0, len(instance2_relationships))
sorted_ops = ['op0', 'test.op0']
self.assertEqual(1, node1.number_of_instances)
self.assertEqual(1, node2.number_of_instances)
self.assertEqual('node', node1.id)
self.assertEqual('node2', node2.id)
self.assertEqual('type', node1.type)
self.assertEqual('type', node1.type)
self.assertEqual('cloudify.nodes.Compute', node2.type)
self.assertEqual(['type'], node1.type_hierarchy)
self.assertEqual(['type', 'cloudify.nodes.Compute'],
node2.type_hierarchy)
self.assertThat(node1.properties.items(),
ContainsAll({'property': 'value'}.items()))
self.assertThat(node2.properties.items(),
ContainsAll({'property': 'default'}.items()))
self.assertEqual(sorted_ops, sorted(node1.operations.keys()))
self.assertEqual(sorted_ops, sorted(node2.operations.keys()))
self.assertIs(relationship, node1.get_relationship('node2'))
self.assertIn('node_', instance1.id)
self.assertIn('node2_', instance2.id)
self.assertEqual('node', instance1.node_id)
self.assertEqual('node2', instance2.node_id)
self.assertIs(node1, instance1.node)
self.assertIs(node2, instance2.node)
self.assertEqual(node2.id, relationship.target_id)
self.assertTrue(relationship.is_derived_from(
"cloudify.relationships.contained_in"
))
self.assertEqual(node2, relationship.target_node)
self.assertEqual(sorted_ops,
sorted(relationship.source_operations.keys()))
self.assertEqual(sorted_ops,
sorted(relationship.target_operations.keys()))
self.assertEqual(instance2.id, relationship_instance.target_id)
self.assertEqual(instance2,
relationship_instance.target_node_instance)
self.assertIs(relationship, relationship_instance.relationship)
self._execute_workflow(blueprint_model)
    def test_operation_capabilities(self):
        """Runtime properties set on a related instance (node2, the
        contained_in target) are visible via ctx.capabilities."""
        def the_workflow(ctx, **_):
            instance = _instance(ctx, 'node')
            instance2 = _instance(ctx, 'node2')
            instance2.execute_operation('test.op0').get()
            instance.execute_operation('test.op1').get()
        def op0(ctx, **_):
            ctx.instance.runtime_properties['key'] = 'value'
        def op1(ctx, **_):
            caps = ctx.capabilities.get_all()
            self.assertEqual(1, len(caps))
            # NOTE: iteritems() ties this file to Python 2.
            key, value = next(caps.iteritems())
            self.assertIn('node2_', key)
            self.assertEqual(value, {'key': 'value'})
        self._execute_workflow(the_workflow, operation_methods=[op0, op1])
    def test_operation_runtime_properties(self):
        """Runtime properties written by one operation are visible to a
        later operation on the same instance."""
        def runtime_properties(ctx, **_):
            instance = _instance(ctx, 'node')
            instance.execute_operation('test.op0').get()
            instance.execute_operation('test.op1').get()
        def op0(ctx, **_):
            ctx.instance.runtime_properties['key'] = 'value'
        def op1(ctx, **_):
            self.assertEqual('value', ctx.instance.runtime_properties['key'])
        self._execute_workflow(runtime_properties, operation_methods=[
            op0, op1])
    def test_operation_related_properties(self):
        """Relationship operations see the correct target node properties on
        both the source-side and target-side executions."""
        def the_workflow(ctx, **_):
            instance = _instance(ctx, 'node')
            relationship = next(instance.relationships)
            relationship.execute_source_operation('test.op0')
            relationship.execute_target_operation('test.op0')
        def op(ctx, **_):
            # Identify which side we're on from the target instance id.
            if 'node2_' in ctx.target.instance.id:
                self.assertThat(ctx.target.node.properties.items(),
                                ContainsAll({'property': 'default'}.items()))
            elif 'node_' in ctx.target.instance.id:
                self.assertThat(ctx.target.node.properties.items(),
                                ContainsAll({'property': 'value'}.items()))
            else:
                self.fail('unexpected: {0}'.format(ctx.target.instance.id))
        self._execute_workflow(the_workflow, operation_methods=[op])
    def test_operation_related_runtime_properties(self):
        """Relationship operations expose source/target runtime properties
        consistently on both sides of the relationship."""
        def related_runtime_properties(ctx, **_):
            instance = _instance(ctx, 'node')
            instance2 = _instance(ctx, 'node2')
            relationship = next(instance.relationships)
            # Tag each instance so op1 can tell them apart.
            instance.execute_operation('test.op0',
                                       kwargs={'value': 'instance1'}).get()
            instance2.execute_operation('test.op0',
                                        kwargs={'value': 'instance2'}).get()
            relationship.execute_source_operation(
                'test.op1', kwargs={
                    'source': 'instance1',
                    'target': 'instance2'
                }).get()
            relationship.execute_target_operation(
                'test.op1', kwargs={
                    'source': 'instance1',
                    'target': 'instance2'
                }).get()
        def op0(ctx, value, **_):
            ctx.instance.runtime_properties['key'] = value
        def op1(ctx, source, target, **_):
            self.assertEqual(source,
                             ctx.source.instance.runtime_properties['key'])
            self.assertEqual(target,
                             ctx.target.instance.runtime_properties['key'])
        self._execute_workflow(related_runtime_properties, operation_methods=[
            op0, op1])
    def test_operation_ctx_properties_and_methods(self):
        """Exercise the operation ctx surface: identifiers, plugin metadata,
        node properties, and resource get/download (both to a temp path and
        to an explicit target path)."""
        def flow(ctx, **_):
            instance = _instance(ctx, 'node')
            instance.set_state('state').get()
            instance.execute_operation('test.op0').get()
            # The workflow handler can also download deployment resources.
            target_path = ctx.internal.handler.download_deployment_resource(
                'resource')
            with open(target_path) as f:
                self.assertEqual('content', f.read())
        def ctx_properties(ctx, **_):
            self.assertEqual('node', ctx.node.name)
            self.assertIn('node_', ctx.instance.id)
            self.assertEqual(self._testMethodName, ctx.blueprint.id)
            self.assertEqual(self._testMethodName, ctx.deployment.id)
            self.assertIsNotNone(ctx.execution_id)
            self.assertEqual('workflow0', ctx.workflow_id)
            self.assertIsNotNone(ctx.task_id)
            self.assertEqual('{0}.{1}'.format(self._testMethodName,
                                              'ctx_properties'),
                             ctx.task_name)
            self.assertIsNone(ctx.task_target)
            # NOTE(review): ctx.plugin both compares equal to 'p' and has
            # name/package attributes -- presumably a string subclass;
            # confirm against the plugin context implementation.
            self.assertEqual('p', ctx.plugin)
            self.assertEqual('p', ctx.plugin.name)
            self.assertEqual(PLUGIN_PACKAGE_NAME, ctx.plugin.package_name)
            self.assertEqual(PLUGIN_PACKAGE_VERSION,
                             ctx.plugin.package_version)
            self.assertEqual(sys.prefix, ctx.plugin.prefix)
            self.assertEqual('test.op0', ctx.operation.name)
            self.assertThat(ctx.node.properties.items(),
                            ContainsAll({'property': 'value'}.items()))
            self.assertEqual('content', ctx.get_resource('resource'))
            target_path = ctx.download_resource('resource')
            with open(target_path) as f:
                self.assertEqual('content', f.read())
            expected_target_path = os.path.join(self.work_dir, 'resource')
            target_path = ctx.download_resource(
                'resource', target_path=expected_target_path)
            self.assertEqual(target_path, expected_target_path)
            with open(target_path) as f:
                self.assertEqual('content', f.read())
        self._execute_workflow(flow, operation_methods=[ctx_properties])
    def test_ctx_host_ip(self):
        """host_ip resolves from the host's runtime or static properties."""
        def op0(ctx, **_):
            ctx.instance.runtime_properties['ip'] = '2.2.2.2'
        def op1(ctx, expected_ip, **_):
            self.assertEqual(ctx.instance.host_ip, expected_ip)
        def flow(ctx, **_):
            instance1 = _instance(ctx, 'node')
            instance4 = _instance(ctx, 'node4')
            # these are hosts
            # in this one we will set a runtime_property of ip
            instance2 = _instance(ctx, 'node2')
            # this one has ip as static properties
            instance3 = _instance(ctx, 'node3')
            instance2.execute_operation('test.op0').get()
            # contained instances resolve the ip through their host
            instance1.execute_operation('test.op1', kwargs={
                'expected_ip': '2.2.2.2'
            }).get()
            instance2.execute_operation('test.op1', kwargs={
                'expected_ip': '2.2.2.2'
            }).get()
            instance3.execute_operation('test.op1', kwargs={
                'expected_ip': '1.1.1.1'
            }).get()
            instance4.execute_operation('test.op1', kwargs={
                'expected_ip': '1.1.1.1'
            }).get()
        self._execute_workflow(flow, operation_methods=[op0, op1])
def test_operation_bootstrap_context(self):
bootstrap_context = {'stub': 'prop'}
provider_context = {
'cloudify': bootstrap_context
}
def contexts(ctx, **_):
self.assertEqual(bootstrap_context,
ctx.bootstrap_context._bootstrap_context)
self.assertEqual(provider_context, ctx.provider_context)
self._execute_workflow(operation_methods=[contexts],
provider_context=provider_context)
    def test_workflow_graph_mode(self):
        """Graph-mode sequences run tasks in the order they were added."""
        def flow(ctx, **_):
            instance = _instance(ctx, 'node')
            graph = ctx.graph_mode()
            sequence = graph.sequence()
            # expected execution order: op2 -> op1 -> op0; the ops assert
            # on a shared 'invocation' runtime property to verify it
            sequence.add(instance.execute_operation('test.op2'))
            sequence.add(instance.execute_operation('test.op1'))
            sequence.add(instance.execute_operation('test.op0'))
            graph.execute()
        def op0(ctx, **_):
            # runs last: counter was set by op2 and incremented by op1
            invocation = ctx.instance.runtime_properties['invocation']
            self.assertEqual(2, invocation)
        def op1(ctx, **_):
            # runs second
            invocation = ctx.instance.runtime_properties['invocation']
            self.assertEqual(1, invocation)
            ctx.instance.runtime_properties['invocation'] += 1
        def op2(ctx, **_):
            # runs first: counter not set yet
            invocation = ctx.instance.runtime_properties.get('invocation')
            self.assertIsNone(invocation)
            ctx.instance.runtime_properties['invocation'] = 1
        self._execute_workflow(flow, operation_methods=[op0, op1, op2])
    def test_node_instance_version_conflict(self):
        """Concurrent instance updates trigger a StorageConflictError."""
        def flow(ctx, **_):
            pass
        # stub to get a properly initialized storage instance
        self._execute_workflow(flow)
        storage = self.env.storage
        instance = storage.get_node_instances()[0]
        storage.update_node_instance(
            instance.id,
            runtime_properties={},
            state=instance.state,
            version=instance.version)
        instance_id = instance.id
        # thread-safe channels for the racing publisher threads
        exception = Queue.Queue()
        done = Queue.Queue()
        def proceed():
            # keep looping until one publisher signalled completion
            try:
                done.get_nowait()
                return False
            except Queue.Empty:
                return True
        def publisher(key, value):
            def func():
                # hammer the same instance for up to 5 seconds; one of
                # the two racing publishers should hit a version conflict
                timeout = time.time() + 5
                while time.time() < timeout and proceed():
                    p_instance = storage.get_node_instance(instance_id)
                    p_instance.runtime_properties[key] = value
                    try:
                        storage.update_node_instance(
                            p_instance.id,
                            runtime_properties=p_instance.runtime_properties,
                            state=p_instance.state,
                            version=p_instance.version)
                    except local.StorageConflictError, e:
                        exception.put(e)
                        done.put(True)
                        return
            return func
        publisher1 = publisher('publisher1', 'value1')
        publisher2 = publisher('publisher2', 'value2')
        publisher1_thread = threading.Thread(target=publisher1)
        publisher2_thread = threading.Thread(target=publisher2)
        publisher1_thread.daemon = True
        publisher2_thread.daemon = True
        publisher1_thread.start()
        publisher2_thread.start()
        publisher1_thread.join()
        publisher2_thread.join()
        # raises Queue.Empty (failing the test) if no conflict occurred
        conflict_error = exception.get_nowait()
        self.assertIn('does not match current', conflict_error.message)
def test_get_node(self):
def flow(ctx, **_):
pass
# stub to get a properly initialized storage instance
self._execute_workflow(flow)
storage = self.env.storage
node = storage.get_node('node')
self.assertEqual(node.properties['property'], 'value')
def test_get_node_missing(self):
def flow(ctx, **_):
pass
# stub to get a properly initialized storage instance
self._execute_workflow(flow)
storage = self.env.storage
self.assertRaises(RuntimeError,
storage.get_node, 'node_that_does_not_exist')
    def test_execute_non_existent_operation(self):
        """Executing an undefined operation fails with a clear error."""
        def flow(ctx, **_):
            instance = _instance(ctx, 'node')
            instance.execute_operation('non_existent')
        with testtools.testcase.ExpectedException(RuntimeError,
                                                  ".*does not exist.*"):
            self._execute_workflow(flow)
    def test_operation_retry_configuration(self):
        """Operation-level retry settings override the global ones."""
        self._test_retry_configuration_impl(
            global_retries=100,
            global_retry_interval=100,
            operation_retries=1,
            operation_retry_interval=1
        )
@nose.tools.istest
class LocalWorkflowTestInMemoryStorage(LocalWorkflowTest):
    """Run the LocalWorkflowTest suite against in-memory storage."""
    def setUp(self):
        super(LocalWorkflowTestInMemoryStorage, self).setUp()
        self.storage_cls = local.InMemoryStorage
@nose.tools.istest
class LocalWorkflowTestFileStorage(LocalWorkflowTest):
    """Run the LocalWorkflowTest suite against file-based storage."""
    def setUp(self):
        super(LocalWorkflowTestFileStorage, self).setUp()
        self.storage_cls = local.FileStorage
        self.storage_kwargs = {'storage_dir': self.storage_dir}
@nose.tools.istest
class FileStorageTest(BaseWorkflowTest):
    """Tests specific to the file-based storage backend."""
    def setUp(self):
        super(FileStorageTest, self).setUp()
        self.storage_cls = local.FileStorage
        self.storage_kwargs = {'storage_dir': self.storage_dir}
def test_storage_dir(self):
def stub_workflow(ctx, **_):
pass
self._execute_workflow(stub_workflow, name=self._testMethodName)
self.assertTrue(os.path.isdir(
os.path.join(self.storage_dir, self._testMethodName)))
    def test_persistency(self):
        """Inputs, resources and state survive env reload via file storage."""
        bootstrap_context = {'stub': 'prop'}
        provider_context = {'cloudify': bootstrap_context}
        def persistency_1(ctx, **_):
            instance = _instance(ctx, 'node')
            instance.set_state('persistency')
            instance.execute_operation('test.op0').get()
        def persistency_2(ctx, **_):
            instance = _instance(ctx, 'node')
            # the state written by the first workflow must still be readable
            self.assertEqual('persistency', instance.get_state().get())
            instance.execute_operation('test.op0').get()
        def op(ctx, **_):
            self.assertEqual('new_input', ctx.node.properties['from_input'])
            self.assertEqual('content', ctx.get_resource('resource'))
            self.assertEqual(bootstrap_context,
                             ctx.bootstrap_context._bootstrap_context)
            self.assertEqual(provider_context, ctx.provider_context)
        self._setup_env(workflow_methods=[persistency_1, persistency_2],
                        operation_methods=[op],
                        inputs={'from_input': 'new_input'},
                        provider_context=provider_context)
        # both runs load the environment from disk instead of recreating it
        self._execute_workflow(workflow_name='workflow0',
                               setup_env=False, load_env=True)
        self._execute_workflow(workflow_name='workflow1',
                               setup_env=False, load_env=True)
    def test_path_agnostic_persistency(self):
        # tests file storage isn't dependent on the blueprint directory
        # for resources (but stores its own copies instead)
        def persistency(ctx, **_):
            instance = _instance(ctx, 'node')
            instance.execute_operation('test.op0').get()
        def op(ctx, **_):
            self.assertEqual('new_input', ctx.node.properties['from_input'])
            self.assertEqual('content', ctx.get_resource('resource'))
        self._setup_env(workflow_methods=[persistency],
                        operation_methods=[op],
                        inputs={'from_input': 'new_input'})
        # removing the blueprint dir must not break resource access
        shutil.rmtree(self.blueprint_dir)
        self._execute_workflow(workflow_name='workflow0',
                               setup_env=False, load_env=True)
def test_local_init_in_blueprint_dir(self):
self.blueprint_dir = self.storage_dir
def flow(ctx, **_):
pass
self._setup_env(workflow_methods=[flow])
def test_workdir(self):
content = 'CONTENT'
def op0(ctx, **_):
self.assertEquals(
ctx.plugin.workdir,
os.path.join(self.storage_dir, self._testMethodName,
'workdir', 'plugins', 'p'))
work_file = os.path.join(ctx.plugin.workdir, 'work_file')
self.assertFalse(os.path.exists(work_file))
with open(work_file, 'w') as f:
f.write(content)
def op1(ctx, **_):
work_file = os.path.join(ctx.plugin.workdir, 'work_file')
with open(work_file) as f:
print work_file
self.assertEqual(content, f.read())
def workflow1(ctx, **_):
instance = _instance(ctx, 'node')
instance.execute_operation('test.op0').get()
def workflow2(ctx, **_):
instance = _instance(ctx, 'node')
instance.execute_operation('test.op1').get()
self._setup_env(workflow_methods=[workflow1, workflow2],
operation_methods=[op0, op1])
self._execute_workflow(workflow_name='workflow0',
setup_env=False, load_env=True)
self._execute_workflow(workflow_name='workflow1',
setup_env=False, load_env=True)
@nose.tools.istest
class LocalWorkflowEnvironmentTest(BaseWorkflowTest):
    """Tests for env setup, parameter validation and error reporting."""
    def setUp(self):
        super(LocalWorkflowEnvironmentTest, self).setUp()
        self.storage_cls = local.InMemoryStorage
    def test_inputs(self):
        """Blueprint inputs propagate into node properties."""
        def op(ctx, **_):
            self.assertEqual('new_input', ctx.node.properties['from_input'])
        self._execute_workflow(operation_methods=[op],
                               inputs={'from_input': 'new_input'})
    def test_outputs(self):
        """env.outputs() reflects runtime properties referenced by outputs."""
        def op(ctx, **_):
            pass
        self._execute_workflow(operation_methods=[op],
                               use_existing_env=False)
        # runtime property never set -> that output resolves to None
        self.assertEqual(self.env.outputs(),
                         {'some_output': None, 'static': 'value'})
        # rebind ``op`` so the second run sets the property the output reads
        def op(ctx, **_):
            ctx.instance.runtime_properties['some_output'] = 'value'
        self._execute_workflow(operation_methods=[op],
                               use_existing_env=False)
        self.assertEqual(self.env.outputs(),
                         {'some_output': 'value', 'static': 'value'})
    def test_workflow_return_value(self):
        """The workflow function's return value is surfaced to the caller."""
        def flow(ctx, **_):
            return 1
        self.assertEqual(1, self._execute_workflow(flow))
    def test_blueprint_imports(self):
        """Node types imported by the blueprint are resolved."""
        def flow(ctx, **_):
            node = ctx.get_node('node5')
            self.assertEqual('imported_type', node.type)
        self._execute_workflow(flow)
def test_workflow_parameters(self):
normal_schema = {
'from_invocation': {},
'from_default': {
'default': 'from_default_default'
},
'invocation_overrides_default': {
'default': 'invocation_overrides_default_default'
}
}
normal_execute_kwargs = {
'parameters': {
'from_invocation': 'from_invocation',
'invocation_overrides_default':
'invocation_overrides_default_override'
}
}
def normal_flow(ctx,
from_invocation,
from_default,
invocation_overrides_default,
**_):
self.assertEqual(from_invocation, 'from_invocation')
self.assertEqual(from_default, 'from_default_default')
self.assertEqual(invocation_overrides_default,
'invocation_overrides_default_override')
self._execute_workflow(normal_flow,
execute_kwargs=normal_execute_kwargs,
workflow_parameters_schema=normal_schema,
use_existing_env=False)
# now test missing
missing_schema = normal_schema.copy()
missing_schema['missing_parameter'] = {}
missing_flow = normal_flow
missing_execute_kwargs = normal_execute_kwargs
self.assertRaises(ValueError,
self._execute_workflow,
missing_flow,
execute_kwargs=missing_execute_kwargs,
workflow_parameters_schema=missing_schema,
use_existing_env=False)
# now test invalid custom parameters
invalid_custom_schema = normal_schema
invalid_custom_flow = normal_flow
invalid_custom_kwargs = normal_execute_kwargs.copy()
invalid_custom_kwargs['parameters']['custom_parameter'] = 'custom'
self.assertRaises(ValueError,
self._execute_workflow,
invalid_custom_flow,
execute_kwargs=invalid_custom_kwargs,
workflow_parameters_schema=invalid_custom_schema,
use_existing_env=False)
# now test valid custom parameters
def valid_custom_flow(ctx,
from_invocation,
from_default,
invocation_overrides_default,
custom_parameter,
**_):
self.assertEqual(from_invocation, 'from_invocation')
self.assertEqual(from_default, 'from_default_default')
self.assertEqual(invocation_overrides_default,
'invocation_overrides_default_override')
self.assertEqual(custom_parameter, 'custom')
valid_custom_schema = normal_schema
valid_custom_kwargs = normal_execute_kwargs.copy()
valid_custom_kwargs['parameters']['custom_parameter'] = 'custom'
valid_custom_kwargs['allow_custom_parameters'] = True
self._execute_workflow(
valid_custom_flow,
execute_kwargs=valid_custom_kwargs,
workflow_parameters_schema=valid_custom_schema,
use_existing_env=False)
    def test_workflow_parameters_types(self):
        """Typed parameter validation for optional, mandatory and custom args.

        The schema covers untyped, integer, float, string and boolean
        parameters, each in an 'optional' (has default) and 'mandatory'
        flavor; the helpers below feed valid and invalid values through
        local._merge_and_validate_execution_parameters.
        """
        workflow = {
            'parameters': {
                'optional1': {'default': 7},
                'optional2': {'default': 'bla'},
                'optional_int1': {
                    'default': 1,
                    'type': 'integer'
                },
                'optional_int2': {
                    'default': 2,
                    'type': 'integer'
                },
                'optional_float1': {
                    'default': 1.5,
                    'type': 'float'
                },
                'optional_float2': {
                    'default': 2,
                    'type': 'float'
                },
                'optional_str1': {
                    'default': 'bla',
                    'type': 'string'
                },
                'optional_str2': {
                    'default': 'blabla',
                    'type': 'string'
                },
                'optional_bool1': {
                    'default': 'False',
                    'type': 'boolean'
                },
                'optional_bool2': {
                    'default': 'True',
                    'type': 'boolean'
                },
                'mandatory1': {},
                'mandatory2': {},
                'mandatory_int1': {'type': 'integer'},
                'mandatory_int2': {'type': 'integer'},
                'mandatory_float1': {'type': 'float'},
                'mandatory_float2': {'type': 'float'},
                'mandatory_str1': {'type': 'string'},
                'mandatory_str2': {'type': 'string'},
                'mandatory_bool1': {'type': 'boolean'},
                'mandatory_bool2': {'type': 'boolean'}
            }
        }
        self._test_workflow_mandatory_parameters_types(workflow)
        self._test_workflow_optional_parameters_types(workflow)
        self._test_workflow_custom_parameters_types(workflow)
    def _test_workflow_mandatory_parameters_types(self, workflow):
        """Only the wrongly-typed mandatory params (the *2 ones) are reported."""
        parameters = {
            'mandatory1': 'bla',
            'mandatory2': 6,
            'mandatory_int1': 1,
            'mandatory_int2': 'bla',
            'mandatory_float1': 3.5,
            'mandatory_float2': True,
            'mandatory_str1': 'bla',
            'mandatory_str2': 7,
            'mandatory_bool1': False,
            'mandatory_bool2': 'boolean_that_is_not_string'
        }
        try:
            local._merge_and_validate_execution_parameters(
                workflow, 'workflow', parameters)
        except ValueError, e:
            # check which parameters are mentioned in the error message
            self.assertIn('mandatory_int2', str(e))
            self.assertIn('mandatory_float2', str(e))
            self.assertIn('mandatory_str2', str(e))
            self.assertIn('mandatory_bool2', str(e))
            self.assertNotIn('mandatory1', str(e))
            self.assertNotIn('mandatory2', str(e))
            self.assertNotIn('mandatory_int1', str(e))
            self.assertNotIn('mandatory_float1', str(e))
            self.assertNotIn('mandatory_str1', str(e))
            self.assertNotIn('mandatory_bool1', str(e))
        else:
            # validation must fail for this parameter set
            self.fail()
    def _test_workflow_optional_parameters_types(self, workflow):
        """Typed optional params are validated just like mandatory ones."""
        parameters = {
            'mandatory1': False,
            'mandatory2': [],
            'mandatory_int1': '-7',
            'mandatory_int2': 3.5,
            'mandatory_float1': '5.0',
            'mandatory_float2': [],
            'mandatory_str1': u'bla',
            'mandatory_str2': ['bla'],
            'mandatory_bool1': 'tRUe',
            'mandatory_bool2': 0,
            'optional1': 'bla',
            'optional2': 6,
            'optional_int1': 1,
            'optional_int2': 'bla',
            'optional_float1': 3.5,
            'optional_float2': True,
            'optional_str1': 'bla',
            'optional_str2': 7,
            'optional_bool1': False,
            'optional_bool2': 'bla'
        }
        try:
            local._merge_and_validate_execution_parameters(
                workflow, 'workflow', parameters)
        except ValueError, e:
            # check which parameters are mentioned in the error message
            self.assertIn('mandatory_int2', str(e))
            self.assertIn('mandatory_float2', str(e))
            self.assertIn('mandatory_str2', str(e))
            self.assertIn('mandatory_bool2', str(e))
            self.assertNotIn('mandatory1', str(e))
            self.assertNotIn('mandatory2', str(e))
            self.assertNotIn('mandatory_int1', str(e))
            self.assertNotIn('mandatory_float1', str(e))
            self.assertNotIn('mandatory_str1', str(e))
            self.assertNotIn('mandatory_bool1', str(e))
            self.assertIn('optional_int2', str(e))
            self.assertIn('optional_float2', str(e))
            self.assertIn('optional_str2', str(e))
            self.assertIn('optional_bool2', str(e))
            self.assertNotIn('optional1', str(e))
            self.assertNotIn('optional2', str(e))
            self.assertNotIn('optional_int1', str(e))
            self.assertNotIn('optional_float1', str(e))
            self.assertNotIn('optional_str1', str(e))
            self.assertNotIn('optional_bool1', str(e))
        else:
            # validation must fail for this parameter set
            self.fail()
    def _test_workflow_custom_parameters_types(self, workflow):
        """Custom (undeclared) params pass through unvalidated when allowed.

        Only 'optional_int2' carries an invalid value here, so it is the
        only one expected in the error message.
        """
        parameters = {
            'mandatory1': False,
            'mandatory2': [],
            'mandatory_int1': -7,
            'mandatory_int2': 3,
            'mandatory_float1': 5.0,
            'mandatory_float2': 0.0,
            'mandatory_str1': u'bla',
            'mandatory_str2': 'bla',
            'mandatory_bool1': True,
            'mandatory_bool2': False,
            'optional1': 'bla',
            'optional2': 6,
            'optional_int1': 1,
            'optional_int2': 'bla',
            'optional_float1': 3.5,
            'optional_str1': 'bla',
            'optional_bool1': 'falSE',
            'custom1': 8,
            'custom2': 3.2,
            'custom3': 'bla',
            'custom4': True
        }
        try:
            # True -> allow_custom_parameters
            local._merge_and_validate_execution_parameters(
                workflow, 'workflow', parameters, True)
        except ValueError, e:
            # check which parameters are mentioned in the error message
            self.assertNotIn('mandatory_int2', str(e))
            self.assertNotIn('mandatory_float2', str(e))
            self.assertNotIn('mandatory_str2', str(e))
            self.assertNotIn('mandatory_bool2', str(e))
            self.assertNotIn('mandatory1', str(e))
            self.assertNotIn('mandatory2', str(e))
            self.assertNotIn('mandatory_int1', str(e))
            self.assertNotIn('mandatory_float1', str(e))
            self.assertNotIn('mandatory_str1', str(e))
            self.assertNotIn('mandatory_bool1', str(e))
            self.assertIn('optional_int2', str(e))
            self.assertNotIn('optional_float2', str(e))
            self.assertNotIn('optional_str2', str(e))
            self.assertNotIn('optional_bool2', str(e))
            self.assertNotIn('optional1', str(e))
            self.assertNotIn('optional2', str(e))
            self.assertNotIn('optional_int1', str(e))
            self.assertNotIn('optional_float1', str(e))
            self.assertNotIn('optional_str1', str(e))
            self.assertNotIn('optional_bool1', str(e))
            self.assertNotIn('custom1', str(e))
            self.assertNotIn('custom2', str(e))
            self.assertNotIn('custom3', str(e))
            self.assertNotIn('custom4', str(e))
        else:
            # validation must fail for this parameter set
            self.fail()
    def test_global_retry_configuration(self):
        """Global retry settings apply when the operation defines none."""
        self._test_retry_configuration_impl(
            global_retries=1,
            global_retry_interval=1,
            operation_retries=None,
            operation_retry_interval=None
        )
    def test_local_task_thread_pool_size(self):
        """task_thread_pool_size controls the local task pool size."""
        default_size = workflow_context.DEFAULT_LOCAL_TASK_THREAD_POOL_SIZE
        def flow(ctx, **_):
            task_processor = ctx.internal.local_tasks_processor
            self.assertEqual(len(task_processor._local_task_processing_pool),
                             default_size)
        self._execute_workflow(
            flow,
            use_existing_env=False)
        # rebind ``flow``: the second run requests a larger pool
        def flow(ctx, **_):
            task_processor = ctx.internal.local_tasks_processor
            self.assertEqual(len(task_processor._local_task_processing_pool),
                             default_size + 1)
        self._execute_workflow(
            flow,
            execute_kwargs={'task_thread_pool_size': default_size + 1},
            use_existing_env=False)
    # The tests below exercise error reporting when an operation,
    # relationship operation or workflow mapping points at a missing
    # module (ImportError) or a missing attribute (AttributeError).
    def test_no_operation_module(self):
        self._no_module_or_attribute_test(
            is_missing_module=True,
            test_type='operation')
    def test_no_operation_module_ignored(self):
        """Modules listed in ignored_modules do not fail the workflow."""
        def op1(ctx, **_):
            pass
        self._execute_workflow(operation_methods=[op1],
                               ignored_modules=['ignored_module'])
    def test_no_operation_attribute(self):
        self._no_module_or_attribute_test(
            is_missing_module=False,
            test_type='operation')
    def test_no_source_operation_module(self):
        self._no_module_or_attribute_test(
            is_missing_module=True,
            test_type='source')
    def test_no_source_operation_attribute(self):
        self._no_module_or_attribute_test(
            is_missing_module=False,
            test_type='source')
    def test_no_target_operation_module(self):
        self._no_module_or_attribute_test(
            is_missing_module=True,
            test_type='target')
    def test_no_target_operation_attribute(self):
        self._no_module_or_attribute_test(
            is_missing_module=False,
            test_type='target')
    def test_no_workflow_module(self):
        self._no_module_or_attribute_test(
            is_missing_module=True,
            test_type='workflow')
    def test_no_workflow_attribute(self):
        self._no_module_or_attribute_test(
            is_missing_module=False,
            test_type='workflow')
    def test_no_workflow(self):
        """Unknown workflow names report the available workflow names."""
        try:
            self._execute_workflow(workflow_name='does_not_exist')
            self.fail()
        except ValueError, e:
            self.assertIn("['workflow0']", e.message)
    def test_getting_contained_elements(self):
        """get_contained_subgraph/contained_instances follow containment.

        Uses _blueprint_3's tree: node_host contains node2 and node3;
        node2 contains node; node3 contains node4.
        """
        def check_subgraph(ctx, **_):
            node_host = _instance(ctx, 'node_host')
            node = _instance(ctx, 'node')
            node2 = _instance(ctx, 'node2')
            node3 = _instance(ctx, 'node3')
            node4 = _instance(ctx, 'node4')
            # subgraph from the root includes every transitively
            # contained instance plus the root itself
            full_contained_subgraph = set([
                node_host,
                node,
                node2,
                node3,
                node4
            ])
            self.assertEqual(
                full_contained_subgraph,
                node_host.get_contained_subgraph()
            )
            # subgraph from a mid-level node covers only its branch
            half_subgraph = set([
                node,
                node2
            ])
            self.assertEqual(
                half_subgraph,
                node2.get_contained_subgraph()
            )
            # contained_instances lists direct children only
            host_contained_instances = set([
                node2,
                node3
            ])
            self.assertEqual(
                host_contained_instances,
                set(node_host.contained_instances)
            )
            self.assertEqual(
                [],
                node.contained_instances
            )
        self._execute_workflow(
            check_subgraph,
            create_blueprint_func=self._blueprint_3
        )
    def _no_module_or_attribute_test(self, is_missing_module, test_type):
        """Run a broken blueprint and assert on the resulting error.

        ``is_missing_module`` selects ImportError (bad module) vs
        AttributeError (bad attribute); ``test_type`` is one of
        'operation'/'source'/'target'/'workflow' and selects where the
        broken mapping was wired by _blueprint_2.
        """
        try:
            self._execute_workflow(
                create_blueprint_func=self._blueprint_2(is_missing_module,
                                                        test_type),
                workflow_name='workflow')
            self.fail()
        except (ImportError, AttributeError, NonRecoverableError) as e:
            if is_missing_module:
                self.assertIn('No module named zzz', e.message)
                if test_type != 'workflow':
                    # operation errors should name the failing interface
                    self.assertIn(test_type, e.message)
                self.assertTrue(isinstance(e, ImportError))
            else:
                # workflow vs operation failures phrase the error differently
                if test_type == 'workflow':
                    thing1 = 'function'
                    thing2 = ' named'
                else:
                    thing1 = 'attribute'
                    thing2 = ''
                self.assertIn("has no {0}{1} 'does_not_exist'".format(thing1,
                                                                      thing2),
                              e.message)
                if test_type != 'workflow':
                    self.assertIn(test_type, e.message)
                self.assertTrue(isinstance(e, AttributeError))
    def _blueprint_2(self,
                     is_missing_module,
                     test_type):
        """Return a blueprint-factory whose mapping is deliberately broken.

        ``is_missing_module`` maps to a nonexistent module ('zzz') instead
        of the test module; the attribute 'does_not_exist' is always bad.
        ``test_type`` selects where the broken mapping is wired:
        node operation, relationship source/target interface, or workflow.
        """
        def func(*_):
            module_name = 'zzz' if is_missing_module else self._testMethodName
            interfaces = {
                'test': {
                    'op': 'p.{0}.{1}'.format(module_name, 'does_not_exist')
                }
            }
            blueprint = {
                'tosca_definitions_version': 'cloudify_dsl_1_0',
                'plugins': {
                    'p': {
                        'executor': 'central_deployment_agent',
                        'install': False
                    }
                },
                'node_types': {
                    'type': {}
                },
                'relationships': {
                    'cloudify.relationships.contained_in': {}
                },
                'node_templates': {
                    'node2': {
                        'type': 'type',
                    },
                    'node': {
                        'type': 'type',
                        'relationships': [{
                            'target': 'node2',
                            'type': 'cloudify.relationships.contained_in',
                        }]
                    },
                },
                'workflows': {
                    'workflow': 'p.{0}.{1}'.format(module_name,
                                                   'does_not_exist')
                }
            }
            # wire the broken interface into the requested location
            node = blueprint['node_templates']['node']
            relationship = node['relationships'][0]
            if test_type == 'operation':
                node['interfaces'] = interfaces
            elif test_type == 'source':
                relationship['source_interfaces'] = interfaces
            elif test_type == 'target':
                relationship['target_interfaces'] = interfaces
            elif test_type == 'workflow':
                # the broken workflow mapping is already in place
                pass
            else:
                self.fail('unsupported: {}'.format(test_type))
            return blueprint
        return func
    def _blueprint_3(self, workflow_methods, _,
                     workflow_parameters_schema, __, *args):
        """Blueprint factory: containment tree for the subgraph tests.

        Layout: node_host contains node2 and node3; node2 contains node;
        node3 contains node4; outside_node stands alone. The positional
        signature matches the create_blueprint_func contract (unused slots
        are named ``_``/``__``/``*args``).
        """
        # one 'workflowN' entry per supplied workflow method, mapped to
        # the test module by function name
        workflows = dict((
            ('workflow{0}'.format(index), {
                'mapping': 'p.{0}.{1}'.format(self._testMethodName,
                                              w_method.__name__),
                'parameters': workflow_parameters_schema or {}
            }) for index, w_method in enumerate(workflow_methods)
        ))
        blueprint = {
            'tosca_definitions_version': 'cloudify_dsl_1_0',
            'plugins': {
                'p': {
                    'executor': 'central_deployment_agent',
                    'install': False
                }
            },
            'node_types': {
                'type': {},
            },
            'relationships': {
                'cloudify.relationships.contained_in': {}
            },
            'node_templates': {
                'node_host': {
                    'type': 'type'
                },
                'node4': {
                    'type': 'type',
                    'relationships': [{
                        'target': 'node3',
                        'type': 'cloudify.relationships.contained_in',
                    }]
                },
                'node3': {
                    'type': 'type',
                    'relationships': [{
                        'target': 'node_host',
                        'type': 'cloudify.relationships.contained_in',
                    }]
                },
                'node2': {
                    'type': 'type',
                    'relationships': [{
                        'target': 'node_host',
                        'type': 'cloudify.relationships.contained_in',
                    }]
                },
                'node': {
                    'type': 'type',
                    'relationships': [{
                        'target': 'node2',
                        'type': 'cloudify.relationships.contained_in',
                    }]
                },
                'outside_node': {
                    'type': 'type'
                }
            },
            'workflows': workflows
        }
        return blueprint
def _instance(ctx, node_name):
return next(ctx.get_node(node_name).instances)
| |
import string
import random
import math
import itertools
import re
import datetime
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.db import IntegrityError
from django.db.models import ObjectDoesNotExist
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
import django.utils.simplejson as json
from jsonfield import JSONField
from django_extensions.db.models import TimeStampedModel
from organization.models import Organization
from history.models import History
# from protocols.helpers import settify, unify
# from protocols.settify import settify
# from protocols.utils import VERB_FORM_DICT
from protocols.utils import MACHINE_VERBS, COMPONENT_VERBS, THERMOCYCLER_VERBS, MANUAL_LAYER, MANUAL_VERBS, settify, labeler, get_timeunit, eval_time, ProtocolChangeLog, DataDiffer
# key under which an action stores its reagent/component list
COMPONENT_KEY = "components"
#MACHINE_VERBS = ['heat', 'chill', 'centrifuge', 'agitate', 'collect', 'cook', 'cool', 'electrophorese', 'incubate', 'shake', 'vortex']
# supported literature-reference identifier types
REFERENCE_TYPES = [('pmid',"PMID"), ('doi',"DOI")]
class Protocol(TimeStampedModel):
    """A lab protocol: relational metadata plus a JSON ``data`` payload
    holding the step/action structure (see ``rebuild_steps``)."""
    # STATUS_DRAFT = "draft"
    # STATUS_PUBLISHED = "published"
    # STATUS = (
    #     (STATUS_DRAFT, _(STATUS_DRAFT)),
    #     (STATUS_PUBLISHED, _(STATUS_PUBLISHED)),
    # )
    # clone source: set when this protocol was cloned from another
    parent = models.ForeignKey("self", blank=True, null=True)
    author = models.ForeignKey(User, blank=True, null=True)
    owner = models.ForeignKey(Organization)
    name = models.CharField(_("Name"), max_length=255, unique=True)
    slug = models.SlugField(_("Slug"), blank=True, null=True, max_length=255)
    duration_in_seconds = models.IntegerField(_("old Duration in seconds"), blank=True, null=True)
    duration = models.CharField(_("Duration in seconds"), blank=True, null=True, max_length=30)
    raw = models.TextField(blank=True, null=True)
    # the protocol structure itself (steps, actions, components)
    data = JSONField(blank=True, null=True)
    description = models.TextField(_("Description"), blank=True, null=True)
    note = models.TextField(_("Notes"), blank=True, null=True)
    # protocol_input = models.CharField(_("Input"), max_length=255, unique=True)
    # protocol_output = models.CharField(_("Output"), max_length=255, unique=True)
    published = models.BooleanField(_("Published"), default=False)
    public = models.BooleanField(_("Public"), default=False)
    # status = models.CharField(_("Status"), max_length=30, default=STATUS_DRAFT, choices=STATUS)
    # version = models.CharField(_("Version"), max_length=100, blank=True, null=True)
    # reference fields -> MOVING TO NEW MODEL
    # url = models.URLField(_("URL"), max_length=255, null=True, blank=True)
    # pmid = models.CharField(_("PMID"), max_length=255, null=True, blank=True)
    # doi_id = models.CharField(_("DOI"), max_length=255, null=True, blank=True)
    # document_id = models.CharField(_("Document ID"), max_length=255, null=True, blank=True)
    def __init__(self, *args, **kwargs):
        # Pre-seed ``data`` so it is always a dict; the model __init__ may
        # replace it with the stored JSON payload, after which the raw
        # step dicts are wrapped in Step objects.
        self.data = {}
        super(Protocol, self).__init__(*args, **kwargs)
        self.rebuild_steps()
def __unicode__(self):
if self.name:
return self.name
else:
return None
    def clone(self, name=None, owner=None, author=None):
        '''Turns the current instance into a clone of the previous one.
        This instance still needs to be saved to be committed.'''
        # CAPTURE PK VALUE, SET PARENT TO PK
        parentid = self.pk
        # SET PK TO None so the next save() inserts a new row
        self.pk = None
        if name:
            self.name = self.generate_name(name)
        else:
            # self.name = self.generate_name(self.owner.name + " " + self.name)
            self.name = self.generate_name(self.name)
        self.slug = self.generate_slug()
        self.published = False
        # NOTE(review): the model declares a ``public`` field, not
        # ``private``; this assignment sets a plain attribute that is never
        # persisted. Possibly meant to be ``self.public = False`` — confirm
        # before changing.
        self.private = True
        self.created = timezone.now()
        self.modified = timezone.now()
        # NEED TO SET THE ORGANIZATION
        if owner:
            self.owner = owner
        if author:
            self.author = author
        self.parent = Protocol.objects.get(pk=parentid)
def save(self, *args, **kwargs):
if not self.name:
if self.data['Name']:
self.name = self.data['Name']
self.update_duration()
if 'editor' in kwargs:
user = kwargs.pop('editor')
else:
user = self.author
# DIFF DATA
# print 'determine old'
if not self.pk and not self.parent_id: # protocol is new
old_state = None
elif not self.pk and self.slug: # protocol is cloned
old_state = Protocol.objects.get(pk = self.parent_id)
else:
old_state = Protocol.objects.get(pk = self.pk) # JUST A PROTOCOL
# print old_state
super(Protocol, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name.
# print 'triggered first save'
new_slug = self.generate_slug()
if not new_slug == self.slug: # Triggered when its a clone method
self.slug = new_slug
super(Protocol, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name.
# print 'triggered second save'
# print 'determine new'
new_state = self
diff = None
diff = ProtocolChangeLog(old_state, new_state)
# LOG THIS HISTORY OBJECT HERE IF THERE IS A DIFF, CURRENTLY MAKE ASSUMPTION THAT THE AUTHOR IS MAKING THE EDITS
if diff.hdf:
History.objects.create(org=self.owner, user=user, protocol=self, htype="EDIT", data=diff.hdf)
def user_has_access(self, user):
if self.published and self.public: # IF IT IS A PUBLIC PUBLISHED PROTOCOL THEN YES
# print "PUBLISHED-PUBLIC"
return True
pk = getattr(user, "pk", None)
if not pk: # NO ANONYMOUS USER ACCESS EXCEPT FOR PUBLIC PROTOCOLS?
return False
if self.author:
if pk == self.author.pk: # IF THEY ARE THE AUTHOR THEN YES
return True
if self.published:
return bool( user.organization_set.filter( pk=self.owner.pk ) ) # IF IT IS PUBLISHED ARE THEY ARE THEY A MEMBER OF THE ORG THEN YES
return False
    ##########
    # URLs — thin wrappers around reverse() for templates/views
    def get_absolute_url(self):
        return reverse("protocol_detail", kwargs={'owner_slug':self.owner.slug, 'protocol_slug': self.slug})
    def protocol_update_url(self):
        return reverse("protocol_update", kwargs={'protocol_slug':self.slug, 'owner_slug':self.owner.slug})
    def step_create_url(self):
        return reverse("step_create", kwargs={'protocol_slug':self.slug, 'owner_slug':self.owner.slug})
    def protocol_publish_url(self):
        return reverse("protocol_publish", kwargs={'protocol_slug':self.slug, 'owner_slug':self.owner.slug})
    def protocol_public_url(self):
        return reverse("protocol_public", kwargs={'protocol_slug':self.slug, 'owner_slug':self.owner.slug})
    def protocol_duplicate_url(self):
        return reverse("protocol_duplicate", kwargs={'protocol_slug':self.slug, 'owner_slug':self.owner.slug})
    def protocol_clone_url(self):
        return reverse("clone_layout_single_view", kwargs={'protocol_a_slug':self.slug})
    def protocol_outline_url(self):
        # NOTE(review): "layout_sinlge_view" looks misspelled but must match
        # the URL pattern name registered in urls.py — verify before renaming.
        return reverse("layout_sinlge_view", kwargs={'protocol_a_slug':self.slug})
    ##########
    # Generators
    def as_dict(self, compressed=False):
        '''
        Similar to the __dict__ method but cleans up fields and properly formats the JSONField.
        Skips private (underscore-prefixed) attributes and serializes
        datetimes as ISO-8601 strings.
        Compressed will ignore root-level fields that are empty.
        '''
        result = {}
        tmp_dict = self.__dict__
        for key, value in tmp_dict.items():
            if compressed and not value:
                continue
            if isinstance(value, datetime.datetime):
                result[key] = value.isoformat()
            elif key[0] != "_":
                result[key] = value
        # result['data'] = dict(self.data)
        # result['pk'] = self.pk
        return result
        # GET A LIST OF ALL THE ATTRS ON THE MODEL
        # SERIALIZE THE ALLOWED ATTRS INTO A DICT
        # ADD THE ID FIELD
        # result['id'] = self.pk
        # ADD THE DATA TO THE DICT
        # RETURN
        # return result
def generate_name(self, name, count=0):
if count:
new_name = "%s-%d" % (name, count)
else:
new_name = "%s" % (name)
try:
Protocol.objects.get(name=new_name)
return self.generate_name(name, count=count + 1)
except ObjectDoesNotExist:
return new_name
def generate_slug(self):
slug = slugify(self.name)
#try:
# Protocol.objects.get(slug=slug)
# return "%s-%d" % (slug, self.pk)
#except ObjectDoesNotExist:
# return slug
if self.pk:
return "%d-%s" % (self.pk, slug)
else:
return slug
    def get_hash_id(self, size=6, chars=string.ascii_lowercase + string.digits):
        '''Always returns a unique ID in the protocol.

        Collects every objectid already used by steps/actions/reagents,
        then retries (recursively) until a random candidate avoids them.
        '''
        uid_list = []
        uid = ''.join(random.choice(chars) for x in range(size))
        for step in self.data['steps']:
            # NOTE(review): if ``step``/``action`` are plain dicts,
            # hasattr(x, 'objectid') is always False and those ids are never
            # collected — confirm the Step/action types before relying on
            # collision avoidance here.
            if hasattr(step, 'objectid'):
                if step['objectid']:
                    uid_list.append(step.objectid)
            for action in step['actions']:
                if hasattr(action, 'objectid'):
                    if action['objectid']:
                        uid_list.append(action['objectid'])
                if COMPONENT_KEY in action.keys():
                    for reagent in action[COMPONENT_KEY]:
                        if 'objectid' in reagent: # hasattr doesn't work here I think because of unicode
                            uid_list.append(reagent['objectid'])
        # print "\nUID: %s" % uid
        if uid not in uid_list:
            return uid
        # collision: draw another candidate
        return self.get_hash_id(size, chars)
    def rebuild_steps(self):
        """Wrap the raw step dicts in ``data['steps']`` in Step objects.

        When ``data`` is unset, seed it with an empty 'steps' list; when
        ``data`` exists but has no 'steps' key, it is left untouched.
        """
        if self.data and 'steps' in self.data:
            self.data['steps'] = [ Step(protocol=self, data=s) for s in self.data['steps'] ]
            #self.steps_data = [ Step(protocol=self, data=s) for s in self.data['steps'] ]
        else:
            if not self.data:
                self.data={'steps':[]}
    # def add_step(self, step):
    #     if not step['objectid'] in [ s['objectid'] for s in self.data['steps'] ]:
    #         print "STEP NOT THERE, ADDING"
    #         #print type(step)
    #         print "IS STEP: %s" % isinstance(step, Step)
    #         self.data['steps'].append(step)
    #         self.rebuild_steps()
    #     else:
    #         print "ALREADY THERE"
def add_node(self, node):
    '''
    Registration point: every node registers itself with the protocol
    here.  Steps get extra bookkeeping (appended to the step list and
    the Step objects rebuilt).  Written this way so other Node types
    with special needs can be handled here too.
    '''
    if not node['objectid'] in self.nodes:
        if isinstance(node, Step): # IF IT IS A STEP GIVE IT THIS EXTRA STEP
            self.data['steps'].append(node)
            self.rebuild_steps()
# else:
# print "NODE NOT THERE, ADDING"
# # IN THIS CASE JUST REGISTER IS WITH THE NODE DICTIONARY
# else:
# print "ALREADY THERE"
# NEED TO ADD ACTIONS TO THE PROTOCOL
###########
# Validators
# def has_changed(self, field):
# if not self.pk:
# return False
# old_value = self.__class__._default_manager.filter(pk=self.pk).values(field).get()[field]
# return not getattr(self, field) == old_value
###########
# Properties
@property
def title(self):
    # Alias so protocols and nodes expose a uniform `title` attribute.
    return self.name
# NEED TO CREATE add AND delete METHODS FOR THE PROPERTY
@property
def steps(self):
    """The protocol's Step nodes, initialising ``self.data`` on first use."""
    if not self.data:
        self.rebuild_steps()
    return self.data['steps']
@property
def status(self):
    """Human-readable visibility + publication state, e.g. 'Public - Draft'."""
    prefix = "Public - " if self.public else "Private - "
    suffix = "Published" if self.published else "Draft"
    return prefix + suffix
# NEED TO CREATE add AND delete METHODS FOR THE PROPERTY
@property
def nodes(self):
    ''' Flat {objectid: node} lookup table covering every step, action,
    thermocycle phase, component, and machine in the protocol. '''
    result = {}
    for step in self.steps:
        result[step['objectid']] = step
        for action in step['actions']:
            result[action['objectid']] = action
            # thermocycle/components are lists of child nodes
            for key in ['thermocycle', 'components']:
                if key in action:
                    for item in action[key]:
                        result[item['objectid']] = item
            # machine is a single child node, not a list
            if 'machine' in action:
                result[action['machine']['objectid']] = action['machine']
    return result
def get_machines(self):
    """Ids of actions whose verb implies a machine."""
    return [r for r in self.get_actions() if self.nodes[r].has_machine()]
def get_actions(self):
    """All action objectids, in step/action order."""
    return [r[2] for r in self.get_action_tree('objectid')]
def get_steps(self):
    """All step objectids, in order."""
    return [r['objectid'] for r in self.steps]
def get_action_durations(self):
    # NOTE(review): only inspects the FIRST action of each step — confirm intended.
    return [a['actions'][0]['duration'] for a in self.steps]
def get_action_verbs(self):
    # NOTE(review): only inspects the FIRST action of each step — confirm intended.
    return [a['actions'][0]['verb'] for a in self.steps]
def get_action_names(self):
    # NOTE(review): only inspects the FIRST action of each step — confirm intended.
    return [a['actions'][0]['name'] for a in self.steps]
def get_components(self):
    """Ids of actions whose verb implies components/reagents."""
    return [r for r in self.get_actions() if self.nodes[r].has_components()]
def get_thermocycle(self):
    """Ids of actions whose verb implies a thermocycler."""
    return [r for r in self.get_actions() if self.nodes[r].has_thermocycler()]
def get_manual(self):
    """Ids of actions whose verb marks them as manual."""
    return [self.nodes[r]['objectid'] for r in self.get_actions() if self.nodes[r].has_manual()]
###########
# delete node properties:
def delete_node(self, node_id):
    """ Remove a child node from the hierarchy by delegating the
    deletion to its parent node. """
    node = self.nodes[node_id]
    parent = node.parent
    parent.delete_child_node(node_id)
def delete_child_node(self, node_id):
    """ Removes the Step with the given objectid from the protocol's step list. """
    self.data['steps'] = [ x for x in self.data['steps'] if not x['objectid'] == node_id ]
# def levels()
###########
# Methods
@property
def get_num_steps(self):
    # Property despite the `get_` name; callers use `self.get_num_steps` bare.
    return len(self.steps)
def get_num_actions(self):
    """Per-step action counts, e.g. [3, 1, 2]."""
    return [len(s['actions']) for s in self.steps]
def get_actions_by_step(self):
    """Verbs of every action, grouped per step: [['mix', 'heat'], ...]."""
    return [[action['verb'] for action in step['actions']] for step in self.steps]
def get_action_tree(self, display = None):
    """[stepnum, actionnum, value] triples for every action in order.

    `value` is the action's objectid when display == 'objectid',
    otherwise its verb.
    """
    key = 'objectid' if display == 'objectid' else 'verb'
    tree = []
    for stepnum, step in enumerate(self.steps):
        for actionnum, action in enumerate(step['actions']):
            tree.append([stepnum, actionnum, action[key]])
    return tree
def update_duration_actions(self):
    """Sum per-action times into a duration string: 'min' or 'min-max'.

    'store' actions are skipped; per-action times come from each
    node's get_children_times().
    """
    min_time = []
    delta_time = []
    for item in self.get_actions():
        if self.nodes[item]['name'] =='store':
            continue
        action_time = self.nodes[item].get_children_times()
        min_time.append(action_time[0])
        # NOTE(review): get_children_times appears to return tuples of
        # length <= 3, so this max-time branch may never fire — confirm
        # the `> 3` threshold.
        if len(action_time) >3:
            delta_time.append(action_time[1]-action_time[0])
    min_duration = sum(min_time)
    delta_duration = sum(delta_time)
    if delta_duration == 0:
        return str(min_duration)
    else:
        return str(min_duration) + '-' + str(min_duration + delta_duration)
def update_duration_steps(self):
    """Total the per-step durations into one 'min' or 'min-max' string.

    Each step reports either "N" or "N-M"; the deltas (M - N) are
    accumulated separately so the protocol-wide maximum is
    min_total + delta_total.
    """
    minimums = []
    deltas = []
    for step in self.steps:
        value = step.update_duration()
        low, dash, high = value.partition('-')
        if dash:
            minimums.append(float(low))
            deltas.append(float(high) - float(low))
        else:
            minimums.append(float(value))
    total_min = sum(minimums)
    total_delta = sum(deltas)
    if total_delta == 0:
        return str(total_min)
    return str(total_min) + '-' + str(total_min + total_delta)
def update_duration(self, debug = False):
    """Recompute duration strings ("min-max" seconds) bottom-up for every
    action, every step, and finally the protocol (self.duration).

    Per-verb rules: manual verbs use a stored/derived time, component
    verbs estimate 30-60s per reagent, thermocycle verbs total phase
    times (multiplying repeated cycle ranges), machine verbs read the
    machine node's min/max time.  Manually entered durations are kept
    unless the action is auto-updating.  (Python-2 debug prints
    retained as-is.)
    """
    min_time = 0
    max_time = 0
    for step in self.steps:
        step_min_time = 0
        step_max_time = 0
        if debug:
            print "Step: %s" % step['name']
        for action in step['actions']:
            # 'store' actions never count toward duration
            if action['name'] == 'store':
                continue
            action_min_time = 0
            action_max_time = 0
            auto_update = False
            if not 'duration' in action:
                action['duration'] = ""
            if action['verb'] in MANUAL_VERBS: # if it should be a manual action, update
                print action['verb']
                # NOTE(review): `'min_time' in action['verb']` tests for the
                # substring "min_time" inside the verb string — looks like it
                # was meant to test the action dict; confirm.
                if 'duration' in action and 'min_time' in action['verb']:
                    time = action['duration'].split('-')
                    if time and time[0]:
                        action_min_time = float(time[0])
                        action_max_time = float(time[1])
                        print '\t input time before method %s-%s' %(action_min_time, action_max_time)
                else:
                    action_min_time = eval_time(action, value = 'min_time')
                    action_max_time = eval_time(action, value = 'max_time')
                    print '\t input time after method %d' %action_min_time
                print "MANUAL TRIGGERED"
                auto_update = True
            # Total Up Machine Time Values Here from the DICT
            else:
                if 'components' in action and action['verb'] in COMPONENT_VERBS: # if it should have components, update
                    # heuristic: 30-60 seconds of handling per reagent
                    action_min_time = float(len(action['components']) * 30 )
                    action_max_time = float(len(action['components']) * 60 )
                    print "COMPONENTS TRIGGERED"
                    auto_update = True
                # Total Up Component Time Values Here from the DICT
                if 'thermocycle' in action and action['verb'] in THERMOCYCLER_VERBS: # if it should have a thermocycle, update
                    min_time_temp = []
                    max_time_temp = []
                    cycles = [r['cycles'] for r in action['thermocycle']]
                    cycle_back_to = [r['cycle_back_to'] for r in action['thermocycle']]
                    for cnt, (C, B) in enumerate(zip(cycles, cycle_back_to)):
                        # Append times of single-phase cycles
                        if C and not B:
                            min_time_temp.append(eval_time(action['thermocycle'][cnt], value = 'min_time'))
                            max_time_temp.append(eval_time(action['thermocycle'][cnt], value = 'max_time'))
                        # Append times of multi-phased cycles
                        if C and B:
                            # phases B..cnt (1-based B) repeat C times
                            phases_in_cycle_min = [eval_time(r, value='min_time') for r in action['thermocycle'][int(B)-1:int(cnt)+1]]
                            phases_in_cycle_max = [eval_time(r, value='max_time') for r in action['thermocycle'][int(B)-1:int(cnt)+1]]
                            # Multiply the cycle number for multi-phased cycle:
                            sum_of_cycles_min = sum(phases_in_cycle_min) * C
                            sum_of_cycles_max = sum(phases_in_cycle_max) * C
                            # append repeating cycle to single cycle phases:
                            min_time_temp.append(sum_of_cycles_min)
                            max_time_temp.append(sum_of_cycles_max)
                    action_min_time = float(sum(min_time_temp))
                    action_max_time = float(sum(max_time_temp))
                    auto_update = True
                # Total Up Machine Time Values Here from the DICT
                if 'machine' in action and 'verb' in action and action['verb'] in MACHINE_VERBS: # Make sure this action is supposed to have a "machine" attribute
                    action_min_time = eval_time(action['machine'], value = 'min_time')
                    action_max_time = eval_time(action['machine'], value = 'max_time')
                    print "MACHINE TRIGGERED"
                    auto_update = True
            # Total Up Machine Time Values Here from the DICT
            if auto_update or not action['duration']: # If this is an autoupdating action or there is no previous manually entered value...
                action['duration'] = "%d-%d" % ( action_min_time, action_max_time )
            if debug:
                print "\t\tAction Duration: %s, %s" % (action['verb'], action['duration'])
            step_min_time += action_min_time
            step_max_time += action_max_time
        step['duration'] = "%d-%d" % ( step_min_time, step_max_time )
        if debug:
            print "\tStep Duration: %s" % (step['duration'])
        min_time += step_min_time
        max_time += step_max_time
    self.duration = "%d-%d" % ( min_time, max_time)
def get_item(self, objectid, item, return_default = None, **kwargs):
    """Look up `item` on the node with the given objectid.

    Tries the node's dict keys first, then falls back to calling or
    reading an attribute of the same name.  Returns None for unknown
    objectids.
    """
    out = None
    call = False
    try:
        call = self.nodes[objectid]
    except KeyError:
        return None
    if call and item in call.keys():
        out = call[item]
    if item not in call.keys():
        try:
            # attribute may be a method (call it) or a plain value
            out = getattr(call, item)()
        except TypeError:
            out = getattr(call, item)
        except AttributeError:
            # NOTE(review): returns None when return_default is truthy and
            # [] otherwise — this looks inverted; confirm intended semantics.
            if return_default:
                out = None
            else:
                out = []
    return out
def action_children_json(self, select = None, **kwargs):
    """Map each action id to the list of its children's ids (or None).

    `select` optionally narrows the node set to 'components',
    'machine', 'manual' (step ids) or 'thermocycle'.
    """
    out = []
    switch = {
        'components': self.get_components(),
        'machine': self.get_machines(),
        'manual': self.get_steps(),
        'thermocycle': self.get_thermocycle()
    }
    selection = self.get_actions()
    if select:
        selection = switch[select]
    for action in selection:
        children = self.nodes[action].children
        if children:
            temp = []
            for child in children:
                temp.append(child['objectid'])
            out.append({action: temp})
        else:
            out.append({action: None})
    return out
def protocol_tree_json(self):
    """Nested outline of the protocol:
    [{step_id: [{action_id: [child ids] or None}, ...]}, ...]."""
    tree = []
    for step_id in self.get_steps():
        action_entries = []
        for action_node in self.nodes[step_id].children:
            action_id = action_node['objectid']
            grandchildren = self.nodes[action_id].children
            if grandchildren:
                action_entries.append({action_id: [g['objectid'] for g in grandchildren]})
            else:
                action_entries.append({action_id: None})
        tree.append({step_id: action_entries})
    return tree
def get_verbatim_text(self, numbers = False):
    '''Return one verbatim-text string per step (empty string when a
    step has none); with numbers=True each entry is prefixed "N. ".
    Returns None if the collected list does not match the step count.'''
    if numbers:
        verbatim = ["%d. " % (idx + 1) + text
                    for idx, text in enumerate(self.get_verbatim_text())]
    else:
        verbatim = [step.get('verbatim_text', "") for step in self.steps]
    return verbatim if len(verbatim) == len(self.steps) else None
class Reference(models.Model):
    """An external reference record attached to one or more protocols."""
    protocol = models.ManyToManyField(Protocol)
    # "#NDF" sentinel default — presumably "no data found"; confirm.
    data = models.CharField(_("Data"), max_length=255, default="#NDF")
    typ = models.CharField(_("Type"), max_length=255, choices=REFERENCE_TYPES)
################
# NODES
################
class NodeBase(dict):
    """Base class for the protocol components.

    Nodes are dicts (the serialized JSON data) plus Python attributes
    (protocol, parent, _meta).  Subclasses set `parent_key_name` to the
    key their parent stores them under and `parent_key_plural` to say
    whether that key holds a list or a single node.
    """
    parent_key_name = None # NAME THE PARENT OBJECT USES TO HOLD THIS OBJECT
    parent_key_plural = True # WHETHER OR NOT THERE ARE MULTIPLE OF THESE OBJECTS UNDER THE PARENT (string vs list)
    # ADD _meta CLASS TO USE SOME EXTRA DB-LIKE FUNCTIONALITY
    default_attrs = ['name', 'objectid']
    class Meta:
        # Minimal Django-_meta lookalike so templates/utilities can
        # enumerate a node's fields.
        def __init__(self, node):
            self.node = node
        def get_all_field_names(self):
            # NOTE(review): relies on dict.keys() returning a list
            # (Python 2); under Python 3 this needs list(...) first.
            result = self.node.keys()
            result.sort()
            return result
    def __init__(self, protocol, parent=None, data={}, **kwargs):
        # NOTE(review): mutable default `data={}` is shared across calls;
        # safe only while nothing mutates the default — confirm.
        super(NodeBase, self).__init__(**kwargs)
        if not self.parent_key_name:
            # default to the lowercased class name, e.g. 'machine'
            self.parent_key_name = self.__class__.__name__.lower()
        self.protocol = protocol
        if parent:
            self.parent = parent
        else:
            self.parent = self.protocol
        data = self.clean_data(data)
        self._meta = NodeBase.Meta(self)
        self.update_data(data)
    def register_with_parent(self):
        """Attach this node under the parent's `parent_key_name` slot,
        appending to a list (plural) or assigning directly (singular),
        without creating duplicates."""
        if self.parent_key_name in self.parent and self.parent[self.parent_key_name]: # CHECK TO SEE IF THE KEY EXISTS
            if self.parent_key_plural: # FALL THROUGH IF NOT PLURAL
                if self['objectid'] not in [x['objectid'] for x in self.parent[self.parent_key_name]]: # CHECK IF THIS IS ALREADY A CHILD OF THE PARENT
                    self.parent[self.parent_key_name].append(self) # IF NOT APPEND
                return # RETURN
        if self.parent_key_plural:
            self.parent[self.parent_key_name] = [self] # ANY OTHER CASE, MAKE SURE THIS IS REGISTERED WITH THE PARENT
        else:
            self.parent[self.parent_key_name] = self # NO HARM IN RE-ASSIGNING IF IT'S ALREADY THE CHILD?
    def clean_data(self, data):
        """Ensure the incoming dict has 'objectid', 'name' and 'slug',
        generating defaults from the protocol's hash-id when missing."""
        if data == None:
            data = {}
        if not 'objectid' in data or not data['objectid']:
            data['objectid'] = self.protocol.get_hash_id()
        if not 'name' in data or not data['name']:
            data['name'] = data['objectid']
        if not 'slug' in data or not data['slug']:
            data['slug'] = slugify(data['objectid'])
        return data
    @property
    def pk(self):
        # Synthetic primary key: "<protocol pk>-<objectid>".
        return "%d-%s" % (self.protocol.pk, self['objectid'])
    @property
    def id(self):
        return self['objectid']
    @property
    def slug(self):
        return self['slug']
    @property
    def graph_label(self):
        return self['name']
    @property
    def node_type(self):
        # Class name as a string, e.g. 'Machine'.
        return self.__class__.__name__
    def update_data(self, data={}, **kwargs):
        """Merge `data` into the node's dict, backfilling 'name' from 'slug'."""
        if data:
            for key in data:
                self[key] = data[key]
        if not 'name' in self or not self['name']:
            self['name'] = self['slug']
    def __unicode__(self):
        return self['slug']
    @property
    def title(self):
        # Breadcrumb-style title built from the parent chain.
        if self.parent:
            return "%s - %s" % (self.parent.title, self['name'])
        else:
            return "%s - %s" % (self.protocol.name, self['name'])
    def delete_child_node(self, node_id):
        """ Removes a Child Node with the given name from the list of nodes """
        print "NOT YET IMPLETMENTED FOR %s (%s): REMOVING -> %s" % (self.__class__, self['objectid'], node_id)
    @property
    def children(self):
        # Leaf nodes override this; base nodes have no children.
        print 'object does not have children'
class Component(NodeBase):
    """A reagent/material belonging to an Action; stored under the
    parent action's 'components' list."""
    parent_key_name = "components"
    def __init__(self, protocol, parent=None, data=None, **kwargs):
        super(Component, self).__init__(protocol, parent=parent, data=data, **kwargs)
        # Legacy records stored the name under 'reagent_name'; adopt it
        # when no usable 'name' is present.  Fixed: the original wrote
        # `not['name']`, i.e. `not ['name']`, which is always False, so
        # the migration never ran.
        # NOTE(review): clean_data defaults 'name' to the objectid, so
        # callers may also want `self['name'] == self['objectid']` here.
        if 'name' in self and not self['name'] and 'reagent_name' in self:
            self['name'] = self.pop("reagent_name")
        self.register_with_parent()
    def get_absolute_url(self):
        return reverse("component_detail", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.parent.slug, 'action_slug':self.parent.slug, 'component_slug':self.slug })
    def get_update_url(self):
        return reverse('component_edit', kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.parent.slug, 'action_slug':self.parent.slug, 'component_slug':self.slug })
    def get_delete_url(self):
        return reverse('component_delete', kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.parent.slug, 'action_slug':self.parent.slug, 'component_slug':self.slug })
    @property
    def label(self):
        """Full (non-shorthand) settings description of the component."""
        return settify(self, summary = False)
    @property
    def summary(self):
        ''' takes self.label as a list and turns it into a dict:
        u'25 degrees Celsius', u'2 minutes' ->
        {temp: '25C', time: '2 min'}'''
        result = settify(self, shorthand = True, summary = True)
        result['name'] = self['name']
        return result
class Machine(NodeBase):
    """The (single) machine used by an Action; stored under the parent
    action's 'machine' key (not a list)."""
    parent_key_name = "machine"
    parent_key_plural = False
    # time / temperature / speed setting fields a machine node may carry
    default_attrs = ['name', 'objectid', 'min_time', 'max_time', 'time_comment', 'time_units', 'min_temp', 'max_temp', 'temp_comment', 'temp_units', 'min_speed', 'max_speed', 'speed_comment', 'speed_units']
    def __init__(self, protocol, parent=None, data=None, **kwargs):
        super(Machine, self).__init__(protocol, parent=parent, data=data, **kwargs)
        self.register_with_parent()
    def get_absolute_url(self):
        return reverse('machine_detail', kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.parent.slug, 'action_slug':self.parent.slug, 'machine_slug':self.slug })
    def get_update_url(self):
        return reverse('machine_edit', kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.parent.slug, 'action_slug':self.parent.slug, 'machine_slug':self.slug })
    def get_delete_url(self):
        return reverse('machine_delete', kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.parent.slug, 'action_slug':self.parent.slug, 'machine_slug':self.slug })
    @property
    def label(self):
        """Shorthand settings description of the machine."""
        return settify(self, shorthand = True)
    @property
    def summary(self):
        ''' takes self.label as a list and turns it into a dict:
        u'25 degrees Celsius', u'2 minutes' ->
        {temp: '25C', time: '2 min'}'''
        result = settify(self, shorthand = True, summary = True)
        result['name'] = self['name']
        return result
class Thermocycle(NodeBase):
    """One thermocycler phase of an Action; stored in the parent
    action's 'thermocycle' list."""
    parent_key_name = "thermocycle"
    def __init__(self, protocol, parent=None, data=None, **kwargs):
        super(Thermocycle, self).__init__(protocol, parent=parent, data=data, **kwargs)
        self.register_with_parent()
    def get_absolute_url(self):
        return reverse("thermocycle_detail", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.parent.slug, 'action_slug':self.parent.slug, 'thermocycle_slug':self.slug })
    def get_update_url(self):
        return reverse('thermocycle_update', kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.parent.slug, 'action_slug':self.parent.slug, 'thermocycle_slug':self.slug })
    def get_delete_url(self):
        # NOTE(review): passes 'component_slug' where the other URLs use
        # 'thermocycle_slug' — confirm against the URLconf.
        return reverse('thermocycle_delete', kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.parent.slug, 'action_slug':self.parent.slug, 'component_slug':self.slug })
    @property
    def label(self):
        """Shorthand settings description of the phase."""
        return settify(self, shorthand = True)
    @property
    def summary(self):
        # Shorthand settings as a dict, plus the phase name.
        tmp = settify(self, shorthand = True, summary = True)
        tmp['name'] = self['name']
        return tmp
class Action(NodeBase):
    """A single operation inside a Step.

    Depending on its verb, an action owns child nodes: a list of
    Component reagents, a list of Thermocycle phases, or a single
    Machine.  The module verb sets (COMPONENT_VERBS, MACHINE_VERBS,
    THERMOCYCLER_VERBS, MANUAL_VERBS) decide which children an action
    is supposed to carry.
    """
    parent_key_name = "actions"
    def __init__(self, protocol, parent=None, data=None, **kwargs):
        super(Action, self).__init__(protocol, parent=parent, data=data, **kwargs)
        self.register_with_parent()
    def update_data(self, data={}, **kwargs):
        """Merge `data` into the node, converting child dicts into
        Component/Thermocycle/Machine node objects."""
        super(Action, self).update_data(data=data, **kwargs)
        if 'component - list' in data: # rename legacy "component - list" to "components"
            data['components'] = data.pop("component - list")
        if 'components' in data: # Convert dictionaries into Component Objects
            self['components'] = [ Component(self.protocol, parent=self, data=c) for c in data['components'] ]
        if 'thermocycle' in data: # Convert dictionaries into Thermocycle Objects
            self['thermocycle'] = [ Thermocycle(self.protocol, parent=self, data=c) for c in data['thermocycle'] ]
        if 'machine' in data and 'verb' in data and data['verb'] in MACHINE_VERBS: # Make sure this action is supposed to have a "machine" attribute
            self['machine'] = Machine(self.protocol, parent=self, data=data['machine'])
        if not self['name']: # Action default name should be the same as the verb
            self['name'] = self['verb']
        if self['name'] == self['objectid']: # repair auto-generated names
            self['name'] = self['verb']
    def get_absolute_url(self):
        return reverse("action_detail", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.slug, 'action_slug':self.slug })
    def action_update_url(self):
        return reverse("action_update", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.slug, 'action_slug':self.slug })
    def action_delete_url(self):
        return reverse("action_delete", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.slug, 'action_slug':self.slug })
    def machine_create_url(self):
        return reverse("machine_create", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.slug, 'action_slug':self.slug })
    def thermocycle_create_url(self):
        return reverse("thermocycle_create", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.slug, 'action_slug':self.slug })
    def component_create_url(self):
        return reverse("component_create", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.parent.slug, 'action_slug':self.slug })
    @property
    def components(self):
        """The action's Component list, or None if it has none."""
        if 'components' in self:
            return self['components']
        return None
    @property
    def machine(self):
        """The action's Machine node, or None if it has none."""
        if 'machine' in self:
            return self['machine']
        return None
    @machine.setter
    def machine(self, value):
        if value.__class__ == Machine:
            self['machine'] = value
        else:
            raise ValueError("Action's machine attribute can only accept a Machine object")
    @property
    def thermocycle(self):
        """The action's Thermocycle phase list, or None if it has none."""
        if 'thermocycle' in self:
            return self['thermocycle']
        return None
    @property
    def summary(self):
        ''' returns a summary for manual objects'''
        return labeler(self)
    @property
    def children(self):
        """This action's child nodes: [machine], components, or thermocycle.

        Fixed: the old code compared ``type(...)`` against the *strings*
        'list'/'NoneType', which is always False, so those guards were
        dead code; the effective precedence (machine, then components,
        then thermocycle) is preserved here.
        """
        if 'machine' in self:
            return [self['machine']]
        if 'components' in self:
            return self['components']
        if 'thermocycle' in self:
            return self['thermocycle']
        return None
    def delete_child_node(self, node_id):
        """
        Removes a Child Node with the given id from this action.
        Though it can be called directly it is meant to be called from the
        protocol and trickle down.
        """
        if 'machine' in self:
            if self['machine']['objectid'] == node_id:
                del( self['machine'] )
                return
        # Fixed: the original filtered with `x['objectid'] is not node_id`,
        # an identity comparison that only worked through string interning;
        # use equality.
        if 'thermocycle' in self and node_id in [r['objectid'] for r in self['thermocycle']]:
            self['thermocycle'] = [ x for x in self['thermocycle'] if x['objectid'] != node_id ]
            return
        if 'components' in self and node_id in [r['objectid'] for r in self['components']]:
            self['components'] = [ x for x in self['components'] if x['objectid'] != node_id ]
    def has_components(self):
        """True when the verb marks this as a components action."""
        if 'verb' in self:
            return self['verb'] in COMPONENT_VERBS
        return False
    def has_machine(self):
        """True when the verb marks this as a machine action."""
        if 'verb' in self:
            return self['verb'] in MACHINE_VERBS
        return False
    def has_thermocycler(self):
        """True when the verb marks this as a thermocycler action."""
        if 'verb' in self:
            return self['verb'] in THERMOCYCLER_VERBS
        return False
    def has_manual(self):
        """True when the verb marks this as a manual action."""
        if 'verb' in self:
            return self['verb'] in MANUAL_VERBS
        return False
    def get_children_times(self, desired_unit = 'sec'):
        ''' method returns a tuple for each action:
        (float(min_time), [,float(max_time)], output_untis, input_units)
        In further versions the time related items will be integrated into a get_time object.
        '''
        if not self.children and not self.childtype() == 'manual':
            return (0, 'sec', 'sec')
        children_time = 0
        kind = self.childtype()
        if kind == "components":
            if self.children:
                # heuristic: 30 seconds of handling per reagent
                children_time = (len(self.children) * 30, 'sec', 'sec')
        if kind == "manual":
            children_time = get_timeunit(self.summary['time'])
        if kind == "machine":
            children_time = get_timeunit(self.children[0].summary['time'])
        if kind == "thermocycle":
            tmp_time = [0, 'sec']
            cycles = [r.summary['cycles'] for r in self.children]
            back_tos = [r.summary['cycle_back_to'] for r in self.children]
            # (loop variables renamed; the original shadowed the
            # `cycle_back_to` list with its own elements)
            for cnt, (cycle, back_to) in enumerate(zip(cycles, back_tos)):
                # Add times of single-phase cycles
                if cycle and not back_to:
                    tmp = get_timeunit(self.children[cnt].summary['time'])
                    tmp_time[0] = tmp_time[0] + tmp[0]
                # Add times of multi-phased (repeating) cycles
                if cycle and back_to:
                    # NOTE(review): slice ends at int(cnt) here while the
                    # protocol-level update_duration uses int(cnt)+1 —
                    # confirm which bound is intended.
                    phases_in_cycle = [get_timeunit(r.summary['time']) for r in self.children[int(back_to)-1:int(cnt)]]
                    sum_of_cycles = sum(t[0] for t in phases_in_cycle)
                    tmp_time[0] = tmp_time[0] + (float(sum_of_cycles) * float(cycle))
            children_time = tuple(tmp_time)
        return children_time
    def update_duration(self):
        """Return this action's duration string: 'min' or 'min-max'."""
        max_duration = None
        value = self.get_children_times()
        min_duration = str(value[0])
        # NOTE(review): get_children_times returns tuples of length 2-3,
        # so `len(value) > 3` never fires and max_duration stays None —
        # confirm the intended threshold.
        if len(value) > 3:
            max_duration = str(value[1])
        if max_duration:
            return str(min_duration) + '-' + str(max_duration)
        return str(min_duration)
    def childtype(self):
        """Classify the action by verb: 'components', 'machine',
        'thermocycle', 'manual', or None."""
        if 'verb' in self:
            if self['verb'] in COMPONENT_VERBS:
                return 'components'
            if self['verb'] in MACHINE_VERBS:
                return 'machine'
            if self['verb'] in THERMOCYCLER_VERBS:
                return 'thermocycle'
            if self['verb'] in MANUAL_VERBS:
                return 'manual'
        return None
class Step(NodeBase):
    """A protocol step: an ordered container of Action nodes, stored in
    the protocol's 'steps' list."""
    parent_key_name = "steps"
    def update_data(self, data={}, **kwargs):
        """Merge `data` into the step, converting action dicts into
        Action objects and registering the step with the protocol."""
        super(Step, self).update_data(data=data, **kwargs)
        if 'actions' in data:
            self['actions'] = [ Action(self.protocol, parent=self, data=a) for a in data['actions'] ]
        else:
            self['actions'] = []
        self.protocol.add_node(self)
    def register_with_parent(self):
        # Steps register through the protocol, not the generic
        # NodeBase parent-key mechanism.
        self.protocol.add_node(self)
    def get_absolute_url(self):
        return reverse("step_detail", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.slug })
    def step_update_url(self):
        return reverse("step_update", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.slug })
    def add_action_url(self):
        return reverse("action_create", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.slug })
    def action_verb_list_url(self):
        return reverse("action_verb_list", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.slug })
    def step_delete_url(self):
        return reverse("step_delete", kwargs={'owner_slug':self.protocol.owner.slug, 'protocol_slug': self.protocol.slug, 'step_slug':self.slug })
    def add_child_node(self, action):
        # Append an Action to this step.
        self['actions'].append(action)
    def delete_child_node(self, node_id):
        """
        Removes a Child Node with the given name from the list of nodes
        Though it can be called directly it is meant to be called from the protocol and trickle down
        """
        self['actions'] = [ x for x in self['actions'] if not x['objectid'] == node_id ]
    @property
    def actions(self):
        # The step's Action list, or None if never initialised.
        if 'actions' in self:
            return self['actions']
        else:
            return None
    @property
    def children(self):
        # A step's children are its actions.
        if 'actions' in self:
            return self['actions']
        else:
            return None
    def update_duration(self):
        """Total the step's action times into 'min' or 'min-max' (seconds).

        'store' actions are skipped; deltas (max - min) are summed
        separately so the upper bound is min_total + delta_total.
        """
        min_time = []
        delta_time = []
        for item in self.children:
            if item['name'] =='store':
                continue
            action_time = item.get_children_times()
            min_time.append(action_time[0])
            # NOTE(review): get_children_times returns tuples of length
            # <= 3, so this `> 3` branch may never fire — confirm.
            if len(action_time) >3:
                delta_time.append(action_time[1]-action_time[0])
        min_duration = sum(min_time)
        delta_duration = sum(delta_time)
        if delta_duration == 0:
            return str(min_duration)
        else:
            return str(min_duration) + '-' + str(min_duration + delta_duration)
# NEED TO UPDATE URLS TO USE THE BELOW METHOD
# def __getitem__(self, key):
# val = dict.__getitem__(self, key)
# if key == "slug":
# val = slugify(dict.__getitem__(self, 'name'))
# return val
#def get_hash_id(self, size=6, chars=string.ascii_lowercase + string.digits):
# '''Always returns a unique ID in the protocol'''
# uid_list = []
# uid = ''.join(random.choice(chars) for x in range(size))
# return uid
# @property
# def actions(self):
# return
class ProtocolHistoryDiffer(object):
    '''
    Buckets protocol node changes into add / update / delete lists.
    Change records look like:
    [
        {'id':"XXXXXX", 'event':"add", data: {} },
        {'id':"XXXXXX", 'event':"update", data: {} },
        {'id':"XXXXXX", 'event':"delete" },
    ]
    '''
    # Class-level defaults kept for backward compatibility; per-instance
    # lists are created in __init__.  (The original had ONLY class
    # attributes, so every differ instance shared — and accumulated
    # into — the same three lists.)
    add = []
    update = []
    delete = []
    def __init__(self):
        self.add = []
        self.update = []
        self.delete = []
    def parse_changes(self, protocol):
        # Not yet implemented: diff the given protocol into parts.
        # for node in parsed_changes:
        #     if add:      self.add.append( {'id':node.node_id, 'data':node.new_data_dict })
        #     elif delete: self.delete.append( {'id':node.node_id, 'data':node.new_data_dict })
        #     else:        self.update.append( {'id':node.node_id, 'data':node.new_data_dict })
        pass
| |
import urllib.parse
import bs4
import copy
import hashlib
import os.path
import traceback
import WebRequest
from WebMirror.processor.ProcessorBase import PageProcessor
# import TextScrape.SiteArchiver
import common.util.urlFuncs as urlFuncs
import WebMirror.processor.ProcessorUtils.gDocParse as gdp
# import TextScrape.RelinkLookup
# import TextScrape.RELINKABLE as RELINKABLE
########################################################################################################################
#
# ## ## ### #### ## ## ###### ## ### ###### ######
# ### ### ## ## ## ### ## ## ## ## ## ## ## ## ## ##
# #### #### ## ## ## #### ## ## ## ## ## ## ##
# ## ### ## ## ## ## ## ## ## ## ## ## ## ###### ######
# ## ## ######### ## ## #### ## ## ######### ## ##
# ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ##
# ## ## ## ## #### ## ## ###### ######## ## ## ###### ######
#
########################################################################################################################
class GdocPageProcessor(PageProcessor):
    """Processor that extracts cleaned content from a Google Docs page.

    Fetches the document through gDocParse.GDocExtractor, strips Google's
    inline styling, relinks embedded resources to local render URLs, and
    returns the page body plus any extracted resource files.
    """

    # Mimetypes this processor will accept from the dispatcher.
    wanted_mimetypes = ['text/html']
    want_priority = 90

    @staticmethod
    def wantsUrl(url):
        # isGdocUrl() returns a tuple; index 0 is the boolean verdict.
        return urlFuncs.isGdocUrl(url)[0]

    loggerPath = "Main.Text.GdocPageProcessor"

    def __init__(self, pageUrl, pgContent, loggerPath, relinkable, scannedDomains=None, tlds=None, **kwargs):
        """Set up the domain-filtering structures for outgoing-link checks.

        NOTE(review): pgContent is accepted for interface compatibility but
        never stored -- the document is re-fetched via GDocExtractor.
        """
        self.loggerPath = loggerPath+".GDocExtract"
        self.pageUrl = pageUrl

        self._relinkDomains = set()
        for url in relinkable:
            self._relinkDomains.add(url)

        self._tld = set()
        self._scannedDomains = set()

        # Tell the path filtering mechanism that we can fetch google doc files
        # Not switchable, since not fetching google docs content from a google docs page
        # wouldn't work too well.
        self._scannedDomains.add('https://docs.google.com/document/')
        self._scannedDomains.add('https://docs.google.com/spreadsheets/')
        self._scannedDomains.add('https://drive.google.com/folderview')
        self._scannedDomains.add('https://drive.google.com/open')

        if not scannedDomains:
            scannedDomains = []
        if not tlds:
            tlds = []

        # Build the filtering structures for checking outgoing links.
        for tld in tlds:
            self._tld.add(tld)

        if isinstance(scannedDomains, (set, list)):
            for url in scannedDomains:
                self.installBaseUrl(url)
        else:
            self.installBaseUrl(scannedDomains)

        # File mapping LUT: original resource filename -> content hash.
        self.fMap = {}

    def installBaseUrl(self, url):
        """Register ``url``'s netloc (plus host-specific variants) as scanned.

        Wordpress and blogspot hosts get extra synthesized netloc variants,
        since a single site can be spread across several hostnames/TLDs.

        Raises ValueError if ``url`` has no netloc at all.
        """
        netloc = urllib.parse.urlsplit(url.lower()).netloc
        if not netloc:
            raise ValueError("One of the scanned domains collapsed down to an empty string: '%s'!" % url)

        # Generate the possible wordpress netloc values.
        if 'wordpress.com' in netloc:
            subdomain, mainDomain, tld = netloc.rsplit(".")[-3:]
            self._scannedDomains.add("www.{sub}.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))
            self._scannedDomains.add("{sub}.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))
            self._scannedDomains.add("www.{sub}.files.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))
            self._scannedDomains.add("{sub}.files.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))

        # Blogspot is annoying and sometimes a single site is spread over several tlds. *.com, *.sg, etc...
        if 'blogspot.' in netloc:
            subdomain, mainDomain, tld = netloc.rsplit(".")[-3:]
            self._tld.add(tld)
            for tld in self._tld:
                self._scannedDomains.add("www.{sub}.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))
                self._scannedDomains.add("{sub}.{main}.{tld}".format(sub=subdomain, main=mainDomain, tld=tld))

        if 'sites.google.com/site/' in url:
            self._scannedDomains.add(url)
        elif 'google.' in netloc:
            self.log.info("Skipping URL: '%s'", url)
        else:
            base, tld = netloc.rsplit(".", 1)
            self._tld.add(tld)
            for tld in self._tld:
                self._scannedDomains.add("{main}.{tld}".format(main=base, tld=tld))

    ####################################################################################################################
    # Gdoc resource / page processing
    ####################################################################################################################

    def processGdocResources(self, resources):
        """Hash and classify resource files extracted from a google doc.

        Parameters:
            resources: iterable of (fName, mimeType, content) tuples.

        Returns:
            list of (fName, mimeType, content, pseudoUrl) tuples, where
            pseudoUrl is 'gdoc-<md5 hexdigest of content>'. Resources with
            unrecognized mimetypes are logged and dropped.
        """
        # Expected format of tuples in ret:
        # fName, mimeType, content, fHash
        ret = []
        for fName, mimeType, content in resources:
            m = hashlib.md5()
            m.update(content)
            fHash = m.hexdigest()
            pseudoUrl = "gdoc-"+fHash

            # Map the original (full) filename to its hash for relinking.
            self.fMap[fName] = fHash
            fName = os.path.split(fName)[-1]

            self.log.info("Resource = '%s', '%s', '%s'", fName, mimeType, pseudoUrl)

            if mimeType in ["image/gif", "image/jpeg", "image/pjpeg", "image/png", "image/svg+xml", "image/vnd.djvu"]:
                self.log.info("Processing resource '%s' as an image file. (mimetype: %s)", fName, mimeType)
                ret.append((fName, mimeType, content, pseudoUrl))
            elif mimeType in ["application/octet-stream"]:
                self.log.info("Processing '%s' as an binary file.", fName)
                ret.append((fName, mimeType, content, pseudoUrl))
            else:
                # FIX: Logger.warn() is a deprecated alias for warning().
                self.log.warning("Unknown MIME Type? '%s', FileName: '%s'", mimeType, fName)

        if len(resources) == 0:
            self.log.info("File had no resource content!")

        return ret

    def cleanGdocPage(self, soup, url):
        """Strip Google Docs inline styling from ``soup``; return (title, soup)."""
        title = self.extractTitle(soup, url)

        # Google wraps every text run in a styled <span>; flatten them all.
        for span in soup.find_all("span"):
            span.unwrap()
        for style in soup.find_all('style'):
            style.decompose()
        # Class attributes reference the (now removed) inline stylesheets.
        for tag in soup.find_all(attrs = {'class' : True}):
            del tag['class']
        return title, soup

    # Hook so plugins can modify the internal URLs as part of the relinking process
    def preprocessGdocReaderUrl(self, inUrl):
        """Normalize a gdoc URL by trimming a trailing '/preview' segment."""
        if inUrl.lower().endswith("/preview"):
            inUrl = inUrl[:-len("/preview")]
        return inUrl

    def convertToGdocReaderImage(self, srcUrl):
        """Translate an in-document resource URL to a local render URL.

        Raises ValueError if the URL doesn't end with any known resource name.
        """
        itemHash = None
        # fMap keys are resource filenames; match on URL suffix.
        for rscEnd in self.fMap:
            if srcUrl.endswith(rscEnd):
                itemHash = self.fMap[rscEnd]

        if not itemHash:
            raise ValueError("Unknown image URL! = '%s' (hash '%s')" % (srcUrl, itemHash))

        url = '/books/render?mdsum=%s' % urllib.parse.quote(itemHash)
        return url

    def processGdocPage(self, url, content):
        """Clean, relink and unwrap one fetched gdoc page.

        Returns (plainLinks, imageLinks, pgTitle, pgBody).
        """
        dummy_fName, content = content
        soup = WebRequest.as_soup(content)
        urlFuncs.canonizeUrls(soup, url)
        pgTitle, soup = self.cleanGdocPage(soup, url)

        plainLinks = self.extractLinks(soup, url)
        self.log.info("Page title = '%s'", pgTitle)
        soup = self.relink(soup, imRelink=self.convertToGdocReaderImage)

        url = self.preprocessGdocReaderUrl(url)
        url = urlFuncs.trimGDocUrl(url)
        # Since the content we're extracting will be embedded into another page, we want to
        # strip out the <body> and <html> tags. `unwrap()` replaces the soup with the contents of the
        # tag it's called on. We end up with just the contents of the <body> tag.
        soup.body.unwrap()
        pgBody = soup.prettify()

        # No image links, since they're served as resource files in a google doc
        imageLinks = []
        return plainLinks, imageLinks, pgTitle, pgBody

    def retreiveGoogleDoc(self, url):
        """Fetch a google doc, retrying up to 3 times on an empty main page.

        Returns (plainLinks, imageLinks, pgTitle, pgBody, resources).

        Raises urlFuncs.CannotAccessGDocException when extraction fails or
        the retry budget is exhausted.
        """
        self.log.info("Should fetch google doc at '%s'", url)
        doc = gdp.GDocExtractor(url)

        attempts = 0
        mainPage = None
        while 1:
            attempts += 1
            try:
                mainPage, resources = doc.extract()
            except TypeError:
                self.log.critical('Extracting item failed!')
                for line in traceback.format_exc().strip().split("\n"):
                    self.log.critical(line.strip())
                raise urlFuncs.CannotAccessGDocException("Cannot access google doc! Is it protected?")
            if mainPage:
                break
            if attempts > 3:
                # BUGFIX: this used to `raise TextScrape.SiteArchiver.DownloadException`,
                # but the TextScrape import is commented out at module level, so the
                # statement could only ever raise NameError. Raise the gdoc-access
                # error used above instead.
                raise urlFuncs.CannotAccessGDocException(
                    "Failed to extract google doc after %d attempts!" % attempts)

        resources = self.processGdocResources(resources)

        return self.processGdocPage(url, mainPage) + (resources, )

    # Process a Google-Doc resource page.
    # This call does a set of operations to permute and clean a google doc page.
    def extractContent(self):
        """Entry point: fetch and process self.pageUrl; returns a result dict."""
        plainLinks, imageLinks, pgTitle, pgBody, resources = self.retreiveGoogleDoc(self.pageUrl)
        ret = {}
        ret['plainLinks'] = plainLinks
        ret['rsrcLinks'] = imageLinks
        ret['title'] = pgTitle
        ret['contents'] = pgBody
        ret['resources'] = resources
        return ret
def test():
    """Ad-hoc manual smoke test: fetch and dump one known google doc.

    Requires network access and the project's logging setup; not a unit test.
    """
    print("Test mode!")
    import WebRequest
    import logSetup
    logSetup.initLogging()
    wg = WebRequest.WebGetRobust()
    # content = wg.getpage('http://www.arstechnica.com')

    # BUGFIX: the old call passed only three positional arguments (so
    # 'Main.Test' landed in pgContent and 'testinating' in loggerPath, and
    # the required `relinkable` argument was missing entirely -> TypeError).
    # Also, extractContent() returns a single dict, not an (extr, rsc) pair.
    scraper = GdocPageProcessor(
        'https://docs.google.com/document/d/1atXMtCutHRpcHwSRS5UyMAC58_gQjMPR2dDVn1LCD3E',
        None,
        'Main.Test',
        [])
    print(scraper)
    result = scraper.extractContent()
    print('Plain Links:')
    for link in result['plainLinks']:
        print(link)
    print()
    print()
    print('Resource files:')
    for fName, mimeType, content, pseudoUrl in result['resources']:
        print(fName, mimeType, pseudoUrl)

if __name__ == "__main__":
    test()
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2010, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
#
# From: http://code.google.com/p/netaddr/
# https://github.com/drkjam/netaddr/
"""Fallback routines for Python's standard library socket module"""
from struct import unpack as _unpack, pack as _pack
#from netaddr.compat import _bytes_join
def _bytes_join(*args): return ''.join(*args)
AF_INET = 2
AF_INET6 = 10
#-----------------------------------------------------------------------------
def inet_ntoa(packed_ip):
    """
    Convert an IP address from 32-bit packed binary format to string format.
    """
    # Both str and bytes expose split(); anything else is rejected up front.
    if not hasattr(packed_ip, 'split'):
        raise TypeError('string type expected, not %s' % str(type(packed_ip)))
    if len(packed_ip) != 4:
        raise ValueError('invalid length of packed IP address string')
    return '.'.join('%d' % octet for octet in _unpack('4B', packed_ip))
#-----------------------------------------------------------------------------
def inet_aton(ip_string):
    """
    Convert an IP address in string format (123.45.67.89) to the 32-bit packed
    binary format used in low-level network functions.

    Supports the classic inet_aton() extensions: hexadecimal ('0x7f') and
    octal ('0177') octets, and short forms with fewer than four octets.

    Raises ValueError for malformed addresses or non-string input.
    """
    if hasattr(ip_string, 'split'):
        invalid_addr = ValueError('illegal IP address string %r' % ip_string)

        # Support for hexadecimal and octal octets.
        tokens = []
        for token in ip_string.split('.'):
            # BUGFIX: the numeric base must be decided per octet. The original
            # kept one `base` variable across iterations, so a plain decimal
            # octet following a hex octet was silently parsed as base 16
            # (e.g. '0x10.12.0.1' parsed '12' as 18).
            base = 10
            if token.startswith('0x'):
                base = 16
            elif token.startswith('0') and len(token) > 1:
                base = 8
            elif token == '':
                continue
            try:
                tokens.append(int(token, base))
            except ValueError:
                raise invalid_addr

        # Zero fill missing octets.
        num_tokens = len(tokens)
        if num_tokens < 4:
            fill_tokens = [0] * (4 - num_tokens)
            if num_tokens > 1:
                # The final token fills the low-order byte: '127.1' -> 127.0.0.1.
                end_token = tokens.pop()
                tokens = tokens + fill_tokens + [end_token]
            else:
                tokens = tokens + fill_tokens

        # Pack octets.
        if len(tokens) == 4:
            words = []
            for token in tokens:
                if (token >> 8) != 0:
                    raise invalid_addr
                words.append(_pack('B', token))
            # BUGFIX: join with a bytes separator; the module-level
            # _bytes_join() helper joined with str and fails on py3.
            return b''.join(words)
        else:
            raise invalid_addr

    raise ValueError('argument should be a string, not %s' % type(ip_string))
#-----------------------------------------------------------------------------
def _compact_ipv6_tokens(tokens):
    """Collapse the best run of zero hextets for the '::' shorthand.

    Given a list of hex-string hextets, replaces the longest (left-most on
    ties) run of two or more '0' tokens with placeholder empty strings so
    that ':'.join() on the result renders the compact '::' form.
    """
    new_tokens = []

    positions = []
    start_index = None
    num_tokens = 0

    # Discover all runs of zeros.
    for idx, token in enumerate(tokens):
        if token == '0':
            if start_index is None:
                start_index = idx
            num_tokens += 1
        else:
            # Run ended: only runs longer than one token are worth compacting.
            if num_tokens > 1:
                positions.append((num_tokens, start_index))
            start_index = None
            num_tokens = 0

        new_tokens.append(token)

    # Store any position not saved before loop exit.
    if num_tokens > 1:
        positions.append((num_tokens, start_index))

    # Replace first longest run with an empty string.
    if len(positions) != 0:
        # Locate longest, left-most run of zeros.
        # Sorting by start index plus the strictly-greater comparison below
        # keeps the left-most run when several runs share the max length.
        positions.sort(key=lambda x: x[1])
        best_position = positions[0]
        for position in positions:
            if position[0] > best_position[0]:
                best_position = position
        # Replace chosen zero run.
        (length, start_idx) = best_position
        new_tokens = new_tokens[0:start_idx] + [''] + \
            new_tokens[start_idx+length:]

        # Add start and end blanks so join creates '::'.
        if new_tokens[0] == '':
            new_tokens.insert(0, '')

        if new_tokens[-1] == '':
            new_tokens.append('')

    return new_tokens
#-----------------------------------------------------------------------------
def inet_ntop(af, packed_ip):
    """Convert an packed IP address of the given family to string format."""
    if af == AF_INET:
        # IPv4 -- delegate to the dotted-quad formatter.
        return inet_ntoa(packed_ip)

    if af == AF_INET6:
        if len(packed_ip) != 16 or not hasattr(packed_ip, 'split'):
            raise ValueError('invalid length of packed IP address string')

        words = list(_unpack('>8H', packed_ip))
        tokens = ['%x' % word for word in words]

        # Fold the eight 16-bit words into one 128-bit integer so the
        # IPv4-compatible (::a.b.c.d) and IPv4-mapped (::ffff:a.b.c.d)
        # ranges can be recognised.
        int_val = 0
        for shift, word in enumerate(reversed(words)):
            int_val = int_val | (word << (16 * shift))

        if 0xffff < int_val <= 0xffffffff or int_val >> 32 == 0xffff:
            # Render the low 32 bits as an embedded dotted quad.
            packed_ipv4 = _pack('>2H', *[int(tok, 16) for tok in tokens[-2:]])
            tokens = tokens[0:-2] + [inet_ntoa(packed_ipv4)]

        return ':'.join(_compact_ipv6_tokens(tokens))

    raise ValueError('unknown address family %d' % af)
#-----------------------------------------------------------------------------
def _inet_pton_af_inet(ip_string):
"""
Convert an IP address in string format (123.45.67.89) to the 32-bit packed
binary format used in low-level network functions. Differs from inet_aton
by only support decimal octets. Using octal or hexadecimal values will
raise a ValueError exception.
"""
#TODO: optimise this ... use inet_aton with mods if available ...
if hasattr(ip_string, 'split'):
invalid_addr = ValueError('illegal IP address string %r' % ip_string)
# Support for hexadecimal and octal octets.
tokens = ip_string.split('.')
# Pack octets.
if len(tokens) == 4:
words = []
for token in tokens:
if token.startswith('0x') or \
(token.startswith('0') and len(token) > 1):
raise invalid_addr
try:
octet = int(token)
except ValueError:
raise invalid_addr
if (octet >> 8) != 0:
raise invalid_addr
words.append(_pack('B', octet))
return _bytes_join(words)
else:
raise invalid_addr
raise ValueError('argument should be a string, not %s' % type(ip_string))
#-----------------------------------------------------------------------------
def inet_pton(af, ip_string):
    """
    Convert an IP address from string format to a packed string suitable for
    use with low-level network functions.

    Supports AF_INET (strict decimal dotted quads) and AF_INET6 (compact
    '::' form, verbose form, and the embedded-IPv4 variants of each).

    Raises ValueError for malformed addresses or unknown address families.
    """
    if af == AF_INET:
        # IPv4.
        return _inet_pton_af_inet(ip_string)
    elif af == AF_INET6:
        invalid_addr = ValueError('illegal IP address string %r' % ip_string)
        # IPv6.
        values = []

        if not hasattr(ip_string, 'split'):
            raise invalid_addr

        if 'x' in ip_string:
            # Don't accept hextets with the 0x prefix.
            raise invalid_addr

        if '::' in ip_string:
            if ip_string == '::':
                # Unspecified address.
                return b'\x00' * 16
            # IPv6 compact mode.
            try:
                prefix, suffix = ip_string.split('::')
            except ValueError:
                raise invalid_addr

            l_prefix = prefix.split(':') if prefix != '' else []
            l_suffix = suffix.split(':') if suffix != '' else []

            # IPv6 compact IPv4 compatibility mode.
            if len(l_suffix) and '.' in l_suffix[-1]:
                ipv4_str = _inet_pton_af_inet(l_suffix.pop())
                l_suffix.append('%x' % _unpack('>H', ipv4_str[0:2])[0])
                l_suffix.append('%x' % _unpack('>H', ipv4_str[2:4])[0])

            token_count = len(l_prefix) + len(l_suffix)

            if not 0 <= token_count <= 8 - 1:
                raise invalid_addr

            # BUGFIX: validate the hextets BEFORE building the packed values.
            # The original built `values` first, so a malformed hextet (e.g.
            # 'zz::1') escaped as a bare ValueError from int() instead of the
            # intended invalid_addr error.
            try:
                for token in l_prefix + l_suffix:
                    word = int(token, 16)
                    if not 0 <= word <= 0xffff:
                        raise invalid_addr
            except ValueError:
                raise invalid_addr

            gap_size = 8 - (len(l_prefix) + len(l_suffix))

            values = [_pack('>H', int(i, 16)) for i in l_prefix] \
                   + [b'\x00\x00' for i in range(gap_size)] \
                   + [_pack('>H', int(i, 16)) for i in l_suffix]
        else:
            # IPv6 verbose mode.
            if ':' in ip_string:
                tokens = ip_string.split(':')

                if '.' in ip_string:
                    ipv6_prefix = tokens[:-1]
                    if ipv6_prefix[:-1] != ['0', '0', '0', '0', '0']:
                        raise invalid_addr
                    if ipv6_prefix[-1].lower() not in ('0', 'ffff'):
                        raise invalid_addr
                    # IPv6 verbose IPv4 compatibility mode.
                    if len(tokens) != 7:
                        raise invalid_addr
                    ipv4_str = _inet_pton_af_inet(tokens.pop())
                    tokens.append('%x' % _unpack('>H', ipv4_str[0:2])[0])
                    tokens.append('%x' % _unpack('>H', ipv4_str[2:4])[0])

                    values = [_pack('>H', int(i, 16)) for i in tokens]
                else:
                    # IPv6 verbose mode.
                    if len(tokens) != 8:
                        raise invalid_addr
                    try:
                        tokens = [int(token, 16) for token in tokens]
                        for token in tokens:
                            if not 0 <= token <= 0xffff:
                                raise invalid_addr
                    except ValueError:
                        raise invalid_addr

                    values = [_pack('>H', i) for i in tokens]
            else:
                raise invalid_addr

        # BUGFIX: join with a bytes separator; the module-level
        # _bytes_join() helper joined with str and fails on py3.
        return b''.join(values)
    else:
        raise ValueError('Unknown address family %d' % af)
| |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Insidetextfont(_BaseTraceHierarchyType):
    """Font settings for `textinfo` labels drawn inside treemap sectors.

    Auto-generated plotly graph-object wrapper; each property validates
    and forwards to the underlying plotly.js attribute
    'treemap.insidetextfont.<prop>'.
    """

    # class properties
    # --------------------
    _parent_path_str = "treemap"
    _path_str = "treemap.insidetextfont"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `family`.

        The 'familysrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `size`.

        The 'sizesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Insidetextfont object

        Sets the font used for `textinfo` lying inside the sector.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.treemap.Insidetextfont`
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.

        Returns
        -------
        Insidetextfont
        """
        super(Insidetextfont, self).__init__("insidetextfont")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.treemap.Insidetextfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.Insidetextfont`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training.moving_averages when using a DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.training import moving_averages
# Every distribution-strategy flavour the moving-average ops are exercised
# against: default/one-device, mirrored, central-storage, TPU, and several
# multi-worker collective configurations.
all_distributions = [
    strategy_combinations.default_strategy,
    strategy_combinations.one_device_strategy,
    strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
    strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
    strategy_combinations.tpu_strategy,
    strategy_combinations.multi_worker_mirrored_2x1_cpu,
    strategy_combinations.multi_worker_mirrored_2x1_gpu,
    strategy_combinations.multi_worker_mirrored_2x2_gpu,
    strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
    strategy_combinations.multi_worker_mirrored_4x1_cpu,
]

# Graph-mode test parameterizations.
all_combinations = combinations.combine(
    distribution=all_distributions, mode=["graph"])

# Eager-mode parameterizations, run both with and without tf.function wrapping.
all_combinations_eager = combinations.combine(
    distribution=all_distributions, mode=["eager"], use_function=[True, False])
class AssignMovingAveragesTest(test.TestCase, parameterized.TestCase):
  """Tests moving_averages.assign_moving_average under each strategy."""

  @combinations.generate(all_combinations)
  def testReplicaModeWithoutZeroDebias(self, distribution):
    """Replica-context update with zero_debias=False: plain EMA arithmetic."""
    replica_id = [0]

    def replica_fn():
      var = variables.Variable([10.0, 11.0])
      # replica_id is captured by reference so each replica's call sees (and
      # then bumps) a distinct id, producing different `val`s per replica.
      val = constant_op.constant([1.0 + replica_id[0], 2.0 - replica_id[0]])
      replica_id[0] += 1
      decay = 0.25
      assign = moving_averages.assign_moving_average(
          var, val, decay, zero_debias=False)
      return var, assign

    with distribution.scope():
      var, assign = distribution.extended.call_for_each_replica(replica_fn)
      self.evaluate(variables.global_variables_initializer())
      # Before the assign runs, the variable holds its initial value.
      self.assertAllClose([10.0, 11.0], self.evaluate(var))
      self.evaluate(distribution.experimental_local_results(assign))
      # Mean of val across calls to replica_fn().
      average_val = [1.0 + 0.5 * (replica_id[0] - 1),
                     2.0 - 0.5 * (replica_id[0] - 1)]
      val_weight = 1.0 - 0.25
      # Without zero-debias: new = old * decay + mean(val) * (1 - decay).
      self.assertAllClose(
          [10.0 * 0.25 + average_val[0] * val_weight,
           11.0 * 0.25 + average_val[1] * val_weight],
          self.evaluate(var))

  @combinations.generate(all_combinations)
  def testReplicaMode(self, distribution):
    """Replica-context update with zero-debias: first step yields mean(val)."""
    replica_id = [0]

    def replica_fn():
      var = variables.Variable([0.0, 0.0])
      val = constant_op.constant([1.0 + replica_id[0], 2.0 - replica_id[0]])
      replica_id[0] += 1
      decay = 0.25
      assign = moving_averages.assign_moving_average(var, val, decay)
      return var, assign.op

    with distribution.scope():
      var, assign_op = distribution.extended.call_for_each_replica(replica_fn)
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose([0.0, 0.0], self.evaluate(var))
      self.evaluate(distribution.experimental_local_results(assign_op))
      # Mean of val across calls to replica_fn().
      average_val = [1.0 + 0.5 * (replica_id[0] - 1),
                     2.0 - 0.5 * (replica_id[0] - 1)]
      # Zero-debias makes the first update equal to the raw mean.
      self.assertAllClose(average_val, self.evaluate(var))

  @combinations.generate(all_combinations)
  def testCrossDeviceWithoutZeroDebias(self, distribution):
    """Cross-replica-context update, zero_debias=False, evaluated twice."""
    with distribution.scope():
      var = variables.Variable([10.0, 11.0])
      val = constant_op.constant([1.0, 2.0])
      decay = 0.25
      # NOTE(josh11b): We currently generate an error if val is a PerReplica
      # value.
      assign = moving_averages.assign_moving_average(
          var, val, decay, zero_debias=False)

    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose([10.0, 11.0], self.evaluate(var))
    self.evaluate(assign)
    average_val = [1.0, 2.0]
    val_weight = 1.0 - 0.25
    self.assertAllClose(
        [10.0 * 0.25 + average_val[0] * val_weight,
         11.0 * 0.25 + average_val[1] * val_weight],
        self.evaluate(var))
    # Also try assign.op: running the op a second time decays the original
    # contribution once more (0.25 * 0.25).
    self.evaluate(assign.op)
    orig_weight = 0.25 * 0.25
    val_weight = 1.0 - orig_weight
    self.assertAllClose(
        [10.0 * orig_weight + average_val[0] * val_weight,
         11.0 * orig_weight + average_val[1] * val_weight],
        self.evaluate(var))

  @combinations.generate(all_combinations)
  def testCrossDevice(self, distribution):
    """Cross-replica-context update with zero-debias: first step is val."""
    with distribution.scope():
      var = variables.Variable([0.0, 0.0])
      val = variables.Variable([1.0, 2.0])
      decay = 0.25
      # NOTE(josh11b): We currently generate an error if val is a PerReplica
      # value.
      assign = moving_averages.assign_moving_average(var, val, decay)

    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose([0.0, 0.0], self.evaluate(var))
    self.evaluate(assign)
    self.assertAllClose([1.0, 2.0], self.evaluate(var))

  @combinations.generate(all_combinations_eager)
  def testUpdateContext(self, distribution, use_function):
    """assign_moving_average works inside strategy.extended.update()."""
    with distribution.scope():
      var1 = variables.Variable([0.0, 0.0])
      var2 = variables.Variable([0.0, 0.0])
      var3 = variables.Variable([0.0, 0.0])

      def update_fn(v, value):
        v.assign_add(value)
        # var2 is zero-debiased -> equals the raw value after one step;
        # var3 is not -> equals value * (1 - decay).
        moving_averages.assign_moving_average(var2, [2.0, 4.0], decay=0.25)
        moving_averages.assign_moving_average(
            var3, [2.0, 4.0], decay=0.25, zero_debias=False)

      distribution.extended.update(var1, update_fn, ([1.0, 1.0],))
      self.assertAllClose([2.0, 4.0], var2.read_value())
      self.assertAllClose([1.5, 3.0], var3.read_value())

  @combinations.generate(all_combinations)
  def testAssignVariable(self, distribution):
    """assign_moving_average accepts a Variable (not just a tensor) as val."""

    def replica_fn():
      var = variables.Variable([10.0, 11.0])
      # Here we expect to check the case when input value are variable.
      val = variables.Variable([1., 2.])
      decay = 0.25
      assign = moving_averages.assign_moving_average(
          var, val, decay, zero_debias=False)
      return var, assign

    with distribution.scope():
      var, assign = distribution.extended.call_for_each_replica(replica_fn)
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose([10.0, 11.0], self.evaluate(var))
      self.evaluate(distribution.experimental_local_results(assign))
      self.assertAllClose(
          [10 * 0.25 + 1. * (1 - 0.25), 11 * 0.25 + 2. * (1 - 0.25)],
          self.evaluate(var))
class ExponentialMovingAverageTest(test.TestCase, parameterized.TestCase):
  """Tests ExponentialMovingAverage in replica and cross-replica contexts."""

  @combinations.generate(all_combinations_eager)
  def testReplicaContextEager(self, distribution, use_function):
    """EMA applied inside distribution.run, optionally wrapped in tf.function."""
    if not use_function and isinstance(
        distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)):
      self.skipTest("TPUStrategy doesn't support pure eager execution.")
    if isinstance(distribution,
                  collective_all_reduce_strategy.CollectiveAllReduceStrategy):
      self.skipTest("b/160194267: Cannot do variable.assign([0.5]) in replica "
                    "context with MultiWorkerMirroredStrategy.")
    with distribution.scope():
      w = variables.Variable([1.0],
                             name="w",
                             aggregation=variables.VariableAggregation.MEAN)
      ema = moving_averages.ExponentialMovingAverage(0.8)

      def fn():

        def _ema_replica_fn_eager():
          # Two apply() calls bracketing an assign_sub; the second apply
          # folds the new value into the running average.
          ema.apply([w])
          w.assign_sub([0.5])
          ema.apply([w])
          return ema.average(w)

        return distribution.run(_ema_replica_fn_eager)

      if use_function:
        fn = def_function.function(fn)
      ema_w = fn()
    self.assertAllClose(
        self.evaluate(distribution.experimental_local_results(ema_w))[0],
        [0.89999998])

  @combinations.generate(all_combinations_eager)
  def testCrossReplicaContextEager(self, distribution, use_function):
    """Same computation as above but executed in cross-replica context."""
    with distribution.scope():
      w = variables.Variable([1.0],
                             name="w",
                             aggregation=variables.VariableAggregation.MEAN)
      ema = moving_averages.ExponentialMovingAverage(0.8)

      def fn():
        ema.apply([w])
        w.assign_sub([0.5])
        ema.apply([w])
        return ema.average(w)

      if use_function:
        fn = def_function.function(fn)
      avg = fn()
    self.assertAllClose(
        self.evaluate(distribution.experimental_local_results(avg))[0],
        [0.89999998])

  def _ema_replica_fn_graph(self):
    """Graph-mode helper: builds w, its EMA ops, and returns the three ops."""
    w = variables.Variable([1.0],
                           name="w",
                           aggregation=variables.VariableAggregation.MEAN)
    ema = moving_averages.ExponentialMovingAverage(0.8)
    w_apply = ema.apply([w])
    w_assign = w.assign_sub([0.5])
    return w_assign, w_apply, ema.average(w)

  @combinations.generate(all_combinations)
  def testReplicaContextGraph(self, distribution):
    """EMA graph ops created per-replica via distribution.run."""
    if isinstance(distribution,
                  (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)):
      self.skipTest("b/139550827: Cannot do variable.assign in replica context "
                    "of TPUStrategy")
    if isinstance(distribution,
                  collective_all_reduce_strategy.CollectiveAllReduceStrategy):
      self.skipTest("b/160194267: Cannot do variable.assign([0.5]) in replica "
                    "context with MultiWorkerMirroredStrategy.")
    with distribution.scope():
      w_assign, w_apply, ema_w = distribution.run(
          self._ema_replica_fn_graph)
    self.assertEqual(ema_w.name, "w/ExponentialMovingAverage:0")
    self.evaluate(variables.global_variables_initializer())
    # Run order matters: apply, assign_sub(0.5), apply again.
    self.evaluate(distribution.experimental_local_results(w_apply))
    self.evaluate(distribution.experimental_local_results(w_assign))
    self.evaluate(distribution.experimental_local_results(w_apply))
    self.assertAllClose(
        self.evaluate(distribution.experimental_local_results(ema_w))[0],
        [0.89999998])

  @combinations.generate(all_combinations)
  def testCrossReplicaContextGraph(self, distribution):
    """Same graph-mode ops, but created directly in cross-replica context."""
    with distribution.scope():
      w_assign, w_apply, ema_w = self._ema_replica_fn_graph()
    self.assertEqual(ema_w.name, "w/ExponentialMovingAverage:0")
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(distribution.experimental_local_results(w_apply))
    self.evaluate(distribution.experimental_local_results(w_assign))
    self.evaluate(distribution.experimental_local_results(w_apply))
    self.assertAllClose(
        self.evaluate(distribution.experimental_local_results(ema_w))[0],
        [0.89999998])
if __name__ == "__main__":
  # Run under the tf.distribute multi-device test harness.
  # TODO(b/172304955): enable logical devices.
  test_util.main(config_logical_devices=False)
| |
#Note
#CNN used
#XRMB Dataset
# had to downgrade numpy to 1.11.0 because 1.13.0 no longer accepts float indices into arrays
# myarray = np.fromfile('BinaryData.dat',dtype=float)
import sys
import math
import random
import warnings
import numpy as np
from sklearn import svm
import keras.backend as K
from keras.models import Model
#from theano import tensor as T
import matplotlib.pyplot as plt
from keras.layers import Input, Merge
from keras.engine.topology import Layer
from sklearn.metrics import accuracy_score
from keras.layers.core import Activation, Dense, Reshape
from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten
warnings.simplefilter("ignore")
# Training hyper-parameters.
nb_epoch = 40
batch_size = 100
dimx = 273  # left-view input width (MFCC features, loaded from MFCC_*.npy)
dimy = 112  # right-view input width (XRMB features, loaded from XRMB_*.npy)
lamda = 0.02  # weight of the correlation term in the CorrNet loss
loss_type = 2 # 1 - l1+l2+l3-L4; 2 - l2+l3-L4; 3 - l1+l2+l3 , 4 - l2+l3
def svm_classifier(train_x, train_y, valid_x, valid_y, test_x, test_y):
    """Fit a linear SVM on the training split.

    Returns:
        (validation_accuracy, test_accuracy) as floats.
    """
    model = svm.LinearSVC()
    model.fit(train_x, train_y)

    def _accuracy(features, targets):
        # Flatten both sides so 2-D label arrays compare element-wise.
        return accuracy_score(np.ravel(targets), np.ravel(model.predict(features)))

    return _accuracy(valid_x, valid_y), _accuracy(test_x, test_y)
def split(train_l, train_r, label, ratio):
    """Randomly partition two parallel data views (and labels) into train/test.

    Args:
        train_l: 2-D array, left view, one sample per row.
        train_r: 2-D array, right view, row-parallel to `train_l`.
        label: 1-D array of per-sample labels.
        ratio: fraction of samples to hold out as the test split.

    Returns:
        (tr_l, tst_l, tr_r, tst_r, l_tr, l_tst) numpy arrays.
    """
    total = train_l.shape[0]
    train_samples = int(total * (1 - ratio))
    tr_l, tst_l, tr_r, tst_r, l_tr, l_tst = [], [], [], [], [], []
    dat = random.sample(range(total), train_samples)
    chosen = set(dat)  # O(1) membership tests instead of O(n) list scans
    for a in dat:
        tr_l.append(train_l[a, :])
        tr_r.append(train_r[a, :])
        l_tr.append(label[a])
    # BUG FIX: scan *all* indices for the test split.  The original looped
    # only over range(test_samples), so any unsampled row with an index
    # >= test_samples was silently dropped from both splits.
    for i in range(total):
        if i not in chosen:
            tst_l.append(train_l[i, :])
            tst_r.append(train_r[i, :])
            l_tst.append(label[i])
    return (np.array(tr_l), np.array(tst_l), np.array(tr_r),
            np.array(tst_r), np.array(l_tr), np.array(l_tst))
class ZeroPadding(Layer):
    """Keras layer mapping any input tensor to an all-zeros tensor of the
    same shape.  Used in buildModel() to feed an "empty" view into the shared
    branch model so the representation of a single view can be computed."""

    def __init__(self, **kwargs):
        super(ZeroPadding, self).__init__(**kwargs)

    def call(self, x, mask=None):
        return K.zeros_like(x)

    def get_output_shape_for(self, input_shape):
        # Shape is preserved; only values are zeroed.
        return input_shape
class MultiplyBy2(Layer):
    """Keras layer that scales its input tensor by a constant factor of 2.

    NOTE(review): defined but not referenced anywhere in this script.
    """

    def __init__(self, **kwargs):
        super(MultiplyBy2, self).__init__(**kwargs)

    def call(self, x, mask=None):
        return 2*x

    def get_output_shape_for(self, input_shape):
        return input_shape
class CorrnetCost(Layer):
    """Layer emitting the lamda-scaled sum of per-dimension correlations
    between two hidden representations; wired into the loss via corr_loss."""

    def __init__(self, lamda, **kwargs):
        super(CorrnetCost, self).__init__(**kwargs)
        self.lamda = lamda

    def cor(self, y1, y2, lamda):
        """Sum over dimensions of Pearson correlation(y1, y2), times lamda."""
        centered_a = y1 - K.mean(y1, axis=0)
        centered_b = y2 - K.mean(y2, axis=0)
        numerator = K.sum(centered_a * centered_b, axis=0)
        # The 1e-8 terms guard against division by zero for constant columns.
        denom_a = K.sqrt(K.sum(centered_a * centered_a, axis=0) + 1e-8)
        denom_b = K.sqrt(K.sum(centered_b * centered_b, axis=0) + 1e-8)
        return K.sum(numerator / (denom_a * denom_b)) * lamda

    def call(self, x, mask=None):
        h1, h2 = x[0], x[1]
        # The layer's "output" is the correlation value itself; corr_loss
        # passes it straight through as the loss term.
        return self.cor(h1, h2, self.lamda)

    def get_output_shape_for(self, input_shape):
        return (input_shape[0][0], input_shape[0][1])
def corr_loss(y_true, y_pred):
    """Keras loss adapter: the model output *is* the loss term.

    `y_true` is ignored; the CorrnetCost layer already emits the (scaled)
    correlation value, so the loss simply passes the prediction through.
    """
    return y_pred
def project(model, inp):
    """Run the branch model on a (left, right) input pair.

    Returns the model's third output, i.e. the shared hidden representation.
    """
    left, right = inp[0], inp[1]
    outputs = model.predict([left, right])
    return outputs[2]
def reconstruct_from_left(model, inp):
    """Plot a left-view 28x14 image beside its reconstruction computed from
    the left view only (the right input is zeroed)."""
    original = inp.reshape((28, 14))
    fig, axes = plt.subplots(1, 2, sharey=False)
    outputs = model.predict([inp, np.zeros_like(inp)])
    reconstruction = outputs[0].reshape((28, 14))
    axes[0].imshow(original)
    axes[1].imshow(reconstruction)
def reconstruct_from_right(model, inp):
    """Plot a right-view 28x14 image beside its reconstruction computed from
    the right view only (the left input is zeroed).

    Note the axes are mirrored relative to reconstruct_from_left: the input
    goes on the right panel and the reconstruction on the left.
    """
    original = inp.reshape((28, 14))
    fig, axes = plt.subplots(1, 2, sharey=False)
    outputs = model.predict([np.zeros_like(inp), inp])
    reconstruction = outputs[1].reshape((28, 14))
    axes[1].imshow(original)
    axes[0].imshow(reconstruction)
def sum_corr(model):
    """Print the summed per-dimension correlation between the projections of
    the two test views (each projected with the other view zeroed)."""
    view1 = np.load("MFCC_Test.npy")
    view2 = np.load("XRMB_Test.npy")
    x = project(model, [view1, np.zeros_like(view2)])
    y = project(model, [np.zeros_like(view1), view2])
    print ("test correlation")
    corr = 0
    for dim in range(0, len(x[0])):
        # Center each column, then accumulate its Pearson correlation.
        cx = x[:, dim] - (np.ones(len(x)) * (sum(x[:, dim]) / len(x)))
        cy = y[:, dim] - (np.ones(len(y)) * (sum(y[:, dim]) / len(y)))
        corr += sum(cx * cy) / (math.sqrt(sum(cx * cx)) * math.sqrt(sum(cy * cy)))
    print (corr)
def transfer(model):
    """5-fold cross-view transfer evaluation with a linear SVM.

    Projects both test views through the shared representation, then for each
    direction trains on four folds of one view's projections and tests on the
    held-out fold of the *other* view's projections, printing the mean test
    accuracy.

    NOTE(review): the held-out fold is passed to svm_classifier as both the
    validation and the test set, and view2 slices are bounded by len(view1)
    -- this assumes both views have the same number of rows (verify).
    """
    view11 = np.load("MFCC_Test.npy")
    view22 = np.load("XRMB_Test.npy")
    labels = np.load("Labels_Test.npy")
    # Project each view alone by zero-padding the other input.
    view1 = project(model,[view11,np.zeros_like(view22)])
    view2 = project(model,[np.zeros_like(view11),view22])
    perp = len(view1) // 5  # fold size
    print ("view1 to view2")
    acc = 0
    for i in range(5):
        print('@ i' + str(i))
        test_x = view2[int(i*perp):int((i+1)*perp)]
        test_y = labels[i*perp:(i+1)*perp]
        if i==0:
            train_x = view1[perp:len(view1)]
            train_y = labels[perp:len(view1)]
        elif i==4:
            train_x = view1[0:4*perp]
            train_y = labels[0:4*perp]
        else:
            # Middle folds: concatenate the rows before and after fold i.
            train_x1 = view1[0:i*perp]
            train_y1 = labels[0:i*perp]
            train_x2 = view1[(i+1)*perp:len(view1)]
            train_y2 = labels[(i+1)*perp:len(view1)]
            train_x = np.concatenate((train_x1,train_x2))
            train_y = np.concatenate((train_y1,train_y2))
        va, ta = svm_classifier(train_x, train_y, test_x, test_y, test_x, test_y)
        acc += ta
    print (acc/5)
    print ("view2 to view1")
    acc = 0
    for i in range(5):
        print('@ i' + str(i))
        test_x = view1[i*perp:(i+1)*perp]
        test_y = labels[i*perp:(i+1)*perp]
        if i==0:
            train_x = view2[perp:len(view1)]
            train_y = labels[perp:len(view1)]
        elif i==4:
            train_x = view2[0:4*perp]
            train_y = labels[0:4*perp]
        else:
            train_x1 = view2[0:i*perp]
            train_y1 = labels[0:i*perp]
            train_x2 = view2[(i+1)*perp:len(view1)]
            train_y2 = labels[(i+1)*perp:len(view1)]
            train_x = np.concatenate((train_x1,train_x2))
            train_y = np.concatenate((train_y1,train_y2))
        va, ta = svm_classifier(train_x, train_y, test_x, test_y, test_x, test_y)
        acc += ta
    print (acc/5)
def prepare_data():
    """Load the training views and labels from disk.

    Returns:
        (left, right) training arrays.  ratio=0.0 keeps every sample in the
        training split; the test/label outputs of split() are discarded.
    """
    data_l = np.load('MFCC_Train.npy')
    data_r = np.load('XRMB_Train.npy')
    label = np.load('Labels_Train.npy')
    X_train_l, X_test_l, X_train_r, X_test_r,y_train,y_test = split(data_l,data_r,label,ratio=0.0)
    return X_train_l, X_train_r
def buildModel(loss_type,lamda):
    """Build the convolutional CorrNet.

    Two branch encoders (conv/pool stacks followed by dense layers) map the
    MFCC (dimx) and XRMB (dimy) inputs into a shared representation h (sum of
    the two branches); two decoders reconstruct each view from h.  The
    training model outputs cross/self reconstructions plus the correlation
    term (negated lamda so minimizing the loss maximizes correlation).

    NOTE(review): `loss_type` is currently unused -- only the variant
    corresponding to loss_type == 2 is built (see commented alternatives).

    Returns:
        (model, branchModel): `model` is compiled for training;
        `branchModel` maps (x, y) -> [recx, recy, h] for projection.
    """
    inpx = Input(shape=(dimx,))
    inpy = Input(shape=(dimy,))

    # Left (x) encoder: two conv/pool stages, then a dense funnel.
    hx = Reshape((dimx, 1))(inpx)
    hx = Conv1D(256, 5, activation='relu', padding='valid', strides=1)(hx)
    hx = MaxPooling1D(pool_size=4, padding='valid')(hx)
    hx = Conv1D(65, 4, activation='relu', padding='valid', strides=1)(hx)
    hx = MaxPooling1D(pool_size=3, padding='valid')(hx)
    hx = Flatten()(hx)
    hx = Dense(560,activation='sigmoid')(hx)
    hx = Dense(280, activation='sigmoid')(hx)
    hx = Dense(112, activation='sigmoid')(hx)
    hx = Dense(680, activation='sigmoid')(hx)
    hx = Dense(1365, activation='sigmoid')(hx)
    # Right (y) encoder: mirrors the left branch with smaller kernels and
    # ends at the same width (1365) so the branches can be summed.
    hy = Reshape((dimy, 1))(inpy)
    hy = Conv1D(256, 3, activation='relu', padding='valid', strides=1)(hy)
    hy = MaxPooling1D(pool_size=2, padding='valid')(hy)
    hy = Conv1D(50, 3, activation='relu', padding='valid', strides=1)(hy)
    hy = MaxPooling1D(pool_size=2, padding='valid')(hy)
    hy = Flatten()(hy)
    hy = Dense(560,activation='sigmoid')(hy)
    hy = Dense(280, activation='sigmoid')(hy)
    hy = Dense(112, activation='sigmoid')(hy)
    hy = Dense(680, activation='sigmoid')(hy)
    hy = Dense(1365, activation='sigmoid')(hy)
    #h = Activation("sigmoid")( Merge(mode="sum")([hx,hy]) )
    # Shared representation: element-wise sum of the two branches
    # (old Keras 1.x Merge API).
    h = Merge(mode="sum")([hx,hy])
    #recx = Dense(hdim_deep,activation='sigmoid')(h)
    recx = Dense(dimx)(h)
    #recy = Dense(hdim_deep,activation='sigmoid')(h)
    recy = Dense(dimy)(h)
    branchModel = Model( [inpx,inpy],[recx,recy,h])
    # h1/h2: representations computed from a single view (other view zeroed).
    [recx1,recy1,h1] = branchModel( [inpx, ZeroPadding()(inpy)])
    [recx2,recy2,h2] = branchModel( [ZeroPadding()(inpx), inpy ])
    #you may probably add a reconstruction from combined
    [recx3,recy3,h] = branchModel([inpx, inpy])
    # Negative lamda: minimizing the loss maximizes the h1/h2 correlation.
    corr=CorrnetCost(-lamda)([h1,h2])
    model = Model( [inpx,inpy],[recy1,recx2,recx1,recy2,corr]) #2
    model.compile( loss=["mse","mse","mse","mse",corr_loss],optimizer="rmsprop")
    branchModel.summary()
    # if loss_type == 1:
    #     model = Model( [inpx,inpy],[recy1,recx2,recx3,recx1,recy2,recy3,corr])
    #     model.compile( loss=["mse","mse","mse","mse","mse","mse",corr_loss],optimizer="rmsprop")
    # elif loss_type == 2:
    #     model = Model( [inpx,inpy],[recy1,recx2,recx1,recy2,corr])
    #     model.compile( loss=["mse","mse","mse","mse",corr_loss],optimizer="rmsprop")
    # elif loss_type == 3:
    #     model = Model( [inpx,inpy],[recy1,recx2,recx3,recx1,recy2,recy3])
    #     model.compile( loss=["mse","mse","mse","mse","mse","mse"],optimizer="rmsprop")
    # elif loss_type == 4:
    #     model = Model( [inpx,inpy],[recy1,recx2,recx1,recy2])
    #     model.compile( loss=["mse","mse","mse","mse"],optimizer="rmsprop")
    return model, branchModel
def trainModel(model,data_left,data_right,loss_type,nb_epoch,batch_size):
    """Fit the CorrNet training model.

    NOTE(review): the `data_left`/`data_right` parameters are immediately
    shadowed by re-loading and re-splitting the .npy files below, so the
    passed-in arrays are never used.  The zero target for the correlation
    output is shaped (n, 112) while CorrnetCost reports an (n, 1365)-shaped
    output -- corr_loss ignores y_true, so this presumably works, but
    confirm against the Keras version in use.
    """
    X_train_l = data_left
    X_train_r = data_right
    #y_train = np_utils.to_categorical(y_train, nb_classes)
    #y_test = np_utils.to_categorical(y_test, nb_classes)
    data_l = np.load('MFCC_Train.npy')
    data_r = np.load('XRMB_Train.npy')
    label = np.load('Labels_Train.npy')
    X_train_l, X_test_l, X_train_r, X_test_r,y_train,y_test = split(data_l,data_r,label,ratio=0.01)
    print ('data split')
    # Targets match the model outputs [recy1, recx2, recx1, recy2, corr].
    model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_r,np.zeros((X_train_l.shape[0],112))],
              nb_epoch=nb_epoch,
              batch_size=batch_size,verbose=1)
    # if loss_type == 1:
    #     print ('L_Type: l1+l2+l3-L4 h_dim:',hdim,' lamda:',lamda)
    #     model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_l,X_train_r,X_train_r,np.zeros((X_train_l.shape[0],h_loss))],
    #               nb_epoch=nb_epoch,
    #               batch_size=batch_size,verbose=1)
    # elif loss_type == 2:
    #     print ('L_Type: l2+l3-L4 h_dim:',hdim,' hdim_deep',hdim_deep,' lamda:',lamda)
    #     model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_r,np.zeros((X_train_l.shape[0],h_loss))],
    #               nb_epoch=nb_epoch,
    #               batch_size=batch_size,verbose=1)
    # elif loss_type == 3:
    #     print ('L_Type: l1+l2+l3 h_dim:',hdim,' lamda:',lamda)
    #     model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_l,X_train_r,X_train_r],
    #               nb_epoch=nb_epoch,
    #               batch_size=batch_size,verbose=1)
    # elif loss_type == 4:
    #     print ('L_Type: l2+l3 h_dim:',hdim,' lamda:',lamda)
    #     model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_r],
    #               nb_epoch=nb_epoch,
    #               batch_size=batch_size,verbose=1)
    # score = m.evaluate([X_test_l,X_test_r], [X_test_l,X_test_l,X_test_r,X_test_r,np.zeros((X_test_l.shape[0],hdim))],
    #                    batch_size=100)
    # print score
def testModel(b_model):
    """Evaluate the trained branch model: transfer accuracy, then total
    test-set correlation."""
    transfer(b_model)
    sum_corr(b_model)
# Script entry point: load the two training views, build and train the
# CorrNet, then evaluate transfer accuracy / correlation on the test data.
left_view, right_view = prepare_data()
model,branchModel = buildModel(loss_type=loss_type,lamda=lamda)
trainModel(model=model, data_left=left_view, data_right = right_view,
           loss_type=loss_type,nb_epoch=nb_epoch,batch_size=batch_size)
testModel(branchModel)
| |
from __future__ import print_function
import atexit
import errno
import logging
import os
import signal
import sys
import time
from functools import partial
# Per-scenario time budget in seconds; overridable via environment variable.
TIMEOUT = int(os.getenv('MANHOLE_TEST_TIMEOUT', 10))
SOCKET_PATH = '/tmp/manhole-socket'
# Write status messages to the real stdout even if sys.stdout is replaced.
OUTPUT = sys.__stdout__
def handle_sigterm(signo, _frame):
    """Exit with the conventional 128+signal status so atexit handlers run."""
    # Simulate real termination
    print("Terminated", file=OUTPUT)
    sys.exit(128 + signo)
# Handling SIGTERM ensures that atexit functions are called and we do not
# leave leftover /tmp/manhole-<pid> sockets behind.
signal.signal(signal.SIGTERM, handle_sigterm)
@atexit.register
def log_exit():
    """Marker printed at interpreter shutdown (observed by the test suite)."""
    print("In atexit handler.", file=OUTPUT)
def setup_greenthreads(patch_threads=False):
    """Best-effort green-thread monkey-patching of gevent and eventlet.

    Thread patching is deliberately disabled (thread=False) so the manhole
    thread keeps working; `patch_threads` is currently unused.  Libraries
    that are not installed are silently skipped.
    """
    def _patch_gevent():
        from gevent import monkey
        monkey.patch_all(thread=False)

    def _patch_eventlet():
        import eventlet
        # workaround for circular import issue in eventlet,
        # see https://github.com/eventlet/eventlet/issues/401
        eventlet.hubs.get_hub()
        eventlet.monkey_patch(thread=False)

    for patch in (_patch_gevent, _patch_eventlet):
        try:
            patch()
        except (ImportError, SyntaxError):
            pass
def do_fork():
    """Fork: the parent waits on the child (killing it at exit); the child
    just sleeps so the test suite can interact with its manhole."""
    pid = os.fork()
    if pid:
        # Parent: make sure the child is interrupted/terminated at exit.
        @atexit.register
        def cleanup():
            try:
                os.kill(pid, signal.SIGINT)
                time.sleep(0.2)
                os.kill(pid, signal.SIGTERM)
            except OSError as e:
                # ESRCH: the child is already gone -- nothing to clean up.
                if e.errno != errno.ESRCH:
                    raise
        os.waitpid(pid, 0)
    else:
        # Child: stay alive long enough for the test to connect.
        time.sleep(TIMEOUT * 10)
# Entry point: the first CLI argument selects which scenario this helper
# process runs; the parent test suite inspects its output and sockets.
if __name__ == '__main__':
    logging.basicConfig(
        level=logging.DEBUG,
        format='[pid=%(process)d - %(asctime)s]: %(name)s - %(levelname)s - %(message)s',
    )
    test_name = sys.argv[1]
    try:
        # Import order matters: PATCH_THREAD controls whether manhole is
        # imported before or after green-thread monkey-patching.
        if os.getenv('PATCH_THREAD', False):
            import manhole
            setup_greenthreads(True)
        else:
            setup_greenthreads(True)
            import manhole
        # Scenarios that configure manhole.install() themselves:
        if test_name == 'test_environ_variable_activation':
            time.sleep(TIMEOUT)
        elif test_name == 'test_install_twice_not_strict':
            manhole.install(oneshot_on='USR2')
            manhole.install(strict=False)
            time.sleep(TIMEOUT)
        elif test_name == 'test_log_fd':
            manhole.install(verbose=True, verbose_destination=2)
            manhole._LOG("whatever-1")
            manhole._LOG("whatever-2")
        elif test_name == 'test_log_fh':
            # Any object with a write() method works as a log destination.
            class Output(object):
                data = []
                write = data.append
            manhole.install(verbose=True, verbose_destination=Output)
            manhole._LOG("whatever")
            if Output.data and "]: whatever" in Output.data[-1]:
                print("SUCCESS")
        elif test_name == 'test_activate_on_usr2':
            manhole.install(activate_on='USR2')
            for i in range(TIMEOUT * 100):
                time.sleep(0.1)
        elif test_name == 'test_install_once':
            manhole.install()
            try:
                manhole.install()
            except manhole.AlreadyInstalled:
                print('ALREADY_INSTALLED')
            else:
                raise AssertionError("Did not raise AlreadyInstalled")
        elif test_name == 'test_stderr_doesnt_deadlock':
            import subprocess
            manhole.install()
            for i in range(50):
                print('running iteration', i)
                p = subprocess.Popen(['true'])
                print('waiting for process', p.pid)
                p.wait()
                print('process ended')
                path = '/tmp/manhole-%d' % p.pid
                if os.path.exists(path):
                    os.unlink(path)
                    raise AssertionError(path + ' exists !')
            print('SUCCESS')
        elif test_name == 'test_fork_exec':
            manhole.install(reinstall_delay=5)
            print("Installed.")
            time.sleep(0.2)
            pid = os.fork()
            print("Forked, pid =", pid)
            if pid:
                os.waitpid(pid, 0)
                path = '/tmp/manhole-%d' % pid
                if os.path.exists(path):
                    os.unlink(path)
                    raise AssertionError(path + ' exists !')
            else:
                try:
                    time.sleep(1)
                    print("Exec-ing `true`")
                    os.execvp('true', ['true'])
                finally:
                    os._exit(1)
            print('SUCCESS')
        elif test_name == 'test_activate_on_with_oneshot_on':
            manhole.install(activate_on='USR2', oneshot_on='USR2')
            for i in range(TIMEOUT * 100):
                time.sleep(0.1)
        elif test_name == 'test_interrupt_on_accept':
            def handle_usr2(_sig, _frame):
                print('Got USR2')
            signal.signal(signal.SIGUSR2, handle_usr2)
            # Use pthread_kill via ctypes to signal the manhole thread itself.
            import ctypes
            import ctypes.util
            libpthread_path = ctypes.util.find_library("pthread")
            if not libpthread_path:
                raise ImportError('ctypes.util.find_library("pthread") failed')
            libpthread = ctypes.CDLL(libpthread_path)
            if not hasattr(libpthread, "pthread_setname_np"):
                raise ImportError('libpthread.pthread_setname_np missing')
            pthread_kill = libpthread.pthread_kill
            pthread_kill.argtypes = [ctypes.c_void_p, ctypes.c_int]
            pthread_kill.restype = ctypes.c_int
            manhole.install(sigmask=None)
            for i in range(15):
                time.sleep(0.1)
            print("Sending signal to manhole thread ...")
            pthread_kill(manhole._MANHOLE.thread.ident, signal.SIGUSR2)
            for i in range(TIMEOUT * 100):
                time.sleep(0.1)
        elif test_name == 'test_oneshot_on_usr2':
            manhole.install(oneshot_on='USR2')
            for i in range(TIMEOUT * 100):
                time.sleep(0.1)
        elif test_name.startswith('test_signalfd_weirdness'):
            signalled = False

            @partial(signal.signal, signal.SIGUSR1)
            def signal_handler(sig, _):
                print('Received signal %s' % sig)
                global signalled
                signalled = True
            if 'negative' in test_name:
                manhole.install(sigmask=None)
            else:
                manhole.install(sigmask=[signal.SIGUSR1])
            time.sleep(0.3)  # give the manhole a bit enough time to start
            print('Starting ...')
            import signalfd
            signalfd.sigprocmask(signalfd.SIG_BLOCK, [signal.SIGUSR1])
            sys.setcheckinterval(1)
            for i in range(100000):
                os.kill(os.getpid(), signal.SIGUSR1)
            print('signalled=%s' % signalled)
            time.sleep(TIMEOUT * 10)
        elif test_name == 'test_auth_fail':
            # Force peer-credential check failure for every connection.
            manhole.get_peercred = lambda _: (-1, -1, -1)
            manhole.install()
            time.sleep(TIMEOUT * 10)
        elif test_name == 'test_socket_path':
            manhole.install(socket_path=SOCKET_PATH)
            time.sleep(TIMEOUT * 10)
        elif test_name == 'test_daemon_connection':
            manhole.install(daemon_connection=True)
            time.sleep(TIMEOUT)
        elif test_name == 'test_socket_path_with_fork':
            manhole.install(socket_path=SOCKET_PATH)
            time.sleep(TIMEOUT)
            do_fork()
        elif test_name == 'test_locals':
            manhole.install(socket_path=SOCKET_PATH,
                            locals={'k1': 'v1', 'k2': 'v2'})
            time.sleep(TIMEOUT)
        elif test_name == 'test_locals_after_fork':
            manhole.install(locals={'k1': 'v1', 'k2': 'v2'})
            do_fork()
        elif test_name == 'test_redirect_stderr_default':
            manhole.install(socket_path=SOCKET_PATH)
            time.sleep(TIMEOUT)
        elif test_name == 'test_redirect_stderr_disabled':
            manhole.install(socket_path=SOCKET_PATH, redirect_stderr=False)
            time.sleep(TIMEOUT)
        elif test_name == 'test_sigmask':
            manhole.install(socket_path=SOCKET_PATH, sigmask=[signal.SIGUSR1])
            time.sleep(TIMEOUT)
        elif test_name == 'test_connection_handler_exec_func':
            manhole.install(connection_handler=manhole.handle_connection_exec, locals={'tete': lambda: print('TETE')})
            time.sleep(TIMEOUT * 10)
        elif test_name == 'test_connection_handler_exec_str':
            manhole.install(connection_handler='exec', locals={'tete': lambda: print('TETE')})
            time.sleep(TIMEOUT * 10)
        else:
            # Remaining scenarios use a default manhole.install().
            manhole.install()
            time.sleep(0.3)  # give the manhole a bit enough time to start
            if test_name == 'test_simple':
                time.sleep(TIMEOUT * 10)
            elif test_name == 'test_with_forkpty':
                time.sleep(1)
                pid, masterfd = os.forkpty()
                if pid:
                    @atexit.register
                    def cleanup():
                        try:
                            os.kill(pid, signal.SIGINT)
                            time.sleep(0.2)
                            os.kill(pid, signal.SIGTERM)
                        except OSError as e:
                            if e.errno != errno.ESRCH:
                                raise
                    # Relay the child's pty output to stderr until it exits.
                    while not os.waitpid(pid, os.WNOHANG)[0]:
                        try:
                            os.write(2, os.read(masterfd, 1024))
                        except OSError as e:
                            print("Error while reading from masterfd:", e)
                else:
                    time.sleep(TIMEOUT * 10)
            elif test_name == 'test_with_fork':
                time.sleep(1)
                do_fork()
            else:
                raise RuntimeError('Invalid test spec.')
    except:  # noqa
        # Report any failure on the real stdout so the parent test sees it.
        print('Died with %s.' % sys.exc_info()[0].__name__, file=OUTPUT)
        import traceback
        traceback.print_exc(file=OUTPUT)
    print('DIED.', file=OUTPUT)
| |
import os
import subprocess
from pathlib import Path
def bashoutput(filename, inpi, lang):
    """Compile (if needed) and run a submission against a single input file.

    Args:
        filename: path to the submitted source file.
        inpi: path to the input file fed on stdin.
        lang: one of 'cpp', 'c', 'py', 'py3', 'java'.

    Returns:
        The program's stdout, or the compiler/runtime error text, or
        "Time Limit Exceeded", or "no language found" for unknown languages.

    The run command backgrounds ./test.sh, sleeps for the time limit, then
    kills it: if kill reports no error (time_errors.txt empty) the process
    was still running, i.e. it timed out.
    """
    if lang in ('cpp', 'c'):
        compiler = "g++ -std=c++11" if lang == 'cpp' else "gcc"
        os.system("{0} {1} 2>compile_errors.txt".format(compiler, filename))
        if os.stat("compile_errors.txt").st_size:
            contents = Path('compile_errors.txt').read_text()
            os.system("rm compile_errors.txt")
            return contents
        command1 = "./test.sh './a.out' < {0} > tempout.txt 2>runtime_errors.txt & PID=$!; sleep {1}; kill $PID 2>time_errors.txt".format(inpi, 2)
        os.system(command1)
        if os.stat("runtime_errors.txt").st_size:
            contents = Path('runtime_errors.txt').read_text()
            # NOTE(review): this also deletes the input and source files on a
            # runtime error -- behaviour preserved from the original.
            os.system("rm a.out {0} {1} tempout.txt compile_errors.txt runtime_errors.txt time_errors.txt".format(inpi, filename))
            return contents
        if os.stat("time_errors.txt").st_size == 0:
            os.system("rm a.out tempout.txt compile_errors.txt runtime_errors.txt time_errors.txt")
            return "Time Limit Exceeded"
        # FIX: dropped the spurious diff.txt from the cleanup list -- no diff
        # is ever produced in this function.
        contents = Path('tempout.txt').read_text()
        os.system("rm a.out tempout.txt compile_errors.txt runtime_errors.txt time_errors.txt")
        return contents
    elif lang in ('py', 'py3'):
        # BUG FIX: 'py3' previously ran plain "python"; use python3 for
        # consistency with bashfunc().
        interpreter = "python3" if lang == 'py3' else "python"
        command1 = "./test.sh '{0} {1}' < {2} > tempout.txt 2>runtime_errors.txt & PID=$!; sleep {3}; kill $PID 2>time_errors.txt".format(interpreter, filename, inpi, 2)
        os.system(command1)
        if os.stat("runtime_errors.txt").st_size:
            contents = Path('runtime_errors.txt').read_text()
            os.system("rm tempout.txt runtime_errors.txt time_errors.txt")
            return contents
        if os.stat("time_errors.txt").st_size == 0:
            os.system("rm tempout.txt runtime_errors.txt time_errors.txt")
            return "Time Limit Exceeded"
        contents = Path('tempout.txt').read_text()
        os.system("rm tempout.txt runtime_errors.txt time_errors.txt")
        return contents
    elif lang == 'java':
        os.system("javac {0} 2>compile_errors.txt".format(filename))
        if os.stat("compile_errors.txt").st_size:
            contents = Path('compile_errors.txt').read_text()
            os.system("rm compile_errors.txt")
            return contents
        filei = filename[:-5]  # strip ".java" to get the main class name
        command1 = "./test.sh 'java {0}' < {1} > tempout.txt 2>runtime_errors.txt & PID=$!; sleep {2}; kill $PID 2>time_errors.txt".format(filei, inpi, 2)
        os.system(command1)
        if os.stat("runtime_errors.txt").st_size:
            contents = Path('runtime_errors.txt').read_text()
            os.system("rm *.class tempout.txt runtime_errors.txt time_errors.txt")
            return contents
        if os.stat("time_errors.txt").st_size == 0:
            os.system("rm *.class tempout.txt runtime_errors.txt time_errors.txt")
            return 'Time Limit Exceeded'
        # BUG FIX: previously returned "" here, discarding the program output.
        contents = Path('tempout.txt').read_text()
        os.system("rm tempout.txt runtime_errors.txt time_errors.txt *.class")
        return contents
    else:
        return "no language found"
def bashfunc(filename, testcase, number, lang, timeout):
    """Judge a submission against `number` numbered test cases.

    Args:
        filename: submitted source file.
        testcase: path prefix; inputs are <testcase>input_<i>, expected
            outputs are <testcase>output_<i> (1-based).
        number: how many test cases to run.
        lang: one of 'cpp', 'c', 'py', 'py3', 'java'.
        timeout: per-test time limit in seconds.

    Returns:
        "AC" (all pass), "WA" (wrong answer), "TLE", "RE" (runtime error),
        "CE" (compile error), or "no language found".

    Improvements over the original: the five near-identical per-language
    branches are collapsed into one parameterized loop, the java branches no
    longer leak an empty compile_errors.txt, and the unused `correct`
    counter was removed.
    """
    # Per-language compile command, run command, and compiled artifacts.
    if lang == 'cpp':
        compile_cmd = "g++ -std=c++11 {0}".format(filename)
        run_cmd = "./a.out"
        artifacts = "a.out"
    elif lang == 'c':
        compile_cmd = "gcc {0}".format(filename)
        run_cmd = "./a.out"
        artifacts = "a.out"
    elif lang == 'java':
        compile_cmd = "javac {0}".format(filename)
        run_cmd = "java {0}".format(filename[:-5])  # strip ".java"
        artifacts = "*.class"
    elif lang == 'py3':
        compile_cmd = None
        run_cmd = "python3 {0}".format(filename)
        artifacts = ""
    elif lang == 'py':
        compile_cmd = None
        run_cmd = "python {0}".format(filename)
        artifacts = ""
    else:
        return "no language found"

    if compile_cmd is not None:
        os.system("{0} 2>compile_errors.txt".format(compile_cmd))
        if os.stat("compile_errors.txt").st_size:
            os.system("rm compile_errors.txt")
            return "CE"

    def _cleanup(extra=""):
        # Remove every temp file a run may have created before returning.
        files = "tempout.txt runtime_errors.txt time_errors.txt"
        if compile_cmd is not None:
            files += " compile_errors.txt"
        if artifacts:
            files = artifacts + " " + files
        if extra:
            files = extra + " " + files
        os.system("rm {0}".format(files))

    for i in range(number):
        inpi = testcase + "input_" + str(i + 1)
        outi = testcase + "output_" + str(i + 1)
        # Background the run, sleep for the limit, then kill: if kill
        # reports no error the process was still alive => timed out.
        os.system("./test.sh '{0}' < {1} > tempout.txt 2>runtime_errors.txt & PID=$!; sleep {2}; kill $PID 2>time_errors.txt".format(run_cmd, inpi, timeout))
        if os.stat("runtime_errors.txt").st_size:
            _cleanup()
            return "RE"
        if os.stat("time_errors.txt").st_size == 0:
            _cleanup()
            return "TLE"
        os.system("diff tempout.txt {0} > diff.txt".format(outi))
        if os.stat("diff.txt").st_size:
            _cleanup("diff.txt")
            return "WA"
        os.system("rm diff.txt")
    _cleanup()
    return "AC"
| |
from abc import ABC
from unittest import TestCase
from gilgamesh.log import EntryPoint, Log
from gilgamesh.snes.rom import ROM
from gilgamesh.snes.state import StateChange
from tests.test_rom import assemble
class LogTest(ABC):
    """Base fixture: assembles the subclass-provided `asm` source into a ROM
    once per class, and builds/analyzes a fresh Log before each test."""

    @classmethod
    def setUpClass(cls):
        # `cls.asm` is declared by each concrete subclass.
        cls.rom = ROM(assemble(cls.asm))

    def setUp(self):
        self.log = Log(self.rom)
        self.log.analyze()
class LoROMTest(LogTest, TestCase):
    asm = "lorom.asm"

    def test_initial_entry_points(self):
        """reset gets a subroutine; the (zero) nmi vector does not."""
        self.assertEqual(
            self.log.entry_points[0x8000], EntryPoint("reset", 0b0011_0000)
        )
        self.assertEqual(self.log.subroutines[0x8000].label, "reset")
        self.assertEqual(self.log.entry_points[0x0000], EntryPoint("nmi", 0b0011_0000))
        self.assertNotIn(0x0000, self.log.subroutines)
class InfiniteLoopTest(LogTest, TestCase):
    asm = "infinite_loop.asm"

    def test_instructions(self):
        """A self-jump produces exactly one analyzed instruction."""
        self.assertEqual(len(self.log.instructions), 1)
        subroutine = self.log.subroutines[0x8000]
        instruction = subroutine.instructions.popitem()[1]
        self.assertEqual(instruction.pc, 0x8000)
        self.assertEqual(instruction.name, "jmp")
        # The jmp targets its own address.
        self.assertEqual(instruction.absolute_argument, 0x8000)
class StateChangeTest(LogTest, TestCase):
    asm = "state_change.asm"

    def test_instructions(self):
        self.assertEqual(len(self.log.instructions), 7)

    def test_sub_state_change(self):
        """The called subroutine leaves m=0 and x=0 (16-bit A and X)."""
        sub = self.log.subroutines[0x800E]
        self.assertEqual(len(sub.state_changes), 1)
        change = sub.state_change
        self.assertEqual(change.m, 0)
        self.assertEqual(change.x, 0)

    def test_lda_ldx_size(self):
        """With m=0/x=0 the immediate loads take 2-byte arguments."""
        reset = self.log.subroutines_by_label["reset"]
        lda = reset.instructions[0x8005]
        ldx = reset.instructions[0x8008]
        self.assertEqual(lda.name, "lda")
        self.assertEqual(lda.argument_size, 2)
        self.assertEqual(ldx.name, "ldx")
        self.assertEqual(ldx.argument_size, 2)
class ElidableStateChangeTest(LogTest, TestCase):
    """State changes undone before returning should be elided."""

    asm = "elidable_state_change.asm"

    def test_instructions(self):
        self.assertEqual(len(self.log.instructions), 10)

    def test_sub_state_change_elided(self):
        elided_sub = self.log.subroutines[0x800A]
        self.assertEqual(len(elided_sub.state_changes), 1)
        net_change = elided_sub.state_change
        # Net effect on M/X is nil, so neither flag is recorded.
        self.assertEqual(net_change.m, None)
        self.assertEqual(net_change.x, None)
class PhpPlpTest(LogTest, TestCase):
    """A PHP/PLP pair restores the processor state, so no net change remains."""

    asm = "php_plp.asm"

    def test_instructions(self):
        self.assertEqual(len(self.log.instructions), 9)

    def test_sub_state_change_elided(self):
        wrapped_sub = self.log.subroutines[0x800A]
        self.assertEqual(len(wrapped_sub.state_changes), 1)
        restored = wrapped_sub.state_change
        self.assertEqual(restored.m, None)
        self.assertEqual(restored.x, None)
class JumpInsideSubroutineTest(LogTest, TestCase):
    """A jump into the middle of another subroutine keeps state tracking sound."""

    asm = "jump_inside_subroutine.asm"

    def test_sub_state_change(self):
        target_sub = self.log.subroutines[0x8016]
        self.assertEqual(len(target_sub.state_changes), 1)
        self.assertEqual(target_sub.state_change.m, 0)
class UnknownJumpTest(LogTest, TestCase):
    """Behavior around an indirect jump whose target cannot be resolved."""

    asm = "unknown_jump.asm"

    def test_sub_state_change_unknown(self):
        reset = self.log.subroutines_by_label["reset"]
        jump_sub = self.log.subroutines[0x800B]
        # The indirect jump leaves the return state unknown...
        self.assertDictEqual(jump_sub.state_changes, {0x800B: StateChange(unknown=True)})
        self.assertTrue(jump_sub.indirect_jumps)
        self.assertTrue(jump_sub.has_unknown_return_state)
        # ...so execution stops at the call site and at the jump itself.
        self.assertNotIn(0x8005, reset.instructions)
        self.assertNotIn(0x800E, jump_sub.instructions)
        self.assertTrue(reset.instructions[0x8002].stopped_execution)
        self.assertTrue(jump_sub.instructions[0x800B].stopped_execution)

    def test_assert_state_change(self):
        # Asserting a state change lets the analysis continue past the call.
        target = self.log.subroutines[0x800B]
        self.log.assert_subroutine_state_change(target, 0x800B, StateChange())
        self.assertTrue(self.log.dirty)
        self.log.analyze()
        self.assertFalse(self.log.dirty)

        reset = self.log.subroutines_by_label["reset"]
        target = self.log.subroutines[0x800B]
        self.assertIn(0x8005, reset.instructions)
        self.assertIn(0x8008, reset.instructions)
        self.assertTrue(target.indirect_jumps)
        self.assertTrue(target.has_asserted_state_change)
        self.assertFalse(target.has_unknown_return_state)

        # Deasserting restores the unknown return state.
        self.log.deassert_subroutine_state_change(0x800B, 0x800B)
        self.assertTrue(self.log.dirty)
        self.log.analyze()
        self.assertFalse(self.log.dirty)

        target = self.log.subroutines[0x800B]
        self.assertFalse(target.has_asserted_state_change)
        self.assertTrue(target.has_unknown_return_state)

    def test_load_save(self):
        # A renamed label survives a save/load round trip but not a reset.
        renamed = self.log.subroutines[0x800B]
        self.log.rename_label(renamed.label, "unknown")
        saved = self.log.save()

        self.log.reset()
        self.log.analyze()
        renamed = self.log.subroutines[0x800B]
        self.assertNotEqual(renamed.label, "unknown")

        self.log.load(saved)
        renamed = self.log.subroutines[0x800B]
        self.assertEqual(renamed.label, "unknown")
class SimplifiableReturnState(LogTest, TestCase):
    """Multiple compatible return states should collapse into known ones."""

    asm = "simplifiable_return_state.asm"

    def test_double_state_change_simplification(self):
        reset = self.log.subroutines_by_label["reset"]
        # double_state_change is simplified, so analysis continues past it.
        self.assertIn(0x8005, reset.instructions)
        self.assertIn(0x8008, reset.instructions)

        simplified = self.log.subroutines[0x8017]
        self.assertFalse(simplified.has_unknown_return_state)
        self.assertEqual(len(simplified.state_changes), 2)

        unresolved = self.log.subroutines[0x801F]
        self.assertTrue(unresolved.indirect_jumps)
        self.assertTrue(unresolved.has_unknown_return_state)

    def test_instruction_state_change_assertion(self):
        # Asserting on the instruction resolves the unknown state...
        self.log.assert_instruction_state_change(0x8024, StateChange())
        self.assertTrue(self.log.dirty)
        self.log.analyze()
        self.assertFalse(self.log.dirty)

        unresolved = self.log.subroutines[0x801F]
        self.assertTrue(unresolved.indirect_jumps)
        self.assertTrue(unresolved.instruction_has_asserted_state_change)
        self.assertFalse(unresolved.has_unknown_return_state)

        # ...and deasserting brings it back.
        self.log.deassert_instruction_state_change(0x8024)
        self.assertTrue(self.log.dirty)
        self.log.analyze()
        self.assertFalse(self.log.dirty)

        unresolved = self.log.subroutines[0x801F]
        self.assertFalse(unresolved.instruction_has_asserted_state_change)
        self.assertTrue(unresolved.has_unknown_return_state)
class SuspectInstructionsTest(LogTest, TestCase):
    """Suspect instructions taint the subroutine's return state."""

    asm = "suspect_instructions.asm"

    def test_detects_suspect_instruction(self):
        reset_sub = self.log.subroutines_by_label["reset"]
        self.assertTrue(reset_sub.has_suspect_instructions)
        self.assertTrue(reset_sub.has_unknown_return_state)
class StackManipulationTest(LogTest, TestCase):
    """Direct stack manipulation makes the return state untrackable."""

    asm = "stack_manipulation.asm"

    def test_stack_manipulation_is_detected(self):
        reset_sub = self.log.subroutines_by_label["reset"]
        # Execution did not continue past the call into the manipulating sub.
        self.assertNotIn(0x8004, reset_sub.instructions)
        self.assertTrue(self.log.subroutines[0x8007].has_unknown_return_state)
class ChangeRegisterTest(LogTest, TestCase):
    """Tracked accumulator values across LDA/ADC of various operand sizes."""

    asm = "change_register.asm"

    def test_value_of_register_changes(self):
        insns = self.log.subroutines_by_label["reset"].instructions

        def accumulator(pc):
            # Known value of A recorded at the instruction at `pc`.
            return insns[pc].registers["a"]

        # LDA: full load, partial (8-bit) load, unknown load, then rebuilt word.
        self.assertEqual(accumulator(0x8006), 0x1234)
        self.assertEqual(accumulator(0x8008), 0x34)
        self.assertEqual(accumulator(0x800C), None)
        self.assertEqual(accumulator(0x800E), 0xFF)
        self.assertEqual(accumulator(0x8010), 0x12FF)
        # ADC: arithmetic updates the tracked value.
        self.assertEqual(accumulator(0x8013), 0x13FF)
        self.assertEqual(accumulator(0x8019), 0x1300)
| |
# Generated from sdoc/antlr/sdoc1Parser.g4 by ANTLR 4.9
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN (transition network) for sdoc1Parser.

    Generated by ANTLR 4.9 from sdoc1Parser.g4 — do not edit by hand;
    regenerate from the grammar instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3*")
        buf.write("\u00f4\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
        buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
        buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\3\2")
        buf.write("\3\2\7\2\63\n\2\f\2\16\2\66\13\2\3\3\3\3\3\4\3\4\3\4\3")
        buf.write("\4\3\4\3\4\3\4\3\4\3\4\5\4C\n\4\3\5\3\5\3\6\3\6\3\6\5")
        buf.write("\6J\n\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3")
        buf.write("\b\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\7\tc\n")
        buf.write("\t\f\t\16\tf\13\t\3\t\3\t\5\tj\n\t\3\t\3\t\3\n\3\n\3\n")
        buf.write("\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\f")
        buf.write("\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\5\16\u0086")
        buf.write("\n\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\7\17\u0090")
        buf.write("\n\17\f\17\16\17\u0093\13\17\3\20\3\20\3\20\3\20\3\20")
        buf.write("\3\20\3\20\3\20\3\20\7\20\u009e\n\20\f\20\16\20\u00a1")
        buf.write("\13\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\7")
        buf.write("\21\u00ac\n\21\f\21\16\21\u00af\13\21\3\22\3\22\3\22\3")
        buf.write("\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
        buf.write("\3\22\7\22\u00c0\n\22\f\22\16\22\u00c3\13\22\3\23\3\23")
        buf.write("\3\23\3\23\3\23\3\23\3\23\3\23\3\23\7\23\u00ce\n\23\f")
        buf.write("\23\16\23\u00d1\13\23\3\24\3\24\3\24\3\24\3\24\3\24\7")
        buf.write("\24\u00d9\n\24\f\24\16\24\u00dc\13\24\3\25\3\25\3\25\3")
        buf.write("\25\3\25\3\25\7\25\u00e4\n\25\f\25\16\25\u00e7\13\25\3")
        buf.write("\26\3\26\3\26\3\26\3\26\5\26\u00ee\n\26\3\27\3\27\3\30")
        buf.write("\3\30\3\30\2\t\34\36 \"$&(\31\2\4\6\b\n\f\16\20\22\24")
        buf.write("\26\30\32\34\36 \"$&(*,.\2\2\2\u00fa\2\64\3\2\2\2\4\67")
        buf.write("\3\2\2\2\6B\3\2\2\2\bD\3\2\2\2\nF\3\2\2\2\fM\3\2\2\2\16")
        buf.write("R\3\2\2\2\20W\3\2\2\2\22m\3\2\2\2\24r\3\2\2\2\26w\3\2")
        buf.write("\2\2\30|\3\2\2\2\32\u0085\3\2\2\2\34\u0087\3\2\2\2\36")
        buf.write("\u0094\3\2\2\2 \u00a2\3\2\2\2\"\u00b0\3\2\2\2$\u00c4\3")
        buf.write("\2\2\2&\u00d2\3\2\2\2(\u00dd\3\2\2\2*\u00ed\3\2\2\2,\u00ef")
        buf.write("\3\2\2\2.\u00f1\3\2\2\2\60\63\5\6\4\2\61\63\5\4\3\2\62")
        buf.write("\60\3\2\2\2\62\61\3\2\2\2\63\66\3\2\2\2\64\62\3\2\2\2")
        buf.write("\64\65\3\2\2\2\65\3\3\2\2\2\66\64\3\2\2\2\678\7\3\2\2")
        buf.write("8\5\3\2\2\29C\5\b\5\2:C\5\n\6\2;C\5\f\7\2<C\5\16\b\2=")
        buf.write("C\5\20\t\2>C\5\22\n\2?C\5\24\13\2@C\5\26\f\2AC\5\30\r")
        buf.write("\2B9\3\2\2\2B:\3\2\2\2B;\3\2\2\2B<\3\2\2\2B=\3\2\2\2B")
        buf.write(">\3\2\2\2B?\3\2\2\2B@\3\2\2\2BA\3\2\2\2C\7\3\2\2\2DE\7")
        buf.write("\4\2\2E\t\3\2\2\2FG\7\6\2\2GI\7\24\2\2HJ\5.\30\2IH\3\2")
        buf.write("\2\2IJ\3\2\2\2JK\3\2\2\2KL\7\25\2\2L\13\3\2\2\2MN\7\13")
        buf.write("\2\2NO\7\24\2\2OP\5.\30\2PQ\7\25\2\2Q\r\3\2\2\2RS\7\n")
        buf.write("\2\2ST\7\21\2\2TU\7\23\2\2UV\7\22\2\2V\17\3\2\2\2WX\7")
        buf.write("\f\2\2XY\7\24\2\2YZ\5.\30\2Z[\7\25\2\2[d\5\2\2\2\\]\7")
        buf.write("\7\2\2]^\7\24\2\2^_\5.\30\2_`\7\25\2\2`a\5\2\2\2ac\3\2")
        buf.write("\2\2b\\\3\2\2\2cf\3\2\2\2db\3\2\2\2de\3\2\2\2ei\3\2\2")
        buf.write("\2fd\3\2\2\2gh\7\b\2\2hj\5\2\2\2ig\3\2\2\2ij\3\2\2\2j")
        buf.write("k\3\2\2\2kl\7\t\2\2l\21\3\2\2\2mn\7\r\2\2no\7\21\2\2o")
        buf.write("p\7\23\2\2pq\7\22\2\2q\23\3\2\2\2rs\7\16\2\2st\7\21\2")
        buf.write("\2tu\7\23\2\2uv\7\22\2\2v\25\3\2\2\2wx\7\17\2\2xy\7\24")
        buf.write("\2\2yz\5.\30\2z{\7\25\2\2{\27\3\2\2\2|}\7\20\2\2}\31\3")
        buf.write("\2\2\2~\u0086\7(\2\2\177\u0086\7)\2\2\u0080\u0086\7*\2")
        buf.write("\2\u0081\u0082\7\27\2\2\u0082\u0083\5.\30\2\u0083\u0084")
        buf.write("\7\30\2\2\u0084\u0086\3\2\2\2\u0085~\3\2\2\2\u0085\177")
        buf.write("\3\2\2\2\u0085\u0080\3\2\2\2\u0085\u0081\3\2\2\2\u0086")
        buf.write("\33\3\2\2\2\u0087\u0088\b\17\1\2\u0088\u0089\5\32\16\2")
        buf.write("\u0089\u0091\3\2\2\2\u008a\u008b\f\3\2\2\u008b\u008c\7")
        buf.write("\31\2\2\u008c\u008d\5.\30\2\u008d\u008e\7\32\2\2\u008e")
        buf.write("\u0090\3\2\2\2\u008f\u008a\3\2\2\2\u0090\u0093\3\2\2\2")
        buf.write("\u0091\u008f\3\2\2\2\u0091\u0092\3\2\2\2\u0092\35\3\2")
        buf.write("\2\2\u0093\u0091\3\2\2\2\u0094\u0095\b\20\1\2\u0095\u0096")
        buf.write("\5\34\17\2\u0096\u009f\3\2\2\2\u0097\u0098\f\4\2\2\u0098")
        buf.write("\u0099\7\33\2\2\u0099\u009e\5\34\17\2\u009a\u009b\f\3")
        buf.write("\2\2\u009b\u009c\7\34\2\2\u009c\u009e\5\34\17\2\u009d")
        buf.write("\u0097\3\2\2\2\u009d\u009a\3\2\2\2\u009e\u00a1\3\2\2\2")
        buf.write("\u009f\u009d\3\2\2\2\u009f\u00a0\3\2\2\2\u00a0\37\3\2")
        buf.write("\2\2\u00a1\u009f\3\2\2\2\u00a2\u00a3\b\21\1\2\u00a3\u00a4")
        buf.write("\5\36\20\2\u00a4\u00ad\3\2\2\2\u00a5\u00a6\f\4\2\2\u00a6")
        buf.write("\u00a7\7\35\2\2\u00a7\u00ac\5\36\20\2\u00a8\u00a9\f\3")
        buf.write("\2\2\u00a9\u00aa\7\36\2\2\u00aa\u00ac\5\36\20\2\u00ab")
        buf.write("\u00a5\3\2\2\2\u00ab\u00a8\3\2\2\2\u00ac\u00af\3\2\2\2")
        buf.write("\u00ad\u00ab\3\2\2\2\u00ad\u00ae\3\2\2\2\u00ae!\3\2\2")
        buf.write("\2\u00af\u00ad\3\2\2\2\u00b0\u00b1\b\22\1\2\u00b1\u00b2")
        buf.write("\5 \21\2\u00b2\u00c1\3\2\2\2\u00b3\u00b4\f\6\2\2\u00b4")
        buf.write("\u00b5\7$\2\2\u00b5\u00c0\5 \21\2\u00b6\u00b7\f\5\2\2")
        buf.write("\u00b7\u00b8\7 \2\2\u00b8\u00c0\5 \21\2\u00b9\u00ba\f")
        buf.write("\4\2\2\u00ba\u00bb\7%\2\2\u00bb\u00c0\5 \21\2\u00bc\u00bd")
        buf.write("\f\3\2\2\u00bd\u00be\7!\2\2\u00be\u00c0\5 \21\2\u00bf")
        buf.write("\u00b3\3\2\2\2\u00bf\u00b6\3\2\2\2\u00bf\u00b9\3\2\2\2")
        buf.write("\u00bf\u00bc\3\2\2\2\u00c0\u00c3\3\2\2\2\u00c1\u00bf\3")
        buf.write("\2\2\2\u00c1\u00c2\3\2\2\2\u00c2#\3\2\2\2\u00c3\u00c1")
        buf.write("\3\2\2\2\u00c4\u00c5\b\23\1\2\u00c5\u00c6\5\"\22\2\u00c6")
        buf.write("\u00cf\3\2\2\2\u00c7\u00c8\f\4\2\2\u00c8\u00c9\7\37\2")
        buf.write("\2\u00c9\u00ce\5\"\22\2\u00ca\u00cb\f\3\2\2\u00cb\u00cc")
        buf.write("\7&\2\2\u00cc\u00ce\5\"\22\2\u00cd\u00c7\3\2\2\2\u00cd")
        buf.write("\u00ca\3\2\2\2\u00ce\u00d1\3\2\2\2\u00cf\u00cd\3\2\2\2")
        buf.write("\u00cf\u00d0\3\2\2\2\u00d0%\3\2\2\2\u00d1\u00cf\3\2\2")
        buf.write("\2\u00d2\u00d3\b\24\1\2\u00d3\u00d4\5$\23\2\u00d4\u00da")
        buf.write("\3\2\2\2\u00d5\u00d6\f\3\2\2\u00d6\u00d7\7\"\2\2\u00d7")
        buf.write("\u00d9\5$\23\2\u00d8\u00d5\3\2\2\2\u00d9\u00dc\3\2\2\2")
        buf.write("\u00da\u00d8\3\2\2\2\u00da\u00db\3\2\2\2\u00db\'\3\2\2")
        buf.write("\2\u00dc\u00da\3\2\2\2\u00dd\u00de\b\25\1\2\u00de\u00df")
        buf.write("\5&\24\2\u00df\u00e5\3\2\2\2\u00e0\u00e1\f\3\2\2\u00e1")
        buf.write("\u00e2\7#\2\2\u00e2\u00e4\5&\24\2\u00e3\u00e0\3\2\2\2")
        buf.write("\u00e4\u00e7\3\2\2\2\u00e5\u00e3\3\2\2\2\u00e5\u00e6\3")
        buf.write("\2\2\2\u00e6)\3\2\2\2\u00e7\u00e5\3\2\2\2\u00e8\u00ee")
        buf.write("\5(\25\2\u00e9\u00ea\5\34\17\2\u00ea\u00eb\5,\27\2\u00eb")
        buf.write("\u00ec\5*\26\2\u00ec\u00ee\3\2\2\2\u00ed\u00e8\3\2\2\2")
        buf.write("\u00ed\u00e9\3\2\2\2\u00ee+\3\2\2\2\u00ef\u00f0\7\'\2")
        buf.write("\2\u00f0-\3\2\2\2\u00f1\u00f2\5*\26\2\u00f2/\3\2\2\2\25")
        buf.write("\62\64BIdi\u0085\u0091\u009d\u009f\u00ab\u00ad\u00bf\u00c1")
        buf.write("\u00cd\u00cf\u00da\u00e5\u00ed")
        return buf.getvalue()
class sdoc1Parser ( Parser ):
grammarFileName = "sdoc1Parser.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "'\\comment'",
"'\\debug'", "'\\elif'", "'\\else'", "'\\endif'", "'\\error'",
"'\\expression'", "'\\if'", "'\\include'", "'\\notice'",
"'\\substitute'", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"'('", "')'", "'['", "']'", "'*'", "'/'", "'+'", "'-'",
"'=='", "'>'", "'>='", "'&&'", "'||'", "'<'", "'<='",
"'!='", "'='" ]
symbolicNames = [ "<INVALID>", "TEXT", "LINE_COMMENT", "COMMENT", "DEBUG",
"ELIF", "ELSE", "ENDIF", "ERROR", "EXPRESSION", "IF",
"INCLUDE", "NOTICE", "SUBSTITUTE", "SDOC2_COMMAND",
"SIMPLE_OBRACE", "SIMPLE_CBRACE", "SIMPLE_ARG", "EXPR_OBRACE",
"EXPR_CBRACE", "EXPR_WS", "EXPR_LEFT_PAREN", "EXPR_RIGHT_PAREN",
"EXPR_LEFT_BRACKET", "EXPR_RIGHT_BRACKET", "EXPR_MULT",
"EXPR_DIV", "EXPR_ADD", "EXPR_MINUS", "EXPR_EQUAL",
"EXPR_GT", "EXPR_GTE", "EXPR_LOGICAL_AND", "EXPR_LOGICAL_OR",
"EXPR_LT", "EXPR_LTE", "EXPR_NOT_EQUAL", "EXPR_ASSIGN",
"EXPR_IDENTIFIER", "EXPR_INTEGER_CONSTANT", "EXPR_STRING_CONSTANT" ]
RULE_sdoc = 0
RULE_text = 1
RULE_command = 2
RULE_cmd_comment = 3
RULE_cmd_debug = 4
RULE_cmd_expression = 5
RULE_cmd_error = 6
RULE_cmd_if = 7
RULE_cmd_include = 8
RULE_cmd_notice = 9
RULE_cmd_substitute = 10
RULE_cmd_sdoc2 = 11
RULE_primaryExpression = 12
RULE_postfixExpression = 13
RULE_multiplicativeExpression = 14
RULE_additiveExpression = 15
RULE_relationalExpression = 16
RULE_equalityExpression = 17
RULE_logicalAndExpression = 18
RULE_logicalOrExpression = 19
RULE_assignmentExpression = 20
RULE_assignmentOperator = 21
RULE_expression = 22
ruleNames = [ "sdoc", "text", "command", "cmd_comment", "cmd_debug",
"cmd_expression", "cmd_error", "cmd_if", "cmd_include",
"cmd_notice", "cmd_substitute", "cmd_sdoc2", "primaryExpression",
"postfixExpression", "multiplicativeExpression", "additiveExpression",
"relationalExpression", "equalityExpression", "logicalAndExpression",
"logicalOrExpression", "assignmentExpression", "assignmentOperator",
"expression" ]
EOF = Token.EOF
TEXT=1
LINE_COMMENT=2
COMMENT=3
DEBUG=4
ELIF=5
ELSE=6
ENDIF=7
ERROR=8
EXPRESSION=9
IF=10
INCLUDE=11
NOTICE=12
SUBSTITUTE=13
SDOC2_COMMAND=14
SIMPLE_OBRACE=15
SIMPLE_CBRACE=16
SIMPLE_ARG=17
EXPR_OBRACE=18
EXPR_CBRACE=19
EXPR_WS=20
EXPR_LEFT_PAREN=21
EXPR_RIGHT_PAREN=22
EXPR_LEFT_BRACKET=23
EXPR_RIGHT_BRACKET=24
EXPR_MULT=25
EXPR_DIV=26
EXPR_ADD=27
EXPR_MINUS=28
EXPR_EQUAL=29
EXPR_GT=30
EXPR_GTE=31
EXPR_LOGICAL_AND=32
EXPR_LOGICAL_OR=33
EXPR_LT=34
EXPR_LTE=35
EXPR_NOT_EQUAL=36
EXPR_ASSIGN=37
EXPR_IDENTIFIER=38
EXPR_INTEGER_CONSTANT=39
EXPR_STRING_CONSTANT=40
    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        """Create a parser over `input`, wiring up the ATN simulator (generated)."""
        super().__init__(input, output)
        self.checkVersion("4.9")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class SdocContext(ParserRuleContext):
        # Parse-tree node for the `sdoc` rule: (command | text)*.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def command(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(sdoc1Parser.CommandContext)
            else:
                return self.getTypedRuleContext(sdoc1Parser.CommandContext,i)

        def text(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(sdoc1Parser.TextContext)
            else:
                return self.getTypedRuleContext(sdoc1Parser.TextContext,i)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_sdoc

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitSdoc" ):
                return visitor.visitSdoc(self)
            else:
                return visitor.visitChildren(self)

    def sdoc(self):
        # Generated entry rule: loop over commands/text until EOF-set token.
        localctx = sdoc1Parser.SdocContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_sdoc)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 50
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << sdoc1Parser.TEXT) | (1 << sdoc1Parser.LINE_COMMENT) | (1 << sdoc1Parser.DEBUG) | (1 << sdoc1Parser.ERROR) | (1 << sdoc1Parser.EXPRESSION) | (1 << sdoc1Parser.IF) | (1 << sdoc1Parser.INCLUDE) | (1 << sdoc1Parser.NOTICE) | (1 << sdoc1Parser.SUBSTITUTE) | (1 << sdoc1Parser.SDOC2_COMMAND))) != 0):
                self.state = 48
                self._errHandler.sync(self)
                token = self._input.LA(1)
                if token in [sdoc1Parser.LINE_COMMENT, sdoc1Parser.DEBUG, sdoc1Parser.ERROR, sdoc1Parser.EXPRESSION, sdoc1Parser.IF, sdoc1Parser.INCLUDE, sdoc1Parser.NOTICE, sdoc1Parser.SUBSTITUTE, sdoc1Parser.SDOC2_COMMAND]:
                    self.state = 46
                    self.command()
                    pass
                elif token in [sdoc1Parser.TEXT]:
                    self.state = 47
                    self.text()
                    pass
                else:
                    raise NoViableAltException(self)

                self.state = 52
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class TextContext(ParserRuleContext):
        # Parse-tree node for the `text` rule: a single TEXT token.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def TEXT(self):
            return self.getToken(sdoc1Parser.TEXT, 0)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_text

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitText" ):
                return visitor.visitText(self)
            else:
                return visitor.visitChildren(self)

    def text(self):
        # Generated rule: matches one TEXT token.
        localctx = sdoc1Parser.TextContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_text)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 53
            self.match(sdoc1Parser.TEXT)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class CommandContext(ParserRuleContext):
        # Parse-tree node for the `command` rule: one of the \-commands.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def cmd_comment(self):
            return self.getTypedRuleContext(sdoc1Parser.Cmd_commentContext,0)

        def cmd_debug(self):
            return self.getTypedRuleContext(sdoc1Parser.Cmd_debugContext,0)

        def cmd_expression(self):
            return self.getTypedRuleContext(sdoc1Parser.Cmd_expressionContext,0)

        def cmd_error(self):
            return self.getTypedRuleContext(sdoc1Parser.Cmd_errorContext,0)

        def cmd_if(self):
            return self.getTypedRuleContext(sdoc1Parser.Cmd_ifContext,0)

        def cmd_include(self):
            return self.getTypedRuleContext(sdoc1Parser.Cmd_includeContext,0)

        def cmd_notice(self):
            return self.getTypedRuleContext(sdoc1Parser.Cmd_noticeContext,0)

        def cmd_substitute(self):
            return self.getTypedRuleContext(sdoc1Parser.Cmd_substituteContext,0)

        def cmd_sdoc2(self):
            return self.getTypedRuleContext(sdoc1Parser.Cmd_sdoc2Context,0)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_command

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCommand" ):
                return visitor.visitCommand(self)
            else:
                return visitor.visitChildren(self)

    def command(self):
        # Generated rule: dispatch on the lookahead token to one command rule.
        localctx = sdoc1Parser.CommandContext(self, self._ctx, self.state)
        self.enterRule(localctx, 4, self.RULE_command)
        try:
            self.state = 64
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [sdoc1Parser.LINE_COMMENT]:
                self.enterOuterAlt(localctx, 1)
                self.state = 55
                self.cmd_comment()
                pass
            elif token in [sdoc1Parser.DEBUG]:
                self.enterOuterAlt(localctx, 2)
                self.state = 56
                self.cmd_debug()
                pass
            elif token in [sdoc1Parser.EXPRESSION]:
                self.enterOuterAlt(localctx, 3)
                self.state = 57
                self.cmd_expression()
                pass
            elif token in [sdoc1Parser.ERROR]:
                self.enterOuterAlt(localctx, 4)
                self.state = 58
                self.cmd_error()
                pass
            elif token in [sdoc1Parser.IF]:
                self.enterOuterAlt(localctx, 5)
                self.state = 59
                self.cmd_if()
                pass
            elif token in [sdoc1Parser.INCLUDE]:
                self.enterOuterAlt(localctx, 6)
                self.state = 60
                self.cmd_include()
                pass
            elif token in [sdoc1Parser.NOTICE]:
                self.enterOuterAlt(localctx, 7)
                self.state = 61
                self.cmd_notice()
                pass
            elif token in [sdoc1Parser.SUBSTITUTE]:
                self.enterOuterAlt(localctx, 8)
                self.state = 62
                self.cmd_substitute()
                pass
            elif token in [sdoc1Parser.SDOC2_COMMAND]:
                self.enterOuterAlt(localctx, 9)
                self.state = 63
                self.cmd_sdoc2()
                pass
            else:
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class Cmd_commentContext(ParserRuleContext):
        # Parse-tree node for `cmd_comment`: a single LINE_COMMENT token.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def LINE_COMMENT(self):
            return self.getToken(sdoc1Parser.LINE_COMMENT, 0)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_cmd_comment

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmd_comment" ):
                return visitor.visitCmd_comment(self)
            else:
                return visitor.visitChildren(self)

    def cmd_comment(self):
        # Generated rule: matches one LINE_COMMENT token.
        localctx = sdoc1Parser.Cmd_commentContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_cmd_comment)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 66
            self.match(sdoc1Parser.LINE_COMMENT)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class Cmd_debugContext(ParserRuleContext):
        # Parse-tree node for `cmd_debug`: \debug{ expression? }.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def DEBUG(self):
            return self.getToken(sdoc1Parser.DEBUG, 0)

        def EXPR_OBRACE(self):
            return self.getToken(sdoc1Parser.EXPR_OBRACE, 0)

        def EXPR_CBRACE(self):
            return self.getToken(sdoc1Parser.EXPR_CBRACE, 0)

        def expression(self):
            return self.getTypedRuleContext(sdoc1Parser.ExpressionContext,0)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_cmd_debug

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmd_debug" ):
                return visitor.visitCmd_debug(self)
            else:
                return visitor.visitChildren(self)

    def cmd_debug(self):
        # Generated rule: DEBUG '{' expression? '}' — expression is optional.
        localctx = sdoc1Parser.Cmd_debugContext(self, self._ctx, self.state)
        self.enterRule(localctx, 8, self.RULE_cmd_debug)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 68
            self.match(sdoc1Parser.DEBUG)
            self.state = 69
            self.match(sdoc1Parser.EXPR_OBRACE)
            self.state = 71
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << sdoc1Parser.EXPR_LEFT_PAREN) | (1 << sdoc1Parser.EXPR_IDENTIFIER) | (1 << sdoc1Parser.EXPR_INTEGER_CONSTANT) | (1 << sdoc1Parser.EXPR_STRING_CONSTANT))) != 0):
                self.state = 70
                self.expression()

            self.state = 73
            self.match(sdoc1Parser.EXPR_CBRACE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class Cmd_expressionContext(ParserRuleContext):
        # Parse-tree node for `cmd_expression`: \expression{ expression }.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def EXPRESSION(self):
            return self.getToken(sdoc1Parser.EXPRESSION, 0)

        def EXPR_OBRACE(self):
            return self.getToken(sdoc1Parser.EXPR_OBRACE, 0)

        def expression(self):
            return self.getTypedRuleContext(sdoc1Parser.ExpressionContext,0)

        def EXPR_CBRACE(self):
            return self.getToken(sdoc1Parser.EXPR_CBRACE, 0)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_cmd_expression

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmd_expression" ):
                return visitor.visitCmd_expression(self)
            else:
                return visitor.visitChildren(self)

    def cmd_expression(self):
        # Generated rule: EXPRESSION '{' expression '}'.
        localctx = sdoc1Parser.Cmd_expressionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 10, self.RULE_cmd_expression)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 75
            self.match(sdoc1Parser.EXPRESSION)
            self.state = 76
            self.match(sdoc1Parser.EXPR_OBRACE)
            self.state = 77
            self.expression()
            self.state = 78
            self.match(sdoc1Parser.EXPR_CBRACE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class Cmd_errorContext(ParserRuleContext):
        # Parse-tree node for `cmd_error`: \error{ SIMPLE_ARG }.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ERROR(self):
            return self.getToken(sdoc1Parser.ERROR, 0)

        def SIMPLE_OBRACE(self):
            return self.getToken(sdoc1Parser.SIMPLE_OBRACE, 0)

        def SIMPLE_ARG(self):
            return self.getToken(sdoc1Parser.SIMPLE_ARG, 0)

        def SIMPLE_CBRACE(self):
            return self.getToken(sdoc1Parser.SIMPLE_CBRACE, 0)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_cmd_error

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmd_error" ):
                return visitor.visitCmd_error(self)
            else:
                return visitor.visitChildren(self)

    def cmd_error(self):
        # Generated rule: ERROR '{' SIMPLE_ARG '}'.
        localctx = sdoc1Parser.Cmd_errorContext(self, self._ctx, self.state)
        self.enterRule(localctx, 12, self.RULE_cmd_error)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 80
            self.match(sdoc1Parser.ERROR)
            self.state = 81
            self.match(sdoc1Parser.SIMPLE_OBRACE)
            self.state = 82
            self.match(sdoc1Parser.SIMPLE_ARG)
            self.state = 83
            self.match(sdoc1Parser.SIMPLE_CBRACE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class Cmd_ifContext(ParserRuleContext):
        # Parse-tree node for `cmd_if`:
        # \if{expr} sdoc (\elif{expr} sdoc)* (\else sdoc)? \endif.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def IF(self):
            return self.getToken(sdoc1Parser.IF, 0)

        def EXPR_OBRACE(self, i:int=None):
            if i is None:
                return self.getTokens(sdoc1Parser.EXPR_OBRACE)
            else:
                return self.getToken(sdoc1Parser.EXPR_OBRACE, i)

        def expression(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(sdoc1Parser.ExpressionContext)
            else:
                return self.getTypedRuleContext(sdoc1Parser.ExpressionContext,i)

        def EXPR_CBRACE(self, i:int=None):
            if i is None:
                return self.getTokens(sdoc1Parser.EXPR_CBRACE)
            else:
                return self.getToken(sdoc1Parser.EXPR_CBRACE, i)

        def sdoc(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(sdoc1Parser.SdocContext)
            else:
                return self.getTypedRuleContext(sdoc1Parser.SdocContext,i)

        def ENDIF(self):
            return self.getToken(sdoc1Parser.ENDIF, 0)

        def ELIF(self, i:int=None):
            if i is None:
                return self.getTokens(sdoc1Parser.ELIF)
            else:
                return self.getToken(sdoc1Parser.ELIF, i)

        def ELSE(self):
            return self.getToken(sdoc1Parser.ELSE, 0)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_cmd_if

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmd_if" ):
                return visitor.visitCmd_if(self)
            else:
                return visitor.visitChildren(self)

    def cmd_if(self):
        # Generated rule: IF branch, zero or more ELIF branches, optional ELSE, ENDIF.
        localctx = sdoc1Parser.Cmd_ifContext(self, self._ctx, self.state)
        self.enterRule(localctx, 14, self.RULE_cmd_if)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 85
            self.match(sdoc1Parser.IF)
            self.state = 86
            self.match(sdoc1Parser.EXPR_OBRACE)
            self.state = 87
            self.expression()
            self.state = 88
            self.match(sdoc1Parser.EXPR_CBRACE)
            self.state = 89
            self.sdoc()
            self.state = 98
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==sdoc1Parser.ELIF:
                self.state = 90
                self.match(sdoc1Parser.ELIF)
                self.state = 91
                self.match(sdoc1Parser.EXPR_OBRACE)
                self.state = 92
                self.expression()
                self.state = 93
                self.match(sdoc1Parser.EXPR_CBRACE)
                self.state = 94
                self.sdoc()
                self.state = 100
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 103
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==sdoc1Parser.ELSE:
                self.state = 101
                self.match(sdoc1Parser.ELSE)
                self.state = 102
                self.sdoc()

            self.state = 105
            self.match(sdoc1Parser.ENDIF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class Cmd_includeContext(ParserRuleContext):
        # Parse-tree node for `cmd_include`: \include{ SIMPLE_ARG }.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def INCLUDE(self):
            return self.getToken(sdoc1Parser.INCLUDE, 0)

        def SIMPLE_OBRACE(self):
            return self.getToken(sdoc1Parser.SIMPLE_OBRACE, 0)

        def SIMPLE_ARG(self):
            return self.getToken(sdoc1Parser.SIMPLE_ARG, 0)

        def SIMPLE_CBRACE(self):
            return self.getToken(sdoc1Parser.SIMPLE_CBRACE, 0)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_cmd_include

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmd_include" ):
                return visitor.visitCmd_include(self)
            else:
                return visitor.visitChildren(self)

    def cmd_include(self):
        # Generated rule: INCLUDE '{' SIMPLE_ARG '}'.
        localctx = sdoc1Parser.Cmd_includeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 16, self.RULE_cmd_include)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 107
            self.match(sdoc1Parser.INCLUDE)
            self.state = 108
            self.match(sdoc1Parser.SIMPLE_OBRACE)
            self.state = 109
            self.match(sdoc1Parser.SIMPLE_ARG)
            self.state = 110
            self.match(sdoc1Parser.SIMPLE_CBRACE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class Cmd_noticeContext(ParserRuleContext):
        # Parse-tree node for `cmd_notice`: \notice{ SIMPLE_ARG }.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def NOTICE(self):
            return self.getToken(sdoc1Parser.NOTICE, 0)

        def SIMPLE_OBRACE(self):
            return self.getToken(sdoc1Parser.SIMPLE_OBRACE, 0)

        def SIMPLE_ARG(self):
            return self.getToken(sdoc1Parser.SIMPLE_ARG, 0)

        def SIMPLE_CBRACE(self):
            return self.getToken(sdoc1Parser.SIMPLE_CBRACE, 0)

        def getRuleIndex(self):
            return sdoc1Parser.RULE_cmd_notice

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitCmd_notice" ):
                return visitor.visitCmd_notice(self)
            else:
                return visitor.visitChildren(self)

    def cmd_notice(self):
        # Generated rule: NOTICE '{' SIMPLE_ARG '}'.
        localctx = sdoc1Parser.Cmd_noticeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 18, self.RULE_cmd_notice)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 112
            self.match(sdoc1Parser.NOTICE)
            self.state = 113
            self.match(sdoc1Parser.SIMPLE_OBRACE)
            self.state = 114
            self.match(sdoc1Parser.SIMPLE_ARG)
            self.state = 115
            self.match(sdoc1Parser.SIMPLE_CBRACE)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class Cmd_substituteContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def SUBSTITUTE(self):
return self.getToken(sdoc1Parser.SUBSTITUTE, 0)
def EXPR_OBRACE(self):
return self.getToken(sdoc1Parser.EXPR_OBRACE, 0)
def expression(self):
return self.getTypedRuleContext(sdoc1Parser.ExpressionContext,0)
def EXPR_CBRACE(self):
return self.getToken(sdoc1Parser.EXPR_CBRACE, 0)
def getRuleIndex(self):
return sdoc1Parser.RULE_cmd_substitute
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCmd_substitute" ):
return visitor.visitCmd_substitute(self)
else:
return visitor.visitChildren(self)
def cmd_substitute(self):
    """Parse rule ``cmd_substitute``: SUBSTITUTE EXPR_OBRACE expression EXPR_CBRACE."""
    localctx = sdoc1Parser.Cmd_substituteContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_cmd_substitute)
    try:
        self.enterOuterAlt(localctx, 1)
        # State numbers index into the serialized ATN; generated code.
        self.state = 117
        self.match(sdoc1Parser.SUBSTITUTE)
        self.state = 118
        self.match(sdoc1Parser.EXPR_OBRACE)
        self.state = 119
        self.expression()
        self.state = 120
        self.match(sdoc1Parser.EXPR_CBRACE)
    except RecognitionException as re:
        # Standard ANTLR recovery.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Cmd_sdoc2Context(ParserRuleContext):
    """Parse-tree node for the ``cmd_sdoc2`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def SDOC2_COMMAND(self):
        return self.getToken(sdoc1Parser.SDOC2_COMMAND, 0)

    def getRuleIndex(self):
        return sdoc1Parser.RULE_cmd_sdoc2

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the rule-specific visitor method when implemented.
        if hasattr( visitor, "visitCmd_sdoc2" ):
            return visitor.visitCmd_sdoc2(self)
        else:
            return visitor.visitChildren(self)
def cmd_sdoc2(self):
    """Parse rule ``cmd_sdoc2``: a single SDOC2_COMMAND token."""
    localctx = sdoc1Parser.Cmd_sdoc2Context(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_cmd_sdoc2)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 122
        self.match(sdoc1Parser.SDOC2_COMMAND)
    except RecognitionException as re:
        # Standard ANTLR recovery.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PrimaryExpressionContext(ParserRuleContext):
    """Base context for the ``primaryExpression`` rule; labeled alternatives subclass it."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return sdoc1Parser.RULE_primaryExpression

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class PrimaryExpressionIdentifierContext(PrimaryExpressionContext):
    """Labeled alternative ``PrimaryExpressionIdentifier``: a bare EXPR_IDENTIFIER."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def EXPR_IDENTIFIER(self):
        return self.getToken(sdoc1Parser.EXPR_IDENTIFIER, 0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitPrimaryExpressionIdentifier" ):
            return visitor.visitPrimaryExpressionIdentifier(self)
        else:
            return visitor.visitChildren(self)
class PrimaryExpressionSubExpressionContext(PrimaryExpressionContext):
    """Labeled alternative ``PrimaryExpressionSubExpression``: '(' expression ')'."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def EXPR_LEFT_PAREN(self):
        return self.getToken(sdoc1Parser.EXPR_LEFT_PAREN, 0)

    def expression(self):
        return self.getTypedRuleContext(sdoc1Parser.ExpressionContext,0)

    def EXPR_RIGHT_PAREN(self):
        return self.getToken(sdoc1Parser.EXPR_RIGHT_PAREN, 0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitPrimaryExpressionSubExpression" ):
            return visitor.visitPrimaryExpressionSubExpression(self)
        else:
            return visitor.visitChildren(self)
class PrimaryExpressionIntegerConstantContext(PrimaryExpressionContext):
    """Labeled alternative ``PrimaryExpressionIntegerConstant``: EXPR_INTEGER_CONSTANT."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def EXPR_INTEGER_CONSTANT(self):
        return self.getToken(sdoc1Parser.EXPR_INTEGER_CONSTANT, 0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitPrimaryExpressionIntegerConstant" ):
            return visitor.visitPrimaryExpressionIntegerConstant(self)
        else:
            return visitor.visitChildren(self)
class PrimaryExpressionStringConstantContext(PrimaryExpressionContext):
    """Labeled alternative ``PrimaryExpressionStringConstant``: EXPR_STRING_CONSTANT."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.PrimaryExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def EXPR_STRING_CONSTANT(self):
        return self.getToken(sdoc1Parser.EXPR_STRING_CONSTANT, 0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitPrimaryExpressionStringConstant" ):
            return visitor.visitPrimaryExpressionStringConstant(self)
        else:
            return visitor.visitChildren(self)
def primaryExpression(self):
    """Parse rule ``primaryExpression``: identifier | integer | string | '(' expression ')'."""
    localctx = sdoc1Parser.PrimaryExpressionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_primaryExpression)
    try:
        self.state = 131
        self._errHandler.sync(self)
        # One token of lookahead selects the alternative; localctx is then
        # re-wrapped into the matching labeled-alternative context class.
        token = self._input.LA(1)
        if token in [sdoc1Parser.EXPR_IDENTIFIER]:
            localctx = sdoc1Parser.PrimaryExpressionIdentifierContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 124
            self.match(sdoc1Parser.EXPR_IDENTIFIER)
            pass
        elif token in [sdoc1Parser.EXPR_INTEGER_CONSTANT]:
            localctx = sdoc1Parser.PrimaryExpressionIntegerConstantContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 125
            self.match(sdoc1Parser.EXPR_INTEGER_CONSTANT)
            pass
        elif token in [sdoc1Parser.EXPR_STRING_CONSTANT]:
            localctx = sdoc1Parser.PrimaryExpressionStringConstantContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 126
            self.match(sdoc1Parser.EXPR_STRING_CONSTANT)
            pass
        elif token in [sdoc1Parser.EXPR_LEFT_PAREN]:
            localctx = sdoc1Parser.PrimaryExpressionSubExpressionContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 127
            self.match(sdoc1Parser.EXPR_LEFT_PAREN)
            self.state = 128
            self.expression()
            self.state = 129
            self.match(sdoc1Parser.EXPR_RIGHT_PAREN)
            pass
        else:
            # No alternative matches the lookahead token.
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class PostfixExpressionContext(ParserRuleContext):
    """Base context for the ``postfixExpression`` rule; labeled alternatives subclass it."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return sdoc1Parser.RULE_postfixExpression

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class PrimaryExpressionParentContext(PostfixExpressionContext):
    """Labeled alternative ``PrimaryExpressionParent``: a plain primaryExpression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.PostfixExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def primaryExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.PrimaryExpressionContext,0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitPrimaryExpressionParent" ):
            return visitor.visitPrimaryExpressionParent(self)
        else:
            return visitor.visitChildren(self)
class PostfixExpressionExpressionContext(PostfixExpressionContext):
    """Labeled alternative ``PostfixExpressionExpression``: postfixExpression '[' expression ']'."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.PostfixExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def postfixExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.PostfixExpressionContext,0)

    def EXPR_LEFT_BRACKET(self):
        return self.getToken(sdoc1Parser.EXPR_LEFT_BRACKET, 0)

    def expression(self):
        return self.getTypedRuleContext(sdoc1Parser.ExpressionContext,0)

    def EXPR_RIGHT_BRACKET(self):
        return self.getToken(sdoc1Parser.EXPR_RIGHT_BRACKET, 0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitPostfixExpressionExpression" ):
            return visitor.visitPostfixExpressionExpression(self)
        else:
            return visitor.visitChildren(self)
def postfixExpression(self, _p:int=0):
    """Parse left-recursive rule ``postfixExpression``: primaryExpression ('[' expression ']')*.

    ``_p`` is the minimum precedence for the recursion (ANTLR precedence climbing).
    """
    _parentctx = self._ctx
    _parentState = self.state
    localctx = sdoc1Parser.PostfixExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 26
    self.enterRecursionRule(localctx, 26, self.RULE_postfixExpression, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        localctx = sdoc1Parser.PrimaryExpressionParentContext(self, localctx)
        self._ctx = localctx
        _prevctx = localctx
        self.state = 134
        self.primaryExpression()
        self._ctx.stop = self._input.LT(-1)
        self.state = 143
        self._errHandler.sync(self)
        # Keep consuming '[' expression ']' suffixes while the adaptive
        # predictor (decision 7) selects alternative 1.
        _alt = self._interp.adaptivePredict(self._input,7,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                localctx = sdoc1Parser.PostfixExpressionExpressionContext(self, sdoc1Parser.PostfixExpressionContext(self, _parentctx, _parentState))
                self.pushNewRecursionContext(localctx, _startState, self.RULE_postfixExpression)
                self.state = 136
                if not self.precpred(self._ctx, 1):
                    from antlr4.error.Errors import FailedPredicateException
                    raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                self.state = 137
                self.match(sdoc1Parser.EXPR_LEFT_BRACKET)
                self.state = 138
                self.expression()
                self.state = 139
                self.match(sdoc1Parser.EXPR_RIGHT_BRACKET)
            self.state = 145
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,7,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class MultiplicativeExpressionContext(ParserRuleContext):
    """Parse-tree node for the left-recursive ``multiplicativeExpression`` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # Generated token/rule accessors.
    def postfixExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.PostfixExpressionContext,0)

    def multiplicativeExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.MultiplicativeExpressionContext,0)

    def EXPR_MULT(self):
        return self.getToken(sdoc1Parser.EXPR_MULT, 0)

    def EXPR_DIV(self):
        return self.getToken(sdoc1Parser.EXPR_DIV, 0)

    def getRuleIndex(self):
        return sdoc1Parser.RULE_multiplicativeExpression

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the rule-specific visitor method when implemented.
        if hasattr( visitor, "visitMultiplicativeExpression" ):
            return visitor.visitMultiplicativeExpression(self)
        else:
            return visitor.visitChildren(self)
def multiplicativeExpression(self, _p:int=0):
    """Parse left-recursive rule ``multiplicativeExpression``: postfix (('*'|'/') postfix)*."""
    _parentctx = self._ctx
    _parentState = self.state
    localctx = sdoc1Parser.MultiplicativeExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 28
    self.enterRecursionRule(localctx, 28, self.RULE_multiplicativeExpression, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 147
        self.postfixExpression(0)
        self._ctx.stop = self._input.LT(-1)
        self.state = 157
        self._errHandler.sync(self)
        # Loop while the adaptive predictor (decision 9) sees another operator.
        _alt = self._interp.adaptivePredict(self._input,9,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                self.state = 155
                self._errHandler.sync(self)
                # Decision 8 chooses between the '*' and '/' alternatives.
                la_ = self._interp.adaptivePredict(self._input,8,self._ctx)
                if la_ == 1:
                    localctx = sdoc1Parser.MultiplicativeExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplicativeExpression)
                    self.state = 149
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 150
                    self.match(sdoc1Parser.EXPR_MULT)
                    self.state = 151
                    self.postfixExpression(0)
                    pass
                elif la_ == 2:
                    localctx = sdoc1Parser.MultiplicativeExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_multiplicativeExpression)
                    self.state = 152
                    if not self.precpred(self._ctx, 1):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                    self.state = 153
                    self.match(sdoc1Parser.EXPR_DIV)
                    self.state = 154
                    self.postfixExpression(0)
                    pass
            self.state = 159
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,9,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class AdditiveExpressionContext(ParserRuleContext):
    """Parse-tree node for the left-recursive ``additiveExpression`` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # Generated token/rule accessors.
    def multiplicativeExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.MultiplicativeExpressionContext,0)

    def additiveExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.AdditiveExpressionContext,0)

    def EXPR_ADD(self):
        return self.getToken(sdoc1Parser.EXPR_ADD, 0)

    def EXPR_MINUS(self):
        return self.getToken(sdoc1Parser.EXPR_MINUS, 0)

    def getRuleIndex(self):
        return sdoc1Parser.RULE_additiveExpression

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the rule-specific visitor method when implemented.
        if hasattr( visitor, "visitAdditiveExpression" ):
            return visitor.visitAdditiveExpression(self)
        else:
            return visitor.visitChildren(self)
def additiveExpression(self, _p:int=0):
    """Parse left-recursive rule ``additiveExpression``: multiplicative (('+'|'-') multiplicative)*."""
    _parentctx = self._ctx
    _parentState = self.state
    localctx = sdoc1Parser.AdditiveExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 30
    self.enterRecursionRule(localctx, 30, self.RULE_additiveExpression, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 161
        self.multiplicativeExpression(0)
        self._ctx.stop = self._input.LT(-1)
        self.state = 171
        self._errHandler.sync(self)
        # Loop while the adaptive predictor (decision 11) sees another operator.
        _alt = self._interp.adaptivePredict(self._input,11,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                self.state = 169
                self._errHandler.sync(self)
                # Decision 10 chooses between the '+' and '-' alternatives.
                la_ = self._interp.adaptivePredict(self._input,10,self._ctx)
                if la_ == 1:
                    localctx = sdoc1Parser.AdditiveExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_additiveExpression)
                    self.state = 163
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 164
                    self.match(sdoc1Parser.EXPR_ADD)
                    self.state = 165
                    self.multiplicativeExpression(0)
                    pass
                elif la_ == 2:
                    localctx = sdoc1Parser.AdditiveExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_additiveExpression)
                    self.state = 166
                    if not self.precpred(self._ctx, 1):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                    self.state = 167
                    self.match(sdoc1Parser.EXPR_MINUS)
                    self.state = 168
                    self.multiplicativeExpression(0)
                    pass
            self.state = 173
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,11,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class RelationalExpressionContext(ParserRuleContext):
    """Parse-tree node for the left-recursive ``relationalExpression`` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # Generated token/rule accessors.
    def additiveExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.AdditiveExpressionContext,0)

    def relationalExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.RelationalExpressionContext,0)

    def EXPR_LT(self):
        return self.getToken(sdoc1Parser.EXPR_LT, 0)

    def EXPR_GT(self):
        return self.getToken(sdoc1Parser.EXPR_GT, 0)

    def EXPR_LTE(self):
        return self.getToken(sdoc1Parser.EXPR_LTE, 0)

    def EXPR_GTE(self):
        return self.getToken(sdoc1Parser.EXPR_GTE, 0)

    def getRuleIndex(self):
        return sdoc1Parser.RULE_relationalExpression

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the rule-specific visitor method when implemented.
        if hasattr( visitor, "visitRelationalExpression" ):
            return visitor.visitRelationalExpression(self)
        else:
            return visitor.visitChildren(self)
def relationalExpression(self, _p:int=0):
    """Parse left-recursive rule ``relationalExpression``: additive (('<'|'>'|'<='|'>=') additive)*."""
    _parentctx = self._ctx
    _parentState = self.state
    localctx = sdoc1Parser.RelationalExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 32
    self.enterRecursionRule(localctx, 32, self.RULE_relationalExpression, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 175
        self.additiveExpression(0)
        self._ctx.stop = self._input.LT(-1)
        self.state = 191
        self._errHandler.sync(self)
        # Loop while the adaptive predictor (decision 13) sees another operator.
        _alt = self._interp.adaptivePredict(self._input,13,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                self.state = 189
                self._errHandler.sync(self)
                # Decision 12 chooses among '<', '>', '<=' and '>='.
                la_ = self._interp.adaptivePredict(self._input,12,self._ctx)
                if la_ == 1:
                    localctx = sdoc1Parser.RelationalExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
                    self.state = 177
                    if not self.precpred(self._ctx, 4):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
                    self.state = 178
                    self.match(sdoc1Parser.EXPR_LT)
                    self.state = 179
                    self.additiveExpression(0)
                    pass
                elif la_ == 2:
                    localctx = sdoc1Parser.RelationalExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
                    self.state = 180
                    if not self.precpred(self._ctx, 3):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
                    self.state = 181
                    self.match(sdoc1Parser.EXPR_GT)
                    self.state = 182
                    self.additiveExpression(0)
                    pass
                elif la_ == 3:
                    localctx = sdoc1Parser.RelationalExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
                    self.state = 183
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 184
                    self.match(sdoc1Parser.EXPR_LTE)
                    self.state = 185
                    self.additiveExpression(0)
                    pass
                elif la_ == 4:
                    localctx = sdoc1Parser.RelationalExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_relationalExpression)
                    self.state = 186
                    if not self.precpred(self._ctx, 1):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                    self.state = 187
                    self.match(sdoc1Parser.EXPR_GTE)
                    self.state = 188
                    self.additiveExpression(0)
                    pass
            self.state = 193
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,13,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class EqualityExpressionContext(ParserRuleContext):
    """Parse-tree node for the left-recursive ``equalityExpression`` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # Generated token/rule accessors.
    def relationalExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.RelationalExpressionContext,0)

    def equalityExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.EqualityExpressionContext,0)

    def EXPR_EQUAL(self):
        return self.getToken(sdoc1Parser.EXPR_EQUAL, 0)

    def EXPR_NOT_EQUAL(self):
        return self.getToken(sdoc1Parser.EXPR_NOT_EQUAL, 0)

    def getRuleIndex(self):
        return sdoc1Parser.RULE_equalityExpression

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the rule-specific visitor method when implemented.
        if hasattr( visitor, "visitEqualityExpression" ):
            return visitor.visitEqualityExpression(self)
        else:
            return visitor.visitChildren(self)
def equalityExpression(self, _p:int=0):
    """Parse left-recursive rule ``equalityExpression``: relational (('=='|'!=') relational)*."""
    _parentctx = self._ctx
    _parentState = self.state
    localctx = sdoc1Parser.EqualityExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 34
    self.enterRecursionRule(localctx, 34, self.RULE_equalityExpression, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 195
        self.relationalExpression(0)
        self._ctx.stop = self._input.LT(-1)
        self.state = 205
        self._errHandler.sync(self)
        # Loop while the adaptive predictor (decision 15) sees another operator.
        _alt = self._interp.adaptivePredict(self._input,15,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                self.state = 203
                self._errHandler.sync(self)
                # Decision 14 chooses between the equality and inequality alternatives.
                la_ = self._interp.adaptivePredict(self._input,14,self._ctx)
                if la_ == 1:
                    localctx = sdoc1Parser.EqualityExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_equalityExpression)
                    self.state = 197
                    if not self.precpred(self._ctx, 2):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                    self.state = 198
                    self.match(sdoc1Parser.EXPR_EQUAL)
                    self.state = 199
                    self.relationalExpression(0)
                    pass
                elif la_ == 2:
                    localctx = sdoc1Parser.EqualityExpressionContext(self, _parentctx, _parentState)
                    self.pushNewRecursionContext(localctx, _startState, self.RULE_equalityExpression)
                    self.state = 200
                    if not self.precpred(self._ctx, 1):
                        from antlr4.error.Errors import FailedPredicateException
                        raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                    self.state = 201
                    self.match(sdoc1Parser.EXPR_NOT_EQUAL)
                    self.state = 202
                    self.relationalExpression(0)
                    pass
            self.state = 207
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,15,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class LogicalAndExpressionContext(ParserRuleContext):
    """Base context for the ``logicalAndExpression`` rule; labeled alternatives subclass it."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return sdoc1Parser.RULE_logicalAndExpression

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class LogicalAndExpressionParentContext(LogicalAndExpressionContext):
    """Labeled alternative ``LogicalAndExpressionParent``: a plain equalityExpression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.LogicalAndExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def equalityExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.EqualityExpressionContext,0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitLogicalAndExpressionParent" ):
            return visitor.visitLogicalAndExpressionParent(self)
        else:
            return visitor.visitChildren(self)
class LogicalAndExpressionAndContext(LogicalAndExpressionContext):
    """Labeled alternative ``LogicalAndExpressionAnd``: logicalAnd '&&' equality."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.LogicalAndExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def logicalAndExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.LogicalAndExpressionContext,0)

    def EXPR_LOGICAL_AND(self):
        return self.getToken(sdoc1Parser.EXPR_LOGICAL_AND, 0)

    def equalityExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.EqualityExpressionContext,0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitLogicalAndExpressionAnd" ):
            return visitor.visitLogicalAndExpressionAnd(self)
        else:
            return visitor.visitChildren(self)
def logicalAndExpression(self, _p:int=0):
    """Parse left-recursive rule ``logicalAndExpression``: equality (EXPR_LOGICAL_AND equality)*."""
    _parentctx = self._ctx
    _parentState = self.state
    localctx = sdoc1Parser.LogicalAndExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 36
    self.enterRecursionRule(localctx, 36, self.RULE_logicalAndExpression, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        localctx = sdoc1Parser.LogicalAndExpressionParentContext(self, localctx)
        self._ctx = localctx
        _prevctx = localctx
        self.state = 209
        self.equalityExpression(0)
        self._ctx.stop = self._input.LT(-1)
        self.state = 216
        self._errHandler.sync(self)
        # Loop while the adaptive predictor (decision 16) sees another '&&'.
        _alt = self._interp.adaptivePredict(self._input,16,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                localctx = sdoc1Parser.LogicalAndExpressionAndContext(self, sdoc1Parser.LogicalAndExpressionContext(self, _parentctx, _parentState))
                self.pushNewRecursionContext(localctx, _startState, self.RULE_logicalAndExpression)
                self.state = 211
                if not self.precpred(self._ctx, 1):
                    from antlr4.error.Errors import FailedPredicateException
                    raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                self.state = 212
                self.match(sdoc1Parser.EXPR_LOGICAL_AND)
                self.state = 213
                self.equalityExpression(0)
            self.state = 218
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,16,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class LogicalOrExpressionContext(ParserRuleContext):
    """Base context for the ``logicalOrExpression`` rule; labeled alternatives subclass it."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return sdoc1Parser.RULE_logicalOrExpression

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class LogicalOrExpressionParentContext(LogicalOrExpressionContext):
    """Labeled alternative ``LogicalOrExpressionParent``: a plain logicalAndExpression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.LogicalOrExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def logicalAndExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.LogicalAndExpressionContext,0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitLogicalOrExpressionParent" ):
            return visitor.visitLogicalOrExpressionParent(self)
        else:
            return visitor.visitChildren(self)
class LogicalOrExpressionLogicalOrContext(LogicalOrExpressionContext):
    """Labeled alternative ``LogicalOrExpressionLogicalOr``: logicalOr '||' logicalAnd."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.LogicalOrExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def logicalOrExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.LogicalOrExpressionContext,0)

    def EXPR_LOGICAL_OR(self):
        return self.getToken(sdoc1Parser.EXPR_LOGICAL_OR, 0)

    def logicalAndExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.LogicalAndExpressionContext,0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitLogicalOrExpressionLogicalOr" ):
            return visitor.visitLogicalOrExpressionLogicalOr(self)
        else:
            return visitor.visitChildren(self)
def logicalOrExpression(self, _p:int=0):
    """Parse left-recursive rule ``logicalOrExpression``: logicalAnd (EXPR_LOGICAL_OR logicalAnd)*."""
    _parentctx = self._ctx
    _parentState = self.state
    localctx = sdoc1Parser.LogicalOrExpressionContext(self, self._ctx, _parentState)
    _prevctx = localctx
    _startState = 38
    self.enterRecursionRule(localctx, 38, self.RULE_logicalOrExpression, _p)
    try:
        self.enterOuterAlt(localctx, 1)
        localctx = sdoc1Parser.LogicalOrExpressionParentContext(self, localctx)
        self._ctx = localctx
        _prevctx = localctx
        self.state = 220
        self.logicalAndExpression(0)
        self._ctx.stop = self._input.LT(-1)
        self.state = 227
        self._errHandler.sync(self)
        # Loop while the adaptive predictor (decision 17) sees another '||'.
        _alt = self._interp.adaptivePredict(self._input,17,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                if self._parseListeners is not None:
                    self.triggerExitRuleEvent()
                _prevctx = localctx
                localctx = sdoc1Parser.LogicalOrExpressionLogicalOrContext(self, sdoc1Parser.LogicalOrExpressionContext(self, _parentctx, _parentState))
                self.pushNewRecursionContext(localctx, _startState, self.RULE_logicalOrExpression)
                self.state = 222
                if not self.precpred(self._ctx, 1):
                    from antlr4.error.Errors import FailedPredicateException
                    raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
                self.state = 223
                self.match(sdoc1Parser.EXPR_LOGICAL_OR)
                self.state = 224
                self.logicalAndExpression(0)
            self.state = 229
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,17,self._ctx)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.unrollRecursionContexts(_parentctx)
    return localctx
class AssignmentExpressionContext(ParserRuleContext):
    """Base context for the ``assignmentExpression`` rule; labeled alternatives subclass it."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return sdoc1Parser.RULE_assignmentExpression

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class AssignmentExpressionAssignmentContext(AssignmentExpressionContext):
    """Labeled alternative ``AssignmentExpressionAssignment``: postfix assignmentOperator assignment."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.AssignmentExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def postfixExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.PostfixExpressionContext,0)

    def assignmentOperator(self):
        return self.getTypedRuleContext(sdoc1Parser.AssignmentOperatorContext,0)

    def assignmentExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.AssignmentExpressionContext,0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitAssignmentExpressionAssignment" ):
            return visitor.visitAssignmentExpressionAssignment(self)
        else:
            return visitor.visitChildren(self)
class AssignmentExpressionParentContext(AssignmentExpressionContext):
    """Labeled alternative ``AssignmentExpressionParent``: a plain logicalOrExpression."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a sdoc1Parser.AssignmentExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def logicalOrExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.LogicalOrExpressionContext,0)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the alternative-specific visitor method when implemented.
        if hasattr( visitor, "visitAssignmentExpressionParent" ):
            return visitor.visitAssignmentExpressionParent(self)
        else:
            return visitor.visitChildren(self)
def assignmentExpression(self):
    """Parse rule ``assignmentExpression``: logicalOr | postfix assignmentOperator assignment."""
    localctx = sdoc1Parser.AssignmentExpressionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 40, self.RULE_assignmentExpression)
    try:
        self.state = 235
        self._errHandler.sync(self)
        # Decision 18 distinguishes a plain expression from an assignment.
        la_ = self._interp.adaptivePredict(self._input,18,self._ctx)
        if la_ == 1:
            localctx = sdoc1Parser.AssignmentExpressionParentContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 230
            self.logicalOrExpression(0)
            pass
        elif la_ == 2:
            localctx = sdoc1Parser.AssignmentExpressionAssignmentContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 231
            self.postfixExpression(0)
            self.state = 232
            self.assignmentOperator()
            self.state = 233
            self.assignmentExpression()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class AssignmentOperatorContext(ParserRuleContext):
    """Parse-tree node for the ``assignmentOperator`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def EXPR_ASSIGN(self):
        return self.getToken(sdoc1Parser.EXPR_ASSIGN, 0)

    def getRuleIndex(self):
        return sdoc1Parser.RULE_assignmentOperator

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the rule-specific visitor method when implemented.
        if hasattr( visitor, "visitAssignmentOperator" ):
            return visitor.visitAssignmentOperator(self)
        else:
            return visitor.visitChildren(self)
def assignmentOperator(self):
    """Parse rule ``assignmentOperator``: a single EXPR_ASSIGN token."""
    localctx = sdoc1Parser.AssignmentOperatorContext(self, self._ctx, self.state)
    self.enterRule(localctx, 42, self.RULE_assignmentOperator)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 237
        self.match(sdoc1Parser.EXPR_ASSIGN)
    except RecognitionException as re:
        # Standard ANTLR recovery.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExpressionContext(ParserRuleContext):
    """Parse-tree node for the top-level ``expression`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def assignmentExpression(self):
        return self.getTypedRuleContext(sdoc1Parser.AssignmentExpressionContext,0)

    def getRuleIndex(self):
        return sdoc1Parser.RULE_expression

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the rule-specific visitor method when implemented.
        if hasattr( visitor, "visitExpression" ):
            return visitor.visitExpression(self)
        else:
            return visitor.visitChildren(self)
def expression(self):
    """Parse rule ``expression``: delegates entirely to ``assignmentExpression``."""
    localctx = sdoc1Parser.ExpressionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 44, self.RULE_expression)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 239
        self.assignmentExpression()
    except RecognitionException as re:
        # Standard ANTLR recovery.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
    """Dispatch a semantic-predicate check to the per-rule handler.

    Lazily builds the rule-index -> handler table on first use, then
    forwards ``(localctx, predIndex)`` to the matching ``*_sempred`` method.
    Raises Exception when ``ruleIndex`` has no registered predicate.
    """
    # Fix: compare to None with `is`, not `==` (PEP 8); build the table
    # as one dict literal instead of seven separate item assignments.
    if self._predicates is None:
        self._predicates = {
            13: self.postfixExpression_sempred,
            14: self.multiplicativeExpression_sempred,
            15: self.additiveExpression_sempred,
            16: self.relationalExpression_sempred,
            17: self.equalityExpression_sempred,
            18: self.logicalAndExpression_sempred,
            19: self.logicalOrExpression_sempred,
        }
    pred = self._predicates.get(ruleIndex, None)
    if pred is None:
        # Keep the original message/exception type: callers may rely on it.
        raise Exception("No predicate with index:" + str(ruleIndex))
    else:
        return pred(localctx, predIndex)
# The *_sempred helpers below implement the precedence checks for the
# left-recursive expression rules; each predIndex maps to one alternative's
# required minimum precedence (ANTLR-generated — do not edit by hand).

def postfixExpression_sempred(self, localctx:PostfixExpressionContext, predIndex:int):
    if predIndex == 0:
        return self.precpred(self._ctx, 1)

def multiplicativeExpression_sempred(self, localctx:MultiplicativeExpressionContext, predIndex:int):
    if predIndex == 1:
        return self.precpred(self._ctx, 2)
    if predIndex == 2:
        return self.precpred(self._ctx, 1)

def additiveExpression_sempred(self, localctx:AdditiveExpressionContext, predIndex:int):
    if predIndex == 3:
        return self.precpred(self._ctx, 2)
    if predIndex == 4:
        return self.precpred(self._ctx, 1)

def relationalExpression_sempred(self, localctx:RelationalExpressionContext, predIndex:int):
    if predIndex == 5:
        return self.precpred(self._ctx, 4)
    if predIndex == 6:
        return self.precpred(self._ctx, 3)
    if predIndex == 7:
        return self.precpred(self._ctx, 2)
    if predIndex == 8:
        return self.precpred(self._ctx, 1)

def equalityExpression_sempred(self, localctx:EqualityExpressionContext, predIndex:int):
    if predIndex == 9:
        return self.precpred(self._ctx, 2)
    if predIndex == 10:
        return self.precpred(self._ctx, 1)

def logicalAndExpression_sempred(self, localctx:LogicalAndExpressionContext, predIndex:int):
    if predIndex == 11:
        return self.precpred(self._ctx, 1)

def logicalOrExpression_sempred(self, localctx:LogicalOrExpressionContext, predIndex:int):
    if predIndex == 12:
        return self.precpred(self._ctx, 1)
| |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING, Union
from .._utils import get_http_request_kwargs
from .._common._constants import SchemaFormat, DEFAULT_VERSION
from .._common._schema import Schema, SchemaProperties
from .._common._response_handlers import (
_parse_response_schema,
_parse_response_schema_properties,
)
from .._generated.aio._azure_schema_registry import AzureSchemaRegistry
from .._generated.rest import schema as schema_rest
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
class SchemaRegistryClient(object):
    """
    SchemaRegistryClient is a client for registering and retrieving schemas from the Azure Schema Registry service.

    :param str fully_qualified_namespace: The Schema Registry service fully qualified host name.
     For example: my-namespace.servicebus.windows.net.
    :param credential: To authenticate managing the entities of the SchemaRegistry namespace.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :keyword str api_version: The Schema Registry service API version to use for requests.
     Default value and only accepted value currently is "2021-10".

    .. admonition:: Example:

        .. literalinclude:: ../samples/async_samples/sample_code_schemaregistry_async.py
            :start-after: [START create_sr_client_async]
            :end-before: [END create_sr_client_async]
            :language: python
            :dedent: 4
            :caption: Create a new instance of the SchemaRegistryClient.

    """

    def __init__(
        self,
        fully_qualified_namespace: str,
        credential: "AsyncTokenCredential",
        **kwargs: Any
    ) -> None:
        api_version = kwargs.pop("api_version", DEFAULT_VERSION)
        # All operations delegate to the auto-generated async client.
        self._generated_client = AzureSchemaRegistry(
            credential=credential,
            endpoint=fully_qualified_namespace,
            api_version=api_version,
            **kwargs
        )

    async def __aenter__(self):
        # Delegate context management to the generated client (opens pipeline).
        await self._generated_client.__aenter__()
        return self

    async def __aexit__(self, *args):
        await self._generated_client.__aexit__(*args)

    async def close(self) -> None:
        """This method is to close the sockets opened by the client.
        It need not be used when using with a context manager.
        """
        await self._generated_client.close()

    async def register_schema(
        self,
        group_name: str,
        name: str,
        definition: str,
        format: Union[str, SchemaFormat],  # pylint:disable=redefined-builtin
        **kwargs: Any
    ) -> SchemaProperties:
        """
        Register new schema. If schema of specified name does not exist in specified group,
        schema is created at version 1. If schema of specified name exists already in specified group,
        schema is created at latest version + 1.

        :param str group_name: Schema group under which schema should be registered.
        :param str name: Name of schema being registered.
        :param str definition: String representation of the schema being registered.
        :param format: Format for the schema being registered.
         For now Avro is the only supported schema format by the service.
        :type format: Union[str, ~azure.schemaregistry.SchemaFormat]
        :rtype: ~azure.schemaregistry.SchemaProperties
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_code_schemaregistry_async.py
                :start-after: [START register_schema_async]
                :end-before: [END register_schema_async]
                :language: python
                :dedent: 4
                :caption: Register a new schema.

        """
        # Accept either a SchemaFormat enum member or its plain string value.
        try:
            format = format.value
        except AttributeError:
            pass
        # Normalize casing (e.g. "avro" -> "Avro") for the content type header.
        format = format.capitalize()
        # Split HTTP-pipeline-specific kwargs from the operation kwargs.
        http_request_kwargs = get_http_request_kwargs(kwargs)
        request = schema_rest.build_register_request(
            group_name=group_name,
            schema_name=name,
            content=definition,
            content_type=kwargs.pop(
                "content_type", "application/json; serialization={}".format(format)
            ),
            **http_request_kwargs
        )
        response = await self._generated_client.send_request(request, **kwargs)
        response.raise_for_status()
        return _parse_response_schema_properties(response, format)

    async def get_schema(self, schema_id: str, **kwargs: Any) -> Schema:
        """
        Gets a registered schema by its unique ID.
        Azure Schema Registry guarantees that ID is unique within a namespace.

        :param str schema_id: References specific schema in registry namespace.
        :rtype: ~azure.schemaregistry.Schema
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_code_schemaregistry_async.py
                :start-after: [START get_schema_async]
                :end-before: [END get_schema_async]
                :language: python
                :dedent: 4
                :caption: Get schema by id.

        """
        http_request_kwargs = get_http_request_kwargs(kwargs)
        request = schema_rest.build_get_by_id_request(
            id=schema_id, **http_request_kwargs
        )
        response = await self._generated_client.send_request(request, **kwargs)
        response.raise_for_status()
        return _parse_response_schema(response)

    async def get_schema_properties(
        self,
        group_name: str,
        name: str,
        definition: str,
        format: Union[str, SchemaFormat],  # pylint:disable=redefined-builtin
        **kwargs: Any
    ) -> SchemaProperties:
        """
        Gets the schema properties corresponding to an existing schema within the specified schema group,
        as matched by schema definition comparison.

        :param str group_name: Schema group under which schema should be registered.
        :param str name: Name of schema being registered.
        :param str definition: String representation of the schema being registered.
        :param format: Format for the schema being registered.
        :type format: Union[str, ~azure.schemaregistry.SchemaFormat]
        :rtype: ~azure.schemaregistry.SchemaProperties
        :raises: :class:`~azure.core.exceptions.HttpResponseError`

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_code_schemaregistry_async.py
                :start-after: [START get_schema_id_async]
                :end-before: [END get_schema_id_async]
                :language: python
                :dedent: 4
                :caption: Get schema by id.

        """
        # Accept either a SchemaFormat enum member or its plain string value.
        try:
            format = format.value
        except AttributeError:
            pass
        format = format.capitalize()
        http_request_kwargs = get_http_request_kwargs(kwargs)
        request = schema_rest.build_query_id_by_content_request(
            group_name=group_name,
            schema_name=name,
            content=definition,
            content_type=kwargs.pop(
                "content_type", "application/json; serialization={}".format(format)
            ),
            **http_request_kwargs
        )
        response = await self._generated_client.send_request(request, **kwargs)
        response.raise_for_status()
        return _parse_response_schema_properties(response, format)
| |
""" Odoo real module.
Odoo is an OpenERP framework.
Website: http://www.odoo.com
"""
from distutils.util import strtobool
from functools import partial
from importlib import import_module
import logging
import os
import sys
#-
import psycopg2 # pylint:disable=import-error
sys.path[0] = os.path.abspath(os.curdir)
import odoo
# Environment variables used by the parent runner process to pass flags to
# this subprocess (read in main() and _run_script()).
SHELL_ENV_QUIET = 'RUNNER_SUBPROCESS_ARG_QUIET'
SHELL_ENV_WITH_SERVER = 'RUNNER_SUBPROCESS_ARGS_WITH_SERVER'

logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger(__name__)
def _run_server():
    """Start a regular Odoo server with the remaining command-line arguments."""
    odoo.cli.main()
def _run_light(arg):
    """Run an Odoo CLI command with networking, workers and cron disabled.

    :param arg: extra CLI token (e.g. 'shell' or '-u') injected before the
        user-supplied arguments.
    """
    major = odoo.release.version_info[0]
    # Odoo renamed --no-xmlrpc to --no-http in version 11.
    no_net_flag = '--no-xmlrpc' if major <= 10 else '--no-http'
    # Splice the flags in right after the program name, preserving the order
    # the individual insert(1, ...) calls would have produced.
    sys.argv[1:1] = ['--max-cron-threads=0', '--workers=0', no_net_flag, arg]
    odoo.cli.main()
def _load_config(odoo_args):
    """Collect CLI flags from sys.argv into ``odoo_args`` and parse Odoo's config.

    :param odoo_args: base list of Odoo arguments; extended in place with every
        command-line argument that starts with '-'.
    :return: the parsed ``odoo.tools.config`` object.
    """
    cli_flags = [candidate for candidate in sys.argv[1:] if candidate.startswith('-')]
    odoo_args.extend(cli_flags)
    odoo.tools.config.parse_config(odoo_args)
    return odoo.tools.config
def _run_silent_server(quiet=False):
    """Start an Odoo server without networking, workers or cron, then stop it.

    Used to get a fully initialized registry for one-shot operations.

    :param quiet: when True, skip printing the configuration report.
    """
    version = odoo.release.version_info[0]
    # --no-xmlrpc was renamed --no-http in Odoo 11.
    _load_config([
        '--no-xmlrpc' if version <= 10 else '--no-http',
        '--workers=0',
        '--max-cron-threads=0'])
    if not quiet:
        odoo.cli.server.report_configuration()
    # preload=[] loads no extra databases; stop=True shuts the server down
    # right after start-up, leaving the environment initialized.
    odoo.service.server.start(preload=[], stop=True)
def _execute(*callbacks):
    """Run callbacks inside a superuser Odoo environment.

    Each callback is invoked with keyword arguments ``odoo``/``openerp`` (the
    framework module), ``env`` (an Environment bound to a superuser cursor)
    and ``self`` (the superuser record).  A failing callback is logged and its
    work rolled back; the cursor is also rolled back at the end, so callbacks
    that want to persist changes must commit explicitly (as _install does).
    """
    local_vars = {
        'openerp': odoo,
        'odoo': odoo,
    }
    with odoo.api.Environment.manage():
        registry = odoo.registry(odoo.tools.config['db_name'])
        with registry.cursor() as cr:
            uid = odoo.SUPERUSER_ID
            # Use the superuser's own context (language, timezone, ...).
            ctx = odoo.api.Environment(cr, uid, {})['res.users'].context_get()
            env = odoo.api.Environment(cr, uid, ctx)
            local_vars['env'] = env
            local_vars['self'] = env.user
            for callback in callbacks:
                try:
                    callback(**local_vars)
                except Exception as e: #pylint:disable=broad-except
                    _logger.exception(e)
                    cr.rollback()
            # Discard anything the callbacks left uncommitted.
            cr.rollback()
def _simple_execute(*callbacks):
for callback in callbacks:
try:
callback()
except Exception as e: #pylint:disable=broad-except
_logger.exception(e)
def _reset_database():
    """Drop every object owned by the configured database user.

    Connects directly with psycopg2 using the Odoo connection settings; the
    database itself is kept, only its contents are removed.
    """
    config = _load_config([])
    # Build a DSN from the Odoo configuration, with local defaults.
    dsn = 'postgresql://%s:%s@%s:%s/%s' % (config['db_user'],
        config['db_password'], config['db_host'] or 'localhost',
        config['db_port'] or 5432, config['db_name'])
    # NOTE(review): psycopg2's 'with connection' ends the transaction but does
    # not close the connection; acceptable here since the process exits soon
    # after — confirm if this gets reused in a long-lived context.
    with psycopg2.connect(dsn) as db:
        with db.cursor() as cur:
            cur.execute('DROP OWNED BY CURRENT_USER')
        db.commit()
def _run_script(quiet=False):
    """Import the module named in ``sys.argv[1]`` and run its entry point.

    When the RUNNER_SUBPROCESS_ARGS_WITH_SERVER environment variable is truthy
    (the default) a silent Odoo server is started first and the module's
    ``execute`` callback runs inside an Odoo environment; otherwise only the
    configuration is loaded and the module's ``simple_execute`` callback runs
    standalone.  Positional arguments after the module name are forwarded to
    the callback as ``args``.

    :param quiet: suppress the Odoo configuration report when starting the server.
    """
    with_server = strtobool(os.environ.get(SHELL_ENV_WITH_SERVER, 'y'))
    if with_server:
        _run_silent_server(quiet)
    else:
        _load_config([])
    module_name = sys.argv[1]
    # Anything starting with '-' is an Odoo/CLI flag, not a callback argument.
    callback_args = [arg for arg in sys.argv[2:] if not arg.startswith('-')]
    # Import in its own, narrow try block: the original wrapped the callback
    # execution as well, so an ImportError raised while setting up the Odoo
    # environment was misreported as "unable to load module".
    try:
        mod = import_module(module_name)
    except ImportError:
        _logger.error("Unable to load module '%s'.", module_name)
        return
    if with_server:
        _execute(partial(mod.execute, args=callback_args))
    else:
        _simple_execute(partial(mod.simple_execute, args=callback_args))
def _install(env, **kwargs):
    """Install every module named on the command line.

    Flags (arguments starting with '-') are skipped.  Aborts on the first
    module that cannot be found, even after refreshing the module list, and
    commits the cursor at the end so the installation persists.
    """
    Module = env['ir.module.module']
    for name in sys.argv[1:]:
        if name.startswith('-'):
            continue
        _logger.info("Installing module '%s'.", name)
        found = Module.search([('name', '=', name)]).exists()
        if not found:
            # Unknown module: refresh the module list and retry once.
            Module.update_list()
            found = Module.search([('name', '=', name)]).exists()
            if not found:
                _logger.error("Module '%s' not found!", name)
                break
        if found.state not in ('installed', 'to upgrade'):
            found.button_immediate_install()
    env.cr.commit()
def _uninstall(env, **kwargs):
    """Uninstall every module named on the command line.

    Flags (arguments starting with '-') and unknown or already-uninstalled
    modules are silently skipped; the cursor is committed at the end.
    """
    Module = env['ir.module.module']
    for name in sys.argv[1:]:
        if name.startswith('-'):
            continue
        _logger.info("Uninstalling module '%s'.", name)
        found = Module.search([('name', '=', name)]).exists()
        # Skip modules that do not exist or are not currently installed.
        if found and found.state != 'uninstalled':
            found.button_immediate_uninstall()
    env.cr.commit()
def _list_installed(env, **kwargs):
    """Print the names of all installed modules, alphabetically."""
    Module = env['ir.module.module']
    print("Installed modules:")
    installed = Module.search([('state', '=', 'installed')], order='name')
    for record in installed:
        print(record.name)
def main():
    """Entry point: dispatch the first CLI argument to the matching runner."""
    # Make 'odoo.addons' a namespace package so addons from several
    # directories can be combined.
    __import__('pkg_resources').declare_namespace('odoo.addons')
    # Drop this script's own directory from sys.path so that importing 'odoo'
    # resolves to the real framework rather than local files.
    try:
        sys.path.remove(os.path.dirname(__file__))
    except: #pylint:disable=bare-except
        pass
    try:
        sys.path.remove(os.path.dirname(os.path.abspath(__file__)))
    except: #pylint:disable=bare-except
        pass
    quiet = strtobool(os.environ.get(SHELL_ENV_QUIET, 'n'))
    # Pop the sub-command so the remaining argv can be handed to Odoo as-is.
    cmd = sys.argv.pop(1)
    if cmd == 'server':
        _run_server()
    elif cmd == 'shell':
        _run_light('shell')
    elif cmd == 'upgrade':
        _run_light('-u')
    elif cmd == 'install':
        _run_silent_server(quiet)
        _execute(_install)
    elif cmd == 'uninstall':
        _run_silent_server(quiet)
        _execute(_uninstall)
    elif cmd == 'script':
        _run_script(quiet)
    elif cmd == 'list-installed':
        _run_silent_server(quiet)
        _execute(_list_installed)
    elif cmd == 'cleardb':
        _reset_database()
    else:
        # Unknown sub-command: put it back and let the Odoo server parse it.
        sys.argv.insert(1, cmd)
        _run_server()


if __name__ == '__main__':
    main()
| |
import os.path
import re
import shutil
import subprocess
import time
from abc import ABC
from typing import List, Dict, Union
from panoptes.utils import error
from panoptes.utils.images import cr2 as cr2_utils
from panoptes.utils.serializers import from_yaml
from panoptes.utils.utils import listify
from panoptes.pocs.camera import AbstractCamera
class AbstractGPhotoCamera(AbstractCamera, ABC):  # pragma: no cover
    """ Abstract camera class that uses gphoto2 interaction.

    Exposures and configuration are performed by shelling out to the
    ``gphoto2`` command-line tool; a single subprocess handle is kept so that
    only one command talks to the camera at a time.

    Args:
        config(Dict): Config key/value pairs, defaults to empty dict.
    """

    def __init__(self, *arg, **kwargs):
        super().__init__(*arg, **kwargs)

        # Holder for the currently running gphoto2 subprocess (None = idle).
        self._command_proc = None

        self.logger.info(f'GPhoto2 camera {self.name} created on {self.port}')

    @property
    def temperature(self):
        # DSLRs driven through gphoto2 expose no sensor temperature.
        return None

    @property
    def target_temperature(self):
        return None

    @property
    def cooling_power(self):
        return None

    @AbstractCamera.uid.getter
    def uid(self) -> str:
        """ A six-digit serial number for the camera """
        return self._serial_number[0:6]

    def connect(self):
        raise NotImplementedError

    @property
    def is_exposing(self):
        # poll() returns the exit code once the subprocess has finished, so a
        # non-None value means the exposure command is done and the event can
        # be cleared.
        if self._command_proc is not None and self._command_proc.poll() is not None:
            self._is_exposing_event.clear()
        return self._is_exposing_event.is_set()

    def process_exposure(self, metadata, **kwargs):
        """Converts the CR2 to FITS then processes image."""
        # Wait for exposure to complete. Timeout handled by exposure thread.
        while self.is_exposing:
            time.sleep(1)

        self.logger.debug(f'Processing Canon DSLR exposure with {metadata=!r}')
        file_path = metadata['filepath']
        try:
            self.logger.debug(f"Converting CR2 -> FITS: {file_path}")
            fits_path = cr2_utils.cr2_to_fits(file_path, headers=metadata, remove_cr2=False)
            metadata['filepath'] = fits_path
            super().process_exposure(metadata, **kwargs)
        except TimeoutError:
            self.logger.error(f'Error processing exposure for {file_path} on {self}')

    def command(self, cmd: Union[List[str], str]):
        """ Run gphoto2 command.

        :param cmd: either a single command string or a list of arguments,
            appended to the gphoto2 invocation (after the optional --port).
        :raises error.InvalidCommand: if a command is already running or the
            arguments cannot be handed to gphoto2.
        :raises error.PanError: for any other failure starting the process.
        """
        # Only one gphoto2 command may talk to the camera at a time.
        if self.is_exposing:
            raise error.InvalidCommand("Command already running")

        # Build the command.
        run_cmd = [shutil.which('gphoto2')]
        if self.port is not None:
            run_cmd.extend(['--port', self.port])
        run_cmd.extend(listify(cmd))

        self.logger.debug(f"gphoto2 command: {run_cmd!r}")
        try:
            self._command_proc = subprocess.Popen(
                run_cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=True,
            )
        except OSError as e:
            # Chain the original error so the root cause is preserved.
            raise error.InvalidCommand(f"Can't send command to gphoto2. {e} \t {run_cmd}") from e
        except ValueError as e:
            raise error.InvalidCommand(f"Bad parameters to gphoto2. {e} \t {run_cmd}") from e
        except Exception as e:
            raise error.PanError(e) from e

    def get_command_result(self, timeout: float = 10) -> Union[List[str], None]:
        """ Get the output from the command.

        Accepts a `timeout` param for communicating with the process.

        Blocks up to ``timeout`` seconds; on expiry the subprocess is killed
        and whatever output it produced so far is collected.

        Returns a list of strings corresponding to the output from the gphoto2
        camera or `None` if no command has been specified.
        """
        if self._command_proc is None:
            return None

        self.logger.debug(f"Getting output from proc {self._command_proc.pid}")
        try:
            outs, errs = self._command_proc.communicate(timeout=timeout)
        except subprocess.TimeoutExpired:
            self.logger.debug(f"Timeout while waiting. Killing process {self._command_proc.pid}")
            self._command_proc.kill()
            outs, errs = self._command_proc.communicate()

        self.logger.trace(f'gphoto2 output: {outs=!r}')
        if errs != '':
            self.logger.error(f'gphoto2 error: {errs!r}')

        if isinstance(outs, str):
            outs = outs.split('\n')

        # Command finished; release the slot for the next command.
        self._command_proc = None
        return outs

    def set_property(self, prop: str, val: Union[str, int]):
        """ Set a property on the camera """
        set_cmd = ['--set-config', f'{prop}="{val}"']
        self.command(set_cmd)

        # Forces the command to wait
        self.get_command_result()

    def set_properties(self, prop2index: Dict[str, int] = None, prop2value: Dict[str, str] = None):
        """ Sets a number of properties all at once, by index or value.

        Args:
            prop2index (dict or None): A dict with keys corresponding to the property to
                be set and values corresponding to the index option.
            prop2value (dict or None): A dict with keys corresponding to the property to
                be set and values corresponding to the literal value.
        """
        set_cmd = list()
        if prop2index:
            for prop, val in prop2index.items():
                set_cmd.extend(['--set-config-index', f'{prop}={val}'])
        if prop2value:
            for prop, val in prop2value.items():
                set_cmd.extend(['--set-config-value', f'{prop}="{val}"'])

        self.command(set_cmd)

        # Forces the command to wait
        self.get_command_result()

    def get_property(self, prop: str) -> str:
        """ Gets a property from the camera """
        set_cmd = ['--get-config', f'{prop}']
        self.command(set_cmd)
        result = self.get_command_result()

        output = ''
        # The last 'Current:' line in the output wins.
        for line in result:
            match = re.match(r'Current:\s*(.*)', line)
            if match:
                output = match.group(1)
        return output

    def load_properties(self) -> dict:
        """ Load properties from the camera.

        Reads all the configuration properties available via gphoto2 and returns
        as dictionary.
        """
        self.logger.debug('Getting all properties for gphoto2 camera')
        self.command(['--list-all-config'])
        lines = self.get_command_result()

        properties = {}
        yaml_string = ''

        # Re-shape the gphoto2 text output into a YAML list of property
        # mappings, which is then parsed in one go below.
        for line in lines:
            is_id = len(line.split('/')) > 1
            is_label = re.match(r'^Label:\s*(.*)', line)
            is_type = re.match(r'^Type:\s*(.*)', line)
            is_readonly = re.match(r'^Readonly:\s*(.*)', line)
            is_current = re.match(r'^Current:\s*(.*)', line)
            is_choice = re.match(r'^Choice:\s*(\d+)\s*(.*)', line)
            is_printable = re.match(r'^Printable:\s*(.*)', line)
            is_help = re.match(r'^Help:\s*(.*)', line)

            if is_label or is_type or is_current or is_readonly:
                line = f'  {line}'
            elif is_choice:
                # The first choice opens the 'Choices' sub-mapping.
                if int(is_choice.group(1)) == 0:
                    line = f'  Choices:\n    {int(is_choice.group(1)):d}: {is_choice.group(2)}'
                else:
                    line = f'    {int(is_choice.group(1)):d}: {is_choice.group(2)}'
            elif is_printable:
                line = f'  {line}'
            elif is_help:
                line = f'  {line}'
            elif is_id:
                line = f'- ID: {line}'
            elif line == '' or line == 'END':
                continue
            else:
                self.logger.debug(f'Line not parsed: {line}')

            yaml_string += f'{line}\n'

        self.logger.debug(yaml_string)
        properties_list = from_yaml(yaml_string)

        if isinstance(properties_list, list):
            for prop in properties_list:
                if prop['Label']:
                    properties[prop['Label']] = prop
        else:
            properties = properties_list

        return properties

    def _readout(self, filename, *args, **kwargs):
        if os.path.exists(filename):
            # Bug fix: the original logged the literal '(unknown)' inside an
            # f-string with no placeholder; include the actual file name.
            self.logger.debug(f'Readout complete for {filename}')
            self._readout_complete = True

    def _set_target_temperature(self, target):
        # Cooling is not supported on gphoto2 cameras.
        return None

    def _set_cooling_enabled(self, enable):
        return None
| |
#!/usr/bin/python
# Copyright 2017 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import socket
import subprocess
import pytest
from . import conftest
from .MenderAPI import auth, auth_v2, reset_mender_api
from .common import *
from .common_docker import *
def wait_for_containers(expected_containers, defined_in):
    """Block until the docker-compose project runs the expected container count.

    Polls ``docker-compose ps -q`` once per second for up to five minutes and
    fails the current test if the count is never reached.

    :param expected_containers: number of container IDs expected for the project.
    :param defined_in: list of compose file paths that define the project.
    """
    for _ in range(60 * 5):
        out = subprocess.check_output("docker-compose -p %s %s ps -q" % (conftest.docker_compose_instance, "-f " + " -f ".join(defined_in)), shell=True)
        if len(out.split()) == expected_containers:
            # NOTE(review): sleeps a full minute after the containers appear,
            # presumably to give the services time to finish starting — confirm.
            time.sleep(60)
            return
        else:
            time.sleep(1)
    pytest.fail("timeout: %d containers not running for docker-compose project: %s" % (expected_containers, conftest.docker_compose_instance))
@pytest.fixture(scope="function")
def standard_setup_one_client(request):
    """Fresh docker-compose environment with one client (device not accepted)."""
    # NOTE(review): 'request' is unused; presumably kept for signature symmetry
    # with the other fixtures — confirm before removing.
    restart_docker_compose()
    reset_mender_api()


def setup_set_client_number_bootstrapped(clients):
    """Scale the running environment to ``clients`` clients and accept them all."""
    docker_compose_cmd("scale mender-client=%d" % clients)
    ssh_is_opened()
    auth.reset_auth_token()
    auth_v2.accept_devices(clients)
@pytest.fixture(scope="function")
def standard_setup_one_client_bootstrapped():
    """Fresh environment with one client whose device is accepted."""
    restart_docker_compose()
    reset_mender_api()
    auth_v2.accept_devices(1)


@pytest.fixture(scope="function")
def standard_setup_one_rofs_client_bootstrapped():
    """Environment with one read-only-rootfs client, accepted."""
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.client.rofs.yml up -d")
    ssh_is_opened()
    auth.reset_auth_token()
    auth_v2.accept_devices(1)


@pytest.fixture(scope="function")
def standard_setup_one_docker_client_bootstrapped():
    """Environment with one Docker-based (containerized) client, accepted."""
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.docker-client.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.testing.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml up -d",
                       use_common_files=False)
    ssh_is_opened()
    auth.reset_auth_token()
    auth_v2.accept_devices(1)


@pytest.fixture(scope="function")
def standard_setup_two_clients_bootstrapped():
    """Fresh environment with two clients, both accepted."""
    restart_docker_compose(2)
    reset_mender_api()
    auth_v2.accept_devices(2)


@pytest.fixture(scope="function")
def standard_setup_one_client_bootstrapped_with_s3():
    """Environment with one accepted client using the S3 storage backend."""
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.client.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.testing.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.s3.yml up -d",
                       use_common_files=False)
    # Stream service logs in the background to ease debugging of S3 failures.
    docker_compose_cmd("logs -f &")
    ssh_is_opened()
    auth.reset_auth_token()
    auth_v2.accept_devices(1)


@pytest.fixture(scope="function")
def standard_setup_without_client():
    """Backend-only environment: server and storage, no client container."""
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.testing.yml up -d",
                       use_common_files=False)
@pytest.fixture(scope="function")
def setup_with_legacy_client():
    """Environment running the legacy 1.7.0 client alongside the current server."""
    # The legacy 1.7.0 client was only built for qemux86-64, so skip tests using
    # it when running other platforms.
    if conftest.machine_name != "qemux86-64":
        pytest.skip("Test only works with qemux86-64, and this is %s"
                    % conftest.machine_name)
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.client.yml \
                        -f " + COMPOSE_FILES_PATH + "/tests/legacy-v1-client.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.testing.yml up -d",
                       use_common_files=False)
    ssh_is_opened()
    auth_v2.accept_devices(1)


@pytest.fixture(scope="function")
def standard_setup_with_signed_artifact_client(request):
    """Environment with a client that verifies artifact signatures."""
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/extra/signed-artifact-client-testing/docker-compose.signed-client.yml up -d")
    ssh_is_opened()
    auth.reset_auth_token()
    auth_v2.accept_devices(1)


@pytest.fixture(scope="function")
def standard_setup_with_short_lived_token():
    """Environment configured with short JWT lifetimes for expiry testing."""
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.client.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.testing.yml \
                        -f " + COMPOSE_FILES_PATH + "/extra/expired-token-testing/docker-compose.short-token.yml up -d",
                       use_common_files=False)
    ssh_is_opened()
    auth.reset_auth_token()
    auth_v2.accept_devices(1)


@pytest.fixture(scope="function")
def setup_failover():
    """
    Setup with two servers and one client.
    First server (A) behaves as usual, whereas the second server (B) should
    not expect any clients. Client is initially set up against server A.
    In docker all microservices for B has a suffix "-2"
    """
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.client.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.testing.yml \
                        -f " + COMPOSE_FILES_PATH + "/extra/failover-testing/docker-compose.failover-server.yml up -d",
                       use_common_files=False)
    ssh_is_opened()
    auth.reset_auth_token()
    auth_v2.accept_devices(1)


@pytest.fixture(scope="function")
def running_custom_production_setup(request):
    """Stop everything and hold the production-setup lock for the test's duration."""
    conftest.production_setup_lock.acquire()
    stop_docker_compose()
    reset_mender_api()

    def fin():
        # Always release the lock, even if the test fails.
        conftest.production_setup_lock.release()
        stop_docker_compose()

    request.addfinalizer(fin)
@pytest.fixture(scope="function")
def enterprise_no_client(request):
    """Enterprise backend without any client; torn down after the test."""
    if conftest.is_integration_branch:
        pytest.skip("Enterprise tests disabled on integration branches")
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.testing.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.enterprise.yml \
                        up -d",
                       use_common_files=False)
    # wait a bit for the backend to start
    wait_for_containers(15, [COMPOSE_FILES_PATH + "/docker-compose.yml",
                             COMPOSE_FILES_PATH + "/docker-compose.enterprise.yml",
                             COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml"])

    def fin():
        stop_docker_compose()

    request.addfinalizer(fin)


@pytest.fixture(scope="function")
def enterprise_client_s3(request):
    """Enterprise backend with a client and the S3 storage backend."""
    if conftest.is_integration_branch:
        pytest.skip("Enterprise tests disabled on integration branches")
    stop_docker_compose()
    reset_mender_api()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.testing.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.s3.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.enterprise.yml \
                        up -d",
                       use_common_files=False)
    # NOTE(review): the ".testing.yml " entry carries a trailing space; it is
    # only joined into a shell command line, so it is harmless — but confirm.
    wait_for_containers(15, [COMPOSE_FILES_PATH + "/docker-compose.yml",
                             COMPOSE_FILES_PATH + "/docker-compose.testing.yml ",
                             COMPOSE_FILES_PATH + "/docker-compose.enterprise.yml",
                             COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml",
                             COMPOSE_FILES_PATH + "/docker-compose.storage.s3.yml"])

    def fin():
        stop_docker_compose()

    request.addfinalizer(fin)


@pytest.fixture(scope="function")
def enterprise_no_client_smtp(request):
    """Enterprise backend with SMTP and reCAPTCHA test overrides, no client."""
    if conftest.is_integration_branch:
        pytest.skip("Enterprise tests disabled on integration branches")
    stop_docker_compose()
    reset_mender_api()
    # The SMTP test workers need to reach the host; pass its IP through env.
    host_ip = get_host_ip()
    docker_compose_cmd("-f " + COMPOSE_FILES_PATH + "/docker-compose.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.testing.yml \
                        -f " + COMPOSE_FILES_PATH + "/docker-compose.enterprise.yml \
                        -f " + COMPOSE_FILES_PATH + "/extra/smtp-testing/conductor-workers-smtp-test.yml \
                        -f " + COMPOSE_FILES_PATH + "/extra/recaptcha-testing/tenantadm-test-recaptcha-conf.yml \
                        up -d",
                       use_common_files=False, env={"HOST_IP": host_ip})
    # wait a bit for the backend to start
    wait_for_containers(15, [COMPOSE_FILES_PATH + "/docker-compose.yml",
                             COMPOSE_FILES_PATH + "/docker-compose.enterprise.yml",
                             COMPOSE_FILES_PATH + "/docker-compose.storage.minio.yml"])

    def fin():
        stop_docker_compose()

    request.addfinalizer(fin)
def get_host_ip():
    """Return the host's primary outbound IPv4 address.

    Connecting a UDP socket to a public address performs a routing lookup
    without sending any traffic; the socket's local name is then the address
    this host uses to reach the internet.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe.connect(("8.8.8.8", 80))
    address = probe.getsockname()[0]
    probe.close()
    return address
| |
'''
Created on Aug 16, 2011
@author: jklo
'''
import couchdb
import sys, os, gnupg, json, getpass
import traceback
from uuid import uuid4
import lrnodetemplate as t
from pprint import pprint
import urlparse
#Default url to the couchdb server.
_DEFAULT_COUCHDB_URL = "http://127.0.0.1:5984"
_DEFAULT_AUTH_COUCHDB_URL = "http://admin:password@127.0.0.1:5984"
class ResponseFile(object):
    """Optionally records every user response to a file, one per line.

    When no path is given (or the file cannot be opened) the recorder is a
    no-op, so callers may use it unconditionally.
    """

    def __init__(self, filename=None):
        """:param filename: path of the response file, or None to disable recording."""
        self._response_file = None
        self._path = None
        if filename is not None:
            self.set(filename)

    def set(self, path):
        """(Re)open the response file at ``path``, truncating it.

        Best effort: if the file cannot be opened, recording stays disabled.
        Any previously opened file is closed first (the original leaked it and
        kept writing to it on failure).
        """
        self.close()
        self._path = path
        if path is None:
            return
        try:
            self._response_file = open(path, "w")
            self._response_file.truncate()
            self._response_file.flush()
        except (IOError, OSError):
            # Narrowed from a bare except: only I/O failures disable recording.
            self._response_file = None

    def write(self, response):
        """Append ``response`` followed by a platform line separator, if recording."""
        if self._response_file:
            self._response_file.write("{0}{1}".format(response, os.linesep))
            self._response_file.flush()

    def close(self):
        """Close the response file and disable further recording."""
        if self._response_file:
            self._response_file.close()
        self._response_file = None
        self._path = None


response_file = ResponseFile()
def publishService(nodeUrl, server, dbname, serviceType, serviceName):
    """Build a service-description document and publish it to CouchDB.

    :param nodeUrl: base URL of the node; the service endpoint is derived
        by joining serviceName onto it.
    :param server: connected couchdb.Server instance.
    :param dbname: database to publish into.
    :param serviceType: service type label (e.g. 'publish', 'access').
    :param serviceName: service name, also used as the endpoint path.
    """
    service = {}
    # Start from the shared template and fill in the per-service fields.
    service.update(t.service_description)
    service['service_type'] =serviceType
    service['service_id'] = uuid4().hex
#    service['service_name'] = serviceName+" service"
    service['service_name'] = serviceName
    service["service_endpoint"] = urlparse.urljoin(nodeUrl, serviceName)
    service['service_description']= "{0} {1} service".format(serviceType, serviceName)
    PublishDoc(server, dbname, "{0}:{1} service".format(serviceType, serviceName), service)
def CreateDB(couchServer = _DEFAULT_COUCHDB_URL, dblist=[], deleteDB=False):
'''Creates a DB in Couch based upon config'''
for db in dblist:
if deleteDB:
try:
del couchServer[db]
except couchdb.http.ResourceNotFound as rnf:
print("DB '{0}' doesn't exist on '{1}', creating".format(db, couchServer))
else:
try:
existingDB = couchServer[db]
print("Using existing DB '{0}' on '{1}'\n".format(db, couchServer))
continue
except:
pass
try:
couchServer.create(db)
print("Created DB '{0}' on '{1}'\n".format(db, couchServer))
except Exception as e:
print("Exception while creating database: {0}\n".format(e) )
def PublishDoc(couchServer, dbname, name, doc_data):
    """Create or replace document ``name`` in database ``dbname``.

    Any stale ``_rev`` key is stripped from ``doc_data`` (in place) and a
    pre-existing document of the same name is deleted first, so the write
    never raises a revision conflict.  Errors are printed, not raised.
    """
    try:
        db = couchServer[dbname]
        # Strip a stale revision marker; we always write a fresh document.
        if "_rev" in doc_data:
            del doc_data["_rev"]
        # Delete any existing document of the same name; "not found" is fine.
        try:
            del db[name]
        except Exception:
            pass
        db[name] = doc_data
        # Bug fix: the success message was missing its closing quote.
        print("Added config document '{0}' to '{1}'".format(name, dbname))
    except Exception:
        print("Exception when add config document:\n")
        exc_type, exc_value, exc_tb = sys.exc_info()
        pprint(traceback.format_exception(exc_type, exc_value, exc_tb))
def testCouchServer(serverURL):
    """Return True when an (unauthenticated) CouchDB server answers at serverURL."""
    try:
        couchServer = couchdb.Server(url=serverURL)
        # Try to get the server configuration to ensure the the server is up and
        # and running. There may be a better way of doing that.
        # version() does not require admin credentials.
        couchServer.version()
    except Exception as e:
        print(e)
        print("Cannot connect to couchDB server '{0}'\n".format(serverURL))
        return False
    return True
def testAuthCouchServer(serverURL):
    """Return True when an authenticated (DBA) CouchDB server answers at *serverURL*."""
    try:
        # Reading the server configuration requires admin rights, so success
        # proves both reachability and credentials.
        couchdb.Server(url=serverURL).config()
    except Exception as e:
        print(e)
        print("Cannot connect to couchDB server '{0}'\n".format(serverURL))
        return False
    return True
def getInput(question, defaultInput=None, validateFunc=None, hide_input=False):
    """Prompt until a non-empty (or defaulted) answer passes *validateFunc*.

    The accepted answer is echoed into the module-level ``response_file``
    before being returned.  NOTE: a validator returning None is treated as
    valid -- only an exact False rejects (historical behaviour).
    """
    prompt = question + ': '
    if defaultInput is not None:
        prompt = question + ' [{0}]: '.format(defaultInput)
    while True:
        if hide_input:
            answer = getpass.getpass(prompt)  # e.g. passphrases
        else:
            answer = raw_input(prompt)
        if len(answer.strip()) == 0:
            if defaultInput is None:
                continue  # empty answer and nothing to fall back on
            answer = defaultInput
        if validateFunc is not None and validateFunc(answer) == False:
            continue
        response_file.write(answer)
        return answer
# Placeholder endpoint used until the operator supplies a real URL.
_DEFAULT_ENDPOINT = "http://www.example.com"

def isValidKey(userInput):
    """Placeholder validator; always yields None, which getInput treats as valid."""
    pass

def isURL(userInput):
    """Accept http(s) URLs, but reject the unmodified placeholder endpoint."""
    import re
    lowered = userInput.lower()
    if lowered == _DEFAULT_ENDPOINT:
        return False
    return re.match("^https?://[^/]+", lowered) is not None
# Accepted spellings for boolean answers (compared case-insensitively).
YES = ['t', 'true', 'yes', 'y']
NO = ['f', 'false', 'no', 'n']

def isBoolean(userInput):
    """Return True when *userInput* spells a recognised boolean value.

    Returns None (not False) for anything else so that getInput -- which
    rejects only an exact False -- keeps accepting free-form input,
    preserving the historical behaviour.
    """
    # BUG FIX: the original tested `userInput.lower in NO` (the bound method
    # object, never a member of NO), so negative answers were never matched.
    if userInput.lower() in YES or userInput.lower() in NO:
        return True
def isInt(userInput):
    """Return True when *userInput* parses as a base-10 integer."""
    try:
        int(userInput)
    except ValueError:
        return False
    return True
def getDefaultEndpoint():
    """Return the placeholder endpoint with its host swapped for this machine's hostname."""
    import socket
    hostname = socket.gethostname()
    if hostname is None:
        return _DEFAULT_ENDPOINT
    parts = list(urlparse.urlsplit(_DEFAULT_ENDPOINT))
    parts[1] = hostname  # index 1 of a split result is the netloc
    return urlparse.urlunsplit(parts)
def getSetupInfo(response_file=None):
    """Interactively collect the node configuration and return it as a dict."""
    nodeSetup = {}
    nodeUrl = getInput("Enter the node service endpoint URL", getDefaultEndpoint(), isURL)
    nodeSetup['nodeUrl'] = nodeUrl
    couchDBUrl = getInput("Enter your unauthenticated couchDB server URL",
                          _DEFAULT_COUCHDB_URL, testCouchServer)
    nodeSetup['couchDBUrl'] = couchDBUrl
    couchDBUrlDBA = getInput("Enter your AUTHENTICATED CouchDB server DBA URL",
                             _DEFAULT_AUTH_COUCHDB_URL, testAuthCouchServer)
    nodeSetup['couchDBUrlDBA'] = couchDBUrlDBA
    nodeName = getInput("Enter your node name", "Node@{0}".format(nodeUrl))
    nodeSetup['node_name'] = nodeName
    nodeDescription = getInput("Enter your node description", nodeName)
    nodeSetup['node_description'] = nodeDescription
    # FIX: prompt typo ("indentity") corrected, and the pointless
    # .format(nodeUrl) on a literal with no placeholders removed.
    adminUrl = getInput("Enter node admin identity", "admin@learningregistry.org")
    nodeSetup['node_admin_identity'] = adminUrl
    distributeTargets = getInput("Enter the URLs of nodes that you wish to distribute to",
                                 "")
    nodeSetup['connections'] = distributeTargets.split()
    # FIX: stray double quote in the prompt replaced with a quoted "gateway".
    isGatewayNode = getInput('Is the node a "gateway" node (T/F)', 'F')
    nodeSetup['gateway_node'] = (isGatewayNode == 'T')
    isNodeOpen = getInput('Is the node "open" (T/F)', 'T')
    nodeSetup['open_connect_source'] = (isNodeOpen == 'T')
    # The resource_data replication URL is derived, not asked for.
    nodeSetup['distributeResourceDataUrl'] = "{0}/resource_data".format(nodeUrl)
    nodeSetup['distributeIncomingUrl'] = getInput("\nEnter distribute/replication " +
        "incoming destination URL \n(this is the incoming URL that another node couchdb " +
        "will use to replicate/distribute to this node)", "{0}/incoming".format(nodeUrl))
    isDistributeDest = getInput("Does the node want to be the destination for replication (T/F)", 'T')
    nodeSetup['open_connect_dest'] = (isDistributeDest == 'T')
    return nodeSetup
def getDefaultGnuPGHome():
    """Return the conventional per-user GnuPG home directory (~/.gnupg)."""
    home = os.path.expanduser('~')
    return os.path.join(home, ".gnupg")
def getGPG(gpgbin, gnupghome):
    # Single construction point for the python-gnupg GPG wrapper used by the
    # signing helpers below (binary path + keyring home directory).
    return gnupg.GPG(gpgbin, gnupghome)
def checkKey(gpg):
    """Return a getInput() validator that checks a signing key id.

    The returned closure yields True only when *gpg* can export BOTH the
    private and the public half of the key for the given id.
    """
    def checkKeyID(userInput):
        try:
            if len(userInput.strip()) == 0:
                return False
            privateKey = gpg.export_keys(userInput, True)
            # BUG FIX: the original exported the *private* key twice
            # (second argument True); the second call must export the
            # public half (secret=False).
            publicKey = gpg.export_keys(userInput, False)
            foundKey = len(privateKey) > 0 and len(publicKey) > 0
            if not foundKey:
                print("Invalid Private Key ID. Ensure key public and private key exists in keyring. Please try again.\n")
            return foundKey
        except Exception:
            pass
        return False
    return checkKeyID
def checkPassphrase(gpg, keyID):
    """Return a getInput() validator that verifies the passphrase unlocks *keyID*."""
    def check(userInput):
        try:
            # A successful trial signature proves the passphrase is correct.
            result = gpg.sign("hello learning registry", keyid=keyID, passphrase=userInput)
            if len(result.data) > 0 and len(result.fingerprint) > 0:
                return True
            print("Bad passphrase! Please try again.\n")
        except:
            pass
        return False
    return check
def getDefaultSigner(gpg, keyID):
    """Return the first uid of the private key matching *keyID*, or None."""
    try:
        wanted = keyID.strip()
        for key in gpg.list_keys(True):  # True -> list secret keys
            if key['keyid'] == wanted:
                return key['uids'][0]
    except:
        pass
    return None
def setNodeSigning(server, config, setupInfo):
    # Configure GPG signing and CouchDB OAuth for the node.
    # Returns True when OAuth was requested and configured, False otherwise.
    # NOTE(review): mutates setupInfo and the INI `config` in lock-step for
    # every lr.publish.signing.* value, and pushes CouchDB _config changes.
    if "oauth" in setupInfo and setupInfo["oauth"]:
        from services.service_template import getCouchAppPath
        import oauth2 as oauth, time
        # --- GPG signing settings (each one prompted, then mirrored into
        # both setupInfo and the [app:main] section of the config) ---
        gpgbin = getInput("Path to GnuPG executable", "gpg")
        setupInfo["lr.publish.signing.gpgbin"] = gpgbin
        config.set("app:main","lr.publish.signing.gpgbin",gpgbin)
        gnupghome = getInput("Path to GnuPG Home", getDefaultGnuPGHome())
        setupInfo["lr.publish.signing.gnupghome"] = gnupghome
        config.set("app:main","lr.publish.signing.gnupghome",gnupghome)
        gpg = getGPG(gpgbin, gnupghome)
        privateKeyId = getInput("Private Key Id for Signing", "", checkKey(gpg)).strip()
        setupInfo["lr.publish.signing.privatekeyid"] = privateKeyId
        config.set("app:main","lr.publish.signing.privatekeyid",privateKeyId)
        publickeylocations = [ "%s/pubkey" % setupInfo['nodeUrl']]
        setupInfo["lr.publish.signing.publickeylocations"] = json.dumps(publickeylocations)
        config.set("app:main","lr.publish.signing.publickeylocations",json.dumps(publickeylocations))
        signer = getInput("Signer for Resource Data Identity", getDefaultSigner(gpg, privateKeyId))
        setupInfo["lr.publish.signing.signer"] = signer
        config.set("app:main","lr.publish.signing.signer",signer)
        passphrase = getInput("Passphrase for Signing with Private Key [typing is concealed]", "", checkPassphrase(gpg, privateKeyId), hide_input=True)
        setupInfo["lr.publish.signing.passphrase"] = passphrase
        config.set("app:main","lr.publish.signing.passphrase",passphrase)
        # --- Enable OAuth support in the CouchDB server configuration ---
        server.resource("_config","couch_httpd_oauth").put('use_users_db', '"true"')
        server.resource("_config","httpd").put('WWW-Authenticate', '"OAuth"')
        server.resource("_config","browserid").put('enabled', '"true"')
        # --- Install the oauth-key-management couchapp design document ---
        apps = config.get("app:main", "couchdb.db.apps", "apps")
        try:
            server.create(apps)
        except:
            # DB probably exists already; reuse it.
            pass
        oauthCouchApp = os.path.join(getCouchAppPath(),apps,"kanso","oauth-key-management.json")
        with open(oauthCouchApp) as f:
            ddoc = json.load(f)
        try:
            # Drop any previously-installed copy of the design doc first.
            del server[apps]["_design/%s"%ddoc['kanso']['config']['name']]
        except:
            pass
        ddoc["_id"] = "_design/%s"%ddoc['kanso']['config']['name']
        server[apps].save(ddoc)
        setupInfo["oauth.app.name"] = ddoc['kanso']['config']['name']
        setupInfo["lr.oauth.signup"] = "{0}/apps/{1}".format(setupInfo["nodeUrl"],ddoc['kanso']['config']['name'])
        config.set("app:main","lr.oauth.signup",setupInfo["lr.oauth.signup"])
        ## TODO: Need to make an initial OAuth call to get the oauth view installed.
        users = config.get("app:main", "couchdb.db.users", "_users")
        couch_url = config.get("app:main", "couchdb.url", "http://localhost:5984")
        # Temporary user whose consumer key/token let us make that first
        # OAuth-authenticated request; deleted again below.
        dummy_user = {
            "_id": "org.couchdb.user:tempuser",
            "name": "tempuser",
            "type": "user",
            "roles": [],
            "oauth": {
                "consumer_keys":
                {
                    "localhost": "walt_2.0"
                },
                "tokens":
                {
                    "temptoken": "learningregistry"
                }
            }
        }
        server[users].save(dummy_user)
        # Create your consumer with the proper key/secret.
        consumer = oauth.Consumer(key="localhost",
                                  secret=dummy_user["oauth"]["consumer_keys"]["localhost"])
        token = oauth.Token(key="temptoken",
                            secret=dummy_user["oauth"]["tokens"]["temptoken"])
        # Create our client.
        client = oauth.Client(consumer, token=token)
        client.disable_ssl_certificate_validation=True
        # NOTE(review): params is built but never passed to client.request --
        # presumably oauth2 fills nonce/timestamp itself; confirm intent.
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': oauth.generate_nonce(),
            'oauth_timestamp': int(time.time())
        }
        resp, content = client.request("{0}/_session".format(couch_url), "GET", headers={"Content-Type": "application/json"})
        del server[users][dummy_user["_id"]]
        return True
    return False
| |
# test.py
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Jonathan Miller
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import bracket
from bracket import bracketPhase, branchedElement, rankedElement
import unittest
import getbracket
import random
class TestPhases(unittest.TestCase):
    """Exercises bracketPhase rank ranges, shifting and shifted_to_top."""

    def test_basic_stuff(self):
        phase = bracketPhase(participants=10, phase=0)
        self.assertEqual(phase.min_rank(), 7)
        self.assertEqual(phase.max_rank(), 16)
        # Shifting up by one narrows the rank window to the next tier.
        phase = phase.shifted(1)
        self.assertEqual(phase.min_rank(), 1)
        self.assertEqual(phase.max_rank(), 6)
        # The top phase always holds exactly one slot: the champion (rank 0).
        top = bracketPhase(participants=4).shifted_to_top()
        self.assertEqual(top.size(), 1)
        self.assertEqual(top.min_rank(), 0)
        self.assertEqual(top.max_rank(), 0)
        top = top.shifted(-2)
        self.assertEqual(top.min_rank(), 1)
        self.assertEqual(top.max_rank(), 4)
class TestElements(unittest.TestCase):
    """Unit tests for rankedElement / branchedElement container behaviour.

    BUG FIX: this class previously defined ``test_phase`` twice; the second
    (assertion-free) definition silently shadowed the first, so the real
    phase assertions never ran.  The duplicate is renamed to
    ``test_unranked_construction`` below.
    """

    def test_list_functions(self):
        # __init__
        b1 = branchedElement("one", "two")
        b1dupe = branchedElement(b1, b1)
        #self.assertTrue(b1dupe[0] != b1dupe[1])
        b1 = branchedElement()
        self.assertEqual(len(b1), 2)
        self.assertEqual(b1.rank(), 0)
        # __getitem__
        b2 = branchedElement("hi", "there")
        self.assertEqual(b2[0], "hi")
        self.assertEqual(b2[1], "there")
        # __setitem__
        b2[0] = "hello"
        b2[1] = "world!"
        self.assertEqual(b2[0] + " " + b2[1], "hello world!")
        # __len__, __delitem__
        self.assertEqual(len(b2), 2)
        del b2[1]
        self.assertEqual(len(b2), 1)
        del b2[0]
        self.assertEqual(len(b2), 0)
        # __contains__
        b3 = branchedElement(rankedElement("four", 4), rankedElement("4", 4))
        b3first = b3[0]
        self.assertTrue(b3first in b3)
        self.assertFalse(rankedElement("bloop", 4) in b3)

    def test_rank(self):
        # simple, ranked element
        b1 = rankedElement("ten", 10)
        self.assertEqual(b1.rank(), 10)
        # two ranked members: a branch reports its best (lowest) member rank
        b2 = branchedElement(rankedElement("four", 4), rankedElement("eight", 8))
        self.assertEqual(b2[0].rank(), 4)
        self.assertEqual(b2[1].rank(), 8)
        self.assertEqual(b2.rank(), 4)
        # unranked members
        b3 = branchedElement(rankedElement(), rankedElement())
        self.assertEqual(b3.rank(), 0)
        b3[0].set_rank(5)
        b3[1].set_rank(4)
        self.assertEqual(b3.rank(), 4)

    def test_sum_members(self):
        b1 = branchedElement(rankedElement("eight", 8), rankedElement("two", 2))
        self.assertEqual(b1.sum_members(), 10)

    def test_count(self):
        # 3 leaves + 2 branch nodes = 5 elements in total.
        b1 = branchedElement(branchedElement(rankedElement(), rankedElement()), rankedElement())
        self.assertEqual(b1.count(), 5)

    def test_phase(self):
        b1 = rankedElement(rank=1)
        b2 = rankedElement(rank=2)
        self.assertEqual(b1.phase.size(), 1)
        self.assertEqual(b2.phase.size(), 1)
        b3 = branchedElement(b1, b2)
        self.assertEqual(b1.phase.size(), 1)
        self.assertEqual(b2.phase.size(), 1)
        self.assertEqual(b3.phase, None)
        # Assigning a phase to the branch propagates down to its members.
        b3.phase = bracketPhase(2).shifted_to_top()
        self.assertEqual(b1.phase.size(), 2)
        self.assertEqual(b2.phase.size(), 2)
        self.assertEqual(b3.phase.size(), 1)
        self.assertEqual(b1.phase, b2.phase)

    def test_residual(self):
        b1 = rankedElement(rank=1)
        b2 = rankedElement(rank=2)
        b3 = rankedElement(rank=7)
        b4 = rankedElement(rank=16)
        b5 = branchedElement(b1, b2)
        b6 = branchedElement(b3, b4)
        r1 = bracketPhase(participants=4).shifted_to_top()
        b7 = branchedElement(b5, b6, r1)
        self.assertEqual(b1.residual(), 0)
        self.assertEqual(b2.residual(), 0)
        self.assertEqual(b3.residual(), 3)
        self.assertEqual(b4.residual(), 12)
        self.assertEqual(b5.residual(), -2)
        self.assertEqual(b6.residual(), 18)
        self.assertEqual(b7.residual(), 5)
        # Growing the tree re-bases every residual against the new phase.
        b8 = rankedElement(rank=6)
        b9 = rankedElement(rank=7)
        b10 = branchedElement(b8, b9)
        r2 = bracketPhase(participants=6).shifted_to_top()
        b11 = branchedElement(b7, b10, r2)
        self.assertEqual(b1.residual(), -2)
        self.assertEqual(b2.residual(), -1)
        self.assertEqual(b3.residual(), 0)
        self.assertEqual(b4.residual(), 8)
        self.assertEqual(b5.residual(), -6)
        self.assertEqual(b6.residual(), 14)
        self.assertEqual(b7.residual(), 3)
        self.assertEqual(b8.residual(), 4)
        self.assertEqual(b9.residual(), 5)
        self.assertEqual(b10.residual(), 8)
        self.assertEqual(b11.residual(), 4)

    def test_unranked_construction(self):
        # Was a duplicate "test_phase" that shadowed the real one above;
        # renamed. It only checks that mixed construction does not raise.
        b1 = rankedElement()
        b2 = rankedElement()
        b3 = rankedElement()
        b4 = branchedElement(b1, b2)
        b5 = branchedElement(b3, b4)

    def test_swap(self):
        b1 = rankedElement("Charles", 1)
        b2 = rankedElement("Anakin", 2)
        b3 = rankedElement("George", 3)
        b4 = branchedElement(b1, b2)
        b5 = branchedElement(b3, b4)
        self.assertEqual(b5[0].rank(), 3)
        self.assertEqual(b5[0].name, "George")
        b1.swap(b3)
        self.assertEqual(b5[0].rank(), 1)
        self.assertEqual(b5[0].name, "Charles")
        self.assertEqual(b4[0].rank(), 3)
        self.assertEqual(b4[0].name, "George")
        b5[0].swap(b4[0])
        self.assertEqual(b5[0].rank(), 3)
        self.assertEqual(b5[0].name, "George")
        self.assertEqual(b4[0].rank(), 1)
        self.assertEqual(b4[0].name, "Charles")
        # Swapping a node with its own subtree is rejected.
        with self.assertRaises(ValueError):
            b5.swap(b4)
        b6 = rankedElement("Errol", 4)
        b7 = rankedElement("Hedwig", 5)
        b8 = branchedElement(b6, b7)
        b8.swap(b4)
        self.assertEqual(b4.rank(), 4)
        self.assertEqual(b8.rank(), 1)
        b9 = branchedElement(b3, branchedElement(b1, b2))
        b9.swap(b4)
        self.assertEqual(b9.count(), 3)
        self.assertEqual(b4.count(), 5)

    def test_iter(self):
        composers = ["Ludwig", "Copland", "Bernstein", "Britten", "Schubert", "Chopin"]
        b1 = rankedElement(composers[0])
        b2 = rankedElement(composers[1])
        b3 = rankedElement(composers[2])
        b4 = rankedElement(composers[3])
        b5 = rankedElement(composers[4])
        b6 = rankedElement(composers[5])
        b7 = branchedElement(b1, b2)
        b8 = branchedElement(b3, b4)
        b9 = branchedElement(b5, b7)
        b10 = branchedElement(b6, b8)
        top = branchedElement(b9, b10)
        br = bracket.bracket(top)
        # Iteration must visit every element exactly once.
        elements = [b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, top]
        for e in br:
            if e in elements:
                elements.remove(e)
            else:
                raise ValueError("Found element " + str(e) + " in this tree for some reason!")
        self.assertEqual(len(elements), 0)
class TestBracket(unittest.TestCase):
    # End-to-end tests of the bracket container built from getbracket fixtures.

    def generate_bracket(self, num):
        # NOTE(review): the shuffled participants list is discarded -- this
        # helper appears intended to build/return a bracket; confirm intent.
        participants = []
        for i in range(num):
            participants.append(rankedElement(str(i), i))
        random.shuffle(participants)

    def get_bracket(self):
        # Load the shared fixture bracket from the foobar18 XML files.
        return getbracket.generate("foobar18-matches.xml", "foobar18-participants.xml")

    def test_print(self):
        # Smoke test: the bracket can be built without raising.
        b = self.get_bracket()
        #print_bracket(b)

    def test_sort(self):
        b = self.get_bracket()
        # Group the elements by their subtree size (_count).
        counts = {1:[], 3:[], 5:[], 7:[], 13:[], 15:[], 29:[]}
        for e in b:
            counts[e._count].append(e)
        ranks = {}
        for e in counts[1]:
            ranks[e._rank] = e
        self.assertEqual(b._rate_swap(counts[7][0][0], counts[7][0], counts[3][1][0], counts[3][1]), 0)
        if bracket.DEBUG:
            print "---------------------- BEFORE SORT ----------------------"
            b.print_verbose()
        b.sort()
        if bracket.DEBUG:
            print "---------------------- AFTER SORT ----------------------"
            b.print_verbose()
        # After sorting, every element should sit exactly where its rank
        # demands (zero residual everywhere).
        for e in b:
            self.assertEqual(e.residual(), 0)

    def test_iter_phase(self):
        b = self.get_bracket()
        f = b.iter_phase(0)
        g = b.iter_phase(1)
        # Phase 1 then phase 0 ranks, in iteration order.
        ranked_elements = [i.rank() for i in g]
        ranked_elements += [i.rank() for i in f]
        self.assertEqual(ranked_elements, [12,8,9,4,6,3,1,2,15,7,10,5,14,13,11])

    def test_really_big_sort(self):
        #print "Testing really big sort..."
        self.generate_bracket(1000)

    def test_post(self):
        #b = self.get_bracket()
        #getbracket.post_bracket(b, "foobar19")
        pass
if __name__ == "__main__":
    # Run the whole unittest suite when executed directly.
    unittest.main()
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
import csv
import multiprocessing
import os
import platform
import signal
import sys
import threading
from multiprocessing.managers import SyncManager
from time import time
import requests
import six
from six.moves import queue
from datarobot_batch_scoring import __version__
from datarobot_batch_scoring.api_response_handlers import \
get_response_handlers_from_url
from datarobot_batch_scoring.consts import (WriterQueueMsg,
ProgressQueueMsg,
SENTINEL,
REPORT_INTERVAL)
from datarobot_batch_scoring.network import Network, DryRunNetworkWorker
from datarobot_batch_scoring.reader import (fast_to_csv_chunk,
slow_to_csv_chunk, peek_row,
Shovel, auto_sampler,
investigate_encoding_and_dialect,
decode_reader_state)
from datarobot_batch_scoring.utils import (acquire_api_token,
make_validation_call)
from datarobot_batch_scoring.writer import (WriterProcess, RunContext,
decode_writer_state)
if six.PY2: # pragma: no cover
from contextlib2 import ExitStack
elif six.PY3: # pragma: no cover
from contextlib import ExitStack
# Default upper bound (in bytes) for a single scoring request payload: 5 MiB.
MAX_BATCH_SIZE = 5 * 1024 ** 2
def manager_init():
    """Make the SyncManager process ignore SIGINT/SIGTERM.

    Shutdown is coordinated by the parent process, so the manager must not
    die on its own when the user interrupts the run.
    """
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, signal.SIG_IGN)
def format_usage(rusage):
    """Render a resource-usage dict as a log-line suffix; '' when absent."""
    if rusage:
        return (" User time: {utime:.3f} System time: {stime:.3f} "
                "RSS: {rss}".format(**rusage))
    return ""
def my_os_cannot_handle_life_in_the_fast_lane():
    """True on Windows ('nt'), which needs the slower SyncManager-based queues."""
    return os.name == 'nt'
def run_batch_predictions(base_url, base_headers, user, pwd,
                          api_token, create_api_token,
                          pid, lid, import_id, n_retry, concurrent,
                          resume, n_samples,
                          out_file, keep_cols, delimiter,
                          dataset, pred_name,
                          timeout, ui, fast_mode, auto_sample,
                          dry_run, encoding, skip_dialect,
                          skip_row_id=False,
                          output_delimiter=None,
                          max_batch_size=None, compression=None,
                          field_size_limit=None,
                          verify_ssl=True,
                          deployment_id=None,
                          max_prediction_explanations=0,
                          pred_threshold_name=None,
                          pred_decision_name=None):
    """Drive one end-to-end batch scoring run.

    Wires three workers together over multiprocessing queues -- a reader
    ("shovel") that chunks *dataset*, a network worker that posts chunks to
    the prediction endpoint, and a writer that persists responses -- then
    supervises them via a progress queue until all batches are checkpointed
    or an abort sequence finishes.

    Returns the exit code: None on success, 1 on failure, 2 on user abort
    (dry runs return None).
    """
    if field_size_limit is not None:
        csv.field_size_limit(field_size_limit)
    if max_batch_size is None:
        max_batch_size = MAX_BATCH_SIZE
    multiprocessing.freeze_support()
    t1 = time()
    queue_size = concurrent * 2
    # provide version info and system info in user-agent
    base_headers['User-Agent'] = 'datarobot_batch_scoring/{}|' \
                                 'Python/{}|{}|system/{}|concurrency/{}' \
                                 ''.format(__version__,
                                           sys.version.split(' ')[0],
                                           requests.utils.default_user_agent(),
                                           platform.system(),
                                           concurrent)
    with ExitStack() as stack:
        if my_os_cannot_handle_life_in_the_fast_lane():
            # Windows requires an additional manager process. The locks
            # and queues it creates are proxies for objects that exist within
            # the manager itself. It does not perform as well so we only
            # use it when necessary.
            manager = SyncManager()
            manager.start(initializer=manager_init)
            conc_manager = stack.enter_context(manager)
        else:
            # You're on a nix of some sort and don't need a manager process.
            conc_manager = multiprocessing
        # Shared queues and single-byte status flags for the three workers.
        network_queue = conc_manager.Queue(queue_size)
        network_deque = conc_manager.Queue(queue_size)
        writer_queue = conc_manager.Queue(queue_size)
        progress_queue = conc_manager.Queue()
        shovel_status = conc_manager.Value('c', b'-', lock=False)
        network_status = conc_manager.Value('c', b'-', lock=False)
        writer_status = conc_manager.Value('c', b'-', lock=False)
        abort_flag = conc_manager.Value('b', 0, lock=False)
        base_headers['content-type'] = 'text/csv; charset=utf8'
        if compression:
            base_headers['Content-Encoding'] = 'gzip'
        # Build the prediction endpoint URL from whichever identifier was
        # supplied (import id, deployment id, or project/model pair).
        if import_id:
            endpoint = base_url + import_id
        elif deployment_id is not None:
            endpoint = base_url + '/'.join(
                ('deployments', deployment_id))
        else:
            endpoint = base_url + '/'.join((pid, lid))
        if max_prediction_explanations:
            if deployment_id is not None:
                # Deployment routes only support predictionExplanations.
                endpoint += '/predictionExplanations?maxCodes='
            else:
                # For non-deployment routes we use the old reasonCodesRoutes
                # to support 4.3.x releases.
                endpoint += '/reasonCodesPredictions?maxCodes='
            endpoint += str(max_prediction_explanations)
        else:
            if deployment_id is not None:
                endpoint += '/predictions'
            else:
                endpoint += '/predict'
        encoding = investigate_encoding_and_dialect(
            dataset=dataset,
            sep=delimiter, ui=ui,
            fast=fast_mode,
            encoding=encoding,
            skip_dialect=skip_dialect,
            output_delimiter=output_delimiter)
        if auto_sample:
            # override n_sample
            n_samples = auto_sampler(dataset, encoding, ui)
            ui.info('auto_sample: will use batches of {} rows'
                    ''.format(n_samples))
        # Make a sync request to check authentication and fail early
        first_row = peek_row(dataset, delimiter, ui, fast_mode, encoding)
        ui.debug('First row for auth request: {}'.format(first_row))
        if fast_mode:
            chunk_formatter = fast_to_csv_chunk
        else:
            chunk_formatter = slow_to_csv_chunk
        first_row_data = chunk_formatter(first_row.data, first_row.fieldnames)
        first_row = first_row._replace(data=first_row_data)
        if keep_cols:
            # If any columns appear in `keep_cols` that are not in
            # `first_row.fieldnames`, it is a fatal error.
            extra_cols = set(keep_cols) - set(first_row.fieldnames)
            if extra_cols:
                msg = 'keep_cols "{}" not in columns {}.'.format(
                    list(sorted(extra_cols)),
                    first_row.fieldnames)
                ui.fatal(msg)
        if not dry_run:
            if not (api_token or import_id):
                try:
                    api_token = acquire_api_token(base_url, base_headers, user,
                                                  pwd, create_api_token, ui,
                                                  verify_ssl)
                except Exception as e:
                    ui.fatal(str(e))
            make_validation_call(user, api_token, n_retry, endpoint,
                                 base_headers, first_row, ui,
                                 compression=compression,
                                 verify_ssl=verify_ssl)
        # RunContext owns the checkpoint DB used to resume interrupted runs.
        ctx = stack.enter_context(
            RunContext.create(resume, n_samples, out_file, pid,
                              lid, keep_cols, n_retry, delimiter,
                              dataset, pred_name, ui, fast_mode,
                              encoding, skip_row_id, output_delimiter,
                              pred_threshold_name, pred_decision_name,
                              max_prediction_explanations)
        )
        n_batches_checkpointed_init = len(ctx.db['checkpoints'])
        ui.debug('number of batches checkpointed initially: {}'
                 .format(n_batches_checkpointed_init))
        batch_generator_args = ctx.batch_generator_args()
        shovel = stack.enter_context(Shovel(network_queue,
                                            progress_queue,
                                            shovel_status,
                                            abort_flag,
                                            batch_generator_args,
                                            ui))
        ui.info('Reader go...')
        shovel_proc = shovel.go()
        t0 = time()
        if dry_run:
            # Dry runs exercise the pipeline without posting predictions.
            network = stack.enter_context(DryRunNetworkWorker(
                concurrency=concurrent,
                timeout=timeout,
                ui=ui,
                network_queue=network_queue,
                network_deque=network_deque,
                writer_queue=writer_queue,
                progress_queue=progress_queue,
                abort_flag=abort_flag,
                network_status=network_status,
                endpoint=endpoint,
                headers=base_headers,
                user=user,
                api_token=api_token,
                pred_name=pred_name,
                fast_mode=fast_mode,
                max_batch_size=max_batch_size,
                compression=compression,
                verify_ssl=verify_ssl))
            network.go()
            ui.info('dry-run complete | time elapsed {}s'.format(time() - t0))
            ui.info('dry-run complete | total time elapsed {}s'.format(
                time() - t1))
            # Escalate from wait -> terminate -> kill if the shovel lingers.
            i = 0
            while True:
                if not shovel_proc.is_alive():
                    break
                if i == 0:
                    ui.info("Waiting for shovel process exit")
                elif i == 10:
                    ui.info("Sending terminate signal to shovel process")
                    shovel_proc.terminate()
                elif i == 20:
                    ui.error("Sending kill signal to shovel process")
                    os.kill(shovel_proc.pid, 9)
                elif i == 30:
                    ui.error("Reader process was not exited,"
                             " processing anyway.")
                    break
                i += 1
                try:
                    msg, args = progress_queue.get(timeout=1)
                    ui.debug("got progress: {} args: {}".format(msg, args))
                except queue.Empty:
                    continue
            ctx.scoring_succeeded = True
            return
        network = stack.enter_context(Network(concurrency=concurrent,
                                              timeout=timeout,
                                              ui=ui,
                                              network_queue=network_queue,
                                              network_deque=network_deque,
                                              writer_queue=writer_queue,
                                              progress_queue=progress_queue,
                                              abort_flag=abort_flag,
                                              network_status=network_status,
                                              endpoint=endpoint,
                                              headers=base_headers,
                                              user=user,
                                              api_token=api_token,
                                              pred_name=pred_name,
                                              fast_mode=fast_mode,
                                              max_batch_size=max_batch_size,
                                              compression=compression,
                                              verify_ssl=verify_ssl
                                              ))
        exit_code = None
        try:
            response_handlers = get_response_handlers_from_url(base_url)
        except ValueError as e:
            ui.fatal(str(e))
        writer = stack.enter_context(WriterProcess(ui, ctx, writer_queue,
                                                   network_queue,
                                                   network_deque,
                                                   progress_queue,
                                                   abort_flag,
                                                   writer_status,
                                                   response_handlers))
        ui.info('Writer go...')
        writer_proc = writer.go()
        ui.info('Network go...')
        network_proc = network.go()
        # Per-worker completion flags and progress counters, updated from
        # the progress-queue messages below.
        shovel_done = False
        network_done = False
        writer_done = False
        writer_exitcode = None
        n_ret = False
        n_consumed = 0
        n_requests = 0
        n_retried = 0
        n_rusage = None
        s_produced = 0
        s_rusage = None
        s_read = 0
        s_skipped = 0
        w_ret = 0
        w_requests = 0
        w_written = 0
        w_rows = 0
        w_rusage = None
        # aborting_phase drives the shutdown state machine:
        # 0 = running, -1 = graceful drain, 1..4 = escalating abort steps.
        aborting_phase = 0
        phase_start = time()
        local_abort_flag = [False]

        def exit_fast(*_):
            # Signal handler: just note the request; the loop below reacts.
            local_abort_flag[0] = True

        signal.signal(signal.SIGINT, exit_fast)
        signal.signal(signal.SIGTERM, exit_fast)
        while True:
            progress_empty = False
            try:
                msg, args = progress_queue.get(timeout=1)
                ui.debug("got progress: {} args={}".format(msg, args))
            except queue.Empty:
                progress_empty = True
                ui.debug("get progress timed out")
                ui.debug(" aborting_phase: {} ({} seconds)".format(
                    aborting_phase, time() - phase_start))
                ui.debug(" shovel_status: '{}' shovel_done: {} "
                         "shovel_proc: {}({})"
                         "".format(decode_reader_state(shovel_status.value),
                                   shovel_done,
                                   shovel_proc,
                                   shovel_proc and shovel_proc.pid))
                ui.debug(" network_status: '{}' network_done: {} "
                         "network_proc: {}({})"
                         "".format(network.state_name(),
                                   network_done,
                                   network_proc,
                                   network_proc and network_proc.pid))
                ui.debug(" writer_status: '{}' writer_done: {} "
                         "writer_proc: {}({})"
                         "".format(decode_writer_state(writer_status.value),
                                   writer_done,
                                   writer_proc,
                                   writer_proc and writer_proc.pid))
            except KeyboardInterrupt:
                local_abort_flag[0] = True
            else:
                # Dispatch on the progress message type.
                if msg == ProgressQueueMsg.NETWORK_DONE:
                    n_ret = args["ret"]
                    n_requests = args["processed"]
                    n_retried = args["retried"]
                    n_consumed = args["consumed"]
                    n_rusage = args["rusage"]
                    network_done = "ok"
                    if not n_ret:
                        network_done = "with error"
                        exit_code = 1
                        aborting_phase = 1
                elif msg == ProgressQueueMsg.WRITER_DONE:
                    w_ret = args["ret"]
                    w_requests = args["processed"]
                    w_written = args["written"]
                    w_rows = args["rows"]
                    w_rusage = args["rusage"]
                    writer_done = "ok"
                    if not w_ret:
                        writer_done = "with error"
                        exit_code = 1
                        aborting_phase = 1
                elif msg == ProgressQueueMsg.NETWORK_PROGRESS:
                    n_requests = args["processed"]
                    n_retried = args["retried"]
                    n_consumed = args["consumed"]
                    n_rusage = args["rusage"]
                    ui.info("Network progress: Chunks: {}"
                            " Requests: {} Retries: {}{}"
                            "".format(n_consumed,
                                      n_requests, n_retried,
                                      format_usage(n_rusage)))
                elif msg == ProgressQueueMsg.WRITER_PROGRESS:
                    w_requests = args["processed"]
                    w_written = args["written"]
                    w_rows = args["rows"]
                    w_rusage = args["rusage"]
                    ui.info("Writer progress:"
                            " Results: {} Written: {}"
                            " Rows done: {}{}"
                            "".format(w_requests,
                                      w_written,
                                      w_rows,
                                      format_usage(w_rusage)))
                elif msg == ProgressQueueMsg.SHOVEL_PROGRESS:
                    s_produced = args["produced"]
                    s_read = args["read"]
                    s_skipped = args["skipped"]
                    s_rusage = args["rusage"]
                    ui.info("Reader progress: Read: {} Skipped: {}"
                            " Produced: {}{}"
                            "".format(s_read, s_skipped, s_produced,
                                      format_usage(s_rusage)))
                elif msg == ProgressQueueMsg.SHOVEL_DONE:
                    s_produced = args["produced"]
                    s_read = args["read"]
                    s_skipped = args["skipped"]
                    s_rusage = args["rusage"]
                    shovel_done = "ok"
                elif msg in (ProgressQueueMsg.SHOVEL_CSV_ERROR,
                             ProgressQueueMsg.SHOVEL_ERROR):
                    batch = args["batch"]
                    error = args["error"]
                    s_produced = args["produced"]
                    s_read = args["read"]
                    s_skipped = args["skipped"]
                    s_rusage = args["rusage"]
                    if msg == ProgressQueueMsg.SHOVEL_CSV_ERROR:
                        shovel_done = "with csv format error"
                        ui.error("Error parsing CSV file after line {},"
                                 " error: {}, aborting".format(
                                     batch.id + batch.rows, error))
                    else:
                        shovel_done = "with error"
                        ui.error("Unexpected reader error after line {},"
                                 " error: {}, aborting".format(
                                     batch.id + batch.rows, error))
                    exit_code = 1
                    aborting_phase = 1
                else:
                    ui.error("got unknown progress message: {} args={}"
                             "".format(msg, args))
            if local_abort_flag[0]:
                exit_code = 2
                if aborting_phase == 0:
                    ui.info("Keyboard Interrupt, abort sequence started")
                    aborting_phase = 1
                else:
                    ui.info("Aborting is already in progress")
            # Reap any worker process that has exited; a non-zero exit code
            # triggers the abort sequence.
            some_worker_exited = False
            if shovel_proc and not shovel_proc.is_alive():
                shovel_exitcode = shovel_proc.exitcode
                ui.info("shovel proc finished, exit code: {}"
                        .format(shovel_exitcode))
                shovel_proc = None
                some_worker_exited = True
                if shovel_exitcode != 0:
                    exit_code = 1
                    aborting_phase = 1
            if network_proc and not network_proc.is_alive():
                network_exitcode = network_proc.exitcode
                ui.info("network proc finished, exit code: {}"
                        .format(network_exitcode))
                network_proc = None
                some_worker_exited = True
                if network_exitcode != 0:
                    exit_code = 1
                    aborting_phase = 1
            if writer_proc and not writer_proc.is_alive():
                writer_exitcode = writer_proc.exitcode
                ui.info("writer proc finished, exit code: {}"
                        .format(writer_exitcode))
                writer_proc = None
                some_worker_exited = True
                if writer_exitcode != 0:
                    exit_code = 1
                    aborting_phase = 1
            if aborting_phase == 0:
                if progress_empty and not some_worker_exited:
                    if time() - phase_start > REPORT_INTERVAL:
                        # A worker vanished without reporting results.
                        if network_proc is None and not network_done:
                            ui.warning("network process finished without "
                                       "posting results, aborting")
                            network_done = "exited"
                            exit_code = 1
                            aborting_phase = 1
                        if shovel_proc is None and not shovel_done:
                            ui.warning("shovel process finished without "
                                       "posting results, aborting")
                            shovel_done = "exited"
                            exit_code = 1
                            aborting_phase = 1
                        if writer_proc is None and not writer_done:
                            ui.warning("writer process finished without "
                                       "posting results, aborting")
                            writer_done = "exited"
                            exit_code = 1
                            aborting_phase = 1
                        phase_start = time()
                else:
                    phase_start = time()
                # Reader finished and both workers idle: start the drain.
                if shovel_done and \
                        network_status.value == b"I" and \
                        writer_status.value == b"I":
                    ui.info("All requests done, waiting for writer")
                    if writer_proc:
                        writer_queue.put((WriterQueueMsg.SENTINEL, {}))
                    if network_proc:
                        network_queue.put(SENTINEL)
                    aborting_phase = -1
                    phase_start = time()
            elif aborting_phase == -1:
                procs = [shovel_proc, network_proc, writer_proc]
                not_done = [a is False for a in [shovel_done,
                                                network_done,
                                                writer_done]]
                if (procs == [None, None, None] and
                        not_done == [False, False, False]):
                    ui.info("all workers exited successfully")
                    break
                elif time() - phase_start > 30:
                    ui.info("some of workers are still active, aborting")
                    if writer_done != "ok":
                        exit_code = 1
                    aborting_phase = 1
            elif aborting_phase == 1:
                # Cooperative abort: raise the shared flag and wait.
                abort_flag.value = 1
                aborting_phase = 2
                phase_start = time()
                ui.info("abort sequence started, waiting for workers exit")
            elif aborting_phase == 2:
                procs = [shovel_proc, network_proc, writer_proc]
                if procs == [None, None, None]:
                    ui.info("all workers exited")
                    break
                elif time() - phase_start > 10:
                    # Escalate: terminate remaining workers.
                    for proc in procs:
                        if proc and proc.is_alive():
                            proc.terminate()
                    aborting_phase = 3
                    phase_start = time()
            elif aborting_phase == 3:
                procs = [shovel_proc, network_proc, writer_proc]
                if procs == [None, None, None]:
                    ui.info("all workers exited")
                    break
                elif time() - phase_start > 10:
                    # Last resort: SIGKILL anything still alive.
                    for proc in procs:
                        if proc and proc.is_alive():
                            os.kill(proc.pid, 9)
                    aborting_phase = 4
            elif aborting_phase == 4:
                procs = [shovel_proc, network_proc, writer_proc]
                if procs == [None, None, None]:
                    ui.info("all workers exited")
                    break
                elif time() - phase_start > 10:
                    ui.error("some workers are not exited, ignoring")
                    break
        # --- Final reports and checkpoint accounting ---
        if shovel_done:
            ui.info("Reader is finished: Read: {} Skipped: {}"
                    " Produced: {}{}"
                    "".format(s_read, s_skipped, s_produced,
                              format_usage(s_rusage)))
        if network_done:
            ui.info("Network is finished {}. Chunks: {}"
                    " Requests: {} Retries: {}{}"
                    "".format(network_done, n_consumed,
                              n_requests, n_retried,
                              format_usage(n_rusage)))
        if writer_done:
            ui.info("Writer is finished {}. Result: {}"
                    " Results: {} Written: {} Rows done: {}{}"
                    "".format(writer_done, w_ret, w_requests,
                              w_written, w_rows,
                              format_usage(w_rusage)))
        if n_ret is not True:
            ui.debug('Network finished with error')
            exit_code = 1
        if writer_exitcode == 0:
            ui.debug('writer process exited successfully')
        else:
            ui.debug('writer process did not exit properly: '
                     'returncode="{}"'.format(writer_exitcode))
            exit_code = 1
        ui.debug("active threads: {}".format(threading.enumerate()))
        # Reopen the checkpoint DB (the writer process owned it until now).
        ctx.open()
        ui.debug('number of batches checkpointed initially: {}'
                 .format(n_batches_checkpointed_init))
        ui.debug('list of checkpointed batches: {}'
                 .format(sorted(ctx.db['checkpoints'])))
        n_batches_checkpointed = (len(ctx.db['checkpoints']) -
                                  n_batches_checkpointed_init)
        ui.debug('number of batches checkpointed: {}'
                 .format(n_batches_checkpointed))
        n_batches_not_checkpointed = (n_consumed -
                                      n_batches_checkpointed)
        batches_missing = n_batches_not_checkpointed > 0
        if batches_missing:
            ui.error(('scoring incomplete, {} batches were dropped | '
                      'time elapsed {}s')
                     .format(n_batches_not_checkpointed, time() - t0))
            exit_code = 1
        else:
            ui.info('scoring complete | time elapsed {}s'
                    .format(time() - t0))
        ui.info('scoring complete | total time elapsed {}s'
                .format(time() - t1))
        total_done = 0
        for _, batch_len in ctx.db["checkpoints"]:
            total_done += batch_len
        total_lost = 0
        # Summarise per-batch warnings and errors recorded during the run.
        for bucket in ("warnings", "errors"):
            ui.info('==== Scoring {} ===='.format(bucket))
            if ctx.db[bucket]:
                msg_data = ctx.db[bucket]
                msg_keys = sorted(msg_data.keys())
                for batch_id in msg_keys:
                    suffix = ""
                    if bucket == "errors":
                        if batch_id not in ctx.db['checkpoints']:
                            # Batch never checkpointed: its rows are lost.
                            total_lost += batch_id[1]
                        else:
                            suffix = " (fixed)"
                    first = True
                    for msg in msg_data[batch_id]:
                        if first:
                            first = False
                            ui.info("{}: {}{}".format(batch_id, msg, suffix))
                        else:
                            ui.info(" {}".format(msg, suffix))
        ui.info('==== Total stats ===='.format(bucket))
        ui.info("done: {} lost: {}".format(total_done, total_lost))
        if exit_code is None and total_lost == 0:
            ctx.scoring_succeeded = True
        else:
            exit_code = 1
        return exit_code
| |
#
# _____ _____ _______ __ _ _______ ______ _______ _____
# | | |_____] |______ | \ | | |_____/ |_____| | |
# |_____| | |______ | \_| | | \_ | | __|__ |_____
#
# _______ _____ __ _ _ _ _______ ______ _______ _____ _____ __ _
# | | | | \ | \ / |______ |_____/ |______ | | | | \ |
# |_____ |_____| | \_| \/ |______ | \_ ______| __|__ |_____| | \_|
#
# This is the OpenTrails conversion script for MI DNR to OpenTrails
# Huge thank you to Ben Sainsbury (formerly Oregon Metro) for the initial work done on this script.
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#pyshp
import shapefile
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#requests
import requests
import hashlib, collections, csv, os, sys, zipfile
import json
import csv
from support import *
# http://www.codeforamerica.org/specifications/trails/spec.html
# Source dataset URL (Oregon Metro RLIS trails archive).
TRAILS_URL = 'http://library.oregonmetro.gov/rlisdiscovery/trails.zip'
#OBJECTID,name,id,url,address,publisher,license,phone
# Column layout of the hand-maintained input/stewards.csv file.
STEWARD_FIELDS = ['OBJECTID', 'name', 'id', 'url', 'address', 'publisher', 'license', 'phone' ]
# Module-level accumulators: filled by the parse_* functions, then consumed
# by the write_* and validate functions below.
STEWARDS = []
# steward name -> steward id (built while parsing stewards.csv)
STEWARD_MAP = {}
NAMED_TRAILS = []
NAMED_TRAIL_IDS = []
# trail name -> trail id
NAMED_TRAIL_MAP = {}
# named-trail code -> [segment ids] (filled from TRAIL_CODE in the shapefile)
NAMED_TRAIL_SEGMENT_ID_MAP = {}
# segment id -> [named-trail codes] (reverse of the map above)
SEGMENT_ID_NAMED_TRAIL_MAP = {}
TRAIL_SEGMENTS = []
TRAIL_SEGMENT_IDS = []
TRAILHEADS = []
# Make sure the ./output directory exists before any of the writers run.
if not os.path.exists(os.getcwd()+'/output'):
    os.makedirs(os.getcwd()+'/output')
### PARSING FUNCTIONS
def parse_stewards_csv():
    """Load input/stewards.csv into STEWARDS and build STEWARD_MAP.

    Side effects only: appends each row dict to STEWARDS (with 'id'
    coerced to str) and maps steward name -> id in STEWARD_MAP.
    """
    print "* Parsing stewards.csv"
    with open(os.getcwd() + "/input/stewards.csv", mode='r') as infile:
        reader = csv.DictReader(infile, STEWARD_FIELDS) #stewards.csv header
        # Field names are supplied explicitly above, so the file's own
        # header row would otherwise be read as data: skip it.
        reader.next()
        for row in reader:
            STEWARDS.append(row)
    for row in STEWARDS:
        # Normalize ids to strings so joins against other tables compare equal.
        row['id'] = str(row['id'])
        print "** Steward"
        print row
        STEWARD_MAP[row['name']] = row['id']
    print "* Done parsing stewards.csv"
def parse_named_trails_csv():
    """Load input/named_trails.csv into NAMED_TRAILS and the name/id maps.

    Each row gains OpenTrails-style 'id'/'name' keys (copied from the
    source 'Code'/'Name' columns) plus empty 'segment_ids'/'description'
    placeholders that are filled in later by the segment parser/writer.
    """
    print "* Parsing named_trails.csv"
    with open(os.getcwd() + "/input/named_trails.csv", mode='r') as infile:
        reader = csv.DictReader(infile, ['OBJECTID','Code', 'Name']) # named_trails.csv header
        reader.next() #skip header line
        for row in reader:
            NAMED_TRAILS.append(row)
    for row in NAMED_TRAILS:
        row['id'] = str(row['Code'])
        row['name'] = row['Name']
        row['segment_ids'] = ""
        row['description'] = ""
        print "** Named Trail"
        print row
        NAMED_TRAIL_MAP[row['name']] = row['id']
        NAMED_TRAIL_IDS.append(row['id'])
    print "* Done parsing named_trails.csv"
def parse_trail_segments():
    """Read input/trail_segments.shp into TRAIL_SEGMENTS as GeoJSON Features.

    For every shape record, builds an OpenTrails property dict (use flags
    derived from the MI DNR 'Yes' columns), transforms the geometry via the
    support-module helpers, and records the segment<->named-trail links in
    SEGMENT_ID_NAMED_TRAIL_MAP / NAMED_TRAIL_SEGMENT_ID_MAP.
    """
    print "* Parsing trail segments"
    # read the trails shapefile
    reader = shapefile.Reader(os.getcwd()+'/input/trail_segments.shp')
    fields = reader.fields[1:]
    field_names = [field[0].upper() for field in fields]
    #iterate trails
    for sr in reader.shapeRecords():
        atr = dict(zip(field_names, sr.record))
        # we're only allowing open existing trails to pass
        props = collections.OrderedDict()
        #effectively join to the stewards table
        # NOTE(review): steward_id is a hard-coded placeholder here, not an
        # actual join against STEWARD_MAP — presumably intentional; confirm.
        id = props['id'] = atr['TRAIL_ID']
        props['steward_id'] = "000000"
        props['motor_vehicles'] = is_motor_vehicles(atr)
        # Source columns use literal 'Yes'/'No' strings.
        props['foot'] = 'yes' if atr['HIKE'] == 'Yes' else 'no'
        props['bicycle'] = 'yes' if atr['BIKE'] == 'Yes' else 'no'
        props['horse'] = 'yes' if atr['EQUESTRIAN'] == 'Yes' else 'no'
        props['ski'] = 'yes' if atr['SKI'] == 'Yes' else 'no'
        # spec: "yes", "no", "permissive", "designated"
        props['wheelchair'] = 'yes' if atr['ADA'] == 'Yes' else 'no'
        props['osm_tags'] = build_osm_tags(atr)
        geom = sr.shape.__geo_interface__
        geom_type = geom['type']
        n_geom = transform_geometry(geom)
        segment= collections.OrderedDict()
        segment['type']='Feature'
        segment['properties'] = props
        segment['geometry'] = {"type":geom_type, "coordinates":n_geom}
        # NEED TO PARSE THE TRAIL_CODE FIELD TO NAMED_TRAIL_SEGMENT_ID_MAP
        # TRAIL_CODE holds semicolon-separated named-trail codes.
        _codes = atr['TRAIL_CODE'].split(";")
        if len(_codes) > 0:
            SEGMENT_ID_NAMED_TRAIL_MAP[id] = _codes
            for code in _codes:
                if code in NAMED_TRAIL_SEGMENT_ID_MAP:
                    NAMED_TRAIL_SEGMENT_ID_MAP[code].append(id)
                else:
                    NAMED_TRAIL_SEGMENT_ID_MAP[code] = [id]
        TRAIL_SEGMENTS.append(segment)
        TRAIL_SEGMENT_IDS.append(id)
    #Release the trails shapefile
    reader = None
    print ("* Done parsing trail segments")
def parse_trailheads():
    """Read input/trailheads.shp into TRAILHEADS as GeoJSON Point Features.

    Builds an OpenTrails trailhead property dict per record (restrooms /
    drinkwater / parking flags from the 'Yes' columns) and appends one
    Feature per trailhead to the module-level TRAILHEADS list.
    """
    print ("* Parsing trailheads")
    # read the trails shapefile
    reader = shapefile.Reader(os.getcwd()+'/input/trailheads.shp')
    fields = reader.fields[1:]
    field_names = [field[0].upper() for field in fields]
    #iterate trails
    for sr in reader.shapeRecords():
        atr = dict(zip(field_names, sr.record))
        # we're only allowing open existing trails to pass
        props = collections.OrderedDict()
        #effectively join to the stewards table
        id = props['id'] = str(atr['ID'])
        props['steward_id'] = str(atr['STEWARD_ID'])
        # TRAIL_SEG_ carries the pre-joined segment id list from the shapefile.
        props['segment_ids'] = atr['TRAIL_SEG_']
        props['name'] = atr['THNAME']
        props['restrooms'] = 'yes' if atr['RESTROOM'] == 'Yes' else 'no'
        props['drinkwater'] = 'yes' if atr['WATER'] == 'Yes' else 'no'
        props['parking'] = 'yes' if atr['PARKING'] == 'Yes' else 'no'
        props['address'] = atr['ADDRESS']
        geom = sr.shape.__geo_interface__
        n_geom = transform_coordinates(geom['coordinates'])
        segment= collections.OrderedDict()
        segment['type']='Feature'
        segment['properties'] = props
        segment['geometry'] = {"type":"Point", "coordinates":n_geom}
        TRAILHEADS.append(segment)
    #Release the trails shapefile
    reader = None
    print ("* Done parsing trailheads")
### WRITING FUNCTIONS
def write_stewards_csv():
OUT_STEWARD_FIELDS = ['id', 'name', 'url', 'phone', 'address','publisher', 'license']
print "* Writing stewards.csv"
stewards_out = open(os.getcwd() + "/output/stewards.csv", "w")
stewards_out.write(",".join(OUT_STEWARD_FIELDS)+"\n")
for steward in STEWARDS:
_row_data = [ \
str(steward['id']), \
steward['name'], \
steward['url'], \
steward['phone'], \
steward['address'], \
steward['publisher'], \
steward['license'] \
]
stewards_out.write(','.join(_row_data)+"\n")
stewards_out.close()
print "* Done writing stewards.csv"
def write_named_trails_csv():
print "* Writing named_trails.csv"
named_trails_out = open(os.getcwd() + "/output/named_trails.csv", "w")
named_trails_out.write('"id","name","segment_ids","description","part_of"\n')
for named_trail in NAMED_TRAILS:
_segment_ids = ';'.join(NAMED_TRAIL_SEGMENT_ID_MAP[named_trail['id']]) if (named_trail['id'] in NAMED_TRAIL_SEGMENT_ID_MAP) else ''
_row_data = [ \
str(named_trail['id']), \
named_trail['name'], \
_segment_ids, \
'','']
named_trails_out.write(','.join(_row_data)+"\n")
named_trails_out.close()
print "* Done writing named_trails.csv"
def write_trail_segments_geojson():
    """Dump TRAIL_SEGMENTS as a GeoJSON FeatureCollection.

    FIX: 'with' guarantees the handle is closed even if json.dumps raises
    (the old open()/close() pair leaked it on exceptions).
    """
    with open(os.getcwd() + "/output/trail_segments.geojson", "w") as trail_segments_out:
        trail_segments_out.write(json.dumps({"type": "FeatureCollection",
                                             "features": TRAIL_SEGMENTS}, indent=2) + "\n")
def write_trailheads_geojson():
    """Dump TRAILHEADS as a GeoJSON FeatureCollection.

    FIX: 'with' guarantees the handle is closed even if json.dumps raises
    (the old open()/close() pair leaked it on exceptions).
    """
    with open(os.getcwd() + "/output/trailheads.geojson", "w") as trailheads_out:
        trailheads_out.write(json.dumps({"type": "FeatureCollection",
                                         "features": TRAILHEADS}, indent=2) + "\n")
def validate():
# Check for empty trails
empty_count = 0
missing_count = 0
for trail in NAMED_TRAILS:
if trail['id'] not in NAMED_TRAIL_SEGMENT_ID_MAP:
print trail['id'] + " has no segments"
empty_count = empty_count + 1
else:
segments = NAMED_TRAIL_SEGMENT_ID_MAP[trail['id']]
for id in segments:
if id not in TRAIL_SEGMENT_IDS:
missing_count = missing_count + 1
print 'Missing trail segment : ' + str(id)
else:
print "Found trail segment " + str(id)
print str(len(NAMED_TRAILS)) + " trails"
print str(empty_count) + " empty trails"
print str(missing_count) + " missing segments"
# Check for trail segments without trails
for segment in TRAIL_SEGMENTS:
unused_count = 0
if segment['properties']['id'] not in SEGMENT_ID_NAMED_TRAIL_MAP:
unused_count = unused_count + 1
print "Unused trail segment : " + segment['properties']['id']
print str(len(TRAIL_SEGMENTS)) + " trail segments"
print str(unused_count) + " unused trail segments"
if __name__ == "__main__":
# PARSE PARSE PARSE
parse_stewards_csv()
parse_named_trails_csv()
parse_trail_segments()
parse_trailheads()
# WRITE WRITE WRITE
write_stewards_csv()
write_named_trails_csv()
write_trail_segments_geojson()
write_trailheads_geojson()
validate()
print '* Process complete'
| |
import glob
import sys
from tqdm import tqdm
import cv2
from os.path import splitext, isdir
from time import time
from tinyface_face_extractor import extract_tinyfaces
from utils.get_md5 import file_digest
from utils.match_to_faces import write_out_pickle
# Process an image for faces
def process_image(
        image_file,
        reduceby,
        prob_thresh,
        nms_thresh,
        use_gpu,
        verbose=False):
    """Run TinyFace detection on one still image and pickle any hits.

    Images carry no frame index, so a sentinel frame number of -1 is
    recorded alongside the detected bounding boxes.
    """
    base_name = image_file.split('/')[-1]
    content_hash = file_digest(image_file)
    frame = cv2.imread(image_file)
    # -1 marks "not a video frame" in the pickled results.
    boxes, hits = identify_chips(
        frame, -1, reduceby, prob_thresh, nms_thresh, use_gpu)
    # Skip the pickle entirely when nothing was found.
    if hits > 0:
        write_out_pickle(
            base_name,
            (base_name, content_hash, [boxes]),
            "/bboxes", "tinyface", "bboxes")
    if verbose:
        print("{0} face detections in {1}".format(hits, base_name))
# getframe
def get_frame_inefficient(filename, frame_number):
    """Open the video, seek to frame_number, read one frame, close it.

    Deliberately re-opens the capture on every call (hence "inefficient")
    so no long-lived cv2.VideoCapture handle is held between frames.
    """
    capture = cv2.VideoCapture(filename)
    capture.set(1, frame_number)  # property 1 == CAP_PROP_POS_FRAMES
    ok, frame = capture.read()
    capture.release()
    return (ok, frame)
# get movie length
def get_movie_length(filename):
    """Return the frame count reported by the container (a float from cv2)."""
    capture = cv2.VideoCapture(filename)
    frame_count = capture.get(cv2.CAP_PROP_FRAME_COUNT)
    capture.release()
    return frame_count
# Process a video for faces
def process_video(
        image_file,
        reduceby,
        every,
        prob_thresh,
        nms_thresh,
        use_gpu):
    """Sample every `every`-th frame of a video, detect faces, pickle results.

    Frames are fetched via get_frame_inefficient (re-opening the capture per
    frame), and all per-frame bounding boxes are accumulated before a single
    pickle is written at the end — only if at least one face was found.
    """
    frame_number = 0
    num_detections = 0
    filename = image_file.split('/')[-1]
    #camera = cv2.VideoCapture(image_file)
    #capture_length = int(camera.get(cv2.CAP_PROP_FRAME_COUNT))
    capture_length = get_movie_length(image_file)
    progress = tqdm(total=capture_length)
    file_content_hash = file_digest(image_file)
    combined_face_locations = list()
    keep_going = True
    first = True
    while keep_going:
        # Advance by `every` frames on all iterations after the first;
        # the first iteration processes frame 0 as-is.
        if not first:
            if (every + frame_number) > capture_length:
                keep_going = False
                progress.close()
                break
            frame_number += every
            #camera.set(1, frame_number)
            progress.update(every)
        else:
            first = False
        #keep_going, image = camera.read()
        keep_going, image = get_frame_inefficient(image_file, frame_number)
        # only face detect every once in a while
        progress.set_description(
            'Processing video: {0} detections: {1}'.format(
                filename[
                    0:30] + "...",
                num_detections))
        progress.refresh()
        # verify that there is a video frame to process
        # Three distinct exit paths, each closing the progress bar:
        # no decodable frame, frame index past the reported length, or the
        # capture itself signalled end-of-stream.
        if image is None:
            progress.refresh()
            progress.write('end of capture:IMG')
            progress.close()
            break
        if frame_number > capture_length:
            progress.refresh()
            progress.write('end of capture:Length')
            progress.close()
            break
        if not keep_going:
            progress.refresh()
            progress.write('end of capture:camera.read')
            progress.close()
            break
        # Find bounding boxes for face chips in this frame
        face_locations, detections = identify_chips(
            image, frame_number, reduceby, prob_thresh, nms_thresh, use_gpu)
        if detections > 0:
            combined_face_locations += [face_locations]
            num_detections += detections
    # Only save pickle if faces were detected
    if num_detections > 0:
        results = (filename, file_content_hash, combined_face_locations)
        write_out_pickle(filename, results, "/bboxes", "tinyface", "bboxes")
# Detect faces and vectorize chips based on input parameters
def identify_chips(
        image,
        frame_number,
        reduceby,
        prob_thresh,
        nms_thresh,
        use_gpu):
    """Detect faces on a downscaled copy of *image* and map boxes back.

    Returns ((frame_number, [[a, b, c, d], ...]), n_detections), with each
    box coordinate scaled back up by `reduceby` to the original resolution.
    """
    scale = 1.0 / reduceby
    shrunk = cv2.resize(image, (0, 0), fx=scale, fy=scale)
    raw_boxes = extract_tinyfaces(shrunk, prob_thresh, nms_thresh, use_gpu)
    # Reorder each detection (x0, x1, x2, x3) -> (x1, x2, x3, x0) and undo
    # the resize so the boxes align with the original image.
    mapped_boxes = []
    for box in raw_boxes:
        reordered = (int(box[1]), int(box[2]), int(box[3]), int(box[0]))
        mapped_boxes.append([int(coord * reduceby) for coord in reordered])
    return (frame_number, mapped_boxes), len(raw_boxes)
def main(
        use_gpu=False,
        prob_thresh=0.85,
        nms_thresh=0.1,
        reduceby=1,
        every=30,
        verbose=False):
    """Walk /media and run face detection on every video and image found."""
    video_exts = ('.avi', '.mov', '.mp4')
    image_exts = ('.jpg', '.png', '.jpeg', '.bmp', '.gif')
    # Look for files at /media folder (directories are skipped).
    media = [path for path in glob.glob('/media/*') if not isdir(path)]
    if verbose:
        durations = []
        kickoff = time()
    for path in media:
        extension = splitext(path)[1]
        if extension in video_exts:
            process_video(path, reduceby, every, prob_thresh, nms_thresh, use_gpu)
        elif extension in image_exts:
            if verbose:
                # Time each image individually so an average can be reported.
                started = time()
                process_image(path, reduceby, prob_thresh,
                              nms_thresh, use_gpu, verbose)
                elapsed = time() - started
                durations.append(elapsed)
                print("{0} seconds to process {1}\n".format(
                    '%.3f' % elapsed, path.split('/')[-1]))
            else:
                process_image(path, reduceby, prob_thresh, nms_thresh, use_gpu)
        sys.stdout.flush()
        sys.stderr.flush()
    final = time()
    if verbose and len(durations) > 0:
        average = sum(durations) / len(durations)
        print("\nAverage elapsed time to detect faces in images = {0}".format(
            '%.3f' % average))
        print("Total time to detect faces in {0} images = {1}".format(
            len(durations), '%.3f' % (final - kickoff)))
if __name__ == '__main__':
    import argparse

    def str2bool(value):
        """Parse a CLI boolean string.

        BUGFIX: the original used `type=bool`, which treats EVERY non-empty
        string as True — so `--use_gpu False` silently enabled the GPU path.
        Parse the text explicitly; anything not in the truthy set is False.
        """
        return str(value).lower() in ('true', 't', 'yes', 'y', '1')

    parser = argparse.ArgumentParser(
        description='Process video for faces using TinyFace')
    # Required args
    parser.add_argument(
        '--reduceby',
        type=float,
        default=1.0,
        help='Factor by which to reduce video resolution to increase processing speed (ex: 1 = original resolution)')
    parser.add_argument(
        '--every',
        type=int,
        default=30,
        help='Analyze every nth frame_number (ex: 30 = process only every 30th frame_number of video')
    parser.add_argument(
        "--prob_thresh",
        type=float,
        default=0.85,
        help="Tiny Face Detector threshold for face likelihood (default = 0.85)")
    parser.add_argument(
        "--nms_thresh",
        type=float,
        default=0.1,
        help="Tiny Face Detector non-maximum suppression threshold (default = 0.1)")
    parser.add_argument(
        "--use_gpu",
        type=str2bool,
        default=False,
        help="Flag to use GPU (note: Use nvidia-docker to run container")
    parser.add_argument(
        "--verbose",
        type=str2bool,
        default=False,
        help="Flag to print number of faces detected per image and elapsed time to detect faces per image")
    args = parser.parse_args()
    print(
        "Tinyface parameters set as: \n \
Use GPU = {0} \n \
TinyFace Probability Threshold = {1} \n \
TinyFace NMS Threshold = {2} \n \
Media reduced by {3}x \n \
Analyzing every {4}th frame of video \n \
Verbose = {5} \n"
        .format(
            args.use_gpu,
            args.prob_thresh,
            args.nms_thresh,
            args.reduceby,
            args.every,
            args.verbose))
    sys.stdout.flush()
    sys.stderr.flush()
    main(args.use_gpu, args.prob_thresh, args.nms_thresh,
         args.reduceby, args.every, args.verbose)
    print("Finished processing all media.")
| |
#Michael Uy SID: 861064409 Feature Selection with Nearest Neighbor
import math
# Import Dataset
def mkDataSet(fileName):
    """Read a whitespace-delimited dataset file into a list of rows.

    Each row becomes [class_label(int), feature_1(float), ...]; lines
    that fail to parse are reported and skipped rather than aborting.
    """
    dataSet = []
    with open(fileName) as f:
        for i in f.readlines():
            try:
                temp= i.lstrip(" ")
                temp = [float(j) for j in temp.split()]
                # Column 0 is the class label; store it as an int.
                temp[0] = int(temp[0])
                dataSet.append(temp)
            except ValueError, e:
                print "error",e,"on line", i
    return dataSet
# Normalize
def normalize(activeDataSet):
    """Z-score every feature column in place: x -> (x - mean) / std.

    Column 0 (the class label) is left untouched.  Uses the population
    standard deviation (divide by n) and returns the same list object.
    """
    dataSet = activeDataSet
    n_rows = len(dataSet)
    n_features = len(dataSet[0]) - 1
    # Per-column sums -> means (feature columns only).
    average = [0.00] * n_features
    for row in dataSet:
        for col in range(1, len(row)):
            average[col - 1] += row[col]
    average = [total / n_rows for total in average]
    # Population std: sqrt(sum((x - mean)^2) / n).
    stds = [0.00] * n_features
    for row in dataSet:
        for col in range(1, len(row)):
            stds[col - 1] += pow(row[col] - average[col - 1], 2)
    stds = [math.sqrt(total / n_rows) for total in stds]
    # Rewrite every feature value as its z-score, in place.
    for row_idx in range(n_rows):
        for col in range(1, n_features + 1):
            dataSet[row_idx][col] = (dataSet[row_idx][col] - average[col - 1]) / stds[col - 1]
    return dataSet
# Calculate similarity
def distance(a, b, params):
    """Euclidean distance between a and b over the columns enabled in params.

    params is a 0/1 mask aligned with the row layout; only positions whose
    flag is truthy contribute to the distance.
    """
    total = sum((a[i] - b[i]) ** 2 for i, flag in enumerate(params) if flag)
    return math.sqrt(total)
# Locate neighbors
import operator
def getNeighbor(trainingSet, testInstance, k, params):
    """Return the k training rows closest to testInstance under distance().

    Ties keep the training-set order (stable sort); raises IndexError if
    k exceeds the training-set size, matching the original behavior.
    """
    scored = [(row, distance(testInstance, row, params)) for row in trainingSet]
    scored.sort(key=operator.itemgetter(1))
    return [scored[idx][0] for idx in range(k)]
# Calculate accuracy
def getAccuracy(dataSet, flags):
    """Leave-one-out 1-NN accuracy (percent) on dataSet with feature mask flags.

    Each row is held out in turn, classified by its single nearest
    neighbor (comparing class labels in column 0), and the hit rate is
    returned as a percentage.
    """
    hits = 0.00
    for held_idx in range(len(dataSet)):
        trainingSet = list(dataSet)
        held_out = trainingSet.pop(held_idx)
        nearest = getNeighbor(trainingSet, held_out, 1, flags)
        if len(nearest) == 1 and nearest[0][0] == held_out[0]:
            hits += 1
    return (hits / len(dataSet)) * 100
# Forward selection
# in:(flags, featureSet,best accuracy) out:(feature set and accuracy)
def getFeatureSet(dataSet, possibleFlags, currFeatures, bestAccuracy):
    """One greedy forward-selection step.

    Scores currFeatures plus each not-yet-chosen feature via leave-one-out
    1-NN accuracy, prints every candidate, and returns (currFeatures with
    the best new feature appended, that accuracy).  bestAccuracy is only
    used to decide whether to print the local-maximum warning.
    """
    accuracy = 0.0
    #featureSet = currFeatures
    # Candidates: features not already selected.
    flagsLeft = [i for i in possibleFlags if i not in currFeatures]
    #print "flags left: ", flagsLeft
    flagScores = [0.0] * len(flagsLeft)
    currIndex = 0
    for i in flagsLeft:
        # Build the 0/1 mask; index 0 (the class label) always stays off.
        flags = [0] * (len(possibleFlags) + 1)
        flags[i] = 1
        for j in currFeatures:
            flags[j] = 1
        accuracy = getAccuracy(dataSet, flags)
        flagScores[currIndex] = accuracy
        featureSet = list(currFeatures)
        featureSet.append(i)
        print "Using feature(s) {",
        if (len(featureSet) == 1):
            print featureSet[0],
        else:
            print ','.join(str(i) for i in featureSet),
        print "} accuracy is ", flagScores[currIndex], "%"
        currIndex += 1
    featureSet = list(currFeatures)
    # Keep the highest-scoring candidate (first one wins ties).
    y = max(flagScores)
    for i in range(len(flagsLeft)):
        if (flagScores[i] == y):
            featureSet.append(flagsLeft.pop(i))
            break
    print "\n"
    if (y < bestAccuracy):
        # This step scored worse than the best seen so far; the caller
        # still continues in case this is only a local maximum.
        print "(Warning, Accuracy has decreased!",
        print " Continuing search in case of local maxima)",
    print "Feature set{",
    if (len(featureSet) == 1):
        print featureSet[0],
    else:
        print ','.join(str(i) for i in featureSet),
    print "} was best, accuracy is ", y, "%\n"
    return (featureSet, y)
def getBackwardFeatureSet(dataSet, possibleFlags, currFeatures, bestAccuracy):
    """One greedy backward-elimination step.

    Scores currFeatures with each member removed in turn, prints every
    candidate, and returns (currFeatures minus the weakest feature, the
    accuracy achieved without it).  bestAccuracy is only used to decide
    whether to print the local-maximum warning.
    """
    accuracy = 0.0
    #featureSet = currFeatures
    flagsLeft = list(currFeatures)
    #print "flags left: ", flagsLeft
    flagScores = [0.0] * len(flagsLeft)
    currIndex = 0
    for i in flagsLeft:
        # Enable every current feature, then switch candidate i off.
        flags = [0] * (len(possibleFlags) + 1)
        for j in currFeatures:
            flags[j] = 1
        flags[i] = 0
        accuracy = getAccuracy(dataSet, flags)
        flagScores[currIndex] = accuracy
        featureSet = list(currFeatures)
        featureSet.remove(i)
        print "Using feature(s) {",
        if (len(featureSet) == 1):
            print featureSet[0],
        else:
            print ','.join(str(i) for i in featureSet),
        print "} accuracy is ", flagScores[currIndex], "%"
        currIndex += 1
    featureSet = list(currFeatures)
    # The "weakest link" is the feature whose removal scores highest.
    y = max(flagScores)
    weakestLink = 0
    for i in range(len(flagScores)):
        if (flagScores[i] == y):
            weakestLink = flagsLeft.pop(i)
            featureSet.remove(weakestLink)
            break
    if (y < bestAccuracy):
        print "\n(Warning, Accuracy has decreased!",
        print " Continuing search in case of local maxima)",
    print "\nFeature set{",
    if (len(featureSet) == 1):
        print featureSet[0],
    else:
        print ','.join(str(i) for i in featureSet),
    print "} was best, removing", weakestLink, " has the highest accuracy,", y, "%\n"
    return (featureSet, y)
#in(dataSet,sortedScores, bestaccuracy), out:(bestFeatures,bestAccuracy)
def getOriginalFeatureSet(dataSet, sortedScores, bestAccuracy):
    """Custom search: grow the feature set in order of single-feature score.

    sortedScores is a list of (score, feature_index) sorted ascending; the
    set is grown from the strongest single feature downward, scoring each
    cumulative mask, and the best cumulative subset seen is returned as
    (features, accuracy).
    """
    accuracy = 0.0
    #currFeatures = []
    flagsLeft = [i[1] for i in sortedScores]
    currBestAccuracy = bestAccuracy
    # Seed with the single strongest feature (last entry after the sort).
    currBestFeature = [flagsLeft[-1]]
    #print "flags left: ", flagsLeft
    flagScores = [0.0] * len(flagsLeft)
    currIndex = 0
    # One shared mask: features are switched on cumulatively, best-first.
    flags = [0] * (len(flagsLeft) + 1)
    for i in range(len(flagsLeft)):
        flags[flagsLeft[len(flagsLeft) - i - 1]] = 1
        accuracy = getAccuracy(dataSet, flags)
        flagScores[currIndex] = accuracy
        featureSet = flagsLeft[(len(flagsLeft) - i - 1):]
        if (accuracy > currBestAccuracy):
            currBestAccuracy = accuracy
            currBestFeature = list(featureSet)
        #featureSet.append(flagsLeft[i])
        print "Using feature(s) {",
        if (len(featureSet) == 1):
            print featureSet[0],
        else:
            print ','.join(str(i) for i in featureSet),
        print "} accuracy is ", flagScores[currIndex], "%"
        currIndex += 1
    # Report the best cumulative prefix (first one wins ties).
    y = max(flagScores)
    for i in range(len(flagsLeft)):
        if (flagScores[i] == y):
            featureSet = flagsLeft[len(flagsLeft) - i - 1:]
            break
    print "\nFeature set{",
    if (len(featureSet) == 1):
        print featureSet[0],
    else:
        print ','.join(str(i) for i in featureSet),
    print "} was best, accuracy is ", y, "%\n"
    return (currBestFeature, currBestAccuracy)
def forward(fileName):
dataSet = mkDataSet(fileName)
instances = len(dataSet)
features = len(dataSet[0])-1
print "This dataset has ",features," features (not including the class attribute), with "\
,instances," instances.\n"
print "Please wait while I normalize the data...",
#call normalize
dataSet = normalize(dataSet)
print "Done!"
flags = [0,1,1,1,1,1,1,1,1,1,1]
accuracy = getAccuracy(dataSet,flags)
print "Running nearest neighbor with all "\
,features," features, using \"leaving-one-out\" evaluation, I get an accuracy of "\
,accuracy,"%\n"
print "Beginning search.\n"
posFlags = [i for i in range(1,features+1)]
featureSet = []
bestFeatureSet = []
bestAccuracy = 0.0
for i in range(features):
retValue = getFeatureSet(dataSet,posFlags,featureSet,bestAccuracy)
featureSet =retValue[0]
accuracy = retValue[1]
if (accuracy > bestAccuracy):
bestAccuracy = accuracy
bestFeatureSet = list(featureSet)
print "Finished search!! The best feature subset is {",
print ','.join(str(i) for i in bestFeatureSet),
print "},which has an accuracy of ",bestAccuracy,"%"
def backward(fileName):
dataSet = mkDataSet(fileName)
instances = len(dataSet)
features = len(dataSet[0])-1
print "This dataset has ",features," features (not including the class attribute), with "\
,instances," instances.\n"
print "Please wait while I normalize the data...",
#call normalize
dataSet = normalize(dataSet)
print "Done!"
flags = [0,1,1,1,1,1,1,1,1,1,1]
accuracy = getAccuracy(dataSet,flags)
print "Running nearest neighbor with all "\
,features," features, using \"leaving-one-out\" evaluation, I get an accuracy of "\
,accuracy,"%\n"
print "Beginning search.\n"
posFlags = [i for i in range(1,features+1)]
featureSet = [i for i in range(1,features+1)]
bestFeatureSet = [i for i in range(1,features+1)]
bestAccuracy = 0.0
for i in range(features-1):
retValue = getBackwardFeatureSet(dataSet,posFlags,featureSet,bestAccuracy)
featureSet =retValue[0]
accuracy = retValue[1]
if (accuracy > bestAccuracy):
bestAccuracy = accuracy
bestFeatureSet = list(featureSet)
print "Finished search!! The best feature subset is {",
print ','.join(str(i) for i in bestFeatureSet),
print "},which has an accuracy of ",bestAccuracy,"%"
def original(fileName):
dataSet = mkDataSet(fileName)
instances = len(dataSet)
features = len(dataSet[0])-1
print "This dataset has ",features," features (not including the class attribute), with "\
,instances," instances.\n"
print "Please wait while I normalize the data...",
#call normalize
dataSet = normalize(dataSet)
print "Done!"
flags = [0,1,1,1,1,1,1,1,1,1,1]
accuracy = getAccuracy(dataSet,flags)
print "Running nearest neighbor with all "\
,features," features, using \"leaving-one-out\" evaluation, I get an accuracy of "\
,accuracy,"%\n"
print "Beginning search.\n"
posFlags = [i for i in range(1,features+1)]
featureSet = []
bestFeatureSet = []
bestAccuracy = 0.0
#runn feature evaluation for single features
accuracy = 0.0
currFeatures = []
flagsLeft = [i for i in posFlags if i not in currFeatures]
#print "flags left: ", flagsLeft
flagScores = [0.0]* len(flagsLeft)
currIndex=0
for i in flagsLeft:
flags = [0]* (len(posFlags)+1)
flags[i] = 1
for j in currFeatures:
flags[j] = 1
accuracy = getAccuracy(dataSet,flags)
flagScores[currIndex] = accuracy
featureSet = list(currFeatures)
featureSet.append(i)
print "Using feature(s) {",
if (len(featureSet)== 1):
print featureSet[0],
else:
print ','.join(str(i) for i in featureSet),
print"} accuracy is ",flagScores[currIndex],"%"
currIndex += 1
featureSet = list(currFeatures)
y = max(flagScores)
for i in range(len(flagsLeft)):
if (flagScores[i]== y):
featureSet.append(flagsLeft[i])
break
sortedScores=[(0.00,0)]*(len(flagScores))
for i in range(len(flagScores)):
sortedScores[i]= (flagScores[i],i+1)
sortedScores = sorted(sortedScores)
print "\nFeature set{",
if (len(featureSet)== 1):
print featureSet[0],
else:
print ','.join(str(i) for i in featureSet),
print "} was best, accuracy is ", y,"%\n"
accuracy = y
if (accuracy > bestAccuracy):
bestAccuracy = accuracy
bestFeatureSet = list(featureSet)
retValue = getOriginalFeatureSet(dataSet,sortedScores,bestAccuracy)
bestFeatureSet =retValue[0]
bestAccuracy = retValue[1]
print "Finished search!! The best feature subset is {",
print ','.join(str(i) for i in bestFeatureSet),
print "},which has an accuracy of ",bestAccuracy,"%"
# Main
# Interactive driver: prompt for a dataset file until one opens, then
# dispatch to the chosen search strategy.
fileName = ""
algorithm = 0
print "Welcome to Michael Uy\'s Feature Selection Algorithm"
while (1):
    try:
        fileName = raw_input('Type in the name of the file to test: ')
        # Probe that the file can be opened before leaving the loop.
        # NOTE(review): the handle returned here is never closed — presumably
        # harmless in a short-lived script, but worth confirming.
        open(fileName)
    except EnvironmentError :
        print "Error: Cannot find ", fileName, "."
        continue
    else:
        break
print "Type in the number of the algorithm you want to test: "
print "1) Forward Selection"
print "2) Backward Elimination"
print "3) Original Algorithm"
# NOTE: Python 2 input() evaluates the typed text; a bare 1/2/3 is expected.
algorithm = input()
if (algorithm == 1):
    forward(fileName)
elif (algorithm == 2):
    backward(fileName)
elif (algorithm == 3):
    original(fileName)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ServersOperations(object):
"""ServersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the request. Constant value: "2017-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    # Request pipeline and (de)serialization helpers injected by the
    # generated service client.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    # Constant API version sent with every operation in this group.
    self.api_version = "2017-12-01"
    self.config = config
def _create_initial(
        self, resource_group_name, server_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Send the initial PUT of the create LRO and deserialize the reply.

    Returns a Server model for 200/201, None for 202 (operation still in
    flight), or a ClientRawResponse when raw=True; raises CloudError on
    any other status.  Called by create(), which drives the polling.
    """
    # Construct URL
    url = self.create.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serverName': self._serialize.url("server_name", server_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(parameters, 'ServerForCreate')
    # Construct and send request
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code not in [200, 201, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    # 202 (accepted, still provisioning) intentionally leaves this None.
    if response.status_code == 200:
        deserialized = self._deserialize('Server', response)
    if response.status_code == 201:
        deserialized = self._deserialize('Server', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def create(
        self, resource_group_name, server_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Creates a new server, or will overwrite an existing server.

    :param resource_group_name: The name of the resource group that
     contains the resource. You can obtain this value from the Azure
     Resource Manager API or the portal.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param parameters: The required parameters for creating or updating a
     server.
    :type parameters: ~azure.mgmt.rdbms.postgresql.models.ServerForCreate
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns Server or
     ClientRawResponse<Server> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.rdbms.postgresql.models.Server]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.rdbms.postgresql.models.Server]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the PUT; raw=True so the poller receives the full initial
    # HTTP response (headers drive the long-running-operation protocol).
    raw_result = self._create_initial(
        resource_group_name=resource_group_name,
        server_name=server_name,
        parameters=parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    def get_long_running_output(response):
        # Deserialize the terminal response of the LRO into a Server model.
        deserialized = self._deserialize('Server', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # polling: True -> default ARM polling; False -> return immediately;
    # anything else -> caller-supplied polling strategy object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}'}
def _update_initial(
        self, resource_group_name, server_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Send the initial PATCH of the update LRO and deserialize the reply.

    Returns a Server model for 200, None for 202 (operation still in
    flight), or a ClientRawResponse when raw=True; raises CloudError on
    any other status.  Called by update(), which drives the polling.
    """
    # Construct URL
    url = self.update.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serverName': self._serialize.url("server_name", server_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(parameters, 'ServerUpdateParameters')
    # Construct and send request
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    # 202 (accepted, still applying) intentionally leaves this None.
    if response.status_code == 200:
        deserialized = self._deserialize('Server', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
    def update(
            self, resource_group_name, server_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Updates an existing server. The request body can contain one to many of
        the properties present in the normal server definition.
        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param parameters: The required parameters for updating a server.
        :type parameters:
         ~azure.mgmt.rdbms.postgresql.models.ServerUpdateParameters
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns Server or
         ClientRawResponse<Server> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.rdbms.postgresql.models.Server]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.rdbms.postgresql.models.Server]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Fire the initial PATCH with raw=True so the polling method can read
        # the status/location headers from the raw response.
        raw_result = self._update_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('Server', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # polling may be True (default ARM strategy), False (no polling), or a
        # user-supplied polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}'}
    def _delete_initial(
            self, resource_group_name, server_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE of the long-running delete operation.
        Returns nothing on success (200/202/204); the public ``delete``
        method wraps this call in an LROPoller.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 200/202: delete accepted or done; 204: resource already gone.
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, server_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes a server.
        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Fire the initial DELETE with raw=True so the polling method can read
        # the status/location headers from the raw response.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            custom_headers=custom_headers,
            **operation_config
        )
        def get_long_running_output(response):
            # Delete has no body; only the raw response is meaningful.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # polling may be True (default ARM strategy), False (no polling), or a
        # user-supplied polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}'}
    def get(
            self, resource_group_name, server_name, custom_headers=None, raw=False, **operation_config):
        """Gets information about a server.
        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Server or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.rdbms.postgresql.models.Server or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # Only 200 is a success for a point GET; everything else is an error.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Server', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}'}
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """List all the servers in a given resource group.
        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Server
        :rtype:
         ~azure.mgmt.rdbms.postgresql.models.ServerPaged[~azure.mgmt.rdbms.postgresql.models.Server]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Callback invoked by ServerPaged for each page: the first call
            # builds the full URL, later calls follow the server-supplied
            # next_link verbatim.
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.ServerPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.ServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers'}
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """List all the servers in a given subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Server
        :rtype:
         ~azure.mgmt.rdbms.postgresql.models.ServerPaged[~azure.mgmt.rdbms.postgresql.models.Server]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Callback invoked by ServerPaged for each page: the first call
            # builds the full URL, later calls follow the server-supplied
            # next_link verbatim.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.ServerPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.ServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/servers'}
| |
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
Define a simple format for saving LocalArrays to disk with full information
about them. This format, ``.dnpy``, draws heavily from the ``.npy`` format
specification from NumPy and from the data structure defined in the Distributed
Array Protocol.
Version numbering
-----------------
The version numbering of this format is independent of DistArray's and
the Distributed Array Protocol's version numberings.
Format Version 1.0
------------------
The first 6 bytes are a magic string: exactly ``\\x93DARRY``.
The next 1 byte is an unsigned byte: the major version number of the file
format, e.g. ``\\x01``.
The next 1 byte is an unsigned byte: the minor version number of the file
format, e.g. ``\\x00``. Note: the version of the file format is not tied
to the version of the DistArray package.
The next 2 bytes form a little-endian unsigned short int: the length of
the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the
distribution of this chunk of the LocalArray. It is an ASCII string
which contains a Python literal expression of a dictionary. It is
terminated by a newline (``\\n``) and padded with spaces (``\\x20``) to
make the total length of ``magic string + 4 + HEADER_LEN`` be evenly
divisible by 16 for alignment purposes.
The dictionary contains two keys, both described in the Distributed
Array Protocol:
"__version__" : str
Version of the Distributed Array Protocol used in this header.
"dim_data" : tuple of dict
One dictionary per array dimension; see the Distributed Array
Protocol for the details of this data structure.
For repeatability and readability, the dictionary keys are sorted in
alphabetic order. This is for convenience only. A writer SHOULD implement
this if possible. A reader MUST NOT depend on this.
Following this header is the output of ``numpy.save`` for the underlying
data buffer. This contains the full output of ``save``, beginning with
the magic number for ``.npy`` files, followed by the ``.npy`` header and
array data.
The ``.npy`` format, including reasons for creating it and a comparison
of alternatives, is described fully in the "npy-format" NEP and in the
module docstring for ``numpy.lib.format``.
"""
import io
from distarray.externals import six
import numpy as np
from numpy.lib.format import write_array_header_1_0
from numpy.lib.utils import safe_eval
from numpy.compat import asbytes
from distarray.utils import _raise_nie
MAGIC_PREFIX = asbytes('\x93DARRY')
MAGIC_LEN = len(MAGIC_PREFIX) + 2
# This is only copied from numpy/lib/format.py because the numpy version
# doesn't allow one to set the MAGIC_PREFIX
def magic(major, minor, prefix=MAGIC_PREFIX):
    """Return the magic string for the given file format version.

    Parameters
    ----------
    major : int in [0, 255]
    minor : int in [0, 255]
    prefix : bytes, optional
        Magic prefix to prepend; defaults to the ``.dnpy`` prefix.

    Returns
    -------
    magic : str

    Raises
    ------
    ValueError
        if the version cannot be formatted.
    """
    if major < 0 or major > 255:
        raise ValueError("Major version must be 0 <= major < 256.")
    if minor < 0 or minor > 255:
        raise ValueError("Minor version must be 0 <= minor < 256.")
    if six.PY2:
        return prefix + chr(major) + chr(minor)
    elif six.PY3:
        return prefix + bytes([major, minor])
    else:
        # _raise_nie() raises internally and returns nothing; the former
        # ``raise _raise_nie()`` would itself have failed with a TypeError
        # (raising None) had this unreachable branch ever run.
        _raise_nie()
def write_localarray(fp, arr, version=(1, 0)):
    """
    Write a LocalArray to a .dnpy file, including a header.

    The ``__version__`` and ``dim_data`` keys from the Distributed Array
    Protocol are written to a header, then ``numpy.save`` is used to write the
    value of the ``buffer`` key.

    Parameters
    ----------
    fp : file_like object
        An open, writable file object, or similar object with a ``.write()``
        method.
    arr : LocalArray
        The array to write to disk.
    version : (int, int), optional
        The version number of the file format. Default: (1, 0)

    Raises
    ------
    ValueError
        If the array cannot be persisted.
    Various other errors
        If the underlying numpy array contains Python objects as part of its
        dtype, the process of pickling them may raise various errors if the
        objects are not picklable.
    """
    if version != (1, 0):
        msg = "Only version (1, 0) is supported, not %s."
        raise ValueError(msg % (version,))

    # Magic string first, then the distribution metadata header.
    fp.write(magic(*version))

    distbuffer = arr.__distarray__()
    metadata = {key: distbuffer[key] for key in ('__version__', 'dim_data')}
    write_array_header_1_0(fp, metadata)

    # Finally the raw data, delegated entirely to numpy's .npy writer.
    np.save(fp, distbuffer['buffer'])
def read_magic(fp):
    """Read the magic string to get the version of the file format.

    Parameters
    ----------
    fp : filelike object

    Returns
    -------
    major : int
    minor : int

    Raises
    ------
    ValueError
        If the magic prefix read from the file does not match MAGIC_PREFIX.
    """
    magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
    if magic_str[:-2] != MAGIC_PREFIX:
        msg = "the magic string is not correct; expected %r, got %r"
        raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
    if six.PY2:
        major, minor = map(ord, magic_str[-2:])
    elif six.PY3:
        # In Python 3, indexing/iterating bytes already yields ints.
        major, minor = magic_str[-2:]
    else:
        # _raise_nie() raises internally and returns nothing; the former
        # ``raise _raise_nie()`` would itself have failed with a TypeError
        # (raising None) had this unreachable branch ever run.
        _raise_nie()
    return major, minor
def read_array_header_1_0(fp):
    """
    Read an array header from a filelike object using the 1.0 file format
    version.

    This will leave the file object located just after the header.

    Parameters
    ----------
    fp : filelike object
        A file object or something with a `.read()` method like a file.

    Returns
    -------
    __version__ : str
        Version of the Distributed Array Protocol used.
    dim_data : tuple
        A tuple containing a dictionary for each dimension of the underlying
        array, as described in the Distributed Array Protocol.

    Raises
    ------
    ValueError
        If the data is invalid.
    """
    import struct

    # The header is preceded by its length as a little-endian unsigned short.
    length_bytes = _read_bytes(fp, 2, "Array header length")
    (header_length,) = struct.unpack('<H', length_bytes)
    header = _read_bytes(fp, header_length, "Array header")

    # The header itself is the repr of a Python dict, padded with trailing
    # whitespace out to a 16-byte boundary; the keys are strings.
    try:
        parsed = safe_eval(header)
    except SyntaxError as e:
        msg = "Cannot parse header: %r\nException: %r"
        raise ValueError(msg % (header, e))
    if not isinstance(parsed, dict):
        msg = "Header is not a dictionary: %r"
        raise ValueError(msg % parsed)
    keys = sorted(parsed.keys())
    if keys != ['__version__', 'dim_data']:
        msg = "Header does not contain the correct keys: %r"
        raise ValueError(msg % (keys,))

    # TODO: Sanity check with the DAP validator
    return parsed['__version__'], parsed['dim_data']
def read_localarray(fp):
    """
    Read a LocalArray from a .dnpy file.

    Parameters
    ----------
    fp : file_like object
        If this is not a real file object, then this may take extra memory
        and time.

    Returns
    -------
    distbuffer : dict
        The Distributed Array Protocol structure created from the data on disk.

    Raises
    ------
    ValueError
        If the data is invalid.
    """
    version = read_magic(fp)
    if version != (1, 0):
        msg = "only support version (1,0) of file format, not %r"
        raise ValueError(msg % (version,))

    # Header gives the distribution metadata; the payload is a plain .npy.
    __version__, dim_data = read_array_header_1_0(fp)
    buf = np.load(fp)

    return {
        '__version__': __version__,
        'dim_data': dim_data,
        'buffer': buf,
    }
# This is only copied from numpy/lib/format.py because importing it doesn't
# work
def _read_bytes(fp, size, error_template="ran out of data"):
"""
Read from file-like object until size bytes are read.
Raises ValueError if not EOF is encountered before size bytes are read.
Non-blocking objects only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on would-block,
# python2 file will truncate, probably nothing can be done about that.
# note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data
| |
"""
SQL functions reference lists:
https://web.archive.org/web/20130407175746/http://www.gaia-gis.it/gaia-sins/spatialite-sql-4.0.0.html
http://www.gaia-gis.it/gaia-sins/spatialite-sql-4.2.1.html
"""
import re
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.operations import DatabaseOperations
from django.utils.functional import cached_property
class SpatiaLiteDistanceOperator(SpatialOperator):
    """Distance comparison operator with a SpatiaLite-specific workaround."""

    def as_sql(self, connection, lookup, template_params, sql_params):
        # Non-geodetic fields need no special handling.
        if not lookup.lhs.output_field.geodetic(connection):
            return super().as_sql(connection, lookup, template_params, sql_params)
        # SpatiaLite returns NULL instead of zero on geodetic coordinates
        template_params.update({
            'op': self.op,
            'func': connection.ops.spatial_function_name('Distance'),
        })
        use_spheroid = len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid'
        sql_params.insert(1, use_spheroid)
        sql_template = 'COALESCE(%(func)s(%(lhs)s, %(rhs)s, %%s), 0) %(op)s %(value)s'
        return sql_template % template_params, sql_params
class SpatiaLiteOperations(BaseSpatialOperations, DatabaseOperations):
    """Spatial SQL operations for the SpatiaLite (SQLite) backend."""
    name = 'spatialite'
    spatialite = True
    # Matches "major.minor1.minor2..." version strings, e.g. "4.3.0a".
    version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
    Adapter = SpatiaLiteAdapter
    # SpatiaLite names for the OGC spatial functions used by GeoDjango.
    area = 'Area'
    centroid = 'Centroid'
    collect = 'Collect'
    contained = 'MbrWithin'
    difference = 'Difference'
    distance = 'Distance'
    envelope = 'Envelope'
    extent = 'Extent'
    geojson = 'AsGeoJSON'
    gml = 'AsGML'
    intersection = 'Intersection'
    kml = 'AsKML'
    length = 'GLength'  # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
    makeline = 'MakeLine'
    num_geom = 'NumGeometries'
    num_points = 'NumPoints'
    point_on_surface = 'PointOnSurface'
    scale = 'ScaleCoords'
    svg = 'AsSVG'
    sym_difference = 'SymDifference'
    transform = 'Transform'
    translate = 'ShiftCoords'
    union = 'GUnion'  # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
    unionagg = 'GUnion'
    from_text = 'GeomFromText'
    from_wkb = 'GeomFromWKB'
    select = 'AsText(%s)'
    gis_operators = {
        # Unary predicates
        'isvalid': SpatialOperator(func='IsValid'),
        # Binary predicates
        'equals': SpatialOperator(func='Equals'),
        'disjoint': SpatialOperator(func='Disjoint'),
        'touches': SpatialOperator(func='Touches'),
        'crosses': SpatialOperator(func='Crosses'),
        'within': SpatialOperator(func='Within'),
        'overlaps': SpatialOperator(func='Overlaps'),
        'contains': SpatialOperator(func='Contains'),
        'intersects': SpatialOperator(func='Intersects'),
        'relate': SpatialOperator(func='Relate'),
        # Returns true if B's bounding box completely contains A's bounding box.
        'contained': SpatialOperator(func='MbrWithin'),
        # Returns true if A's bounding box completely contains B's bounding box.
        'bbcontains': SpatialOperator(func='MbrContains'),
        # Returns true if A's bounding box overlaps B's bounding box.
        'bboverlaps': SpatialOperator(func='MbrOverlaps'),
        # These are implemented here as synonyms for Equals
        'same_as': SpatialOperator(func='Equals'),
        'exact': SpatialOperator(func='Equals'),
        # Distance predicates
        'dwithin': SpatialOperator(func='PtDistWithin'),
        'distance_gt': SpatiaLiteDistanceOperator(func='Distance', op='>'),
        'distance_gte': SpatiaLiteDistanceOperator(func='Distance', op='>='),
        'distance_lt': SpatiaLiteDistanceOperator(func='Distance', op='<'),
        'distance_lte': SpatiaLiteDistanceOperator(func='Distance', op='<='),
    }
    # SpatiaLite has no 3D extent aggregate.
    disallowed_aggregates = (aggregates.Extent3D,)
    @cached_property
    def function_names(self):
        # GeoDjango function name -> SpatiaLite SQL function name overrides.
        return {
            'Length': 'ST_Length',
            'Reverse': 'ST_Reverse',
            'Scale': 'ScaleCoords',
            'Translate': 'ST_Translate',
            'Union': 'ST_Union',
        }
    @cached_property
    def unsupported_functions(self):
        # Functions GeoDjango exposes that this backend cannot provide;
        # GeoHash/IsValid/MakeValid additionally require the LWGEOM extension.
        unsupported = {'BoundingCircle', 'ForceRHR', 'MemSize'}
        if not self.lwgeom_version():
            unsupported |= {'GeoHash', 'IsValid', 'MakeValid'}
        return unsupported
    @cached_property
    def spatial_version(self):
        """Determine the version of the SpatiaLite library."""
        try:
            version = self.spatialite_version_tuple()[1:]
        except Exception as exc:
            raise ImproperlyConfigured(
                'Cannot determine the SpatiaLite version for the "%s" database. '
                'Was the SpatiaLite initialization SQL loaded on this database?' % (
                    self.connection.settings_dict['NAME'],
                )
            ) from exc
        if version < (4, 0, 0):
            raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions 4.0.0 and above.')
        return version
    def convert_extent(self, box, srid):
        """
        Convert the polygon data received from SpatiaLite to min/max values.
        """
        if box is None:
            return None
        # The extent polygon's shell lists the corners; index 0 is the
        # lower-left corner and index 2 the upper-right one.
        shell = Geometry(box, srid).shell
        xmin, ymin = shell[0][:2]
        xmax, ymax = shell[2][:2]
        return (xmin, ymin, xmax, ymax)
    def geo_db_type(self, f):
        """
        Return None because geometry columns are added via the
        `AddGeometryColumn` stored procedure on SpatiaLite.
        """
        return None
    def get_distance(self, f, value, lookup_type):
        """
        Return the distance parameters for the given geometry field,
        lookup value, and lookup type.
        """
        if not value:
            return []
        value = value[0]
        if isinstance(value, Distance):
            if f.geodetic(self.connection):
                if lookup_type == 'dwithin':
                    raise ValueError(
                        'Only numeric values of degree units are allowed on '
                        'geographic DWithin queries.'
                    )
                # Geodetic distances are expressed in meters.
                dist_param = value.m
            else:
                # Convert the Distance into the units of the geometry field.
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            # Assume the distance is in the units of the field.
            dist_param = value
        return [dist_param]
    def get_geom_placeholder(self, f, value, compiler):
        """
        Provide a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        Transform() and GeomFromText() function call(s).
        """
        def transform_value(value, srid):
            # A transform is needed only when the value has a different SRID.
            return not (value is None or value.srid == srid)
        if hasattr(value, 'as_sql'):
            if transform_value(value, f.srid):
                placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
            else:
                placeholder = '%s'
            # No geometry value used for F expression, substitute in
            # the column name instead.
            sql, _ = compiler.compile(value)
            return placeholder % sql
        else:
            if transform_value(value, f.srid):
                # Adding Transform() to the SQL placeholder.
                return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
            else:
                return '%s(%%s,%s)' % (self.from_text, f.srid)
    def _get_spatialite_func(self, func):
        """
        Helper routine for calling SpatiaLite functions and returning
        their result.
        Any error occurring in this method should be handled by the caller.
        """
        cursor = self.connection._cursor()
        try:
            cursor.execute('SELECT %s' % func)
            row = cursor.fetchone()
        finally:
            cursor.close()
        return row[0]
    def geos_version(self):
        "Return the version of GEOS used by SpatiaLite as a string."
        return self._get_spatialite_func('geos_version()')
    def proj4_version(self):
        "Return the version of the PROJ.4 library used by SpatiaLite."
        return self._get_spatialite_func('proj4_version()')
    def lwgeom_version(self):
        """Return the version of LWGEOM library used by SpatiaLite."""
        return self._get_spatialite_func('lwgeom_version()')
    def spatialite_version(self):
        "Return the SpatiaLite library version as a string."
        return self._get_spatialite_func('spatialite_version()')
    def spatialite_version_tuple(self):
        """
        Return the SpatiaLite version as a tuple (version string, major,
        minor, subminor).
        """
        version = self.spatialite_version()
        m = self.version_regex.match(version)
        if m:
            major = int(m.group('major'))
            minor1 = int(m.group('minor1'))
            minor2 = int(m.group('minor2'))
        else:
            raise Exception('Could not parse SpatiaLite version string: %s' % version)
        return (version, major, minor1, minor2)
    def spatial_aggregate_name(self, agg_name):
        """
        Return the spatial aggregate SQL template and function for the
        given Aggregate instance.
        """
        # 'union' maps to the 'unionagg' attribute (GUnion) on this backend.
        agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
        return getattr(self, agg_name)
    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        from django.contrib.gis.db.backends.spatialite.models import SpatialiteGeometryColumns
        return SpatialiteGeometryColumns
    def spatial_ref_sys(self):
        from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys
        return SpatialiteSpatialRefSys
    def get_db_converters(self, expression):
        # Append geometry conversion for geometry-typed output fields.
        converters = super().get_db_converters(expression)
        if hasattr(expression.output_field, 'geom_type'):
            converters.append(self.convert_geometry)
        return converters
    def convert_geometry(self, value, expression, connection, context):
        # Wrap the raw database value in a Geometry, restoring the SRID of a
        # transformed geometry when the query context recorded one.
        if value:
            value = Geometry(value)
            if 'transformed_srid' in context:
                value.srid = context['transformed_srid']
        return value
| |
# Copyright 2017 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from hy.compiler import hy_compile, HyTypeError
from hy.models import HyObject, replace_hy_obj
from hy.lex import tokenize, LexException
from hy.errors import HyIOError
from io import open
import re
import marshal
import struct
import imp
import sys
import ast
import os
import __future__
from hy._compat import PY3, PY34, MAGIC, builtins, long_type, wr_long
from hy._compat import string_types
def ast_compile(ast, filename, mode):
    """Compile a Python AST.

    Behaves like the builtin ``compile``, but always enables the
    true-division and print-function __future__ flags."""
    future_flags = (__future__.CO_FUTURE_DIVISION |
                    __future__.CO_FUTURE_PRINT_FUNCTION)
    return compile(ast, filename, mode, future_flags)
def import_buffer_to_hst(buf):
    """Tokenize the Hy source in *buf* and return the resulting Hy AST."""
    # Ensure the source ends with a newline before tokenizing.
    source = buf + "\n"
    return tokenize(source)
def import_file_to_hst(fpath):
    """Read Hy source from *fpath* and return a Hy AST.

    Raises HyIOError if the file cannot be read."""
    try:
        with open(fpath, 'r', encoding='utf-8') as stream:
            contents = stream.read()
        return import_buffer_to_hst(contents)
    except IOError as e:
        # Re-raise as Hy's own IOError subtype, preserving the details.
        raise HyIOError(e.errno, e.strerror, e.filename)
def import_buffer_to_ast(buf, module_name):
    """Compile the Hy source in *buf* into a Python AST."""
    hst = import_buffer_to_hst(buf)
    return hy_compile(hst, module_name)
def import_file_to_ast(fpath, module_name):
    """Compile the Hy source file at *fpath* into a Python AST."""
    hst = import_file_to_hst(fpath)
    return hy_compile(hst, module_name)
def import_file_to_module(module_name, fpath, loader=None):
    """Import Hy source from fpath and put it into a Python module.
    If there's an up-to-date byte-compiled version of this module, load that
    instead. Otherwise, byte-compile the module once we're done loading it, if
    we can.
    Return the module."""
    module = None
    bytecode_path = get_bytecode_path(fpath)
    try:
        source_mtime = int(os.stat(fpath).st_mtime)
        with open(bytecode_path, 'rb') as bc_f:
            # The first 4 bytes are the magic number for the version of Python
            # that compiled this bytecode.
            bytecode_magic = bc_f.read(4)
            # The next 4 bytes, interpreted as a little-endian 32-bit integer,
            # are the mtime of the corresponding source file.
            bytecode_mtime, = struct.unpack('<i', bc_f.read(4))
    except (IOError, OSError):
        # No usable bytecode (missing file or stat failure); fall through to
        # compiling from source.
        pass
    else:
        if bytecode_magic == MAGIC and bytecode_mtime >= source_mtime:
            # It's a cache hit. Load the byte-compiled version.
            if PY3:
                # As of Python 3.6, imp.load_compiled still exists, but it's
                # deprecated. So let's use SourcelessFileLoader instead.
                from importlib.machinery import SourcelessFileLoader
                module = (SourcelessFileLoader(module_name, bytecode_path).
                          load_module(module_name))
            else:
                module = imp.load_compiled(module_name, bytecode_path)
    if not module:
        # It's a cache miss, so load from source.
        # NOTE(review): a None placeholder is registered before compiling —
        # presumably to reserve the name during loading; confirm intent.
        sys.modules[module_name] = None
        try:
            _ast = import_file_to_ast(fpath, module_name)
            module = imp.new_module(module_name)
            module.__file__ = fpath
            code = ast_compile(_ast, fpath, "exec")
            try:
                write_code_as_pyc(fpath, code)
            except (IOError, OSError):
                # We failed to save the bytecode, probably because of a
                # permissions issue. The user only asked to import the
                # file, so don't bug them about it.
                pass
            eval(code, module.__dict__)
        except (HyTypeError, LexException) as e:
            # Attach the source text so error reporting can show context.
            if e.source is None:
                with open(fpath, 'rt') as fp:
                    e.source = fp.read()
                e.filename = fpath
            raise
        except Exception:
            # Drop the placeholder so a later import can retry cleanly.
            sys.modules.pop(module_name, None)
            raise
    sys.modules[module_name] = module
    module.__name__ = module_name
    module.__file__ = fpath
    if loader:
        module.__loader__ = loader
    if is_package(module_name):
        module.__path__ = []
        module.__package__ = module_name
    else:
        module.__package__ = module_name.rpartition('.')[0]
    return module
def import_buffer_to_module(module_name, buf):
    """Compile the Hy source in *buf* and execute it in a fresh module.

    Returns the new module object.
    """
    try:
        _ast = import_buffer_to_ast(buf, module_name)
        mod = imp.new_module(module_name)
        eval(ast_compile(_ast, "", "exec"), mod.__dict__)
    except (HyTypeError, LexException) as e:
        # Attach the source buffer so error reporting can show context.
        if e.source is None:
            e.source = buf
            e.filename = '<stdin>'
        raise
    return mod
def hy_eval(hytree, namespace, module_name, ast_callback=None):
    """Evaluate a Hy AST in *namespace* and return the value of the last form.

    ``ast_callback``, if given, is called with the compiled body and final
    expression ASTs before evaluation (useful for inspection in tests).
    Raises HyTypeError if *module_name* is not a string or *namespace*
    is not a dict.
    """
    # Give every node a dummy position so the compiler accepts trees that
    # were built programmatically (and thus carry no source locations).
    foo = HyObject()
    foo.start_line = 0
    foo.end_line = 0
    foo.start_column = 0
    foo.end_column = 0
    replace_hy_obj(hytree, foo)

    if not isinstance(module_name, string_types):
        raise HyTypeError(foo, "Module name must be a string")

    _ast, expr = hy_compile(hytree, module_name, get_expr=True)

    # Spoof the positions in the generated ast...
    for node in ast.walk(_ast):
        node.lineno = 1
        node.col_offset = 1

    for node in ast.walk(expr):
        node.lineno = 1
        node.col_offset = 1

    if ast_callback:
        ast_callback(_ast, expr)

    if not isinstance(namespace, dict):
        raise HyTypeError(foo, "Globals must be a dictionary")

    # Two-step eval: eval() the body of the exec call
    eval(ast_compile(_ast, "<eval_body>", "exec"), namespace)

    # Then eval the expression context and return that
    return eval(ast_compile(expr, "<eval>", "eval"), namespace)
def write_hy_as_pyc(fname):
    """Compile the Hy source file *fname* and write its bytecode to disk."""
    _ast = import_file_to_ast(fname,
                              os.path.basename(os.path.splitext(fname)[0]))
    code = ast_compile(_ast, fname, "exec")
    write_code_as_pyc(fname, code)
def write_code_as_pyc(fname, code):
    """Write *code* (compiled from source file *fname*) as a .pyc file.

    The header mirrors CPython's .pyc layout: magic number, source mtime,
    and (on Python 3) the source size, followed by the marshalled code.
    """
    st = os.stat(fname)
    timestamp = long_type(st.st_mtime)

    cfile = get_bytecode_path(fname)
    try:
        os.makedirs(os.path.dirname(cfile))
    except (IOError, OSError):
        # Directory already exists, or cannot be created — in the latter
        # case the open() below will raise for the caller to handle.
        pass

    with builtins.open(cfile, 'wb') as fc:
        fc.write(MAGIC)
        wr_long(fc, timestamp)
        if PY3:
            wr_long(fc, st.st_size)
        marshal.dump(code, fc)
class MetaLoader(object):
    """PEP 302 loader that imports a single Hy source file as a module."""

    def __init__(self, path):
        # Filesystem path of the .hy file this loader is responsible for.
        self.path = path

    def load_module(self, fullname):
        if fullname in sys.modules:
            # Honor the sys.modules cache, per the import protocol.
            return sys.modules[fullname]

        if not self.path:
            return

        return import_file_to_module(fullname, self.path, self)
class MetaImporter(object):
    """PEP 302 finder that locates Hy source files on ``sys.path``."""

    def find_on_path(self, fullname):
        """Return the path of the ``.hy`` file for *fullname*, or None.

        Checks each ``sys.path`` entry for a package ``__init__.hy``
        first, then a plain module file.
        """
        relative = "/".join(fullname.split("."))
        templates = ("%s/__init__.hy", "%s.hy")
        for entry in sys.path:
            base = "%s/%s" % (os.path.abspath(entry), relative)
            for template in templates:
                candidate = template % base
                if os.path.exists(candidate):
                    return candidate

    def find_module(self, fullname, path=None):
        """Return a MetaLoader when *fullname* resolves to a Hy file."""
        found = self.find_on_path(fullname)
        if found:
            return MetaLoader(found)
# Register the Hy importer at the front of the import machinery, and make
# sure the current directory is searched, mirroring normal Python startup.
sys.meta_path.insert(0, MetaImporter())
sys.path.insert(0, "")
def is_package(module_name):
    """Return True if *module_name* names a Hy package.

    A package is any directory reachable from a ``sys.path`` entry
    (resolved to an absolute path) that contains an ``__init__.hy`` for
    the dotted module name.
    """
    parts = os.path.join(*module_name.split("."))
    return any(
        os.path.exists(os.path.join(os.path.abspath(entry), parts,
                                    "__init__.hy"))
        for entry in sys.path)
def get_bytecode_path(source_path):
    """Return the bytecode (.pyc) path corresponding to *source_path*."""
    if PY34:
        # Modern interpreters: PEP 3147 __pycache__ layout via importlib.
        import importlib.util
        return importlib.util.cache_from_source(source_path)
    elif hasattr(imp, "cache_from_source"):
        # Python 3.0-3.3 equivalent on the deprecated imp module.
        return imp.cache_from_source(source_path)
    else:
        # Python 2: bytecode sits next to the source.
        # If source_path has a file extension, replace it with ".pyc".
        # Otherwise, just append ".pyc".
        d, f = os.path.split(source_path)
        return os.path.join(d, re.sub(r"(?:\.[^.]+)?\Z", ".pyc", f))
| |
# -*- coding: utf-8 -*-
"""
===============================================================================
Voronoi -- Subclass of GenericGeometry for a standard Geometry created from a
Voronoi diagram. Used with the Delaunay Network, but could work for others
(not tested).
===============================================================================
"""
import scipy as sp
import numpy as np
from OpenPNM.Geometry import models as gm
import OpenPNM.Utilities.vertexops as vo
from OpenPNM.Geometry import GenericGeometry
from OpenPNM.Base import logging
import matplotlib.pyplot as plt
from scipy.io import savemat
from OpenPNM.Utilities import topology
logger = logging.getLogger(__name__)
topo = topology()
class Voronoi(GenericGeometry):
    r"""
    Voronoi subclass of GenericGeometry.

    Parameters
    ----------
    name : string
        A unique name for the network
    fibre_rad: float
        Fibre radius to apply to Voronoi edges when calculating pore and throat
        sizes
    voxel_vol : boolean
        Determines whether to calculate pore volumes by creating a voxel image
        or to use the offset vertices of the throats. Voxel method is slower
        and may run into memory issues but is more accurate and allows manipulation
        of the image. N.B. many of the class methods are dependent on the voxel
        image.
    """

    def __init__(self, fibre_rad=3e-06, voxel_vol=True, **kwargs):
        super().__init__(**kwargs)
        self._fibre_rad = fibre_rad
        self._voxel_vol = voxel_vol
        # Voxel edge length defaults to 1 micron unless supplied via kwargs.
        try:
            self._vox_len = kwargs['vox_len']
        except KeyError:
            self._vox_len = 1e-6
        self._generate()

    def _generate(self):
        """Attach all geometry models and trim throats fully occluded by
        fibres."""
        # Set all the required models
        self.models.add(propname='pore.vertices',
                        model=gm.pore_vertices.voronoi)
        self.models.add(propname='throat.vertices',
                        model=gm.throat_vertices.voronoi)
        self.models.add(propname='throat.normal',
                        model=gm.throat_normal.voronoi)
        self.models.add(propname='throat.offset_vertices',
                        model=gm.throat_offset_vertices.distance_transform,
                        offset=self._fibre_rad,
                        set_dependent=True)

        topo.trim_occluded_throats(network=self._net, mask=self.name)
        # Pore volume: voxel image (accurate, memory-heavy) or offset
        # vertices (fast approximation).
        if self._voxel_vol:
            self.models.add(propname='pore.volume',
                            model=gm.pore_volume.in_hull_volume,
                            fibre_rad=self._fibre_rad,
                            vox_len=self._vox_len)
        else:
            self.models.add(propname='pore.volume',
                            model=gm.pore_volume.voronoi)
        self.models.add(propname='throat.shape_factor',
                        model=gm.throat_shape_factor.compactness)
        self.models.add(propname='pore.seed',
                        model=gm.pore_misc.random)
        self.models.add(propname='throat.seed',
                        model=gm.throat_misc.neighbor,
                        pore_prop='pore.seed',
                        mode='min')
        self.models.add(propname='pore.centroid',
                        model=gm.pore_centroid.voronoi)
        self.models.add(propname='pore.diameter',
                        model=gm.pore_diameter.equivalent_sphere)
        self.models.add(propname='pore.indiameter',
                        model=gm.pore_diameter.centroids)
        self.models.add(propname='pore.area',
                        model=gm.pore_area.spherical)
        self.models.add(propname='throat.diameter',
                        model=gm.throat_diameter.equivalent_circle)
        # Throats are treated as zero-volume constrictions of fibre width.
        self['throat.volume'] = 0.0
        self['throat.length'] = self._fibre_rad*2
        self.models.add(propname='throat.surface_area',
                        model=gm.throat_surface_area.extrusion)
        self.models.add(propname='throat.c2c',
                        model=gm.throat_length.c2c)

    def make_fibre_image(self, fibre_rad=None, vox_len=1e-6):
        r"""
        If the voronoi voxel method was implemented to calculate pore volumes
        an image of the fibre space has already been calculated and stored on
        the geometry. If not generate it

        Parameters
        ----------
        fibre_rad : float
            Fibre radius to apply to Voronoi edges when calculating pore and throat
            sizes
        vox_len : float
            Length of voxel edge when dividing domain
        """
        if hasattr(self, '_fibre_image'):
            logger.info('fibre image already created')
            return
        else:
            if fibre_rad is None:
                fibre_rad = self._fibre_rad
            # Convert radius from physical length into voxel units.
            fibre_rad /= vox_len
            self._fibre_image = gm.pore_volume._get_fibre_image(self._net,
                                                                self.pores(),
                                                                vox_len,
                                                                fibre_rad)

    def _export_fibre_image(self, mat_file='OpenPNMFibres'):
        r"""
        If the voronoi voxel method was implemented to calculate pore volumes
        an image of the fibre space has already been calculated and stored on
        the geometry.

        Parameters
        ----------
        mat_file : string
            Filename of Matlab file to save fibre image
        """
        if not hasattr(self, '_fibre_image'):
            logger.warning('This method only works when a fibre image exists, ' +
                           'please run make_fibre_image')
            return
        matlab_dict = {"fibres": self._fibre_image}
        savemat(mat_file, matlab_dict, format='5', long_field_names=True)

    def _get_fibre_slice(self, plane=None, index=None):
        r"""
        Plot an image of a slice through the fibre image
        plane contains percentage values of the length of the image in each axis

        Parameters
        ----------
        plane : array_like
            List of 3 values, [x,y,z], 2 must be zero and the other must be between
            zero and one representing the fraction of the domain to slice along
            the non-zero axis
        index : array_like
            similar to plane but instead of the fraction an index of the image is used
        """
        if not hasattr(self, '_fibre_image'):
            logger.warning('This method only works when a fibre image exists, ' +
                           'please run make_fibre_image')
            return None
        if plane is None and index is None:
            logger.warning('Please provide either a plane array or index array')
            return None
        if self._fibre_image is None:
            self.make_fibre_image()

        if plane is not None:
            if 'array' not in plane.__class__.__name__:
                plane = sp.asarray(plane)
            if sp.sum(plane == 0) != 2:
                logger.warning('Plane argument must have two zero valued ' +
                               'elements to produce a planar slice')
                return None
            # Convert the fractional position into a voxel index.
            img_shape = sp.asarray(sp.shape(self._fibre_image))
            s = sp.around(plane*img_shape).astype(int)
        elif index is not None:
            if 'array' not in index.__class__.__name__:
                index = sp.asarray(index)
            if sp.sum(index == 0) != 2:
                logger.warning('Index argument must have two zero valued ' +
                               'elements to produce a planar slice')
                return None
            if 'int' not in str(index.dtype):
                index = sp.around(index).astype(int)
            s = index

        # Slice along whichever axis carries the non-zero coordinate.
        if s[0] != 0:
            slice_image = self._fibre_image[s[0], :, :]
        elif s[1] != 0:
            slice_image = self._fibre_image[:, s[1], :]
        else:
            slice_image = self._fibre_image[:, :, s[2]]

        return slice_image

    def plot_fibre_slice(self, plane=None, index=None, fig=None):
        r"""
        Plot one slice from the fibre image

        Parameters
        ----------
        plane : array_like
            List of 3 values, [x,y,z], 2 must be zero and the other must be between
            zero and one representing the fraction of the domain to slice along
            the non-zero axis
        index : array_like
            similar to plane but instead of the fraction an index of the image is used
        """
        if not hasattr(self, '_fibre_image'):
            logger.warning('This method only works when a fibre image exists, ' +
                           'please run make_fibre_image')
            return
        slice_image = self._get_fibre_slice(plane, index)
        if slice_image is not None:
            if fig is None:
                # Assign the new figure so it is actually returned to the
                # caller (previously a figure was created but None returned).
                fig = plt.figure()
            plt.imshow(slice_image.T, cmap='Greys', origin='lower',
                       interpolation='nearest')

        return fig

    def plot_porosity_profile(self, fig=None):
        r"""
        Return a porosity profile in all orthogonal directions by summing
        the voxel volumes in consectutive slices.
        """
        if not hasattr(self, '_fibre_image'):
            logger.warning('This method only works when a fibre image exists, ' +
                           'please run make_fibre_image')
            return
        if self._fibre_image is None:
            self.make_fibre_image()

        # Fraction of void voxels in each slice along each axis.
        img_shape = sp.asarray(sp.shape(self._fibre_image))
        px = sp.zeros(img_shape[0])
        py = sp.zeros(img_shape[1])
        pz = sp.zeros(img_shape[2])

        for x in sp.arange(img_shape[0]):
            px[x] = sp.sum(self._fibre_image[x, :, :])
            px[x] /= sp.size(self._fibre_image[x, :, :])
        for y in sp.arange(img_shape[1]):
            py[y] = sp.sum(self._fibre_image[:, y, :])
            py[y] /= sp.size(self._fibre_image[:, y, :])
        for z in sp.arange(img_shape[2]):
            pz[z] = sp.sum(self._fibre_image[:, :, z])
            pz[z] /= sp.size(self._fibre_image[:, :, z])

        if fig is None:
            fig = plt.figure()
        ax = fig.gca()
        plots = []
        plots.append(plt.plot(sp.arange(img_shape[0])/img_shape[0],
                              px, 'r', label='x'))
        plots.append(plt.plot(sp.arange(img_shape[1])/img_shape[1],
                              py, 'g', label='y'))
        plots.append(plt.plot(sp.arange(img_shape[2])/img_shape[2],
                              pz, 'b', label='z'))
        plt.xlabel('Normalized Distance')
        plt.ylabel('Porosity')
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels, loc=1)
        plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
        return fig

    def compress_geometry(self, factor=None, preserve_fibres=False):
        r"""
        Adjust the vertices and recalculate geometry. Save fibre voxels before
        and after then put them back into the image to preserve fibre volume.
        Shape will not be conserved. Also make adjustments to the pore and throat
        properties given an approximate volume change from adding the missing fibre
        voxels back in

        Parameters
        ----------
        factor : array_like
            List of 3 values, [x,y,z], 2 must be one and the other must be between
            zero and one representing the fraction of the domain height to compress
            to.
        preserve_fibres : boolean
            If the fibre image has been generated and used to calculate pore volumes
            then preserve fibre volume artificially by adjusting pore and throat sizes
        """
        if factor is None:
            logger.warning('Please supply a compression factor in the form ' +
                           '[1,1,CR], with CR < 1')
            return
        if sp.any(sp.asarray(factor) > 1):
            logger.warning('The supplied compression factor is greater than 1, ' +
                           'the method is not tested for stretching')
            return
        # uncompressed number of fibre voxels in each pore
        fvu = self["pore.fibre_voxels"]
        r1 = self["pore.diameter"]/2
        # Most likely boundary pores - prevents divide by zero (vol change zero)
        r1[r1 == 0] = 1
        vo.scale(network=self._net, scale_factor=factor, preserve_vol=False)
        self.models.regenerate()

        if preserve_fibres and self._voxel_vol:
            # compressed number of fibre voxels in each pore
            fvc = self["pore.fibre_voxels"]
            # amount to adjust pore volumes by
            # (based on 1 micron cubed voxels for volume calc)
            vol_diff = (fvu-fvc)*1e-18
            # don't account for positive volume changes
            vol_diff[vol_diff < 0] = 0
            pv1 = self["pore.volume"].copy()
            self["pore.volume"] -= vol_diff
            self["pore.volume"][self["pore.volume"] < 0.0] = 0.0
            pv2 = self["pore.volume"].copy()
            # Now need to adjust the pore diameters
            from scipy.special import cbrt
            rdiff = cbrt(3*np.abs(vol_diff)/(4*sp.pi))
            self["pore.diameter"] -= 2*rdiff*sp.sign(vol_diff)
            # Now as a crude approximation adjust all the throat areas and
            # diameters by the same ratio as the increase in a spherical pore
            # surface area
            spd = np.ones(len(fvu))
            spd[fvu > 0] = (pv2[fvu > 0]/pv1[fvu > 0])**(2/3)
            spd[spd > 1.0] = 1.0
            tconns = self._net["throat.conns"][self.map_throats(self._net,
                                                                self.throats())]
            # Need to work out the average volume change for the two pores
            # connected by each throat Boundary pores will be connected to a
            # throat outside this geometry if there are multiple geoms so get mapping
            mapping = self._net.map_pores(self, self._net.pores(),
                                          return_mapping=True)
            source = list(mapping['source'])
            target = list(mapping['target'])
            ta_diff_avg = np.ones(len(tconns))
            for i in np.arange(len(tconns)):
                np1, np2 = tconns[i]
                if np1 in source and np2 in source:
                    gp1 = target[source.index(np1)]
                    gp2 = target[source.index(np2)]
                    ta_diff_avg[i] = (spd[gp1] + spd[gp2]) / 2
                elif np1 in source:
                    gp1 = target[source.index(np1)]
                    ta_diff_avg[i] = spd[gp1]
                elif np2 in source:
                    gp2 = target[source.index(np2)]
                    ta_diff_avg[i] = spd[gp2]
            self["throat.area"] *= ta_diff_avg
            self["throat.area"][self["throat.area"] < 0] = 0
            self["throat.diameter"] = 2*sp.sqrt(self["throat.area"]/sp.pi)
            self["throat.indiameter"] *= sp.sqrt(ta_diff_avg)
        else:
            logger.warning('Fibre volume will not be conserved under ' +
                           'compression')
        # Remove pores with zero throats
        topo.trim_occluded_throats(self._net)
| |
import random
import time
import math
import os
from collections import deque
from kivy.tests import UnitTestTouch
__all__ = ('UnitKivyApp', )
class AsyncUnitTestTouch(UnitTestTouch):
    """Fake touch device that feeds touch events through the Kivy event
    loop, for driving widgets in async unit tests."""

    def __init__(self, *largs, **kwargs):
        # Not grabbed exclusively by any widget yet.
        self.grab_exclusive_class = None
        self.is_touch = True
        super(AsyncUnitTestTouch, self).__init__(*largs, **kwargs)

    def touch_down(self, *args):
        # Inject a "touch begin" event into the running event loop.
        # NOTE(review): self.eventloop presumably comes from UnitTestTouch.
        self.eventloop._dispatch_input("begin", self)

    def touch_move(self, x, y):
        # The event loop expects normalized (0-1) window coordinates.
        win = self.eventloop.window
        self.move({
            "x": x / float(win.width),
            "y": y / float(win.height)
        })
        self.eventloop._dispatch_input("update", self)

    def touch_up(self, *args):
        # Inject a "touch end" event into the running event loop.
        self.eventloop._dispatch_input("end", self)
_unique_value = object
class WidgetResolver(object):
    """Searches the widget tree for a widget matching given attribute
    values and/or predicate functions.

    It assumes that the widget tree strictly forms a DAG.
    """

    # Widget the search starts from.
    base_widget = None
    # Cached result of a previous successful search, if any.
    matched_widget = None
    # NOTE(review): class-level mutable defaults; they are re-assigned per
    # instance in __init__, so instances do not share state.
    _kwargs_filter = {}
    _funcs_filter = []

    def __init__(self, base_widget, **kwargs):
        self.base_widget = base_widget
        self._kwargs_filter = {}
        self._funcs_filter = []
        super(WidgetResolver, self).__init__(**kwargs)

    def __call__(self):
        # Resolved widget: the cached match if one was found; the base
        # widget when no filters were set; otherwise None.
        if self.matched_widget is not None:
            return self.matched_widget

        if not self._kwargs_filter and not self._funcs_filter:
            return self.base_widget

        return None

    def match(self, **kwargs_filter):
        """Add attribute/value pairs a widget must have to match."""
        self._kwargs_filter.update(kwargs_filter)

    def match_funcs(self, funcs_filter=()):
        """Add predicate functions a widget must satisfy to match."""
        self._funcs_filter.extend(funcs_filter)

    def check_widget(self, widget):
        # A widget matches when every predicate passes and every requested
        # attribute exists and equals the requested value.
        if not all(func(widget) for func in self._funcs_filter):
            return False
        for attr, val in self._kwargs_filter.items():
            if getattr(widget, attr, _unique_value) != val:
                return False
        return True

    def not_found(self, op):
        """Raise ValueError describing the failed *op* traversal."""
        raise ValueError(
            'Cannot find widget matching <{}, {}> starting from base '
            'widget "{}" doing "{}" traversal'.format(
                self._kwargs_filter, self._funcs_filter, self.base_widget, op))

    def down(self, **kwargs_filter):
        """Breadth-first search down through children for a match."""
        self.match(**kwargs_filter)
        check = self.check_widget

        fifo = deque([self.base_widget])
        while fifo:
            widget = fifo.popleft()
            if check(widget):
                return WidgetResolver(base_widget=widget)
            fifo.extend(widget.children)

        self.not_found('down')

    def up(self, **kwargs_filter):
        """Walk up through parents for a match."""
        self.match(**kwargs_filter)
        check = self.check_widget

        parent = self.base_widget
        while parent is not None:
            if check(parent):
                return WidgetResolver(base_widget=parent)

            new_parent = parent.parent
            # Window is its own parent oO
            if new_parent is parent:
                break
            parent = new_parent

        self.not_found('up')

    def family_up(self, **kwargs_filter):
        """Search each ancestor's subtree, skipping the subtree already
        searched at the previous level, moving upwards until a match."""
        self.match(**kwargs_filter)
        check = self.check_widget

        base_widget = self.base_widget
        already_checked_base = None
        while base_widget is not None:
            fifo = deque([base_widget])
            while fifo:
                widget = fifo.popleft()
                # don't check the child we checked before moving up
                if widget is already_checked_base:
                    continue

                if check(widget):
                    return WidgetResolver(base_widget=widget)
                fifo.extend(widget.children)

            already_checked_base = base_widget
            new_base_widget = base_widget.parent
            # Window is its own parent oO
            if new_base_widget is base_widget:
                break
            base_widget = new_base_widget

        self.not_found('family_up')
class UnitKivyApp(object):
    """Base class to use with async test apps.

    Tracks the app lifecycle via ``app_has_started``/``app_has_stopped``
    and provides coroutine helpers that simulate touch and keyboard input
    through the Kivy event loop.
    """

    app_has_started = False

    app_has_stopped = False

    # Sleep coroutine of the async library driving the Kivy clock.
    async_sleep = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        from kivy.clock import Clock
        self.async_sleep = Clock._async_lib.sleep

        def started_app(*largs):
            self.app_has_started = True
        self.fbind('on_start', started_app)

        def stopped_app(*largs):
            self.app_has_stopped = True
        self.fbind('on_stop', stopped_app)

    async def async_run(self, async_lib=None):
        """Run the app asynchronously, optionally (re)initializing the
        async library used by the Kivy clock."""
        from kivy.clock import Clock
        if async_lib is not None:
            Clock.init_async_lib(async_lib)
        self.async_sleep = Clock._async_lib.sleep

        return await super(UnitKivyApp, self).async_run(async_lib=async_lib)

    def resolve_widget(self, base_widget=None):
        """Return a WidgetResolver rooted at *base_widget* (defaults to
        the Window)."""
        if base_widget is None:
            from kivy.core.window import Window
            base_widget = Window
        return WidgetResolver(base_widget=base_widget)

    async def wait_clock_frames(self, n, sleep_time=1 / 60.):
        """Sleep until the Kivy clock has advanced by *n* frames."""
        from kivy.clock import Clock
        frames_start = Clock.frames
        while Clock.frames < frames_start + n:
            await self.async_sleep(sleep_time)

    def get_widget_pos_pixel(self, widget, positions):
        """Render *widget* into an offscreen FBO and return the RGBA byte
        tuples at the given (x, y) *positions*."""
        from kivy.graphics import Fbo, ClearColor, ClearBuffers

        # Temporarily detach the widget's canvas from its parent so the
        # widget can be drawn alone into the FBO.
        canvas_parent_index = -2
        if widget.parent is not None:
            canvas_parent_index = widget.parent.canvas.indexof(widget.canvas)
            if canvas_parent_index > -1:
                widget.parent.canvas.remove(widget.canvas)

        w, h = int(widget.width), int(widget.height)
        fbo = Fbo(size=(w, h), with_stencilbuffer=True)

        with fbo:
            ClearColor(0, 0, 0, 0)
            ClearBuffers()

        fbo.add(widget.canvas)
        fbo.draw()
        pixels = fbo.pixels
        fbo.remove(widget.canvas)

        # Restore the canvas where it was.
        if widget.parent is not None and canvas_parent_index > -1:
            widget.parent.canvas.insert(canvas_parent_index, widget.canvas)

        values = []
        for x, y in positions:
            x = int(x)
            y = int(y)
            # 4 bytes (RGBA) per pixel, rows of w pixels.
            i = y * w * 4 + x * 4
            values.append(tuple(pixels[i:i + 4]))

        return values

    async def do_touch_down_up(
            self, pos=None, widget=None, duration=.2, pos_jitter=None,
            widget_jitter=False, jitter_dt=1 / 15., end_on_pos=False):
        """Touch down then up at *pos* (window coordinates) or on *widget*,
        optionally jittering the touch for *duration* seconds.

        Async generator yielding ('down'/'move'/'up', touch.pos) events.
        """
        if widget is None:
            x, y = pos
        else:
            if pos is None:
                x, y = widget.to_window(*widget.center)
            else:
                x, y = widget.to_window(*pos, initial=False)

        touch = AsyncUnitTestTouch(x, y)

        ts = time.perf_counter()
        touch.touch_down()
        await self.wait_clock_frames(1)
        yield 'down', touch.pos

        # No jitter requested: simple press-hold-release.
        if not pos_jitter and not widget_jitter:
            await self.async_sleep(duration)

            touch.touch_up()
            await self.wait_clock_frames(1)
            yield 'up', touch.pos

            return

        moved = False
        if pos_jitter:
            dx, dy = pos_jitter
        else:
            dx = widget.width / 2.
            dy = widget.height / 2.

        # Random jitter around the original point until the duration is up.
        while time.perf_counter() - ts < duration:
            moved = True
            await self.async_sleep(jitter_dt)

            touch.touch_move(
                x + (random.random() * 2 - 1) * dx,
                y + (random.random() * 2 - 1) * dy
            )
            await self.wait_clock_frames(1)
            yield 'move', touch.pos

        # Optionally return to the original position before releasing.
        if end_on_pos and moved:
            touch.touch_move(x, y)
            await self.wait_clock_frames(1)
            yield 'move', touch.pos

        touch.touch_up()
        await self.wait_clock_frames(1)
        yield 'up', touch.pos

    async def do_touch_drag(
            self, pos=None, widget=None,
            widget_loc=('center_x', 'center_y'), dx=0, dy=0,
            target_pos=None, target_widget=None,
            target_widget_loc=('center_x', 'center_y'), long_press=0,
            duration=.2, drag_n=5):
        """Initiates a touch down, followed by some dragging to a target
        location, ending with a touch up.

        `origin`: These parameters specify where the drag starts.

        - If ``widget`` is None, it starts at ``pos`` (in window coordinates).
          If ``dx``/``dy`` is used, it is in the window coordinate system also.
        - If ``pos`` is None, it starts on the ``widget`` as specified by
          ``widget_loc``. If ``dx``/``dy`` is used, it is in the ``widget``
          coordinate system.
        - If neither is None, it starts at ``pos``, but in the ``widget``'s
          coordinate system (:meth:`~kivy.uix.widget.Widget.to_window` is used
          on it). If ``dx``/``dy`` is used, it is in the ``widget``
          coordinate system.

        `target`: These parameters specify where the drag ends.

        - If ``target_pos`` and ``target_widget`` is None, then ``dx`` and
          ``dy`` is used relative to the position where the drag started.
        - If ``target_widget`` is None, it ends at ``target_pos``
          (in window coordinates).
        - If ``target_pos`` is None, it ends on the ``target_widget`` as
          specified by ``target_widget_loc``.
        - If neither is None, it starts at ``target_pos``, but in the
          ``target_widget``'s coordinate system
          (:meth:`~kivy.uix.widget.Widget.to_window` is used on it).

        When ``widget`` and/or ``target_widget`` are specified, ``widget_loc``
        and ``target_widget_loc``, respectively, indicate where on the widget
        the drag starts/ends. It is a a tuple with property names of the widget
        to loop up to get the value. The default is
        ``('center_x', 'center_y')`` so the drag would start/end in the
        widget's center.
        """
        # Resolve the start point (x, y) and a provisional end point (tx, ty).
        if widget is None:
            x, y = pos
            tx, ty = x + dx, y + dy
        else:
            if pos is None:
                w_x = getattr(widget, widget_loc[0])
                w_y = getattr(widget, widget_loc[1])
                x, y = widget.to_window(w_x, w_y)
                tx, ty = widget.to_window(w_x + dx, w_y + dy)
            else:
                x, y = widget.to_window(*pos, initial=False)
                tx, ty = widget.to_window(
                    pos[0] + dx, pos[1] + dy, initial=False)

        # Explicit target overrides the dx/dy-derived end point.
        if target_pos is not None:
            if target_widget is None:
                tx, ty = target_pos
            else:
                tx, ty = target_pos = target_widget.to_window(
                    *target_pos, initial=False)
        elif target_widget is not None:
            w_x = getattr(target_widget, target_widget_loc[0])
            w_y = getattr(target_widget, target_widget_loc[1])
            tx, ty = target_pos = target_widget.to_window(w_x, w_y)
        else:
            target_pos = tx, ty

        touch = AsyncUnitTestTouch(x, y)

        touch.touch_down()
        await self.wait_clock_frames(1)
        if long_press:
            await self.async_sleep(long_press)
        yield 'down', touch.pos

        # Move in drag_n equal steps, pacing sleeps to fit within duration.
        dx = (tx - x) / drag_n
        dy = (ty - y) / drag_n
        ts0 = time.perf_counter()

        for i in range(drag_n):
            await self.async_sleep(
                max(0., duration - (time.perf_counter() - ts0)) / (drag_n - i))

            touch.touch_move(x + (i + 1) * dx, y + (i + 1) * dy)
            await self.wait_clock_frames(1)
            yield 'move', touch.pos

        # Snap exactly onto the target in case of float rounding.
        if touch.pos != target_pos:
            touch.touch_move(*target_pos)
            await self.wait_clock_frames(1)
            yield 'move', touch.pos

        touch.touch_up()
        await self.wait_clock_frames(1)
        yield 'up', touch.pos

    async def do_touch_drag_follow(
            self, pos=None, widget=None,
            widget_loc=('center_x', 'center_y'),
            target_pos=None, target_widget=None,
            target_widget_loc=('center_x', 'center_y'), long_press=0,
            duration=.2, drag_n=5, max_n=25):
        """Very similar to :meth:`do_touch_drag`, except it follows the target
        widget, even if the target widget moves as a result of the drag, the
        drag will follow it until it's on the target widget.

        `origin`: These parameters specify where the drag starts.

        - If ``widget`` is None, it starts at ``pos`` (in window coordinates).
        - If ``pos`` is None, it starts on the ``widget`` as specified by
          ``widget_loc``.
        - If neither is None, it starts at ``pos``, but in the ``widget``'s
          coordinate system (:meth:`~kivy.uix.widget.Widget.to_window` is used
          on it).

        `target`: These parameters specify where the drag ends.

        - If ``target_pos`` is None, it ends on the ``target_widget`` as
          specified by ``target_widget_loc``.
        - If ``target_pos`` is not None, it starts at ``target_pos``, but in
          the ``target_widget``'s coordinate system
          (:meth:`~kivy.uix.widget.Widget.to_window` is used on it).

        When ``widget`` and/or ``target_widget`` are specified, ``widget_loc``
        and ``target_widget_loc``, respectively, indicate where on the widget
        the drag starts/ends. It is a a tuple with property names of the widget
        to loop up to get the value. The default is
        ``('center_x', 'center_y')`` so the drag would start/end in the
        widget's center.
        """
        if widget is None:
            x, y = pos
        else:
            if pos is None:
                w_x = getattr(widget, widget_loc[0])
                w_y = getattr(widget, widget_loc[1])
                x, y = widget.to_window(w_x, w_y)
            else:
                x, y = widget.to_window(*pos, initial=False)

        if target_widget is None:
            raise ValueError('target_widget must be specified')

        def get_target():
            # Re-resolve the target each step: it may move during the drag.
            if target_pos is not None:
                return target_widget.to_window(*target_pos, initial=False)
            else:
                wt_x = getattr(target_widget, target_widget_loc[0])
                wt_y = getattr(target_widget, target_widget_loc[1])
                return target_widget.to_window(wt_x, wt_y)

        touch = AsyncUnitTestTouch(x, y)

        touch.touch_down()
        await self.wait_clock_frames(1)
        if long_press:
            await self.async_sleep(long_press)
        yield 'down', touch.pos

        ts0 = time.perf_counter()
        tx, ty = get_target()
        i = 0
        # Step towards the (possibly moving) target; bail out after max_n
        # iterations to avoid an endless chase.
        while not (math.isclose(touch.x, tx) and math.isclose(touch.y, ty)):
            if i >= max_n:
                raise Exception(
                    'Exceeded the maximum number of iterations, '
                    'but {} != {}'.format(touch.pos, (tx, ty)))

            rem_i = max(1, drag_n - i)
            rem_t = max(0., duration - (time.perf_counter() - ts0)) / rem_i
            i += 1
            await self.async_sleep(rem_t)

            x, y = touch.pos
            touch.touch_move(x + (tx - x) / rem_i, y + (ty - y) / rem_i)
            await self.wait_clock_frames(1)
            yield 'move', touch.pos
            tx, ty = get_target()

        touch.touch_up()
        await self.wait_clock_frames(1)
        yield 'up', touch.pos

    async def do_touch_drag_path(
            self, path, axis_widget=None, long_press=0, duration=.2):
        """Drags the touch along the specified path.

        :parameters:

            `path`: list
                A list of position tuples the touch will follow. The first
                item is used for the touch down and the rest for the move.
            `axis_widget`: a Widget
                If None, the path coordinates is in window coordinates,
                otherwise, we will first transform the path coordinates
                to window coordinates using
                :meth:`~kivy.uix.widget.Widget.to_window` of the specified
                widget.
        """
        if axis_widget is not None:
            path = [axis_widget.to_window(*p, initial=False) for p in path]
        x, y = path[0]
        path = path[1:]

        touch = AsyncUnitTestTouch(x, y)

        touch.touch_down()
        await self.wait_clock_frames(1)
        if long_press:
            await self.async_sleep(long_press)
        yield 'down', touch.pos

        ts0 = time.perf_counter()
        n = len(path)
        for i, (x2, y2) in enumerate(path):
            # Pace sleeps so the whole path fits within duration.
            await self.async_sleep(
                max(0., duration - (time.perf_counter() - ts0)) / (n - i))

            touch.touch_move(x2, y2)
            await self.wait_clock_frames(1)
            yield 'move', touch.pos

        touch.touch_up()
        await self.wait_clock_frames(1)
        yield 'up', touch.pos

    async def do_keyboard_key(
            self, key, modifiers=(), duration=.05, num_press=1):
        """Simulate pressing *key* (with *modifiers*) *num_press* times
        over *duration* seconds, then releasing it.

        Async generator yielding ('down'/'up', event-args) tuples.
        """
        from kivy.core.window import Window
        if key == ' ':
            key = 'spacebar'
        key_lower = key.lower()
        key_code = Window._system_keyboard.string_to_keycode(key_lower)

        known_modifiers = {'shift', 'alt', 'ctrl', 'meta'}
        if set(modifiers) - known_modifiers:
            raise ValueError('Unknown modifiers "{}"'.
                             format(set(modifiers) - known_modifiers))

        # Keycodes that never generate on_textinput events.
        special_keys = {
            27: 'escape',
            9: 'tab',
            8: 'backspace',
            13: 'enter',
            127: 'del',
            271: 'enter',
            273: 'up',
            274: 'down',
            275: 'right',
            276: 'left',
            278: 'home',
            279: 'end',
            280: 'pgup',
            281: 'pgdown',
            300: 'numlock',
            301: 'capslock',
            145: 'screenlock',
        }

        text = None
        try:
            text = chr(key_code)
            # Preserve the caller's case (e.g. an uppercase letter).
            if key_lower != key:
                text = key
        except ValueError:
            pass

        dt = duration / num_press
        for i in range(num_press):
            await self.async_sleep(dt)

            Window.dispatch('on_key_down', key_code, 0, text, modifiers)
            # Only printable, unmodified keys generate text input.
            if (key not in known_modifiers and
                    key_code not in special_keys and
                    not (known_modifiers & set(modifiers))):
                Window.dispatch('on_textinput', text)
            await self.wait_clock_frames(1)
            yield 'down', (key, key_code, 0, text, modifiers)

        Window.dispatch('on_key_up', key_code, 0)
        await self.wait_clock_frames(1)
        yield 'up', (key, key_code, 0, text, modifiers)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import threading
import time
import unittest
from collections import namedtuple
from pyspark import SparkFiles, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest, SPARK_HOME
class CheckpointTests(ReusedPySparkTestCase):
    """Tests for reliable (on-disk) RDD checkpointing."""

    def setUp(self):
        # NamedTemporaryFile is used only to reserve a unique path; the file
        # is removed so Spark can create the checkpoint directory itself.
        self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(self.checkpointDir.name)
        self.sc.setCheckpointDir(self.checkpointDir.name)

    def tearDown(self):
        shutil.rmtree(self.checkpointDir.name)

    def test_basic_checkpointing(self):
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))

        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertTrue(flatMappedRDD.getCheckpointFile() is None)

        flatMappedRDD.checkpoint()
        result = flatMappedRDD.collect()
        # Checkpointing completes asynchronously after the job finishes.
        time.sleep(1)  # 1 second
        self.assertTrue(flatMappedRDD.isCheckpointed())
        self.assertEqual(flatMappedRDD.collect(), result)
        self.assertEqual("file:" + self.checkpointDir.name,
                         os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))

    def test_checkpoint_and_restore(self):
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: [x])

        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertTrue(flatMappedRDD.getCheckpointFile() is None)

        flatMappedRDD.checkpoint()
        flatMappedRDD.count()  # forces a checkpoint to be computed
        time.sleep(1)  # 1 second

        self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
        # Rebuild the RDD from the checkpoint file and verify the data.
        recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
                                            flatMappedRDD._jrdd_deserializer)
        self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
    """Local (executor-storage) checkpointing of an RDD."""

    def test_basic_localcheckpointing(self):
        rdd = self.sc.parallelize([1, 2, 3, 4]).flatMap(lambda x: range(1, x + 1))
        # Neither form of checkpointing has happened yet.
        self.assertFalse(rdd.isCheckpointed())
        self.assertFalse(rdd.isLocallyCheckpointed())
        rdd.localCheckpoint()
        expected = rdd.collect()
        time.sleep(1)  # give the checkpoint a second to materialize
        # A local checkpoint counts as checkpointed, and is flagged as local.
        self.assertTrue(rdd.isCheckpointed())
        self.assertTrue(rdd.isLocallyCheckpointed())
        self.assertEqual(rdd.collect(), expected)
class AddFileTests(PySparkTestCase):
    """Tests for SparkContext.addFile / addPyFile dependency distribution."""

    def test_add_py_file(self):
        # To ensure that we're actually testing addPyFile's effects, check that
        # this job fails due to `userlibrary` not being on the Python path:
        # disable logging in log4j temporarily
        def func(x):
            from userlibrary import UserClass
            return UserClass().hello()
        with QuietTest(self.sc):
            self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
        # Add the file, so the job should now succeed:
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        res = self.sc.parallelize(range(2)).map(func).first()
        self.assertEqual("Hello World!", res)

    def test_add_file_locally(self):
        """addFile makes the file fetchable on the driver via SparkFiles.get."""
        path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
        self.sc.addFile(path)
        download_path = SparkFiles.get("hello.txt")
        # The fetched copy lives under Spark's temp dir, not at the source path.
        self.assertNotEqual(path, download_path)
        with open(download_path) as test_file:
            self.assertEqual("Hello World!\n", test_file.readline())

    def test_add_file_recursively_locally(self):
        """addFile with recursive=True distributes a whole directory tree."""
        path = os.path.join(SPARK_HOME, "python/test_support/hello")
        self.sc.addFile(path, True)
        download_path = SparkFiles.get("hello")
        self.assertNotEqual(path, download_path)
        with open(download_path + "/hello.txt") as test_file:
            self.assertEqual("Hello World!\n", test_file.readline())
        with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
            self.assertEqual("Sub Hello World!\n", test_file.readline())

    def test_add_py_file_locally(self):
        # To ensure that we're actually testing addPyFile's effects, check that
        # this fails due to `userlibrary` not being on the Python path:
        def func():
            from userlibrary import UserClass
        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        # After addPyFile the module is importable in the driver process too.
        from userlibrary import UserClass
        self.assertEqual("Hello World!", UserClass().hello())

    def test_add_egg_file_locally(self):
        # To ensure that we're actually testing addPyFile's effects, check that
        # this fails due to `userlibrary` not being on the Python path:
        def func():
            from userlib import UserClass
        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
        self.sc.addPyFile(path)
        from userlib import UserClass
        self.assertEqual("Hello World from inside a package!", UserClass().hello())

    def test_overwrite_system_module(self):
        """A user-supplied module shadows a same-named stdlib module."""
        self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
        # Visible in the driver...
        import SimpleHTTPServer
        self.assertEqual("My Server", SimpleHTTPServer.__name__)

        def func(x):
            import SimpleHTTPServer
            return SimpleHTTPServer.__name__
        # ...and in the executors.
        self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class ContextTests(unittest.TestCase):
    """Lifecycle tests for SparkContext (creation, stop, context-manager use)."""

    def test_failed_sparkcontext_creation(self):
        # Regression test for SPARK-1550
        self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))

    def test_get_or_create(self):
        with SparkContext.getOrCreate() as sc:
            # getOrCreate must hand back the already-active context.
            self.assertTrue(SparkContext.getOrCreate() is sc)

    def test_parallelize_eager_cleanup(self):
        with SparkContext() as sc:
            temp_files = os.listdir(sc._temp_dir)
            # The RDD itself is unused on purpose: the point is that
            # parallelize() must remove its serialized-batch temp file eagerly,
            # leaving the temp dir unchanged.
            rdd = sc.parallelize([0, 1, 2])
            post_parallalize_temp_files = os.listdir(sc._temp_dir)
            self.assertEqual(temp_files, post_parallalize_temp_files)

    def test_set_conf(self):
        # This is for an internal use case. When there is an existing SparkContext,
        # SparkSession's builder needs to set configs into SparkContext's conf.
        sc = SparkContext()
        sc._conf.set("spark.test.SPARK16224", "SPARK16224")
        self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
        sc.stop()

    def test_stop(self):
        sc = SparkContext()
        self.assertNotEqual(SparkContext._active_spark_context, None)
        sc.stop()
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with(self):
        # Leaving the `with` block must stop the context.
        with SparkContext() as sc:
            self.assertNotEqual(SparkContext._active_spark_context, None)
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with_exception(self):
        # The context must be stopped even when the block raises.
        try:
            with SparkContext() as sc:
                self.assertNotEqual(SparkContext._active_spark_context, None)
                raise Exception()
        except:
            pass
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with_stop(self):
        # Stopping inside the block must not break __exit__.
        with SparkContext() as sc:
            self.assertNotEqual(SparkContext._active_spark_context, None)
            sc.stop()
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_progress_api(self):
        """Exercise the status-tracker API against a running, then cancelled, job."""
        with SparkContext() as sc:
            sc.setJobGroup('test_progress_api', '', True)
            # Each task sleeps 100s so the job is still active when inspected.
            rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))

            def run():
                try:
                    rdd.count()
                except Exception:
                    # Expected: the job is cancelled below.
                    pass
            t = threading.Thread(target=run)
            t.daemon = True
            t.start()
            # wait for scheduler to start
            time.sleep(1)
            tracker = sc.statusTracker()
            jobIds = tracker.getJobIdsForGroup('test_progress_api')
            self.assertEqual(1, len(jobIds))
            job = tracker.getJobInfo(jobIds[0])
            self.assertEqual(1, len(job.stageIds))
            stage = tracker.getStageInfo(job.stageIds[0])
            self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
            sc.cancelAllJobs()
            t.join()
            # wait for event listener to update the status
            time.sleep(1)
            job = tracker.getJobInfo(jobIds[0])
            self.assertEqual('FAILED', job.status)
            self.assertEqual([], tracker.getActiveJobsIds())
            self.assertEqual([], tracker.getActiveStageIds())
            sc.stop()

    def test_startTime(self):
        with SparkContext() as sc:
            self.assertGreater(sc.startTime, 0)

    def test_forbid_insecure_gateway(self):
        # Fail immediately if you try to create a SparkContext
        # with an insecure gateway
        parameters = namedtuple('MockGatewayParameters', 'auth_token')(None)
        mock_insecure_gateway = namedtuple('MockJavaGateway', 'gateway_parameters')(parameters)
        with self.assertRaises(ValueError) as context:
            SparkContext(gateway=mock_insecure_gateway)
        self.assertIn("insecure Py4j gateway", str(context.exception))
if __name__ == "__main__":
    # Re-import the tests under their canonical module name so unittest
    # reports them consistently when this file is run directly.
    from pyspark.tests.test_context import *
    try:
        # Prefer XML output (for CI report collection) when xmlrunner is present.
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| |
"""
Support for controlling raspihats boards.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/raspihats/
"""
import logging
import threading
import time
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
)
# Third-party libraries installed on demand by Home Assistant.
REQUIREMENTS = ['raspihats==2.2.3',
                'smbus-cffi==0.5.1']

_LOGGER = logging.getLogger(__name__)

DOMAIN = 'raspihats'

# Configuration keys shared by the raspihats platforms.
CONF_I2C_HATS = 'i2c_hats'
CONF_BOARD = 'board'
CONF_ADDRESS = 'address'
CONF_CHANNELS = 'channels'
CONF_INDEX = 'index'
CONF_INVERT_LOGIC = 'invert_logic'
CONF_INITIAL_STATE = 'initial_state'

# Board names accepted for CONF_BOARD (must match raspihats class names).
I2C_HAT_NAMES = [
    'Di16', 'Rly10', 'Di6Rly6',
    'DI16ac', 'DQ10rly', 'DQ16oc', 'DI6acDQ6rly'
]

# Key under which the shared I2CHatsManager is stored in hass.data.
I2C_HATS_MANAGER = 'I2CH_MNG'
# pylint: disable=unused-argument
def setup(hass, config):
    """Set up the raspihats component and tie keep-alive to HA lifecycle."""
    hass.data[I2C_HATS_MANAGER] = I2CHatsManager()

    def _start_keep_alive(event):
        """Start the I2C-HATs keep-alive thread."""
        hass.data[I2C_HATS_MANAGER].start_keep_alive()

    def _stop_keep_alive(event):
        """Stop the I2C-HATs keep-alive thread."""
        hass.data[I2C_HATS_MANAGER].stop_keep_alive()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_keep_alive)
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_keep_alive)
    return True
def log_message(source, *parts):
    """Build a log message of the form ``ClassName: part1: part2: ...``.

    *source* contributes its class name; each extra part is stringified.
    """
    pieces = [source.__class__.__name__]
    pieces.extend(str(part) for part in parts)
    return ": ".join(pieces)
class I2CHatsException(Exception):
    """I2C-HATs exception.

    Raised by I2CHatsManager's read/write helpers when the underlying
    raspihats transfer fails (wraps raspihats' ResponseException).
    """
class I2CHatsDIScanner(object):
    """Scan an I2C-HAT's digital inputs and fire per-channel edge callbacks.

    Scanner state (the previously read value and the registered callbacks)
    is stored as extra attributes attached to the HAT's digital-inputs
    object itself.
    """

    _DIGITAL_INPUTS = "di"
    _OLD_VALUE = "old_value"
    _CALLBACKS = "callbacks"

    def setup(self, i2c_hat):
        """Attach scanner bookkeeping attributes to *i2c_hat*'s inputs."""
        if not hasattr(i2c_hat, self._DIGITAL_INPUTS):
            return
        digital_inputs = getattr(i2c_hat, self._DIGITAL_INPUTS)
        # Previous value (None until the first scan) and {channel: callback}.
        setattr(digital_inputs, self._OLD_VALUE, None)
        setattr(digital_inputs, self._CALLBACKS, {})

    def register_callback(self, i2c_hat, channel, callback):
        """Register *callback* to fire on edges of *channel*."""
        if not hasattr(i2c_hat, self._DIGITAL_INPUTS):
            return
        digital_inputs = getattr(i2c_hat, self._DIGITAL_INPUTS)
        getattr(digital_inputs, self._CALLBACKS)[channel] = callback

    def scan(self, i2c_hat):
        """Read the inputs once and invoke callbacks for changed channels."""
        if not hasattr(i2c_hat, self._DIGITAL_INPUTS):
            return
        digital_inputs = getattr(i2c_hat, self._DIGITAL_INPUTS)
        callbacks = getattr(digital_inputs, self._CALLBACKS)
        previous = getattr(digital_inputs, self._OLD_VALUE)
        value = digital_inputs.value  # i2c data transfer
        # First scan only seeds the old value; edges need a prior reading.
        if previous is not None and value != previous:
            for channel in range(len(digital_inputs.channels)):
                state = (value >> channel) & 0x01
                old_state = (previous >> channel) & 0x01
                if state != old_state:
                    callback = callbacks.get(channel)
                    if callback is not None:
                        callback(state)
        setattr(digital_inputs, self._OLD_VALUE, value)
class I2CHatsManager(threading.Thread):
    """Manages all I2C-HATs instances.

    Runs as a background thread that periodically scans digital inputs and
    tracks per-board online/offline state. All access to the board registry
    is serialized through ``self._lock``.
    """

    # Attribute names attached dynamically to each i2c_hat instance.
    _EXCEPTION = "exception"   # last ResponseException, None, or absent when online
    _CALLBACKS = "callbacks"   # {id: online-callback}

    def __init__(self):
        """Init I2C-HATs Manager."""
        threading.Thread.__init__(self)
        self._lock = threading.Lock()
        self._i2c_hats = {}            # {address: i2c_hat instance}
        self._run = False              # keep-alive loop flag
        self._di_scanner = I2CHatsDIScanner()

    def register_board(self, board, address):
        """Register I2C-HAT (idempotent per address)."""
        with self._lock:
            i2c_hat = self._i2c_hats.get(address)
            if i2c_hat is None:
                # pylint: disable=import-error
                import raspihats.i2c_hats as module
                # Board name maps directly onto a raspihats class.
                constructor = getattr(module, board)
                i2c_hat = constructor(address)
                setattr(i2c_hat, self._CALLBACKS, {})
                # Setting exception attribute will trigger online callbacks
                # when keep alive thread starts.
                setattr(i2c_hat, self._EXCEPTION, None)
                self._di_scanner.setup(i2c_hat)
                self._i2c_hats[address] = i2c_hat
                status_word = i2c_hat.status  # read status_word to reset bits
                _LOGGER.info(
                    log_message(self, i2c_hat, "registered", status_word)
                )

    def run(self):
        """Keep alive for I2C-HATs (thread body)."""
        # pylint: disable=import-error
        from raspihats.i2c_hats import ResponseException
        _LOGGER.info(
            log_message(self, "starting")
        )
        while self._run:
            with self._lock:
                for i2c_hat in list(self._i2c_hats.values()):
                    try:
                        self._di_scanner.scan(i2c_hat)
                        self._read_status(i2c_hat)
                        # Presence of the exception attribute marks a board
                        # that was offline (or just registered); its removal
                        # below marks the board online.
                        if hasattr(i2c_hat, self._EXCEPTION):
                            if getattr(i2c_hat, self._EXCEPTION) is not None:
                                _LOGGER.warning(
                                    log_message(self, i2c_hat, "online again")
                                )
                            delattr(i2c_hat, self._EXCEPTION)
                            # trigger online callbacks
                            callbacks = getattr(i2c_hat, self._CALLBACKS)
                            for callback in list(callbacks.values()):
                                callback()
                    except ResponseException as ex:
                        # Log only on the first failure; remember it so the
                        # recovery is announced once the board answers again.
                        if not hasattr(i2c_hat, self._EXCEPTION):
                            _LOGGER.error(
                                log_message(self, i2c_hat, ex)
                            )
                        setattr(i2c_hat, self._EXCEPTION, ex)
            time.sleep(0.05)
        _LOGGER.info(
            log_message(self, "exiting")
        )

    def _read_status(self, i2c_hat):
        """Read I2C-HATs status; a non-zero status word is logged as an error."""
        status_word = i2c_hat.status
        if status_word.value != 0x00:
            _LOGGER.error(
                log_message(self, i2c_hat, status_word)
            )

    def start_keep_alive(self):
        """Start keep alive mechanism."""
        self._run = True
        threading.Thread.start(self)

    def stop_keep_alive(self):
        """Stop keep alive mechanism and wait for the thread to exit."""
        self._run = False
        self.join()

    def register_di_callback(self, address, channel, callback):
        """Register I2C-HAT digital input edge callback."""
        with self._lock:
            i2c_hat = self._i2c_hats[address]
            self._di_scanner.register_callback(i2c_hat, channel, callback)

    def register_online_callback(self, address, channel, callback):
        """Register I2C-HAT online callback."""
        with self._lock:
            i2c_hat = self._i2c_hats[address]
            callbacks = getattr(i2c_hat, self._CALLBACKS)
            callbacks[channel] = callback
            setattr(i2c_hat, self._CALLBACKS, callbacks)

    def read_di(self, address, channel):
        """Read a value from a I2C-HAT digital input.

        Raises I2CHatsException when the board does not respond.
        """
        # pylint: disable=import-error
        from raspihats.i2c_hats import ResponseException
        with self._lock:
            i2c_hat = self._i2c_hats[address]
            try:
                value = i2c_hat.di.value
                return (value >> channel) & 0x01
            except ResponseException as ex:
                raise I2CHatsException(str(ex))

    def write_dq(self, address, channel, value):
        """Write a value to a I2C-HAT digital output.

        Raises I2CHatsException when the board does not respond.
        """
        # pylint: disable=import-error
        from raspihats.i2c_hats import ResponseException
        with self._lock:
            i2c_hat = self._i2c_hats[address]
            try:
                i2c_hat.dq.channels[channel] = value
            except ResponseException as ex:
                raise I2CHatsException(str(ex))

    def read_dq(self, address, channel):
        """Read a value from a I2C-HAT digital output.

        Raises I2CHatsException when the board does not respond.
        """
        # pylint: disable=import-error
        from raspihats.i2c_hats import ResponseException
        with self._lock:
            i2c_hat = self._i2c_hats[address]
            try:
                return i2c_hat.dq.channels[channel]
            except ResponseException as ex:
                raise I2CHatsException(str(ex))
| |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
'''
Tests for the _ssl module. See http://docs.python.org/library/ssl.html
'''
# https://github.com/IronLanguages/main/issues/733
import _ssl as real_ssl
import os
import socket
import unittest
from iptest import IronPythonTestCase, is_cli, is_netcoreapp, retryOnFailure, run_test, skipUnlessIronPython
# Live endpoint used by the network tests below; the issuer/server strings
# depend on the certificate currently served by this host.
SSL_URL = "www.microsoft.com"
SSL_ISSUER = "CN=Microsoft RSA TLS CA 01, O=Microsoft Corporation, C=US"
SSL_SERVER = "www.microsoft.com"
SSL_PORT = 443
SSL_REQUEST = "GET /en-us HTTP/1.0\r\nHost: www.microsoft.com\r\n\r\n"
SSL_RESPONSE = "Microsoft"
# Static test certificate used by the certificate-parsing tests.
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
class _SslTest(IronPythonTestCase):
    """Tests for IronPython's `_ssl` module (Python 2 syntax).

    NOTE(review): several tests open live TLS connections to SSL_URL, so
    they need network access and track the server's current certificate.
    """

    def test_constants(self):
        # Values mirror CPython 2's _ssl module constants.
        self.assertEqual(real_ssl.CERT_NONE, 0)
        self.assertEqual(real_ssl.CERT_OPTIONAL, 1)
        self.assertEqual(real_ssl.CERT_REQUIRED, 2)
        self.assertEqual(real_ssl.PROTOCOL_SSLv2, 0)
        self.assertEqual(real_ssl.PROTOCOL_SSLv23, 2)
        self.assertEqual(real_ssl.PROTOCOL_SSLv3, 1)
        self.assertEqual(real_ssl.PROTOCOL_TLSv1, 3)
        self.assertEqual(real_ssl.PROTOCOL_TLSv1_1, 4)
        self.assertEqual(real_ssl.PROTOCOL_TLSv1_2, 5)
        self.assertEqual(real_ssl.OP_NO_SSLv2, 0x1000000)
        self.assertEqual(real_ssl.OP_NO_SSLv3, 0x2000000)
        self.assertEqual(real_ssl.OP_NO_TLSv1, 0x4000000)
        self.assertEqual(real_ssl.OP_NO_TLSv1_1, 0x10000000)
        self.assertEqual(real_ssl.OP_NO_TLSv1_2, 0x8000000)
        self.assertEqual(real_ssl.SSL_ERROR_EOF, 8)
        self.assertEqual(real_ssl.SSL_ERROR_INVALID_ERROR_CODE, 9)
        self.assertEqual(real_ssl.SSL_ERROR_SSL, 1)
        self.assertEqual(real_ssl.SSL_ERROR_SYSCALL, 5)
        self.assertEqual(real_ssl.SSL_ERROR_WANT_CONNECT, 7)
        self.assertEqual(real_ssl.SSL_ERROR_WANT_READ, 2)
        self.assertEqual(real_ssl.SSL_ERROR_WANT_WRITE, 3)
        self.assertEqual(real_ssl.SSL_ERROR_WANT_X509_LOOKUP, 4)
        self.assertEqual(real_ssl.SSL_ERROR_ZERO_RETURN, 6)

    def test_RAND_add(self):
        #--Positive
        self.assertEqual(real_ssl.RAND_add("", 3.14), None)
        self.assertEqual(real_ssl.RAND_add(u"", 3.14), None)
        self.assertEqual(real_ssl.RAND_add("", 3), None)
        #--Negative
        for g1, g2 in [ (None, None),
                        ("", None),
                        (None, 3.14), #http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24276
                        ]:
            self.assertRaises(TypeError, real_ssl.RAND_add, g1, g2)
        self.assertRaises(TypeError, real_ssl.RAND_add)
        self.assertRaises(TypeError, real_ssl.RAND_add, "")
        self.assertRaises(TypeError, real_ssl.RAND_add, 3.14)
        self.assertRaises(TypeError, real_ssl.RAND_add, "", 3.14, "")

    def test_RAND_status(self):
        #--Positive
        self.assertEqual(real_ssl.RAND_status(), 1)
        #--Negative
        self.assertRaises(TypeError, real_ssl.RAND_status, None)
        self.assertRaises(TypeError, real_ssl.RAND_status, "")
        self.assertRaises(TypeError, real_ssl.RAND_status, 1)
        self.assertRaises(TypeError, real_ssl.RAND_status, None, None)

    def test_SSLError(self):
        # SSLError must subclass socket.error, as in CPython.
        self.assertEqual(real_ssl.SSLError.__bases__, (socket.error, ))

    def test___doc__(self):
        expected_doc = """Implementation module for SSL socket operations. See the socket module
for documentation."""
        self.assertEqual(real_ssl.__doc__, expected_doc)

    def test__test_decode_cert(self):
        # Guard: once decode_cert exists, this forces a real test to be added.
        if is_cli and hasattr(real_ssl, "decode_cert"):
            self.fail("Please add a test for _ssl.decode_cert")
        print 'TODO: no implementation to test yet.'

    def test_sslwrap(self):
        print 'TODO: no implementation to test yet.'

    def test_SSLType(self):
        #--Positive
        if is_cli:
            #https://github.com/IronLanguages/main/issues/733
            self.assertEqual(str(real_ssl.SSLType),
                             "<type '_socket.ssl'>")
        else:
            self.assertEqual(str(real_ssl.SSLType),
                             "<type 'ssl.SSLContext'>")

    '''
    TODO: once we have a proper implementation of _ssl.sslwrap the tests below need
    to be rewritten.
    '''

    @retryOnFailure
    def test_SSLType_ssl(self):
        '''
        Should be essentially the same as _ssl.sslwrap. It's not though and will
        simply be tested as implemented for the time being.
        ssl(PythonSocket.socket sock,
            [DefaultParameterValue(null)] string keyfile,
            [DefaultParameterValue(null)] string certfile)
        '''
        #--Positive
        #sock
        s = socket.socket(socket.AF_INET)
        s.connect((SSL_URL, SSL_PORT))
        ssl_s = real_ssl.sslwrap(s._sock, False)
        ssl_s.shutdown()
        s.close()
        #sock, keyfile, certfile
        #TODO!

    @unittest.expectedFailure
    @retryOnFailure
    def test_SSLType_ssl_neg(self):
        '''
        See comments on test_SSLType_ssl. Basically this needs to be revisited
        entirely (TODO) after we're more compatible with CPython.
        '''
        s = socket.socket(socket.AF_INET)
        s.connect((SSL_URL, SSL_PORT))
        #--Negative
        #Empty
        self.assertRaises(TypeError, real_ssl.sslwrap)
        self.assertRaises(TypeError, real_ssl.sslwrap, False)
        #None
        self.assertRaises(TypeError, real_ssl.sslwrap, None, False)
        #s, bad keyfile
        #Should throw _ssl.SSLError because both keyfile and certificate weren't specified
        self.assertRaises(real_ssl.SSLError, real_ssl.sslwrap, s._sock, False, "bad keyfile")
        #s, bad certfile
        #Should throw _ssl.SSLError because both keyfile and certificate weren't specified
        #s, bad keyfile, bad certfile
        #Should throw ssl.SSLError
        self.assertRaises(real_ssl.SSLError, real_ssl.sslwrap, s._sock, False, "bad keyfile", "bad certfile")
        #Cleanup
        s.close()

    @retryOnFailure
    def test_SSLType_issuer(self):
        #--Positive
        s = socket.socket(socket.AF_INET)
        s.connect((SSL_URL, SSL_PORT))
        ssl_s = real_ssl.sslwrap(s._sock, False)
        self.assertEqual(ssl_s.issuer(), '') #http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24281
        ssl_s.do_handshake()
        #Incompat, but a good one at that
        if is_cli:
            self.assertTrue("Returns a string that describes the issuer of the server's certificate" in ssl_s.issuer.__doc__)
        else:
            self.assertEqual(ssl_s.issuer.__doc__, None)
        issuer = ssl_s.issuer()
        #If we can get the issuer once, we should be able to do it again
        self.assertEqual(issuer, ssl_s.issuer())
        self.assertTrue(SSL_ISSUER in issuer)
        #--Negative
        self.assertRaisesMessage(TypeError, "issuer() takes no arguments (1 given)",
                                 ssl_s.issuer, None)
        self.assertRaisesMessage(TypeError, "issuer() takes no arguments (1 given)",
                                 ssl_s.issuer, 1)
        self.assertRaisesMessage(TypeError, "issuer() takes no arguments (2 given)",
                                 ssl_s.issuer, 3.14, "abc")
        #Cleanup
        ssl_s.shutdown()
        s.close()

    @retryOnFailure
    def test_SSLType_server(self):
        #--Positive
        s = socket.socket(socket.AF_INET)
        s.connect((SSL_URL, SSL_PORT))
        ssl_s = real_ssl.sslwrap(s._sock, False)
        self.assertEqual(ssl_s.server(), '') #http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24281
        ssl_s.do_handshake()
        if is_cli:
            #Incompat, but a good one at that
            # NOTE(review): this inspects issuer.__doc__ inside the *server*
            # test -- looks like a copy/paste from test_SSLType_issuer; verify.
            self.assertTrue("Returns a string that describes the issuer of the server's certificate" in ssl_s.issuer.__doc__)
        else:
            self.assertEqual(ssl_s.server.__doc__, None)
        server = ssl_s.server()
        #If we can get the server once, we should be able to do it again
        self.assertEqual(server, ssl_s.server())
        self.assertTrue(SSL_SERVER in server)
        #--Negative
        self.assertRaisesMessage(TypeError, "server() takes no arguments (1 given)",
                                 ssl_s.server, None)
        self.assertRaisesMessage(TypeError, "server() takes no arguments (1 given)",
                                 ssl_s.server, 1)
        self.assertRaisesMessage(TypeError, "server() takes no arguments (2 given)",
                                 ssl_s.server, 3.14, "abc")
        #Cleanup
        ssl_s.shutdown()
        s.close()

    @retryOnFailure
    def test_SSLType_read_and_write(self):
        #--Positive
        s = socket.socket(socket.AF_INET)
        s.connect((SSL_URL, SSL_PORT))
        ssl_s = real_ssl.sslwrap(s._sock, False)
        ssl_s.do_handshake()
        self.assertTrue("Writes the string s into the SSL object." in ssl_s.write.__doc__)
        self.assertTrue("Read up to len bytes from the SSL socket." in ssl_s.read.__doc__)
        #Write
        self.assertEqual(ssl_s.write(SSL_REQUEST),
                         len(SSL_REQUEST))
        #Read
        self.assertEqual(ssl_s.read(4).lower(), "http")
        response = ssl_s.read(5000)
        self.assertTrue(SSL_RESPONSE in response)
        #Cleanup
        ssl_s.shutdown()
        s.close()

    def test_parse_cert(self):
        """part of test_parse_cert from CPython.test_ssl"""
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = real_ssl._test_decode_cert(CERTFILE)
        self.assertEqual(p['issuer'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                         )
        # Note the next three asserts will fail if the keys are regenerated
        self.assertEqual(p['notAfter'], 'Oct 5 23:01:56 2020 GMT')
        self.assertEqual(p['notBefore'], 'Oct 8 23:01:56 2010 GMT')
        self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
        self.assertEqual(p['subject'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                         )
        self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))

    @skipUnlessIronPython()
    def test_cert_date_locale(self):
        # Certificate dates must be formatted invariantly, regardless of the
        # current .NET culture (here forced to French).
        import System
        if is_netcoreapp:
            import clr
            clr.AddReference("System.Threading.Thread")
        culture = System.Threading.Thread.CurrentThread.CurrentCulture
        try:
            System.Threading.Thread.CurrentThread.CurrentCulture = System.Globalization.CultureInfo("fr")
            p = real_ssl._test_decode_cert(CERTFILE)
            self.assertEqual(p['notAfter'], 'Oct 5 23:01:56 2020 GMT')
            self.assertEqual(p['notBefore'], 'Oct 8 23:01:56 2010 GMT')
        finally:
            System.Threading.Thread.CurrentThread.CurrentCulture = culture
run_test(__name__)
| |
import random
import unittest
from SDWLE.cards.spells.neutral import TheCoin
from testsSDW.agents.testing_agents import OneCardPlayingAgent, MinionAttackingAgent, CardTestingAgent, \
PlayAndAttackAgent
from testsSDW.testing_utils import generate_game_for
from SDWLE.cards import *
from SDWLE.constants import MINION_TYPE
from SDWLE.agents.basic_agents import PredictableAgent, DoNothingAgent
class TestShaman(unittest.TestCase):
def setUp(self):
    """Seed the RNG so card draws and agent choices are deterministic."""
    random.seed(1857)
def test_AlAkirTheWindlord(self):
    """Al'Akir enters play with all four keyword abilities active."""
    game = generate_game_for(AlAkirTheWindlord, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
    for turn in range(0, 15):
        game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual("Al'Akir the Windlord", game.players[0].minions[0].card.name)
    self.assertTrue(game.players[0].minions[0].windfury())
    self.assertTrue(game.players[0].minions[0].charge())
    self.assertTrue(game.players[0].minions[0].divine_shield)
    self.assertTrue(game.players[0].minions[0].taunt)

def test_DustDevil(self):
    """Dust Devil has windfury and overloads two mana crystals."""
    game = generate_game_for(DustDevil, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
    game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual("Dust Devil", game.players[0].minions[0].card.name)
    self.assertTrue(game.players[0].minions[0].windfury())
    self.assertEqual(2, game.players[0].upcoming_overload)
    game.play_single_turn()
    # Overload should cause that we start this turn with 0 mana
    game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual(0, game.players[0].upcoming_overload)
    self.assertEqual(0, game.players[0].mana)
    self.assertEqual(2, game.players[0].max_mana)

def test_EarthElemental(self):
    """Earth Elemental has taunt and overloads three mana crystals."""
    game = generate_game_for(EarthElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
    # Earth Elemental should be played
    for turn in range(0, 9):
        game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual("Earth Elemental", game.players[0].minions[0].card.name)
    self.assertTrue(game.players[0].minions[0].taunt)
    self.assertEqual(3, game.players[0].upcoming_overload)

def test_FireElemental(self):
    """Fire Elemental's battlecry deals 3 damage (here to the enemy hero)."""
    game = generate_game_for(FireElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
    for turn in range(0, 10):
        game.play_single_turn()
    self.assertEqual(30, game.players[1].hero.health)
    # Fire Elemental should be played, and its battlecry dealing three damage to opponent
    game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual("Fire Elemental", game.players[0].minions[0].card.name)
    self.assertEqual(27, game.players[1].hero.health)
def test_FlametongueTotem(self):
    """Flametongue Totem gives +2 attack to its two adjacent minions only,
    tracking summons, deaths, and silences."""
    game = generate_game_for(StonetuskBoar, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
    for turn in range(0, 5):
        game.play_single_turn()
    # There should be three Stonetusk Boars on the board
    self.assertEqual(3, len(game.players[0].minions))
    # add a new Flametongue Totem at index 1
    totem = FlametongueTotem()
    totem.summon(game.players[0], game, 1)
    # The minions to either side should have their attack increased
    self.assertEqual(4, len(game.players[0].minions))
    self.assertEqual(3, game.players[0].minions[0].calculate_attack())
    self.assertEqual(3, game.players[0].minions[2].calculate_attack())
    self.assertEqual(1, game.players[0].minions[3].calculate_attack())
    # When removing the minion at index 0, we should not get an error
    game.players[0].minions[0].die(None)
    game.players[0].minions[0].activate_delayed()
    self.assertEqual(3, len(game.players[0].minions))
    # When removing the minion at index 1, we should have a new minion at index 1,
    # and its attack should be increased
    game.players[0].minions[1].die(None)
    game.players[0].minions[1].activate_delayed()
    self.assertEqual(2, len(game.players[0].minions))
    self.assertEqual(3, game.players[0].minions[1].calculate_attack())
    # Silencing this minion should have no effect on its attack
    # (the buff comes from the totem's aura, not from the minion itself)
    game.players[0].minions[1].silence()
    self.assertEqual(3, game.players[0].minions[1].calculate_attack())
    # We should be able to add a boar on either side of the totem, and their attack should be increased
    # The attack of the boar which used to be next to the totem should decrease
    boar = StonetuskBoar()
    boar.summon(game.players[0], game, 0)
    boar.summon(game.players[0], game, 2)
    self.assertEqual(4, len(game.players[0].minions))
    self.assertEqual(3, game.players[0].minions[0].calculate_attack())
    self.assertEqual(3, game.players[0].minions[2].calculate_attack())
    self.assertEqual(1, game.players[0].minions[3].calculate_attack())
    # Add a new boar on the left of the totem since we haven't tested that yet
    boar.summon(game.players[0], game, 1)
    self.assertEqual(5, len(game.players[0].minions))
    self.assertEqual(1, game.players[0].minions[0].calculate_attack())
    self.assertEqual(3, game.players[0].minions[1].calculate_attack())
    game.players[0].minions[1].die(None)
    game.players[0].minions[1].activate_delayed()
    self.assertEqual(4, len(game.players[0].minions))
    self.assertEqual(3, game.players[0].minions[0].calculate_attack())
    # If the totem is silenced, then the boars to either side should no longer have increased attack
    game.players[0].minions[1].silence()
    self.assertEqual(1, game.players[0].minions[0].calculate_attack())
    self.assertEqual(1, game.players[0].minions[2].calculate_attack())
    self.assertEqual(1, game.players[0].minions[3].calculate_attack())
def test_ManaTideTotem(self):
    """Mana Tide Totem draws an extra card at end of turn until silenced."""
    game = generate_game_for([ManaTideTotem, WarGolem], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
    for turn in range(0, 4):
        game.play_single_turn()
    self.assertEqual(25, game.players[0].deck.left)
    self.assertEqual(0, len(game.players[0].minions))
    # Mana Tide Totem should be played, and we should draw a card at the end of turn
    game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual("Mana Tide Totem", game.players[0].minions[0].card.name)
    self.assertEqual(23, game.players[0].deck.left)
    game.play_single_turn()
    # Silence, we should only draw one card next turn
    game.players[0].minions[0].silence()
    game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual(22, game.players[0].deck.left)

def test_UnboundElemental(self):
    """Unbound Elemental grows +1/+1 per overload card played; silence stops it."""
    game = generate_game_for([UnboundElemental, DustDevil, DustDevil], StonetuskBoar, OneCardPlayingAgent,
                             DoNothingAgent)
    for turn in range(0, 6):
        game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual("Unbound Elemental", game.players[0].minions[0].card.name)
    self.assertEqual(2, game.players[0].minions[0].calculate_attack())
    self.assertEqual(4, game.players[0].minions[0].calculate_max_health())
    # One Dust Devil should be played, giving the Unbound Elemental +1/+1
    game.play_single_turn()
    self.assertEqual(2, len(game.players[0].minions))
    self.assertEqual(3, game.players[0].minions[-1].calculate_attack())
    self.assertEqual(5, game.players[0].minions[-1].calculate_max_health())
    # Test the silence
    game.players[0].minions[-1].silence()
    self.assertEqual(2, game.players[0].minions[-1].calculate_attack())
    self.assertEqual(4, game.players[0].minions[-1].calculate_max_health())
    # Another Dust Devil, nothing should happen because of silence
    game.play_single_turn()
    game.play_single_turn()
    self.assertEqual(3, len(game.players[0].minions))
    self.assertEqual(2, game.players[0].minions[-1].calculate_attack())
    self.assertEqual(4, game.players[0].minions[-1].calculate_max_health())

def test_Windspeaker(self):
    """Windspeaker's battlecry grants windfury to a friendly minion."""
    game = generate_game_for([StonetuskBoar, Windspeaker], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
    for turn in range(0, 6):
        game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual("Stonetusk Boar", game.players[0].minions[0].card.name)
    self.assertFalse(game.players[0].minions[0].windfury())
    # Windspeaker should be played, giving the boar windfury
    game.play_single_turn()
    self.assertEqual(2, len(game.players[0].minions))
    self.assertEqual("Windspeaker", game.players[0].minions[0].card.name)
    self.assertTrue(game.players[0].minions[1].windfury())
def test_AncestralHealing(self):
game = generate_game_for([FlametongueTotem, AncestralHealing], StonetuskBoar,
OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Flametongue Totem", game.players[0].minions[0].card.name)
self.assertEqual(3, game.players[0].minions[0].health)
self.assertFalse(game.players[0].minions[0].taunt)
game.players[0].minions[0].health = 1
game.play_single_turn()
self.assertEqual(3, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].taunt)
def test_AncestralSpirit(self):
game = generate_game_for([ArgentCommander, AncestralSpirit], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 11):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Argent Commander", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].divine_shield)
game.play_single_turn()
# Ancestral Spirit should be played on the Argent Commander
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
game.players[0].minions[0].health = 1
game.players[0].minions[0].divine_shield = False
# Let the minion die in order to test Ancestral Spirit
commander = game.players[0].minions[0]
commander.die(None)
commander.activate_delayed()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Argent Commander", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].divine_shield)
def test_AncestralSpiritDeathrattle(self):
    """A minion revived by Ancestral Spirit still fires its own deathrattle."""
    game = generate_game_for([LootHoarder, AncestralSpirit], StonetuskBoar,
                             OneCardPlayingAgent, DoNothingAgent)
    for _ in range(5):
        game.play_single_turn()
    player = game.players[0]
    self.assertEqual(1, len(player.minions))
    self.assertEqual(4, len(player.hand))
    # Kill the hoarder: its deathrattle draws a card and Ancestral Spirit
    # puts a new copy back on the board.
    hoarder = player.minions[0]
    hoarder.die(None)
    hoarder.activate_delayed()
    self.assertEqual(1, len(player.minions))
    self.assertEqual(5, len(player.hand))
def test_Bloodlust(self):
    """Bloodlust buffs all friendly minions' attack for the current turn only."""
    game = generate_game_for([StonetuskBoar, StonetuskBoar, StonetuskBoar, StonetuskBoar, Bloodlust],
                             StonetuskBoar, MinionAttackingAgent, DoNothingAgent)
    for _ in range(8):
        game.play_single_turn()
    attacker = game.players[0]
    defender = game.players[1]
    self.assertEqual(4, len(attacker.minions))
    self.assertEqual(20, defender.hero.health)
    # Bloodlust should be played, resulting in 4 * 4 = 16 damage
    game.play_single_turn()
    self.assertEqual(4, defender.hero.health)
    # Attack power should be back to normal
    self.assertEqual(1, attacker.minions[0].calculate_attack())
def test_EarthShock(self):
    """Earth Shock silences first (removing divine shield), then deals 1 damage."""
    game = generate_game_for(EarthShock, ArgentSquire, OneCardPlayingAgent, OneCardPlayingAgent)
    for _ in range(2):
        game.play_single_turn()
    opponent = game.players[1]
    self.assertEqual(1, len(opponent.minions))
    self.assertTrue(opponent.minions[0].divine_shield)
    # The silence strips the shield, so the single point of damage kills the squire.
    game.play_single_turn()
    self.assertEqual(0, len(opponent.minions))
def test_FarSight(self):
    """Far Sight draws a card and discounts that drawn card (and only it) by 3."""
    game = generate_game_for(FarSight, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
    for _ in range(5):
        game.play_single_turn()
    player = game.players[0]
    # The drawn card costs 3 - 3 = 0; the rest of the hand is unaffected.
    self.assertEqual(0, player.hand[-1].mana_cost())
    self.assertEqual(3, player.hand[0].mana_cost())
    # Draw a card to make sure the new card doesn't get the effect
    player.draw()
    self.assertEqual(3, player.hand[-1].mana_cost())
    # Our old card shouldn't have been affected
    self.assertEqual(0, player.hand[-2].mana_cost())
def test_FeralSpirit(self):
    """Feral Spirit summons two 2/3 Spirit Wolves with taunt and overloads for 2."""
    game = generate_game_for(FeralSpirit, StonetuskBoar, CardTestingAgent, DoNothingAgent)
    for _ in range(5):
        game.play_single_turn()
    player = game.players[0]
    self.assertEqual(2, len(player.minions))
    # Both wolves are identical, so check them in a loop.
    for wolf in player.minions:
        self.assertEqual(2, wolf.calculate_attack())
        self.assertEqual(3, wolf.health)
        self.assertTrue(wolf.taunt)
        self.assertEqual("Spirit Wolf", wolf.card.name)
        self.assertEqual(2, wolf.card.mana)
    self.assertEqual(2, player.upcoming_overload)
def test_VitalityTotem(self):
    """Vitality Totem restores 4 health to its owner's hero at end of turn."""
    game = generate_game_for(VitalityTotem, StonetuskBoar, CardTestingAgent, DoNothingAgent)
    for _ in range(2):
        game.play_single_turn()
    player = game.players[0]
    player.hero.health = 20
    game.play_single_turn()
    game.play_single_turn()
    # One totem in play: 20 + 4 = 24.
    self.assertEqual(24, player.hero.health)
    self.assertEqual(0, player.minions[0].calculate_attack())
    self.assertEqual(3, player.minions[0].health)
    game.play_single_turn()
    game.play_single_turn()
    # player now has two vitality totems in play
    self.assertEqual(30, player.hero.health)
    self.assertEqual(2, len(player.minions))
def test_ForkedLightning(self):
    """Forked Lightning kills both enemy boars and overloads its caster for 2."""
    game = generate_game_for(ForkedLightning, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
    for _ in range(4):
        game.play_single_turn()
    opponent = game.players[1]
    # Nothing should have happened yet, since the opponent haven't got 2 minions until now
    self.assertEqual(2, len(opponent.minions))
    # Forked Lightning should be played
    game.play_single_turn()
    self.assertEqual(0, len(opponent.minions))
    self.assertEqual(2, game.players[0].upcoming_overload)
def test_FrostShock(self):
    """Frost Shock deals 1 damage and freezes its target."""
    game = generate_game_for(FrostShock, StonetuskBoar, CardTestingAgent, DoNothingAgent)
    # Frost Shock should be played on the enemy hero on turn one.
    game.play_single_turn()
    enemy_hero = game.players[1].hero
    self.assertEqual(29, enemy_hero.health)
    self.assertTrue(enemy_hero.frozen)
def test_Hex(self):
    """Hex transforms an enemy minion into a 0/1 Frog (Beast) with taunt."""
    game = generate_game_for(ChillwindYeti, Hex, OneCardPlayingAgent, CardTestingAgent)
    for _ in range(7):
        game.play_single_turn()
    victim_side = game.players[0]
    self.assertEqual(1, len(victim_side.minions))
    yeti = victim_side.minions[0]
    self.assertFalse(yeti.taunt)
    self.assertEqual(4, yeti.calculate_attack())
    self.assertEqual(5, yeti.health)
    self.assertEqual("Chillwind Yeti", yeti.card.name)
    # Hex is played: the yeti is replaced by a frog.
    game.play_single_turn()
    self.assertEqual(1, len(victim_side.minions))
    frog = victim_side.minions[0]
    self.assertTrue(frog.taunt)
    self.assertEqual(0, frog.calculate_attack())
    self.assertEqual(1, frog.health)
    self.assertEqual("Frog", frog.card.name)
    self.assertEqual(MINION_TYPE.BEAST, frog.card.minion_type)
def test_LavaBurst(self):
    """Lava Burst deals 5 damage and overloads for 2."""
    game = generate_game_for(LavaBurst, StonetuskBoar, CardTestingAgent, DoNothingAgent)
    for _ in range(4):
        game.play_single_turn()
    enemy_hero = game.players[1].hero
    self.assertEqual(30, enemy_hero.health)
    game.play_single_turn()
    self.assertEqual(25, enemy_hero.health)
    self.assertEqual(2, game.players[0].upcoming_overload)
def test_LightningBolt(self):
    """Lightning Bolt deals 3 damage and overloads for 1."""
    game = generate_game_for(LightningBolt, StonetuskBoar, CardTestingAgent, DoNothingAgent)
    enemy_hero = game.players[1].hero
    self.assertEqual(30, enemy_hero.health)
    game.play_single_turn()
    self.assertEqual(27, enemy_hero.health)
    self.assertEqual(1, game.players[0].upcoming_overload)
def test_LightningStorm(self):
    """Lightning Storm damages every enemy minion and overloads for 2."""
    game = generate_game_for(LightningStorm, Shieldbearer, CardTestingAgent, PlayAndAttackAgent)
    for _ in range(4):
        game.play_single_turn()
    # Lightning Storm should be played
    game.play_single_turn()
    foes = game.players[1].minions
    self.assertEqual(3, len(foes))
    # Per-minion damage is random; these are the rolls this deterministic game produces.
    self.assertEqual(1, foes[0].health)
    self.assertEqual(2, foes[1].health)
    self.assertEqual(2, foes[2].health)
    self.assertEqual(2, game.players[0].upcoming_overload)
def test_RockbiterWeapon(self):
    """Rockbiter Weapon buffs the hero's attack, dealing 3 on the first turn."""
    game = generate_game_for(RockbiterWeapon, Shieldbearer, PlayAndAttackAgent, DoNothingAgent)
    self.assertEqual(30, game.players[1].hero.health)
    # Rockbiter Weapon should be played and used
    game.play_single_turn()
    self.assertEqual(27, game.players[1].hero.health)
def test_RockbiterWeapon_and_Hex(self):
    """Hexing a Rockbiter-buffed minion still yields a plain Frog."""
    game = generate_game_for([IronfurGrizzly, RockbiterWeapon, Hex], StonetuskBoar,
                             CardTestingAgent, DoNothingAgent)
    for _ in range(7):
        game.play_single_turn()
    board = game.current_player.minions
    self.assertEqual(1, len(board))
    self.assertEqual("Frog", board[0].card.name)
def test_RockbiterWeapon_and_BaronGeddon(self):
    """Rockbiter interacts correctly with Baron Geddon's end-of-turn damage."""
    game = generate_game_for([BaronGeddon, RecklessRocketeer, RockbiterWeapon], StonetuskBoar,
                             PlayAndAttackAgent, DoNothingAgent)
    for _ in range(15):
        game.play_single_turn()
    board = game.current_player.minions
    self.assertEqual(1, len(board))
    self.assertEqual("Baron Geddon", board[0].card.name)
    self.assertEqual(11, game.other_player.hero.health)
def test_TotemicMight(self):
    """Totemic Might grants +2 health to totems only, not other minions."""
    game = generate_game_for([TotemicMight, StonetuskBoar], Shieldbearer, PredictableAgent, DoNothingAgent)
    for _ in range(2):
        game.play_single_turn()
    player = game.players[0]
    self.assertEqual(1, len(player.minions))
    self.assertEqual("Stonetusk Boar", player.minions[0].card.name)
    # Hero power and Totemic Might should be played
    game.play_single_turn()
    self.assertEqual(2, len(player.minions))
    # The boar keeps 1 max health; the Stoneclaw Totem ends at 4.
    self.assertEqual(1, player.minions[0].calculate_max_health())
    self.assertEqual("Stoneclaw Totem", player.minions[1].card.name)
    self.assertEqual(4, player.minions[1].calculate_max_health())
def test_Windfury(self):
    """The Windfury spell grants a minion windfury."""
    game = generate_game_for(Windfury, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
    for _ in range(2):
        game.play_single_turn()
    opponent = game.players[1]
    self.assertFalse(opponent.minions[0].windfury())
    # Windfury should be played
    game.play_single_turn()
    self.assertTrue(opponent.minions[0].windfury())
def test_Doomhammer(self):
    """Doomhammer grants the hero windfury, letting it attack twice per turn."""
    game = generate_game_for(Doomhammer, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
    for _ in range(8):
        game.play_single_turn()
    player = game.players[0]
    enemy_hero = game.players[1].hero
    self.assertEqual(30, enemy_hero.health)
    self.assertFalse(player.hero.windfury())
    # Doomhammer should be played
    game.play_single_turn()
    self.assertTrue(player.hero.windfury())
    self.assertEqual(2, player.weapon.base_attack)
    # Two 2-damage swings happened this turn: durability down to 6, hero at 26.
    self.assertEqual(6, player.weapon.durability)
    self.assertEqual(2, player.upcoming_overload)
    self.assertEqual(26, enemy_hero.health)
def test_StormforgedAxe(self):
    """Stormforged Axe equips a 2/3 weapon with overload 1."""
    game = generate_game_for(StormforgedAxe, StonetuskBoar, CardTestingAgent, DoNothingAgent)
    for _ in range(3):
        game.play_single_turn()
    player = game.players[0]
    self.assertEqual(2, player.weapon.base_attack)
    self.assertEqual(3, player.weapon.durability)
    self.assertEqual(1, player.upcoming_overload)
def test_Crackle(self):
    """Crackle deals random damage (5 in this deterministic game) with overload 1."""
    game = generate_game_for(Crackle, StonetuskBoar, CardTestingAgent, DoNothingAgent)
    for _ in range(3):
        game.play_single_turn()
    self.assertEqual(25, game.players[1].hero.health)
    self.assertEqual(1, game.players[0].upcoming_overload)
def test_SiltfinSpiritwalker(self):
    """Siltfin Spiritwalker: its owner draws whenever a friendly Murloc dies.

    NOTE: game.current_player / game.other_player swap every turn, so the
    side being inspected alternates across the turn boundaries below.
    """
    game = generate_game_for([MurlocTidecaller, MurlocTidehunter, SiltfinSpiritwalker, Deathwing],
                             [MurlocTidecaller, Hellfire, BaneOfDoom], OneCardPlayingAgent, OneCardPlayingAgent)
    for turn in range(6):
        game.play_single_turn()
    self.assertEqual(3, len(game.other_player.minions))
    self.assertEqual(1, len(game.current_player.minions))
    # Play Siltfin
    game.play_single_turn()
    self.assertEqual(4, len(game.current_player.minions))
    self.assertEqual(1, len(game.other_player.minions))
    self.assertEqual(4, len(game.current_player.hand))
    self.assertEqual(7, len(game.other_player.hand))
    # Hellfire will kill all the murlocs but the siltfin.
    game.play_single_turn()
    # Siltfin's side keeps one minion and drew cards for its dead murlocs.
    self.assertEqual(1, len(game.other_player.minions))
    self.assertEqual(7, len(game.other_player.hand))
    self.assertEqual(0, len(game.current_player.minions))
    self.assertEqual(7, len(game.current_player.hand))
def test_WhirlingZapOMatic(self):
    """Whirling Zap-o-matic comes into play with windfury."""
    game = generate_game_for(WhirlingZapomatic, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
    for _ in range(3):
        game.play_single_turn()
    player = game.players[0]
    self.assertEqual(1, len(player.minions))
    self.assertEqual("Whirling Zap-o-matic", player.minions[0].card.name)
    self.assertTrue(player.minions[0].windfury())
def test_DunemaulShaman(self):
    """Dunemaul Shaman (forgetful): may attack a random target instead.

    The sequence of targets below is fixed by the deterministic RNG that
    generate_game_for sets up, so the assertions pin exact board states.
    """
    game = generate_game_for(DunemaulShaman,
                             [StonetuskBoar, GoldshireFootman, SilverbackPatriarch, MogushanWarden],
                             PlayAndAttackAgent, OneCardPlayingAgent)
    for turn in range(7):
        game.play_single_turn()
    self.assertEqual(1, len(game.current_player.minions))
    self.assertEqual(3, len(game.other_player.minions))
    game.play_single_turn()
    # The shaman's forgetful ability triggers once. It hits the warden one time (its intended target)
    # and the footman one time (after triggering forgetful)
    game.play_single_turn()
    self.assertEqual(2, len(game.current_player.minions))
    self.assertEqual(3, len(game.other_player.minions))
    self.assertEqual("Mogu'shan Warden", game.other_player.minions[0].card.name)
    self.assertEqual("Silverback Patriarch", game.other_player.minions[1].card.name)
    self.assertEqual("Stonetusk Boar", game.other_player.minions[2].card.name)
    self.assertEqual(30, game.other_player.hero.health)
def test_Powermace(self):
    """Powermace's deathrattle gives a friendly Mech +2/+2 when the weapon breaks."""
    game = generate_game_for([Powermace, SpiderTank, SpiderTank], Wisp, PlayAndAttackAgent, DoNothingAgent)
    for _ in range(6):
        game.play_single_turn()
    player = game.players[0]
    enemy_hero = game.players[1].hero
    self.assertEqual(0, len(player.minions))
    self.assertEqual(27, enemy_hero.health)
    self.assertEqual(3, player.weapon.base_attack)
    self.assertEqual(1, player.weapon.durability)
    # The mace breaks this turn and buffs the Spider Tank from 3/4 to 5/6.
    game.play_single_turn()
    self.assertEqual(1, len(player.minions))
    self.assertEqual(24, enemy_hero.health)
    self.assertEqual(5, player.minions[0].calculate_attack())
    self.assertEqual(6, player.minions[0].health)
def test_Neptulon(self):
    """Neptulon's battlecry fills the owner's empty hand with four Murlocs."""
    game = generate_game_for([TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin,
                              Neptulon], Wisp, CardTestingAgent, DoNothingAgent)
    for _ in range(12):
        game.play_single_turn()
    player = game.players[0]
    self.assertEqual(0, len(player.minions))
    self.assertEqual(0, len(player.hand))
    game.play_single_turn()
    self.assertEqual(1, len(player.minions))
    self.assertEqual(4, len(player.hand))
    for card in player.hand:
        self.assertEqual(MINION_TYPE.MURLOC, card.minion_type)
def test_AncestorsCall(self):
    """Ancestor's Call pulls a random minion from each hand into play."""
    game = generate_game_for([AncestorsCall, StonetuskBoar], [Doomguard, Soulfire],
                             OneCardPlayingAgent, OneCardPlayingAgent)
    for _ in range(7):
        game.play_single_turn()
    caster = game.current_player
    opponent = game.other_player
    self.assertEqual(1, len(caster.minions))
    self.assertEqual("Stonetusk Boar", caster.minions[0].card.name)
    self.assertEqual(1, len(opponent.minions))
    self.assertEqual("Doomguard", opponent.minions[0].card.name)
    # The Doomguard was summoned, not played, so no cards were discarded.
    self.assertEqual(5, len(caster.hand))
    self.assertEqual(7, len(opponent.hand))
def test_LavaShock(self):
    """Lava Shock deals 2 damage and unlocks the mana overloaded this turn."""
    game = generate_game_for([Doomhammer, LightningBolt, LavaShock], StonetuskBoar,
                             CardTestingAgent, DoNothingAgent)
    for _ in range(11):
        game.play_single_turn()
    # The player should have been able to do everything AND have three mana left over
    self.assertEqual(25, game.other_player.hero.health)
    self.assertEqual(3, game.current_player.mana)
def test_FireguardDestroyer(self):
    """Fireguard Destroyer: battlecry grants a random attack bonus.

    Each played copy is asserted separately because the bonus is random;
    the exact attack values below are fixed by the deterministic RNG of
    generate_game_for.
    """
    game = generate_game_for(FireguardDestroyer, Wisp, OneCardPlayingAgent, DoNothingAgent)
    for turn in range(0, 8):
        game.play_single_turn()
    self.assertEqual(1, len(game.players[0].minions))
    self.assertEqual(6, game.players[0].minions[0].calculate_attack())
    self.assertEqual(6, game.players[0].minions[0].health)
    game.play_single_turn()
    game.play_single_turn()
    self.assertEqual(2, len(game.players[0].minions))
    self.assertEqual(5, game.players[0].minions[0].calculate_attack())
    self.assertEqual(6, game.players[0].minions[0].health)
    game.play_single_turn()
    game.play_single_turn()
    self.assertEqual(3, len(game.players[0].minions))
    self.assertEqual(5, game.players[0].minions[0].calculate_attack())
    self.assertEqual(6, game.players[0].minions[0].health)
    game.play_single_turn()
    game.play_single_turn()
    self.assertEqual(4, len(game.players[0].minions))
    self.assertEqual(5, game.players[0].minions[0].calculate_attack())
    self.assertEqual(6, game.players[0].minions[0].health)
    game.play_single_turn()
    game.play_single_turn()
    self.assertEqual(5, len(game.players[0].minions))
    self.assertEqual(6, game.players[0].minions[0].calculate_attack())
    self.assertEqual(6, game.players[0].minions[0].health)
    game.play_single_turn()
    game.play_single_turn()
    self.assertEqual(6, len(game.players[0].minions))
    self.assertEqual(4, game.players[0].minions[0].calculate_attack())
    self.assertEqual(6, game.players[0].minions[0].health)
    game.play_single_turn()
    game.play_single_turn()
    self.assertEqual(7, len(game.players[0].minions))  # Well, I was trying to get a 7/6 but no luck
    self.assertEqual(5, game.players[0].minions[0].calculate_attack())
    self.assertEqual(6, game.players[0].minions[0].health)
def test_AncestralKnowledge(self):
    """Ancestral Knowledge draws two cards at the cost of overloading for 2."""
    game = generate_game_for(AncestralKnowledge, StonetuskBoar, CardTestingAgent, DoNothingAgent)
    for _ in range(3):
        game.play_single_turn()
    player = game.current_player
    self.assertEqual(6, len(player.hand))
    self.assertEqual(2, player.upcoming_overload)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mox import IsA # noqa
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.loadbalancers import workflows
class LoadBalancerTests(test.TestCase):
class AttributeDict(dict):
    """A dict whose keys are also readable and writable as attributes."""

    def __getattr__(self, attr):
        # Fall back to item lookup, so d.key mirrors d['key'].
        return self[attr]

    def __setattr__(self, attr, value):
        # Attribute assignment stores into the mapping itself.
        self[attr] = value
# Dashboard under test; all URL names below are scoped to it.
DASHBOARD = 'project'
# Landing page of the load-balancers panel.
INDEX_URL = reverse_lazy('horizon:%s:loadbalancers:index' % DASHBOARD)
# Named URL patterns for the "add" workflows.
ADDPOOL_PATH = 'horizon:%s:loadbalancers:addpool' % DASHBOARD
ADDVIP_PATH = 'horizon:%s:loadbalancers:addvip' % DASHBOARD
ADDMEMBER_PATH = 'horizon:%s:loadbalancers:addmember' % DASHBOARD
ADDMONITOR_PATH = 'horizon:%s:loadbalancers:addmonitor' % DASHBOARD
# Detail views for each resource type.
POOL_DETAIL_PATH = 'horizon:%s:loadbalancers:pooldetails' % DASHBOARD
VIP_DETAIL_PATH = 'horizon:%s:loadbalancers:vipdetails' % DASHBOARD
MEMBER_DETAIL_PATH = 'horizon:%s:loadbalancers:memberdetails' % DASHBOARD
MONITOR_DETAIL_PATH = 'horizon:%s:loadbalancers:monitordetails' % DASHBOARD
# Update forms for each resource type.
UPDATEPOOL_PATH = 'horizon:%s:loadbalancers:updatepool' % DASHBOARD
UPDATEVIP_PATH = 'horizon:%s:loadbalancers:updatevip' % DASHBOARD
UPDATEMEMBER_PATH = 'horizon:%s:loadbalancers:updatemember' % DASHBOARD
UPDATEMONITOR_PATH = 'horizon:%s:loadbalancers:updatemonitor' % DASHBOARD
# Monitor/pool association management.
ADDASSOC_PATH = 'horizon:%s:loadbalancers:addassociation' % DASHBOARD
DELETEASSOC_PATH = 'horizon:%s:loadbalancers:deleteassociation' % DASHBOARD
def set_up_expect(self):
    """Record (mox) the lbaas list calls the index view is expected to make."""
    # retrieve pools
    api.lbaas.pool_list(
        IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndReturn(self.pools.list())
    # retrieves members
    api.lbaas.member_list(
        IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndReturn(self.members.list())
    # retrieves monitors; MultipleTimes() allows the view to call this
    # more than once during rendering
    api.lbaas.pool_health_monitor_list(
        IsA(http.HttpRequest), tenant_id=self.tenant.id).MultipleTimes() \
        .AndReturn(self.monitors.list())
def set_up_expect_with_exception(self):
    """Record (mox) the same list calls, each raising a neutron exception."""
    api.lbaas.pool_list(
        IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndRaise(self.exceptions.neutron)
    api.lbaas.member_list(
        IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndRaise(self.exceptions.neutron)
    api.lbaas.pool_health_monitor_list(
        IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndRaise(self.exceptions.neutron)
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list')})
def test_index_pools(self):
    """The index page renders the pools table with one row per pool."""
    self.set_up_expect()
    self.mox.ReplayAll()
    res = self.client.get(self.INDEX_URL)
    self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['table'].data),
                     len(self.pools.list()))
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list')})
def test_index_members(self):
    """The members tab renders its table with one row per member."""
    self.set_up_expect()
    self.mox.ReplayAll()
    res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members')
    self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['memberstable_table'].data),
                     len(self.members.list()))
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list')})
def test_index_monitors(self):
    """The monitors tab renders its table with one row per health monitor."""
    self.set_up_expect()
    self.mox.ReplayAll()
    res = self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors')
    self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['monitorstable_table'].data),
                     len(self.monitors.list()))
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list')})
def test_index_exception_pools(self):
    """API errors leave the pools table empty instead of breaking the page."""
    self.set_up_expect_with_exception()
    self.mox.ReplayAll()
    res = self.client.get(self.INDEX_URL)
    self.assertTemplateUsed(res,
                            '%s/loadbalancers/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res,
                            'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['table'].data), 0)
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list')})
def test_index_exception_members(self):
    """API errors leave the members table empty instead of breaking the page."""
    self.set_up_expect_with_exception()
    self.mox.ReplayAll()
    res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members')
    self.assertTemplateUsed(res,
                            '%s/loadbalancers/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res,
                            'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['memberstable_table'].data), 0)
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list')})
def test_index_exception_monitors(self):
    """API errors leave the monitors table empty instead of breaking the page."""
    self.set_up_expect_with_exception()
    self.mox.ReplayAll()
    res = self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors')
    self.assertTemplateUsed(res,
                            '%s/loadbalancers/details_tabs.html'
                            % self.DASHBOARD)
    self.assertTemplateUsed(res,
                            'horizon/common/_detail_table.html')
    self.assertEqual(len(res.context['monitorstable_table'].data), 0)
@test.create_stubs({api.neutron: ('network_list_for_tenant',
                                  'provider_list',
                                  'is_extension_supported'),
                    api.lbaas: ('pool_create', )})
def test_add_pool_post(self):
    """POSTing the add-pool workflow calls pool_create and redirects to index."""
    pool = self.pools.first()
    subnet = self.subnets.first()
    networks = [{'subnets': [subnet, ]}, ]
    # Record phase: the workflow checks service-type support, then lists
    # the tenant's networks and the available providers.
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest), 'service-type').AndReturn(True)
    api.neutron.network_list_for_tenant(
        IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
    api.neutron.provider_list(IsA(http.HttpRequest)) \
        .AndReturn(self.providers.list())
    form_data = {'name': pool.name,
                 'description': pool.description,
                 'subnet_id': pool.subnet_id,
                 'protocol': pool.protocol,
                 'lb_method': pool.lb_method,
                 'admin_state_up': pool.admin_state_up}
    # The submitted form data must reach pool_create unchanged.
    api.lbaas.pool_create(
        IsA(http.HttpRequest), **form_data).AndReturn(pool)
    self.mox.ReplayAll()
    res = self.client.post(reverse(self.ADDPOOL_PATH), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.neutron: ('network_list_for_tenant',
                                  'provider_list',
                                  'is_extension_supported')})
def test_add_pool_get(self):
    """GET of the add-pool workflow renders normally (happy path)."""
    self._test_add_pool_get()
@test.create_stubs({api.neutron: ('network_list_for_tenant',
                                  'provider_list',
                                  'is_extension_supported')})
def test_add_pool_get_provider_list_exception(self):
    """GET still renders when the provider listing raises an exception."""
    self._test_add_pool_get(with_provider_exception=True)
@test.create_stubs({api.neutron: ('network_list_for_tenant',
                                  'is_extension_supported')})
def test_add_pool_get_without_service_type_support(self):
    """GET still renders when the service-type extension is unsupported."""
    self._test_add_pool_get(with_service_type=False)
def _test_add_pool_get(self, with_service_type=True,
                       with_provider_exception=False):
    """Shared GET scenario for the add-pool workflow.

    with_service_type -- whether the service-type extension is supported.
    with_provider_exception -- make provider_list raise instead of return.
    """
    subnet = self.subnets.first()
    default_provider = self.providers.first()['name']
    networks = [{'subnets': [subnet, ]}, ]
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest), 'service-type').AndReturn(with_service_type)
    api.neutron.network_list_for_tenant(
        IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
    # provider_list is only queried when the extension is supported.
    if with_service_type:
        prov_list = api.neutron.provider_list(IsA(http.HttpRequest))
        if with_provider_exception:
            prov_list.AndRaise(self.exceptions.neutron)
        else:
            prov_list.AndReturn(self.providers.list())
    self.mox.ReplayAll()
    res = self.client.get(reverse(self.ADDPOOL_PATH))
    workflow = res.context['workflow']
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    self.assertEqual(workflow.name, workflows.AddPool.name)
    expected_objs = ['<AddPoolStep: addpoolaction>', ]
    self.assertQuerysetEqual(workflow.steps, expected_objs)
    # The rendered page reflects how provider information was obtained.
    if not with_service_type:
        self.assertNotContains(res, default_provider)
        self.assertContains(res, ('Provider for Load Balancer '
                                  'is not supported'))
    elif with_provider_exception:
        self.assertNotContains(res, default_provider)
        self.assertContains(res, 'No provider is available')
    else:
        self.assertContains(res, default_provider)
def test_add_vip_post(self):
    """Add-VIP POST, default scenario (connection limit, pool's subnet)."""
    self._test_add_vip_post()
def test_add_vip_post_no_connection_limit(self):
    """Add-VIP POST without submitting a connection limit."""
    self._test_add_vip_post(with_conn_limit=False)
def test_add_vip_post_with_diff_subnet(self):
    """Add-VIP POST using the VIP's own subnet rather than the pool's."""
    self._test_add_vip_post(with_diff_subnet=True)
@test.create_stubs({api.lbaas: ('pool_get', 'vip_create'),
                    api.neutron: (
                        'network_list_for_tenant', 'subnet_get', )})
def _test_add_vip_post(self, with_diff_subnet=False, with_conn_limit=True):
    """Shared POST scenario: the add-VIP workflow must call vip_create.

    with_diff_subnet -- submit the VIP's own subnet instead of the pool's.
    with_conn_limit -- include a connection limit in the submitted form.
    """
    vip = self.vips.first()
    subnet = self.subnets.first()
    pool = self.pools.first()
    networks = [{'subnets': [subnet, ]}, ]
    # Record phase: the workflow reads the pool (possibly repeatedly),
    # the pool's subnet and the tenant's networks before creating the VIP.
    api.lbaas.pool_get(
        IsA(http.HttpRequest), pool.id).MultipleTimes().AndReturn(pool)
    api.neutron.subnet_get(
        IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
    api.neutron.network_list_for_tenant(
        IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
    params = {'name': vip.name,
              'description': vip.description,
              'pool_id': vip.pool_id,
              'address': vip.address,
              'floatip_address': vip.floatip_address,
              'other_address': vip.other_address,
              'subnet_id': pool.subnet_id,
              'protocol_port': vip.protocol_port,
              'protocol': vip.protocol,
              'session_persistence': vip.session_persistence['type'],
              'cookie_name': vip.session_persistence['cookie_name'],
              'admin_state_up': vip.admin_state_up,
              }
    if with_conn_limit:
        params['connection_limit'] = vip.connection_limit
    if with_diff_subnet:
        params['subnet_id'] = vip.subnet_id
    api.lbaas.vip_create(
        IsA(http.HttpRequest), **params).AndReturn(vip)
    self.mox.ReplayAll()
    form_data = {
        'name': vip.name,
        'description': vip.description,
        'pool_id': vip.pool_id,
        'address': vip.address,
        'floatip_address': vip.floatip_address,
        'other_address': vip.other_address,
        'subnet_id': pool.subnet_id,
        'protocol_port': vip.protocol_port,
        'protocol': vip.protocol,
        'session_persistence': vip.session_persistence['type'].lower(),
        'cookie_name': vip.session_persistence['cookie_name'],
        'admin_state_up': vip.admin_state_up}
    if with_conn_limit:
        form_data['connection_limit'] = vip.connection_limit
    if with_diff_subnet:
        # Bug fix: this branch previously re-assigned params['subnet_id']
        # (a duplicate of the assignment above), so the posted form still
        # carried the pool's subnet and the "different subnet" scenario was
        # never actually exercised. The submitted form must use the VIP's
        # subnet to match the vip_create expectation recorded above.
        form_data['subnet_id'] = vip.subnet_id
    res = self.client.post(
        reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.lbaas: ('pool_get', ),
                    api.neutron: (
                        'network_list_for_tenant', 'subnet_get', )})
def test_add_vip_post_with_error(self):
    """Invalid VIP form values are rejected with two validation errors."""
    vip = self.vips.first()
    subnet = self.subnets.first()
    pool = self.pools.first()
    networks = [{'subnets': [subnet, ]}, ]
    api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
    api.neutron.subnet_get(
        IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
    api.neutron.network_list_for_tenant(
        IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
    self.mox.ReplayAll()
    # Two invalid fields: protocol_port 65536 (out of range) and a
    # negative connection_limit.
    form_data = {
        'name': vip.name,
        'description': vip.description,
        'pool_id': vip.pool_id,
        'address': vip.address,
        'subnet_id': pool.subnet_id,
        'protocol_port': 65536,
        'protocol': vip.protocol,
        'session_persistence': vip.session_persistence['type'].lower(),
        'cookie_name': vip.session_persistence['cookie_name'],
        'connection_limit': -2,
        'admin_state_up': vip.admin_state_up}
    res = self.client.post(
        reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data)
    self.assertFormErrors(res, 2)
def test_add_vip_get(self):
    """Add-VIP GET, default scenario."""
    self._test_add_vip_get()
def test_add_vip_get_with_diff_subnet(self):
    """Add-VIP GET when the VIP subnet differs from the pool's."""
    self._test_add_vip_get(with_diff_subnet=True)
@test.create_stubs({api.lbaas: ('pool_get', ),
                    api.neutron: (
                        'network_list_for_tenant', 'subnet_get', )})
def _test_add_vip_get(self, with_diff_subnet=False):
    """Shared GET scenario: the add-VIP workflow renders its single step."""
    subnet = self.subnets.first()
    pool = self.pools.first()
    networks = [{'subnets': [subnet, ]}, ]
    api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
    api.neutron.subnet_get(
        IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
    api.neutron.network_list_for_tenant(
        IsA(http.HttpRequest), self.tenant.id).AndReturn(networks)
    self.mox.ReplayAll()
    res = self.client.get(reverse(self.ADDVIP_PATH, args=(pool.id,)))
    workflow = res.context['workflow']
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    self.assertEqual(workflow.name, workflows.AddVip.name)
    expected_objs = ['<AddVipStep: addvipaction>', ]
    self.assertQuerysetEqual(workflow.steps, expected_objs)
    if with_diff_subnet:
        # NOTE(review): networks[0] is a dict while pool.subnet_id is an id,
        # so this inequality holds trivially and checks nothing meaningful —
        # verify the intended comparison (e.g. vip.subnet_id vs
        # pool.subnet_id).
        self.assertNotEqual(networks[0], pool.subnet_id)
@test.create_stubs({api.lbaas: ('pool_health_monitor_create', )})
def test_add_monitor_post(self):
    """POSTing a valid monitor form calls pool_health_monitor_create."""
    monitor = self.monitors.first()
    form_data = {'type': monitor.type,
                 'delay': monitor.delay,
                 'timeout': monitor.timeout,
                 'max_retries': monitor.max_retries,
                 'http_method': monitor.http_method,
                 'url_path': monitor.url_path,
                 'expected_codes': monitor.expected_codes,
                 'admin_state_up': monitor.admin_state_up}
    # The submitted form data must reach the API call unchanged.
    api.lbaas.pool_health_monitor_create(
        IsA(http.HttpRequest), **form_data).AndReturn(monitor)
    self.mox.ReplayAll()
    res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
def test_add_monitor_post_with_error(self):
    """Invalid delay, timeout and max_retries each produce a form error."""
    monitor = self.monitors.first()
    # Three invalid values -> three expected form errors.
    payload = {'type': monitor.type,
               'delay': 0,
               'timeout': 0,
               'max_retries': 11,
               'http_method': monitor.http_method,
               'url_path': monitor.url_path,
               'expected_codes': monitor.expected_codes,
               'admin_state_up': monitor.admin_state_up}
    res = self.client.post(reverse(self.ADDMONITOR_PATH), payload)
    self.assertFormErrors(res, 3)
def test_add_monitor_post_with_httpmethod_error(self):
    """An HTTP monitor with blank method/path/codes yields three form errors."""
    monitor = self.monitors.first()
    # 'http' monitors require these three fields; leave them all empty.
    payload = {'type': 'http',
               'delay': monitor.delay,
               'timeout': monitor.timeout,
               'max_retries': monitor.max_retries,
               'http_method': '',
               'url_path': '',
               'expected_codes': '',
               'admin_state_up': monitor.admin_state_up}
    res = self.client.post(reverse(self.ADDMONITOR_PATH), payload)
    self.assertFormErrors(res, 3)
def test_add_monitor_get(self):
    """GET of the add-monitor workflow renders its single step."""
    res = self.client.get(reverse(self.ADDMONITOR_PATH))
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    workflow = res.context['workflow']
    self.assertEqual(workflow.name, workflows.AddMonitor.name)
    self.assertQuerysetEqual(workflow.steps,
                             ['<AddMonitorStep: addmonitoraction>', ])
def test_add_member_post(self):
    """Add-member POST, default scenario (weight and server list)."""
    self._test_add_member_post()
def test_add_member_post_without_weight(self):
    """Add-member POST without submitting a weight."""
    self._test_add_member_post(with_weight=False)
def test_add_member_post_without_server_list(self):
    """Add-member POST specifying an address instead of picking servers."""
    self._test_add_member_post(with_server_list=False)
@test.create_stubs({api.lbaas: ('pool_list', 'member_create'),
                    api.neutron: ('port_list',),
                    api.nova: ('server_list',)})
def _test_add_member_post(self, with_weight=True, with_server_list=True):
    """Shared POST scenario: the add-member workflow must call member_create.

    with_weight -- include a weight in the submitted form.
    with_server_list -- select members from the server list; otherwise the
    member is given by address.
    """
    member = self.members.first()
    server1 = self.AttributeDict({'id':
                                  '12381d38-c3eb-4fee-9763-12de3338042e',
                                  'name': 'vm1'})
    server2 = self.AttributeDict({'id':
                                  '12381d38-c3eb-4fee-9763-12de3338043e',
                                  'name': 'vm2'})
    # Record phase: the form needs the tenant's pools and servers.
    api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndReturn(self.pools.list())
    api.nova.server_list(IsA(http.HttpRequest)).AndReturn(
        [[server1, server2], False])
    if with_server_list:
        # Selecting a server makes the view resolve its port/IP address.
        port1 = self.AttributeDict(
            {'fixed_ips': [{'ip_address': member.address}]})
        api.neutron.port_list(IsA(http.HttpRequest),
                              device_id=server1.id).AndReturn([port1, ])
    form_data = {'pool_id': member.pool_id,
                 'address': member.address,
                 'protocol_port': member.protocol_port,
                 'members': [server1.id],
                 'admin_state_up': member.admin_state_up}
    if with_weight:
        form_data['weight'] = member.weight
    if with_server_list:
        form_data['member_type'] = 'server_list'
    else:
        form_data['member_type'] = 'member_address'
    api.lbaas.member_create(IsA(http.HttpRequest),
                            **form_data).AndReturn(member)
    self.mox.ReplayAll()
    res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.lbaas: ('pool_list',),
                    api.nova: ('server_list',)})
def test_add_member_post_with_error(self):
    """Invalid protocol port and weight are rejected with two form errors."""
    member = self.members.first()
    server1 = self.AttributeDict({'id':
                                  '12381d38-c3eb-4fee-9763-12de3338042e',
                                  'name': 'vm1'})
    server2 = self.AttributeDict({'id':
                                  '12381d38-c3eb-4fee-9763-12de3338043e',
                                  'name': 'vm2'})
    api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndReturn(self.pools.list())
    api.nova.server_list(IsA(http.HttpRequest)).AndReturn([[server1,
                                                            server2],
                                                           False])
    self.mox.ReplayAll()
    # try to create member with invalid protocol port and weight
    form_data = {'pool_id': member.pool_id,
                 'address': member.address,
                 'protocol_port': 65536,
                 'weight': -1,
                 'members': [server1.id],
                 'admin_state_up': member.admin_state_up}
    res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data)
    self.assertFormErrors(res, 2)
@test.create_stubs({api.lbaas: ('pool_list',),
                    api.nova: ('server_list',)})
def test_add_member_get(self):
    """GET on the AddMember workflow renders the expected single step."""
    server1 = self.AttributeDict({'id':
                                  '12381d38-c3eb-4fee-9763-12de3338042e',
                                  'name': 'vm1'})
    server2 = self.AttributeDict({'id':
                                  '12381d38-c3eb-4fee-9763-12de3338043e',
                                  'name': 'vm2'})
    api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndReturn(self.pools.list())
    api.nova.server_list(
        IsA(http.HttpRequest)).AndReturn([[server1, server2], False])
    self.mox.ReplayAll()

    res = self.client.get(reverse(self.ADDMEMBER_PATH))

    workflow = res.context['workflow']
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    self.assertEqual(workflow.name, workflows.AddMember.name)
    expected_objs = ['<AddMemberStep: addmemberaction>', ]
    self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.lbaas: ('pool_get', 'pool_update')})
def test_update_pool_post(self):
    """Updating a pool submits the edited fields and redirects to index."""
    pool = self.pools.first()
    api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
    data = {'name': pool.name,
            'description': pool.description,
            'lb_method': pool.lb_method,
            'admin_state_up': pool.admin_state_up}
    api.lbaas.pool_update(IsA(http.HttpRequest), pool.id, pool=data)\
        .AndReturn(pool)
    self.mox.ReplayAll()

    # The submitted form additionally carries the pool id.
    form_data = data.copy()
    form_data['pool_id'] = pool.id

    res = self.client.post(
        reverse(self.UPDATEPOOL_PATH, args=(pool.id,)), form_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.lbaas: ('pool_get',)})
def test_update_pool_get(self):
    """GET on the pool update view renders the update template."""
    pool = self.pools.first()
    api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
    self.mox.ReplayAll()

    res = self.client.get(reverse(self.UPDATEPOOL_PATH, args=(pool.id,)))

    self.assertTemplateUsed(res, 'project/loadbalancers/updatepool.html')
@test.create_stubs({api.lbaas: ('pool_list', 'vip_get',
                                'vip_update')})
def test_update_vip_post(self):
    """Updating a VIP submits the edited fields and redirects to index."""
    vip = self.vips.first()
    api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndReturn(self.pools.list())
    api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip)
    data = {'name': vip.name,
            'description': vip.description,
            'pool_id': vip.pool_id,
            'session_persistence': {},
            'connection_limit': vip.connection_limit,
            'admin_state_up': vip.admin_state_up}
    api.lbaas.vip_update(IsA(http.HttpRequest), vip.id, vip=data)\
        .AndReturn(vip)
    self.mox.ReplayAll()

    # The submitted form additionally carries the vip id.
    form_data = data.copy()
    form_data['vip_id'] = vip.id

    res = self.client.post(
        reverse(self.UPDATEVIP_PATH, args=(vip.id,)), form_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.lbaas: ('vip_get', 'pool_list')})
def test_update_vip_get(self):
    """GET on the VIP update view renders the update template."""
    vip = self.vips.first()
    api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndReturn(self.pools.list())
    api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip)
    self.mox.ReplayAll()

    res = self.client.get(reverse(self.UPDATEVIP_PATH, args=(vip.id,)))

    self.assertTemplateUsed(res, 'project/loadbalancers/updatevip.html')
@test.create_stubs({api.lbaas: ('pool_list', 'member_get',
                                'member_update')})
def test_update_member_post(self):
    """Updating a member submits the edited fields and redirects."""
    member = self.members.first()
    api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndReturn(self.pools.list())
    api.lbaas.member_get(IsA(http.HttpRequest), member.id)\
        .AndReturn(member)
    data = {'pool_id': member.pool_id,
            'weight': member.weight,
            'admin_state_up': member.admin_state_up}
    api.lbaas.member_update(IsA(http.HttpRequest), member.id, member=data)\
        .AndReturn(member)
    self.mox.ReplayAll()

    # The submitted form additionally carries the member id.
    form_data = data.copy()
    form_data['member_id'] = member.id

    res = self.client.post(
        reverse(self.UPDATEMEMBER_PATH, args=(member.id,)), form_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.lbaas: ('member_get', 'pool_list')})
def test_update_member_get(self):
    """GET on the member update view renders the update template."""
    member = self.members.first()
    api.lbaas.pool_list(IsA(http.HttpRequest), tenant_id=self.tenant.id) \
        .AndReturn(self.pools.list())
    api.lbaas.member_get(IsA(http.HttpRequest), member.id)\
        .AndReturn(member)
    self.mox.ReplayAll()

    res = self.client.get(
        reverse(self.UPDATEMEMBER_PATH, args=(member.id,)))

    self.assertTemplateUsed(res, 'project/loadbalancers/updatemember.html')
@test.create_stubs({api.lbaas: ('pool_health_monitor_get',
                                'pool_health_monitor_update')})
def test_update_monitor_post(self):
    """Updating a health monitor submits the edited fields and redirects."""
    monitor = self.monitors.first()
    api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\
        .AndReturn(monitor)
    data = {'delay': monitor.delay,
            'timeout': monitor.timeout,
            'max_retries': monitor.max_retries,
            'admin_state_up': monitor.admin_state_up}
    api.lbaas.pool_health_monitor_update(
        IsA(http.HttpRequest),
        monitor.id, health_monitor=data).AndReturn(monitor)
    self.mox.ReplayAll()

    # The submitted form additionally carries the monitor id.
    form_data = data.copy()
    form_data['monitor_id'] = monitor.id

    res = self.client.post(
        reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,)), form_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.lbaas: ('pool_health_monitor_get',)})
def test_update_monitor_get(self):
    """GET on the monitor update view renders the update template."""
    monitor = self.monitors.first()
    api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\
        .AndReturn(monitor)
    self.mox.ReplayAll()

    res = self.client.get(
        reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,)))

    self.assertTemplateUsed(
        res, 'project/loadbalancers/updatemonitor.html')
@test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list',
                                'pool_monitor_association_create')})
def test_add_pool_monitor_association_post(self):
    """Associating a monitor with a pool calls the create API."""
    pool = self.pools.first()
    monitors = self.monitors.list()
    # Pick a monitor that is not yet associated with the pool.
    monitor = self.monitors.list()[1]
    api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
    api.lbaas.pool_health_monitor_list(
        IsA(http.HttpRequest),
        tenant_id=self.tenant.id).AndReturn(monitors)
    form_data = {'monitor_id': monitor.id,
                 'pool_id': pool.id,
                 'pool_monitors': pool.health_monitors,
                 'pool_name': pool.name}
    api.lbaas.pool_monitor_association_create(
        IsA(http.HttpRequest), **form_data).AndReturn(None)
    self.mox.ReplayAll()

    res = self.client.post(
        reverse(self.ADDASSOC_PATH, args=(pool.id,)), form_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitor_list')})
def test_add_pool_monitor_association_get(self):
    """GET on the AddPMAssociation workflow renders its single step."""
    pool = self.pools.first()
    monitors = self.monitors.list()
    api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
    api.lbaas.pool_health_monitor_list(
        IsA(http.HttpRequest),
        tenant_id=self.tenant.id).AndReturn(monitors)
    self.mox.ReplayAll()

    res = self.client.get(reverse(self.ADDASSOC_PATH, args=(pool.id,)))

    workflow = res.context['workflow']
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    self.assertEqual(workflow.name, workflows.AddPMAssociation.name)
    expected_objs = ['<AddPMAssociationStep: addpmassociationaction>', ]
    self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.lbaas: ('pool_get',
                                'pool_health_monitor_list',
                                'pool_monitor_association_delete')})
def test_delete_pool_monitor_association_post(self):
    """Removing a monitor association calls the delete API."""
    pool = self.pools.first()
    monitors = self.monitors.list()
    monitor = monitors[0]
    api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
    api.lbaas.pool_health_monitor_list(
        IsA(http.HttpRequest)).AndReturn(monitors)
    form_data = {'monitor_id': monitor.id,
                 'pool_id': pool.id,
                 'pool_monitors': pool.health_monitors,
                 'pool_name': pool.name}
    api.lbaas.pool_monitor_association_delete(
        IsA(http.HttpRequest), **form_data).AndReturn(None)
    self.mox.ReplayAll()

    res = self.client.post(
        reverse(self.DELETEASSOC_PATH, args=(pool.id,)), form_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
@test.create_stubs({api.lbaas: ('pool_get',
                                'pool_health_monitor_list')})
def test_delete_pool_monitor_association_get(self):
    """GET on the DeletePMAssociation workflow renders its single step."""
    pool = self.pools.first()
    monitors = self.monitors.list()
    api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
    api.lbaas.pool_health_monitor_list(
        IsA(http.HttpRequest)).AndReturn(monitors)
    self.mox.ReplayAll()

    res = self.client.get(
        reverse(self.DELETEASSOC_PATH, args=(pool.id,)))

    workflow = res.context['workflow']
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    self.assertEqual(workflow.name, workflows.DeletePMAssociation.name)
    expected_objs = [
        '<DeletePMAssociationStep: deletepmassociationaction>', ]
    self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list',
                                'pool_delete')})
def test_delete_pool(self):
    """Deleting a pool row triggers pool_delete without form errors."""
    self.set_up_expect()
    pool = self.pools.first()
    api.lbaas.pool_delete(IsA(http.HttpRequest), pool.id)
    self.mox.ReplayAll()

    # Table actions are posted as "<table>__<action>__<row id>".
    form_data = {"action": "poolstable__deletepool__%s" % pool.id}
    res = self.client.post(self.INDEX_URL, form_data)

    self.assertNoFormErrors(res)
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list',
                                'pool_get', 'vip_delete')})
def test_delete_vip(self):
    """Deleting a VIP resolves it via its pool and calls vip_delete."""
    self.set_up_expect()
    pool = self.pools.first()
    vip = self.vips.first()
    api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
    api.lbaas.vip_delete(IsA(http.HttpRequest), vip.id)
    self.mox.ReplayAll()

    # The action is posted against the pool row that owns the VIP.
    form_data = {"action": "poolstable__deletevip__%s" % pool.id}
    res = self.client.post(self.INDEX_URL, form_data)

    self.assertNoFormErrors(res)
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list',
                                'member_delete')})
def test_delete_member(self):
    """Deleting a member row triggers member_delete without form errors."""
    self.set_up_expect()
    member = self.members.first()
    api.lbaas.member_delete(IsA(http.HttpRequest), member.id)
    self.mox.ReplayAll()

    form_data = {"action": "memberstable__deletemember__%s" % member.id}
    res = self.client.post(self.INDEX_URL, form_data)

    self.assertNoFormErrors(res)
@test.create_stubs({api.lbaas: ('pool_list', 'member_list',
                                'pool_health_monitor_list',
                                'pool_health_monitor_delete')})
def test_delete_monitor(self):
    """Deleting a monitor row triggers pool_health_monitor_delete."""
    self.set_up_expect()
    monitor = self.monitors.first()
    api.lbaas.pool_health_monitor_delete(IsA(http.HttpRequest), monitor.id)
    self.mox.ReplayAll()

    form_data = {"action": "monitorstable__deletemonitor__%s" % monitor.id}
    res = self.client.post(self.INDEX_URL, form_data)

    self.assertNoFormErrors(res)
| |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix remote services: JSON-RPC implementation
Based on a modified version of the 3rd-party package jsonrpclib-pelix.
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
# JSON-RPC module
import jsonrpclib.jsonrpc
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCDispatcher
# iPOPO decorators
from pelix.ipopo.decorators import ComponentFactory, Requires, Validate, \
Invalidate, Property, Provides
# Pelix constants
from pelix.utilities import to_str
import pelix.http
import pelix.remote
import pelix.remote.transport.commons as commons
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
JSONRPC_CONFIGURATION = 'jsonrpc'
""" Remote Service configuration constant """

PROP_JSONRPC_URL = '{0}.url'.format(JSONRPC_CONFIGURATION)
""" JSON-RPC servlet URL """

# Module-level logger
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class _JsonRpcServlet(SimpleJSONRPCDispatcher):
    """
    A JSON-RPC servlet that can be registered in the Pelix HTTP service.

    Requests are decoded by the inherited dispatcher; calls that do not
    match a locally registered function are forwarded to the dispatch
    method given in the constructor.
    """
    def __init__(self, dispatch_method, encoding=None):
        """
        Sets up the servlet

        :param dispatch_method: Fallback used for non-local methods
        :param encoding: Optional payload encoding
        """
        SimpleJSONRPCDispatcher.__init__(self, encoding=encoding)

        # Expose the standard system.* introspection methods
        self.register_introspection_functions()

        # Keep a reference to the fallback dispatcher
        self._dispatch_method = dispatch_method

    def _simple_dispatch(self, name, params):
        """
        Resolves and invokes the method matching the request
        """
        func = self.funcs.get(name)
        if func is not None:
            # Locally registered (internal) function: call it directly
            if isinstance(params, (list, tuple)):
                return func(*params)
            return func(**params)

        # Unknown local name: delegate to the exporter's dispatch method.
        # Done outside any exception handler to keep error logs clean.
        return self._dispatch_method(name, params)

    def do_POST(self, request, response):
        """
        Handles an HTTP POST request carrying a JSON-RPC payload

        :param request: The HTTP request bean
        :param response: The HTTP response handler
        """
        try:
            # Read and decode the request body
            data = to_str(request.read_data())

            # Let the dispatcher unmarshal, dispatch and marshal the call
            result = self._marshaled_dispatch(data, self._simple_dispatch)

            # Reply with the marshalled result
            response.send_content(200, result, 'application/json-rpc')
        except Exception as ex:
            response.send_content(500, "Internal error:\n{0}\n".format(ex),
                                  'text/plain')
# ------------------------------------------------------------------------------
@ComponentFactory(pelix.remote.FACTORY_TRANSPORT_JSONRPC_EXPORTER)
@Provides(pelix.remote.SERVICE_EXPORT_PROVIDER)
@Requires('_http', pelix.http.HTTP_SERVICE)
@Property('_path', pelix.http.HTTP_SERVLET_PATH, '/JSON-RPC')
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED,
          (JSONRPC_CONFIGURATION,))
class JsonRpcServiceExporter(commons.AbstractRpcServiceExporter):
    """
    JSON-RPC Remote Services exporter
    """
    def __init__(self):
        """
        Sets up the exporter
        """
        super(JsonRpcServiceExporter, self).__init__()

        # Supported export configurations (injected property)
        self._kinds = None

        # Injected HTTP service and servlet path property
        self._http = None
        self._path = None

        # Servlet registered while the component is valid
        self._servlet = None

    def get_access(self):
        """
        Retrieves the URL to access this component.

        The server name is kept as a ``{server}`` placeholder, to be
        filled in on the import side.
        """
        http_port = self._http.get_access()[1]
        return "http://{{server}}:{0}{1}".format(http_port, self._path)

    def make_endpoint_properties(self, svc_ref, name, fw_uid):
        """
        Prepare properties for the ExportEndpoint to be created

        :param svc_ref: Service reference
        :param name: Endpoint name
        :param fw_uid: Framework UID
        :return: A dictionary of extra endpoint properties
        """
        # Only the access URL is JSON-RPC specific
        return {PROP_JSONRPC_URL: self.get_access()}

    @Validate
    def validate(self, context):
        """
        Component validated
        """
        super(JsonRpcServiceExporter, self).validate(context)

        # Create the servlet and register it with the HTTP service
        self._servlet = _JsonRpcServlet(self.dispatch)
        self._http.register_servlet(self._path, self._servlet)

    @Invalidate
    def invalidate(self, context):
        """
        Component invalidated
        """
        # Unregister the servlet before invalidating the parent
        self._http.unregister(None, self._servlet)
        super(JsonRpcServiceExporter, self).invalidate(context)

        # Drop the servlet reference
        self._servlet = None
# ------------------------------------------------------------------------------
class _ServiceCallProxy(object):
    """
    Dynamic proxy that forwards attribute access as JSON-RPC calls
    """
    def __init__(self, name, url):
        """
        Sets up the call proxy

        :param name: End point name
        :param url: End point URL
        """
        self.__name = name
        self.__url = url

    def __getattr__(self, name):
        """
        Returns a callable proxy for the requested method, with the
        endpoint name prepended to the method name
        """
        # A fresh ServerProxy is created for each access: the underlying
        # proxy re-uses its connection when possible, which is unsafe with
        # multi-threaded callers (a request could be sent before another
        # call's result has been retrieved).
        server = jsonrpclib.jsonrpc.ServerProxy(self.__url)
        return getattr(server, "{0}.{1}".format(self.__name, name))
@ComponentFactory(pelix.remote.FACTORY_TRANSPORT_JSONRPC_IMPORTER)
@Provides(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER)
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED,
          (JSONRPC_CONFIGURATION,))
class JsonRpcServiceImporter(commons.AbstractRpcServiceImporter):
    """
    JSON-RPC Remote Services importer
    """
    def __init__(self):
        """
        Sets up the importer
        """
        super(JsonRpcServiceImporter, self).__init__()

        # Supported import configurations (injected property)
        self._kinds = None

    def make_service_proxy(self, endpoint):
        """
        Creates the proxy for the given ImportEndpoint

        :param endpoint: An ImportEndpoint bean
        :return: A service proxy, or None if no access URL is available
        """
        access_url = endpoint.properties.get(PROP_JSONRPC_URL)
        if not access_url:
            # No URL information: nothing to import
            _logger.warning("No access URL given: %s", endpoint)
            return

        # Resolve the {server} placeholder, falling back to the local
        # host when the endpoint carries no server information.
        if endpoint.server is not None:
            server = endpoint.server
        else:
            server = "localhost"
        access_url = access_url.format(server=server)

        # Wrap the URL in a call proxy
        return _ServiceCallProxy(endpoint.name, access_url)

    def clear_service_proxy(self, endpoint):
        """
        Destroys the proxy made for the given ImportEndpoint

        :param endpoint: An ImportEndpoint bean
        """
        # Proxies are stateless: nothing to clean up
        return
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core import urlresolvers
from django.template.defaultfilters import title # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.database_backups \
import tables as backup_tables
LOG = logging.getLogger(__name__)

# Instance states considered "running" for action availability checks.
ACTIVE_STATES = ("ACTIVE",)
class TerminateInstance(tables.BatchAction):
    """Batch table action that deletes the selected database instances."""
    name = "terminate"
    action_present = _("Terminate")
    action_past = _("Scheduled termination of")
    data_type_singular = _("Instance")
    data_type_plural = _("Instances")
    classes = ('btn-danger', 'btn-terminate')

    def action(self, request, obj_id):
        # Asynchronous delete; the row is refreshed by the AJAX updater.
        api.trove.instance_delete(request, obj_id)
class RestartInstance(tables.BatchAction):
    """Batch table action that restarts the selected database instances."""
    name = "restart"
    action_present = _("Restart")
    action_past = _("Restarted")
    data_type_singular = _("Database")
    data_type_plural = _("Databases")
    classes = ('btn-danger', 'btn-reboot')

    def allowed(self, request, instance=None):
        # Restart only makes sense for running or cleanly stopped instances.
        return ((instance.status in ACTIVE_STATES
                 or instance.status == 'SHUTOFF'))

    def action(self, request, obj_id):
        api.trove.instance_restart(request, obj_id)
class DeleteUser(tables.DeleteAction):
    """Table action that removes a user from a database instance."""
    name = "delete"
    action_present = _("Delete")
    action_past = _("Deleted")
    data_type_singular = _("User")
    data_type_plural = _("Users")

    def delete(self, request, obj_id):
        # obj_id is the user name; the owning instance comes from the row.
        datum = self.table.get_object_by_id(obj_id)
        try:
            api.trove.users_delete(request, datum.instance.id, datum.name)
        except Exception:
            msg = _('Error deleting database user.')
            exceptions.handle(request, msg)
class DeleteDatabase(tables.DeleteAction):
    """Table action that drops a database from an instance."""
    name = "delete"
    action_present = _("Delete")
    action_past = _("Deleted")
    data_type_singular = _("Database")
    data_type_plural = _("Databases")

    def delete(self, request, obj_id):
        # obj_id is the database name; the owning instance comes from the row.
        datum = self.table.get_object_by_id(obj_id)
        try:
            api.trove.database_delete(request, datum.instance.id, datum.name)
        except Exception:
            msg = _('Error deleting database on instance.')
            exceptions.handle(request, msg)
class LaunchLink(tables.LinkAction):
    """Table action linking to the database instance launch workflow."""
    name = "launch"
    verbose_name = _("Launch Instance")
    url = "horizon:project:databases:launch"
    classes = ("btn-launch", "ajax-modal")
class CreateBackup(tables.LinkAction):
    """Row action linking to the backup creation form for an instance."""
    name = "backup"
    verbose_name = _("Create Backup")
    url = "horizon:project:database_backups:create"
    classes = ("ajax-modal", "btn-camera")

    def allowed(self, request, instance=None):
        # Backups live in object storage; hide the action when the user
        # has no access to that service.
        return request.user.has_perm('openstack.services.object-store')

    def get_link_url(self, datam):
        # Pre-select this instance in the backup creation form.
        url = urlresolvers.reverse(self.url)
        return url + "?instance=%s" % datam.id
class UpdateRow(tables.Row):
    """AJAX row updater that refreshes a single instance's data."""
    ajax = True

    def get_data(self, request, instance_id):
        instance = api.trove.instance_get(request, instance_id)
        try:
            # Resolve the flavor so the size column can be rendered.
            flavor_id = instance.flavor['id']
            instance.full_flavor = api.trove.flavor_get(request, flavor_id)
        except Exception:
            # Best effort: the size column falls back to "Not available".
            pass
        return instance
def get_ips(instance):
    """Return the instance's first IP address, or a "Not Assigned" label."""
    addresses = getattr(instance, "ip", None)
    if addresses:
        # Only the first address is shown in the table.
        return addresses[0]
    return _("Not Assigned")
def get_size(instance):
    """Render the instance flavor as "<name> | <RAM> RAM" when known."""
    if not hasattr(instance, "full_flavor"):
        # Flavor lookup failed in UpdateRow.get_data; nothing to show.
        return _("Not available")
    flavor = instance.full_flavor
    return _("%(name)s | %(RAM)s RAM") % {
        'name': flavor.name,
        'RAM': sizeformat.mbformat(flavor.ram)}
def get_databases(user):
    """Return a sorted, comma-separated list of the user's databases."""
    if not hasattr(user, "access"):
        return _("-")
    return ', '.join(sorted(db.name for db in user.access))
class InstancesTable(tables.DataTable):
    """Table of Trove database instances for the project dashboard."""
    # (status value, is-stable) pairs; rows with unstable statuses keep
    # polling through the AJAX row updater until they settle.
    STATUS_CHOICES = (
        ("active", True),
        ("shutoff", True),
        ("suspended", True),
        ("paused", True),
        ("error", False),
    )
    name = tables.Column("name",
                         link=("horizon:project:databases:detail"),
                         verbose_name=_("Database Name"))
    ip = tables.Column(get_ips, verbose_name=_("IP Address"))
    size = tables.Column(get_size,
                         verbose_name=_("Size"),
                         attrs={'data-type': 'size'})
    status = tables.Column("status",
                           filters=(title, filters.replace_underscores),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES)

    class Meta:
        name = "databases"
        verbose_name = _("Databases")
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (LaunchLink, TerminateInstance)
        row_actions = (CreateBackup,
                       RestartInstance, TerminateInstance)
class UsersTable(tables.DataTable):
    """Table of users defined on a database instance."""
    name = tables.Column("name", verbose_name=_("User Name"))
    host = tables.Column("host", verbose_name=_("Allowed Hosts"))
    databases = tables.Column(get_databases, verbose_name=_("Databases"))

    class Meta:
        name = "users"
        verbose_name = _("Database Instance Users")
        table_actions = [DeleteUser]
        row_actions = [DeleteUser]

    def get_object_id(self, datum):
        # Users carry no id attribute; the name is unique per instance.
        return datum.name
class DatabaseTable(tables.DataTable):
    """Table of databases hosted on a database instance."""
    name = tables.Column("name", verbose_name=_("Database Name"))

    class Meta:
        name = "databases"
        verbose_name = _("Databases")
        table_actions = [DeleteDatabase]
        row_actions = [DeleteDatabase]

    def get_object_id(self, datum):
        # Databases carry no id attribute; the name is unique per instance.
        return datum.name
class InstanceBackupsTable(tables.DataTable):
    """Table of backups taken for a database instance."""
    name = tables.Column("name",
                         link=("horizon:project:database_backups:detail"),
                         verbose_name=_("Name"))
    created = tables.Column("created", verbose_name=_("Created At"),
                            filters=[filters.parse_isotime])
    # Every row shows a "Download" link pointing at the backup location.
    location = tables.Column(lambda obj: _("Download"),
                             link=lambda obj: obj.locationRef,
                             verbose_name=_("Backup File"))
    status = tables.Column("status",
                           filters=(title, filters.replace_underscores),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=backup_tables.STATUS_CHOICES)

    class Meta:
        name = "backups"
        verbose_name = _("Backups")
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (backup_tables.LaunchLink, backup_tables.DeleteBackup)
        row_actions = (backup_tables.RestoreLink, backup_tables.DeleteBackup)
| |
"""
SoftLayer.vs
~~~~~~~~~~~~
VS Manager/helpers
:license: MIT, see LICENSE for more details.
"""
import datetime
import logging
import socket
import time
import warnings
from SoftLayer.decoration import retry
from SoftLayer import exceptions
from SoftLayer.managers import ordering
from SoftLayer import utils
LOGGER = logging.getLogger(__name__)
# pylint: disable=no-self-use,too-many-lines
class VSManager(utils.IdentifierMixin, object):
"""Manages SoftLayer Virtual Servers.
See product information here: http://www.softlayer.com/virtual-servers
Example::
# Initialize the VSManager.
# env variables. These can also be specified in ~/.softlayer,
# or passed directly to SoftLayer.Client()
# SL_USERNAME = YOUR_USERNAME
# SL_API_KEY = YOUR_API_KEY
import SoftLayer
client = SoftLayer.Client()
mgr = SoftLayer.VSManager(client)
:param SoftLayer.API.BaseClient client: the client instance
:param SoftLayer.managers.OrderingManager ordering_manager: an optional
manager to handle ordering.
If none is provided, one will be
auto initialized.
"""
def __init__(self, client, ordering_manager=None):
    """Set up API service handles and identifier resolvers.

    :param client: SoftLayer API client
    :param ordering_manager: optional OrderingManager; one is created
        from the client when not supplied
    """
    self.client = client
    # Frequently used API services
    self.account = client['Account']
    self.guest = client['Virtual_Guest']
    self.package_svc = client['Product_Package']
    # IdentifierMixin resolvers: accept IPs and hostnames as identifiers
    self.resolvers = [self._get_ids_from_ip, self._get_ids_from_hostname]
    if ordering_manager is not None:
        self.ordering_manager = ordering_manager
    else:
        self.ordering_manager = ordering.OrderingManager(client)
@retry(logger=LOGGER)
def list_instances(self, hourly=True, monthly=True, tags=None, cpus=None,
                   memory=None, hostname=None, domain=None,
                   local_disk=None, datacenter=None, nic_speed=None,
                   public_ip=None, private_ip=None, transient=None, **kwargs):
    """Retrieve a list of all virtual servers on the account.

    Example::

        # Print out a list of hourly instances in the DAL05 data center.
        for vsi in mgr.list_instances(hourly=True, datacenter='dal05'):
            print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress']

        # Using a custom object-mask. Will get ONLY what is specified
        object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
        for vsi in mgr.list_instances(mask=object_mask,hourly=True):
            print vsi

    :param boolean hourly: include hourly instances
    :param boolean monthly: include monthly instances
    :param list tags: filter based on list of tags
    :param integer cpus: filter based on number of CPUS
    :param integer memory: filter based on amount of memory
    :param string hostname: filter based on hostname
    :param string domain: filter based on domain
    :param string local_disk: filter based on local_disk
    :param string datacenter: filter based on datacenter
    :param integer nic_speed: filter based on network speed (in MBPS)
    :param string public_ip: filter based on public ip address
    :param string private_ip: filter based on private ip address
    :param boolean transient: filter on transient or non-transient instances
    :param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
    :returns: Returns a list of dictionaries representing the matching
              virtual servers
    """
    # Default object mask, only applied when the caller does not pass
    # an explicit 'mask' keyword.
    if 'mask' not in kwargs:
        items = [
            'id',
            'globalIdentifier',
            'hostname',
            'domain',
            'fullyQualifiedDomainName',
            'primaryBackendIpAddress',
            'primaryIpAddress',
            'lastKnownPowerState.name',
            'powerState',
            'maxCpu',
            'maxMemory',
            'datacenter',
            'activeTransaction.transactionStatus[friendlyName,name]',
            'status',
        ]
        kwargs['mask'] = "mask[%s]" % ','.join(items)

    # Pick the narrowest Account call matching the billing-type flags;
    # when both (or neither) are requested, fetch all guests.
    call = 'getVirtualGuests'
    if not all([hourly, monthly]):
        if hourly:
            call = 'getHourlyVirtualGuests'
        elif monthly:
            call = 'getMonthlyVirtualGuests'

    # Translate each provided keyword into an object-filter entry,
    # merging with any filter the caller already supplied.
    _filter = utils.NestedDict(kwargs.get('filter') or {})
    if tags:
        _filter['virtualGuests']['tagReferences']['tag']['name'] = {
            'operation': 'in',
            'options': [{'name': 'data', 'value': tags}],
        }

    if cpus:
        _filter['virtualGuests']['maxCpu'] = utils.query_filter(cpus)

    if memory:
        _filter['virtualGuests']['maxMemory'] = utils.query_filter(memory)

    if hostname:
        _filter['virtualGuests']['hostname'] = utils.query_filter(hostname)

    if domain:
        _filter['virtualGuests']['domain'] = utils.query_filter(domain)

    # Boolean flags use explicit "is not None" so False still filters.
    if local_disk is not None:
        _filter['virtualGuests']['localDiskFlag'] = (
            utils.query_filter(bool(local_disk)))

    if datacenter:
        _filter['virtualGuests']['datacenter']['name'] = (
            utils.query_filter(datacenter))

    if nic_speed:
        _filter['virtualGuests']['networkComponents']['maxSpeed'] = (
            utils.query_filter(nic_speed))

    if public_ip:
        _filter['virtualGuests']['primaryIpAddress'] = (
            utils.query_filter(public_ip))

    if private_ip:
        _filter['virtualGuests']['primaryBackendIpAddress'] = (
            utils.query_filter(private_ip))

    if transient is not None:
        _filter['virtualGuests']['transientGuestFlag'] = (
            utils.query_filter(bool(transient))
        )

    kwargs['filter'] = _filter.to_dict()
    # Page through results automatically on the client side.
    kwargs['iter'] = True
    return self.client.call('Account', call, **kwargs)
@retry(logger=LOGGER)
def get_instance(self, instance_id, **kwargs):
    """Get details about a virtual server instance.

    :param integer instance_id: the instance ID
    :returns: A dictionary containing a large amount of information about
              the specified instance.

    Example::

        # Print out instance ID 12345.
        vsi = mgr.get_instance(12345)
        print vsi

        # Print out only FQDN and primaryIP for instance 12345
        object_mask = "mask[fullyQualifiedDomainName,primaryIpAddress]"
        vsi = mgr.get_instance(12345, mask=mask)
        print vsi
    """
    # Default (very wide) object mask, only applied when the caller
    # does not supply their own 'mask' keyword.
    if 'mask' not in kwargs:
        kwargs['mask'] = (
            'id,'
            'globalIdentifier,'
            'fullyQualifiedDomainName,'
            'hostname,'
            'domain,'
            'createDate,'
            'modifyDate,'
            'provisionDate,'
            'notes,'
            'dedicatedAccountHostOnlyFlag,'
            'transientGuestFlag,'
            'privateNetworkOnlyFlag,'
            'primaryBackendIpAddress,'
            'primaryIpAddress,'
            '''networkComponents[id, status, speed, maxSpeed, name,
                                 macAddress, primaryIpAddress, port,
                                 primarySubnet[addressSpace],
                                 securityGroupBindings[
                                    securityGroup[id, name]]],'''
            'lastKnownPowerState.name,'
            'powerState,'
            'status,'
            'maxCpu,'
            'maxMemory,'
            'datacenter,'
            'activeTransaction[id, transactionStatus[friendlyName,name]],'
            'lastTransaction[transactionStatus],'
            'lastOperatingSystemReload.id,'
            'blockDevices,'
            'blockDeviceTemplateGroup[id, name, globalIdentifier],'
            'postInstallScriptUri,'
            '''operatingSystem[passwords[username,password],
                               softwareLicense.softwareDescription[
                                   manufacturer,name,version,
                                   referenceCode]],'''
            '''softwareComponents[
                passwords[username,password,notes],
                softwareLicense[softwareDescription[
                                    manufacturer,name,version,
                                    referenceCode]]],'''
            'hourlyBillingFlag,'
            'userData,'
            '''billingItem[id,nextInvoiceTotalRecurringAmount,
                           package[id,keyName],
                           children[categoryCode,nextInvoiceTotalRecurringAmount],
                           orderItem[id,
                                     order.userRecord[username],
                                     preset.keyName]],'''
            'tagReferences[id,tag[name,id]],'
            'networkVlans[id,vlanNumber,networkSpace],'
            'dedicatedHost.id,'
            'placementGroupId'
        )

    return self.guest.getObject(id=instance_id, **kwargs)
@retry(logger=LOGGER)
def get_create_options(self):
    """Retrieves the available options for creating a VS.

    :returns: A dictionary of creation options.

    Example::

        # Prints out the create option dictionary
        options = mgr.get_create_options()
        print(options)
    """
    # Thin pass-through to SoftLayer_Virtual_Guest::getCreateObjectOptions
    return self.guest.getCreateObjectOptions()
def cancel_instance(self, instance_id):
    """Cancel an instance immediately, deleting all its data.

    :param integer instance_id: the instance ID to cancel

    Example::

        # Cancels instance 12345
        mgr.cancel_instance(12345)
    """
    # Thin pass-through to SoftLayer_Virtual_Guest::deleteObject
    return self.guest.deleteObject(id=instance_id)
def reload_instance(self, instance_id,
                    post_uri=None,
                    ssh_keys=None,
                    image_id=None):
    """Perform an OS reload of an instance.

    .. warning::
        This will reformat the primary drive.
        Post-provision script MUST be HTTPS for it to be executed.

    :param integer instance_id: the instance ID to reload
    :param string post_uri: The URI of the post-install script to run
                            after reload
    :param list ssh_keys: The SSH keys to add to the root user
    :param int image_id: The GUID of the image to load onto the server

    Example::

       # Reload instance ID 12345 then run a custom post-provision script.
       # Post-provision script MUST be HTTPS for it to be executed.
       post_uri = 'https://somehost.com/bootstrap.sh'
       vsi = mgr.reload_instance(12345, post_uri=post_uri)
    """
    config = {}
    if post_uri:
        config['customProvisionScriptUri'] = post_uri
    if ssh_keys:
        # Accept any iterable of key ids, not only lists.
        config['sshKeyIds'] = list(ssh_keys)
    if image_id:
        config['imageTemplateId'] = image_id
    # 'FORCE' skips the confirmation token normally required for a reload.
    return self.client.call('Virtual_Guest', 'reloadOperatingSystem',
                            'FORCE', config, id=instance_id)
def _generate_create_dict(
self, cpus=None, memory=None, hourly=True,
hostname=None, domain=None, local_disk=True,
datacenter=None, os_code=None, image_id=None,
dedicated=False, public_vlan=None, private_vlan=None,
private_subnet=None, public_subnet=None,
userdata=None, nic_speed=None, disks=None, post_uri=None,
private=False, ssh_keys=None, public_security_groups=None,
private_security_groups=None, boot_mode=None, transient=False, **kwargs):
"""Returns a dict appropriate to pass into Virtual_Guest::createObject
See :func:`create_instance` for a list of available options.
"""
required = [hostname, domain]
flavor = kwargs.get('flavor', None)
host_id = kwargs.get('host_id', None)
mutually_exclusive = [
{'os_code': os_code, 'image_id': image_id},
{'cpu': cpus, 'flavor': flavor},
{'memory': memory, 'flavor': flavor},
{'flavor': flavor, 'dedicated': dedicated},
{'flavor': flavor, 'host_id': host_id}
]
if not all(required):
raise ValueError("hostname, and domain are required")
for mu_ex in mutually_exclusive:
if all(mu_ex.values()):
raise ValueError(
'Can only specify one of: %s' % (','.join(mu_ex.keys())))
data = {
"startCpus": cpus,
"maxMemory": memory,
"hostname": hostname,
"domain": domain,
"localDiskFlag": local_disk,
"hourlyBillingFlag": hourly,
"supplementalCreateObjectOptions": {
"bootMode": boot_mode
}
}
if flavor:
data["supplementalCreateObjectOptions"]["flavorKeyName"] = flavor
if dedicated and not host_id:
data["dedicatedAccountHostOnlyFlag"] = dedicated
if host_id:
data["dedicatedHost"] = {"id": host_id}
if private:
data['privateNetworkOnlyFlag'] = private
if transient:
data['transientGuestFlag'] = transient
if image_id:
data["blockDeviceTemplateGroup"] = {"globalIdentifier": image_id}
elif os_code:
data["operatingSystemReferenceCode"] = os_code
if datacenter:
data["datacenter"] = {"name": datacenter}
if private_vlan or public_vlan or private_subnet or public_subnet:
network_components = self._create_network_components(public_vlan, private_vlan,
private_subnet, public_subnet)
data.update(network_components)
if public_security_groups:
secgroups = [{'securityGroup': {'id': int(sg)}}
for sg in public_security_groups]
pnc = data.get('primaryNetworkComponent', {})
pnc['securityGroupBindings'] = secgroups
data.update({'primaryNetworkComponent': pnc})
if private_security_groups:
secgroups = [{'securityGroup': {'id': int(sg)}}
for sg in private_security_groups]
pbnc = data.get('primaryBackendNetworkComponent', {})
pbnc['securityGroupBindings'] = secgroups
data.update({'primaryBackendNetworkComponent': pbnc})
if userdata:
data['userData'] = [{'value': userdata}]
if nic_speed:
data['networkComponents'] = [{'maxSpeed': nic_speed}]
if disks:
data['blockDevices'] = [
{"device": "0", "diskImage": {"capacity": disks[0]}}
]
for dev_id, disk in enumerate(disks[1:], start=2):
data['blockDevices'].append(
{
"device": str(dev_id),
"diskImage": {"capacity": disk}
}
)
if post_uri:
data['postInstallScriptUri'] = post_uri
if ssh_keys:
data['sshKeys'] = [{'id': key_id} for key_id in ssh_keys]
return data
def _create_network_components(
self, public_vlan=None, private_vlan=None,
private_subnet=None, public_subnet=None):
parameters = {}
if private_vlan:
parameters['primaryBackendNetworkComponent'] = {"networkVlan": {"id": int(private_vlan)}}
if public_vlan:
parameters['primaryNetworkComponent'] = {"networkVlan": {"id": int(public_vlan)}}
if public_subnet:
if public_vlan is None:
raise exceptions.SoftLayerError("You need to specify a public_vlan with public_subnet")
parameters['primaryNetworkComponent']['networkVlan']['primarySubnet'] = {'id': int(public_subnet)}
if private_subnet:
if private_vlan is None:
raise exceptions.SoftLayerError("You need to specify a private_vlan with private_subnet")
parameters['primaryBackendNetworkComponent']['networkVlan']['primarySubnet'] = {'id': int(private_subnet)}
return parameters
@retry(logger=LOGGER)
def wait_for_transaction(self, instance_id, limit, delay=10):
    """Waits on a VS transaction for the specified amount of time.

    This is really just a wrapper for wait_for_ready(pending=True).
    Provided for backwards compatibility.

    :param int instance_id: The instance ID with the pending transaction
    :param int limit: The maximum amount of time to wait.
    :param int delay: The number of seconds to sleep before checks. Defaults to 10.
    :returns: bool -- True when the instance became ready within the limit.
    """
    return self.wait_for_ready(instance_id, limit, delay=delay, pending=True)
def wait_for_ready(self, instance_id, limit=3600, delay=10, pending=False):
    """Determine if a VS is ready and available.

    In some cases though, that can mean that no transactions are running.
    The default arguments imply a VS is operational and ready for use by
    having network connectivity and remote access is available. Setting
    ``pending=True`` will ensure future API calls against this instance
    will not error due to pending transactions such as OS Reloads and
    cancellations.

    :param int instance_id: The instance ID with the pending transaction
    :param int limit: The maximum amount of seconds to wait.
    :param int delay: The number of seconds to sleep before checks. Defaults to 10.
    :param bool pending: Wait for pending transactions not related to
                         provisioning or reloads such as monitoring.
    :returns: bool -- True once ready, False when the limit expired first.

    Example::

        # Will return once vsi 12345 is ready, or after 10 checks
        ready = mgr.wait_for_ready(12345, 10)
    """
    now = time.time()
    until = now + limit
    # Only request the fields the readiness check actually inspects.
    mask = "mask[id, lastOperatingSystemReload[id], activeTransaction, provisionDate]"
    while now <= until:
        instance = self.get_instance(instance_id, mask=mask)
        if utils.is_ready(instance, pending):
            return True
        transaction = utils.lookup(instance, 'activeTransaction', 'transactionStatus', 'friendlyName')
        # Never sleep past the deadline.
        snooze = min(delay, until - now)
        LOGGER.info("%s - %d not ready. Auto retry in %ds", transaction, instance_id, snooze)
        time.sleep(snooze)
        now = time.time()
    LOGGER.info("Waiting for %d expired.", instance_id)
    return False
def verify_create_instance(self, **kwargs):
    """Verifies an instance creation command, without actually placing an order.

    See :func:`create_instance` for a list of available options.

    Example::

        new_vsi = {
            'domain': u'test01.labs.sftlyr.ws',
            'hostname': u'minion05',
            'datacenter': u'hkg02',
            'flavor': 'BL1_1X2X100',
            'os_code': u'UBUNTU_LATEST',
            'hourly': True,
            'ssh_keys': [1234],
            'disks': ('100', '25'),
            'local_disk': True,
            'public_security_groups': [12, 15]
        }
        vsi = mgr.verify_create_instance(**new_vsi)
        # vsi will be a SoftLayer_Container_Product_Order_Virtual_Guest
        # if your order is correct. Otherwise you will get an exception
        print(vsi)
    """
    # 'tags' is consumed by create_instance() only; it is not part of the
    # createObject template, so drop it before building the order.
    kwargs.pop('tags', None)
    return self.guest.generateOrderTemplate(
        self._generate_create_dict(**kwargs))
def create_instance(self, **kwargs):
    """Creates a new virtual server instance.

    .. warning::
        This will add charges to your account

    :param int cpus: The number of virtual CPUs to include in the instance.
    :param int memory: The amount of RAM to order.
    :param bool hourly: Flag to indicate if this server should be billed hourly (default) or monthly.
    :param string hostname: The hostname to use for the new server.
    :param string domain: The domain to use for the new server.
    :param bool local_disk: Flag to indicate if this should be a local disk (default) or a SAN disk.
    :param string datacenter: The short name of the data center in which the VS should reside.
    :param string os_code: The operating system to use. Cannot be specified if image_id is specified.
    :param int image_id: The GUID of the image to load onto the server. Cannot be specified if os_code is specified.
    :param bool dedicated: Flag to indicate if this should be housed on a dedicated or shared host (default).
                           This will incur a fee on your account.
    :param int public_vlan: The ID of the public VLAN on which you want this VS placed.
    :param list public_security_groups: The list of security group IDs to apply to the public interface
    :param list private_security_groups: The list of security group IDs to apply to the private interface
    :param int private_vlan: The ID of the private VLAN on which you want this VS placed.
    :param list disks: A list of disk capacities for this server.
    :param string post_uri: The URI of the post-install script to run after reload
    :param bool private: If true, the VS will be provisioned only with access to the private network.
                         Defaults to false
    :param list ssh_keys: The SSH keys to add to the root user
    :param int nic_speed: The port speed to set
    :param string tags: tags to set on the VS as a comma separated list
    :param string flavor: The key name of the public virtual server flavor being ordered.
    :param int host_id: The host id of a dedicated host to provision a dedicated host virtual server on.
    :returns: dict -- the newly created guest record.

    Example::

        new_vsi = {
            'domain': u'test01.labs.sftlyr.ws',
            'hostname': u'minion05',
            'datacenter': u'hkg02',
            'flavor': 'BL1_1X2X100',
            'os_code': u'UBUNTU_LATEST',
            'hourly': True,
            'local_disk': True,
            'tags': 'test, pleaseCancel',
        }
        vsi = mgr.create_instance(**new_vsi)
        # vsi will have the newly created vsi details if done properly.
        print(vsi)
    """
    tags = kwargs.pop('tags', None)
    new_guest = self.guest.createObject(self._generate_create_dict(**kwargs))
    # Tags are not part of the createObject template, so they are applied
    # right after the guest comes back with an id.
    if tags is not None:
        self.set_tags(tags, guest_id=new_guest['id'])
    return new_guest
@retry(logger=LOGGER)
def set_tags(self, tags, guest_id):
    """Sets tags on a guest with a retry decorator

    Just calls guest.setTags, but if it fails from an APIError will retry

    :param string tags: comma separated list of tags to set
    :param int guest_id: id of the guest to tag
    """
    self.guest.setTags(tags, id=guest_id)
def create_instances(self, config_list):
    """Creates multiple virtual server instances.

    This takes a list of dictionaries using the same arguments as
    create_instance().

    .. warning::
        This will add charges to your account

    :param list config_list: one dict of create_instance() options per guest.
    :returns: list -- one created-guest record per config.

    Example::

        new_vsi = {
            'domain': u'test01.labs.sftlyr.ws',
            'hostname': u'minion05',
            'datacenter': u'hkg02',
            'flavor': 'BL1_1X2X100',
            'os_code': u'UBUNTU_LATEST',
            'tags': 'test, pleaseCancel',
        }
        # using .copy() so we can make changes to individual nodes
        instances = [new_vsi.copy(), new_vsi.copy(), new_vsi.copy()]
        # give each its own hostname, not required.
        instances[0]['hostname'] = "multi-test01"
        instances[1]['hostname'] = "multi-test02"
        instances[2]['hostname'] = "multi-test03"
        vsi = mgr.create_instances(config_list=instances)
        # vsi will be a list of all the new virtual servers
        print(vsi)
    """
    # Tags are not understood by createObjects(); pull them out first,
    # keeping one (possibly None) entry per config.
    tag_list = [conf.pop('tags', None) for conf in config_list]
    templates = [self._generate_create_dict(**conf) for conf in config_list]
    guests = self.guest.createObjects(templates)
    for guest, tags in zip(guests, tag_list):
        if tags is not None:
            self.set_tags(tags, guest_id=guest['id'])
    return guests
def change_port_speed(self, instance_id, public, speed):
    """Allows you to change the port speed of a virtual server's NICs.

    :param int instance_id: The ID of the VS
    :param bool public: Flag to indicate which interface to change.
                        True (default) means the public interface.
                        False indicates the private interface.
    :param int speed: The port speed to set.

    .. warning::
        A port speed of 0 will disable the interface.

    Example::

        # change the Public interface to 10Mbps on instance 12345
        result = mgr.change_port_speed(instance_id=12345,
                                       public=True, speed=10)
        # result will be True or an Exception
    """
    # Public and private interfaces are driven by differently-named
    # API methods; everything else about the call is identical.
    method = ('setPublicNetworkInterfaceSpeed' if public
              else 'setPrivateNetworkInterfaceSpeed')
    return self.client.call('Virtual_Guest', method, speed, id=instance_id)
def _get_ids_from_hostname(self, hostname):
"""List VS ids which match the given hostname."""
results = self.list_instances(hostname=hostname, mask="id")
return [result['id'] for result in results]
def _get_ids_from_ip(self, ip_address): # pylint: disable=inconsistent-return-statements
"""List VS ids which match the given ip address."""
try:
# Does it look like an ip address?
socket.inet_aton(ip_address)
except socket.error:
return []
# Find the VS via ip address. First try public ip, then private
results = self.list_instances(public_ip=ip_address, mask="id")
if results:
return [result['id'] for result in results]
results = self.list_instances(private_ip=ip_address, mask="id")
if results:
return [result['id'] for result in results]
def edit(self, instance_id, userdata=None, hostname=None, domain=None,
         notes=None, tags=None):
    """Edit hostname, domain name, notes, and/or the user data of a VS.

    Parameters set to None will be ignored and not attempted to be updated.

    :param integer instance_id: the instance ID to edit
    :param string userdata: user data on VS to edit.
                            If none exist it will be created
    :param string hostname: valid hostname
    :param string domain: valid domain name
    :param string notes: notes about this particular VS
    :param string tags: tags to set on the VS as a comma separated list.
                        Use the empty string to remove all tags.
    :returns: bool -- True or an Exception

    Example::

        # Change the hostname on instance 12345 to 'something'
        result = mgr.edit(instance_id=12345, hostname="something")
        # result will be True or an Exception
    """
    if userdata:
        self.guest.setUserMetadata([userdata], id=instance_id)

    # The empty string is meaningful here (it clears all tags), so tags
    # are compared against None instead of by truthiness.
    if tags is not None:
        self.set_tags(tags, guest_id=instance_id)

    changes = {field: value
               for field, value in (('hostname', hostname),
                                    ('domain', domain),
                                    ('notes', notes))
               if value}
    if not changes:
        return True
    return self.guest.editObject(changes, id=instance_id)
def rescue(self, instance_id):
    """Reboot a VSI into the Xen rescue kernel.

    :param integer instance_id: the instance ID to rescue
    :returns: bool -- True or an Exception

    Example::

        # Puts instance 12345 into rescue mode
        result = mgr.rescue(instance_id=12345)
    """
    return self.guest.executeRescueLayer(id=instance_id)
def capture(self, instance_id, name, additional_disks=False, notes=None):
    """Capture one or all disks from a VS to a SoftLayer image.

    Parameters set to None will be ignored and not attempted to be updated.

    :param integer instance_id: the instance ID to edit
    :param string name: name assigned to the image
    :param bool additional_disks: set to true to include all additional
                                  attached storage devices
    :param string notes: notes about this particular image
    :returns: dictionary -- information about the capture transaction.

    Example::

        name = "Testing Images"
        notes = "Some notes about this image"
        result = mgr.capture(instance_id=12345, name=name, notes=notes)
    """
    # Fetch only the block-device details needed to decide what to capture.
    vsi = self.client.call(
        'Virtual_Guest',
        'getObject',
        id=instance_id,
        mask="""id,
                blockDevices[id,device,mountType,
                diskImage[id,metadataFlag,type[keyName]]]""")

    disks_to_capture = []
    for block_device in vsi['blockDevices']:
        # We never want metadata disks
        if utils.lookup(block_device, 'diskImage', 'metadataFlag'):
            continue

        # We never want swap devices
        type_name = utils.lookup(block_device, 'diskImage', 'type', 'keyName')
        if type_name == 'SWAP':
            continue

        # We never want CD images
        if block_device['mountType'] == 'CD':
            continue

        # Only use the first block device if we don't want additional disks
        if not additional_disks and str(block_device['device']) != '0':
            continue

        disks_to_capture.append(block_device)

    return self.guest.createArchiveTransaction(
        name, disks_to_capture, notes, id=instance_id)
def upgrade(self, instance_id, cpus=None, memory=None, nic_speed=None, public=True, preset=None):
    """Upgrades a VS instance.

    Example::

       # Upgrade instance 12345 to 4 CPUs and 4 GB of memory
       import SoftLayer
       client = SoftLayer.create_client_from_env()
       mgr = SoftLayer.VSManager(client)
       mgr.upgrade(12345, cpus=4, memory=4)

    :param int instance_id: Instance id of the VS to be upgraded
    :param int cpus: The number of virtual CPUs to upgrade to
                     of a VS instance.
    :param string preset: preset assigned to the vsi
    :param int memory: RAM of the VS to be upgraded to.
    :param int nic_speed: The port speed to set
    :param bool public: CPU will be in Private/Public Node.
    :returns: bool -- True when an upgrade order was placed, False when
        there was nothing to order.
    :raises ValueError: when cpus/memory are combined with a preset.
    :raises exceptions.SoftLayerError: when no price matches an option.
    """
    upgrade_prices = self._get_upgrade_prices(instance_id)
    prices = []

    # Options to order; None entries are skipped by the loop below.
    data = {'nic_speed': nic_speed}

    # cpus/memory are ordered as individual prices; a preset covers both,
    # so the two mechanisms are mutually exclusive.
    if cpus is not None and preset is not None:
        raise ValueError("Do not use cpu, private and memory if you are using flavors")
    data['cpus'] = cpus

    if memory is not None and preset is not None:
        raise ValueError("Do not use memory, private or cpu if you are using flavors")
    data['memory'] = memory

    maintenance_window = datetime.datetime.now(utils.UTC())
    order = {
        'complexType': 'SoftLayer_Container_Product_Order_Virtual_Guest_Upgrade',
        'properties': [{
            'name': 'MAINTENANCE_WINDOW',
            'value': maintenance_window.strftime("%Y-%m-%d %H:%M:%S%z")
        }],
        'virtualGuests': [{'id': int(instance_id)}],
    }

    for option, value in data.items():
        if not value:
            continue
        price_id = self._get_price_id_for_upgrade_option(upgrade_prices,
                                                         option,
                                                         value,
                                                         public)
        if not price_id:
            # Every option provided is expected to have a price
            raise exceptions.SoftLayerError(
                "Unable to find %s option with value %s" % (option, value))

        prices.append({'id': price_id})
    order['prices'] = prices

    if preset is not None:
        # The preset key is resolved against the package the guest is
        # currently billed under.
        vs_object = self.get_instance(instance_id)['billingItem']['package']
        order['presetId'] = self.ordering_manager.get_preset_by_key(vs_object['keyName'], preset)['id']

    if prices or preset:
        self.client['Product_Order'].placeOrder(order)
        return True
    return False
def order_guest(self, guest_object, test=False):
    """Uses Product_Order::placeOrder to create a virtual guest.

    Useful when creating a virtual guest with options not supported by Virtual_Guest::createObject
    specifically ipv6 support.

    :param dictionary guest_object: See SoftLayer.CLI.virt.create._parse_create_args
    :param bool test: when True the order is only verified (verifyOrder),
        not placed.
    :returns: the order receipt; ``result['orderDetails']['virtualGuests']``
        is the list of created guests.

    Example::

        new_vsi = {
            'domain': u'test01.labs.sftlyr.ws',
            'hostname': u'minion05',
            'datacenter': u'hkg02',
            'flavor': 'BL1_1X2X100'
            'dedicated': False,
            'private': False,
            'transient': False,
            'os_code' : u'UBUNTU_LATEST',
            'hourly': True,
            'ssh_keys': [1234],
            'disks': ('100','25'),
            'local_disk': True,
            'tags': 'test, pleaseCancel',
            'public_security_groups': [12, 15],
            'ipv6': True
        }

        vsi = mgr.order_guest(new_vsi)
        # vsi will have the newly created vsi receipt.
        # vsi['orderDetails']['virtualGuests'] will be an array of created Guests
        print vsi
    """
    tags = guest_object.pop('tags', None)
    # Start from the same order template createObject would use.
    template = self.verify_create_instance(**guest_object)
    if guest_object.get('ipv6'):
        ipv6_price = self.ordering_manager.get_price_id_list('PUBLIC_CLOUD_SERVER', ['1_IPV6_ADDRESS'])
        template['prices'].append({'id': ipv6_price[0]})

    # Notice this is `userdata` from the cli, but we send it in as `userData`
    if guest_object.get('userdata'):
        # SL_Virtual_Guest::generateOrderTemplate() doesn't respect userData, so we need to add it ourself
        template['virtualGuests'][0]['userData'] = [{"value": guest_object.get('userdata')}]
    if guest_object.get('host_id'):
        template['hostId'] = guest_object.get('host_id')
    if guest_object.get('placement_id'):
        template['virtualGuests'][0]['placementGroupId'] = guest_object.get('placement_id')

    if test:
        result = self.client.call('Product_Order', 'verifyOrder', template)
    else:
        result = self.client.call('Product_Order', 'placeOrder', template)

    # Tags cannot ride along in the order; apply them to each created guest.
    if tags is not None:
        virtual_guests = utils.lookup(result, 'orderDetails', 'virtualGuests')
        for guest in virtual_guests:
            self.set_tags(tags, guest_id=guest['id'])
    return result
def _get_package_items(self):
"""Following Method gets all the item ids related to VS.
Deprecated in favor of _get_upgrade_prices()
"""
warnings.warn("use _get_upgrade_prices() instead",
DeprecationWarning)
mask = [
'description',
'capacity',
'units',
'prices[id,locationGroupId,categories[name,id,categoryCode]]'
]
mask = "mask[%s]" % ','.join(mask)
package_keyname = "CLOUD_SERVER"
package = self.ordering_manager.get_package_by_key(package_keyname)
package_service = self.client['Product_Package']
return package_service.getItems(id=package['id'], mask=mask)
def _get_upgrade_prices(self, instance_id, include_downgrade_options=True):
"""Following Method gets all the price ids related to upgrading a VS.
:param int instance_id: Instance id of the VS to be upgraded
:returns: list
"""
mask = [
'id',
'locationGroupId',
'categories[name,id,categoryCode]',
'item[description,capacity,units]'
]
mask = "mask[%s]" % ','.join(mask)
return self.guest.getUpgradeItemPrices(include_downgrade_options, id=instance_id, mask=mask)
# pylint: disable=inconsistent-return-statements
def _get_price_id_for_upgrade_option(self, upgrade_prices, option, value, public=True):
"""Find the price id for the option and value to upgrade. This
:param list upgrade_prices: Contains all the prices related to a VS upgrade
:param string option: Describes type of parameter to be upgraded
:param int value: The value of the parameter to be upgraded
:param bool public: CPU will be in Private/Public Node.
"""
option_category = {
'memory': 'ram',
'cpus': 'guest_core',
'nic_speed': 'port_speed'
}
category_code = option_category.get(option)
for price in upgrade_prices:
if price.get('categories') is None or price.get('item') is None:
continue
product = price.get('item')
is_private = (product.get('units') == 'PRIVATE_CORE'
or product.get('units') == 'DEDICATED_CORE')
for category in price.get('categories'):
if not (category.get('categoryCode') == category_code
and str(product.get('capacity')) == str(value)):
continue
if option == 'cpus':
# Public upgrade and public guest_core price
if public and not is_private:
return price.get('id')
# Private upgrade and private guest_core price
elif not public and is_private:
return price.get('id')
elif option == 'nic_speed':
if 'Public' in product.get('description'):
return price.get('id')
else:
return price.get('id')
def get_summary_data_usage(self, instance_id, start_date=None, end_date=None, valid_type=None, summary_period=None):
    """Retrieve the usage information of a virtual server.

    :param string instance_id: a string identifier used to resolve ids
    :param string start_date: the start date to retrieve the vs usage information
    :param string end_date: the end date to retrieve the vs usage information
    :param string valid_type: the Metric_Data_Type keyName.
    :param int summary_period: summary period.
    """
    # Only the 'max' summary of the requested metric type is pulled.
    valid_types = [{"keyName": valid_type, "summaryType": "max"}]
    metric_tracking_id = self.get_tracking_id(instance_id)
    return self.client.call(
        'Metric_Tracking_Object', 'getSummaryData',
        start_date, end_date, valid_types, summary_period,
        id=metric_tracking_id, iter=True)
def get_tracking_id(self, instance_id):
    """Returns the Metric Tracking Object Id for a hardware server

    :param int instance_id: Id of the hardware server
    :returns: int -- the metric tracking object id for the guest
    """
    return self.guest.getMetricTrackingObjectId(id=instance_id)
def get_bandwidth_data(self, instance_id, start_date=None, end_date=None, direction=None, rollup=3600):
    """Gets bandwidth data for a server

    Will get averaged bandwidth data for a given time period. If you use a rollup over 3600 be aware
    that the API will bump your start/end date to align with how data is stored. For example if you
    have a rollup of 86400 your start_date will be bumped to 00:00. If you are not using a time in the
    start/end date fields, this won't really matter.

    :param int instance_id: Hardware Id to get data for
    :param date start_date: Date to start pulling data for.
    :param date end_date: Date to finish pulling data for
    :param string direction: Can be either 'public', 'private', or None for both.
    :param int rollup: 300, 600, 1800, 3600, 43200 or 86400 seconds to average data over.
    """
    tracking_id = self.get_tracking_id(instance_id)
    return self.client.call(
        'Metric_Tracking_Object', 'getBandwidthData',
        start_date, end_date, direction, rollup,
        id=tracking_id, iter=True)
def get_bandwidth_allocation(self, instance_id):
    """Combines getBandwidthAllotmentDetail() and getBillingCycleBandwidthUsage()

    :param int instance_id: id of the virtual guest
    :returns: dict with 'allotment' (the allocation record) and 'useage'
        (billing-cycle usage). NOTE: the misspelled 'useage' key is part
        of the established return contract, so it is kept as-is.
    """
    a_mask = "mask[allocation[amount]]"
    allotment = self.client.call('Virtual_Guest', 'getBandwidthAllotmentDetail', id=instance_id, mask=a_mask)
    u_mask = "mask[amountIn,amountOut,type]"
    useage = self.client.call('Virtual_Guest', 'getBillingCycleBandwidthUsage', id=instance_id, mask=u_mask)
    return {'allotment': allotment['allocation'], 'useage': useage}
# pylint: disable=inconsistent-return-statements
def _get_price_id_for_upgrade(self, package_items, option, value, public=True):
"""Find the price id for the option and value to upgrade.
Deprecated in favor of _get_price_id_for_upgrade_option()
:param list package_items: Contains all the items related to an VS
:param string option: Describes type of parameter to be upgraded
:param int value: The value of the parameter to be upgraded
:param bool public: CPU will be in Private/Public Node.
"""
warnings.warn("use _get_price_id_for_upgrade_option() instead",
DeprecationWarning)
option_category = {
'memory': 'ram',
'cpus': 'guest_core',
'nic_speed': 'port_speed'
}
category_code = option_category[option]
for item in package_items:
is_private = (item.get('units') == 'PRIVATE_CORE')
for price in item['prices']:
if 'locationGroupId' in price and price['locationGroupId']:
# Skip location based prices
continue
if 'categories' not in price:
continue
categories = price['categories']
for category in categories:
if not (category['categoryCode'] == category_code
and str(item['capacity']) == str(value)):
continue
if option == 'cpus':
if public and not is_private:
return price['id']
elif not public and is_private:
return price['id']
elif option == 'nic_speed':
if 'Public' in item['description']:
return price['id']
else:
return price['id']
| |
#!/usr/bin/python
###############################################################################
# GenAllReports.py
# Jeremy Clay
# Aug 20, 2016
#
# This file serves as the main function call. Running this file will generate
# a .pdf file that reports the CPU usage of Mt. Moran as well as the amount of
# disk space that is being consumed on Bighorn for every active account on Mt.
# Moran. The .pdf file will automatically be attached to a very short email
# and sent to the principle investigator of each account.
#
# Dependencies:
#
# Updates:
# 2016-09-14 JAC added code to archive the .pdf files on local machine
###############################################################################
from GenAllOutputFiles import GenAll
from CoreHoursByMonthReport import GenReport
import ipaShowTools
import ldapShowTools
import os
from datetime import date
import argparse
# argparser and auto generate help
parser = argparse.ArgumentParser(
    description="generate a monthly usage statement as a .pdf document and"\
    +" sends an email to the group's PI with the document attached. The"\
    +" document contains information regarding the number of jobs submitted"\
    +" by the group as well as the number of CPU hours used on Mt. Moran.")
# -i/--ipa: query the directory with `ipa` commands instead of the
# default `ldapsearch` commands.
parser.add_argument("-i", "--ipa", action="store_true",
    help="when this option is selected, the 'ipa' commands will be used,"\
    +" otherwise the 'ldapsearch' commands will be used (ldapsearch commands"\
    +" are the default)")
# -m/--month: report month (1-12); defaults to the most recent full month.
parser.add_argument("-m", "--month", type=int,
    choices=[1,2,3,4,5,6,7,8,9,10,11,12],help="the month in which the usage"\
    +" report will be generated for, if this option is not selected the most"\
    +" recent month will be used. Note: No storage statistics are available"\
    +" prior to August 16, 2016.")
# -Y/--year: 4-digit report year; defaults to the current year (>= 2016).
parser.add_argument("-Y", "--year", type=int,
    help="the year, entered in 4 digit format (e.g. 2016), in which the usage"\
    +" report will be generated for, if this option is not selected the"\
    +" current year will be used. Note: No reports can be generated prior to"\
    +" 2016.")
args = parser.parse_args()
# based on users input, local variables statementMonth and statementYear are
# initialized.
today = date.today()

# A missing month, or a requested month that has not finished yet in the
# current year, falls back to the most recent complete month -- unless a
# valid *past* year was supplied, in which case the requested month is
# usable as-is.
# NOTE(review): the `args.year >= today.year` test also rejects the current
# year itself; this mirrors the original behavior -- confirm it is intended.
if args.month is None or args.month >= today.month:
    if args.year is None or args.year >= today.year or \
            args.year < 2016:
        statementMonth = today.month - 1
    else:
        statementMonth = args.month
else:
    statementMonth = args.month

# Missing or future years default to the current year.
if args.year is None or args.year > today.year:
    statementYear = today.year
else:
    statementYear = args.year
if statementYear < 2016:
    statementYear = 2016  # no reports generated prior to 2016

# in the case the current month is Jan, the statement will be generated for Dec
# of the previous year
if statementMonth == 0:
    statementMonth = 12
    statementYear = today.year - 1

Months = {1:'Jan', 2:'Feb', 3:'Mar', 4:'April', 5:'May', 6:'June', 7:'July',
          8:'Aug', 9:'Sept', 10:'Oct', 11:'Nov', 12:'Dec'}

theDateYYYYmmdd = today.strftime("%Y-%m-%d")  # YYYY-mm-dd format

# list of groups to ignore
badGroups = ['bsa','taed','proteinstructureevol','cudaperfmodelling',
             'gpucfdcomputing','rmacc','utahchpc','arcc','bc-201606','bc-201607']

###############################################################################
# Debugging purposes
goodGroups = ['evolvingai']
# end debugging
###############################################################################
# all groups with jobs ran on Mt. Moran
accounts=GenAll(statementMonth)
# all groups that are active members of Mt. Moran
activeGroups = ipaShowTools.activeGroups() if args.ipa else ldapShowTools.activeGroups()
# Echo which directory backend is in use for this run.
if args.ipa:
    print ('Using ipa command.')
else:
    print ('Using ldapsearch command.')
# variable to store the path of the archive folder
archiveFolder = '/home/jclay6/arcc_metrics/Reports/mtmoran/'
# loop through all of the accounts and call the GenReport() method on all
# that are active. We also do not want to generate reports for a few select
# accounts, so we also check against the list 'badGroups'
for account in accounts:
    # Report only on accounts that are active on the cluster and not in
    # the exclusion list.
    if (activeGroups.__contains__(account) and\
            not(badGroups.__contains__(account))):
        # using either ipa or ldap commands, set local variables uid, fullName,
        # and email. These variables will be passed into the GenReport() call
        uid = ipaShowTools.getPI(account) if args.ipa else ldapShowTools.getPI(account)
        fullName = ipaShowTools.getName(uid) if args.ipa else ldapShowTools.getName(uid)
        email = ipaShowTools.getEmail(uid) if args.ipa else ldapShowTools.getEmail(uid)

        ######################################################################################################
        # debugging
        # accountIs = account
        # accountPrint = 'account: '+accountIs
        # print(accountPrint)
        # fullNameIs = fullName
        # fullNamePrint = 'fullName: '+fullNameIs
        # print(fullNamePrint)
        # statementMonthIs = str(statementMonth)
        # statementMonthPrint = 'statementMonth: '+statementMonthIs
        # print(statementMonthPrint)
        # statementYearIs = str(statementYear)
        # statementYearPrint = 'statementYear: '+statementYearIs
        # print(statementYearPrint)
        # GenReportCall = 'Calling: GenReport(str(account), str(fullName), int(statementMonth), int(statementYear)'
        # print(GenReportCall)
        ######################################################################################################

        # GenReport(account, fullName, statementMonth,statementYear) # this version works in Python 2.7
        # Generate the .pdf report for this account in the working directory.
        GenReport(account, fullName, statementMonth, statementYear)

        # Archive the report in:
        # /home/jclay6/arcc_metrics/Reports/mtmoran/<statementYear>/<account>/reportname.pdf
        # NOTE(review): account names are interpolated into shell commands
        # unescaped here and in the mail command below; this is safe only
        # while group names stay shell-benign -- confirm.
        if not os.path.exists(archiveFolder+str(statementYear)+'/'+account):
            os.makedirs(archiveFolder+str(statementYear)+'/'+account)
        os.system('cp '+account+'*.pdf '+archiveFolder+str(statementYear)+'/'+account+'/')

        ###############################################################################
        # debugging section, generates emails and sends them to jclay6@uwyo.edu
        print ('\nGroup: %s' % account)
        print ('PI: %s, %s' % (uid, fullName))
        print ('Email: %s\n' % email)
        myEmail = 'jclay6@uwyo.edu'
        # cc line for bash command, should be pasted after the line beginning
        # with +"-r and before the line beginning with +myEmail\
        # +"-c arcc-info@uwyo.edu "\
        # NOTE(review): the email body below contains the typo "reponse";
        # it is user-visible runtime text and is deliberately left unchanged.
        bashCommand="mail -s 'Mt Moran Usage Report for "+account+"' "\
            +"-a "+account+"_report_"+theDateYYYYmmdd+".pdf "\
            +"-r arcc-info@uwyo.edu "\
            +myEmail\
            +" <<< 'Dear "+fullName+",\n\n\tWe at ARCC hope that our"\
            +" services, including the use of Mt. Moran, have been beneficial"\
            +" to you and your research team. Attached is a"\
            +" monthly usage statement for the "+account+" group. Previously,"\
            +" many of the principal investigators replied to a similar email"\
            +" with suggestions on how to make this report more useful to"\
            +" them and their investigative teams. In reponse to these"\
            +" requests, two additional pages have been added to the report"\
            +" that reflect the group storage usage on the Bighorn cluster. "\
            +" With enough feedback from principal investigators such as"\
            +" yourself, this report will evolve into a tool more useful for"\
            +" all ARCC PIs. Please reply to"\
            +" this email with any questions or comments.\n\nGeneral ARCC"\
            +" questions can also be emailed to arcc-info@uwyo.edu and"\
            +" service requests may be opened by emailing arcc-help@uwyo.edu."\
            +"\n\nThe ARCC Team'"
        # Only attempt the send when an email address was actually found.
        if not(email == ''):
            os.system(bashCommand)
        # end of debugging
        ###############################################################################

        ###############################################################################
        # WARNING the following lines of code will send out emails to all PIs!
        # Send emails to all PIs
        # bashCommand="mail -s 'Mt Moran Usage Report for "+account+"' "\
        # +"-a "+account+"_report_"+theDateYYYYmmdd+".pdf "\
        # +"-r arcc-info@uwyo.edu "\
        # +"-b arcc-admin@uwyo.edu "\
        # +email\
        # +" <<< 'Dear "+fullName+",\n\n\tWe at ARCC hope that our"\
        # +" services, including the use of Mt. Moran, have been beneficial"\
        # +" to you and your research team. Attached is a"\
        # +" monthly usage statement for the "+account+" group. Previously,"\
        # +" many of the principal investigators replied to a similar email"\
        # +" with suggestions on how to make this report more useful to"\
        # +" them and their investigative teams. In reponse to these"\
        # +" requests, two additional pages have been added to the report"\
        # +" that reflect the group storage usage on the Bighorn cluster. "\
        # +" With enough feedback from principal investigators such as"\
        # +" yourself, this report will evolve into a tool more useful for"\
        # +" all ARCC PIs. Please reply to"\
        # +" this email with any questions or comments.\n\nGeneral ARCC"\
        # +" questions can also be emailed to arcc-info@uwyo.edu and"\
        # +" service requests may be opened by emailing arcc-help@uwyo.edu."\
        # +"\n\nThe ARCC Team'"
        # if not(email == ''):
        #     os.system(bashCommand)
        # end WARNING
        ###############################################################################
#delete created files that are no longer necessary to keep
# Clean up the intermediate per-month accounting dumps and the generated
# PDF statements now that the reports have been sent.
for monthIndex in range(1, statementMonth + 1):
    os.system('rm *' + Months[monthIndex] + '.out')
os.system('rm *.pdf')
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An evaluator of a specific application of a transform."""
from __future__ import absolute_import
import collections
import random
import time
import apache_beam.io as io
from apache_beam import coders
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.runners import common
from apache_beam.runners.common import DoFnRunner
from apache_beam.runners.common import DoFnState
from apache_beam.runners.dataflow.native_io.iobase import _NativeWrite # pylint: disable=protected-access
from apache_beam.runners.direct.direct_runner import _StreamingGroupAlsoByWindow
from apache_beam.runners.direct.direct_runner import _StreamingGroupByKeyOnly
from apache_beam.runners.direct.sdf_direct_runner import ProcessElements
from apache_beam.runners.direct.sdf_direct_runner import ProcessFn
from apache_beam.runners.direct.sdf_direct_runner import SDFProcessElementInvoker
from apache_beam.runners.direct.util import KeyedWorkItem
from apache_beam.runners.direct.util import TransformResult
from apache_beam.runners.direct.watermark_manager import WatermarkManager
from apache_beam.testing.test_stream import ElementEvent
from apache_beam.testing.test_stream import ProcessingTimeEvent
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.test_stream import WatermarkEvent
from apache_beam.transforms import core
from apache_beam.transforms.trigger import TimeDomain
from apache_beam.transforms.trigger import _CombiningValueStateTag
from apache_beam.transforms.trigger import _ListStateTag
from apache_beam.transforms.trigger import create_trigger_driver
from apache_beam.transforms.window import GlobalWindows
from apache_beam.transforms.window import WindowedValue
from apache_beam.typehints.typecheck import OutputCheckWrapperDoFn
from apache_beam.typehints.typecheck import TypeCheckError
from apache_beam.typehints.typecheck import TypeCheckWrapperDoFn
from apache_beam.utils import counters
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Timestamp
class TransformEvaluatorRegistry(object):
  """For internal use only; no backwards-compatibility guarantees.
  Creates instances of TransformEvaluator for the application of a transform.
  """
  def __init__(self, evaluation_context):
    assert evaluation_context
    self._evaluation_context = evaluation_context
    # Maps each supported transform class to the evaluator class used to
    # execute applications of that transform.
    self._evaluators = {
        io.Read: _BoundedReadEvaluator,
        io.ReadStringsFromPubSub: _PubSubReadEvaluator,
        core.Flatten: _FlattenEvaluator,
        core.ParDo: _ParDoEvaluator,
        core._GroupByKeyOnly: _GroupByKeyOnlyEvaluator,
        _StreamingGroupByKeyOnly: _StreamingGroupByKeyOnlyEvaluator,
        _StreamingGroupAlsoByWindow: _StreamingGroupAlsoByWindowEvaluator,
        _NativeWrite: _NativeWriteEvaluator,
        TestStream: _TestStreamEvaluator,
        ProcessElements: _ProcessElementsEvaluator
    }
    # Maps root transform classes to providers of their seed bundles; any
    # PTransform not listed explicitly falls back to the default provider.
    self._root_bundle_providers = {
        core.PTransform: DefaultRootBundleProvider,
        TestStream: _TestStreamRootBundleProvider,
    }
  def get_evaluator(
      self, applied_ptransform, input_committed_bundle,
      side_inputs, scoped_metrics_container):
    """Returns a TransformEvaluator suitable for processing given inputs."""
    assert applied_ptransform
    assert bool(applied_ptransform.side_inputs) == bool(side_inputs)
    # Walk up the class hierarchy to find an evaluable type. This is necessary
    # for supporting sub-classes of core transforms.
    for cls in applied_ptransform.transform.__class__.mro():
      evaluator = self._evaluators.get(cls)
      if evaluator:
        break
    if not evaluator:
      raise NotImplementedError(
          'Execution of [%s] not implemented in runner %s.' % (
              type(applied_ptransform.transform), self))
    return evaluator(self._evaluation_context, applied_ptransform,
                     input_committed_bundle, side_inputs,
                     scoped_metrics_container)
  def get_root_bundle_provider(self, applied_ptransform):
    """Returns a RootBundleProvider for the given root transform.

    Walks the transform's MRO (as get_evaluator does) so sub-classes of
    registered root transforms are also matched.

    Raises:
      NotImplementedError: if no provider is registered for the transform.
    """
    provider_cls = None
    for cls in applied_ptransform.transform.__class__.mro():
      provider_cls = self._root_bundle_providers.get(cls)
      if provider_cls:
        break
    if not provider_cls:
      raise NotImplementedError(
          'Root provider for [%s] not implemented in runner %s' % (
              type(applied_ptransform.transform), self))
    return provider_cls(self._evaluation_context, applied_ptransform)
  def should_execute_serially(self, applied_ptransform):
    """Returns True if this applied_ptransform should run one bundle at a time.
    Some TransformEvaluators use a global state object to keep track of their
    global execution state. For example evaluator for _GroupByKeyOnly uses this
    state as an in memory dictionary to buffer keys.
    Serially executed evaluators will act as syncing point in the graph and
    execution will not move forward until they receive all of their inputs. Once
    they receive all of their input, they will release the combined output.
    Their output may consist of multiple bundles as they may divide their output
    into pieces before releasing.
    Args:
      applied_ptransform: Transform to be used for execution.
    Returns:
      True if executor should execute applied_ptransform serially.
    """
    return isinstance(applied_ptransform.transform,
                      (core._GroupByKeyOnly,
                       _StreamingGroupByKeyOnly,
                       _StreamingGroupAlsoByWindow,
                       _NativeWrite))
class RootBundleProvider(object):
  """Base class for providers of the bundles that seed a root transform.

  Subclasses must override get_root_bundles().
  """

  def __init__(self, evaluation_context, applied_ptransform):
    self._applied_ptransform = applied_ptransform
    self._evaluation_context = evaluation_context

  def get_root_bundles(self):
    """Returns the initial bundles for the associated root transform."""
    raise NotImplementedError
class DefaultRootBundleProvider(RootBundleProvider):
  """Supplies a single empty committed bundle to kick off a root transform."""

  def get_root_bundles(self):
    pipeline = self._applied_ptransform.transform.pipeline
    begin_node = pvalue.PBegin(pipeline)
    return [
        self._evaluation_context.create_empty_committed_bundle(begin_node)]
class _TestStreamRootBundleProvider(RootBundleProvider):
  """Builds the seed bundle that drives the TestStream evaluator."""

  def get_root_bundles(self):
    test_stream = self._applied_ptransform.transform
    if not test_stream.events:
      return []
    seed_bundle = self._evaluation_context.create_bundle(
        pvalue.PBegin(self._applied_ptransform.transform.pipeline))
    # Seed with event index 0 at MIN_TIMESTAMP so that the watermark is held
    # until the stream's events have been played out.
    seed_bundle.add(GlobalWindows.windowed_value(0, timestamp=MIN_TIMESTAMP))
    seed_bundle.commit(None)
    return [seed_bundle]
class _TransformEvaluator(object):
  """An evaluator of a specific application of a transform.

  Subclasses implement start_bundle() / process_element() / finish_bundle()
  to execute one bundle of input for one applied transform.
  """

  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    self._evaluation_context = evaluation_context
    self._applied_ptransform = applied_ptransform
    self._input_committed_bundle = input_committed_bundle
    self._side_inputs = side_inputs
    self._expand_outputs()
    self._execution_context = evaluation_context.get_execution_context(
        applied_ptransform)
    self.scoped_metrics_container = scoped_metrics_container

  def _expand_outputs(self):
    """Flattens DoOutputsTuple outputs into the frozenset self._outputs."""
    outputs = set()
    for pval in self._applied_ptransform.outputs.values():
      if isinstance(pval, pvalue.DoOutputsTuple):
        pvals = (v for v in pval)
      else:
        pvals = (pval,)
      for v in pvals:
        outputs.add(v)
    self._outputs = frozenset(outputs)

  def _split_list_into_bundles(
      self, output_pcollection, elements, max_element_per_bundle,
      element_size_fn):
    """Splits elements, an iterable, into multiple output bundles.
    Args:
      output_pcollection: PCollection that the elements belong to.
      elements: elements to be chunked into bundles.
      max_element_per_bundle: (approximately) the maximum element per bundle.
        If it is None, only a single bundle will be produced.
      element_size_fn: Function to return the size of a given element.
    Returns:
      List of output uncommitted bundles with at least one bundle.
    """
    bundle = self._evaluation_context.create_bundle(output_pcollection)
    bundle_size = 0
    bundles = [bundle]
    for element in elements:
      # Start a new bundle once the current one reaches the size cap.
      if max_element_per_bundle and bundle_size >= max_element_per_bundle:
        bundle = self._evaluation_context.create_bundle(output_pcollection)
        bundle_size = 0
        bundles.append(bundle)
      bundle.output(element)
      bundle_size += element_size_fn(element)
    return bundles

  def start_bundle(self):
    """Starts a new bundle."""
    pass

  def process_timer_wrapper(self, timer_firing):
    """Process timer by clearing and then calling process_timer().
    This method is called with any timer firing and clears the delivered
    timer from the keyed state and then calls process_timer(). The default
    process_timer() implementation emits a KeyedWorkItem for the particular
    timer and passes it to process_element(). Evaluator subclasses which
    desire different timer delivery semantics can override process_timer().
    """
    state = self.step_context.get_keyed_state(timer_firing.encoded_key)
    state.clear_timer(
        timer_firing.window, timer_firing.name, timer_firing.time_domain)
    self.process_timer(timer_firing)

  def process_timer(self, timer_firing):
    """Default process_timer() impl. generating KeyedWorkItem element."""
    self.process_element(
        GlobalWindows.windowed_value(
            KeyedWorkItem(timer_firing.encoded_key,
                          timer_firings=[timer_firing])))

  def process_element(self, element):
    """Processes a new element as part of the current bundle.

    Raises:
      NotImplementedError: always, unless overridden by a subclass.
    """
    # BUG FIX: the message was previously passed as two exception args
    # ('%s ...', type(self)) and never %-formatted; format it explicitly.
    raise NotImplementedError('%s do not process elements.' % type(self))

  def finish_bundle(self):
    """Finishes the bundle and produces output."""
    pass
class _BoundedReadEvaluator(_TransformEvaluator):
  """TransformEvaluator for bounded Read transform."""

  # Benchmarked against {100, 1000, 10000}; 1000 performed best.
  MAX_ELEMENT_PER_BUNDLE = 1000

  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    self._source = applied_ptransform.transform.source
    self._source.pipeline_options = evaluation_context.pipeline_options
    super(_BoundedReadEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)

  def finish_bundle(self):
    assert len(self._outputs) == 1
    output_pcollection = list(self._outputs)[0]

    def _bundle_values(reader):
      # Wrap each read element in the global window before bundling.
      windowed = [GlobalWindows.windowed_value(e) for e in reader]
      return self._split_list_into_bundles(
          output_pcollection, windowed,
          _BoundedReadEvaluator.MAX_ELEMENT_PER_BUNDLE, lambda _: 1)

    if isinstance(self._source, io.iobase.BoundedSource):
      # Read the full source via a range tracker over its default range.
      range_tracker = self._source.get_range_tracker(None, None)
      bundles = _bundle_values(self._source.read(range_tracker))
    else:
      with self._source.reader() as reader:
        bundles = _bundle_values(reader)
    return TransformResult(self, bundles, [], None, None)
class _TestStreamEvaluator(_TransformEvaluator):
  """TransformEvaluator for the TestStream transform."""
  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    self.test_stream = applied_ptransform.transform
    super(_TestStreamEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)
  def start_bundle(self):
    # Index of the last event played; -1 means no event handled yet.
    self.current_index = -1
    self.watermark = MIN_TIMESTAMP
    self.bundles = []
  def process_element(self, element):
    """Plays the TestStream event whose index is carried by the element."""
    # Each input element carries the index of the next event to play and the
    # timestamp to adopt as the current watermark.
    index = element.value
    self.watermark = element.timestamp
    assert isinstance(index, int)
    assert 0 <= index <= len(self.test_stream.events)
    self.current_index = index
    event = self.test_stream.events[self.current_index]
    if isinstance(event, ElementEvent):
      # Emit the event's timestamped values into a fresh output bundle.
      assert len(self._outputs) == 1
      output_pcollection = list(self._outputs)[0]
      bundle = self._evaluation_context.create_bundle(output_pcollection)
      for tv in event.timestamped_values:
        bundle.output(
            GlobalWindows.windowed_value(tv.value, timestamp=tv.timestamp))
      self.bundles.append(bundle)
    elif isinstance(event, WatermarkEvent):
      # Watermarks may only advance.
      assert event.new_watermark >= self.watermark
      self.watermark = event.new_watermark
    elif isinstance(event, ProcessingTimeEvent):
      # TODO(ccy): advance processing time in the context's mock clock.
      pass
    else:
      raise ValueError('Invalid TestStream event: %s.' % event)
  def finish_bundle(self):
    """Emits produced bundles and schedules the next event, if any.

    If more events remain, an unprocessed bundle carrying the next event
    index is returned and the watermark is held at the current position.
    """
    unprocessed_bundles = []
    hold = None
    if self.current_index < len(self.test_stream.events) - 1:
      unprocessed_bundle = self._evaluation_context.create_bundle(
          pvalue.PBegin(self._applied_ptransform.transform.pipeline))
      unprocessed_bundle.add(GlobalWindows.windowed_value(
          self.current_index + 1, timestamp=self.watermark))
      unprocessed_bundles.append(unprocessed_bundle)
      hold = self.watermark
    return TransformResult(
        self, self.bundles, unprocessed_bundles, None, {None: hold})
class _PubSubSubscriptionWrapper(object):
  """Holds a PubSub subscription, deleting temporary ones on collection."""

  def __init__(self, subscription, should_cleanup):
    self.subscription = subscription
    self.should_cleanup = should_cleanup

  def __del__(self):
    # User-supplied subscriptions are left untouched; only subscriptions the
    # runner created for itself are removed.
    if not self.should_cleanup:
      return
    self.subscription.delete()
class _PubSubReadEvaluator(_TransformEvaluator):
  """TransformEvaluator for PubSub read."""
  # Class-level cache of subscriptions keyed by applied transform, so repeated
  # evaluations of the same read reuse one (possibly temporary) subscription.
  _subscription_cache = {}
  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    super(_PubSubReadEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)
    source = self._applied_ptransform.transform._source
    self._subscription = _PubSubReadEvaluator.get_subscription(
        self._applied_ptransform, source.project, source.topic_name,
        source.subscription_name)
  @classmethod
  def get_subscription(cls, transform, project, topic, subscription_name):
    """Returns the cached subscription, creating a temporary one if needed."""
    if transform not in cls._subscription_cache:
      from google.cloud import pubsub
      # Without an explicit subscription name, create a uniquely named
      # temporary subscription; the wrapper deletes it on garbage collection.
      should_create = not subscription_name
      if should_create:
        subscription_name = 'beam_%d_%x' % (
            int(time.time()), random.randrange(1 << 32))
      cls._subscription_cache[transform] = _PubSubSubscriptionWrapper(
          pubsub.Client(project=project).topic(topic).subscription(
              subscription_name),
          should_create)
      if should_create:
        cls._subscription_cache[transform].subscription.create()
    return cls._subscription_cache[transform].subscription
  def start_bundle(self):
    # All work happens in finish_bundle(); there is no per-element input.
    pass
  def process_element(self, element):
    pass
  def _read_from_pubsub(self):
    """Pulls up to 10 messages from the subscription, auto-acknowledging."""
    from google.cloud import pubsub
    # Because of the AutoAck, we are not able to reread messages if this
    # evaluator fails with an exception before emitting a bundle. However,
    # the DirectRunner currently doesn't retry work items anyway, so the
    # pipeline would enter an inconsistent state on any error.
    with pubsub.subscription.AutoAck(
        self._subscription, return_immediately=True,
        max_messages=10) as results:
      return [message.data for unused_ack_id, message in results.items()]
  def finish_bundle(self):
    """Emits any pulled messages and reschedules this unbounded read."""
    data = self._read_from_pubsub()
    if data:
      output_pcollection = list(self._outputs)[0]
      bundle = self._evaluation_context.create_bundle(output_pcollection)
      # TODO(ccy): we currently do not use the PubSub message timestamp or
      # respect the PubSub source's id_label field.
      now = Timestamp.of(time.time())
      for message_data in data:
        bundle.output(GlobalWindows.windowed_value(message_data, timestamp=now))
      bundles = [bundle]
    else:
      bundles = []
    if self._applied_ptransform.inputs:
      input_pvalue = self._applied_ptransform.inputs[0]
    else:
      input_pvalue = pvalue.PBegin(self._applied_ptransform.transform.pipeline)
    # An unbounded read is never "done": always emit an unprocessed bundle so
    # this evaluator gets scheduled again.
    unprocessed_bundle = self._evaluation_context.create_bundle(
        input_pvalue)
    return TransformResult(self, bundles, [unprocessed_bundle], None,
                           {None: Timestamp.of(time.time())})
class _FlattenEvaluator(_TransformEvaluator):
  """TransformEvaluator for Flatten transform.

  Every input element is forwarded unchanged into a single output bundle.
  """

  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    super(_FlattenEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)

  def start_bundle(self):
    # Flatten has exactly one output PCollection; stage one bundle for it.
    assert len(self._outputs) == 1
    (output_pcollection,) = self._outputs
    self.bundle = self._evaluation_context.create_bundle(output_pcollection)

  def process_element(self, element):
    # Pass the element straight through.
    self.bundle.output(element)

  def finish_bundle(self):
    return TransformResult(self, [self.bundle], [], None, None)
class _TaggedReceivers(dict):
  """Received ParDo output and redirect to the associated output bundle."""
  def __init__(self, evaluation_context):
    self._evaluation_context = evaluation_context
    # Lazily-created fallbacks for outputs emitted to undeclared tags.
    self._null_receiver = None
    self._undeclared_in_memory_tag_values = None
    super(_TaggedReceivers, self).__init__()
  @property
  def undeclared_in_memory_tag_values(self):
    # Only populated when the evaluation context keeps an in-memory cache.
    assert (not self._undeclared_in_memory_tag_values
            or self._evaluation_context.has_cache)
    return self._undeclared_in_memory_tag_values
  class NullReceiver(common.Receiver):
    """Ignores undeclared outputs, default execution mode."""
    def receive(self, element):
      # Intentionally drop the element.
      pass
  class _InMemoryReceiver(common.Receiver):
    """Buffers undeclared outputs to the given dictionary."""
    def __init__(self, target, tag):
      self._target = target
      self._tag = tag
    def receive(self, element):
      self._target[self._tag].append(element)
  def __missing__(self, key):
    """Returns a fallback receiver for an output tag with no bundle.

    Undeclared tags are buffered in memory when the context has a cache
    (e.g. for testing); otherwise their output is silently discarded.
    """
    if self._evaluation_context.has_cache:
      if not self._undeclared_in_memory_tag_values:
        self._undeclared_in_memory_tag_values = collections.defaultdict(list)
      receiver = _TaggedReceivers._InMemoryReceiver(
          self._undeclared_in_memory_tag_values, key)
    else:
      if not self._null_receiver:
        self._null_receiver = _TaggedReceivers.NullReceiver()
      receiver = self._null_receiver
    return receiver
class _ParDoEvaluator(_TransformEvaluator):
  """TransformEvaluator for ParDo transform."""

  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container,
               perform_dofn_pickle_test=True):
    super(_ParDoEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)
    # This is a workaround for SDF implementation. SDF implementation adds state
    # to the SDF that is not picklable.
    self._perform_dofn_pickle_test = perform_dofn_pickle_test

  def start_bundle(self):
    """Builds receivers for each output tag and starts a DoFnRunner."""
    transform = self._applied_ptransform.transform
    self._tagged_receivers = _TaggedReceivers(self._evaluation_context)
    for output_tag in self._applied_ptransform.outputs:
      output_pcollection = pvalue.PCollection(None, tag=output_tag)
      output_pcollection.producer = self._applied_ptransform
      self._tagged_receivers[output_tag] = (
          self._evaluation_context.create_bundle(output_pcollection))
      self._tagged_receivers[output_tag].tag = output_tag
    self._counter_factory = counters.CounterFactory()
    # TODO(aaltay): Consider storing the serialized form as an optimization.
    # The pickle round-trip verifies the user's DoFn is picklable, matching
    # what a distributed runner would require.
    dofn = (pickler.loads(pickler.dumps(transform.dofn))
            if self._perform_dofn_pickle_test else transform.dofn)
    pipeline_options = self._evaluation_context.pipeline_options
    if (pipeline_options is not None
        and pipeline_options.view_as(TypeOptions).runtime_type_check):
      dofn = TypeCheckWrapperDoFn(dofn, transform.get_type_hints())
    dofn = OutputCheckWrapperDoFn(dofn, self._applied_ptransform.full_label)
    args = transform.args if hasattr(transform, 'args') else []
    kwargs = transform.kwargs if hasattr(transform, 'kwargs') else {}
    self.runner = DoFnRunner(
        dofn, args, kwargs,
        self._side_inputs,
        self._applied_ptransform.inputs[0].windowing,
        tagged_receivers=self._tagged_receivers,
        step_name=self._applied_ptransform.full_label,
        state=DoFnState(self._counter_factory),
        scoped_metrics_container=self.scoped_metrics_container)
    self.runner.start()

  def process_element(self, element):
    self.runner.process(element)

  def finish_bundle(self):
    self.runner.finish()
    # Materialize to a list: under Python 3, dict.values() is a lazy view,
    # whereas downstream code expects a concrete list of bundles.
    bundles = list(self._tagged_receivers.values())
    result_counters = self._counter_factory.get_counters()
    return TransformResult(
        self, bundles, [], result_counters, None,
        self._tagged_receivers.undeclared_in_memory_tag_values)
class _GroupByKeyOnlyEvaluator(_TransformEvaluator):
  """TransformEvaluator for _GroupByKeyOnly transform."""
  # None means all grouped output is released as a single bundle.
  MAX_ELEMENT_PER_BUNDLE = None
  # Per-key state tag buffering the values seen for that key.
  ELEMENTS_TAG = _ListStateTag('elements')
  # Global state tag flagging that grouped output was already emitted.
  COMPLETION_TAG = _CombiningValueStateTag('completed', any)
  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    super(_GroupByKeyOnlyEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)
  def _is_final_bundle(self):
    # True once the input watermark has advanced to +inf, i.e. all input
    # bundles have been delivered.
    return (self._execution_context.watermarks.input_watermark
            == WatermarkManager.WATERMARK_POS_INF)
  def start_bundle(self):
    self.step_context = self._execution_context.get_step_context()
    self.global_state = self.step_context.get_keyed_state(None)
    assert len(self._outputs) == 1
    self.output_pcollection = list(self._outputs)[0]
    # The output type of a GroupByKey will be KV[Any, Any] or more specific.
    # TODO(BEAM-2717): Infer coders earlier.
    kv_type_hint = (
        self._applied_ptransform.outputs[None].element_type
        or
        self._applied_ptransform.transform.get_type_hints().input_types[0][0])
    self.key_coder = coders.registry.get_coder(kv_type_hint.tuple_types[0])
  def process_timer(self, timer_firing):
    # We do not need to emit a KeyedWorkItem to process_element().
    pass
  def process_element(self, element):
    """Buffers a windowed (key, value) pair in keyed state.

    Raises:
      TypeCheckError: if the element is not a windowed key-value pair.
    """
    # Elements must not arrive after the grouped output was emitted.
    assert not self.global_state.get_state(
        None, _GroupByKeyOnlyEvaluator.COMPLETION_TAG)
    if (isinstance(element, WindowedValue)
        and isinstance(element.value, collections.Iterable)
        and len(element.value) == 2):
      k, v = element.value
      encoded_k = self.key_coder.encode(k)
      state = self.step_context.get_keyed_state(encoded_k)
      state.add_state(None, _GroupByKeyOnlyEvaluator.ELEMENTS_TAG, v)
    else:
      raise TypeCheckError('Input to _GroupByKeyOnly must be a PCollection of '
                           'windowed key-value pairs. Instead received: %r.'
                           % element)
  def finish_bundle(self):
    """Emits grouped output only once all input has arrived."""
    if self._is_final_bundle():
      if self.global_state.get_state(
          None, _GroupByKeyOnlyEvaluator.COMPLETION_TAG):
        # Ignore empty bundles after emitting output. (This may happen because
        # empty bundles do not affect input watermarks.)
        bundles = []
      else:
        gbk_result = []
        # TODO(ccy): perhaps we can clean this up to not use this
        # internal attribute of the DirectStepContext.
        for encoded_k in self.step_context.existing_keyed_state:
          # Ignore global state.
          if encoded_k is None:
            continue
          k = self.key_coder.decode(encoded_k)
          state = self.step_context.get_keyed_state(encoded_k)
          vs = state.get_state(None, _GroupByKeyOnlyEvaluator.ELEMENTS_TAG)
          gbk_result.append(GlobalWindows.windowed_value((k, vs)))
        def len_element_fn(element):
          # Bundle size is measured by the number of grouped values.
          _, v = element.value
          return len(v)
        bundles = self._split_list_into_bundles(
            self.output_pcollection, gbk_result,
            _GroupByKeyOnlyEvaluator.MAX_ELEMENT_PER_BUNDLE, len_element_fn)
        self.global_state.add_state(
            None, _GroupByKeyOnlyEvaluator.COMPLETION_TAG, True)
      hold = WatermarkManager.WATERMARK_POS_INF
    else:
      # Not all input has arrived: emit nothing, hold the watermark at -inf,
      # and set a timer so we are re-invoked when input is complete.
      bundles = []
      hold = WatermarkManager.WATERMARK_NEG_INF
      self.global_state.set_timer(
          None, '', TimeDomain.WATERMARK, WatermarkManager.WATERMARK_POS_INF)
    return TransformResult(self, bundles, [], None, {None: hold})
class _StreamingGroupByKeyOnlyEvaluator(_TransformEvaluator):
  """TransformEvaluator for _StreamingGroupByKeyOnly transform.
  The _GroupByKeyOnlyEvaluator buffers elements until its input watermark goes
  to infinity, which is suitable for batch mode execution. During streaming
  mode execution, we emit each bundle as it comes to the next transform.
  """
  MAX_ELEMENT_PER_BUNDLE = None

  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    super(_StreamingGroupByKeyOnlyEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)

  def start_bundle(self):
    # Buffers values per encoded key for the duration of this bundle only.
    self.gbk_items = collections.defaultdict(list)
    assert len(self._outputs) == 1
    self.output_pcollection = list(self._outputs)[0]
    # The input type of a GroupByKey will be KV[Any, Any] or more specific.
    kv_type_hint = (
        self._applied_ptransform.transform.get_type_hints().input_types[0])
    self.key_coder = coders.registry.get_coder(kv_type_hint[0].tuple_types[0])

  def process_element(self, element):
    """Buffers a windowed (key, value) pair, keyed by the encoded key.

    Raises:
      TypeCheckError: if the element is not a windowed key-value pair.
    """
    if (isinstance(element, WindowedValue)
        and isinstance(element.value, collections.Iterable)
        and len(element.value) == 2):
      k, v = element.value
      self.gbk_items[self.key_coder.encode(k)].append(v)
    else:
      raise TypeCheckError('Input to _GroupByKeyOnly must be a PCollection of '
                           'windowed key-value pairs. Instead received: %r.'
                           % element)

  def finish_bundle(self):
    """Emits one KeyedWorkItem per buffered key in a single output bundle."""
    bundles = []
    bundle = None
    # BUG FIX: was the Python 2-only dict.iteritems(), which raises
    # AttributeError under Python 3; items() behaves the same on both.
    for encoded_k, vs in self.gbk_items.items():
      if not bundle:
        bundle = self._evaluation_context.create_bundle(
            self.output_pcollection)
        bundles.append(bundle)
      kwi = KeyedWorkItem(encoded_k, elements=vs)
      bundle.add(GlobalWindows.windowed_value(kwi))
    return TransformResult(self, bundles, [], None, None)
class _StreamingGroupAlsoByWindowEvaluator(_TransformEvaluator):
  """TransformEvaluator for the _StreamingGroupAlsoByWindow transform.
  This evaluator is only used in streaming mode. In batch mode, the
  GroupAlsoByWindow operation is evaluated as a normal DoFn, as defined
  in transforms/core.py.
  """
  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    super(_StreamingGroupAlsoByWindowEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)
  def start_bundle(self):
    assert len(self._outputs) == 1
    self.output_pcollection = list(self._outputs)[0]
    self.step_context = self._execution_context.get_step_context()
    # Trigger driver that decides when windows fire for this windowing.
    self.driver = create_trigger_driver(
        self._applied_ptransform.transform.windowing)
    # Windowed (key, value) outputs accumulated over the bundle.
    self.gabw_items = []
    # Earliest watermark hold per encoded key, reported to the runner.
    self.keyed_holds = {}
    # The input type of a GroupAlsoByWindow will be KV[Any, Iter[Any]] or more
    # specific.
    kv_type_hint = (
        self._applied_ptransform.transform.get_type_hints().input_types[0])
    self.key_coder = coders.registry.get_coder(kv_type_hint[0].tuple_types[0])
  def process_element(self, element):
    """Feeds a KeyedWorkItem's timers and elements through the trigger driver."""
    kwi = element.value
    assert isinstance(kwi, KeyedWorkItem), kwi
    encoded_k, timer_firings, vs = (
        kwi.encoded_key, kwi.timer_firings, kwi.elements)
    k = self.key_coder.decode(encoded_k)
    state = self.step_context.get_keyed_state(encoded_k)
    # Deliver any timer firings first; each may cause windows to emit.
    for timer_firing in timer_firings:
      for wvalue in self.driver.process_timer(
          timer_firing.window, timer_firing.name, timer_firing.time_domain,
          timer_firing.timestamp, state):
        self.gabw_items.append(wvalue.with_value((k, wvalue.value)))
    if vs:
      for wvalue in self.driver.process_elements(state, vs, MIN_TIMESTAMP):
        self.gabw_items.append(wvalue.with_value((k, wvalue.value)))
    self.keyed_holds[encoded_k] = state.get_earliest_hold()
  def finish_bundle(self):
    """Packs accumulated outputs into a single bundle (if any) and reports holds."""
    bundles = []
    if self.gabw_items:
      bundle = self._evaluation_context.create_bundle(self.output_pcollection)
      for item in self.gabw_items:
        bundle.add(item)
      bundles.append(bundle)
    return TransformResult(self, bundles, [], None, self.keyed_holds)
class _NativeWriteEvaluator(_TransformEvaluator):
  """TransformEvaluator for _NativeWrite transform."""
  # Global state tag under which incoming elements are buffered until the
  # final bundle arrives.
  ELEMENTS_TAG = _ListStateTag('elements')
  def __init__(self, evaluation_context, applied_ptransform,
               input_committed_bundle, side_inputs, scoped_metrics_container):
    assert not side_inputs
    super(_NativeWriteEvaluator, self).__init__(
        evaluation_context, applied_ptransform, input_committed_bundle,
        side_inputs, scoped_metrics_container)
    assert applied_ptransform.transform.sink
    self._sink = applied_ptransform.transform.sink
  @property
  def _is_final_bundle(self):
    # True once all input has been delivered (input watermark at +inf).
    return (self._execution_context.watermarks.input_watermark
            == WatermarkManager.WATERMARK_POS_INF)
  @property
  def _has_already_produced_output(self):
    # True once the write has happened (output watermark at +inf).
    return (self._execution_context.watermarks.output_watermark
            == WatermarkManager.WATERMARK_POS_INF)
  def start_bundle(self):
    self.step_context = self._execution_context.get_step_context()
    self.global_state = self.step_context.get_keyed_state(None)
  def process_timer(self, timer_firing):
    # We do not need to emit a KeyedWorkItem to process_element().
    pass
  def process_element(self, element):
    # Buffer the element in global state; actual writing is deferred to
    # finish_bundle() once all input has arrived.
    self.global_state.add_state(
        None, _NativeWriteEvaluator.ELEMENTS_TAG, element)
  def finish_bundle(self):
    # finish_bundle will append incoming bundles in memory until all the bundles
    # carrying data is processed. This is done to produce only a single output
    # shard (some tests depends on this behavior). It is possible to have
    # incoming empty bundles after the output is produced, these bundles will be
    # ignored and would not generate additional output files.
    # TODO(altay): Do not wait until the last bundle to write in a single shard.
    if self._is_final_bundle:
      elements = self.global_state.get_state(
          None, _NativeWriteEvaluator.ELEMENTS_TAG)
      if self._has_already_produced_output:
        # Ignore empty bundles that arrive after the output is produced.
        assert elements == []
      else:
        self._sink.pipeline_options = self._evaluation_context.pipeline_options
        with self._sink.writer() as writer:
          for v in elements:
            writer.Write(v.value)
      hold = WatermarkManager.WATERMARK_POS_INF
    else:
      # Input not yet complete: hold the watermark and arm a timer so this
      # evaluator runs again once the input watermark reaches +inf.
      hold = WatermarkManager.WATERMARK_NEG_INF
      self.global_state.set_timer(
          None, '', TimeDomain.WATERMARK, WatermarkManager.WATERMARK_POS_INF)
    return TransformResult(self, [], [], None, {None: hold})
class _ProcessElementsEvaluator(_TransformEvaluator):
"""An evaluator for sdf_direct_runner.ProcessElements transform."""
# Maximum number of elements that will be produced by a Splittable DoFn before
# a checkpoint is requested by the runner.
DEFAULT_MAX_NUM_OUTPUTS = 100
# Maximum duration a Splittable DoFn will process an element before a
# checkpoint is requested by the runner.
DEFAULT_MAX_DURATION = 1
def __init__(self, evaluation_context, applied_ptransform,
input_committed_bundle, side_inputs, scoped_metrics_container):
super(_ProcessElementsEvaluator, self).__init__(
evaluation_context, applied_ptransform, input_committed_bundle,
side_inputs, scoped_metrics_container)
process_elements_transform = applied_ptransform.transform
assert isinstance(process_elements_transform, ProcessElements)
# Replacing the do_fn of the transform with a wrapper do_fn that performs
# SDF magic.
transform = applied_ptransform.transform
sdf = transform.sdf
self._process_fn = transform.new_process_fn(sdf)
transform.dofn = self._process_fn
assert isinstance(self._process_fn, ProcessFn)
self.step_context = self._execution_context.get_step_context()
self._process_fn.step_context = self.step_context
process_element_invoker = (
SDFProcessElementInvoker(
max_num_outputs=self.DEFAULT_MAX_NUM_OUTPUTS,
max_duration=self.DEFAULT_MAX_DURATION))
self._process_fn.set_process_element_invoker(process_element_invoker)
self._par_do_evaluator = _ParDoEvaluator(
evaluation_context, applied_ptransform, input_committed_bundle,
side_inputs, scoped_metrics_container, perform_dofn_pickle_test=False)
self.keyed_holds = {}
def start_bundle(self):
self._par_do_evaluator.start_bundle()
def process_element(self, element):
assert isinstance(element, WindowedValue)
assert len(element.windows) == 1
window = element.windows[0]
if isinstance(element.value, KeyedWorkItem):
key = element.value.encoded_key
else:
# If not a `KeyedWorkItem`, this must be a tuple where key is a randomly
# generated key and the value is a `WindowedValue` that contains an
# `ElementAndRestriction` object.
assert isinstance(element.value, tuple)
key = element.value[0]
self._par_do_evaluator.process_element(element)
state = self.step_context.get_keyed_state(key)
self.keyed_holds[key] = state.get_state(
window, self._process_fn.watermark_hold_tag)
def finish_bundle(self):
    """Finish the delegate ParDo bundle and overlay the captured holds.

    Returns:
      A TransformResult built from the ParDo evaluator's result, with the
      watermark holds recorded in process_element() taking precedence.
    """
    par_do_result = self._par_do_evaluator.finish_bundle()
    transform_result = TransformResult(
        self, par_do_result.uncommitted_output_bundles,
        par_do_result.unprocessed_bundles, par_do_result.counters,
        par_do_result.keyed_watermark_holds,
        par_do_result.undeclared_tag_values)
    # Holds captured per key during processing override whatever the ParDo
    # evaluator reported for the same keys.
    for key in self.keyed_holds:
        transform_result.keyed_watermark_holds[key] = self.keyed_holds[key]
    return transform_result
| |
from ddt import data, ddt
from django.core import mail
from django.urls import reverse
from rest_framework import status, test
from waldur_core.core.tests.helpers import override_waldur_core_settings
from waldur_core.logging import loggers, models, tasks
from waldur_core.logging.tests.factories import WebHookFactory
from waldur_core.structure.tests import factories as structure_factories
from waldur_core.structure.tests import fixtures as structure_fixtures
from . import factories
class BaseHookApiTest(test.APITransactionTestCase):
    """Shared fixtures for hook API tests: three user roles plus a small
    sample of registered event types and event groups."""

    def setUp(self):
        # A handful of valid events/groups is enough for subscription tests.
        self.valid_event_types = loggers.get_valid_events()[:3]
        self.valid_event_groups = loggers.get_event_groups_keys()[:3]
        # Users exercised by the permission-related tests.
        self.staff = structure_factories.UserFactory(is_staff=True)
        self.author = structure_factories.UserFactory()
        self.other_user = structure_factories.UserFactory()
class HookCreationViewTest(BaseHookApiTest):
    """Any authenticated user can create web and email hooks."""

    def test_user_can_create_webhook(self):
        """POSTing event types and a destination URL creates a webhook."""
        self.client.force_authenticate(user=self.author)
        response = self.client.post(
            WebHookFactory.get_list_url(),
            data={
                'event_types': self.valid_event_types,
                'destination_url': 'http://example.com/',
            },
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

    def test_user_can_create_email_hook(self):
        """POSTing event types and an email address creates an email hook."""
        self.client.force_authenticate(user=self.author)
        response = self.client.post(
            reverse('emailhook-list'),
            data={'event_types': self.valid_event_types, 'email': 'test@example.com'},
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)

    def test_user_can_subscribe_to_event_groups(self):
        """Subscribing by group expands to the group's concrete event types."""
        event_groups = self.valid_event_groups
        event_types = loggers.expand_event_groups(event_groups)
        self.client.force_authenticate(user=self.author)
        response = self.client.post(
            WebHookFactory.get_list_url(),
            data={
                'event_groups': event_groups,
                'destination_url': 'http://example.com/',
            },
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # The serializer reports both the groups as given and the expanded
        # set of individual event types.
        self.assertEqual(response.data['event_groups'], set(event_groups))
        self.assertEqual(response.data['event_types'], set(event_types))
@ddt
class HookUpdateTest(BaseHookApiTest):
    """The hook author can PATCH destination, event types/groups and status."""

    def setUp(self):
        super(HookUpdateTest, self).setUp()
        # Map of hook kind -> detail URL; @data values index into this dict.
        self.hooks = {
            'web': WebHookFactory.get_url(WebHookFactory(user=self.author)),
        }

    def test_author_can_update_webhook_destination_url(self):
        new_data = {'destination_url': 'http://another-host.com'}
        response = self.update_hook('web', new_data)
        self.assertEqual(new_data['destination_url'], response.data['destination_url'])

    @data('web',)
    def test_author_can_update_hook_event_types(self, hook):
        new_event_types = set(self.valid_event_types[:1])
        response = self.update_hook(hook, {'event_types': new_event_types})
        self.assertEqual(new_event_types, response.data['event_types'])

    @data('web',)
    def test_author_can_update_event_groups(self, hook):
        """Updating groups also refreshes the expanded event types."""
        event_groups = self.valid_event_groups
        event_types = loggers.expand_event_groups(event_groups)
        self.client.force_authenticate(user=self.author)
        response = self.update_hook(hook, {'event_groups': event_groups})
        self.assertEqual(response.data['event_groups'], set(event_groups))
        self.assertEqual(response.data['event_types'], set(event_types))

    @data('web',)
    def test_author_can_disable_hook(self, hook):
        response = self.update_hook(hook, {'is_active': False})
        self.assertFalse(response.data['is_active'])

    def update_hook(self, hook, data):
        """PATCH the given hook as its author and assert the request succeeds.

        Args:
            hook: key into self.hooks selecting the hook's detail URL.
            data: partial payload to send.
        Returns:
            The DRF response (already asserted to be HTTP 200).
        """
        self.client.force_authenticate(user=self.author)
        url = self.hooks[hook]
        response = self.client.patch(url, data=data)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        return response
class HookPermissionsViewTest(BaseHookApiTest):
    """A hook is visible to its author and to staff, but not to others."""

    def setUp(self):
        super(HookPermissionsViewTest, self).setUp()
        self.url = WebHookFactory.get_url(WebHookFactory(user=self.author))

    def _fetch_as(self, user):
        # Authenticate as the given user and GET the hook detail endpoint.
        self.client.force_authenticate(user=user)
        return self.client.get(self.url)

    def test_hook_visible_to_author(self):
        response = self._fetch_as(self.author)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertEqual(str(self.author.uuid), str(response.data['author_uuid']))

    def test_hook_visible_to_staff(self):
        response = self._fetch_as(self.staff)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)

    def test_hook_not_visible_to_other_user(self):
        response = self._fetch_as(self.other_user)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.data)
class HookFilterViewTest(BaseHookApiTest):
    """Staff can filter hook list endpoints by author UUID.

    The two tests differed only in the list URL they queried, so the shared
    body lives in a private helper (removes a byte-for-byte duplicate).
    """

    def _assert_author_filter(self, url):
        """Create one hook per user, filter by the author's UUID as staff,
        and expect exactly the author's hook back."""
        WebHookFactory(user=self.author)
        WebHookFactory(user=self.other_user)
        self.client.force_authenticate(user=self.staff)
        response = self.client.get(url, {'author_uuid': self.author.uuid.hex})
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(str(self.author.uuid), str(response.data[0]['author_uuid']))

    def test_staff_can_filter_webhook_by_author_uuid(self):
        self._assert_author_filter(WebHookFactory.get_list_url())

    def test_staff_can_filter_summary_hook_by_author_uuid(self):
        self._assert_author_filter(reverse('hooks-list'))
class SystemNotificationTest(test.APITransactionTestCase):
    """System notifications email project users even without explicit hooks."""

    def setUp(self):
        self.system_notification = factories.SystemNotificationFactory()
        self.event_types = self.system_notification.event_types
        self.project_fixture = structure_fixtures.ProjectFixture()
        self.project = self.project_fixture.project
        self.admin = self.project_fixture.admin
        self.manager = self.project_fixture.manager
        # An event of a system-notification type, fed to the project so the
        # notification machinery can resolve its recipients.
        self.event = factories.EventFactory(event_type=self.event_types[0])
        self.feed = models.Feed.objects.create(scope=self.project, event=self.event)

    def test_send_notification_if_user_is_not_subscribed_but_event_type_is_system_type(
        self,
    ):
        # No EmailHook exists, yet the system notification alone triggers mail.
        self.assertFalse(models.EmailHook.objects.count())
        tasks.process_event(self.event.id)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue(self.admin.email in mail.outbox[0].to)

    def test_not_send_notification_if_event_type_is_not_system_type(self):
        self.assertFalse(models.EmailHook.objects.count())
        self.event.event_type = 'test_event_type'
        self.event.save()
        tasks.process_event(self.event.id)
        self.assertEqual(len(mail.outbox), 0)

    def test_not_send_notification_if_wrong_project(self):
        # Deleting the feed detaches the event from the project, so there is
        # no scope from which to derive recipients.
        self.assertFalse(models.EmailHook.objects.count())
        self.feed.delete()
        self.event.save()
        tasks.process_event(self.event.id)
        self.assertEqual(len(mail.outbox), 0)

    def test_not_send_notification_if_wrong_role(self):
        # Restrict the notification to managers: one mail still goes out
        # (presumably to the manager — confirm), but the admin must not be
        # among the recipients.
        self.assertFalse(models.EmailHook.objects.count())
        self.system_notification.roles = ['manager']
        self.system_notification.save()
        tasks.process_event(self.event.id)
        self.assertEqual(len(mail.outbox), 1)
        self.assertFalse(self.admin.email in mail.outbox[0].to)

    def test_event_groups(self):
        # Subscribing via an event group matches any event type in that group.
        groups = loggers.get_event_groups()
        group = list(groups.keys())[0]
        self.system_notification.event_groups = [group]
        self.system_notification.event_types = []
        self.system_notification.save()
        self.event.event_type = list(groups[group])[0]
        self.event.save()
        tasks.process_event(self.event.id)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue(self.admin.email in mail.outbox[0].to)

    @override_waldur_core_settings(NOTIFICATION_SUBJECT='Test Subject')
    def test_notification_subject(self):
        self.assertFalse(models.EmailHook.objects.count())
        tasks.process_event(self.event.id)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Test Subject')
| |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""This module is used for handling a single line from a csv file.
"""
import collections
import ggrc.services
from ggrc import db
from ggrc.converters import errors
from ggrc.login import get_current_user_id
from ggrc.models.reflection import AttributeInfo
from ggrc.rbac import permissions
from ggrc.services.common import Resource
class RowConverter(object):
  """Base class for handling a single row of imported/exported csv data.

  A RowConverter owns the mapping between one csv row and one model object:
  it parses cell values through per-column handlers, finds or creates the
  target object, reports row-level errors/warnings, and stages the object
  (plus secondary objects such as relationships) into the db session.
  """

  def __init__(self, block_converter, object_class, **options):
    """Store row context and pre-compute the csv line number.

    Args:
      block_converter: converter for the csv block this row belongs to.
      object_class: model class the row maps to.
      **options: may contain "obj", "index", "row" and "headers".
    """
    self.block_converter = block_converter
    self.options = options.copy()
    self.object_class = object_class
    self.obj = options.get("obj")
    # A pre-supplied object means this row was built from existing ids
    # rather than parsed from raw csv values.
    self.from_ids = self.obj is not None
    self.is_new = True
    self.is_delete = False
    self.ignore = False
    self.index = options.get("index", -1)
    self.row = options.get("row", [])
    # Ordered so columns are processed in csv order.
    self.attrs = collections.OrderedDict()
    self.objects = collections.OrderedDict()
    self.id_key = ""
    offset = 3  # 2 header rows and 1 for 0 based index
    self.line = self.index + self.block_converter.offset + offset
    self.headers = options.get("headers", [])

  def add_error(self, template, **kwargs):
    """Add error for current row.

    Add an error entry for the current row and mark it as ignored. If the error
    occurred on a new object, it gets removed from the new object cache dict.

    Args:
      template: String template.
      **kwargs: Arguments needed to format the string template.
    """
    message = template.format(line=self.line, **kwargs)
    self.block_converter.row_errors.append(message)
    new_objects = self.block_converter.converter.new_objects[self.object_class]
    key = self.get_value(self.id_key)
    if key in new_objects:
      del new_objects[key]
    self.ignore = True

  def add_warning(self, template, **kwargs):
    """Append a formatted warning for this row; the row stays active."""
    message = template.format(line=self.line, **kwargs)
    self.block_converter.row_warnings.append(message)

  def handle_csv_row_data(self, field_list=None):
    """ Pack row data with handlers

    Parse each csv cell through its column handler. When the id column
    ("slug" or "email") is reached, the target object is resolved so later
    handlers can attach values to it.

    Args:
      field_list: optional subset of header names to process; None means
        process every header.
    """
    handle_fields = self.headers if field_list is None else field_list
    for i, (attr_name, header_dict) in enumerate(self.headers.items()):
      if attr_name not in handle_fields or \
              attr_name in self.attrs or \
              self.is_delete:
        continue
      handler = header_dict["handler"]
      item = handler(self, attr_name, parse=True,
                     raw_value=self.row[i], **header_dict)
      if header_dict.get("type") == AttributeInfo.Type.PROPERTY:
        self.attrs[attr_name] = item
      else:
        self.objects[attr_name] = item
      if not self.ignore and attr_name in ("slug", "email"):
        # The id column identifies the row's object; resolve it now so the
        # handler can validate against the concrete instance.
        self.id_key = attr_name
        self.obj = self.get_or_generate_object(attr_name)
        item.set_obj_attr()
      item.check_unique_consistency()

  def handle_obj_row_data(self):
    """Build handlers from an existing object (export / by-id path)."""
    for attr_name, header_dict in self.headers.items():
      handler = header_dict["handler"]
      item = handler(self, attr_name, **header_dict)
      if header_dict.get("type") == AttributeInfo.Type.PROPERTY:
        self.attrs[attr_name] = item
      else:
        self.objects[attr_name] = item

  def handle_row_data(self, field_list=None):
    """Dispatch to object-based or csv-based row handling."""
    if self.from_ids:
      self.handle_obj_row_data()
    else:
      self.handle_csv_row_data(field_list)

  def check_mandatory_fields(self):
    """Check if the new object contains all mandatory columns."""
    if not self.is_new or self.is_delete:
      return
    headers = self.block_converter.object_headers
    mandatory = [key for key, header in headers.items() if header["mandatory"]]
    missing_keys = set(mandatory).difference(set(self.headers.keys()))
    # TODO: fix mandatory checks for individual rows based on object level
    # custom attributes.
    missing = [headers[key]["display_name"] for key in missing_keys if
               headers[key]["type"] != AttributeInfo.Type.OBJECT_CUSTOM]
    if missing:
      self.add_error(errors.MISSING_COLUMN,
                     s="s" if len(missing) > 1 else "",
                     column_names=", ".join(missing))

  def find_by_key(self, key, value):
    """Return the first object_class instance with key == value, or None."""
    return self.object_class.query.filter_by(**{key: value}).first()

  def get_value(self, key):
    """Return the parsed value for a column, from attrs or objects."""
    item = self.attrs.get(key) or self.objects.get(key)
    if item:
      return item.value
    return None

  def set_ignore(self, ignore=True):
    """Mark (or unmark) this row to be skipped by later pipeline stages."""
    self.ignore = ignore

  def get_or_generate_object(self, attr_name):
    """Fetch an existing object if possible or create and return a new one.

    Note: Person object is the only exception here since it does not have a
    slug field.
    """
    value = self.get_value(attr_name)
    new_objects = self.block_converter.converter.new_objects[self.object_class]
    # A previous row in this import may already have created the object.
    if value in new_objects:
      return new_objects[value]
    obj = self.get_object_by_key(attr_name)
    if value:
      new_objects[value] = obj
    obj.modified_by_id = get_current_user_id()
    return obj

  def get_object_by_key(self, key="slug"):
    """ Get object if the slug is in the system or return a new object """
    value = self.get_value(key)
    self.is_new = False
    obj = self.find_by_key(key, value)
    if not obj:
      obj = self.object_class()
      self.is_new = True
    elif not permissions.is_allowed_update_for(obj):
      # Existing object the current user may not modify: skip the row.
      self.ignore = True
      self.add_error(errors.PERMISSION_ERROR)
    return obj

  def setup_secondary_objects(self, slugs_dict):
    """Import secondary objects.

    This function creates and stores all secondary object such as relationships
    and any linked object that need the original object to be saved before they
    can be processed. This is usually due to needing the id of the original
    object that is created with a csv import.
    """
    if not self.obj or self.ignore or self.is_delete:
      return
    for mapping in self.objects.values():
      mapping.set_obj_attr()

  def setup_object(self):
    """ Set the object values or relate object values

    Set all object attributes to the value specified in attrs. If the value
    is in some related object such as "UserRole" it should be added there and
    handled by the handler defined in attrs.
    """
    if self.ignore:
      return
    for item_handler in self.attrs.values():
      item_handler.set_obj_attr()

  def send_post_commit_signals(self):
    """Send after commit signals for all objects

    This function sends proper signals for all objects depending if the object
    was created, updated or deleted.

    Note: signals are only sent for the row objects. Secondary objects such as
    Relationships do not get any signals triggered.
    """
    if self.ignore:
      return
    service_class = getattr(ggrc.services, self.object_class.__name__)
    service_class.model = self.object_class
    if self.is_delete:
      Resource.model_deleted_after_commit.send(
          self.object_class, obj=self.obj, service=service_class)
    elif self.is_new:
      Resource.model_posted_after_commit.send(
          self.object_class, obj=self.obj, src={}, service=service_class)
    else:
      Resource.model_put_after_commit.send(
          self.object_class, obj=self.obj, src={}, service=service_class)

  def send_pre_commit_signals(self):
    """Send before commit signals for all objects.

    This function sends proper signals for all objects depending if the object
    was created, updated or deleted.

    Note: signals are only sent for the row objects. Secondary objects such as
    Relationships do not get any signals triggered.
    """
    if self.ignore:
      return
    service_class = getattr(ggrc.services, self.object_class.__name__)
    service_class.model = self.object_class
    if self.is_delete:
      Resource.model_deleted.send(
          self.object_class, obj=self.obj, service=service_class)
    elif self.is_new:
      Resource.model_posted.send(
          self.object_class, obj=self.obj, src={}, service=service_class)
      Resource.collection_posted.send(
          self.object_class, objects=[self.obj], sources=[{}])
    else:
      Resource.model_put.send(
          self.object_class, obj=self.obj, src={}, service=service_class)

  def insert_object(self):
    """Add the row object to the current database session."""
    if self.ignore or self.is_delete:
      return
    if self.is_new:
      db.session.add(self.obj)
    for handler in self.attrs.values():
      handler.insert_object()

  def insert_secondary_objects(self):
    """Add additional objects to the current database session.

    This is used for adding any extra created objects such as Relationships, to
    the current session to be committed.
    """
    if not self.obj or self.ignore or self.is_delete:
      return
    for secondery_object in self.objects.values():
      secondery_object.insert_object()

  def to_array(self, fields):
    """Get an array representation of the current row.

    Filter the values to match the fields array and return the string
    representation of the values.

    Args:
      fields (list of strings): A list of columns that will be included in the
        output array. This is basically a filter of all possible fields that
        this row contains.

    Returns:
      list of strings where each cell contains a string value of the
      corresponding field.
    """
    row = []
    for field in fields:
      field_type = self.headers.get(field, {}).get("type")
      if field_type == AttributeInfo.Type.PROPERTY:
        field_handler = self.attrs.get(field)
      else:
        field_handler = self.objects.get(field)
      value = field_handler.get_value() if field_handler else ""
      row.append(value or "")
    return row
| |
"""Easy install Tests
"""
import sys
import os
import shutil
import tempfile
import unittest
import site
import contextlib
import textwrap
import tarfile
import logging
import distutils.core
from setuptools.compat import StringIO, BytesIO, next, urlparse
from setuptools.sandbox import run_setup, SandboxViolation
from setuptools.command.easy_install import (
easy_install, fix_jython_executable, get_script_args, nt_quote_arg)
from setuptools.command.easy_install import PthDistributions
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from pkg_resources import working_set, VersionConflict
from pkg_resources import Distribution as PRDistribution
import setuptools.tests.server
import pkg_resources
from .py26compat import skipIf
class FakeDist(object):
    """Minimal stand-in for a distribution exposing a single console script."""

    def get_entry_map(self, group):
        # Only the console_scripts group is populated.
        return {'name': 'ep'} if group == 'console_scripts' else {}

    def as_requirement(self):
        return 'spec'
WANTED = """\
#!%s
# EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name'
__requires__ = 'spec'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('spec', 'console_scripts', 'name')()
)
""" % nt_quote_arg(fix_jython_executable(sys.executable, ""))
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestEasyInstallTest(unittest.TestCase):
    """Unit tests for the easy_install command itself."""

    def test_install_site_py(self):
        """install_site_py() must materialize site.py in the install dir."""
        dist = Distribution()
        cmd = easy_install(dist)
        cmd.sitepy_installed = False
        cmd.install_dir = tempfile.mkdtemp()
        try:
            cmd.install_site_py()
            sitepy = os.path.join(cmd.install_dir, 'site.py')
            self.assertTrue(os.path.exists(sitepy))
        finally:
            shutil.rmtree(cmd.install_dir)

    def test_get_script_args(self):
        """The generated console script must match the WANTED template."""
        dist = FakeDist()
        # NOTE(review): old_platform is saved and restored but never changed
        # in between — the try/finally looks vestigial; confirm intent.
        old_platform = sys.platform
        try:
            name, script = [i for i in next(get_script_args(dist))][0:2]
        finally:
            sys.platform = old_platform

        self.assertEqual(script, WANTED)

    def test_no_find_links(self):
        # new option '--no-find-links', that blocks find-links added at
        # the project level
        dist = Distribution()
        cmd = easy_install(dist)
        cmd.check_pth_processing = lambda: True
        cmd.no_find_links = True
        cmd.find_links = ['link1', 'link2']
        cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
        cmd.args = ['ok']
        cmd.ensure_finalized()
        # With --no-find-links nothing may be scheduled for scanning.
        self.assertEqual(cmd.package_index.scanned_urls, {})

        # let's try without it (default behavior)
        cmd = easy_install(dist)
        cmd.check_pth_processing = lambda: True
        cmd.find_links = ['link1', 'link2']
        cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
        cmd.args = ['ok']
        cmd.ensure_finalized()
        keys = sorted(cmd.package_index.scanned_urls.keys())
        self.assertEqual(keys, ['link1', 'link2'])
class TestPTHFileWriter(unittest.TestCase):
    """Dirty-flag behavior of PthDistributions."""

    def test_add_from_cwd_site_sets_dirty(self):
        '''a pth file manager should set dirty
        if a distribution is in site but also the cwd
        '''
        cwd = os.getcwd()
        pth = PthDistributions('does-not_exist', [cwd])
        self.assertFalse(pth.dirty)
        pth.add(PRDistribution(cwd))
        self.assertTrue(pth.dirty)

    def test_add_from_site_is_ignored(self):
        # Pick a path that is guaranteed not to be the cwd on this platform.
        if os.name == 'nt':
            location = 'c:\\does_not_exist'
        else:
            location = '/test/location/does-not-have-to-exist'
        pth = PthDistributions('does-not_exist', [location, ])
        self.assertFalse(pth.dirty)
        pth.add(PRDistribution(location))
        self.assertFalse(pth.dirty)
class TestUserInstallTest(unittest.TestCase):
    """Tests around --user installs; setUp/tearDown swap the real site
    configuration for temporary directories and restore it afterwards."""

    def setUp(self):
        # Project dir with a trivial setup.py; become its cwd.
        self.dir = tempfile.mkdtemp()
        setup = os.path.join(self.dir, 'setup.py')
        f = open(setup, 'w')
        f.write(SETUP_PY)
        f.close()
        self.old_cwd = os.getcwd()
        os.chdir(self.dir)

        # Redirect the user site/base to temp dirs so nothing touches the
        # real user environment.
        self.old_enable_site = site.ENABLE_USER_SITE
        self.old_file = easy_install_pkg.__file__
        self.old_base = site.USER_BASE
        site.USER_BASE = tempfile.mkdtemp()
        self.old_site = site.USER_SITE
        site.USER_SITE = tempfile.mkdtemp()
        easy_install_pkg.__file__ = site.USER_SITE

    def tearDown(self):
        # Undo every global mutation from setUp, in reverse order.
        os.chdir(self.old_cwd)
        shutil.rmtree(self.dir)

        shutil.rmtree(site.USER_BASE)
        shutil.rmtree(site.USER_SITE)
        site.USER_BASE = self.old_base
        site.USER_SITE = self.old_site
        site.ENABLE_USER_SITE = self.old_enable_site
        easy_install_pkg.__file__ = self.old_file

    def test_user_install_implied(self):
        site.ENABLE_USER_SITE = True  # disabled sometimes
        # XXX: replace with something meaningfull
        dist = Distribution()
        dist.script_name = 'setup.py'
        cmd = easy_install(dist)
        cmd.args = ['py']
        cmd.ensure_finalized()
        self.assertTrue(cmd.user, 'user should be implied')

    def test_multiproc_atexit(self):
        try:
            __import__('multiprocessing')
        except ImportError:
            # skip the test if multiprocessing is not available
            return

        log = logging.getLogger('test_easy_install')
        logging.basicConfig(level=logging.INFO, stream=sys.stderr)
        log.info('this should not break')

    def test_user_install_not_implied_without_usersite_enabled(self):
        site.ENABLE_USER_SITE = False  # usually enabled
        # XXX: replace with something meaningfull
        dist = Distribution()
        dist.script_name = 'setup.py'
        cmd = easy_install(dist)
        cmd.args = ['py']
        cmd.initialize_options()
        self.assertFalse(cmd.user, 'NOT user should be implied')

    def test_local_index(self):
        # make sure the local index is used
        # when easy_install looks for installed
        # packages
        new_location = tempfile.mkdtemp()
        target = tempfile.mkdtemp()
        egg_file = os.path.join(new_location, 'foo-1.0.egg-info')
        f = open(egg_file, 'w')
        try:
            f.write('Name: foo\n')
        finally:
            f.close()

        sys.path.append(target)
        old_ppath = os.environ.get('PYTHONPATH')
        os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path)
        try:
            dist = Distribution()
            dist.script_name = 'setup.py'
            cmd = easy_install(dist)
            cmd.install_dir = target
            cmd.args = ['foo']
            cmd.ensure_finalized()
            cmd.local_index.scan([new_location])
            res = cmd.easy_install('foo')
            # Normalize both sides: symlinked temp dirs (e.g. macOS /var)
            # and case-insensitive filesystems.
            actual = os.path.normcase(os.path.realpath(res.location))
            expected = os.path.normcase(os.path.realpath(new_location))
            self.assertEqual(actual, expected)
        finally:
            sys.path.remove(target)
            for basedir in [new_location, target, ]:
                if not os.path.exists(basedir) or not os.path.isdir(basedir):
                    continue
                try:
                    shutil.rmtree(basedir)
                except:
                    pass
            if old_ppath is not None:
                os.environ['PYTHONPATH'] = old_ppath
            else:
                del os.environ['PYTHONPATH']

    def test_setup_requires(self):
        """Regression test for Distribute issue #318

        Ensure that a package with setup_requires can be installed when
        setuptools is installed in the user site-packages without causing a
        SandboxViolation.
        """
        test_pkg = create_setup_requires_package(self.dir)
        test_setup_py = os.path.join(test_pkg, 'setup.py')
        try:
            with quiet_context():
                with reset_setup_stop_context():
                    run_setup(test_setup_py, ['install'])
        except SandboxViolation:
            self.fail('Installation caused SandboxViolation')
        except IndexError:
            # Test fails in some cases due to bugs in Python
            # See https://bitbucket.org/pypa/setuptools/issue/201
            pass
class TestSetupRequires(unittest.TestCase):
    """Behavior of setup_requires during easy_install runs."""

    def test_setup_requires_honors_fetch_params(self):
        """
        When easy_install installs a source distribution which specifies
        setup_requires, it should honor the fetch parameters (such as
        allow-hosts, index-url, and find-links).
        """
        # set up a server which will simulate an alternate package index.
        p_index = setuptools.tests.server.MockServer()
        p_index.start()
        netloc = 1
        p_index_loc = urlparse(p_index.url)[netloc]
        if p_index_loc.endswith(':0'):
            # Some platforms (Jython) don't find a port to which to bind,
            # so skip this test for them.
            return
        with quiet_context():
            # create an sdist that has a build-time dependency.
            with TestSetupRequires.create_sdist() as dist_file:
                with tempdir_context() as temp_install_dir:
                    with environment_context(PYTHONPATH=temp_install_dir):
                        ei_params = ['--index-url', p_index.url,
                                     '--allow-hosts', p_index_loc,
                                     '--exclude-scripts', '--install-dir', temp_install_dir,
                                     dist_file]
                        with reset_setup_stop_context():
                            with argv_context(['easy_install']):
                                # attempt to install the dist. It should fail because
                                # it doesn't exist.
                                self.assertRaises(SystemExit,
                                                  easy_install_pkg.main, ei_params)
        # there should have been two or three requests to the server
        # (three happens on Python 3.3a)
        self.assertTrue(2 <= len(p_index.requests) <= 3)
        self.assertEqual(p_index.requests[0].path, '/does-not-exist/')

    @staticmethod
    @contextlib.contextmanager
    def create_sdist():
        """
        Return an sdist with a setup_requires dependency (of something that
        doesn't exist)
        """
        with tempdir_context() as dir:
            dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz')
            make_trivial_sdist(
                dist_path,
                textwrap.dedent("""
                    import setuptools
                    setuptools.setup(
                        name="setuptools-test-fetcher",
                        version="1.0",
                        setup_requires = ['does-not-exist'],
                    )
                """).lstrip())
            yield dist_path

    def test_setup_requires_overrides_version_conflict(self):
        """
        Regression test for issue #323.

        Ensures that a distribution's setup_requires requirements can still be
        installed and used locally even if a conflicting version of that
        requirement is already on the path.
        """
        # Poison the working set with a conflicting 'foobar' distribution.
        pr_state = pkg_resources.__getstate__()
        fake_dist = PRDistribution('does-not-matter', project_name='foobar',
                                   version='0.0')
        working_set.add(fake_dist)
        try:
            with tempdir_context() as temp_dir:
                test_pkg = create_setup_requires_package(temp_dir)
                test_setup_py = os.path.join(test_pkg, 'setup.py')
                with quiet_context() as (stdout, stderr):
                    with reset_setup_stop_context():
                        try:
                            # Don't even need to install the package, just
                            # running the setup.py at all is sufficient
                            run_setup(test_setup_py, ['--name'])
                        except VersionConflict:
                            self.fail('Installing setup.py requirements '
                                      'caused a VersionConflict')

                lines = stdout.readlines()
                self.assertTrue(len(lines) > 0)
                self.assertTrue(lines[-1].strip(), 'test_pkg')
        finally:
            # Restore the global pkg_resources state no matter what.
            pkg_resources.__setstate__(pr_state)
def create_setup_requires_package(path):
    """Creates a source tree under path for a trivial test package that has a
    single requirement in setup_requires--a tarball for that requirement is
    also created and added to the dependency_links argument.
    """
    test_setup_attrs = {
        'name': 'test_pkg', 'version': '0.0',
        'setup_requires': ['foobar==0.1'],
        'dependency_links': [os.path.abspath(path)]
    }

    # Lay out the package directory with its generated setup.py.
    test_pkg = os.path.join(path, 'test_pkg')
    os.mkdir(test_pkg)
    test_setup_py = os.path.join(test_pkg, 'setup.py')
    setup_source = textwrap.dedent("""\
        import setuptools
        setuptools.setup(**%r)
    """ % test_setup_attrs)
    with open(test_setup_py, 'w') as f:
        f.write(setup_source)

    # Build the sdist that satisfies the setup_requires entry.
    foobar_path = os.path.join(path, 'foobar-0.1.tar.gz')
    foobar_source = textwrap.dedent("""\
        import setuptools
        setuptools.setup(
            name='foobar',
            version='0.1'
        )
    """)
    make_trivial_sdist(foobar_path, foobar_source)

    return test_pkg
def make_trivial_sdist(dist_path, setup_py):
    """Create a simple sdist tarball at dist_path, containing just a
    setup.py, the contents of which are provided by the setup_py string.

    Args:
        dist_path: filesystem path of the .tar.gz archive to create.
        setup_py: source text stored as the archive's setup.py.
    """
    setup_py_file = tarfile.TarInfo(name='setup.py')
    # The payload is always utf-8-encoded bytes, so BytesIO is correct on
    # both Python 2 and 3.  The previous `try: MemFile = BytesIO / except
    # AttributeError` was dead code: a bare name assignment can never raise
    # AttributeError, so the StringIO fallback was unreachable.
    setup_py_bytes = BytesIO(setup_py.encode('utf-8'))
    setup_py_file.size = len(setup_py_bytes.getvalue())
    dist = tarfile.open(dist_path, 'w:gz')
    try:
        dist.addfile(setup_py_file, fileobj=setup_py_bytes)
    finally:
        dist.close()
@contextlib.contextmanager
def tempdir_context(cd=lambda dir: None):
    """Yield a fresh temporary directory, removing it on exit.

    *cd* is called with the temp dir on entry and with the original working
    directory on exit (the default does nothing).
    """
    original_dir = os.getcwd()
    created_dir = tempfile.mkdtemp()
    try:
        cd(created_dir)
        yield created_dir
    finally:
        cd(original_dir)
        shutil.rmtree(created_dir)
@contextlib.contextmanager
def environment_context(**updates):
    """Temporarily apply *updates* to os.environ, restoring prior state.

    Keys that did not exist beforehand are removed on exit; pre-existing
    keys get their old values back.
    """
    old_env = os.environ.copy()
    os.environ.update(updates)
    try:
        yield
    finally:
        for key in updates:
            # pop() tolerates the code under test having already deleted the
            # variable; the previous `del os.environ[key]` raised KeyError
            # in that case and masked the real outcome.
            os.environ.pop(key, None)
        os.environ.update(old_env)
@contextlib.contextmanager
def argv_context(repl):
    """Temporarily replace sys.argv with *repl*, restoring it on exit.

    The restore is inside try/finally so it also happens when the body
    raises; the previous version left sys.argv clobbered on exceptions.
    """
    old_argv = sys.argv[:]
    sys.argv[:] = repl
    try:
        yield
    finally:
        sys.argv[:] = old_argv
@contextlib.contextmanager
def reset_setup_stop_context():
    """
    When the setuptools tests are run using setup.py test, and then
    one wants to invoke another setup() command (such as easy_install)
    within those tests, it's necessary to reset the global variable
    in distutils.core so that the setup() command will run naturally.

    The restore is inside try/finally so the flag is reinstated even when
    the body raises; the previous version skipped restoration on error.
    """
    setup_stop_after = distutils.core._setup_stop_after
    distutils.core._setup_stop_after = None
    try:
        yield
    finally:
        distutils.core._setup_stop_after = setup_stop_after
@contextlib.contextmanager
def quiet_context():
    """
    Redirect stdout/stderr to StringIO objects to prevent console output from
    distutils commands.
    """
    saved_streams = sys.stdout, sys.stderr
    captured_out = sys.stdout = StringIO()
    captured_err = sys.stderr = StringIO()
    try:
        yield captured_out, captured_err
    finally:
        # Rewind so callers can read what was captured, then restore.
        captured_out.seek(0)
        captured_err.seek(0)
        sys.stdout, sys.stderr = saved_streams
| |
# coding: utf-8
"""
TranslationApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class TranslationApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Use the supplied api_client when given; otherwise fall back to the
    shared Configuration's client, creating it on first use."""
    if api_client:
        self.api_client = api_client
        return
    config = Configuration()
    if not config.api_client:
        config.api_client = ApiClient()
    self.api_client = config.api_client
def get_translation(self, uuid, authorization, **kwargs):
    """
    This is to retrieve a translation that has been queued up (via TranslationRequest uuid)
    Retrieves the translation via UUID
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_translation(uuid, authorization, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str uuid: Uuid (required)
    :param str authorization: Access token required to access the API (required)
    :return: TranslationRequest
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject any keyword argument this endpoint does not know about.
    all_params = ['uuid', 'authorization']
    all_params.append('callback')

    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_translation" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'uuid' is set
    if ('uuid' not in params) or (params['uuid'] is None):
        raise ValueError("Missing the required parameter `uuid` when calling `get_translation`")
    # verify the required parameter 'authorization' is set
    if ('authorization' not in params) or (params['authorization'] is None):
        raise ValueError("Missing the required parameter `authorization` when calling `get_translation`")

    resource_path = '/translation/{uuid}/'.replace('{format}', 'json')
    method = 'GET'

    # uuid goes into the path; authorization travels as a header.
    path_params = {}
    if 'uuid' in params:
        path_params['uuid'] = params['uuid']

    query_params = {}

    header_params = {}
    if 'authorization' in params:
        header_params['authorization'] = params['authorization']

    form_params = {}
    files = {}

    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type([])

    # Authentication setting
    auth_settings = []

    # Delegate the actual HTTP round trip (sync or threaded if a callback
    # was supplied) to the shared ApiClient.
    response = self.api_client.call_api(resource_path, method,
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=files,
                                        response_type='TranslationRequest',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
    def update_translation_request(self, uuid, translation_request_creation_payload, authorization, **kwargs):
        """
        Update a given translation defined by uuid
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.update_translation_request(uuid, translation_request_creation_payload, authorization, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str uuid: Uuid (required)
        :param TranslationRequestCreationPayload translation_request_creation_payload: The TranslationRequest Attributes that can be updated (required)
        :param str authorization: The access token required to access the API (required)
        :return: TranslationRequest
            If the method is called asynchronously,
            returns the request thread.
        """
        # Accepted argument names; 'callback' is allowed in addition to the
        # documented parameters so the request can be made asynchronously.
        all_params = ['uuid', 'translation_request_creation_payload', 'authorization']
        all_params.append('callback')
        # locals() snapshots the explicit arguments (plus 'kwargs' itself);
        # unknown kwargs are rejected, known ones are folded into the dict.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_translation_request" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'uuid' is set
        if ('uuid' not in params) or (params['uuid'] is None):
            raise ValueError("Missing the required parameter `uuid` when calling `update_translation_request`")
        # verify the required parameter 'translation_request_creation_payload' is set
        if ('translation_request_creation_payload' not in params) or (params['translation_request_creation_payload'] is None):
            raise ValueError("Missing the required parameter `translation_request_creation_payload` when calling `update_translation_request`")
        # verify the required parameter 'authorization' is set
        if ('authorization' not in params) or (params['authorization'] is None):
            raise ValueError("Missing the required parameter `authorization` when calling `update_translation_request`")
        # '{format}' placeholder is legacy codegen output; the path has no
        # such placeholder so the replace is a no-op kept for consistency.
        resource_path = '/translation/{uuid}/'.replace('{format}', 'json')
        method = 'POST'
        path_params = {}
        if 'uuid' in params:
            path_params['uuid'] = params['uuid']
        query_params = {}
        header_params = {}
        if 'authorization' in params:
            header_params['authorization'] = params['authorization']
        form_params = {}
        files = {}
        # The payload object is serialized as the request body.
        body_params = None
        if 'translation_request_creation_payload' in params:
            body_params = params['translation_request_creation_payload']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = []
        response = self.api_client.call_api(resource_path, method,
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=files,
                                            response_type='TranslationRequest',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.impl import api
from tensorflow.contrib.autograph.impl import config
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.utils import py_func
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
tf = utils.fake_tf()
class ApiTest(test.TestCase):
  """Tests for the autograph public API: convert, do_not_convert, to_graph."""

  def setUp(self):
    # Import statements prepended to every compiled (converted) function so
    # that the generated code can resolve the tf/autograph symbols it uses.
    config.COMPILED_IMPORT_STATEMENTS = (
        'from __future__ import print_function',
        'from tensorflow.contrib.autograph import utils'
        ' as autograph_utils',
        'from tensorflow.contrib.autograph import operators'
        ' as __ops',
        'tf = autograph_utils.fake_tf()',
    )

  def test_decorator_recurses(self):
    """convert(recursive=True) also converts methods called by the target."""

    class TestClass(object):

      def called_member(self, a):
        if a < 0:
          a = -a
        return a

      @api.convert(recursive=True)
      def test_method(self, x, s, a):
        while tf.reduce_sum(x) > s:
          x //= self.called_member(a)
        return x

    tc = TestClass()
    with self.test_session() as sess:
      x = tc.test_method(
          constant_op.constant([2, 4]), constant_op.constant(1),
          constant_op.constant(-2))
      self.assertListEqual([0, 1], sess.run(x).tolist())

  def test_decorator_does_not_recurse(self):
    """convert(recursive=False) leaves callees unconverted; callee here uses
    plain TF ops, so the converted caller still works."""

    class TestClass(object):

      def called_member(self, a):
        return tf.negative(a)

      @api.convert(recursive=False)
      def test_method(self, x, s, a):
        while tf.reduce_sum(x) > s:
          x //= self.called_member(a)
        return x

    tc = TestClass()
    with self.test_session() as sess:
      x = tc.test_method(
          constant_op.constant([2, 4]), constant_op.constant(1),
          constant_op.constant(-2))
      self.assertListEqual([0, 1], sess.run(x).tolist())

  def test_decorator_calls_unconverted_graph(self):
    """do_not_convert(GRAPH) callees are invoked as-is in graph mode."""

    class TestClass(object):

      @api.do_not_convert(api.RunMode.GRAPH)
      def called_member(self, a):
        return tf.negative(a)

      @api.convert(recursive=True)
      def test_method(self, x, s, a):
        while tf.reduce_sum(x) > s:
          x //= self.called_member(a)
        return x

    tc = TestClass()
    with self.test_session() as sess:
      x = tc.test_method(
          constant_op.constant([2, 4]), constant_op.constant(1),
          constant_op.constant(-2))
      self.assertListEqual([0, 1], sess.run(x).tolist())

  def test_decorator_calls_unconverted_py_func(self):
    """do_not_convert(PY_FUNC) wraps a NumPy callee in a py_func."""

    class TestClass(object):

      @api.do_not_convert(
          api.RunMode.PY_FUNC, return_dtypes=py_func.MatchDType(1))
      def called_member(self, a):
        return np.negative(a)

      @api.convert(recursive=True)
      def test_method(self, x, s, a):
        while tf.reduce_sum(x) > s:
          y = self.called_member(a)
          # set_shape works around while_loop's limitations.
          # TODO(mdan): Allow specifying shapes (or ShapeLike) instead.
          y.set_shape(a.shape)
          x //= y
        return x

    tc = TestClass()
    with self.test_session() as sess:
      x = tc.test_method(
          constant_op.constant([2, 4]), constant_op.constant(1),
          constant_op.constant(-2))
      self.assertListEqual([0, 1], sess.run(x).tolist())

  def test_decorator_calls_decorated(self):
    """A converted method may call another independently-converted method."""

    class TestClass(object):

      @api.convert()
      def called_member(self, a):
        if a < 0:
          a = -a
        return a

      @api.convert(recursive=True)
      def test_method(self, x, s, a):
        while tf.reduce_sum(x) > s:
          x //= self.called_member(a)
        return x

    tc = TestClass()
    with self.test_session() as sess:
      x = tc.test_method(
          constant_op.constant([2, 4]), constant_op.constant(1),
          constant_op.constant(-2))
      self.assertListEqual([0, 1], sess.run(x).tolist())

  def test_convert_call_site_decorator(self):
    """converted_call converts an arbitrary callable at the call site."""

    class TestClass(object):

      def called_member(self, a):
        if a < 0:
          a = -a
        return a

      @api.convert(recursive=True)
      def test_method(self, x, s, a):
        while tf.reduce_sum(x) > s:
          x //= api.converted_call(self.called_member, False, False, {}, self,
                                   a)
        return x

    tc = TestClass()
    with self.test_session() as sess:
      x = tc.test_method(
          constant_op.constant([2, 4]), constant_op.constant(1),
          constant_op.constant(-2))
      self.assertListEqual([0, 1], sess.run(x).tolist())

  def test_to_graph_basic(self):
    """to_graph compiles a plain function into a graph-building one."""

    def test_fn(x, s):
      while tf.reduce_sum(x) > s:
        x //= 2
      return x

    compiled_fn = api.to_graph(test_fn)
    with self.test_session() as sess:
      x = compiled_fn(constant_op.constant([4, 8]), 4)
      self.assertListEqual([1, 2], sess.run(x).tolist())

  def test_to_code_basic(self):
    """to_code returns the generated source for a function."""

    def test_fn(x, s):
      while tf.reduce_sum(x) > s:
        x /= 2
      return x

    compiled_code = api.to_code(test_fn)
    # Just check that it is parseable Python code.
    self.assertIsNotNone(parser.parse_str(compiled_code))
# Standard test entry point when executed directly.
if __name__ == '__main__':
  test.main()
| |
from __future__ import print_function
from awips.dataaccess import DataAccessLayer as DAL
from awips.ThriftClient import ThriftRequestException
import os
import unittest
#
# Base TestCase for DAF tests. This class provides helper methods and
# tests common to all DAF test cases.
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 01/19/16 4795 mapeters Initial Creation.
# 04/11/16 5548 tgurney Cleanup
# 04/13/16 5379 tgurney Add identifier values tests
# 04/18/16 5548 tgurney More cleanup, plus new tests
# 04/26/16 5587 tgurney Move identifier values tests
# to subclasses
# 06/01/16 5587 tgurney Add testGet*Identifiers
# 06/07/16 5574 tgurney Make geometry/grid data tests
# return the retrieved data
# 06/10/16 5548 tgurney Make testDatatypeIsSupported
# case-insensitive
# 08/10/16 2416 tgurney Don't test identifier values
# for dataURI
# 10/05/16 5926 dgilling Better checks in runGeometryDataTest.
# 11/08/16 5985 tgurney Do not check data times on
# time-agnostic data
# 03/13/17 5981 tgurney Do not check valid period on
# data time
#
#
class DafTestCase(unittest.TestCase):
    """Base TestCase for DAF tests.

    Provides helper methods and tests common to all DAF test cases.
    Subclasses must set ``datatype``.
    """

    # Maximum number of levels, locations, times, and geometry/grid records
    # to display in test output.
    sampleDataLimit = 5

    # When limiting geometry/grid data requests with times, only retrieve
    # data for this many times.
    numTimesToLimit = 3

    # Name of the datatype; must be overridden by subclasses.
    datatype = None

    @classmethod
    def setUpClass(cls):
        # Allow the EDEX host to be overridden from the environment so the
        # suite can be pointed at a local or test server.
        host = os.environ.get('DAF_TEST_HOST', 'edex-cloud.unidata.ucar.edu')
        DAL.changeEDEXHost(host)

    @staticmethod
    def getTimesIfSupported(req):
        """Return available times for req. If req refers to a time-agnostic
        datatype, return an empty list instead.
        """
        times = []
        try:
            times = DAL.getAvailableTimes(req)
        except ThriftRequestException as e:
            # Time-agnostic datatypes raise server-side; anything else is a
            # real failure and must propagate.
            if 'TimeAgnosticDataException' not in str(e):
                raise
        return times

    def testDatatypeIsSupported(self):
        # Case-insensitive membership check against the server's list.
        allSupported = (item.lower() for item in DAL.getSupportedDatatypes())
        self.assertIn(self.datatype.lower(), allSupported)

    def testGetRequiredIdentifiers(self):
        req = DAL.newDataRequest(self.datatype)
        required = DAL.getRequiredIdentifiers(req)
        self.assertIsNotNone(required)
        print("Required identifiers:", required)

    def testGetOptionalIdentifiers(self):
        req = DAL.newDataRequest(self.datatype)
        optional = DAL.getOptionalIdentifiers(req)
        self.assertIsNotNone(optional)
        print("Optional identifiers:", optional)

    def runGetIdValuesTest(self, identifiers):
        """Check that each identifier's values can be listed and iterated."""
        # 'idName' rather than 'id' to avoid shadowing the builtin.
        for idName in identifiers:
            if idName.lower() == 'datauri':
                # Identifier values are not tested for dataURI.
                continue
            req = DAL.newDataRequest(self.datatype)
            idValues = DAL.getIdentifierValues(req, idName)
            self.assertTrue(hasattr(idValues, '__iter__'))

    def runInvalidIdValuesTest(self):
        """A malformed identifier name must be rejected by the server."""
        badString = 'id from ' + self.datatype + '; select 1;'
        with self.assertRaises(ThriftRequestException):
            req = DAL.newDataRequest(self.datatype)
            DAL.getIdentifierValues(req, badString)

    def runNonexistentIdValuesTest(self):
        """Requesting values for an unknown identifier must raise."""
        with self.assertRaises(ThriftRequestException):
            req = DAL.newDataRequest(self.datatype)
            DAL.getIdentifierValues(req, 'idthatdoesnotexist')

    def runParametersTest(self, req):
        params = DAL.getAvailableParameters(req)
        self.assertIsNotNone(params)
        print(params)

    def runLevelsTest(self, req):
        levels = DAL.getAvailableLevels(req)
        self.assertIsNotNone(levels)
        print("Number of levels: " + str(len(levels)))
        strLevels = [str(t) for t in levels[:self.sampleDataLimit]]
        print("Sample levels:\n" + str(strLevels))

    def runLocationsTest(self, req):
        locs = DAL.getAvailableLocationNames(req)
        self.assertIsNotNone(locs)
        print("Number of location names: " + str(len(locs)))
        print("Sample location names:\n" + str(locs[:self.sampleDataLimit]))

    def runTimesTest(self, req):
        times = DAL.getAvailableTimes(req)
        self.assertIsNotNone(times)
        print("Number of times: " + str(len(times)))
        strTimes = [str(t) for t in times[:self.sampleDataLimit]]
        print("Sample times:\n" + str(strTimes))

    def runTimeAgnosticTest(self, req):
        """Time-agnostic datatypes must reject getAvailableTimes."""
        with self.assertRaises(ThriftRequestException) as cm:
            DAL.getAvailableTimes(req)
        self.assertIn('TimeAgnosticDataException', str(cm.exception))

    def runGeometryDataTest(self, req, checkDataTimes=True):
        """
        Test that we are able to successfully retrieve geometry data for the
        given request.
        """
        times = DafTestCase.getTimesIfSupported(req)
        geomData = DAL.getGeometryData(req, times[:self.numTimesToLimit])
        self.assertIsNotNone(geomData)
        if not geomData:
            raise unittest.SkipTest("No data available")
        print("Number of geometry records: " + str(len(geomData)))
        print("Sample geometry data:")
        for record in geomData[:self.sampleDataLimit]:
            # Skip the data-time check for time-agnostic data and for data
            # times constructed with a valid period.
            if (checkDataTimes and times and
                    "PERIOD_USED" not in record.getDataTime().getUtilityFlags()):
                self.assertIn(record.getDataTime(), times[:self.numTimesToLimit])
            print("geometry=" + str(record.getGeometry()), end="")
            for p in req.getParameters():
                print(" " + p + "=" + str(record.getString(p)), end="")
            print()
        return geomData

    def runGeometryDataTestWithTimeRange(self, req, timeRange):
        """
        Test that we are able to successfully retrieve geometry data for the
        given request within the given time range.
        """
        geomData = DAL.getGeometryData(req, timeRange)
        self.assertIsNotNone(geomData)
        if not geomData:
            raise unittest.SkipTest("No data available")
        print("Number of geometry records: " + str(len(geomData)))
        print("Sample geometry data:")
        for record in geomData[:self.sampleDataLimit]:
            # Every record's reference time must fall inside the range.
            refMillis = record.getDataTime().getRefTime().getTime()
            self.assertGreaterEqual(refMillis, timeRange.getStartInMillis())
            self.assertLessEqual(refMillis, timeRange.getEndInMillis())
            print("geometry=" + str(record.getGeometry()), end="")
            for p in req.getParameters():
                # str() for consistency with runGeometryDataTest, since
                # getString may not always return a plain string.
                print(" " + p + "=" + str(record.getString(p)), end="")
            print()
        return geomData

    def runGridDataTest(self, req, testSameShape=True):
        """
        Test that we are able to successfully retrieve grid data for the given
        request.

        Args:
            testSameShape: whether or not to verify that all the retrieved data
                           have the same shape (most data don't change shape)
        """
        times = DafTestCase.getTimesIfSupported(req)
        gridData = DAL.getGridData(req, times[:self.numTimesToLimit])
        self.assertIsNotNone(gridData)
        if not gridData:
            raise unittest.SkipTest("No data available")
        print("Number of grid records: " + str(len(gridData)))
        # gridData is guaranteed non-empty past the SkipTest guard above.
        print("Sample grid data shape:\n" + str(gridData[0].getRawData().shape) + "\n")
        print("Sample grid data:\n" + str(gridData[0].getRawData()) + "\n")
        print("Sample lat-lon data:\n" + str(gridData[0].getLatLonCoords()) + "\n")
        if testSameShape:
            correctGridShape = gridData[0].getLatLonCoords()[0].shape
            for record in gridData:
                rawData = record.getRawData()
                self.assertIsNotNone(rawData)
                self.assertEqual(rawData.shape, correctGridShape)
        return gridData
| |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Utility Methods
"""
import random
import sys
import six
import nova.conf
from nova import objects
from nova.objects import base as obj_base
# Separator used between cell names for the 'full cell name' and routing
# path
PATH_CELL_SEP = '!'
# Flag prepended to a cell name to indicate data shouldn't be synced during
# an instance save. There are no illegal chars in a cell name so using the
# meaningful PATH_CELL_SEP in an invalid way will need to suffice.
BLOCK_SYNC_FLAG = '!!'
# Separator used between cell name and item
_CELL_ITEM_SEP = '@'
CONF = nova.conf.CONF
class ProxyObjectSerializer(obj_base.NovaObjectSerializer):
    """Object serializer that rehydrates cell-proxy-wrapped objects."""

    def __init__(self):
        super(ProxyObjectSerializer, self).__init__()
        # Handle on the parent serializer so proxy deserialization can fall
        # back to normal object processing.
        self.serializer = super(ProxyObjectSerializer, self)

    def _process_object(self, context, objprim):
        # _CellProxy inspects the primitive for cell-proxy metadata and
        # returns either a proxy wrapper or the plain deserialized object.
        return _CellProxy.obj_from_primitive(self.serializer, objprim, context)
class _CellProxy(object):
    """Wrap an object so its 'id' and 'host' appear qualified with a cell
    path, while all other attributes delegate to the wrapped object.
    """

    def __init__(self, obj, cell_path):
        # obj: the wrapped object; cell_path: cell name to prepend to
        # 'id'/'host' when they are read through the proxy.
        self._obj = obj
        self._cell_path = cell_path

    @property
    def id(self):
        # Present the id as '<cell_path>@<id>'.
        return cell_with_item(self._cell_path, self._obj.id)

    @property
    def host(self):
        # Present the host as '<cell_path>@<host>'.
        return cell_with_item(self._cell_path, self._obj.host)

    def __getitem__(self, key):
        # Dict-style access for backwards compatibility; 'id' and 'host'
        # go through the cell-qualified properties above.
        if key == 'id':
            return self.id
        if key == 'host':
            return self.host
        return getattr(self._obj, key)

    def obj_to_primitive(self):
        # Serialize the wrapped object and tag the primitive with enough
        # metadata to rebuild this proxy on the receiving side.
        obj_p = self._obj.obj_to_primitive()
        obj_p['cell_proxy.class_name'] = self.__class__.__name__
        obj_p['cell_proxy.cell_path'] = self._cell_path
        return obj_p

    @classmethod
    def obj_from_primitive(cls, serializer, primitive, context=None):
        # Pop the proxy metadata (if present) before normal deserialization.
        obj_primitive = primitive.copy()
        cell_path = obj_primitive.pop('cell_proxy.cell_path', None)
        klass_name = obj_primitive.pop('cell_proxy.class_name', None)
        obj = serializer._process_object(context, obj_primitive)
        if klass_name is not None and cell_path is not None:
            # Look the proxy class up by name in this module and re-wrap.
            klass = getattr(sys.modules[__name__], klass_name)
            return klass(obj, cell_path)
        else:
            return obj

    # dict-ish syntax sugar
    def _iteritems(self):
        """For backwards-compatibility with dict-based objects.
        NOTE(sbauza): May be removed in the future.
        """
        for name in self._obj.obj_fields:
            if (self._obj.obj_attr_is_set(name) or
                    name in self._obj.obj_extra_fields):
                if name == 'id':
                    yield name, self.id
                elif name == 'host':
                    yield name, self.host
                else:
                    yield name, getattr(self._obj, name)

    # Python 2's dict API exposes iteritems(); Python 3's exposes items().
    if six.PY2:
        iteritems = _iteritems
    else:
        items = _iteritems

    def __getattr__(self, key):
        # Only called when normal lookup fails: delegate to the wrapped
        # object.
        return getattr(self._obj, key)
class ComputeNodeProxy(_CellProxy):
    # Cell-qualified proxy for ComputeNode objects; behavior inherited
    # unchanged from _CellProxy.
    pass
class ServiceProxy(_CellProxy):
    """Cell-qualified proxy for Service objects; hides the nested
    compute_node attribute.
    """

    def __getattr__(self, key):
        if key == 'compute_node':
            # NOTE(sbauza): As the Service object is still having a nested
            # ComputeNode object that consumers of this Proxy don't use, we can
            # safely remove it from what's returned
            raise AttributeError
        # NOTE(claudiub): needed for py34 compatibility.
        # get self._obj first, without ending into an infinite recursion.
        return getattr(self.__getattribute__("_obj"), key)
def get_instances_to_sync(context, updated_since=None, project_id=None,
                          deleted=True, shuffle=False, uuids_only=False):
    """Return a generator that will return a list of active and
    deleted instances to sync with parent cells. The list may
    optionally be shuffled for periodic updates so that multiple
    cells services aren't self-healing the same instances in nearly
    lockstep.

    :param updated_since: only yield instances changed since this time
    :param project_id: restrict to a single project if not None
    :param deleted: include deleted instances when True
    :param shuffle: randomize the order within each fetched page
    :param uuids_only: yield uuids instead of full instance objects
    """
    def _get_paginated_instances(context, filters, shuffle, limit, marker):
        # Fetch one page of results; the marker (last uuid of the previous
        # page) drives pagination.
        instances = objects.InstanceList.get_by_filters(
            context, filters, sort_key='deleted', sort_dir='asc',
            limit=limit, marker=marker)
        if len(instances) > 0:
            marker = instances[-1]['uuid']
        # NOTE(melwitt/alaski): Need a list that supports assignment for
        # shuffle. And pop() on the returned result.
        instances = list(instances)
        if shuffle:
            random.shuffle(instances)
        return instances, marker

    filters = {}
    if updated_since is not None:
        filters['changes-since'] = updated_since
    if project_id is not None:
        filters['project_id'] = project_id
    if not deleted:
        filters['deleted'] = False
    # Active instances first.
    limit = CONF.cells.instance_update_sync_database_limit
    marker = None
    instances = []
    while True:
        if not instances:
            # Refill when the current page is exhausted; an empty refill
            # means the whole result set has been walked.
            instances, marker = _get_paginated_instances(context, filters,
                                                         shuffle, limit, marker)
            if not instances:
                break
        instance = instances.pop(0)
        if uuids_only:
            yield instance.uuid
        else:
            yield instance
def cell_with_item(cell_name, item):
    """Combine a cell name and an item into '<cell_name>@<item>'.

    Returns the item unchanged when no cell name is given.
    """
    if cell_name is None:
        return item
    return ''.join((cell_name, _CELL_ITEM_SEP, str(item)))
def split_cell_and_item(cell_and_item):
    """Split a combined 'cell@item' string back into (cell, item).

    When no separator is present, the cell part is None.
    """
    parts = cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
    if len(parts) == 2:
        return parts
    return (None, cell_and_item)
def add_cell_to_compute_node(compute_node, cell_name):
    """Fix compute_node attributes that should be unique. Allows
    API cell to query the 'id' by cell@id.
    """
    # ComputeNode is a versioned object, so wrap it in a proxy that carries
    # the cell path rather than mutating the object itself.
    return ComputeNodeProxy(compute_node, cell_name)
def add_cell_to_service(service, cell_name):
    """Fix service attributes that should be unique. Allows
    API cell to query the 'id' or 'host' by cell@id/host.
    """
    # Service is a versioned object, so wrap it in a proxy that carries the
    # cell path rather than mutating the object itself.
    return ServiceProxy(service, cell_name)
def add_cell_to_task_log(task_log, cell_name):
    """Fix task_log attributes that should be unique. In particular,
    the 'id' and 'host' fields should be prepended with cell name.
    """
    # task_log is dict-like, so the qualified values are written in place.
    for field in ('id', 'host'):
        task_log[field] = cell_with_item(cell_name, task_log[field])
| |
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.type import constraint, tagmap, tag
from pyasn1.compat import calling
from pyasn1 import error
__all__ = ['Asn1Item', 'Asn1ItemBase', 'AbstractSimpleAsn1Item', 'AbstractConstructedAsn1Item']
class Asn1Item(object):
    """Root of the ASN.1 type hierarchy; hands out unique type IDs."""

    @classmethod
    def getTypeId(cls, increment=1):
        # Counter lives on the root class so IDs are unique across all
        # subclasses; it is created lazily on first use.
        if not hasattr(Asn1Item, '_typeCounter'):
            Asn1Item._typeCounter = 0
        Asn1Item._typeCounter += increment
        return Asn1Item._typeCounter
class Asn1ItemBase(Asn1Item):
    #: Set or return a :py:class:`~pyasn1.type.tag.TagSet` object representing
    #: ASN.1 tag(s) associated with |ASN.1| type.
    tagSet = tag.TagSet()

    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing constraints on initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Disambiguation ASN.1 types identification
    typeId = None

    def __init__(self, **kwargs):
        # Seed the read-only attribute set from class-level defaults, then
        # let keyword arguments override them.
        readOnly = {
            'tagSet': self.tagSet,
            'subtypeSpec': self.subtypeSpec
        }
        readOnly.update(kwargs)
        self.__dict__.update(readOnly)
        self._readOnly = readOnly

    def __setattr__(self, name, value):
        # Public attributes registered in _readOnly cannot be rebound after
        # construction; private (underscore) attributes are exempt.
        if name[0] != '_' and name in self._readOnly:
            raise error.PyAsn1Error('read-only instance attribute "%s"' % name)
        self.__dict__[name] = value

    @property
    def readOnly(self):
        return self._readOnly

    @property
    def effectiveTagSet(self):
        """For |ASN.1| type is equivalent to *tagSet*
        """
        return self.tagSet  # used by untagged types

    @property
    def tagMap(self):
        """Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects within callee object.
        """
        return tagmap.TagMap({self.tagSet: self})

    def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
        """Examine |ASN.1| type for equality with other ASN.1 type.

        ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
        (:py:mod:`~pyasn1.type.constraint`) are examined when carrying
        out ASN.1 types comparison.

        No Python inheritance relationship between PyASN1 objects is considered.

        Parameters
        ----------
        other: a pyasn1 type object
            Class instance representing ASN.1 type.

        Returns
        -------
        : :class:`bool`
            :class:`True` if *other* is |ASN.1| type,
            :class:`False` otherwise.
        """
        # Identity short-circuits; otherwise both enabled checks must hold.
        return (self is other or
                (not matchTags or self.tagSet == other.tagSet) and
                (not matchConstraints or self.subtypeSpec == other.subtypeSpec))

    def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
        """Examine |ASN.1| type for subtype relationship with other ASN.1 type.

        ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
        (:py:mod:`~pyasn1.type.constraint`) are examined when carrying
        out ASN.1 types comparison.

        No Python inheritance relationship between PyASN1 objects is considered.

        Parameters
        ----------
        other: a pyasn1 type object
            Class instance representing ASN.1 type.

        Returns
        -------
        : :class:`bool`
            :class:`True` if *other* is a subtype of |ASN.1| type,
            :class:`False` otherwise.
        """
        # BUGFIX: the original grouping made the whole expression True
        # whenever matchTags was False, skipping the constraints check
        # entirely ('or' vs 'and' precedence). The tag and constraint checks
        # are independent and must both hold.
        return ((not matchTags or
                 self.tagSet.isSuperTagSetOf(other.tagSet)) and
                (not matchConstraints or
                 self.subtypeSpec.isSuperTypeOf(other.subtypeSpec)))

    @staticmethod
    def isNoValue(*values):
        # True when every given value is "absent": either None or the
        # noValue sentinel defined later in this module.
        for value in values:
            if value is not None and value is not noValue:
                return False
        return True

    # backward compatibility

    def getTagSet(self):
        return self.tagSet

    def getEffectiveTagSet(self):
        return self.effectiveTagSet

    def getTagMap(self):
        return self.tagMap

    def getSubtypeSpec(self):
        return self.subtypeSpec

    def hasValue(self):
        # NOTE(review): isValue is supplied by value-bearing subclasses
        # (e.g. AbstractSimpleAsn1Item), not by this base class.
        return self.isValue
class NoValue(object):
    """Create a singleton instance of NoValue class.

    NoValue object can be used as an initializer on PyASN1 type class
    instantiation to represent ASN.1 type rather than ASN.1 data value.

    No operations other than type comparison can be performed on
    a PyASN1 type object.
    """
    # Dunder names that must keep their normal behavior so the sentinel
    # stays printable, constructible and introspectable.
    skipMethods = ('__getattribute__', '__getattr__', '__setattr__', '__delattr__',
                   '__class__', '__init__', '__del__', '__new__', '__repr__',
                   '__qualname__', '__objclass__', 'im_class', '__sizeof__')

    # Lazily-created singleton instance.
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            # On first instantiation, replace every "magic" method found on
            # common built-in types with a plug that raises, so accidental
            # use of an uninitialized ASN.1 value fails loudly.
            def getPlug(name):
                def plug(self, *args, **kw):
                    raise error.PyAsn1Error('Uninitialized ASN.1 value ("%s" attribute looked up)' % name)
                return plug

            op_names = [name
                        for typ in (str, int, list, dict)
                        for name in dir(typ)
                        if (name not in cls.skipMethods and
                            name.startswith('__') and
                            name.endswith('__') and
                            calling.callable(getattr(typ, name)))]
            for name in set(op_names):
                setattr(cls, name, getPlug(name))

            cls._instance = object.__new__(cls)

        return cls._instance

    def __getattr__(self, attr):
        # Attribute machinery on skipMethods fails with a plain
        # AttributeError; any other lookup means the sentinel was used as a
        # real value.
        if attr in self.skipMethods:
            raise AttributeError('attribute %s not present' % attr)
        raise error.PyAsn1Error('No value for "%s"' % attr)

    def __repr__(self):
        return '%s()' % self.__class__.__name__
noValue = NoValue()
# Base class for "simple" ASN.1 objects. These are immutable.
class AbstractSimpleAsn1Item(Asn1ItemBase):
    """Base class for "simple" (scalar, immutable) ASN.1 objects."""

    #: Default payload value
    defaultValue = noValue

    def __init__(self, value=noValue, **kwargs):
        Asn1ItemBase.__init__(self, **kwargs)
        if value is noValue or value is None:
            # No initializer -> fall back to the class default, which may
            # itself be noValue (producing a value-less "type" object).
            value = self.defaultValue
        else:
            value = self.prettyIn(value)
            try:
                self.subtypeSpec(value)
            except error.PyAsn1Error:
                # Re-raise the same exception type with the class name
                # appended for easier diagnosis.
                exType, exValue, exTb = sys.exc_info()
                raise exType('%s at %s' % (exValue, self.__class__.__name__))
        self._value = value

    def __repr__(self):
        representation = []
        if self._value is not self.defaultValue:
            representation.append(self.prettyOut(self._value))
        if self.tagSet is not self.__class__.tagSet:
            representation.append('tagSet=%r' % (self.tagSet,))
        if self.subtypeSpec is not self.__class__.subtypeSpec:
            representation.append('subtypeSpec=%r' % (self.subtypeSpec,))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(representation))

    def __str__(self):
        return str(self._value)

    def __eq__(self, other):
        # Identity short-circuits; otherwise compare payload values.
        # (Simplified from the archaic 'and True or' idiom; result is
        # identical.)
        return self is other or self._value == other

    def __ne__(self, other):
        return self._value != other

    def __lt__(self, other):
        return self._value < other

    def __le__(self, other):
        return self._value <= other

    def __gt__(self, other):
        return self._value > other

    def __ge__(self, other):
        return self._value >= other

    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            # bool() is equivalent to the archaic 'and True or False' form.
            return bool(self._value)
    else:
        def __bool__(self):
            return bool(self._value)

    def __hash__(self):
        return hash(self._value)

    @property
    def isValue(self):
        """Indicate if |ASN.1| object represents ASN.1 type or ASN.1 value.

        In other words, if *isValue* is `True`, then the ASN.1 object is
        initialized.

        Returns
        -------
        : :class:`bool`
            :class:`True` if object represents ASN.1 value and type,
            :class:`False` if object represents just ASN.1 type.

        Note
        ----
        There is an important distinction between PyASN1 type and value objects.
        The PyASN1 type objects can only participate in ASN.1 type
        operations (subtyping, comparison etc) and serve as a
        blueprint for serialization codecs to resolve ambiguous types.

        The PyASN1 value objects can additionally participate in most
        of built-in Python operations.
        """
        return self._value is not noValue

    def clone(self, value=noValue, **kwargs):
        """Create a copy of a |ASN.1| type or object.

        Any parameters to the *clone()* method will replace corresponding
        properties of the |ASN.1| object.

        Parameters
        ----------
        value: :class:`tuple`, :class:`str` or |ASN.1| object
            Initialization value to pass to new ASN.1 object instead of
            inheriting one from the caller.

        tagSet: :py:class:`~pyasn1.type.tag.TagSet`
            Object representing ASN.1 tag(s) to use in new object instead of inheriting from the caller

        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Object representing ASN.1 subtype constraint(s) to use in new object instead of inheriting from the caller

        Returns
        -------
        :
            new instance of |ASN.1| type/value
        """
        if value is noValue or value is None:
            if not kwargs:
                # Nothing changes: share the immutable instance.
                return self
            value = self._value
        # Fixed misspelled local variable ('initilaizers').
        initializers = self.readOnly.copy()
        initializers.update(kwargs)
        return self.__class__(value, **initializers)

    def subtype(self, value=noValue, **kwargs):
        """Create a copy of a |ASN.1| type or object.

        Any parameters to the *subtype()* method will be added to the corresponding
        properties of the |ASN.1| object.

        Parameters
        ----------
        value: :class:`tuple`, :class:`str` or |ASN.1| object
            Initialization value to pass to new ASN.1 object instead of
            inheriting one from the caller.

        implicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Implicitly apply given ASN.1 tag object to caller's
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).

        explicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Explicitly apply given ASN.1 tag object to caller's
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).

        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Add ASN.1 constraints object to one of the caller, then
            use the result as new object's ASN.1 constraints.

        Returns
        -------
        :
            new instance of |ASN.1| type/value
        """
        if value is noValue or value is None:
            if not kwargs:
                return self
            value = self._value
        initializers = self.readOnly.copy()
        # Tag options transform the existing tagSet rather than replace it.
        implicitTag = kwargs.pop('implicitTag', None)
        if implicitTag is not None:
            initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
        explicitTag = kwargs.pop('explicitTag', None)
        if explicitTag is not None:
            initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
        # Remaining options are *added* to (not substituted for) the
        # caller's values -- that is what makes this a subtype.
        for arg, option in kwargs.items():
            initializers[arg] += option
        return self.__class__(value, **initializers)

    def prettyIn(self, value):
        # Hook for subclasses to normalize initializer values.
        return value

    def prettyOut(self, value):
        # Hook for subclasses to render the payload for display.
        return str(value)

    def prettyPrint(self, scope=0):
        """Provide human-friendly printable object representation.

        Returns
        -------
        : :class:`str`
            human-friendly type and/or value representation.
        """
        if self.isValue:
            return self.prettyOut(self._value)
        else:
            return '<no value>'

    # XXX Compatibility stub
    def prettyPrinter(self, scope=0):
        return self.prettyPrint(scope)

    # noinspection PyUnusedLocal
    def prettyPrintType(self, scope=0):
        return '%s -> %s' % (self.tagSet, self.__class__.__name__)
#
# Constructed types:
# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
# * ASN1 types and values are represented by Python class instances
# * Value initialization is made for defaulted components only
# * Primary method of component addressing is by-position. Data model for base
# type is Python sequence. Additional type-specific addressing methods
# may be implemented for particular types.
# * SequenceOf and SetOf types do not implement any additional methods
# * Sequence, Set and Choice types also implement by-identifier addressing
# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
# * Sequence and Set types may include optional and defaulted
# components
# * Constructed types hold a reference to component types used for value
# verification and ordering.
# * Component type is a scalar type for SequenceOf/SetOf types and a list
# of types for Sequence/Set/Choice.
#
def setupComponent():
    """Returns a sentinel value.

    Assigning this sentinel tells a constructed type to instantiate its inner
    component so that it can be referred to afterwards.  Useful when
    populating descendants of a constructed type, which requires referring to
    the parent types along the way.

    Example
    -------
    >>> constructed['record'] = setupComponent()
    >>> constructed['record']['scalar'] = 42
    """
    return noValue
class AbstractConstructedAsn1Item(Asn1ItemBase):
    """Base class for constructed ASN.1 types (Sequence/SequenceOf,
    Set/SetOf, Choice).  Component values live in ``_componentValues``;
    ``componentType`` and ``sizeSpec`` are read-only properties fixed at
    construction time."""
    #: If `True`, requires exact component type matching,
    #: otherwise subtype relation is only enforced
    strictConstraints = False
    # Component type(s) used for value verification/ordering; overridden by
    # concrete subclasses (None on this abstract base).
    componentType = None
    # Size constraint enforced by verifySizeSpec(); None on this base class.
    sizeSpec = None
    def __init__(self, **kwargs):
        # Merge class-level defaults with caller overrides, then let
        # Asn1ItemBase record them as read-only properties.
        readOnly = {
            'componentType': self.componentType,
            'sizeSpec': self.sizeSpec
        }
        readOnly.update(kwargs)
        Asn1ItemBase.__init__(self, **readOnly)
        self._componentValues = []
    def __repr__(self):
        # Mention only properties differing from the class defaults, then
        # append one setComponentByPosition() call per populated component.
        representation = []
        if self.componentType is not self.__class__.componentType:
            representation.append('componentType=%r' % (self.componentType,))
        if self.tagSet is not self.__class__.tagSet:
            representation.append('tagSet=%r' % (self.tagSet,))
        if self.subtypeSpec is not self.__class__.subtypeSpec:
            representation.append('subtypeSpec=%r' % (self.subtypeSpec,))
        representation = '%s(%s)' % (self.__class__.__name__, ', '.join(representation))
        if self._componentValues:
            for idx, component in enumerate(self._componentValues):
                if component is None or component is noValue:
                    continue
                representation += '.setComponentByPosition(%d, %s)' % (idx, repr(component))
        return representation
    def __eq__(self, other):
        # Identity short-circuits to True; otherwise compare the component
        # list against `other` directly.
        return self is other and True or self._componentValues == other
    def __ne__(self, other):
        return self._componentValues != other
    def __lt__(self, other):
        return self._componentValues < other
    def __le__(self, other):
        return self._componentValues <= other
    def __gt__(self, other):
        return self._componentValues > other
    def __ge__(self, other):
        return self._componentValues >= other
    # Truth value reflects whether any components are set; spelled per
    # Python major version.
    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return self._componentValues and True or False
    else:
        def __bool__(self):
            return self._componentValues and True or False
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Subclass hook: copy component values onto a clone (no-op here).
        pass
    def clone(self, **kwargs):
        """Create a copy of a |ASN.1| type or object.
        Any parameters to the *clone()* method will replace corresponding
        properties of the |ASN.1| object.
        Parameters
        ----------
        tagSet: :py:class:`~pyasn1.type.tag.TagSet`
            Object representing non-default ASN.1 tag(s)
        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Object representing non-default ASN.1 subtype constraint(s)
        sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Object representing non-default ASN.1 size constraint(s)
        Returns
        -------
        :
            new instance of |ASN.1| type/value
        """
        cloneValueFlag = kwargs.pop('cloneValueFlag', False)
        # (sic) local name is misspelled; harmless since it is function-local
        initilaizers = self.readOnly.copy()
        initilaizers.update(kwargs)
        clone = self.__class__(**initilaizers)
        if cloneValueFlag:
            self._cloneComponentValues(clone, cloneValueFlag)
        return clone
    def subtype(self, **kwargs):
        """Create a copy of a |ASN.1| type or object.
        Any parameters to the *subtype()* method will be added to the corresponding
        properties of the |ASN.1| object.
        Parameters
        ----------
        tagSet: :py:class:`~pyasn1.type.tag.TagSet`
            Object representing non-default ASN.1 tag(s)
        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Object representing non-default ASN.1 subtype constraint(s)
        sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Object representing non-default ASN.1 size constraint(s)
        Returns
        -------
        :
            new instance of |ASN.1| type/value
        """
        initializers = self.readOnly.copy()
        cloneValueFlag = kwargs.pop('cloneValueFlag', False)
        implicitTag = kwargs.pop('implicitTag', None)
        if implicitTag is not None:
            initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
        explicitTag = kwargs.pop('explicitTag', None)
        if explicitTag is not None:
            initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
        # Remaining keyword args are *added* to the existing property values
        # (e.g. constraint objects combine via their __add__).
        for arg, option in kwargs.items():
            initializers[arg] += option
        clone = self.__class__(**initializers)
        if cloneValueFlag:
            self._cloneComponentValues(clone, cloneValueFlag)
        return clone
    def verifySizeSpec(self):
        # Invoking the constraint raises on violation; returns None otherwise.
        self.sizeSpec(self)
    def getComponentByPosition(self, idx):
        # Abstract: concrete constructed types must override.
        raise error.PyAsn1Error('Method not implemented')
    def setComponentByPosition(self, idx, value, verifyConstraints=True):
        # Abstract: concrete constructed types must override.
        raise error.PyAsn1Error('Method not implemented')
    def setComponents(self, *args, **kwargs):
        # Convenience: positional args set components by index, keyword args
        # by name; returns self for chaining.
        for idx, value in enumerate(args):
            self[idx] = value
        for k in kwargs:
            self[k] = kwargs[k]
        return self
    def __len__(self):
        return len(self._componentValues)
    def clear(self):
        self._componentValues = []
    # backward compatibility
    def setDefaultComponents(self):
        pass
    def getComponentType(self):
        return self.componentType
| |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015, Andrew Jones (andyjones dot ed at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
This module uses a set of hand-label experimental microarray images to produce a LMDB file that can be used to train a
Caffe neural network. The main functions of interest are ``visualize_image_for_hand_labelling`` and
``make_training_files``.
To build a training set, first use ``visualize_image_for_hand_labelling`` to save out a human-interpretable image of a
microarray file. Then use image editing software to color the image according to the ``GOOD_COLOR``, ``DAMAGED_COLOR``,
etc attributes. If you put the labelled images into the ``LABELS_FOLDER`` and add the corresponding file IDs
``LABELLED_FILE_IDS``, then calling ``make_training_files`` will construct a pair of LMDB databases that can be used to
train and test a Caffe classifier.
"""
import scipy as sp
import tifffile
import os
from experimental_tools import get_bounded_im, get_benchmark_im, WINDOW_WIDTH
from visualization_tools import two_channel_to_color
from caffe_tools import fill_database
"""The colours used to hand-label different kinds of spots"""
GOOD_COLOR = [255, 0, 0] # red, meant for pixels inside whole spots.
DAMAGED_COLOR = [255, 255, 0] # yellow, meant for pixels inside damaged spots.
MISSING_COLOR = [255, 0, 255] # magenta, meant for pixels at the approximate location of a spot that's extremely faint
LABELLED_AREA_COLOR = [0, 255, 0] # green, meant for marking the border of a block of labelled spots
UNLABELLED_SPOTTED_AREA_COLOR = [0, 0, 255] # blue, meant for marking the border of all blocks of spots that haven't been labelled.
"""The folder hand-labelled images are expected to be in"""
LABELS_FOLDER = 'sources/labels'
"""The mapping from types of pixels to classifier labels"""
LABEL_ENUM = {'inside': 1,
'outside': 0,
'inside_damaged': 1,
'outside_damaged': 0,
'block_border': 0,
'between': 0}
"""IDs of the hand-labelled images"""
LABELLED_FILE_IDS = ['3-12_pmt100']
def visualize_image_for_hand_labelling(file_id):
    """Save a color visualization of ``file_id`` suitable for hand-labelling.

    The image is written to ``LABELS_FOLDER`` as ``<file_id>_corrected.bmp``.
    """
    colored = two_channel_to_color(get_bounded_im(file_id))
    out_path = os.path.join(LABELS_FOLDER, '{}_corrected.bmp'.format(file_id))
    tifffile.imsave(out_path, colored)
def equal_color_mask(im, color):
    """Return a boolean mask of the pixels in ``im`` equal to RGB ``color``.

    Parameters
    ----------
    im : array of shape (H, W, 3)
        RGB image.
    color : sequence of three ints
        The color to match exactly, channel by channel.

    Returns
    -------
    bool array of shape (H, W)
    """
    # `reduce` is a builtin only on Python 2; importing it from functools
    # works on Python 2.6+ and Python 3 alike.
    from functools import reduce
    return reduce(sp.logical_and, [im[:, :, i] == color[i] for i in range(3)])
def get_hand_labels(file_id):
    """Load the hand-labelled image for ``file_id`` and split it into masks.

    Returns
    -------
    (spots, areas) : pair of dicts of boolean masks
        ``spots`` has keys 'good', 'damaged' and 'missing'; ``areas`` has
        'labelled' and 'unlabelled', each filled in from its drawn outline.
    """
    path = os.path.join(LABELS_FOLDER, file_id + '_corrected_labelled.bmp')
    labels = sp.ndimage.imread(path)
    spots = {
        'good': equal_color_mask(labels, GOOD_COLOR),
        'damaged': equal_color_mask(labels, DAMAGED_COLOR),
        'missing': equal_color_mask(labels, MISSING_COLOR),
    }
    areas = {
        'labelled': sp.ndimage.binary_fill_holes(equal_color_mask(labels, LABELLED_AREA_COLOR)),
        'unlabelled': sp.ndimage.binary_fill_holes(equal_color_mask(labels, UNLABELLED_SPOTTED_AREA_COLOR)),
    }
    return spots, areas
def make_inside_mask(spots):
    """Mask of pixels inside any spot: the union of good and damaged."""
    good, damaged = spots['good'], spots['damaged']
    return good | damaged
def make_outside_mask(spots, areas):
    """Mask of pixels clearly outside everything of interest.

    A pixel counts as "outside" when it is neither within one dilation step
    of a good/damaged spot nor inside an area marked as containing
    unlabelled spots.
    """
    near_spots = sp.ndimage.binary_dilation(make_inside_mask(spots),
                                            structure=sp.ones((3, 3)))
    return ~(near_spots | areas['unlabelled'])
def make_inside_damaged_mask(spots):
    """Mask of the pixels lying inside damaged spots."""
    return spots['damaged']
def make_outside_damaged_mask(spots, areas):
    """Mask of "outside" pixels within 8 dilation steps of a damaged spot."""
    near_damaged = sp.ndimage.binary_dilation(make_inside_damaged_mask(spots),
                                              structure=sp.ones((3, 3)),
                                              iterations=8)
    return near_damaged & make_outside_mask(spots, areas)
def make_block_border_mask(spots, areas):
    """Mask of "outside" pixels in an annulus 8-32 dilation steps from spots."""
    inside = make_inside_mask(spots)
    kernel = sp.ones((3, 3))
    ring_inner = sp.ndimage.binary_dilation(inside, structure=kernel, iterations=8)
    ring_outer = sp.ndimage.binary_dilation(inside, structure=kernel, iterations=32)
    return ring_outer & ~ring_inner & make_outside_mask(spots, areas)
def make_between_mask(spots, areas):
    """Mask of "outside" pixels within 8 dilation steps of a spot
    (i.e. pixels between two spots)."""
    near = sp.ndimage.binary_dilation(make_inside_mask(spots),
                                      structure=sp.ones((3, 3)), iterations=8)
    return near & make_outside_mask(spots, areas)
def find_centers(spots, areas, border_width, im_num=0):
    """Return per-pixel-type arrays of (row, col, im_num) center coordinates.

    The arrays are compatible with caffe_tools.fill_database.  The last row
    of each array equals ``im_num``, identifying the source image.  Pixels
    closer than ``border_width`` to any image edge are excluded.
    """
    shape = spots['good'].shape
    indices = sp.indices(shape)
    im_row = im_num * sp.ones((1, indices.shape[1], indices.shape[2]), dtype=int)
    indices = sp.concatenate([indices, im_row], 0)
    interior = sp.zeros(shape, dtype=bool)
    interior[border_width:-border_width, border_width:-border_width] = True
    masks = {
        'inside': make_inside_mask(spots),
        'outside': make_outside_mask(spots, areas),
        'inside_damaged': make_inside_damaged_mask(spots),
        'outside_damaged': make_outside_damaged_mask(spots, areas),
        'block_border': make_block_border_mask(spots, areas),
        'between': make_between_mask(spots, areas),
    }
    return {name: indices[:, mask & interior] for name, mask in masks.items()}
def find_centers_from_ims(file_ids, width):
    """Build center arrays (see ``find_centers``) from the hand-labels of
    ``file_ids``, concatenated across images.

    ``width`` is the window width; centers closer than ``width // 2`` to an
    image edge are dropped so that a full window fits around each center.
    """
    per_image = []
    for im_num, file_id in enumerate(file_ids):
        spots, areas = get_hand_labels(file_id)
        # Floor division: under Python 3, `width / 2` yields a float and
        # would break the slice indexing inside find_centers.
        per_image.append(find_centers(spots, areas, width // 2, im_num=im_num))
    return {name: sp.concatenate([c[name] for c in per_image], 1)
            for name in per_image[0]}
def make_labelled_sets(centers, test_split=0.1):
    """Sample a balanced, shuffled train/test split from ``centers``.

    ``centers`` is a dict of (3, n) arrays as produced by
    ``find_centers_from_ims``.  Returns ``(training_centers, training_labels,
    test_centers, test_labels)`` compatible with caffe_tools.fill_database;
    the last ``test_split`` fraction of the shuffled samples is the test set.
    """
    # Sample counts must be ints: floats like 100e3 are rejected as sizes by
    # sp.random.choice / sp.repeat on modern numpy.
    counts = {'inside': int(100e3), 'outside': int(50e3),
              'inside_damaged': int(100e3), 'outside_damaged': int(50e3),
              'block_border': int(50e3), 'between': int(50e3)}
    choices = {name: sp.random.choice(sp.arange(centers[name].shape[1]), counts[name])
               for name in centers}
    center_sets = {name: centers[name][:, choices[name]] for name in centers}
    label_sets = {name: sp.repeat(LABEL_ENUM[name], counts[name]) for name in centers}
    center_set = sp.concatenate([center_sets[name] for name in centers], 1)
    label_set = sp.concatenate([label_sets[name] for name in centers])
    # One shared random permutation keeps centers and labels aligned.
    order = sp.random.permutation(sp.arange(center_set.shape[1]))
    ordered_centers = center_set[:, order]
    ordered_labels = label_set[order]
    n_training = int((1 - test_split) * center_set.shape[1])
    return (ordered_centers[:, :n_training], ordered_labels[:n_training],
            ordered_centers[:, n_training:], ordered_labels[n_training:])
def create_caffe_input_files(file_ids, width):
    """Create train/test LMDB databases from the hand-labelled ``file_ids``.

    ``width`` is the size of the windows to use.  The databases are written
    into the ``temporary`` directory.
    """
    normalized = []
    for file_id in file_ids:
        im = get_bounded_im(file_id)
        # Standardize each image to zero mean, unit variance.
        normalized.append((im - im.mean()) / im.std())
    centers = find_centers_from_ims(file_ids, width)
    train_c, train_l, test_c, test_l = make_labelled_sets(centers)
    fill_database('temporary/train_experimental.db', normalized, train_c, train_l, width)
    fill_database('temporary/test_experimental.db', normalized, test_c, test_l, width)
def make_training_files():
    """Build the Caffe train/test LMDB databases from every hand-labelled
    image listed in ``LABELLED_FILE_IDS``."""
    create_caffe_input_files(LABELLED_FILE_IDS, WINDOW_WIDTH)
| |
from django.conf import settings
from django.contrib.auth import models as auth_models
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.urls import reverse
from django.utils.http import urlquote
from django.utils.translation import gettext_lazy as _
import mptt
from .. import settings as filer_settings
from . import mixins
class FolderPermissionManager(models.Manager):
    """
    These methods are called by introspection from "has_generic_permission" on
    the folder model.
    """
    def get_read_id_list(self, user):
        """
        Give a set of Folder ids where the user has read rights, or the
        string "All" if the user has all rights.
        """
        return self.__get_id_list(user, "can_read")
    def get_edit_id_list(self, user):
        # Folder ids the user may edit, or the string "All".
        return self.__get_id_list(user, "can_edit")
    def get_add_children_id_list(self, user):
        # Folder ids the user may add children to, or the string "All".
        return self.__get_id_list(user, "can_add_children")
    def __get_id_list(self, user, attr):
        # Resolve the permission column named by `attr` for every permission
        # record that targets this user (directly, via a group, or via
        # "everybody"), producing a set of allowed folder ids.
        if user.is_superuser or not filer_settings.FILER_ENABLE_PERMISSIONS:
            return 'All'
        allow_list = set()
        deny_list = set()
        group_ids = user.groups.all().values_list('id', flat=True)
        q = Q(user=user) | Q(group__in=group_ids) | Q(everybody=True)
        # Order by tree position so ancestors are processed before their
        # descendants.
        perms = self.filter(q).order_by('folder__tree_id', 'folder__level',
                                        'folder__lft')
        for perm in perms:
            p = getattr(perm, attr)
            if p is None:
                # Not allow nor deny, we continue with the next permission
                continue
            if not perm.folder:
                # A record without a folder applies to every folder.
                assert perm.type == FolderPermission.ALL
                if p == FolderPermission.ALLOW:
                    allow_list.update(Folder.objects.all().values_list('id', flat=True))
                else:
                    deny_list.update(Folder.objects.all().values_list('id', flat=True))
                continue
            folder_id = perm.folder.id
            if p == FolderPermission.ALLOW:
                allow_list.add(folder_id)
            else:
                deny_list.add(folder_id)
            if perm.type == FolderPermission.CHILDREN:
                # CHILDREN scope also covers every descendant folder.
                if p == FolderPermission.ALLOW:
                    allow_list.update(perm.folder.get_descendants().values_list('id', flat=True))
                else:
                    deny_list.update(perm.folder.get_descendants().values_list('id', flat=True))
        # Deny has precedence over allow
        return allow_list - deny_list
class Folder(models.Model, mixins.IconsMixin):
    """
    Represents a Folder that things (files) can be put into. Folders are *NOT*
    mirrored in the Filesystem and can have any unicode chars as their name.
    Other models may attach to a folder with a ForeignKey. If the related name
    ends with "_files" they will automatically be listed in the
    folder.files list along with all the other models that link to the folder
    in this way. Make sure the linked models obey the AbstractFile interface
    (Duck Type).
    """
    file_type = 'Folder'
    is_root = False
    can_have_subfolders = True
    _icon = 'plainfolder'
    # explicitly define MPTT fields which would otherwise change
    # and create a migration, depending on django-mptt version
    # (see: https://github.com/django-mptt/django-mptt/pull/578)
    level = models.PositiveIntegerField(editable=False)
    lft = models.PositiveIntegerField(editable=False)
    rght = models.PositiveIntegerField(editable=False)
    parent = models.ForeignKey(
        'self',
        # NOTE(review): verbose_name is a plain string here, not _('parent');
        # presumably it should be translated like the other fields — confirm.
        verbose_name=('parent'),
        null=True,
        blank=True,
        related_name='children',
        on_delete=models.CASCADE,
    )
    name = models.CharField(
        _('name'),
        max_length=255,
    )
    # SET_NULL keeps the folder when its owner's account is deleted.
    owner = models.ForeignKey(
        getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
        verbose_name=_('owner'),
        related_name='filer_owned_folders',
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )
    uploaded_at = models.DateTimeField(
        _('uploaded at'),
        auto_now_add=True,
    )
    created_at = models.DateTimeField(
        _('created at'),
        auto_now_add=True,
    )
    modified_at = models.DateTimeField(
        _('modified at'),
        auto_now=True,
    )
    class Meta:
        # see: https://github.com/django-mptt/django-mptt/pull/577
        index_together = (('tree_id', 'lft'),)
        # Sibling folders must have distinct names.
        unique_together = (('parent', 'name'),)
        ordering = ('name',)
        permissions = (("can_use_directory_listing",
                        "Can use directory listing"),)
        app_label = 'filer'
        verbose_name = _("Folder")
        verbose_name_plural = _("Folders")
    @property
    def file_count(self):
        # Cached per instance so the COUNT query runs at most once.
        if not hasattr(self, '_file_count_cache'):
            self._file_count_cache = self.files.count()
        return self._file_count_cache
    @property
    def children_count(self):
        # Cached per instance, like file_count.
        if not hasattr(self, '_children_count_cache'):
            self._children_count_cache = self.children.count()
        return self._children_count_cache
    @property
    def item_count(self):
        # Total of contained files and direct subfolders.
        return self.file_count + self.children_count
    @property
    def files(self):
        return self.all_files.all()
    @property
    def logical_path(self):
        """
        Gets logical path of the folder in the tree structure.
        Used to generate breadcrumbs
        """
        folder_path = []
        if self.parent:
            folder_path.extend(self.parent.get_ancestors())
            folder_path.append(self.parent)
        return folder_path
    @property
    def pretty_logical_path(self):
        # Human-readable absolute path, e.g. "/grandparent/parent/name".
        return "/%s" % "/".join([f.name for f in self.logical_path + [self]])
    @property
    def quoted_logical_path(self):
        # NOTE(review): django.utils.http.urlquote was removed in Django 4.0;
        # confirm the supported Django range before upgrading.
        return urlquote(self.pretty_logical_path)
    def has_edit_permission(self, request):
        return self.has_generic_permission(request, 'edit')
    def has_read_permission(self, request):
        return self.has_generic_permission(request, 'read')
    def has_add_children_permission(self, request):
        return self.has_generic_permission(request, 'add_children')
    def has_generic_permission(self, request, permission_type):
        """
        Return True if the current user has `permission_type` ('read',
        'edit' or 'add_children') on this folder.  Superusers and the
        folder's owner are always allowed; other results are cached per
        instance in `permission_cache`, keyed by permission type.
        """
        user = request.user
        if not user.is_authenticated:
            return False
        elif user.is_superuser:
            return True
        elif user == self.owner:
            return True
        else:
            if not hasattr(self, "permission_cache") or\
               permission_type not in self.permission_cache or \
               request.user.pk != self.permission_cache['user'].pk:
                # Reset the cache when checking on behalf of another user.
                if not hasattr(self, "permission_cache") or request.user.pk != self.permission_cache['user'].pk:
                    self.permission_cache = {
                        'user': request.user,
                    }
                # This calls methods on the manager i.e. get_read_id_list()
                func = getattr(FolderPermission.objects,
                               "get_%s_id_list" % permission_type)
                permission = func(user)
                if permission == "All":
                    # "All" grants every permission type at once.
                    self.permission_cache[permission_type] = True
                    self.permission_cache['read'] = True
                    self.permission_cache['edit'] = True
                    self.permission_cache['add_children'] = True
                else:
                    self.permission_cache[permission_type] = self.id in permission
            return self.permission_cache[permission_type]
    def get_admin_change_url(self):
        return reverse('admin:filer_folder_change', args=(self.id,))
    def get_admin_directory_listing_url_path(self):
        return reverse('admin:filer-directory_listing', args=(self.id,))
    def get_admin_delete_url(self):
        try:
            # Django <=1.6
            model_name = self._meta.module_name
        except AttributeError:
            # Django >1.6
            model_name = self._meta.model_name
        return reverse(
            'admin:{0}_{1}_delete'.format(self._meta.app_label, model_name,),
            args=(self.pk,))
    def __str__(self):
        return "%s" % (self.name,)
    def contains_folder(self, folder_name):
        # True if a direct child with exactly this name exists.
        try:
            self.children.get(name=folder_name)
            return True
        except Folder.DoesNotExist:
            return False
# MPTT registration
# Folder is hooked into django-mptt imperatively (instead of subclassing
# MPTTModel); AlreadyRegistered is swallowed so repeated imports are harmless.
try:
    mptt.register(Folder)
except mptt.AlreadyRegistered:
    pass
class FolderPermission(models.Model):
    """
    Grants or denies read/edit/add-children rights on a folder (or on all
    folders) to a user, a group, or everybody.  Evaluated by
    FolderPermissionManager; a None permission value means "inherit".
    """
    # Scope values for the `type` field.
    ALL = 0
    THIS = 1
    CHILDREN = 2
    # Values for the three permission columns (None means "inherit").
    ALLOW = 1
    DENY = 0
    TYPES = [
        (ALL, _("all items")),
        (THIS, _("this item only")),
        (CHILDREN, _("this item and all children")),
    ]
    # NOTE(review): "PERMISIONS" is misspelled but kept as-is — external code
    # may reference this class attribute by its current name.
    PERMISIONS = [
        (None, _("inherit")),
        (ALLOW, _("allow")),
        (DENY, _("deny")),
    ]
    # A null folder is only valid together with type == ALL (see clean()).
    folder = models.ForeignKey(
        Folder,
        verbose_name=("folder"),
        null=True,
        blank=True,
        on_delete=models.CASCADE,
    )
    type = models.SmallIntegerField(
        _("type"),
        choices=TYPES,
        default=ALL,
    )
    user = models.ForeignKey(
        getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
        related_name="filer_folder_permissions",
        on_delete=models.SET_NULL,
        verbose_name=_("user"),
        blank=True,
        null=True,
    )
    group = models.ForeignKey(
        auth_models.Group,
        related_name="filer_folder_permissions",
        verbose_name=_("group"),
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    everybody = models.BooleanField(
        _("everybody"),
        default=False,
    )
    can_read = models.SmallIntegerField(
        _("can read"),
        choices=PERMISIONS,
        blank=True,
        null=True,
        default=None,
    )
    can_edit = models.SmallIntegerField(
        _("can edit"),
        choices=PERMISIONS,
        blank=True,
        null=True,
        default=None,
    )
    can_add_children = models.SmallIntegerField(
        _("can add children"),
        choices=PERMISIONS,
        blank=True,
        null=True,
        default=None,
    )
    class Meta:
        verbose_name = _('folder permission')
        verbose_name_plural = _('folder permissions')
        app_label = 'filer'
    objects = FolderPermissionManager()
    def __str__(self):
        # Summary like: Folder: 'name'->this item only [can_read] [User: bob]
        if self.folder:
            name = '%s' % self.folder
        else:
            name = 'All Folders'
        ug = []
        if self.everybody:
            ug.append('Everybody')
        else:
            if self.group:
                ug.append("Group: %s" % self.group)
            if self.user:
                ug.append("User: %s" % self.user)
        usergroup = " ".join(ug)
        perms = []
        for s in ['can_edit', 'can_read', 'can_add_children']:
            perm = getattr(self, s)
            if perm == self.ALLOW:
                perms.append(s)
            elif perm == self.DENY:
                perms.append('!%s' % s)
        perms = ', '.join(perms)
        return "Folder: '%s'->%s [%s] [%s]" % (
            name, self.get_type_display(),
            perms, usergroup)
    def clean(self):
        """Enforce mutual-exclusion rules between folder/type and between
        the user/group/everybody audience fields."""
        if self.type == self.ALL and self.folder:
            raise ValidationError('Folder cannot be selected with type "all items".')
        if self.type != self.ALL and not self.folder:
            raise ValidationError('Folder has to be selected when type is not "all items".')
        if self.everybody and (self.user or self.group):
            raise ValidationError('User or group cannot be selected together with "everybody".')
        if not self.user and not self.group and not self.everybody:
            raise ValidationError('At least one of user, group, or "everybody" has to be selected.')
| |
"""
views.py - xmlrpc request manager
Author
Sacha Zyto <sacha@csail.mit.edu>
License
Copyright (c) 2010-2012 Massachusetts Institute of Technology.
MIT License (cf. MIT-LICENSE.txt or http://www.opensource.org/licenses/mit-license.php)
"""
from base import annotations, doc_analytics
import json, sys, datetime, time
from base import auth, signals, constants, models as M, utils_response as UR
#TODO import responder
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.template.loader import render_to_string
import logging, random, string
import urllib
id_log = "".join([ random.choice(string.ascii_letters+string.digits) for i in xrange(0,10)])
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(levelname)s %(message)s', filename='/tmp/nb_rpc_%s.log' % ( id_log,), filemode='a')
SLEEPTIME = 0.2
#The functions that are allowed to be called from a http client
__EXPORTS = [
    "getObjects",
    "getParams",
    "getNotes",
    "saveNote",
    "editNote",
    "deleteNote",
    "deleteThread",
    "getStats",
    "getMyNotes",
    "getCommentLabels",
    "getGuestFileInfo",
    "getSectionsInfo",
    "getHTML5Info",
    "markNote",
    "request_source_id",
    "log_history",
    "remote_log",
    "getGradees",
    "edit_assignment",
    "get_location_info",
    "get_comment_info",
    "save_settings",
    "approveNote",
    "editPoll",
    "getMembers",
    "sendInvites",
    "passwordLost",
    "presearch",
    "get_stats_ensemble",
    "add_folder",
    "add_ensemble",
    "rename_file",
    "delete_file",
    "move_file",
    "copy_file",
    "register_user",
    "login_user",
    "set_grade_assignment",
    "set_comment_label",
    "markThread",
    "getPending",
    "rate_reply",
    "advanced_filter",
    "get_top_comments_from_locations",
    "bulk_import_annotations",
    "set_location_section",
    "promote_location_by_copy"
]
# Object type names clients may request — presumably consumed by
# getObjects(); confirm against the dispatcher.
__AVAILABLE_TYPES = set(["folders", "ensembles", "sections", "files", "assignments", "marks", "settings", "file_stats", "ensemble_stats", "polls", "choices", "responses", "polls_stats", "ensemble_stats2"])
# Whitelist of constants from base.constants that getParams() may expose.
__AVAILABLE_PARAMS = ["RESOLUTIONS", "RESOLUTION_COORDINATES"]
# Statistic names clients may ask for — presumably consumed by the stats
# endpoints; confirm against their implementations.
__AVAILABLE_STATS = ["auth", "newauth", "question", "newquestion", "unclear", "newunclear","auth_group", "newauth_group", "question_group", "newquestion_group", "unclear_group", "newunclear_group", "auth_grader", "newauth_grader", "auth_admin", "newauth_admin", "unanswered", "auth_everyone", "newauth_everyone", "favorite", "newfavorite", "search", "collection" ]
def on_register_session(sender, **payload):
    """Signal handler: persist a newly registered session (user id, client
    time and remote IP) via annotations.register_session."""
    req = payload["req"]
    uid = UR.getUserInfo(req, True).id
    session = {
        "ctime": payload["cid"],
        "ip": req.META.get("REMOTE_ADDR"),
    }
    annotations.register_session(uid, session)
signals.register_session.connect(on_register_session, weak=False)
def __parseEntities_str(s, delimiter):
    """Split ``s`` on ``delimiter``; return the stripped, lower-cased parts.

    When the delimiter is absent the whole (stripped, lower-cased) string is
    returned as a single-element list.
    """
    # str.split returns [s] when the delimiter is absent, so one code path
    # covers both of the original branches.
    return [part.strip().lower() for part in s.split(delimiter)]
def __parseEntities_list(a, delimiter):
    """Apply __parseEntities_str to every string in ``a`` and flatten the
    results into a single list."""
    return [part for s in a for part in __parseEntities_str(s, delimiter)]
def parseEntities(x, d):
    """Split ``x`` (a string or a list of strings) on delimiter(s) ``d``.

    ``d`` may be a single delimiter or a list of delimiters applied one
    after another; the result is a flat list of stripped, lower-cased tokens.
    """
    if type(d) == list:
        # Apply each delimiter in turn to the running result.
        result = x
        for delimiter in d:
            result = parseEntities(result, delimiter)
        return result
    if type(x) == list:
        return __parseEntities_list(x, d)
    return __parseEntities_str(x, d)
def rate_reply(P,req):
    """Record a rating of a reply on a thread mark.

    Only the owner of the thread mark may rate; anyone else gets a
    "NOT ALLOWED" response.  ``P`` must carry "status", "threadmark_id" and
    "comment_id".  Returns the saved rating serialized for the client.
    """
    uid = UR.getUserId(req);
    status = P["status"]
    tm = M.ThreadMark.objects.get(pk=P["threadmark_id"])
    # NOTE(review): this queryset is lazy — its .count() below executes AFTER
    # rr.save(), so a just-saved accepted rating may be included in the
    # count; confirm this is the intended semantics.
    previous_accepted_ratings = M.ReplyRating.objects.filter(threadmark=tm, status__gt=M.ReplyRating.TYPE_UNRESOLVED)
    if tm.user_id == uid:
        rr = M.ReplyRating()
        rr.status = status
        rr.threadmark = tm
        rr.comment_id = P["comment_id"]
        rr.save()
        if status:
            # The thread stays active only while unresolved and no accepted
            # rating exists.
            tm.active = status==M.ReplyRating.TYPE_UNRESOLVED and previous_accepted_ratings.count()==0
            tm.save()
        return UR.prepare_response({"replyrating": {rr.id: UR.model2dict(rr)}})
    return UR.prepare_response({}, 1, "NOT ALLOWED")
def sendInvites(payload, req):
    """Invite a list of e-mail addresses to an ensemble (channel).

    Creates pending accounts for unknown addresses, records one invite per
    address and sends one invitation e-mail per recipient.  The caller must
    have invite permission on ``payload["id_ensemble"]``.
    """
    from django.core import mail
    from django.core.mail import EmailMessage
    uid = UR.getUserId(req)
    id_ensemble = payload["id_ensemble"]
    id_section = payload.get("id_section", None)
    admin = 0 if "admin" not in payload else payload["admin"]
    if not auth.canSendInvite(uid, id_ensemble):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    # extract emails in a somewhat robust fashion (i.e. using several possible delimiters)
    emails = parseEntities(payload["to"], [",", "\n", " "])
    # remove spurious stuff: strings that don't have an "@" and trailing "<"
    # and ">" characters, because some emails come as: John Doe <jd@x.com>
    emails = [o.replace("<", "").replace(">", "") for o in emails if "@" in o]
    logging.info("to: %s, extracted: %s" % (payload["to"], emails))
    # add new users to DB w/ pending status
    connection = mail.get_connection()
    emailmessages = []
    alphabet = string.ascii_letters + string.digits
    for email in emails:
        user = auth.user_from_email(email)
        password = ""
        if user is None:
            # NOTE(review): `random` is not a cryptographic source; invite
            # keys and passwords should ideally come from a CSPRNG
            # (os.urandom / the `secrets` module on Python 3).
            # `range` replaces the Python-2-only `xrange` here.
            ckey = "".join([random.choice(alphabet) for i in range(0, 32)])
            password = "".join([random.choice(alphabet) for i in range(0, 4)])
            user = auth.addUser(email, password, ckey)
        invite_key = "".join([random.choice(alphabet) for i in range(0, 50)])
        auth.addInvite(invite_key, user.id, id_ensemble, id_section, admin)
        link = "http://%s/confirm_invite?invite_key=%s" % (settings.HOSTNAME, invite_key,)
        ensemble = M.Ensemble.objects.get(pk=id_ensemble)
        p = {
            "name": ensemble.name,
            "description": ensemble.description,
            "link": link,
            "contact": user.firstname if user.firstname is not None else user.email
        }
        if payload["msg"] != "":
            p["msg_perso"] = payload["msg"]
        # TODO: We still include the password in the e-mail, should we stop doing that?
        if password != "":
            p["password"] = password
        p["email"] = email
        msg = render_to_string("email/msg_invite", p)
        bcc = [] if settings.SMTP_CC_USER is None else (settings.SMTP_CC_USER,)
        e = EmailMessage("You're invited on the %s channel !" % (p["name"],),
                         msg,
                         settings.EMAIL_FROM,
                         (email, ),
                         bcc, connection=connection)
        emailmessages.append(e)
        #time.sleep(SLEEPTIME) #in order not to stress out the email server
    connection.send_messages(emailmessages)
    return UR.prepare_response({"msg": "Invite for %s sent to %s" % (ensemble.name, emails,)})
def register_user(P, req):
    """Register the guest identified by ``P["ckey"]`` as a full user and
    e-mail a confirmation link; fails if the e-mail is already taken."""
    email_taken = M.User.objects.filter(email=P["email"].strip().lower()).count() != 0
    if email_taken:
        return UR.prepare_response({}, 1, "A user with this email already exists - please choose another email.")
    user = auth.getGuest(P["ckey"])
    P["ckey"] = annotations.register_user(user.id, P)  # returns a new confkey.
    referer = req.META.get("HTTP_REFERER", "http://%s" % settings.NB_SERVERNAME)
    p2 = {"tutorial_url": settings.GUEST_TUTORIAL_URL, "conf_url": "%s?ckey=%s" % (referer, P["ckey"])}
    from django.core.mail import EmailMessage
    p2.update(P)
    msg = render_to_string("email/confirm_guest_registration", p2)
    EmailMessage(
        "Welcome to NB, %s !" % (p2["firstname"], ),
        msg,
        settings.EMAIL_FROM,
        (P["email"], ),
        (settings.EMAIL_BCC, )).send()
    return UR.prepare_response({"uid": user.id})
def login_user(P, req):
    """Validate email/password; on success return the user's confkey and
    profile fields so the client can set its identity cookie."""
    email = P.get("email")
    password = P.get("password")
    if email is None or password is None:
        return UR.prepare_response({"ckey": None})
    user = auth.checkUser(email, password)
    if user is None:
        return UR.prepare_response({"ckey": None})
    default_cookie = urllib.quote('{"ckey": ""}')
    u_in = json.loads(urllib.unquote(req.COOKIES.get("userinfo", default_cookie)))
    if "ckey" in u_in and u_in["ckey"] != "" and u_in["ckey"] != user.confkey:
        # log that there's been an identity change
        auth.log_guest_login(u_in["ckey"], user.id)
    # this is what's needed for the client to set a cookie and be
    # authenticated as the new user !
    return UR.prepare_response({"ckey": user.confkey, "email": user.email, "firstname": user.firstname, "lastname": user.lastname, "guest": user.guest, "valid": user.valid})
def on_delete_session(payload, s):
    """Session-teardown hook: persist the end of the session tied to the
    request's connection id. An id of 0 means "never registered" and is
    silently ignored."""
    req = s["request"]
    connection_id = req.getConnectionId()
    if connection_id == 0:
        # can't end a session whose id is 0 — it was never created
        return
    annotations.endSession({
        "id": connection_id,
        "lastActivity": s["lastActivity"],
        "reason": payload["reason"],
    })
def on_reactivate(ids, connections):
    """Mark the given connection ids as active again.

    Builds the flat argument list of alternating (lastActivity, id) pairs
    that annotations.reactivateSession expects.
    """
    args = []
    for connection_id in ids:
        args.extend([connections[connection_id]["lastActivity"], connection_id])
    annotations.reactivateSession(args)
def getParams(payload, req):
    """Expose whitelisted server constants to the client.

    Only names listed in __AVAILABLE_PARAMS are returned. Side effect: if the
    client reports its clock ("clienttime", ms since epoch) and a session is
    active, the session's clienttime is recorded.
    """
    o={}
    for p in payload["name"]:
        if p in __AVAILABLE_PARAMS:
            o[p] = constants.__dict__[p]
    if UR.CID != 0 and "clienttime" in payload:
        try:
            s = M.Session.objects.get(ctime=UR.CID)
            # client sends milliseconds; fromtimestamp expects seconds
            s.clienttime = datetime.datetime.fromtimestamp((payload["clienttime"]+0.0)/1000)
            s.save()
        except M.Session.DoesNotExist:
            pass
    return UR.prepare_response({"value": o})
def presearch(payload, req):
    """Record a search query, then run it and return the results."""
    connection_id = UR.CID
    user_id = UR.getUserId(req)
    search_id = annotations.save_presearch(connection_id, payload)
    # result shape: {"total": <count>, "items": ...}
    results = annotations.presearch(user_id, search_id, payload)
    return UR.prepare_response(results)
def getGuestFileInfo(payload, req):
    """Return file/ensemble info for a source, enforcing guest access.

    Every ensemble that owns the file must either allow guests or count the
    requester as a member; otherwise nothing is returned.
    """
    if "id_source" not in payload:
        return UR.prepare_response({}, 1, "missing id_source !")
    output = annotations.get_guestfileinfo(payload["id_source"])
    for ensemble_id in output["ensembles"]:
        info = output["ensembles"][ensemble_id]
        if not (info["allow_guest"] or auth.isMember(UR.getUserId(req), ensemble_id)):
            return UR.prepare_response({}, 1, "not allowed: guest access isn't allowed for this file.")
    return UR.prepare_response(output)
def getHTML5Info(payload, req):
    """Resolve a URL (hash part stripped) to its NB files/ensembles/folders.

    Guest access is enforced per ensemble: each ensemble owning the file must
    either allow guests or count the requester as a member.
    """
    if "url" not in payload:
        return UR.prepare_response({}, 1, "missing url !")
    url = payload["url"].partition("#")[0] #remove hash part of the URL by default.
    #TODO: use optional argument id_ensemble to disambiguate if provided.
    sources_info = M.HTML5Info.objects.filter(url=url)
    ownerships = M.Ownership.objects.select_related("source", "ensemble", "folder").filter(source__html5info__in=sources_info, deleted=False)
    if not ownerships.exists():
        return UR.prepare_response({}, 1, "this URL is not recognized: ")
    output = {
        "files": UR.qs2dict(ownerships, annotations.__NAMES["files2"] , "ID"),
        "ensembles": UR.qs2dict(ownerships, annotations.__NAMES["ensembles2"] , "ID") ,
        "folders": UR.qs2dict(ownerships, annotations.__NAMES["folders2"] , "ID") ,
    }
    for i in output["ensembles"]:
        if not (output["ensembles"][i]["allow_guest"] or auth.isMember(UR.getUserId(req), i)):
            return UR.prepare_response({}, 1, "not allowed: guest access isn't allowed for this file.")
    return UR.prepare_response(output)
def getObjects(payload, req):
    """Fetch objects of the requested (whitelisted) types.

    CAUTION: performs no per-user authorization, so it must never be used to
    return non-public data such as notes.
    """
    #TODO cid = req.getConnectionId()
    uid = UR.getUserId(req)
    requested = __AVAILABLE_TYPES.intersection(set(payload["types"]))
    inner_payload = payload.get("payload", {})
    output = {}
    for type_name in requested:
        fetch = getattr(annotations, "get_" + type_name)
        output[type_name] = fetch(uid, inner_payload)
    return UR.prepare_response(output)
def save_settings(payload, req):
    """Persist per-user settings; requires an authenticated user."""
    uid = UR.getUserId(req)
    if uid is None:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    saved = annotations.save_settings(uid, payload)
    return UR.prepare_response({"settings": saved})
def getGradees(payload, req):
    """Return the list of users the requester may grade."""
    uid = UR.getUserId(req)
    return UR.prepare_response({"gradees": annotations.getGradees(uid)})
def getSectionsInfo(payload, req):
    """Return the sections of an ensemble the requester may inspect."""
    uid = UR.getUserId(req)
    if "id_ensemble" not in payload:
        return UR.prepare_response({}, 1, "MISSING id_ensemble")
    id_ensemble = payload["id_ensemble"]
    if not auth.canGetSectionsInfo(uid, id_ensemble):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    membership = M.Membership.objects.filter(user__id=uid, ensemble__id=id_ensemble, deleted=False)[0]
    sections = membership.ensemble.section_set.all()
    return UR.prepare_response({"sections": UR.qs2dict(sections)})
def editPoll(payload, req):
    """Apply poll edits on behalf of an authorized user."""
    uid = UR.getUserId(req)
    allowed = (
        uid is not None
        and "id_poll" in payload
        and annotations.canEditPoll(uid, payload["id_poll"])
    )
    if not allowed:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    annotations.editPoll(uid, payload)
    return UR.prepare_response({})
def getNotes(payload, req):
    """Return annotation data for a file the requester may read.

    The response carries the file id plus locations, html5locations,
    comments, threadmarks, and the requester's "seen" state. Only file-based
    access ("file" in payload) is implemented; other payloads yield an empty
    response.
    """
    uid = UR.getUserId(req)
    output = {}
    if "file" in payload: #access by file
        # "after": optional cutoff — only comments newer than it are returned
        after = payload.get("after", None)
        id_source = payload["file"]
        if auth.canReadFile(uid, id_source, req):
            #output["notes"] = annotations.getNotesByFile(id_source, uid)
            output["file"] = id_source
            output["locations"], output["html5locations"], output["comments"], output["threadmarks"] = annotations.getCommentsByFile(id_source, uid, after)
            #TODO:
            #output["links"] = annotations.get_links(uid, {"id_source": id_source})
            output["seen"] = annotations.getSeenByFile(id_source, uid)
        else:
            return UR.prepare_response({}, 1, "NOT ALLOWED")
    return UR.prepare_response(output)
def getCommentLabels(payload, req):
    """Return label categories, captions and the requester's own comment
    labels for a file (admin members only).

    The requester must be an admin member of an ensemble that owns the file;
    only labels graded by the requester are returned.
    """
    uid = UR.getUserId(req)
    if "file" in payload: #access by file
        id_source = payload["file"]
        # memberships of the requester in any ensemble that owns this source
        o = M.Membership.objects.filter(ensemble__in=M.Ensemble.objects.filter(ownership__in=M.Ownership.objects.filter(source__id=id_source))).filter(user__id=uid, deleted=False)
        if len(o)>0 and o[0].admin: #for now, simply restrict to admin level
            output = {}
            lc = M.LabelCategory.objects.filter(ensemble = o[0].ensemble)
            output["labelcategories"] = UR.qs2dict(lc)
            # only label visible (non-deleted, non-moderated) comments
            comments = M.Comment.objects.filter(location__source__id=id_source, deleted=False, moderated=False)
            output["commentlabels"] = UR.qs2dict(M.CommentLabel.objects.filter(category__in=lc, comment__in=comments, grader__id=uid))
            output["labelcategorycaptions"] = UR.qs2dict(M.LabelCategoryCaption.objects.filter(category__in=lc))
            return UR.prepare_response(output)
    return UR.prepare_response({}, 1, "NOT ALLOWED")
def saveNote(payload, req):
    """Create a new comment (note), optionally with thread marks.

    Returns the refreshed locations, comments and threadmarks on success;
    error code 2 signals a duplicate submission.
    """
    uid = UR.getUserId(req)
    if not auth.canAnnotate(uid, payload["id_ensemble"]):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    payload["id_author"] = uid
    retval = {}
    # addNote returns the list of created comment objects (empty on duplicate)
    a = annotations.addNote(payload)
    if len(a) == 0:
        return UR.prepare_response({}, 2, "DUPLICATE")
    tms = {}
    for mark in payload["marks"]:
        tm = M.ThreadMark()
        # map the client's mark caption back to its DB type code
        m_types = [c[0] for c in tm.TYPES if c[1]==mark]
        if len(m_types): #old clients may return types we don't have in DB so ignore them
            tm.type = m_types[0]
            tm.user_id = uid
            tm.comment=a[0]
            tm.location_id=tm.comment.location_id
            tm.save()
            tms[tm.id] = UR.model2dict(tm)
    retval["locations"], retval["html5locations"] = annotations.getLocation(a[0].location_id)
    retval["comments"] = {}
    for annotation in a:
        retval["comments"].update(annotations.getComment(annotation.id, uid))
    retval["threadmarks"] = tms
    return UR.prepare_response(retval)
    #TODO responder.notify_observers("note_saved", payload,req)
def editNote(payload, req):
    """Edit an existing comment, if the requester is allowed to."""
    uid = UR.getUserId(req)
    if not auth.canEdit(uid, payload["id_comment"]):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    annotations.editNote(payload)
    # threadmarks need no refresh: the edit-mode editor cannot change them
    updated = annotations.getComment(payload["id_comment"], uid)
    return UR.prepare_response({"comments": [updated]})
def deleteNote(payload, req):
    """Delete a single comment, if the requester is allowed to."""
    uid = UR.getUserId(req)
    id_comment = payload["id_comment"]
    if auth.canDelete(uid, id_comment):
        annotations.deleteNote(payload)
        return UR.prepare_response({"id_comment": id_comment})
    return UR.prepare_response({}, 1, "NOT ALLOWED")
def deleteThread(payload, req):
    """Delete an entire thread (a location and its comments), if permitted."""
    uid = UR.getUserId(req)
    id_location = payload["id_location"]
    if auth.canDeleteThread(uid, id_location):
        annotations.deleteThread(payload)
        return UR.prepare_response({"id_location": id_location})
    return UR.prepare_response({}, 1, "NOT ALLOWED")
def getPending(payload, req):
    """Return the requester's pending items."""
    uid = UR.getUserId(req)
    pending = annotations.getPending(uid, payload)
    return UR.prepare_response(pending)
def getMyNotes(payload, req):
    """Run one of the whitelisted per-user comment queries."""
    uid = UR.getUserId(req)
    query = payload.get("query")
    if uid is None or query not in __AVAILABLE_STATS:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    #referer = None if "referer" not in req.META else req.META["referer"]
    #TODO annotations.addCollageHistory(uid, referer, query)
    handler = getattr(annotations, "get_comments_" + query)
    return UR.prepare_response(handler(uid, payload))
def getStats(payload, req):
    """Return usage statistics for the authenticated requester."""
    uid = UR.getUserId(req)
    if uid is None:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    return UR.prepare_response(annotations.get_stats(uid))
def getStats2(payload, req):
    """Return the second flavor of usage statistics for the requester."""
    uid = UR.getUserId(req)
    if uid is None:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    return UR.prepare_response(annotations.get_stats2(uid))
def getMembers(payload, req):
    """List members of an ensemble the requester may inspect."""
    uid = UR.getUserId(req)
    if "id_ensemble" in payload:
        id_ensemble = payload["id_ensemble"]
        if auth.canGetMembers(uid, id_ensemble):
            return UR.prepare_response(annotations.get_members(id_ensemble))
    return UR.prepare_response({}, 1, "NOT ALLOWED")
def markThread(payload, req):
    """Set a thread-level mark on a location and return the new mark."""
    uid = UR.getUserId(req)
    id_location = payload["id_location"]
    if not auth.canMarkThread(uid, id_location):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    mark = annotations.markThread(uid, payload)
    return UR.prepare_response({"threadmarks": {mark["id"]: mark}})
def markNote(payload, req):
    """Set a per-comment mark and return refreshed comment/location/mark data."""
    uid = UR.getUserId(req)
    id_comment = payload["id_comment"]
    if not auth.canMark(uid, id_comment):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    annotations.markNote(uid, payload)
    comments = annotations.getComment(id_comment, uid)
    locs, h5locs = annotations.getLocation(comments[int(id_comment)]["ID_location"])
    response = {
        "locations": locs,
        "html5locations": h5locs,
        "marks": annotations.getMark(uid, payload),
        "comments": comments,
    }
    return UR.prepare_response(response)
def approveNote(payload, req):
    """Record a moderation approval on a comment and return the refreshed
    comment data.

    Requires auth.canApprove; the approval is also logged to the approval
    history.
    """
    uid = UR.getUserId(req)
    id_comment = payload["id_comment"]
    if not auth.canApprove(uid,id_comment ):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    else:
        annotations.approveNote(uid, payload);
        # BUG FIX: addApproveHistory used to sit AFTER the return statement
        # below and therefore never ran; record the history before replying.
        annotations.addApproveHistory(uid, payload)
        p = {"comments":annotations.getComment(id_comment,uid) }
        return UR.prepare_response(p)
def passwordLost(payload, req):
    """Email a password-reset link (the confkey-authenticated settings page)
    to the given address.

    The response echoes the email either way; the error flag distinguishes
    an unknown address.
    """
    email = payload["email"].strip().lower()
    user = auth.user_from_email(email)
    if user is not None:
        from django.core.mail import EmailMessage
        p= {
            "firstname": user.firstname,
            "email": email,
            # the settings page, authenticated via confkey, doubles as reset
            "settings_url": "%s://%s/settings?ckey=%s" % (settings.PROTOCOL, settings.HOSTNAME, user.confkey)
        }
        msg = render_to_string("email/password_reminder",p)
        e = EmailMessage(
            "Password reset for your NB account",
            msg,
            "NB Password Reset Bot <nbnotifications@csail.mit.edu>",
            (email, ),
            (settings.SMTP_CC_LOSTPASSWORD, ))
        e.send()
        return UR.prepare_response({"email": email})
    return UR.prepare_response({"email": email}, 1, "USER NOT FOUND")
def __send_email(recipients, msg):
import smtplib
session = smtplib.SMTP(settings.SMTP_SERVER)
smtpresult = session.sendmail(settings.SMTP_USER, recipients, msg)
time.sleep(SLEEPTIME)
if smtpresult:
errstr = ""
for recip in smtpresult.keys():
errstr = """Could not delivery mail to: %s Server said: %s %s %s""" % (recip, smtpresult[recip][0], smtpresult[recip][1], errstr)
logging.error(errstr)
raise smtplib.SMTPException, errstr
def rename_file(P, req):
    """Rename a file or a folder (P["item_type"] selects which)."""
    uid = UR.getUserId(req)
    if P["item_type"] == "file":
        allowed = auth.canRenameFile(uid, P["id"])
    else:
        allowed = auth.canRenameFolder(uid, P["id"])
    if not allowed:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    return UR.prepare_response({P["item_type"]+"s": annotations.rename_file(uid, P)})
def delete_file(P, req):
    """Delete a file or a folder (P["item_type"] selects which)."""
    uid = UR.getUserId(req)
    if P["item_type"] == "file":
        allowed = auth.canDeleteFile(uid, P["id"])
    else:
        allowed = auth.canDeleteFolder(uid, P["id"])
    if not allowed:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    # special response shape: the item is gone, so only its id is returned
    return UR.prepare_response({"id": annotations.delete_file(uid, P)})
def move_file(P,req):
    """Move a file or a folder into a destination (P["dest"])."""
    uid = UR.getUserId(req)
    if P["item_type"] == "file":
        allowed = auth.canMoveFile(uid, P["id"], P["dest"])
    else:
        allowed = auth.canMoveFolder(uid, P["id"], P["dest"])
    if not allowed:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    return UR.prepare_response({P["item_type"]+"s": annotations.move_file(uid, P)})
def copy_file(P, req):
    """Copy a file into another ensemble or folder.

    Requires move rights on the source plus insert rights on the target
    (an ensemble directly, or a folder's ensemble). Returns the new source id.
    """
    uid = UR.getUserId(req)
    if not auth.canMoveFile(uid, P["source_id"]):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    if P["target_type"] == "ensemble":
        if not auth.canInsertFile(uid, P["target_id"]):
            return UR.prepare_response({}, 1, "NOT ALLOWED")
    elif P["target_type"] == "folder":
        folder = M.Folder.objects.get(pk=P["target_id"])
        if not auth.canInsertFile(uid, folder.ensemble.pk, folder.pk):
            return UR.prepare_response({}, 1, "NOT ALLOWED")
    else:
        # only "ensemble" and "folder" targets are supported
        return UR.prepare_response({}, 1, "INVALID ARGUMENT")
    new_source_id = annotations.copy_file(uid, P)
    return UR.prepare_response({ "id_source": new_source_id })
def add_ensemble(payload, req):
    """Create a new ensemble for the requester and return it."""
    uid = UR.getUserId(req)
    if uid is None:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    new_id = annotations.create_ensemble(uid, payload)
    return UR.prepare_response(annotations.get_ensembles(uid, {"id": new_id}))
def add_folder(payload, req):
    """Create a folder under a parent within an ensemble and return it."""
    uid = UR.getUserId(req)
    ensemble_id = payload["id_ensemble"]
    parent_id = payload["id_parent"]
    if not auth.canAddFolder(uid, ensemble_id, parent_id):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    folder_id = annotations.create_folder(ensemble_id, parent_id, payload["name"])
    return UR.prepare_response(annotations.get_folders(uid, {"id": folder_id}))
def edit_assignment(P, req):
    """Update assignment settings on a file the requester administers."""
    user_id = UR.getUserId(req)
    if auth.canEditAssignment(user_id, P["id"]):
        updated_files = annotations.edit_assignment(user_id, P)
        return UR.prepare_response({"files": updated_files})
    return UR.prepare_response({}, 1, "NOT ALLOWED")
def request_source_id(payload, req):
    """Reserve and return a fresh source id for an upcoming upload."""
    uid = UR.getUserId(req)
    if uid is None:
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    # keep a trace of who asked and with what payload
    logging.info("[request_source_id]: %s" %(payload,) )
    return UR.prepare_response({"id_source":annotations.createSourceID()})
def remote_log(payload,req):
    """Deprecated alias kept for old clients; delegates to log_history."""
    #deprecated. Here only for compatibility
    return log_history(payload, req)
def log_history(payload, req):
    """Heartbeat/telemetry endpoint: record client activity for the current
    session and optionally return fresh data the client asked for.

    Recognized payload sections: "seen", "page", "idle", "scrolling",
    "analytics", "analyticsClick", and "__return" (a request for new notes on
    a file or for pending items since the previous activity).
    """
    uid = UR.getUserInfo(req, True).id
    if uid is None:
        #SACHA TODO: LOG this.
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    cid = UR.CID
    if cid == 0:
        # BUG FIX: message typo ("MOST" -> "MUST")
        return UR.prepare_response({}, 1, "CID MUST BE NONZERO")
    session, previous_activity = annotations.markActivity(cid)
    if session is None:
        return UR.prepare_response({}, 1, "SESSION NOT FOUND")
    id_session = session.id
    output={}
    if "seen" in payload and cid != 0:
        annotations.markCommentSeen(uid, id_session, payload["seen"])
    if "page" in payload and cid != 0:
        annotations.markPageSeen(uid, id_session, payload["page"])
    if "idle" in payload and cid != 0:
        annotations.markIdle(uid, id_session, payload["idle"])
    if "scrolling" in payload and cid != 0:
        logger = logging.getLogger("scrolling")
        logger.info("%s|%s"%(id_session, payload["scrolling"]));
    if "__return" in payload and cid != 0:
        R = payload["__return"]
        if R["type"] == "newNotesOnFile":
            id_source = R["a"]["id_source"]
            # NOTE(review): unlike getNotes, canReadFile is called without
            # `req` here — presumably an optional parameter; confirm.
            if auth.canReadFile(uid, id_source):
                output["locations"], output["html5locations"], output["comments"], output["threadmarks"] = annotations.getCommentsByFile(id_source, uid, previous_activity)
        elif R["type"] == "newPending":
            #for now, we retrieve all the pending stuff.
            output = annotations.getPending(uid, payload)
    if "analytics" in payload and cid != 0:
        doc_analytics.markAnalyticsVisit(uid, payload["analytics"])
    if "analyticsClick" in payload and cid != 0:
        doc_analytics.markAnalyticsClick(uid, payload["analyticsClick"])
    return UR.prepare_response(output)
def get_location_info(payload, req):
    """Return location data for one location id (access check still TODO)."""
    location_id = payload["id"]
    uid = UR.getUserId(req)
    #SACHA TODO: check I'm allowed to know this
    retval = {}
    retval["locations"], retval["html5locations"] = annotations.getLocation(location_id)
    if "org" in payload:
        # remember where the direct link came from
        annotations.logDirectURL(uid, location_id, payload["org"])
    return UR.prepare_response(retval)
def get_comment_info(payload, req):
    """Return minimal info for one comment (its id and location id), plus the
    full data of its location (access check still TODO)."""
    id = int(payload["id"])
    uid = UR.getUserId(req);
    #SACHA TODO: check I'm allowed to know this
    retval={}
    comments = annotations.getComment(id, uid)
    id_location = comments[id]["ID_location"]
    retval["comments"] = {id: {"ID": id, "ID_location": id_location}} #share only what's needed
    #print retval["comments"]
    retval["locations"] , retval["html5locations"] = annotations.getLocation( id_location)
    if "org" in payload:
        # remember where the direct link came from
        annotations.logDirectURL(uid, id, payload["org"])
    return UR.prepare_response(retval)
def get_stats_ensemble(payload, req):
    """Return grading/participation stats for an ensemble the requester may see."""
    uid = UR.getUserId(req)
    id_ensemble = payload["id_ensemble"]
    if auth.canSeeGrades(uid, id_ensemble):
        return UR.prepare_response(annotations.get_stats_ensemble(payload))
    return UR.prepare_response({}, 1, "NOT ALLOWED")
def set_grade_assignment(P, req):
    """Record an assignment grade for a (source, user) pair."""
    uid = UR.getUserId(req)
    if not auth.canGrade(uid, P["id_source"], P["id_user"]):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    grade_payload = {"id_source": P["id_source"], "id_user": P["id_user"], "grade": P["grade"]}
    grades = annotations.set_grade_assignment(uid, grade_payload)
    return UR.prepare_response({"grades": grades})
def set_comment_label(P, req):
    """Create or update the requester's label (grade) on a comment.

    When a label already exists for this (grader, comment, category), the old
    values are snapshotted into CommentLabelHistory before being overwritten.
    """
    uid = UR.getUserId(req)
    cid = P["comment_id"]
    if not auth.canLabelComment(uid, cid):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    record = None
    try:
        record = M.CommentLabel.objects.get(grader__id=uid, comment__id=cid, category_id=P["category_id"])
        # keep an audit trail of the previous grade before updating it
        rh = M.CommentLabelHistory()
        rh.grader = record.grader
        rh.ctime = record.ctime
        rh.grade = record.grade
        rh.category = record.category
        rh.comment = record.comment
        rh.save()
        record.ctime = datetime.datetime.now()
    except M.CommentLabel.DoesNotExist:
        record = M.CommentLabel()
    # common path: (re)set the label's fields and persist
    record.category_id = P["category_id"]
    record.comment_id = cid
    record.grade = P["grade"]
    record.grader_id = uid
    record.save()
    retval = {"commentlabels":{record.id: UR.model2dict(record)}}
    return UR.prepare_response(retval)
def advanced_filter(P, req):
    """Return a file's locations filtered by the advanced criteria."""
    locs = annotations.getAdvancedFilteredLocationsByFile(P["id_source"], P["n"], P["r"], P["type"])
    return UR.prepare_response({"locs": locs})
def get_top_comments_from_locations(P, req):
    """Return the top comment of each of the listed locations."""
    top_comments = annotations.getTopCommentsFromLocations(P["id_locations"])
    return UR.prepare_response({"comments": top_comments})
def bulk_import_annotations(P, req):
    """Copy annotations between two sources the requester controls."""
    uid = UR.getUserId(req)
    if not auth.canImportAnnotation(uid, P["from_source_id"], P["to_source_id"]):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    imported = annotations.bulkImportAnnotations(P["from_source_id"], P["to_source_id"], P["locs_array"], P["import_type"])
    return UR.prepare_response(imported)
def set_location_section(P, req):
    """Assign a location to a section and return the refreshed location data."""
    uid = UR.getUserId(req)
    if not auth.canAdministrateLocation(uid, P["id_location"]):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    updated = annotations.setLocationSection(P["id_location"], P["id_section"])
    locations, html5locations = annotations.getLocation(updated.pk)
    # NOTE: only the regular locations are returned (html5locations dropped)
    return UR.prepare_response(locations)
def promote_location_by_copy(P, req):
    """Promote a location by copying it (and its comments) and return the
    refreshed comments and locations for all copies."""
    uid = UR.getUserId(req)
    if not auth.canAdministrateLocation(uid, P["id_location"]):
        return UR.prepare_response({}, 1, "NOT ALLOWED")
    location_ids, comment_ids = annotations.promoteLocationByCopy(P["id_location"])
    retval = {}
    retval["comments"] = {}
    for cid in comment_ids:
        retval["comments"].update(annotations.getComment(cid, uid))
    retval["locations"] = {}
    retval["html5locations"] = {}
    for lid in location_ids:
        locations, html5locations = annotations.getLocation(lid)
        retval["locations"].update(locations)
        # BUG FIX: condition was inverted (`if not html5locations`), so only
        # empty dicts were merged and real HTML5 locations were dropped.
        if html5locations:
            retval["html5locations"].update(html5locations)
    # BUG FIX: per the original intent ("clear out html5locations if none
    # exist") the key must be removed when EMPTY; the old code deleted it
    # when it was non-empty.
    if not retval["html5locations"]:
        del retval["html5locations"]
    return UR.prepare_response( retval )
@csrf_exempt
def other(req):
    """Catch-all view for paths with no URLconf entry yet (debug aid: just
    prints the unhandled method)."""
    print "nb django doesn't have an URLconf for this yet: %s" % req.method
@csrf_exempt
def run(req):
    """Main RPC dispatch endpoint.

    Expects POST fields: f (function name, must be whitelisted in __EXPORTS),
    a (JSON-encoded payload), cid (connection id; "0" means "register a new
    session"). Replies with CORS headers; an empty POST or OPTIONS request is
    treated as a preflight check and answered with just the headers.
    """
    r = HttpResponse()
    r["Access-Control-Allow-Origin"]="*"
    try:
        if req.method == "OPTIONS" or len(req.POST)==0: #FF3 trying to check if Cross Site Request allowed.
            return r
        else:
            #rpc request:
            fctname = req.POST["f"]
            payload = json.loads(req.POST["a"])
            cid = req.POST["cid"]
            if cid == "0" or cid == 0:
                # no session yet: mint one keyed by the current time
                cid = datetime.datetime.now()
                signals.register_session.send("rpc", cid=cid,req=req)
            UR.CID = cid
            MODULE = sys.modules[__name__]
            if fctname in __EXPORTS:
                r.content = getattr(MODULE, fctname)(payload, req)
                return r
            else:
                # BUG FIX: an `assert False` here used to make this error
                # response unreachable (clients got an AssertionError / 500
                # instead of a clean RPC error; asserts also vanish under -O).
                logging.error("[PDF] method '%s' not found in __EXPORTS" % fctname)
                r.content = UR.prepare_response({}, 1,"[PDF] method '%s' not found in __EXPORTS" % fctname)
                return r
    except IOError:
        logging.error("[rpc.views.run] IOError")
        r.content = UR.prepare_response({}, 1,"I/O Error")
        return r
| |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Contains the Document class representing an object / record
"""
_toc = ["webnotes.model.doc.Document"]
import webnotes
import webnotes.model.meta
from webnotes.utils import *
class Document:
"""
The wn(meta-data)framework equivalent of a Database Record.
Stores,Retrieves,Updates the record in the corresponding table.
Runs the triggers required.
The `Document` class represents the basic Object-Relational Mapper (ORM). The object type is defined by
`DocType` and the object ID is represented by `name`::
	Please note the anomaly in the Web Notes Framework that `ID` is always called as `name`
If both `doctype` and `name` are specified in the constructor, then the object is loaded from the database.
If only `doctype` is given, then the object is not loaded
	If `fielddata` is specified, then the object is created from the given dictionary.
**Note 1:**
The getter and setter of the object are overloaded to map to the fields of the object that
are loaded when it is instantiated.
For example: doc.name will be the `name` field and doc.owner will be the `owner` field
**Note 2 - Standard Fields:**
* `name`: ID / primary key
* `owner`: creator of the record
* `creation`: datetime of creation
* `modified`: datetime of last modification
* `modified_by` : last updating user
* `docstatus` : Status 0 - Saved, 1 - Submitted, 2- Cancelled
* `parent` : if child (table) record, this represents the parent record
* `parenttype` : type of parent record (if any)
* `parentfield` : table fieldname of parent record (if any)
* `idx` : Index (sequence) of the child record
"""
	def __init__(self, doctype = None, name = None, fielddata = None):
		"""Create a Document.

		doctype + name -> load the record from the database;
		doctype only   -> new, unsaved record of that type;
		fielddata dict -> wrap the given dict (also accepted as first arg).
		"""
		self._roles = []
		self._perms = []
		self._user_defaults = {}
		self._new_name_set = False
		self._meta = None
		# allow Document({...}): a dict as first argument is field data
		if isinstance(doctype, dict):
			fielddata = doctype
			doctype = None
		if fielddata:
			self.fields = webnotes._dict(fielddata)
		else:
			self.fields = webnotes._dict()
		if not self.fields.has_key('name'):
			self.fields['name']='' # required on save
		if not self.fields.has_key('doctype'):
			self.fields['doctype']='' # required on save
		if not self.fields.has_key('owner'):
			self.fields['owner']='' # required on save
		if doctype:
			self.fields['doctype'] = doctype
		if name:
			self.fields['name'] = name
		# from here on __setattr__ routes unknown attributes into self.fields
		self.__initialized = 1
		if (doctype and name):
			self._loadfromdb(doctype, name)
		else:
			if not fielddata:
				self.fields['__islocal'] = 1
		if not self.fields.docstatus:
			self.fields.docstatus = 0
	def __nonzero__(self):
		"""Always truthy, even with no fields loaded (Python 2 bool protocol)."""
		return True
	def __str__(self):
		"""String form is the str() of the underlying fields dict."""
		return str(self.fields)
	def __repr__(self):
		"""Repr is the repr() of the underlying fields dict."""
		return repr(self.fields)
	def __unicode__(self):
		"""Unicode form is the unicode() of the underlying fields dict."""
		return unicode(self.fields)
	def __eq__(self, other):
		"""Two Documents are equal iff their fields dicts are equal; a
		non-Document never compares equal."""
		if isinstance(other, Document):
			return self.fields == other.fields
		else:
			return False
	def __getstate__(self):
		"""Pickle only the fields dict (other instance state is dropped)."""
		return self.fields
	def __setstate__(self, d):
		"""Restore from pickle: d is the fields dict saved by __getstate__."""
		self.fields = d
	def encode(self, encoding='utf-8'):
		"""convert all unicode values to utf-8"""
		# NOTE(review): mutates self.fields in place; the `encoding` argument
		# is not forwarded to encode_dict here — confirm utf-8 is assumed.
		from webnotes.utils import encode_dict
		encode_dict(self.fields)
def _loadfromdb(self, doctype = None, name = None):
if name: self.name = name
if doctype: self.doctype = doctype
is_single = False
try:
is_single = webnotes.model.meta.is_single(self.doctype)
except Exception, e:
pass
if is_single:
self._loadsingle()
else:
try:
dataset = webnotes.conn.sql('select * from `tab%s` where name="%s"' % (self.doctype, self.name.replace('"', '\"')))
except webnotes.SQLError, e:
if e.args[0]==1146:
dataset = None
else:
raise
if not dataset:
raise webnotes.DoesNotExistError, '[WNF] %s %s does not exist' % (self.doctype, self.name)
self._load_values(dataset[0], webnotes.conn.get_description())
def _load_values(self, data, description):
if '__islocal' in self.fields:
del self.fields['__islocal']
for i in range(len(description)):
v = data[i]
self.fields[description[i][0]] = webnotes.conn.convert_to_simple_type(v)
def _merge_values(self, data, description):
for i in range(len(description)):
v = data[i]
if v: # only if value, over-write
self.fields[description[i][0]] = webnotes.conn.convert_to_simple_type(v)
	def _loadsingle(self):
		"""Load a Single doctype: its name equals its doctype and its values
		come from the shared tabSingles store."""
		self.name = self.doctype
		# getsingle — presumably from the `webnotes.utils` star import; confirm
		self.fields.update(getsingle(self.doctype))
	def __setattr__(self, name, value):
		"""Route attribute writes: before __init__ finishes (or for existing
		instance attributes) write to __dict__; everything else becomes a
		document field in self.fields."""
		# normal attribute
		if not self.__dict__.has_key('_Document__initialized'):
			# still inside __init__ — all writes are plain attributes
			self.__dict__[name] = value
		elif self.__dict__.has_key(name):
			self.__dict__[name] = value
		else:
			# field attribute
			f = self.__dict__['fields']
			f[name] = value
	def __getattr__(self, name):
		"""Fall back to self.fields for unknown attributes; a missing field
		reads as '' instead of raising AttributeError."""
		# NOTE(review): __getattr__ only runs after normal lookup fails, so
		# the __dict__ branch below is effectively dead — kept as-is.
		if self.__dict__.has_key(name):
			return self.__dict__[name]
		elif self.fields.has_key(name):
			return self.fields[name]
		else:
			return ''
	def get(self, name, value=None):
		"""dict.get-style field access with an optional default."""
		return self.fields.get(name, value)
	def update(self, d):
		"""Merge dict d into the fields; returns self for chaining."""
		self.fields.update(d)
		return self
	def insert(self):
		"""Force-save this document as a new record; returns self."""
		self.fields['__islocal'] = 1
		self.save()
		return self
def save(self, new=0, check_links=1, ignore_fields=0, make_autoname=1,
keep_timestamps=False):
self.get_meta()
if new:
self.fields["__islocal"] = 1
# add missing parentinfo (if reqd)
if self.parent and not (self.parenttype and self.parentfield):
self.update_parentinfo()
if self.parent and not self.idx:
self.set_idx()
# if required, make new
if not self._meta.issingle:
if self.fields.get('__islocal'):
r = self._insert(make_autoname=make_autoname, keep_timestamps = keep_timestamps)
if r:
return r
else:
if not webnotes.conn.exists(self.doctype, self.name):
webnotes.msgprint(webnotes._("Cannot update a non-exiting record, try inserting.") + ": " + self.doctype + " / " + self.name,
raise_exception=1)
# save the values
self._update_values(self._meta.issingle,
check_links and self.make_link_list() or {}, ignore_fields=ignore_fields,
keep_timestamps=keep_timestamps)
self._clear_temp_fields()
	def _get_amended_name(self):
		"""Name an amendment as '<prefix>-<n>'.

		If the amended document was itself an amendment, bump its trailing
		counter; otherwise start at 1 with the full amended_from as prefix.
		"""
		am_id = 1
		am_prefix = self.amended_from
		# NOTE(review): the trailing `or ''` is a no-op (an empty string is
		# already falsy); the condition is just the query result — kept as-is.
		if webnotes.conn.sql('select amended_from from `tab%s` where name = "%s"' % (self.doctype, self.amended_from))[0][0] or '':
			am_id = cint(self.amended_from.split('-')[-1]) + 1
			am_prefix = '-'.join(self.amended_from.split('-')[:-1]) # except the last hyphen
		self.name = am_prefix + '-' + str(am_id)
	def set_new_name(self, controller=None):
		"""Choose a name for a new record, in priority order: amendment
		naming, the controller's autoname() hook, the doctype's autoname rule
		(field:/naming_series:/pattern), an explicit __newname, and finally a
		global numeric series. Runs at most once per instance."""
		if self._new_name_set:
			# already set by bean
			return
		self._new_name_set = True
		self.get_meta()
		autoname = self._meta.autoname
		# keep the pre-rename (local) name around for reference
		self.localname = self.name
		# amendments
		if self.amended_from:
			return self._get_amended_name()
		# by method
		else:
			# get my object
			if not controller:
				controller = webnotes.get_obj([self])
			if hasattr(controller, 'autoname'):
				return controller.autoname()
		# based on a field
		if autoname and autoname.startswith('field:'):
			# name comes from the field named after the 'field:' prefix
			n = self.fields[autoname[6:]]
			if not n:
				raise Exception, 'Name is required'
			self.name = n.strip()
		elif autoname and autoname.startswith("naming_series:"):
			self.set_naming_series()
			if not self.naming_series:
				webnotes.msgprint(webnotes._("Naming Series mandatory"), raise_exception=True)
			self.name = make_autoname(self.naming_series+'.#####')
		# call the method!
		elif autoname and autoname!='Prompt':
			self.name = make_autoname(autoname, self.doctype)
		# given
		elif self.fields.get('__newname',''):
			self.name = self.fields['__newname']
		# default name for table
		elif self._meta.istable:
			self.name = make_autoname('#########', self.doctype)
		# unable to determine a name, use global series
		if not self.name:
			self.name = make_autoname('#########', self.doctype)
	def set_naming_series(self):
		"""Fall back to the doctype's default naming series when none is set."""
		if not self.naming_series:
			# pick default naming series
			self.naming_series = get_default_naming_series(self.doctype)
	def _insert(self, make_autoname=True, keep_timestamps=False):
		"""Insert a brand-new row: name the record, validate the name, stamp
		ownership/timestamps (unless kept), and write the standard columns.
		The remaining field values are written later by _update_values."""
		# set name
		if make_autoname:
			self.set_new_name()
		# validate name
		self.name = validate_name(self.doctype, self.name, self._meta.name_case)
		# insert!
		if not keep_timestamps:
			if not self.owner:
				self.owner = webnotes.session['user']
			self.modified_by = webnotes.session['user']
			if not self.creation:
				self.creation = self.modified = now()
			else:
				self.modified = now()
		webnotes.conn.sql("insert into `tab%(doctype)s`" % self.fields \
			+ """ (name, owner, creation, modified, modified_by)
			values (%(name)s, %(owner)s, %(creation)s, %(modified)s,
				%(modified_by)s)""", self.fields)
	def _update_single(self, link_list):
		"""Persist a Single doctype: wipe its rows in tabSingles and
		re-insert one (doctype, field, value) row per saved field."""
		self.modified = now()
		update_str, values = [], []
		webnotes.conn.sql("delete from tabSingles where doctype='%s'" % self.doctype)
		for f in self.fields.keys():
			if not (f in ('modified', 'doctype', 'name', 'perm', 'localname', 'creation'))\
					and (not f.startswith('__')): # fields not saved
				# validate links
				if link_list and link_list.get(f):
					self.fields[f] = self._validate_link(link_list, f)
				if self.fields[f]==None:
					# store SQL NULL rather than the string 'None'
					update_str.append("(%s,%s,NULL)")
					values.append(self.doctype)
					values.append(f)
				else:
					update_str.append("(%s,%s,%s)")
					values.append(self.doctype)
					values.append(f)
					values.append(self.fields[f])
		webnotes.conn.sql("insert into tabSingles(doctype, field, value) values %s" % (', '.join(update_str)), values)
def validate_links(self, link_list):
err_list = []
for f in self.fields.keys():
# validate links
old_val = self.fields[f]
if link_list and link_list.get(f):
self.fields[f] = self._validate_link(link_list, f)
if old_val and not self.fields[f]:
s = link_list[f][1] + ': ' + old_val
err_list.append(s)
return err_list
def make_link_list(self):
res = webnotes.model.meta.get_link_fields(self.doctype)
link_list = {}
for i in res: link_list[i[0]] = (i[1], i[2]) # options, label
return link_list
	def _validate_link(self, link_list, f):
		"""Check that the value of link field f exists in its target table.

		Returns the stored name on success (case-matched), '' when the target
		row is missing, None for an empty value.
		"""
		dt = link_list[f][0]
		dn = self.fields.get(f)
		if not dt:
			webnotes.throw("Options not set for link field: " + f)
		# NOTE(review): duplicate of the check above — unreachable unless
		# webnotes.throw returns without raising; kept as-is.
		if not dt: return dn
		if not dn: return None
		if dt=="[Select]": return dn
		if dt.lower().startswith('link:'):
			dt = dt[5:]
		if '\n' in dt:
			# multiple options: validate against the first doctype only
			dt = dt.split('\n')[0]
		tmp = webnotes.conn.sql("""SELECT name FROM `tab%s`
			WHERE name = %s""" % (dt, '%s'), dn)
		return tmp and tmp[0][0] or ''# match case
	def _update_values(self, issingle, link_list, ignore_fields=0, keep_timestamps=False):
		"""Write field values to the database.

		Singles go through _update_single; regular doctypes build one UPDATE
		over all persistable fields (empty/None become SQL NULL). With
		ignore_fields, only columns that exist in the table are written.
		"""
		if issingle:
			self._update_single(link_list)
		else:
			update_str, values = [], []
			# set modified timestamp
			if self.modified and not keep_timestamps:
				self.modified = now()
				self.modified_by = webnotes.session['user']
			fields_list = ignore_fields and self.get_valid_fields() or self.fields.keys()
			for f in fields_list:
				if (not (f in ('doctype', 'name', 'perm', 'localname',
						'creation','_user_tags', "file_list", "_comments"))) and (not f.startswith('__')):
					# fields not saved
					# validate links
					if link_list and link_list.get(f):
						self.fields[f] = self._validate_link(link_list, f)
					if self.fields.get(f) is None or self.fields.get(f)=='':
						update_str.append("`%s`=NULL" % f)
					else:
						values.append(self.fields.get(f))
						update_str.append("`%s`=%s" % (f, '%s'))
			if values:
				values.append(self.name)
				r = webnotes.conn.sql("update `tab%s` set %s where name=%s" % \
					(self.doctype, ', '.join(update_str), "%s"), values)
	def get_valid_fields(self):
		"""Return the writable field names for this doctype, cached per
		request in webnotes.local.valid_fields_map.

		Singles take their fieldnames from doctype meta (excluding no-value
		field types); regular doctypes use the actual table columns.
		"""
		import webnotes.model.doctype
		if getattr(webnotes.local, "valid_fields_map", None) is None:
			webnotes.local.valid_fields_map = {}
		self.get_meta()
		valid_fields_map = webnotes.local.valid_fields_map
		if not valid_fields_map.get(self.doctype):
			if cint( self._meta.issingle):
				doctypelist = webnotes.model.doctype.get(self.doctype)
				valid_fields_map[self.doctype] = doctypelist.get_fieldnames({
					"fieldtype": ["not in", webnotes.model.no_value_fields]})
			else:
				valid_fields_map[self.doctype] = \
					webnotes.conn.get_table_columns(self.doctype)
		return valid_fields_map.get(self.doctype)
	def get_meta(self):
		"""Lazily fetch and cache naming/single/table metadata for this doctype."""
		if not self._meta:
			# falls back to an empty dict when the DocType row is missing
			self._meta = webnotes.conn.get_value("DocType", self.doctype, ["autoname", "issingle",
				"istable", "name_case"], as_dict=True) or webnotes._dict()
		return self._meta
	def update_parentinfo(self):
		"""update parent type and parent field, if not explicitly specified"""
		# find the DocField of type 'Table' that points at this child doctype;
		# exactly one match is required to infer parenttype/parentfield
		tmp = webnotes.conn.sql("""select parent, fieldname from tabDocField
			where fieldtype='Table' and options=%s""", self.doctype)

		if len(tmp)==0:
			# no parent doctype embeds this child type
			raise Exception, 'Incomplete parent info in child table (%s, %s)' \
				% (self.doctype, self.fields.get('name', '[new]'))

		elif len(tmp)>1:
			# several parents embed it - cannot infer which one applies
			raise Exception, 'Ambiguous parent info (%s, %s)' \
				% (self.doctype, self.fields.get('name', '[new]'))

		else:
			self.parenttype = tmp[0][0]
			self.parentfield = tmp[0][1]
	def set_idx(self):
		"""set idx"""
		# next idx = current max among siblings with the same parent and
		# parentfield, plus one (starts at 1 when there are no siblings)
		self.idx = (webnotes.conn.sql("""select max(idx) from `tab%s`
			where parent=%s and parentfield=%s""" % (self.doctype, '%s', '%s'),
			(self.parent, self.parentfield))[0][0] or 0) + 1
def _clear_temp_fields(self):
# clear temp stuff
keys = self.fields.keys()
for f in keys:
if f.startswith('__'):
del self.fields[f]
	def clear_table(self, doclist, tablefield, save=0):
		"""
		Clears the child records from the given `doclist` for a particular `tablefield`

		Returns the filtered doclist (wrapped in `webnotes.doclist`).
		NOTE(review): the `save` parameter is unused in this implementation.
		"""
		from webnotes.model.utils import getlist

		table_list = getlist(doclist, tablefield)
		delete_list = [d.name for d in table_list]

		if delete_list:
			#filter doclist
			doclist = filter(lambda d: d.name not in delete_list, doclist)

			# delete from db
			webnotes.conn.sql("""\
				delete from `tab%s`
				where parent=%s and parenttype=%s"""
				% (table_list[0].doctype, '%s', '%s'),
				(self.name, self.doctype))

			# flag the parent as changed so the client reloads it
			self.fields['__unsaved'] = 1

		return webnotes.doclist(doclist)
	def addchild(self, fieldname, childtype = '', doclist=None):
		"""
		Returns a new child record of the given `childtype`, linked to this
		document through `parent`, `parenttype` and `parentfield`.

		The record is *not* saved; it carries the `__islocal` flag so the
		client can identify it as unsaved. If `doclist` is passed, the new
		record is appended to it.
		"""
		from webnotes.model.doc import Document
		d = Document()
		d.parent = self.name
		d.parenttype = self.doctype
		d.parentfield = fieldname
		d.doctype = childtype
		d.docstatus = 0;
		d.name = ''
		d.owner = webnotes.session['user']
		d.fields['__islocal'] = 1 # for Client to identify unsaved doc

		if doclist != None:
			doclist.append(d)

		return d
def get_values(self):
"""get non-null fields dict withouth standard fields"""
from webnotes.model import default_fields
ret = {}
for key in self.fields:
if key not in default_fields and self.fields[key]:
ret[key] = self.fields[key]
return ret
def addchild(parent, fieldname, childtype = '', doclist=None):
	"""
	Create a child record attached to the `parent` doc.

	Thin module-level convenience wrapper around `parent.addchild`; the
	returned child is unsaved (see the method for details).

	Example::

		c = Document('Contact','ABC')
		d = addchild(c, 'contact_updates', 'Contact Update')
		d.last_updated = 'Phone call'
		d.save(1)
	"""
	return parent.addchild(fieldname, childtype, doclist)
def make_autoname(key, doctype=''):
	"""
	Creates an autoname from the given key:

	**Autoname rules:**

	* The key is separated by '.'
	* '####' represents a series. The string before this part becomes the prefix:
		Example: ABC.#### creates a series ABC0001, ABC0002 etc
	* 'MM' represents the current month
	* 'YY' and 'YYYY' represent the current year

	*Example:*

	* DE/./.YY./.MM./.##### will create a series like
	  DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
	"""
	# default to a 5-digit series when the key has no '#' part at all
	if not "#" in key:
		key = key + ".#####"

	name = ''
	series_started = False
	today = now_datetime()

	for part in key.split('.'):
		if part.startswith('#'):
			# only the first '#'-part becomes the numeric series; the
			# text accumulated so far acts as the series prefix
			if series_started:
				token = ''
			else:
				token = getseries(name, len(part), doctype)
				series_started = True
		elif part=='YY':
			token = today.strftime('%y')
		elif part=='MM':
			token = today.strftime('%m')
		elif part=='DD':
			token = today.strftime("%d")
		elif part=='YYYY':
			token = today.strftime('%Y')
		else:
			token = part
		name += token

	return name
def getseries(key, digits, doctype=''):
	"""Return the next value of the naming series `key`, zero-padded to
	`digits` characters.

	The counter is stored in `tabSeries` and created on first use.
	"""
	# series created ?
	current = webnotes.conn.get_value("Series", key, "current")
	if current != None:
		# yes, update it
		webnotes.conn.sql("update tabSeries set current = current+1 where name=%s", key)
		current = cint(current) + 1
	else:
		# no, create it.  Use a parameterized query: the original
		# interpolated `key` straight into the SQL string, which broke on
		# prefixes containing quotes and was an SQL-injection vector.
		webnotes.conn.sql("insert into tabSeries (name, current) values (%s, 1)", key)
		current = 1
	return ('%0'+str(digits)+'d') % current
def getchildren(name, childtype, field='', parenttype='', from_doctype=0):
	"""Load all child records of doctype `childtype` whose parent is `name`,
	ordered by `idx`.

	field -- if given, restrict to this parentfield
	parenttype -- if given, restrict to this parenttype
	from_doctype -- NOTE(review): unused here; presumably kept for API
		compatibility with callers
	"""
	import webnotes
	from webnotes.model.doclist import DocList

	condition = ""
	values = []

	if field:
		condition += ' and parentfield=%s '
		values.append(field)
	if parenttype:
		condition += ' and parenttype=%s '
		values.append(parenttype)

	dataset = webnotes.conn.sql("""select * from `tab%s` where parent=%s %s order by idx""" \
		% (childtype, "%s", condition), tuple([name]+values))
	desc = webnotes.conn.get_description()

	l = DocList()

	for i in dataset:
		d = Document()
		d.doctype = childtype
		# map the raw row onto document fields using the cursor description
		d._load_values(i, desc)
		l.append(d)

	return l
def check_page_perm(doc):
	"""Raise PermissionError unless the Page `doc` is visible to Guest.

	A page passes if it is the Login Page, is published, or has no
	Guest-role restriction row in `tabPage Role`.
	"""
	if doc.name=='Login Page':
		return
	if doc.publish:
		return

	if not webnotes.conn.sql("select name from `tabPage Role` where parent=%s and role='Guest'", doc.name):
		webnotes.response['403'] = 1
		raise webnotes.PermissionError, '[WNF] No read permission for %s %s' % ('Page', doc.name)
def get(dt, dn='', with_children = 1, from_controller = 0):
	"""
	Returns a doclist containing the main record and all child records

	dt -- doctype; dn -- document name (defaults to `dt` for singles)
	with_children -- when falsy, return only the main record
	from_controller -- NOTE(review): unused in this function
	"""
	import webnotes
	import webnotes.model
	from webnotes.model.doclist import DocList

	dn = dn or dt

	# load the main doc
	doc = Document(dt, dn)

	# Guests may only read pages that pass the Guest permission check
	if dt=='Page' and webnotes.session['user'] == 'Guest':
		check_page_perm(doc)

	if not with_children:
		# done
		return DocList([doc,])

	# get all children types
	tablefields = webnotes.model.meta.get_table_fields(dt)

	# load children
	doclist = DocList([doc,])
	for t in tablefields:
		doclist += getchildren(doc.name, t[0], t[1], dt)

	return doclist
def getsingle(doctype):
	"""Fetch a Single doctype's stored field/value pairs from tabSingles
	and return them as a plain dict."""
	rows = webnotes.conn.sql(
		"select field, value from tabSingles where doctype=%s", doctype)
	return dict(rows)
def copy_common_fields(from_doc, to_doc):
	"""Copy field values from `from_doc` into `to_doc` for every field that
	also exists in `to_doc`'s doctype.

	Standard framework fields are skipped; a value is only written when it
	differs from what `to_doc` already holds.
	"""
	from webnotes.model import default_fields
	doctype_list = webnotes.get_doctype(to_doc.doctype)

	for fieldname, value in from_doc.fields.items():
		if fieldname in default_fields:
			continue

		# use .get(): `to_doc` may not have the field set at all, and the
		# previous direct [] lookup raised KeyError in that case
		if doctype_list.get_field(fieldname) and to_doc.fields.get(fieldname) != value:
			to_doc.fields[fieldname] = value
def validate_name(doctype, name, case=None, merge=False):
	"""Validate and normalize a document name; returns the cleaned name.

	Raises NameError for duplicates (unless `merge`) and for unresolved
	'New <DocType>' placeholder names; `case` may force Title/UPPER case;
	forbidden characters trigger `webnotes.msgprint` with an exception.
	"""
	if not merge:
		# NOTE(review): the duplicate check runs *before* the case
		# conversion and strip below, so a differently-cased or padded
		# variant of an existing name can slip through - confirm intended
		if webnotes.conn.sql('select name from `tab%s` where name=%s' % (doctype,'%s'), name):
			raise NameError, 'Name %s already exists' % name

	# no name - NOTE(review): returns an error *string* instead of raising
	if not name: return 'No Name Specified for %s' % doctype

	# new..
	if name.startswith('New '+doctype):
		raise NameError, 'There were some errors setting the name, please contact the administrator'

	if case=='Title Case': name = name.title()
	if case=='UPPER CASE': name = name.upper()

	name = name.strip() # no leading and trailing blanks

	forbidden = ['%', "'", '"', '#', '*', '?', '`']
	for f in forbidden:
		if f in name:
			webnotes.msgprint('%s not allowed in ID (name)' % f, raise_exception =1)

	return name
def get_default_naming_series(doctype):
	"""get default value for `naming_series` property

	Returns the first non-blank option of the doctype's `naming_series`
	field, or None when the doctype has no such field or no usable option.
	"""
	from webnotes.model.doctype import get_property
	naming_series = get_property(doctype, "options", "naming_series")
	if not naming_series:
		return None

	# The original returned `options[0] or options[1]`, which raised
	# IndexError when the first line was blank and no second line existed,
	# and returned '' when the first two lines were both blank.  Pick the
	# first non-blank option instead.
	for option in naming_series.split("\n"):
		if option:
			return option
	return None
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, List, Optional, Union, TYPE_CHECKING, cast
import warnings
import pandas as pd
from pandas.api.types import ( # type: ignore[attr-defined]
CategoricalDtype,
is_dict_like,
is_list_like,
)
from pyspark.pandas.internal import InternalField
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.data_type_ops.categorical_ops import _to_cat
from pyspark.sql import functions as F
from pyspark.sql.types import StructField
if TYPE_CHECKING:
import pyspark.pandas as ps
class CategoricalAccessor:
    """
    Accessor object for categorical properties of the Series values.

    Examples
    --------
    >>> s = ps.Series(list("abbccc"), dtype="category")
    >>> s  # doctest: +SKIP
    0    a
    1    b
    2    b
    3    c
    4    c
    5    c
    dtype: category
    Categories (3, object): ['a', 'b', 'c']

    >>> s.cat.categories
    Index(['a', 'b', 'c'], dtype='object')

    >>> s.cat.codes
    0    0
    1    1
    2    1
    3    2
    4    2
    5    2
    dtype: int8
    """

    def __init__(self, series: "ps.Series"):
        # the accessor is only meaningful for categorical-dtyped Series
        if not isinstance(series.dtype, CategoricalDtype):
            raise ValueError("Cannot call CategoricalAccessor on type {}".format(series.dtype))
        self._data = series

    @property
    def _dtype(self) -> CategoricalDtype:
        # narrow the Series dtype to CategoricalDtype (guaranteed by __init__)
        return cast(CategoricalDtype, self._data.dtype)

    @property
    def categories(self) -> pd.Index:
        """
        The categories of this categorical.

        Examples
        --------
        >>> s = ps.Series(list("abbccc"), dtype="category")
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']

        >>> s.cat.categories
        Index(['a', 'b', 'c'], dtype='object')
        """
        return self._dtype.categories

    @categories.setter
    def categories(self, categories: Union[pd.Index, List]) -> None:
        # setting categories is a pure relabel: the Spark column (the codes)
        # is reused unchanged and only the dtype metadata is replaced, so
        # the new list must have the same length as the old one
        dtype = CategoricalDtype(categories, ordered=self.ordered)

        if len(self.categories) != len(dtype.categories):
            raise ValueError(
                "new categories need to have the same number of items as the old categories!"
            )

        internal = self._data._psdf._internal.with_new_spark_column(
            self._data._column_label,
            self._data.spark.column,
            field=self._data._internal.data_fields[0].copy(dtype=dtype),
        )
        self._data._psdf._update_internal_frame(internal)

    @property
    def ordered(self) -> bool:
        """
        Whether the categories have an ordered relationship.

        Examples
        --------
        >>> s = ps.Series(list("abbccc"), dtype="category")
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']

        >>> s.cat.ordered
        False
        """
        return self._dtype.ordered

    @property
    def codes(self) -> "ps.Series":
        """
        Return Series of codes as well as the index.

        Examples
        --------
        >>> s = ps.Series(list("abbccc"), dtype="category")
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']

        >>> s.cat.codes
        0    0
        1    1
        2    1
        3    2
        4    2
        5    2
        dtype: int8
        """
        # the underlying Spark column already stores the integer codes;
        # re-wrap it with a plain (non-categorical) field so the result is
        # exposed as an integer Series rather than a categorical one
        return self._data._with_new_scol(
            self._data.spark.column,
            field=InternalField.from_struct_field(
                StructField(
                    name=self._data._internal.data_spark_column_names[0],
                    dataType=self._data.spark.data_type,
                    nullable=self._data.spark.nullable,
                )
            ),
        ).rename()

    def add_categories(
        self, new_categories: Union[pd.Index, Any, List], inplace: bool = False
    ) -> Optional["ps.Series"]:
        """
        Add new categories.

        `new_categories` will be included at the last/highest place in the
        categories and will be unused directly after this call.

        Parameters
        ----------
        new_categories : category or list-like of category
            The new categories to be included.
        inplace : bool, default False
            Whether or not to add the categories inplace or return a copy of
            this categorical with added categories.

            .. deprecated:: 3.2.0

        Returns
        -------
        Series or None
            Categorical with new categories added or None if ``inplace=True``.

        Raises
        ------
        ValueError
            If the new categories include old categories or do not validate as
            categories

        See Also
        --------
        rename_categories : Rename categories.
        reorder_categories : Reorder categories.
        remove_categories : Remove the specified categories.
        remove_unused_categories : Remove categories which are not used.
        set_categories : Set the categories to the specified ones.

        Examples
        --------
        >>> s = ps.Series(list("abbccc"), dtype="category")
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']

        >>> s.cat.add_categories('x')  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (4, object): ['a', 'b', 'c', 'x']
        """
        from pyspark.pandas.frame import DataFrame

        if inplace:
            warnings.warn(
                "The `inplace` parameter in add_categories is deprecated "
                "and will be removed in a future version.",
                FutureWarning,
            )

        # normalize the argument to a list of categories
        categories: List[Any]
        if is_list_like(new_categories):
            categories = list(new_categories)
        else:
            categories = [new_categories]

        if any(cat in self.categories for cat in categories):
            raise ValueError(
                "new categories must not include old categories: {{{cats}}}".format(
                    cats=", ".join(set(str(cat) for cat in categories if cat in self.categories))
                )
            )

        # appending categories never remaps existing codes, so only the
        # dtype metadata changes; the Spark column is reused as-is
        internal = self._data._psdf._internal.with_new_spark_column(
            self._data._column_label,
            self._data.spark.column,
            field=self._data._internal.data_fields[0].copy(
                dtype=CategoricalDtype(list(self.categories) + categories, ordered=self.ordered)
            ),
        )
        if inplace:
            self._data._psdf._update_internal_frame(internal)
            return None
        else:
            return DataFrame(internal)._psser_for(self._data._column_label).copy()

    def _set_ordered(self, *, ordered: bool, inplace: bool) -> Optional["ps.Series"]:
        # shared implementation for as_ordered / as_unordered: only the
        # `ordered` flag in the dtype metadata changes, never the data
        from pyspark.pandas.frame import DataFrame

        if self.ordered == ordered:
            # nothing to do; mirror pandas by returning a copy (or None)
            if inplace:
                return None
            else:
                return self._data.copy()
        else:
            internal = self._data._psdf._internal.with_new_spark_column(
                self._data._column_label,
                self._data.spark.column,
                field=self._data._internal.data_fields[0].copy(
                    dtype=CategoricalDtype(categories=self.categories, ordered=ordered)
                ),
            )
            if inplace:
                self._data._psdf._update_internal_frame(internal)
                return None
            else:
                return DataFrame(internal)._psser_for(self._data._column_label).copy()

    def as_ordered(self, inplace: bool = False) -> Optional["ps.Series"]:
        """
        Set the Categorical to be ordered.

        Parameters
        ----------
        inplace : bool, default False
            Whether or not to set the ordered attribute in-place or return
            a copy of this categorical with ordered set to True.

        Returns
        -------
        Series or None
            Ordered Categorical or None if ``inplace=True``.

        Examples
        --------
        >>> s = ps.Series(list("abbccc"), dtype="category")
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']

        >>> s.cat.as_ordered()  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a' < 'b' < 'c']
        """
        return self._set_ordered(ordered=True, inplace=inplace)

    def as_unordered(self, inplace: bool = False) -> Optional["ps.Series"]:
        """
        Set the Categorical to be unordered.

        Parameters
        ----------
        inplace : bool, default False
            Whether or not to set the ordered attribute in-place or return
            a copy of this categorical with ordered set to False.

        Returns
        -------
        Series or None
            Unordered Categorical or None if ``inplace=True``.

        Examples
        --------
        >>> s = ps.Series(list("abbccc"), dtype="category").cat.as_ordered()
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a' < 'b' < 'c']

        >>> s.cat.as_unordered()  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']
        """
        return self._set_ordered(ordered=False, inplace=inplace)

    def remove_categories(
        self, removals: Union[pd.Index, Any, List], inplace: bool = False
    ) -> Optional["ps.Series"]:
        """
        Remove the specified categories.

        `removals` must be included in the old categories. Values which were in
        the removed categories will be set to NaN

        Parameters
        ----------
        removals : category or list of categories
            The categories which should be removed.
        inplace : bool, default False
            Whether or not to remove the categories inplace or return a copy of
            this categorical with removed categories.

            .. deprecated:: 3.2.0

        Returns
        -------
        Series or None
            Categorical with removed categories or None if ``inplace=True``.

        Raises
        ------
        ValueError
            If the removals are not contained in the categories

        See Also
        --------
        rename_categories : Rename categories.
        reorder_categories : Reorder categories.
        add_categories : Add new categories.
        remove_unused_categories : Remove categories which are not used.
        set_categories : Set the categories to the specified ones.

        Examples
        --------
        >>> s = ps.Series(list("abbccc"), dtype="category")
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']

        >>> s.cat.remove_categories('b')  # doctest: +SKIP
        0      a
        1    NaN
        2    NaN
        3      c
        4      c
        5      c
        dtype: category
        Categories (2, object): ['a', 'c']
        """
        if inplace:
            warnings.warn(
                "The `inplace` parameter in remove_categories is deprecated "
                "and will be removed in a future version.",
                FutureWarning,
            )

        # normalize the argument to a list of categories to drop
        categories: List[Any]
        if is_list_like(removals):
            categories = [cat for cat in removals if cat is not None]
        elif removals is None:
            categories = []
        else:
            categories = [removals]

        if any(cat not in self.categories for cat in categories):
            raise ValueError(
                "removals must all be in old categories: {{{cats}}}".format(
                    cats=", ".join(
                        set(str(cat) for cat in categories if cat not in self.categories)
                    )
                )
            )

        if len(categories) == 0:
            if inplace:
                return None
            else:
                return self._data.copy()
        else:
            # delegate the code remapping (and null-ing of removed values)
            # to astype with the reduced categorical dtype
            dtype = CategoricalDtype(
                [cat for cat in self.categories if cat not in categories], ordered=self.ordered
            )
            psser = self._data.astype(dtype)

            if inplace:
                internal = self._data._psdf._internal.with_new_spark_column(
                    self._data._column_label,
                    psser.spark.column,
                    field=psser._internal.data_fields[0],
                )
                self._data._psdf._update_internal_frame(internal)
                return None
            else:
                return psser

    def remove_unused_categories(self, inplace: bool = False) -> Optional["ps.Series"]:
        """
        Remove categories which are not used.

        Parameters
        ----------
        inplace : bool, default False
            Whether or not to drop unused categories inplace or return a copy of
            this categorical with unused categories dropped.

            .. deprecated:: 3.2.0

        Returns
        -------
        cat : Series or None
            Categorical with unused categories dropped or None if ``inplace=True``.

        See Also
        --------
        rename_categories : Rename categories.
        reorder_categories : Reorder categories.
        add_categories : Add new categories.
        remove_categories : Remove the specified categories.
        set_categories : Set the categories to the specified ones.

        Examples
        --------
        >>> s = ps.Series(pd.Categorical(list("abbccc"), categories=['a', 'b', 'c', 'd']))
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (4, object): ['a', 'b', 'c', 'd']

        >>> s.cat.remove_unused_categories()  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']
        """
        if inplace:
            warnings.warn(
                "The `inplace` parameter in remove_unused_categories is deprecated "
                "and will be removed in a future version.",
                FutureWarning,
            )

        # NOTE: collects the distinct values to the driver to decide which
        # categories are actually present, then reuses remove_categories
        categories = set(self._data.drop_duplicates()._to_pandas())
        removals = [cat for cat in self.categories if cat not in categories]
        return self.remove_categories(removals=removals, inplace=inplace)

    def rename_categories(
        self, new_categories: Union[list, dict, Callable], inplace: bool = False
    ) -> Optional["ps.Series"]:
        """
        Rename categories.

        Parameters
        ----------
        new_categories : list-like, dict-like or callable
            New categories which will replace old categories.

            * list-like: all items must be unique and the number of items in
              the new categories must match the existing number of categories.

            * dict-like: specifies a mapping from
              old categories to new. Categories not contained in the mapping
              are passed through and extra categories in the mapping are
              ignored.

            * callable : a callable that is called on all items in the old
              categories and whose return values comprise the new categories.

        inplace : bool, default False
            Whether or not to rename the categories inplace or return a copy of
            this categorical with renamed categories.

            .. deprecated:: 3.2.0

        Returns
        -------
        cat : Series or None
            Categorical with removed categories or None if ``inplace=True``.

        Raises
        ------
        ValueError
            If new categories are list-like and do not have the same number of
            items than the current categories or do not validate as categories

        See Also
        --------
        reorder_categories : Reorder categories.
        add_categories : Add new categories.
        remove_categories : Remove the specified categories.
        remove_unused_categories : Remove categories which are not used.
        set_categories : Set the categories to the specified ones.

        Examples
        --------
        >>> s = ps.Series(["a", "a", "b"], dtype="category")
        >>> s.cat.rename_categories([0, 1])  # doctest: +SKIP
        0    0
        1    0
        2    1
        dtype: category
        Categories (2, int64): [0, 1]

        For dict-like ``new_categories``, extra keys are ignored and
        categories not in the dictionary are passed through

        >>> s.cat.rename_categories({'a': 'A', 'c': 'C'})  # doctest: +SKIP
        0    A
        1    A
        2    b
        dtype: category
        Categories (2, object): ['A', 'b']

        You may also provide a callable to create the new categories

        >>> s.cat.rename_categories(lambda x: x.upper())  # doctest: +SKIP
        0    A
        1    A
        2    B
        dtype: category
        Categories (2, object): ['A', 'B']
        """
        from pyspark.pandas.frame import DataFrame

        if inplace:
            warnings.warn(
                "The `inplace` parameter in rename_categories is deprecated "
                "and will be removed in a future version.",
                FutureWarning,
            )

        # build the full replacement list of categories from the argument
        if is_dict_like(new_categories):
            categories = [cast(dict, new_categories).get(item, item) for item in self.categories]
        elif callable(new_categories):
            categories = [new_categories(item) for item in self.categories]
        elif is_list_like(new_categories):
            if len(self.categories) != len(new_categories):
                raise ValueError(
                    "new categories need to have the same number of items as the old categories!"
                )
            categories = cast(list, new_categories)
        else:
            raise TypeError("new_categories must be list-like, dict-like or callable.")

        # a rename never remaps codes, so the Spark column is reused and
        # only the dtype metadata changes
        internal = self._data._psdf._internal.with_new_spark_column(
            self._data._column_label,
            self._data.spark.column,
            field=self._data._internal.data_fields[0].copy(
                dtype=CategoricalDtype(categories=categories, ordered=self.ordered)
            ),
        )

        if inplace:
            self._data._psdf._update_internal_frame(internal)
            return None
        else:
            return DataFrame(internal)._psser_for(self._data._column_label).copy()

    def reorder_categories(
        self,
        new_categories: Union[pd.Index, List],
        ordered: Optional[bool] = None,
        inplace: bool = False,
    ) -> Optional["ps.Series"]:
        """
        Reorder categories as specified in new_categories.

        `new_categories` need to include all old categories and no new category
        items.

        Parameters
        ----------
        new_categories : Index-like
            The categories in new order.
        ordered : bool, optional
            Whether or not the categorical is treated as a ordered categorical.
            If not given, do not change the ordered information.
        inplace : bool, default False
            Whether or not to reorder the categories inplace or return a copy of
            this categorical with reordered categories.

            .. deprecated:: 3.2.0

        Returns
        -------
        cat : Series or None
            Categorical with removed categories or None if ``inplace=True``.

        Raises
        ------
        ValueError
            If the new categories do not contain all old category items or any
            new ones

        See Also
        --------
        rename_categories : Rename categories.
        add_categories : Add new categories.
        remove_categories : Remove the specified categories.
        remove_unused_categories : Remove categories which are not used.
        set_categories : Set the categories to the specified ones.

        Examples
        --------
        >>> s = ps.Series(list("abbccc"), dtype="category")
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']

        >>> s.cat.reorder_categories(['c', 'b', 'a'], ordered=True)  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['c' < 'b' < 'a']
        """
        if inplace:
            warnings.warn(
                "The `inplace` parameter in reorder_categories is deprecated "
                "and will be removed in a future version.",
                FutureWarning,
            )

        if not is_list_like(new_categories):
            raise TypeError(
                "Parameter 'new_categories' must be list-like, was '{}'".format(new_categories)
            )
        elif len(set(new_categories)) != len(set(self.categories)) or any(
            cat not in self.categories for cat in new_categories
        ):
            raise ValueError("items in new_categories are not the same as in old categories")

        if ordered is None:
            ordered = self.ordered

        if new_categories == list(self.categories) and ordered == self.ordered:
            if inplace:
                return None
            else:
                return self._data.copy()
        else:
            # reordering changes the category->code mapping, so go through
            # the actual category values (_to_cat) and re-encode via astype
            dtype = CategoricalDtype(categories=new_categories, ordered=ordered)
            psser = _to_cat(self._data).astype(dtype)

            if inplace:
                internal = self._data._psdf._internal.with_new_spark_column(
                    self._data._column_label,
                    psser.spark.column,
                    field=psser._internal.data_fields[0],
                )
                self._data._psdf._update_internal_frame(internal)
                return None
            else:
                return psser

    def set_categories(
        self,
        new_categories: Union[pd.Index, List],
        ordered: Optional[bool] = None,
        rename: bool = False,
        inplace: bool = False,
    ) -> Optional["ps.Series"]:
        """
        Set the categories to the specified new_categories.

        `new_categories` can include new categories (which will result in
        unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simple be renamed
        (less or more items than in old categories will result in values set to
        NaN or in unused categories respectively).

        This method can be used to perform more than one action of adding,
        removing, and reordering simultaneously and is therefore faster than
        performing the individual steps via the more specialised methods.

        On the other hand this methods does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes, which does not considers a S1 string equal to a single char
        python string.

        Parameters
        ----------
        new_categories : Index-like
            The categories in new order.
        ordered : bool, default False
            Whether or not the categorical is treated as a ordered categorical.
            If not given, do not change the ordered information.
        rename : bool, default False
            Whether or not the new_categories should be considered as a rename
            of the old categories or as reordered categories.
        inplace : bool, default False
            Whether or not to reorder the categories in-place or return a copy
            of this categorical with reordered categories.

            .. deprecated:: 3.2.0

        Returns
        -------
        Series with reordered categories or None if inplace.

        Raises
        ------
        ValueError
            If new_categories does not validate as categories

        See Also
        --------
        rename_categories : Rename categories.
        reorder_categories : Reorder categories.
        add_categories : Add new categories.
        remove_categories : Remove the specified categories.
        remove_unused_categories : Remove categories which are not used.

        Examples
        --------
        >>> s = ps.Series(list("abbccc"), dtype="category")
        >>> s  # doctest: +SKIP
        0    a
        1    b
        2    b
        3    c
        4    c
        5    c
        dtype: category
        Categories (3, object): ['a', 'b', 'c']

        >>> s.cat.set_categories(['b', 'c'])  # doctest: +SKIP
        0    NaN
        1      b
        2      b
        3      c
        4      c
        5      c
        dtype: category
        Categories (2, object): ['b', 'c']

        >>> s.cat.set_categories([1, 2, 3], rename=True)  # doctest: +SKIP
        0    1
        1    2
        2    2
        3    3
        4    3
        5    3
        dtype: category
        Categories (3, int64): [1, 2, 3]

        >>> s.cat.set_categories([1, 2, 3], rename=True, ordered=True)  # doctest: +SKIP
        0    1
        1    2
        2    2
        3    3
        4    3
        5    3
        dtype: category
        Categories (3, int64): [1 < 2 < 3]
        """
        from pyspark.pandas.frame import DataFrame

        if inplace:
            warnings.warn(
                "The `inplace` parameter in set_categories is deprecated "
                "and will be removed in a future version.",
                FutureWarning,
            )

        if not is_list_like(new_categories):
            raise TypeError(
                "Parameter 'new_categories' must be list-like, was '{}'".format(new_categories)
            )

        if ordered is None:
            ordered = self.ordered

        new_dtype = CategoricalDtype(new_categories, ordered=ordered)
        scol = self._data.spark.column

        if rename:
            # rename keeps the existing codes; codes with no counterpart in
            # the (possibly shorter) new category list are mapped to -1,
            # which represents NaN for categorical data
            new_scol = (
                F.when(scol >= len(new_categories), SF.lit(-1).cast(self._data.spark.data_type))
                .otherwise(scol)
                .alias(self._data._internal.data_spark_column_names[0])
            )

            internal = self._data._psdf._internal.with_new_spark_column(
                self._data._column_label,
                new_scol,
                field=self._data._internal.data_fields[0].copy(dtype=new_dtype),
            )

            if inplace:
                self._data._psdf._update_internal_frame(internal)
                return None
            else:
                return DataFrame(internal)._psser_for(self._data._column_label).copy()
        else:
            # non-rename: re-encode values against the new categories
            psser = self._data.astype(new_dtype)

            if inplace:
                internal = self._data._psdf._internal.with_new_spark_column(
                    self._data._column_label,
                    psser.spark.column,
                    field=psser._internal.data_fields[0],
                )
                self._data._psdf._update_internal_frame(internal)
                return None
            else:
                return psser
def _test() -> None:
    """Run this module's doctests against a throw-away local SparkSession."""
    import doctest
    import os
    import sys
    from pyspark.sql import SparkSession
    import pyspark.pandas.categorical

    os.chdir(os.environ["SPARK_HOME"])

    test_globs = pyspark.pandas.categorical.__dict__.copy()
    test_globs["ps"] = pyspark.pandas

    session = (
        SparkSession.builder.master("local[4]")
        .appName("pyspark.pandas.categorical tests")
        .getOrCreate()
    )
    failed, _ = doctest.testmod(
        pyspark.pandas.categorical,
        globs=test_globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    session.stop()
    if failed:
        sys.exit(-1)


if __name__ == "__main__":
    _test()
| |
"""The Mayavi Envisage application.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import sys
import os.path
import logging
# Enthought library imports.
from apptools.logger.api import LogFileHandler, FORMATTER
from traits.etsconfig.api import ETSConfig
from traits.api import (HasTraits, Instance, Int,
on_trait_change, Bool)
# Local imports.
from mayavi_workbench_application import MayaviWorkbenchApplication
from mayavi.preferences.api import preference_manager
from mayavi.core.customize import get_custom_plugins
# GLOBALS
logger = logging.getLogger()
######################################################################
# Useful functions.
######################################################################
def setup_logger(logger, fname, stream=True, mode=logging.ERROR):
    """Setup a log file and the logger.  If the given file name is not
    absolute, put the log file in `ETSConfig.application_home`, if not
    it will create it where desired.

    Parameters:
    -----------

    fname -- file name the logger should use.  If this is an absolute
    path it will create the log file as specified, if not it will put it
    in `ETSConfig.application_home`.

    stream -- Add a stream handler.

    mode -- the logging mode (level) of the stream handler.
    """
    if not os.path.isabs(fname):
        path = os.path.join(ETSConfig.application_home, fname)
    else:
        path = fname

    # Check if we have already added a logger (can happen when the app
    # is started multiple number of times from ipython say).
    # The original checked only handlers[0], and only when more than one
    # handler was present, so an existing LogFileHandler elsewhere in the
    # list went undetected; scan all handlers instead.
    for h in logger.handlers:
        if isinstance(h, LogFileHandler) and h.baseFilename == path:
            logger.info('Logging handlers already set! Not duplicating.')
            return

    logger.setLevel(logging.DEBUG)
    handler = LogFileHandler(path)
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    if stream:
        s = logging.StreamHandler()
        s.setFormatter(FORMATTER)
        s.setLevel(mode)
        logger.addHandler(s)
    logger.info("*"*80)
    logger.info("logfile is: '%s'", os.path.abspath(path))
    logger.info("*"*80)
def get_non_gui_plugin_classes():
    """Get list of basic mayavi plugin classes that do not add any views or
    actions."""
    from envisage.core_plugin import CorePlugin
    from envisage.ui.workbench.workbench_plugin import WorkbenchPlugin
    from tvtk.plugins.scene.scene_plugin import ScenePlugin
    from mayavi.plugins.mayavi_plugin import MayaviPlugin

    # order matters for plugin start-up: core and workbench come first
    return [
        CorePlugin,
        WorkbenchPlugin,
        MayaviPlugin,
        ScenePlugin,
    ]
def get_non_gui_plugins():
    """Get list of basic mayavi plugin *instances* that do not add any
    views or actions."""
    return [plugin_class() for plugin_class in get_non_gui_plugin_classes()]
def get_plugin_classes():
    """Get list of default plugin classes to use for Mayavi.

    Returns the non-GUI plugin classes plus the UI plugins (logger, Mayavi
    UI, scene UI, a Python/IPython shell and a text editor).
    """
    # Force the selection of a toolkit:
    from traitsui.api import toolkit
    toolkit()
    from traits.etsconfig.api import ETSConfig

    try_use_ipython = preference_manager.root.use_ipython
    use_ipython = False
    if ETSConfig.toolkit == 'wx' and try_use_ipython:
        try:
            # If the right versions of IPython, EnvisagePlugins and
            # Pyface are not installed, this import will fail.
            from envisage.plugins.ipython_shell.view.ipython_shell_view \
                 import IPythonShellView
            use_ipython = True
        except Exception:
            # Best-effort: fall back to the plain Python shell.  The
            # original bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; `except Exception` keeps the fallback behavior
            # without hiding those.
            pass

    if use_ipython:
        from envisage.plugins.ipython_shell.ipython_shell_plugin import \
                IPythonShellPlugin
        PythonShellPlugin = IPythonShellPlugin
    else:
        from envisage.plugins.python_shell.python_shell_plugin import PythonShellPlugin

    from envisage.plugins.text_editor.text_editor_plugin import TextEditorPlugin
    from apptools.logger.plugin.logger_plugin import LoggerPlugin
    from tvtk.plugins.scene.ui.scene_ui_plugin import SceneUIPlugin
    from mayavi.plugins.mayavi_ui_plugin import MayaviUIPlugin

    plugins = get_non_gui_plugin_classes()
    plugins.extend([
        LoggerPlugin,
        MayaviUIPlugin,
        SceneUIPlugin,
        PythonShellPlugin,
        TextEditorPlugin,
        ])
    return plugins
def get_plugins():
    """Instantiate the default plugins to use for Mayavi."""
    instances = []
    for plugin_class in get_plugin_classes():
        instances.append(plugin_class())
    return instances
###########################################################################
# `Mayavi` class.
###########################################################################
class Mayavi(HasTraits):
    """The Mayavi application class.

    This class may be easily subclassed to do something different.
    For example, one way to script MayaVi (as a standalone application
    and not interactively) is to subclass this and do the needful.
    """

    # The main envisage application.
    application = Instance('envisage.ui.workbench.api.WorkbenchApplication')

    # Turn this off if you don't want the workbench to start the GUI
    # event loop.
    start_gui_event_loop = Bool(True, desc='start a GUI event loop')

    # The MayaVi Script instance.
    script = Instance('mayavi.plugins.script.Script')

    # The logging mode.
    log_mode = Int(logging.ERROR, desc='the logging mode to use')

    def main(self, argv=None, plugins=None):
        """The main application is created and launched here.

        Parameters
        ----------
        argv : list of strings
            The list of command line arguments.  The default is `None`
            where no command line arguments are parsed.  To support
            command line arguments you can pass `sys.argv[1:]`.
        plugins : list of Plugin objects
            List of plugins to start.  If none is provided it defaults
            to something meaningful.  Custom plugins are always
            appended (see ``get_custom_plugins``).
        """
        # Parse any cmd line args.
        if argv is None:
            argv = []
        self.parse_command_line(argv)
        if plugins is None:
            plugins = get_plugins()
        plugins += get_custom_plugins()
        # Create the application
        prefs = preference_manager.preferences
        app = MayaviWorkbenchApplication(plugins=plugins,
            preferences=prefs,
            start_gui_event_loop=self.start_gui_event_loop)
        self.application = app
        # Setup the logger.
        self.setup_logger()
        # Start the application.
        app.run()

    def setup_logger(self):
        """Setup logging for the application using the module-level
        `setup_logger` helper and this instance's `log_mode`."""
        setup_logger(logger, 'mayavi.log', mode=self.log_mode)

    def parse_command_line(self, argv):
        """Parse command line options.

        Parameters
        ----------
        - argv : `list` of `strings`
            The list of command line arguments.
        """
        from optparse import OptionParser
        usage = "usage: %prog [options]"
        parser = OptionParser(usage)
        # NOTE(review): no options are defined yet; this exists so that
        # ``--help`` and unknown options are handled consistently.
        (options, args) = parser.parse_args(argv)

    def run(self):
        """This function is called after the GUI has started.
        Override this to do whatever you want to do as a MayaVi
        script.  If this is not overridden then an empty MayaVi
        application will be started.

        *Make sure all other MayaVi specific imports are made here!*
        If you import MayaVi related code earlier you will run into
        difficulties.  Use 'self.script' to script the mayavi engine.
        """
        pass

    ######################################################################
    # Non-public interface.
    ######################################################################
    @on_trait_change('application.gui:started')
    def _on_application_gui_started(self, obj, trait_name, old, new):
        """This is called as soon as the Envisage GUI starts up.  The
        method is responsible for setting our script instance.
        """
        # The trait listener fires for several trait events; act only
        # when 'started' transitions to a truthy value.
        if trait_name != 'started' or not new:
            return
        app = self.application
        from mayavi.plugins.script import Script
        window = app.workbench.active_window
        # Set our script instance.
        self.script = window.get_service(Script)
        # Call self.run from the GUI thread.
        app.gui.invoke_later(self.run)
def main(argv=None):
    """Simple helper to start up the mayavi application.  This returns
    the running application."""
    app = Mayavi()
    app.main(argv)
    return app
# Script entry point: forward the command line arguments (sans program
# name) to the helper above.
if __name__ == '__main__':
    main(sys.argv[1:])
| |
# Copyright 2009 - Participatory Culture Foundation
#
# This file is part of djpubsubhubbub.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime, timedelta
import urllib2
from django.core.urlresolvers import reverse
from django.test import TestCase
from djpubsubhubbub.models import Subscription, SubscriptionManager
from djpubsubhubbub.signals import pre_subscribe, verified, updated
class MockResponse(object):
    """A canned HTTP response: exposes `status`, acts as its own
    ``info()`` object, and yields `data` exactly once via ``read()``.
    """

    def __init__(self, status, data=None):
        self.status = status
        self.data = data

    def info(self):
        # Mimic httplib responses where info() returns a headers-like
        # object; here the response doubles as that object.
        return self

    def read(self):
        # Hand out the payload on the first call only; subsequent
        # calls (or a None payload) return the empty string.
        payload = self.data
        self.data = None
        if payload is None:
            return ''
        return payload
class PSHBTestBase:
    """Mixin that stubs out PubSubHubbub's outgoing HTTP requests and
    records emitted signals, so tests can inspect both.

    `self.responses` holds canned responses (popped per request),
    `self.requests` records (url, data) pairs, and `self.signals`
    records every (signal, kwargs) emission.
    """

    urls = 'djpubsubhubbub.urls'

    def setUp(self):
        # Swap in a recording stand-in for the real HTTP request method.
        self._old_send_request = SubscriptionManager._send_request
        SubscriptionManager._send_request = self._send_request
        self.responses = []
        self.requests = []
        self.signals = []
        for connecter in pre_subscribe, verified, updated:
            def callback(signal=None, **kwargs):
                self.signals.append((signal, kwargs))
            # weak=False keeps the closure alive; dispatch_uid lets
            # tearDown disconnect exactly this receiver.
            connecter.connect(callback, dispatch_uid=connecter, weak=False)

    def tearDown(self):
        SubscriptionManager._send_request = self._old_send_request
        del self._old_send_request
        # BUG FIX: setUp connects to pre_subscribe, verified AND
        # updated, but previously only the first two were disconnected,
        # leaking the `updated` callback across test cases.
        for signal in pre_subscribe, verified, updated:
            signal.disconnect(dispatch_uid=signal)

    def _send_request(self, url, data):
        # Stand-in for SubscriptionManager._send_request: record the
        # call and replay the next canned response.
        self.requests.append((url, data))
        return self.responses.pop()
class PSHBSubscriptionManagerTest(PSHBTestBase, TestCase):
    """Tests for ``Subscription.objects.subscribe()``."""

    def test_sync_verify(self):
        """
        If the hub returns a 204 response, the subscription is verified and
        active.
        """
        self.responses.append(MockResponse(204))
        sub = Subscription.objects.subscribe('topic', 'hub', 'callback', 2000)
        # Both pre_subscribe and verified should have fired, in order.
        self.assertEquals(len(self.signals), 2)
        self.assertEquals(self.signals[0], (pre_subscribe, {'sender': sub,
                                                           'created': True}))
        self.assertEquals(self.signals[1], (verified, {'sender': sub}))
        self.assertEquals(sub.hub, 'hub')
        self.assertEquals(sub.topic, 'topic')
        self.assertEquals(sub.verified, True)
        # Allow a few seconds of slack between subscribe() and now.
        rough_expires = datetime.now() + timedelta(seconds=2000)
        self.assert_(abs(sub.lease_expires - rough_expires).seconds < 5,
                     'lease more than 5 seconds off')
        # Exactly one outgoing request, with the full PSHB payload.
        self.assertEquals(len(self.requests), 1)
        request = self.requests[0]
        self.assertEquals(request[0], 'hub')
        self.assertEquals(request[1]['mode'], 'subscribe')
        self.assertEquals(request[1]['topic'], 'topic')
        self.assertEquals(request[1]['callback'], 'callback')
        self.assertEquals(request[1]['verify'], ('async', 'sync'))
        self.assertEquals(request[1]['verify_token'], sub.verify_token)
        self.assertEquals(request[1]['lease_seconds'], 2000)

    def test_async_verify(self):
        """
        If the hub returns a 202 response, we should not assume the
        subscription is verified.
        """
        self.responses.append(MockResponse(202))
        sub = Subscription.objects.subscribe('topic', 'hub', 'callback', 2000)
        # Only pre_subscribe fires; verification is pending.
        self.assertEquals(len(self.signals), 1)
        self.assertEquals(self.signals[0], (pre_subscribe, {'sender': sub,
                                                           'created': True}))
        self.assertEquals(sub.hub, 'hub')
        self.assertEquals(sub.topic, 'topic')
        self.assertEquals(sub.verified, False)
        rough_expires = datetime.now() + timedelta(seconds=2000)
        self.assert_(abs(sub.lease_expires - rough_expires).seconds < 5,
                     'lease more than 5 seconds off')
        self.assertEquals(len(self.requests), 1)
        request = self.requests[0]
        self.assertEquals(request[0], 'hub')
        self.assertEquals(request[1]['mode'], 'subscribe')
        self.assertEquals(request[1]['topic'], 'topic')
        self.assertEquals(request[1]['callback'], 'callback')
        self.assertEquals(request[1]['verify'], ('async', 'sync'))
        self.assertEquals(request[1]['verify_token'], sub.verify_token)
        self.assertEquals(request[1]['lease_seconds'], 2000)

    # NOTE(review): method name has a typo -- "least" should read
    # "lease"; kept as-is to avoid churning test identifiers.
    def test_least_seconds_default(self):
        """
        If the number of seconds to lease the subscription is not specified, it
        should default to 2592000 (30 days).
        """
        self.responses.append(MockResponse(202))
        sub = Subscription.objects.subscribe('topic', 'hub', 'callback')
        rough_expires = datetime.now() + timedelta(seconds=2592000)
        self.assert_(abs(sub.lease_expires - rough_expires).seconds < 5,
                     'lease more than 5 seconds off')
        self.assertEquals(len(self.requests), 1)
        request = self.requests[0]
        self.assertEquals(request[1]['lease_seconds'], 2592000)

    def test_error_on_subscribe_raises_URLError(self):
        """
        If a non-202/204 status is returned, raise a URLError.
        """
        self.responses.append(MockResponse(500, 'error data'))
        try:
            Subscription.objects.subscribe('topic', 'hub', 'callback')
        except urllib2.URLError, e:
            # The error body from the hub is included in the reason.
            self.assertEquals(e.reason,
                              'error subscribing to topic on hub:\nerror data')
        else:
            self.fail('subscription did not raise URLError exception')
class PSHBCallbackViewTestCase(PSHBTestBase, TestCase):
    """Tests for the GET (verification) side of the callback view."""

    def test_verify(self):
        """
        Getting the callback from the server should verify the subscription.
        """
        sub = Subscription.objects.create(
            topic='topic',
            hub='hub',
            verified=False)
        verify_token = sub.generate_token('subscribe')
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(sub.pk,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic,
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token})
        # The view must echo the challenge back verbatim.
        self.assertEquals(response.status_code, 200)
        self.assertEquals(response.content, 'challenge')
        # Reload from the DB to observe the persisted state change.
        sub = Subscription.objects.get(pk=sub.pk)
        self.assertEquals(sub.verified, True)
        self.assertEquals(len(self.signals), 1)
        self.assertEquals(self.signals[0], (verified, {'sender': sub}))

    def test_404(self):
        """
        Various things should return a 404:

        * invalid primary key in the URL
        * token doesn't start with 'subscribe'
        * subscription doesn't exist
        * token doesn't match the subscription
        """
        sub = Subscription.objects.create(
            topic='topic',
            hub='hub',
            verified=False)
        verify_token = sub.generate_token('subscribe')
        # Nonexistent primary key (0) plus a truncated token.
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(0,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic,
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token[1:]})
        self.assertEquals(response.status_code, 404)
        self.assertEquals(len(self.signals), 0)
        # Valid pk, but token missing its leading character.
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(sub.pk,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic,
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token[1:]})
        self.assertEquals(response.status_code, 404)
        self.assertEquals(len(self.signals), 0)
        # Topic does not match the subscription's topic.
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(sub.pk,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic + 'extra',
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token})
        self.assertEquals(response.status_code, 404)
        self.assertEquals(len(self.signals), 0)
        # Token truncated at the end no longer matches.
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(sub.pk,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic,
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token[:-5]})
        self.assertEquals(response.status_code, 404)
        self.assertEquals(len(self.signals), 0)
class PSHBUpdateTestCase(PSHBTestBase, TestCase):
    """Tests for the POST (content distribution) side of the callback
    view, including automatic resubscription when the feed's declared
    hub or self link changes."""

    def test_update(self):
        """A posted Atom feed fires `updated` with the parsed feed."""
        # this data comes from
        # http://pubsubhubbub.googlecode.com/svn/trunk/pubsubhubbub-core-0.1.html#anchor3
        update_data = """<?xml version="1.0"?>
<atom:feed>
<!-- Normally here would be source, title, etc ... -->
<link rel="hub" href="http://myhub.example.com/endpoint" />
<link rel="self" href="http://publisher.example.com/happycats.xml" />
<updated>2008-08-11T02:15:01Z</updated>
<!-- Example of a full entry. -->
<entry>
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<content>
What a happy cat. Full content goes here.
</content>
</entry>
<!-- Example of an entity that isn't full/is truncated. This is implied
by the lack of a <content> element and a <summary> element instead. -->
<entry >
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<summary>
What a happy cat!
</summary>
</entry>
<!-- Meta-data only; implied by the lack of <content> and
<summary> elements. -->
<entry>
<title>Garfield</title>
<link rel="alternate" href="http://publisher.example.com/happycat24.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
</entry>
<!-- Context entry that's meta-data only and not new. Implied because the
update time on this entry is before the //atom:feed/updated time. -->
<entry>
<title>Nermal</title>
<link rel="alternate" href="http://publisher.example.com/happycat23s.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-07-10T12:28:13Z</updated>
</entry>
</atom:feed>
"""
        sub = Subscription.objects.create(
            hub="http://myhub.example.com/endpoint",
            topic="http://publisher.example.com/happycats.xml")
        callback_data = []
        # Capture the (sender, update) payload of each `updated` signal.
        updated.connect(
            lambda sender=None, update=None, **kwargs: callback_data.append(
                (sender, update)),
            weak=False)
        response = self.client.post(reverse('pubsubhubbub_callback',
                                            args=(sub.pk,)),
                                    update_data, 'application/atom+xml')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(callback_data), 1)
        sender, update = callback_data[0]
        self.assertEquals(sender, sub)
        # All four entries should be parsed out of the feed.
        self.assertEquals(len(update.entries), 4)
        self.assertEquals(update.entries[0].id,
                          'http://publisher.example.com/happycat25.xml')
        self.assertEquals(update.entries[1].id,
                          'http://publisher.example.com/happycat25.xml')
        self.assertEquals(update.entries[2].id,
                          'http://publisher.example.com/happycat25.xml')
        self.assertEquals(update.entries[3].id,
                          'http://publisher.example.com/happycat25.xml')

    def test_update_with_changed_hub(self):
        """A feed declaring a new hub triggers resubscription there."""
        update_data = """<?xml version="1.0"?>
<atom:feed>
<!-- Normally here would be source, title, etc ... -->
<link rel="hub" href="http://myhub.example.com/endpoint" />
<link rel="self" href="http://publisher.example.com/happycats.xml" />
<updated>2008-08-11T02:15:01Z</updated>
<entry>
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<content>
What a happy cat. Full content goes here.
</content>
</entry>
</atom:feed>
"""
        sub = Subscription.objects.create(
            hub="hub",
            topic="http://publisher.example.com/happycats.xml",
            lease_expires=datetime.now() + timedelta(days=1))
        callback_data = []
        updated.connect(
            lambda sender=None, update=None, **kwargs: callback_data.append(
                (sender, update)),
            weak=False)
        # Canned 204 for the resubscription request to the new hub.
        self.responses.append(MockResponse(204))
        response = self.client.post(reverse('pubsubhubbub_callback',
                                            args=(sub.pk,)),
                                    update_data, 'application/atom+xml')
        self.assertEquals(response.status_code, 200)
        # A verified subscription against the feed-declared hub exists.
        self.assertEquals(
            Subscription.objects.filter(
                hub='http://myhub.example.com/endpoint',
                topic='http://publisher.example.com/happycats.xml',
                verified=True).count(), 1)
        self.assertEquals(len(self.requests), 1)
        self.assertEquals(self.requests[0][0],
                          'http://myhub.example.com/endpoint')
        self.assertEquals(self.requests[0][1]['callback'],
                          'http://testserver/1/')
        # The remaining lease (~1 day) is carried over to the new hub.
        self.assert_((self.requests[0][1]['lease_seconds'] - 86400) < 5)

    def test_update_with_changed_self(self):
        """A feed declaring a new self link triggers resubscription."""
        update_data = """<?xml version="1.0"?>
<atom:feed>
<!-- Normally here would be source, title, etc ... -->
<link rel="hub" href="http://myhub.example.com/endpoint" />
<link rel="self" href="http://publisher.example.com/happycats.xml" />
<updated>2008-08-11T02:15:01Z</updated>
<entry>
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<content>
What a happy cat. Full content goes here.
</content>
</entry>
</atom:feed>
"""
        sub = Subscription.objects.create(
            hub="http://myhub.example.com/endpoint",
            topic="topic",
            lease_expires=datetime.now() + timedelta(days=1))
        callback_data = []
        updated.connect(
            lambda sender=None, update=None, **kwargs: callback_data.append(
                (sender, update)),
            weak=False)
        self.responses.append(MockResponse(204))
        response = self.client.post(reverse('pubsubhubbub_callback',
                                            args=(sub.pk,)),
                                    update_data, 'application/atom+xml')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(
            Subscription.objects.filter(
                hub='http://myhub.example.com/endpoint',
                topic='http://publisher.example.com/happycats.xml',
                verified=True).count(), 1)
        self.assertEquals(len(self.requests), 1)
        self.assertEquals(self.requests[0][0],
                          'http://myhub.example.com/endpoint')
        self.assertEquals(self.requests[0][1]['callback'],
                          'http://testserver/1/')
        self.assert_((self.requests[0][1]['lease_seconds'] - 86400) < 5)

    def test_update_with_changed_hub_and_self(self):
        """Both hub and self changing still yields one resubscription."""
        update_data = """<?xml version="1.0"?>
<atom:feed>
<!-- Normally here would be source, title, etc ... -->
<link rel="hub" href="http://myhub.example.com/endpoint" />
<link rel="self" href="http://publisher.example.com/happycats.xml" />
<updated>2008-08-11T02:15:01Z</updated>
<entry>
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<content>
What a happy cat. Full content goes here.
</content>
</entry>
</atom:feed>
"""
        sub = Subscription.objects.create(
            hub="hub",
            topic="topic",
            lease_expires=datetime.now() + timedelta(days=1))
        callback_data = []
        updated.connect(
            lambda sender=None, update=None, **kwargs: callback_data.append(
                (sender, update)),
            weak=False)
        self.responses.append(MockResponse(204))
        response = self.client.post(reverse('pubsubhubbub_callback',
                                            args=(sub.pk,)),
                                    update_data, 'application/atom+xml')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(
            Subscription.objects.filter(
                hub='http://myhub.example.com/endpoint',
                topic='http://publisher.example.com/happycats.xml',
                verified=True).count(), 1)
        self.assertEquals(len(self.requests), 1)
        self.assertEquals(self.requests[0][0],
                          'http://myhub.example.com/endpoint')
        self.assertEquals(self.requests[0][1]['callback'],
                          'http://testserver/1/')
        self.assert_((self.requests[0][1]['lease_seconds'] - 86400) < 5)
| |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
from edb import errors
from edb.common import verutils
from edb import edgeql
from edb.edgeql import ast as qlast
from edb.edgeql import compiler as qlcompiler
from edb.edgeql import qltypes as ft
from edb.edgeql import parser as qlparser
from edb.edgeql import utils as qlutils
from edb.schema import scalars as s_scalars
from . import abc as s_abc
from . import annos as s_anno
from . import delta as sd
from . import expr as s_expr
from . import functions as s_func
from . import inheriting
from . import name as sn
from . import objects as so
from . import types as s_types
from . import pseudo as s_pseudo
from . import referencing
from . import utils
if TYPE_CHECKING:
from edb.common import parsing as c_parsing
from edb.schema import schema as s_schema
T = TypeVar('T')
def _assert_not_none(value: Optional[T]) -> T:
if value is None:
raise TypeError("A value is expected")
return value
def merge_constraint_params(
    constraint: Constraint,
    supers: List[Constraint],
    field_name: str,
    *,
    ignore_local: bool,
    schema: s_schema.Schema,
) -> Any:
    """Merge callback for the `params` field of constraints.

    Concrete constraints (those with a subject) always take their
    parameters from the first ancestor; abstract constraints keep
    their own explicit value.
    """
    if constraint.get_subject(schema) is not None:
        # concrete constraints cannot redefined parameters and always
        # inherit from super.
        return supers[0].get_explicit_field_value(schema, field_name, None)
    # consistency of abstract constraint params is checked
    # in CreateConstraint.validate_create
    return constraint.get_explicit_field_value(schema, field_name, None)
def constraintname_from_fullname(name: sn.Name) -> sn.QualName:
    """Normalize a constraint's full name into its index key.

    The first qualifier (where the constraint is declared) is dropped,
    so the key is independent of the declaration site.
    """
    assert isinstance(name, sn.QualName)
    base = sn.shortname_from_fullname(name)
    qualifiers = sn.quals_from_fullname(name)
    return sn.QualName(
        module='__',
        name=sn.get_specialized_name(base, *qualifiers[1:]),
    )
def _constraint_object_key(schema: s_schema.Schema, o: so.Object) -> sn.Name:
    # Index key function for ObjectIndexByConstraintName below.
    return constraintname_from_fullname(o.get_name(schema))
class ObjectIndexByConstraintName(
    so.ObjectIndexBase[sn.Name, so.Object_T],
    key=_constraint_object_key,
):
    """Object index keyed by declaration-site-independent constraint
    names (see constraintname_from_fullname)."""

    @classmethod
    def get_key_for_name(
        cls,
        schema: s_schema.Schema,
        name: sn.Name,
    ) -> sn.Name:
        # Apply the same normalization used by the index key function.
        return constraintname_from_fullname(name)
class Constraint(
    referencing.ReferencedInheritingObject,
    s_func.CallableObject, s_abc.Constraint,
    qlkind=ft.SchemaObjectClass.CONSTRAINT,
    data_safe=True,
):
    """A schema constraint.

    Either an abstract (generic) constraint definition, or a concrete
    constraint attached to a subject (`get_subject` is None only for
    the former).
    """

    # Formal parameters of the constraint; concrete constraints always
    # inherit these from their generic ancestor (see
    # merge_constraint_params).
    params = so.SchemaField(
        s_func.FuncParameterList,
        coerce=True,
        compcoef=0.4,
        default=so.DEFAULT_CONSTRUCTOR,
        inheritable=True,
        merge_fn=merge_constraint_params,
    )

    # The check expression of the (abstract) constraint.
    expr = so.SchemaField(
        s_expr.Expression, default=None, compcoef=0.909,
        coerce=True)

    # Optional expression applied to the subject before checking.
    subjectexpr = so.SchemaField(
        s_expr.Expression,
        default=None, compcoef=0.833, coerce=True,
        ddl_identity=True)

    # Fully-compiled check expression for concrete constraints.
    finalexpr = so.SchemaField(
        s_expr.Expression,
        default=None, compcoef=0.909, coerce=True)

    # The object the concrete constraint is attached to.
    subject = so.SchemaField(
        so.Object, default=None, inheritable=False)

    # Concrete argument expressions bound to the generic parameters.
    args = so.SchemaField(
        s_expr.ExpressionList,
        default=None, coerce=True, inheritable=False,
        compcoef=0.875, ddl_identity=True)

    delegated = so.SchemaField(
        bool,
        default=False,
        inheritable=False,
        special_ddl_syntax=True,
        compcoef=0.9,
    )

    # User-facing error message template (see format_error_message).
    errmessage = so.SchemaField(
        str, default=None, compcoef=0.971, allow_ddl_set=True)

    is_aggregate = so.SchemaField(
        bool, default=False, compcoef=0.971, allow_ddl_set=False)

    def get_name_impacting_ancestors(
        self, schema: s_schema.Schema,
    ) -> List[Constraint]:
        """Ancestors whose renames affect this constraint's name."""
        if self.generic(schema):
            return []
        else:
            # Concrete constraint names derive from the nearest
            # generic ancestor.
            return [self.get_nearest_generic_parent(schema)]

    def get_verbosename(
        self,
        schema: s_schema.Schema,
        *,
        with_parent: bool=False
    ) -> str:
        """Return a human-readable description of this constraint."""
        vn = super().get_verbosename(schema)
        if self.generic(schema):
            return f'abstract {vn}'
        else:
            if with_parent:
                subject = self.get_subject(schema)
                assert subject is not None
                pvn = subject.get_verbosename(
                    schema, with_parent=True)
                return f'{vn} of {pvn}'
            else:
                return vn

    def generic(self, schema: s_schema.Schema) -> bool:
        """True if this is an abstract constraint (has no subject)."""
        return self.get_subject(schema) is None

    def get_subject(self, schema: s_schema.Schema) -> ConsistencySubject:
        return cast(
            ConsistencySubject,
            self.get_field_value(schema, 'subject'),
        )

    def format_error(
        self,
        schema: s_schema.Schema,
    ) -> str:
        """Render the violation error message for this constraint."""
        subject = self.get_subject(schema)
        titleattr = subject.get_annotation(schema, sn.QualName('std', 'title'))
        if not titleattr:
            subjname = subject.get_shortname(schema)
            subjtitle = subjname.name
        else:
            # NOTE(review): this uses the annotation object itself as
            # the title -- verify whether the annotation's *value* was
            # intended here.
            subjtitle = titleattr
        return self.format_error_message(schema, subjtitle)

    def format_error_message(
        self,
        schema: s_schema.Schema,
        subjtitle: str,
    ) -> str:
        """Expand the errmessage template with the subject title and
        any constraint arguments."""
        errmsg = self.get_errmessage(schema)
        args = self.get_args(schema)
        if args:
            # Map the generic parameters to the concrete argument
            # sources so they can be interpolated into the message.
            args_ql: List[qlast.Base] = [
                qlast.Path(steps=[qlast.ObjectRef(name=subjtitle)]),
            ]
            args_ql.extend(arg.qlast for arg in args)
            constr_base: Constraint = schema.get(
                self.get_name(schema), type=type(self))
            index_parameters = qlutils.index_parameters(
                args_ql,
                parameters=constr_base.get_params(schema),
                schema=schema,
            )
            expr = constr_base.get_field_value(schema, 'expr')
            expr_ql = qlparser.parse(expr.text)
            qlutils.inline_parameters(expr_ql, index_parameters)
            args_map = {name: edgeql.generate_source(val, pretty=False)
                        for name, val in index_parameters.items()}
        else:
            args_map = {'__subject__': subjtitle}
        assert errmsg is not None
        formatted = errmsg.format(**args_map)
        return formatted

    def as_alter_delta(
        self,
        other: Constraint,
        *,
        self_schema: s_schema.Schema,
        other_schema: s_schema.Schema,
        confidence: float,
        context: so.ComparisonContext,
    ) -> sd.ObjectCommand[Constraint]:
        return super().as_alter_delta(
            other,
            self_schema=self_schema,
            other_schema=other_schema,
            confidence=confidence,
            context=context,
        )

    def as_delete_delta(
        self,
        *,
        schema: s_schema.Schema,
        context: so.ComparisonContext,
    ) -> sd.ObjectCommand[Constraint]:
        return super().as_delete_delta(schema=schema, context=context)

    def get_ddl_identity(
        self,
        schema: s_schema.Schema,
    ) -> Optional[Dict[str, str]]:
        ddl_identity = super().get_ddl_identity(schema)
        # An inherited subjectexpr is not part of this object's own
        # DDL identity.
        if (
            ddl_identity is not None
            and self.field_is_inherited(schema, 'subjectexpr')
        ):
            ddl_identity.pop('subjectexpr', None)
        return ddl_identity

    @classmethod
    def get_root_classes(cls) -> Tuple[sn.QualName, ...]:
        return (
            sn.QualName(module='std', name='constraint'),
        )

    @classmethod
    def get_default_base_name(cls) -> sn.QualName:
        # BUG FIX: the first parameter of this classmethod was
        # misleadingly named `self`.
        return sn.QualName('std', 'constraint')
class ConsistencySubject(
    so.QualifiedObject,
    so.InheritingObject,
    s_anno.AnnotationSubject,
):
    """Mixin for schema objects that can have constraints attached."""

    # Reference dictionary exposing the constraints attached to this
    # object.
    constraints_refs = so.RefDict(
        attr='constraints',
        ref_cls=Constraint)

    constraints = so.SchemaField(
        ObjectIndexByConstraintName[Constraint],
        inheritable=False, ephemeral=True, coerce=True, compcoef=0.887,
        default=so.DEFAULT_CONSTRUCTOR
    )

    def add_constraint(
        self,
        schema: s_schema.Schema,
        constraint: Constraint,
        replace: bool = False,
    ) -> s_schema.Schema:
        """Attach *constraint* to this object, returning the new schema."""
        return self.add_classref(
            schema,
            'constraints',
            constraint,
            replace=replace,
        )

    def can_accept_constraints(self, schema: s_schema.Schema) -> bool:
        # Subclasses may override to forbid constraints.
        return True
class ConsistencySubjectCommandContext:
    """Marker mixin for contexts of commands whose subject may own
    constraints."""
    # context mixin
    pass
class ConsistencySubjectCommand(
    inheriting.InheritingObjectCommand[so.InheritingObjectT],
):
    """Base command for objects that can carry constraints."""
    pass
class ConstraintCommandContext(sd.ObjectCommandContext[Constraint],
                               s_anno.AnnotationSubjectCommandContext):
    """Command context for constraint DDL operations."""
    pass
class ConstraintCommand(
referencing.ReferencedInheritingObjectCommand[Constraint],
s_func.CallableCommand[Constraint],
context_class=ConstraintCommandContext,
referrer_context_class=ConsistencySubjectCommandContext,
):
@classmethod
def _validate_subcommands(
    cls,
    astnode: qlast.DDLOperation,
) -> None:
    """Reject SET of 'subject'/'subjectexpr' in constraint DDL bodies."""
    reserved = {'subject', 'subjectexpr'}
    for subcmd in astnode.commands:
        if not isinstance(subcmd, qlast.SetField):
            continue
        cname = subcmd.name
        if cname in reserved:
            raise errors.InvalidConstraintDefinitionError(
                f'{cname} is not a valid constraint annotation',
                context=subcmd.context)
@classmethod
def _classname_quals_from_ast(
    cls,
    schema: s_schema.Schema,
    astnode: qlast.NamedDDL,
    base_name: sn.Name,
    referrer_name: sn.QualName,
    context: sd.CommandContext,
) -> Tuple[str, ...]:
    """Derive the name qualifiers for a constraint from its DDL AST.

    Abstract constraints (CREATE ABSTRACT CONSTRAINT) get no extra
    qualifiers; concrete constraints get a single qualifier hashed
    from their argument and subject expressions.
    """
    if isinstance(astnode, qlast.CreateConstraint):
        return ()
    exprs = []
    args = cls._constraint_args_from_ast(schema, astnode, context)
    for arg in args:
        exprs.append(arg.text)
    assert isinstance(astnode, qlast.ConcreteConstraintOp)
    if astnode.subjectexpr:
        # use the normalized text directly from the expression
        expr = s_expr.Expression.from_ast(
            astnode.subjectexpr, schema, context.modaliases)
        exprs.append(expr.text)
    return (cls._name_qual_from_exprs(schema, exprs),)
@classmethod
def _classname_quals_from_name(
    cls,
    name: sn.QualName
) -> Tuple[str, ...]:
    """Extract the expression-hash qualifier from a full name."""
    quals = sn.quals_from_fullname(name)
    # Only the last qualifier (the expression hash) is significant.
    return (quals[-1],)
@classmethod
def _constraint_args_from_ast(
    cls,
    schema: s_schema.Schema,
    astnode: qlast.NamedDDL,
    context: sd.CommandContext,
) -> List[s_expr.Expression]:
    """Convert the AST argument list into schema expressions."""
    assert isinstance(astnode, qlast.ConcreteConstraintOp)
    if not astnode.args:
        return []
    return [
        s_expr.Expression.from_ast(arg, schema, context.modaliases)
        for arg in astnode.args
    ]
@classmethod
def as_inherited_ref_ast(
    cls,
    schema: s_schema.Schema,
    context: sd.CommandContext,
    name: sn.Name,
    parent: so.Object,
) -> qlast.ObjectDDL:
    """Build the DDL AST node for a constraint inherited from *parent*."""
    assert isinstance(parent, Constraint)
    astnode_cls = cls.referenced_astnode  # type: ignore
    nref = cls.get_inherited_ref_name(schema, context, parent, name)
    args = []
    # BUG FIX (cleanup): parent.get_args() was fetched twice with a
    # redundant assert; a truthy result is already non-None.
    parent_args = parent.get_args(schema)
    if parent_args:
        for arg_expr in parent_args:
            args.append(edgeql.parse_fragment(arg_expr.text))
    subj_expr = parent.get_subjectexpr(schema)
    if (
        subj_expr is None
        # Don't include subjectexpr if it was inherited from an
        # abstract constraint.
        or parent.get_nearest_generic_parent(
            schema).get_subjectexpr(schema) is not None
    ):
        subj_expr_ql = None
    else:
        subj_expr_ql = edgeql.parse_fragment(subj_expr.text)
    astnode = astnode_cls(name=nref, args=args, subjectexpr=subj_expr_ql)
    return cast(qlast.ObjectDDL, astnode)
def compile_expr_field(
    self,
    schema: s_schema.Schema,
    context: sd.CommandContext,
    field: so.Field[Any],
    value: s_expr.Expression,
    track_schema_ref_exprs: bool=False,
) -> s_expr.Expression:
    """Compile an expression field of a constraint.

    Concrete constraints compile subjectexpr/finalexpr anchored on
    their subject; abstract constraints compile expr/subjectexpr
    against their declared parameters.
    """
    base: Optional[so.Object] = None
    if isinstance(self, AlterConstraint):
        base = self.scls.get_subject(schema)
    else:
        referrer_ctx = self.get_referrer_context(context)
        if referrer_ctx:
            base = referrer_ctx.op.scls
    if base is not None:
        # Concrete constraint
        if field.name == 'expr':
            # Concrete constraints cannot redefine the base check
            # expressions, and so the only way we should get here
            # is through field inheritance, so check that the
            # value is compiled and move on.
            if not value.is_compiled():
                mcls = self.get_schema_metaclass()
                dn = mcls.get_schema_class_displayname()
                raise errors.InternalServerError(
                    f'uncompiled expression in the {field.name!r} field of'
                    f' {dn} {self.classname!r}'
                )
            return value
        elif field.name in {'subjectexpr', 'finalexpr'}:
            return s_expr.Expression.compiled(
                value,
                schema=schema,
                options=qlcompiler.CompilerOptions(
                    modaliases=context.modaliases,
                    anchors={qlast.Subject().name: base},
                    path_prefix_anchor=qlast.Subject().name,
                    allow_generic_type_output=True,
                    schema_object_context=self.get_schema_metaclass(),
                    apply_query_rewrites=not context.stdmode,
                    track_schema_ref_exprs=track_schema_ref_exprs,
                ),
            )
        else:
            # BUG FIX: track_schema_ref_exprs was silently dropped in
            # this super() call, unlike the fall-through branch below.
            return super().compile_expr_field(
                schema, context, field, value, track_schema_ref_exprs)
    elif field.name in ('expr', 'subjectexpr'):
        # Abstract constraint.
        params = self._get_params(schema, context)
        param_anchors = s_func.get_params_symtable(
            params,
            schema,
            inlined_defaults=False,
        )
        return s_expr.Expression.compiled(
            value,
            schema=schema,
            options=qlcompiler.CompilerOptions(
                modaliases=context.modaliases,
                anchors=param_anchors,
                func_params=params,
                allow_generic_type_output=True,
                schema_object_context=self.get_schema_metaclass(),
                apply_query_rewrites=not context.stdmode,
                track_schema_ref_exprs=track_schema_ref_exprs,
            ),
        )
    else:
        return super().compile_expr_field(
            schema, context, field, value, track_schema_ref_exprs)
def get_dummy_expr_field_value(
    self,
    schema: s_schema.Schema,
    context: sd.CommandContext,
    field: so.Field[Any],
    value: Any,
) -> Optional[s_expr.Expression]:
    """Return a trivial placeholder for the constraint's expression
    fields."""
    if field.name not in ('expr', 'subjectexpr', 'finalexpr'):
        raise NotImplementedError(f'unhandled field {field.name!r}')
    return s_expr.Expression(text='SELECT false')
@classmethod
def get_inherited_ref_name(
cls,
schema: s_schema.Schema,
context: sd.CommandContext,
parent: so.Object,
name: sn.Name,
) -> qlast.ObjectRef:
bn = sn.shortname_from_fullname(name)
return utils.name_to_ast_ref(bn)
def get_ref_implicit_base_delta(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refcls: Constraint,
implicit_bases: List[Constraint],
) -> inheriting.BaseDelta_T[Constraint]:
child_bases = refcls.get_bases(schema).objects(schema)
return inheriting.delta_bases(
[b.get_name(schema) for b in child_bases],
[b.get_name(schema) for b in implicit_bases],
t=Constraint,
)
def get_ast_attr_for_field(
self,
field: str,
astnode: Type[qlast.DDLOperation],
) -> Optional[str]:
if field in ('subjectexpr', 'args'):
return field
elif (
field == 'delegated'
and astnode is qlast.CreateConcreteConstraint
):
return field
else:
return super().get_ast_attr_for_field(field, astnode)
def get_ddl_identity_fields(
self,
context: sd.CommandContext,
) -> Tuple[so.Field[Any], ...]:
id_fields = super().get_ddl_identity_fields(context)
omit_fields = set()
if not self.has_ddl_identity('subjectexpr'):
omit_fields.add('subjectexpr')
if self.get_referrer_context(context) is None:
omit_fields.add('args')
if omit_fields:
return tuple(f for f in id_fields if f.name not in omit_fields)
else:
return id_fields
@classmethod
def localnames_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> Set[str]:
localnames = super().localnames_from_ast(
schema, astnode, context
)
# Set up the constraint parameters as part of names to be
# ignored in expression normalization.
if isinstance(astnode, qlast.CreateConstraint):
localnames |= {param.name for param in astnode.params}
elif isinstance(astnode, qlast.AlterConstraint):
# ALTER ABSTRACT CONSTRAINT doesn't repeat the params,
# but we can get them from the schema.
objref = astnode.name
# Merge the context modaliases and the command modaliases.
modaliases = dict(context.modaliases)
modaliases.update(
cls._modaliases_from_ast(schema, astnode, context))
# Get the original constraint.
constr = schema.get(
utils.ast_ref_to_name(objref),
module_aliases=modaliases,
type=Constraint,
)
localnames |= {param.get_parameter_name(schema) for param in
constr.get_params(schema).objects(schema)}
return localnames
    def _populate_concrete_constraint_attrs(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        subject_obj: Optional[so.Object],
        *,
        name: sn.QualName,
        subjectexpr: Optional[s_expr.Expression] = None,
        subjectexpr_inherited: bool = False,
        sourcectx: Optional[c_parsing.ParserContext] = None,
        args: Any = None,
        **kwargs: Any
    ) -> None:
        """Compute and record the attributes of a concrete constraint.

        Compiles the abstract base's check expression against the
        concrete subject (inlining ``subjectexpr`` and ``args``),
        validates the compiled result (bool-typed, path-shape and
        cardinality restrictions), and records the outcome through
        ``set_attribute_value``.

        NOTE(review): assumes the first resolved base is the abstract
        constraint being instantiated -- confirm against callers.
        """
        from edb.ir import ast as ir_ast
        from edb.ir import utils as ir_utils
        from . import pointers as s_pointers
        from . import links as s_links
        bases = self.get_resolved_attribute_value(
            'bases', schema=schema, context=context,
        )
        if not bases:
            bases = self.scls.get_bases(schema)
        constr_base = bases.objects(schema)[0]
        # If we have a concrete base, then we should inherit all of
        # these attrs through the normal inherit_fields() mechanisms,
        # and populating them ourselves will just mess up
        # inherited_fields.
        if not constr_base.generic(schema):
            return
        orig_subjectexpr = subjectexpr
        orig_subject = subject_obj
        base_subjectexpr = constr_base.get_field_value(schema, 'subjectexpr')
        if subjectexpr is None:
            subjectexpr = base_subjectexpr
        elif (base_subjectexpr is not None
                and subjectexpr.text != base_subjectexpr.text):
            # A concrete constraint may not override a subject
            # expression fixed by its abstract base.
            raise errors.InvalidConstraintDefinitionError(
                f'subjectexpr is already defined for {name}'
            )
        if (isinstance(subject_obj, s_scalars.ScalarType)
                and constr_base.get_is_aggregate(schema)):
            raise errors.InvalidConstraintDefinitionError(
                f'{constr_base.get_verbosename(schema)} may not '
                f'be used on scalar types'
            )
        if subjectexpr is not None:
            subject_ql = subjectexpr.qlast
            subject = subject_ql
        else:
            subject = subject_obj
        expr: s_expr.Expression = constr_base.get_field_value(schema, 'expr')
        if not expr:
            raise errors.InvalidConstraintDefinitionError(
                f'missing constraint expression in {name}')
        # Re-parse instead of using expr.qlast, because we mutate
        # the AST below.
        expr_ql = qlparser.parse(expr.text)
        if not args:
            args = constr_base.get_field_value(schema, 'args')
        attrs = dict(kwargs)
        # Tracks which attrs should be flagged as inherited when set.
        inherited = dict()
        if orig_subjectexpr is not None:
            attrs['subjectexpr'] = orig_subjectexpr
            inherited['subjectexpr'] = subjectexpr_inherited
        else:
            base_subjectexpr = constr_base.get_subjectexpr(schema)
            if base_subjectexpr is not None:
                attrs['subjectexpr'] = base_subjectexpr
                inherited['subjectexpr'] = True
        errmessage = attrs.get('errmessage')
        if not errmessage:
            errmessage = constr_base.get_errmessage(schema)
            inherited['errmessage'] = True
        attrs['errmessage'] = errmessage
        if subject is not orig_subject:
            # subject has been redefined
            assert isinstance(subject, qlast.Base)
            qlutils.inline_anchors(
                expr_ql, anchors={qlast.Subject().name: subject})
            subject = orig_subject
        if args:
            # Inline the concrete argument expressions in place of the
            # abstract constraint's parameters.
            args_ql: List[qlast.Base] = [
                qlast.Path(steps=[qlast.Subject()]),
            ]
            args_ql.extend(arg.qlast for arg in args)
            args_map = qlutils.index_parameters(
                args_ql,
                parameters=constr_base.get_params(schema),
                schema=schema,
            )
            qlutils.inline_parameters(expr_ql, args_map)
            attrs['args'] = args
        assert subject is not None
        final_expr = s_expr.Expression.compiled(
            s_expr.Expression.from_ast(expr_ql, schema, {}),
            schema=schema,
            options=qlcompiler.CompilerOptions(
                anchors={qlast.Subject().name: subject},
                path_prefix_anchor=qlast.Subject().name,
                apply_query_rewrites=not context.stdmode,
            ),
        )
        bool_t = schema.get('std::bool', type=s_scalars.ScalarType)
        assert isinstance(final_expr.irast, ir_ast.Statement)
        expr_type = final_expr.irast.stype
        # A constraint check must evaluate to std::bool.
        if not expr_type.issubclass(schema, bool_t):
            raise errors.InvalidConstraintDefinitionError(
                f'{name} constraint expression expected '
                f'to return a bool value, got '
                f'{expr_type.get_verbosename(schema)}',
                context=sourcectx
            )
        if subjectexpr is not None:
            assert isinstance(subject_obj, (s_types.Type, s_pointers.Pointer))
            singletons = frozenset({subject_obj})
            final_subjectexpr = s_expr.Expression.compiled(
                subjectexpr,
                schema=schema,
                options=qlcompiler.CompilerOptions(
                    anchors={qlast.Subject().name: subject},
                    path_prefix_anchor=qlast.Subject().name,
                    singletons=singletons,
                    apply_query_rewrites=not context.stdmode,
                ),
            )
            assert isinstance(final_subjectexpr.irast, ir_ast.Statement)
            # Validate the shape of every path referenced by the
            # compiled check expression.
            refs = ir_utils.get_longest_paths(final_expr.irast)
            has_multi = False
            for ref in refs:
                while ref.rptr:
                    rptr = ref.rptr
                    if rptr.dir_cardinality.is_multi():
                        has_multi = True
                    # We don't need to look further than the subject,
                    # which is always valid. (And which is a singleton
                    # in a constraint expression if it is itself a
                    # singleton, regardless of other parts of the path.)
                    if (
                        isinstance(rptr.ptrref, ir_ast.PointerRef)
                        and rptr.ptrref.id == subject_obj.id
                    ):
                        break
                    if (not isinstance(rptr.ptrref,
                                       ir_ast.TupleIndirectionPointerRef)
                            and rptr.ptrref.source_ptr is None
                            and rptr.source.rptr is not None):
                        if isinstance(subject, s_links.Link):
                            raise errors.InvalidConstraintDefinitionError(
                                "link constraints may not access "
                                "the link target",
                                context=sourcectx
                            )
                        else:
                            raise errors.InvalidConstraintDefinitionError(
                                "constraints cannot contain paths with more "
                                "than one hop",
                                context=sourcectx
                            )
                    ref = rptr.source
            if has_multi and len(refs) > 1:
                raise errors.InvalidConstraintDefinitionError(
                    "cannot reference multiple links or properties in a "
                    "constraint where at least one link or property is MULTI",
                    context=sourcectx
                )
            if has_multi and ir_utils.contains_set_of_op(
                    final_subjectexpr.irast):
                raise errors.InvalidConstraintDefinitionError(
                    "cannot use aggregate functions or operators "
                    "in a non-aggregating constraint",
                    context=sourcectx
                )
        attrs['finalexpr'] = final_expr
        attrs['params'] = constr_base.get_params(schema)
        inherited['params'] = True
        attrs['abstract'] = False
        for k, v in attrs.items():
            self.set_attribute_value(k, v, inherited=bool(inherited.get(k)))
class CreateConstraint(
    ConstraintCommand,
    s_func.CreateCallableObject[Constraint],
    referencing.CreateReferencedInheritingObject[Constraint],
):
    """Implementation of CREATE [ABSTRACT] CONSTRAINT."""
    astnode = [qlast.CreateConcreteConstraint, qlast.CreateConstraint]
    referenced_astnode = qlast.CreateConcreteConstraint
    @classmethod
    def _get_param_desc_from_ast(
        cls,
        schema: s_schema.Schema,
        modaliases: Mapping[Optional[str], str],
        astnode: qlast.ObjectDDL,
        *,
        param_offset: int=0
    ) -> List[s_func.ParameterDesc]:
        """Extract parameter descriptors, injecting __subject__ first."""
        if not isinstance(astnode, qlast.CallableObjectCommand):
            # Concrete constraint.
            return []
        params = super()._get_param_desc_from_ast(
            schema, modaliases, astnode, param_offset=param_offset + 1)
        # Every abstract constraint implicitly receives the constrained
        # subject as its first positional parameter.
        params.insert(0, s_func.ParameterDesc(
            num=param_offset,
            name=sn.UnqualName('__subject__'),
            default=None,
            type=s_pseudo.PseudoTypeShell(name=sn.UnqualName('anytype')),
            typemod=ft.TypeModifier.SingletonType,
            kind=ft.ParameterKind.PositionalParam,
        ))
        return params
    def validate_create(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> None:
        """Validate abstract constraint parameters against its bases."""
        super().validate_create(schema, context)
        if self.get_referrer_context(context) is not None:
            # The checks below apply only to abstract constraints.
            return
        base_params: Optional[s_func.FuncParameterList] = None
        base_with_params: Optional[Constraint] = None
        bases = self.get_resolved_attribute_value(
            'bases',
            schema=schema,
            context=context,
        )
        for base in bases.objects(schema):
            params = base.get_params(schema)
            if params and len(params) > 1:
                # All constraints have __subject__ parameter
                # auto-injected, hence the "> 1" check.
                if base_params is not None:
                    raise errors.InvalidConstraintDefinitionError(
                        f'{self.get_verbosename()} '
                        f'extends multiple constraints '
                        f'with parameters',
                        context=self.source_context,
                    )
                base_params = params
                base_with_params = base
        if base_params:
            assert base_with_params is not None
            params = self._get_params(schema, context)
            if not params or len(params) == 1:
                # All constraints have __subject__ parameter
                # auto-injected, hence the "== 1" check.
                raise errors.InvalidConstraintDefinitionError(
                    f'{self.get_verbosename()} '
                    f'must define parameters to reflect parameters of '
                    f'the {base_with_params.get_verbosename(schema)} '
                    f'it extends',
                    context=self.source_context,
                )
            if len(params) < len(base_params):
                raise errors.InvalidConstraintDefinitionError(
                    f'{self.get_verbosename()} '
                    f'has fewer parameters than the '
                    f'{base_with_params.get_verbosename(schema)} '
                    f'it extends',
                    context=self.source_context,
                )
            # Skipping the __subject__ param
            for base_param, param in zip(base_params.objects(schema)[1:],
                                         params.objects(schema)[1:]):
                param_name = param.get_parameter_name(schema)
                base_param_name = base_param.get_parameter_name(schema)
                if param_name != base_param_name:
                    raise errors.InvalidConstraintDefinitionError(
                        f'the {param_name!r} parameter of the '
                        f'{self.get_verbosename()} '
                        f'must be renamed to {base_param_name!r} '
                        f'to match the signature of the base '
                        f'{base_with_params.get_verbosename(schema)} ',
                        context=self.source_context,
                    )
                param_type = param.get_type(schema)
                base_param_type = base_param.get_type(schema)
                # A generic parameter may not override a concrete one.
                if (
                    not base_param_type.is_polymorphic(schema)
                    and param_type.is_polymorphic(schema)
                ):
                    raise errors.InvalidConstraintDefinitionError(
                        f'the {param_name!r} parameter of the '
                        f'{self.get_verbosename()} cannot '
                        f'be of generic type because the corresponding '
                        f'parameter of the '
                        f'{base_with_params.get_verbosename(schema)} '
                        f'it extends has a concrete type',
                        context=self.source_context,
                    )
                if (
                    not base_param_type.is_polymorphic(schema) and
                    not param_type.is_polymorphic(schema) and
                    not param_type.implicitly_castable_to(
                        base_param_type, schema)
                ):
                    raise errors.InvalidConstraintDefinitionError(
                        f'the {param_name!r} parameter of the '
                        f'{self.get_verbosename()} has type of '
                        f'{param_type.get_displayname(schema)} that '
                        f'is not implicitly castable to the '
                        f'corresponding parameter of the '
                        f'{base_with_params.get_verbosename(schema)} with '
                        f'type {base_param_type.get_displayname(schema)}',
                        context=self.source_context,
                    )
    def _create_begin(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_schema.Schema:
        """Begin creation; computes concrete attrs for referenced cases."""
        referrer_ctx = self.get_referrer_context(context)
        if referrer_ctx is None:
            # Abstract constraint: no concrete attrs to compute.
            schema = super()._create_begin(schema, context)
            return schema
        subject = referrer_ctx.scls
        assert isinstance(subject, ConsistencySubject)
        if not subject.can_accept_constraints(schema):
            raise errors.UnsupportedFeatureError(
                f'constraints cannot be defined on '
                f'{subject.get_verbosename(schema)}',
                context=self.source_context,
            )
        if not context.canonical:
            props = self.get_attributes(schema, context)
            props.pop('name')
            props.pop('subject', None)
            fullname = self.classname
            shortname = sn.shortname_from_fullname(fullname)
            assert isinstance(shortname, sn.QualName), \
                "expected qualified name"
            self._populate_concrete_constraint_attrs(
                schema,
                context,
                subject_obj=subject,
                name=shortname,
                subjectexpr_inherited=self.is_attribute_inherited(
                    'subjectexpr'),
                sourcectx=self.source_context,
                **props,
            )
            self.set_attribute_value('subject', subject)
        return super()._create_begin(schema, context)
    @classmethod
    def as_inherited_ref_cmd(
        cls,
        *,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        astnode: qlast.ObjectDDL,
        bases: List[Constraint],
        referrer: so.Object,
    ) -> sd.ObjectCommand[Constraint]:
        """Build a CREATE command for a constraint inherited via a ref."""
        cmd = super().as_inherited_ref_cmd(
            schema=schema,
            context=context,
            astnode=astnode,
            bases=bases,
            referrer=referrer,
        )
        args = cls._constraint_args_from_ast(schema, astnode, context)
        if args:
            cmd.set_attribute_value('args', args)
        # subjectexpr and params come from the nearest base and are
        # flagged as inherited.
        subj_expr = bases[0].get_subjectexpr(schema)
        if subj_expr is not None:
            cmd.set_attribute_value('subjectexpr', subj_expr, inherited=True)
        params = bases[0].get_params(schema)
        if params is not None:
            cmd.set_attribute_value('params', params, inherited=True)
        return cmd
    @classmethod
    def _cmd_tree_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.DDLOperation,
        context: sd.CommandContext,
    ) -> CreateConstraint:
        """Translate a CREATE CONSTRAINT AST into a command tree."""
        cmd = super()._cmd_tree_from_ast(schema, astnode, context)
        if isinstance(astnode, qlast.CreateConcreteConstraint):
            if astnode.delegated:
                cmd.set_attribute_value('delegated', astnode.delegated)
            args = cls._constraint_args_from_ast(schema, astnode, context)
            if args:
                cmd.set_attribute_value('args', args)
        elif isinstance(astnode, qlast.CreateConstraint):
            params = cls._get_param_desc_from_ast(
                schema, context.modaliases, astnode)
            for param in params:
                if param.get_kind(schema) is ft.ParameterKind.NamedOnlyParam:
                    raise errors.InvalidConstraintDefinitionError(
                        'named only parameters are not allowed '
                        'in this context',
                        context=astnode.context)
                if param.get_default(schema) is not None:
                    raise errors.InvalidConstraintDefinitionError(
                        'constraints do not support parameters '
                        'with defaults',
                        context=astnode.context)
        # Constraint checks always yield a singleton std::bool.
        if cmd.get_attribute_value('return_type') is None:
            cmd.set_attribute_value(
                'return_type',
                schema.get('std::bool'),
            )
        if cmd.get_attribute_value('return_typemod') is None:
            cmd.set_attribute_value(
                'return_typemod',
                ft.TypeModifier.SingletonType,
            )
        assert isinstance(astnode, (qlast.CreateConstraint,
                                    qlast.CreateConcreteConstraint))
        # 'subjectexpr' can be present in either astnode type
        if astnode.subjectexpr:
            orig_text = cls.get_orig_expr_text(schema, astnode, 'subjectexpr')
            if (
                orig_text is not None
                and context.compat_ver_is_before(
                    (1, 0, verutils.VersionStage.ALPHA, 6)
                )
            ):
                # Versions prior to a6 used a different expression
                # normalization strategy, so we must renormalize the
                # expression.
                expr_ql = qlcompiler.renormalize_compat(
                    astnode.subjectexpr,
                    orig_text,
                    schema=schema,
                    localnames=context.localnames,
                )
            else:
                expr_ql = astnode.subjectexpr
            subjectexpr = s_expr.Expression.from_ast(
                expr_ql,
                schema,
                context.modaliases,
                context.localnames,
            )
            cmd.set_attribute_value(
                'subjectexpr',
                subjectexpr,
            )
        cls._validate_subcommands(astnode)
        assert isinstance(cmd, CreateConstraint)
        return cmd
    def _skip_param(self, props: Dict[str, Any]) -> bool:
        """Omit the implicit __subject__ param from parameter output."""
        pname = s_func.Parameter.paramname_from_fullname(props['name'])
        return pname == '__subject__'
    def _get_params_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        node: qlast.DDLOperation,
    ) -> List[Tuple[int, qlast.FuncParam]]:
        """Return parameter ASTs (abstract constraints only)."""
        if isinstance(node, qlast.CreateConstraint):
            return super()._get_params_ast(schema, context, node)
        else:
            # Concrete constraints do not declare parameters.
            return []
    def _apply_field_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        node: qlast.DDLOperation,
        op: sd.AlterObjectProperty,
    ) -> None:
        """Render a field value onto the DDL AST node."""
        if (
            op.property == 'args'
            and isinstance(node, (qlast.CreateConcreteConstraint,
                                  qlast.AlterConcreteConstraint))
        ):
            assert isinstance(op.new_value, s_expr.ExpressionList)
            args = []
            for arg in op.new_value:
                exprast = arg.qlast
                assert isinstance(exprast, qlast.Expr), "expected qlast.Expr"
                args.append(exprast)
            node.args = args
            return
        super()._apply_field_ast(schema, context, node, op)
    @classmethod
    def _classbases_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.ObjectDDL,
        context: sd.CommandContext,
    ) -> List[so.ObjectShell[Constraint]]:
        """Resolve the base constraints from the AST."""
        if isinstance(astnode, qlast.CreateConcreteConstraint):
            # A concrete constraint's only base is the abstract
            # constraint named by its shortname.
            classname = cls._classname_from_ast(schema, astnode, context)
            base_name = sn.shortname_from_fullname(classname)
            assert isinstance(base_name, sn.QualName), \
                "expected qualified name"
            base = utils.ast_objref_to_object_shell(
                qlast.ObjectRef(
                    module=base_name.module,
                    name=base_name.name,
                ),
                metaclass=Constraint,
                schema=schema,
                modaliases=context.modaliases,
            )
            return [base]
        else:
            return super()._classbases_from_ast(schema, astnode, context)
class RenameConstraint(
    ConstraintCommand,
    s_func.RenameCallableObject[Constraint],
    referencing.RenameReferencedInheritingObject[Constraint],
):
    """Implementation of ALTER CONSTRAINT ... RENAME TO."""
    @classmethod
    def _classname_quals_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.NamedDDL,
        base_name: sn.Name,
        referrer_name: sn.QualName,
        context: sd.CommandContext,
    ) -> Tuple[str, ...]:
        """Derive classname qualifiers from the enclosing ALTER command.

        The renamed constraint keeps the qualifiers of the original
        name recorded on the parent operation.
        """
        parent_op = cls.get_parent_op(context)
        assert isinstance(parent_op.classname, sn.QualName)
        return cls._classname_quals_from_name(parent_op.classname)
    def _alter_begin(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_schema.Schema:
        """Apply the rename and propagate it to referencing objects."""
        schema = super()._alter_begin(schema, context)
        if not context.canonical and self.scls.get_abstract(schema):
            # Renaming an abstract constraint renames the references
            # to it as well.
            self._propagate_ref_rename(schema, context, self.scls)
        return schema
class AlterConstraintOwned(
    referencing.AlterOwned[Constraint],
    ConstraintCommand,
    field='owned',
    referrer_context_class=ConsistencySubjectCommandContext,
):
    """Implementation of ALTER CONSTRAINT ... SET/DROP OWNED.

    All behavior comes from ``referencing.AlterOwned``; this class
    only binds the field name and referrer context.
    """
    pass
class AlterConstraint(
    ConstraintCommand,
    referencing.AlterReferencedInheritingObject[Constraint],
):
    """Implementation of ALTER CONSTRAINT."""
    astnode = [qlast.AlterConcreteConstraint, qlast.AlterConstraint]
    referenced_astnode = qlast.AlterConcreteConstraint
    def _alter_begin(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_schema.Schema:
        """Recompute concrete constraint attributes before altering."""
        referrer_ctx = self.get_referrer_context(context)
        if referrer_ctx is None:
            # Abstract constraint: nothing extra to recompute.
            schema = super()._alter_begin(schema, context)
            return schema
        subject = referrer_ctx.scls
        assert isinstance(subject, ConsistencySubject)
        if not context.canonical:
            props = self.get_attributes(schema, context)
            props.pop('name', None)
            props.pop('subject', None)
            props.pop('expr', None)
            args = props.pop('args', None)
            if not args:
                args = self.scls.get_args(schema)
            subjectexpr = props.pop('subjectexpr', None)
            subjectexpr_inherited = self.is_attribute_inherited('subjectexpr')
            if not subjectexpr:
                # Fall back to the value recorded on the object itself.
                subjectexpr_inherited = self.scls.field_is_inherited(
                    schema, 'subjectexpr')
                subjectexpr = self.scls.get_subjectexpr(schema)
            fullname = self.classname
            shortname = sn.shortname_from_fullname(fullname)
            assert isinstance(shortname, sn.QualName), \
                "expected qualified name"
            self._populate_concrete_constraint_attrs(
                schema,
                context,
                subject_obj=subject,
                name=shortname,
                subjectexpr=subjectexpr,
                subjectexpr_inherited=subjectexpr_inherited,
                args=args,
                sourcectx=self.source_context,
                **props,
            )
        return super()._alter_begin(schema, context)
    @classmethod
    def _cmd_tree_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.DDLOperation,
        context: sd.CommandContext,
    ) -> AlterConstraint:
        """Translate an ALTER CONSTRAINT AST into a command tree."""
        cmd = super()._cmd_tree_from_ast(schema, astnode, context)
        assert isinstance(cmd, AlterConstraint)
        if isinstance(astnode, (qlast.CreateConcreteConstraint,
                                qlast.AlterConcreteConstraint)):
            if getattr(astnode, 'delegated', False):
                assert isinstance(astnode, qlast.CreateConcreteConstraint)
                cmd.set_attribute_value('delegated', astnode.delegated)
            # Reflect any rename subcommand into the 'name' attribute.
            new_name = None
            for op in cmd.get_subcommands(type=RenameConstraint):
                new_name = op.new_name
            if new_name is not None:
                cmd.set_attribute_value('name', new_name)
        cls._validate_subcommands(astnode)
        return cmd
    def _get_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        *,
        parent_node: Optional[qlast.DDLOperation] = None,
    ) -> Optional[qlast.DDLOperation]:
        """Produce the DDL AST for this alter, or None if it is a no-op."""
        if self.scls.get_abstract(schema):
            return super()._get_ast(schema, context, parent_node=parent_node)
        # We need to make sure to include subjectexpr and args
        # in the AST, since they are really part of the name.
        op = self.as_inherited_ref_ast(
            schema, context, self.scls.get_name(schema),
            self.scls,
        )
        self._apply_fields_ast(schema, context, op)
        if (op is not None and hasattr(op, 'commands') and
                not op.commands):
            # No subcommands were produced: nothing to emit.
            return None
        return op
    def validate_alter(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> None:
        """Reject SET DELEGATED when a non-delegated base exists."""
        super().validate_alter(schema, context)
        self_delegated = self.get_attribute_value('delegated')
        if not self_delegated:
            return
        concrete_bases = [
            b for b in self.scls.get_bases(schema).objects(schema)
            if not b.generic(schema) and not b.get_delegated(schema)
        ]
        if concrete_bases:
            tgt_repr = self.scls.get_verbosename(schema, with_parent=True)
            bases_repr = ', '.join(
                b.get_subject(schema).get_verbosename(schema, with_parent=True)
                for b in concrete_bases
            )
            raise errors.InvalidConstraintDefinitionError(
                f'cannot redefine {tgt_repr} as delegated:'
                f' it is defined as non-delegated in {bases_repr}',
                context=self.source_context,
            )
    def canonicalize_alter_from_external_ref(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> None:
        """Schedule a rename when an externally-referenced name changed."""
        if (
            not self.get_attribute_value('abstract')
            and (subjectexpr :=
                 self.get_attribute_value('subjectexpr')) is not None
        ):
            # To compute the new name, we construct an AST of the
            # constraint, since that is the infrastructure we have for
            # computing the classname.
            name = sn.shortname_from_fullname(self.classname)
            assert isinstance(name, sn.QualName), "expected qualified name"
            ast = qlast.CreateConcreteConstraint(
                name=qlast.ObjectRef(name=name.name, module=name.module),
                subjectexpr=subjectexpr.qlast,
                args=[],
            )
            quals = sn.quals_from_fullname(self.classname)
            new_name = self._classname_from_ast_and_referrer(
                schema, sn.QualName.from_string(quals[0]), ast, context)
            if new_name == self.classname:
                # Name is unchanged; nothing to do.
                return
            rename = self.scls.init_delta_command(
                schema, sd.RenameObject, new_name=new_name)
            rename.set_attribute_value(
                'name', value=new_name, orig_value=self.classname)
            self.add(rename)
    def _get_params(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_func.FuncParameterList:
        """Return the parameter list of the constraint being altered."""
        return self.scls.get_params(schema)
class DeleteConstraint(
    ConstraintCommand,
    referencing.DeleteReferencedInheritingObject[Constraint],
    s_func.DeleteCallableObject[Constraint],
):
    """Implementation of DROP [ABSTRACT] CONSTRAINT."""
    astnode = [qlast.DropConcreteConstraint, qlast.DropConstraint]
    referenced_astnode = qlast.DropConcreteConstraint
    def _apply_field_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        node: qlast.DDLOperation,
        op: sd.AlterObjectProperty,
    ) -> None:
        """Render a dropped field onto the DDL AST node."""
        if op.property == 'args':
            # Constraint args participate in the constraint's identity
            # and must be spelled out in the DROP statement.
            assert isinstance(op.old_value, s_expr.ExpressionList)
            assert isinstance(node, qlast.DropConcreteConstraint)
            node.args = [arg.qlast for arg in op.old_value]
            return
        super()._apply_field_ast(schema, context, node, op)
class RebaseConstraint(
    ConstraintCommand,
    referencing.RebaseReferencedInheritingObject[Constraint],
):
    """Implementation of constraint rebasing (EXTENDING changes)."""
    def _get_bases_for_ast(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
        bases: Tuple[so.ObjectShell[Constraint], ...],
    ) -> Tuple[so.ObjectShell[Constraint], ...]:
        # Constraint bases are implied by the constraint's name and are
        # never spelled out in DDL output.
        return ()
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Unified APIs' python bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import timeit
from absl.testing import parameterized
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework.experimental import _unified_api
from tensorflow.python.framework.experimental import context_stack as context_lib
from tensorflow.python.framework.experimental import def_function
from tensorflow.python.framework.experimental import math_ops as unified_math_ops
from tensorflow.python.framework.experimental import nn_ops as unified_nn_ops
from tensorflow.python.framework.experimental import tape as tape_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
# Convenience aliases for the pybind-exported unified API helpers.
SetTracingImplementation = _unified_api.SetTracingImplementation
# Per its target's name: converts an eager Tensor into an
# ImmediateExecutionTensorHandle.
TensorCastHelper = _unified_api.EagerTensorToImmediateExecutionTensorHandle
def get_immediate_execution_context():
  """Return the current eager context as an ImmediateExecutionContext."""
  eager_ctx = context.context()
  eager_ctx.ensure_initialized()
  return _unified_api.EagerContextToImmediateExecutionContext(
      eager_ctx._handle)
def maybe_cast(t, perform_cast):
  """Return `t` cast via TensorCastHelper when `perform_cast` is set."""
  return TensorCastHelper(t) if perform_cast else t
class UnifiedApiTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testAdd(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
return unified_math_ops.add(a, b)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_output = def_function.function(model)(a, b)
self.assertAllEqual(func_output.numpy(), [4., 6.])
eager_output = model(a, b)
self.assertAllEqual(eager_output.numpy(), [4., 6.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testAddGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
with tape_lib.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
result = unified_math_ops.add(a, b)
grads = tape.gradient(result, [a, b])
return grads
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_outputs = def_function.function(model)(a, b)
self.assertAllEqual(func_outputs[0].numpy(), [1.0, 1.0])
self.assertAllEqual(func_outputs[1].numpy(), [1.0, 1.0])
eager_outputs = model(a, b)
self.assertAllEqual(eager_outputs[0].numpy(), [1.0, 1.0])
self.assertAllEqual(eager_outputs[1].numpy(), [1.0, 1.0])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testRelu(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(t):
return unified_nn_ops.relu(t)
with context_lib.set_default(get_immediate_execution_context()):
positive = TensorCastHelper(constant_op.constant([1.]))
negative = TensorCastHelper(constant_op.constant([-1.]))
model_fn = def_function.function(model)
func_output = model_fn(positive)
self.assertAllEqual(func_output.numpy(), [1.])
func_output = model_fn(negative)
self.assertAllEqual(func_output.numpy(), [0.])
eager_output = model(positive)
self.assertAllEqual(eager_output.numpy(), [1.])
eager_output = model(negative)
self.assertAllEqual(eager_output.numpy(), [0.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testReluGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(t):
with tape_lib.GradientTape() as tape:
tape.watch(t)
result = unified_nn_ops.relu(t)
grads = tape.gradient(result, t)
return grads
with context_lib.set_default(get_immediate_execution_context()):
positive = TensorCastHelper(constant_op.constant([1.]))
negative = TensorCastHelper(constant_op.constant([-1.]))
model_fn = def_function.function(model)
func_output = model_fn(positive)
self.assertAllEqual(func_output.numpy(), [1.])
func_output = model_fn(negative)
self.assertAllEqual(func_output.numpy(), [0.])
eager_output = model(positive)
self.assertAllEqual(eager_output.numpy(), [1.])
eager_output = model(negative)
self.assertAllEqual(eager_output.numpy(), [0.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testNeg(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a):
return unified_math_ops.neg(a)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([2.]))
func_output = def_function.function(model)(a)
self.assertAllEqual(func_output.numpy(), [-2.])
eager_output = model(a)
self.assertAllEqual(eager_output.numpy(), [-2.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testNegGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a):
with tape_lib.GradientTape() as tape:
tape.watch(a)
result = unified_math_ops.neg(a)
grads = tape.gradient(result, a)
return grads
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([2.]))
func_outputs = def_function.function(model)(a)
self.assertAllEqual(func_outputs.numpy(), [-1.0])
eager_outputs = model(a)
self.assertAllEqual(eager_outputs.numpy(), [-1.0])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testSub(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
return unified_math_ops.sub(a, b)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_output = def_function.function(model)(a, b)
self.assertAllEqual(func_output.numpy(), [-2., -2.])
eager_output = model(a, b)
self.assertAllEqual(eager_output.numpy(), [-2., -2.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testSubGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
with tape_lib.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
result = unified_math_ops.sub(a, b)
grads = tape.gradient(result, [a, b])
return grads
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_outputs = def_function.function(model)(a, b)
self.assertAllEqual(func_outputs[0].numpy(), [1.0, 1.0])
self.assertAllEqual(func_outputs[1].numpy(), [-1.0, -1.0])
eager_outputs = model(a, b)
self.assertAllEqual(eager_outputs[0].numpy(), [1.0, 1.0])
self.assertAllEqual(eager_outputs[1].numpy(), [-1.0, -1.0])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testMul(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
return unified_math_ops.mul(a, b)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_output = def_function.function(model)(a, b)
self.assertAllEqual(func_output.numpy(), [3., 8.])
eager_output = model(a, b)
self.assertAllEqual(eager_output.numpy(), [3., 8.])
  @parameterized.named_parameters([
      ("Graph", False),
      ("Mlir", True),
  ])
  def testMulGrad(self, use_mlir):
    """Gradients of mul(a, b): d/da == b and d/db == a."""
    if use_mlir:
      SetTracingImplementation("mlir")

    def model(a, b):
      with tape_lib.GradientTape() as tape:
        # Plain tensors are not trainable variables, so watch them explicitly.
        tape.watch(a)
        tape.watch(b)
        result = unified_math_ops.mul(a, b)
      grads = tape.gradient(result, [a, b])
      return grads

    with context_lib.set_default(get_immediate_execution_context()):
      a = TensorCastHelper(constant_op.constant([1., 2.]))
      b = TensorCastHelper(constant_op.constant([3., 4.]))

      # Traced (tf.function) execution: grad w.r.t. a is b, and vice versa.
      func_outputs = def_function.function(model)(a, b)
      self.assertAllEqual(func_outputs[0].numpy(), [3., 4.])
      self.assertAllEqual(func_outputs[1].numpy(), [1., 2.])

      # Eager execution of the same model must produce identical gradients.
      eager_outputs = model(a, b)
      self.assertAllEqual(eager_outputs[0].numpy(), [3., 4.])
      self.assertAllEqual(eager_outputs[1].numpy(), [1., 2.])
class UnifiedTapeBenchmark(test.Benchmark):
  """Benchmarks MLP gradient computation via the unified ops/tape vs the
  regular TF ops/backprop, on an MNIST-shaped model."""

  def _computeMnistMlpGrads(self, math_ops_lib, nn_ops_lib, backprop_lib, cast,
                            num_iters, hidden_layers, hidden_size, batch_size):
    """Builds an MLP and times one forward pass + gradient computation.

    Args:
      math_ops_lib: module providing `mat_mul`.
      nn_ops_lib: module providing `relu` and
        `sparse_softmax_cross_entropy_with_logits`.
      backprop_lib: module providing `GradientTape`.
      cast: whether inputs are wrapped via `maybe_cast` (unified API path).
      num_iters: number of timing repeats; the fastest repeat is reported.
      hidden_layers: number of hidden layers in the MLP.
      hidden_size: width of each hidden layer.
      batch_size: number of rows in the input batch.

    Returns:
      Average wall time, in seconds, of a single model call.
    """
    # NOTE: `batch_size` used to be unconditionally overridden to 1 here,
    # which made the parameter dead. It is now honored; every current caller
    # passes batch_size=1, so reported numbers are unchanged.
    image_size = 28 * 28
    num_classes = 10

    def model(x, hidden_weights, softmax_weight, labels):
      with backprop_lib.GradientTape() as tape:
        for weight in hidden_weights + [softmax_weight]:
          tape.watch(weight)
        for hidden_weight in hidden_weights:
          x = math_ops_lib.mat_mul(x, hidden_weight)
          x = nn_ops_lib.relu(x)
        logits = math_ops_lib.mat_mul(x, softmax_weight)
        loss = nn_ops_lib.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels)
      grads = tape.gradient(loss, hidden_weights + [softmax_weight])
      return grads

    x = maybe_cast(array_ops.ones([batch_size, image_size]), cast)
    hidden_weights = []
    for i in range(hidden_layers):
      # First layer maps image_size -> hidden_size; subsequent ones are square.
      hidden_weights.append(
          maybe_cast(
              random_ops.random_uniform(
                  [hidden_size if i else image_size, hidden_size]), cast))
    softmax_weight = maybe_cast(
        random_ops.random_uniform([hidden_size, num_classes]), cast)
    labels = maybe_cast(array_ops.zeros([batch_size], dtype=dtypes.int32), cast)
    with context_lib.set_default(get_immediate_execution_context()):
      # Warm up so one-time tracing/compilation cost is not measured.
      for _ in range(10):
        model(x, hidden_weights, softmax_weight, labels)
      runtimes = timeit.repeat(
          lambda: model(x, hidden_weights, softmax_weight, labels),
          repeat=num_iters,
          number=10)
    # Each repeat ran the model 10 times; report per-call time of the best.
    return min(runtimes) / 10

  def benchmarkTwoHiddenLayerMnistEagerUnified(self):
    """Unified ops + unified tape, 2 hidden layers."""
    num_iters = 100
    duration = self._computeMnistMlpGrads(
        unified_math_ops,
        unified_nn_ops,
        tape_lib,
        True,
        num_iters,
        hidden_layers=2,
        hidden_size=100,
        batch_size=1)
    self.report_benchmark(
        name="TwoHiddenLayerMnistEagerUnified",
        iters=num_iters,
        wall_time=duration)

  def benchmarkTwoHiddenLayerMnistEager(self):
    """Regular TF ops + backprop, 2 hidden layers (baseline)."""
    num_iters = 100
    duration = self._computeMnistMlpGrads(
        math_ops,
        nn_ops,
        backprop,
        False,
        num_iters,
        hidden_layers=2,
        hidden_size=100,
        batch_size=1)
    self.report_benchmark(
        name="TwoHiddenLayerMnistEager", iters=num_iters, wall_time=duration)

  def benchmarkTenHiddenLayerMnistEagerUnified(self):
    """Unified ops + unified tape, 10 hidden layers."""
    num_iters = 100
    duration = self._computeMnistMlpGrads(
        unified_math_ops,
        unified_nn_ops,
        tape_lib,
        True,
        num_iters,
        hidden_layers=10,
        hidden_size=100,
        batch_size=1)
    self.report_benchmark(
        name="TenHiddenLayerMnistEagerUnified",
        iters=num_iters,
        wall_time=duration)

  def benchmarkTenHiddenLayerMnistEager(self):
    """Regular TF ops + backprop, 10 hidden layers (baseline)."""
    num_iters = 100
    duration = self._computeMnistMlpGrads(
        math_ops,
        nn_ops,
        backprop,
        False,
        num_iters,
        hidden_layers=10,
        hidden_size=100,
        batch_size=1)
    self.report_benchmark(
        name="TenHiddenLayerMnistEager", iters=num_iters, wall_time=duration)
if __name__ == "__main__":
  # Enable eager mode before running the tests/benchmarks above.
  ops.enable_eager_execution()
  test.main()
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The PlanBcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from test_framework.test_framework import PlanbcoinTestFramework
from test_framework.util import *
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
redeem_script_1 = CScript([OP_1, OP_DROP])
redeem_script_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL])
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
# (indexed by output position: vout 0 pays P2SH_1, vout 1 pays P2SH_2).
SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])]

# NOTE(review): `global` at module scope is a no-op in Python; `log` is
# actually bound by EstimateFeeTest.run_test (which declares `global log`)
# before the helper functions below read it.
global log
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
    """
    Create and send a transaction with a random fee.
    The transaction pays to a trivial P2SH script, and assumes that its inputs
    are of the same form.
    The function takes a list of confirmed outputs and unconfirmed outputs
    and attempts to use the confirmed list first for its inputs.
    It adds the newly created outputs to the unconfirmed list.
    Returns (raw transaction, fee)
    """
    # It's best to exponentially distribute our random fees
    # because the buckets are exponentially spaced.
    # Exponentially distributed from 1-128 * fee_increment
    # (1.1892 ~= 2**0.25, so 1.1892**28 == 2**7 == 128).
    rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
    # Total fee ranges from min_fee to min_fee + 127*fee_increment
    fee = min_fee - fee_increment + satoshi_round(rand_fee)
    tx = CTransaction()
    total_in = Decimal("0.00000000")
    # Consume confirmed outputs first until the target amount + fee is covered.
    while total_in <= (amount + fee) and len(conflist) > 0:
        t = conflist.pop(0)
        total_in += t["amount"]
        tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
    if total_in <= amount + fee:
        # Confirmed outputs ran short; fall back to unconfirmed (mempool) outputs.
        while total_in <= (amount + fee) and len(unconflist) > 0:
            t = unconflist.pop(0)
            total_in += t["amount"]
            tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
        if total_in <= amount + fee:
            raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
    # vout 0 is change paying to P2SH_1; vout 1 is the payment to P2SH_2.
    tx.vout.append(CTxOut(int((total_in - amount - fee)*COIN), P2SH_1))
    tx.vout.append(CTxOut(int(amount*COIN), P2SH_2))
    # These transactions don't need to be signed, but we still have to insert
    # the ScriptSig that will satisfy the ScriptPubKey.
    for inp in tx.vin:
        inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
    txid = from_node.sendrawtransaction(ToHex(tx), True)
    # Both new outputs start out unconfirmed.
    unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
    unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
    return (ToHex(tx), fee)
def split_inputs(from_node, txins, txouts, initial_split = False):
    """
    We need to generate a lot of inputs so we can generate a ton of transactions.
    This function takes an input from txins, and creates and sends a transaction
    which splits the value into 2 outputs which are appended to txouts.
    Previously this was designed to be small inputs so they wouldn't have
    a high coin age when the notion of priority still existed.
    """
    prevtxout = txins.pop()
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
    # Split roughly in half, leaving a fixed 0.00001 fee behind.
    half_change = satoshi_round(prevtxout["amount"]/2)
    rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
    tx.vout.append(CTxOut(int(half_change*COIN), P2SH_1))
    tx.vout.append(CTxOut(int(rem_change*COIN), P2SH_2))
    # If this is the initial split we actually need to sign the transaction
    # Otherwise we just need to insert the proper ScriptSig
    if (initial_split) :
        completetx = from_node.signrawtransaction(ToHex(tx))["hex"]
    else :
        # The input being spent is one of our trivial P2SH outputs; pick the
        # matching canned ScriptSig by its vout index.
        tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
        completetx = ToHex(tx)
    txid = from_node.sendrawtransaction(completetx, True)
    txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
    txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
    """
    This function calls estimatefee and verifies that the estimates
    meet certain invariants.

    Returns the list of estimates for confirmation targets 1..25.
    A negative estimate means the node could not produce one for that target.
    """
    all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
    if print_estimates:
        log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
    delta = 1.0e-6 # account for rounding error
    last_e = max(fees_seen)
    for e in [x for x in all_estimates if x >= 0]:
        # Estimates should be within the bounds of what transactions fees actually were:
        if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
            raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                 %(float(e), min(fees_seen), max(fees_seen)))
        # Estimates should be monotonically decreasing
        # (a higher confirmation target never costs more per kB).
        if float(e)-delta > last_e:
            raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
                                 %(float(e),float(last_e)))
        last_e = e
    valid_estimate = False
    invalid_estimates = 0
    for i,e in enumerate(all_estimates): # estimate is for i+1
        if e >= 0:
            valid_estimate = True
            if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n)
                assert(node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta)
        else:
            invalid_estimates += 1
            # estimatesmartfee should still be valid
            approx_estimate = node.estimatesmartfee(i+1)["feerate"]
            answer_found = node.estimatesmartfee(i+1)["blocks"]
            assert(approx_estimate > 0)
            # smartfee must have answered for a target beyond the one asked.
            assert(answer_found > i+1)
            # Once we're at a high enough confirmation count that we can give an estimate
            # We should have estimates for all higher confirmation counts
            if valid_estimate:
                raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
    # Check on the expected number of different confirmation counts
    # that we might not have valid estimates for
    if invalid_estimates > max_invalid:
        raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
    return all_estimates
class EstimateFeeTest(PlanbcoinTestFramework):
    """Exercises fee estimation across three nodes that mine with different
    block-size limits, then validates the estimates via check_estimates()."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 3
        self.setup_clean_chain = False

    def setup_network(self):
        """
        We'll setup the network to have 3 nodes that all mine with different parameters.
        But first we need to use one node to create a lot of outputs
        which we will use to generate our transactions.
        """
        self.nodes = []
        # Use node0 to mine blocks for input splitting
        self.nodes.append(self.start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
                                                                   "-whitelist=127.0.0.1"]))
        self.log.info("This test is time consuming, please be patient")
        self.log.info("Splitting inputs so we can generate tx's")
        self.txouts = []
        self.txouts2 = []
        # Split a coinbase into two transaction puzzle outputs
        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
        # Mine until the mempool is empty so the splits confirm.
        while (len(self.nodes[0].getrawmempool()) > 0):
            self.nodes[0].generate(1)
        # Repeatedly split those 2 outputs, doubling twice for each rep
        # Use txouts to monitor the available utxo, since these won't be tracked in wallet
        reps = 0
        while (reps < 5):
            #Double txouts to txouts2
            while (len(self.txouts)>0):
                split_inputs(self.nodes[0], self.txouts, self.txouts2)
            while (len(self.nodes[0].getrawmempool()) > 0):
                self.nodes[0].generate(1)
            #Double txouts2 to txouts
            while (len(self.txouts2)>0):
                split_inputs(self.nodes[0], self.txouts2, self.txouts)
            while (len(self.nodes[0].getrawmempool()) > 0):
                self.nodes[0].generate(1)
            reps += 1
        self.log.info("Finished splitting")
        # Now we can connect the other nodes, didn't want to connect them earlier
        # so the estimates would not be affected by the splitting transactions
        # Node1 mines small blocks but that are bigger than the expected transaction rate.
        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
        # (17k is room enough for 110 or so transactions)
        self.nodes.append(self.start_node(1, self.options.tmpdir,
                                          ["-blockmaxsize=17000", "-maxorphantx=1000"]))
        connect_nodes(self.nodes[1], 0)
        # Node2 is a stingy miner, that
        # produces too small blocks (room for only 55 or so transactions)
        node2args = ["-blockmaxsize=8000", "-maxorphantx=1000"]
        self.nodes.append(self.start_node(2, self.options.tmpdir, node2args))
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[2], 1)
        self.sync_all()

    def transact_and_mine(self, numblocks, mining_node):
        """Send ~100 random-fee transactions per block and mine `numblocks`
        blocks on `mining_node`, tracking observed fee rates and which of our
        outputs are confirmed."""
        min_fee = Decimal("0.00001")
        # We will now mine numblocks blocks generating on average 100 transactions between each block
        # We shuffle our confirmed txout set before each set of transactions
        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
        # resorting to tx's that depend on the mempool when those run out
        for i in range(numblocks):
            random.shuffle(self.confutxo)
            for j in range(random.randrange(100-50,100+50)):
                from_index = random.randint(1,2)
                (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
                                                     self.memutxo, Decimal("0.005"), min_fee, min_fee)
                # Serialized hex is 2 chars per byte; record fee rate in BTC/kB.
                tx_kbytes = (len(txhex) // 2) / 1000.0
                self.fees_per_kb.append(float(fee)/tx_kbytes)
            sync_mempools(self.nodes[0:3], wait=.1)
            mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
            sync_blocks(self.nodes[0:3], wait=.1)
            # update which txouts are confirmed
            newmem = []
            for utx in self.memutxo:
                if utx["txid"] in mined:
                    self.confutxo.append(utx)
                else:
                    newmem.append(utx)
            self.memutxo = newmem

    def run_test(self):
        # Make log handler available to helper functions
        global log
        log = self.log
        self.fees_per_kb = []
        self.memutxo = []
        self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
        self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
        for i in range(2):
            self.log.info("Creating transactions and mining them with a block size that can't keep up")
            # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
            self.transact_and_mine(10, self.nodes[2])
            check_estimates(self.nodes[1], self.fees_per_kb, 14)
            self.log.info("Creating transactions and mining them at a block size that is just big enough")
            # Generate transactions while mining 10 more blocks, this time with node1
            # which mines blocks with capacity just above the rate that transactions are being created
            self.transact_and_mine(10, self.nodes[1])
            check_estimates(self.nodes[1], self.fees_per_kb, 2)
        # Finish by mining a normal-sized block:
        while len(self.nodes[1].getrawmempool()) > 0:
            self.nodes[1].generate(1)
        sync_blocks(self.nodes[0:3], wait=.1)
        self.log.info("Final estimates after emptying mempools")
        check_estimates(self.nodes[1], self.fees_per_kb, 2)
if __name__ == '__main__':
    # Standard test-framework entry point.
    EstimateFeeTest().main()
| |
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Test cases for package_order.py (package ordering)
"""
import json
from rez.config import config
from rez.package_order import NullPackageOrder, PackageOrder, PerFamilyOrder, VersionSplitPackageOrder, \
TimestampPackageOrder, SortedOrder, PackageOrderList, from_pod
from rez.packages import iter_packages
from rez.tests.util import TestBase, TempdirMixin
from rez.vendor.version.version import Version
class _BaseTestPackagesOrder(TestBase, TempdirMixin):
    """Base class for a package ordering test case"""

    @classmethod
    def setUpClass(cls):
        TempdirMixin.setUpClass()
        # Package repositories shipped with the rez test data.
        cls.py_packages_path = cls.data_path("packages", "py_packages")
        cls.solver_packages_path = cls.data_path("solver", "packages")
        cls.settings = dict(
            packages_path=[
                cls.solver_packages_path,
                cls.py_packages_path
            ],
            package_filter=None)

    @classmethod
    def tearDownClass(cls):
        TempdirMixin.tearDownClass()

    def _test_reorder(self, orderer, package_name, expected_order):
        """Ensure the orderer orders package versions as expected."""
        it = iter_packages(package_name)
        descending = sorted(it, key=lambda x: x.version, reverse=True)
        # reorder() may return a falsy value meaning "no change"; fall back to
        # the descending input in that case.
        ordered = orderer.reorder(descending) or descending
        result = [str(x.version) for x in ordered]
        self.assertEqual(expected_order, result)

    def _test_pod(self, orderer):
        """Ensure an orderer survives a round trip through its pod form."""
        pod = json.loads(json.dumps(orderer.to_pod()))  # roundtrip to JSON
        actual = orderer.__class__.from_pod(pod)
        self.assertEqual(orderer, actual)
class TestAbstractPackageOrder(TestBase):
    """Test case for the abstract PackageOrder class"""

    def test_reorder(self):
        """Validate reorder is not implemented"""
        self.assertRaises(NotImplementedError, PackageOrder().reorder, [])

    def test_to_pod(self):
        """Validate to_pod is not implemented"""
        with self.assertRaises(NotImplementedError):
            PackageOrder().to_pod()

    def test_str(self):
        """Validate __str__ is not implemented"""
        self.assertRaises(NotImplementedError, str, PackageOrder())

    def test_eq(self):
        """Validate __eq__ is not implemented"""
        with self.assertRaises(NotImplementedError):
            # Comparing two abstract instances must raise, not return a bool.
            PackageOrder() == PackageOrder()
class TestNullPackageOrder(_BaseTestPackagesOrder):
    """Test case for the NullPackageOrder class"""

    def test_repr(self):
        """Validate we can represent a NullPackageOrder as a string."""
        self.assertEqual("NullPackageOrder({})", repr(NullPackageOrder()))

    def test_comparison(self):
        """Validate we can compare NullPackageOrder together."""
        inst1 = NullPackageOrder()
        inst2 = NullPackageOrder()
        self.assertTrue(inst1 == inst2)  # __eq__ positive
        self.assertFalse(inst1 == "wrong_type")  # __eq__ negative (wrong type)
        self.assertTrue(inst1 != "wrong_type")  # __ne__ positive (wrong type)
        self.assertFalse(inst1 != inst2)  # __ne__ negative

    def test_pod(self):
        """Validate we can save and load a NullPackageOrder to its pod representation."""
        self._test_pod(NullPackageOrder())

    def test_sha1(self):
        """Validate we can get a sha1 hash.
        """
        # Hash is derived from the pod form, so it must be stable across runs.
        self.assertEqual(
            'bf7c2fa4e6bd198c02adeea2c3a382cf57242051', NullPackageOrder().sha1
        )
class TestSortedOrder(_BaseTestPackagesOrder):
    """Test case for the SortedOrder class"""

    def test_reorder_ascending(self):
        """Validate we can sort packages in ascending order."""
        self._test_reorder(SortedOrder(descending=False), "pymum", ["1", "2", "3"])

    def test_reorder_descending(self):
        """Validate we can sort packages in descending order."""
        self._test_reorder(SortedOrder(descending=True), "pymum", ["3", "2", "1"])

    def test_comparison(self):
        """Validate we can compare SortedOrder together."""
        inst1 = SortedOrder(descending=False)
        inst2 = SortedOrder(descending=False)
        inst3 = SortedOrder(descending=True)
        self.assertTrue(inst1 == inst2)  # __eq__ positive
        self.assertFalse(inst1 == inst3)  # __eq__ negative
        self.assertTrue(inst1 != inst3)  # __ne__ positive
        self.assertFalse(inst1 != inst2)  # __ne__ negative
        self.assertFalse(inst1 == "wrong_type")  # __eq__ negative (wrong type)
        self.assertTrue(inst1 != "wrong_type")  # __ne__ positive (wrong type)

    def test_repr(self):
        """Validate we can represent a SortedOrder as a string."""
        self.assertEqual("SortedOrder(True)", repr(SortedOrder(descending=True)))

    def test_pod(self):
        """Validate we can save and load a SortedOrder to its pod representation."""
        self._test_pod(SortedOrder(descending=True))
class TestPerFamilyOrder(_BaseTestPackagesOrder):
    """Test case for the PerFamilyOrder class"""

    def test_reorder(self):
        """Test ordering."""
        # Each family gets the ordering of its configured sub-orderer.
        expected_null_result = ["7", "6", "5"]
        expected_split_result = ["2.6.0", "2.5.2", "2.7.0", "2.6.8"]
        expected_timestamp_result = ["1.1.1", "1.1.0", "1.0.6", "1.0.5", "1.2.0", "2.0.0", "2.1.5", "2.1.0"]
        orderer = PerFamilyOrder(
            order_dict=dict(
                pysplit=NullPackageOrder(),
                python=VersionSplitPackageOrder(Version("2.6.0")),
                timestamped=TimestampPackageOrder(timestamp=3001, rank=3)
            ),
            default_order=SortedOrder(descending=False)
        )
        self._test_reorder(orderer, "pysplit", expected_null_result)
        self._test_reorder(orderer, "python", expected_split_result)
        self._test_reorder(orderer, "timestamped", expected_timestamp_result)
        # "pymum" is not in order_dict, so default_order (ascending) applies.
        self._test_reorder(orderer, "pymum", ["1", "2", "3"])

    def test_reorder_no_packages(self):
        """Validate ordering for a family with no packages."""
        orderer = PerFamilyOrder(order_dict=dict(missing_package=NullPackageOrder()))
        self._test_reorder(orderer, "missing_package", [])

    def test_reorder_no_default_order(self):
        """Test behavior when there's no secondary default_order."""
        # Families without an entry keep the incoming (descending) order.
        fam_orderer = PerFamilyOrder(order_dict={})
        self._test_reorder(fam_orderer, "pymum", ["3", "2", "1"])

    def test_comparison(self):
        """Validate we can compare PerFamilyOrder."""
        inst1 = PerFamilyOrder(order_dict={'foo': NullPackageOrder()}, default_order=NullPackageOrder())
        inst2 = PerFamilyOrder(order_dict={'foo': NullPackageOrder()}, default_order=NullPackageOrder())
        inst3 = PerFamilyOrder(order_dict={'bar': NullPackageOrder()}, default_order=NullPackageOrder())
        inst4 = PerFamilyOrder(order_dict={'foo': NullPackageOrder()}, default_order=None)
        self.assertTrue(inst1 == inst2)  # __eq__ positive
        self.assertFalse(inst1 == inst3)  # __eq__ negative (different order dict)
        self.assertFalse(inst1 == inst4)  # __eq__ negative (different default_order)
        self.assertTrue(inst1 != inst3)  # __ne__ positive (different order dict)
        self.assertTrue(inst1 != inst4)  # __ne__ positive (different default order)
        self.assertFalse(inst1 != inst2)  # __ne__ negative

    def test_repr(self):
        """Validate we can represent a PerFamilyOrder as a string."""
        inst = PerFamilyOrder(order_dict={"family1": VersionSplitPackageOrder(Version("2.6.0"))})
        self.assertEqual("PerFamilyOrder(([('family1', '2.6.0')], 'None'))", repr(inst))

    def test_pod(self):
        """Validate we can save and load a PerFamilyOrder to its pod representation."""
        self._test_pod(
            PerFamilyOrder(order_dict={'foo': NullPackageOrder()}, default_order=NullPackageOrder())
        )
        # No default_order
        self._test_pod(
            PerFamilyOrder(order_dict={'foo': NullPackageOrder()})
        )
class TestVersionSplitPackageOrder(_BaseTestPackagesOrder):
    """Test case for the VersionSplitPackageOrder class"""

    def test_reorder(self):
        """Validate package ordering with a VersionSplitPackageOrder"""
        orderer = VersionSplitPackageOrder(Version("2.6.0"))
        # Versions up to and including first_version come first (descending),
        # followed by the later versions (also descending).
        expected = ["2.6.0", "2.5.2", "2.7.0", "2.6.8"]
        self._test_reorder(orderer, "python", expected)

    def test_comparison(self):
        """Validate we can compare VersionSplitPackageOrder together."""
        inst1 = VersionSplitPackageOrder(first_version=Version("1.2.3"))
        inst2 = VersionSplitPackageOrder(first_version=Version("1.2.3"))
        inst3 = VersionSplitPackageOrder(first_version=Version("1.2.4"))
        self.assertTrue(inst1 == inst2)  # __eq__ positive
        self.assertFalse(inst1 == inst3)  # __eq__ negative
        self.assertTrue(inst1 != inst3)  # __ne__ positive
        self.assertFalse(inst1 != inst2)  # __ne__ negative
        self.assertFalse(inst1 == "wrong_type")  # __eq__ negative (wrong type)
        self.assertTrue(inst1 != "wrong_type")  # __ne__ positive (wrong type)

    def test_repr(self):
        """Validate we can represent a VersionSplitPackageOrder as a string."""
        inst = VersionSplitPackageOrder(first_version=Version("1,2,3"))
        self.assertEqual("VersionSplitPackageOrder(1,2,3)", repr(inst))

    def test_pod(self):
        """Validate we can save and load a VersionSplitPackageOrder to its pod representation."""
        self._test_pod(VersionSplitPackageOrder(first_version=Version("1.2.3")))
class TestTimestampPackageOrder(_BaseTestPackagesOrder):
    """Test cases for the TimestampPackageOrder class"""

    def test_reorder_no_rank(self):
        """Validate reordering with a rank of 0."""
        orderer = TimestampPackageOrder(timestamp=3001)
        expected = ['1.1.0', '1.0.6', '1.0.5', '1.1.1', '1.2.0', '2.0.0', '2.1.0', '2.1.5']
        self._test_reorder(orderer, "timestamped", expected)

    def test_reorder_rank_3(self):
        """Validate reordering with a rank of 3."""
        # after v1.1.0 and before v1.1.1
        orderer1 = TimestampPackageOrder(timestamp=3001, rank=3)
        expected1 = ["1.1.1", "1.1.0", "1.0.6", "1.0.5", "1.2.0", "2.0.0", "2.1.5", "2.1.0"]
        self._test_reorder(orderer1, "timestamped", expected1)
        # after v2.1.0 and before v2.1.5
        orderer2 = TimestampPackageOrder(timestamp=7001, rank=3)
        expected2 = ["2.1.5", "2.1.0", "2.0.0", "1.2.0", "1.1.1", "1.1.0", "1.0.6", "1.0.5"]
        self._test_reorder(orderer2, "timestamped", expected2)

    def test_reorder_rank_2(self):
        """Add coverage for a corner case where there's only one candidate without the rank."""
        orderer = TimestampPackageOrder(timestamp=4001, rank=3)  # 1.1.1
        expected = ['1.1.1', '1.1.0', '1.0.6', '1.0.5', '1.2.0', '2.0.0', '2.1.5', '2.1.0']
        self._test_reorder(orderer, "timestamped", expected)

    def test_reorder_packages_without_timestamps(self):
        """Validate reordering of packages that have no timestamp data."""
        # Without timestamps, the incoming descending order is left unchanged.
        orderer = TimestampPackageOrder(timestamp=3001)
        self._test_reorder(orderer, "pymum", ["3", "2", "1"])

    def test_reorder_all_packages_before_timestamp(self):
        """Test behavior when all packages are before the timestamp."""
        timestamp_orderer = TimestampPackageOrder(timestamp=9999999999, rank=3)
        expected = ['2.1.5', '2.1.0', '2.0.0', '1.2.0', '1.1.1', '1.1.0', '1.0.6', '1.0.5']
        self._test_reorder(timestamp_orderer, "timestamped", expected)

    def test_reorder_all_packages_after_timestamp(self):
        """Test behavior when all packages are after the timestamp."""
        timestamp_orderer = TimestampPackageOrder(timestamp=0, rank=3)
        expected = ['1.0.6', '1.0.5', '1.1.1', '1.1.0', '1.2.0', '2.0.0', '2.1.5', '2.1.0']
        self._test_reorder(timestamp_orderer, "timestamped", expected)

    def test_comparison(self):
        """Validate we can compare TimestampPackageOrder."""
        inst1 = TimestampPackageOrder(timestamp=1, rank=1)
        inst2 = TimestampPackageOrder(timestamp=1, rank=1)
        inst3 = TimestampPackageOrder(timestamp=2, rank=1)
        inst4 = TimestampPackageOrder(timestamp=2, rank=2)
        self.assertTrue(inst1 == inst2)  # __eq__ positive
        self.assertFalse(inst1 == inst3)  # __eq__ negative (different timestamp)
        self.assertFalse(inst1 == inst4)  # __eq__ negative (different rank)
        self.assertTrue(inst1 != inst3)  # __ne__ positive (different timestamp)
        self.assertTrue(inst1 != inst4)  # __ne__ positive (different rank)
        self.assertFalse(inst1 != inst2)  # __ne__ negative

    def test_repr(self):
        """Validate we can represent a TimestampPackageOrder as a string."""
        inst = TimestampPackageOrder(timestamp=1, rank=2)
        self.assertEqual(repr(inst), "TimestampPackageOrder((1, 2))")

    def test_pod(self):
        """Validate we can save and load a TimestampPackageOrder to pod representation."""
        self._test_pod(TimestampPackageOrder(timestamp=3001, rank=3))
class TestPackageOrdererList(_BaseTestPackagesOrder):
    """Test cases for the PackageOrderList class."""

    @staticmethod
    def _clear_singleton_cache():
        """Drop the cached @classproperty value so `singleton` re-reads config."""
        try:
            delattr(PackageOrderList, '_class_property_singleton')
        except AttributeError:
            # Nothing cached yet.
            pass

    def test_singleton(self):
        """Validate we can build a PackageOrderList object from configuration values."""
        config.override("package_orderers", [
            {
                "type": "per_family",
                "orderers": [
                    {
                        "packages": ["python"],
                        "type": "version_split",
                        "first_version": "2.9.9"
                    }
                ]
            }
        ])
        expected = PackageOrderList()
        expected.append(PerFamilyOrder(order_dict={
            "python": VersionSplitPackageOrder(Version("2.9.9"))
        }))
        self._clear_singleton_cache()
        self.assertEqual(expected, PackageOrderList.singleton)

    def test_singleton_novalue(self):
        """Validate we can build a PackageOrderList object from empty configuration values."""
        config.override("package_orderers", None)
        self._clear_singleton_cache()
        self.assertEqual(PackageOrderList(), PackageOrderList.singleton)

    def test_pod(self):
        """Validate we can save and load a PackageOrdererList to its pod representation."""
        inst = PackageOrderList((
            VersionSplitPackageOrder(Version("2.6.0")),
            PerFamilyOrder(order_dict={}, default_order=SortedOrder(descending=False))
        ))
        self._test_pod(inst)
class TestPackageOrderPublic(TestBase):
    """Additional tests for public symbols in package_order.py"""

    def test_from_pod_old_style(self):
        """Validate from_pod is still compatible with the older pod style."""
        # Old-style pods are (type_name, data) tuples rather than flat dicts.
        old_style_pod = ("version_split", {"first_version": "1.2.3"})
        expected = VersionSplitPackageOrder(first_version=Version("1.2.3"))
        self.assertEqual(expected, from_pod(old_style_pod))
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.logging_v2.types import logging
from google.protobuf import empty_pb2 # type: ignore
from .base import LoggingServiceV2Transport, DEFAULT_CLIENT_INFO
from .grpc import LoggingServiceV2GrpcTransport
class LoggingServiceV2GrpcAsyncIOTransport(LoggingServiceV2Transport):
"""gRPC AsyncIO backend transport for LoggingServiceV2.
Service for ingesting and querying logs.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "logging.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # Delegate to the shared helper; the class-level AUTH_SCOPES and
        # DEFAULT_HOST act as fallbacks when the caller does not override them.
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
def __init__(
    self,
    *,
    host: str = "logging.googleapis.com",
    credentials: ga_credentials.Credentials = None,
    credentials_file: Optional[str] = None,
    scopes: Optional[Sequence[str]] = None,
    channel: aio.Channel = None,
    api_mtls_endpoint: str = None,
    client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
    ssl_channel_credentials: grpc.ChannelCredentials = None,
    client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
    quota_project_id=None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    always_use_jwt_access: Optional[bool] = False,
) -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]):
            The hostname to connect to.
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
            This argument is ignored if ``channel`` is provided.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is ignored if ``channel`` is provided.
        scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
            service. These are only used when credentials are not specified and
            are passed to :func:`google.auth.default`.
        channel (Optional[aio.Channel]): A ``Channel`` instance through
            which to make calls.
        api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
            If provided, it overrides the ``host`` argument and tries to create
            a mutual TLS channel with client SSL credentials from
            ``client_cert_source`` or application default SSL credentials.
        client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
            Deprecated. A callback to provide client SSL certificate bytes and
            private key bytes, both in PEM format. It is ignored if
            ``api_mtls_endpoint`` is None.
        ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
            for the grpc channel. It is ignored if ``channel`` is provided.
        client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
            A callback to provide client certificate bytes and private key bytes,
            both in PEM format. It is used to configure a mutual TLS channel. It is
            ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.
        always_use_jwt_access (Optional[bool]): Whether self signed JWT should
            be used for service account credentials.

    Raises:
        google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
            creation failed for any reason.
        google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
            and ``credentials_file`` are passed.
    """
    self._grpc_channel = None
    self._ssl_channel_credentials = ssl_channel_credentials
    # Per-RPC stub cache; populated lazily by the RPC properties below.
    self._stubs: Dict[str, Callable] = {}
    if api_mtls_endpoint:
        warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
    if client_cert_source:
        warnings.warn("client_cert_source is deprecated", DeprecationWarning)
    if channel:
        # Ignore credentials if a channel was passed.
        # ``False`` (not ``None``) signals to the base class that no
        # credential lookup should even be attempted.
        credentials = False
        # If a channel was explicitly provided, set it.
        self._grpc_channel = channel
        self._ssl_channel_credentials = None
    else:
        if api_mtls_endpoint:
            host = api_mtls_endpoint
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                self._ssl_channel_credentials = SslCredentials().ssl_credentials
        else:
            # Non-deprecated mTLS path: only build SSL credentials when the
            # caller did not supply ready-made channel credentials.
            if client_cert_source_for_mtls and not ssl_channel_credentials:
                cert, key = client_cert_source_for_mtls()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
    # The base transport sets the host, credentials and scopes
    super().__init__(
        host=host,
        credentials=credentials,
        credentials_file=credentials_file,
        scopes=scopes,
        quota_project_id=quota_project_id,
        client_info=client_info,
        always_use_jwt_access=always_use_jwt_access,
    )
    if not self._grpc_channel:
        self._grpc_channel = type(self).create_channel(
            self._host,
            # use the credentials which are saved
            credentials=self._credentials,
            # Set ``credentials_file`` to ``None`` here as
            # the credentials that we saved earlier should be used.
            credentials_file=None,
            scopes=self._scopes,
            ssl_credentials=self._ssl_channel_credentials,
            quota_project_id=quota_project_id,
            options=[
                # Unbounded message sizes in both directions.
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Wrap messages. This must be done after self._grpc_channel exists
    self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
    """Return the channel used by this transport.

    The channel is created once during ``__init__`` and cached on the
    instance, so every read of this property yields the same object.
    """
    # Nothing to build here: simply hand back the cached channel.
    return self._grpc_channel
@property
def delete_log(
    self,
) -> Callable[[logging.DeleteLogRequest], Awaitable[empty_pb2.Empty]]:
    r"""Return a callable for the delete log method over gRPC.

    Deletes all the log entries in a log for the \_Default Log
    Bucket. The log reappears if it receives new entries. Log
    entries written shortly before the delete operation might not be
    deleted. Entries received after the delete operation with a
    timestamp before the operation will be deleted.

    Returns:
        Callable[[~.DeleteLogRequest],
            Awaitable[~.Empty]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Create the stub lazily on first access and memoize it; gRPC takes
    # care of (de)serialization, so only the codec functions are wired up.
    stub = self._stubs.get("delete_log")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/DeleteLog",
            request_serializer=logging.DeleteLogRequest.serialize,
            response_deserializer=empty_pb2.Empty.FromString,
        )
        self._stubs["delete_log"] = stub
    return stub
@property
def write_log_entries(
    self,
) -> Callable[
    [logging.WriteLogEntriesRequest], Awaitable[logging.WriteLogEntriesResponse]
]:
    r"""Return a callable for the write log entries method over gRPC.

    Writes log entries to Logging. This API method is the
    only way to send log entries to Logging. This method is
    used, directly or indirectly, by the Logging agent
    (fluentd) and all logging libraries configured to use
    Logging. A single request may contain log entries for a
    maximum of 1000 different resources (projects,
    organizations, billing accounts or folders).

    Returns:
        Callable[[~.WriteLogEntriesRequest],
            Awaitable[~.WriteLogEntriesResponse]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Create the stub lazily on first access and memoize it; gRPC takes
    # care of (de)serialization, so only the codec functions are wired up.
    stub = self._stubs.get("write_log_entries")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/WriteLogEntries",
            request_serializer=logging.WriteLogEntriesRequest.serialize,
            response_deserializer=logging.WriteLogEntriesResponse.deserialize,
        )
        self._stubs["write_log_entries"] = stub
    return stub
@property
def list_log_entries(
    self,
) -> Callable[
    [logging.ListLogEntriesRequest], Awaitable[logging.ListLogEntriesResponse]
]:
    r"""Return a callable for the list log entries method over gRPC.

    Lists log entries. Use this method to retrieve log entries that
    originated from a project/folder/organization/billing account.
    For ways to export log entries, see `Exporting
    Logs <https://cloud.google.com/logging/docs/export>`__.

    Returns:
        Callable[[~.ListLogEntriesRequest],
            Awaitable[~.ListLogEntriesResponse]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Create the stub lazily on first access and memoize it; gRPC takes
    # care of (de)serialization, so only the codec functions are wired up.
    stub = self._stubs.get("list_log_entries")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/ListLogEntries",
            request_serializer=logging.ListLogEntriesRequest.serialize,
            response_deserializer=logging.ListLogEntriesResponse.deserialize,
        )
        self._stubs["list_log_entries"] = stub
    return stub
@property
def list_monitored_resource_descriptors(
    self,
) -> Callable[
    [logging.ListMonitoredResourceDescriptorsRequest],
    Awaitable[logging.ListMonitoredResourceDescriptorsResponse],
]:
    r"""Return a callable for the list monitored resource
    descriptors method over gRPC.

    Lists the descriptors for monitored resource types
    used by Logging.

    Returns:
        Callable[[~.ListMonitoredResourceDescriptorsRequest],
            Awaitable[~.ListMonitoredResourceDescriptorsResponse]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Create the stub lazily on first access and memoize it; gRPC takes
    # care of (de)serialization, so only the codec functions are wired up.
    stub = self._stubs.get("list_monitored_resource_descriptors")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors",
            request_serializer=logging.ListMonitoredResourceDescriptorsRequest.serialize,
            response_deserializer=logging.ListMonitoredResourceDescriptorsResponse.deserialize,
        )
        self._stubs["list_monitored_resource_descriptors"] = stub
    return stub
@property
def list_logs(
    self,
) -> Callable[[logging.ListLogsRequest], Awaitable[logging.ListLogsResponse]]:
    r"""Return a callable for the list logs method over gRPC.

    Lists the logs in projects, organizations, folders,
    or billing accounts. Only logs that have entries are
    listed.

    Returns:
        Callable[[~.ListLogsRequest],
            Awaitable[~.ListLogsResponse]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Create the stub lazily on first access and memoize it; gRPC takes
    # care of (de)serialization, so only the codec functions are wired up.
    stub = self._stubs.get("list_logs")
    if stub is None:
        stub = self.grpc_channel.unary_unary(
            "/google.logging.v2.LoggingServiceV2/ListLogs",
            request_serializer=logging.ListLogsRequest.serialize,
            response_deserializer=logging.ListLogsResponse.deserialize,
        )
        self._stubs["list_logs"] = stub
    return stub
@property
def tail_log_entries(
    self,
) -> Callable[
    [logging.TailLogEntriesRequest], Awaitable[logging.TailLogEntriesResponse]
]:
    r"""Return a callable for the tail log entries method over gRPC.

    Streaming read of log entries as they are ingested.
    Until the stream is terminated, it will continue reading
    logs.

    Returns:
        Callable[[~.TailLogEntriesRequest],
            Awaitable[~.TailLogEntriesResponse]]:
        A function that, when called, will call the underlying RPC
        on the server.
    """
    # Create the stub lazily on first access and memoize it. Note this
    # RPC is bidirectionally streaming, hence stream_stream.
    stub = self._stubs.get("tail_log_entries")
    if stub is None:
        stub = self.grpc_channel.stream_stream(
            "/google.logging.v2.LoggingServiceV2/TailLogEntries",
            request_serializer=logging.TailLogEntriesRequest.serialize,
            response_deserializer=logging.TailLogEntriesResponse.deserialize,
        )
        self._stubs["tail_log_entries"] = stub
    return stub
def close(self):
    """Close the transport's gRPC channel and return the result of the
    channel's own ``close()`` call."""
    channel = self.grpc_channel
    return channel.close()
__all__ = ("LoggingServiceV2GrpcAsyncIOTransport",)
| |
# Copyright 2014, Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from osprofiler import profiler
from neutron._i18n import _LE, _LI, _LW
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
# A class to represent a DVR-hosted subnet including vif_ports resident on
# that subnet
class LocalDVRSubnetMapping(object):
    """Per-subnet DVR bookkeeping.

    Holds the subnet data returned by the plugin, the compute vif ports
    resident on the subnet, the csnat ofport (if one exists) and a flag
    telling whether a DVR router interface currently owns the subnet.
    """

    def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
        # set of compute ports on this dvr subnet
        self.compute_ports = {}
        self.subnet = subnet
        self.csnat_ofport = csnat_ofport
        self.dvr_owned = False

    def __str__(self):
        fields = (self.subnet, self.get_compute_ofports(),
                  self.get_csnat_ofport(), self.is_dvr_owned())
        return ("subnet = %s compute_ports = %s csnat_port = %s"
                " is_dvr_owned = %s" % fields)

    def get_subnet_info(self):
        return self.subnet

    # -- DVR ownership ------------------------------------------------
    def set_dvr_owned(self, owned):
        self.dvr_owned = owned

    def is_dvr_owned(self):
        return self.dvr_owned

    # -- compute port tracking ---------------------------------------
    def add_compute_ofport(self, vif_id, ofport):
        self.compute_ports[vif_id] = ofport

    def remove_compute_ofport(self, vif_id):
        # Tolerate unknown vif ids rather than raising KeyError.
        self.compute_ports.pop(vif_id, None)

    def remove_all_compute_ofports(self):
        self.compute_ports.clear()

    def get_compute_ofports(self):
        return self.compute_ports

    # -- csnat port ---------------------------------------------------
    def set_csnat_ofport(self, ofport):
        self.csnat_ofport = ofport

    def get_csnat_ofport(self):
        return self.csnat_ofport
class OVSPort(object):
    """Record of a VIF port plugged into the integration bridge.

    Tracks the port's id, ofport, MAC and device owner, plus the set of
    DVR-routed subnet ids on which the port has addresses.
    """

    def __init__(self, id, ofport, mac, device_owner):
        self.id = id
        self.mac = mac
        self.ofport = ofport
        self.subnets = set()
        self.device_owner = device_owner

    def __str__(self):
        details = (self.id, self.ofport, self.mac,
                   self.device_owner, self.subnets)
        return ("OVSPort: id = %s, ofport = %s, mac = %s, "
                "device_owner = %s, subnets = %s" % details)

    # -- subnet membership -------------------------------------------
    def add_subnet(self, subnet_id):
        self.subnets.add(subnet_id)

    def remove_subnet(self, subnet_id):
        # set.remove raises KeyError if the id was never added.
        self.subnets.remove(subnet_id)

    def remove_all_subnets(self):
        self.subnets.clear()

    def get_subnets(self):
        return self.subnets

    # -- simple accessors --------------------------------------------
    def get_device_owner(self):
        return self.device_owner

    def get_mac(self):
        return self.mac

    def get_ofport(self):
        return self.ofport
@profiler.trace_cls("ovs_dvr_agent")
class OVSDVRNeutronAgent(object):
'''
Implements OVS-based DVR(Distributed Virtual Router), for overlay networks.
'''
# history
# 1.0 Initial version
def __init__(self, context, plugin_rpc, integ_br, tun_br,
bridge_mappings, phys_brs, int_ofports, phys_ofports,
patch_int_ofport=constants.OFPORT_INVALID,
patch_tun_ofport=constants.OFPORT_INVALID,
host=None, enable_tunneling=False,
enable_distributed_routing=False):
self.context = context
self.plugin_rpc = plugin_rpc
self.host = host
self.enable_tunneling = enable_tunneling
self.enable_distributed_routing = enable_distributed_routing
self.bridge_mappings = bridge_mappings
self.phys_brs = phys_brs
self.int_ofports = int_ofports
self.phys_ofports = phys_ofports
self.reset_ovs_parameters(integ_br, tun_br,
patch_int_ofport, patch_tun_ofport)
self.reset_dvr_parameters()
self.dvr_mac_address = None
if self.enable_distributed_routing:
self.get_dvr_mac_address()
self.conf = cfg.CONF
def setup_dvr_flows(self):
self.setup_dvr_flows_on_integ_br()
self.setup_dvr_flows_on_tun_br()
self.setup_dvr_flows_on_phys_br()
self.setup_dvr_mac_flows_on_all_brs()
def reset_ovs_parameters(self, integ_br, tun_br,
patch_int_ofport, patch_tun_ofport):
'''Reset the openvswitch parameters'''
self.int_br = integ_br
self.tun_br = tun_br
self.patch_int_ofport = patch_int_ofport
self.patch_tun_ofport = patch_tun_ofport
def reset_dvr_parameters(self):
'''Reset the DVR parameters'''
self.local_dvr_map = {}
self.local_csnat_map = {}
self.local_ports = {}
self.registered_dvr_macs = set()
def get_dvr_mac_address(self):
try:
self.get_dvr_mac_address_with_retry()
except oslo_messaging.RemoteError as e:
LOG.error(_LE('L2 agent could not get DVR MAC address at '
'startup due to RPC error. It happens when the '
'server does not support this RPC API. Detailed '
'message: %s'), e)
except oslo_messaging.MessagingTimeout:
LOG.error(_LE('DVR: Failed to obtain a valid local '
'DVR MAC address'))
if not self.in_distributed_mode():
sys.exit(1)
def get_dvr_mac_address_with_retry(self):
# Get the local DVR MAC Address from the Neutron Server.
# This is the first place where we contact the server on startup
# so retry in case it's not ready to respond
for retry_count in reversed(range(5)):
try:
details = self.plugin_rpc.get_dvr_mac_address_by_host(
self.context, self.host)
except oslo_messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('L2 agent could not get DVR MAC '
'address from server. Retrying. '
'Detailed message: %s'), e)
else:
LOG.debug("L2 Agent DVR: Received response for "
"get_dvr_mac_address_by_host() from "
"plugin: %r", details)
self.dvr_mac_address = details['mac_address']
return
def setup_dvr_flows_on_integ_br(self):
'''Setup up initial dvr flows into br-int'''
if not self.in_distributed_mode():
return
LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"),
self.dvr_mac_address)
# Remove existing flows in integration bridge
if self.conf.AGENT.drop_flows_on_start:
self.int_br.delete_flows()
# Add a canary flow to int_br to track OVS restarts
self.int_br.setup_canary_table()
# Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1)
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN,
priority=1)
# Insert 'normal' action as the default for Table LOCAL_SWITCHING
self.int_br.install_normal(table_id=constants.LOCAL_SWITCHING,
priority=1)
for physical_network in self.bridge_mappings:
self.int_br.install_drop(table_id=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.int_ofports[
physical_network])
def setup_dvr_flows_on_tun_br(self):
'''Setup up initial dvr flows into br-tun'''
if not self.enable_tunneling or not self.in_distributed_mode():
return
self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS,
priority=1,
in_port=self.patch_int_ofport)
# table-miss should be sent to learning table
self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN,
dest_table_id=constants.LEARN_FROM_TUN)
self.tun_br.install_goto(table_id=constants.DVR_PROCESS,
dest_table_id=constants.PATCH_LV_TO_TUN)
def setup_dvr_flows_on_phys_br(self):
'''Setup up initial dvr flows into br-phys'''
if not self.in_distributed_mode():
return
for physical_network in self.bridge_mappings:
self.phys_brs[physical_network].install_goto(
in_port=self.phys_ofports[physical_network],
priority=2,
dest_table_id=constants.DVR_PROCESS_VLAN)
self.phys_brs[physical_network].install_goto(
priority=1,
dest_table_id=constants.DVR_NOT_LEARN_VLAN)
self.phys_brs[physical_network].install_goto(
table_id=constants.DVR_PROCESS_VLAN,
priority=0,
dest_table_id=constants.LOCAL_VLAN_TRANSLATION)
self.phys_brs[physical_network].install_drop(
table_id=constants.LOCAL_VLAN_TRANSLATION,
in_port=self.phys_ofports[physical_network],
priority=2)
self.phys_brs[physical_network].install_normal(
table_id=constants.DVR_NOT_LEARN_VLAN,
priority=1)
def _add_dvr_mac_for_phys_br(self, physical_network, mac):
self.int_br.add_dvr_mac_vlan(mac=mac,
port=self.int_ofports[physical_network])
phys_br = self.phys_brs[physical_network]
phys_br.add_dvr_mac_vlan(mac=mac,
port=self.phys_ofports[physical_network])
def _remove_dvr_mac_for_phys_br(self, physical_network, mac):
# REVISIT(yamamoto): match in_port as well?
self.int_br.remove_dvr_mac_vlan(mac=mac)
phys_br = self.phys_brs[physical_network]
# REVISIT(yamamoto): match in_port as well?
phys_br.remove_dvr_mac_vlan(mac=mac)
def _add_dvr_mac_for_tun_br(self, mac):
self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport)
def _remove_dvr_mac_for_tun_br(self, mac):
self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
# REVISIT(yamamoto): match in_port as well?
self.tun_br.remove_dvr_mac_tun(mac=mac)
def _add_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._add_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._add_dvr_mac_for_tun_br(mac)
LOG.debug("Added DVR MAC flow for %s", mac)
self.registered_dvr_macs.add(mac)
def _remove_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._remove_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._remove_dvr_mac_for_tun_br(mac)
LOG.debug("Removed DVR MAC flow for %s", mac)
self.registered_dvr_macs.remove(mac)
def setup_dvr_mac_flows_on_all_brs(self):
if not self.in_distributed_mode():
LOG.debug("Not in distributed mode, ignoring invocation "
"of get_dvr_mac_address_list() ")
return
dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
for mac in dvr_macs:
if mac['mac_address'] == self.dvr_mac_address:
continue
self._add_dvr_mac(mac['mac_address'])
def dvr_mac_address_update(self, dvr_macs):
if not self.dvr_mac_address:
LOG.debug("Self mac unknown, ignoring this "
"dvr_mac_address_update() ")
return
dvr_host_macs = set()
for entry in dvr_macs:
if entry['mac_address'] == self.dvr_mac_address:
continue
dvr_host_macs.add(entry['mac_address'])
if dvr_host_macs == self.registered_dvr_macs:
LOG.debug("DVR Mac address already up to date")
return
dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
for oldmac in dvr_macs_removed:
self._remove_dvr_mac(oldmac)
for newmac in dvr_macs_added:
self._add_dvr_mac(newmac)
def in_distributed_mode(self):
return self.dvr_mac_address is not None
def process_tunneled_network(self, network_type, lvid, segmentation_id):
self.tun_br.provision_local_vlan(
network_type=network_type,
lvid=lvid,
segmentation_id=segmentation_id,
distributed=self.in_distributed_mode())
def _bind_distributed_router_interface_port(self, port, lvm,
fixed_ips, device_owner):
# since distributed router port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
subnet_uuid = fixed_ip['subnet_id']
csnat_ofport = constants.OFPORT_INVALID
ldm = None
if subnet_uuid in self.local_dvr_map:
ldm = self.local_dvr_map[subnet_uuid]
csnat_ofport = ldm.get_csnat_ofport()
if csnat_ofport == constants.OFPORT_INVALID:
LOG.error(_LE("DVR: Duplicate DVR router interface detected "
"for subnet %s"), subnet_uuid)
return
else:
# set up LocalDVRSubnetMapping available for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(
self.context, subnet_uuid, fixed_ips=fixed_ips)
if not subnet_info:
LOG.warning(_LW("DVR: Unable to retrieve subnet information "
"for subnet_id %s. The subnet or the gateway "
"may have already been deleted"), subnet_uuid)
return
LOG.debug("get_subnet_for_dvr for subnet %(uuid)s "
"returned with %(info)s",
{"uuid": subnet_uuid, "info": subnet_info})
ldm = LocalDVRSubnetMapping(subnet_info)
self.local_dvr_map[subnet_uuid] = ldm
# DVR takes over
ldm.set_dvr_owned(True)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
local_compute_ports = (
self.plugin_rpc.get_ports_on_host_by_subnet(
self.context, self.host, subnet_uuid))
LOG.debug("DVR: List of ports received from "
"get_ports_on_host_by_subnet %s",
local_compute_ports)
vif_by_id = self.int_br.get_vifs_by_ids(
[local_port['id'] for local_port in local_compute_ports])
for local_port in local_compute_ports:
vif = vif_by_id.get(local_port['id'])
if not vif:
continue
ldm.add_compute_ofport(vif.vif_id, vif.ofport)
if vif.vif_id in self.local_ports:
# ensure if a compute port is already on
# a different dvr routed subnet
# if yes, queue this subnet to that port
comp_ovsport = self.local_ports[vif.vif_id]
comp_ovsport.add_subnet(subnet_uuid)
else:
# the compute port is discovered first here that its on
# a dvr routed subnet queue this subnet to that port
comp_ovsport = OVSPort(vif.vif_id, vif.ofport,
vif.vif_mac, local_port['device_owner'])
comp_ovsport.add_subnet(subnet_uuid)
self.local_ports[vif.vif_id] = comp_ovsport
# create rule for just this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=comp_ovsport.get_mac(),
dst_port=comp_ovsport.get_ofport())
if lvm.network_type == p_const.TYPE_VLAN:
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
br = self.phys_brs[lvm.physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
if ip_version == 4:
br.install_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.install_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
br.install_dvr_process(
vlan_tag=lvm.vlan, vif_mac=port.vif_mac,
dvr_mac_address=self.dvr_mac_address)
# the dvr router interface is itself a port, so capture it
# queue this subnet to that port. A subnet appears only once as
# a router interface on any given router
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
def _bind_port_on_dvr_subnet(self, port, lvm, fixed_ips,
device_owner):
# Handle new compute port added use-case
subnet_uuid = None
for ips in fixed_ips:
if ips['subnet_id'] not in self.local_dvr_map:
continue
subnet_uuid = ips['subnet_id']
ldm = self.local_dvr_map[subnet_uuid]
if not ldm.is_dvr_owned():
# well this is CSNAT stuff, let dvr come in
# and do plumbing for this vm later
continue
# This confirms that this compute port belongs
# to a dvr hosted subnet.
# Accommodate this VM Port into the existing rule in
# the integration bridge
LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
subnet_info = ldm.get_subnet_info()
ldm.add_compute_ofport(port.vif_id, port.ofport)
if port.vif_id in self.local_ports:
# ensure if a compute port is already on a different
# dvr routed subnet
# if yes, queue this subnet to that port
ovsport = self.local_ports[port.vif_id]
ovsport.add_subnet(subnet_uuid)
else:
# the compute port is discovered first here that its
# on a dvr routed subnet, queue this subnet to that port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# create a rule for this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm,
fixed_ips, device_owner):
# since centralized-SNAT (CSNAT) port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
if port.vif_id in self.local_ports:
# throw an error if CSNAT port is already on a different
# dvr routed subnet
ovsport = self.local_ports[port.vif_id]
subs = list(ovsport.get_subnets())
if subs[0] == fixed_ip['subnet_id']:
return
LOG.error(_LE("Centralized-SNAT port %(port)s on subnet "
"%(port_subnet)s already seen on a different "
"subnet %(orig_subnet)s"), {
"port": port.vif_id,
"port_subnet": fixed_ip['subnet_id'],
"orig_subnet": subs[0],
})
return
subnet_uuid = fixed_ip['subnet_id']
ldm = None
subnet_info = None
if subnet_uuid not in self.local_dvr_map:
# no csnat ports seen on this subnet - create csnat state
# for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(
self.context, subnet_uuid, fixed_ips=fixed_ips)
if not subnet_info:
LOG.warning(_LW("DVR: Unable to retrieve subnet information "
"for subnet_id %s. The subnet or the gateway "
"may have already been deleted"), subnet_uuid)
return
LOG.debug("get_subnet_for_dvr for subnet %(uuid)s "
"returned with %(info)s",
{"uuid": subnet_uuid, "info": subnet_info})
ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
self.local_dvr_map[subnet_uuid] = ldm
else:
ldm = self.local_dvr_map[subnet_uuid]
subnet_info = ldm.get_subnet_info()
# Store csnat OF Port in the existing DVRSubnetMap
ldm.set_csnat_ofport(port.ofport)
# create ovsPort footprint for csnat port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def bind_port_to_dvr(self, port, local_vlan_map,
fixed_ips, device_owner):
if not self.in_distributed_mode():
return
if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES
+ [p_const.TYPE_VLAN]):
LOG.debug("DVR: Port %s is with network_type %s not supported"
" for dvr plumbing" % (port.vif_id,
local_vlan_map.network_type))
return
if (port.vif_id in self.local_ports and
self.local_ports[port.vif_id].ofport != port.ofport):
LOG.info(_LI("DVR: Port %(vif)s changed port number to "
"%(ofport)s, rebinding."),
{'vif': port.vif_id, 'ofport': port.ofport})
self.unbind_port_from_dvr(port, local_vlan_map)
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._bind_distributed_router_interface_port(port,
local_vlan_map,
fixed_ips,
device_owner)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._bind_port_on_dvr_subnet(port, local_vlan_map,
fixed_ips,
device_owner)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._bind_centralized_snat_port_on_dvr_subnet(port,
local_vlan_map,
fixed_ips,
device_owner)
def _unbind_distributed_router_interface_port(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# removal of distributed router interface
subnet_ids = ovsport.get_subnets()
subnet_set = set(subnet_ids)
network_type = lvm.network_type
physical_network = lvm.physical_network
vlan_to_use = lvm.vlan
if network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# ensure we process for all the subnets laid on this removed port
for sub_uuid in subnet_set:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
# DVR is no more owner
ldm.set_dvr_owned(False)
# remove all vm rules for this dvr subnet
# clear of compute_ports altogether
compute_ports = ldm.get_compute_ofports()
for vif_id in compute_ports:
comp_port = self.local_ports[vif_id]
self.int_br.delete_dvr_to_src_mac(
network_type=network_type,
vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac())
ldm.remove_all_compute_ofports()
if ldm.get_csnat_ofport() == constants.OFPORT_INVALID:
# if there is no csnat port for this subnet, remove
# this subnet from local_dvr_map, as no dvr (or) csnat
# ports available on this agent anymore
self.local_dvr_map.pop(sub_uuid, None)
if network_type == p_const.TYPE_VLAN:
br = self.phys_brs[physical_network]
if network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
if ip_version == 4:
br.delete_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.delete_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
ovsport.remove_subnet(sub_uuid)
if lvm.network_type == p_const.TYPE_VLAN:
br = self.phys_brs[physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
br.delete_dvr_process(vlan_tag=lvm.vlan, vif_mac=port.vif_mac)
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for compute port %s", port)
subnet_ids = ovsport.get_subnets()
# ensure we process for all the subnets laid on this port
for sub_uuid in subnet_ids:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
ldm.remove_compute_ofport(port.vif_id)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# first remove this vm port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_centralized_snat_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for csnat port %s", port)
sub_uuid = list(ovsport.get_subnets())[0]
# ensure we process for all the subnets laid on this port
if sub_uuid not in self.local_dvr_map:
return
ldm = self.local_dvr_map[sub_uuid]
ldm.set_csnat_ofport(constants.OFPORT_INVALID)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# then remove csnat port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
if not ldm.is_dvr_owned():
# if not owned by DVR (only used for csnat), remove this
# subnet state altogether
self.local_dvr_map.pop(sub_uuid, None)
# release port state
self.local_ports.pop(port.vif_id, None)
def unbind_port_from_dvr(self, vif_port, local_vlan_map):
    """Undo any DVR wiring for vif_port, dispatching on its device owner."""
    if not self.in_distributed_mode():
        return
    # Ports this agent never tracked as DVR-related are ignored.
    if vif_port and vif_port.vif_id not in self.local_ports:
        LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
        return
    owner = self.local_ports[vif_port.vif_id].get_device_owner()
    if owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
        self._unbind_distributed_router_interface_port(vif_port,
                                                       local_vlan_map)
    if owner and n_utils.is_dvr_serviced(owner):
        self._unbind_port_on_dvr_subnet(vif_port,
                                        local_vlan_map)
    if owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
        self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
                                                         local_vlan_map)
| |
# -*- coding: utf-8 -*-
"""
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function

import copy
import itertools
from collections import Counter

import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.stats

from ..mdr import MDR
def entropy(X, base=2):
    """Calculates the entropy, H(X), in the given base

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the entropy
    base: integer (default: 2)
        The base in which to calculate entropy

    Returns
    ----------
    entropy: float
        The entropy calculated according to the equation
        H(X) = -sum(p_x * log p_x) for all states of X
    """
    # scipy.stats.entropy normalizes the raw counts into probabilities.
    state_counts = Counter(X)
    return scipy.stats.entropy(list(state_counts.values()), base=base)
def joint_entropy(X, Y, base=2):
    """Calculates the joint entropy, H(X,Y), in the given base

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the joint entropy
    Y: array-like (# samples)
        An array of values for which to compute the joint entropy
    base: integer (default: 2)
        The base in which to calculate joint entropy

    Returns
    ----------
    joint_entropy: float
        The joint entropy calculated according to the equation
        H(X,Y) = -sum(p_xy * log p_xy) for all combined states of X and Y
    """
    # Fuse each (x, y) pair into one combined state label, then reuse entropy().
    combined_states = [str(x) + str(y) for x, y in zip(X, Y)]
    return entropy(combined_states, base=base)
def conditional_entropy(X, Y, base=2):
    """Calculates the conditional entropy, H(X|Y), in the given base

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the conditional entropy
    Y: array-like (# samples)
        An array of values for which to compute the conditional entropy
    base: integer (default: 2)
        The base in which to calculate conditional entropy

    Returns
    ----------
    conditional_entropy: float
        The conditional entropy calculated according to the chain rule
        H(X|Y) = H(X,Y) - H(Y)
    """
    h_xy = joint_entropy(X, Y, base=base)
    h_y = entropy(Y, base=base)
    return h_xy - h_y
def mutual_information(X, Y, base=2):
    """Calculates the mutual information between two variables, I(X;Y), in the given base

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the mutual information
    Y: array-like (# samples)
        An array of values for which to compute the mutual information
    base: integer (default: 2)
        The base in which to calculate mutual information

    Returns
    ----------
    mutual_information: float
        The mutual information calculated according to the equation
        I(X;Y) = H(Y) - H(Y|X)
    """
    h_y = entropy(Y, base=base)
    h_y_given_x = conditional_entropy(Y, X, base=base)
    return h_y - h_y_given_x
def two_way_information_gain(X, Y, Z, base=2):
    """Calculates the two-way information gain between three variables, I(X;Y;Z), in the given base

    IG(X;Y;Z) indicates the information gained about variable Z by the joint
    variable X_Y, after removing the information that X and Y have about Z
    individually. It therefore measures the synergistic predictive value of
    X and Y about Z.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values for which to compute the 2-way information gain
    Y: array-like (# samples)
        An array of values for which to compute the 2-way information gain
    Z: array-like (# samples)
        An array of outcome values for which to compute the 2-way information gain
    base: integer (default: 2)
        The base in which to calculate 2-way information

    Returns
    ----------
    mutual_information: float
        The information gain calculated according to the equation
        IG(X;Y;Z) = I(X,Y;Z) - I(X;Z) - I(Y;Z)
    """
    combined_states = [str(x) + str(y) for x, y in zip(X, Y)]
    pair_info = mutual_information(combined_states, Z, base=base)
    solo_info = (mutual_information(X, Z, base=base) +
                 mutual_information(Y, Z, base=base))
    return pair_info - solo_info
def three_way_information_gain(W, X, Y, Z, base=2):
    """Calculates the three-way information gain between four variables, I(W;X;Y;Z), in the given base

    IG(W;X;Y;Z) indicates the information gained about variable Z by the joint
    variable W_X_Y, after removing the information that W, X, and Y have about
    Z individually and jointly in pairs. It therefore measures the synergistic
    predictive value of W, X, and Y about Z.

    Parameters
    ----------
    W: array-like (# samples)
        An array of values for which to compute the 3-way information gain
    X: array-like (# samples)
        An array of values for which to compute the 3-way information gain
    Y: array-like (# samples)
        An array of values for which to compute the 3-way information gain
    Z: array-like (# samples)
        An array of outcome values for which to compute the 3-way information gain
    base: integer (default: 2)
        The base in which to calculate 3-way information

    Returns
    ----------
    mutual_information: float
        The information gain calculated according to the equation:
        IG(W;X;Y;Z) = I(W,X,Y;Z) - IG(W;X;Z) - IG(W;Y;Z) - IG(X;Y;Z)
                      - I(W;Z) - I(X;Z) - I(Y;Z)
    """
    combined_states = [str(w) + str(x) + str(y) for w, x, y in zip(W, X, Y)]
    triple_info = mutual_information(combined_states, Z, base=base)
    pairwise_info = (two_way_information_gain(W, X, Z, base=base) +
                     two_way_information_gain(W, Y, Z, base=base) +
                     two_way_information_gain(X, Y, Z, base=base))
    solo_info = (mutual_information(W, Z, base=base) +
                 mutual_information(X, Z, base=base) +
                 mutual_information(Y, Z, base=base))
    return triple_info - pairwise_info - solo_info
def _mdr_predict(X, Y, labels):
    """Fits a MDR model to variables X and Y with the given labels, then returns the resulting predictions

    This is a convenience method that should only be used internally.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    Y: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    labels: array-like (# samples)
        The class labels corresponding to features X and Y

    Returns
    ----------
    predictions: array-like (# samples)
        The predictions from the fitted MDR model
    """
    feature_matrix = np.column_stack((X, Y))
    return MDR().fit_predict(feature_matrix, labels)
def mdr_entropy(X, Y, labels, base=2):
    """Calculates the MDR entropy, H(XY), in the given base

    MDR entropy is calculated by combining variables X and Y into a single MDR
    model, then computing the entropy of that model's predictions.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    Y: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    labels: array-like (# samples)
        The class labels corresponding to features X and Y
    base: integer (default: 2)
        The base in which to calculate MDR entropy

    Returns
    ----------
    mdr_entropy: float
        The MDR entropy calculated according to the equation
        H(XY) = -sum(p_xy * log p_xy) for all output states of the MDR model
    """
    model_predictions = _mdr_predict(X, Y, labels)
    return entropy(model_predictions, base=base)
def mdr_conditional_entropy(X, Y, labels, base=2):
    """Calculates the MDR conditional entropy, H(XY|labels), in the given base

    MDR conditional entropy is calculated by combining variables X and Y into a
    single MDR model, then computing the entropy of that model's predictions
    conditional on the provided labels.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    Y: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    labels: array-like (# samples)
        The class labels corresponding to features X and Y
    base: integer (default: 2)
        The base in which to calculate MDR conditional entropy

    Returns
    ----------
    mdr_conditional_entropy: float
        The MDR conditional entropy calculated according to the equation
        H(XY|labels) = H(XY,labels) - H(labels)
    """
    model_predictions = _mdr_predict(X, Y, labels)
    return conditional_entropy(model_predictions, labels, base=base)
def mdr_mutual_information(X, Y, labels, base=2):
    """Calculates the MDR mutual information, I(XY;labels), in the given base

    MDR mutual information is calculated by combining variables X and Y into a
    single MDR model, then computing the mutual information between that
    model's predictions and the labels.

    Parameters
    ----------
    X: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    Y: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    labels: array-like (# samples)
        The class labels corresponding to features X and Y
    base: integer (default: 2)
        The base in which to calculate MDR mutual information

    Returns
    ----------
    mdr_mutual_information: float
        The MDR mutual information calculated according to the equation
        I(XY;labels) = H(labels) - H(labels|XY)
    """
    model_predictions = _mdr_predict(X, Y, labels)
    return mutual_information(model_predictions, labels, base=base)
def n_way_models(mdr_instance, X, y, n=(2,), feature_names=None):
    """Fits a MDR model to all n-way combinations of the features in X.

    Note that this function performs an exhaustive search through all feature
    combinations and can be computationally expensive.

    Parameters
    ----------
    mdr_instance: object
        An instance of the MDR type to use.
    X: array-like (# rows, # features)
        NumPy matrix containing the features
    y: array-like (# rows, 1)
        NumPy matrix containing the target values
    n: sequence of int (default: (2,))
        The size(s) of the MDR models to generate.
        e.g., if n == (3,), all 3-way models will be generated.
        (The default is a tuple rather than a list to avoid the shared
        mutable-default-argument pitfall; lists are still accepted.)
    feature_names: list (default: None)
        The corresponding names of the features in X.
        If None, then the features will be named according to their order.

    Yields
    ----------
    (fitted_model, fitted_model_score, fitted_model_features): tuple of (object, float, list)
        fitted_model is the MDR model (a deep copy of mdr_instance) fitted to
        one feature combination; fitted_model_score is its training score on
        the selected columns; fitted_model_features lists the names of the
        features used in that model.
    """
    if feature_names is None:
        feature_names = list(range(X.shape[1]))

    for cur_n in n:
        for features in itertools.combinations(range(X.shape[1]), cur_n):
            # Deep-copy so every combination is fitted on an independent,
            # fresh model rather than refitting a shared instance.
            mdr_model = copy.deepcopy(mdr_instance)
            mdr_model.fit(X[:, features], y)
            mdr_model_score = mdr_model.score(X[:, features], y)
            model_features = [feature_names[feature] for feature in features]
            yield mdr_model, mdr_model_score, model_features
def plot_mdr_grid(mdr_instance):
    """Visualizes the MDR grid of a given fitted MDR instance. Only works for 2-way MDR models.
    This function is currently incomplete.
    Parameters
    ----------
    mdr_instance: object
        A fitted instance of the MDR type to visualize.
    Returns
    ----------
    fig: matplotlib.figure
        Figure object for the visualized MDR grid.
    """
    # Distinct observed levels of each of the two features in the fitted model.
    # NOTE(review): set() gives arbitrary ordering, so the grid's row/column
    # order is not deterministic across runs -- confirm whether that matters.
    var1_levels = list(set([variables[0] for variables in mdr_instance.feature_map]))
    var2_levels = list(set([variables[1] for variables in mdr_instance.feature_map]))
    # Tallest bar over all cells; used to give every subplot a common y-limit.
    max_count = np.array(list(mdr_instance.class_count_matrix.values())).flatten().max()
    """
    TODO:
    - Add common axis labels
    - Make sure this scales for smaller and larger record sizes
    - Extend to 3-way+ models, e.g., http://4.bp.blogspot.com/-vgKCjEkWFUc/UPwPuHo6XvI/AAAAAAAAAE0/fORHqDcoikE/s1600/model.jpg
    """
    # One subplot per (var1, var2) level combination; shared axes so bar
    # heights are visually comparable across cells.
    fig, splots = plt.subplots(ncols=len(var1_levels), nrows=len(var2_levels), sharey=True, sharex=True)
    fig.set_figwidth(6)
    fig.set_figheight(6)
    for (var1, var2) in itertools.product(var1_levels, var2_levels):
        class_counts = mdr_instance.class_count_matrix[(var1, var2)]
        splot = splots[var2_levels.index(var2)][var1_levels.index(var1)]
        splot.set_yticks([])
        splot.set_xticks([])
        splot.set_ylim(0, max_count * 1.5)
        splot.set_xlim(-0.5, 1.5)
        # Label only the outer edge of the grid.
        if var2_levels.index(var2) == 0:
            splot.set_title('X1 = {}'.format(var1), fontsize=12)
        if var1_levels.index(var1) == 0:
            splot.set_ylabel('X2 = {}'.format(var2), fontsize=12)
        # NOTE(review): bar(left=...) was removed in matplotlib >= 2.2 (now x=),
        # and set_axis_bgcolor below was removed in matplotlib 2.x (now
        # set_facecolor) -- confirm the matplotlib version this is pinned to.
        bars = splot.bar(left=range(class_counts.shape[0]),
                         height=class_counts, width=0.5,
                         color='black', align='center')
        # Shade the cell by the MDR model's class assignment for this cell.
        bgcolor = 'lightgrey' if mdr_instance.feature_map[(var1, var2)] == 0 else 'darkgrey'
        splot.set_axis_bgcolor(bgcolor)
        # Annotate each bar with its raw count just above the bar top.
        for index, bar in enumerate(bars):
            splot.text(index, class_counts[index] + (max_count * 0.1), class_counts[index], ha='center')
    fig.tight_layout()
    return fig
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""GQL -- the SQL-like interface to the datastore.
Defines the GQL-based query class, which is a query mechanism
for the datastore which provides an alternative model for interacting with
data stored.
"""
import calendar
import datetime
import logging
import re
import time
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import users
# Re-exported so callers of this module can reference the datastore's
# merged-query type directly.
MultiQuery = datastore.MultiQuery
# GQL logs one level below DEBUG so its chatter stays hidden unless a
# caller explicitly lowers the logging threshold.
LOG_LEVEL = logging.DEBUG - 1
# Unix epoch as a naive UTC datetime; presumably a reference point for
# datetime/timestamp conversions elsewhere in the module -- TODO confirm.
_EPOCH = datetime.datetime.utcfromtimestamp(0)
def Execute(query_string, *args, **keyword_args):
    """Parse, bind, and run a GQL query in one step.

    Builds an unbound proto-query from query_string, binds the given
    positional and keyword arguments into it, and executes the resulting
    datastore query.

    Args:
      query_string: properly formatted GQL query string.
      args: positional arguments used to bind numeric references in the query.
      keyword_args: dictionary-based arguments (for named parameters).

    Returns:
      the result of running the query with the bound arguments.
    """
    # '_app' is reserved for selecting the application, not for binding.
    app = keyword_args.pop('_app', None)
    proto_query = GQL(query_string, _app=app)
    bound_query = proto_query.Bind(args, keyword_args)
    return bound_query.Run()
class GQL(object):
"""A GQL interface to the datastore.
GQL is a SQL-like language which supports more object-like semantics
in a langauge that is familiar to SQL users. The language supported by
GQL will change over time, but will start off with fairly simple
semantics.
- reserved words are case insensitive
- names are case sensitive
The syntax for SELECT is fairly straightforward:
SELECT [* | __key__ ] [FROM <entity>]
[WHERE <condition> [AND <condition> ...]]
[ORDER BY <property> [ASC | DESC] [, <property> [ASC | DESC] ...]]
[LIMIT [<offset>,]<count>]
[OFFSET <offset>]
[HINT (ORDER_FIRST | HINT FILTER_FIRST | HINT ANCESTOR_FIRST)]
<condition> := <property> {< | <= | > | >= | = | != | IN} <value>
<condition> := <property> {< | <= | > | >= | = | != | IN} CAST(<value>)
<condition> := <property> IN (<value>, ...)
<condition> := ANCESTOR IS <entity or key>
Currently the parser is LL(1) because of the simplicity of the grammer
(as it is largely predictive with one token lookahead).
The class is implemented using some basic regular expression tokenization
to pull out reserved tokens and then the recursive descent parser will act
as a builder for the pre-compiled query. This pre-compiled query is then
bound to arguments before executing the query.
Initially, three parameter passing mechanisms are supported when calling
Execute():
- Positional parameters
Execute('SELECT * FROM Story WHERE Author = :1 AND Date > :2')
- Named parameters
Execute('SELECT * FROM Story WHERE Author = :author AND Date > :date')
- Literals (numbers, strings, booleans, and NULL)
Execute('SELECT * FROM Story WHERE Author = \'James\'')
Users are also given the option of doing type conversions to other datastore
types (e.g. db.Email, db.GeoPt). The language provides a conversion function
which allows the caller to express conversions of both literals and
parameters. The current conversion operators are:
- GEOPT(float, float)
- USER(str)
- KEY(kind, id/name[, kind, id/name...])
- DATETIME(year, month, day, hour, minute, second)
- DATETIME('YYYY-MM-DD HH:MM:SS')
- DATE(year, month, day)
- DATE('YYYY-MM-DD')
- TIME(hour, minute, second)
- TIME('HH:MM:SS')
We will properly serialize and quote all values.
It should also be noted that there are some caveats to the queries that can
be expressed in the syntax. The parser will attempt to make these clear as
much as possible, but some of the caveats include:
- There is no OR operation. In most cases, you should prefer to use IN to
express the idea of wanting data matching one of a set of values.
- You cannot express inequality operators on multiple different properties
- You can only have one != operator per query (related to the previous
rule).
- The IN and != operators must be used carefully because they can
dramatically raise the amount of work done by the datastore. As such,
there is a limit on the number of elements you can use in IN statements.
This limit is set fairly low. Currently, a max of 30 datastore queries is
allowed in a given GQL query. != translates into 2x the number of
datastore queries, and IN multiplies by the number of elements in the
clause (so having two IN clauses, one with 5 elements, the other with 6
will cause 30 queries to occur).
- Literals can take the form of basic types or as type-cast literals. On
the other hand, literals within lists can currently only take the form of
simple types (strings, integers, floats).
SELECT * will return an iterable set of entities; SELECT __key__ will return
an iterable set of Keys.
"""
TOKENIZE_REGEX = re.compile(r"""
(?:'[^'\n\r]*')+|
<=|>=|!=|=|<|>|
:\w+|
,|
\*|
-?\d+(?:\.\d+)?|
\w+|
\(|\)|
\S+
""", re.VERBOSE | re.IGNORECASE)
MAX_ALLOWABLE_QUERIES = datastore.MAX_ALLOWABLE_QUERIES
__ANCESTOR = -1
def __init__(self, query_string, _app=None, _auth_domain=None):
    """Ctor.

    Parses the input query into the class as a pre-compiled query, allowing
    for a later call to Bind() to bind arguments as defined in the
    documentation.

    Args:
      query_string: properly formatted GQL query string.

    Raises:
      datastore_errors.BadQueryError: if the query is not parsable.
    """
    # Proto-query state; filled in by the recursive-descent parse below.
    self._entity = ''
    self.__filters = {}
    self.__has_ancestor = False
    self.__orderings = []
    self.__offset = -1
    self.__limit = -1
    self.__hint = ''
    self.__app = _app
    self.__auth_domain = _auth_domain

    # Tokenize up front; parsing consumes self.__symbols in order via
    # self.__next_symbol.
    self.__symbols = self.TOKENIZE_REGEX.findall(query_string)
    self.__next_symbol = 0
    if not self.__Select():
        raise datastore_errors.BadQueryError('Unable to parse query')
def Bind(self, args, keyword_args):
    """Bind the existing query to the argument list.

    Assumes that the input args are first positional, then a dictionary.
    So, if the query contains references to :1, :2 and :name, it is assumed
    that arguments are passed as (:1, :2, dict) where dict contains a mapping
    [name] -> value.

    Args:
      args: the arguments to bind to the object's unbound references.
      keyword_args: dictionary-based arguments (for named parameters).

    Raises:
      datastore_errors.BadArgumentError: when arguments are left unbound
        (missing from the inputs arguments) or when arguments do not match the
        expected type.

    Returns:
      The bound datastore.Query object. This may take the form of a MultiQuery
      object if the GQL query will require multiple backend queries to satisfy.
    """
    num_args = len(args)
    # Zero-based indices of all supplied positional args; compared against
    # used_args afterwards to detect arguments that were never referenced.
    input_args = frozenset(xrange(num_args))
    used_args = set()

    queries = []
    # IN / != filters expand into several backend queries; EnumerateQueries
    # returns one filter-map per needed backend query (empty if none needed).
    enumerated_queries = self.EnumerateQueries(used_args, args, keyword_args)
    if enumerated_queries:
        query_count = len(enumerated_queries)
    else:
        query_count = 1

    for i in xrange(query_count):
        queries.append(datastore.Query(self._entity, _app=self.__app,
                                       keys_only=self._keys_only))

    logging.log(LOG_LEVEL,
                'Binding with %i positional args %s and %i keywords %s'
                , len(args), args, len(keyword_args), keyword_args)
    # Apply every single-query (non IN/!=) filter to all backend queries;
    # multi-query conditions were already handled via enumerated_queries.
    for ((identifier, condition), value_list) in self.__filters.iteritems():
        for (operator, params) in value_list:
            value = self.__Operate(args, keyword_args, used_args, operator, params)
            if not self.__IsMultiQuery(condition):
                for query in queries:
                    self.__AddFilterToQuery(identifier, condition, value, query)

    # Every positional argument must have been consumed by some reference.
    unused_args = input_args - used_args
    if unused_args:
        # Report in user-facing 1-based numbering (:1, :2, ...).
        unused_values = [unused_arg + 1 for unused_arg in unused_args]
        raise datastore_errors.BadArgumentError('Unused positional arguments %s' %
                                                unused_values)

    if enumerated_queries:
        logging.log(LOG_LEVEL,
                    'Multiple Queries Bound: %s',
                    enumerated_queries)

        # Merge each enumerated filter-map into its corresponding query.
        for (query, enumerated_query) in zip(queries, enumerated_queries):
            query.update(enumerated_query)

    if self.__orderings:
        for query in queries:
            query.Order(*tuple(self.__orderings))

    if query_count > 1:
        # Multiple backend queries: wrap them so results are merged and
        # ordered as one logical result set.
        return MultiQuery(queries, self.__orderings)
    else:
        return queries[0]
def EnumerateQueries(self, used_args, args, keyword_args):
    """Create a list of all multi-query filter combinations required.

    To satisfy multi-query requests ("IN" and "!=" filters), multiple queries
    may be required. This code will enumerate the power-set of all multi-query
    filters.

    Args:
      used_args: set of used positional parameters (output only variable used
        in reporting for unused positional args)
      args: positional arguments referenced by the proto-query in self. This
        assumes the input is a tuple (and can also be called with a varargs
        param).
      keyword_args: dict of keyword arguments referenced by the proto-query in
        self.

    Returns:
      A list of maps [(identifier, condition) -> value] of all queries needed
      to satisfy the GQL query with the given input arguments.
    """
    enumerated_queries = []

    # Bind every filter's parameters; only IN / != conditions actually
    # expand the enumeration (handled inside __AddMultiQuery).
    for (identifier, condition), value_list in self.__filters.iteritems():
        for operator, params in value_list:
            bound_value = self.__Operate(args, keyword_args, used_args,
                                         operator, params)
            self.__AddMultiQuery(identifier, condition, bound_value,
                                 enumerated_queries)

    return enumerated_queries
def __CastError(self, operator, values, error_message):
    """Raise a BadQueryError describing a failed type-cast operation.

    Args:
      operator: the cast operation that failed.
      values: value list that was passed to the cast operator.
      error_message: detail text to embed in the 'Cast Error' string.

    Raises:
      datastore_errors.BadQueryError: always, carrying the formatted message.
    """
    message = ('Type Cast Error: unable to cast %r with operation %s (%s)' %
               (values, operator.upper(), error_message))
    raise datastore_errors.BadQueryError(message)
def __CastNop(self, values):
    """Return values[0] -- the identity "cast" used by most WHERE clauses.

    Args:
      values: operand list; must contain exactly one value.

    Returns:
      The single value in values.

    Raises:
      datastore_errors.BadQueryError: if values does not hold exactly one item.
    """
    if len(values) != 1:
        # Bug fix: arguments were previously passed as (values, 'nop', msg),
        # i.e. swapped relative to __CastError's (operator, values,
        # error_message) signature, which made the error path crash on
        # operator.upper() (a list has no .upper) instead of raising the
        # intended BadQueryError.
        self.__CastError('nop', values, 'requires one and only one value')
    else:
        return values[0]
def __CastList(self, values):
    """Return the full operand list (IN clause), or None when it is empty."""
    return values if values else None
def __CastKey(self, values):
    """Cast input values to a datastore Key.

    Accepts either a flat (kind, id/name, kind, id/name, ...) path with an
    even number of operands, or a single encoded-key string.

    Raises:
      datastore_errors.BadQueryError: (via __CastError) for any other shape.
    """
    if not len(values) % 2:
        # Even operand count: interpret as a (kind, id/name, ...) path.
        return datastore_types.Key.from_path(_app=self.__app, *values)
    if len(values) == 1 and isinstance(values[0], basestring):
        # Single string: an already-encoded key.
        return datastore_types.Key(values[0])
    self.__CastError('KEY', values,
                     'requires an even number of operands '
                     'or a single encoded string')
def __CastGeoPt(self, values):
    """Cast the two operands (lat, lon) to a datastore GeoPt."""
    if len(values) != 2:
        self.__CastError('GEOPT', values, 'requires 2 input parameters')
    # __CastError raises, so reaching here guarantees exactly two values.
    lat, lon = values
    return datastore_types.GeoPt(lat, lon)
def __CastUser(self, values):
    """Cast values[0] (an email address) to a users.User."""
    # Guard clauses; __CastError always raises, so control never falls
    # through a failed check.
    if len(values) != 1:
        self.__CastError('user', values, 'requires one and only one value')
    if values[0] is None:
        self.__CastError('user', values, 'must be non-null')
    return users.User(email=values[0], _auth_domain=self.__auth_domain)
def __EncodeIfNeeded(self, value):
    """Return value utf8-encoded to str when it is unicode, else unchanged.

    Args:
      value: input string (should pass as an instance of str or unicode).
    """
    if not isinstance(value, unicode):
        return value
    return value.encode('utf8')
def __CastDate(self, values):
    """Cast DATE values (year/month/day) from input (to datetime.datetime).

    Casts DATE input values formulated as ISO string or time tuple inputs.

    Args:
      values: either a single string with ISO time representation or 3
              integer valued date tuple (year, month, day).

    Returns:
      datetime.datetime value parsed from the input values.
    """
    if len(values) == 1:
        # Single argument: must be an ISO 'YYYY-MM-DD' string.
        value = self.__EncodeIfNeeded(values[0])
        if isinstance(value, str):
            try:
                # strptime yields a 9-tuple; keep (y, m, d, H, M, S).
                time_tuple = time.strptime(value, '%Y-%m-%d')[0:6]
            except ValueError, err:
                self.__CastError('DATE', values, err)
        else:
            self.__CastError('DATE', values, 'Single input value not a string')
    elif len(values) == 3:
        # (year, month, day) with the time-of-day zeroed out.
        time_tuple = (values[0], values[1], values[2], 0, 0, 0)
    else:
        self.__CastError('DATE', values,
                         'function takes 1 string or 3 integer values')

    try:
        return datetime.datetime(*time_tuple)
    except ValueError, err:
        # Out-of-range components (e.g. month 13) surface here.
        self.__CastError('DATE', values, err)
def __CastTime(self, values):
    """Cast TIME values (hour/min/sec) from input (to datetime.datetime).

    Casts TIME input values formulated as ISO string or time tuple inputs.

    Args:
      values: either a single string with ISO time representation or 1-4
              integer valued time tuple (hour), (hour, minute),
              (hour, minute, second), (hour, minute, second, microsec).

    Returns:
      datetime.datetime value parsed from the input values; the date part
      is pinned to 1970-01-01.
    """
    if len(values) == 1:
        value = self.__EncodeIfNeeded(values[0])
        if isinstance(value, str):
            try:
                # Parse 'HH:MM:SS'; only the time fields of the struct matter.
                time_tuple = time.strptime(value, '%H:%M:%S')
            except ValueError, err:
                self.__CastError('TIME', values, err)
            # Re-anchor onto the epoch date and trim to 6 datetime fields.
            time_tuple = (1970, 1, 1) + time_tuple[3:]
            time_tuple = time_tuple[0:6]
        elif isinstance(value, int):
            # A bare integer is interpreted as the hour.
            time_tuple = (1970, 1, 1, value)
        else:
            self.__CastError('TIME', values,
                             'Single input value not a string or integer hour')
    elif len(values) <= 4:
        # (hour[, minute[, second[, microsec]]]) anchored to the epoch date.
        time_tuple = (1970, 1, 1) + tuple(values)
    else:
        self.__CastError('TIME', values,
                         'function takes 1 to 4 integers or 1 string')

    try:
        return datetime.datetime(*time_tuple)
    except ValueError, err:
        self.__CastError('TIME', values, err)
def __CastDatetime(self, values):
    """Cast DATETIME values (string or tuple) from input (to datetime.datetime).

    Casts DATETIME input values formulated as ISO string or datetime tuple
    inputs.

    Args:
      values: either a single string with ISO representation or 3-7
              integer valued time tuple (year, month, day, ...).

    Returns:
      datetime.datetime value parsed from the input values.
    """
    if len(values) == 1:
        # Single argument: must be an ISO 'YYYY-MM-DD HH:MM:SS' string.
        value = self.__EncodeIfNeeded(values[0])
        if isinstance(value, str):
            try:
                time_tuple = time.strptime(str(value), '%Y-%m-%d %H:%M:%S')[0:6]
            except ValueError, err:
                self.__CastError('DATETIME', values, err)
        else:
            self.__CastError('DATETIME', values, 'Single input value not a string')
    else:
        # Tuple form passes straight through to the datetime constructor,
        # which validates arity (3-7) and component ranges below.
        time_tuple = values

    try:
        return datetime.datetime(*time_tuple)
    except ValueError, err:
        self.__CastError('DATETIME', values, err)
def __Operate(self, args, keyword_args, used_args, operator, params):
    """Create a single output value from params using the operator string given.

    Args:
      args,keyword_args: arguments passed in for binding purposes (used in
          binding positional and keyword based arguments).
      used_args: set of numeric arguments accessed in this call.
          values are ints representing used zero-based positional arguments.
          used as an output parameter with new used arguments appended to the
          list.
      operator: string representing the operator to use 'nop' just returns
          the first value from params.
      params: parameter list to operate on (positional references, named
          references, or literals).

    Returns:
      A value which can be used as part of a GQL filter description (either a
      list of datastore types -- for use with IN, or a single datastore type --
      for use with other filters).
    """
    if not params:
        return None

    param_values = []
    for param in params:
        if isinstance(param, Literal):
            # Literals carry their own payload; nothing to resolve.
            value = param.Get()
        else:
            # Positional (:1) or named (:name) reference -- resolve it now.
            value = self.__GetParam(param, args, keyword_args)
            if isinstance(param, int):
                # Track zero-based positional usage for unused-arg reporting.
                used_args.add(param - 1)
            logging.log(LOG_LEVEL, 'found param for bind: %s value: %s',
                        param, value)
        param_values.append(value)

    logging.log(LOG_LEVEL, '%s Operating on values: %s',
                operator, repr(param_values))
    if operator not in self.__cast_operators:
        # __Error raises, so the dispatch below is never reached here.
        self.__Error('Operation %s is invalid' % operator)
    return self.__cast_operators[operator](self, param_values)
def __IsMultiQuery(self, condition):
    """Return True when this condition ('IN' or '!=') may need several queries."""
    normalized = condition.lower()
    return normalized in ('in', '!=')
def __GetParam(self, reference, args, keyword_args):
    """Get the specified parameter from the input arguments.

    Args:
      reference: id for a filter reference in the filter list (string or
          number)
      args: positional args passed in by the user (tuple of arguments, indexed
          numerically by "reference")
      keyword_args: dict of keyword based arguments (strings in "reference")

    Returns:
      The specified param from the input list.

    Raises:
      BadArgumentError if the referenced argument doesn't exist.
    """
    num_args = len(args)
    if isinstance(reference, int):
        # Positional references are 1-based (:1 binds args[0]).
        if reference > num_args:
            raise datastore_errors.BadArgumentError(
                'Missing argument for bind, requires argument #%i, '
                'but only has %i args.' % (reference, num_args))
        return args[reference - 1]
    if isinstance(reference, basestring):
        try:
            return keyword_args[reference]
        except KeyError:
            raise datastore_errors.BadArgumentError(
                'Missing named arguments for bind, requires argument %s' %
                reference)
    assert False, 'Unknown reference %s' % reference
def __AddMultiQuery(self, identifier, condition, value, enumerated_queries):
    """Helper function to add a multi-query to previously enumerated queries.

    Args:
      identifier: property being filtered by this condition
      condition: filter condition (e.g. !=,in)
      value: value being bound
      enumerated_queries: in/out list of already bound queries -> expanded list
        with the full enumeration required to satisfy the condition query

    Raises:
      BadArgumentError if the filter is invalid (namely non-list with IN)
    """
    if condition.lower() in ('!=', 'in') and self._keys_only:
        raise datastore_errors.BadQueryError(
            'Keys only queries do not support IN or != filters.')

    def CloneQueries(queries, n):
        """Do a full copy of the queries and append to the end of the queries.

        Does an in-place replication of the input list and sorts the result to
        put copies next to one-another.

        Args:
          queries: list of all filters to clone
          n: number of copies to make

        Returns:
          Number of iterations needed to fill the structure
        """
        if not enumerated_queries:
            # First multi-query condition seen: seed with n empty filter maps.
            for i in xrange(n):
                queries.append({})
            return 1
        else:
            # Replicate the existing enumeration n-fold (power-set growth);
            # each existing filter map gets n-1 additional copies.
            old_size = len(queries)
            tmp_queries = []
            for i in xrange(n - 1):
                [tmp_queries.append(filter_map.copy()) for filter_map in queries]
            queries.extend(tmp_queries)
            # NOTE(review): sorting a list of dicts relies on Python 2's
            # arbitrary-but-total dict ordering; invalid on Python 3.
            queries.sort()
            return old_size

    if condition == '!=':
        # a != x is satisfied as (a < x) OR (a > x): two queries per clone.
        if len(enumerated_queries) * 2 > self.MAX_ALLOWABLE_QUERIES:
            raise datastore_errors.BadArgumentError(
                'Cannot satisfy query -- too many IN/!= values.')

        num_iterations = CloneQueries(enumerated_queries, 2)
        for i in xrange(num_iterations):
            enumerated_queries[2 * i]['%s <' % identifier] = value
            enumerated_queries[2 * i + 1]['%s >' % identifier] = value
    elif condition.lower() == 'in':
        if not isinstance(value, list):
            raise datastore_errors.BadArgumentError('List expected for "IN" filter')

        # IN expands into one equality query per list element.
        in_list_size = len(value)
        if len(enumerated_queries) * in_list_size > self.MAX_ALLOWABLE_QUERIES:
            raise datastore_errors.BadArgumentError(
                'Cannot satisfy query -- too many IN/!= values.')

        num_iterations = CloneQueries(enumerated_queries, in_list_size)
        for clone_num in xrange(num_iterations):
            for value_num in xrange(len(value)):
                list_val = value[value_num]
                query_num = in_list_size * clone_num + value_num
                filt = '%s =' % identifier
                enumerated_queries[query_num][filt] = list_val
def __AddFilterToQuery(self, identifier, condition, value, query):
    """Attach one parsed filter (or an ancestor restriction) to a query.

    Args:
      identifier: name of the property (or self.__ANCESTOR for ancestors)
      condition: test condition
      value: test value passed from the caller
      query: query to add the filter to
    """
    if identifier == self.__ANCESTOR:
        # Ancestor restrictions are not ordinary property filters.
        logging.log(LOG_LEVEL, 'Setting ancestor query for ancestor %s', value)
        query.Ancestor(value)
    else:
        filter_condition = '%s %s' % (identifier, condition)
        logging.log(LOG_LEVEL, 'Setting filter on "%s" with value "%s"',
                    filter_condition, value.__class__)
        datastore._AddOrAppend(query, filter_condition, value)
def Run(self, *args, **keyword_args):
    """Runs this query.

    Similar to datastore.Query.Run; assumes that limit == -1 or > 0.

    Args:
      args: arguments used to bind to references in the compiled query object.
      keyword_args: dictionary-based arguments (for named parameters).

    Returns:
      A list of results if a query count limit was passed.
      A result iterator if no limit was given.
    """
    bound_query = self.Bind(args, keyword_args)
    offset = self.__offset if self.__offset != -1 else 0
    if self.__limit != -1:
        # Explicit limit: fetch eagerly and return the result list.
        return bound_query.Get(self.__limit, offset)
    # No limit: return a lazy iterator, manually skipping `offset` results.
    iterator = bound_query.Run()
    try:
        for _ in xrange(offset):
            iterator.next()
    except StopIteration:
        pass
    return iterator
# Read-only accessors over the parsed query state.
def filters(self):
    """Return the compiled list of filters."""
    return self.__filters

def hint(self):
    """Return the datastore hint (ORDER_FIRST/FILTER_FIRST/ANCESTOR_FIRST)."""
    return self.__hint

def limit(self):
    """Return numerical result count limit (-1 means no limit was parsed)."""
    return self.__limit

def orderings(self):
    """Return the result ordering list of (property, direction) pairs."""
    return self.__orderings

def is_keys_only(self):
    """Returns True if this query returns Keys, False if it returns Entities."""
    return self._keys_only
# Iterating a compiled GQL object runs it with no bound arguments.
__iter__ = Run

# Tokenizer/parser regular expressions. Each pattern captures group(1) so
# __AcceptRegex can hand the interesting text straight back to the caller.
__result_type_regex = re.compile(r'(\*|__key__)')
# Single-quoted string literal; doubled '' inside is an escaped quote.
__quoted_string_regex = re.compile(r'((?:\'[^\'\n\r]*\')+)')
__ordinal_regex = re.compile(r':(\d+)$')      # positional parameter, e.g. :1
__named_regex = re.compile(r':(\w+)$')        # named parameter, e.g. :email
__identifier_regex = re.compile(r'(\w+)$')
__conditions_regex = re.compile(r'(<=|>=|!=|=|<|>|is|in)$', re.IGNORECASE)
__number_regex = re.compile(r'(\d+)$')
__cast_regex = re.compile(
    r'(geopt|user|key|date|time|datetime)$', re.IGNORECASE)
# Maps a cast operator name to its handler (defined earlier in this class —
# presumably applied at bind time; 'list' and 'nop' are internal pseudo-casts).
__cast_operators = {
    'geopt': __CastGeoPt,
    'user': __CastUser,
    'key': __CastKey,
    'datetime': __CastDatetime,
    'date': __CastDate,
    'time': __CastTime,
    'list': __CastList,
    'nop': __CastNop,
}
def __Error(self, error_message):
    """Generic query error.

    Args:
      error_message: string to emit as part of the 'Parse Error' string.

    Raises:
      BadQueryError and passes on an error message from the caller. Will raise
      BadQueryError on all calls to __Error()
    """
    if self.__next_symbol < len(self.__symbols):
        location = 'at symbol %s' % self.__symbols[self.__next_symbol]
    else:
        location = 'at end of string'
    raise datastore_errors.BadQueryError(
        'Parse Error: %s %s' % (error_message, location))
def __Accept(self, symbol_string):
    """Advance the symbol and return true iff the next symbol matches input."""
    if self.__next_symbol >= len(self.__symbols):
        # Nothing left to consume.
        return False
    logging.log(LOG_LEVEL, '\t%s', self.__symbols)
    logging.log(LOG_LEVEL, '\tExpect: %s Got: %s',
                symbol_string, self.__symbols[self.__next_symbol].upper())
    if self.__symbols[self.__next_symbol].upper() != symbol_string:
        return False
    self.__next_symbol += 1
    return True
def __Expect(self, symbol_string):
    """Require that the next symbol matches symbol_string, or emit an error.

    Args:
      symbol_string: next symbol expected by the caller

    Raises:
      BadQueryError if the next symbol doesn't match the parameter passed in.
    """
    accepted = self.__Accept(symbol_string)
    if not accepted:
        self.__Error('Unexpected Symbol: %s' % symbol_string)
def __AcceptRegex(self, regex):
    """Advance and return the symbol if the next symbol matches the regex.

    Args:
      regex: the compiled regular expression to attempt acceptance on.

    Returns:
      The first group in the expression to allow for convenient access
      to simple matches. Requires () around some objects in the regex.
      None if no match is found.
    """
    if self.__next_symbol >= len(self.__symbols):
        return None
    match_symbol = self.__symbols[self.__next_symbol]
    logging.log(LOG_LEVEL, '\taccept %s on symbol %s', regex, match_symbol)
    match = regex.match(match_symbol)
    if not match:
        return None
    # The symbol is consumed even when the pattern has no capture group.
    self.__next_symbol += 1
    if not match.groups():
        return None
    matched_string = match.group(1)
    logging.log(LOG_LEVEL, '\taccepted %s', matched_string)
    return matched_string
def __AcceptTerminal(self):
    """Only accept an empty string.

    Returns:
      True

    Raises:
      BadQueryError if there are unconsumed symbols in the query.
    """
    # Any leftover tokens mean the query had trailing garbage; __Error raises.
    if self.__next_symbol < len(self.__symbols):
        self.__Error('Expected no additional symbols')
    return True
def __Select(self):
    """Consume the SELECT clause and everything that follows it.

    Accepts 'SELECT *' or 'SELECT __key__'; the latter makes this a
    keys-only query. Transitions to a FROM clause.

    Returns:
      True if parsing completed okay.
    """
    self.__Expect('SELECT')
    self._keys_only = (
        self.__AcceptRegex(self.__result_type_regex) == '__key__')
    return self.__From()
def __From(self):
    """Consume the FROM clause.

    Assumes a single well formed entity in the clause ('FROM <Entity Name>').
    A missing FROM clause leaves the entity unset. Transitions to WHERE.

    Returns:
      True if parsing completed okay.
    """
    if not self.__Accept('FROM'):
        self._entity = None
        return self.__Where()
    kind = self.__AcceptRegex(self.__identifier_regex)
    if not kind:
        # __Error always raises, so this return is never reached.
        self.__Error('Identifier Expected')
        return False
    self._entity = kind
    return self.__Where()
def __Where(self):
    """Consume the WHERE clause.

    The filter list can recurse because of the AND symbol.

    Returns:
      True if parsing the WHERE clause completed correctly, as well as all
      subsequent clauses
    """
    if not self.__Accept('WHERE'):
        return self.__OrderBy()
    return self.__FilterList()
def __FilterList(self):
    """Consume the filter list (remainder of the WHERE clause)."""
    identifier = self.__AcceptRegex(self.__identifier_regex)
    if not identifier:
        self.__Error('Invalid WHERE Identifier')
        return False
    condition = self.__AcceptRegex(self.__conditions_regex)
    if not condition:
        self.__Error('Invalid WHERE Condition')
        return False
    self.__CheckFilterSyntax(identifier, condition)
    # Try each value form in turn: parameter reference, then literal, then a
    # type-cast expression. __AddSimpleFilter returns False when handed None,
    # so an unmatched form falls through to the next attempt.
    if not self.__AddSimpleFilter(identifier, condition, self.__Reference()):
        if not self.__AddSimpleFilter(identifier, condition, self.__Literal()):
            type_cast = self.__TypeCast()
            if (not type_cast or
                not self.__AddProcessedParameterFilter(identifier, condition,
                                                       *type_cast)):
                self.__Error('Invalid WHERE condition')
    # Filters may be chained with AND; recurse to consume the rest.
    if self.__Accept('AND'):
        return self.__FilterList()
    return self.__OrderBy()
def __GetValueList(self):
    """Read in a list of parameters from the tokens and return the list.

    Reads in a set of tokens, but currently only accepts literals, positional
    parameters, or named parameters. Or empty list if nothing was parsed.

    Returns:
      A list of values parsed from the input, with values taking the form of
      strings (unbound, named reference), integers (unbound, positional
      reference), or Literal() (bound value usable directly as part of a filter
      with no additional information).
    """
    params = []
    while True:
        # A reference is tried first; fall back to a literal token.
        value = self.__Reference()
        if not value:
            value = self.__Literal()
        if value:
            params.append(value)
        else:
            self.__Error('Parameter list requires literal or reference parameter')
        if not self.__Accept(','):
            break
    return params
def __CheckFilterSyntax(self, identifier, condition):
    """Check that filter conditions are valid and throw errors if not.

    Args:
      identifier: identifier being used in comparison
      condition: string form of the comparison operator used in the filter

    Raises:
      BadQueryError (via self.__Error) when ANCESTOR is used without IS,
      IS is used without ANCESTOR, or a second ancestor clause appears.
    """
    if identifier.lower() == 'ancestor':
        if condition.lower() == 'is':
            # Only one ancestor restriction may appear per query.
            if self.__has_ancestor:
                # Fix: the original message had an unbalanced quote
                # ('Only one ANCESTOR IS" clause allowed').
                self.__Error('Only one "ANCESTOR IS" clause allowed')
        else:
            self.__Error('"IS" expected to follow "ANCESTOR"')
    elif condition.lower() == 'is':
        self.__Error('"IS" can only be used when comparing against "ANCESTOR"')
def __AddProcessedParameterFilter(self, identifier, condition,
                                  operator, parameters):
    """Add a filter with post-processing required.

    Args:
      identifier: property being compared.
      condition: comparison operation being used with the property (e.g. !=).
      operator: operation to perform on the parameters before adding the filter.
      parameters: list of bound parameters passed to 'operator' before creating
          the filter. When using the parameters as a pass-through, pass 'nop'
          into the operator field and the first value will be used unprocessed).

    Returns:
      True if the filter was okay to add.
    """
    if parameters is None:
        return False
    if parameters[0] is None:
        # A failed __Reference()/__Literal() parse arrives here as [None].
        return False
    logging.log(LOG_LEVEL, 'Adding Filter %s %s %s',
                identifier, condition, repr(parameters))
    filter_rule = (identifier, condition)
    if identifier.lower() == 'ancestor':
        self.__has_ancestor = True
        filter_rule = (self.__ANCESTOR, 'is')
        # __CheckFilterSyntax has already rejected any other condition here.
        assert condition.lower() == 'is'
    if operator == 'list' and condition.lower() != 'in':
        self.__Error('Only IN can process a list of values')
    # Multiple filters on the same (property, condition) pair accumulate.
    self.__filters.setdefault(filter_rule, []).append((operator, parameters))
    return True
def __AddSimpleFilter(self, identifier, condition, parameter):
    """Add a filter to the query being built (no post-processing on parameter).

    Args:
      identifier: identifier being used in comparison
      condition: string form of the comparison operator used in the filter
      parameter: ID of the reference being made or a value of type Literal

    Returns:
      True if the filter could be added.
      False otherwise.
    """
    # Delegates with the 'nop' pseudo-operator: the single parameter is
    # used unprocessed.
    return self.__AddProcessedParameterFilter(
        identifier, condition, 'nop', [parameter])
def __Reference(self):
    """Consume a parameter reference and return it.

    Consumes a reference to a positional parameter (:1) or a named parameter
    (:email). Only consumes a single reference (not lists).

    Returns:
      The name of the reference (integer for positional parameters or string
      for named parameters) to a bind-time parameter.
    """
    logging.log(LOG_LEVEL, 'Try Reference')
    ordinal = self.__AcceptRegex(self.__ordinal_regex)
    if ordinal:
        return int(ordinal)
    named = self.__AcceptRegex(self.__named_regex)
    if named:
        return named
    return None
def __Literal(self):
    """Parse literals from our token list.

    Tries the current token as, in order: int, float, quoted string,
    TRUE/FALSE, then NULL. Each successful parse consumes the token.

    Returns:
      The parsed literal from the input string (currently either a string,
      integer, or floating point value) wrapped in Literal; None if the
      current token is not a literal.
    """
    logging.log(LOG_LEVEL, 'Try Literal')
    literal = None
    try:
        literal = int(self.__symbols[self.__next_symbol])
    except ValueError:
        pass
    else:
        self.__next_symbol += 1
    if literal is None:
        try:
            literal = float(self.__symbols[self.__next_symbol])
        except ValueError:
            pass
        else:
            self.__next_symbol += 1
    if literal is None:
        literal = self.__AcceptRegex(self.__quoted_string_regex)
        if literal:
            # Strip the surrounding quotes and unescape doubled '' quotes.
            literal = literal[1:-1].replace("''", "'")
    if literal is None:
        if self.__Accept('TRUE'):
            literal = True
        elif self.__Accept('FALSE'):
            literal = False
    if literal is not None:
        return Literal(literal)
    # NULL is handled last because Literal(None) must be distinguishable
    # from "no literal found".
    if self.__Accept('NULL'):
        return Literal(None)
    else:
        return None
def __TypeCast(self):
    """Check if the next operation is a type-cast and return the cast if so.

    Casting operators look like simple function calls on their parameters. This
    code returns the cast operator found and the list of parameters provided by
    the user to complete the cast operation.

    Returns:
      A tuple (cast operator, params) which represents the cast operation
      requested and the parameters parsed from the cast clause.
      None - if there is no TypeCast function.
    """
    logging.log(LOG_LEVEL, 'Try Type Cast')
    cast_op = self.__AcceptRegex(self.__cast_regex)
    if cast_op:
        cast_op = cast_op.lower()
        self.__Expect('(')
    elif self.__Accept('('):
        # A bare parenthesized list is treated as the implicit 'list' cast.
        cast_op = 'list'
    else:
        return None
    params = self.__GetValueList()
    self.__Expect(')')
    logging.log(LOG_LEVEL, 'Got casting operator %s with params %s',
                cast_op, repr(params))
    return (cast_op, params)
def __OrderBy(self):
    """Consume the ORDER BY clause."""
    if not self.__Accept('ORDER'):
        return self.__Limit()
    self.__Expect('BY')
    return self.__OrderList()
def __OrderList(self):
    """Consume variables and sort order for ORDER BY clause."""
    identifier = self.__AcceptRegex(self.__identifier_regex)
    if not identifier:
        self.__Error('Invalid ORDER BY Property')
    else:
        if self.__Accept('DESC'):
            direction = datastore.Query.DESCENDING
        else:
            # Consume an optional ASC token; ascending is also the default.
            self.__Accept('ASC')
            direction = datastore.Query.ASCENDING
        self.__orderings.append((identifier, direction))
    logging.log(LOG_LEVEL, self.__orderings)
    if self.__Accept(','):
        return self.__OrderList()
    return self.__Limit()
def __Limit(self):
    """Consume the LIMIT clause.

    Accepts either 'LIMIT count' or the two-number form
    'LIMIT offset, count'.

    Returns:
      The result of parsing the remaining clauses (via __Offset).
    """
    if self.__Accept('LIMIT'):
        maybe_limit = self.__AcceptRegex(self.__number_regex)
        if maybe_limit:
            if self.__Accept(','):
                # 'LIMIT offset, count': the first number is the offset.
                self.__offset = int(maybe_limit)
                if self.__offset < 0:
                    self.__Error('Bad offset in LIMIT Value')
                else:
                    logging.log(LOG_LEVEL, 'Set offset to %i', self.__offset)
                maybe_limit = self.__AcceptRegex(self.__number_regex)
                if not maybe_limit:
                    # Bug fix: a missing second number previously reached
                    # int(None) and raised TypeError instead of a parse error.
                    self.__Error('Non-number limit in LIMIT clause')
            self.__limit = int(maybe_limit)
            if self.__limit < 1:
                self.__Error('Bad Limit in LIMIT Value')
            else:
                logging.log(LOG_LEVEL, 'Set limit to %i', self.__limit)
        else:
            self.__Error('Non-number limit in LIMIT clause')
    return self.__Offset()
def __Offset(self):
    """Consume the OFFSET clause."""
    if self.__Accept('OFFSET'):
        if self.__offset != -1:
            # LIMIT offset, count already set an offset.
            self.__Error('Offset already defined in LIMIT clause')
        offset_token = self.__AcceptRegex(self.__number_regex)
        if not offset_token:
            self.__Error('Non-number offset in OFFSET clause')
        else:
            self.__offset = int(offset_token)
            if self.__offset < 0:
                self.__Error('Bad offset in OFFSET clause')
            else:
                logging.log(LOG_LEVEL, 'Set offset to %i', self.__offset)
    return self.__Hint()
def __Hint(self):
    """Consume the HINT clause.

    Requires one of three options (mirroring the rest of the datastore):
      HINT ORDER_FIRST
      HINT ANCESTOR_FIRST
      HINT FILTER_FIRST

    Returns:
      True if the hint clause and later clauses all parsed okay
    """
    if self.__Accept('HINT'):
        # Try the known hints in the same order as the original chain.
        for hint_name in ('ORDER_FIRST', 'FILTER_FIRST', 'ANCESTOR_FIRST'):
            if self.__Accept(hint_name):
                self.__hint = hint_name
                break
        else:
            self.__Error('Unknown HINT')
            return False
    return self.__AcceptTerminal()
class Literal(object):
    """Class for representing literal values in a way unique from unbound params.

    This is a simple wrapper class around basic types and datastore types,
    letting the parser distinguish already-bound values from parameter
    references that still need binding.
    """

    def __init__(self, value):
        self._value = value

    def Get(self):
        """Return the wrapped literal value."""
        return self._value

    def __repr__(self):
        return 'Literal(%s)' % repr(self._value)
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the models."""
__author__ = 'Boris Roussev (borislavr@google.com)'
from common import safe_dom
# Human-readable labels for the two availability states and the peer matcher.
DRAFT_TEXT = 'Private'
PUBLISHED_TEXT = 'Public'
PEER_MATCHER_NAME = 'Peer'

# Help strings shown in the lesson editor UI.
LESSON_TITLE_DESCRIPTION = """
The lesson title is displayed to students on the unit page.
"""
LESSON_PARENT_UNIT_DESCRIPTION = """
This lesson is part of this unit.
"""
LESSON_VIDEO_ID_DESCRIPTION = """
Provide a YouTube video ID to embed a video.
"""
LESSON_SCORED_DESCRIPTION = """
If this is set to "Questions are scored", the questions in this lesson will
be scored (summative). Otherwise, they will only provide textual feedback
(formative).
"""
LESSON_TEXT_VERSION_URL_DESCRIPTION = """
This is the URL to the text version of this lesson's content. If present, it is
accessed by clicking on the "Text Version" button on the lesson page. Links to
other sites must start with "http" or "https".
"""
LESSON_AUTO_NUMBER_DESCRIPTION = """
If checked, this lesson will be numbered in sequence in the list of lessons
in this unit.
"""
LESSON_ACTIVITY_TITLE_DESCRIPTION = """
This appears above your activity.
"""
LESSON_ACTIVITY_LISTED_DESCRIPTION = """
Whether the activity should be viewable as a stand-alone item in the unit index.
"""
LESSON_ACTIVITY_DESCRIPTION = """
Note: Activities defined in the "Activity" area are deprecated, please use the
"Lesson Body" area instead. Old-style activities are automatically
converted during "Import Course".
"""
LESSON_REQUIRE_MANUAL_COMPLETION_DESCRIPTION = """
If checked, students must click a completion button for this lesson to
be marked as completed. Does not apply to lessons with questions.
"""
LESSON_AVAILABILITY_DESCRIPTION = """
If this lesson is "%s", only admins can see it. If it is "%s", then anyone
who has access to the course can see it.
""" % (DRAFT_TEXT, PUBLISHED_TEXT)
# Help strings for question, input-field, link, and assessment editors.
QUESTION_DESCRIPTION = 'This is the description of this question.'
INCORRECT_ANSWER_FEEDBACK = """
Shown when the student response does not match any of the possible answers.
"""
INPUT_FIELD_HEIGHT_DESCRIPTION = """
Height of the input field, measured in rows.
"""
INPUT_FIELD_WIDTH_DESCRIPTION = """
Width of the input field, measured in columns.
"""
LINK_TITLE_DESCRIPTION = """
The link title is displayed to students on the syllabus page.
"""
LINK_DESCRIPTION_DESCRIPTION = """
The link description is displayed to students on the syllabus page.
"""
LINK_AVAILABILITY_DESCRIPTION = """
If this link is "%s", only admins can see it. If it is "%s", then
anyone who has access to the course can see it.
""" % (DRAFT_TEXT, PUBLISHED_TEXT)
LINK_SYLLABUS_VISIBILITY_DESCRIPTION = """
If this link is "%s", this controls whether or not its title is still
shown to students on the syllabus page.
""" % DRAFT_TEXT
LINK_URL_DESCRIPTION = """
This is the URL to which this link goes. Links to other sites must start
with "http" or "https".
"""
ASSESSMENT_TITLE_DESCRIPTION = """
The assessment title is displayed to students on the syllabus page.
"""
ASSESSMENT_DESCRIPTION_DESCRIPTION = """
The assessment description is displayed to students on the syllabus page.
"""
ASSESSMENT_AVAILABILITY_DESCRIPTION = """
If this assessment is "%s", only admins can see it. If it is "%s",
then anyone who has access to the course can see it.
""" % (DRAFT_TEXT, PUBLISHED_TEXT)
ASSESSMENT_SYLLABUS_VISIBILITY_DESCRIPTION = """
If this assessment is "%s", this controls whether or not its title is
still shown to students on the syllabus page.
""" % DRAFT_TEXT
ASSESSMENT_CONTENT_DESCRIPTION = "Assessment questions and answers."
ASSESSMENT_CONTENT_JAVASCRIPT_DESCRIPTION = (
    "%s (JavaScript format)." % ASSESSMENT_CONTENT_DESCRIPTION)
# Help strings for assessment behavior, homepage, and peer-review settings.
ASSESSMENT_POINTS_DESCRIPTION = """
This is the number of points to assign to this assessment.
"""
ASSESSMENT_SHOW_CORRECT_ANSWER_DESCRIPTION = """
If checked, students will see "Check Answers" buttons which indicate if the
correct answer is given when pressed.
"""
ASSESSMENT_SINGLE_SUBMISSION_DESCRIPTION = """
If checked, students may only submit their answers once.
"""
ASSESSMENT_DUE_DATE_FORMAT_DESCRIPTION = """
If specified, students will not be able to submit answers after this date.
"""
ASSESSMENT_SHOW_FEEDBACK_DESCRIPTION = """Show students their total score and
the feedback for their answers after the due date is passed. If no due date is
set, this has no effect.
"""
# The "%s%%" below is substituted later by the consumer, not at import time.
ASSESSMENT_FAILING_TEXT = """
This text is shown to a student upon receiving a failing result on the final
assessment. Use "%s%%" to insert the student's score.
"""
ASSESSMENT_PASSING_TEXT = """
This text is shown to a student upon receiving a passing result on the final
assessment. Use "%s%%" to insert the student's score.
"""
HOMEPAGE_PRIVACY_URL_DESCRIPTION = """
This link to your terms of service and privacy policy is displayed in the
footer of every page. If blank, the link will be omitted. Links to other
sites must start with "http" or "https".
"""
HOMEPAGE_TITLE_DESCRIPTION = """
The course title is the name of the course.
"""
HOMEPAGE_ABSTRACT_DESCRIPTION = """
The course abstract is displayed to students on the course homepage and
should describe the course.
"""
# NOTE(review): this text describes a course image rather than instructor
# details — confirm the constant is bound to the intended UI field.
HOMEPAGE_INSTRUCTOR_DETAILS_DESCRIPTION = """
Link to the course image displayed to students on the courses page (aspect ratio of 2:1).
"""
HOMEPAGE_SHOW_GPLUS_BUTTON_DESCRIPTION = """
If checked, a G+ button will be displayed in the header of all pages.
"""
ASSESSMENT_GRADING_METHOD_DESCRIPTION = """
If this is set to "Peer review", this assessment will use the Peer Review
module. Otherwise, it will be graded automatically.
"""
ASSESSMENT_DETAILS_DESCRIPTION = """
Properties and restrictions of your assessment.
"""
ASSESSMENT_REVIEWER_FEEDBACK_FORM_DESCRIPTION = """
Review form questions and answers (JavaScript format).
"""
ASSESSMENT_REVIEWER_FEEDBACK_FORM_HTML_DESCRIPTION = """
Add the content that reviewers of a Peer Review assignment see.
"""
ASSESSMENT_REVIEW_DUE_DATE_FORMAT_DESCRIPTION = """
Reviews must be completed by this date. Must be after the actual assessment due
date.
"""
ASSESSMENT_REVIEW_MIN_COUNT_DESCRIPTION = """
This is the minimum number of reviews a student must complete to get credit for
the assessment.
"""
ASSESSMENT_REVIEW_TIMEOUT_IN_MINUTES = """
How long a reviewer has to review an assignment once the reviewer accepts the
assignment. This value should be specified in minutes.
"""
# Help strings for the unit editor.
UNIT_TITLE_DESCRIPTION = """
The unit title is displayed to students on the syllabus page.
"""
UNIT_DESCRIPTION_DESCRIPTION = """
The unit description is displayed to students on the syllabus page.
"""
UNIT_PRE_ASSESSMENT_DESCRIPTION = """
This assessment is given to students at the start of this unit.
"""
UNIT_POST_ASSESSMENT_DESCRIPTION = """
This assessment is given to students at the end of this unit.
"""
# User-facing help text; fixed the typo "activties" -> "activities".
UNIT_SHOW_ON_ONE_PAGE_DESCRIPTION = """
If checked, all assessments, lessons, and activities in this unit are shown on
one page. Otherwise, each is shown on its own page.
"""
# Help strings for unit navigation flags, course-level settings, question
# editors, and translation settings.
UNIT_ALLOW_MANUAL_COMPLETION_DESCRIPTION = """
If checked, students will be able to click a completion button to mark the
unit as completed.
"""
UNIT_HIDE_ASSESSMENT_NAV = """
If checked, the "Previous Page" and "Next Page" buttons will be omitted from
pre- and post-assessments within units.
"""
UNIT_HIDE_LESSON_NAV = """
If checked, the "Previous Page" and "Next Page" buttons will be omitted from
lesson and activity pages.
"""
UNIT_HIDE_UNIT_NUMBERS = """
If checked, numbers will be omitted when displaying unit titles.
"""
UNIT_SHOW_UNIT_LINK = """
If checked, unit links will be displayed in the side navigation bar.
"""
COURSE_ADMIN_EMAILS_DESCRIPTION = """
This list of email addresses represents the administrators for this course.
Separate addresses with a comma, space, or newline.
"""
COURSE_GOOGLE_ANALYTICS_ID_DESCRIPTION = """
This ID is used to add Google Analytics functionality to this course.
"""
COURSE_GOOGLE_TAG_MANAGER_ID_DESCRIPTION = """
This ID is used to add Google Tag Manager functionality to this course.
"""
COURSE_GOOGLE_API_KEY_DESCRIPTION = """
The Google API Key is required to enable certain functionality.
"""
COURSE_GOOGLE_CLIENT_ID_DESCRIPTION = """
The Google Client ID is required to enable certain functionality.
"""
COURSE_GOOGLE_CLIENT_SECRET_DESCRIPTION = """
The Google Client Secret is required to enable certain functionality.
"""
UNIT_HEADER_DESCRIPTION = """
This content appears at the top of the unit page.
"""
UNIT_FOOTER_DESCRIPTION = """
This content appears at the bottom of the unit page.
"""
SHORT_ANSWER_SCORE_DESCRIPTION = """
Points a student receives for answering this question correctly. 1.0 indicates
full credit.
"""
SHORT_ANSWER_TYPE_DESCRIPTION = """
This indicates the type of answer.
"""
SHORT_ANSWER_ANSWER_DESCRIPTION = """
The correct answer for this question.
"""
SHORT_ANSWER_FEEDBACK_DESCRIPTION = """
Shown when the student response does not match any of the possible answers.
"""
SHORT_ANSWER_DESCRIPTION_DESCRIPTION = """
This is the description of this question.
"""
SHORT_ANSWER_HINT_DESCRIPTION = """
This provides a hint to the answer.
"""
MULTIPLE_CHOICE_FEEDBACK_DESCRIPTION = """
This provides feedback to the student for this entire question.
"""
MULTIPLE_CHOICE_CHOICE_FEEDBACK_DESCRIPTION = """
This text provides feedback to the student for this particular answer choice.
"""
MULTIPLE_CHOICE_RANDOMIZE_CHOICES_DESCRIPTION = """
If checked, the answer choices will be presented to each student in a random
order.
"""
TRANSLATIONS_BASE_LANGUAGE = """
This is the base language of the course; other languages represent translations
of the default content in this language.
"""
TRANSLATIONS_OTHER_LANGUAGES = """
The course is available in the languages listed here which are marked as
available.
"""
TRANSLATIONS_PREVENT_EDITS = """
If checked, translations cannot be edited. This can be set to prevent accidental
or undesired edits to translated content.
"""
# User-facing help text; fixed the typo "desire language" -> "desired language".
TRANSLATIONS_SHOW_LANGUAGE_PICKER = """
If checked, students can select among the available languages at any time via a
language picker. Otherwise, the desired language must be assigned during
registration.
"""
# Help strings for registration emails, role permissions, and the first two
# site-wide settings. The {{...}} placeholders are template variables
# expanded later, not at import time.
REGISTRATION_EMAIL_BODY = """
This is the body for welcome emails. Use the string {{student_name}} to include
the name of the student and {{course_title}} to include the course title. To
avoid spamming, you should always include {{unsubscribe_url}} in your message to
add a link which the recipient can use to unsubscribe from future mailings.
"""
REGISTRATION_EMAIL_SENDER = """
This is the "from" email address for welcome emails. It must be set to a valid
value for App Engine email.
"""
REGISTRATION_EMAIL_SUBJECT = """
This is the subject line for welcome emails. Use the string {{student_name}} to
include the name of the student and {{course_title}} to include the course
title.
"""
REGISTRATION_INTRODUCTION = """
This introduction text is shown to students at the top of the registration page.
"""
REGISTRATION_REGISTRATION_FORM = """
This text or question is shown below the default registration question.
"""
REGISTRATION_SEND_WELCOME_EMAIL = """
If checked, welcome emails will be sent when new students register for the
course. You must also leave notifications and unsubscribe modules active
(this is the default). An email sender must also be specified.
Do not enable if you expect more than 25 registrations per day.
"""
ROLES_PERMISSION_ALL_LOCALES_DESCRIPTION = """
Can pick all languages, including unavailable ones.
"""
ROLES_PERMISSION_SEE_DRAFTS_DESCRIPTION = """
Can see lessons and assessments with draft status.
"""
SITE_SETTINGS_AGGREGATE_COUNTERS = """
If "True", counter values are aggregated across all frontend application
instances and recorded in memcache. This slightly increases latency of all
requests, but improves the quality of performance metrics. Otherwise, you will
only see counter values for the one frontend instance you are connected to right
now.
"""
SITE_SETTINGS_CACHE_CONTENT = """
If "True", course content is cached. During course development you should turn
this setting to "False" so you can see your changes instantaneously. Otherwise,
keep this setting at "True" to maximize performance.
"""
# Rich (HTML) help text for the 'Course URLs' site setting, assembled with
# safe_dom so markup is escaped safely rather than written as raw HTML.
SITE_SETTINGS_COURSE_URLS = safe_dom.NodeList().append(
    safe_dom.Element('div').add_text("""
Specify the URLs for your course(s). Specify only one course per line.""")
).append(safe_dom.Element('br')).append(
    safe_dom.Element('span').add_text("""
The syntax has four parts, separated by colons (':'). The four parts are:""")
).append(
    # Ordered list describing the four colon-separated fields.
    safe_dom.Element('ol').add_child(
        safe_dom.Element('li').add_text(
            'The word \'course\', which is a required element.')
    ).add_child(
        safe_dom.Element('li').add_text("""
A unique course URL prefix. Examples could be '/cs101' or '/art'.
Default: '/'""")
    ).add_child(
        safe_dom.Element('li').add_text("""
A file system location of course asset files. If location is left empty,
the course assets are stored in a datastore instead of the file system. A course
with assets in a datastore can be edited online. A course with assets on file
system must be re-deployed to Google App Engine manually.""")
    ).add_child(
        safe_dom.Element('li').add_text("""
A course datastore namespace where course data is stored in App Engine.
Note: this value cannot be changed after the course is created."""))
).append(
    safe_dom.Text(
        'For example, consider the following two course entries:')
).append(safe_dom.Element('br')).append(
    # Literal example entries rendered in a styled block.
    safe_dom.Element('div', className='gcb-message').add_text(
        'course:/cs101::ns_cs101'
    ).add_child(
        safe_dom.Element('br')
    ).add_text('course:/:/')
).append(
    safe_dom.Element('div').add_text("""
Assuming you are hosting Course Builder on http://www.example.com, the first
entry defines a course on a http://www.example.com/cs101 and both its assets
and student data are stored in the datastore namespace 'ns_cs101'. The second
entry defines a course hosted on http://www.example.com/, with its assets
stored in the '/' folder of the installation and its data stored in the default
empty datastore namespace.""")
).append(safe_dom.Element('br')).append(
    safe_dom.Element('div').add_text("""
A line that starts with '#' is ignored. Course entries are applied in the
order they are defined.""")
)
# More site-wide settings help text.
SITE_SETTINGS_GOOGLE_APIS = """
If "True", courses can use Google APIs. You must still configure the relevant
APIs in the Cloud Console to successfully make API calls.
"""
SITE_SETTINGS_MEMCACHE = """
If "True", various objects are cached in memcache. During course development you
should turn this setting to "False" so you can see your changes instantaneously.
Otherwise, keep this setting at "True" to maximize performance.
"""
# Rich help text for queue-failure notifications, built with safe_dom.
SITE_SETTINGS_QUEUE_NOTIFICATION = safe_dom.NodeList().append(
    safe_dom.Element('div').add_text("""
Specify the number of queue failures before Course Builder sends a notification
email to the course administrator(s)."""
    ).append(
        safe_dom.Element('br')
    ).append(
        safe_dom.Element('br')
    ).append(
        safe_dom.Element('div').add_text("""
Course Builder uses a work queue to notify modules of changes in the status of
students (enrollment, unenrollment, etc.). Since some of the work done from this
queue is potentially sensitive (e.g., privacy concerns), the queue will re-try
failed work indefinitely. If the failures persist for the specified number of
attempts, an email is sent to all the course administrators to alert them of the
problem. Retries are done with increasingly large delays 0:15, 0:30, 1:00, 2:00,
4:00, 8:00, 32:00, 1:04:00 and every two hours thereafter.""")))
# The %s below is filled in by the consumer with the maximum allowed interval.
SITE_SETTINGS_REFRESH_INTERVAL_TEMPLATE = """
An update interval (in seconds) for reloading runtime properties from the
datastore. Specify an integer value between 1 and %s, inclusive. To completely
disable reloading properties set the value to 0 in the app.yaml file.
"""
# User-facing help text; fixed the typo "istance" -> "instance".
SITE_SETTINGS_SITE_ADMIN_EMAILS = """
This list of email addresses represents the super-administrators for the whole
site. Super-admin users have the highest level of access to your Google App
Engine instance and to all data about all courses and students within that
instance. Separate addresses with a comma, space, or newline.
"""
# User-facing help text; fixed the misspelling "supercede" -> "supersede".
SITE_SETTINGS_WHITELIST = """
Specify a list of email addresses of users who are allowed to access courses.
Separate the email addresses with commas. If this field is blank, site-wide user
whitelisting is disabled. Access to courses is implicitly granted to Google App
Engine admins and course admins, so don't repeat them here. Course-specific
whitelists supersede this list: if a course has a non-blank whitelist, this list
is ignored.
"""
# Help strings for organization branding and course identity settings.
ORGANIZATION_NAME_DESCRIPTION = """
The organization name appears in the footer of every page, but only when the
Organization URL is also provided.
"""
ORGANIZATION_URL_DESCRIPTION = """
When the Organization Name is provided, it is displayed in the footer of every
page linked to the Organization URL. Links to other sites must start with "http"
or "https".
"""
IMAGE_OR_VIDEO_DESCRIPTION = """
URL for the preview image or YouTube video shown on the course homepage.
"""
IMAGE_DESCRIPTION_DESCRIPTION = """
This is the alt text for the preview image on the course syllabus page (useful
for screen readers).
"""
SITE_NAME_DESCRIPTION = """
This is the name of the site header of every page, next to the Site Logo.
"""
SITE_LOGO_DESCRIPTION = """
This logo is displayed in the upper left corner of the Course Explorer and
every page of all courses.
"""
SITE_LOGO_DESCRIPTION_DESCRIPTION = """
This is the alt text for the Site Logo (useful for screen readers).
"""
COURSE_URL_COMPONENT_DESCRIPTION = """
This is the basename of your course in the URL.
"""
COURSE_NAMESPACE_DESCRIPTION = """
This is the namespace for your course.
"""
| |
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
from io import BytesIO
import os
import os.path as op
from functools import reduce, partial
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
from mne.datasets import testing
from mne.io import read_raw_fif, read_raw_bti
from mne.io._digitization import _make_bti_dig_points
from mne.io.bti.bti import (_read_config,
_read_bti_header, _get_bti_dev_t,
_correct_trans, _get_bti_info,
_loc_to_coil_trans, _convert_coil_trans,
_check_nan_dev_head_t, _rename_channels)
from mne.io.bti.bti import _read_head_shape
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.pick import pick_info
from mne.io.constants import FIFF
from mne import pick_types
from mne.utils import assert_dig_allclose, run_tests_if_main
from mne.transforms import Transform, combine_transforms, invert_transform
# Small BTi fixture files shipped alongside this test module, one variant
# per source architecture.
base_dir = op.join(op.abspath(op.dirname(__file__)), 'data')
archs = 'linux', 'solaris'
pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]
config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]
hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]
exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a)
                   for a in archs]
# Scratch file used by the save/reload round-trip in test_raw.
tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')
# Larger recordings from the optional mne testing dataset (may be absent;
# tests using them are gated by @testing.requires_testing_data).
fname_2500 = op.join(testing.data_path(download=False), 'BTi', 'erm_HFH',
                     'c,rfDC')
fname_sim = op.join(testing.data_path(download=False), 'BTi', '4Dsim',
                    'c,rfDC')
fname_sim_filt = op.join(testing.data_path(download=False), 'BTi', '4Dsim',
                         'c,rfDC,fn50,o')
# the 4D exporter doesn't export all channels, so we confine our comparison
NCH = 248
@testing.requires_testing_data
def test_read_2500():
    """Test reading data from 2500 system."""
    # This recording has no head-shape file; exercise the generic raw-reader
    # test harness on it.
    _test_raw_reader(read_raw_bti, pdf_fname=fname_2500, head_shape_fname=None)
def test_read_config():
    """Test parsing a BTi config file for each fixture architecture."""
    for config_fname in config_fnames:
        cfg = _read_config(config_fname)
        # Every user block must be non-empty and fully recognized.
        for block in cfg['user_blocks']:
            assert 'unknown' not in block.lower()
            assert block != ''
def test_crop_append():
    """Test cropping a BTi Raw object."""
    raw = _test_raw_reader(
        read_raw_bti, pdf_fname=pdf_fnames[0],
        config_fname=config_fnames[0], head_shape_fname=hs_fnames[0])
    data, times = raw[:]
    tmin = 0.25 * times[-1]
    tmax = 0.75 * times[-1]
    # Boolean mask of the samples that should survive the crop.
    mask = (tmin <= times) * (times <= tmax)
    cropped = raw.copy().crop(tmin, tmax)
    cropped_data, _ = cropped[:]
    # All channels are kept; only the in-window samples remain.
    assert (cropped_data.shape[0] == data.shape[0])
    assert (cropped_data.shape[1] == mask.sum())
def test_transforms():
    """Test BTi coordinate transformations against the reader's results."""
    bti_trans = (0.0, 0.02, 0.11)
    # 1) BTi device -> Neuromag device transform.  This depends only on the
    #    fixed rotation/translation, so build it once; the original rebuilt
    #    the identical Transform on every loop iteration.
    bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        raw = read_raw_bti(pdf, config, hs, preload=False)
        dev_ctf_t = raw.info['dev_ctf_t']
        dev_head_t_old = raw.info['dev_head_t']
        ctf_head_t = raw.info['ctf_head_t']
        # 2) Neuromag device -> CTF head
        t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t,
                               'meg', 'ctf_head')
        # 3) Neuromag device -> head; must match what the reader stored.
        dev_head_t_new = combine_transforms(t, ctf_head_t, 'meg', 'head')
        assert_array_equal(dev_head_t_new['trans'], dev_head_t_old['trans'])
@pytest.mark.slowtest
def test_raw():
    """Test bti conversion to Raw object against 4D-exported FIF files."""
    for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
                                         exported_fnames):
        # rx = 2 if 'linux' in pdf else 0
        # Bad config / head-shape paths must raise.
        pytest.raises(ValueError, read_raw_bti, pdf, 'eggs', preload=False)
        pytest.raises(ValueError, read_raw_bti, pdf, config, 'spam',
                      preload=False)
        if op.exists(tmp_raw_fname):
            os.remove(tmp_raw_fname)
        ex = read_raw_fif(exported, preload=True)
        ra = read_raw_bti(pdf, config, hs, preload=False)
        assert ('RawBTi' in repr(ra))
        assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
        assert_array_almost_equal(ex.info['dev_head_t']['trans'],
                                  ra.info['dev_head_t']['trans'], 7)
        assert len(ex.info['dig']) in (3563, 5154)
        assert_dig_allclose(ex.info, ra.info, limit=100)
        coil1, coil2 = [np.concatenate([d['loc'].flatten()
                                        for d in r_.info['chs'][:NCH]])
                        for r_ in (ra, ex)]
        assert_array_almost_equal(coil1, coil2, 7)
        # NOTE(review): loc1/loc2 below are computed identically to
        # coil1/coil2 above — the second comparison looks redundant; confirm
        # whether a different field was intended.
        loc1, loc2 = [np.concatenate([d['loc'].flatten()
                                      for d in r_.info['chs'][:NCH]])
                      for r_ in (ra, ex)]
        assert_allclose(loc1, loc2)
        assert_allclose(ra[:NCH][0], ex[:NCH][0])
        assert_array_equal([c['range'] for c in ra.info['chs'][:NCH]],
                           [c['range'] for c in ex.info['chs'][:NCH]])
        assert_array_equal([c['cal'] for c in ra.info['chs'][:NCH]],
                           [c['cal'] for c in ex.info['chs'][:NCH]])
        assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])
        # check our transforms
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            if ex.info[key] is None:
                pass
            else:
                assert (ra.info[key] is not None)
                for ent in ('to', 'from', 'trans'):
                    assert_allclose(ex.info[key][ent],
                                    ra.info[key][ent])
        # Round-trip through FIF and re-check the transforms.
        ra.save(tmp_raw_fname)
        re = read_raw_fif(tmp_raw_fname)  # NOTE(review): shadows name 're'
        print(re)
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            assert (isinstance(re.info[key], dict))
            this_t = re.info[key]['trans']
            assert_equal(this_t.shape, (4, 4))
            # check that the transform matrix is not the identity
            assert (not np.allclose(this_t, np.eye(4)))
        os.remove(tmp_raw_fname)
def test_info_no_rename_no_reorder_no_pdf():
    """Test private renaming, reordering and partial construction option."""
    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        info, bti_info = _get_bti_info(
            pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
            ecg_ch='E31', eog_ch=('E63', 'E64'),
            rename_channels=False, sort_by_ch_name=False)
        # Same call but without a PDF file: only partial info is available.
        info2, bti_info = _get_bti_info(
            pdf_fname=None, config_fname=config, head_shape_fname=hs,
            rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
            ecg_ch='E31', eog_ch=('E63', 'E64'),
            rename_channels=False, sort_by_ch_name=False)
        assert_equal(info['ch_names'],
                     [ch['ch_name'] for ch in info['chs']])
        # Without sorting, the native 4D channel order must be preserved.
        assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],
                     ['A22', 'A2', 'A104', 'A241', 'A138'])
        assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],
                     ['A133', 'A158', 'A44', 'A134', 'A216'])
        info = pick_info(info, pick_types(info, meg=True, stim=True,
                                          resp=True))
        info2 = pick_info(info2, pick_types(info2, meg=True, stim=True,
                                            resp=True))
        # Measurement metadata is present only when a PDF file was given.
        assert (info['sfreq'] is not None)
        assert (info['lowpass'] is not None)
        assert (info['highpass'] is not None)
        assert (info['meas_date'] is not None)
        assert_equal(info2['sfreq'], None)
        assert_equal(info2['lowpass'], None)
        assert_equal(info2['highpass'], None)
        assert_equal(info2['meas_date'], None)
        # (This assertion was duplicated verbatim; once is enough.)
        assert_equal(info['ch_names'], info2['ch_names'])
        for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']:
            assert_array_equal(info[key]['trans'], info2[key]['trans'])
        assert_array_equal(
            np.array([ch['loc'] for ch in info['chs']]),
            np.array([ch['loc'] for ch in info2['chs']]))
        # just check reading data | corner case
        raw1 = read_raw_bti(
            pdf_fname=pdf, config_fname=config, head_shape_fname=None,
            sort_by_ch_name=False, preload=True)
        # just check reading data | corner case
        raw2 = read_raw_bti(
            pdf_fname=pdf, config_fname=config, head_shape_fname=None,
            rename_channels=False,
            sort_by_ch_name=True, preload=True)
        # Sorting must only permute channels, never change the data.
        sort_idx = [raw1.bti_ch_labels.index(ch) for ch in raw2.bti_ch_labels]
        raw1._data = raw1._data[sort_idx]
        assert_array_equal(raw1._data, raw2._data)
        assert_array_equal(raw2.bti_ch_labels, raw2.ch_names)
def test_no_conversion():
    """Test bti no-conversion option (native 4D coordinates kept)."""
    get_info = partial(
        _get_bti_info,
        rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
        ecg_ch='E31', eog_ch=('E63', 'E64'),
        rename_channels=False, sort_by_ch_name=False)
    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        raw_info, _ = get_info(pdf, config, hs, convert=False)
        # Reference info with conversion enabled, for contrast.
        raw_info_con = read_raw_bti(
            pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
            convert=True, preload=False).info
        pick_info(raw_info_con,
                  pick_types(raw_info_con, meg=True, ref_meg=True),
                  copy=False)
        pick_info(raw_info,
                  pick_types(raw_info, meg=True, ref_meg=True), copy=False)
        bti_info = _read_bti_header(pdf, config)
        dev_ctf_t = _correct_trans(bti_info['bti_transform'][0])
        assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans'])
        # Without conversion the device/head transforms stay identity.
        assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4))
        assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4))
        nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs)
        dig, t, _ = _make_bti_dig_points(nasion, lpa, rpa, hpi, dig_points,
                                         convert=False, use_hpi=False)
        assert_array_equal(t['trans'], np.eye(4))
        # Unconverted dig points must match; converted ones must differ.
        for ii, (old, new, con) in enumerate(zip(
                dig, raw_info['dig'], raw_info_con['dig'])):
            assert_equal(old['ident'], new['ident'])
            assert_array_equal(old['r'], new['r'])
            assert (not np.allclose(old['r'], con['r']))
            if ii > 10:
                break
        ch_map = {ch['chan_label']: ch['loc'] for ch in bti_info['chs']}
        for ii, ch_label in enumerate(raw_info['ch_names']):
            if not ch_label.startswith('A'):
                continue  # only MEG channels carry 4D coil locations
            t1 = ch_map[ch_label]  # correction already performed in bti_info
            t2 = raw_info['chs'][ii]['loc']
            t3 = raw_info_con['chs'][ii]['loc']
            assert_allclose(t1, t2, atol=1e-15)
            assert (not np.allclose(t1, t3))
        # Coordinate frame flags must reflect converted vs. native data.
        idx_a = raw_info_con['ch_names'].index('MEG 001')
        idx_b = raw_info['ch_names'].index('A22')
        assert_equal(
            raw_info_con['chs'][idx_a]['coord_frame'],
            FIFF.FIFFV_COORD_DEVICE)
        assert_equal(
            raw_info['chs'][idx_b]['coord_frame'],
            FIFF.FIFFV_MNE_COORD_4D_HEAD)
def test_bytes_io():
    """Test reading BTi data from in-memory file-like objects."""
    def _to_bytes_io(fname):
        # Slurp the on-disk file fully into memory.
        with open(fname, 'rb') as fid:
            return BytesIO(fid.read())

    for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
        raw = read_raw_bti(pdf, config, hs, convert=True, preload=False)
        raw2 = read_raw_bti(_to_bytes_io(pdf), _to_bytes_io(config),
                            _to_bytes_io(hs), convert=True, preload=False)
        repr(raw2)  # smoke-test repr with BytesIO inputs
        assert_array_equal(raw[:][0], raw2[:][0])
def test_setup_headshape():
    """Test reading a BTi head-shape file."""
    for hs_fname in hs_fnames:
        nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs_fname)
        dig, t, _ = _make_bti_dig_points(nasion, lpa, rpa, hpi, dig_points)
        # Every dig point must expose the expected keys.
        found = set()
        for d in dig:
            found.update(d.keys())
        assert {'kind', 'ident', 'r'} <= found
def test_nan_trans():
    """Test unlikely case that the device to head transform is empty.

    Injects NaN into the transform and checks that ``_check_nan_dev_head_t``
    and the subsequent coil-transform conversion run without error; the
    inner loop is a smoke test only (no assertion on ``t``).
    """
    for ii, pdf_fname in enumerate(pdf_fnames):
        bti_info = _read_bti_header(
            pdf_fname, config_fnames[ii], sort_by_ch_name=True)
        dev_ctf_t = Transform('ctf_meg', 'ctf_head',
                              _correct_trans(bti_info['bti_transform'][0]))
        # reading params
        convert = True
        rotation_x = 0.
        translation = (0.0, 0.02, 0.11)
        bti_dev_t = _get_bti_dev_t(rotation_x, translation)
        bti_dev_t = Transform('ctf_meg', 'meg', bti_dev_t)
        ecg_ch = 'E31'
        eog_ch = ('E63', 'E64')
        # read parts of info to get trans
        bti_ch_names = list()
        for ch in bti_info['chs']:
            ch_name = ch['name']
            # Non-MEG channels use their label when one is present.
            if not ch_name.startswith('A'):
                ch_name = ch.get('chan_label', ch_name)
            bti_ch_names.append(ch_name)
        neuromag_ch_names = _rename_channels(
            bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch)
        ch_mapping = zip(bti_ch_names, neuromag_ch_names)
        # add some nan in some locations!
        dev_ctf_t['trans'][:, 3] = np.nan
        _check_nan_dev_head_t(dev_ctf_t)
        for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping):
            loc = bti_info['chs'][idx]['loc']
            if loc is not None:
                if convert:
                    t = _loc_to_coil_trans(bti_info['chs'][idx]['loc'])
                    t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t)
@testing.requires_testing_data
@pytest.mark.parametrize('fname', (fname_sim, fname_sim_filt))
@pytest.mark.parametrize('preload', (True, False))
def test_bti_ch_data(fname, preload):
    """Test for gh-6048."""
    # Successful reading is the whole test.
    read_raw_bti(fname, preload=preload)  # used to fail with ascii decode err


run_tests_if_main()
| |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe modules this recipe depends on; resolved by the recipe engine.
DEPS = [
  'chromium',
  'depot_tools/bot_update',
  'depot_tools/gclient',
  'recipe_engine/json',
  'recipe_engine/path',
  'recipe_engine/properties',
  'recipe_engine/python',
  'recipe_engine/step',
]
def linux_sdk_multi_steps(api):
  """Steps for the linux-sdk-multi (and linux-sdk-asan-multi) builders."""
  build_properties = api.properties.legacy()
  # update scripts step; implicitly run by recipe engine.
  # bot_update step
  src_cfg = api.gclient.make_config(GIT_MODE=True)
  soln = src_cfg.solutions.add()
  soln.name = "src"
  soln.url = "https://chromium.googlesource.com/chromium/src.git"
  soln.custom_deps = {'src/third_party/WebKit/LayoutTests': None}
  soln.custom_vars = {'webkit_trunk': 'svn://svn.chromium.org/blink/trunk',
                      'googlecode_url': 'svn://svn.chromium.org/%s',
                      'nacl_trunk':
                      'svn://svn.chromium.org/native_client/trunk',
                      'sourceforge_url': 'svn://svn.chromium.org/%(repo)s',
                      'llvm_url': 'svn://svn.chromium.org/llvm-project'}
  soln = src_cfg.solutions.add()
  soln.name = "src-internal"
  soln.url = "svn://svn.chromium.org/chrome-internal/trunk/src-internal"
  # Test-data directories excluded from the internal checkout.
  soln.custom_deps = {'src/chrome/test/data/firefox2_searchplugins': None,
                      'src/tools/grit/grit/test/data': None,
                      'src/chrome/test/data/firefox3_searchplugins': None,
                      'src/webkit/data/test_shell/plugins': None,
                      'src/data/page_cycler': None,
                      'src/data/mozilla_js_tests': None,
                      'src/chrome/test/data/firefox2_profile/searchplugins':
                      None,
                      'src/data/esctf': None,
                      'src/data/memory_test': None,
                      'src/data/mach_ports': None,
                      'src/webkit/data/xbm_decoder': None,
                      'src/webkit/data/ico_decoder': None,
                      'src/data/selenium_core': None,
                      'src/chrome/test/data/ssl/certs': None,
                      'src/chrome/test/data/osdd': None,
                      'src/webkit/data/bmp_decoder': None,
                      'src/chrome/test/data/firefox3_profile/searchplugins':
                      None,
                      'src/data/autodiscovery': None}
  soln.custom_vars = {}
  src_cfg.got_revision_mapping.update(
      {'src': 'got_revision',
       'src/third_party/WebKit': 'got_webkit_revision',
       'src/tools/swarming_client': 'got_swarming_client_revision',
       'src/v8': 'got_v8_revision'})
  api.gclient.c = src_cfg
  result = api.bot_update.ensure_checkout(force=True)
  build_properties.update(result.json.output.get("properties", {}))
  # gclient revert step; made unnecessary by bot_update
  # gclient update step; made unnecessary by bot_update
  # gclient runhooks wrapper step
  env = {'CHROMIUM_GYP_SYNTAX_CHECK': '1',
         'LANDMINES_VERBOSE': '1',
         'DEPOT_TOOLS_UPDATE': '0',
         'GYP_DEFINES': 'fastbuild=1 component=static_library'}
  api.python("gclient runhooks wrapper",
             api.path["build"].join("scripts", "slave",
                                    "runhooks_wrapper.py"),
             env=env)
  # cleanup_temp step
  api.chromium.cleanup_temp()
  # compile.py step
  args = ['--target', 'Release', '--clobber', '--compiler=goma',
          'chromium_builder_nacl_sdk']
  api.python("compile",
             api.path["build"].join("scripts", "slave", "compile.py"),
             args=args)
  # annotated_steps step
  api.python(
      "annotated_steps",
      api.path["build"].join("scripts", "slave", "chromium",
                             "nacl_sdk_buildbot_run.py"),
      args=
      ['--build-properties=%s' % api.json.dumps(build_properties,
                                                separators=(',', ':')),
       '--factory-properties={"annotated_script":"nacl_sdk_buildbot_run.py"'+\
       ',"blink_config":"chromium",'+\
       '"gclient_env":{"CHROMIUM_GYP_SYNTAX_CHECK":"1",'+\
       '"DEPOT_TOOLS_UPDATE":"0","GYP_DEFINES":'+\
       '"fastbuild=1 component=static_library","LANDMINES_VERBOSE":"1"'+\
       '},"no_gclient_branch":true,"nuke_and_pave":false}'
      ],
      allow_subannotations=True)
def mac_sdk_multi_steps(api):
  """Steps for the mac-sdk-multi builder (ninja + goma-clang)."""
  build_properties = api.properties.legacy()
  # update scripts step; implicitly run by recipe engine.
  # bot_update step
  src_cfg = api.gclient.make_config(GIT_MODE=True)
  soln = src_cfg.solutions.add()
  soln.name = "src"
  soln.url = "https://chromium.googlesource.com/chromium/src.git"
  soln.custom_deps = {'src/third_party/WebKit/LayoutTests': None}
  soln.custom_vars = {'webkit_trunk': 'svn://svn.chromium.org/blink/trunk',
                      'googlecode_url': 'svn://svn.chromium.org/%s',
                      'nacl_trunk':
                      'svn://svn.chromium.org/native_client/trunk',
                      'sourceforge_url': 'svn://svn.chromium.org/%(repo)s',
                      'llvm_url': 'svn://svn.chromium.org/llvm-project'}
  soln = src_cfg.solutions.add()
  soln.name = "src-internal"
  soln.url = "svn://svn.chromium.org/chrome-internal/trunk/src-internal"
  # Test-data directories excluded from the internal checkout.
  soln.custom_deps = {'src/chrome/test/data/firefox2_searchplugins': None,
                      'src/tools/grit/grit/test/data': None,
                      'src/chrome/test/data/firefox3_searchplugins': None,
                      'src/webkit/data/test_shell/plugins': None,
                      'src/data/page_cycler': None,
                      'src/data/mozilla_js_tests': None,
                      'src/chrome/test/data/firefox2_profile/searchplugins':
                      None,
                      'src/data/esctf': None,
                      'src/data/memory_test': None,
                      'src/data/mach_ports': None,
                      'src/webkit/data/xbm_decoder': None,
                      'src/webkit/data/ico_decoder': None,
                      'src/data/selenium_core': None,
                      'src/chrome/test/data/ssl/certs': None,
                      'src/chrome/test/data/osdd': None,
                      'src/webkit/data/bmp_decoder': None,
                      'src/chrome/test/data/firefox3_profile/searchplugins':
                      None,
                      'src/data/autodiscovery': None}
  soln.custom_vars = {}
  src_cfg.got_revision_mapping.update(
      {'src': 'got_revision',
       'src/third_party/WebKit': 'got_webkit_revision',
       'src/tools/swarming_client': 'got_swarming_client_revision',
       'src/v8': 'got_v8_revision'})
  api.gclient.c = src_cfg
  result = api.bot_update.ensure_checkout(force=True)
  build_properties.update(result.json.output.get("properties", {}))
  # gclient revert step; made unnecessary by bot_update
  # gclient update step; made unnecessary by bot_update
  # gclient runhooks wrapper step
  env = {'CHROMIUM_GYP_SYNTAX_CHECK': '1',
         'GYP_GENERATORS': 'ninja',
         'DEPOT_TOOLS_UPDATE': '0',
         'GYP_DEFINES': 'fastbuild=1 component=static_library',
         'LANDMINES_VERBOSE': '1'}
  api.python("gclient runhooks wrapper",
             api.path["build"].join("scripts", "slave",
                                    "runhooks_wrapper.py"),
             env=env)
  # cleanup_temp step
  api.chromium.cleanup_temp()
  # compile.py step
  args = ['--target', 'Release', '--clobber', '--build-tool=ninja',
          '--compiler=goma-clang', '--', 'chromium_builder_nacl_sdk']
  api.python("compile",
             api.path["build"].join("scripts", "slave", "compile.py"),
             args=args)
  # annotated_steps step
  api.python(
      "annotated_steps",
      api.path["build"].join("scripts", "slave", "chromium",
                             "nacl_sdk_buildbot_run.py"),
      args=
      ['--build-properties=%s' % api.json.dumps(build_properties,
                                                separators=(',', ':')),
       '--factory-properties={"annotated_script":"nacl_sdk_buildbot_run.py"'+\
       ',"blink_config":"chromium","gclient_env":'+\
       '{"CHROMIUM_GYP_SYNTAX_CHECK":"1","DEPOT_TOOLS_UPDATE":"0",'+\
       '"GYP_DEFINES":"fastbuild=1 component=static_library",'+\
       '"GYP_GENERATORS":"ninja","LANDMINES_VERBOSE":"1"},'+\
       '"no_gclient_branch":true,"nuke_and_pave":false}'
      ],
      allow_subannotations=True)
def windows_sdk_multi_steps(api):
  """Steps for the windows-sdk-multi builder (Visual Studio solution)."""
  build_properties = api.properties.legacy()
  # svnkill step; not necessary in recipes
  # update scripts step; implicitly run by recipe engine.
  # taskkill step
  api.python("taskkill", api.path["build"].join("scripts", "slave",
                                                "kill_processes.py"))
  # bot_update step
  src_cfg = api.gclient.make_config(GIT_MODE=True)
  soln = src_cfg.solutions.add()
  soln.name = "src"
  soln.url = "https://chromium.googlesource.com/chromium/src.git"
  soln.custom_deps = {'src/third_party/WebKit/LayoutTests': None}
  soln.custom_vars = {'webkit_trunk': 'svn://svn.chromium.org/blink/trunk',
                      'googlecode_url': 'svn://svn.chromium.org/%s',
                      'nacl_trunk':
                      'svn://svn.chromium.org/native_client/trunk',
                      'sourceforge_url': 'svn://svn.chromium.org/%(repo)s',
                      'llvm_url': 'svn://svn.chromium.org/llvm-project'}
  soln = src_cfg.solutions.add()
  soln.name = "src-internal"
  soln.url = "svn://svn.chromium.org/chrome-internal/trunk/src-internal"
  # Test-data directories excluded from the internal checkout.
  soln.custom_deps = {'src/chrome/test/data/firefox2_searchplugins': None,
                      'src/tools/grit/grit/test/data': None,
                      'src/chrome/test/data/firefox3_searchplugins': None,
                      'src/webkit/data/test_shell/plugins': None,
                      'src/data/page_cycler': None,
                      'src/data/mozilla_js_tests': None,
                      'src/chrome/test/data/firefox2_profile/searchplugins':
                      None,
                      'src/data/esctf': None,
                      'src/data/memory_test': None,
                      'src/data/mach_ports': None,
                      'src/webkit/data/xbm_decoder': None,
                      'src/webkit/data/ico_decoder': None,
                      'src/data/selenium_core': None,
                      'src/chrome/test/data/ssl/certs': None,
                      'src/chrome/test/data/osdd': None,
                      'src/webkit/data/bmp_decoder': None,
                      'src/chrome/test/data/firefox3_profile/searchplugins':
                      None,
                      'src/data/autodiscovery': None}
  soln.custom_vars = {}
  src_cfg.got_revision_mapping.update(
      {'src': 'got_revision',
       'src/third_party/WebKit': 'got_webkit_revision',
       'src/tools/swarming_client': 'got_swarming_client_revision',
       'src/v8': 'got_v8_revision'})
  api.gclient.c = src_cfg
  result = api.bot_update.ensure_checkout(force=True)
  build_properties.update(result.json.output.get("properties", {}))
  # gclient revert step; made unnecessary by bot_update
  # gclient update step; made unnecessary by bot_update
  # gclient runhooks wrapper step
  env = {'CHROMIUM_GYP_SYNTAX_CHECK': '1',
         'LANDMINES_VERBOSE': '1',
         'DEPOT_TOOLS_UPDATE': '0',
         'GYP_DEFINES': 'fastbuild=1 component=static_library'}
  api.python("gclient runhooks wrapper",
             api.path["build"].join("scripts", "slave",
                                    "runhooks_wrapper.py"),
             env=env)
  # cleanup_temp step
  api.chromium.cleanup_temp()
  # compile.py step
  args = ['--solution', 'all.sln', '--project', 'chromium_builder_nacl_sdk',
          '--target', 'Release', '--clobber', '--compiler=goma']
  api.python("compile",
             api.path["build"].join("scripts", "slave", "compile.py"),
             args=args)
  # annotated_steps step
  api.python(
      "annotated_steps",
      api.path["build"].join("scripts", "slave", "chromium",
                             "nacl_sdk_buildbot_run.py"),
      args=
      ['--build-properties=%s' % api.json.dumps(build_properties,
                                                separators=(',', ':')),
       '--factory-properties={"annotated_script":"nacl_sdk_buildbot_run.py"'+\
       ',"blink_config":"chromium","gclient_env":'+\
       '{"CHROMIUM_GYP_SYNTAX_CHECK":"1","DEPOT_TOOLS_UPDATE":"0",'+\
       '"GYP_DEFINES":"fastbuild=1 component=static_library",'+\
       '"LANDMINES_VERBOSE":"1"},"no_gclient_branch":true,'+\
       '"nuke_and_pave":false}'
      ],
      allow_subannotations=True)
def linux_sdk_multirel_steps(api):
  """Steps for the linux-sdk-multirel builder (official buildspec)."""
  build_properties = api.properties.legacy()
  # update scripts step; implicitly run by recipe engine.
  # bot_update step
  src_cfg = api.gclient.make_config(GIT_MODE=True)
  soln = src_cfg.solutions.add()
  soln.name = "chrome-official"
  soln.url = "svn://svn.chromium.org/chrome-internal/trunk/tools/buildspec/"+\
             "build/chrome-official"
  soln.custom_deps = {'src-pdf': None, 'src/pdf': None}
  soln.custom_vars = {'webkit_trunk': 'svn://svn.chromium.org/blink/trunk',
                      'googlecode_url': 'svn://svn.chromium.org/%s',
                      'sourceforge_url': 'svn://svn.chromium.org/%(repo)s',
                      'svn_url': 'svn://svn.chromium.org'}
  src_cfg.got_revision_mapping.update(
      {'src': 'got_revision',
       'src/third_party/WebKit': 'got_webkit_revision',
       'src/tools/swarming_client': 'got_swarming_client_revision',
       'src/v8': 'got_v8_revision'})
  api.gclient.c = src_cfg
  result = api.bot_update.ensure_checkout(force=True)
  build_properties.update(result.json.output.get("properties", {}))
  # unnamed step; null converted
  # gclient runhooks wrapper step
  env = {'CHROMIUM_GYP_SYNTAX_CHECK': '1',
         'LANDMINES_VERBOSE': '1',
         'DEPOT_TOOLS_UPDATE': '0',
         'GYP_DEFINES': 'fastbuild=1 component=static_library'}
  api.python("gclient runhooks wrapper",
             api.path["build"].join("scripts", "slave",
                                    "runhooks_wrapper.py"),
             env=env)
  # cleanup_temp step
  api.chromium.cleanup_temp()
  # compile.py step
  args = ['--target', 'Release', '--clobber', '--compiler=goma',
          'chromium_builder_tests']
  api.python("compile",
             api.path["build"].join("scripts", "slave", "compile.py"),
             args=args)
  # annotated_steps step
  api.python(
      "annotated_steps",
      api.path["build"].join("scripts", "slave", "chromium",
                             "nacl_sdk_buildbot_run.py"),
      args=
      ['--build-properties=%s' % api.json.dumps(build_properties,
                                                separators=(',', ':')),
       '--factory-properties={"annotated_script":"nacl_sdk_buildbot_run.py"'+\
       ',"blink_config":"chromium","gclient_env":'+\
       '{"CHROMIUM_GYP_SYNTAX_CHECK":"1","DEPOT_TOOLS_UPDATE":"0",'+\
       '"GYP_DEFINES":"fastbuild=1 component=static_library",'+\
       '"LANDMINES_VERBOSE":"1"},"no_gclient_branch":true,'+\
       '"nuke_and_pave":true}'
      ],
      allow_subannotations=True)
def windows_sdk_multirel_steps(api):
  """Steps for the windows-sdk-multirel builder (official buildspec)."""
  build_properties = api.properties.legacy()
  # svnkill step; not necessary in recipes
  # update scripts step; implicitly run by recipe engine.
  # taskkill step
  api.python("taskkill", api.path["build"].join("scripts", "slave",
                                                "kill_processes.py"))
  # bot_update step
  src_cfg = api.gclient.make_config(GIT_MODE=True)
  soln = src_cfg.solutions.add()
  soln.name = "chrome-official"
  soln.url = "svn://svn.chromium.org/chrome-internal/trunk/tools/buildspec/"+\
             "build/chrome-official"
  soln.custom_deps = {'src-pdf': None, 'src/pdf': None}
  soln.custom_vars = {'webkit_trunk': 'svn://svn.chromium.org/blink/trunk',
                      'googlecode_url': 'svn://svn.chromium.org/%s',
                      'sourceforge_url': 'svn://svn.chromium.org/%(repo)s',
                      'svn_url': 'svn://svn.chromium.org'}
  src_cfg.got_revision_mapping.update(
      {'src': 'got_revision',
       'src/third_party/WebKit': 'got_webkit_revision',
       'src/tools/swarming_client': 'got_swarming_client_revision',
       'src/v8': 'got_v8_revision'})
  api.gclient.c = src_cfg
  result = api.bot_update.ensure_checkout(force=True)
  build_properties.update(result.json.output.get("properties", {}))
  # unnamed step; null converted
  # gclient runhooks wrapper step
  env = {'CHROMIUM_GYP_SYNTAX_CHECK': '1',
         'LANDMINES_VERBOSE': '1',
         'DEPOT_TOOLS_UPDATE': '0',
         'GYP_DEFINES': 'fastbuild=1 component=static_library'}
  api.python("gclient runhooks wrapper",
             api.path["build"].join("scripts", "slave",
                                    "runhooks_wrapper.py"),
             env=env)
  # cleanup_temp step
  api.chromium.cleanup_temp()
  # compile.py step
  args = ['--solution', 'all.sln', '--project', 'chromium_builder_tests',
          '--target', 'Release', '--clobber', '--compiler=goma']
  api.python("compile",
             api.path["build"].join("scripts", "slave", "compile.py"),
             args=args)
  # annotated_steps step
  api.python(
      "annotated_steps",
      api.path["build"].join("scripts", "slave", "chromium",
                             "nacl_sdk_buildbot_run.py"),
      args=
      ['--build-properties=%s' % api.json.dumps(build_properties,
                                                separators=(',', ':')),
       '--factory-properties={"annotated_script":"nacl_sdk_buildbot_run.py"'+\
       ',"blink_config":"chromium","gclient_env":'+\
       '{"CHROMIUM_GYP_SYNTAX_CHECK":"1","DEPOT_TOOLS_UPDATE":"0",'+\
       '"GYP_DEFINES":"fastbuild=1 component=static_library",'+\
       '"LANDMINES_VERBOSE":"1"},"no_gclient_branch":true,'+\
       '"nuke_and_pave":true}'
      ],
      allow_subannotations=True)
def mac_sdk_multirel_steps(api):
  """Steps for the mac-sdk-multirel builder (official buildspec, ninja)."""
  build_properties = api.properties.legacy()
  # update scripts step; implicitly run by recipe engine.
  # bot_update step
  src_cfg = api.gclient.make_config(GIT_MODE=True)
  soln = src_cfg.solutions.add()
  soln.name = "chrome-official"
  soln.url = "svn://svn.chromium.org/chrome-internal/trunk/tools/buildspec/"+\
             "build/chrome-official"
  soln.custom_deps = {'src-pdf': None, 'src/pdf': None}
  soln.custom_vars = {'webkit_trunk': 'svn://svn.chromium.org/blink/trunk',
                      'googlecode_url': 'svn://svn.chromium.org/%s',
                      'sourceforge_url': 'svn://svn.chromium.org/%(repo)s',
                      'svn_url': 'svn://svn.chromium.org'}
  src_cfg.got_revision_mapping.update(
      {'src': 'got_revision',
       'src/third_party/WebKit': 'got_webkit_revision',
       'src/tools/swarming_client': 'got_swarming_client_revision',
       'src/v8': 'got_v8_revision'})
  api.gclient.c = src_cfg
  result = api.bot_update.ensure_checkout(force=True)
  build_properties.update(result.json.output.get("properties", {}))
  # unnamed step; null converted
  # gclient runhooks wrapper step
  env = {'LANDMINES_VERBOSE': '1',
         'GYP_GENERATORS': 'ninja',
         'DEPOT_TOOLS_UPDATE': '0',
         'GYP_DEFINES': 'fastbuild=1 component=static_library',
         'CHROMIUM_GYP_SYNTAX_CHECK': '1'}
  api.python("gclient runhooks wrapper",
             api.path["build"].join("scripts", "slave",
                                    "runhooks_wrapper.py"),
             env=env)
  # cleanup_temp step
  api.chromium.cleanup_temp()
  # compile.py step
  args = ['--target', 'Release', '--clobber', '--build-tool=ninja',
          '--compiler=goma-clang', '--', 'chromium_builder_tests']
  api.python("compile",
             api.path["build"].join("scripts", "slave", "compile.py"),
             args=args)
  # annotated_steps step
  api.python(
      "annotated_steps",
      api.path["build"].join("scripts", "slave", "chromium",
                             "nacl_sdk_buildbot_run.py"),
      args=
      ['--build-properties=%s' % api.json.dumps(build_properties,
                                                separators=(',', ':')),
       '--factory-properties={"annotated_script":"nacl_sdk_buildbot_run.py"'+\
       ',"blink_config":"chromium","gclient_env":'+\
       '{"CHROMIUM_GYP_SYNTAX_CHECK":"1","DEPOT_TOOLS_UPDATE":"0",'+\
       '"GYP_DEFINES":"fastbuild=1 component=static_library",'+\
       '"GYP_GENERATORS":"ninja","LANDMINES_VERBOSE":"1"},'+\
       '"no_gclient_branch":true,"nuke_and_pave":true}'
      ],
      allow_subannotations=True)
# Maps buildername -> steps function.  Note the asan builder reuses the
# plain linux-sdk-multi steps.
dispatch_directory = {
  'linux-sdk-multi': linux_sdk_multi_steps,
  'mac-sdk-multi': mac_sdk_multi_steps,
  'windows-sdk-multi': windows_sdk_multi_steps,
  'linux-sdk-multirel': linux_sdk_multirel_steps,
  'linux-sdk-asan-multi': linux_sdk_multi_steps,
  'windows-sdk-multirel': windows_sdk_multirel_steps,
  'mac-sdk-multirel': mac_sdk_multirel_steps,
}
def RunSteps(api):
  """Dispatch to the per-builder steps function for this buildername."""
  steps = dispatch_directory.get(api.properties["buildername"])
  if steps is None:
    raise api.step.StepFailure("Builder unsupported by recipe.")
  steps(api)
def GenTests(api):
  """Yield one simulation test per supported builder plus a failure case."""
  builders = [
      'linux-sdk-multi',
      'mac-sdk-multi',
      'windows-sdk-multi',
      'linux-sdk-multirel',
      'linux-sdk-asan-multi',
      'windows-sdk-multirel',
      'mac-sdk-multirel',
  ]
  for buildername in builders:
    # Test names use underscores where builder names use dashes.
    yield (api.test(buildername.replace('-', '_')) +
           api.properties(mastername='client.nacl.sdk') +
           api.properties(buildername=buildername) +
           api.properties(revision='123456789abcdef') +
           api.properties(got_revision='123456789abcdef') +
           api.properties(buildnumber='42') +
           api.properties(slavename='TestSlave'))
  # A buildername outside dispatch_directory must make RunSteps fail.
  yield (api.test('builder_not_in_dispatch_directory') +
         api.properties(mastername='client.nacl.sdk') +
         api.properties(buildername='nonexistent_builder') +
         api.properties(slavename='TestSlave'))
| |
import random
from ..core import Basic, Integer
from ..core.compatibility import as_int
class GrayCode(Basic):
    """
    A Gray code is essentially a Hamiltonian walk on
    a n-dimensional cube with edge length of one.
    The vertices of the cube are represented by vectors
    whose values are binary. The Hamilton walk visits
    each vertex exactly once. The Gray code for a 3d
    cube is ['000','100','110','010','011','111','101',
    '001'].

    A Gray code solves the problem of sequentially
    generating all possible subsets of n objects in such
    a way that each subset is obtained from the previous
    one by either deleting or adding a single object.
    In the above example, 1 indicates that the object is
    present, and 0 indicates that it is absent.

    Gray codes have applications in statistics as well when
    we want to compute various statistics related to subsets
    in an efficient manner.

    References
    ==========
    * Nijenhuis,A. and Wilf,H.S.(1978).
      Combinatorial Algorithms. Academic Press.
    * Knuth, D. (2011). The Art of Computer Programming, Vol 4
      Addison Wesley

    Examples
    ========
    >>> a = GrayCode(3)
    >>> list(a.generate_gray())
    ['000', '001', '011', '010', '110', '111', '101', '100']
    >>> a = GrayCode(4)
    >>> list(a.generate_gray())
    ['0000', '0001', '0011', '0010', '0110', '0111', '0101', '0100',
    '1100', '1101', '1111', '1110', '1010', '1011', '1001', '1000']
    """

    # Internal state: _skip is consumed by generate_gray(), _current holds
    # either an int or a bit string (see the `current` property), and _rank
    # caches the rank computed lazily by the `rank` property.
    _skip = False
    _current = 0
    _rank = None

    def __new__(cls, n, *args, **kw_args):
        """
        Default constructor.

        It takes a single argument ``n`` which gives the dimension of the Gray
        code. The starting Gray code string (``start``) or the starting ``rank``
        may also be given; the default is to start at rank = 0 ('0...0').

        Raises ValueError if ``n`` is not a positive integer, if ``start``
        is longer than ``n`` bits, or if ``rank`` is negative.

        Examples
        ========
        >>> a = GrayCode(3)
        >>> a
        GrayCode(3)
        >>> a.n
        3
        >>> a = GrayCode(3, start='100')
        >>> a.current
        '100'
        >>> a = GrayCode(4, rank=4)
        >>> a.current
        '0110'
        >>> a.rank
        4
        """
        if n < 1 or int(n) != n:
            # Bug fix: the message used {n:d}, which itself raises a
            # formatting error for the non-integral floats this branch
            # exists to report; {n} renders any offending value.
            raise ValueError(
                f'Gray code dimension must be a positive integer, not {n}')
        n = int(n)
        args = (Integer(n),) + args
        obj = Basic.__new__(cls, *args)
        if 'start' in kw_args:
            obj._current = kw_args['start']
            if len(obj._current) > n:
                raise ValueError(f'Gray code start has length {len(obj._current):d} but '
                                 f'should not be greater than {n:d}')
        elif 'rank' in kw_args:
            kw_args['rank'] = as_int(kw_args['rank'])
            # Bug fix: rank 0 ('0...0') is a valid rank and must be accepted.
            # The old `<= 0` check rejected it while still accepting the
            # equivalent rank 2**n, which wraps to 0 via the modulo below.
            if kw_args['rank'] < 0:
                raise ValueError('Gray code rank must be a nonnegative integer, '
                                 f"not {kw_args['rank']:d}")
            obj._rank = kw_args['rank'] % obj.selections
            obj._current = obj.unrank(n, obj._rank)
        return obj

    def next(self, delta=1):
        """
        Returns the Gray code a distance ``delta`` (default = 1) from the
        current value in canonical order.

        Examples
        ========
        >>> a = GrayCode(3, start='110')
        >>> a.next().current
        '111'
        >>> a.next(-1).current
        '010'
        """
        # Modulo keeps the walk on the cycle for any (possibly negative) delta.
        return GrayCode(self.n, rank=(self.rank + delta) % self.selections)

    @property
    def selections(self):
        """
        Returns the number of bit vectors in the Gray code.

        Examples
        ========
        >>> a = GrayCode(3)
        >>> a.selections
        8
        """
        return 2**self.n

    @property
    def n(self):
        """
        Returns the dimension of the Gray code.

        Examples
        ========
        >>> a = GrayCode(5)
        >>> a.n
        5
        """
        return int(self.args[0])

    def generate_gray(self, **hints):
        """
        Generates the sequence of bit vectors of a Gray Code.

        Accepts ``start`` (a Gray code string) or ``rank`` hints giving the
        point in the canonical order at which to begin.

        [1] Knuth, D. (2011). The Art of Computer Programming,
        Vol 4, Addison Wesley

        Examples
        ========
        >>> a = GrayCode(3)
        >>> list(a.generate_gray())
        ['000', '001', '011', '010', '110', '111', '101', '100']
        >>> list(a.generate_gray(start='011'))
        ['011', '010', '110', '111', '101', '100']
        >>> list(a.generate_gray(rank=4))
        ['110', '111', '101', '100']

        See Also
        ========
        skip
        """
        bits = self.n
        start = None
        if 'start' in hints:
            start = hints['start']
        elif 'rank' in hints:
            start = GrayCode.unrank(self.n, hints['rank'])
        if start is not None:
            self._current = start
        current = self.current
        graycode_bin = gray_to_bin(current)
        if len(graycode_bin) > self.n:
            raise ValueError(f'Gray code start has length {len(graycode_bin):d} but should '
                             f'not be greater than {bits:d}')
        self._current = int(current, 2)
        graycode_int = int(''.join(graycode_bin), 2)
        for i in range(graycode_int, 1 << bits):
            if self._skip:
                self._skip = False
            else:
                yield self.current
            # i ^ (i+1) is the mask of bits that change between consecutive
            # binary counts; xoring it with its own right shift converts that
            # mask into the single Gray-code bit flip for this step.
            bbtc = (i ^ (i + 1))
            gbtc = (bbtc ^ (bbtc >> 1))
            self._current = (self._current ^ gbtc)
        self._current = 0

    def skip(self):
        """
        Skips the bit generation.

        Examples
        ========
        >>> a = GrayCode(3)
        >>> for i in a.generate_gray():
        ...     if i == '010':
        ...         a.skip()
        ...     print(i)
        ...
        000
        001
        011
        010
        111
        101
        100

        See Also
        ========
        generate_gray
        """
        self._skip = True

    @property
    def rank(self):
        """
        Ranks the Gray code.

        A ranking algorithm determines the position (or rank)
        of a combinatorial object among all the objects w.r.t.
        a given order. For example, the 4 bit binary reflected
        Gray code (BRGC) '0101' has a rank of 6 as it appears in
        the 6th position in the canonical ordering of the family
        of 4 bit Gray codes.

        References
        ==========
        * http://statweb.stanford.edu/~susan/courses/s208/node12.html

        Examples
        ========
        >>> a = GrayCode(3)
        >>> list(a.generate_gray())
        ['000', '001', '011', '010', '110', '111', '101', '100']
        >>> GrayCode(3, start='100').rank
        7
        >>> GrayCode(3, rank=7).current
        '100'

        See Also
        ========
        unrank
        """
        if self._rank is None:
            # The rank of a Gray string equals the integer value of the
            # binary string it decodes to.
            self._rank = int(gray_to_bin(self.current), 2)
        return self._rank

    @property
    def current(self):
        """
        Returns the currently referenced Gray code as a bit string.

        Examples
        ========
        >>> GrayCode(3, start='100').current
        '100'
        """
        # _current may hold an int (during generation) or a string (start).
        rv = self._current or '0'
        if type(rv) is not str:
            rv = bin(rv)[2:]
        return rv.rjust(self.n, '0')

    @classmethod
    def unrank(cls, n, rank):
        """
        Unranks an n-bit sized Gray code of rank k. This method exists
        so that a derivative GrayCode class can define its own code of
        a given rank.

        The string here is generated in reverse order to allow for tail-call
        optimization.

        Examples
        ========
        >>> GrayCode(5, rank=3).current
        '00010'
        >>> GrayCode.unrank(5, 3)
        '00010'

        See Also
        ========
        rank
        """
        def _unrank(k, n):
            if n == 1:
                return str(k % 2)
            m = 2**(n - 1)
            if k < m:
                return '0' + _unrank(k, n - 1)
            return '1' + _unrank(m - (k % m) - 1, n - 1)
        return _unrank(rank, n)
def random_bitstring(n):
    """
    Generates a random bitlist of length n.

    Examples
    ========
    >>> random_bitstring(3) # doctest: +SKIP
    100
    """
    # One fair coin flip per position, joined into a single string.
    return ''.join(random.choice('01') for _ in range(n))
def gray_to_bin(bin_list):
    """
    Convert from Gray coding to binary coding.

    We assume big endian encoding.

    Examples
    ========
    >>> gray_to_bin('100')
    '111'

    See Also
    ========
    bin_to_gray
    """
    # The leading binary digit equals the leading Gray digit; each further
    # binary digit flips whenever the next Gray digit is a 1.
    out = bin_list[0]
    for bit in bin_list[1:]:
        out += '1' if out[-1] != bit else '0'
    return out
def bin_to_gray(bin_list):
    """
    Convert from binary coding to gray coding.

    We assume big endian encoding.

    Examples
    ========
    >>> bin_to_gray('111')
    '100'

    See Also
    ========
    gray_to_bin
    """
    # gray[0] = bin[0]; gray[i] = bin[i-1] XOR bin[i].
    # Bug fix: the old loop xored bin_list[i] with the previously *computed
    # Gray* digit instead of the adjacent *binary* digit, so e.g.
    # bin_to_gray('01') returned '00' instead of '01' (the '111' doctest
    # passed only by coincidence).
    b = [bin_list[0]]
    for i in range(1, len(bin_list)):
        b.append(str(int(bin_list[i - 1]) ^ int(bin_list[i])))
    return ''.join(b)
def get_subset_from_bitstring(super_set, bitstring):
    """
    Gets the subset defined by the bitstring.

    Examples
    ========
    >>> get_subset_from_bitstring(['a', 'b', 'c', 'd'], '0011')
    ['c', 'd']
    >>> get_subset_from_bitstring(['c', 'a', 'c', 'c'], '1100')
    ['c', 'a']

    See Also
    ========
    graycode_subsets
    """
    if len(super_set) != len(bitstring):
        raise ValueError('The sizes of the lists are not equal')
    # Keep exactly the elements whose corresponding bit is set.
    return [item for item, bit in zip(super_set, bitstring) if bit == '1']
def graycode_subsets(gray_code_set):
    """
    Generates the subsets as enumerated by a Gray code.

    Examples
    ========
    >>> list(graycode_subsets(['a', 'b', 'c']))
    [[], ['c'], ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'],
    ['a', 'c'], ['a']]
    >>> list(graycode_subsets(['a', 'b', 'c', 'c']))
    [[], ['c'], ['c', 'c'], ['c'], ['b', 'c'], ['b', 'c', 'c'],
    ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], ['a', 'b', 'c', 'c'],
    ['a', 'b', 'c'], ['a', 'c'], ['a', 'c', 'c'], ['a', 'c'], ['a']]

    See Also
    ========
    get_subset_from_bitstring
    """
    # Iterate the generator lazily; the old code wrapped it in list(),
    # needlessly materializing all 2**n bitstrings up front.
    for bitstring in GrayCode(len(gray_code_set)).generate_gray():
        yield get_subset_from_bitstring(gray_code_set, bitstring)
| |
#!/usr/bin/env python
'''
Exports layers and paths to OpenRaster compatible file with
extra metadata useful for importing into other systems, like games.
'''
import csv
import errno
import os.path
import shutil
import urlparse
import xml.etree.cElementTree as et
from zipfile import ZipFile
import gimpfu
from gimp import pdb
def ora_plus(img, active_layer, compression, dir_name, should_merge, should_zip):
    ''' Plugin entry point

    Exports `img` as an OpenRaster-style directory (and optionally a zipped
    .ora archive) under `dir_name`, including per-layer PNGs, per-path CSVs,
    a thumbnail, and stack.xml metadata.

    Python 2 only (uses `unicode`).

    img -- the GIMP image being exported
    active_layer -- unused here; passed automatically by gimpfu for
                    imagetypes "*"
    compression -- PNG compression level (0-9), forwarded to save_layers
    dir_name -- parent directory to create the "<image name>" dir in
    should_merge -- if true, layer groups are exported flattened
                    (see process_layer)
    should_zip -- if true, the export dir is zipped to "<name>.ora" and
                  then deleted
    '''
    # Create the root now
    root = et.Element('image')
    root.set('w', unicode(img.width))
    root.set('h', unicode(img.height))
    stack = et.SubElement(root, 'stack')
    # Create the image directory; any previous export is wiped first.
    name = os.path.splitext(os.path.basename(img.filename))[0]
    base_dir = os.path.join(dir_name, name)
    if os.access(base_dir, os.F_OK):
        shutil.rmtree(base_dir, ignore_errors=False)
    mkdirs(os.path.join(base_dir, 'data'))
    # Save the layer images and metadata
    for layer in img.layers:
        to_save = process_layer(img, layer, stack, ['data'], base_dir, should_merge)
        save_layers(img, to_save, compression, base_dir)
    # Write the thumbnail
    save_thumb(img, base_dir)
    if len(img.vectors) > 0:
        # Create the path directory
        paths_path = os.path.join(base_dir, 'paths')
        mkdirs(paths_path)
        # Save the paths and metadata.
        # NOTE: a <paths> element is not part of the baseline OpenRaster
        # spec; it is the "extra metadata" this plugin adds.
        paths_node = et.SubElement(root, 'paths')
        for path in img.vectors:
            to_save = process_path(path, paths_node, ['paths'])
            save_paths(to_save, base_dir)
    # Write the mimetype file
    with open(os.path.join(base_dir, 'mimetype'), 'w') as output_file:
        output_file.write('image/openraster')
    # Write the metadata file
    with open(os.path.join(base_dir, 'stack.xml'), 'w') as output_file:
        et.ElementTree(root).write(output_file,
                                   xml_declaration=True,
                                   encoding='utf-8',
                                   method='xml')
    # Zip it, if requested
    if should_zip:
        with ZipFile(os.path.join(dir_name, '%s.ora' % name), 'w') as f:
            old_cwd = os.getcwd()
            os.chdir(base_dir)
            try:
                # NOTE: `root` here shadows the XML root above; harmless
                # because the XML has already been written.
                # full_path[2:] strips the leading './' from walked paths.
                for root, dirs, files in os.walk('.'):
                    for filename in files:
                        full_path = os.path.join(root, filename)
                        f.write(full_path, full_path[2:])
            finally:
                os.chdir(old_cwd)
        # The unzipped directory is removed once the archive exists.
        shutil.rmtree(base_dir, ignore_errors=False)
def process_layer(img, layer, stack, dir_stack, base_dir, should_merge):
    """Record metadata for `layer` in the XML `stack` and return the list of
    (relative filename, layer) pairs that still need their pixels saved.

    dir_stack -- list of path components under base_dir where the layer
                 image will live
    should_merge -- when true, layer groups are NOT recursed into and are
                    exported as a single (merged) image
    """
    processed = []
    layer_name, attributes = parse_attributes(layer.name)
    # If this layer is a group with sublayers, recurse into them
    if not should_merge and hasattr(layer, 'layers'):
        new_dir_stack = dir_stack + [layer_name]
        try:
            os.makedirs(os.path.join(base_dir, *new_dir_stack))
        except OSError, e:
            # Only "already exists" is tolerated.
            if e.errno != errno.EEXIST:
                raise
        for sublayer in layer.layers:
            processed.extend(process_layer(img, sublayer, stack, new_dir_stack, base_dir, should_merge))
    else:
        x, y = layer.offsets
        # Always '/' in metadata; converted to os.sep only when writing files.
        filename = '/'.join(dir_stack + ['%s.png' % layer_name])
        # NOTE(review): sublayers are appended to the same flat <stack>;
        # nested OpenRaster <stack> elements are not emitted -- confirm
        # whether consumers expect nesting for groups.
        layer_node = et.SubElement(stack, 'layer')
        layer_node.set('name', layer_name)
        layer_node.set('src', filename)
        layer_node.set('x', unicode(x))
        layer_node.set('y', unicode(y))
        # Hardcoded vals. FIXME one day
        layer_node.set('composite-op', 'svg:src-over')
        layer_node.set('opacity', '1.0')
        layer_node.set('visibility', 'visible')
        # Set the custom attributes, if any
        set_custom_attributes(layer_node, attributes)
        processed.append((filename, layer))
    return processed
def save_layers(img, layers, compression, base_dir):
    """Save each (relative path, layer) pair produced by process_layer as a
    PNG file under base_dir.

    Each layer is copied into a scratch image sized to the layer so the PNG
    is cropped to the layer's own bounds.

    img -- source image (provides dimensions and base type)
    compression -- PNG compression level (0-9)
    """
    for rel_path, layer in layers:
        # Metadata paths use '/'; convert for the local filesystem.
        rel_path = rel_path.replace('/', os.sep)
        tmp_img = pdb.gimp_image_new(img.width, img.height, img.base_type)
        try:
            tmp_layer = pdb.gimp_layer_new_from_drawable(layer, tmp_img)
            tmp_layer.name = layer.name
            tmp_img.add_layer(tmp_layer, 0)
            tmp_img.resize_to_layers()
            full_path = os.path.join(base_dir, rel_path)
            filename = os.path.basename(rel_path)
            pdb.file_png_save(
                tmp_img,
                tmp_img.layers[0],
                full_path,
                filename,
                0,            # interlace
                compression,  # compression
                1,            # bkgd
                1,            # gama
                1,            # offs
                1,            # phys
                1             # time
            )
        finally:
            # Bug fix: images created with gimp-image-new are not garbage
            # collected by GIMP; the old code leaked one image per layer
            # per export until GIMP was restarted.
            pdb.gimp_image_delete(tmp_img)
def process_path(path, paths_node, base_dir):
    """Record metadata for a GIMP vectors object in `paths_node` and return
    [(relative filename, csv rows)] for save_paths to write.

    base_dir -- despite the name, this is a dir-stack list of path
                components (the caller passes ['paths']), like
                process_layer's dir_stack
    """
    # First row is 8 Nones -- written by csv as an empty placeholder row.
    # NOTE(review): presumably a stand-in for a header of the 8 columns
    # (name, stroke#, then 6 point coords); confirm consumers expect it.
    data = [[None] * 8]
    strokes_count = 0
    path_name, attributes = parse_attributes(path.name)
    for stroke in path.strokes:
        strokes_count = strokes_count+1
        stroke_points, is_closed = stroke.points
        # copy triplets: each bezier point is 3 (x, y) control pairs,
        # i.e. 6 floats per row
        for triplet in range(0, len(stroke_points), 6):
            row = [path_name, strokes_count]
            row.extend(stroke_points[triplet:triplet + 6])
            data.append(row)
        # for closed stroke, close with first triplet
        if is_closed:
            row = [path_name, strokes_count]
            row.extend(stroke_points[:6])
            data.append(row)
    filename = '/'.join(base_dir + ['%s.csv' % path_name])
    path_node = et.SubElement(paths_node, 'path')
    path_node.set('name', path_name)
    path_node.set('src', filename)
    set_custom_attributes(path_node, attributes)
    return [(filename, data)]
def save_paths(paths, base_dir):
    """Write each (relative path, rows) pair as a CSV file under base_dir.

    Relative paths use '/' separators and are converted to the local
    filesystem separator before writing.
    """
    for rel_path, rows in paths:
        target = os.path.join(base_dir, rel_path.replace('/', os.sep))
        with open(target, 'w') as out:
            csv.writer(out, lineterminator='\n').writerows(rows)
def save_thumb(img, base_dir):
    """Write a flattened thumbnail of `img` (max 255px on the longest edge)
    to base_dir/Thumbnails/thumbnail.png.
    """
    tmp_img = pdb.gimp_image_new(img.width, img.height, img.base_type)
    try:
        # Copy every layer into the scratch image, then flatten to one
        # drawable for the PNG save.
        for i, layer in enumerate(img.layers):
            tmp_layer = pdb.gimp_layer_new_from_drawable(layer, tmp_img)
            tmp_img.add_layer(tmp_layer, i)
        flattened = tmp_img.flatten()
        max_dim = 255
        if img.width > max_dim or img.height > max_dim:
            # Scale the longest edge to max_dim, preserving aspect ratio.
            # Python 2 integer division (floor) is relied on here.
            if img.width > img.height:
                width = max_dim
                height = width * img.height / img.width
            elif img.width < img.height:
                height = max_dim
                width = height * img.width / img.height
            else:
                width = height = max_dim
            pdb.gimp_image_scale(tmp_img, width, height)
        thumb_path = os.path.join(base_dir, 'Thumbnails')
        mkdirs(thumb_path)
        thumb_filename = 'thumbnail.png'
        pdb.file_png_save_defaults(tmp_img, flattened, os.path.join(thumb_path, thumb_filename), thumb_filename)
    finally:
        # Bug fix: images created with gimp-image-new are not garbage
        # collected by GIMP; the old code leaked the scratch image on
        # every export.
        pdb.gimp_image_delete(tmp_img)
# Helper functions
def mkdirs(dir_name):
    """Create dir_name (and parents), ignoring 'already exists' errors."""
    try:
        os.makedirs(dir_name)
    except OSError, e:
        # Anything other than EEXIST is a real failure.
        if e.errno != errno.EEXIST:
            raise
def parse_attributes(unparsed_name):
    """Split a layer/path name of the form ``name?key=v&key=w`` into
    (name, attributes), where attributes maps each key to its list of
    values (urlparse.parse_qs semantics).
    """
    parsed = urlparse.urlparse(unparsed_name)
    return parsed.path, urlparse.parse_qs(parsed.query)
def set_custom_attributes(node, attributes):
    """Copy parsed name attributes onto XML `node`; multi-valued keys are
    joined with commas. Python 2 only (dict.iteritems)."""
    for key, vals in attributes.iteritems():
        node.set(key, ','.join(vals))
# Initialization
# Registers the plugin with GIMP. With imagetypes "*", gimpfu passes the
# active image and drawable automatically, so the params below map to the
# remaining ora_plus arguments (compression, dir_name, should_merge,
# should_zip).
gimpfu.register(
    # name
    "ora-plus",
    # blurb
    "OpenRaster Plus exporter",
    # help
    "Exports layers and paths to OpenRaster compatible file with extra metadata useful for importing into other systems, like games.",
    # author
    "Carlo Cabanilla",
    # copyright
    "Carlo Cabanilla",
    # date
    "2014",
    # menupath
    "<Image>/File/Export/Export as OpenRaster Plus",
    # imagetypes
    "*",
    # params
    [
        (gimpfu.PF_ADJUSTMENT, "compression", "PNG Compression level:", 0, (0, 9, 1)),
        (gimpfu.PF_DIRNAME, "dir", "Directory", os.getcwd()),
        (gimpfu.PF_BOOL, "should_merge", "Merge layer groups?", True),
        (gimpfu.PF_BOOL, "should_zip", "Zip to .ora?", False),
    ],
    # results
    [],
    # function
    ora_plus
)
# Hands control to gimpfu's main loop; never returns normally.
gimpfu.main()
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import logging
import os.path
import re
import tempfile
import kerberos
from datetime import datetime
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, BACKEND_SESSION_KEY, authenticate, load_backend, login
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.core import exceptions, urlresolvers
import django.db
from django.http import HttpResponseNotAllowed
from django.core.urlresolvers import resolve
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.translation import ugettext as _
from django.utils.http import urlquote, is_safe_url
from django.utils.encoding import iri_to_uri
import django.views.static
import desktop.views
import desktop.conf
from desktop.context_processors import get_app_name
from desktop.lib import apputil, i18n
from desktop.lib.django_util import render, render_json, is_jframe_request
from desktop.lib.exceptions import StructuredException
from desktop.lib.exceptions_renderable import PopupException
from desktop.log.access import access_log, log_page_hit
from desktop import appmanager
from hadoop import cluster
from desktop.log import get_audit_logger
LOG = logging.getLogger(__name__)

# Response header used by the middleware below to tag special responses
# (set to 'EXCEPTION' by ExceptionMiddleware and 'LOGIN_REQUIRED' by
# LoginAndPermissionMiddleware).
MIDDLEWARE_HEADER = "X-Hue-Middleware-Response"

# Views inside Django that don't require login
# (see LoginAndPermissionMiddleware)
DJANGO_VIEW_AUTH_WHITELIST = [
    django.views.static.serve,
    desktop.views.is_alive,
]
class AjaxMiddleware(object):
    """
    Flags every request with a boolean ``request.ajax``: true when Django's
    is_ajax() header check passes, or when the request carries
    ``?format=json``.
    """

    def process_request(self, request):
        # Preserve short-circuit order: REQUEST is only consulted when the
        # header check fails.
        if request.is_ajax():
            request.ajax = True
        else:
            request.ajax = request.REQUEST.get("format", "") == "json"
        return None
class ExceptionMiddleware(object):
    """
    If exceptions know how to render themselves, use that.

    PopupException renders itself; StructuredException renders as JSON for
    ajax requests or via error.mako otherwise; anything else falls through
    (returns None) to Django's default handling.
    """
    def process_exception(self, request, exception):
        import traceback
        tb = traceback.format_exc()
        logging.info("Processing exception: %s: %s" % (i18n.smart_unicode(exception),
                                                       i18n.smart_unicode(tb)))
        if isinstance(exception, PopupException):
            # The exception knows how to build its own response.
            return exception.response(request)
        if isinstance(exception, StructuredException):
            if request.ajax:
                # request.ajax is set by AjaxMiddleware.
                response = render_json(exception.response_data)
                response[MIDDLEWARE_HEADER] = 'EXCEPTION'
                # error_code defaults to 500 when the exception carries none.
                response.status_code = getattr(exception, 'error_code', 500)
                return response
            else:
                response = render("error.mako", request,
                                  dict(error=exception.response_data.get("message")))
                response.status_code = getattr(exception, 'error_code', 500)
                return response
        # None lets Django's standard exception handling take over.
        return None
class ClusterMiddleware(object):
    """
    Manages setting request.fs and request.jt
    """
    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        Sets request.fs and request.jt on every request to point to the
        configured filesystem.

        The filesystem reference comes from the 'fs' request parameter or
        URL kwarg (default 'default'); the 'fs' kwarg is consumed here so
        views never see it.
        """
        request.fs_ref = request.REQUEST.get('fs', view_kwargs.get('fs', 'default'))
        if "fs" in view_kwargs:
            del view_kwargs["fs"]
        try:
            request.fs = cluster.get_hdfs(request.fs_ref)
        except KeyError:
            # Translate the lookup failure into a user-readable message.
            raise KeyError(_('Cannot find HDFS called "%(fs_ref)s".') % {'fs_ref': request.fs_ref})
        if request.user.is_authenticated():
            # Operations on fs/jt are performed as the logged-in user.
            if request.fs is not None:
                request.fs.setuser(request.user.username)
            request.jt = cluster.get_default_mrcluster() # Deprecated, only there for MR1
            if request.jt is not None:
                request.jt.setuser(request.user.username)
        else:
            request.jt = None
class NotificationMiddleware(object):
    """
    Attaches request.info / request.error / request.warn helpers that post
    user-visible notifications through django.contrib.messages. Each helper
    takes a title and an optional detail appended after a <br/>.
    """

    def process_view(self, request, view_func, view_args, view_kwargs):
        def _fmt(title, detail=None):
            # Same formatting as before: 'title ' when there is no detail,
            # 'title <br/>detail' otherwise.
            suffix = '' if detail is None else '<br/>%s' % detail
            return '%s %s' % (title, suffix)

        request.info = lambda title, detail=None: messages.info(request, _fmt(title, detail))
        request.error = lambda title, detail=None: messages.error(request, _fmt(title, detail))
        request.warn = lambda title, detail=None: messages.warning(request, _fmt(title, detail))
class AppSpecificMiddleware(object):
    """
    Runs per-app middleware (declared in each desktop app's
    MIDDLEWARE_CLASSES setting) for requests that resolve into that app.
    Only view/response/exception hooks are supported; process_request is
    rejected because the target app is unknown at that stage.
    """

    @classmethod
    def augment_request_with_app(cls, request, view_func):
        """ Stuff the app into the request for use in later-stage middleware """
        if not hasattr(request, "_desktop_app"):
            module = inspect.getmodule(view_func)
            request._desktop_app = apputil.get_app_for_module(module)
            if not request._desktop_app and not module.__name__.startswith('django.'):
                logging.debug("no app for view func: %s in %s" % (view_func, module))

    def __init__(self):
        # Maps app name -> {'view': [...], 'response': [...], 'exception': [...]}
        self.middlewares_by_app = {}
        for app in appmanager.DESKTOP_APPS:
            self.middlewares_by_app[app.name] = self._load_app_middleware(app)

    def _get_middlewares(self, app, type):
        # NOTE: parameter 'type' shadows the builtin; kept for compatibility.
        return self.middlewares_by_app.get(app, {}).get(type, [])

    def process_view(self, request, view_func, view_args, view_kwargs):
        """View middleware"""
        self.augment_request_with_app(request, view_func)
        if not request._desktop_app:
            return None
        # Run the middlewares
        ret = None
        for middleware in self._get_middlewares(request._desktop_app, 'view'):
            ret = middleware(request, view_func, view_args, view_kwargs)
            if ret: return ret # short circuit
        return ret

    def process_response(self, request, response):
        """Response middleware"""
        # We have the app that we stuffed in there
        if not hasattr(request, '_desktop_app'):
            logging.debug("No desktop_app known for request.")
            return response
        # Response hooks run in reverse registration order, like Django's own.
        for middleware in reversed(self._get_middlewares(request._desktop_app, 'response')):
            response = middleware(request, response)
        return response

    def process_exception(self, request, exception):
        """Exception middleware"""
        # We have the app that we stuffed in there
        if not hasattr(request, '_desktop_app'):
            logging.debug("No desktop_app known for exception.")
            return None
        # Run the middlewares
        ret = None
        for middleware in self._get_middlewares(request._desktop_app, 'exception'):
            ret = middleware(request, exception)
            if ret: return ret # short circuit
        return ret

    def _load_app_middleware(cls, app):
        # NOTE(review): named like a classmethod ('cls') but not decorated
        # as one -- it is invoked as an instance method in __init__; confirm
        # the intent before changing.
        app_settings = app.settings
        if not app_settings:
            return
        mw_classes = app_settings.__dict__.get('MIDDLEWARE_CLASSES', [])
        # Buckets for the three supported hook types.
        result = {'view': [], 'response': [], 'exception': []}
        for middleware_path in mw_classes:
            # This code brutally lifted from django.core.handlers
            try:
                dot = middleware_path.rindex('.')
            except ValueError:
                raise exceptions.ImproperlyConfigured, _('%(module)s isn\'t a middleware module.') % {'module': middleware_path}
            mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
            try:
                mod = __import__(mw_module, {}, {}, [''])
            except ImportError, e:
                raise exceptions.ImproperlyConfigured, _('Error importing middleware %(module)s: "%(error)s".') % {'module': mw_module, 'error': e}
            try:
                mw_class = getattr(mod, mw_classname)
            except AttributeError:
                raise exceptions.ImproperlyConfigured, _('Middleware module "%(module)s" does not define a "%(class)s" class.') % {'module': mw_module, 'class':mw_classname}
            try:
                mw_instance = mw_class()
            except exceptions.MiddlewareNotUsed:
                continue
            # End brutal code lift
            # We need to make sure we don't have a process_request function because we don't know what
            # application will handle the request at the point process_request is called
            if hasattr(mw_instance, 'process_request'):
                raise exceptions.ImproperlyConfigured, \
                    _('AppSpecificMiddleware module "%(module)s" has a process_request function' + \
                    ' which is impossible.') % {'module': middleware_path}
            if hasattr(mw_instance, 'process_view'):
                result['view'].append(mw_instance.process_view)
            # response/exception hooks are prepended so they run in reverse.
            if hasattr(mw_instance, 'process_response'):
                result['response'].insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                result['exception'].insert(0, mw_instance.process_exception)
        return result
class LoginAndPermissionMiddleware(object):
    """
    Middleware that forces all views (except those that opt out) through authentication.
    """
    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        We also perform access logging in ``process_view()`` since we have the view function,
        which tells us the log level. The downside is that we don't have the status code,
        which isn't useful for status logging anyways.
        """
        access_log_level = getattr(view_func, 'access_log_level', None)
        # First, skip views not requiring login
        # If the view has "opted out" of login required, skip
        if hasattr(view_func, "login_notrequired"):
            log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
            return None
        # There are certain django views which are also opt-out, but
        # it would be evil to go add attributes to them
        if view_func in DJANGO_VIEW_AUTH_WHITELIST:
            log_page_hit(request, view_func, level=access_log_level or logging.DEBUG)
            return None
        # If user is logged in, check that he has permissions to access the
        # app.
        if request.user.is_active and request.user.is_authenticated():
            AppSpecificMiddleware.augment_request_with_app(request, view_func)
            # Until we get Django 1.3 and resolve returning the URL name, we just do a match of the name of the view
            try:
                access_view = 'access_view:%s:%s' % (request._desktop_app, resolve(request.path)[0].__name__)
            except Exception, e:
                # Failure here only loses the per-view permission check below.
                access_log(request, 'error checking view perm: %s', e, level=access_log_level)
                access_view =''
            # Accessing an app can access an underlying other app.
            # e.g. impala or spark uses code from beeswax and so accessing impala shows up as beeswax here.
            # Here we trust the URL to be the real app we need to check the perms.
            app_accessed = request._desktop_app
            ui_app_accessed = get_app_name(request)
            if app_accessed != ui_app_accessed and ui_app_accessed not in ('logs', 'accounts', 'login'):
                app_accessed = ui_app_accessed
            # Allow if the user holds either the app-wide or per-view permission;
            # desktop/home/about are always accessible.
            if app_accessed and \
                app_accessed not in ("desktop", "home", "about") and \
                not (request.user.has_hue_permission(action="access", app=app_accessed) or
                request.user.has_hue_permission(action=access_view, app=app_accessed)):
                access_log(request, 'permission denied', level=access_log_level)
                # NOTE(review): uses HTTP 401 for an authorization (not
                # authentication) failure -- 403 may be intended; confirm
                # before changing, clients may rely on it.
                return PopupException(
                    _("You do not have permission to access the %(app_name)s application.") % {'app_name': app_accessed.capitalize()}, error_code=401).response(request)
            else:
                log_page_hit(request, view_func, level=access_log_level)
                return None
        # Not authenticated: redirect to the login page.
        logging.info("Redirecting to login page: %s", request.get_full_path())
        access_log(request, 'login redirection', level=access_log_level)
        if request.ajax:
            # Send back a magic header which causes Hue.Request to interpose itself
            # in the ajax request and make the user login before resubmitting the
            # request.
            response = HttpResponse("/* login required */", content_type="text/javascript")
            response[MIDDLEWARE_HEADER] = 'LOGIN_REQUIRED'
            return response
        else:
            return HttpResponseRedirect("%s?%s=%s" % (settings.LOGIN_URL, REDIRECT_FIELD_NAME, urlquote(request.get_full_path())))
class JsonMessage(object):
    """A log-message object whose string form is the JSON encoding of the
    keyword arguments it was constructed with (used by AuditLoggingMiddleware
    so serialization happens lazily, only when the record is formatted).
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __str__(self):
        return json.dumps(self.kwargs)
class AuditLoggingMiddleware(object):
    """
    Writes one JSON audit record per response to the audit log; disabled
    entirely when desktop's AUDIT_EVENT_LOG_DIR is not configured.
    """
    def __init__(self):
        from desktop.conf import AUDIT_EVENT_LOG_DIR
        if not AUDIT_EVENT_LOG_DIR.get():
            LOG.info('Unloading AuditLoggingMiddleware')
            raise exceptions.MiddlewareNotUsed

    def process_response(self, request, response):
        # Auditing must never break the response path, hence the broad catch.
        try:
            audit_logger = get_audit_logger()
            audit_logger.debug(JsonMessage(**{
                # Record key is seconds-since-epoch. NOTE(review): '%s' is a
                # platform-dependent strftime extension -- confirm portability.
                datetime.utcnow().strftime('%s'): {
                    'user': request.user.username if hasattr(request, 'user') else 'anonymous',
                    "status": response.status_code,
                    "impersonator": None,
                    "ip_address": request.META.get('REMOTE_ADDR'),
                    "authorization_failure": response.status_code == 401,
                    "service": get_app_name(request),
                    "url": request.path,
                }
            }))
            # Marker header indicating the request was audited.
            response['audited'] = True
        except Exception, e:
            LOG.error('Could not audit the request: %s' % e)
        return response
# Optional dependency used only by HtmlValidationMiddleware below.
try:
    import tidylib
    _has_tidylib = True
except Exception, ex:
    # The exception type is not ImportError. It's actually an OSError.
    logging.warn("Failed to import tidylib (for debugging). Is libtidy installed?")
    _has_tidylib = False
class HtmlValidationMiddleware(object):
    """
    If configured, validate output html for every response.

    Runs every successful non-ajax HTML response through libtidy and, when
    tidy reports problems not on the ignore list, dumps the original and
    tidied HTML plus the report to a temp directory for offline debugging.
    """
    def __init__(self):
        self._logger = logging.getLogger('HtmlValidationMiddleware')
        if not _has_tidylib:
            # Without tidylib every process_response call is a no-op.
            logging.error("HtmlValidationMiddleware not activatived: "
                          "Failed to import tidylib.")
            return
        # Things that we don't care about
        self._to_ignore = (
            re.compile('- Warning: <.*> proprietary attribute "data-'),
            re.compile('- Warning: trimming empty'),
            re.compile('- Info:'),
        )
        # Find the directory to write tidy html output
        try:
            self._outdir = os.path.join(tempfile.gettempdir(), 'hue_html_validation')
            if not os.path.isdir(self._outdir):
                os.mkdir(self._outdir, 0755)
        except Exception, ex:
            # Fall back to a fresh private temp dir.
            self._logger.exception('Failed to get temp directory: %s', (ex,))
            self._outdir = tempfile.mkdtemp(prefix='hue_html_validation-')
        # Options to pass to libtidy. See
        # http://tidy.sourceforge.net/docs/quickref.html
        self._options = {
            'show-warnings': 1,
            'output-html': 0,
            'output-xhtml': 1,
            'char-encoding': 'utf8',
            'output-encoding': 'utf8',
            'indent': 1,
            'wrap': 0,
        }

    def process_response(self, request, response):
        """Validate the response body; always returns the response unchanged."""
        if not _has_tidylib or not self._is_html(request, response):
            return response
        html, errors = tidylib.tidy_document(response.content,
                                             self._options,
                                             keep_doc=True)
        if not errors:
            return response
        # Filter out what we care about
        err_list = errors.rstrip().split('\n')
        err_list = self._filter_warnings(err_list)
        if not err_list:
            return response
        # Resolve the view name for the dump filenames; fall back when the
        # URL doesn't resolve.
        try:
            fn = urlresolvers.resolve(request.path)[0]
            fn_name = '%s.%s' % (fn.__module__, fn.__name__)
        except:
            fn_name = '<unresolved_url>'
        # Write the two versions of html out for offline debugging
        filename = os.path.join(self._outdir, fn_name)
        result = "HTML tidy result: %s [%s]:" \
                 "\n\t%s" \
                 "\nPlease see %s.orig %s.tidy\n-------" % \
                 (request.path, fn_name, '\n\t'.join(err_list), filename, filename)
        file(filename + '.orig', 'w').write(i18n.smart_str(response.content))
        file(filename + '.tidy', 'w').write(i18n.smart_str(html))
        file(filename + '.info', 'w').write(i18n.smart_str(result))
        self._logger.error(result)
        return response

    def _filter_warnings(self, err_list):
        """A hacky way to filter out things that we don't care about."""
        res = [ ]
        for err in err_list:
            # for/else: keep the error only if no ignore pattern matched.
            for ignore in self._to_ignore:
                if ignore.search(err):
                    break
            else:
                res.append(err)
        return res

    def _is_html(self, request, response):
        """True for successful, non-ajax responses with an HTML content type."""
        return not request.is_ajax() and \
            'html' in response['Content-Type'] and \
            200 <= response.status_code < 300
class SpnegoMiddleware(object):
"""
Based on the WSGI SPNEGO middlware class posted here:
http://code.activestate.com/recipes/576992/
"""
def __init__(self):
if not 'SpnegoDjangoBackend' in desktop.conf.AUTH.BACKEND.get():
LOG.info('Unloading SpnegoMiddleware')
raise exceptions.MiddlewareNotUsed
def process_response(self, request, response):
if 'GSS-String' in request.META:
response['WWW-Authenticate'] = request.META['GSS-String']
elif 'Return-401' in request.META:
response = HttpResponse("401 Unauthorized", content_type="text/plain",
status=401)
response['WWW-Authenticate'] = 'Negotiate'
response.status = 401
return response
def process_request(self, request):
"""
The process_request() method needs to communicate some state to the
process_response() method. The two options for this are to return an
HttpResponse object or to modify the META headers in the request object. In
order to ensure that all of the middleware is properly invoked, this code
currently uses the later approach. The following headers are currently used:
GSS-String:
This means that GSS authentication was successful and that we need to pass
this value for the WWW-Authenticate header in the response.
Return-401:
This means that the SPNEGO backend is in use, but we didn't get an
AUTHORIZATION header from the client. The way that the protocol works
(http://tools.ietf.org/html/rfc4559) is by having the first response to an
un-authenticated request be a 401 with the WWW-Authenticate header set to
Negotiate. This will cause the browser to re-try the request with the
AUTHORIZATION header set.
"""
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the SpnegoUserMiddleware class.")
if 'HTTP_AUTHORIZATION' in request.META:
type, authstr = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
if type == 'Negotiate':
try:
result, context = kerberos.authGSSServerInit('HTTP')
if result != 1:
return
gssstring=''
r=kerberos.authGSSServerStep(context,authstr)
if r == 1:
gssstring=kerberos.authGSSServerResponse(context)
request.META['GSS-String'] = 'Negotiate %s' % gssstring
else:
kerberos.authGSSServerClean(context)
return
username = kerberos.authGSSServerUserName(context)
kerberos.authGSSServerClean(context)
if request.user.is_authenticated():
if request.user.username == self.clean_username(username, request):
return
user = authenticate(username=username)
if user:
request.user = user
login(request, user)
return
except:
LOG.exception('Unexpected error when authenticating against KDC')
return
else:
request.META['Return-401'] = ''
return
else:
if not request.user.is_authenticated():
request.META['Return-401'] = ''
return
def clean_username(self, username, request):
    """
    Give the session's authentication backend a chance to normalize the
    username.

    If the backend does not provide a clean_username method the name is
    returned unchanged.
    """
    backend = load_backend(request.session[BACKEND_SESSION_KEY])
    try:
        return backend.clean_username(username)
    except AttributeError:
        # Backend has no clean_username method.
        return username
class HueRemoteUserMiddleware(RemoteUserMiddleware):
    """
    Middleware to delegate authentication to a proxy server. The proxy server
    will set an HTTP header (defaults to Remote-User) with the name of the
    authenticated user. This class extends the RemoteUserMiddleware class
    built into Django with the ability to configure the HTTP header and to
    unload the middleware if the RemoteUserDjangoBackend is not currently
    in use.
    """
    def __init__(self):
        # Only stay loaded when the RemoteUserDjangoBackend is configured;
        # raising MiddlewareNotUsed tells Django to drop this middleware.
        if 'RemoteUserDjangoBackend' not in desktop.conf.AUTH.BACKEND.get():
            LOG.info('Unloading HueRemoteUserMiddleware')
            raise exceptions.MiddlewareNotUsed
        # Name of the header carrying the proxy-authenticated username.
        self.header = desktop.conf.AUTH.REMOTE_USER_HEADER.get()
class EnsureSafeMethodMiddleware(object):
    """
    Middleware to white list configured HTTP request methods.
    """
    def process_request(self, request):
        """Reject the request with a 405 unless its method is whitelisted."""
        # Read the configured whitelist once instead of twice per request.
        allowed_methods = desktop.conf.HTTP_ALLOWED_METHODS.get()
        if request.method not in allowed_methods:
            return HttpResponseNotAllowed(allowed_methods)
class EnsureSafeRedirectURLMiddleware(object):
    """
    Middleware to white list configured redirect URLs.
    """
    def process_response(self, request, response):
        """Allow redirects only to whitelisted patterns or same-host URLs."""
        is_redirect = response.status_code in (301, 302, 303, 305, 307, 308)
        if not is_redirect or not response.get('Location'):
            # Not a redirect (or no target): nothing to police.
            return response
        location = response['Location']
        # Any configured whitelist pattern makes the redirect acceptable.
        for regexp in desktop.conf.REDIRECT_WHITELIST.get():
            if regexp.match(location):
                return response
        # Same-host redirects are always considered safe.
        if is_safe_url(location, request.get_host()):
            return response
        # Anything else is replaced by a 403 error page.
        error_response = render("error.mako", request, dict(error=_('Redirect to %s is not allowed.') % response['Location']))
        error_response.status_code = 403
        return error_response
| |
""" Global configuration for the utility based on config files and environment variables."""
import os
import re
import math
import yaml
import multiprocessing
from ddsc.core.util import verify_file_private
from ddsc.exceptions import DDSUserException
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
# Well-known config file locations and the env var that overrides the user file.
GLOBAL_CONFIG_FILENAME = '/etc/ddsclient.conf'
LOCAL_CONFIG_FILENAME = '~/.ddsclient'
LOCAL_CONFIG_ENV = 'DDSCLIENT_CONF'
# Default service endpoints used when no url/d4s2_url config values are set.
DUKE_DATA_SERVICE_URL = 'https://api.dataservice.duke.edu/api/v1'
D4S2_SERVICE_URL = 'https://datadelivery.genome.duke.edu/api/v1'
# Transfer chunk-size defaults, in bytes.
MB_TO_BYTES = 1024 * 1024
DDS_DEFAULT_UPLOAD_CHUNKS = 100 * MB_TO_BYTES
DDS_DEFAULT_DOWNLOAD_CHUNK_SIZE = 20 * MB_TO_BYTES
# Environment variable that may supply an auth token directly (see Config.auth).
AUTH_ENV_KEY_NAME = 'DUKE_DATA_SERVICE_AUTH'
# when uploading skip .DS_Store, our key file, and ._ (resource fork metadata)
FILE_EXCLUDE_REGEX_DEFAULT = '^\.DS_Store$|^\.ddsclient$|^\.\_'
MAX_DEFAULT_WORKERS = 8  # cap applied to the auto-detected CPU count
GET_PAGE_SIZE_DEFAULT = 100  # fetch 100 items per page
DEFAULT_FILE_DOWNLOAD_RETRIES = 5  # retries for a failed external file download
DEFAULT_BACKING_STORAGE = "dds"  # see Config.BACKING_STORAGE ("dds" or "azure")
def get_user_config_filename():
    """
    Return the per-user config path: $DDSCLIENT_CONF when set (and non-empty),
    otherwise the default ~/.ddsclient location.
    """
    override = os.environ.get(LOCAL_CONFIG_ENV)
    if not override:
        return LOCAL_CONFIG_FILENAME
    return override
def create_config(allow_insecure_config_file=False):
    """
    Create config based on /etc/ddsclient.conf and ~/.ddsclient.conf($DDSCLIENT_CONF)
    :param allow_insecure_config_file: bool: when true we will not check ~/.ddsclient permissions.
    :return: Config with the configuration to use for DDSClient.
    """
    config = Config()
    config.add_properties(GLOBAL_CONFIG_FILENAME)
    user_config_filename = get_user_config_filename()
    # Only the default dotfile gets a permissions check; a path supplied via
    # $DDSCLIENT_CONF is trusted as-is.
    must_be_private = (user_config_filename == LOCAL_CONFIG_FILENAME
                       and not allow_insecure_config_file)
    if must_be_private:
        verify_file_private(user_config_filename)
    config.add_properties(user_config_filename)
    return config
def default_num_workers():
    """
    Return the number of workers to use as default if not specified by a config file.
    This is the machine's CPU count, capped at MAX_DEFAULT_WORKERS.
    """
    cpu_count = multiprocessing.cpu_count()
    if cpu_count < MAX_DEFAULT_WORKERS:
        return cpu_count
    return MAX_DEFAULT_WORKERS
class Config(object):
    """
    Global configuration object based on config files and environment variables.

    Values are merged in via add_properties(); files read later override
    earlier values. Each property falls back to a module-level default when
    its key is absent.
    """
    # Keys recognized at the top level of a YAML config file:
    URL = 'url'  # specifies the dataservice host we are connecting too
    USER_KEY = 'user_key'  # user key: /api/v1/current_user/api_key
    AGENT_KEY = 'agent_key'  # software_agent key: /api/v1/software_agents/{id}/api_key
    AUTH = 'auth'  # Holds actual auth token for connecting to the dataservice
    UPLOAD_BYTES_PER_CHUNK = 'upload_bytes_per_chunk'  # bytes per chunk we will upload
    UPLOAD_WORKERS = 'upload_workers'  # how many worker processes used for uploading
    DOWNLOAD_WORKERS = 'download_workers'  # how many worker processes used for downloading
    DOWNLOAD_BYTES_PER_CHUNK = 'download_bytes_per_chunk'  # bytes per chunk we will download
    DEBUG_MODE = 'debug'  # show stack traces
    D4S2_URL = 'd4s2_url'  # url for use with the D4S2 (share/deliver service)
    FILE_EXCLUDE_REGEX = 'file_exclude_regex'  # allows customization of which filenames will be uploaded
    GET_PAGE_SIZE = 'get_page_size'  # page size used for GET pagination requests
    STORAGE_PROVIDER_ID = 'storage_provider_id'  # setting to override the default storage provider
    FILE_DOWNLOAD_RETRIES = 'file_download_retries'  # number of times to retry a failed file download
    BACKING_STORAGE = 'backing_storage'  # backing storage either "dds" or "azure"

    def __init__(self):
        # Raw key/value settings merged from config files.
        self.values = {}

    def add_properties(self, filename):
        """
        Add properties to config based on filename replacing previous values.
        :param filename: str path to YAML file to pull top level properties from
        :raises DDSUserException: when the file exists but is empty
        """
        filename = os.path.expanduser(filename)
        if os.path.exists(filename):
            with open(filename, 'r') as yaml_file:
                config_data = yaml.safe_load(yaml_file)
            if config_data:
                self.update_properties(config_data)
            else:
                raise DDSUserException("Error: Empty config file {}".format(filename))

    def update_properties(self, new_values):
        """
        Add items in new_values to the internal list replacing existing values.
        :param new_values: dict properties to set
        """
        self.values = dict(self.values, **new_values)

    @property
    def url(self):
        """
        Specifies the dataservice host we are connecting too.
        :return: str url to a dataservice host
        """
        return self.values.get(Config.URL, DUKE_DATA_SERVICE_URL)

    def get_portal_url_base(self):
        """
        Determine root url of the data service from the url specified.
        :return: str root url of the data service (eg: dataservice.duke.edu)
        """
        api_url = urlparse(self.url).hostname
        # Strip a leading "api." prefix, then any remaining "api" substring.
        portal_url = re.sub(r'^api\.', '', api_url)
        portal_url = re.sub(r'api', '', portal_url)
        return portal_url

    @property
    def user_key(self):
        """
        Contains user key user created from /api/v1/current_user/api_key used to create a login token.
        :return: str user key that can be used to create an auth token
        """
        return self.values.get(Config.USER_KEY, None)

    @property
    def agent_key(self):
        """
        Contains user agent key created from /api/v1/software_agents/{id}/api_key used to create a login token.
        :return: str agent key that can be used to create an auth token
        """
        return self.values.get(Config.AGENT_KEY, None)

    @property
    def auth(self):
        """
        Contains the auth token for use with connecting to the dataservice.
        Falls back to the DUKE_DATA_SERVICE_AUTH environment variable.
        :return: str auth token or None
        """
        return self.values.get(Config.AUTH, os.environ.get(AUTH_ENV_KEY_NAME, None))

    @property
    def upload_bytes_per_chunk(self):
        """
        Return the bytes per chunk to be sent to external store.
        :return: int bytes per upload chunk
        """
        value = self.values.get(Config.UPLOAD_BYTES_PER_CHUNK, DDS_DEFAULT_UPLOAD_CHUNKS)
        return Config.parse_bytes_str(value)

    @property
    def upload_workers(self):
        """
        Return the number of parallel workers to use when uploading a file.
        :return: int number of workers. Specify None or 1 to disable parallel uploading
        """
        return self.values.get(Config.UPLOAD_WORKERS, default_num_workers())

    @property
    def download_workers(self):
        """
        Return the number of parallel workers to use when downloading a file.
        :return: int number of workers. Specify None or 1 to disable parallel downloading
        """
        # default_num_workers() already returns an int; no rounding needed.
        return self.values.get(Config.DOWNLOAD_WORKERS, default_num_workers())

    @property
    def download_bytes_per_chunk(self):
        """Return the bytes per chunk to request when downloading."""
        return self.values.get(Config.DOWNLOAD_BYTES_PER_CHUNK, DDS_DEFAULT_DOWNLOAD_CHUNK_SIZE)

    @property
    def debug_mode(self):
        """
        Return true if we should show stack traces on error.
        :return: boolean True if debugging is enabled
        """
        return self.values.get(Config.DEBUG_MODE, False)

    @property
    def d4s2_url(self):
        """
        Returns url for D4S2 service or the default if not setup.
        :return: str url
        """
        return self.values.get(Config.D4S2_URL, D4S2_SERVICE_URL)

    @staticmethod
    def parse_bytes_str(value):
        """
        Given a value return the integer number of bytes it represents.
        Trailing "MB" causes the value multiplied by 1024*1024
        :param value: str or int size setting
        :return: int number of bytes represented by value.
        """
        # isinstance (rather than type comparison) also accepts str subclasses.
        if isinstance(value, str):
            if "MB" in value:
                return int(value.replace("MB", "")) * MB_TO_BYTES
            return int(value)
        return value

    @property
    def file_exclude_regex(self):
        """
        Returns regex that should be used to filter out filenames.
        :return: str: regex that when matches we should exclude a file from uploading.
        """
        return self.values.get(Config.FILE_EXCLUDE_REGEX, FILE_EXCLUDE_REGEX_DEFAULT)

    @property
    def page_size(self):
        """
        Returns the page size used to fetch paginated lists from DukeDS.
        For DukeDS APIs that fail related to timeouts lowering this value can help.
        :return: int items per page
        """
        return self.values.get(Config.GET_PAGE_SIZE, GET_PAGE_SIZE_DEFAULT)

    @property
    def storage_provider_id(self):
        """
        Returns storage provider id from /api/v1/storage_providers DukeDS API or None to use default.
        :return: str: uuid of storage provider
        """
        return self.values.get(Config.STORAGE_PROVIDER_ID, None)

    @property
    def file_download_retries(self):
        """
        Returns number of times to retry failed external file downloads
        :return: int: number of retries allowed before failure
        """
        return self.values.get(Config.FILE_DOWNLOAD_RETRIES, DEFAULT_FILE_DOWNLOAD_RETRIES)

    @property
    def backing_storage(self):
        """Return the backing storage name ("dds" or "azure")."""
        return self.values.get(Config.BACKING_STORAGE, DEFAULT_BACKING_STORAGE)
| |
# tests/arguments.py --------------------------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
import argparse
import os
import sys
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
from swift_build_support.arguments import (
action as argaction,
type as argtype,
)
class ArgumentsTypeTestCase(unittest.TestCase):
    """Tests for the custom argparse types in swift_build_support.arguments."""

    def test_bool(self):
        # Only the exact spellings 1/0, true/false, True/True are accepted;
        # other casings are rejected with ArgumentTypeError.
        self.assertTrue(argtype.bool("1"))
        self.assertTrue(argtype.bool("true"))
        self.assertTrue(argtype.bool("True"))
        self.assertFalse(argtype.bool("0"))
        self.assertFalse(argtype.bool("false"))
        self.assertFalse(argtype.bool("False"))

        self.assertRaises(argparse.ArgumentTypeError, argtype.bool, 'foobar')
        self.assertRaises(argparse.ArgumentTypeError, argtype.bool, 'TRUE')
        self.assertRaises(argparse.ArgumentTypeError, argtype.bool, 'FALSE')

    def test_shell_split(self):
        # Shell quoting is honored when splitting the string into arguments.
        self.assertEqual(
            argtype.shell_split("-BAR=\"foo bar\" -BAZ='foo,bar',-QUX $baz"),
            ['-BAR=foo bar', '-BAZ=foo,bar', '-QUX', '$baz'])

    def test_clang_compiler_version(self):
        # Valid versions have 3 or 4 purely-numeric dotted components.
        self.assertEqual(
            argtype.clang_compiler_version('1.23.456').components,
            ("1", "23", "456", None))
        self.assertEqual(
            argtype.clang_compiler_version('1.2.3').components,
            ("1", "2", "3", None))
        self.assertEqual(
            argtype.clang_compiler_version('1.2.3.4').components,
            ("1", "2", "3", "4"))
        self.assertEqual(
            argtype.clang_compiler_version('12.34.56').components,
            ("12", "34", "56", None))
        self.assertEqual(
            argtype.clang_compiler_version('12.34.56.78').components,
            ("12", "34", "56", "78"))

        self.assertRaises(
            argparse.ArgumentTypeError,
            argtype.clang_compiler_version,
            "ver1.2.3")
        self.assertRaises(
            argparse.ArgumentTypeError,
            argtype.clang_compiler_version,
            "1.beta2.3")
        self.assertRaises(
            argparse.ArgumentTypeError,
            argtype.clang_compiler_version,
            "1.2.preview3")
        self.assertRaises(
            argparse.ArgumentTypeError,
            argtype.clang_compiler_version,
            "1.2.3-rc4")
        self.assertRaises(
            argparse.ArgumentTypeError,
            argtype.clang_compiler_version,
            "1..2")

    def test_executable(self):
        python = sys.executable
        self.assertTrue(os.path.isabs(argtype.executable(python)))

        # On this test directory, specifying "../../build-script-impl" returns
        # absolute path of build-script-impl
        impl = os.path.join("..", "..", "build-script-impl")
        cwd = os.getcwd()
        os.chdir(os.path.dirname(__file__))
        try:
            self.assertTrue(os.path.isabs(argtype.executable(impl)))
        finally:
            # Restore the working directory even if the assertion above fails
            # so later tests are not affected by the chdir.
            os.chdir(cwd)

        self.assertRaises(
            argparse.ArgumentTypeError,
            argtype.executable, __file__)  # this file is not executable
        self.assertRaises(
            argparse.ArgumentTypeError,
            argtype.executable, os.path.dirname(__file__))
        self.assertRaises(
            argparse.ArgumentTypeError,
            argtype.executable, "/bin/example-command-not-exist")
        self.assertRaises(
            argparse.ArgumentTypeError,
            argtype.executable, "../example-command-not-exist")
class ArgumentsActionTestCase(unittest.TestCase):
    """Tests for the custom argparse actions in swift_build_support.arguments."""

    def test_unavailable(self):
        # 'unavailable' options parse cleanly when absent and abort the parse
        # (SystemExit) with a message on stderr when supplied in any form.
        orig_stderr = sys.stderr

        parser = argparse.ArgumentParser()
        parser.add_argument("--foo")
        parser.add_argument(
            "--do-not-use",
            "--never-ever",
            action=argaction.unavailable)

        args, unknown_args = parser.parse_known_args(
            ['--foo', 'bar', '--baz', 'qux'])
        self.assertEqual(args.foo, 'bar')
        self.assertEqual(unknown_args, ['--baz', 'qux'])
        self.assertFalse(hasattr(args, 'sentinel'))

        try:
            stderr = StringIO()
            sys.stderr = stderr
            self.assertRaises(
                SystemExit,
                parser.parse_known_args,
                ['--foo', 'bar', '--do-not-use', 'baz'])
            self.assertIn('--do-not-use', stderr.getvalue())

            stderr = StringIO()
            sys.stderr = stderr
            self.assertRaises(
                SystemExit,
                parser.parse_known_args,
                ['--foo', 'bar', '--never-ever=baz'])
            self.assertIn('--never-ever', stderr.getvalue())
        finally:
            # Restore stderr even if an assertion fails so that later tests
            # do not keep writing into the StringIO buffer.
            sys.stderr = orig_stderr

    def test_concat(self):
        # Has default
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--str-opt",
            action=argaction.concat,
            default="def")
        parser.add_argument(
            "--list-opt",
            action=argaction.concat,
            type=argtype.shell_split,
            default=["def"])

        self.assertEqual(
            parser.parse_args(['--str-opt', '12', '--str-opt=42']),
            argparse.Namespace(str_opt="def1242", list_opt=["def"]))
        self.assertEqual(
            parser.parse_args(['--list-opt', 'foo 12', '--list-opt=bar 42']),
            argparse.Namespace(
                str_opt="def", list_opt=["def", "foo", "12", "bar", "42"]))

        # Default less
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--str-opt",
            action=argaction.concat)
        parser.add_argument(
            "--list-opt",
            action=argaction.concat,
            type=argtype.shell_split)

        self.assertEqual(
            parser.parse_args(['--str-opt', '12', '--str-opt=42']),
            argparse.Namespace(str_opt="1242", list_opt=None))
        self.assertEqual(
            parser.parse_args(['--list-opt', 'foo 12', '--list-opt=bar 42']),
            argparse.Namespace(
                str_opt=None, list_opt=["foo", "12", "bar", "42"]))

    def test_optional_bool(self):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--test-default-default",
            action=argaction.optional_bool)
        parser.add_argument(
            "--test-default-true",
            action=argaction.optional_bool,
            default=True)
        parser.add_argument(
            "--test-default-false",
            action=argaction.optional_bool,
            default=False)

        # Absent flags keep their defaults (default default is False).
        args, unknown_args = parser.parse_known_args([])
        self.assertEqual(args.test_default_default, False)
        self.assertEqual(args.test_default_true, True)
        self.assertEqual(args.test_default_false, False)

        # An explicit '0' forces False regardless of default.
        args, unknown_args = parser.parse_known_args(
            ['--test-default-default', '0',
             '--test-default-true', '0',
             '--test-default-false', '0'])
        self.assertEqual(args.test_default_default, False)
        self.assertEqual(args.test_default_true, False)
        self.assertEqual(args.test_default_false, False)

        # An explicit '1' forces True regardless of default.
        args, unknown_args = parser.parse_known_args(
            ['--test-default-default', '1',
             '--test-default-true', '1',
             '--test-default-false', '1'])
        self.assertEqual(args.test_default_default, True)
        self.assertEqual(args.test_default_true, True)
        self.assertEqual(args.test_default_false, True)

        # A bare flag with no value means True.
        args, unknown_args = parser.parse_known_args(
            ['--test-default-default',
             '--test-default-true',
             '--test-default-false'])
        self.assertEqual(args.test_default_default, True)
        self.assertEqual(args.test_default_true, True)
        self.assertEqual(args.test_default_false, True)
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A setup module for the GRPC Python package."""
from distutils import cygwinccompiler
from distutils import extension as _extension
from distutils import util
import os
import os.path
import pkg_resources
import platform
import re
import shlex
import shutil
import sys
import sysconfig
import setuptools
from setuptools.command import egg_info
# Redirect the manifest template from MANIFEST.in to PYTHON-MANIFEST.in.
egg_info.manifest_maker.template = 'PYTHON-MANIFEST.in'

# True when running under Python 3; selects version-specific deps and flags.
PY3 = sys.version_info.major == 3

# Root of the grpcio Python package within the repository.
PYTHON_STEM = os.path.join('src', 'python', 'grpcio')
# Include directories for the gRPC core and vendored BoringSSL/zlib sources.
CORE_INCLUDE = ('include', '.',)
BORINGSSL_INCLUDE = (os.path.join('third_party', 'boringssl', 'include'),)
ZLIB_INCLUDE = (os.path.join('third_party', 'zlib'),)
README = os.path.join(PYTHON_STEM, 'README.rst')

# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath(PYTHON_STEM))

# Break import-style to ensure we can actually find our in-repo dependencies.
import _spawn_patch
import commands
import grpc_core_dependencies
import grpc_version

# Patch distutils' process spawning so compiler invocations behave consistently.
_spawn_patch.monkeypatch_spawn()
LICENSE = '3-clause BSD'

# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support. Even if this
# is set to false, if the script detects that the generated `.c` file isn't
# present, then it will still attempt to use Cython.
BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)

# Environment variable to determine whether or not to enable coverage analysis
# in Cython modules.
ENABLE_CYTHON_TRACING = os.environ.get(
    'GRPC_PYTHON_ENABLE_CYTHON_TRACING', False)

# Environment variable specifying whether or not there's interest in setting up
# documentation building.
ENABLE_DOCUMENTATION_BUILD = os.environ.get(
    'GRPC_PYTHON_ENABLE_DOCUMENTATION_BUILD', False)

# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
# We can also use these variables as a way to inject environment-specific
# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
# reasonable default.
EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
if EXTRA_ENV_COMPILE_ARGS is None:
    # No user-supplied CFLAGS: choose platform-appropriate defaults.
    EXTRA_ENV_COMPILE_ARGS = ''
    if 'win32' in sys.platform and sys.version_info < (3, 5):
        # We use define flags here and don't directly add to DEFINE_MACROS below to
        # ensure that the expert user/builder has a way of turning it off (via the
        # envvars) without adding yet more GRPC-specific envvars.
        # See https://sourceforge.net/p/mingw-w64/bugs/363/
        if '32' in platform.architecture()[0]:
            EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
        else:
            EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
    elif 'win32' in sys.platform:
        EXTRA_ENV_COMPILE_ARGS += ' -D_PYTHON_MSVC'
    elif "linux" in sys.platform or "darwin" in sys.platform:
        EXTRA_ENV_COMPILE_ARGS += ' -fvisibility=hidden -fno-wrapv'
if EXTRA_ENV_LINK_ARGS is None:
    # No user-supplied LDFLAGS: choose platform-appropriate defaults.
    EXTRA_ENV_LINK_ARGS = ''
    if "linux" in sys.platform or "darwin" in sys.platform:
        EXTRA_ENV_LINK_ARGS += ' -lpthread'
    elif "win32" in sys.platform and sys.version_info < (3, 5):
        msvcr = cygwinccompiler.get_msvcr()[0]
        # TODO(atash) sift through the GCC specs to see if libstdc++ can have any
        # influence on the linkage outcome on MinGW for non-C++ programs.
        EXTRA_ENV_LINK_ARGS += (
            ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
            '-static'.format(msvcr=msvcr))
    if "linux" in sys.platform:
        EXTRA_ENV_LINK_ARGS += ' -Wl,-wrap,memcpy'

# Final flag lists handed to each Extension below.
EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
CYTHON_EXTENSION_PACKAGE_NAMES = ()

# Extension modules authored in Cython (.pyx sources).
CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',)

CYTHON_HELPER_C_FILES = ()

# C sources of the gRPC core compiled directly into the extension.
CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES)

EXTENSION_INCLUDE_DIRECTORIES = (
    (PYTHON_STEM,) + CORE_INCLUDE + BORINGSSL_INCLUDE + ZLIB_INCLUDE)

# Platform-specific system libraries to link against.
EXTENSION_LIBRARIES = ()
if "linux" in sys.platform:
    EXTENSION_LIBRARIES += ('rt',)
if not "win32" in sys.platform:
    EXTENSION_LIBRARIES += ('m',)
if "win32" in sys.platform:
    EXTENSION_LIBRARIES += ('advapi32', 'ws2_32',)

DEFINE_MACROS = (
    ('OPENSSL_NO_ASM', 1), ('_WIN32_WINNT', 0x600),
    ('GPR_BACKWARDS_COMPATIBILITY_MODE', 1),)
if "win32" in sys.platform:
    DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1),)
    if '64bit' in platform.architecture()[0]:
        DEFINE_MACROS += (('MS_WIN64', 1),)
    elif sys.version_info >= (3, 5):
        # For some reason, this is needed to get access to inet_pton/inet_ntop
        # on msvc, but only for 32 bits
        DEFINE_MACROS += (('NTDDI_VERSION', 0x06000000),)

LDFLAGS = tuple(EXTRA_LINK_ARGS)
CFLAGS = tuple(EXTRA_COMPILE_ARGS)
if "linux" in sys.platform or "darwin" in sys.platform:
    # Hide all symbols except the module-init function from the shared object.
    pymodinit_type = 'PyObject*' if PY3 else 'void'
    pymodinit = '__attribute__((visibility ("default"))) {}'.format(pymodinit_type)
    DEFINE_MACROS += (('PyMODINIT_FUNC', pymodinit),)

# By default, Python3 distutils enforces compatibility of
# c plugins (.so files) with the OSX version Python3 was built with.
# For Python3.4, this is OSX 10.6, but we need Thread Local Support (__thread)
if 'darwin' in sys.platform and PY3:
    mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
    if mac_target and (pkg_resources.parse_version(mac_target) <
                       pkg_resources.parse_version('10.7.0')):
        os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.7'
        os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
            r'macosx-[0-9]+\.[0-9]+-(.+)',
            r'macosx-10.7-\1',
            util.get_platform())
def cython_extensions_and_necessity():
    """
    Build the Extension objects for the Cython modules and decide whether
    Cython itself is required at setup time.

    :return: tuple of (processed extension list, bool need_cython)
    """
    cython_module_files = [os.path.join(PYTHON_STEM,
                                        name.replace('.', '/') + '.pyx')
                           for name in CYTHON_EXTENSION_MODULE_NAMES]
    extensions = [
        _extension.Extension(
            name=module_name,
            sources=[module_file] + list(CYTHON_HELPER_C_FILES) + list(CORE_C_FILES),
            include_dirs=list(EXTENSION_INCLUDE_DIRECTORIES),
            libraries=list(EXTENSION_LIBRARIES),
            define_macros=list(DEFINE_MACROS),
            extra_compile_args=list(CFLAGS),
            extra_link_args=list(LDFLAGS),
        ) for (module_name, module_file) in zip(list(CYTHON_EXTENSION_MODULE_NAMES), cython_module_files)
    ]
    # Cython is needed when explicitly requested, or when any generated .c
    # file is missing/stale (check_and_update_cythonization returns False).
    # Short-circuiting preserves the original behavior of skipping the check
    # entirely when BUILD_WITH_CYTHON is set.
    need_cython = BUILD_WITH_CYTHON or not commands.check_and_update_cythonization(extensions)
    return commands.try_cythonize(extensions, linetracing=ENABLE_CYTHON_TRACING, mandatory=BUILD_WITH_CYTHON), need_cython
# Build (or reuse) the Cython extension modules and record whether Cython
# itself must be available while running setup.
CYTHON_EXTENSION_MODULES, need_cython = cython_extensions_and_necessity()

PACKAGE_DIRECTORIES = {
    '': PYTHON_STEM,
}

INSTALL_REQUIRES = (
    'six>=1.5.2',
    'enum34>=1.0.4',
    # TODO(atash): eventually split the grpcio package into a metapackage
    # depending on protobuf and the runtime component (independent of protobuf)
    'protobuf>=3.2.0',
)
if not PY3:
    # The concurrent.futures backport is only needed on Python 2.
    INSTALL_REQUIRES += ('futures>=2.2.0',)

SETUP_REQUIRES = INSTALL_REQUIRES + (
    'sphinx>=1.3',
    'sphinx_rtd_theme>=0.1.8',
    'six>=1.10',
) if ENABLE_DOCUMENTATION_BUILD else ()

try:
    import Cython
except ImportError:
    if BUILD_WITH_CYTHON:
        sys.stderr.write(
            "You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, "
            "but do not have Cython installed. We won't stop you from using "
            "other commands, but the extension files will fail to build.\n")
    elif need_cython:
        sys.stderr.write(
            'We could not find Cython. Setup may take 10-20 minutes.\n')
        SETUP_REQUIRES += ('cython>=0.23',)

COMMAND_CLASS = {
    'doc': commands.SphinxDocumentation,
    'build_project_metadata': commands.BuildProjectMetadata,
    'build_py': commands.BuildPy,
    'build_ext': commands.BuildExt,
    'gather': commands.Gather,
}

# Ensure that package data is copied over before any commands have been run:
credentials_dir = os.path.join(PYTHON_STEM, 'grpc', '_cython', '_credentials')
try:
    os.mkdir(credentials_dir)
except OSError:
    # Directory already exists.
    pass
shutil.copyfile(os.path.join('etc', 'roots.pem'),
                os.path.join(credentials_dir, 'roots.pem'))

PACKAGE_DATA = {
    # Binaries that may or may not be present in the final installation, but are
    # mentioned here for completeness.
    'grpc._cython': [
        '_credentials/roots.pem',
        '_windows/grpc_c.32.python',
        '_windows/grpc_c.64.python',
    ],
}
PACKAGES = setuptools.find_packages(PYTHON_STEM)

setuptools.setup(
    name='grpcio',
    version=grpc_version.VERSION,
    description='HTTP/2-based RPC framework',
    author='The gRPC Authors',
    author_email='grpc-io@googlegroups.com',
    url='http://www.grpc.io',
    license=LICENSE,
    long_description=open(README).read(),
    ext_modules=CYTHON_EXTENSION_MODULES,
    packages=list(PACKAGES),
    package_dir=PACKAGE_DIRECTORIES,
    package_data=PACKAGE_DATA,
    install_requires=INSTALL_REQUIRES,
    setup_requires=SETUP_REQUIRES,
    cmdclass=COMMAND_CLASS,
)
| |
#!/usr/bin/env python
# Classes used to handle Photo Booth files
import os
import subprocess
import glob
import zipfile
from twython import Twython
from twython import TwythonError
from twython import TwythonAuthError
from datetime import datetime
# Thanks http://stackoverflow.com/questions/26790916/python-3-backward-compatability-shlex-quote-vs-pipes-quote
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
from auth import (
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
# Set up the directories etc. to support photo storage and upload
local_file_dir = os.path.join(os.sep, 'home', 'pi', 'tweetBooth', 'pics')  # path to save PiCamera images to on Pi
local_upload_file_dir = os.path.join(os.sep, 'home', 'pi', 'tweetBooth', 'pics',
                                     'upload')  # path to save images to be uploaded
local_archive_dir = os.path.join(os.sep, 'home', 'pi', 'tweetBooth', 'archive')  # path to store photos
class FileHandler(object):
'Basic handling class for file operations'
global local_file_dir
global local_upload_file_dir
global local_archive_dir
def __init__(self):
# Ensure photo storage and upload directories exist
try:
# We could use os.mkdir below - but unix 'mkdir -p' makes all directories
# necessary in the entire path?
# Ensure the 'upload' directory exists
subprocess.check_call(["mkdir", "-p", local_upload_file_dir])
# Ensure the 'pics' directory exists
subprocess.check_call(["mkdir", "-p", local_file_dir])
subprocess.check_call(["mkdir", "-p", local_archive_dir])
except subprocess.CalledProcessError as e:
print "Error making local directories: ", e.returncode
raise
def get_local_file_dir(self):
return local_file_dir
def delete_files(self, the_dir):
# Delete files in directory the_dir
files = os.listdir(the_dir)
for f in files:
full_path = os.path.join(the_dir, f)
# Only delete files, leave directories alone
if os.path.isfile(full_path):
self.delete_file(full_path)
def delete_file(self, file_path):
os.remove(file_path)
def delete_local_files(self):
self.delete_files(local_file_dir)
def delete_upload_files(self):
self.delete_files(local_upload_file_dir)
def get_local_file_dir(self):
return local_file_dir
def get_upload_file_dir(self):
return local_upload_file_dir
def get_archive_file_dir(self):
return local_archive_dir
def get_full_path(self, prefix, postfix):
return os.path.join(prefix, postfix)
def get_sorted_file_list(self, filepath_pattern):
return sorted(glob.glob(filepath_pattern))
# *** Zip the images up, ready for upload
def zip_images(self, image_extension, zip_filename):
print "Zipping files ..."
file_pattern = os.path.join(local_upload_file_dir, "*photobooth*" + image_extension)
files = sorted(glob.glob(file_pattern))
with zipfile.ZipFile(os.path.join(local_upload_file_dir, zip_filename), 'w') as myzip:
for curr_file in files:
myzip.write(curr_file, arcname=os.path.basename(curr_file), compress_type=zipfile.ZIP_DEFLATED)
# Copy file at src_filepath to dest_filepath
def copy_file(self, src_filepath, dest_filepath):
print "Copy file: " + src_filepath + " TO " + dest_filepath
try:
subprocess.check_call("cp " + src_filepath + " " + dest_filepath, shell=True)
except subprocess.CalledProcessError as e:
print "Error copying file: ", e.returncode
raise
# *** Upload files ***
# file_defs is a list of lists containing:
# - full_local_filepath: the full path to file(s) to upload, including (e.g.) local_file_path
# full_local_filepath can include a file pattern to match a number of files
# - dest_filename: if "" use source filename, otherwise change destination filename (but retain extension)
# - full_remote_dir_path: the full path to the dir to upload files into, including remote_file_dir
# - num_files: if full_local_filepath includes a pattern that matches a number of files,
# this is the number of those files to upload. 0 means all files.
# - overwrite: if file exists in destination, overwrite if True, otherwise modify filename to make unique
def upload_files(self, file_defs):
print "Uploading files ... "
for curr_file_def in file_defs:
full_local_filepath, dest_filename, full_remote_dir_path, num_files, overwrite = curr_file_def
# Find all the files that match our full_local_filepath (which may contain pattern)
local_files = sorted(glob.glob(full_local_filepath))
try:
if len(local_files) > 0:
# Ensure the remote dir exits
# TODO: How expensive are calls to mkdir if dir already exists,
# better to check dir exists first?
subprocess.check_call("ssh " + remote_account + " 'mkdir -p " +
full_remote_dir_path + "'", shell=True)
curr_file_num = 1
for curr_file in local_files:
if (num_files is not 0) and (curr_file_num > num_files):
break
# Deal with the case where we want to alter the destination filename
curr_src_full_filename = os.path.basename(curr_file)
if dest_filename is "":
full_remote_filepath = cmd_quote(os.path.join(full_remote_dir_path,
curr_src_full_filename))
else:
filename, extension = os.path.splitext(curr_src_full_filename)
full_remote_filepath = cmd_quote(os.path.join(full_remote_dir_path,
dest_filename + extension))
# Deal with the case where we do not want to overwrite the dest file
file_num = 2
if overwrite is False:
full_remote_filename = os.path.basename(full_remote_filepath)
while subprocess.call(['ssh', remote_account, 'test -e ' +
full_remote_filepath]) == 0:
filename_no_ext, filename_ext = os.path.splitext(full_remote_filename)
full_remote_filepath = cmd_quote(os.path.join(full_remote_dir_path,
filename_no_ext + "_" + str(
file_num) + filename_ext))
file_num += 1
subprocess.check_call("scp " + curr_file + " " +
remote_account + ":" + full_remote_filepath, shell=True)
curr_file_num += 1
except subprocess.CalledProcessError as e:
print "Error uploading files: ", e.returncode
raise
print "... upload finished."
    def tweet_file(self):
        # Tweet every pending photo-booth image and archive each one locally.
        #
        # Returns True if all images were tweeted.  On a Twython error the
        # success flag is set False but the exception is re-raised, so the
        # return statement is only ever reached on the success path.
        try:
            success = True
            # Credentials come from module-level globals (consumer_key etc.).
            twitter = Twython(consumer_key, consumer_secret,
                              access_token, access_token_secret)
            message = '#CVconference with Team @RiosRoadRunners! #YouBelong #RiosRocks @ErinGassaway @LizLoether @CajonValleyUSD'
            # Get directories
            image_dir = self.get_local_file_dir()
            # PiCamera captures images at 72 pixels/inch.
            # Collect a list of the original PiCamera-saved files
            file_pattern = os.path.join(image_dir, "twitterBooth*.jpg")
            files = self.get_sorted_file_list(file_pattern)
            for curr_img in files:
                with open(curr_img, 'rb') as photo:
                    # Two-step Twitter upload: register the media first, then
                    # attach its media_id to the status update.
                    response = twitter.upload_media(media=photo)
                    twitter.update_status(status=message, media_ids=[response['media_id']])
                    # twitter.update_status_with_media(status=message, media=photo)
                # Archive the tweeted image under a timestamped name so a later
                # run does not pick it up again.
                self.copy_file(curr_img, local_archive_dir + "/%s.jpg" % datetime.now().isoformat())
        except TwythonAuthError as e:
            print "Auth Error: ", e
            success = False
            raise
        except TwythonError as e:
            print "Error uploading files: ", e
            success = False
            raise
        return success
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test fee estimation code
#
from collections import OrderedDict
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many many transactions without needing to spend
# time signing.
P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP"
# Associated ScriptSigs that satisfy P2SH_1 and P2SH_2, respectively:
# a push of OP_TRUE plus a push of the 2-byte redeem script
# "OP_1 OP_DROP" or "OP_2 OP_DROP" (hex-encoded, including length bytes).
SCRIPT_SIG = ["0451025175", "0451025275"]
def satoshi_round(amount):
    """Round `amount` down to eight decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
    '''
    Create and send a transaction with a random fee.
    The transaction pays to a trivial P2SH script, and assumes that its inputs
    are of the same form.
    The function takes a list of confirmed outputs and unconfirmed outputs
    and attempts to use the confirmed list first for its inputs.
    It adds the newly created outputs to the unconfirmed list.
    Returns (raw transaction, fee).
    Raises RuntimeError if both lists together cannot cover amount + fee.
    '''
    # It's best to exponentially distribute our random fees
    # because the buckets are exponentially spaced.
    # Exponentially distributed from 1-128 * fee_increment
    rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
    # Total fee ranges from min_fee to min_fee + 127*fee_increment
    fee = min_fee - fee_increment + satoshi_round(rand_fee)
    # Gather inputs: drain confirmed outputs first, unconfirmed as a fallback.
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in <= (amount + fee) and len(conflist) > 0:
        t = conflist.pop(0)
        total_in += t["amount"]
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
    if total_in <= amount + fee:
        while total_in <= (amount + fee) and len(unconflist) > 0:
            t = unconflist.pop(0)
            total_in += t["amount"]
            inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
        if total_in <= amount + fee:
            raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
    outputs = {}
    # vout 0 is the change back to P2SH_1; vout 1 pays `amount` to P2SH_2.
    outputs = OrderedDict([(P2SH_1, total_in - amount - fee),
                           (P2SH_2, amount)])
    rawtx = from_node.createrawtransaction(inputs, outputs)
    # createrawtransaction constructs a transaction that is ready to be signed.
    # These transactions don't need to be signed, but we still have to insert
    # the ScriptSig that will satisfy the ScriptPubKey.
    # Splice the canned ScriptSig into each serialized input in place of the
    # empty scriptSig; the 10/82/84/92 offsets index the hex serialization
    # (82 hex chars per input after the 10-char header).
    completetx = rawtx[0:10]
    inputnum = 0
    for inp in inputs:
        completetx += rawtx[10+82*inputnum:82+82*inputnum]
        completetx += SCRIPT_SIG[inp["vout"]]
        completetx += rawtx[84+82*inputnum:92+82*inputnum]
        inputnum += 1
    completetx += rawtx[10+82*inputnum:]
    txid = from_node.sendrawtransaction(completetx, True)
    # Record both new outputs as unconfirmed until they are mined.
    unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
    unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
    return (completetx, fee)
def split_inputs(from_node, txins, txouts, initial_split = False):
    '''
    We need to generate a lot of very small inputs so we can generate a ton of
    transactions and they will have low priority.
    Pops one output from txins, spends it in a transaction that splits its
    value into 2 outputs, and appends those 2 new outputs to txouts.
    '''
    prevtxout = txins.pop()
    inputs = [{"txid": prevtxout["txid"], "vout": prevtxout["vout"]}]
    # Split roughly in half, keeping 1000 satoshis back as the fee.
    half_change = satoshi_round(prevtxout["amount"] / 2)
    rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
    outputs = OrderedDict([(P2SH_1, half_change), (P2SH_2, rem_change)])
    rawtx = from_node.createrawtransaction(inputs, outputs)
    # The very first split spends a real wallet output, so it must be signed;
    # every later split spends one of our trivial P2SH outputs and only needs
    # the canned ScriptSig spliced over the empty scriptSig bytes.
    if initial_split:
        completetx = from_node.signrawtransaction(rawtx)["hex"]
    else:
        completetx = rawtx[:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:]
    txid = from_node.sendrawtransaction(completetx, True)
    txouts.append({"txid": txid, "vout": 0, "amount": half_change})
    txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
    '''
    Query estimatefee for confirmation targets 1..25 and verify invariants:
    every valid estimate lies within the range of fees actually seen, the
    estimates are monotonically non-increasing, invalid (-1) estimates never
    appear above a valid one, and at most max_invalid estimates are invalid.
    Returns the full list of 25 estimates.
    '''
    all_estimates = [node.estimatefee(i) for i in range(1, 26)]
    if print_estimates:
        print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
    delta = 1.0e-6  # account for rounding error
    last_e = max(fees_seen)
    for e in [x for x in all_estimates if x >= 0]:
        # Estimates should be within the bounds of what transaction fees actually were.
        out_of_range = (float(e) + delta < min(fees_seen) or
                        float(e) - delta > max(fees_seen))
        if out_of_range:
            raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                 %(float(e), min(fees_seen), max(fees_seen)))
        # Estimates should be monotonically decreasing as targets grow.
        if float(e) - delta > last_e:
            raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
                                 %(float(e),float(last_e)))
        last_e = e
    seen_valid = False
    invalid_count = 0
    for e in all_estimates:
        if e >= 0:
            seen_valid = True
        else:
            invalid_count += 1
        # Once any target produces a valid estimate, all higher targets must too.
        if seen_valid and e < 0:
            raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
    # Check on the expected number of different confirmation counts
    # that we might not have valid estimates for.
    if invalid_count > max_invalid:
        raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
    return all_estimates
class EstimateFeeTest(BitcoinTestFramework):
    """Exercise the fee-estimation code under three mining regimes and verify
    the estimates returned by estimatefee stay within observed-fee bounds."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 3
        self.setup_clean_chain = False

    def setup_network(self):
        '''
        We'll setup the network to have 3 nodes that all mine with different parameters.
        But first we need to use one node to create a lot of small low priority outputs
        which we will use to generate our transactions.
        '''
        self.nodes = []
        # Use node0 to mine blocks for input splitting
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
                                                              "-allowfreetx=0", "-whitelist=127.0.0.1"]))
        print("This test is time consuming, please be patient")
        print("Splitting inputs to small size so we can generate low priority tx's")
        self.txouts = []
        self.txouts2 = []
        # Split a coinbase into two transaction puzzle outputs
        split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
        # Mine until the mempool is empty so the split confirms
        while (len(self.nodes[0].getrawmempool()) > 0):
            self.nodes[0].generate(1)
        # Repeatedly split those 2 outputs, doubling twice for each rep
        # Use txouts to monitor the available utxo, since these won't be tracked in wallet
        reps = 0
        while (reps < 5):
            # Double txouts to txouts2
            while (len(self.txouts)>0):
                split_inputs(self.nodes[0], self.txouts, self.txouts2)
            while (len(self.nodes[0].getrawmempool()) > 0):
                self.nodes[0].generate(1)
            # Double txouts2 to txouts
            while (len(self.txouts2)>0):
                split_inputs(self.nodes[0], self.txouts2, self.txouts)
            while (len(self.nodes[0].getrawmempool()) > 0):
                self.nodes[0].generate(1)
            reps += 1
        print("Finished splitting")
        # Now we can connect the other nodes, didn't want to connect them earlier
        # so the estimates would not be affected by the splitting transactions
        # Node1 mines small blocks but that are bigger than the expected transaction rate,
        # and allows free transactions.
        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
        # (17k is room enough for 110 or so transactions)
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-blockmaxsize=18000",
                                      "-maxorphantx=1000", "-allowfreetx=0", "-debug=estimatefee"]))
        connect_nodes(self.nodes[1], 0)
        # Node2 is a stingy miner, that
        # produces too small blocks (room for only 70 or so transactions)
        node2args = ["-allowfreetx=0", "-blockmaxsize=12000", "-maxorphantx=1000"]
        self.nodes.append(start_node(2, self.options.tmpdir, node2args))
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[2], 1)
        self.is_network_split = False
        self.sync_all()

    def transact_and_mine(self, numblocks, mining_node):
        # Generate ~100 random-fee transactions per block and have mining_node
        # mine numblocks blocks, tracking which txouts get confirmed.
        min_fee = Decimal("0.00001")
        # We will now mine numblocks blocks generating on average 100 transactions between each block
        # We shuffle our confirmed txout set before each set of transactions
        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
        # resorting to tx's that depend on the mempool when those run out
        for i in range(numblocks):
            random.shuffle(self.confutxo)
            for j in range(random.randrange(100-50,100+50)):
                from_index = random.randint(1,2)
                (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
                                                      self.memutxo, Decimal("0.005"), min_fee, min_fee)
                tx_kbytes = (len(txhex) // 2) / 1000.0
                self.fees_per_kb.append(float(fee)/tx_kbytes)
            sync_mempools(self.nodes[0:3],.1)
            mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
            sync_blocks(self.nodes[0:3],.1)
            # Update which txouts are confirmed: anything mined moves from the
            # unconfirmed (memutxo) set to the confirmed (confutxo) set.
            newmem = []
            for utx in self.memutxo:
                if utx["txid"] in mined:
                    self.confutxo.append(utx)
                else:
                    newmem.append(utx)
            self.memutxo = newmem

    def run_test(self):
        # Run the three mining regimes in sequence, checking estimates after each.
        self.fees_per_kb = []
        self.memutxo = []
        self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
        print("Checking estimates for 1/2/3/6/15/25 blocks")
        print("Creating transactions and mining them with a huge block size")
        # Create transactions and mine 30 big blocks with node 0 such that the mempool is always emptied
        self.transact_and_mine(30, self.nodes[0])
        check_estimates(self.nodes[1], self.fees_per_kb, 1)
        print("Creating transactions and mining them with a block size that can't keep up")
        # Create transactions and mine 20 small blocks with node 2, but create txs faster than we can mine
        self.transact_and_mine(20, self.nodes[2])
        check_estimates(self.nodes[1], self.fees_per_kb, 3)
        print("Creating transactions and mining them at a block size that is just big enough")
        # Generate transactions while mining 40 more blocks, this time with node1
        # which mines blocks with capacity just above the rate that transactions are being created
        self.transact_and_mine(40, self.nodes[1])
        check_estimates(self.nodes[1], self.fees_per_kb, 2)
        # Finish by mining a normal-sized block:
        while len(self.nodes[1].getrawmempool()) > 0:
            self.nodes[1].generate(1)
        sync_blocks(self.nodes[0:3],.1)
        print("Final estimates after emptying mempools")
        check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Standard test-framework entry point.
if __name__ == '__main__':
    EstimateFeeTest().main()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import reduce_sum
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.ops.nn_ops import conv2d
from tensorflow.python.ops.nn_ops import softmax
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor.

  The concatenated tensor is cached in the CONCATENATED_VARIABLES graph
  collection, so repeated calls within the same variable scope return the
  same concat op instead of building a new one.

  Args:
    name: base name for the variable shards.
    shape: full (unsharded) shape of the variable.
    dtype: data type of the variable.
    num_shards: number of shards the first dimension is split across.

  Returns:
    A single tensor: the lone shard when num_shards yields one piece,
    otherwise the (cached or newly built) concat of all shards along dim 0.
  """
  sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(sharded_variable) == 1:
    return sharded_variable[0]
  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  # Reuse a previously built concat for this scope if one exists.
  for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if value.name == concat_full_name:
      return value
  concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concat_variable)
  return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype.

  The first dimension of `shape` is split as evenly as possible across
  `num_shards` variables named "<name>_0" .. "<name>_<num_shards-1>".

  Raises:
    ValueError: if there are more shards than rows to distribute.
  """
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  # Distribute shape[0] rows evenly: the first `extra_rows` shards each
  # receive one additional row.
  base_rows = int(math.floor(shape[0] / num_shards))
  extra_rows = shape[0] - base_rows * num_shards
  shards = []
  for shard_id in range(num_shards):
    rows = base_rows + 1 if shard_id < extra_rows else base_rows
    shards.append(vs.get_variable(name + "_%d" % shard_id,
                                  [rows] + shape[1:], dtype=dtype))
  return shards
class TimeFreqLSTMCell(rnn_cell.RNNCell):
  """Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.

  This implementation is based on:
    Tara N. Sainath and Bo Li
    "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
    for LVCSR Tasks." submitted to INTERSPEECH, 2016.

  It uses peep-hole connections and optional cell clipping.
  """

  def __init__(self, num_units, use_peepholes=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_unit_shards: int, How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      forget_bias: float, Biases of the forget gate are initialized by default
        to 1 in order to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: int, The size of the input feature the LSTM spans over.
      frequency_skip: int, The amount the LSTM filter is shifted by in
        frequency.
    """
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    # State is a (c, m) pair per frequency block.  NOTE(review): __call__
    # emits one such pair per frequency step, so the state it returns grows
    # with the number of frequency blocks; 2 * num_units covers one block.
    self._state_size = 2 * num_units
    self._output_size = num_units

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
      scope: VariableScope for the created subgraph; defaults to
        "TimeFreqLSTMCell".

    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    # Slice the input into overlapping frequency blocks.
    freq_inputs = self._make_tf_features(inputs)
    dtype = inputs.dtype
    actual_input_size = freq_inputs[0].get_shape().as_list()[1]
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "TimeFreqLSTMCell"
      # One weight matrix produces all four gates at once (4 * num_units).
      concat_w = _get_concat_variable(
          "W", [actual_input_size + 2*self._num_units, 4 * self._num_units],
          dtype, self._num_unit_shards)
      b = vs.get_variable(
          "B", shape=[4 * self._num_units],
          initializer=array_ops.zeros_initializer, dtype=dtype)

      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_i_diag = vs.get_variable(
            "W_I_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)

      # initialize the first freq state to be zero
      m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
                                     self._num_units], dtype)
      for fq in range(len(freq_inputs)):
        # (c, m) for this frequency block from the incoming time state.
        c_prev = array_ops.slice(state, [0, 2*fq*self._num_units],
                                 [-1, self._num_units])
        m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
                                 [-1, self._num_units])
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        # The cell sees this block's input, the previous time output, and the
        # previous frequency output.
        cell_inputs = array_ops.concat(1, [freq_inputs[fq], m_prev,
                                           m_prev_freq])
        lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
        i, j, f, o = array_ops.split(1, 4, lstm_matrix)

        if self._use_peepholes:
          c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
               sigmoid(i + w_i_diag * c_prev) * tanh(j))
        else:
          c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))

        if self._cell_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
          # pylint: enable=invalid-unary-operand-type

        if self._use_peepholes:
          m = sigmoid(o + w_o_diag * c) * tanh(c)
        else:
          m = sigmoid(o) * tanh(c)

        # The output of this block feeds the next frequency block.
        m_prev_freq = m
        if fq == 0:
          state_out = array_ops.concat(1, [c, m])
          m_out = m
        else:
          state_out = array_ops.concat(1, [state_out, c, m])
          m_out = array_ops.concat(1, [m_out, m])
    return m_out, state_out

  def _make_tf_features(self, input_feat):
    """Make the frequency features.

    Slices the input into blocks of feature_size, shifted by frequency_skip
    (overlapping when frequency_skip < feature_size).

    Args:
      input_feat: input Tensor, 2D, batch x num_units.

    Returns:
      A list of frequency features, with each element containing:
      - A 2D, batch x output_dim, Tensor representing the time-frequency feature
        for that frequency index. Here output_dim is feature_size.
    Raises:
      ValueError: if input_size cannot be inferred from static shape inference.
    """
    input_size = input_feat.get_shape().with_rank(2)[-1].value
    if input_size is None:
      raise ValueError("Cannot infer input_size from static shape inference.")
    num_feats = int((input_size - self._feature_size) / (
        self._frequency_skip)) + 1
    freq_inputs = []
    for f in range(num_feats):
      cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
                                  [-1, self._feature_size])
      freq_inputs.append(cur_input)
    return freq_inputs
class GridLSTMCell(rnn_cell.RNNCell):
  """Grid Long short-term memory unit (LSTM) recurrent network cell.

  The default is based on:
    Nal Kalchbrenner, Ivo Danihelka and Alex Graves
    "Grid Long Short-Term Memory," Proc. ICLR 2016.
    http://arxiv.org/abs/1507.01526

  When peephole connections are used, the implementation is based on:
    Tara N. Sainath and Bo Li
    "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
    for LVCSR Tasks." submitted to INTERSPEECH, 2016.

  The code uses optional peephole connections, shared_weights and cell clipping.
  """

  def __init__(self, num_units, use_peepholes=False,
               share_time_frequency_weights=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, default False. Set True to enable diagonal/peephole
        connections.
      share_time_frequency_weights: bool, default False. Set True to enable
        shared cell weights between time and frequency LSTMs.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_unit_shards: int, How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      forget_bias: float, Biases of the forget gate are initialized by default
        to 1 in order to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: int, The size of the input feature the LSTM spans over.
      frequency_skip: int, The amount the LSTM filter is shifted by in
        frequency.
    """
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._share_time_frequency_weights = share_time_frequency_weights
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    # One (c, m) pair per frequency block; __call__ emits one pair per block.
    self._state_size = 2 * num_units
    self._output_size = num_units

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
      scope: VariableScope for the created subgraph; defaults to "LSTMCell".

    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    freq_inputs = self._make_tf_features(inputs)
    dtype = inputs.dtype
    actual_input_size = freq_inputs[0].get_shape().as_list()[1]
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "GridLSTMCell"
      # Frequency-direction (F-LSTM) gate weights and bias.
      concat_w_f = _get_concat_variable(
          "W_f", [actual_input_size + 2*self._num_units, 4 * self._num_units],
          dtype, self._num_unit_shards)
      b_f = vs.get_variable(
          "B_f", shape=[4 * self._num_units],
          initializer=array_ops.zeros_initializer, dtype=dtype)
      # Time-direction (T-LSTM) weights, unless shared with the F-LSTM.
      if not self._share_time_frequency_weights:
        concat_w_t = _get_concat_variable(
            "W_t", [actual_input_size + 2*self._num_units, 4 * self._num_units],
            dtype, self._num_unit_shards)
        b_t = vs.get_variable(
            "B_t", shape=[4 * self._num_units],
            initializer=array_ops.zeros_initializer, dtype=dtype)
      if self._use_peepholes:
        # Diagonal connections
        w_f_diag_freqf = vs.get_variable(
            "W_F_diag_freqf", shape=[self._num_units], dtype=dtype)
        w_i_diag_freqf = vs.get_variable(
            "W_I_diag_freqf", shape=[self._num_units], dtype=dtype)
        w_o_diag_freqf = vs.get_variable(
            "W_O_diag_freqf", shape=[self._num_units], dtype=dtype)
        w_f_diag_freqt = vs.get_variable(
            "W_F_diag_freqt", shape=[self._num_units], dtype=dtype)
        w_i_diag_freqt = vs.get_variable(
            "W_I_diag_freqt", shape=[self._num_units], dtype=dtype)
        w_o_diag_freqt = vs.get_variable(
            "W_O_diag_freqt", shape=[self._num_units], dtype=dtype)
        if not self._share_time_frequency_weights:
          w_f_diag_timef = vs.get_variable(
              "W_F_diag_timef", shape=[self._num_units], dtype=dtype)
          w_i_diag_timef = vs.get_variable(
              "W_I_diag_timef", shape=[self._num_units], dtype=dtype)
          w_o_diag_timef = vs.get_variable(
              "W_O_diag_timef", shape=[self._num_units], dtype=dtype)
          w_f_diag_timet = vs.get_variable(
              "W_F_diag_timet", shape=[self._num_units], dtype=dtype)
          w_i_diag_timet = vs.get_variable(
              "W_I_diag_timet", shape=[self._num_units], dtype=dtype)
          w_o_diag_timet = vs.get_variable(
              "W_O_diag_timet", shape=[self._num_units], dtype=dtype)

      # initialize the first freq state to be zero
      m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
                                     self._num_units], dtype)
      c_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
                                     self._num_units], dtype)
      for freq_index in range(len(freq_inputs)):
        # (c, m) for this block along the time dimension.
        c_prev_time = array_ops.slice(state, [0, 2 * freq_index *
                                              self._num_units],
                                      [-1, self._num_units])
        m_prev_time = array_ops.slice(state, [0, (2 * freq_index + 1) *
                                              self._num_units],
                                      [-1, self._num_units])

        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        cell_inputs = array_ops.concat(1, [freq_inputs[freq_index], m_prev_time,
                                           m_prev_freq])

        # F-LSTM
        lstm_matrix_freq = nn_ops.bias_add(math_ops.matmul(cell_inputs,
                                                           concat_w_f), b_f)
        i_freq, j_freq, f_freq, o_freq = array_ops.split(1, 4, lstm_matrix_freq)

        # T-LSTM
        if self._share_time_frequency_weights:
          i_time = i_freq
          j_time = j_freq
          f_time = f_freq
          o_time = o_freq
        else:
          lstm_matrix_time = nn_ops.bias_add(math_ops.matmul(cell_inputs,
                                                             concat_w_t), b_t)
          i_time, j_time, f_time, o_time = array_ops.split(1, 4,
                                                           lstm_matrix_time)

        # F-LSTM c_freq
        if self._use_peepholes:
          c_freq = (sigmoid(f_freq + self._forget_bias + w_f_diag_freqf * (
              c_prev_freq) + w_f_diag_freqt * c_prev_time) * c_prev_freq +
                    sigmoid(i_freq + w_i_diag_freqf * c_prev_freq + (
                        w_i_diag_freqt * c_prev_time)) * tanh(j_freq))
        else:
          c_freq = (sigmoid(f_freq + self._forget_bias) * c_prev_freq +
                    sigmoid(i_freq) * tanh(j_freq))
        if self._cell_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
                                          self._cell_clip)
          # pylint: enable=invalid-unary-operand-type

        # T-LSTM c_time
        if self._use_peepholes:
          if self._share_time_frequency_weights:
            # NOTE(review): unlike the F-LSTM forget gate above, the freqt
            # peephole term here is nested inside the freqf product --
            # confirm against the reference formulation before changing.
            c_time = sigmoid(f_time + self._forget_bias + w_f_diag_freqf * (
                c_prev_freq + w_f_diag_freqt * c_prev_time)) * c_prev_time + (
                    sigmoid(i_time + w_i_diag_freqf * c_prev_freq + (
                        w_i_diag_freqt * c_prev_time)) * tanh(j_time))
          else:
            # NOTE(review): the forget gate pairs w_f_diag_timef with
            # c_prev_time while the input gate below pairs w_i_diag_timef
            # with c_prev_freq -- looks inconsistent; verify intent.
            c_time = sigmoid(f_time + self._forget_bias + w_f_diag_timef * (
                c_prev_time + w_f_diag_timet * c_prev_time)) * c_prev_time + (
                    sigmoid(i_time + w_i_diag_timef * c_prev_freq + (
                        w_i_diag_timet * c_prev_time)) * tanh(j_time))
        else:
          c_time = (sigmoid(f_time + self._forget_bias) * c_prev_time +
                    sigmoid(i_time) * tanh(j_time))
        if self._cell_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          # Bug fix: clip c_time, not c_freq.  Previously the already-clipped
          # frequency state was clipped again and assigned to c_time, which
          # silently replaced the time-direction cell state whenever
          # cell_clip was set.
          c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
                                          self._cell_clip)
          # pylint: enable=invalid-unary-operand-type

        # F-LSTM m_freq
        if self._use_peepholes:
          m_freq = sigmoid(o_freq + w_o_diag_freqf * c_freq +
                           w_o_diag_freqt * c_time) * tanh(c_freq)
        else:
          m_freq = sigmoid(o_freq) * tanh(c_freq)

        # T-LSTM m_time
        if self._use_peepholes:
          if self._share_time_frequency_weights:
            m_time = sigmoid(o_time + w_o_diag_freqf * c_freq +
                             w_o_diag_freqt * c_time) * tanh(c_time)
          else:
            m_time = sigmoid(o_time + w_o_diag_timef * c_freq +
                             w_o_diag_timet * c_time) * tanh(c_time)
        else:
          m_time = sigmoid(o_time) * tanh(c_time)

        m_prev_freq = m_freq
        c_prev_freq = c_freq
        # Concatenate the outputs for T-LSTM and F-LSTM for each shift
        if freq_index == 0:
          state_out = array_ops.concat(1, [c_time, m_time])
          m_out = array_ops.concat(1, [m_time, m_freq])
        else:
          state_out = array_ops.concat(1, [state_out, c_time, m_time])
          m_out = array_ops.concat(1, [m_out, m_time, m_freq])
    return m_out, state_out

  def _make_tf_features(self, input_feat):
    """Make the frequency features.

    Slices the input into blocks of feature_size, shifted by frequency_skip.

    Args:
      input_feat: input Tensor, 2D, batch x num_units.

    Returns:
      A list of frequency features, with each element containing:
      - A 2D, batch x output_dim, Tensor representing the time-frequency feature
        for that frequency index. Here output_dim is feature_size.
    Raises:
      ValueError: if input_size cannot be inferred from static shape inference.
    """
    input_size = input_feat.get_shape().with_rank(2)[-1].value
    if input_size is None:
      raise ValueError("Cannot infer input_size from static shape inference.")
    num_feats = int((input_size - self._feature_size) / (
        self._frequency_skip)) + 1
    freq_inputs = []
    for f in range(num_feats):
      cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
                                  [-1, self._feature_size])
      freq_inputs.append(cur_input)
    return freq_inputs
# Reuse core rnn_cell's private linear-layer helper for the wrappers below.
# pylint: disable=protected-access
_linear = rnn_cell._linear
# pylint: enable=protected-access
class AttentionCellWrapper(rnn_cell.RNNCell):
"""Basic attention cell wrapper.
Implementation based on https://arxiv.org/pdf/1601.06733.pdf.
"""
def __init__(self, cell, attn_length, attn_size=None, attn_vec_size=None,
input_size=None, state_is_tuple=False):
"""Create a cell with attention.
Args:
cell: an RNNCell, an attention is added to it.
attn_length: integer, the size of an attention window.
attn_size: integer, the size of an attention vector. Equal to
cell.output_size by default.
attn_vec_size: integer, the number of convolutional features calculated
on attention state and a size of the hidden layer built from
base cell state. Equal attn_size to by default.
input_size: integer, the size of a hidden linear layer,
built from inputs and attention. Derived from the input tensor
by default.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. By default (False), the states are all
concatenated along the column axis.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if cell returns a state tuple but the flag
`state_is_tuple` is `False` or if attn_length is zero or less.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if nest.is_sequence(cell.state_size) and not state_is_tuple:
raise ValueError("Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: %s"
% str(cell.state_size))
if attn_length <= 0:
raise ValueError("attn_length should be greater than zero, got %s"
% str(attn_length))
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True." % self)
if attn_size is None:
attn_size = cell.output_size
if attn_vec_size is None:
attn_vec_size = attn_size
self._state_is_tuple = state_is_tuple
self._cell = cell
self._attn_vec_size = attn_vec_size
self._input_size = input_size
self._attn_size = attn_size
self._attn_length = attn_length
@property
def state_size(self):
size = (self._cell.state_size, self._attn_size,
self._attn_size * self._attn_length)
if self._state_is_tuple:
return size
else:
return sum(list(size))
@property
def output_size(self):
return self._attn_size
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell with attention (LSTMA)."""
with vs.variable_scope(scope or type(self).__name__):
if self._state_is_tuple:
state, attns, attn_states = state
else:
states = state
state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
attns = array_ops.slice(
states, [0, self._cell.state_size], [-1, self._attn_size])
attn_states = array_ops.slice(
states, [0, self._cell.state_size + self._attn_size],
[-1, self._attn_size * self._attn_length])
attn_states = array_ops.reshape(attn_states,
[-1, self._attn_length, self._attn_size])
input_size = self._input_size
if input_size is None:
input_size = inputs.get_shape().as_list()[1]
inputs = _linear([inputs, attns], input_size, True)
lstm_output, new_state = self._cell(inputs, state)
if self._state_is_tuple:
new_state_cat = array_ops.concat(1, nest.flatten(new_state))
else:
new_state_cat = new_state
new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
with vs.variable_scope("AttnOutputProjection"):
output = _linear([lstm_output, new_attns], self._attn_size, True)
new_attn_states = array_ops.concat(1, [new_attn_states,
array_ops.expand_dims(output, 1)])
new_attn_states = array_ops.reshape(
new_attn_states, [-1, self._attn_length * self._attn_size])
new_state = (new_state, new_attns, new_attn_states)
if not self._state_is_tuple:
new_state = array_ops.concat(1, list(new_state))
return output, new_state
  def _attention(self, query, attn_states):
    """Compute an attention-weighted context over `attn_states` for `query`.

    Additive attention: score = v . tanh(conv(hidden) + W*query), softmaxed
    over the `attn_length` stored states.  Returns the new attention vector
    and the history with the oldest slot dropped (the caller appends the
    new output in its place).
    """
    with vs.variable_scope("Attention"):
      k = vs.get_variable("AttnW", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("AttnV", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      # 1x1 convolution == per-slot linear map of the stored states.
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      # Attention-weighted sum of the stored states -> context vector.
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states
| |
"""Definitions used by commands sent to inferior Python in python.el."""
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
# Author: Dave Love <fx@gnu.org>
# Alex Kritikos <alex@8bitb.us> hacked it to work with IPython
# This file is part of GNU Emacs.
# GNU Emacs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# GNU Emacs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>.
import os, sys, traceback, inspect, __main__
try:
set
except:
from sets import Set as set
__all__ = ["eexecfile", "eargs", "complete", "ehelp", "eimport", "modpath"]
def format_exception (filename, should_remove_self):
    """Print the pending exception, formatted for display in Emacs.

    FILENAME is substituted for the temp-file name inside SyntaxErrors.
    When SHOULD_REMOVE_SELF is true, the top traceback frame (this
    module's own frame) is dropped.  Also refreshes
    sys.last_type/last_value/last_traceback.  Python 2 only.
    """
    type, value, tb = sys.exc_info ()
    sys.last_type = type
    sys.last_value = value
    sys.last_traceback = tb
    if type is SyntaxError:
        try: # parse the error message
            # Python 2 allows unpacking a SyntaxError instance directly.
            msg, (dummy_filename, lineno, offset, line) = value
        except:
            pass # Not the format we expect; leave it alone
        else:
            # Stuff in the right filename
            value = SyntaxError(msg, (filename, lineno, offset, line))
            sys.last_value = value
    res = traceback.format_exception_only (type, value)
    # There are some compilation errors which do not provide traceback so we
    # should not massage it.
    if should_remove_self:
        tblist = traceback.extract_tb (tb)
        del tblist[:1]
        res = traceback.format_list (tblist)
        if res:
            res.insert(0, "Traceback (most recent call last):\n")
        res[len(res):] = traceback.format_exception_only (type, value)
    # traceback.print_exception(type, value, tb)
    # Trailing comma: the formatted chunks already contain newlines.
    for line in res: print line,
def eexecfile (file):
    """Execute FILE and then remove it.
    Execute the file within the __main__ namespace.
    If we get an exception, print a traceback with the top frame
    (ourselves) excluded."""
    # We cannot use real execfile since it has a bug where the file stays
    # locked forever (under w32) if SyntaxError occurs.
    # --- code based on code.py and PyShell.py.
    try:
        try:
            try:
                # Prefer IPython's runner when available so its features
                # (magics etc.) apply to the executed code.
                from IPython import ipapi
                ip = ipapi.get()
                ip.runlines(open(file).readlines())
            except:
                # Fall back to plain execution in __main__'s namespace.
                execfile(file, globals(), __main__.__dict__)
        except (OverflowError, SyntaxError, ValueError):
            # FIXME: When can compile() raise anything else than
            # SyntaxError ????
            format_exception (file, False)
            return
    finally:
        # The temp file is always removed, even on success or SyntaxError.
        os.remove (file)
def eargs (name, imports):
    "Get arglist of NAME for Eldoc &c."
    # Always prints one '_emacs_out ...' line for Emacs to parse; any
    # failure degrades to an empty answer rather than raising.
    try:
        if imports: exec imports
        parts = name.split ('.')
        if len (parts) > 1:
            exec 'import ' + parts[0] # might fail
        func = eval (name)
        if inspect.isbuiltin (func) or type(func) is type:
            # Builtins/types have no introspectable signature; scrape the
            # first line (or the part before ' ->') of the docstring.
            doc = func.__doc__
            if doc.find (' ->') != -1:
                print '_emacs_out', doc.split (' ->')[0]
            else:
                print '_emacs_out', doc.split ('\n')[0]
            return
        if inspect.ismethod (func):
            # Unwrap Python 2 bound/unbound methods to the raw function.
            func = func.im_func
        if not inspect.isfunction (func):
            print '_emacs_out '
            return
        (args, varargs, varkw, defaults) = inspect.getargspec (func)
        # No space between name and arglist for consistency with builtins.
        print '_emacs_out', \
            func.__name__ + inspect.formatargspec (args, varargs, varkw,
                                                   defaults)
    except:
        print "_emacs_out "
def all_names (object):
    """Return (an approximation to) a list of all possible attribute
    names reachable via the attributes of OBJECT, i.e. roughly the
    leaves of the dictionary tree under it."""
    def do_object (object, names):
        # Dispatch on the kind of OBJECT, accumulating into the NAMES set.
        if inspect.ismodule (object):
            do_module (object, names)
        elif inspect.isclass (object):
            do_class (object, names)
        # Might have an object without its class in scope.
        elif hasattr (object, '__class__'):
            names.add ('__class__')
            do_class (object.__class__, names)
        # Probably not a good idea to try to enumerate arbitrary
        # dictionaries...
        return names
    def do_module (module, names):
        # Prefer the declared export list when present.
        if hasattr (module, '__all__'): # limited export list
            names.update(module.__all__)
            for i in module.__all__:
                do_object (getattr (module, i), names)
        else: # use all names
            names.update(dir (module))
            for i in dir (module):
                do_object (getattr (module, i), names)
        return names
    def do_class (object, names):
        ns = dir (object)
        names.update(ns)
        if hasattr (object, '__bases__'): # superclasses
            for i in object.__bases__: do_object (i, names)
        return names
    return do_object (object, set([]))
def complete (name, imports):
"""Complete TEXT in NAMESPACE and print a Lisp list of completions.
Exec IMPORTS first."""
import __main__, keyword
def class_members(object):
names = dir (object)
if hasattr (object, '__bases__'):
for super in object.__bases__:
names = class_members (super)
return names
names = set([])
base = None
try:
dict = __main__.__dict__.copy()
if imports: exec imports in dict
l = len (name)
if not "." in name:
for src in [dir (__builtins__), keyword.kwlist, dict.keys()]:
for elt in src:
if elt[:l] == name: names.add(elt)
else:
base = name[:name.rfind ('.')]
name = name[name.rfind('.')+1:]
try:
object = eval (base, dict)
names = set(dir (object))
if hasattr (object, '__class__'):
names.add('__class__')
names.update(class_members (object))
except: names = all_names (dict)
except:
print sys.exc_info()
names = []
l = len(name)
print '_emacs_out (',
for n in names:
if name == n[:l]:
if base: print '"%s.%s"' % (base, n),
else: print '"%s"' % n,
print ')'
def ehelp (name, imports):
    """Get help on string NAME.
    First try to eval name for, e.g. user definitions where we need
    the object. Otherwise try the string form."""
    locls = {}
    if imports:
        # Run IMPORTS in a scratch namespace so NAME can reference them.
        try: exec imports in locls
        except: pass
    try: help (eval (name, globals(), locls))
    except: help (name)
def eimport (mod, dir):
    """Import module MOD with directory DIR at the head of the search path.
    NB doesn't load from DIR if MOD shadows a system module."""
    from __main__ import __dict__
    path0 = sys.path[0]
    sys.path[0] = dir
    try:
        try:
            # Reload when MOD is already bound to a module in __main__;
            # otherwise import it fresh and bind it there.
            if __dict__.has_key(mod) and inspect.ismodule (__dict__[mod]):
                reload (__dict__[mod])
            else:
                __dict__[mod] = __import__ (mod)
        except:
            # Print the traceback without this function's own frame on top.
            (type, value, tb) = sys.exc_info ()
            print "Traceback (most recent call last):"
            traceback.print_exception (type, value, tb.tb_next)
    finally:
        # Always restore the original head of sys.path.
        sys.path[0] = path0
def modpath (module):
    """Return the source file for the given MODULE (or None).
    Assumes that MODULE.py and MODULE.pyc are in the same directory."""
    try:
        path = __import__ (module).__file__
        # Map a compiled .pyc back to its .py source when that file exists.
        if path[-4:] == '.pyc' and os.path.exists (path[0:-1]):
            path = path[:-1]
        print "_emacs_out", path
    except:
        # '()' reads as nil on the Emacs side: "no path found".
        print "_emacs_out ()"
# print '_emacs_ok' # ready for input and can call continuation
# arch-tag: d90408f3-90e2-4de4-99c2-6eb9c7b9ca46
| |
#!/usr/bin/env python3
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Author: Sathish Kuttan <sathish.k.kuttan@intel.com>
# This file defines a message class that contains functions to create
# commands to the target and to parse responses from the target.
import bitstruct
class Message:
    """
    Builds command messages to send to the target and parses/prints
    the response messages received from it.
    """
    # Known message identifiers (the 'msg_id' field of the header word).
    message_id = {1: 'Control'}
    # Command/response code points understood by the target.
    cmd_rsp = {2: 'Load Firmware',
               4: 'Mode Select',
               0x10: 'Memory Read',
               0x11: 'Memory Write',
               0x12: 'Memory Block Write',
               0x13: 'Execute',
               0x14: 'Wait',
               0x20: 'Ready'}
    tx_data = None       # 64-byte command message buffer
    tx_bulk_data = None  # 4096-byte bulk message buffer
    tx_index = 0         # byte offset of the next word written to tx_data
    # Bit layout of the 32-bit command header word (MSB first).
    cmd_word_fmt = 'u1 u1 u1 u5 u16 u8'
    cmd_keys = ['cmd', 'rsvd1', 'rsp', 'msg_id', 'rsvd2', 'cmd_rsp']

    def __init__(self):
        """
        Initialize a 64-byte array for command messages and a
        4096-byte array for bulk messages.
        """
        self.tx_data = bytearray(64)
        self.tx_bulk_data = bytearray(4096)

    def init_tx_data(self):
        """Zero the command buffer in place and rewind the write index."""
        self.tx_data[:] = bytes(len(self.tx_data))
        self.tx_index = 0

    @staticmethod
    def endian_swap(dst, dst_offset, src):
        """
        Copy SRC into DST at DST_OFFSET, byte-swapping each 32-bit word
        to change its endianness.  len(src) must be a multiple of 4.
        """
        for index in range(0, len(src), 4):
            dst[dst_offset + index + 0] = src[index + 3]
            dst[dst_offset + index + 1] = src[index + 2]
            dst[dst_offset + index + 2] = src[index + 1]
            dst[dst_offset + index + 3] = src[index + 0]

    def print_cmd_message(self):
        """Print the words written to the command buffer so far."""
        for index in range(0, self.tx_index, 4):
            offset = index * 8  # bitstruct offsets are in bits
            word = bitstruct.unpack_from('u32', self.tx_data, offset)
            print('Index: %2d Content: 0x%08x' % (index, word[0]))

    def print_response(self, msg, verbose=False):
        """
        Parse response message MSG and print a one-line summary; with
        VERBOSE, also dump the header and payload words.
        """
        unpacked = bitstruct.unpack_from_dict(self.cmd_word_fmt,
                                              self.cmd_keys, msg)
        msg_id = unpacked['msg_id']
        rsp = unpacked['cmd_rsp']
        if msg_id == 0 and rsp == 0:
            print('RSP <<< NULL.')
        else:
            print('RSP <<< %s.' % self.cmd_rsp[rsp])
        if verbose:
            # Payload word count is the low 9 bits of the second word.
            count = bitstruct.unpack_from('u32', msg, 4 * 8)[0]
            count &= 0x1ff
            for index in range(0, 8 + (count * 4), 4):
                offset = index * 8
                word = bitstruct.unpack_from('u32', msg, offset)
                print('Index: %2d Content: 0x%08x' % (index, word[0]))

    def get_cmd_code(self, cmd):
        """
        Return the numeric code for command name CMD.
        Raises ValueError if CMD is not a known command.
        """
        for code, name in self.cmd_rsp.items():
            if name == cmd:
                return code
        raise ValueError('unknown command: %s' % cmd)

    def print_cmd_code(self, cmd):
        """Print the numeric code for the given command."""
        key = self.get_cmd_code(cmd)
        print('CMD >>> %s. Command Code: 0x%02x' % (cmd, key))

    def create_null_cmd(self):
        """Create a NULL command: an all-zeros 64-byte message."""
        print('CMD >>> NULL.')
        self.tx_data[:] = bytes(len(self.tx_data))
        self.tx_index = len(self.tx_data)
        return self.tx_data

    def _pack_word(self, word):
        """Append WORD as a 32-bit field at the current write index."""
        bitstruct.pack_into('u32', self.tx_data, self.tx_index * 8, word)
        self.tx_index += 4

    def _pack_cmd_header(self, cmd):
        """
        Reset the command buffer and pack the header word for CMD,
        leaving tx_index just past the header.
        """
        code = self.get_cmd_code(cmd)
        self.init_tx_data()
        index = list(self.message_id.values()).index('Control')
        msg_id = list(self.message_id.keys())[index]
        bitstruct.pack_into_dict(self.cmd_word_fmt, self.cmd_keys,
                                 self.tx_data, 0,
                                 {'cmd': 1, 'rsvd1': 0, 'rsp': 0,
                                  'msg_id': msg_id, 'rsvd2': 0,
                                  'cmd_rsp': code})
        self.tx_index += 4

    def _create_mem_cmd(self, cmd, words):
        """Create CMD carrying a word count followed by WORDS."""
        print('CMD >>> %s.' % cmd)
        self._pack_cmd_header(cmd)
        self._pack_word(len(words))
        for elm in words:
            self._pack_word(elm)
        return self.tx_data

    def create_memwrite_cmd(self, tuple):
        """
        Create a memory write command with memory address and value pairs.
        """
        return self._create_mem_cmd('Memory Write', tuple)

    def create_memread_cmd(self, tuple):
        """
        Create a memory read command with memory addresses.
        """
        return self._create_mem_cmd('Memory Read', tuple)

    def create_loadfw_cmd(self, size, sha):
        """
        Create a command to load firmware with associated parameters:
        SIZE is the firmware image size; SHA its digest bytes.
        """
        cmd = 'Load Firmware'
        print('CMD >>> %s.' % cmd)
        FW_NO_EXEC_FLAG = 1 << 26
        SEL_HP_CLK = 1 << 21
        LD_FW_HEADER_LEN = 3
        # Low bits carry the header length in words (incl. the digest).
        count_flags = FW_NO_EXEC_FLAG | SEL_HP_CLK
        count_flags |= LD_FW_HEADER_LEN + len(sha) // 4
        self._pack_cmd_header(cmd)
        self._pack_word(count_flags)
        # 0xbe000000: target-side address constant (also used by Execute)
        # -- NOTE(review): meaning not documented here, confirm.
        self._pack_word(0xbe000000)
        self._pack_word(0)
        self._pack_word(size)
        # The digest is appended with each 32-bit word byte-swapped.
        self.endian_swap(self.tx_data, self.tx_index, sha)
        self.tx_index += len(sha)
        return self.tx_data

    def create_execfw_cmd(self):
        """
        Create a command to execute firmware.
        """
        cmd = 'Execute'
        print('CMD >>> %s.' % cmd)
        EXE_FW_HEADER_LEN = 1
        self._pack_cmd_header(cmd)
        self._pack_word(EXE_FW_HEADER_LEN)
        # Same target-side address constant as in the load command.
        self._pack_word(0xbe000000)
        return self.tx_data

    def create_bulk_message(self, data):
        """
        Copy DATA into the bulk buffer, word-swapped, and return the
        filled prefix.
        """
        self.endian_swap(self.tx_bulk_data, 0, data)
        return self.tx_bulk_data[:len(data)]

    def get_bulk_message_size(self):
        """Return the capacity of the bulk message buffer in bytes."""
        return len(self.tx_bulk_data)
| |
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test adapter matcher module"""
import os
import unittest2
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as setting
reload(setting)
from compass.log_analyzor import adapter_matcher
from compass.log_analyzor.file_matcher import FileMatcher
from compass.log_analyzor.file_matcher import FileReaderFactory
from compass.log_analyzor.line_matcher import LineMatcher
from compass.utils import flags
from compass.utils import logsetting
class TestAdapterItemMatcher(unittest2.TestCase):
    """Tests for AdapterItemMatcher.update_progress."""

    def setUp(self):
        super(TestAdapterItemMatcher, self).setUp()
        logsetting.init()

    def tearDown(self):
        super(TestAdapterItemMatcher, self).tearDown()

    @staticmethod
    def _build_fixture():
        """Return (item_matcher, file_reader_factory) over 'test_log'."""
        matcher = adapter_matcher.AdapterItemMatcher([
            FileMatcher(
                min_progress=0.6,
                max_progress=0.9,
                filename='test_log',
                line_matchers={'start': LineMatcher(pattern=r'', severity='')}
            )
        ])
        factory = FileReaderFactory(
            logdir=os.path.dirname(os.path.abspath(__file__)) + '/data'
        )
        return matcher, factory

    @staticmethod
    def _log_history(key):
        """Return a one-entry log history mapping stored under KEY."""
        return {
            key: {
                'filename': 'test_log',
                'partial_line': '',
                'position': 0,
                'line_matcher_name': 'start',
                'percentage': 0.7,
                'message': '',
                'severity': 'INFO'
            }
        }

    def test_update_progress(self):
        """History keyed by the matched filename advances the progress."""
        matcher, factory = self._build_fixture()
        state = {'message': 'dummy', 'severity': 'dummy', 'percentage': 0.5}
        matcher.update_progress(
            file_reader_factory=factory,
            name='host1',
            state=state,
            log_history_mapping=self._log_history('test_log')
        )
        self.assertEqual(0.81, state['percentage'])

    def test_no_filename_update_progress(self):
        """History under an unmatched key falls back to min_progress."""
        matcher, factory = self._build_fixture()
        state = {'message': 'dummy', 'severity': 'dummy', 'percentage': 0.5}
        matcher.update_progress(
            file_reader_factory=factory,
            name='host1',
            state=state,
            log_history_mapping=self._log_history('dummy_log')
        )
        self.assertEqual(0.6, state['percentage'])
class TestOSMatcher(unittest2.TestCase):
    """Tests for OSMatcher matching and progress updates."""

    def setUp(self):
        super(TestOSMatcher, self).setUp()
        logsetting.init()
        self.item_matcher = adapter_matcher.AdapterItemMatcher([
            FileMatcher(
                min_progress=0.6,
                max_progress=0.9,
                filename='test_log',
                line_matchers={'start': LineMatcher(pattern=r'', severity='')}
            )
        ])
        reader_factory = FileReaderFactory(
            logdir=os.path.dirname(os.path.abspath(__file__)) + '/data'
        )
        self.os_matcher = adapter_matcher.OSMatcher(
            os_installer_name='cobbler',
            os_pattern=r'CentOS.*',
            item_matcher=self.item_matcher,
            file_reader_factory=reader_factory
        )

    def tearDown(self):
        super(TestOSMatcher, self).tearDown()

    def test_match_none(self):
        """A missing os_name never matches."""
        self.assertFalse(self.os_matcher.match(
            os_installer_name='cobbler', os_name=None))

    def test_match(self):
        self.assertTrue(self.os_matcher.match(
            os_installer_name='cobbler', os_name='CentOS'))

    def test_installer_unmatch(self):
        self.assertFalse(self.os_matcher.match(
            os_installer_name='dummy', os_name='CentOS'))

    def test_os_unmatch(self):
        self.assertFalse(self.os_matcher.match(
            os_installer_name='cobbler', os_name='dummy'))

    def test_both_unmatch(self):
        self.assertFalse(self.os_matcher.match(
            os_installer_name='dummy', os_name='dummy'))

    def test_update_progress(self):
        state = {'message': 'dummy', 'severity': 'dummy', 'percentage': 0.5}
        history = {
            'test_log': {
                'filename': 'test_log',
                'partial_line': '',
                'position': 0,
                'line_matcher_name': 'start',
                'percentage': 0.0,
                'message': '',
                'severity': 'INFO'
            }
        }
        self.os_matcher.update_progress(
            name='host1', state=state, log_history_mapping=history)
        self.assertEqual(0.6, state['percentage'])
class TestPackageMatcher(unittest2.TestCase):
    """Tests for PackageMatcher matching and progress updates."""

    def setUp(self):
        super(TestPackageMatcher, self).setUp()
        logsetting.init()
        self.item_matcher = adapter_matcher.AdapterItemMatcher([
            FileMatcher(
                min_progress=0.6,
                max_progress=0.9,
                filename='test_log',
                line_matchers={'start': LineMatcher(pattern=r'', severity='')}
            )
        ])
        self.file_reader_factory = FileReaderFactory(
            logdir=os.path.dirname(os.path.abspath(__file__)) + '/data'
        )
        self.package_matcher = adapter_matcher.PackageMatcher(
            package_installer_name='chef',
            distributed_system_pattern=r'openstack',
            item_matcher=self.item_matcher,
            file_reader_factory=self.file_reader_factory
        )

    def tearDown(self):
        super(TestPackageMatcher, self).tearDown()

    def test_match_none(self):
        """A missing installer name never matches."""
        self.assertFalse(self.package_matcher.match(
            package_installer_name=None,
            distributed_system_name='openstack'))

    def test_match(self):
        self.assertTrue(self.package_matcher.match(
            package_installer_name='chef',
            distributed_system_name='openstack'))

    def test_installer_unmatch(self):
        self.assertFalse(self.package_matcher.match(
            package_installer_name='dummy',
            distributed_system_name='openstack'))

    def test_name_unmatch(self):
        self.assertFalse(self.package_matcher.match(
            package_installer_name='chef',
            distributed_system_name='dummy'))

    def test_both_unmatch(self):
        self.assertFalse(self.package_matcher.match(
            package_installer_name='dummy',
            distributed_system_name='dummy'))

    def test_update_progress(self):
        state = {'message': 'dummy', 'severity': 'dummy', 'percentage': 0.5}
        history = {
            'test_log': {
                'filename': 'test_log',
                'partial_line': '',
                'position': 0,
                'line_matcher_name': 'start',
                'percentage': 0.0,
                'message': '',
                'severity': 'INFO'
            }
        }
        self.package_matcher.update_progress(
            name='host1', state=state, log_history_mapping=history)
        self.assertEqual(0.6, state['percentage'])
if __name__ == '__main__':
    # Initialize command-line flags and logging before running the suite
    # standalone.
    flags.init()
    logsetting.init()
    unittest2.main()
| |
# encoding: utf-8
"""
Data label-related objects.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from ..text.text import Font, TextFrame
from ..util import lazyproperty
class DataLabels(object):
    """
    Collection of data labels associated with a plot, and perhaps with
    a series or data point, although the latter two are not yet implemented.
    """
    def __init__(self, dLbls):
        super(DataLabels, self).__init__()
        self._element = dLbls

    @lazyproperty
    def font(self):
        """
        The |Font| object that provides access to the text properties for
        these data labels, such as bold, italic, etc.
        """
        return Font(self._element.defRPr)

    @property
    def number_format(self):
        """
        Read/write string specifying the format for the numbers on this set
        of data labels. Returns 'General' if no number format has been set.
        Note that this format string has no effect on rendered data labels
        when :meth:`number_format_is_linked` is |True|. Assigning a format
        string to this property automatically sets
        :meth:`number_format_is_linked` to |False|.
        """
        numFmt = self._element.numFmt
        return 'General' if numFmt is None else numFmt.formatCode

    @number_format.setter
    def number_format(self, value):
        self._element.get_or_add_numFmt().formatCode = value
        self.number_format_is_linked = False

    @property
    def number_format_is_linked(self):
        """
        Read/write boolean specifying whether number formatting should be
        taken from the source spreadsheet rather than the value of
        :meth:`number_format`.
        """
        # Absence of c:numFmt or of its sourceLinked attribute defaults
        # to linked (True).
        numFmt = self._element.numFmt
        if numFmt is None or numFmt.sourceLinked is None:
            return True
        return numFmt.sourceLinked

    @number_format_is_linked.setter
    def number_format_is_linked(self, value):
        self._element.get_or_add_numFmt().sourceLinked = value

    @property
    def position(self):
        """
        Read/write :ref:`XlDataLabelPosition` enumeration value specifying
        the position of the data labels with respect to their data point, or
        |None| if no position is specified. Assigning |None| causes
        PowerPoint to choose the default position, which varies by chart
        type.
        """
        dLblPos = self._element.dLblPos
        return None if dLblPos is None else dLblPos.val

    @position.setter
    def position(self, value):
        if value is None:
            self._element._remove_dLblPos()
        else:
            self._element.get_or_add_dLblPos().val = value
class DataLabel(object):
    """
    The data label associated with an individual data point.
    """
    def __init__(self, ser, idx):
        super(DataLabel, self).__init__()
        self._ser = self._element = ser
        self._idx = idx

    @lazyproperty
    def font(self):
        """The |Font| object providing text formatting for this data label.

        This font object is used to customize the appearance of
        automatically inserted text, such as the data point value, and
        applies to the entire data label. More granular control of custom
        data label text is available via font objects on runs in the text
        frame.
        """
        txPr = self._get_or_add_txPr()
        return TextFrame(txPr, self).paragraphs[0].font

    @property
    def has_text_frame(self):
        """
        Return |True| if this data label has a text frame (implying it has
        custom data label text), and |False| otherwise. Assigning |True|
        causes a text frame to be added if not already present. Assigning
        |False| causes any existing text frame to be removed along with any
        text contained in the text frame.
        """
        dLbl = self._dLbl
        if dLbl is None:
            return False
        return bool(dLbl.xpath('c:tx/c:rich'))

    @has_text_frame.setter
    def has_text_frame(self, value):
        if bool(value):
            self._get_or_add_tx_rich()
        else:
            self._remove_tx_rich()

    @property
    def position(self):
        """
        Read/write :ref:`XlDataLabelPosition` member specifying the position
        of this data label with respect to its data point, or |None| if no
        position is specified. Assigning |None| causes PowerPoint to choose
        the default position, which varies by chart type.
        """
        dLbl = self._dLbl
        dLblPos = None if dLbl is None else dLbl.dLblPos
        return None if dLblPos is None else dLblPos.val

    @position.setter
    def position(self, value):
        if value is not None:
            self._get_or_add_dLbl().get_or_add_dLblPos().val = value
            return
        # Assigning None removes any explicit position element.
        dLbl = self._dLbl
        if dLbl is not None:
            dLbl._remove_dLblPos()

    @property
    def text_frame(self):
        """
        |TextFrame| instance for this data label, containing the text of the
        data label and providing access to its text formatting properties.
        """
        return TextFrame(self._get_or_add_rich(), self)

    @property
    def _dLbl(self):
        """
        The |CT_DLbl| instance referring specifically to this individual
        data label (having the same index value), or |None| if not present.
        """
        return self._ser.get_dLbl(self._idx)

    def _get_or_add_dLbl(self):
        """
        The ``CT_DLbl`` instance referring specifically to this individual
        data label, newly created if not yet present in the XML.
        """
        return self._ser.get_or_add_dLbl(self._idx)

    def _get_or_add_rich(self):
        """
        Return the `c:rich` element representing the text frame for this
        data label, newly created with its ancestors if not present.
        """
        dLbl = self._get_or_add_dLbl()
        # having a c:spPr or c:txPr when a c:tx is present causes the "can't
        # save" bug on bubble charts. Remove c:spPr and c:txPr when present.
        dLbl._remove_spPr()
        dLbl._remove_txPr()
        return dLbl.get_or_add_rich()

    def _get_or_add_tx_rich(self):
        """
        Return the `c:tx` element for this data label, with its `c:rich`
        child and descendants, newly created if not yet present.
        """
        dLbl = self._get_or_add_dLbl()
        # having a c:spPr or c:txPr when a c:tx is present causes the "can't
        # save" bug on bubble charts. Remove c:spPr and c:txPr when present.
        dLbl._remove_spPr()
        dLbl._remove_txPr()
        return dLbl.get_or_add_tx_rich()

    def _get_or_add_txPr(self):
        """Return the `c:txPr` element for this data label.

        The `c:txPr` element and its parent `c:dLbl` element are created if
        not yet present.
        """
        return self._get_or_add_dLbl().get_or_add_txPr()

    def _remove_tx_rich(self):
        """
        Remove any `c:tx/c:rich` child of the `c:dLbl` element for this data
        label. Do nothing if that element is not present.
        """
        dLbl = self._dLbl
        if dLbl is not None:
            dLbl.remove_tx_rich()
| |
# pyOCD debugger
# Copyright (c) 2021 Huada Semiconductor Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...coresight.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
class DBGMCU:
    # Vendor debug-control registers for the HC32F4A0, written once in
    # post_connect_hook of the target classes below.
    # NOTE(review): register semantics are not visible from this file —
    # confirm the STCTL/STCTL1/TRACECTL meanings against the HC32F4A0
    # reference manual.
    STCTL = 0xE0042020
    STCTL_VALUE = 0x7FFFFF
    STCTL1 = 0xE0042028
    STCTL1_VALUE = 0xFFF
    TRACECTL = 0xE0042024
    TRACECTL_VALUE = 0x0
# Flash programming algorithm for the HC32F4A0 main (code) flash.
# 'instructions' is an opaque, tool-generated Thumb machine-code blob that
# pyOCD copies to RAM at 'load_address' and executes; do not hand-edit.
FLASH_ALGO = { 'load_address' : 0x20000000,
    'instructions' : [
    0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
    0x4770ba40, 0x4770bac0, 0x0030ea4f, 0x00004770, 0x49052000, 0x49057008, 0x20016008, 0x39264902,
    0x002af881, 0x00004770, 0x40054026, 0x40010418, 0x4004f240, 0xf0006800, 0xb1180001, 0x490b480a,
    0xe0026008, 0x4909480a, 0x20056008, 0x60084909, 0x490a4809, 0x20006208, 0x312a4908, 0x20057008,
    0xf8814906, 0x47700026, 0x22205981, 0x40054100, 0x22204781, 0x40010418, 0x00116310, 0x40054000,
    0x2400b530, 0xf9a6f000, 0x7083f44f, 0x6008491a, 0x4510f44f, 0x60282000, 0x1c64e007, 0x42844817,
    0xf000d303, 0x2001f997, 0x4813bd30, 0x68001d00, 0x7080f400, 0x7f80f5b0, 0x480fd007, 0x68001d00,
    0x7080f000, 0x7f80f1b0, 0xe007d1e7, 0x3008480a, 0xf0406800, 0x49081010, 0x60083108, 0x1d004806,
    0xf0006800, 0x28001010, 0x4903d1f0, 0xf0006008, 0x2000f971, 0x0000e7d8, 0x4001041c, 0x77359400,
    0x4604b530, 0xf0002500, 0x2004f965, 0x60084919, 0x60202000, 0x1c6de007, 0x42854817, 0xf000d303,
    0x2001f959, 0x4813bd30, 0x68001d00, 0x7080f400, 0x7f80f5b0, 0x480fd007, 0x68001d00, 0x7080f000,
    0x7f80f1b0, 0xe007d1e7, 0x3008480a, 0xf0406800, 0x49081010, 0x60083108, 0x1d004806, 0xf0006800,
    0x28001010, 0x4903d1f0, 0xf0006008, 0x2000f933, 0x0000e7d8, 0x4001041c, 0x77359400, 0x4604b570,
    0x4616460d, 0xf926f000, 0x1023f240, 0x60084917, 0x2010f243, 0xf04f6008, 0x491530ff, 0x1d096008,
    0x1d096008, 0x1d096008, 0x1d096008, 0x1d096008, 0x1d096008, 0x1d096008, 0x480e6008, 0x1d09490b,
    0x43c06008, 0x480c6008, 0x31184908, 0x12c06008, 0x60081d09, 0x5001f24a, 0x80084908, 0xff18f7ff,
    0xff26f7ff, 0xf8f6f000, 0xbd702000, 0x40010400, 0x40010590, 0x01234567, 0x00080005, 0x400543fe,
    0x41f8e92d, 0x460c4606, 0x48484617, 0x46b89000, 0x1003f240, 0x60084946, 0x48444635, 0xbf009000,
    0xf8d8f000, 0x0000f8d8, 0xf5b56028, 0xd22b1f80, 0x68004840, 0x42884940, 0x2000d026, 0xe00b9000,
    0x1c409800, 0x49399000, 0x42889800, 0xf000d304, 0x2001f8c1, 0x81f8e8bd, 0x1d004835, 0xf0006800,
    0x28100010, 0xe007d1ec, 0x30084831, 0xf0406800, 0x492f0010, 0x60083108, 0x1d00482d, 0xf0006800,
    0x28100010, 0xe026d0f0, 0x90002000, 0x9800e00a, 0x90001c40, 0x98004925, 0xd3034288, 0xf89af000,
    0xe7d72001, 0x1d004822, 0xf4006800, 0xf5b01080, 0xd1ec1f80, 0x481ee007, 0x68003008, 0x1080f440,
    0x3108491b, 0x481a6008, 0x68001d00, 0x1080f400, 0x1f80f5b0, 0xf108d0ef, 0x1d2d0804, 0x2c041f24,
    0x2000d29e, 0x60084912, 0xe00a9000, 0x1c409800, 0x490e9000, 0x42889800, 0xf000d303, 0x2001f86b,
    0x480be7a8, 0x68001d00, 0x7080f400, 0x7f80f5b0, 0x4807d007, 0x68001d00, 0x7080f000, 0x7f80f1b0,
    0xf000d1e4, 0x2000f857, 0x0000e794, 0x0bebc200, 0x4001041c, 0x03002000, 0x005a5a5a, 0x49034802,
    0x48036008, 0x47706008, 0xffff0123, 0x40049408, 0xffff3210, 0x20004601, 0x60104a03, 0x4a021e40,
    0x60103a1c, 0x47702000, 0x4001041c, 0x4604b5f0, 0x2300460d, 0x27002600, 0x21004626, 0xf856e007,
    0x6810cb04, 0xd0004584, 0x1d12e004, 0xebb11c49, 0xd3f40f95, 0x4637bf00, 0xe0062300, 0xcb01f817,
    0x45845cd0, 0xe004d000, 0xf0051c5b, 0x42980003, 0xbf00d8f4, 0x0081eb04, 0xbdf04418, 0x49034802,
    0x48036088, 0x47706088, 0xffff0123, 0x40049000, 0xffff3210, 0xf44fb500, 0x68006080, 0x3080f400,
    0xf000b908, 0xf44ff855, 0x68006080, 0x0001f000, 0xf000b908, 0xbd00f801, 0x4824b510, 0xb2826800,
    0x6080f44f, 0xf3c06800, 0xf44f0481, 0x68006080, 0x2303f3c0, 0x1192b90c, 0x2c01e008, 0x1292d101,
    0x2c02e004, 0x1312d101, 0x1392e000, 0x2001b90b, 0x2000e000, 0xd1012b0f, 0xe0002101, 0x43082100,
    0xf7ffb110, 0xe020ff7b, 0x0001f003, 0xb9e2b118, 0xff74f7ff, 0xf003e019, 0x28020002, 0x2a01d104,
    0xf7ffd113, 0xe010ff6b, 0x0004f003, 0xd1042804, 0xd10a2a02, 0xff62f7ff, 0xf003e007, 0x28080008,
    0x2a03d103, 0xf7ffd101, 0xbd10ff59, 0x40049404, 0x4824b510, 0xb2826840, 0x6080f44f, 0xf3c06800,
    0xf44f4481, 0x68006080, 0x6303f3c0, 0x1192b90c, 0x2c01e008, 0x1292d101, 0x2c02e004, 0x1312d101,
    0x1392e000, 0x2001b90b, 0x2000e000, 0xd1012b0f, 0xe0002101, 0x43082100, 0xf7ffb110, 0xe020ff6f,
    0x0001f003, 0xb9e2b118, 0xff68f7ff, 0xf003e019, 0x28020002, 0x2a01d104, 0xf7ffd113, 0xe010ff5f,
    0x0004f003, 0xd1042804, 0xd10a2a02, 0xff56f7ff, 0xf003e007, 0x28080008, 0x2a03d103, 0xf7ffd101,
    0xbd10ff4d, 0x40049000, 0x00000000
    ],
    # Relative function addresses
    'pc_init': 0x2000019d,
    'pc_unInit': 0x20000375,
    'pc_program_page': 0x20000221,
    'pc_erase_sector': 0x20000121,
    'pc_eraseAll': 0x200000a1,
    'static_base' : 0x20000000 + 0x00000020 + 0x00000528,
    'begin_stack' : 0x20000800,
    'begin_data' : 0x20000000 + 0x1000,
    'page_size' : 0x800,
    'analyzer_supported' : False,
    'analyzer_address' : 0x00000000,
    'page_buffers' : [0x20001000, 0x20001800],   # Enable double buffering
    'min_program_length' : 0x800,
    # Flash information
    'flash_start': 0x0,
    'flash_size': 0x200000,
    'sector_sizes': (
        (0x0, 0x2000),
    )
}
# Flash programming algorithm for the HC32F4A0 OTP (one-time-programmable)
# region. Same structure as FLASH_ALGO; the 'instructions' blob is opaque,
# tool-generated Thumb machine code — do not hand-edit.
FLASH_ALGO_OTP = {
    'load_address' : 0x20000000,
    # Flash algorithm as a hex string
    'instructions': [
    0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
    0x4770ba40, 0x4770ba40, 0x4770bac0, 0x4770bac0, 0x0030ea4f, 0x00004770, 0x0030ea4f, 0x00004770,
    0x49052000, 0x49057008, 0x20016008, 0x39264902, 0x002af881, 0x00004770, 0x40054026, 0x40010418,
    0x4004f240, 0xf0006800, 0xb1180001, 0x490b480a, 0xe0026008, 0x4909480a, 0x20056008, 0x60084909,
    0x490a4809, 0x20006208, 0x312a4908, 0x20057008, 0xf8814906, 0x47700026, 0x22205981, 0x40054100,
    0x22204781, 0x40010418, 0x00116310, 0x40054000, 0xf000b510, 0xbd10f811, 0x4604b510, 0xf0004620,
    0xbd10f817, 0x49032000, 0x1e406008, 0x391c4901, 0x47706008, 0x4001041c, 0x2400b510, 0xf948f000,
    0xf0004802, 0x2000f805, 0x0000bd10, 0x03001000, 0x4604b530, 0xf0002500, 0x2004f93b, 0x60084919,
    0x60202000, 0x1c6de007, 0x42854817, 0xf000d303, 0x2001f92f, 0x4813bd30, 0x68001d00, 0x7080f400,
    0x7f80f5b0, 0x480fd007, 0x68001d00, 0x7080f000, 0x7f80f1b0, 0xe007d1e7, 0x3008480a, 0xf0406800,
    0x49081010, 0x60083108, 0x1d004806, 0xf0006800, 0x28001010, 0x4903d1f0, 0xf0006008, 0x2000f909,
    0x0000e7d8, 0x4001041c, 0x00061a80, 0xf000b500, 0xf240f8ff, 0x49171023, 0xf2436008, 0x60082010,
    0x30fff04f, 0x60084914, 0x60081d09, 0x60081d09, 0x60081d09, 0x60081d09, 0x60081d09, 0x60081d09,
    0x60081d09, 0x490b480d, 0x60081d09, 0x600843c0, 0x4908480b, 0x60083118, 0x1d0912c0, 0xf24a6008,
    0x49085001, 0xf7ff8008, 0xf7ffff3b, 0xf000ff49, 0xbd00f8cf, 0x40010400, 0x40010590, 0x01234567,
    0x00080005, 0x400543fe, 0x41f8e92d, 0x460c4605, 0xf6494616, 0x90004040, 0xf24046b0, 0x492f1003,
    0x462f6008, 0x4040f649, 0xbf009000, 0xf8b0f000, 0x0000f8d8, 0x20006038, 0xe00c9000, 0x1c409800,
    0xf6499000, 0x98004140, 0xd3044288, 0xf8a0f000, 0xe8bd2001, 0x482181f8, 0x68001d00, 0x0010f000,
    0xd1eb2810, 0x481de007, 0x68003008, 0x0010f040, 0x3108491a, 0x48196008, 0x68001d00, 0x0010f000,
    0xd0f02810, 0x0804f108, 0x1f241d3f, 0xd2cd2c04, 0x49122000, 0x90006008, 0x9800e00b, 0x90001c40,
    0x4140f649, 0x42889800, 0xf000d303, 0x2001f871, 0x480ae7cf, 0x68001d00, 0x7080f400, 0x7f80f5b0,
    0x4806d007, 0x68001d00, 0x7080f000, 0x7f80f1b0, 0xf000d1e3, 0x2000f85d, 0x0000e7bb, 0x4001041c,
    0x4604b570, 0x4616460d, 0xff50f7ff, 0xbd702000, 0x4604b570, 0x4616460d, 0x46294632, 0xf7ff4620,
    0xbd70ff83, 0x49034802, 0x48036008, 0x47706008, 0xffff0123, 0x40049408, 0xffff3210, 0x4604b510,
    0xfee0f7ff, 0xbd102000, 0x4604b5f0, 0x2300460d, 0x27002600, 0x21004626, 0xf856e007, 0x6810cb04,
    0xd0004584, 0x1d12e004, 0xebb11c49, 0xd3f40f95, 0x4637bf00, 0xe0062300, 0xcb01f817, 0x45845cd0,
    0xe004d000, 0xf0051c5b, 0x42980003, 0xbf00d8f4, 0x0081eb04, 0xbdf04418, 0x49034802, 0x48036088,
    0x47706088, 0xffff0123, 0x40049000, 0xffff3210, 0xf44fb500, 0x68006080, 0x3080f400, 0xf000b908,
    0xf44ff85b, 0x68006080, 0x0001f000, 0xf000b908, 0xbd00f807, 0x1e01bf00, 0x0001f1a0, 0x4770d1fb,
    0x4824b510, 0xb2826800, 0x6080f44f, 0xf3c06800, 0xf44f0481, 0x68006080, 0x2303f3c0, 0x1192b90c,
    0x2c01e008, 0x1292d101, 0x2c02e004, 0x1312d101, 0x1392e000, 0x2001b90b, 0x2000e000, 0xd1012b0f,
    0xe0002101, 0x43082100, 0xf7ffb110, 0xe020ff7b, 0x0001f003, 0xb9e2b118, 0xff74f7ff, 0xf003e019,
    0x28020002, 0x2a01d104, 0xf7ffd113, 0xe010ff6b, 0x0004f003, 0xd1042804, 0xd10a2a02, 0xff62f7ff,
    0xf003e007, 0x28080008, 0x2a03d103, 0xf7ffd101, 0xbd10ff59, 0x40049404, 0x4824b510, 0xb2826840,
    0x6080f44f, 0xf3c06800, 0xf44f4481, 0x68006080, 0x6303f3c0, 0x1192b90c, 0x2c01e008, 0x1292d101,
    0x2c02e004, 0x1312d101, 0x1392e000, 0x2001b90b, 0x2000e000, 0xd1012b0f, 0xe0002101, 0x43082100,
    0xf7ffb110, 0xe020ff69, 0x0001f003, 0xb9e2b118, 0xff62f7ff, 0xf003e019, 0x28020002, 0x2a01d104,
    0xf7ffd113, 0xe010ff59, 0x0004f003, 0xd1042804, 0xd10a2a02, 0xff50f7ff, 0xf003e007, 0x28080008,
    0x2a03d103, 0xf7ffd101, 0xbd10ff47, 0x40049000, 0x00000000
    ],
    # Relative function addresses
    'pc_init': 0x200002c1,
    'pc_unInit': 0x200002fd,
    'pc_program_page': 0x200002d1,
    'pc_erase_sector': 0x200000b9,
    'pc_eraseAll': 0x200000b1,
    'static_base' : 0x20000000 + 0x00000020 + 0x000004b0,
    'begin_stack' : 0x20000700,
    'begin_data' : 0x20000000 + 0x1000,
    'page_size' : 0x1800,
    'analyzer_supported' : False,
    'analyzer_address' : 0x00000000,
    'page_buffers' : [0x20001000, 0x20002800],   # Enable double buffering
    'min_program_length' : 0x1800,
    # Flash information
    'flash_start': 0x3000000,
    'flash_size': 0x1800,
    'sector_sizes': (
        (0x0, 0x1800),
    )
}
class HC32F4A0xG(CoreSightTarget):
    """HDSC HC32F4A0 'xG' variant: 1 MiB main flash plus a small OTP region.

    NOTE(review): FLASH_ALGO advertises flash_size 0x200000 while this
    region is 0x100000 — presumably the algo covers the largest variant;
    confirm against the HC32F4A0 datasheet.
    """
    VENDOR = "HDSC"
    MEMORY_MAP = MemoryMap(
        # Main (code) flash: 0x800-byte pages, 8 KiB erase sectors.
        FlashRegion( start=0x00000000, length=0x100000, page_size=0x800, sector_size=0x2000,
                        is_boot_memory=True,
                        algo=FLASH_ALGO),
        # One-time-programmable region with its own programming algorithm.
        FlashRegion( start=0x3000000, length=0x1800, page_size=0x1800, sector_size=0x1800,
                        is_boot_memory=False,
                        is_default=False,
                        algo=FLASH_ALGO_OTP),
        RamRegion(   start=0x1FFFE000, length=0x80000),
        RamRegion(   start=0x200F0000, length=0x1000)
        )
    def __init__(self, session):
        super(HC32F4A0xG, self).__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("HC32F4A0.svd")
    def post_connect_hook(self):
        # Program the vendor debug-control registers right after connecting.
        self.write32(DBGMCU.STCTL, DBGMCU.STCTL_VALUE)
        self.write32(DBGMCU.STCTL1, DBGMCU.STCTL1_VALUE)
        self.write32(DBGMCU.TRACECTL, DBGMCU.TRACECTL_VALUE)
class HC32F4A0xI(CoreSightTarget):
    """HDSC HC32F4A0 'xI' variant: 2 MiB main flash plus a small OTP region."""
    VENDOR = "HDSC"
    MEMORY_MAP = MemoryMap(
        # Main (code) flash: 0x800-byte pages, 8 KiB erase sectors.
        FlashRegion( start=0x00000000, length=0x200000, page_size=0x800, sector_size=0x2000,
                        is_boot_memory=True,
                        algo=FLASH_ALGO),
        # One-time-programmable region with its own programming algorithm.
        FlashRegion( start=0x3000000, length=0x1800, page_size=0x1800, sector_size=0x1800,
                        is_boot_memory=False,
                        is_default=False,
                        algo=FLASH_ALGO_OTP),
        RamRegion(   start=0x1FFFE000, length=0x80000),
        RamRegion(   start=0x200F0000, length=0x1000)
        )
    def __init__(self, session):
        super(HC32F4A0xI, self).__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("HC32F4A0.svd")
    def post_connect_hook(self):
        # Program the vendor debug-control registers right after connecting.
        self.write32(DBGMCU.STCTL, DBGMCU.STCTL_VALUE)
        self.write32(DBGMCU.STCTL1, DBGMCU.STCTL1_VALUE)
        self.write32(DBGMCU.TRACECTL, DBGMCU.TRACECTL_VALUE)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.kms_v1.proto import service_pb2_grpc
from google.iam.v1 import iam_policy_pb2
class KeyManagementServiceGrpcTransport(object):
    """gRPC transport class providing stubs for
    google.cloud.kms.v1 KeyManagementService API.
    The transport provides access to the raw gRPC stubs,
    which can be used to take advantage of advanced
    features of gRPC.
    """
    # The scopes needed to make gRPC calls to all of the methods defined
    # in this service.
    _OAUTH_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
    def __init__(self,
                 channel=None,
                 credentials=None,
                 address='cloudkms.googleapis.com:443'):
        """Instantiate the transport class.
        Args:
            channel (grpc.Channel): A ``Channel`` instance through
                which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            address (str): The address where the service is hosted.
        Raises:
            ValueError: If both ``channel`` and ``credentials`` are given.
        """
        # If both `channel` and `credentials` are specified, raise an
        # exception (channels come with credentials baked in already).
        if channel is not None and credentials is not None:
            raise ValueError(
                'The `channel` and `credentials` arguments are mutually '
                'exclusive.', )
        # Create the channel.
        if channel is None:
            channel = self.create_channel(
                address=address,
                credentials=credentials,
            )
        # gRPC uses objects called "stubs" that are bound to the
        # channel and provide a basic method for each RPC.
        self._stubs = {
            'key_management_service_stub':
            service_pb2_grpc.KeyManagementServiceStub(channel),
            'iam_policy_stub':
            iam_policy_pb2.IAMPolicyStub(channel),
        }
    @classmethod
    def create_channel(cls,
                       address='cloudkms.googleapis.com:443',
                       credentials=None):
        """Create and return a gRPC channel object.
        Args:
            address (str): The host for the channel to use.
            credentials (~.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return google.api_core.grpc_helpers.create_channel(
            address,
            credentials=credentials,
            scopes=cls._OAUTH_SCOPES,
        )
    @property
    def list_key_rings(self):
        """Return the gRPC stub for ListKeyRings.
        Lists ``KeyRings``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].ListKeyRings
    @property
    def list_crypto_keys(self):
        """Return the gRPC stub for ListCryptoKeys.
        Lists ``CryptoKeys``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].ListCryptoKeys
    @property
    def list_crypto_key_versions(self):
        """Return the gRPC stub for ListCryptoKeyVersions.
        Lists ``CryptoKeyVersions``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].ListCryptoKeyVersions
    @property
    def get_key_ring(self):
        """Return the gRPC stub for GetKeyRing.
        Returns metadata for a given ``KeyRing``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].GetKeyRing
    @property
    def get_crypto_key(self):
        """Return the gRPC stub for GetCryptoKey.
        Returns metadata for a given ``CryptoKey``, as well as its
        ``primary`` ``CryptoKeyVersion``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].GetCryptoKey
    @property
    def get_crypto_key_version(self):
        """Return the gRPC stub for GetCryptoKeyVersion.
        Returns metadata for a given ``CryptoKeyVersion``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].GetCryptoKeyVersion
    @property
    def create_key_ring(self):
        """Return the gRPC stub for CreateKeyRing.
        Create a new ``KeyRing`` in a given Project and Location.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].CreateKeyRing
    @property
    def create_crypto_key(self):
        """Return the gRPC stub for CreateCryptoKey.
        Create a new ``CryptoKey`` within a ``KeyRing``.
        ``CryptoKey.purpose`` and
        ``CryptoKey.version_template.algorithm``
        are required.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].CreateCryptoKey
    @property
    def create_crypto_key_version(self):
        """Return the gRPC stub for CreateCryptoKeyVersion.
        Create a new ``CryptoKeyVersion`` in a ``CryptoKey``.
        The server will assign the next sequential id. If unset,
        ``state`` will be set to
        ``ENABLED``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs[
            'key_management_service_stub'].CreateCryptoKeyVersion
    @property
    def update_crypto_key(self):
        """Return the gRPC stub for UpdateCryptoKey.
        Update a ``CryptoKey``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].UpdateCryptoKey
    @property
    def update_crypto_key_version(self):
        """Return the gRPC stub for UpdateCryptoKeyVersion.
        Update a ``CryptoKeyVersion``'s metadata.
        ``state`` may be changed between
        ``ENABLED`` and
        ``DISABLED`` using this
        method. See ``DestroyCryptoKeyVersion`` and ``RestoreCryptoKeyVersion`` to
        move between other states.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs[
            'key_management_service_stub'].UpdateCryptoKeyVersion
    @property
    def encrypt(self):
        """Return the gRPC stub for Encrypt.
        Encrypts data, so that it can only be recovered by a call to ``Decrypt``.
        The ``CryptoKey.purpose`` must be
        ``ENCRYPT_DECRYPT``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].Encrypt
    @property
    def decrypt(self):
        """Return the gRPC stub for Decrypt.
        Decrypts data that was protected by ``Encrypt``. The ``CryptoKey.purpose``
        must be ``ENCRYPT_DECRYPT``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].Decrypt
    @property
    def update_crypto_key_primary_version(self):
        """Return the gRPC stub for UpdateCryptoKeyPrimaryVersion.
        Update the version of a ``CryptoKey`` that will be used in ``Encrypt``.
        Returns an error if called on an asymmetric key.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs[
            'key_management_service_stub'].UpdateCryptoKeyPrimaryVersion
    @property
    def destroy_crypto_key_version(self):
        """Return the gRPC stub for DestroyCryptoKeyVersion.
        Schedule a ``CryptoKeyVersion`` for destruction.
        Upon calling this method, ``CryptoKeyVersion.state`` will be set to
        ``DESTROY_SCHEDULED``
        and ``destroy_time`` will be set to a time 24
        hours in the future, at which point the ``state``
        will be changed to
        ``DESTROYED``, and the key
        material will be irrevocably destroyed.
        Before the ``destroy_time`` is reached,
        ``RestoreCryptoKeyVersion`` may be called to reverse the process.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs[
            'key_management_service_stub'].DestroyCryptoKeyVersion
    @property
    def restore_crypto_key_version(self):
        """Return the gRPC stub for RestoreCryptoKeyVersion.
        Restore a ``CryptoKeyVersion`` in the
        ``DESTROY_SCHEDULED``
        state.
        Upon restoration of the CryptoKeyVersion, ``state``
        will be set to ``DISABLED``,
        and ``destroy_time`` will be cleared.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs[
            'key_management_service_stub'].RestoreCryptoKeyVersion
    @property
    def get_public_key(self):
        """Return the gRPC stub for GetPublicKey.
        Returns the public key for the given ``CryptoKeyVersion``. The
        ``CryptoKey.purpose`` must be
        ``ASYMMETRIC_SIGN`` or
        ``ASYMMETRIC_DECRYPT``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].GetPublicKey
    @property
    def asymmetric_decrypt(self):
        """Return the gRPC stub for AsymmetricDecrypt.
        Decrypts data that was encrypted with a public key retrieved from
        ``GetPublicKey`` corresponding to a ``CryptoKeyVersion`` with
        ``CryptoKey.purpose`` ASYMMETRIC_DECRYPT.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].AsymmetricDecrypt
    @property
    def asymmetric_sign(self):
        """Return the gRPC stub for AsymmetricSign.
        Signs data using a ``CryptoKeyVersion`` with ``CryptoKey.purpose``
        ASYMMETRIC_SIGN, producing a signature that can be verified with the public
        key retrieved from ``GetPublicKey``.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['key_management_service_stub'].AsymmetricSign
    @property
    def set_iam_policy(self):
        """Return the gRPC stub for SetIamPolicy.
        Sets the access control policy on the specified resource. Replaces any
        existing policy.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['iam_policy_stub'].SetIamPolicy
    @property
    def get_iam_policy(self):
        """Return the gRPC stub for GetIamPolicy.
        Gets the access control policy for a resource.
        Returns an empty policy if the resource exists and does not have a policy
        set.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['iam_policy_stub'].GetIamPolicy
    @property
    def test_iam_permissions(self):
        """Return the gRPC stub for TestIamPermissions.
        Returns permissions that a caller has on the specified resource.
        If the resource does not exist, this will return an empty set of
        permissions, not a NOT_FOUND error.
        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs['iam_policy_stub'].TestIamPermissions
| |
""" Test ffmpeg
"""
from io import BytesIO
import time
import threading
import numpy as np
from pytest import raises, skip
from imageio.testing import run_tests_if_main, get_test_dir, need_internet
import imageio
from imageio import core
from imageio.core import get_remote_file, IS_PYPY
# Directory where downloaded test media files are cached.
test_dir = get_test_dir()
def test_select():
    """The ffmpeg format should be selected for mp4 multi-image requests."""
    fname1 = get_remote_file('images/cockatoo.mp4', test_dir)
    F = imageio.formats['ffmpeg']
    assert F.name == 'FFMPEG'
    # 'I' modes (multiple images) are supported; single-image and volume
    # modes are not.
    assert F.can_read(core.Request(fname1, 'rI'))
    assert F.can_write(core.Request(fname1, 'wI'))
    assert not F.can_read(core.Request(fname1, 'ri'))
    assert not F.can_read(core.Request(fname1, 'rv'))
    # ffmpeg is default
    assert imageio.formats['.mp4'] is F
    assert imageio.formats.search_write_format(core.Request(fname1, 'wI')) is F
    assert imageio.formats.search_read_format(core.Request(fname1, 'rI')) is F
def test_read_and_write():
    """Round-trip test: read frames from a sample mp4 and write them back."""
    need_internet()
    R = imageio.read(get_remote_file('images/cockatoo.mp4'), 'ffmpeg')
    assert R.format is imageio.formats['ffmpeg']
    fname1 = get_remote_file('images/cockatoo.mp4', test_dir)
    fname2 = fname1[:-4] + '.out.mp4'
    # Read
    ims1 = []
    with imageio.read(fname1, 'ffmpeg') as R:
        for i in range(10):
            im = R.get_next_data()
            ims1.append(im)
            assert im.shape == (720, 1280, 3)
            assert (im.sum() / im.size) > 0  # pypy mean is broken
            assert im.sum() > 0
        # Seek
        im = R.get_data(120)
        assert im.shape == (720, 1280, 3)
    # Save
    with imageio.save(fname2, 'ffmpeg') as W:
        for im in ims1:
            W.append_data(im)
    # Read the result
    ims2 = imageio.mimread(fname2, 'ffmpeg')
    assert len(ims1) == len(ims2)
    # Check: encoding is lossy, so only require a small per-pixel error.
    for im1, im2 in zip(ims1, ims2):
        diff = np.abs(im1.astype(np.float32) - im2.astype(np.float32))
        if IS_PYPY:
            assert (diff.sum() / diff.size) < 100
        else:
            assert diff.mean() < 2.5
def test_reader_more():
    """Exercise reader meta data, size/pixelformat options, frame counting,
    looping, and error behavior on invalid files.

    Fix: the empty stub file used for the invalid-read check was created
    with a bare ``open()`` whose handle was never closed (a resource leak
    that can also break the subsequent read on Windows); it now uses a
    ``with`` block.
    """
    need_internet()
    fname1 = get_remote_file('images/cockatoo.mp4', test_dir)
    fname3 = fname1[:-4] + '.stub.mp4'
    # Get meta data
    R = imageio.read(fname1, 'ffmpeg', loop=True)
    meta = R.get_meta_data()
    assert len(R) == 280
    assert isinstance(meta, dict)
    assert 'fps' in meta
    R.close()
    # Test size argument
    im = imageio.read(fname1, 'ffmpeg', size=(50, 50)).get_data(0)
    assert im.shape == (50, 50, 3)
    im = imageio.read(fname1, 'ffmpeg', size='40x40').get_data(0)
    assert im.shape == (40, 40, 3)
    raises(ValueError, imageio.read, fname1, 'ffmpeg', size=20)
    raises(ValueError, imageio.read, fname1, 'ffmpeg', pixelformat=20)
    # Read all frames and test length
    R = imageio.read(get_remote_file('images/realshort.mp4'), 'ffmpeg')
    count = 0
    while True:
        try:
            R.get_next_data()
        except IndexError:
            break
        else:
            count += 1
    assert count == len(R)
    assert count in (35, 36)  # allow one frame off size that we know
    # Test index error -1
    raises(IndexError, R.get_data, -1)
    # Now read beyond (simulate broken file)
    with raises(RuntimeError):
        R._read_frame()  # ffmpeg seems to have an extra frame, avbin not?
        R._read_frame()
    # Test loop
    R = imageio.read(get_remote_file('images/realshort.mp4'), 'ffmpeg', loop=1)
    im1 = R.get_next_data()
    for i in range(1, len(R)):
        R.get_next_data()
    im2 = R.get_next_data()  # loop to frame 0
    im3 = R.get_data(0)
    im4 = R.get_data(2)  # touch skipping frames
    assert (im1 == im2).all()
    assert (im1 == im3).all()
    assert not (im1 == im4).all()
    R.close()
    # Read invalid: create an empty stub file (handle closed via `with`)
    with open(fname3, 'wb'):
        pass
    raises(IOError, imageio.read, fname3, 'ffmpeg')
    # Read printing info
    imageio.read(fname1, 'ffmpeg', print_info=True)
def test_writer_more():
    """Writer input validation: bad shapes/depths and meta-data errors."""
    need_internet()
    fname1 = get_remote_file('images/cockatoo.mp4', test_dir)
    fname2 = fname1[:-4] + '.out.mp4'
    W = imageio.save(fname2, 'ffmpeg')
    with raises(ValueError):  # Invalid shape
        W.append_data(np.zeros((20, 20, 5), np.uint8))
    W.append_data(np.zeros((20, 20, 3), np.uint8))
    with raises(ValueError):  # Different shape from first image
        W.append_data(np.zeros((20, 19, 3), np.uint8))
    with raises(ValueError):  # Different depth from first image
        W.append_data(np.zeros((20, 20, 4), np.uint8))
    with raises(RuntimeError):  # No meta data
        W.set_meta_data({'foo': 3})
    W.close()
def test_writer_file_properly_closed(tmpdir):
    """Verify the output file is finalized (has a duration) after close()."""
    # Test to catch if file is correctly closed.
    # Otherwise it won't play in most players. This seems to occur on windows.
    need_internet()
    tmpf = tmpdir.join('test.mp4')
    W = imageio.get_writer(str(tmpf))
    for i in range(10):
        W.append_data(np.zeros((100, 100, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    # If Duration: N/A reported by ffmpeg, then the file was not
    # correctly closed.
    # This will cause the file to not be readable in many players.
    assert "Duration: N/A" not in W._stderr_catcher.header
def test_writer_pixelformat_size_verbose(tmpdir):
    """Check that video pixel format and size get written as expected.

    Covers the default macroblock size (16), disabling it, and a custom
    value (4), each time verifying the resulting frame size and the
    default yuv420p pixel format in the ffmpeg header output.

    Fix: the original passed ``fps=30`` to ``tmpdir.join()``, where
    py.path silently ignores unknown keyword arguments — the argument
    had no effect and was misleading, so it is removed.
    """
    need_internet()
    # Make sure verbose option works and that default pixelformat is yuv420p
    tmpf = tmpdir.join('test.mp4')
    W = imageio.get_writer(str(tmpf), ffmpeg_log_level='debug')
    nframes = 4  # Number of frames in video
    for i in range(nframes):
        # Use size divisible by 16 or it gets changed.
        W.append_data(np.zeros((64, 64, 3), np.uint8))
    W.close()
    # Check that video is correct size & default output video pixel format
    # is correct
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    assert "64x64" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header
    # Now check that macroblock size gets turned off if requested
    W = imageio.get_writer(str(tmpf), macro_block_size=None,
                           ffmpeg_log_level='debug')
    for i in range(nframes):
        W.append_data(np.zeros((100, 106, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    assert "106x100" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header
    # Now double check values different than default work
    W = imageio.get_writer(str(tmpf), macro_block_size=4,
                           ffmpeg_log_level='debug')
    for i in range(nframes):
        W.append_data(np.zeros((64, 65, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    assert "68x64" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header
    # Now check that the macroblock works as expected for the default of 16
    W = imageio.get_writer(str(tmpf), ffmpeg_log_level='debug')
    for i in range(nframes):
        W.append_data(np.zeros((111, 140, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    assert len(W) == nframes
    # Check for warning message with macroblock
    assert "144x112" in W._stderr_catcher.header
    assert "yuv420p" in W._stderr_catcher.header
def test_writer_ffmpeg_params(tmpdir):
    """Optional ffmpeg_params should be passed through to the ffmpeg call."""
    need_internet()
    # Test optional ffmpeg_params with a valid option
    # Also putting in an image size that is not divisible by macroblock size
    # To check that the -vf scale overwrites what it does.
    tmpf = tmpdir.join('test.mp4')
    W = imageio.get_writer(str(tmpf), ffmpeg_params=['-vf', 'scale=320:240'])
    for i in range(10):
        W.append_data(np.zeros((100, 100, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    # Check that the optional argument scaling worked.
    assert "320x240" in W._stderr_catcher.header
def test_writer_wmv(tmpdir):
    """WMV output should pick its format-specific default codec."""
    need_internet()
    # WMV has different default codec, make sure it works.
    tmpf = tmpdir.join('test.wmv')
    W = imageio.get_writer(str(tmpf), ffmpeg_params=['-v', 'info'])
    for i in range(10):
        W.append_data(np.zeros((100, 100, 3), np.uint8))
    W.close()
    W = imageio.get_reader(str(tmpf))
    # Check that default encoder is msmpeg4 for wmv
    assert "msmpeg4" in W._stderr_catcher.header
def test_cvsecs():
    """cvsecs converts (s) / (m, s) / (h, m, s) arguments to seconds."""
    cvsecs = imageio.plugins.ffmpeg.cvsecs
    assert cvsecs(20) == 20
    assert cvsecs(2, 20) == 140        # 2 min 20 s
    assert cvsecs(2, 3, 20) == 7400    # 2 h 3 min 20 s
def test_limit_lines():
    """limit_lines keeps short logs intact and truncates long ones."""
    limit_lines = imageio.plugins.ffmpeg.limit_lines
    # Below twice the threshold the list passes through untouched.
    assert len(limit_lines(['foo'] * 10)) == 10
    assert len(limit_lines(['foo'] * 50)) == 50  # < 2 * N
    # Beyond 2 * N the head is dropped and a marker line is inserted.
    truncated = limit_lines(['foo'] * 70 + ['bar'])
    assert len(truncated) == 33  # > 2 * N
    assert b'last few lines' in truncated[0]
    assert truncated[-1] == 'bar'
def test_framecatcher():
    """FrameCatcher must assemble fixed-size frames from a blocking pipe."""
    class BlockingBytesIO(BytesIO):
        # Stand-in for ffmpeg's stdout pipe: read() blocks (polls) until
        # data is available instead of returning b'' at EOF.
        def __init__(self):
            BytesIO.__init__(self)
            self._lock = threading.RLock()
        def write_and_rewind(self, bb):
            # Append data at the end, then restore the read position.
            with self._lock:
                t = self.tell()
                self.write(bb)
                self.seek(t)
        def read(self, n):
            if self.closed:
                return b''
            while True:
                time.sleep(0.001)
                with self._lock:
                    bb = BytesIO.read(self, n)
                    if bb:
                        return bb
    # Test our class
    file = BlockingBytesIO()
    file.write_and_rewind(b'v')
    assert file.read(100) == b'v'
    file = BlockingBytesIO()
    N = 100  # frame size in bytes
    T = imageio.plugins.ffmpeg.FrameCatcher(file, N)
    # Init None
    time.sleep(0.1)
    assert T._frame is None  # get_frame() would stall
    # Read frame
    file.write_and_rewind(b'x' * (N - 20))
    time.sleep(0.2)  # Let it read a part
    assert T._frame is None  # get_frame() would stall
    file.write_and_rewind(b'x' * 20)
    time.sleep(0.2)  # Let it read the rest
    assert T.get_frame() == b'x' * N
    # Read frame when we pass plenty of data
    file.write_and_rewind(b'y' * N * 3)
    time.sleep(0.2)
    assert T.get_frame() == b'y' * N
    # Close
    file.close()
def test_webcam():
    """Smoke-test reading from the default webcam; skip when none exists."""
    need_internet()
    try:
        imageio.read('<video0>')
    except Exception:
        skip('no web cam')
def show_in_console():
    """Manual demo (not a pytest test): print per-frame stats forever."""
    vid = imageio.read('cockatoo.mp4', 'ffmpeg')
    # vid = imageio.read('<video0>')
    frame = vid.get_next_data()
    while True:
        frame = vid.get_next_data()
        stats = (frame.min(), frame.max(), (frame.sum() / frame.size))
        print('frame min/max/mean: %1.1f / %1.1f / %1.1f' % stats)
def show_in_visvis():
    """Manual demo (not a test): play the movie in a visvis window until
    the figure is closed."""
    reader = imageio.read('cockatoo.mp4', 'ffmpeg')
    #reader = imageio.read('<video0>')
    import visvis as vv
    im = reader.get_next_data()
    f = vv.clf()
    f.title = reader.format.name
    t = vv.imshow(im, clim=(0, 255))
    while not f._destroyed:
        t.SetData(reader.get_next_data())
        vv.processEvents()
if __name__ == '__main__':
    # Run the test_* functions in this module when executed as a script.
    run_tests_if_main()
    #reader = imageio.read('cockatoo.mp4', 'ffmpeg')
| |
import uuid
import re
import datetime
import decimal
import itertools
import functools
import random
import string
import six
from six import iteritems
from ..exceptions import (
StopValidation, ValidationError, ConversionError, MockCreationError
)
try:
from string import ascii_letters # PY3
except ImportError:
from string import letters as ascii_letters #PY2
try:
basestring #PY2
except NameError:
basestring = str #PY3
# PY2/PY3 compatibility: make `unicode` available under both.
try:
    unicode  # PY2
except NameError:  # fix: bare `except:` also swallowed KeyboardInterrupt etc.
    import codecs
    unicode = str  # PY3
def utf8_decode(s):
    """Return *s* as a text string.

    On Python 3, bytes are decoded as UTF-8 (fixes the previous behaviour,
    where ``str(b'x')`` produced ``"b'x'"`` -- the inline TODO); any other
    object is passed through ``str()``. On Python 2, *s* is decoded from
    UTF-8 to ``unicode``.
    """
    if six.PY3:
        if isinstance(s, bytes):
            s = s.decode('utf-8')
        else:
            s = str(s)
    else:
        s = unicode(s, 'utf-8')
    return s
def fill_template(template, min_length, max_length):
    """Fill a one-'%s' template with a random string, sized so the final
    result respects min_length/max_length (padding subtracts the template's
    fixed characters; the '%s' itself accounts for the -2)."""
    return template % random_string(
        get_value_in(
            min_length,
            max_length,
            padding=len(template) - 2,
            required_length=1))
def force_unicode(obj, encoding='utf-8'):
    """Coerce *obj* to a text string; ``None`` passes through unchanged.

    Byte/str input is decoded via ``utf8_decode``; any other non-None
    object is stringified the same way.

    NOTE(review): *encoding* is currently unused (utf8_decode hard-codes
    UTF-8); kept for backward compatibility.
    """
    if isinstance(obj, basestring):
        if not isinstance(obj, unicode):
            #obj = unicode(obj, encoding)
            obj = utf8_decode(obj)
    elif obj is not None:  # fix: `not obj is None` -> idiomatic `is not None`
        #obj = unicode(obj)
        obj = utf8_decode(obj)
    return obj
def get_range_endpoints(min_length, max_length, padding=0, required_length=0):
    """Normalize a (min, max) mock-length pair.

    Missing endpoints get defaults (0 / 16, or twice the minimum), *padding*
    shrinks both ends, and *required_length* puts a floor on the minimum.
    Raises MockCreationError when the padded maximum cannot fit the
    required length.
    """
    if min_length is None and max_length is None:
        min_length, max_length = 0, 16
    elif min_length is None:
        min_length = 0
    elif max_length is None:
        max_length = max(min_length * 2, 16)
    if padding:
        min_length = max(min_length - padding, 0)
        max_length = max_length - padding
    if max_length < required_length:
        raise MockCreationError(
            'This field is too short to hold the mock data')
    return max(min_length, required_length), max_length
def get_value_in(min_length, max_length, padding=0, required_length=0):
    """Pick a random length from the normalized (min, max) range."""
    lo, hi = get_range_endpoints(min_length, max_length, padding,
                                 required_length)
    return random.randint(lo, hi)
def random_string(length, chars=ascii_letters + string.digits):
    """Return a random string of *length* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(length)]
    return ''.join(picks)
# Monotonic counter recording field declaration order (see
# BaseType._position_hint).  NOTE(review): _last_position_hint appears
# unused in this module -- candidate for removal upstream.
_last_position_hint = -1
_next_position_hint = itertools.count()
class TypeMeta(type):
    """
    Meta class for BaseType. Merges `MESSAGES` dict and accumulates
    validator methods.
    """

    def __new__(mcs, name, bases, attrs):
        messages = {}
        validators = []
        # Walk bases in reverse so nearer bases override farther ones.
        for base in reversed(bases):
            if hasattr(base, 'MESSAGES'):
                messages.update(base.MESSAGES)
            if hasattr(base, "_validators"):
                validators.extend(base._validators)
        # The class's own MESSAGES wins over everything inherited.
        if 'MESSAGES' in attrs:
            messages.update(attrs['MESSAGES'])
        attrs['MESSAGES'] = messages
        # Every method named validate_* joins the validator chain.
        # NOTE(review): attrs iteration order is not guaranteed on older
        # Pythons, so the relative order of validate_* methods may vary.
        for attr_name, attr in iteritems(attrs):
            if attr_name.startswith("validate_"):
                validators.append(attr)
        attrs["_validators"] = validators
        return type.__new__(mcs, name, bases, attrs)
class BaseType(TypeMeta('BaseTypeBase', (object, ), {})):
    """A base class for Types in a Schematics model. Instances of this
    class may be added to subclasses of ``Model`` to define a model schema.

    Validators that need to access variables on the instance
    can be defined be implementing methods whose names start with ``validate_``
    and accept one parameter (in addition to ``self``)

    :param required:
        Invalidate field when value is None or is not supplied. Default:
        False.
    :param default:
        When no data is provided default to this value. May be a callable.
        Default: None.
    :param serialized_name:
        The name of this field defaults to the class attribute used in the
        model. However if the field has another name in foreign data set this
        argument. Serialized data will use this value for the key name too.
    :param deserialize_from:
        A name or list of named fields for which foreign data sets are
        searched to provide a value for the given field.  This only effects
        inbound data.
    :param choices:
        A list of valid choices. This is the last step of the validator
        chain.
    :param validators:
        A list of callables. Each callable receives the value after it has been
        converted into a rich python type. Default: []
    :param serialize_when_none:
        Dictates if the field should appear in the serialized data even if the
        value is None. Default: True
    :param messages:
        Override the error messages with a dict. You can also do this by
        subclassing the Type and defining a `MESSAGES` dict attribute on the
        class. A metaclass will merge all the `MESSAGES` and override the
        resulting dict with instance level `messages` and assign to
        `self.messages`.
    """

    MESSAGES = {
        'required': u"This field is required.",
        'choices': u"Value must be one of {0}.",
    }

    def __init__(self, required=False, default=None, serialized_name=None,
                 choices=None, validators=None, deserialize_from=None,
                 serialize_when_none=None, messages=None):
        super(BaseType, self).__init__()
        self.required = required
        self._default = default
        self.serialized_name = serialized_name
        if choices and not isinstance(choices, (list, tuple)):
            raise TypeError('"choices" must be a list or tuple')
        self.choices = choices
        self.deserialize_from = deserialize_from
        # Bind the class-level validators (collected by TypeMeta) to this
        # instance, then append any per-instance validators.
        self.validators = [functools.partial(v, self) for v in self._validators]
        if validators:
            self.validators += validators
        self.serialize_when_none = serialize_when_none
        # Instance-level messages override the merged class MESSAGES.
        self.messages = dict(self.MESSAGES, **(messages or {}))
        self._position_hint = next(_next_position_hint)  # For ordering of fields

    def __call__(self, value):
        return self.to_native(value)

    def _mock(self, context=None):
        # Subclasses override this to produce type-appropriate mock data.
        return None

    def _setup(self, field_name, owner_model):
        """Perform late-stage setup tasks that are run after the containing model
        has been created.
        """
        self.name = field_name
        self.owner_model = owner_model

    @property
    def default(self):
        # A callable default is invoked on every access.
        default = self._default
        if callable(self._default):
            default = self._default()
        return default

    def to_primitive(self, value, context=None):
        """Convert internal data to a value safe to serialize.
        """
        return value

    def to_native(self, value, context=None):
        """
        Convert untrusted data to a richer Python construct.
        """
        return value

    def allow_none(self):
        # Defer to the owning model when attached; otherwise fall back to
        # this field's own serialize_when_none flag.
        if hasattr(self, 'owner_model'):
            return self.owner_model.allow_none(self)
        else:
            return self.serialize_when_none

    def validate(self, value):
        """
        Validate the field and return a clean value or raise a
        ``ValidationError`` with a list of errors raised by the validation
        chain. Stop the validation process from continuing through the
        validators by raising ``StopValidation`` instead of ``ValidationError``.
        """
        errors = []
        for validator in self.validators:
            try:
                validator(value)
            except ValidationError as exc:
                errors.extend(exc.messages)
                # StopValidation subclasses ValidationError: keep its
                # messages, then halt the chain.
                if isinstance(exc, StopValidation):
                    break
        if errors:
            raise ValidationError(errors)

    def validate_required(self, value):
        if self.required and value is None:
            raise ValidationError(self.messages['required'])

    def validate_choices(self, value):
        if self.choices is not None:
            if value not in self.choices:
                raise ValidationError(self.messages['choices']
                                      .format(unicode(self.choices)))

    def mock(self, context=None):
        # Optional fields randomly mock as their default value.
        if not self.required and not random.choice([True, False]):
            return self.default
        if self.choices is not None:
            return random.choice(self.choices)
        return self._mock(context)
class UUIDType(BaseType):
    """A field that stores a valid UUID value.
    """
    MESSAGES = {
        'convert': u"Couldn't interpret '{0}' value as UUID.",
    }

    def _mock(self, context=None):
        return uuid.uuid4()

    def to_native(self, value, context=None):
        """Coerce *value* to a uuid.UUID, raising ConversionError on failure."""
        if isinstance(value, uuid.UUID):
            return value
        try:
            return uuid.UUID(value)
        except (AttributeError, TypeError, ValueError):
            raise ConversionError(self.messages['convert'].format(value))

    def to_primitive(self, value, context=None):
        """Serialize as the canonical hex string form."""
        return str(value)
class IPv4Type(BaseType):
    """ A field that stores a valid IPv4 address """

    def _mock(self, context=None):
        return '.'.join(str(random.randrange(256)) for _ in range(4))

    @classmethod
    def valid_ip(cls, addr):
        # Accept only dotted-quad strings whose four octets are 0..255;
        # non-strings (no .strip) and non-integer octets return False.
        try:
            addr = addr.strip().split(".")
        except AttributeError:
            return False
        try:
            return len(addr) == 4 and all(0 <= int(octet) < 256 for octet in addr)
        except ValueError:
            return False

    def validate(self, value):
        """
        Make sure the value is a IPv4 address:
        http://stackoverflow.com/questions/9948833/validate-ip-address-from-list

        NOTE(review): this overrides BaseType.validate entirely, so the
        required/choices validator chain is skipped for this type, and it
        returns True where the base implementation returns None -- confirm
        this is intentional.
        """
        if not IPv4Type.valid_ip(value):
            error_msg = 'Invalid IPv4 address'
            raise ValidationError(error_msg)
        return True
class StringType(BaseType):
    """A unicode string field. Default minimum length is one. If you want to
    accept empty strings, init with ``min_length`` 0.
    """

    # Types that to_native may coerce into a string.
    allow_casts = (int, str)

    MESSAGES = {
        'convert': u"Couldn't interpret '{0}' as string.",
        'max_length': u"String value is too long.",
        'min_length': u"String value is too short.",
        'regex': u"String value did not match validation regex.",
    }

    def __init__(self, regex=None, max_length=None, min_length=None, **kwargs):
        self.regex = regex
        self.max_length = max_length
        self.min_length = min_length
        super(StringType, self).__init__(**kwargs)

    def _mock(self, context=None):
        return random_string(get_value_in(self.min_length, self.max_length))

    def to_native(self, value, context=None):
        if value is None:
            return None
        if not isinstance(value, unicode):
            if isinstance(value, self.allow_casts):
                if not isinstance(value, str):
                    value = str(value)
                # Decode byte strings as UTF-8 (PY2) / coerce to text (PY3).
                value = utf8_decode(value)  #unicode(value, 'utf-8')
            else:
                raise ConversionError(self.messages['convert'].format(value))
        return value

    def validate_length(self, value):
        # None and empty values count as length 0.
        len_of_value = len(value) if value else 0
        if self.max_length is not None and len_of_value > self.max_length:
            raise ValidationError(self.messages['max_length'])
        if self.min_length is not None and len_of_value < self.min_length:
            raise ValidationError(self.messages['min_length'])

    def validate_regex(self, value):
        # re.match anchors only at the start of the string.
        if self.regex is not None and re.match(self.regex, value) is None:
            raise ValidationError(self.messages['regex'])
class URLType(StringType):
    """A field that validates input as an URL.

    If verify_exists=True is passed the validate function will make sure
    the URL makes a valid connection.
    """

    MESSAGES = {
        'invalid_url': u"Not a well formed URL.",
        'not_found': u"URL does not exist.",
    }

    URL_REGEX = re.compile(
        r'^https?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,2000}[A-Z0-9])?\.)+[A-Z]{2,63}\.?|'
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE
    )

    def __init__(self, verify_exists=False, **kwargs):
        self.verify_exists = verify_exists
        super(URLType, self).__init__(**kwargs)

    def _mock(self, context=None):
        return fill_template('http://a%s.ZZ', self.min_length,
                             self.max_length)

    def validate_url(self, value):
        if not URLType.URL_REGEX.match(value):
            raise StopValidation(self.messages['invalid_url'])
        if self.verify_exists:
            # Fix: six.moves.urllib exposes no top-level Request/urlopen --
            # they live in six.moves.urllib.request.  The old code raised
            # AttributeError (swallowed below), so every URL was reported
            # as 'not_found'.
            from six.moves.urllib.request import Request, urlopen
            try:
                request = Request(value)
                urlopen(request)
            except Exception:
                raise StopValidation(self.messages['not_found'])
class EmailType(StringType):
    """A field that validates input as an E-Mail-Address.
    """
    MESSAGES = {
        'email': u"Not a well formed email address."
    }
    EMAIL_REGEX = re.compile(
        # dot-atom
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
        # quoted-string
        # NOTE(review): '\001-011' below looks like a typo for '\001-\011'
        # (as written it parses as the range \x01-'0' plus literal '1's) --
        # confirm against the upstream Django-derived pattern before fixing.
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016'
        r'-\177])*"'
        # domain
        r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,2000}[A-Z0-9])?\.)+[A-Z]{2,63}\.?$',
        re.IGNORECASE
    )

    def _mock(self, context=None):
        return fill_template('%s@example.com', self.min_length,
                             self.max_length)

    def validate_email(self, value):
        if not EmailType.EMAIL_REGEX.match(value):
            raise StopValidation(self.messages['email'])
class NumberType(BaseType):
    """A number field.
    """
    MESSAGES = {
        'number_coerce': u"Value '{0}' is not {1}.",
        'number_min': u"{0} value should be greater than {1}.",
        'number_max': u"{0} value should be less than {1}.",
    }

    def __init__(self, number_class, number_type,
                 min_value=None, max_value=None, **kwargs):
        # number_class: coercion callable (int, float, ...);
        # number_type: human-readable name used in error messages.
        self.number_class = number_class
        self.number_type = number_type
        self.min_value = min_value
        self.max_value = max_value
        super(NumberType, self).__init__(**kwargs)

    def _mock(self, context=None):
        return get_value_in(self.min_value, self.max_value)

    def to_native(self, value, context=None):
        try:
            value = self.number_class(value)
        except (TypeError, ValueError):
            raise ConversionError(self.messages['number_coerce']
                                  .format(value, self.number_type.lower()))
        return value

    def validate_is_a_number(self, value):
        # Same coercion check as to_native, run as part of the validator
        # chain (the result is discarded).
        try:
            self.number_class(value)
        except (TypeError, ValueError):
            raise ConversionError(self.messages['number_coerce']
                                  .format(value, self.number_type.lower()))

    def validate_range(self, value):
        if self.min_value is not None and value < self.min_value:
            raise ValidationError(self.messages['number_min']
                                  .format(self.number_type, self.min_value))
        if self.max_value is not None and value > self.max_value:
            raise ValidationError(self.messages['number_max']
                                  .format(self.number_type, self.max_value))
        return value
class IntType(NumberType):
    """A field that validates input as an Integer."""

    def __init__(self, *args, **kwargs):
        super(IntType, self).__init__(
            number_class=int, number_type='Int', *args, **kwargs)
class LongType(NumberType):
    """A field that validates input as a Long."""

    def __init__(self, *args, **kwargs):
        # `long` only exists on Python 2; fall back to int on Python 3.
        try:
            num_cls = long  # PY2
        except NameError:
            num_cls = int  # PY3
        super(LongType, self).__init__(
            number_class=num_cls, number_type='Long', *args, **kwargs)
class FloatType(NumberType):
    """A field that validates input as a Float."""

    def __init__(self, *args, **kwargs):
        super(FloatType, self).__init__(
            number_class=float, number_type='Float', *args, **kwargs)
class DecimalType(BaseType):
    """A fixed-point decimal number field.
    """
    MESSAGES = {
        'number_coerce': u"Number '{0}' failed to convert to a decimal.",
        'number_min': u"Value should be greater than {0}.",
        'number_max': u"Value should be less than {0}.",
    }

    def __init__(self, min_value=None, max_value=None, **kwargs):
        self.min_value, self.max_value = min_value, max_value
        super(DecimalType, self).__init__(**kwargs)

    def _mock(self, context=None):
        return get_value_in(self.min_value, self.max_value)

    def to_primitive(self, value, context=None):
        return unicode(value)

    def to_native(self, value, context=None):
        if not isinstance(value, decimal.Decimal):
            if not isinstance(value, basestring):
                # Go through a string so Decimal is built from the textual
                # representation rather than a binary float.
                value = unicode(value)
            try:
                value = decimal.Decimal(value)
            except (TypeError, decimal.InvalidOperation):
                raise ConversionError(self.messages['number_coerce'].format(value))
        return value

    def validate_range(self, value):
        if self.min_value is not None and value < self.min_value:
            error_msg = self.messages['number_min'].format(self.min_value)
            raise ValidationError(error_msg)
        if self.max_value is not None and value > self.max_value:
            error_msg = self.messages['number_max'].format(self.max_value)
            raise ValidationError(error_msg)
        return value
class HashType(BaseType):
    """Base class for fixed-length hexadecimal hash fields.

    Subclasses must define ``LENGTH`` (the number of hex digits).
    """
    MESSAGES = {
        'hash_length': u"Hash value is wrong length.",
        'hash_hex': u"Hash value is not hexadecimal.",
    }

    def _mock(self, context=None):
        return random_string(self.LENGTH, string.hexdigits)

    def to_native(self, value, context=None):
        # NOTE(review): raises ValidationError (not ConversionError) for a
        # bad length, inconsistent with the hex check below -- confirm intent.
        if len(value) != self.LENGTH:
            raise ValidationError(self.messages['hash_length'])
        try:
            int(value, 16)
        except ValueError:
            raise ConversionError(self.messages['hash_hex'])
        return value
class MD5Type(HashType):
    """A field that validates input as resembling an MD5 hash.
    """
    # 128-bit digest = 32 hex digits.
    LENGTH = 32
class SHA1Type(HashType):
    """A field that validates input as resembling an SHA1 hash.
    """
    # 160-bit digest = 40 hex digits.
    LENGTH = 40
class BooleanType(BaseType):
    """A boolean field type. In addition to ``True`` and ``False``, coerces these
    values:

    + For ``True``: "True", "true", "1"
    + For ``False``: "False", "false", "0"
    """

    TRUE_VALUES = ('True', 'true', '1')
    FALSE_VALUES = ('False', 'false', '0')

    def _mock(self, context=None):
        return random.choice([True, False])

    def to_native(self, value, context=None):
        if isinstance(value, basestring):
            if value in self.TRUE_VALUES:
                value = True
            elif value in self.FALSE_VALUES:
                value = False
        # bool is a subclass of int, so this also leaves real bools intact.
        if isinstance(value, int) and value in [0, 1]:
            value = bool(value)
        if not isinstance(value, bool):
            raise ConversionError(u"Must be either true or false.")
        return value
class DateType(BaseType):
    """Defaults to converting to and from ISO8601 date values.
    """

    SERIALIZED_FORMAT = '%Y-%m-%d'

    MESSAGES = {
        'parse': u"Could not parse {0}. Should be ISO8601 (YYYY-MM-DD).",
    }

    def __init__(self, **kwargs):
        self.serialized_format = self.SERIALIZED_FORMAT
        super(DateType, self).__init__(**kwargs)

    def _mock(self, context=None):
        # Day capped at 28 so the date is valid for any month.
        return datetime.datetime(
            year=random.randrange(600) + 1900,
            month=random.randrange(12) + 1,
            day=random.randrange(28) + 1,
        )

    def to_native(self, value, context=None):
        if isinstance(value, datetime.date):
            return value
        try:
            return datetime.datetime.strptime(value, self.serialized_format).date()
        except (ValueError, TypeError):
            raise ConversionError(self.messages['parse'].format(value))

    def to_primitive(self, value, context=None):
        return value.strftime(self.serialized_format)
class DateTimeType(BaseType):
    """Defaults to converting to and from ISO8601 datetime values.

    :param formats:
        A value or list of values suitable for ``datetime.datetime.strptime``
        parsing. Default: `('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ')`
    :param serialized_format:
        The output format suitable for Python ``strftime``. Default: ``'%Y-%m-%dT%H:%M:%S.%f'``
    """

    DEFAULT_FORMATS = (
        '%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%SZ',
    )
    SERIALIZED_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'

    MESSAGES = {
        'parse_formats': u'Could not parse {0}. Valid formats: {1}',
        'parse': u"Could not parse {0}. Should be ISO8601.",
    }

    def __init__(self, formats=None, serialized_format=None, **kwargs):
        """
        """
        # A single format string is promoted to a one-element list.
        if isinstance(formats, basestring):
            formats = [formats]
        if formats is None:
            formats = self.DEFAULT_FORMATS
        if serialized_format is None:
            serialized_format = self.SERIALIZED_FORMAT
        self.formats = formats
        self.serialized_format = serialized_format
        super(DateTimeType, self).__init__(**kwargs)

    def _mock(self, context=None):
        # Day capped at 28 so the date is valid for any month.
        return datetime.datetime(
            year=random.randrange(600) + 1900,
            month=random.randrange(12) + 1,
            day=random.randrange(28) + 1,
            hour=random.randrange(24),
            minute=random.randrange(60),
            second=random.randrange(60),
            microsecond=random.randrange(1000000),
        )

    def to_native(self, value, context=None):
        if isinstance(value, datetime.datetime):
            return value
        # Try each configured format in order; first success wins.
        for fmt in self.formats:
            try:
                return datetime.datetime.strptime(value, fmt)
            except (ValueError, TypeError):
                continue
        # Tailor the error message to whether custom formats were supplied.
        if self.formats == self.DEFAULT_FORMATS:
            message = self.messages['parse'].format(value)
        else:
            message = self.messages['parse_formats'].format(
                value, ", ".join(self.formats)
            )
        raise ConversionError(message)

    def to_primitive(self, value, context=None):
        # serialized_format may be a callable taking the datetime itself.
        if callable(self.serialized_format):
            return self.serialized_format(value)
        return value.strftime(self.serialized_format)
class GeoPointType(BaseType):
    """A list storing a latitude and longitude.
    """
    MESSAGES = {
        'point_min': u"{0} value {1} should be greater than {2}.",
        'point_max': u"{0} value {1} should be less than {2}."
    }

    def _mock(self, context=None):
        return (random.randrange(-90, 90), random.randrange(-180, 180))

    def to_native(self, value, context=None):
        """Make sure that a geo-value is of type (x, y)
        """
        # NOTE(review): len() is called before the type check, so an
        # unsized value (e.g. a generator) raises TypeError, not ValueError.
        if not len(value) == 2:
            raise ValueError('Value must be a two-dimensional point')
        if isinstance(value, dict):
            for val in value.values():
                if not isinstance(val, (float, int)):
                    raise ValueError('Both values in point must be float or int')
        elif isinstance(value, (list, tuple)):
            if (not isinstance(value[0], (float, int)) or
                    not isinstance(value[1], (float, int))):
                raise ValueError('Both values in point must be float or int')
        else:
            raise ValueError('GeoPointType can only accept tuples, lists, or dicts')
        return value

    def validate_range(self, value):
        # Assumes (latitude, longitude) ordering.
        latitude, longitude = value
        if latitude < -90:
            raise ValidationError(
                self.messages['point_min'].format('Latitude', latitude, '-90')
            )
        if latitude > 90:
            raise ValidationError(
                self.messages['point_max'].format('Latitude', latitude, '90')
            )
        if longitude < -180:
            raise ValidationError(
                self.messages['point_min'].format('Longitude', longitude, -180)
            )
        if longitude > 180:
            raise ValidationError(
                self.messages['point_max'].format('Longitude', longitude, 180)
            )
class MultilingualStringType(BaseType):
    """
    A multilanguage string field, stored as a dict with {'locale': 'localized_value'}.

    Minimum and maximum lengths apply to each of the localized values.

    At least one of ``default_locale`` or ``context['locale']`` must be defined
    when calling ``.to_primitive``.
    """

    allow_casts = (int, str)

    MESSAGES = {
        'convert': u"Couldn't interpret value as string.",
        'max_length': u"String value in locale {0} is too long.",
        'min_length': u"String value in locale {0} is too short.",
        'locale_not_found': u"No requested locale was available.",
        'no_locale': u"No default or explicit locales were given.",
        'regex_locale': u"Name of locale {0} did not match validation regex.",
        'regex_localized': u"String value in locale {0} did not match validation regex.",
    }

    # Fix: the group was written '(:?_[A-Z]{2})?', which makes the ':' an
    # optional literal inside a capturing group (so e.g. 'en:_US' was
    # accepted).  '(?:...)' is the intended non-capturing group; valid
    # inputs like 'en' and 'en_US' still match.
    LOCALE_REGEX = r'^[a-z]{2}(?:_[A-Z]{2})?$'

    def __init__(self, regex=None, max_length=None, min_length=None,
                 default_locale=None, locale_regex=LOCALE_REGEX, **kwargs):
        self.regex = re.compile(regex) if regex else None
        self.max_length = max_length
        self.min_length = min_length
        self.default_locale = default_locale
        self.locale_regex = re.compile(locale_regex) if locale_regex else None
        super(MultilingualStringType, self).__init__(**kwargs)

    def _mock(self, context=None):
        return random_string(get_value_in(self.min_length, self.max_length))

    def to_native(self, value, context=None):
        """Make sure a MultilingualStringType value is a dict or None."""
        if not (value is None or isinstance(value, dict)):
            raise ValueError('Value must be a dict or None')
        return value

    def to_primitive(self, value, context=None):
        """
        Use a combination of ``default_locale`` and ``context['locale']`` to return
        the best localized string.
        """
        if value is None:
            return None
        context_locale = None
        if context is not None and 'locale' in context:
            context_locale = context['locale']
        # Build a list of all possible locales to try
        possible_locales = []
        for locale in (context_locale, self.default_locale):
            if not locale:
                continue
            if isinstance(locale, basestring):
                possible_locales.append(locale)
            else:
                possible_locales.extend(locale)
        if not possible_locales:
            raise ConversionError(self.messages['no_locale'])
        # First locale present in the value wins.
        for locale in possible_locales:
            if locale in value:
                localized = value[locale]
                break
        else:
            raise ConversionError(self.messages['locale_not_found'])
        if not isinstance(localized, unicode):
            if isinstance(localized, self.allow_casts):
                if not isinstance(localized, str):
                    localized = str(localized)
                #localized = unicode(localized, 'utf-8')
                localized = utf8_decode(localized)
            else:
                raise ConversionError(self.messages['convert'])
        return localized

    def validate_length(self, value):
        for locale, localized in value.items():
            len_of_value = len(localized) if localized else 0
            if self.max_length is not None and len_of_value > self.max_length:
                raise ValidationError(self.messages['max_length'].format(locale))
            if self.min_length is not None and len_of_value < self.min_length:
                raise ValidationError(self.messages['min_length'].format(locale))

    def validate_regex(self, value):
        if self.regex is None and self.locale_regex is None:
            return
        for locale, localized in value.items():
            if self.regex is not None and self.regex.match(localized) is None:
                raise ValidationError(
                    self.messages['regex_localized'].format(locale))
            if self.locale_regex is not None and self.locale_regex.match(locale) is None:
                raise ValidationError(
                    self.messages['regex_locale'].format(locale))
| |
#!/usr/bin/env python
import serial
import struct
import sys, getopt, time, glob#, traceback
from mtdef import MID, OutputMode, OutputSettings, MTException, Baudrates
################################################################
# MTDevice class
################################################################
## XSens MT device communication object.
class MTDevice(object):
"""XSens MT device communication object."""
    def __init__(self, port, baudrate=115200, timeout=0.1, autoconf=True,
                 config_mode=False):
        """Open device."""
        ## serial interface to the device
        self.device = serial.Serial(port, baudrate, timeout=timeout,
                                    writeTimeout=timeout)
        self.device.flushInput()  # flush to make sure the port is ready TODO
        self.device.flushOutput()  # flush to make sure the port is ready TODO
        ## timeout for communication
        self.timeout = timeout
        if autoconf:
            # auto_config (defined elsewhere in this class) presumably fills
            # mode/settings/length/header from the device -- see else branch.
            self.auto_config()
        else:
            ## mode parameter of the IMU
            self.mode = None
            ## settings parameter of the IMU
            self.settings = None
            ## length of the MTData message
            self.length = None
            ## header of the MTData message
            self.header = None
        if config_mode:
            self.GoToConfig()
############################################################
# Low-level communication
############################################################
    ## Low-level message sending function.
    def write_msg(self, mid, data=[]):
        """Low-level message sending function."""
        # NOTE(review): mutable default argument; harmless here because
        # `data` is never mutated.
        length = len(data)
        if length > 254:
            # Extended-length encoding: 0xFF marker, then 16-bit length.
            lendat = [0xFF, 0xFF & length, 0xFF & (length >> 8)]
        else:
            lendat = [length]
        packet = [0xFA, 0xFF, mid] + lendat + list(data)
        # Checksum: low byte of the negated sum of everything after 0xFA.
        packet.append(0xFF & (-(sum(packet[1:]))))
        msg = struct.pack('%dB' % len(packet), *packet)
        self.device.write(msg)
        # print "MT: Write message id 0x%02X with %d data bytes: [%s]"%(mid,length,
        #       ' '.join("%02X"% v for v in data))
        #self.device.flush() #TODO evaluate
    ## Low-level MTData receiving function.
    # Take advantage of known message length.
    def read_data_msg(self, buf=bytearray()):
        """Low-level MTData receiving function.
        Take advantage of known message length."""
        # NOTE(review): mutable default `buf` persists across calls -- this
        # appears deliberate (leftover bytes carry over), but confirm.
        start = time.time()
        # Total frame size: preamble(2) + mid(1) + length field + payload
        # + checksum; the length field is 3 bytes in extended mode.
        if self.length > 254:
            totlength = 7 + self.length
        else:
            totlength = 5 + self.length
        while (time.time()-start) < self.timeout:
            while len(buf) < totlength:
                buf.extend(self.device.read(totlength-len(buf)))
            preamble_ind = buf.find(self.header)
            if preamble_ind == -1:  # not found
                # discard unexploitable data, keep the last 3 bytes in case
                # they are the start of the next header
                #sys.stderr.write("MT: discarding (no preamble).\n")
                del buf[:-3]
                continue
            elif preamble_ind:  # found but not at start
                # discard leading bytes
                #sys.stderr.write("MT: discarding (before preamble).\n")
                del buf[:preamble_ind]
            # complete message for checksum
            while len(buf) < totlength:
                buf.extend(self.device.read(totlength-len(buf)))
            # Valid frames sum to 0 mod 256 over everything after 0xFA.
            if 0xFF & sum(buf[1:]):
                #sys.stderr.write("MT: invalid checksum; discarding data and "\
                #        "waiting for next message.\n")
                del buf[:buf.find(self.header)-2]
                continue
            # Payload sits between the header and the trailing checksum byte.
            data = str(buf[-self.length-1:-1])
            del buf[:]
            return data
        else:
            raise MTException("could not find MTData message.")
## Low-level message receiving function.
def read_msg(self):
"""Low-level message receiving function."""
start = time.time()
while (time.time()-start)<self.timeout:
# read first char of preamble
c = self.device.read()
if not c:
raise MTException("timeout waiting for message.")
if ord(c)<>0xFA:
continue
# second part of preamble
if ord(self.device.read())<>0xFF: # we assume no timeout anymore
continue
# read message id and length of message
mid, length = struct.unpack('!BB', self.device.read(2))
if length==255: # extended length
length, = struct.unpack('!H', self.device.read(2))
# read contents and checksum
buf = self.device.read(length+1)
while (len(buf)<length+1) and ((time.time()-start)<self.timeout):
buf+= self.device.read(length+1-len(buf))
if (len(buf)<length+1):
continue
checksum = ord(buf[-1])
data = struct.unpack('!%dB'%length, buf[:-1])
if mid == MID.Error:
sys.stderr.write("MT error 0x%02X: %s."%(data[0],
MID.ErrorCodes[data[0]]))
# print "MT: Got message id 0x%02X with %d data bytes: [%s]"%(mid,length,
# ' '.join("%02X"% v for v in data))
if 0xFF&sum(data, 0xFF+mid+length+checksum):
sys.stderr.write("invalid checksum; discarding data and "\
"waiting for next message.\n")
continue
return (mid, buf[:-1])
else:
raise MTException("could not find message.")
    ## Send a message and read confirmation
    def write_ack(self, mid, data=[]):
        """Send a message a read confirmation."""
        self.write_msg(mid, data)
        # Acknowledgements use mid+1 by convention; tolerate up to 10
        # unrelated messages before giving up.
        for tries in range(10):
            mid_ack, data_ack = self.read_msg()
            if mid_ack == (mid+1):
                break
        else:
            raise MTException("Ack (0x%X) expected, MID 0x%X received instead"
                              " (after 10 tries)." % (mid+1, mid_ack))
        return data_ack
    ############################################################
    # High-level functions
    ############################################################
    ## Reset MT device.
    def Reset(self):
        """Reset MT device."""
        self.write_ack(MID.Reset)

    ## Place MT device in configuration mode.
    def GoToConfig(self):
        """Place MT device in configuration mode."""
        self.write_ack(MID.GoToConfig)

    ## Place MT device in measurement mode.
    def GoToMeasurement(self):
        """Place MT device in measurement mode."""
        self.write_ack(MID.GoToMeasurement)

    ## Restore MT device configuration to factory defaults (soft version).
    def RestoreFactoryDefaults(self):
        """Restore MT device configuration to factory defaults (soft version).
        """
        # The device must be in config mode before restoring defaults.
        self.GoToConfig()
        self.write_ack(MID.RestoreFactoryDef)
    ## Get current output mode.
    # Assume the device is in Config state.
    def GetOutputMode(self):
        """Get current output mode.
        Assume the device is in Config state."""
        # Sending the Set message with no payload reads the current value.
        data = self.write_ack(MID.SetOutputMode)
        self.mode, = struct.unpack('!H', data)
        return self.mode

    ## Select which information to output.
    # Assume the device is in Config state.
    def SetOutputMode(self, mode):
        """Select which information to output.
        Assume the device is in Config state."""
        # Split the 16-bit mode into big-endian (high, low) bytes.
        H, L = (mode & 0xFF00) >> 8, mode & 0x00FF
        self.write_ack(MID.SetOutputMode, (H, L))

    ## Get current output mode.
    # Assume the device is in Config state.
    def GetOutputSettings(self):
        """Get current output mode.
        Assume the device is in Config state."""
        data = self.write_ack(MID.SetOutputSettings)
        self.settings, = struct.unpack('!I', data)
        return self.settings

    ## Select how to output the information.
    # Assume the device is in Config state.
    def SetOutputSettings(self, settings):
        """Select how to output the information.
        Assume the device is in Config state."""
        # Split the 32-bit settings word into four big-endian bytes.
        HH, HL = (settings & 0xFF000000) >> 24, (settings & 0x00FF0000) >> 16
        LH, LL = (settings & 0x0000FF00) >> 8, settings & 0x000000FF
        self.write_ack(MID.SetOutputSettings, (HH, HL, LH, LL))

    ## Set the period of sampling.
    # Assume the device is in Config state.
    def SetPeriod(self, period):
        """Set the period of sampling.
        Assume the device is in Config state."""
        H, L = (period & 0xFF00) >> 8, period & 0x00FF
        self.write_ack(MID.SetPeriod, (H, L))

    ## Set the output skip factor.
    # Assume the device is in Config state.
    def SetOutputSkipFactor(self, skipfactor):
        """Set the output skip factor.
        Assume the device is in Config state."""
        H, L = (skipfactor & 0xFF00) >> 8, skipfactor & 0x00FF
        self.write_ack(MID.SetOutputSkipFactor, (H, L))
    ## Get data length.
    # Assume the device is in Config state.
    def ReqDataLength(self):
        """Get data length.
        Assume the device is in Config state."""
        data = self.write_ack(MID.ReqDataLength)
        self.length, = struct.unpack('!H', data)
        # Cache the MTData frame header (preamble + MTData MID + length);
        # Python 2 byte string -- chr() would need to change for PY3.
        self.header = '\xFA\xFF\x32'+chr(self.length)
        return self.length
    ## Ask for the current configuration of the MT device.
    # Assume the device is in Config state.
    def ReqConfiguration(self):
        """Ask for the current configuration of the MT device.
        Assume the device is in Config state."""
        config = self.write_ack(MID.ReqConfiguration)
        try:
            # NOTE(review): the locals `date` and `time` shadow the module
            # names for the rest of this method (harmless here, but fragile).
            masterID, period, skipfactor, _, _, _, date, time, num, deviceID,\
                length, mode, settings =\
                struct.unpack('!IHHHHI8s8s32x32xHIHHI8x', config)
        except struct.error:
            raise MTException("could not parse configuration.")
        # Cache the parsed values and the matching MTData frame header.
        self.mode = mode
        self.settings = settings
        self.length = length
        self.header = '\xFA\xFF\x32'+chr(length)
        conf = {'output-mode': mode,
                'output-settings': settings,
                'length': length,
                'period': period,
                'skipfactor': skipfactor,
                'Master device ID': masterID,
                'date': date,
                'time': time,
                'number of devices': num,
                'device ID': deviceID}
        return conf
    ## Set the baudrate of the device using the baudrate id.
    # Assume the device is in Config state.
    def SetBaudrate(self, brid):
        """Set the baudrate of the device using the baudrate id.
        Assume the device is in Config state."""
        self.write_ack(MID.SetBaudrate, (brid,))
    ## Request the available XKF scenarios on the device.
    # Assume the device is in Config state.
    def ReqAvailableScenarios(self):
        """Request the available XKF scenarios on the device.
        Assume the device is in Config state."""
        scenarios_dat = self.write_ack(MID.ReqAvailableScenarios)
        scenarios = []
        try:
            # Each scenario record is 22 bytes: type, version, 20-char label.
            # NOTE(review): `/` is integer division on Python 2 only; a PY3
            # port would need `//` here.
            for i in range(len(scenarios_dat)/22):
                scenario_type, version, label =\
                    struct.unpack('!BB20s', scenarios_dat[22*i:22*(i+1)])
                scenarios.append((scenario_type, version, label.strip()))
            ## available XKF scenarios
            self.scenarios = scenarios
        except struct.error:
            raise MTException("could not parse the available XKF scenarios.")
        return scenarios
## Request the ID of the currently used XKF scenario.
# Assume the device is in Config state.
def ReqCurrentScenario(self):
    """Request the ID of the currently used XKF scenario.
    Assume the device is in Config state."""
    data = self.write_ack(MID.ReqCurrentScenario)
    ## current XKF id
    self.scenario_id, = struct.unpack('!H', data)
    try:
        scenarios = self.scenarios
    except AttributeError:
        # Scenario list not cached yet: fetch it from the device.
        scenarios = self.ReqAvailableScenarios()
    # Look up the label matching the current scenario id.
    for t, _, label in scenarios:
        if t==self.scenario_id:
            ## current XKF label
            self.scenario_label = label
            break
    else:
        # No scenario with that id in the list: leave the label empty.
        self.scenario_label = ""
    return self.scenario_id, self.scenario_label
## Sets the XKF scenario to use.
# Assume the device is in Config state.
def SetCurrentScenario(self, scenario_id):
    """Sets the XKF scenario to use.
    Assume the device is in Config state."""
    # The high byte of the payload is always zero; only the low byte
    # of the scenario id is transmitted.
    payload = (0x00, scenario_id & 0xFF)
    self.write_ack(MID.SetCurrentScenario, payload)
############################################################
# High-level utility functions
############################################################
## Configure the mode and settings of the MT device.
def configure(self, mode, settings, period=None, skipfactor=None):
    """Configure the mode and settings of the MT device."""
    self.GoToConfig()
    self.SetOutputMode(mode)
    self.SetOutputSettings(settings)
    # Optional parameters are only written when the caller supplied them.
    for setter, value in ((self.SetPeriod, period),
                          (self.SetOutputSkipFactor, skipfactor)):
        if value is not None:
            setter(value)
    # Read back the effective configuration before leaving Config state.
    self.GetOutputMode()
    self.GetOutputSettings()
    self.ReqDataLength()
    self.GoToMeasurement()
## Read configuration from device.
def auto_config(self):
    """Read configuration from device.

    Returns the (mode, settings, length) triple reported by the device."""
    self.GoToConfig()
    # Tuple elements are evaluated left to right, preserving the original
    # query order: mode, settings, then data length.
    current = (self.GetOutputMode(),
               self.GetOutputSettings(),
               self.ReqDataLength())
    self.GoToMeasurement()
    return current
## Read and parse a measurement packet
def read_measurement(self, mode=None, settings=None):
    """Read and parse a measurement packet.

    mode and settings default to the values cached on the instance
    (filled in by ReqConfiguration or the Get* methods).  Returns a dict
    keyed by data-group name.  Raises MTException when the payload is too
    short for the expected layout or when trailing bytes remain.
    """
    # getting mode
    if mode is None:
        mode = self.mode
    if settings is None:
        settings = self.settings
    # getting data
    data = self.read_data_msg()
    #_, data = self.read_msg()
    # data object
    output = {}
    try:
        # raw IMU first
        if mode & OutputMode.RAW:
            o = {}
            o['accX'], o['accY'], o['accZ'], o['gyrX'], o['gyrY'], o['gyrZ'],\
                o['magX'], o['magY'], o['magZ'], o['temp'] =\
                struct.unpack('!10H', data[:20])
            data = data[20:]
            output['RAW'] = o
        # raw GPS second
        if mode & OutputMode.RAWGPS:
            o = {}
            o['Press'], o['bPrs'], o['ITOW'], o['LAT'], o['LON'], o['ALT'],\
                o['VEL_N'], o['VEL_E'], o['VEL_D'], o['Hacc'], o['Vacc'],\
                o['Sacc'], o['bGPS'] = struct.unpack('!HBI6i3IB', data[:44])
            data = data[44:]
            output['RAWGPS'] = o
        # temperature
        if mode & OutputMode.Temp:
            temp, = struct.unpack('!f', data[:4])
            data = data[4:]
            output['Temp'] = temp
        # calibrated data: each of the three vectors is present unless the
        # settings mask says it was excluded.
        if mode & OutputMode.Calib:
            o = {}
            if not (settings&OutputSettings.CalibMode_GyrMag):
                o['accX'], o['accY'], o['accZ'] = struct.unpack('!3f',
                                                                data[:12])
                data = data[12:]
            if not (settings&OutputSettings.CalibMode_AccMag):
                o['gyrX'], o['gyrY'], o['gyrZ'] = struct.unpack('!3f',
                                                                data[:12])
                data = data[12:]
            if not (settings&OutputSettings.CalibMode_AccGyr):
                o['magX'], o['magY'], o['magZ'] = struct.unpack('!3f',
                                                                data[:12])
                data = data[12:]
            output['Calib'] = o
        # orientation, in one of three representations
        if mode & OutputMode.Orient:
            o = {}
            if settings & OutputSettings.OrientMode_Euler:
                o['roll'], o['pitch'], o['yaw'] = struct.unpack('!3f', data[:12])
                data = data[12:]
            elif settings & OutputSettings.OrientMode_Matrix:
                a, b, c, d, e, f, g, h, i = struct.unpack('!9f', data[:36])
                data = data[36:]
                o['matrix'] = ((a, b, c), (d, e, f), (g, h, i))
            else:  # OutputSettings.OrientMode_Quaternion:
                q0, q1, q2, q3 = struct.unpack('!4f', data[:16])
                data = data[16:]
                o['quaternion'] = (q0, q1, q2, q3)
            output['Orient'] = o
        # auxiliary
        if mode & OutputMode.Auxiliary:
            o = {}
            if not (settings&OutputSettings.AuxiliaryMode_NoAIN1):
                o['Ain_1'], = struct.unpack('!H', data[:2])
                data = data[2:]
            if not (settings&OutputSettings.AuxiliaryMode_NoAIN2):
                o['Ain_2'], = struct.unpack('!H', data[:2])
                data = data[2:]
            output['Auxiliary'] = o
        # position
        if mode & OutputMode.Position:
            o = {}
            o['Lat'], o['Lon'], o['Alt'] = struct.unpack('!3f', data[:12])
            data = data[12:]
            output['Position'] = o
        # velocity
        if mode & OutputMode.Velocity:
            o = {}
            o['Vel_X'], o['Vel_Y'], o['Vel_Z'] = struct.unpack('!3f', data[:12])
            data = data[12:]
            output['Velocity'] = o
        # status
        if mode & OutputMode.Status:
            status, = struct.unpack('!B', data[:1])
            data = data[1:]
            output['Status'] = status
        # sample counter
        if settings & OutputSettings.Timestamp_SampleCnt:
            TS, = struct.unpack('!H', data[:2])
            data = data[2:]
            output['Sample'] = TS
    # Was `except struct.error, e:` -- old Python 2-only syntax, and the
    # bound exception was never used.
    except struct.error:
        raise MTException("could not parse MTData message.")
    # Was `data <> ''` -- the `<>` operator is deprecated in Python 2 and
    # removed in Python 3; `!=` is equivalent.
    if data != '':
        raise MTException("could not parse MTData message (too long).")
    return output
## Change the baudrate, reset the device and reopen communication.
def ChangeBaudrate(self, baudrate):
    """Change the baudrate, reset the device and reopen communication."""
    self.GoToConfig()
    brid = Baudrates.get_BRID(baudrate)
    self.SetBaudrate(brid)
    self.Reset()
    #self.device.flush()
    # Switch the local serial port to the new rate as well.
    self.device.baudrate=baudrate
    #self.device.flush()
    # Give the device a short moment to come back up at the new rate.
    time.sleep(0.01)
    self.read_msg()
    # NOTE(review): message id 0x3f sent without payload -- purpose not
    # documented in this chunk; confirm against the MT protocol spec.
    self.write_msg(0x3f)
################################################################
# Auto detect port
################################################################
def find_devices():
    """Scan serial ports and return a list of (port, baudrate) pairs
    on which an MT device answered."""
    detected = []
    for port in glob.glob("/dev/tty*S*"):
        try:
            rate = find_baudrate(port)
        except MTException:
            # Port could not be probed; try the next one.
            continue
        if rate:
            detected.append((port, rate))
    return detected
################################################################
# Auto detect baudrate
################################################################
def find_baudrate(port):
    """Try each supported baudrate on `port` and return the first one at
    which the device answers a GoToConfig/GoToMeasurement round trip.
    Returns None when no rate works; raises MTException when the port
    cannot be opened at all."""
    for rate in (115200, 460800, 921600, 230400, 57600, 38400, 19200, 9600):
        try:
            device = MTDevice(port, rate)
        except serial.SerialException:
            raise MTException("unable to open %s"%port)
        try:
            device.GoToConfig()
            device.GoToMeasurement()
        except MTException:
            # No valid answer at this rate; try the next one.
            continue
        return rate
################################################################
# Documentation for stand alone usage
################################################################
def usage():
    """Print the command-line help message.

    Uses the parenthesized print form so the module keeps working under
    Python 2 (where it was a statement printing the grouped string) and
    parses under Python 3.
    """
    print("""MT device driver.
Usage:
./mtdevice.py [commands] [opts]
Commands:
-h, --help
Print this help and quit.
-r, --reset
Reset device to factory defaults.
-a, --change-baudrate=NEW_BAUD
Change baudrate from BAUD (see below) to NEW_BAUD.
-c, --configure
Configure the device (needs MODE and SETTINGS arguments below).
-e, --echo
Print MTData. It is the default if no other command is supplied.
-i, --inspect
Print current MT device configuration.
-x, --xkf-scenario=ID
Change the current XKF scenario.
Options:
-d, --device=DEV
Serial interface of the device (default: /dev/ttyUSB0). If 'auto', then
all serial ports are tested at all baudrates and the first
suitable device is used.
-b, --baudrate=BAUD
Baudrate of serial interface (default: 115200). If 0, then all
rates are tried until a suitable one is found.
-m, --output-mode=MODE
Mode of the device selecting the information to output.
This is required for 'configure' command. If it is not present
in 'echo' command, the configuration will be read from the
device.
MODE can be either the mode value in hexadecimal, decimal or
binary form, or a string composed of the following characters
(in any order):
t temperature, [0x0001]
c calibrated data, [0x0002]
o orientation data, [0x0004]
a auxiliary data, [0x0008]
p position data (requires MTi-G), [0x0010]
v velocity data (requires MTi-G), [0x0020]
s status data, [0x0800]
g raw GPS mode (requires MTi-G), [0x1000]
r raw (incompatible with others except raw GPS),
[0x4000]
For example, use "--output-mode=so" to have status and
orientation data.
-s, --output-settings=SETTINGS
Settings of the device.
This is required for 'configure' command. If it is not present
in 'echo' command, the configuration will be read from the
device.
SETTINGS can be either the settings value in hexadecimal,
decimal or binary form, or a string composed of the following
characters (in any order):
t sample count (excludes 'n')
n no sample count (excludes 't')
q orientation in quaternion (excludes 'e' and 'm')
e orientation in Euler angles (excludes 'm' and
'q')
m orientation in matrix (excludes 'q' and 'e')
A acceleration in calibrated data
G rate of turn in calibrated data
M magnetic field in calibrated data
i only analog input 1 (excludes 'j')
j only analog input 2 (excludes 'i')
N North-East-Down instead of default: X North Z up
For example, use "--output-settings=tqMAG" for all calibrated
data, sample counter and orientation in quaternion.
-p, --period=PERIOD
Sampling period in (1/115200) seconds (default: 1152).
Minimum is 225 (1.95 ms, 512 Hz), maximum is 1152
(10.0 ms, 100 Hz).
Note that it is the period at which sampling occurs, not the
period at which messages are sent (see below).
-f, --skip-factor=SKIPFACTOR
Number of samples to skip before sending MTData message
(default: 0).
The frequency at which MTData message is send is:
115200/(PERIOD * (SKIPFACTOR + 1))
If the value is 0xffff, no data is send unless a ReqData request
is made.
""")
################################################################
# Main function
################################################################
def main():
    """Parse command-line arguments and drive the MT device accordingly."""
    # parse command line
    shopts = 'hra:ceid:b:m:s:p:f:x:'
    lopts = ['help', 'reset', 'change-baudrate=', 'configure', 'echo',
             'inspect', 'device=', 'baudrate=', 'output-mode=',
             'output-settings=', 'period=', 'skip-factor=', 'xkf-scenario=']
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], shopts, lopts)
    except getopt.GetoptError, e:
        print e
        usage()
        return 1
    # default values
    device = '/dev/ttyUSB0'
    baudrate = 115200
    mode = None
    settings = None
    period = None
    skipfactor = None
    new_baudrate = None
    new_xkf = None
    actions = []
    # filling in arguments
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            return
        if o in ('-r', '--reset'):
            actions.append('reset')
        if o in ('-a', '--change-baudrate'):
            try:
                new_baudrate = int(a)
            except ValueError:
                print "change-baudrate argument must be integer."
                return 1
            actions.append('change-baudrate')
        if o in ('-c', '--configure'):
            actions.append('configure')
        if o in ('-e', '--echo'):
            actions.append('echo')
        if o in ('-i', '--inspect'):
            actions.append('inspect')
        if o in ('-x', '--xkf-scenario'):
            try:
                new_xkf = int(a)
            except ValueError:
                print "xkf-scenario argument must be integer."
                return 1
            actions.append('xkf-scenario')
        if o in ('-d', '--device'):
            device = a
        if o in ('-b', '--baudrate'):
            try:
                baudrate = int(a)
            except ValueError:
                print "baudrate argument must be integer."
                return 1
        if o in ('-m', '--output-mode'):
            mode = get_mode(a)
            if mode is None:
                return 1
        if o in ('-s', '--output-settings'):
            settings = get_settings(a)
            if settings is None:
                return 1
        if o in ('-p', '--period'):
            try:
                period = int(a)
            except ValueError:
                print "period argument must be integer."
                return 1
        if o in ('-f', '--skip-factor'):
            try:
                skipfactor = int(a)
            except ValueError:
                print "skip-factor argument must be integer."
                return 1
    # if nothing else: echo
    if len(actions) == 0:
        actions.append('echo')
    try:
        # 'auto' scans all likely serial ports at all supported baudrates.
        if device=='auto':
            devs = find_devices()
            if devs:
                print "Detected devices:","".join('\n\t%s @ %d'%(d,p) for d,p in
                        devs)
                print "Using %s @ %d"%devs[0]
                device, baudrate = devs[0]
            else:
                print "No suitable device found."
                return 1
        # find baudrate
        if not baudrate:
            baudrate = find_baudrate(device)
        if not baudrate:
            print "No suitable baudrate found."
            return 1
        # open device
        try:
            mt = MTDevice(device, baudrate)
        except serial.SerialException:
            raise MTException("unable to open %s"%device)
        # execute actions
        if 'inspect' in actions:
            mt.GoToConfig()
            print "Device: %s at %d Bd:"%(device, baudrate)
            print "General configuration:", mt.ReqConfiguration()
            print "Available scenarios:", mt.ReqAvailableScenarios()
            # ReqCurrentScenario returns (id, label); [::-1] swaps them to
            # match the "%s (id: %d)" format.
            print "Current scenario: %s (id: %d)"%mt.ReqCurrentScenario()[::-1]
            mt.GoToMeasurement()
        if 'change-baudrate' in actions:
            # Trailing comma: Python 2 print without newline.
            print "Changing baudrate from %d to %d:"%(baudrate, new_baudrate),
            sys.stdout.flush()
            mt.ChangeBaudrate(new_baudrate)
            print " Ok"         # should we test it was actually ok?
        if 'reset' in actions:
            print "Restoring factory defaults",
            sys.stdout.flush()
            mt.RestoreFactoryDefaults()
            print " Ok"         # should we test it was actually ok?
        if 'configure' in actions:
            if mode is None:
                print "output-mode is require to configure the device."
                return 1
            if settings is None:
                print "output-settings is required to configure the device."
                return 1
            print "Configuring mode and settings",
            sys.stdout.flush()
            mt.configure(mode, settings, period, skipfactor)
            print " Ok"         # should we test it was actually ok?
        if 'xkf-scenario' in actions:
            print "Changing XKF scenario",
            sys.stdout.flush()
            mt.GoToConfig()
            mt.SetCurrentScenario(new_xkf)
            mt.GoToMeasurement()
            print "Ok"
        if 'echo' in actions:
            # if (mode is None) or (settings is None):
            #     mode, settings, length = mt.auto_config()
            #     print mode, settings, length
            try:
                # Print measurements until the user interrupts with Ctrl-C.
                while True:
                    print mt.read_measurement(mode, settings)
            except KeyboardInterrupt:
                pass
    except MTException as e:
        #traceback.print_tb(sys.exc_info()[2])
        print e
def get_mode(arg):
    """Parse command line output-mode argument.

    Accepts a decimal number, a binary/hexadecimal number starting with
    '0', or a string of single-character specifiers.  Returns the integer
    mode, or None after printing a message on an unknown specifier.
    """
    try:    # decimal
        mode = int(arg)
        return mode
    except ValueError:
        pass
    # Only strings starting with '0' are tried as binary/hexadecimal, so
    # specifier strings such as "ca" are not misread as hex numbers.
    if arg[0]=='0':
        try:    # binary
            mode = int(arg, 2)
            return mode
        except ValueError:
            pass
        try:    # hexadecimal
            mode = int(arg, 16)
            return mode
        except ValueError:
            pass
    # string mode specification
    mode = 0
    for c in arg:
        if c=='t':
            mode |= OutputMode.Temp
        elif c=='c':
            mode |= OutputMode.Calib
        elif c=='o':
            mode |= OutputMode.Orient
        elif c=='a':
            mode |= OutputMode.Auxiliary
        elif c=='p':
            mode |= OutputMode.Position
        elif c=='v':
            mode |= OutputMode.Velocity
        elif c=='s':
            mode |= OutputMode.Status
        elif c=='g':
            mode |= OutputMode.RAWGPS
        elif c=='r':
            mode |= OutputMode.RAW
        else:
            # Parenthesized print: valid in both Python 2 and Python 3
            # (the original Python 2-only print statement breaks under 3).
            print("Unknown output-mode specifier: '%s'"%c)
            return
    return mode
def get_settings(arg):
    """Parse command line output-settings argument.

    Accepts a decimal number, a binary/hexadecimal number starting with
    '0', or a string of single-character specifiers.  Returns the integer
    settings value, or None after printing a message on an unknown
    specifier.
    """
    try:    # decimal
        settings = int(arg)
        return settings
    except ValueError:
        pass
    # Only strings starting with '0' are tried as binary/hexadecimal, so
    # specifier strings are not misread as hex numbers.
    if arg[0]=='0':
        try:    # binary
            settings = int(arg, 2)
            return settings
        except ValueError:
            pass
        try:    # hexadecimal
            settings = int(arg, 16)
            return settings
        except ValueError:
            pass
    # strings settings specification
    timestamp = 0
    orient_mode = 0
    # Calibration starts with the full mask; each A/G/M/i/j specifier
    # narrows it with a bitwise AND.
    calib_mode = OutputSettings.CalibMode_Mask
    NED = 0
    for c in arg:
        if c=='t':
            timestamp = OutputSettings.Timestamp_SampleCnt
        elif c=='n':
            timestamp = OutputSettings.Timestamp_None
        elif c=='q':
            orient_mode = OutputSettings.OrientMode_Quaternion
        elif c=='e':
            orient_mode = OutputSettings.OrientMode_Euler
        elif c=='m':
            orient_mode = OutputSettings.OrientMode_Matrix
        elif c=='A':
            calib_mode &= OutputSettings.CalibMode_Acc
        elif c=='G':
            calib_mode &= OutputSettings.CalibMode_Gyr
        elif c=='M':
            calib_mode &= OutputSettings.CalibMode_Mag
        elif c=='i':
            calib_mode &= OutputSettings.AuxiliaryMode_NoAIN2
        elif c=='j':
            calib_mode &= OutputSettings.AuxiliaryMode_NoAIN1
        elif c=='N':
            NED = OutputSettings.Coordinates_NED
        else:
            # Parenthesized print: valid in both Python 2 and Python 3
            # (the original Python 2-only print statement breaks under 3).
            print("Unknown output-settings specifier: '%s'"%c)
            return
    settings = timestamp|orient_mode|calib_mode|NED
    return settings
# Run the command line interface when the module is executed as a script.
if __name__=='__main__':
    main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
"""
This module contains a Google Cloud SQL Hook.
"""
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import socket
import string
import subprocess
import time
import uuid
from subprocess import PIPE, Popen
from typing import Any, Dict, List, Optional, Union
from urllib.parse import quote_plus
import requests
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from sqlalchemy.orm import Session
from airflow.exceptions import AirflowException
# Number of retries - used by googleapiclient method calls to perform retries
# For requests that are "retriable"
from airflow.hooks.base_hook import BaseHook
from airflow.models import Connection
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.postgres.hooks.postgres import PostgresHook
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
# Maximum length of a UNIX domain socket path.  NOTE(review): 108 matches
# the Linux sun_path limit -- the check using it is not visible in this
# chunk; confirm where it is enforced.
UNIX_PATH_MAX = 108
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 20
class CloudSqlOperationStatus:
    """
    Helper class with operation statuses.

    The values mirror the ``status`` strings returned by the sqladmin
    operations API; ``DONE`` is the terminal state polled for by
    ``CloudSQLHook._wait_for_operation_to_complete``.
    """
    PENDING = "PENDING"
    RUNNING = "RUNNING"
    DONE = "DONE"
    UNKNOWN = "UNKNOWN"
# noinspection PyAbstractClass
class CloudSQLHook(GoogleBaseHook):
    """
    Hook for Google Cloud SQL APIs.

    All the methods in the hook where project_id is used must be called with
    keyword arguments rather than positional.
    """
    def __init__(
        self,
        api_version: str,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None
    ) -> None:
        super().__init__(gcp_conn_id, delegate_to)
        self.api_version = api_version
        self._conn = None  # lazily-built sqladmin service object

    def get_conn(self):
        """
        Retrieves connection to Cloud SQL.

        :return: Google Cloud SQL services object.
        :rtype: dict
        """
        if not self._conn:
            http_authorized = self._authorize()
            self._conn = build('sqladmin', self.api_version,
                               http=http_authorized, cache_discovery=False)
        return self._conn

    def _execute_and_wait(self, request, project_id: str) -> None:
        """
        Execute a prepared sqladmin request and block until the resulting
        long-running operation completes.

        Shared by all mutating methods to avoid repeating the
        execute / extract-operation-name / wait boilerplate.

        :param request: A prepared googleapiclient request object.
        :param project_id: Project ID of the project that contains the instance.
        :return: None
        """
        response = request.execute(num_retries=self.num_retries)
        operation_name = response["name"]
        self._wait_for_operation_to_complete(project_id=project_id,
                                             operation_name=operation_name)

    @GoogleBaseHook.fallback_to_default_project_id
    def get_instance(self, instance: str, project_id: str) -> Dict:
        """
        Retrieves a resource containing information about a Cloud SQL instance.

        :param instance: Database instance ID. This does not include the project ID.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: A Cloud SQL instance resource.
        :rtype: dict
        """
        return self.get_conn().instances().get(  # noqa # pylint: disable=no-member
            project=project_id,
            instance=instance
        ).execute(num_retries=self.num_retries)

    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def create_instance(self, body: Dict, project_id: str) -> None:
        """
        Creates a new Cloud SQL instance.

        :param body: Body required by the Cloud SQL insert API, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert#request-body.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: None
        """
        request = self.get_conn().instances().insert(  # noqa # pylint: disable=no-member
            project=project_id,
            body=body
        )
        self._execute_and_wait(request, project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def patch_instance(self, body: Dict, instance: str, project_id: str) -> None:
        """
        Updates settings of a Cloud SQL instance.

        Caution: This is not a partial update, so you must include values for
        all the settings that you want to retain.

        :param body: Body required by the Cloud SQL patch API, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/patch#request-body.
        :param instance: Cloud SQL instance ID. This does not include the project ID.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: None
        """
        request = self.get_conn().instances().patch(  # noqa # pylint: disable=no-member
            project=project_id,
            instance=instance,
            body=body
        )
        self._execute_and_wait(request, project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def delete_instance(self, instance: str, project_id: str) -> None:
        """
        Deletes a Cloud SQL instance.

        :param instance: Cloud SQL instance ID. This does not include the project ID.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: None
        """
        request = self.get_conn().instances().delete(  # noqa # pylint: disable=no-member
            project=project_id,
            instance=instance,
        )
        self._execute_and_wait(request, project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    def get_database(self, instance: str, database: str, project_id: str) -> Dict:
        """
        Retrieves a database resource from a Cloud SQL instance.

        :param instance: Database instance ID. This does not include the project ID.
        :param database: Name of the database in the instance.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: A Cloud SQL database resource, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases#resource.
        :rtype: dict
        """
        return self.get_conn().databases().get(  # noqa # pylint: disable=no-member
            project=project_id,
            instance=instance,
            database=database
        ).execute(num_retries=self.num_retries)

    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def create_database(self, instance: str, body: Dict, project_id: str) -> None:
        """
        Creates a new database inside a Cloud SQL instance.

        :param instance: Database instance ID. This does not include the project ID.
        :param body: The request body, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: None
        """
        request = self.get_conn().databases().insert(  # noqa # pylint: disable=no-member
            project=project_id,
            instance=instance,
            body=body
        )
        self._execute_and_wait(request, project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def patch_database(
        self,
        instance: str,
        database: str,
        body: Dict,
        project_id: str,
    ) -> None:
        """
        Updates a database resource inside a Cloud SQL instance.

        This method supports patch semantics.
        See https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch.

        :param instance: Database instance ID. This does not include the project ID.
        :param database: Name of the database to be updated in the instance.
        :param body: The request body, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: None
        """
        request = self.get_conn().databases().patch(  # noqa # pylint: disable=no-member
            project=project_id,
            instance=instance,
            database=database,
            body=body
        )
        self._execute_and_wait(request, project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def delete_database(self, instance: str, database: str, project_id: str) -> None:
        """
        Deletes a database from a Cloud SQL instance.

        :param instance: Database instance ID. This does not include the project ID.
        :param database: Name of the database to be deleted in the instance.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: None
        """
        request = self.get_conn().databases().delete(  # noqa # pylint: disable=no-member
            project=project_id,
            instance=instance,
            database=database
        )
        self._execute_and_wait(request, project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def export_instance(self, instance: str, body: Dict, project_id: str) -> None:
        """
        Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump
        or CSV file.

        :param instance: Database instance ID of the Cloud SQL instance. This does not
            include the project ID.
        :param body: The request body, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: None
        """
        request = self.get_conn().instances().export(  # noqa # pylint: disable=no-member
            project=project_id,
            instance=instance,
            body=body
        )
        self._execute_and_wait(request, project_id)

    @GoogleBaseHook.fallback_to_default_project_id
    def import_instance(self, instance: str, body: Dict, project_id: str) -> None:
        """
        Imports data into a Cloud SQL instance from a SQL dump or CSV file in
        Cloud Storage.

        :param instance: Database instance ID. This does not include the project ID.
        :param body: The request body, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the GCP connection is used.
        :return: None
        :raises AirflowException: when the sqladmin API rejects the import.
        """
        try:
            request = self.get_conn().instances().import_(  # noqa # pylint: disable=no-member
                project=project_id,
                instance=instance,
                body=body
            )
            self._execute_and_wait(request, project_id)
        except HttpError as ex:
            raise AirflowException(
                'Importing instance {} failed: {}'.format(instance, ex.content)
            )

    def _wait_for_operation_to_complete(self, project_id: str, operation_name: str) -> None:
        """
        Waits for the named operation to complete - checks status of the
        asynchronous call.

        :param project_id: Project ID of the project that contains the instance.
        :param operation_name: Name of the operation.
        :return: None
        :raises AirflowException: when the operation finished with an error.
        """
        service = self.get_conn()
        while True:
            operation_response = service.operations().get(  # noqa # pylint: disable=no-member
                project=project_id,
                operation=operation_name,
            ).execute(num_retries=self.num_retries)
            if operation_response.get("status") == CloudSqlOperationStatus.DONE:
                error = operation_response.get("error")
                if error:
                    # Extracting the errors list as string and trimming square braces
                    error_msg = str(error.get("errors"))[1:-1]
                    raise AirflowException(error_msg)
                # No meaningful info to return from the response in case of success
                return
            time.sleep(TIME_TO_SLEEP_IN_SECONDS)
# URL template for the latest proxy build; filled with (system, processor)
# by _download_sql_proxy_if_needed.
CLOUD_SQL_PROXY_DOWNLOAD_URL = "https://dl.google.com/cloudsql/cloud_sql_proxy.{}.{}"
# URL template for a pinned proxy version; filled with (version, system, processor).
CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL = \
    "https://storage.googleapis.com/cloudsql-proxy/{}/cloud_sql_proxy.{}.{}"
# Extra-field keys of the Google Cloud connection that hold credentials.
GCP_CREDENTIALS_KEY_PATH = "extra__google_cloud_platform__key_path"
GCP_CREDENTIALS_KEYFILE_DICT = "extra__google_cloud_platform__keyfile_dict"
class CloudSqlProxyRunner(LoggingMixin):
"""
Downloads and runs cloud-sql-proxy as subprocess of the Python process.
The cloud-sql-proxy needs to be downloaded and started before we can connect
to the Google Cloud SQL instance via database connection. It establishes
secure tunnel connection to the database. It authorizes using the
GCP credentials that are passed by the configuration.
More details about the proxy can be found here:
https://cloud.google.com/sql/docs/mysql/sql-proxy
:param path_prefix: Unique path prefix where proxy will be downloaded and
directories created for unix sockets.
:type path_prefix: str
:param instance_specification: Specification of the instance to connect the
proxy to. It should be specified in the form that is described in
https://cloud.google.com/sql/docs/mysql/sql-proxy#multiple-instances in
-instances parameter (typically in the form of ``<project>:<region>:<instance>``
for UNIX socket connections and in the form of
``<project>:<region>:<instance>=tcp:<port>`` for TCP connections).
:type instance_specification: str
:param gcp_conn_id: Id of Google Cloud Platform connection to use for
authentication
:type gcp_conn_id: str
:param project_id: Optional id of the GCP project to connect to - it overwrites
default project id taken from the GCP connection.
:type project_id: str
:param sql_proxy_version: Specific version of SQL proxy to download
(for example 'v1.13'). By default latest version is downloaded.
:type sql_proxy_version: str
:param sql_proxy_binary_path: If specified, then proxy will be
used from the path specified rather than dynamically generated. This means
that if the binary is not present in that path it will also be downloaded.
:type sql_proxy_binary_path: str
"""
def __init__(
    self,
    path_prefix: str,
    instance_specification: str,
    gcp_conn_id: str = 'google_cloud_default',
    project_id: Optional[str] = None,
    sql_proxy_version: Optional[str] = None,
    sql_proxy_binary_path: Optional[str] = None
) -> None:
    super().__init__()
    self.path_prefix = path_prefix
    if not self.path_prefix:
        raise AirflowException("The path_prefix must not be empty!")
    # Tracks whether this runner downloaded the binary itself.
    self.sql_proxy_was_downloaded = False
    self.sql_proxy_version = sql_proxy_version
    self.download_sql_proxy_dir = None
    self.sql_proxy_process = None  # type: Optional[Popen]
    self.instance_specification = instance_specification
    self.project_id = project_id
    self.gcp_conn_id = gcp_conn_id
    self.command_line_parameters = []  # type: List[str]
    # The proxy creates its unix sockets inside this directory.
    self.cloud_sql_proxy_socket_directory = self.path_prefix
    # Use the explicitly provided binary path when given; otherwise derive
    # a unique one from the path prefix (it may still need downloading).
    self.sql_proxy_path = sql_proxy_binary_path if sql_proxy_binary_path \
        else self.path_prefix + "_cloud_sql_proxy"
    self.credentials_path = self.path_prefix + "_credentials.json"
    self._build_command_line_parameters()
def _build_command_line_parameters(self) -> None:
self.command_line_parameters.extend(
['-dir', self.cloud_sql_proxy_socket_directory])
self.command_line_parameters.extend(
['-instances', self.instance_specification])
@staticmethod
def _is_os_64bit() -> bool:
return platform.machine().endswith('64')
def _download_sql_proxy_if_needed(self) -> None:
if os.path.isfile(self.sql_proxy_path):
self.log.info("cloud-sql-proxy is already present")
return
system = platform.system().lower()
processor = "amd64" if CloudSqlProxyRunner._is_os_64bit() else "386"
if not self.sql_proxy_version:
download_url = CLOUD_SQL_PROXY_DOWNLOAD_URL.format(system, processor)
else:
download_url = CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL.format(
self.sql_proxy_version, system, processor)
proxy_path_tmp = self.sql_proxy_path + ".tmp"
self.log.info("Downloading cloud_sql_proxy from %s to %s",
download_url, proxy_path_tmp)
response = requests.get(download_url, allow_redirects=True)
# Downloading to .tmp file first to avoid case where partially downloaded
# binary is used by parallel operator which uses the same fixed binary path
with open(proxy_path_tmp, 'wb') as file:
file.write(response.content)
if response.status_code != 200:
raise AirflowException(
"The cloud-sql-proxy could not be downloaded. Status code = {}. "
"Reason = {}".format(response.status_code, response.reason))
self.log.info("Moving sql_proxy binary from %s to %s",
proxy_path_tmp, self.sql_proxy_path)
shutil.move(proxy_path_tmp, self.sql_proxy_path)
os.chmod(self.sql_proxy_path, 0o744) # Set executable bit
self.sql_proxy_was_downloaded = True
    @provide_session
    def _get_credential_parameters(self, session: Session) -> List[str]:
        """Build the '-credential_file' (and optional '-projects') CLI params.

        Looks up the Airflow connection for ``gcp_conn_id`` and, depending on
        which extra is set, either points the proxy at an existing key file or
        materialises the keyfile dict into ``self.credentials_path``.

        :param session: SQLAlchemy session injected by ``@provide_session``.
        :return: extra command-line parameters for the proxy; empty when
            falling back to the default activated account.
        """
        # NOTE(review): assumes the connection id exists — a missing connection
        # would raise AttributeError on 'connection.extra_dejson'; confirm that
        # callers validate the connection id beforehand.
        connection = session.query(Connection). \
            filter(Connection.conn_id == self.gcp_conn_id).first()
        session.expunge_all()
        if connection.extra_dejson.get(GCP_CREDENTIALS_KEY_PATH):
            credential_params = [
                '-credential_file',
                connection.extra_dejson[GCP_CREDENTIALS_KEY_PATH]
            ]
        elif connection.extra_dejson.get(GCP_CREDENTIALS_KEYFILE_DICT):
            # Write the inline keyfile dict to a per-runner file so the proxy
            # binary can read it; removed again in stop_proxy().
            credential_file_content = json.loads(
                connection.extra_dejson[GCP_CREDENTIALS_KEYFILE_DICT])
            self.log.info("Saving credentials to %s", self.credentials_path)
            with open(self.credentials_path, "w") as file:
                json.dump(credential_file_content, file)
            credential_params = [
                '-credential_file',
                self.credentials_path
            ]
        else:
            self.log.info(
                "The credentials are not supplied by neither key_path nor "
                "keyfile_dict of the gcp connection %s. Falling back to "
                "default activated account", self.gcp_conn_id)
            credential_params = []
        # An empty instance specification means "forward all instances", which
        # additionally requires a project id (operator value wins over extras).
        if not self.instance_specification:
            project_id = connection.extra_dejson.get(
                'extra__google_cloud_platform__project')
            if self.project_id:
                project_id = self.project_id
            if not project_id:
                raise AirflowException("For forwarding all instances, the project id "
                                       "for GCP should be provided either "
                                       "by project_id extra in the GCP connection or by "
                                       "project_id provided in the operator.")
            credential_params.extend(['-projects', project_id])
        return credential_params
def start_proxy(self) -> None:
"""
Starts Cloud SQL Proxy.
You have to remember to stop the proxy if you started it!
"""
self._download_sql_proxy_if_needed()
if self.sql_proxy_process:
raise AirflowException("The sql proxy is already running: {}".format(
self.sql_proxy_process))
else:
command_to_run = [self.sql_proxy_path]
command_to_run.extend(self.command_line_parameters)
try:
self.log.info("Creating directory %s",
self.cloud_sql_proxy_socket_directory)
os.makedirs(self.cloud_sql_proxy_socket_directory)
except OSError:
# Needed for python 2 compatibility (exists_ok missing)
pass
command_to_run.extend(self._get_credential_parameters()) # pylint: disable=no-value-for-parameter
self.log.info("Running the command: `%s`", " ".join(command_to_run))
self.sql_proxy_process = Popen(command_to_run,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid)
while True:
line = self.sql_proxy_process.stderr.readline().decode('utf-8') \
if self.sql_proxy_process.stderr else ""
return_code = self.sql_proxy_process.poll()
if line == '' and return_code is not None:
self.sql_proxy_process = None
raise AirflowException(
"The cloud_sql_proxy finished early with return code {}!".format(
return_code))
if line != '':
self.log.info(line)
if "googleapi: Error" in line or "invalid instance name:" in line:
self.stop_proxy()
raise AirflowException(
"Error when starting the cloud_sql_proxy {}!".format(
line))
if "Ready for new connections" in line:
return
def stop_proxy(self) -> None:
"""
Stops running proxy.
You should stop the proxy after you stop using it.
"""
if not self.sql_proxy_process:
raise AirflowException("The sql proxy is not started yet")
else:
self.log.info("Stopping the cloud_sql_proxy pid: %s",
self.sql_proxy_process.pid)
self.sql_proxy_process.kill()
self.sql_proxy_process = None
# Cleanup!
self.log.info("Removing the socket directory: %s",
self.cloud_sql_proxy_socket_directory)
shutil.rmtree(self.cloud_sql_proxy_socket_directory, ignore_errors=True)
if self.sql_proxy_was_downloaded:
self.log.info("Removing downloaded proxy: %s", self.sql_proxy_path)
# Silently ignore if the file has already been removed (concurrency)
try:
os.remove(self.sql_proxy_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
self.log.info("Skipped removing proxy - it was not downloaded: %s",
self.sql_proxy_path)
if os.path.isfile(self.credentials_path):
self.log.info("Removing generated credentials file %s",
self.credentials_path)
# Here file cannot be delete by concurrent task (each task has its own copy)
os.remove(self.credentials_path)
def get_proxy_version(self) -> Optional[str]:
"""
Returns version of the Cloud SQL Proxy.
"""
self._download_sql_proxy_if_needed()
command_to_run = [self.sql_proxy_path]
command_to_run.extend(['--version'])
command_to_run.extend(self._get_credential_parameters()) # pylint: disable=no-value-for-parameter
result = subprocess.check_output(command_to_run).decode('utf-8')
pattern = re.compile("^.*[V|v]ersion ([^;]*);.*$")
matched = pattern.match(result)
if matched:
return matched.group(1)
else:
return None
def get_socket_path(self) -> str:
"""
Retrieves UNIX socket path used by Cloud SQL Proxy.
:return: The dynamically generated path for the socket created by the proxy.
:rtype: str
"""
return self.cloud_sql_proxy_socket_directory + "/" + self.instance_specification
# Template connection URIs keyed by database type -> connection kind
# ("proxy" vs "public") -> transport variant. The placeholders are filled in
# by CloudSQLDatabaseHook._generate_connection_uri().
CONNECTION_URIS = {
    "postgres": {
        "proxy": {
            "tcp":
                "postgresql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
            "socket":
                "postgresql://{user}:{password}@{socket_path}/{database}"
        },
        "public": {
            "ssl":
                "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}?"
                "sslmode=verify-ca&"
                "sslcert={client_cert_file}&"
                "sslkey={client_key_file}&"
                "sslrootcert={server_ca_file}",
            "non-ssl":
                "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}"
        }
    },
    "mysql": {
        "proxy": {
            "tcp":
                "mysql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
            "socket":
                "mysql://{user}:{password}@localhost/{database}?"
                "unix_socket={socket_path}"
        },
        "public": {
            "ssl":
                "mysql://{user}:{password}@{public_ip}:{public_port}/{database}?"
                "ssl={ssl_spec}",
            "non-ssl":
                "mysql://{user}:{password}@{public_ip}:{public_port}/{database}"
        }
    }
}  # type: Dict[str, Dict[str, Dict[str, str]]]
# Database types accepted in the 'database_type' connection extra.
CLOUD_SQL_VALID_DATABASE_TYPES = ['postgres', 'mysql']
# noinspection PyAbstractClass
class CloudSQLDatabaseHook(BaseHook):
    # pylint: disable=too-many-instance-attributes
    """
    Serves DB connection configuration for Google Cloud SQL (Connections
    of *gcpcloudsql://* type).

    The hook is a "meta" one. It does not perform an actual connection.
    It is there to retrieve all the parameters configured in gcpcloudsql:// connection,
    start/stop Cloud SQL Proxy if needed, dynamically generate Postgres or MySQL
    connection in the database and return an actual Postgres or MySQL hook.
    The returned Postgres/MySQL hooks are using direct connection or Cloud SQL
    Proxy socket/TCP as configured.

    Main parameters of the hook are retrieved from the standard URI components:

    * **user** - User name to authenticate to the database (from login of the URI).
    * **password** - Password to authenticate to the database (from password of the URI).
    * **public_ip** - IP to connect to for public connection (from host of the URI).
    * **public_port** - Port to connect to for public connection (from port of the URI).
    * **database** - Database to connect to (from schema of the URI).

    Remaining parameters are retrieved from the extras (URI query parameters):

    * **project_id** - Optional, Google Cloud Platform project where the Cloud SQL
      instance exists. If missing, default project id passed is used.
    * **instance** - Name of the instance of the Cloud SQL database instance.
    * **location** - The location of the Cloud SQL instance (for example europe-west1).
    * **database_type** - The type of the database instance (MySQL or Postgres).
    * **use_proxy** - (default False) Whether SQL proxy should be used to connect to Cloud
      SQL DB.
    * **use_ssl** - (default False) Whether SSL should be used to connect to Cloud SQL DB.
      You cannot use proxy and SSL together.
    * **sql_proxy_use_tcp** - (default False) If set to true, TCP is used to connect via
      proxy, otherwise UNIX sockets are used.
    * **sql_proxy_binary_path** - Optional path to Cloud SQL Proxy binary. If the binary
      is not specified or the binary is not present, it is automatically downloaded.
    * **sql_proxy_version** - Specific version of the proxy to download (for example
      v1.13). If not specified, the latest version is downloaded.
    * **sslcert** - Path to client certificate to authenticate when SSL is used.
    * **sslkey** - Path to client private key to authenticate when SSL is used.
    * **sslrootcert** - Path to server's certificate to authenticate when SSL is used.

    :param gcp_cloudsql_conn_id: URL of the connection
    :type gcp_cloudsql_conn_id: str
    :param gcp_conn_id: The connection ID used to connect to Google Cloud Platform for
        cloud-sql-proxy authentication.
    :type gcp_conn_id: str
    :param default_gcp_project_id: Default project id used if project_id not specified
        in the connection URL
    :type default_gcp_project_id: str
    """
    _conn = None  # type: Optional[Any]

    def __init__(
        self,
        gcp_cloudsql_conn_id: str = 'google_cloud_sql_default',
        gcp_conn_id: str = 'google_cloud_default',
        default_gcp_project_id: Optional[str] = None
    ) -> None:
        super().__init__()
        self.gcp_conn_id = gcp_conn_id
        self.gcp_cloudsql_conn_id = gcp_cloudsql_conn_id
        self.cloudsql_connection = self.get_connection(self.gcp_cloudsql_conn_id)
        self.extras = self.cloudsql_connection.extra_dejson
        self.project_id = self.extras.get('project_id', default_gcp_project_id)  # type: Optional[str]
        self.instance = self.extras.get('instance')  # type: Optional[str]
        self.database = self.cloudsql_connection.schema  # type: Optional[str]
        self.location = self.extras.get('location')  # type: Optional[str]
        self.database_type = self.extras.get('database_type')  # type: Optional[str]
        self.use_proxy = self._get_bool(self.extras.get('use_proxy', 'False'))  # type: bool
        self.use_ssl = self._get_bool(self.extras.get('use_ssl', 'False'))  # type: bool
        self.sql_proxy_use_tcp = self._get_bool(
            self.extras.get('sql_proxy_use_tcp', 'False'))  # type: bool
        self.sql_proxy_version = self.extras.get('sql_proxy_version')  # type: Optional[str]
        self.sql_proxy_binary_path = self.extras.get('sql_proxy_binary_path')  # type: Optional[str]
        self.user = self.cloudsql_connection.login  # type: Optional[str]
        self.password = self.cloudsql_connection.password  # type: Optional[str]
        self.public_ip = self.cloudsql_connection.host  # type: Optional[str]
        self.public_port = self.cloudsql_connection.port  # type: Optional[str]
        self.sslcert = self.extras.get('sslcert')  # type: Optional[str]
        self.sslkey = self.extras.get('sslkey')  # type: Optional[str]
        self.sslrootcert = self.extras.get('sslrootcert')  # type: Optional[str]
        # Port and socket path and db_hook are automatically generated
        self.sql_proxy_tcp_port = None
        self.sql_proxy_unique_path = None  # type: Optional[str]
        self.db_hook = None  # type: Optional[Union[PostgresHook, MySqlHook]]
        self.reserved_tcp_socket = None  # type: Optional[socket.socket]
        # Generated based on clock + clock sequence. Unique per host (!).
        # This is important as different hosts share the database
        self.db_conn_id = str(uuid.uuid1())
        self._validate_inputs()

    @staticmethod
    def _get_bool(val: Any) -> bool:
        """Interpret a connection-extra value as a boolean.

        Extras may arrive as real booleans (parsed JSON) or strings. Real
        booleans are returned unchanged — the previous version mapped the
        boolean False to True. Any string other than 'False' counts as True.
        """
        if isinstance(val, bool):
            return val
        if val == 'False':
            return False
        return True

    @staticmethod
    def _check_ssl_file(file_to_check, name) -> None:
        """Raise when an SSL file parameter is unset or not a readable file."""
        if not file_to_check:
            raise AirflowException("SSL connections requires {name} to be set".
                                   format(name=name))
        if not os.path.isfile(file_to_check):
            raise AirflowException("The {file_to_check} must be a readable file".
                                   format(file_to_check=file_to_check))

    def _validate_inputs(self) -> None:
        """Check required extras and reject incompatible combinations."""
        if self.project_id == '':
            raise AirflowException("The required extra 'project_id' is empty")
        if not self.location:
            raise AirflowException("The required extra 'location' is empty or None")
        if not self.instance:
            raise AirflowException("The required extra 'instance' is empty or None")
        if self.database_type not in CLOUD_SQL_VALID_DATABASE_TYPES:
            raise AirflowException("Invalid database type '{}'. Must be one of {}".format(
                self.database_type, CLOUD_SQL_VALID_DATABASE_TYPES
            ))
        if self.use_proxy and self.use_ssl:
            raise AirflowException("Cloud SQL Proxy does not support SSL connections."
                                   " SSL is not needed as Cloud SQL Proxy "
                                   "provides encryption on its own")

    def validate_ssl_certs(self) -> None:
        """
        SSL certificates validator.

        :return: None
        """
        if self.use_ssl:
            self._check_ssl_file(self.sslcert, "sslcert")
            self._check_ssl_file(self.sslkey, "sslkey")
            self._check_ssl_file(self.sslrootcert, "sslrootcert")

    def validate_socket_path_length(self) -> None:
        """
        Validates sockets path length.

        :return: None or rises AirflowException
        """
        if self.use_proxy and not self.sql_proxy_use_tcp:
            # Postgres appends its own socket-file suffix to the directory.
            if self.database_type == 'postgres':
                suffix = "/.s.PGSQL.5432"
            else:
                suffix = ""
            expected_path = "{}/{}:{}:{}{}".format(
                self._generate_unique_path(),
                self.project_id, self.instance,
                self.database, suffix)
            if len(expected_path) > UNIX_PATH_MAX:
                self.log.info("Too long (%s) path: %s", len(expected_path), expected_path)
                raise AirflowException(
                    "The UNIX socket path length cannot exceed {} characters "
                    "on Linux system. Either use shorter instance/database "
                    "name or switch to TCP connection. "
                    "The socket path for Cloud SQL proxy is now:"
                    "{}".format(
                        UNIX_PATH_MAX, expected_path))

    @staticmethod
    def _generate_unique_path() -> str:
        """
        We are not using mkdtemp here as the path generated with mkdtemp
        can be close to 60 characters and there is a limitation in
        length of socket path to around 100 characters in total.
        We append project/location/instance to it later and postgres
        appends its own prefix, so we chose a shorter "/tmp/[8 random characters]"
        """
        random.seed()
        while True:
            candidate = "/tmp/" + ''.join(
                random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
            if not os.path.exists(candidate):
                return candidate

    @staticmethod
    def _quote(value) -> Optional[str]:
        """URL-quote a value for inclusion in the connection URI; None stays None."""
        return quote_plus(value) if value else None

    def _generate_connection_uri(self) -> str:
        """Build the Postgres/MySQL connection URI for the configured mode
        (proxy TCP, proxy socket, public SSL, or public non-SSL)."""
        if self.use_proxy:
            if self.sql_proxy_use_tcp:
                if not self.sql_proxy_tcp_port:
                    self.reserve_free_tcp_port()
            if not self.sql_proxy_unique_path:
                self.sql_proxy_unique_path = self._generate_unique_path()
        if not self.database_type:
            raise ValueError("The database_type should be set")
        database_uris = CONNECTION_URIS[self.database_type]  # type: Dict[str, Dict[str, str]]
        ssl_spec = None
        socket_path = None
        if self.use_proxy:
            proxy_uris = database_uris['proxy']  # type: Dict[str, str]
            if self.sql_proxy_use_tcp:
                format_string = proxy_uris['tcp']
            else:
                format_string = proxy_uris['socket']
                socket_path = \
                    "{sql_proxy_socket_path}/{instance_socket_name}".format(
                        sql_proxy_socket_path=self.sql_proxy_unique_path,
                        instance_socket_name=self._get_instance_socket_name()
                    )
        else:
            public_uris = database_uris['public']  # type: Dict[str, str]
            if self.use_ssl:
                format_string = public_uris['ssl']
                ssl_spec = {
                    'cert': self.sslcert,
                    'key': self.sslkey,
                    'ca': self.sslrootcert
                }
            else:
                format_string = public_uris['non-ssl']
        if not self.user:
            raise AirflowException("The login parameter needs to be set in connection")
        if not self.public_ip:
            raise AirflowException("The location parameter needs to be set in connection")
        if not self.password:
            raise AirflowException("The password parameter needs to be set in connection")
        if not self.database:
            raise AirflowException("The database parameter needs to be set in connection")
        connection_uri = format_string.format(
            user=quote_plus(self.user) if self.user else '',
            password=quote_plus(self.password) if self.password else '',
            database=quote_plus(self.database) if self.database else '',
            public_ip=self.public_ip,
            public_port=self.public_port,
            proxy_port=self.sql_proxy_tcp_port,
            socket_path=self._quote(socket_path),
            ssl_spec=self._quote(json.dumps(ssl_spec)) if ssl_spec else '',
            client_cert_file=self._quote(self.sslcert) if self.sslcert else '',
            # Bug fix: the key and root-cert fields were guarded by 'sslcert'
            # instead of their own values. The old server_ca_file expression
            # also called _quote('') which returns None, injecting a literal
            # "None" into the URI.
            client_key_file=self._quote(self.sslkey) if self.sslkey else '',
            server_ca_file=self._quote(self.sslrootcert) if self.sslrootcert else ''
        )
        # Mask the (quoted) password before logging the URI.
        self.log.info("DB connection URI %s", connection_uri.replace(
            quote_plus(self.password) if self.password else 'PASSWORD', 'XXXXXXXXXXXX'))
        return connection_uri

    def _get_instance_socket_name(self) -> str:
        return self.project_id + ":" + self.location + ":" + self.instance  # type: ignore

    def _get_sqlproxy_instance_specification(self) -> str:
        """Return the '-instances' value, adding '=tcp:<port>' in TCP mode."""
        instance_specification = self._get_instance_socket_name()
        if self.sql_proxy_use_tcp:
            instance_specification += "=tcp:" + str(self.sql_proxy_tcp_port)
        return instance_specification

    def create_connection(self) -> Connection:
        """
        Create Connection object, according to whether it uses proxy, TCP, UNIX sockets, SSL.
        Connection ID will be randomly generated.
        """
        connection = Connection(conn_id=self.db_conn_id)
        uri = self._generate_connection_uri()
        self.log.info("Creating connection %s", self.db_conn_id)
        connection.parse_from_uri(uri)
        return connection

    def get_sqlproxy_runner(self) -> CloudSqlProxyRunner:
        """
        Retrieve Cloud SQL Proxy runner. It is used to manage the proxy
        lifecycle per task.

        :return: The Cloud SQL Proxy runner.
        :rtype: CloudSqlProxyRunner
        """
        if not self.use_proxy:
            raise ValueError("Proxy runner can only be retrieved in case of use_proxy = True")
        if not self.sql_proxy_unique_path:
            raise ValueError("The sql_proxy_unique_path should be set")
        return CloudSqlProxyRunner(
            path_prefix=self.sql_proxy_unique_path,
            instance_specification=self._get_sqlproxy_instance_specification(),
            project_id=self.project_id,
            sql_proxy_version=self.sql_proxy_version,
            sql_proxy_binary_path=self.sql_proxy_binary_path,
            gcp_conn_id=self.gcp_conn_id
        )

    def get_database_hook(self, connection: Connection) -> Union[PostgresHook, MySqlHook]:
        """
        Retrieve database hook. This is the actual Postgres or MySQL database hook
        that uses proxy or connects directly to the Google Cloud SQL database.
        """
        if self.database_type == 'postgres':
            self.db_hook = PostgresHook(connection=connection, schema=self.database)
        else:
            self.db_hook = MySqlHook(connection=connection, schema=self.database)
        return self.db_hook

    def cleanup_database_hook(self) -> None:
        """
        Clean up database hook after it was used.
        """
        if self.database_type == 'postgres':
            if not self.db_hook:
                raise ValueError("The db_hook should be set")
            if not isinstance(self.db_hook, PostgresHook):
                # Typo fix: the message previously read "PostrgresHook".
                raise ValueError(f"The db_hook should be PostgresHook and is {type(self.db_hook)}")
            conn = getattr(self.db_hook, 'conn')  # type: ignore
            if conn and conn.notices:
                for output in self.db_hook.conn.notices:
                    self.log.info(output)

    def reserve_free_tcp_port(self) -> None:
        """
        Reserve free TCP port to be used by Cloud SQL Proxy
        """
        # Binding to port 0 lets the OS pick a free port; the socket is kept
        # open (reserved) until free_reserved_port() releases it.
        self.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.reserved_tcp_socket.bind(('127.0.0.1', 0))
        self.sql_proxy_tcp_port = self.reserved_tcp_socket.getsockname()[1]

    def free_reserved_port(self) -> None:
        """
        Free TCP port. Makes it immediately ready to be used by Cloud SQL Proxy.
        """
        if self.reserved_tcp_socket:
            self.reserved_tcp_socket.close()
            self.reserved_tcp_socket = None
| |
"""
Copyright (c) 2016 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import json
import logging
import threading
import time
from datetime import datetime
from itertools import chain
from math import cos, radians
import numpy as np
import pyproj
import requests
from nexustiles.nexustiles import NexusTileService
from pytz import timezone, UTC
from scipy import spatial
from shapely import wkt
from shapely.geometry import Point
from shapely.geometry import box
from shapely.geos import ReadingError
from webservice.NexusHandler import SparkHandler, nexus_handler
from webservice.algorithms.doms import config as edge_endpoints
from webservice.algorithms.doms import values as doms_values
from webservice.algorithms.doms.BaseDomsHandler import DomsQueryResults
from webservice.algorithms.doms.ResultsStorage import ResultsStorage
from webservice.webmodel import NexusProcessingException
# Unix epoch as a timezone-aware datetime (UTC) used for delta computations.
EPOCH = timezone('UTC').localize(datetime(1970, 1, 1))
# strftime/strptime format for ISO-8601 timestamps with numeric offset.
ISO_8601 = '%Y-%m-%dT%H:%M:%S%z'
def iso_time_to_epoch(str_time):
    """Convert a 'YYYY-MM-DDTHH:MM:SSZ' timestamp string to seconds since the Unix epoch."""
    parsed = datetime.strptime(str_time, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=UTC)
    return (parsed - EPOCH).total_seconds()
@nexus_handler
class Matchup(SparkHandler):
    """Spark-backed handler that matches primary (satellite) measurements
    against one or more in-situ match-up datasets within configurable
    time/space tolerances, storing results via ResultsStorage.

    NOTE(review): this module uses Python 2 constructs (``long``,
    ``iteritems``/``itervalues``) and therefore targets Python 2.
    """
    name = "Matchup"
    path = "/match_spark"
    description = "Match measurements between two or more datasets"
    # Parameter documentation surfaced by the webservice endpoint registry.
    params = {
        "primary": {
            "name": "Primary Dataset",
            "type": "string",
            "description": "The Primary dataset used to find matches for. Required"
        },
        "matchup": {
            "name": "Match-Up Datasets",
            "type": "comma-delimited string",
            "description": "The Dataset(s) being searched for measurements that match the Primary. Required"
        },
        "parameter": {
            "name": "Match-Up Parameter",
            "type": "string",
            "description": "The parameter of interest used for the match up. One of 'sst', 'sss', 'wind'. Required"
        },
        "startTime": {
            "name": "Start Time",
            "type": "string",
            "description": "Starting time in format YYYY-MM-DDTHH:mm:ssZ or seconds since EPOCH. Required"
        },
        "endTime": {
            "name": "End Time",
            "type": "string",
            "description": "Ending time in format YYYY-MM-DDTHH:mm:ssZ or seconds since EPOCH. Required"
        },
        "b": {
            "name": "Bounding box",
            "type": "comma-delimited float",
            "description": "Minimum (Western) Longitude, Minimum (Southern) Latitude, "
                           "Maximum (Eastern) Longitude, Maximum (Northern) Latitude. Required"
        },
        "depthMin": {
            "name": "Minimum Depth",
            "type": "float",
            "description": "Minimum depth of measurements. Must be less than depthMax. Optional. Default: no limit"
        },
        "depthMax": {
            "name": "Maximum Depth",
            "type": "float",
            "description": "Maximum depth of measurements. Must be greater than depthMin. Optional. Default: no limit"
        },
        "tt": {
            "name": "Time Tolerance",
            "type": "long",
            "description": "Tolerance in time (seconds) when comparing two measurements. Optional. Default: 86400"
        },
        "rt": {
            "name": "Radius Tolerance",
            "type": "float",
            "description": "Tolerance in radius (meters) when comparing two measurements. Optional. Default: 1000"
        },
        "platforms": {
            "name": "Platforms",
            "type": "comma-delimited integer",
            "description": "Platforms to include for matchup consideration. Required"
        },
        "matchOnce": {
            "name": "Match Once",
            "type": "boolean",
            "description": "Optional True/False flag used to determine if more than one match per primary point is returned. "
                           + "If true, only the nearest point will be returned for each primary point. "
                           + "If false, all points within the tolerances will be returned for each primary point. Default: False"
        },
        "resultSizeLimit": {
            "name": "Result Size Limit",
            "type": "int",
            "description": "Optional integer value that limits the number of results returned from the matchup. "
                           "If the number of primary matches is greater than this limit, the service will respond with "
                           "(HTTP 202: Accepted) and an empty response body. A value of 0 means return all results. "
                           "Default: 500"
        }
    }
    singleton = True
    def __init__(self):
        SparkHandler.__init__(self, skipCassandra=True)
        self.log = logging.getLogger(__name__)
    def parse_arguments(self, request):
        """Validate and normalise all request parameters.

        :return: tuple of all parsed parameters (see the return statement).
        :raises NexusProcessingException: (HTTP 400) for any missing or
            malformed argument.
        """
        # Parse input arguments
        self.log.debug("Parsing arguments")
        try:
            bounding_polygon = request.get_bounding_polygon()
        except:
            raise NexusProcessingException(
                reason="'b' argument is required. Must be comma-delimited float formatted as Minimum (Western) Longitude, Minimum (Southern) Latitude, Maximum (Eastern) Longitude, Maximum (Northern) Latitude",
                code=400)
        primary_ds_name = request.get_argument('primary', None)
        if primary_ds_name is None:
            raise NexusProcessingException(reason="'primary' argument is required", code=400)
        matchup_ds_names = request.get_argument('matchup', None)
        if matchup_ds_names is None:
            raise NexusProcessingException(reason="'matchup' argument is required", code=400)
        parameter_s = request.get_argument('parameter', 'sst')
        if parameter_s not in ['sst', 'sss', 'wind']:
            raise NexusProcessingException(
                reason="Parameter %s not supported. Must be one of 'sst', 'sss', 'wind'." % parameter_s, code=400)
        try:
            start_time = request.get_start_datetime()
        except:
            raise NexusProcessingException(
                reason="'startTime' argument is required. Can be int value seconds from epoch or string format YYYY-MM-DDTHH:mm:ssZ",
                code=400)
        try:
            end_time = request.get_end_datetime()
        except:
            raise NexusProcessingException(
                reason="'endTime' argument is required. Can be int value seconds from epoch or string format YYYY-MM-DDTHH:mm:ssZ",
                code=400)
        if start_time > end_time:
            raise NexusProcessingException(
                reason="The starting time must be before the ending time. Received startTime: %s, endTime: %s" % (
                    request.get_start_datetime().strftime(ISO_8601), request.get_end_datetime().strftime(ISO_8601)),
                code=400)
        depth_min = request.get_decimal_arg('depthMin', default=None)
        depth_max = request.get_decimal_arg('depthMax', default=None)
        if depth_min is not None and depth_max is not None and depth_min >= depth_max:
            raise NexusProcessingException(
                reason="Depth Min should be less than Depth Max", code=400)
        time_tolerance = request.get_int_arg('tt', default=86400)
        radius_tolerance = request.get_decimal_arg('rt', default=1000.0)
        platforms = request.get_argument('platforms', None)
        if platforms is None:
            raise NexusProcessingException(reason="'platforms' argument is required", code=400)
        # Validation-only parse: confirm every platform id is an integer.
        try:
            p_validation = platforms.split(',')
            p_validation = [int(p) for p in p_validation]
            del p_validation
        except:
            raise NexusProcessingException(reason="platforms must be a comma-delimited list of integers", code=400)
        match_once = request.get_boolean_arg("matchOnce", default=False)
        result_size_limit = request.get_int_arg("resultSizeLimit", default=500)
        # NOTE: 'long' is Python 2 only.
        start_seconds_from_epoch = long((start_time - EPOCH).total_seconds())
        end_seconds_from_epoch = long((end_time - EPOCH).total_seconds())
        return bounding_polygon, primary_ds_name, matchup_ds_names, parameter_s, \
               start_time, start_seconds_from_epoch, end_time, end_seconds_from_epoch, \
               depth_min, depth_max, time_tolerance, radius_tolerance, \
               platforms, match_once, result_size_limit
    def calc(self, request, **args):
        """Run the match-up: locate candidate tiles, invoke the Spark driver,
        persist the results asynchronously, and build the HTTP response
        (HTTP 202 with empty body when the result exceeds resultSizeLimit)."""
        start = datetime.utcnow()
        # TODO Assuming Satellite primary
        bounding_polygon, primary_ds_name, matchup_ds_names, parameter_s, \
        start_time, start_seconds_from_epoch, end_time, end_seconds_from_epoch, \
        depth_min, depth_max, time_tolerance, radius_tolerance, \
        platforms, match_once, result_size_limit = self.parse_arguments(request)
        with ResultsStorage() as resultsStorage:
            execution_id = str(resultsStorage.insertExecution(None, start, None, None))
        self.log.debug("Querying for tiles in search domain")
        # Get tile ids in box
        tile_ids = [tile.tile_id for tile in
                    self._tile_service.find_tiles_in_polygon(bounding_polygon, primary_ds_name,
                                                             start_seconds_from_epoch, end_seconds_from_epoch,
                                                             fetch_data=False, fl='id',
                                                             sort=['tile_min_time_dt asc', 'tile_min_lon asc',
                                                                   'tile_min_lat asc'], rows=5000)]
        # Call spark_matchup
        self.log.debug("Calling Spark Driver")
        try:
            spark_result = spark_matchup_driver(tile_ids, wkt.dumps(bounding_polygon), primary_ds_name,
                                                matchup_ds_names, parameter_s, depth_min, depth_max, time_tolerance,
                                                radius_tolerance, platforms, match_once, sc=self._sc)
        except Exception as e:
            self.log.exception(e)
            raise NexusProcessingException(reason="An unknown error occurred while computing matches", code=500)
        end = datetime.utcnow()
        self.log.debug("Building and saving results")
        # NOTE: rebinds the (unused) **args parameter with the stored params.
        args = {
            "primary": primary_ds_name,
            "matchup": matchup_ds_names,
            "startTime": start_time,
            "endTime": end_time,
            "bbox": request.get_argument('b'),
            "timeTolerance": time_tolerance,
            "radiusTolerance": float(radius_tolerance),
            "platforms": platforms,
            "parameter": parameter_s
        }
        if depth_min is not None:
            args["depthMin"] = float(depth_min)
        if depth_max is not None:
            args["depthMax"] = float(depth_max)
        total_keys = len(spark_result.keys())
        total_values = sum(len(v) for v in spark_result.itervalues())
        details = {
            "timeToComplete": int((end - start).total_seconds()),
            "numInSituRecords": 0,
            "numInSituMatched": total_values,
            "numGriddedChecked": 0,
            "numGriddedMatched": total_keys
        }
        matches = Matchup.convert_to_matches(spark_result)
        def do_result_insert():
            # Persisted on a background thread so the HTTP response is not
            # blocked by result storage.
            with ResultsStorage() as storage:
                storage.insertResults(results=matches, params=args, stats=details,
                                      startTime=start, completeTime=end, userEmail="",
                                      execution_id=execution_id)
        threading.Thread(target=do_result_insert).start()
        if 0 < result_size_limit < len(matches):
            result = DomsQueryResults(results=None, args=args, details=details, bounds=None, count=None,
                                      computeOptions=None, executionId=execution_id, status_code=202)
        else:
            result = DomsQueryResults(results=matches, args=args, details=details, bounds=None, count=None,
                                      computeOptions=None, executionId=execution_id)
        return result
    @classmethod
    def convert_to_matches(cls, spark_result):
        """Flatten the spark result dict (primary point -> matched points)
        into a list of primary dicts, each carrying its 'matches' list."""
        matches = []
        for primary_domspoint, matched_domspoints in spark_result.iteritems():
            p_matched = [cls.domspoint_to_dict(p_match) for p_match in matched_domspoints]
            primary = cls.domspoint_to_dict(primary_domspoint)
            primary['matches'] = list(p_matched)
            matches.append(primary)
        return matches
    @staticmethod
    def domspoint_to_dict(domspoint):
        """Serialise a DomsPoint into the flat dict stored/returned by DOMS."""
        return {
            "sea_water_temperature": domspoint.sst,
            "sea_water_temperature_depth": domspoint.sst_depth,
            "sea_water_salinity": domspoint.sss,
            "sea_water_salinity_depth": domspoint.sss_depth,
            "wind_speed": domspoint.wind_speed,
            "wind_direction": domspoint.wind_direction,
            "wind_u": domspoint.wind_u,
            "wind_v": domspoint.wind_v,
            "platform": doms_values.getPlatformById(domspoint.platform),
            "device": doms_values.getDeviceById(domspoint.device),
            "x": str(domspoint.longitude),
            "y": str(domspoint.latitude),
            "point": "Point(%s %s)" % (domspoint.longitude, domspoint.latitude),
            "time": datetime.strptime(domspoint.time, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=UTC),
            "fileurl": domspoint.file_url,
            "id": domspoint.data_id,
            "source": domspoint.source,
        }
class DomsPoint(object):
def __init__(self, longitude=None, latitude=None, time=None, depth=None, data_id=None):
self.time = time
self.longitude = longitude
self.latitude = latitude
self.depth = depth
self.data_id = data_id
self.wind_u = None
self.wind_v = None
self.wind_direction = None
self.wind_speed = None
self.sst = None
self.sst_depth = None
self.sss = None
self.sss_depth = None
self.source = None
self.depth = None
self.platform = None
self.device = None
self.file_url = None
def __repr__(self):
return str(self.__dict__)
    @staticmethod
    def from_nexus_point(nexus_point, tile=None, parameter='sst'):
        """Build a DomsPoint from a gridded (satellite) nexus point.

        :param nexus_point: point exposing data_val/longitude/latitude/time/index
        :param tile: source tile; supplies metadata, ids and provenance
        :param parameter: one of 'sst', 'sss', 'wind'
        :raises NotImplementedError: for any other parameter value
        """
        point = DomsPoint()
        point.data_id = "%s[%s]" % (tile.tile_id, nexus_point.index)
        # TODO Not an ideal solution; but it works for now.
        if parameter == 'sst':
            point.sst = nexus_point.data_val.item()
        elif parameter == 'sss':
            point.sss = nexus_point.data_val.item()
        elif parameter == 'wind':
            point.wind_u = nexus_point.data_val.item()
            # Secondary wind fields live in tile metadata and may be absent for
            # some granules; missing keys/indices are silently skipped.
            try:
                point.wind_v = tile.meta_data['wind_v'][tuple(nexus_point.index)].item()
            except (KeyError, IndexError):
                pass
            try:
                point.wind_direction = tile.meta_data['wind_dir'][tuple(nexus_point.index)].item()
            except (KeyError, IndexError):
                pass
            try:
                point.wind_speed = tile.meta_data['wind_speed'][tuple(nexus_point.index)].item()
            except (KeyError, IndexError):
                pass
        else:
            raise NotImplementedError('%s not supported. Only sst, sss, and wind parameters are supported.' % parameter)
        point.longitude = nexus_point.longitude.item()
        point.latitude = nexus_point.latitude.item()
        point.time = datetime.utcfromtimestamp(nexus_point.time).strftime('%Y-%m-%dT%H:%M:%SZ')
        # NOTE(review): catching KeyError (not AttributeError) here suggests
        # nexus_point is a namedtuple-like type — confirm against NexusTileService.
        try:
            point.depth = nexus_point.depth
        except KeyError:
            # No depth associated with this measurement
            pass
        point.sst_depth = 0
        point.source = tile.dataset
        point.file_url = tile.granule
        # TODO device should change based on the satellite making the observations.
        point.platform = 9
        point.device = 5
        return point
@staticmethod
def from_edge_point(edge_point):
point = DomsPoint()
try:
x, y = wkt.loads(edge_point['point']).coords[0]
except ReadingError:
try:
x, y = Point(*[float(c) for c in edge_point['point'].split(' ')]).coords[0]
except ValueError:
y, x = Point(*[float(c) for c in edge_point['point'].split(',')]).coords[0]
point.longitude = x
point.latitude = y
point.time = edge_point['time']
point.wind_u = edge_point.get('eastward_wind')
point.wind_v = edge_point.get('northward_wind')
point.wind_direction = edge_point.get('wind_direction')
point.wind_speed = edge_point.get('wind_speed')
point.sst = edge_point.get('sea_water_temperature')
point.sst_depth = edge_point.get('sea_water_temperature_depth')
point.sss = edge_point.get('sea_water_salinity')
point.sss_depth = edge_point.get('sea_water_salinity_depth')
point.source = edge_point.get('source')
point.platform = edge_point.get('platform')
point.device = edge_point.get('device')
point.file_url = edge_point.get('fileurl')
try:
point.data_id = unicode(edge_point['id'])
except KeyError:
point.data_id = "%s:%s:%s" % (point.time, point.longitude, point.latitude)
return point
from threading import Lock
DRIVER_LOCK = Lock()
def spark_matchup_driver(tile_ids, bounding_wkt, primary_ds_name, matchup_ds_names, parameter, depth_min, depth_max,
                         time_tolerance, radius_tolerance, platforms, match_once, sc=None):
    """Run the satellite-to-in-situ matchup as a Spark job.

    Broadcasts the search parameters, partitions *tile_ids*, matches each
    partition against EDGE in-situ data, then groups results per primary
    point. Returns a dict mapping each primary DomsPoint to a list of its
    matched DomsPoints (only the nearest match when *match_once* is true).
    DRIVER_LOCK serializes concurrent driver invocations on this process.
    """
    from functools import partial
    with DRIVER_LOCK:
        # Broadcast parameters
        primary_b = sc.broadcast(primary_ds_name)
        matchup_b = sc.broadcast(matchup_ds_names)
        depth_min_b = sc.broadcast(float(depth_min) if depth_min is not None else None)
        depth_max_b = sc.broadcast(float(depth_max) if depth_max is not None else None)
        tt_b = sc.broadcast(time_tolerance)
        rt_b = sc.broadcast(float(radius_tolerance))
        platforms_b = sc.broadcast(platforms)
        bounding_wkt_b = sc.broadcast(bounding_wkt)
        parameter_b = sc.broadcast(parameter)
        # Parallelize list of tile ids
        rdd = sc.parallelize(tile_ids, determine_parllelism(len(tile_ids)))
        # Map Partitions ( list(tile_id) )
        # The spatial match happens in match_satellite_to_insitu; the filter
        # then enforces the temporal tolerance on each (primary, match) pair.
        rdd_filtered = rdd.mapPartitions(
            partial(match_satellite_to_insitu, primary_b=primary_b, matchup_b=matchup_b, parameter_b=parameter_b, tt_b=tt_b,
                    rt_b=rt_b, platforms_b=platforms_b, bounding_wkt_b=bounding_wkt_b, depth_min_b=depth_min_b,
                    depth_max_b=depth_max_b), preservesPartitioning=True) \
            .filter(lambda p_m_tuple: abs(
            iso_time_to_epoch(p_m_tuple[0].time) - iso_time_to_epoch(p_m_tuple[1].time)) <= time_tolerance)
        if match_once:
            # Only the 'nearest' point for each primary should be returned. Add an extra map/reduce which calculates
            # the distance and finds the minimum
            # Method used for calculating the distance between 2 DomsPoints
            from pyproj import Geod
            def dist(primary, matchup):
                # Geodesic (WGS84 ellipsoid) distance in meters.
                wgs84_geod = Geod(ellps='WGS84')
                lat1, lon1 = (primary.latitude, primary.longitude)
                lat2, lon2 = (matchup.latitude, matchup.longitude)
                az12, az21, distance = wgs84_geod.inv(lon1, lat1, lon2, lat2)
                return distance
            # NOTE: tuple-unpacking lambda below is Python 2 only syntax.
            rdd_filtered = rdd_filtered \
                .map(lambda (primary, matchup): tuple([primary, tuple([matchup, dist(primary, matchup)])])) \
                .reduceByKey(lambda match_1, match_2: match_1 if match_1[1] < match_2[1] else match_2) \
                .mapValues(lambda x: [x[0]])
        else:
            rdd_filtered = rdd_filtered \
                .combineByKey(lambda value: [value],  # Create 1 element list
                              lambda value_list, value: value_list + [value],  # Add 1 element to list
                              lambda value_list_a, value_list_b: value_list_a + value_list_b)  # Add two lists together
        result_as_map = rdd_filtered.collectAsMap()
    return result_as_map
def determine_parllelism(num_tiles):
    """Choose a Spark partition count for *num_tiles* tiles.

    Try to stay at a maximum of 140 tiles per partition; But don't go over
    128 partitions. Also, don't go below the default of 8.

    :param num_tiles: number of tiles to partition (int)
    :return: partition count in the range [8, 128]
    """
    # Explicit floor division keeps the result an int under both Python 2
    # and Python 3 ('/' on ints yields a float in Python 3).
    num_partitions = max(min(num_tiles // 140, 128), 8)
    return num_partitions
def add_meters_to_lon_lat(lon, lat, meters):
    """
    Uses a simple approximation of
        1 degree latitude = 111,111 meters
        1 degree longitude = 111,111 meters * cosine(latitude)
    :param lon: longitude to add meters to
    :param lat: latitude to add meters to
    :param meters: meters to add to the longitude and latitude values
    :return: (longitude, latitude) increased by given meters
    """
    degrees = meters / 111111
    # Longitude spacing shrinks with the cosine of the (original) latitude.
    new_lon = lon + degrees * cos(radians(lat))
    new_lat = lat + degrees
    return new_lon, new_lat
def match_satellite_to_insitu(tile_ids, primary_b, matchup_b, parameter_b, tt_b, rt_b, platforms_b,
                              bounding_wkt_b, depth_min_b, depth_max_b):
    """Match one partition of satellite tiles against EDGE in-situ points.

    Runs on Spark executors. Computes the partition's spatio-temporal
    envelope (padded by the radius/time tolerances and clipped to the search
    domain), queries EDGE once per matchup dataset, projects all points into
    a local azimuthal-equidistant plane, builds a k-d tree over the in-situ
    points and yields (primary DomsPoint, matched DomsPoint) pairs lazily.
    The *_b arguments are Spark broadcast variables.
    """
    the_time = datetime.now()
    tile_ids = list(tile_ids)
    if len(tile_ids) == 0:
        return []
    tile_service = NexusTileService()
    # Determine the spatial temporal extents of this partition of tiles
    tiles_bbox = tile_service.get_bounding_box(tile_ids)
    tiles_min_time = tile_service.get_min_time(tile_ids)
    tiles_max_time = tile_service.get_max_time(tile_ids)
    # Increase spatial extents by the radius tolerance
    matchup_min_lon, matchup_min_lat = add_meters_to_lon_lat(tiles_bbox.bounds[0], tiles_bbox.bounds[1],
                                                             -1 * rt_b.value)
    matchup_max_lon, matchup_max_lat = add_meters_to_lon_lat(tiles_bbox.bounds[2], tiles_bbox.bounds[3], rt_b.value)
    # Don't go outside of the search domain
    search_min_x, search_min_y, search_max_x, search_max_y = wkt.loads(bounding_wkt_b.value).bounds
    matchup_min_lon = max(matchup_min_lon, search_min_x)
    matchup_min_lat = max(matchup_min_lat, search_min_y)
    matchup_max_lon = min(matchup_max_lon, search_max_x)
    matchup_max_lat = min(matchup_max_lat, search_max_y)
    # Find the centroid of the matchup bounding box and initialize the projections
    # (aeqd centered on the box keeps planar distances usable for the k-d tree
    # radius queries below).
    matchup_center = box(matchup_min_lon, matchup_min_lat, matchup_max_lon, matchup_max_lat).centroid.coords[0]
    aeqd_proj = pyproj.Proj(proj='aeqd', lon_0=matchup_center[0], lat_0=matchup_center[1])
    lonlat_proj = pyproj.Proj(proj='lonlat')
    # Increase temporal extents by the time tolerance
    matchup_min_time = tiles_min_time - tt_b.value
    matchup_max_time = tiles_max_time + tt_b.value
    print "%s Time to determine spatial-temporal extents for partition %s to %s" % (
        str(datetime.now() - the_time), tile_ids[0], tile_ids[-1])
    # Query edge for all points within the spatial-temporal extents of this partition
    the_time = datetime.now()
    edge_session = requests.Session()
    edge_results = []
    with edge_session:
        for insitudata_name in matchup_b.value.split(','):
            bbox = ','.join(
                [str(matchup_min_lon), str(matchup_min_lat), str(matchup_max_lon), str(matchup_max_lat)])
            edge_response = query_edge(insitudata_name, parameter_b.value, matchup_min_time, matchup_max_time, bbox,
                                       platforms_b.value, depth_min_b.value, depth_max_b.value, session=edge_session)
            if edge_response['totalResults'] == 0:
                continue
            r = edge_response['results']
            # Tag every result with the dataset it came from.
            for p in r:
                p['source'] = insitudata_name
            edge_results.extend(r)
    print "%s Time to call edge for partition %s to %s" % (str(datetime.now() - the_time), tile_ids[0], tile_ids[-1])
    if len(edge_results) == 0:
        return []
    # Convert edge points to utm
    the_time = datetime.now()
    matchup_points = np.ndarray((len(edge_results), 2), dtype=np.float32)
    for n, edge_point in enumerate(edge_results):
        # Same point-string fallback parsing as DomsPoint.from_edge_point:
        # WKT first, then 'lon lat', then 'lat,lon'.
        try:
            x, y = wkt.loads(edge_point['point']).coords[0]
        except ReadingError:
            try:
                x, y = Point(*[float(c) for c in edge_point['point'].split(' ')]).coords[0]
            except ValueError:
                y, x = Point(*[float(c) for c in edge_point['point'].split(',')]).coords[0]
        matchup_points[n][0], matchup_points[n][1] = pyproj.transform(p1=lonlat_proj, p2=aeqd_proj, x=x, y=y)
    print "%s Time to convert match points for partition %s to %s" % (
        str(datetime.now() - the_time), tile_ids[0], tile_ids[-1])
    # Build kdtree from matchup points
    the_time = datetime.now()
    m_tree = spatial.cKDTree(matchup_points, leafsize=30)
    print "%s Time to build matchup tree" % (str(datetime.now() - the_time))
    # The actual matching happens in the generator. This is so that we only load 1 tile into memory at a time
    match_generators = [match_tile_to_point_generator(tile_service, tile_id, m_tree, edge_results, bounding_wkt_b.value,
                                                      parameter_b.value, rt_b.value, lonlat_proj, aeqd_proj) for tile_id
                        in tile_ids]
    return chain(*match_generators)
def match_tile_to_point_generator(tile_service, tile_id, m_tree, edge_results, search_domain_bounding_wkt,
                                  search_parameter, radius_tolerance, lonlat_proj, aeqd_proj):
    """Yield (primary, matched) DomsPoint pairs for a single tile.

    Loads the tile, masks it to the search polygon, projects its valid
    points into the same aeqd plane as *m_tree*, and queries the in-situ
    tree for all points within *radius_tolerance* of each tile point.
    Generator so that only one tile is held in memory at a time.
    """
    from nexustiles.model.nexusmodel import NexusPoint
    from webservice.algorithms_spark.Matchup import DomsPoint  # Must import DomsPoint or Spark complains
    # Load tile
    try:
        the_time = datetime.now()
        tile = tile_service.mask_tiles_to_polygon(wkt.loads(search_domain_bounding_wkt),
                                                  tile_service.find_tile_by_id(tile_id))[0]
        print "%s Time to load tile %s" % (str(datetime.now() - the_time), tile_id)
    except IndexError:
        # This should only happen if all measurements in a tile become masked after applying the bounding polygon
        raise StopIteration
    # Convert valid tile lat,lon tuples to UTM tuples
    the_time = datetime.now()
    # Get list of indices of valid values
    valid_indices = tile.get_indices()
    # Each index is (time, lat, lon); project the lon/lat pair.
    primary_points = np.array(
        [pyproj.transform(p1=lonlat_proj, p2=aeqd_proj, x=tile.longitudes[aslice[2]], y=tile.latitudes[aslice[1]]) for
         aslice in valid_indices])
    print "%s Time to convert primary points for tile %s" % (str(datetime.now() - the_time), tile_id)
    a_time = datetime.now()
    p_tree = spatial.cKDTree(primary_points, leafsize=30)
    print "%s Time to build primary tree" % (str(datetime.now() - a_time))
    a_time = datetime.now()
    # For every tile point, all in-situ indices within the radius tolerance.
    matched_indexes = p_tree.query_ball_tree(m_tree, radius_tolerance)
    print "%s Time to query primary tree for tile %s" % (str(datetime.now() - a_time), tile_id)
    for i, point_matches in enumerate(matched_indexes):
        if len(point_matches) > 0:
            p_nexus_point = NexusPoint(tile.latitudes[valid_indices[i][1]],
                                       tile.longitudes[valid_indices[i][2]], None,
                                       tile.times[valid_indices[i][0]], valid_indices[i],
                                       tile.data[tuple(valid_indices[i])])
            p_doms_point = DomsPoint.from_nexus_point(p_nexus_point, tile=tile, parameter=search_parameter)
            for m_point_index in point_matches:
                m_doms_point = DomsPoint.from_edge_point(edge_results[m_point_index])
                yield p_doms_point, m_doms_point
def query_edge(dataset, variable, startTime, endTime, bbox, platform, depth_min, depth_max, itemsPerPage=1000,
               startIndex=0, stats=True, session=None):
    """Query the EDGE endpoint for *dataset* and return the decoded response.

    Numeric epoch times are converted to ISO-8601 strings (string inputs are
    passed through); a comma-separated *platform* string is split into a
    list. All paginated results are folded into the first response's
    'results' list before returning. When *session* is given, all HTTP GETs
    go through it.
    """
    def _get(url, params=None):
        # Route through the caller's session when one was provided.
        if session is not None:
            return session.get(url, params=params)
        return requests.get(url, params=params)

    try:
        startTime = datetime.utcfromtimestamp(startTime).strftime('%Y-%m-%dT%H:%M:%SZ')
    except TypeError:
        # Assume we were passed a properly formatted string
        pass
    try:
        endTime = datetime.utcfromtimestamp(endTime).strftime('%Y-%m-%dT%H:%M:%SZ')
    except TypeError:
        # Assume we were passed a properly formatted string
        pass
    try:
        platform = platform.split(',')
    except AttributeError:
        # Assume we were passed a list
        pass
    params = {
        "variable": variable,
        "startTime": startTime,
        "endTime": endTime,
        "bbox": bbox,
        "minDepth": depth_min,
        "maxDepth": depth_max,
        "platform": platform,
        "itemsPerPage": itemsPerPage,
        "startIndex": startIndex,
        "stats": str(stats).lower(),
    }
    first_request = _get(edge_endpoints.getEndpointByName(dataset)['url'], params=params)
    first_request.raise_for_status()
    edge_response = json.loads(first_request.text)
    # Get all edge results
    next_page_url = edge_response.get('next', None)
    while next_page_url is not None:
        page_request = _get(next_page_url)
        page_request.raise_for_status()
        page_response = json.loads(page_request.text)
        edge_response['results'].extend(page_response['results'])
        next_page_url = page_response.get('next', None)
    return edge_response
| |
"""Implements testing routines for given iterative method solvers"""
import traceback
import sys
from functools import partial
from numpy import eye, dot, max, sqrt, zeros, ones
from numpy.linalg import solve
from pprint import pprint
from sympy.utilities.pytest import skip
from ignition.dsl.flame.tensors import all_back_sub, numpy_print, T, Tensor
# Cap on how many derived algorithms get executed per equation set, and the
# back-substitution search depth (-1 appears to mean "no level limit" --
# see all_back_sub; confirm).
TEST_NUM_SOLS = 1000
TEST_LEVELS = -1
# Residual 2-norm threshold at which the iterative solvers stop.
ABS_RES = 1e-3
#b = random(100)
# Right-hand side: a unit impulse at index 48 of a 100-vector.
b = zeros(100)
b[48] = 1.0
#x0 = random(100)
# Initial guess used by cg_alg_from_sols.
x0 = zeros(100)
#x0 = ones(100)
def get_fd_poisson(n=100):
    """Return (A, b): an n x n finite-difference Poisson-like matrix and rhs.

    NOTE(review): the off-diagonal loop runs i = 1 .. n-3, so A[1][0],
    A[0][1], A[n-1][n-2] and A[n-2][n-1] are never set -- compared to the
    usual tridiagonal stencil this looks like an off-by-one; confirm against
    the intended discretization.
    NOTE(review): the returned b is the module-level 100-element vector, not
    an n-sized one, so calling with n != 100 yields a size mismatch.
    """
    A = 2 * eye(n)
    # Endpoint rows get a 1 on the diagonal.
    A[0][0] = 1
    A[n - 1][n - 1] = 1
    for i in xrange(1, n - 2):
        A[i + 1][i] = A[i][i + 1] = -1
    # b = random(n)
    # b = ones(n)
    return A, b
def cg_alg_driver(alg, n=100):
    """Run *alg* on the FD Poisson system and report its error.

    *alg* is called as alg(A, b, n) and must return (iterations, x).
    Returns (iterations, l2_error, linf_error) against the direct solve.
    Note: linf_error is the largest *signed* component of the difference,
    not its absolute value.
    """
    matrix, rhs = get_fd_poisson(n)
    iters, approx = alg(matrix, rhs, n)
    exact = solve(matrix, rhs)
    err = approx - exact
    err_inf = max(err)
    err_2 = sqrt(dot(err, err))
    # assert(l_inf < 1e-10)
    # assert(l_2 < 1e-10)
    return iters, err_2, err_inf
def cg_alg_gold(A, b, n=100):
    """Standard cg algorithm for comparisons.

    Conjugate gradient taken from the 'Templates' book. Returns
    (last_iteration_index, solution); stops when the residual 2-norm drops
    below ABS_RES or after n iterations.
    """
    # Taken from Templates book
    # x_i = random(b.shape)
    x_i = zeros(b.shape)
    r_i = b - dot(A, x_i)
    rho_i_1 = 0
    for i in xrange(n):
        rho_i = dot(r_i, r_i)
        if i == 0:
            p_i = r_i
        else:
            # beta = rho_i / rho_{i-1} reuses the previous residual norm.
            beta_i = rho_i / rho_i_1
            p_i = r_i + beta_i * p_i
        q_i = dot(A, p_i)
        alpha_i = rho_i / dot(p_i, q_i)
        print "iteration %d, residual %.2e, alpha %.2e" \
            % (i, sqrt(sum([x ** 2 for x in r_i])), alpha_i)
        x_i = x_i + alpha_i * p_i
        r_i = r_i - alpha_i * q_i
        rho_i_1 = rho_i
        if sqrt(sum([r ** 2 for r in r_i ])) < ABS_RES:
            break
    return i, x_i
def cg_saad(A, b, n=100):
    """Saad CG algorithm.

    CG variant that updates the residual norm via
    s_{i+1} = alpha^2 * (Ap . Ap) - s_i instead of recomputing r.r.
    Returns (last_iteration_index, solution); stops when the residual
    2-norm drops below ABS_RES or after n iterations.
    """
    # x_i = random(b.shape)
    x_i = zeros(b.shape)
    r_i = b - dot(A, x_i)
    z_i = r_i
    p_i = z_i
    s_i = dot(r_i, z_i)
    for i in xrange(n):
        v_i = dot(A, p_i)
        vAp = dot(v_i, dot(A, p_i))
        App = dot(dot(A, p_i), p_i)
        alpha_i = s_i / App
        # Recurrence for the next r.z without an extra dot product.
        s_i_1 = alpha_i ** 2 * vAp - s_i
        beta_i = s_i_1 / s_i
        s_i = s_i_1
        print "iteration %d, residual %.2e, alpha %.2e" \
            % (i, sqrt(sum([x ** 2 for x in r_i])), alpha_i)
        x_i = x_i + alpha_i * p_i
        r_i = r_i - alpha_i * dot(A, p_i)
        p_i = z_i - alpha_i * v_i + beta_i * p_i
        z_i = z_i - alpha_i * v_i
        if sqrt(sum([r ** 2 for r in r_i ])) < ABS_RES:
            break
    return i, x_i
def cg_saad_meurant(A, b, n=100):
    """Saad Meurant CG algorithm.

    Like cg_saad but recomputes rz = r.z each iteration rather than carrying
    it through a recurrence. Returns (last_iteration_index, solution); stops
    when the residual 2-norm drops below ABS_RES or after n iterations.
    """
    # x_i = random(b.shape)
    x_i = zeros(b.shape)
    r_i = b - dot(A, x_i)
    z_i = r_i
    p_i = z_i
    for i in xrange(n):
        q_i = dot(A, p_i)
        v_i = q_i
        vAp = dot(v_i, dot(A, p_i))
        App = dot(dot(A, p_i), p_i)
        rz = dot(r_i, z_i)
        alpha_i = rz / App
        s_i = alpha_i ** 2 * vAp - rz
        beta_i = s_i / rz
        print "iteration %d, residual %.2e, alpha %.2e" \
            % (i, sqrt(sum([x ** 2 for x in r_i])), alpha_i)
        x_i = x_i + alpha_i * p_i
        r_i = r_i - alpha_i * q_i
        p_i = z_i - alpha_i * v_i + beta_i * p_i
        z_i = z_i - alpha_i * v_i
        if sqrt(sum([r ** 2 for r in r_i ])) < ABS_RES:
            break
    return i, x_i
def sols_to_co(sols):
    """Compile a back-substitution solution into an executable code object.

    *sols* is a (solution_dict, order_list) pair: solution_dict maps each
    variable to its symbolic expression and order_list gives variables whose
    assignment order is already fixed. The remaining variables are emitted in
    dependency order (a variable is deferred while it still appears in
    another unordered variable's expression). Source lines are prepended, so
    the assignments execute bottom-up relative to discovery order. Raises
    ValueError if the dependencies are circular.
    """
    sol, order = sols
    src = ""
    q = sol.keys()
    for var in order:
        src = "%s = %s\n" % (str(var), numpy_print(sol[var])) \
            + src
        q.remove(var)
    reject = []
    len_reject = 0
    while len(q) != 0 or len(reject) != 0:
        if len(q) == 0:
            # Re-queue the deferred variables; if a full pass made no
            # progress (reject did not shrink), we are stuck in a cycle.
            if len_reject == len(reject):
                break
            len_reject = len(reject)
            q = reject
            reject = []
        var = q.pop(0)
        rejected = False
        # Defer var while some still-unordered expression references it.
        for k in q:
            if var in sol[k]:
                reject.append(var)
                rejected = True
                break
        if rejected:
            continue
        src = "%s = %s\n" % (str(var), numpy_print(sol[var])) + src
    if len(reject) != 0:
        raise ValueError("Sols has circular dependency,\n%s" % \
                         str((reject, sol)))
    return compile(src, "<sols_to_co>", "exec")
def cg_alg_from_sols(sols, A, b, n=101):
    """Run a CG iteration whose update rules come from a derived solution.

    The code object produced by sols_to_co reads the *_1 ("current") local
    variables and is expected to bind the *_2 ("next") variables via
    exec(); after each exec the next values are shifted back into the
    current slots. Starts from the module-level initial guess x0. Returns
    (last_iteration_index, solution); stops when the residual 2-norm drops
    below ABS_RES or after n iterations.
    """
    # Next-step slots the exec'd code is expected to populate.
    p_2 = None
    x_2 = None
    r_2 = None
    q_2 = None
    pi_2 = None
    # x_1 = random(b.shape)
    # x_1 = zeros(b.shape)
    x_1 = x0
    r_1 = b - dot(A, x_1)
    p_1 = r_1
    q_1 = dot(A, p_1)
    delta_1 = 0
    pi_1 = dot(p_1, dot(A, p_1))
    co = sols_to_co(sols)
    for i in xrange(n):
        # Executes the generated update assignments against our locals.
        exec(co)
        p_1 = p_2
        x_1 = x_2
        r_1 = r_2
        q_1 = q_2
        pi_1 = pi_2
        if sqrt(sum([r ** 2 for r in r_1 ])) < ABS_RES:
            break
    return i, x_1
def run_cg_algorithms(cg_eqns, knowns):
    """Derive CG update algorithms from *cg_eqns* and test each one.

    Back-substitutes the equation set (capped at TEST_NUM_SOLS solutions),
    runs each derived algorithm through cg_alg_driver and prints its
    iteration count and l2/linf errors. Failures are caught, reported with
    a traceback, and do not stop the remaining algorithms.
    """
    print("Solving for CG updates.")
    cg_sols = all_back_sub(cg_eqns, knowns, levels=TEST_LEVELS)
    tot_test = min(len(cg_sols), TEST_NUM_SOLS)
    print("Found %d solutions, testing the first %d" \
          % (len(cg_sols), tot_test))
    for i in xrange(tot_test):
        print "=" * 80
        print "Algorithm %d:" % i
        pprint(cg_sols[i][0])
        try:
            iters, l2, linf = cg_alg_driver(partial(cg_alg_from_sols, cg_sols[i]))
            print "-" * 80
            print "iters = %d" % iters
            print "l2 error = %.2e" % l2
            print "linf error = %.2e" % linf
        except Exception as e:
            print "-" * 80
            print "Algorithm failed"
            print e
            traceback.print_exc(file=sys.stdout)
    print "=" * 80
def test_reg_cg():
    """Derive and exercise CG updates from the basic 4x4 PME equation set."""
    delta_1 = Tensor('delta_1', rank=0)
    mu_12 = Tensor('mu_12', rank=0)
    r_1, r_2, p_1, p_2, x_1, x_2 = [
        Tensor(name, rank=1)
        for name in ('r_1', 'r_2', 'p_1', 'p_2', 'x_1', 'x_2')]
    A = Tensor('A', rank=2)
    # Quantities known at the start of an iteration.
    knowns = [p_1, r_1, x_1, A]
    # CG equations coming from a 4x4 PME.
    cg_eqns = [
        delta_1 * A * p_1 - r_1 + r_2,
        p_2 - r_2 + p_1 * mu_12,
        x_2 - x_1 - delta_1 * p_1,
        T(r_1) * r_2,
        T(p_1) * A * p_2,
    ]
    run_cg_algorithms(cg_eqns, knowns)
def test_expanded_cg():
    """Derive CG updates from the expanded (explicit q-vector) equation set."""
    skip("Test takes too long")
    delta_1 = Tensor('delta_1', rank=0)
    mu_12 = Tensor('mu_12', rank=0)
    r_1, r_2, q_1, q_2, p_1, p_2, x_1, x_2 = [
        Tensor(name, rank=1)
        for name in ('r_1', 'r_2', 'q_1', 'q_2', 'p_1', 'p_2', 'x_1', 'x_2')]
    A = Tensor('A', rank=2)
    # Quantities known at the start of an iteration.
    knowns = [p_1, q_1, r_1, x_1, A]
    # CG equations coming from a 4x4 PME.
    cg_eqns = [
        delta_1 * q_1 - r_1 + r_2,
        delta_1 * A * p_1 - r_1 + r_2,
        q_2 - A * p_2,
        q_2 - A * r_2 + q_1 * mu_12,
        p_2 - r_2 + p_1 * mu_12,
        x_2 - x_1 - delta_1 * p_1,
        T(r_1) * r_2,
        T(p_1) * q_2,
    ]
    run_cg_algorithms(cg_eqns, knowns)
def test_chronos_cg():
    """Derive CG updates from the 'chronos' equation variant."""
    skip("Test takes too long")
    delta_1, omega_2, pi_1, pi_2, mu_12 = [
        Tensor(name, rank=0)
        for name in ('delta_1', 'omega_2', 'pi_1', 'pi_2', 'mu_12')]
    r_1, r_2, q_1, q_2, p_1, p_2, x_1, x_2 = [
        Tensor(name, rank=1)
        for name in ('r_1', 'r_2', 'q_1', 'q_2', 'p_1', 'p_2', 'x_1', 'x_2')]
    A, R_0, P_0 = [Tensor(name, rank=2) for name in ('A', 'R_0', 'P_0')]
    # Quantities known at the start of an iteration.
    knowns = [pi_1, p_1, r_1, q_1, x_1, A, R_0, P_0]
    # The chronos variant of the CG equations.
    chronos_eqns = [
        r_2 - r_1 - delta_1 * q_1,
        q_2 - A * p_2,
        p_2 - r_2 + p_1 * mu_12,
        q_2 - A * r_2 + q_1 * mu_12,
        x_2 - x_1 - delta_1 * p_1,
        omega_2 - T(r_2) * r_2,
        pi_2 - T(p_2) * A * p_2,
        T(R_0) * r_2,
        T(r_1) * r_2,
        T(P_0) * A * p_2,
        T(p_1) * A * p_2,
        T(p_2) * A * p_2 - T(r_2) * A * r_2 + T(mu_12) * pi_1 * mu_12,
    ]
    run_cg_algorithms(chronos_eqns, knowns)
def test_cg_gold():
    """Smoke-test the reference (Templates book) CG implementation."""
    iters, l2, linf = cg_alg_driver(cg_alg_gold)
    print l2, linf
    # assert(l2 < 1e-10)
    # assert(linf < 1e-10)
def test_cg_saad_meurant():
    """Smoke-test the Saad-Meurant CG variant."""
    iters, l2, linf = cg_alg_driver(cg_saad_meurant)
    print l2, linf
    # assert(l2 < 1e-10)
    # assert(linf < 1e-10)
def test_cg_saad():
    """Smoke-test the Saad CG variant."""
    iters, l2, linf = cg_alg_driver(cg_saad)
    print l2, linf
if __name__ == "__main__":
    # Manual entry point: uncomment the driver(s) you want to run.
    # test_reg_cg()
    test_expanded_cg()
    # test_chronos_cg()
    # test_cg_saad()
    # test_cg_saad_meurant()
    # test_cg_gold()
| |
# parameters_t.py
#-*- coding: utf-8 -*-
from decimal import Decimal
from loris import img_info
from loris.loris_exception import RequestException
from loris.loris_exception import SyntaxException
from loris.parameters import DECIMAL_ONE
from loris.parameters import FULL_MODE
from loris.parameters import PCT_MODE
from loris.parameters import PIXEL_MODE
from loris.parameters import RegionParameter
from loris.parameters import RotationParameter
from loris.parameters import SizeParameter
import loris_t
"""
Parameter object tests. To run this test on its own, do:
$ python -m unittest -v tests.parameters_t
from the `/loris` (not `/loris/loris`) directory.
"""
# TODO: bring over old tile-generator test for precision checking.
class _ParameterUnitTest(loris_t.LorisTest):
    """Shared fixture helpers for the parameter unit tests."""

    def _get_info(self):
        """Return ImageInfo for the colour JP2 fixture (y is the long dimension)."""
        # jp2, y is long dimension
        fp = self.test_jp2_color_fp
        fmt = self.test_jp2_color_fmt
        uri = self.test_jp2_color_uri
        # Removed an unused 'ident' local that was never passed along.
        return img_info.ImageInfo.from_image_file(uri, fp, fmt)

    def _get_info2(self):
        """Return ImageInfo for the JPEG fixture (x is the long dimension)."""
        # jpeg, x is long dimension
        fp = self.test_jpeg_fp
        fmt = self.test_jpeg_fmt
        uri = self.test_jpeg_uri
        return img_info.ImageInfo.from_image_file(uri, fp, fmt)
class Test_G_RegionParameterUnit(_ParameterUnitTest):
    """Unit tests for RegionParameter slot population, canonical URI values,
    and syntax/request error handling.

    Uses assertEqual throughout (assertEquals is a deprecated alias).
    """
    def test_a_populate_slots_from_pct(self):
        info = self._get_info()
        rp = RegionParameter('pct:25,25,50,50', info)
        self.assertEqual(rp.pixel_x, int(info.width*0.25))
        self.assertEqual(rp.pixel_y, int(info.height*0.25))
        self.assertEqual(rp.pixel_w, int(info.width*0.50))
        self.assertEqual(rp.pixel_h, int(info.height*0.50))
        self.assertEqual(rp.decimal_x, Decimal('0.25'))
        self.assertEqual(rp.decimal_y, Decimal('0.25'))
        self.assertEqual(rp.decimal_w, Decimal('0.50'))
        self.assertEqual(rp.decimal_h, Decimal('0.50'))
    def test_b_populate_slots_from_pixel(self):
        info = self._get_info2()
        rp = RegionParameter('797,900,1594,1600', info)
        self.assertEqual(rp.pixel_x, 797)
        self.assertEqual(rp.pixel_y, 900)
        self.assertEqual(rp.pixel_w, 1594)
        self.assertEqual(rp.pixel_h, 1600)
        self.assertEqual(rp.decimal_x, rp.pixel_x / Decimal(str(info.width)))
        self.assertEqual(rp.decimal_y, rp.pixel_y / Decimal(str(info.height)))
        self.assertEqual(rp.decimal_w, rp.pixel_w / Decimal(str(info.width)))
        self.assertEqual(rp.decimal_h, rp.pixel_h / Decimal(str(info.height)))
    def test_c_canonical_uri_value_oob_w_pixel(self):
        info = self._get_info2() # x is long dimension
        x = 200
        offset = 1
        oob_w = info.width - x + offset
        rp = RegionParameter('%d,13,%d,17' % (x,oob_w), info)
        expected_canonical = '%d,13,%d,17' % (x, info.width - x)
        # Note that the below will need to be changed if decimal precision is
        # changed (currently 25 places)
        self.assertEqual(rp.decimal_w, Decimal('0.9444444444444444444444444'))
        self.assertEqual(rp.canonical_uri_value, expected_canonical)
    def test_d_canonical_uri_value_oob_w_pct(self):
        info = self._get_info() # y is long dimension
        x = 20
        w = 81
        rp = RegionParameter('pct:%d,13,%d,27' % (x,w), info)
        self.assertEqual(rp.decimal_w, Decimal('0.8'))
        expected_canonical = '1181,936,4725,1944'
        self.assertEqual(rp.canonical_uri_value, expected_canonical)
    def test_e_canonical_uri_value_oob_y_pixel(self):
        info = self._get_info() # y is long dimension
        y = 300
        offset = 1 # request would be this many pixels OOB
        oob_h = info.height - y + offset
        rp = RegionParameter('29,%d,31,%d' % (y,oob_h), info)
        expected_canonical = '29,%d,31,%d' % (y, info.height - y)
        self.assertEqual(rp.canonical_uri_value, expected_canonical)
    def test_f_canonical_uri_value_oob_y_pct(self):
        info = self._get_info2() # x is long dimension
        y = 28.3
        h = 72.2
        rp = RegionParameter('pct:13,%f,17,%f' % (y,h), info)
        expected_canonical = '468,845,612,2142'
        self.assertEqual(rp.canonical_uri_value, expected_canonical)
    def test_g_syntax_exceptions(self):
        info = self._get_info()
        try:
            with self.assertRaises(SyntaxException):
                RegionParameter('n:1,2,3,4', info)
            with self.assertRaises(SyntaxException):
                RegionParameter('1,2,3,q', info)
            with self.assertRaises(SyntaxException):
                RegionParameter('1,2,3', info)
            with self.assertRaises(SyntaxException):
                RegionParameter('something', info)
        except TypeError: # python < 2.7
            self.assertRaises(SyntaxException, RegionParameter, 'something', info)
            self.assertRaises(SyntaxException, RegionParameter, '1,2,3,q', info)
            self.assertRaises(SyntaxException, RegionParameter, '1,2,3', info)
            self.assertRaises(SyntaxException, RegionParameter, 'something', info)
    def test_h_request_exceptions(self):
        info = self._get_info()
        try:
            with self.assertRaises(RequestException):
                RegionParameter('1,2,0,3', info)
            with self.assertRaises(RequestException):
                RegionParameter('1,2,3,0', info)
            with self.assertRaises(RequestException):
                RegionParameter('pct:100,2,3,0', info)
        except TypeError: # python < 2.7
            self.assertRaises(RequestException, RegionParameter, '1,2,0,3', info)
            self.assertRaises(RequestException, RegionParameter, '1,2,3,0', info)
            self.assertRaises(RequestException, RegionParameter, 'pct:100,2,3,0', info)
class Test_H_RegionParameterFunctional(_ParameterUnitTest):
    """Placeholder for end-to-end RegionParameter tests through the client."""
    # TODO: with client once other parameters are impl.
    pass
class Test_I_SizeParameterUnit(_ParameterUnitTest):
    """Unit tests for SizeParameter parsing and canonical URI values.

    Uses assertEqual throughout (assertEquals is a deprecated alias).
    """
    def test_a_exceptions(self):
        info = self._get_info()
        rp = RegionParameter('pct:25,25,75,75', info)
        try:
            with self.assertRaises(SyntaxException):
                SizeParameter('!25,',rp)
            with self.assertRaises(SyntaxException):
                SizeParameter('!25',rp)
            with self.assertRaises(SyntaxException):
                SizeParameter('25',rp)
        except TypeError: # python < 2.7
            self.assertRaises(SyntaxException, SizeParameter, '!25,', rp)
            self.assertRaises(SyntaxException, SizeParameter, '!25', rp)
            self.assertRaises(SyntaxException, SizeParameter, '25', rp)
    def test_b_populate_slots_from_full(self):
        # full
        info = self._get_info()
        rp = RegionParameter('full', info)
        sp = SizeParameter('full',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, FULL_MODE)
        self.assertEqual(sp.canonical_uri_value, FULL_MODE)
        rp = RegionParameter('256,256,256,256', info)
        sp = SizeParameter('full',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, FULL_MODE)
        self.assertEqual(sp.canonical_uri_value, FULL_MODE)
    def test_c_populate_slots_from_pct(self):
        # pct:n
        info = self._get_info()
        rp = RegionParameter('full', info)
        sp = SizeParameter('pct:25',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PCT_MODE)
        self.assertEqual(sp.canonical_uri_value, '1476,')
        rp = RegionParameter('256,256,256,256', info)
        sp = SizeParameter('pct:25',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PCT_MODE)
        self.assertEqual(sp.canonical_uri_value, '64,')
        rp = RegionParameter('pct:0,0,50,50', info)
        sp = SizeParameter('pct:25',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PCT_MODE)
        self.assertEqual(sp.canonical_uri_value, '738,')
    def test_c_populate_slots_from_w_only(self):
        # w,
        info = self._get_info()
        rp = RegionParameter('full', info)
        sp = SizeParameter('180,',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '180,')
        rp = RegionParameter('200,300,500,600', info)
        sp = SizeParameter('125,',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '125,')
    def test_d_populate_slots_from_h_only(self):
        # ,h
        info = self._get_info()
        rp = RegionParameter('full', info)
        sp = SizeParameter(',90',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '73,')
        rp = RegionParameter('50,290,360,910', info)
        sp = SizeParameter(',275',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '108,')
    def test_e_populate_slots_from_wh(self):
        # w,h
        info = self._get_info()
        rp = RegionParameter('full', info)
        sp = SizeParameter('48,48',rp)
        self.assertEqual(sp.force_aspect, True)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '48,48')
        rp = RegionParameter('15,16,23,42', info)
        sp = SizeParameter('60,60',rp) # upsample!
        self.assertEqual(sp.force_aspect, True)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '60,60')
    def test_f_populate_slots_from_bang_wh(self):
        # !w,h
        info = self._get_info()
        rp = RegionParameter('full', info)
        sp = SizeParameter('!120,140',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '114,')
        rp = RegionParameter('0,0,125,160', info)
        sp = SizeParameter('!120,140',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '109,')
        rp = RegionParameter('0,0,125,160', info)
        sp = SizeParameter('!130,140',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '109,')
        rp = RegionParameter('50,80,140,160', info)
        sp = SizeParameter('!130,180',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '130,')
        rp = RegionParameter('50,80,140,160', info)
        sp = SizeParameter('!145,165',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '144,')
        rp = RegionParameter('50,80,140,180', info)
        sp = SizeParameter('!145,185',rp)
        self.assertEqual(sp.force_aspect, False)
        self.assertEqual(sp.mode, PIXEL_MODE)
        self.assertEqual(sp.canonical_uri_value, '143,')
class Test_J_SizeParameterFunctional(_ParameterUnitTest):
    """Placeholder for end-to-end SizeParameter tests through the client."""
    pass
class Test_K_RotationParameterUnit(_ParameterUnitTest):
    """Unit tests for RotationParameter parsing, mirroring and c14n.

    Uses assertEqual throughout (assertEquals is a deprecated alias);
    unused 'rp =' bindings inside the assertRaises blocks were dropped.
    """
    def test_exceptions(self):
        try:
            with self.assertRaises(SyntaxException):
                RotationParameter('a')
            with self.assertRaises(SyntaxException):
                RotationParameter('361')
            with self.assertRaises(SyntaxException):
                RotationParameter('-1')
            with self.assertRaises(SyntaxException):
                RotationParameter('!-1')
            with self.assertRaises(SyntaxException):
                RotationParameter('!361')
            with self.assertRaises(SyntaxException):
                RotationParameter('-0.1')
        except TypeError: # Python < 2.7
            self.assertRaises(SyntaxException, RotationParameter, 'a')
            self.assertRaises(SyntaxException, RotationParameter, '361')
            self.assertRaises(SyntaxException, RotationParameter, '-1')
            self.assertRaises(SyntaxException, RotationParameter, '!-1')
            self.assertRaises(SyntaxException, RotationParameter, '!361')
            self.assertRaises(SyntaxException, RotationParameter, '-0.1')
    def test_uri_value(self):
        rp = RotationParameter('0')
        self.assertEqual(rp.rotation, '0')
        rp = RotationParameter('46')
        self.assertEqual(rp.rotation, '46')
        rp = RotationParameter('180')
        self.assertEqual(rp.rotation, '180')
    def test_mirroring(self):
        # A leading '!' requests mirroring before rotation.
        rp = RotationParameter('180')
        self.assertFalse(rp.mirror)
        rp = RotationParameter('!180')
        self.assertTrue(rp.mirror)
    def test_c14n(self):
        # Trailing zeros / '.0' are stripped from the canonical value.
        rp = RotationParameter('42.10')
        self.assertEqual(rp.canonical_uri_value, '42.1')
        rp = RotationParameter('180.0')
        self.assertEqual(rp.canonical_uri_value, '180')
        rp = RotationParameter('!180.0')
        self.assertEqual(rp.canonical_uri_value, '!180')
        rp = RotationParameter('!180.10')
        self.assertEqual(rp.canonical_uri_value, '!180.1')
def suite():
    """Assemble every parameter test case class into one TestSuite."""
    import unittest
    case_classes = (
        Test_G_RegionParameterUnit,
        Test_H_RegionParameterFunctional,
        Test_I_SizeParameterUnit,
        Test_J_SizeParameterFunctional,
        Test_K_RotationParameterUnit,
    )
    test_suites = [unittest.makeSuite(klass, 'test') for klass in case_classes]
    return unittest.TestSuite(test_suites)
| |
# vim:ts=4:sts=4:sw=4:expandtab
"""Uniform view for Python reflection information.
"""
import inspect
import os
import sys
import types
from satori.objects import Object, Argument, ArgumentError, DispatchOn
class Reflector(Object, dict):
    """Caches and interrelates descriptors for reflected objects.

    A dict mapping reflected Python objects (modules, classes, methods,
    functions) to their Descriptor instances, created lazily on first
    lookup via the DispatchOn-selected _create overloads.
    """
    def __init__(self):
        # Explicitly added descriptor groups, in insertion order.
        self.groups = []
        # Implicit group covering the system modules; shares this cache.
        self.implicit = SystemModules(cache=self)
    def __getitem__(self, obj):
        # Lazily create and cache a descriptor the first time obj is seen.
        if obj not in self:
            descriptor = self._create(obj)
            self[obj] = descriptor
        return super(Reflector, self).__getitem__(obj)
    def add(self, type_, **kwargs):
        """Construct and add a descriptor of a given type.
        """
        kwargs['cache'] = self
        group = type_(**kwargs)
        self.groups.append(group)
        return group
    # The stacked DispatchOn decorators register one _create implementation
    # per reflected object type; the repeated method name is intentional.
    @DispatchOn(obj=types.ModuleType)
    def _create(self, obj): # pylint: disable-msg=E0102
        return Module(obj=obj, cache=self)
    @DispatchOn(obj=types.ClassType)
    @DispatchOn(obj=types.TypeType)
    def _create(self, obj): # pylint: disable-msg=E0102
        return Class(obj=obj, cache=self)
    @DispatchOn(obj=types.MethodType)
    def _create(self, obj): # pylint: disable-msg=E0102
        return Method(obj=obj, cache=self)
    @DispatchOn(obj=types.FunctionType)
    def _create(self, obj): # pylint: disable-msg=E0102
        return Function(obj=obj, cache=self)
    def __iter__(self):
        # Walk every group; the shared 'seen' set is passed to traverse(),
        # presumably so each descriptor is yielded at most once -- confirm
        # against Group.traverse.
        seen = set()
        for group in self.groups:
            for descendant in group.traverse(seen):
                yield descendant
def _ismodule(p):
    """True when the (name, descriptor) pair wraps a Module."""
    return isinstance(p[1], Module)
def _isclass(p):
    """True for public (not underscore-prefixed) Class descriptors."""
    return isinstance(p[1], Class) and not p[0].startswith('_')
def _ismethod(p):
    """True for public Method descriptors."""
    return isinstance(p[1], Method) and not p[0].startswith('_')
def _isfunction(p):
    """True for public Function descriptors."""
    return isinstance(p[1], Function) and not p[0].startswith('_')
class Descriptor(Object):
    """Base class for descriptors of reflected objects.
    """
    @Argument('cache', type=Reflector)
    def __init__(self, obj, cache):
        self.object = obj
        self.cache = cache
        self.name = getattr(self.object, '__name__', None)
        # Cleaned docstring of the described object ("" when absent).
        self.docstring = inspect.cleandoc(getattr(self.object, '__doc__', None) or "")
    class source_file(object): # pylint: disable-msg=C0103
        """Lazy property. The path of the source file for the described object.
        """
        # Non-data descriptor trick: __get__ stores the computed value as
        # an instance attribute of the same name, which then shadows this
        # descriptor, so the work happens at most once per instance.
        def __get__(_, self, _type=None): # pylint: disable-msg=E0213
            try:
                self.source_file = os.path.abspath(inspect.getsourcefile(self.object))
                return self.source_file
            except: # pylint: disable-msg=W0702
                # No retrievable source (builtins, C extensions, ...).
                return None
    source_file = source_file()
    class source_line(object): # pylint: disable-msg=C0103
        """Lazy property. The number of the first source line for the described object.
        """
        def __get__(_, self, _type=None): # pylint: disable-msg=E0213
            try:
                # Caches source_code and source_line together in one pass.
                # NOTE(review): getsourcelines() keeps each line's trailing
                # newline, so '\n'.join doubles line spacing — confirm intended.
                lines = inspect.getsourcelines(self.object)
                self.source_code = '\n'.join(lines[0])
                self.source_line = lines[1] or 1
                return self.source_line
            except: # pylint: disable-msg=W0702
                return None
    source_line = source_line()
    class source_code(object): # pylint: disable-msg=C0103
        """Lazy property. The source code for the described object.
        """
        def __get__(_, self, _type=None): # pylint: disable-msg=E0213
            try:
                # Same computation as source_line; whichever property is
                # touched first fills in both cached attributes.
                lines = inspect.getsourcelines(self.object)
                self.source_code = '\n'.join(lines[0])
                self.source_line = lines[1] or 1
                return self.source_code
            except: # pylint: disable-msg=W0702
                return None
    source_code = source_code()
    @property
    def children(self):
        """Generator. Enumerate this descriptor's children.
        """
        # Members whose descriptor cannot be built (unsupported type,
        # framework argument error, ...) are silently skipped.
        for name in dir(self.object):
            try:
                obj = getattr(self.object, name)
                yield name, self.cache[obj]
            except (AttributeError, KeyError, TypeError, ArgumentError):
                pass
    # Filtered child views built on the module-level predicates.
    modules = property(lambda self: (x for x in self.children if _ismodule(x)))
    classes = property(lambda self: (x for x in self.children if _isclass(x)))
    methods = property(lambda self: (x for x in self.children if _ismethod(x)))
    functions = property(lambda self: (x for x in self.children if _isfunction(x)))
    def traverse(self, seen=None):
        """Generator. Enumerate this descriptor's descendants.
        """
        # Depth-first walk; 'seen' guards against cycles and duplicates.
        seen = seen or set()
        if self in seen:
            return
        seen.add(self)
        yield self
        for _, child in self.children:
            for descendant in child.traverse(seen):
                yield descendant
class ModuleGroup(Descriptor):
    """A Descriptor for a group of modules.

    Serves as its own parent and group; concrete groups fill module_list.
    """
    @Argument('obj', fixed=None)
    def __init__(self):
        self.module_list = []
        self.parent = self
        self.group = self
    @property
    def children(self):
        """Generator. Enumerate this descriptor's children.

        Yields (name, descriptor) pairs sorted by module name.
        """
        # key= replaces the original Python-2-only positional cmp
        # comparator; the resulting order is identical.
        for module in sorted(self.module_list, key=lambda mod: mod.__name__):
            yield module.__name__, self.cache[module]
    def __contains__(self, module):
        return module in self.module_list
class SystemModules(ModuleGroup):
    """Catch-all group that claims every module.

    Used as the implicit fallback for modules no explicit group owns.
    """
    def __str__(self):
        return "(system modules)"
    def __contains__(self, module):
        # Every module belongs to the system group.
        return True
class Location(ModuleGroup):
    """A Descriptor for module group defined in a single place.
    """
    @Argument('root', type=str)
    def __init__(self, root):
        def walk(root, base=[]): # pylint: disable-msg=W0102
            """Generator. Walk a directory hierarchy looking for Python modules.

            Yields dotted-name parts (as lists) for every package and
            module found under 'root'.
            """
            for entry in os.listdir(root):
                path = os.path.join(root, entry)
                if os.path.isdir(path):
                    # Recurse only into proper packages.
                    if os.path.isfile(os.path.join(path, '__init__.py')):
                        for module in walk(path, base + [entry]):
                            yield module
                if not os.path.isfile(path):
                    continue
                if entry[-3:] != '.py':
                    continue
                if entry == '__init__.py':
                    # The package itself, named by its directory path.
                    yield base
                else:
                    yield base + [entry[:-3]]
        self.root = root
        # Temporarily prepend root to sys.path so the discovered
        # modules can be imported by dotted name.
        sys.path.insert(0, self.root)
        for parts in walk(self.root):
            name = '.'.join(parts)
            __import__(name)
            self.module_list.append(sys.modules[name])
        sys.path.remove(self.root)
    def __str__(self):
        return self.root
class Module(Descriptor):
    """A Descriptor for a module.
    """
    def __init__(self):
        # Pick the last explicit group that claims this module (if any).
        self.group = None
        for group in self.cache.groups:
            if self.object in group:
                self.group = group
        if self.name.count('.'):
            # Dotted name: the parent is the enclosing package's
            # descriptor; inherit its group when none claimed us.
            parent = sys.modules[self.name[:self.name.rfind('.')]]
            self.parent = self.cache[parent]
            self.group = self.group or self.parent.group
        else:
            # Top-level module: fall back to the implicit system group.
            self.group = self.group or self.cache.implicit
            self.parent = self.group
    @property
    def children(self):
        """Generator. Enumerate this descriptor's children.
        """
        # Only report children that consider this module their parent;
        # this filters out names merely imported into the module.
        for name, child in super(Module, self).children:
            if child.parent is self:
                yield name, child
    def __str__(self):
        return "module {0} at {1}".format(self.name, self.group)
class Class(Descriptor):
    """A Descriptor for a class.
    """
    def __init__(self):
        # Parent is the descriptor of the module defining this class.
        self.parent = self.cache[sys.modules[self.object.__module__]]
        self.group = self.parent.group
        # Descriptors for all direct base classes.
        self.bases = [self.cache[base] for base in self.object.__bases__]
    def __str__(self):
        return "class {0} in {1}".format(self.name, self.parent)
class Callable(Descriptor):
    """A Descriptor for a callable.
    """
    def __init__(self):
        # Render a printable signature like "a, b=1, *args, **kwargs".
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # this module targets Python 2.
        spec = inspect.getargspec(self.object)
        args = spec.args
        defs = spec.defaults or ()
        sign = []
        for index, name in enumerate(args):
            # Defaults align with the tail of the argument list, so the
            # negative index picks the matching default value.
            if len(args) - index <= len(defs):
                sign.append(name + '=' + str(defs[index-len(args)]))
            else:
                sign.append(name)
        if spec.varargs is not None:
            sign.append('*' + spec.varargs)
        if spec.keywords is not None:
            sign.append('**' + spec.keywords)
        self.signature = ', '.join(sign)
    @property
    def children(self):
        """Generator. Enumerate this descriptor's children.

        Callables have no reflected children.
        """
        return []
    def __str__(self):
        return "{0}({1}) in {2}".format(self.name, self.signature, self.parent)
class Method(Callable):
    """A Descriptor for a method.
    """
    def __init__(self):
        # im_class / func_code are Python 2 method attributes.
        class_ = self.object.im_class
        code = self.object.func_code
        # Walk the MRO to find the class that actually defines this
        # implementation (the base whose attribute shares our code
        # object); the last match wins.
        for base in class_.__mro__:
            if hasattr(base, self.name):
                impl = getattr(base, self.name, None)
                if getattr(impl, 'func_code', None) is code:
                    class_ = base
        self.parent = self.cache[class_]
        self.group = self.parent.group
class Function(Callable):
    """A Descriptor for a function.
    """
    def __init__(self):
        # Parent is the descriptor of the module defining this function.
        self.parent = self.cache[sys.modules[self.object.__module__]]
        self.group = self.parent.group
| |
"""
implement recommend for parts
@author: Bowen, Ray, Yu
"""
from design.models import parts, team_parts, teams
from elasticsearch import Elasticsearch
from operator import itemgetter
import json
import os.path
import pickle
from design.search_part import get_func_parts
BASE = os.path.dirname(os.path.abspath(__file__))
def getApriorRecommend(chainStr, funcStr=None):
    """
    get recommendations with the apriori algorithm
    @param chainStr: part chain, part ids joined by '_'
    @type chainStr: str
    @param funcStr: optional '_'-separated function keywords used to
        boost parts that match the wanted functions
    @type funcStr: str
    @return : recommendations
    @rtype: dict
    """
    dataList = chainStr.split('_')
    #dataList = dataList[len(dataList)-2:len(dataList)]
    fList = list()
    # Frequent itemsets precomputed offline and pickled next to the app.
    with open(BASE+'/../freq.txt', 'rb') as f:
        fList = pickle.load(f)
    strResult = getResult(dataList, fList, funcStr)
    recommend_list = list()
    for partId in strResult:
        # Resolve each recommended part id against the database.
        partObj = parts.objects.get(part_id=int(partId))
        partInfo = {
            'part_id': partObj.part_id,
            'part_name': partObj.part_name,
            'part_type': partObj.part_type,
        }
        recommend_list.append(partInfo)
    result = {
        'isSuccessful' : True,
        'recommend_list': recommend_list,
    }
    return result
def analyseData(dataList, dataLength=2):
    """Build all combinations of ``dataLength`` distinct input elements.

    @param dataList: items to combine
    @type dataList: iterable
    @param dataLength: target combination size
    @type dataLength: int
    @return: sets of exactly ``dataLength`` distinct elements, in order
        of first discovery
    @rtype: list of set
    """
    # Start from singleton sets, one per input item.
    singletons = [set([item]) for item in dataList]
    current = list(singletons)
    # Grow combinations by unioning each singleton with the previous
    # generation, keeping first-seen order and skipping duplicates.
    for _ in range(dataLength - 1):
        merged = []
        for single in singletons:
            for candidate in current:
                union = single.union(candidate)
                if union not in merged:
                    merged.append(union)
        current = merged
    # Unions of overlapping sets can fall short of the target size; the
    # original removed them by mutating the list while iterating it
    # (retrying with a flag until none remained) — this filter is the
    # safe equivalent.
    return [combo for combo in current if len(combo) >= dataLength]
def getResult(currentList, dataList, funcStr):
    """Rank candidate next parts from the frequent itemsets.

    Finds itemsets containing the current chain (or a suffix of it),
    collects the extra parts, scores them by discovery rank, optionally
    boosts function matches, and returns part ids best-first.
    @param currentList: current chain of part ids
    @type currentList: list
    @param dataList: frequent itemsets, grouped by size
    @type dataList: list
    @param funcStr: optional '_'-separated function keywords
    @type funcStr: str
    @return: part ids ordered by descending score
    @rtype: list
    """
    dataList = toFrozenset(dataList)
    dataLength = len(currentList)
    max_length = 4
    resultList = []
    if dataLength == 0:
        return resultList
    if dataLength > max_length:
        # Only the most recent max_length parts matter.
        # (Was hard-coded to 4, shadowing the max_length constant.)
        currentList = currentList[dataLength - max_length:]
        dataLength = max_length
    while dataLength > 0:
        for item in dataList:
            for itemset in item:
                if frozenset(currentList).issubset(itemset):
                    # Collect the parts the itemset adds beyond the chain.
                    if (itemset ^ frozenset(currentList)) not in resultList:
                        resultList.append(itemset ^ frozenset(currentList))
        # Enough candidates found; otherwise retry with a shorter suffix.
        # NOTE(review): early-exit placement reconstructed from context —
        # confirm against the original source.
        if len(resultList) >= 5:
            break
        currentList = currentList[1:]
        dataLength = dataLength - 1
    resultList = toBeOne(resultList)
    result_part_count = len(resultList)
    dictionary_result = {}
    # Earlier discoveries score higher (linear rank-based score).
    for each_part in range(result_part_count):
        dictionary_result[resultList[each_part]] = 100 - (100 * each_part) / result_part_count
    if funcStr:
        adjuct_to_func(funcStr, dictionary_result)
    final_result = list()
    for part_pair in sorted(dictionary_result.items(), key=itemgetter(1), reverse=True):
        final_result.append(part_pair[0])
    return final_result
def adjuct_to_func(funcStr, dictionary_result):
    """Boost the score of parts that match the wanted functions.

    Mutates ``dictionary_result`` in place: every part id found in the
    function-matched part list gets +10.
    @param funcStr: '_'-separated function keywords; a single leading or
        trailing '_' is tolerated
    @type funcStr: str
    @param dictionary_result: part id -> score
    @type dictionary_result: dict
    """
    # Trim one leading/trailing separator before splitting.
    if funcStr.startswith('_'):
        funcStr = funcStr[1:]
    if funcStr.endswith('_'):
        funcStr = funcStr[:-1]
    func_part_list = get_func_parts(funcStr.split('_'))
    for key in dictionary_result:
        # NOTE: long() is Python 2 only.
        if long(key) in func_part_list:
            dictionary_result[key] += 10
def toBeOne(data):
    """Flatten a list of sets into one deduplicated list.

    Elements appear in first-seen order.
    @param data: iterable of sets/frozensets
    @return: flat list of unique members
    @rtype: list
    """
    flat = []
    for group in data:
        for member in list(group):
            if member not in flat:
                flat.append(member)
    return flat
def toFrozenset(data):
    """Convert nested itemset groups to frozensets.

    @param data: list of groups, each a list of iterables
    @return: same structure with every inner iterable as a frozenset
    @rtype: list of list of frozenset
    """
    converted = []
    for group in data:
        converted.append([frozenset(entry) for entry in group])
    return converted
def getMarkovRecommend(part_id):
    """
    get recommendations with Markov algorithm
    @param part_id: part id
    @type part_id: str
    @return : recommendations; 'isSuccessful' is False when no chain
        could be predicted
    @rtype: dict
    """
    result = {
        'isSuccessful' : True,
    }
    # Predict up to 5 chains of length 4 following part_id, using the
    # transition matrix loaded from disk.
    predictChains = predict(4, 5, part_id, loadA())
    if not predictChains:
        result['isSuccessful'] = False
        return result
    chains = list()
    for predictChain in predictChains:
        chain = list()
        for part in predictChain:
            infos = getPartNameAndType(part)
            if not infos[0]:
                # Unknown part id: truncate this chain here.
                break
            item = {
                'part_id':part,
                'part_name': infos[0],
                'part_type' : infos[1]
            }
            chain.append(item)
        chains.append(chain)
    result['recommend_list'] = chains
    return result
def loadA():
    """Load the Markov transition matrix from tran.json.

    @return: transition matrix (part -> {next part -> probability})
    @rtype: dict
    """
    # Context manager ensures the handle is closed (the original leaked it).
    with open(BASE+'/../tran.json', 'r') as tranFile:
        return json.loads(tranFile.read())
def getPartNameAndType(part_id):
    """Look up a part's name and type by id.

    @param part_id: part id (anything int() accepts)
    @return: (part_name, part_type), or (None, None) when the lookup or
        the int conversion fails
    @rtype: tuple
    """
    try:
        partObj = parts.objects.get(part_id=int(part_id))
        return partObj.part_name, partObj.part_type
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate.
        return None, None
def get_chain(elem, num, process):
    """Reconstruct the predicted chain ending at ``elem``.

    Follows the back-pointers recorded in ``process`` from row ``num``
    back to the start element.
    args:
        elem: the last element in the chain
        num: the row index in process where elem was recorded
        process: per-step dicts mapping element -> [probability, predecessor]
    return:
        the chain from the first element up to and including elem
    """
    predecessor = process[num][elem][1]
    # A missing predecessor marks the chain's starting element.
    if predecessor is None:
        return [elem]
    links = get_chain(predecessor, num - 1, process)
    links.append(elem)
    return links
def predict(m, count, s, A):
    """predict the chain after s

    Runs a Viterbi-style forward pass: for each step keep, per element,
    the highest-probability path reaching it and a back-pointer, then
    reconstruct the best chains.
    CAUTION the number of chains maybe less than count
    args:
        m: the length of the predicted chain
        count: the number of predicted chains wanted
        s: the last element of the current chain
        A: transition matrix (element -> {next element -> probability})
    return:
        list of chains (each excluding s), or None when nothing can be
        predicted
    """
    process = []
    # Seed: the current element with probability 1 and no predecessor.
    process.append({s: [1, None]})
    for i in range(m):
        line = process[-1]
        next_line = {}
        for key in line.keys():
            # Elements with no outgoing transitions dead-end here.
            if A.get(key) is None:
                continue
            for k in A[key].keys():
                # Keep only the most probable path into k.
                p = next_line.get(k, [0, None])[0]
                if p < A[key][k] * line[key][0]:
                    next_line[k] = [A[key][k] * line[key][0], key]
        process.append(next_line)
    ans = process[-1]
    # sort according to probability from high to low
    # (.items() works on both Python 2 and 3; iteritems is Py2-only)
    ans = sorted(ans.items(), key=lambda item: item[1][0], reverse=True)
    if len(ans) == 0:
        return None # Can't predict, because no answer can be found
    count = min(len(ans), count) # the number of ans maybe less than count
    chains = []
    length = len(process)
    for i in range(count):
        elem = ans[i][0]
        chain = get_chain(elem, length-1, process)
        # Drop the seed element s; callers only want the continuation.
        chains.append(chain[1:])
    return chains
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.